mirror of https://github.com/paboyle/Grid.git synced 2025-06-13 20:57:06 +01:00

Compare commits


4848 Commits

Author SHA1 Message Date
b284d50863 Checking in fixed adaptive WilsonFlow 2021-06-07 14:20:27 -04:00
92def28bd3 Update README.md 2021-06-06 04:52:05 -04:00
ca10bfa1c7 removing Travis CI, which constantly fails by running over the time limit (no way we can compile Grid within the free allowance anymore) 2021-06-04 11:12:22 +01:00
0e27e3847d Remove synch 2021-06-03 04:24:19 +00:00
8cfc7342cd staggered hand unroll read coalesce 2021-05-05 14:17:18 -07:00
15ae317858 Merge branch 'develop' of https://github.com/paboyle/Grid into develop 2021-05-04 08:40:38 -07:00
834f536b5f Fastest option on SyCL is now std::complex 2021-05-04 08:40:18 -07:00
c332d9f08b Merge pull request #356 from felixerben/bugfix/stoutSmearing
Jamie's fix
2021-04-27 14:10:49 -04:00
cf2923d5dd Jamie's fix 2021-04-27 16:53:37 +01:00
0e4413ddde Merge pull request #355 from felixerben/bugfix/stoutSmearing
bugfix 3D stout smearing
2021-04-27 08:01:55 -04:00
009ccd581e bugfix 3D stout smearing 2021-04-26 10:36:33 +01:00
8cd4263974 Tests compile 2021-04-25 22:20:37 -04:00
d45c868656 Change interface 2021-04-25 10:53:34 -04:00
955a8113de Expose label only to reduce number of parameters 2021-04-25 10:36:38 -04:00
dbe210dd53 Open the ens_id 2021-04-25 10:25:59 -04:00
86e11743ca set twists 2021-04-20 10:19:11 -04:00
980e721f6e Update MetaData.h 2021-04-13 09:33:01 -04:00
e2a0142d87 Merge pull request #348 from AndrewYongZhenNing/develop
Conserved Tadpole Implementation for Shamir Action Only
2021-04-06 10:49:00 -04:00
895244ecc3 Merge with upstream; implemented conserved tadpole for Shamir action. 2021-04-06 13:46:33 +01:00
addeb621a7 Implemented tadpole operator for Shamir action. 2021-04-06 13:45:37 +01:00
a7fb25adf6 Make Cshift fields static to avoid repeated reallocate overhead 2021-03-29 21:44:14 +02:00
e947992957 Improved force terms 2021-03-29 20:04:06 +02:00
bb89a82a07 Staggered coalesced read 2021-03-29 20:01:15 +02:00
8bdadbadac Cold start 2021-03-18 15:41:14 -04:00
15c50a7442 Explicit instantiate the template function 2021-03-18 15:40:42 -04:00
49b0af2c95 Update of tests to compile with the sRNG addition.
Audited the code conventions (again) with the CPS momentum denominator
and added anti-periodic in time to the Test_mobius_force.cc and
tested the Test_dwf_gpforce.

Promoted these to test the full HMC Hamiltonian, tr P^2/2 + phidag MdagM phi

with the same pdot and Udot as audited in the Integrator.h etc...

With full comments and sources for factors.
2021-03-18 09:10:02 -04:00
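For reference, the Hamiltonian quoted in this commit message, rendered in LaTeX exactly as written above (P is the conjugate momentum, M the fermion matrix):

```latex
H \;=\; \tfrac{1}{2}\,\mathrm{tr}\,P^{2} \;+\; \phi^{\dagger}\, M^{\dagger} M\, \phi
```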
9c2b37218a sRNG parameter added 2021-03-18 06:24:11 -04:00
3c67d626ba Merge branch 'develop' of https://github.com/paboyle/Grid into develop 2021-03-12 15:36:55 +01:00
51f506553c Read out the local ID once, and store 2021-03-12 15:33:04 +01:00
226be84937 Merge branch 'develop' of https://github.com/paboyle/Grid into develop 2021-03-12 09:31:50 -05:00
001814b442 updated to do list. Start adding DDHMC work items 2021-03-12 09:31:17 -05:00
db3ac67506 Update thread issue 2021-03-12 14:55:07 +01:00
da91a884ef NVCC versions found buggy added as guard 2021-03-11 23:54:53 +01:00
a71e6755e3 Merge branch 'develop' of https://github.com/paboyle/Grid into develop 2021-03-11 22:43:06 +01:00
cd5891eecd Test that fails on Cuda 11.0 2021-03-11 22:34:28 +01:00
5bb7336f27 Merge pull request #347 from pjgeorg/fix-autotools-avx512
Fix inconsistent SIMD option AVX512

Thanks
2021-03-11 16:29:07 -05:00
ce1fc1f48a Possible fallback plan for Fionn's compiler bug in nvcc 2021-03-11 22:20:53 +01:00
82402c6a7c Add simd option SKL for ICC 2021-03-11 13:08:40 +01:00
d9c4afe5b7 Fix inconsistent configure option AVX512
Before this change AVX512 enabled different instruction sets depending
on the compiler:

For Intel C++ Compiler Classic (ICC):
    AVX512F, AVX512CD, AVX512DQ, AVX512BW, AVX512VL
    i.e. Intel Xeon Skylake and newer

For Intel ICX, gcc, clang:
    AVX512F, AVX512CD, AVX512ER, AVX512PF
    i.e. Intel Xeon Phi x200/x205 (KNL/KNM)

With this commit AVX512 now only enables the common instruction sets
supported by all CPUs that support any AVX-512 instruction set:
AVX512F and AVX512CD (called COMMON-AVX512 by icc)
2021-03-11 12:58:49 +01:00
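To illustrate the distinction described in this commit, here is a small stand-alone check (plain C++, not part of Grid) that relies only on the standard __AVX512*__ macros predefined by gcc/clang/icc and prints which AVX-512 subsets a given compiler/flag combination actually enabled:

```cpp
// Illustrative only: report which AVX-512 subsets the compiler enabled.
// The __AVX512*__ macros are predefined by gcc/clang/icc when the
// corresponding -mavx512* (or equivalent) options are active.
#include <cstdio>

int main() {
#ifdef __AVX512F__
  std::puts("AVX512F  (foundation)");
#endif
#ifdef __AVX512CD__
  std::puts("AVX512CD (conflict detection)");
#endif
#ifdef __AVX512DQ__
  std::puts("AVX512DQ (Skylake-X and newer)");
#endif
#ifdef __AVX512BW__
  std::puts("AVX512BW (Skylake-X and newer)");
#endif
#ifdef __AVX512VL__
  std::puts("AVX512VL (Skylake-X and newer)");
#endif
#ifdef __AVX512ER__
  std::puts("AVX512ER (KNL/KNM only)");
#endif
#ifdef __AVX512PF__
  std::puts("AVX512PF (KNL/KNM only)");
#endif
  return 0;
}
```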
f786ff8d69 Extend test from Fionn, fails on A100 apparently 2021-03-10 14:32:06 -05:00
a651caed5f Merge branch 'develop' of https://github.com/paboyle/Grid into develop 2021-03-10 06:23:51 -08:00
0e21adb3f6 Gives 200GF/s on SyCL/DG1 8^4, doesn't uglify develop for other platforms too badly.
Easy to revert to cleaner, more C++-stylistic code. There's a SYCL_HACK macro I will clean up later once dpcpp
evolves a central nervous system.
2021-03-10 05:40:51 -08:00
58bf9b9e6d Clean up test 2021-03-10 02:45:22 +01:00
2146eebb65 Merge branch 'develop' of https://github.com/paboyle/Grid into develop 2021-03-09 04:31:46 +01:00
6a429ee6d3 2d loop hits Nvidia 16bit limit on large local vols 2021-03-09 04:31:10 +01:00
4d1ea15c79 More verbosity. The 16bit limit on Grid.y, Grid.z is annoying 2021-03-09 04:29:37 +01:00
a76cb005e0 Update Tensor_exp.h 2021-03-08 13:37:57 -05:00
a9604367c1 Merge pull request #336 from lehner/feature/gpt
Make ShmDims configurable; adjust GRID_MAX_SIMD to allow for 128 byte width on GPUs
2021-03-05 13:17:19 -05:00
d7065023cc Merge pull request #332 from mmphys/feature/mres_schur
Optional changes to Test_cayley_mres e.g. Schur solver
2021-03-05 12:47:07 -05:00
89d299ceec Merge pull request #333 from mmphys/bugfix/LatTransfer
Fix convertType for GPU in Lattice_transfer.h
2021-03-05 12:46:33 -05:00
e34eda66df Merge pull request #344 from felixerben/feature/XiToSigma
Feature/xi to sigma
2021-03-05 12:45:44 -05:00
b24181aa4f Update Coordinate.h
Revert GRID_MAX_SIMD change
2021-03-05 16:56:58 +01:00
aa173e2998 Update README.md 2021-03-05 10:25:33 -05:00
7a19432e0b whitespace 2021-03-05 10:57:09 +00:00
9b15704290 tested and consistent 2021-03-05 10:42:32 +00:00
017f955b2d Merge branch 'develop' into feature/mres_schur
* develop:
  Pass serial RNG around
  Sycl happier
2021-03-04 20:42:02 +00:00
f252d69eef Merge branch 'develop' into bugfix/LatTransfer
* develop:
  Pass serial RNG around
  Sycl happier
2021-03-04 20:41:30 +00:00
3b06e4655e Merge branch 'develop' into feature/XiToSigma 2021-03-04 20:06:16 +00:00
d4b4de8f42 changes 2021-03-04 20:01:24 +00:00
c90beee774 Merge branch 'develop' of https://github.com/paboyle/Grid into develop 2021-03-03 23:50:29 +01:00
1eea9d73b9 Pass serial RNG around 2021-03-03 23:50:01 +01:00
679d1d22f7 Sycl happier 2021-03-03 11:21:43 -08:00
b2b5e0b98c Merge branch 'develop' into feature/mres_schur
* develop:
  Hand unrolled to use optimised code paths on GPU for coalesced reads in Wilson case. Other cases to do. This now includes comms code path.
  Better SIMD usage/coalescence
2021-03-03 16:15:12 +00:00
03e54722c1 Merge branch 'develop' into bugfix/LatTransfer
* develop:
  Hand unrolled to use optimised code paths on GPU for coalesced reads in Wilson case. Other cases to do. This now includes comms code path.
2021-03-03 16:13:23 +00:00
442336bd96 Hand unrolled to use optimised code paths on GPU for coalesced reads in Wilson case.
Other cases to do. This now includes comms code path.
2021-03-02 14:50:51 +01:00
9c9566b9c9 Merge pull request #23 from paboyle/develop
Sync
2021-03-01 12:33:51 +01:00
1059a81a3c Merge branch 'develop' into bugfix/LatTransfer
* develop:
  Better SIMD usage/coalescence
2021-02-27 00:21:36 +00:00
2e61556389 Merge branch 'develop' of https://github.com/paboyle/Grid into develop 2021-02-26 17:52:20 +01:00
f9b1f240f6 Better SIMD usage/coalescence 2021-02-26 17:51:41 +01:00
69f41469dd Merge branch 'develop' into bugfix/LatTransfer
* develop: (26 commits)
  Added the ability to apply a custom "filter" to the conjugate momentum in the Integrator classes, applied both after refresh and after applying the forces. Added a conjugate momentum "filter" that applies a phase to each site. With sites set to 1.0 or 0.0 this acts as a mask and enables, for example, the freezing of inactive gauge links in DDHMC. Added tests/forces/Test_momentum_filter demonstrating the use of the filter to freeze boundary links
  Correct misleading ac help string
  Enable performance counting in WilsonFermion like in others
  changed back A2AUtils warning
  changed if and accelerator_for - no runtime errors any more
  Mac OS (Darwin) sed -i flag for in-place editing differs from posix / gnu
  Seems the intention with AutoConf produced Grid/Config.h was to use sed to translate standard PACKAGE_ #defines into GRID_ however due to missing '' after -i this hasn't been working. Perhaps it is too late to fix this, since we don't know who/what is relying on this downstream? ... but if they are, and AutoConf is being used, then likely these #defines have just been redefined anyway. Seems reasonable to redefine PACKAGE and VERSION as well, as none of these macros are used throughout Grid or Hadrons.
  Fixed compile issues with maxLocalNorm2 for non-scalar lattices maxLocalNorm2 test now reuses the random field
  MADWF 5d source option for hadrons - look at Grid of source Abort on GPU error
  maxLocalNorm2()
  change back benchmark_ITT
  prettify
  Flop cout matches DiRAC-ITT-2020
  revert changes
  merge develop
  fixes
  weird bug in 2pt function...
  revert changes
  final version, tested on CPU and GPU
  bugfix
  ...
2021-02-25 09:19:17 +00:00
d620b303ff Merge branch 'develop' into feature/mres_schur
* develop: (26 commits)
  Added the ability to apply a custom "filter" to the conjugate momentum in the Integrator classes, applied both after refresh and after applying the forces. Added a conjugate momentum "filter" that applies a phase to each site. With sites set to 1.0 or 0.0 this acts as a mask and enables, for example, the freezing of inactive gauge links in DDHMC. Added tests/forces/Test_momentum_filter demonstrating the use of the filter to freeze boundary links
  Correct misleading ac help string
  Enable performance counting in WilsonFermion like in others
  changed back A2AUtils warning
  changed if and accelerator_for - no runtime errors any more
  Mac OS (Darwin) sed -i flag for in-place editing differs from posix / gnu
  Seems the intention with AutoConf produced Grid/Config.h was to use sed to translate standard PACKAGE_ #defines into GRID_ however due to missing '' after -i this hasn't been working. Perhaps it is too late to fix this, since we don't know who/what is relying on this downstream? ... but if they are, and AutoConf is being used, then likely these #defines have just been redefined anyway. Seems reasonable to redefine PACKAGE and VERSION as well, as none of these macros are used throughout Grid or Hadrons.
  Fixed compile issues with maxLocalNorm2 for non-scalar lattices maxLocalNorm2 test now reuses the random field
  MADWF 5d source option for hadrons - look at Grid of source Abort on GPU error
  maxLocalNorm2()
  change back benchmark_ITT
  prettify
  Flop cout matches DiRAC-ITT-2020
  revert changes
  merge develop
  fixes
  weird bug in 2pt function...
  revert changes
  final version, tested on CPU and GPU
  bugfix
  ...
2021-02-24 18:07:27 +00:00
157fd1428d Merge pull request #342 from paboyle/feature/link-update-mask
Feature/link update mask
2021-02-24 11:29:52 -05:00
c791cb2214 Merge branch 'develop' into feature/link-update-mask 2021-02-23 11:51:54 -05:00
d5ab571a89 Added the ability to apply a custom "filter" to the conjugate momentum in the Integrator classes, applied both after refresh and after applying the forces
Added a conjugate momentum "filter" that applies a phase to each site. With sites set to 1.0 or 0.0 this acts as a mask and enables, for example, the freezing of inactive gauge links in DDHMC
Added tests/forces/Test_momentum_filter demonstrating the use of the filter to freeze boundary links
2021-02-23 11:49:56 -05:00
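The idea described above (a site-wise phase that, when set to 1.0 or 0.0, acts as a mask on the conjugate momentum) can be sketched in a few lines of plain C++. The types and names below are hypothetical illustrations, not Grid's actual momentum-filter interface:

```cpp
// Hypothetical sketch of a site-wise momentum "filter": multiply each site's
// momentum by a per-site phase. With phases of 1.0 or 0.0 this freezes the
// masked links, since their momentum (and hence their update) becomes zero.
#include <complex>
#include <cstddef>
#include <vector>

using Site = std::complex<double>;

struct MomentumFilter {
  std::vector<Site> phase;                 // one factor per site (1.0, 0.0, or a phase)

  void applyFilter(std::vector<Site> &momentum) const {
    for (std::size_t s = 0; s < momentum.size(); ++s)
      momentum[s] *= phase[s];             // masked sites (phase 0) stay frozen
  }
};

int main() {
  std::vector<Site> P(8, {1.0, 0.5});      // toy momentum field
  MomentumFilter f{std::vector<Site>(8, 1.0)};
  f.phase[0] = 0.0;                        // freeze site 0, e.g. a boundary link
  f.applyFilter(P);                        // would be called after refresh and after the force
}
```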
0ed800f6e4 merge develop 2021-02-23 14:54:46 +00:00
0a32183825 Merge pull request #335 from felixerben/gpu/baryons
Gpu/baryons
2021-02-23 09:30:16 -05:00
2cacfbde2a Merge pull request #341 from DanielRichtmann/fix/minor-things
Minor fixes
2021-02-22 09:28:50 -05:00
c073e62e0b Correct misleading ac help string 2021-02-22 15:25:44 +01:00
e3d019bc2f Enable performance counting in WilsonFermion like in others 2021-02-22 15:25:40 +01:00
7ae030f585 changed back A2AUtils warning 2021-02-18 13:24:50 +00:00
86b58d5aff changed if and accelerator_for - no runtime errors any more 2021-02-18 12:04:32 +00:00
26e8b9f4a5 Merge pull request #340 from mmphys/bugfix/config
Mac OS (Darwin) sed -i flag for in-place editing differs from posix / gnu
2021-02-17 11:56:21 -05:00
35114c9e62 Mac OS (Darwin) sed -i flag for in-place editing differs from posix / gnu 2021-02-17 13:24:15 +00:00
dfd28a85c9 Merge pull request #339 from mmphys/bugfix/config
Optional rename PACKAGE_ to GRID_ in Grid/Config.h
2021-02-15 13:53:26 -05:00
a503332924 Seems the intention with AutoConf produced Grid/Config.h was to use sed to translate standard PACKAGE_ #defines into GRID_ however due to missing '' after -i this hasn't been working.
Perhaps it is too late to fix this, since we don't know who/what is relying on this downstream? ... but if they are, and AutoConf is being used, then likely these #defines have just been redefined anyway. Seems reasonable to redefine PACKAGE and VERSION as well, as none of these macros are used throughout Grid or Hadrons.
2021-02-14 21:27:54 +00:00
1ac13ec3a7 Merge pull request #338 from paboyle/bugfix/maxnorm2
Fixed compile issues with maxLocalNorm2 for non-scalar lattices
2021-02-08 12:08:11 -05:00
55de69a569 Fixed compile issues with maxLocalNorm2 for non-scalar lattices
maxLocalNorm2 test now reuses the random field
2021-02-08 12:03:16 -05:00
eda9ab487b MADWF 5d source option for hadrons - look at Grid of source
Abort on GPU error
2021-02-08 10:47:22 -05:00
cd99edcc5f maxLocalNorm2() 2021-02-04 18:25:49 -05:00
4705aa541d Allow user to configure ShmDims via environment variables 2021-02-04 14:25:55 +01:00
3215d88a91 Simplify syntax with Grid::EnableIf post code review. Updated EnableIf so that ReturnType defaults to void in same way as std::enable_if see https://en.cppreference.com/w/cpp/types/enable_if 2021-02-03 15:17:03 +00:00
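A minimal sketch of the idea referenced in this commit (an assumption about the shape of Grid::EnableIf, not its actual definition): an alias whose return type defaults to void, mirroring std::enable_if:

```cpp
// Sketch only: an EnableIf alias with ReturnType defaulting to void,
// in the same way as std::enable_if's second template parameter.
#include <type_traits>

template <typename Condition, typename ReturnType = void>
using EnableIf = typename std::enable_if<Condition::value, ReturnType>::type;

// SFINAE use: participates in overload resolution only for integral T,
// and returns void by default.
template <typename T>
EnableIf<std::is_integral<T>> onlyForIntegral(T) {}

int main() { onlyForIntegral(42); }
```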
9b9a53f870 ... 2021-02-02 13:06:43 +00:00
019ffe17d4 Allow for GPU vector width beyond 64 2021-02-02 11:32:23 +01:00
bc496dd844 change back benchmark_ITT 2021-01-28 14:29:56 +00:00
a673b6a54d prettify 2021-01-28 14:15:09 +00:00
1bf2e4d187 Merge branch 'develop' into gpu/baryons 2021-01-27 21:17:37 +00:00
96dd7a8fbd Flop cout matches DiRAC-ITT-2020 2021-01-27 21:14:52 +00:00
7905afa9f5 revert changes 2021-01-27 21:14:52 +00:00
712bb40650 merge develop 2021-01-27 21:14:52 +00:00
81d88d9f4d fixes 2021-01-27 21:09:51 +00:00
77063418da Fix issue for GPU by ensuring accelerator_inline version of convertType is available for Grid::complex<T>. This removes many warnings in Hadrons
Simplify the SFINAE syntax and correct convertType for iScalar
2021-01-25 15:09:36 +00:00
2983b6fdf6 Optional (superficial) changes to make comparison with Hadrons WardIdentity module easier: use Schur solver; example of Hadrons random gauge init; logging updates; only solve reverse propagator if provided 2021-01-23 12:41:48 +00:00
69f1f04f74 Merge branch 'develop' of https://github.com/paboyle/Grid into develop 2021-01-21 21:39:59 -05:00
11a5fd09d6 Hot config 2021-01-21 21:39:41 -05:00
ff1fa98808 Fix for GPU conserved current 2021-01-21 21:38:23 -05:00
df16202865 weird bug in 2pt function... 2021-01-19 19:25:27 +00:00
3ff7c2c02a Merge branch 'develop' into gpu/baryons 2021-01-19 12:34:13 +00:00
fc6d07897f revert changes 2021-01-19 12:32:48 +00:00
f9c8e5c8ef Merge branch 'develop' of github.com:paboyle/Grid into develop 2021-01-19 12:30:29 +00:00
8bfa0e74f8 final version, tested on CPU and GPU 2021-01-19 12:27:57 +00:00
9b73a937e7 bugfix 2021-01-18 18:57:05 +00:00
b0339bc5a4 Merge branch 'feature/conjugate-bc-dirs' into develop 2021-01-15 09:28:39 -05:00
3c23a947cc Fixed test for very much non-unit det 2021-01-15 09:16:02 -05:00
56111bb823 Merge branch 'develop' into feature/conjugate-bc-dirs 2021-01-14 21:01:22 -05:00
99445673f6 Gparity fix, and plaquette IO 2021-01-14 21:00:36 -05:00
97a59643f7 Red black coarse space 2021-01-14 20:49:13 -05:00
579595f547 Red black on coarse space 2021-01-14 20:48:35 -05:00
281ac5fc12 Red black support on coarse space 2021-01-14 20:48:08 -05:00
d8fa903b02 G5 on coarse spaces 2021-01-14 20:47:28 -05:00
eaff0f3aeb Gamma5 on coarse spaces 2021-01-14 20:46:58 -05:00
e8e20c01b2 Coarsened vector test 2021-01-14 20:46:21 -05:00
a4afc3ea2a Red black coarse space 2021-01-14 20:44:16 -05:00
fa12b9a329 bugfix 2021-01-13 10:04:17 +00:00
45fc7ded3a test for sum 2021-01-12 09:10:37 +00:00
74de2d9742 whitespace changes 2021-01-08 18:28:36 +00:00
e759367d42 tested and working 2021-01-08 18:04:50 +00:00
299d0de066 Merge pull request #21 from paboyle/develop
Sync
2020-12-22 20:59:15 +01:00
3fe75bc7cb Merge pull request #329 from nmeyer-ur/feature/a64fx-3
Revised dslash/dwf kernels for A64FX
2020-12-20 08:17:15 -05:00
45d49d8648 clean up 2020-12-19 03:35:18 +01:00
6013183361 removed Asm impls 2020-12-19 03:25:01 +01:00
4b882e8056 fixed lost bracket 2020-12-19 03:09:20 +01:00
3f9ae6e7e7 Merge branch 'develop' into feature/a64fx-3 2020-12-19 02:37:11 +01:00
909acd55cd vnum variant for prefetches 2020-12-19 02:00:22 +01:00
4dd9e39e0d up to +36% performance gain for dslash/dwf on QPACE 4 using GCC 10.1.1 2020-12-19 00:54:31 +01:00
b4c1317ab4 Merge pull request #22 from DanielRichtmann/feature/clover-access-specifier
Clover access specifier
2020-12-18 16:20:19 +01:00
f36d6f3923 compiles on GPU. 3pt still wrong!!!! 2020-12-17 17:04:08 +00:00
7adb253e25 Merge pull request #328 from mmphys/feature/mrespatch
Enable existing conserved current code for CUDA
2020-12-17 11:10:29 -05:00
808f1e0e8c merge develop 2020-12-15 16:33:29 +00:00
873519e960 Enable existing conserved current code for CUDA (compiles OK for CUDA 10.1). Add option to Test_cayley_mres to load a configuration 2020-12-14 16:06:10 +00:00
9aec4a3c26 SYCL 2020-12-10 02:11:17 -08:00
c438118fd7 Change access specifier of clover fields in order to allow deriving classes to access these 2020-12-08 14:42:11 +01:00
70510d151b Merge pull request #327 from paboyle/feature/gparity_twist_GPU
Feature/gparity twist gpu
2020-12-07 12:02:20 -05:00
9e7bacb5a4 Merge branch 'develop' into feature/gparity_twist_GPU 2020-12-07 11:55:39 -05:00
2ef1fa66a8 Improved performance of G-parity kernel for GPUs by simplifying multLink implementation 2020-12-07 11:53:35 -05:00
cf76741ec6 Intel DPCPP Gold happy now (compiles all, runs Benchmark_dwf_fp32 ) 2020-12-03 03:47:11 -08:00
497e7c1c40 Duplicate code 2020-12-02 17:55:30 -08:00
888eacd3b8 Merge branch 'develop' of https://github.com/paboyle/Grid into develop 2020-11-24 21:46:33 -05:00
321f0f51b5 Project to SU(N) 2020-11-24 21:46:10 -05:00
17ec9c5545 Merge pull request #20 from paboyle/develop
Sync
2020-11-24 12:20:43 +01:00
30ad9578a2 Merge branch 'lehner-feature/gpt' into develop 2020-11-24 06:10:24 -05:00
9dce101586 Merge branch 'feature/gpt' of https://github.com/lehner/Grid into lehner-feature/gpt 2020-11-24 06:10:16 -05:00
97e264d0ff Christoph's changes 2020-11-23 15:46:11 +00:00
683a5e5bf5 Stencil use host vector for integer table on enable-shared=no and mirror it on device 2020-11-23 15:39:51 +00:00
d4861a362c Stencil use non-UVM memory for look up table on enable-shared=no 2020-11-23 15:38:49 +00:00
5ff3eae027 Merge branch 'develop' of https://github.com/paboyle/Grid into develop 2020-11-20 13:14:44 -05:00
147dc15d26 Update 2020-11-20 13:13:59 -05:00
c61ea72949 Merge pull request #19 from paboyle/develop
Sync
2020-11-20 17:31:13 +01:00
86e8b9fe38 ALLOC_ALIGN removed 2020-11-20 17:07:16 +01:00
612e468889 Configurable ALLOC_ALIGN and ALLOC_CACHE 2020-11-20 16:48:28 +01:00
4ea8d128c2 Merge pull request #18 from paboyle/develop
Sync
2020-11-20 15:36:50 +01:00
e49b7f2f88 Merge branch 'develop' of https://github.com/paboyle/Grid into develop 2020-11-19 19:24:41 +01:00
aace3d47b9 partial work in progress 2020-11-19 19:24:14 +01:00
d5049949a4 Starting to fix reunitarise 2020-11-19 19:23:41 +01:00
f1c7480e3c Warning remove 2020-11-19 19:23:03 +01:00
5adae5d6ff Unused variable remove 2020-11-19 19:22:12 +01:00
a8412ace05 Merge pull request #317 from i-kanamori/develop
adding an error check for input: Parameters.StartingType
2020-11-18 23:09:40 -05:00
9fd1c2ad4b Merge pull request #325 from DanielRichtmann/feature/threaded-clover-inversion
Threaded clover term inversion
2020-11-18 23:08:37 -05:00
4cf3575353 Merge branch 'develop' of https://github.com/paboyle/Grid into develop 2020-11-18 03:07:36 +00:00
804a810d68 Wildcard mismatch 2020-11-18 03:06:53 +00:00
8fcb392e24 Merge branch 'develop' of https://github.com/paboyle/Grid into develop 2020-11-17 04:51:31 -08:00
dd8d70eeff Build without LIME 2020-11-17 04:41:15 -08:00
aa8aba6543 --shm-force-mpi 2020-11-16 20:15:50 -05:00
13df14f96e Switch off SHM paths with --disable-shm 2020-11-16 18:07:15 -05:00
3aab983760 Flop count set as in DiRAC-ITT-2020 (mistakenly 20% low, but must maintain consistency) 2020-11-16 17:13:58 +01:00
9c4dcc5ea3 Merge branch 'master' into develop 2020-11-16 16:34:57 +01:00
a1063ddbb9 Update options and simplify 2020-11-13 04:11:03 +01:00
18ef8056ec Hide Shared Memory 2020-11-13 04:10:40 +01:00
1c673977fa Must ask for COMMMS_THREADS 2020-11-13 03:59:36 +01:00
e9bc748828 Useful GPU machine benchmark for GDR used to shakeout Booster at Juelich - see slack earlyaccess channel 2020-11-13 03:58:34 +01:00
f48156529b Work on 2,2,2,8 ranks 2020-11-13 03:57:58 +01:00
d05ce01809 TOFU behaviour now optional THREAD_MULTIPLE or THREAD_SERIALIZED 2020-11-13 03:52:19 +01:00
cf23eff60e Device to Device, Memset, cannot assume UVM == Communicable 2020-11-13 03:51:08 +01:00
6e313575be Use of default GPU is behaviour, not a system property. Move Summit specific to configure.ac 2020-11-13 03:50:16 +01:00
b13d1f7238 TOFU compat flag to help Isaaku 2020-11-13 03:49:44 +01:00
b5e7945dd9 Option for host or device Cshift implementation 2020-11-13 01:38:54 +01:00
7535566f54 Option for bounce through the SHM buffer 2020-11-12 22:54:27 +01:00
50b808ab33 Configure option between host and device 2020-11-12 22:28:12 +01:00
f16c2665f5 Host memory explicit 2020-11-12 20:29:58 +01:00
41e28015ae Volume divisible guarantee 2020-11-07 13:32:16 +01:00
3594ce877b speedup in Sigma-to-nucleon 2020-11-03 20:04:30 +00:00
9bae6b889a speedup in Sigma-to-nucleon 2020-11-03 20:03:09 +00:00
4014dfd5b9 first tested version 2020-11-03 16:13:08 +00:00
67023c334b bugfix 2020-11-03 13:07:37 +00:00
a3de7026c8 bugfix 2020-11-03 12:51:50 +00:00
ee11678b1f added Xi-to-Sigma rare decays 2020-11-03 12:41:35 +00:00
a0ccbb3bd6 Merge branch 'develop' of https://github.com/paboyle/Grid into develop 2020-11-01 01:16:35 +00:00
5eeabaa2bb HIP fix 2020-11-01 01:16:01 +00:00
00d0d6d008 Hip Free managed 2020-10-31 18:14:31 -04:00
537a9f7030 Merge branch 'develop' of https://github.com/paboyle/Grid into develop 2020-10-31 18:13:30 -04:00
cc9c993f74 Project on group fix on GPU tracked to reciprocal sqrt collision between CUDA and Grid rsqrt 2020-10-31 18:12:47 -04:00
d10422ded8 Test project on group 2020-10-31 18:12:30 -04:00
f313565a3c HiP compile 2020-10-31 12:12:40 +00:00
b3881d2636 Thread inversion of clover term 2020-10-30 16:18:58 +01:00
61d5860b46 Merge pull request #318 from rrhodgson/feature/BaryonSpinMat
Added untraced baryon contraction code
2020-10-28 18:39:59 +00:00
52d17987dc BaryonUtils.h updated debug output 2020-10-23 11:41:08 +01:00
19d8bba97d BaryonUtils function naming change 2020-10-21 11:58:53 +01:00
463d72d322 Added untraced baryon contraction code 2020-10-19 16:13:28 +01:00
d060341168 add an error check for Parameters.StartingType 2020-10-16 21:39:17 +09:00
c772bcd514 Merge https://github.com/paboyle/Grid into develop 2020-10-16 20:30:32 +09:00
3362f8dfa0 happy compile 2020-10-14 22:59:41 -04:00
bf3c9857e0 Closure changes 2020-10-14 21:37:14 -04:00
a88b3ceca5 Closure cases 2020-10-14 21:33:51 -04:00
aa135412f5 toComplex, toReal 2020-10-13 22:25:01 -04:00
9945399e60 Reality issues fixed by dropping from ET 2020-10-13 22:24:32 -04:00
5eeffa49e8 Reality forced included 2020-10-13 22:23:57 -04:00
3f06209720 Pretty print 2020-10-13 22:18:51 -04:00
12e239dd9f Merge branch 'release/dirac-ITT-2020' 2020-10-13 13:38:29 -04:00
af2301afbb Merge pull request #312 from i-kanamori/debug_512
add reordring of random number generators in IO
2020-10-13 11:42:12 -04:00
f98856a26f Merge pull request #314 from smangham/issue_readme_precision
Fix for deprecated configure options in documentation (issue #313)
2020-10-13 11:41:38 -04:00
d55cc5b380 Fixed typo on --enable-comm, removed all references to --enable-precision except for config options, where it is listed as deprecated. Removed travis test for single precision. 2020-10-12 12:33:13 +01:00
c2b688abc9 Benchmark_IO: reducing max local volume to 32^4 2020-10-10 16:52:56 +01:00
b0d61b9687 Benchmark_IO cleaner output 2020-10-09 21:46:45 +01:00
5f893bf9af Benchmark_IO procurement sizes 2020-10-09 21:31:59 +01:00
0e17bd6597 I/O benchmark cleanup 2020-10-09 20:29:57 +01:00
22caa158cc multi-pass I/O benchmark, with statistic and robustness summary 2020-10-09 20:29:40 +01:00
b24a504d7c hook to access last parallel I/O performance measurement 2020-10-09 20:28:54 +01:00
992ef6e9fc more runtime 2020-10-08 22:19:20 -04:00
f32a320bc3 Single prec benchmark in double prec compile 2020-10-08 19:52:08 -04:00
5f0fe029d2 Improve memory benchmarks for GPU (avoid host mem ping pong) 2020-10-08 19:51:28 -04:00
6b1486e89b fixing number of colours defaulting to 4 in most cases 2020-10-08 16:31:24 +01:00
3f9c427a3a Merge branch 'develop' of https://github.com/paboyle/Grid into develop 2020-10-07 13:12:57 -04:00
d201277652 Expose Nc as a compile time configure option.
Remove precision option
2020-10-07 13:07:00 -04:00
fdda7cf9cf Merge branch 'feature/benchmark-io-update' into develop 2020-10-07 15:57:53 +01:00
e22d30f715 Merge branch 'develop' into feature/benchmark-io-update 2020-10-07 15:56:39 +01:00
1ba25a0d8c more I/O benchmark code cleaning 2020-10-07 15:38:41 +01:00
9ba3647bdf script to convert I/O benchmark logs to CSV 2020-10-07 15:35:03 +01:00
5ee832f738 I/O benchmark code cleaning 2020-10-07 15:31:51 +01:00
467deee46f Merge branch 'debug_512' into develop 2020-10-07 15:18:44 +09:00
35a69a5133 SU4 x SU4 2020-10-06 21:48:35 -04:00
e9c5a271a8 fixing potential issues with log alignment and timer I/O 2020-10-06 17:58:16 +01:00
acac2d6938 standard C/C++ I/O in benchmark 2020-10-06 17:57:00 +01:00
97db2b8d20 add reordring of random number generator in IO 2020-10-06 17:25:59 +09:00
80fd6ab407 Merge pull request #17 from paboyle/develop
sync upstream
2020-10-06 09:01:39 +02:00
5534921bee Merge pull request #16 from DanielRichtmann/feature/gpt-coarsenedmatrix
Enable checkerboard operations for CoarsenedMatrix
2020-10-01 10:55:13 +02:00
ace9cd64bb dpcpp happy 2020-09-29 08:03:46 -07:00
a3e2aeb603 dpcpp options happiness 2020-09-29 06:50:10 -07:00
049dd25785 Revert accidental commit thanks michael 2020-09-23 04:13:50 -04:00
d43d372294 Merge pull request #311 from mmphys/bugfix/MPIasynch
Asynchronous calls removed - reflect this in Communicator_none.cc
2020-09-22 10:41:48 -04:00
b71a081cba Asynchronous calls removed - reflect this in Communicator_none.cc
(Opportunistic doc update - OpenMP support on Mac OS)
2020-09-21 09:33:23 +01:00
c48909590b MPI asynch call removal 2020-09-17 20:47:32 +01:00
446ef40570 HIP IPC 2020-09-17 20:31:46 +01:00
81441e98f4 HIP runs sensible 2020-09-16 03:35:03 +01:00
ecd3f890f5 Merge branch 'develop' of https://github.com/paboyle/Grid into develop 2020-09-16 02:30:14 +01:00
1c881ce23c HIP does not like half2 visible members x and y so must define own Half2 2020-09-16 02:28:33 +01:00
dacbbdd051 Hip Happy Birthday 2020-09-16 00:37:02 +01:00
2859955a03 HIP requires "inline" 2020-09-16 00:36:13 +01:00
cc220abd1d inline for HIP 2020-09-16 00:35:38 +01:00
d1c0c0197e HipCC requires inline on definition 2020-09-16 00:35:06 +01:00
fd9424ef27 inlines required to make HIP happy 2020-09-16 00:34:32 +01:00
a5c35c4024 Make HIP / Vega happy 2020-09-16 00:33:53 +01:00
e03b64dc06 HIP default flags to work on ROCM 2020-09-16 00:33:09 +01:00
4677c40195 HIP improvements 2020-09-16 00:32:27 +01:00
288c615782 Hip improvements 2020-09-16 00:31:50 +01:00
48e81cf6f8 Hip Pragmas 2020-09-16 00:31:03 +01:00
5cffa05c7e remove slab allocator file 2020-09-13 14:06:25 -04:00
d50a2164d7 remove slab allocator 2020-09-13 14:06:06 -04:00
32ff766dbd fix evict scheme, slab alloc 2020-09-13 14:02:53 -04:00
01652d8cfe SlabAllocator 2020-09-13 05:56:02 -04:00
4d2dc7ba03 Enable even-odd for CoarsenedMatrix 2020-09-11 20:32:02 +02:00
51d1beb1f3 Merge pull request #15 from paboyle/develop
Sync with upstream
2020-09-07 14:20:33 +02:00
65b724bb5f 2-level HDCR 2020-09-03 21:46:43 -04:00
6dbd117aa5 Merge branch 'develop' of https://github.com/paboyle/Grid into develop 2020-09-03 20:30:49 -04:00
198b29f618 Merge branch 'develop' of https://github.com/paboyle/Grid into develop 2020-09-03 20:29:54 -04:00
a8309638d4 UVM check in MPI calls 2020-09-03 20:29:26 -04:00
f98a4e880e Merge pull request #310 from kostrzewa/accelerator_vector_stream_op_no_backspace
do not use backspace in AcceleratorVector (Coordinate) output stream operator
2020-09-03 20:24:59 -04:00
8244caff25 Remove the asynchronous non-Stencil calls. 2020-09-03 18:52:55 -04:00
bcd7895362 Include cuda.h 2020-09-03 15:49:13 -04:00
85b1c5df39 A never-hit case that we are not 100% confident about is asserted for safety 2020-09-03 15:48:16 -04:00
b4255140d6 Stale data member eliminated 2020-09-03 15:47:46 -04:00
0c3095e173 Comms buffers to device memory 2020-09-03 15:45:35 -04:00
d3ce60713d UVM, Device and Lattice/aligned allocators 2020-09-03 15:44:13 -04:00
eac1f08b7b Close expressions passed as an argument 2020-09-01 15:30:33 -04:00
1654c4f3c0 Closure improved 2020-09-01 15:29:45 -04:00
8807d998bc closure improved 2020-09-01 15:29:11 -04:00
5791021dcd Speed up Cshift more with coalesced 2020-09-01 15:28:15 -04:00
c273fb051c Peek poke lattice 2020-09-01 15:27:59 -04:00
c545530170 little worry: large Nbasis doesn't compile on GPU 2020-09-01 00:14:33 -04:00
d982a5b6d5 Fix coarsened 2020-09-01 00:14:04 -04:00
15ca8637f3 No norms in HermOp 2020-09-01 00:13:32 -04:00
cbc995b74c Made better interface 2020-09-01 00:12:54 -04:00
8b74174d74 Eigen tensor serialisation happy under GPU. Regret agreeing to let us couple Eigen types to Grid IO 2020-09-01 00:03:26 -04:00
e21fef17df real and imag part not in ET 2020-08-31 23:56:26 -04:00
3d27708f07 Basic where test 2020-08-31 23:55:49 -04:00
b918744184 Prettification 2020-08-31 23:54:46 -04:00
7d14a3c086 Where working 2020-08-31 23:53:46 -04:00
e14a84317d GPU math unary calls 2020-08-31 23:50:49 -04:00
6c31b99f1f I knew coupling Eigen Tensor to Grid serialisation was a bad idea.
Now that the complex type is different on GPU it creates problems
2020-08-31 23:49:19 -04:00
9522dcd611 Remove dead commented out code 2020-08-31 23:40:29 -04:00
ed469898dc coalesced ET expressions 2020-08-31 23:38:40 -04:00
1eee94a809 Sorting real/im in read coalesced GPU ET 2020-08-31 23:36:49 -04:00
54523369a3 do not use backspace in Coordinate output stream operator 2020-08-31 19:39:36 +02:00
a98c91c2a5 Merge pull request #309 from kostrzewa/format_benchmark_wilson_sweep
Format benchmark wilson sweep
2020-08-31 12:43:46 -04:00
a9b92867a8 use tabulator 2020-08-31 18:41:17 +02:00
65920faeba correct formatting of Benchmark_wilson_sweep output 2020-08-31 18:39:27 +02:00
249e2db87d Merge pull request #14 from DanielRichtmann/feature/gpt-coarsenedmatrix
Expose more functions in CMat
2020-08-27 15:18:56 +02:00
cf3535d16e Expose more functions in CMat 2020-08-27 14:06:48 +02:00
d61ee817f4 Merge pull request #13 from DanielRichtmann/feature/gpt-coarsenedmatrix
Changes needed for GPT MG
2020-08-27 12:11:06 +02:00
3448b7387c Almost there to coalesced ET 2020-08-26 17:04:49 -04:00
47b89d2739 Pragma protection improvementt 2020-08-26 17:04:27 -04:00
2a75516330 state MPI/SLURM message only on world_rank zero 2020-08-26 12:34:17 -04:00
b2087f14c4 Fix CoarsenedMatrix regarding illegal memory accesses
Need a reference to geom since the lambda copies the this pointer which points to host memory, see
- https://docs.nvidia.com/cuda/cuda-c-programming-guide/#star-this-capture
- https://devblogs.nvidia.com/new-compiler-features-cuda-8/
2020-08-24 17:46:47 +02:00
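The capture issue described in this commit can be shown with a small, self-contained C++ analogue (illustration only, not the actual CoarsenedMatrix code): a default copy capture inside a member function captures the this pointer, so member accesses inside a device lambda would dereference host memory; binding the member to a local first makes the closure hold the value itself.

```cpp
// Plain C++ analogue of the fix described above (illustration only).
struct CoarseOpSketch {
  struct Geometry { int npoint = 9; } geom;

  int capturesThis() const {
    // [=] captures 'this'; in an extended device lambda the kernel would then
    // dereference a host pointer on the device -> illegal memory access.
    auto kernel = [=]() { return geom.npoint; };
    return kernel();
  }

  int capturesValue() const {
    // Copy (or bind a reference to) the member first: the closure now stores
    // the value itself, so no host pointer is dereferenced on the device.
    const int npoint = geom.npoint;
    auto kernel = [=]() { return npoint; };
    return kernel();
  }
};

int main() { return CoarseOpSketch{}.capturesValue() == 9 ? 0 : 1; }
```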
dd1ba266b2 Fix mapping between dir + disp and point in CMat 2020-08-24 17:46:46 +02:00
1292d59563 Add a typedef + broaden interface of CMat 2020-08-24 17:46:45 +02:00
9877ed9bf8 Merge pull request #12 from paboyle/develop
Sync
2020-08-22 16:35:35 +02:00
f0dc0f3621 fix compile issue on Qpace3 2020-08-22 13:57:33 +02:00
1efe30d6cc SLURM: stop nodes using same GPU 2020-08-21 02:02:53 +02:00
0b787e9fe0 Avoid namespace collision to make gcc happy 2020-08-20 22:23:29 +02:00
37ec4b241c Default thread count sensible 2020-08-20 22:12:31 +02:00
63b0a19f37 Merge pull request #11 from paboyle/develop
Sync
2020-08-20 20:53:39 +02:00
90ea7dfa99 Accelerator loops for device resident comms buf 2020-08-19 22:40:44 +02:00
f866d7c33e Merge pull request #307 from lehner/feature/gpt
Merged Nils's A64FX and minor fixes (MemoryManager::InitMessage, Tensor_index zeroit, ...)
2020-08-18 23:27:21 -04:00
542bdef198 cleanup comments 2020-08-14 18:39:44 +02:00
06007db3d9 true shm_none implementation with GPUs that disables the use of device shared memory for the stencils 2020-08-14 18:37:00 +02:00
12e6059a70 Merge branch 'feature/gpt' of https://github.com/lehner/Grid into feature/gpt 2020-08-13 16:16:52 +02:00
dbaa24ebf6 further GPU memory access fixes (with this GPT passes all single-rank tests on non-summit GPUs) 2020-08-13 16:14:15 +02:00
3276aa67dc Update 2020-08-12 14:15:53 -04:00
3b30b9f0c0 Merge branch 'feature/gpt' of https://github.com/lehner/Grid into feature/gpt 2020-08-06 16:59:17 +02:00
69db4816f7 fix variable capture in Scatter_plane_merge on accelerators 2020-08-06 16:57:16 +02:00
3abe09025a when using SHM_NONE allow multiple ranks per node but without using shared memory 2020-08-06 14:42:38 +02:00
e33878e0de Trigger re-run of CI 2020-08-06 11:50:24 +02:00
27b4fbf3f0 assert for forbidden code path and fix check for faster CPU codepath in basisRotate 2020-08-03 07:57:33 -04:00
968a90633a Zero -> zeroit in Tensor_index 2020-07-31 02:07:17 -04:00
6365a89ba3 create separate InitMessage for MemoryManager that can be called after communicator setup 2020-07-30 07:25:05 -04:00
ddbb008694 Merge pull request #10 from lehner/feature/gpt-sycl
Feature/gpt sycl
2020-07-30 13:12:09 +02:00
7997e0a449 Merge branch 'feature/gpt' into feature/gpt-sycl 2020-07-30 13:11:31 +02:00
197612bc7a fast cpu basisRotate and other small cleanups 2020-07-30 07:08:54 -04:00
0e88bf4bff remove Nils's default pragma 2020-07-29 10:24:35 -04:00
3e64d78469 include versions.h again and add back asserts in Test_simd 2020-07-29 10:18:05 -04:00
2004611def Merge pull request #9 from nmeyer-ur/feature/a64fx-2
Feature/a64fx 2
2020-07-29 14:54:20 +02:00
a2868c96a4 Merge pull request #8 from paboyle/develop
Doc recompile
2020-07-29 14:10:07 +02:00
7cf7f11e1a Doc recompile 2020-07-22 14:44:11 -04:00
ea7f8fda5e fix typo 2020-07-22 09:34:05 +02:00
906b78811b exit in Init when using --comms-overlap 2020-07-22 08:57:01 +02:00
97703b181b Merge pull request #7 from paboyle/develop
Merge current develop
2020-07-12 16:24:53 +02:00
d9474c6cb6 compiler-independent build using --enable-simd=A64FX 2020-07-09 10:07:02 +02:00
bbd145382b enable --enable-simd=A64FX in configure 2020-07-08 12:43:51 +02:00
1b08cb7300 Merge branch 'develop' into feature/a64fx-2 2020-07-08 08:18:18 +02:00
337d9dc043 move barrier in Benchmark_wilson 2020-07-08 08:13:40 +02:00
8726e94ea7 merge upstream develop 2020-07-07 20:26:47 +02:00
67db4993c2 reset head, update SVE readme 2020-07-07 19:54:52 +02:00
f1f655d92b Merge pull request #304 from Heinrich-BR/develop
ScalarImpl.h updates
2020-07-06 10:16:03 +01:00
43334e88c3 Tiny change in a comment for clarity 2020-07-04 16:11:16 +01:00
4f1e66b044 Fixed HMC SU(N) integrator which was causing fields to leave Lie Algebra manifold for N>2 2020-07-04 03:53:06 +01:00
fd3c8b0e85 correct build instructions qp4 2020-07-01 09:00:38 +02:00
1635c263ee disable TOFU by default 2020-06-30 19:27:08 +02:00
64fe5b21b4 Merge pull request #298 from rrhodgson/feature/baryon
Update baryon 2pt and add 3pt function
2020-06-29 18:45:00 +01:00
ee9889821d Runs through to coarse space solve 2020-06-29 12:59:52 -04:00
eb470aa6dc Update to baryon and added comments/fix whitespace 2020-06-29 09:43:01 +01:00
77af9a3ddc Baryon revert sign 2020-06-26 10:08:42 +01:00
102089798c BaryonUtils: update to autoView 2020-06-25 16:41:58 +01:00
39cea8b5a7 Merge branch 'develop' into feature/baryon 2020-06-25 16:24:07 +01:00
a65f66d2db Merge branch 'feature/baryon3pt' into feature/baryon 2020-06-25 16:20:59 +01:00
936c5ecf69 Reduction GPU no compile fix 2020-06-24 17:28:31 -04:00
22cfbdbbb3 Boost precision in inner products in single 2020-06-24 12:52:31 -04:00
093d1ee21b Force initial values 2020-06-24 08:54:49 -04:00
d6ba2581ce Merge branch 'develop' of https://github.com/paboyle/Grid into develop 2020-06-24 08:25:08 -04:00
577c064184 Memory manager initialise earlier 2020-06-24 08:24:38 -04:00
2ff1fa6fad UVM uses shared memory for CPU allocations and doesn't migrate 2020-06-23 22:14:56 -04:00
70be1bd8be Adding code under development 2020-06-23 10:24:21 -04:00
4ef50ba31f Baryon speedup 2020-06-23 11:44:20 +01:00
3e97a26f90 BaryonGamm3pt threads -> accelerator 2020-06-23 11:35:32 +01:00
599f28f6ef Baryon bug fixes 2020-06-23 11:10:26 +01:00
c48da35921 Memory Vector UVM and Lattice alignedAllocator separate 2020-06-22 20:21:53 -04:00
6c5fa8dcd8 Aligned allocate on CPU put through this interface 2020-06-20 14:34:29 -04:00
0d2f913a1a String.h for linux 2020-06-20 09:37:31 -04:00
5b117865b2 Merge pull request #6 from paboyle/sycl
Sycl
2020-06-20 09:44:44 +02:00
1a74816c25 Hopefully fixed 2020-06-19 17:50:52 -04:00
73de335256 Merge branch 'develop' into sycl 2020-06-19 17:44:16 -04:00
228fd450ce Typo fix (excuse - my keyboard is starting to break) 2020-06-19 17:36:05 -04:00
b949cf6b12 PeekLocal needs a view to keep thread safe.
ALLOCATION_CACHEE reenable
2020-06-19 17:13:27 -04:00
11bc1aeadc Thread count defaults to fastest 2020-06-19 14:30:35 -04:00
66005929af Set up the cache size on all ranks 2020-06-19 12:50:54 -04:00
05bbc49a99 Edge case in GetShmDim check 2020-06-19 12:01:23 -04:00
ff7c847735 Merge branch 'sycl' of https://github.com/paboyle/Grid into sycl 2020-06-19 01:22:16 -04:00
1aa988b2af Comms overlap fix UVM case 2020-06-19 01:21:14 -04:00
edf17708a8 Range improvement 2020-06-18 22:41:06 -04:00
81a8209749 ConvertType for blockInnerProduct 2020-06-18 11:53:21 -04:00
a87e45ba25 SVE readme update 2020-06-18 11:23:08 +02:00
465856331a switch back to serialized; wrong results on single too 2020-06-15 15:39:39 +02:00
cc958aa9ed switch back to standard MPI_init due to wrong results in Benchmark_wilson using comms-overlap 2020-06-15 14:21:38 +02:00
f46f029dbb Merge pull request #292 from lehner/feature/gpt-sycl
Catch edge case in SharedMemoryMPI::GetShmDims; Change default units …
2020-06-14 13:43:27 -04:00
3dccd7aa2c Catch edge case in SharedMemoryMPI::GetShmDims; Change default units to consistent MB in init args; Want last element not past last element in MemoryManagerCache.cc 2020-06-14 13:26:01 -04:00
a25e4b3d0c pred 32/64 for float/double instead of 8 in VLA patch 2020-06-13 14:44:37 +02:00
d1210ca12a switch to double/float instead of float64_t/float32_t in VLA patch 2020-06-13 13:59:32 +02:00
36ea0e222a type traits for ComplexF/D in VLA patch; cosmetics in VLS intrinsics 2020-06-13 13:42:35 +02:00
65e6e7da6f Merge pull request #291 from lehner/feature/gpt-sycl
Feature/gpt sycl
2020-06-12 20:42:32 -04:00
b5e87e8d97 summit compile fixes 2020-06-12 18:16:12 -04:00
5f5807d60a cleanup 2020-06-12 14:48:23 -04:00
92281ec22d add 3 op Mult for VLA 2020-06-12 18:49:05 +02:00
87266ce099 comment out fcmla in vector types: need also MultAddReal 2020-06-12 18:37:19 +02:00
2a23f133e8 reenable fcmla for VLA 2020-06-12 17:30:38 +02:00
8dbf790f62 correct tbl2 for sp 2020-06-12 17:12:34 +02:00
2402b4940e vec_imm in float 2020-06-12 15:17:38 +02:00
2111052fbe apply VLA patch for memcpy reduction suggested by Arm, CAS-162542-D6W7Z7 2020-06-12 14:49:19 +02:00
7974acff54 merged sycl to feature-gpt 2020-06-12 06:49:38 -04:00
f0d17d2b49 Added Baryon3pt code 2020-06-12 11:35:52 +01:00
244c003a1b Updated Baryon code 2020-06-12 11:00:25 +01:00
0174f5f742 look for librt when using shm=shmopen 2020-06-11 16:50:43 +01:00
32b2b59be4 Offload 2020-06-10 20:36:26 -04:00
86bb0cc24b Keep on GPU 2020-06-10 20:00:00 -04:00
84c19587e7 Offload 2020-06-10 19:59:31 -04:00
237ce92540 Offload loops 2020-06-10 19:59:11 -04:00
a7ffc61e82 acceleratorSIMTlane() 2020-06-10 19:58:33 -04:00
fd97f64612 Merge branch 'sycl' of https://github.com/paboyle/Grid into sycl 2020-06-10 12:58:13 -04:00
8720aecb80 Offload more loops 2020-06-10 12:57:55 -04:00
cdf0a04fc5 Merge branch 'develop' into sycl 2020-06-09 04:00:12 -04:00
616d3dd737 Compile updates 2020-06-08 18:57:41 -04:00
8b066baca8 Implement transient mechanism 2020-06-08 18:28:53 -04:00
e97f3688db Fix the HMC issue - kernel was launching asynchronously 2020-06-08 17:01:15 -04:00
433766ac62 revert Add/SubTimesI and prefetching in stencil
This reverts commit 9b2699226c.
2020-06-08 12:02:53 +02:00
93a37c8f68 test prefetch to L2 in stencil 2020-06-08 09:39:50 +02:00
89a1e78390 Merge branch 'sycl' of https://github.com/paboyle/Grid into sycl 2020-06-05 23:20:37 -04:00
ffbb3fc02c Merge pull request #287 from felixerben/baryon-cleaner
slightly cleaner baryon 2pt code
2020-06-05 22:54:52 -04:00
5a73ef3647 Minor tweak to compile 2020-06-05 21:50:15 -04:00
87e5d2f4b7 Merge branch 'sycl' of https://www.github.com/paboyle/Grid into sycl 2020-06-05 17:32:21 -07:00
d720f10758 Link error fix 2020-06-05 17:29:20 -07:00
14fcd0912a Merge branch 'sycl' of https://github.com/paboyle/Grid into sycl 2020-06-05 19:14:17 -04:00
3111c0bd4f Single precision hardwire 2020-06-05 19:13:27 -04:00
e03064490e Merge branch 'sycl' of https://github.com/paboyle/Grid into sycl 2020-06-05 18:53:39 -04:00
1a4c8c3387 Global edit with change to View usage. autoView() creates a wrapper object that closes the view when scope closes. 2020-06-05 18:52:35 -04:00
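The autoView() behaviour described in this commit is the classic RAII pattern; a generic sketch (hypothetical names, not Grid's implementation) looks like this:

```cpp
// Generic RAII sketch of a scope-closing view (illustration, not Grid code).
#include <cstdio>

struct LatticeLike {
  void openView()  { std::puts("view opened"); }
  void closeView() { std::puts("view closed"); }
};

class ScopedView {
  LatticeLike &obj_;
public:
  explicit ScopedView(LatticeLike &obj) : obj_(obj) { obj_.openView(); }
  ~ScopedView() { obj_.closeView(); }      // runs automatically when the scope ends
  ScopedView(const ScopedView &) = delete; // keep a single owner of the view
  ScopedView &operator=(const ScopedView &) = delete;
};

int main() {
  LatticeLike field;
  {
    ScopedView v(field);   // analogous in spirit to: autoView(v, field, ...)
    // ... use the view ...
  }                        // view closed here, no explicit close call needed
}
```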
2b1e259441 Decode of SYCL devices fix 2020-06-04 17:16:55 -07:00
f39c2a240b Printing and device memory size detection 2020-06-04 14:58:03 -04:00
0d95805cde Print improvement 2020-06-03 22:50:32 -04:00
f67830587f Accelerator loop use 2020-06-03 22:50:09 -04:00
6bf7f839ff Better printing and logging 2020-06-03 09:28:57 -04:00
e3147881a9 Cache scheme 2020-06-03 09:23:48 -04:00
9872c76825 introduce AddTimesI and SubTimesI; slight benefit in operators, but < 1%; breaks all other impls 2020-06-03 15:20:13 +02:00
fb559614ad Initialise memory manager 2020-06-03 09:12:47 -04:00
e93e12b6a4 More verbose SYCL setup 2020-06-03 09:12:11 -04:00
0c3112cd94 Use view mechanism 2020-06-03 09:11:51 -04:00
8cfd5d2639 Need lattice view 2020-06-03 09:11:28 -04:00
1c9f20b15e Views must be closed 2020-06-03 09:10:29 -04:00
32237895bd Reorg memory manager for O(1) hash table 2020-06-03 09:09:52 -04:00
5ee3ea2144 round-up after testing of prefetches in stencil close 2020-06-03 11:58:20 +02:00
c5c2dbc0ce Optional CUDA info 2020-06-02 14:21:49 -04:00
9fcb47ee63 Explicit error message instead of infinite loop in GlobalSharedMemory::GetShmDims 2020-06-02 07:44:38 -04:00
5050833b42 revert changes due to performance penalty in Wilson using MPI 2020-06-02 13:08:57 +02:00
7bee4ebb54 correct predication for svcadd 2020-06-02 10:51:39 +02:00
71cf9851e7 correct type for vecd in TimesI and TimesMinusI 2020-06-02 10:44:15 +02:00
b4735c9904 correct zero in svcadd 2020-06-02 10:38:05 +02:00
9b2699226c use fcadd in TimesI and TimesMinusI instead of tbl and neg 2020-06-02 10:32:44 +02:00
5f52804907 update calculation of data 2020-05-30 10:55:17 +02:00
936071773e correct throughput in wilson and dwf 2020-05-29 22:15:59 +02:00
1732f9319e more mods; counters seem to work correctly 2020-05-29 18:44:00 +02:00
91c81cab30 some corrections; compiles on my laptop; untested 2020-05-29 18:19:22 +02:00
38164f8480 include counters in WilsonFermionImplementation.h 2020-05-29 17:59:26 +02:00
f013979791 add counter support in WilsonFermion.h 2020-05-29 17:13:59 +02:00
e947b563ea add space in stencil output 2020-05-29 17:11:17 +02:00
5cb3530c34 enable counters in Benchmark_wilson 2020-05-29 15:44:52 +02:00
250008372f update SVE readme 2020-05-29 15:44:25 +02:00
1d252d0922 Accelerator inline 2020-05-28 11:45:25 -04:00
006cc8a8f1 Staggered move to accelerator 2020-05-28 08:33:06 -04:00
4fedd8d29f switch to MPI_THREAD_SERIALIZED instead of SINGLE 2020-05-27 14:08:34 +02:00
cf2938688a Sycl unhappy fix 2020-05-25 08:36:53 -07:00
ee63721bad int unhappiness sycl fix 2020-05-25 08:36:24 -07:00
22c5168d70 Sycl happier 2020-05-25 08:35:56 -07:00
949ac3cd24 Must avoid non-trivial copy constructors 2020-05-25 08:35:28 -07:00
7bc0166c1c SYCL making happy - must avoid non-trivial copy constructors 2020-05-25 08:34:19 -07:00
cb0d1b3399 hopefully fix build fail 2020-05-24 21:27:00 -04:00
d1f1ccc705 HIP changes 2020-05-24 21:18:49 -04:00
c7519a237a Assertions fail on HIP for unknown reasons - debugging 2020-05-24 14:02:47 -04:00
32be2b13d3 Updates for HiP 2020-05-24 14:00:55 -04:00
92b342a477 Hip reduction too 2020-05-24 13:50:28 -04:00
556da86ac3 HIP fp16 2020-05-24 13:41:58 -04:00
8285e41574 View location / access mode 2020-05-21 16:14:41 -04:00
f999408e92 View locatoin and access mode 2020-05-21 16:14:20 -04:00
a7abda89e2 View location & access mode 2020-05-21 16:13:59 -04:00
7860a50f70 Make view specify where and drive data motion - first cut.
This is a compile time option --enable-unified=yes/no
2020-05-21 16:13:16 -04:00
6ddcef1bca fix build error enabling fcmla/mac in vector types for VLA 2020-05-21 21:21:03 +02:00
8c5a5fdfce disable fcmla in vector type building for VLA 2020-05-21 19:41:42 +02:00
046b1cbbc0 enable fcmla in tensor arithmetics; fixed-size works, VLA does not compile 2020-05-21 19:39:07 +02:00
a65ce237c1 clean up; Exch1 VLA sp+dp integrate, tested, working 2020-05-21 09:48:06 +02:00
cd27f1005d clean up; Exch1 sp integrate, tested, working 2020-05-21 08:45:43 +02:00
f8c0a59221 clean up; Exch1 dp integrate, tested, working 2020-05-21 02:48:14 +02:00
832485699f save some cycles in HtoD and DtoH by direct instead of multi-pass conversion 2020-05-20 23:04:35 +02:00
81484a4760 symmetrize Mult and MultAddComplex 2020-05-20 22:36:45 +02:00
9a86059761 symmetrize VLA and fixed size build messages 2020-05-20 20:05:42 +02:00
b780b7b7a0 guard prevents multiple TOFU messages 2020-05-20 19:20:59 +02:00
9e085bd04e guard prevents multiple A64FX build messages 2020-05-20 19:16:30 +02:00
6c6812a5ca GB/s output 2020-05-20 12:26:57 +01:00
8358ee38c4 pull develop 2020-05-19 08:56:18 -04:00
1f154fe652 some cleanup in BaryonUtils 2020-05-19 13:48:56 +01:00
d708c0258d some cleanup in BaryonUtils 2020-05-19 13:48:00 +01:00
a7635fd5ba summit mem 2020-05-18 17:52:26 -04:00
6b6bf537d3 comment out mac in vector types 2020-05-18 20:36:16 +02:00
323a651c71 correct typo 2020-05-18 19:58:27 +02:00
9f212679f1 support fcmla in vector_types, untested 2020-05-18 19:55:18 +02:00
032f7dde1a update SVE readme, asm generator 2020-05-18 19:10:36 +02:00
ebb60330c9 Automatic data motion options beginning 2020-05-17 16:34:25 -04:00
5aa60be17d SerialisableClassName method for serialisable enum, and boolean to test if a serialisable object is an enum 2020-05-15 20:00:34 +01:00
50b1db1e8b implemented correct _m form (using 3 operands instead of 2) 2020-05-15 10:01:05 +02:00
015d8bb38a introduced assertions in Benchmark_wilson, removed data output from Benchmark_dwf 2020-05-15 09:15:50 +02:00
10a34312dc some fixed-size code clean up 2020-05-14 23:20:16 +02:00
db8c0e7584 replaced _x form with _m form when using even/odd predication 2020-05-14 23:17:35 +02:00
32fbdf4fb1 Merge pull request #5 from paboyle/develop
Sync upstream
2020-05-13 09:02:56 +02:00
a9847aa866 Dependence fix 2020-05-12 20:03:37 -04:00
2e652431e5 No compile on summit fix 2020-05-12 18:56:47 -04:00
8b5b55b682 Make tests all compile against current Grid; mostly MdagM removal-of-norms fixes, but a few minor
issues fixed too
2020-05-12 17:57:24 -04:00
0e3c49f687 TransposeIndex was broken by Christoph 2020-05-12 17:57:01 -04:00
cb7ee37562 Close expressions in arg to cshift 2020-05-12 17:56:40 -04:00
82f71643a4 Remove the norm in MdagM 2020-05-12 17:55:53 -04:00
d15ccad8a7 switched to vec* in Reduce 2020-05-12 20:41:14 +02:00
0009b5cee8 updated SVE_README 2020-05-12 19:02:33 +02:00
20d1941a45 enabled asm kernels for fixed-size A64FXFIXEDSIZE 2020-05-12 19:01:12 +02:00
d24d8e8398 Use X-direction as more bits meaningful on CUDA.
2^31-1 should always be enough for SIMD and thread reduced local volume

e.g. 32*2^31 = 2^36 = (2^9)^4 or 512^4 is big enough.

Where 32 is gpu_threads * Nsimd = 8*4
2020-05-12 10:35:49 -04:00
162e4bb567 no automatic prefetching for now 2020-05-12 07:01:23 -04:00
07c0c02f8c Speed up Cshift 2020-05-11 17:02:01 -04:00
8c31c065b5 Keep the Vector fixed to protect it from realloc 2020-05-11 17:00:30 -04:00
b7c76ede29 Removed some assertions in Test_simd and removed exit() in Reduce 2020-05-11 22:43:00 +02:00
05edf803bd corrected typo 2020-05-12 03:59:59 +09:00
b1c86900b2 Merge pull request #4 from paboyle/develop
merge
2020-05-11 20:59:29 +02:00
78b8e40f83 switched to gcc's internal data types 2020-05-11 18:11:23 +02:00
fc2e9850d3 temporarily enable TOFU by default when using A64FX or A64FXFIXEDSIZE 2020-05-11 13:25:02 +02:00
ffaaed679e MPI_THREAD_SINGLE hack for Fugaku, enabled by -DTOFU 2020-05-11 13:21:39 +02:00
bbbee5660d First compile on HiP 2020-05-10 05:28:09 -04:00
ea08f193e7 Allocator cache split into large/small pools 2020-05-10 05:24:26 -04:00
2bb2c68e15 Separate pools for small and large allocations cache 2020-05-09 22:57:21 -04:00
efe5bc6a3c Split allocator cache into two pools of different sizes 2020-05-09 22:27:56 -04:00
b2fd8b993a fixed-size clean up 2020-05-09 22:53:42 +02:00
291ee8c3d0 updated fixed-size implementation; only Exch1 and prefetches missing 2020-05-09 22:18:02 +02:00
e1a5b3ea49 unions for tables eliminate explicit loads, gcc does not complain 2020-05-09 21:21:57 +02:00
55a55660cb reverted changes 2020-05-09 12:48:42 +02:00
384da487bd Merge branch 'develop' of https://github.com/paboyle/Grid into develop 2020-05-08 18:55:11 -04:00
ee1de82a53 Working ITT benchmark again 2020-05-08 18:54:50 -04:00
2b576fc185 Commented dead code removed 2020-05-08 18:54:29 -04:00
52081acfa5 NVCC compile fixes 2020-05-08 13:14:12 -04:00
b01b7f761a Merge pull request #283 from DanielRichtmann/feature/minor-fixes
Some small fixes
2020-05-08 10:52:03 -04:00
c83471bfd0 Fix missing checkerboards for adj und conjugate 2020-05-08 16:44:03 +02:00
ab0c5d77fb Correct NonHermitianSchurOperatorBase 2020-05-08 16:44:02 +02:00
779e3c7442 Const-correctness for retrieval routines of GridStopWatch 2020-05-08 16:43:52 +02:00
0c570824f2 Add missing declaration of GridCmdOptionInt 2020-05-08 16:43:51 +02:00
f8b8e00090 Systematise the accelerator primitives and locate to Grid/threads/Accelerator.h / Accelerator.cc
Aim to reduce the amount of cuda and other code variations floating around all over the place.

Will move GpuInit into Accelerator.cc from Init.cc
Need to worry about SharedMemoryMPI.cc and the Peer2Peer windows
2020-05-08 06:23:55 -07:00
0dd1bdfa94 Merge branch 'develop' of https://github.com/paboyle/Grid into develop 2020-05-08 09:21:43 -04:00
1d65e2f62c Slightly faster Chebyshev; ifdef'ed out the fastest until tested numerics
Lifted from HDCR setup
2020-05-08 09:20:54 -04:00
93920c4811 Remove verbose 2020-05-08 09:19:54 -04:00
6859a3e1d4 Schur operator 2020-05-08 09:19:12 -04:00
21ca182c36 Comments remove 2020-05-08 09:18:24 -04:00
ceb8b374da API change v3 2020-05-08 15:04:44 +02:00
4bc2ad2894 API change v2 2020-05-08 15:00:25 +02:00
798af3e68f retry changing StoD API 2020-05-08 14:34:59 +02:00
b0ef2367f3 testing alternate call to PrecisionChange 2020-05-08 14:22:44 +02:00
71a7350a85 changed 2nd argument in Reduce to native vector type 2020-05-08 12:26:51 +02:00
6f79369955 trying to get rid of macro definition error 2020-05-08 12:19:24 +02:00
f9cb6b979f corrected more typos 2020-05-08 12:11:01 +02:00
ed4d9d17f8 corrected type 2020-05-08 12:09:22 +02:00
fbed02690d some changes in breaking out A64FX: use -DA64FXFIXEDSIZE for fixed size, but also define GEN 2020-05-08 12:05:31 +02:00
39f3ae5b1d corrected more types 2020-05-08 11:07:14 +02:00
e64bec8c8e pulled SVE typedefs out of Optimization 2020-05-08 11:04:21 +02:00
0893b4e552 fixed typos in PrecisionChange 2020-05-08 10:59:07 +02:00
92f0f29670 fixed double overloading vecf in Div, corrected typos 2020-05-08 10:57:23 +02:00
48a340a9d1 GEN seems to defined by default -> some fixes applied 2020-05-08 10:47:49 +02:00
f45621109b placed typedefs in Optimization 2020-05-08 10:41:52 +02:00
32d1a0bbea added even more debug output 2020-05-08 10:39:26 +02:00
267cce66a1 added more debug output 2020-05-08 10:29:28 +02:00
3417147b11 added real fma, corrected typos in tbls; integrated, must supply A64FXGCC with GEN in configure 2020-05-08 10:20:19 +02:00
b338719bc8 first transition to fixed-size done, excl. Exch; next step: integration 2020-05-07 22:33:28 +02:00
2b81cbe2c2 first attempt to introduce tables using fixed-size; still incomplete 2020-05-07 22:01:19 +02:00
acff9d6ed2 transition to fixed size data types almost done; still incomplete 2020-05-07 21:24:07 +02:00
053b4dd495 Merge pull request #282 from felixerben/baryon-reversal
Baryon reversal
2020-05-07 18:09:17 +01:00
a306a49788 first mods for fixed size; still incomplete 2020-05-07 19:07:49 +02:00
42bb5f0721 assertion 2020-05-07 18:06:12 +01:00
253bcc3426 back to old version 2020-05-07 18:03:17 +01:00
a887206413 Merge pull request #281 from felixerben/feature/baryonSpeedup
Feature/baryon speedup
2020-05-07 13:41:29 +01:00
591ebb6213 Merge branch 'develop' of github.com:paboyle/Grid into feature/baryonSpeedup 2020-05-07 11:13:21 +01:00
56e2f7d088 deleted test routines. cleaned up fast version. assert Ns=4,Nc=3. 2020-05-07 10:03:45 +01:00
7ef03c5368 updated SVE readme 2020-05-06 16:30:37 +02:00
525418abfb Merge pull request #273 from lehner/feature/gpt
Feature/gpt
2020-05-06 10:10:51 -04:00
5f780806c2 Merge pull request #279 from paboyle/bugfix/nvcc-config
configure fix for nvcc with extra arguments as CXX
2020-05-06 10:07:52 -04:00
3c6ffcb48c Merge branch 'develop' into feature/gpt 2020-05-06 15:03:35 +02:00
87984ece7d add Lattice_basis.h 2020-05-06 08:47:18 -04:00
e9b295f967 Synchronize blocking infrastructure with GPT 2020-05-06 08:42:28 -04:00
224cbf0453 Merge pull request #280 from mmphys/bugfix/ET_go_home
Bugfix/et go home
2020-05-05 17:56:51 -04:00
c1e57d4357 Merge branch 'develop' into bugfix/ET_go_home
* develop:
  SYCL prep - no sycl just make it compile through DPC++
  dpc++ didn't like rdtsc()
  Make compile if HAVE_LIME=0
  Lime optional
2020-05-05 22:35:04 +01:00
28a1fcaaff First compile against SYCL 2020-05-05 11:13:27 -07:00
6b64727161 disable comments 2020-05-05 05:05:36 -04:00
04863f8f38 debug new AcceleratorView 2020-05-04 16:07:03 -04:00
04927d2e40 SYCL prep - no sycl just make it compile through DPC++ 2020-05-04 10:28:29 -07:00
7caed4edd9 dpc++ didn't like rdtsc() 2020-05-04 10:27:05 -07:00
59c51d2c35 Make compile if HAVE_LIME=0 2020-05-04 10:26:20 -07:00
ff53b231c8 Merge branch 'develop' of https://github.com/paboyle/Grid into develop 2020-05-04 10:25:10 -07:00
fc19cf905b Lime optional 2020-05-04 10:24:48 -07:00
2a1387e992 rankInnerProduct 2020-05-03 17:27:11 -04:00
9bfa51bffb cleanup comment 2020-05-03 09:12:52 -04:00
38532753f4 interface cleanup 2020-05-03 08:58:32 -04:00
949be9605c fix pragmas 2020-05-02 16:20:03 -04:00
63cf201ee7 Add AdviseInfrequentUse 2020-05-02 11:38:42 -04:00
c8af498a2a BinaryIO fix for alternative little-endian format name (used in 96I ensemble) 2020-05-01 03:45:50 -04:00
ddb192bac7 re-work double precision promotion for summit 2020-04-30 16:09:57 -04:00
7666300a6f Merge branch 'develop' into bugfix/ET_go_home
* develop:
  Basis rotate stack passing to GPU reduction
  Clean up warning
2020-04-30 20:10:32 +01:00
4a4b9e305d Fix: strToVec enters infinite loop and exhausts memory if operator>> fails before the end of string, e.g. if parsing "0_0_0" for momentum instead of "0 0 0". 2020-04-30 19:40:04 +01:00
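The failure mode described in this commit is the standard stream-extraction pitfall; a minimal reproduction and fix in plain C++ (hypothetical strToVec, not the actual Grid/Hadrons implementation):

```cpp
// Minimal reproduction of the bug described above (names are illustrative).
#include <sstream>
#include <string>
#include <vector>

std::vector<int> strToVec(const std::string &s) {
  std::istringstream in(s);
  std::vector<int> v;
  int x;
  // Buggy version: `while (!in.eof()) { in >> x; v.push_back(x); }`
  // On "0_0_0" the extraction fails at '_' without ever reaching eof,
  // so the loop spins forever and the vector grows until memory is exhausted.
  while (in >> x)            // fixed: stop as soon as an extraction fails
    v.push_back(x);
  return v;
}

int main() {
  auto good = strToVec("0 0 0");   // {0, 0, 0}
  auto bad  = strToVec("0_0_0");   // {0} with the fix; infinite loop with the bug
  return static_cast<int>(good.size() + bad.size());
}
```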
9b2d2d0fc3 Basis rotate stack passing to GPU reduction 2020-04-30 12:31:07 -04:00
5011753f4f Clean up warning 2020-04-30 10:23:48 -04:00
dbaeefaeef All Eigen::TensorMap objects are fixed (i.e. cannot be dynamically resized) 2020-04-30 15:02:51 +01:00
dee96cbf82 Added workaround in configure to still catch Cuda compiler when nvcc with extra arguments (eg -ccbin) is used as CXX 2020-04-29 10:37:11 -04:00
dd3ebc2ce4 Slow compile on NVCC switch off conserved current 2020-04-29 08:43:12 -04:00
103e7ae2f0 Merge branch 'develop' of https://github.com/paboyle/Grid into develop 2020-04-29 03:05:36 -04:00
29ae5615c0 Sequential fix 2020-04-29 03:05:15 -04:00
6240e02619 added assertion to avoid potential infinite loop 2020-04-27 18:50:53 +01:00
f4033ad8cb baryon speedup by a factor 2 2020-04-27 17:46:14 +01:00
5abec5b8a9 SVE_readme update, update Grid_vector_types.h 2020-04-25 13:48:26 +02:00
499edc0636 updated SVE_README.txt; defined ARMCLANGCOMPAT macro 2020-04-25 13:41:24 +02:00
d990e61be3 armclang 20.1 settings in SVE readme 2020-04-25 12:11:43 +02:00
3edb2dc2da removed -static from gcc CXXFLAGS 2020-04-24 13:04:34 +02:00
f1fe444d4f blocked precision promotion infrastructure upgrade 2020-04-24 06:27:20 -04:00
345721220e resolved merge conflict 2020-04-24 10:14:21 +02:00
6db68d6ecb added SVE configure for armclang and gcc 2020-04-24 10:10:47 +02:00
dae820aa96 Merge pull request #277 from mmphys/bugfix/grid-config
Bugfix/grid config
2020-04-23 10:26:54 -04:00
5daf176f4a Updated to expose GRID_CXXLD in addition to CXXLD.
NB: CXXLD required as this is what drives linking behaviour.
2020-04-23 15:25:53 +01:00
e96c86ec14 Make grid-config message more specific for --cxx and --cxxld 2020-04-23 13:10:45 +01:00
09f0963d1f changes in configure.ac ; to be verified 2020-04-23 11:27:03 +02:00
6f44e3c192 reverted changes in configure.ac ; included SVE configure readme 2020-04-23 11:18:50 +02:00
c2c3cad20d Merge branch 'develop' of https://github.com/paboyle/Grid into develop 2020-04-23 04:35:42 -04:00
edec9ee2e2 Conserved current rewrite done. Zmobius working 2020-04-23 04:34:01 -04:00
ed70cce542 Test for 5D DWF observables 2020-04-23 04:29:45 -04:00
4701201b5f grid-config: Expose CXXLD (for GPU build) and update help 2020-04-22 18:42:30 +01:00
5893888f87 removed default no-strict-aliasing for gcc-10.0.1 exclusively 2020-04-22 19:29:55 +02:00
39b448affb Merge remote-tracking branch 'origin/develop' into feature/a64fx-2 2020-04-22 17:34:12 +02:00
e54a8f05a9 Exchange1 with generic version for now, should use svtbl2 in final version 2020-04-20 22:45:27 +02:00
0782b76ed4 Merge pull request #274 from paboyle/feature/zmobius_paramcompute
ZMobius parameter computation
2020-04-20 14:39:29 -04:00
0896f2cead Added missing include guards in bigfloat_double.h 2020-04-20 10:30:38 -04:00
181709bba4 Merge branch 'develop' into feature/zmobius_paramcompute 2020-04-20 09:12:34 -04:00
64b72fc17f testing gcc 10.0.1: build errors in Exchange1 using -DA64FX and in Lattice_base.h building Dslash only 2020-04-19 01:25:40 +02:00
091d5c605e towards more precise blocking 2020-04-17 04:25:28 -04:00
6fdce60492 revised BodyA64FX; 990 GiB/s Wilson, 687 GiB/s DW using intrinsics (armclang 20.0) 2020-04-16 22:43:32 +02:00
90229cfb0f Merge pull request #270 from milc-qcd/feature/CGinfo
feature/CGinfo
2020-04-16 11:46:08 -04:00
0475c46ecb Merge pull request #256 from djm2131/feature/BiCGSTAB
Import BiCGSTAB solvers and tests
2020-04-16 11:45:15 -04:00
3cca10e617 Merge pull request #276 from nils-asmussen/fix/regression_nt
fix regression in tests/core/Test_qed.cc
2020-04-16 11:42:39 -04:00
327da332bb Merge branch 'develop' of https://github.com/paboyle/Grid into feature/gpt 2020-04-16 11:30:17 -04:00
852db4626a re-introduced HOTFIX because Grid binaries give wrong results otherwise; checked in good gridverter.py 2020-04-15 18:22:19 +02:00
43dc2814dd fix regression in core/Test_qed.cc 2020-04-15 16:10:15 +01:00
6504a098cc 999 GiB/s Wilson; 694 GiB/s DW (DP) 2020-04-15 15:06:52 +02:00
79a385faca disabled armclang hotfix because armclang 20.0 performance gets a little 2020-04-15 11:46:55 +02:00
c12a67030a 980 GiB/s Wilson; 680 GiB/s DW (DP) 2020-04-15 10:55:06 +02:00
581392f2f2 now with pf, best results so far using intrinsics+pf 2020-04-12 22:06:14 +02:00
113f277b6a enable dslash asm using -DA64FXASM, additionally -DDSLASHINTRIN for intrinsics impl 2020-04-11 04:55:01 +02:00
f3a8d039a2 Merge branch 'feature/hdcr' into develop 2020-04-10 22:01:52 -04:00
974586bedc Dslash finally works; cleaned up; uses MOVPRFX in assembly 2020-04-10 22:26:40 +02:00
4e864e56c9 develop pull 2020-04-10 17:19:18 +01:00
014dbfa464 Compile fix with OpDirAll 2020-04-10 11:57:09 -04:00
3b0e07882f Adding another form of polynomial 2020-04-10 11:28:33 -04:00
8e81a811d0 Merge branch 'feature/hdcr' into develop 2020-04-10 11:14:49 -04:00
aa13118127 Missing conjugate already fixed in develop 2020-04-10 11:11:24 -04:00
6cdb09c884 Faster copy region 2020-04-10 11:10:52 -04:00
a65bc64f10 Accelerator peek poke 2020-04-10 11:09:59 -04:00
11dec4883c Don't throw assert 2020-04-10 11:09:11 -04:00
afa458c812 Extra solvers 2020-04-10 11:08:19 -04:00
dc50190b8f Faster GPU basis rotation
May need to later include Regensburg optimised CPU variant
2020-04-10 11:06:04 -04:00
160f78c1e4 changed debug output to variable direct 3 2020-04-10 12:23:07 +02:00
7e4e1bbbc2 changed debug output to variable direct 2 2020-04-10 12:22:04 +02:00
e699b7e9f9 changed debug output to variable direct 2020-04-10 12:18:30 +02:00
a28bc0de90 debug register address test in WilsonHand 2020-04-10 12:07:45 +02:00
14d0fe4d6c added predication in WilsonHand 2020-04-10 12:04:00 +02:00
0ad2e0815c debug output in WilsonHand 2020-04-10 11:56:29 +02:00
1c8ca05e16 Merge branch 'feature/a64fx-2' of https://github.com/nmeyer-ur/Grid into feature/a64fx-2 2020-04-09 23:32:19 +02:00
dc9c8340bb switched to DSLASHINTRIN for A64FX Dslash intrinsics 2020-04-09 23:30:23 +02:00
19eef97503 specialized A64FX Dslash kernels 2020-04-09 23:25:25 +02:00
635246ce50 corrected typo 2020-04-09 21:42:50 +02:00
5cdbb7e71e fixed A64FX Dslash; compiles, but does not specialize -> assertion 2020-04-09 21:23:39 +02:00
8123590a1b changes 2020-04-09 16:45:47 +02:00
86c9c4da8b changes 2020-04-09 16:40:06 +02:00
cd1efee866 changes 2020-04-09 16:35:13 +02:00
bd310932f7 changes 2020-04-09 16:32:31 +02:00
304762e7ac changes 2020-04-09 16:26:01 +02:00
d79ab03a6c changes 2020-04-09 16:19:25 +02:00
d5708e0eb2 more changes 2020-04-09 15:43:34 +02:00
123f6b7a61 more changes 2020-04-09 15:17:19 +02:00
2b6457dd9a added xp/xm recon accum 2020-04-09 15:13:19 +02:00
b367cbd422 defined ADD_RESULT 2020-04-09 15:08:45 +02:00
e252c1aca3 addressing 2020-04-09 15:03:12 +02:00
b140c6a4f9 addressing 2020-04-09 15:01:15 +02:00
326de36467 revised sU addressing scheme 2020-04-09 14:44:25 +02:00
9f224a1647 fixed typo in single 2020-04-09 14:30:21 +02:00
bb46ba9b5f fixed array size in single 2020-04-09 14:28:45 +02:00
dd5a22b36b revised declarations 2020-04-09 14:21:27 +02:00
1ea85b9972 Disabled build message 2020-04-09 13:47:21 +02:00
8fb63f1c25 added A64FX Wilson kernels single precision 2020-04-09 13:41:04 +02:00
77fa586f6c introduced A64FX Wilson kernels 2020-04-09 13:30:06 +02:00
96e8e44fd4 Merge pull request #2 from DanielRichtmann/feature/fused-innerproduct-norm2
Fused innerProduct + norm2 on first argument operation
2020-04-06 13:16:58 +02:00
5fc8a273e7 Fused innerProduct + norm2 on first argument operation 2020-04-06 11:52:29 +02:00
d671a63e78 Update README.md 2020-04-03 19:52:15 +01:00
15238e8d5e reduce acle works, clean up 2020-04-03 20:40:44 +02:00
b27e31957a reduce acle revised 2020-04-03 19:46:15 +02:00
46927771e3 reduce acle still needs overhaul 2020-04-03 19:30:48 +02:00
d8cea77707 define simd width in header 2020-04-03 19:22:25 +02:00
5f8a76d490 clean up, reduction in acle 2020-04-03 19:18:24 +02:00
28d49a3b60 build problem resolved 2020-04-03 16:52:48 +02:00
b4c624ece6 added A64FX support 2020-04-03 15:43:23 +02:00
2c22db841a Added momentum scaling to scalar HMC theories in order to follow UKQCD/CPS conventions 2020-04-02 17:38:47 +01:00
856d168e41 global sum over vectors of uint64_t 2020-03-29 07:56:05 -04:00
6235c7ba98 IPP path fix in configure 2020-03-27 17:23:29 +00:00
7e13724882 removing Hadrons 2020-03-27 12:03:32 +00:00
b6cbdd2aa3 Merge pull request #1 from DanielRichtmann/feature/read-openqcd
Feature/read openqcd
2020-03-26 17:39:04 +01:00
a2188ea875 remove debugging printf from WilsonKernelsImplementation 2020-03-26 09:12:36 -04:00
989af65807 Check in parallel reader for openqcd configs 2020-03-24 11:20:54 +01:00
60db3133d3 make trace,adj,transpose unary operators 2020-03-16 17:59:56 -04:00
c9b737a4e7 make trace,adj,transpose unary operators 2020-03-16 17:58:30 -04:00
037bb6ea73 Check in reader for openqcd configs
This reader is suboptimal in the sense that it opens the entire config on every MPI rank.
2020-03-16 14:28:02 +01:00
05ebc458e2 Merge pull request #260 from mmphys/feature/distil
Distillation: save eigenvalues of the Laplacian for all timeslices
2020-03-13 14:00:21 +00:00
3753508957 Making change 1) as simple as possible 2) as much like MSink/Point.hpp as possible 2020-03-12 13:47:51 +00:00
c1677fccf6 Merge branch 'develop' into feature/distil
* develop:
  bugfix ZPerambulator
  registered module supporting ZMobius action
  changed to push_back according to request
  Added Hadrons_Error in case blockSize is set too large
  bugfix in perambulator module

# Conflicts:
#	Hadrons/Modules/MDistil/Perambulator.hpp
2020-03-12 12:45:18 +00:00
35e8e31749 Merge pull request #272 from mmphys/feature/ZPeramb
bugfix ZPerambulator
2020-03-12 12:28:04 +00:00
34813e9b04 Merge branch 'develop' into feature/ZPeramb 2020-03-12 12:27:56 +00:00
373cf61abb bugfix ZPerambulator 2020-03-12 11:44:43 +00:00
4e8fbc4b49 Merge pull request #271 from mmphys/feature/ZDistil
registered module supporting ZMobius action
2020-03-12 10:54:07 +00:00
516ac1d4d5 registered module supporting ZMobius action 2020-03-12 10:52:27 +00:00
318f63eb34 Merge pull request #268 from mmphys/a2a-error-log
Added Hadrons_Error in case blockSize is set too large
2020-03-11 11:09:00 +00:00
16503d7532 Merge pull request #267 from mmphys/feature/distil-bugfix
bugfix in perambulator module
2020-03-11 11:08:23 +00:00
0fa93383b7 changed to push_back according to request 2020-03-11 09:05:01 +00:00
0a827aa7bf Added Hadrons_Error in case blockSize is set too large 2020-03-11 08:52:52 +00:00
165c68e28e Change TrueResiduals to TrueResidualShift and IterationsToComplete to IterationsToCompleteShift 2020-02-29 17:51:51 -06:00
b32b1ca642 bugfix in perambulator module 2020-02-26 12:06:45 +00:00
9479bc8486 Make IterationsToComplete and TrueResidual externally accessible 2020-02-19 17:43:57 -06:00
8a5c13d5fb Still fast moving in changes 2020-02-06 17:57:26 -05:00
bdccb0c91f Working 2 types of decomposition 2020-02-06 17:26:55 -05:00
68b45f6444 Lower left/upper right region cut paste 2020-02-06 15:50:26 -05:00
ef9b3e658a extra typedef 2020-02-06 15:47:14 -05:00
b9ca40cc44 More precise power method at start 2020-02-06 10:09:14 -05:00
2f421a5db1 Comment fix 2020-02-06 10:08:27 -05:00
10192dfc71 Wall source momenta must be specified for spatial components only.
So we don't break existing scripts, allow momentum in time direction as well, but only if zero.
Fail early, so do the check in setup()
2020-01-31 15:02:03 +00:00
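Purely as an illustration of the rule above (a hypothetical helper, not the actual Hadrons module code): accept the Nd-1 spatial components, tolerate a trailing time component only when it is zero, and run the check at setup() time so a bad parameter file fails before any contraction starts.
    #include <cstddef>
    #include <stdexcept>
    #include <string>
    #include <vector>

    // Hypothetical setup()-time validator for a wall-source momentum: spatial
    // components only, but "px py pz 0" is tolerated so existing scripts keep working.
    void checkWallMomentum(const std::vector<double> &p, std::size_t Nd = 4)
    {
        if (p.size() == Nd && p.back() == 0.0)
            return;                               // legacy form with zero time component
        if (p.size() != Nd - 1)
            throw std::runtime_error("momentum must have " + std::to_string(Nd - 1)
                                     + " (spatial) components");
    }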
c69a3b6ef6 When saving eigenvectors, LapEvec now saves eigenvalues for every timeslice as well.
I.e. nT x nVec eigenvalues are saved in FileName.evals.conf.h5.
A new named tensor, "TimesliceEvals" can be used to simplify restoring these from disk.
NB: The changes in BaseIO add support so that Eigen tensors can be easily used in MPI operations, e.g. GlobalSum.
See LapEvec.hpp for an example of how this is done.
2020-01-29 21:20:20 +00:00
852fc1b001 True Hierarchical multigrid for DWF 2020-01-27 13:45:10 -05:00
2b5de5bba5 MdagM operator without norm option 2020-01-27 13:44:30 -05:00
2e85cae74e Add Jacobi polynomials 2020-01-27 13:43:49 -05:00
76c823781e Much faster coarsening 2020-01-27 13:43:19 -05:00
114db3b99d Optional MdagM without norms 2020-01-27 13:42:51 -05:00
49e123dbda Use explicit linalg calls to get coalesce optimisations on GPU 2020-01-27 12:44:51 -05:00
8cec294ec9 Make CG a bit less verbose as it gets annoying in nested algorithms.
Can use Iterative logging if you want to see more
2020-01-27 12:44:04 -05:00
eb5b720e94 Normal Equations can be used in HDCR now 2020-01-27 12:43:29 -05:00
b2736ec80b Make PrecGCR recursive - it can precondition itself 2020-01-27 12:42:48 -05:00
086256a032 Less sloppy convergence test on PowerMethod 2020-01-27 12:41:59 -05:00
afc7426f39 Much bigger pointer cache in case of Nvidia due to cost of setting up UVM allocations 2020-01-27 12:41:16 -05:00
7c061e20c9 All directions of Dirac operator for fast coarsening 2020-01-27 12:40:13 -05:00
e5d1c09665 Faster DhopDirAll for little dirac operator coarsening 2020-01-27 12:38:54 -05:00
8016a465ae Remove extraneous variable 2020-01-27 12:35:37 -05:00
d8b9742092 DhopDirAll for faster matrix elements of little Dirac operator 2020-01-27 12:34:54 -05:00
1bd87c35d7 Read coalescing on Nvidia 2020-01-27 12:29:56 -05:00
fa856c9669 Disable information message 2020-01-27 12:28:46 -05:00
48008e4d8b Thread coordinate creation loop 2020-01-27 12:28:16 -05:00
55cdb17691 Integer divide for blocking 2020-01-27 12:27:45 -05:00
2ed39ebb7a Perambulator won't even allocate memory for unsmeared sinks unless the filename is specified.
Prior to this update, memory was allocated regardless of whether these were requested.
2020-01-24 13:01:06 +00:00
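A rough sketch of the allocation rule above, with stand-in types rather than the real Hadrons objects: storage for the unsmeared sinks is reserved only when an output filename has actually been supplied.
    #include <cstddef>
    #include <string>
    #include <vector>

    // Hypothetical illustration: no filename, no allocation.
    struct PerambSetupSketch
    {
        std::string        unsmearedSinkFileName;  // empty => unsmeared sinks not requested
        std::vector<float> unsmearedSinks;         // stand-in for the real sink field type

        void setup(std::size_t sinkVolume)
        {
            if (!unsmearedSinkFileName.empty())
                unsmearedSinks.resize(sinkVolume);  // otherwise stays empty: no memory cost
        }
    };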
96671bbb24 Added ability to pass callback to MADWF that is called every inner iteration and allows user to, for example, adjust the inner solver tolerance depending on residual
Added a general implementation of the Remez algorithm for producing arbitrary rational polynomial approximation with optional restriction to even/odd polynomials
Added implementation of computation of ZMobius parameters
Added Test_zMADWF_prec to test ZMobius in MADWF
2020-01-17 12:45:30 -08:00
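A minimal sketch of the callback idea in the first item above (names and signature are assumptions, not the actual Grid MADWF interface): the outer loop hands the iteration count and current residual to a user-supplied function, which returns the tolerance for the next inner solve.
    #include <algorithm>
    #include <functional>
    #include <iostream>

    // Hypothetical outer iteration: the user callback can relax or tighten the
    // inner solver tolerance as the outer residual evolves.
    void madwfLikeSolve(const std::function<double(int, double)> &innerTol)
    {
        double resid = 1.0;
        for (int iter = 0; iter < 20 && resid > 1e-8; ++iter)
        {
            double tol = innerTol(iter, resid);
            // ... the inner solve to tolerance tol would go here; fake the convergence:
            resid *= 0.1;
            std::cout << "outer " << iter << "  inner tol " << tol << std::endl;
        }
    }

    int main(void)
    {
        // Ask for an inner tolerance two orders of magnitude below the outer residual.
        madwfLikeSolve([](int, double r) { return std::max(1.0e-11, 1.0e-2 * r); });
        return 0;
    }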
554542b773 Merge branch 'feature/hdcr' of https://github.com/paboyle/Grid into feature/hdcr 2020-01-06 11:47:56 -05:00
03da4040e2 Make summit happy 2020-01-06 11:47:48 -05:00
e583035614 Change to interface to minise comms in evaluating coarse space operator 2020-01-06 11:43:59 -05:00
3c3d6a94f3 Optimising the force term a bit 2020-01-04 03:16:23 -05:00
205ea4bbb2 More verbose Lanczos 2020-01-04 03:13:40 -05:00
039eb7b2eb Make the force term and coarsening multigrid more optimised 2020-01-04 03:12:17 -05:00
f7e4bd1f6d Getting more optimised 2020-01-04 03:11:53 -05:00
0afecfcae7 Nearing well optimised state 2020-01-04 03:11:19 -05:00
ba40a3f763 Alternate low pass filter option 2020-01-03 05:29:09 -05:00
aa920aa532 Improved DWF multigrid 2019-12-28 10:32:35 -05:00
c0d8e4dce5 Improved Multigrid for DWF 2019-12-28 10:32:15 -05:00
0ca1992151 Remove warning in tensor layout comparison. Make default names and index names visible for PerambTensor and NoiseTensor 2019-12-20 13:53:27 +00:00
df2b0c4e79 Merge branch 'develop' into feature/distil
* develop:
  Missing conjugate in MooeeInvDag
  Allow subspace setup to not converge
  fp16 mandatory. Use SFW if not available as hdw
2019-12-20 13:24:59 +00:00
9cfd64c604 Coarse grid on GPU, not fast enough yet. Need a 10x 2019-12-17 05:24:45 -05:00
e478404291 Tuned up significantly on GPU, but another 10x in coarse space required 2019-12-17 05:03:25 -05:00
9aafd20468 Simple block project promote runs faster on GPU 2019-12-17 05:01:39 -05:00
5d834486c9 Merge pull request #259 from grid-test-organisation/feature/5d-improvement-fix
Missing conjugate in MooeeInvDag
2019-12-16 04:20:37 -05:00
f7373e97a4 Missing conjugate in MooeeInvDag 2019-12-16 10:05:50 +01:00
9e15474999 Accelerator loop attempt at speed up 2019-12-14 05:28:16 -05:00
152b525a4d Typo fix 2019-12-13 22:44:42 -05:00
d18994eddc offload more of mgrid to GPU 2019-12-13 22:08:11 -05:00
b8bd8cd2ae Merge branch 'develop' of https://github.com/paboyle/Grid into develop 2019-12-13 21:32:10 -05:00
736b19485e Faster set up and some dead code ifdef'ed out 2019-12-13 21:30:48 -05:00
c7637a84ad Documentation tweak for peculiarities of OpenMPI --prefix 2019-12-12 17:00:03 +00:00
a7772c827b Documentation tweak 2019-12-12 16:05:22 +00:00
8e83398861 Merge pull request #257 from AndrewYongZhenNing/develop
Added NamedTensor.hpp
2019-12-11 21:36:59 +00:00
843ca9350a Fix naming conventions to be consistent with Peter 2019-12-11 11:46:18 -05:00
f47b2b6e13 Added NamedTensor.hpp 2019-12-11 15:56:46 +00:00
5bfd1470ad Merge branch 'develop' into feature/hdcr 2019-12-10 21:51:06 -05:00
6957b0b58a Merge branch 'develop' of https://github.com/paboyle/Grid into develop 2019-12-10 21:50:42 -05:00
d73f0b8618 Verbose for temporary debug 2019-12-10 21:50:06 -05:00
0b3a3562c3 Some MPI implementations (summit) generate SIGUSR2, so trap that 2019-12-10 21:49:12 -05:00
710fee5d26 Subspace setup testing code
and timing verbose
2019-12-10 21:48:42 -05:00
bab0bf2e93 Merge branch 'develop' into feature/hdcr 2019-12-10 21:47:41 -05:00
848079e8ba Merge pull request #235 from grid-test-organisation/feature/5d-improvement
MooeeInv and M5D optimisations + enable threading with nvcc
2019-12-10 21:45:03 -05:00
f2a4f13111 Must offload the Coarsened matrix if Stencil buffers are device resident 2019-12-10 19:32:12 -05:00
4180a4a8a7 Import BiCGSTAB solvers and tests 2019-12-10 17:20:35 -05:00
b9b9fcbfa0 Merge pull request #229 from nils-asmussen/feature/JacobiSmear
MSource::jacobi smear + sort file contents of Modules.hpp and modules.inc
2019-12-09 22:50:02 +00:00
bbe48998a8 sort Modules.hpp and modules.inc + add module JacobiSmear 2019-12-09 18:06:29 +00:00
6446671a9c Merge pull request #241 from nils-asmussen/fix/remQCDns_ignore_ws
Undo whitespace changes in fix/removeQCDremnants to allow comparing relevant changes
2019-12-09 18:02:21 +00:00
110373ea79 Merge pull request #204 from nils-asmussen/sha256sum_Eigen_download
bootstrap.sh: verify checksum of Eigen tar file
2019-12-09 18:01:46 +00:00
a986786192 bootstrap.sh: verify checksum of Eigen tar file if sha256sum is installed 2019-12-09 17:11:21 +00:00
edd1c924eb Merge branch 'develop' of https://github.com/paboyle/Grid into develop 2019-12-09 03:53:01 -05:00
9b6b0caa55 Junk commit fix 2019-12-09 03:01:58 -05:00
2a48617ac5 Merge branch 'develop' of https://github.com/paboyle/Grid into develop 2019-12-09 03:00:00 -05:00
876d9c957c QMR 2019-12-09 02:59:49 -05:00
295e535f93 QMR 2019-12-09 02:59:35 -05:00
58a31f0763 QMR implemented, preserve even if not used much 2019-12-09 02:59:13 -05:00
3d2fe80780 Temporary size depends on checkerboard/uncheckerboard. The Mdir cares 2019-12-09 02:58:24 -05:00
e43fce1083 Clean up and simplify a little. 2019-12-09 02:55:45 -05:00
0dfdf80407 Logging 2019-12-09 02:54:52 -05:00
2912071f83 Add non hermitian operator 2019-12-09 02:51:53 -05:00
26605ef387 HDCR back to working 2019-12-09 02:51:01 -05:00
1e5ac576d9 Merge commit 'f7698b93ca57ea3aa4d72b133ad9ca5d1e703661' into develop
# Conflicts:
#	Hadrons/Modules.hpp
#	Hadrons/modules.inc
2019-12-06 11:59:21 +00:00
d5492b426f Hadrons: better order in module list 2019-12-06 11:56:26 +00:00
d428858c9d Merge pull request #255 from fionnoh/feature/sparseNoise
Feature/sparse noise
2019-12-06 11:43:27 +00:00
f7698b93ca corrected comments about quark line directions 2019-12-06 09:46:52 +00:00
7ce77690b8 Naming conventon also applied to metadata 2019-12-05 17:38:43 +00:00
164ed9c434 Naming conventon also applied to metadata 2019-12-05 17:38:00 +00:00
a54157e682 more definitions changed 2019-12-05 17:08:09 +00:00
58b6a0d8d1 changed some naming conditions to resemble rare-kaons 2019-12-05 16:56:54 +00:00
1a5e562bde only one FIMPL left! 2019-12-05 16:46:58 +00:00
45be26cf3f Merge branch 'develop' of https://github.com/fionnoh/Grid into feature/sparseNoise 2019-12-05 16:18:47 +00:00
5227ffccb7 Added James' sparse noise code and a module to use it 2019-12-05 15:50:03 +00:00
a0b47cc0be Merge pull request #254 from fionnoh/bugfix/eigenMigration
Updated Eigen URL after migration to gitlab
2019-12-05 15:26:38 +00:00
b766038810 new syntax after merge 2019-12-04 18:08:00 +00:00
cd9fd80a5d merged in develop 2019-12-04 17:12:46 +00:00
d6100cc35a Merge pull request #253 from mmphys/feature/distil
Fix phase convention adjustment error
2019-12-04 14:58:51 +00:00
29a1530510 Updated Eigen URL after migration to gitlab 2019-12-04 13:49:22 +00:00
15119eaf03 Fix phase convention adjustment error (and make no assumptions about node layout) 2019-12-04 09:59:58 +00:00
188e12ffbb Merge pull request #249 from mmphys/feature/distil
Feature/distil
2019-12-03 18:06:00 +00:00
e940f4db7e removed unused parameter parity 2019-12-03 12:01:31 +00:00
9c7f269489 typo in fimpl4 2019-12-03 11:19:54 +00:00
07feaf9531 updated ascii-doc preamble 2019-12-03 11:17:35 +00:00
7983ff2fdd Merge branch 'develop' into feature/distil
* develop:
  Change to reporting
  NVCC timer support
  Fix compile failure under NVCC
  --enable-summit flag
  IBM summit optimisation. Synchronise in node is still between 2 halves of AC922, so could be a little faster
  Sliced propagator contraction was not producing any results because buf.size()=0
  several typos in hadrons
2019-11-30 16:47:03 +00:00
2db814f2b7 Resolve conflicts in BaryonUtils (just use latest from develop) 2019-11-29 18:19:35 +00:00
6418f06771 Add option to save the eigenvectors of the Laplacian.
If they are saved, then metadata saved are:
solverXml	Parameters for this LapEvec module instance
OperatorXml	module type and parameters (if any) for the module that created the gauge field
2019-11-29 18:06:18 +00:00
8a5576f73c cleared up how exactly q_spec has to be defined 2019-11-28 12:35:18 +00:00
997790ad24 Allow subspace setup to not converge 2019-11-26 14:04:28 -05:00
900d6fad21 fp16 mandatory. Use SFW if not available as hdw 2019-11-26 13:26:43 -05:00
799ff0c96e speed-up 2019-11-26 15:28:47 +00:00
5fd5c25114 now two separate functions for Eye and NonEye 2019-11-26 13:44:55 +00:00
62b3799c77 Merge pull request #251 from fionnoh/bugfix/WallWallMeson
MContraction::Meson bugfix
2019-11-26 12:46:03 +00:00
d1a89af8c9 Change to reporting 2019-11-22 10:49:10 -05:00
d91ba1f6cc NVCC timer support 2019-11-21 20:11:19 +00:00
f4d27e7090 Merge branch 'develop' of https://github.com/paboyle/Grid into develop 2019-11-21 20:09:31 +00:00
feb1ff3494 Fix compile failure under NVCC 2019-11-21 20:03:39 +00:00
8ef6175acc Merge branch 'develop' of https://github.com/paboyle/Grid into develop 2019-11-21 15:02:21 -05:00
e4399e3ee1 --enable-summit flag 2019-11-21 15:02:10 -05:00
98ea67b636 IBM summit optimisation. Synchronise in node is still between 2 halves of AC922, so could
be a little faster
2019-11-21 15:00:46 -05:00
421a4395af Sigma to Nucleon contractions 2019-11-21 17:25:37 +00:00
cf95a460a5 Sliced propagator contraction was not producing any results because buf.size()=0 2019-11-21 17:17:55 +00:00
a60e20f265 Merge pull request #250 from mmphys/hadrons-typos
several typos in hadrons
2019-11-20 17:10:08 +00:00
9261c0da89 several typos in hadrons 2019-11-20 17:06:32 +00:00
b350a24ded fixed test_distil 2019-11-18 15:29:20 +00:00
13a0db7162 Reverse changes not intended to be part of distillation release 2019-11-18 12:34:49 +00:00
18177d9709 Review changes 2019-11-18 11:59:13 +00:00
7bf42b9c0e HADRONS_ERROR 2019-11-18 10:27:35 +00:00
2d6f4e0c09 fixed issue with HADRONS_ERROR, no idea why this works 2019-11-15 13:46:47 +00:00
7f06c40107 _var -> var_ 2019-11-15 13:26:24 +00:00
9f75065205 eigen_strong_inline gone 2019-11-15 13:22:20 +00:00
271a02230e assert -> ERROR 2019-11-15 11:11:50 +00:00
b1e8b5b5ce changed default behaviour as discussed with antonin 2019-11-15 11:00:25 +00:00
25d2521d77 small stuff 2019-11-13 16:34:09 +00:00
500ef17143 beauty 2019-11-13 15:14:51 +00:00
ee9dd22643 worked on test_distil 2019-11-13 14:59:44 +00:00
a977d9901b cleanup 2019-11-13 14:52:06 +00:00
667ffb70db changed error type 2019-11-13 12:16:56 +00:00
65b3059bd7 Merge branch 'feature/distil' of github.com:mmphys/Grid into feature/distil 2019-11-13 11:51:14 +00:00
5238808ccd No DistilVectors specified in xml now throws an error 2019-11-13 11:50:55 +00:00
8f88fee680 Merge branch 'feature/distil' of github.com:mmphys/Grid into feature/distil
* 'feature/distil' of github.com:mmphys/Grid:
  made notation DPar->dp consistent over modules
2019-11-13 11:34:10 +00:00
fcc412a1c2 Remove conditional compilation to support GPU build 2019-11-13 11:32:23 +00:00
12e415330f made notation DPar->dp consistent over modules 2019-11-13 11:21:08 +00:00
66e0811317 Attempt to fix cuda build 2019-11-13 00:02:51 +00:00
55e743aad6 Streamline 2019-11-12 23:57:28 +00:00
e2ab0d671e Implement destructors 2019-11-12 23:18:37 +00:00
7a4c5dbbd5 Restoring previous version for _reduced variables 2019-11-12 22:12:35 +00:00
3f00b8f6c7 Switch to std::unique_ptr<GridCartesian> grid3d;
Remove hand-coded reference to pi - switch to <math.h> definition
2019-11-12 21:53:09 +00:00
6d7043e0c2 NamedTensor changes done 2019-11-12 17:31:42 +00:00
b0f24ec302 Test works now 2019-11-12 15:14:13 +00:00
fb2834bf82 Oops 2019-11-12 14:01:20 +00:00
78f75b0e9f Better than graffiti 2019-11-12 14:00:46 +00:00
62dd0bfe58 New parameter module compiles. Untested. 2019-11-12 13:59:53 +00:00
db952993fa envCreate problem.. 2019-11-12 12:23:34 +00:00
b8f0878981 removed most default behaviour 2019-11-11 17:49:38 +00:00
df586a142d added DistilPar-module and cleaned up some code 2019-11-11 17:29:55 +00:00
7a446d5b7f removed default filenames 2019-11-11 14:36:45 +00:00
e7d7ea4f8f added LoadNoise module 2019-11-11 12:55:45 +00:00
f8e1941327 Implemented specialisations of NamedTensor as derived classes; however, this suffers from a number of problems:
1) virtual functions not available in base class constructor where I'd like to use them - e.g. IndexNames
2) Must define new constructors in derived classes
... so the specialisations are fatter than I'd like. Would prefer to revert to specifying tensor name and index name defaults in template
2019-11-08 11:55:00 +00:00
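Point 1) is standard C++ behaviour rather than anything specific to NamedTensor; a minimal, unrelated example of why it bites: while the base-class constructor runs, the dynamic type is still the base, so a virtual call made there never reaches the derived override.
    #include <iostream>
    #include <string>

    struct Base
    {
        Base() { std::cout << indexNames() << std::endl; }  // prints "base", not "derived"
        virtual std::string indexNames() const { return "base"; }
        virtual ~Base() = default;
    };

    struct Derived : Base
    {
        // Only takes effect after Base::Base() has finished running.
        std::string indexNames() const override { return "derived"; }
    };

    int main(void) { Derived d; return 0; }  // output: base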
65aa54804e added comments 2019-11-08 11:15:51 +00:00
293bfe17d1 added code to the noise module... 2019-11-07 14:00:40 +00:00
a8f3a111a5 added Serial RNG - code compiles but not tested! 2019-11-07 13:45:38 +00:00
5c23abe507 commented on Notation 2019-11-07 11:57:40 +00:00
22c654182a Fixes for GPU compile 2019-11-04 17:24:34 +00:00
6f0439c0e4 Remove unnecessary cast 2019-11-04 15:50:14 +00:00
4f9a7c5d76 Back out unnecessary change 2019-11-02 16:50:29 +00:00
fcd90705bc Beautification 2019-11-02 16:15:48 +00:00
4bcdb4ff95 Remove accidental check-in of local debugging 2019-11-02 15:24:12 +00:00
1c10933db1 Rationalisation of NamedTensor (Perambulator) 2019-11-02 14:58:32 +00:00
52d8d576d0 Removed SliceShare as a reusable routine 2019-11-01 20:10:51 +00:00
ada0a7a83b C++11 case comparison of named tensor index names 2019-11-01 16:05:08 +00:00
efe2f2d48b Merge branch 'develop' into feature/distil
* develop:
  Summit jsrun GPU mapping updates. Configure with --enable-jsrun
  Fixed Lanczos calling aligned alloc in threaded region hitting up against pointer-cache no-threading restrictions; fixed Lattice::reset not compiling with new Grid explicit memory region handling; fixed memory leak in Lattice::resize that occurs when data region has been previously allocated
2019-11-01 15:38:48 +00:00
45d4cf0971 Cleanup in progress 2019-11-01 15:35:07 +00:00
ac614cbc53 Merge branch 'develop' of https://github.com/paboyle/Grid into develop 2019-10-31 11:46:43 -04:00
ec8e060ec7 Summit jsrun GPU mapping updates. Configure with --enable-jsrun 2019-10-31 11:46:09 -04:00
5c54f27ac1 some cleanup, but hard-coded src in LapEvec unclear 2019-10-31 11:51:05 +00:00
4ed9379535 some cleanup 2019-10-31 11:45:50 +00:00
858e348a6d Cleanup of messages 2019-10-31 11:11:52 +00:00
3b3680c64e Reversed Felix's interim A2Autils.h changes ... these were finished and went into develop via a separate branch 2019-10-30 15:50:04 +00:00
2a926b3dc6 Merged latest changes from develop, in preparation for release. 2019-10-30 14:52:34 +00:00
845a045493 Merge pull request #233 from giltirn/lanczos_fix
A few run /compile / memory leak fixes
2019-10-30 10:21:59 -04:00
eb8848a071 Merge branch 'develop' into feature/distil
* develop: (27 commits)
  Update README.md
  result layout standardised, iterator size more elegant
  updated syntax in Test_hadrons_spectrum
  chroma-regression test now prints difference correctly
  baryon input strings are now pairs of pairs of gammas - still ugly!!
  second update to pull request
  Changing back interface for Gamma3pt
  Removing old debug code
  Changes to A2Autils
  suggested changes for 1st pull request implemented
  changed input parameters for easier use
  Should compile everywhere now
  changed baryon interface
  added author information
  ready for pull request
  code compiling now - still need to test
  Baryons module works in 1 of 3 cases - still need SlicedProp and Msource part!!
  thread_for caused the problems - slow for loop for now
  still bugfix
  weird bug...
  ...

# Conflicts:
#	Hadrons/Modules.hpp
#	Hadrons/modules.inc
2019-10-30 14:13:00 +00:00
f31e3278a6 Update README.md 2019-10-25 11:43:55 -04:00
ca234325bc Fix single-precision error 2019-10-23 21:49:32 +01:00
c97f780784 Merge pull request #243 from fionnoh/feature/A2A_current_insertion
Feature/a2 a current insertion
2019-10-22 13:55:53 +01:00
78bdb0ff6a Grid 2019-10-20 14:22:45 +01:00
decab587a0 PerambFileName defaults to object name if empty 2019-10-20 14:14:06 +01:00
202f025fc7 Merge pull request #242 from mmphys/feature/baryons
Feature/baryons
2019-10-16 15:06:32 +01:00
3c702b510b result layout standardised, iterator size more elegant 2019-10-15 18:48:51 +01:00
519ce19128 Fixes to enable GPU build. NB: Contractor and ContractorBenchmark still not working 2019-10-14 22:40:13 +01:00
8d166a81c0 updated syntax in Test_hadrons_spectrum 2019-10-14 13:41:08 +01:00
aa62ca9046 chroma-regression test now prints difference correctly 2019-10-10 11:07:20 +01:00
2dee4791db baryon input strings are now pairs of pairs of gammas - still ugly!! 2019-10-09 17:56:09 +01:00
548b3bf43c second update to pull request 2019-10-09 14:52:33 +01:00
a55d0ba8fe Changing back interface for Gamma3pt 2019-10-08 15:52:01 +01:00
5de9547db5 Removing old debug code 2019-10-08 15:51:28 +01:00
6a3b09cf02 Merge branch 'develop' of github.com:fionnoh/Grid into feature/A2A_current_insertion 2019-10-08 13:25:51 +01:00
10de4bfc23 Changes to A2Autils 2019-10-08 13:24:56 +01:00
2ce7f2b4d8 suggested changes for 1st pull request implemented 2019-10-08 13:19:47 +01:00
88d6ff8f1d Peter's bugfix in ImplicitlyRestartedLanczos.h
My bugfix in MomentumPhase.hpp
2019-10-07 17:36:11 +01:00
803329af99 Merge branch 'develop' into feature/distil
* develop:
  Fix after GPU merge: Phase in Free Propagator
  z2-momentum phase module

# Conflicts:
#	Hadrons/Modules/MSource/MomentumPhase.hpp
2019-10-07 13:09:52 +01:00
9d96899aa8 Doc bugfix 2019-10-07 13:05:04 +01:00
86939dbf1a Removed unnecessary function (for getting a parameter) 2019-10-04 13:59:59 +01:00
317645aaeb undo (most) whitespace changes in the two files HMC/Mobius2p1fEOFA{,_F1}.cc 2019-10-02 16:25:23 +01:00
e280ec6b0b changed input parameters for easier use 2019-10-02 16:14:06 +01:00
d5a180d914 Merge branch 'fix/removeQCDremnants' into fix/remQCDns_ignore_ws 2019-10-02 16:11:27 +01:00
d2928761dd Merge pull request #240 from guelpers/feature/bugfixafterGPUmerge
Fix after GPU merge: Phase in Free Propagator
2019-10-02 15:00:15 +01:00
f2a74c603f Merge pull request #239 from mmphys/z2_momentum
z2-momentum phase module
2019-10-02 14:57:59 +01:00
5f22810f55 Fix after GPU merge: Phase in Free Propagator 2019-10-02 14:49:35 +01:00
92e25488f8 Added MomentumPhase Hadrons module from z2_momentum branch (thankyou, Felix) so I can run Z_2 wall with momenta easily 2019-10-02 14:13:35 +01:00
89ef2b7dc2 Should compile everywhere now 2019-10-02 13:20:07 +01:00
7606554b76 Remove references to unused modules (now part of separate Baryons branch) 2019-10-02 13:16:58 +01:00
c8fc0b3e0c changed baryon interface 2019-10-02 11:36:39 +01:00
ccb5e8374b z2-momentum phase module 2019-09-30 17:36:15 +01:00
b88fd436e7 added author information 2019-09-30 17:07:46 +01:00
155bcd4ff3 ready for pull request 2019-09-30 16:58:20 +01:00
d1daab601a Merge branch 'develop' of github.com:fionnoh/Grid into feature/A2A_current_insertion
Peter's GPU branch changes merged with A2A CI code
2019-09-30 16:53:44 +01:00
e5d7910fa7 code compiling now - still need to test 2019-09-30 13:55:26 +01:00
94b9a9474c Baryons module works in 1 of 3 cases - still need SlicedProp and Msource part!! 2019-09-27 15:08:56 +01:00
bf62ec163d thread_for caused the problems - slow for loop for now 2019-09-26 13:33:49 +01:00
8415e23fc6 still bugfix 2019-09-26 11:09:09 +01:00
76c93aa44e weird bug... 2019-09-17 14:36:26 +01:00
3137628222 BaryonUtils.h is now part of Baryons 2019-09-17 13:19:20 +01:00
ce965ee6bb Cleanup tests that are no longer required 2019-09-17 13:10:59 +01:00
911fbb0f36 Cleanup modules that are no longer required 2019-09-17 13:06:52 +01:00
eb293e9909 Restore Baryons modules per develop branch 2019-09-16 20:29:37 +01:00
f548114ff6 bugfix 2019-09-16 17:55:58 +01:00
dab8c01c3d added Baryon code 2019-09-16 17:20:54 +01:00
2f3dd0703d Ensure Distillation test (Test_distil) works 2019-09-16 17:00:46 +01:00
2e963d1a78 Fix location of Grid.h and remove reference to QCD namespace 2019-09-16 15:34:47 +01:00
bf52e7cc96 Latest BaryonUtils.h from Felix + my fixes 2019-09-13 18:11:10 +01:00
61d017d0a5 Merge GPU support (upstream/develop) into distillation branch.
This compiles and looks right ... but may need some testing

* develop: (762 commits)
  Tensor ambiguous fix
  Fix for GCC preprocessor/pragma handling bug
  Trips up NVCC for reasons I don't understand on summit
  Fix GCC complaint
  Zero() change
  Force a couple of things to compile on NVCC
  Remove debug code
  nvcc error suppress
  Merge develop
  Reduction finished and hopefully fixes CI regression fail on single precision and force
  Double precision variants for summation accuracy
  Update todo list
  Freeze the seed
  Fix compiling of MSource::Gauss for single precision
  Think the reduction is now sorted and cleaned up
  Fix force term
  Printing improvement
  GPU reduction fix and also exit backtrace option
  GPU friendly
  Simplify the comms benchmark
  ...

# Conflicts:
#	Grid/communicator/SharedMemoryMPI.cc
#	Grid/qcd/action/fermion/WilsonKernelsAsm.cc
#	Grid/qcd/action/fermion/implementation/StaggeredKernelsAsm.h
#	Grid/qcd/smearing/StoutSmearing.h
#	Hadrons/Modules.hpp
#	Hadrons/Utilities/Contractor.cc
#	Hadrons/modules.inc
#	tests/forces/Test_dwf_force_eofa.cc
#	tests/forces/Test_dwf_gpforce_eofa.cc
2019-09-13 13:30:00 +01:00
04a661cafe Remove unused modules BC2 and Baryon2 2019-09-10 14:49:24 +01:00
a7fa86dc29 MooeeInv improvement for DW EOFA + comments 2019-09-05 12:05:21 +01:00
0c1efa5235 pass OpenMP flag to host compiler 2019-09-03 12:12:25 +01:00
fdd9b14e82 speed up MooeeInvDag for DWF EOFA 2019-09-02 14:49:51 +01:00
e66669d300 fast MooeeInv for EOFA 2019-09-02 14:26:13 +01:00
0efaf3c4fa access M5D coeffs through pointers 2019-09-02 11:33:00 +01:00
3ef519aaa4 fast MooeeInv 2019-09-02 11:18:14 +01:00
b473405652 Tensor ambiguous fix 2019-08-29 09:36:41 -05:00
114ebb7914 Fixed Lanczos calling aligned alloc in threaded region hitting up against pointer-cache no-threading restrictions
Fixed Lattice::reset not compiling with new Grid explicit memory region handling
Fixed memory leak in Lattice::resize that occurs when data region has been previously allocated
2019-08-26 16:47:44 -04:00
9b7a6d197f Fix for GCC preprocessor/pragma handling bug 2019-08-23 14:37:46 +01:00
59cd7f3b70 Trips up NVCC for reasons I don't understand on summit 2019-08-23 06:03:49 -04:00
28d6be2a4e Fix GCC complaint 2019-08-22 18:56:37 +01:00
6b6c5aa626 remove namespace QCD from directory tests 2019-08-20 15:35:36 +01:00
9210b0aa6e remove namespace QCD from directory HMC 2019-08-20 15:21:23 +01:00
ad01290545 remove remnants of the namespace QCD 2019-08-19 20:30:33 +01:00
25150eb2e0 3pt contraction now takes a list of gammas 2019-08-15 12:09:30 +01:00
95f66cc93c Merge branch 'feature/gpu-port' into develop 2019-08-15 02:19:31 +01:00
12eb2a6a34 Zero() change 2019-08-15 01:43:00 +01:00
7c8902b04f Merge branch 'develop' into feature/gpu-port 2019-08-15 01:33:07 +01:00
4278caa030 Force a couple of things to compile on NVCC 2019-08-15 01:32:03 +01:00
be37dfb6f8 Remove debug code 2019-08-15 01:31:40 +01:00
5e8437029f nvcc error suppress 2019-08-15 01:31:12 +01:00
e279b2be29 Merge develop 2019-08-14 23:01:59 +01:00
48e6efc7c9 Merge branch 'develop' into feature/gpu-port
Conflicts:
	Grid/qcd/action/fermion/WilsonKernelsAsm.cc
	Grid/qcd/action/fermion/implementation/ImprovedStaggeredFermionImplementation.h
	Grid/qcd/action/fermion/implementation/StaggeredKernelsAsm.h
	benchmarks/Benchmark_comms.cc
2019-08-14 18:56:54 +01:00
55c095f620 Merge pull request #226 from nils-asmussen/fix/Gauss
Fix compiling of MSource::Gauss for single precision
2019-08-14 17:50:38 +01:00
3e49dc8a67 Reduction finished and hopefully fixes CI regression fail on single precision and force 2019-08-14 15:18:34 +01:00
96ac56cace Double precision variants for summation accuracy 2019-08-14 13:08:01 +01:00
2b037e3daa Update todo list 2019-08-14 13:07:26 +01:00
2d2de7aede Freeze the seed 2019-08-14 13:07:11 +01:00
e3966aa49b Fix compiling of MSource::Gauss for single precision 2019-08-12 14:57:11 +01:00
ce97638bac Think the reduction is now sorted and cleaned up 2019-08-11 11:09:01 +01:00
53e3ab4131 Fix force term 2019-08-11 11:06:13 +01:00
c2c4252a07 Merge pull request #216 from nils-asmussen/feature/GaussianSmearing
feature/gaussian smearing
2019-08-08 12:29:55 +02:00
d566637cec Merge branch 'develop' of github.com:fionnoh/Grid into feature/A2A_current_insertion 2019-08-07 12:11:40 +01:00
51bed48cd2 added selfcontract module 2019-08-05 17:46:42 +01:00
b875edceab Merge branch 'feature/distil' of https://github.com/mmphys/Grid into feature/distil
Conflicts:
	Grid/qcd/utils/BaryonUtils.h
	Hadrons/Modules/MContraction/Baryon2.hpp
2019-08-05 14:19:43 +01:00
29df60c0cb some debugging stuff 2019-08-05 14:10:04 +01:00
8d97e2a02a Say which A2AMatrix is being loaded, and which contraction is being performed (m of n) 2019-08-02 19:23:18 +01:00
ed23f6be20 Remove blank line from log 2019-08-02 15:59:18 +01:00
cad76827b0 Be consistent about separator usage. Log start / stop / duration 2019-08-02 15:47:20 +01:00
310867d46a Additional option to specify the separator used between terms in correlator 2019-08-02 11:25:29 +01:00
e598178d94 TODO: Felix, please fix. I commented this out because of compiler errors 2019-08-01 20:51:51 +01:00
723457d467 Contractor updates ready for test on Tesseract:
1) Move definitions of serialisable objects into header for re-use by external programs/utilities
2) Add "-s" switch for "Simple" correlators, i.e. only include A2AMatrix info for the actual fields included in each contraction
2019-08-01 20:35:55 +01:00
6f40021842 Fixed compiler errors: TODO: Felix, please validate 2019-08-01 19:57:59 +01:00
9cd33a7b9c Printing improvement 2019-07-31 08:01:24 +01:00
639dc1ab21 GPU reduction fix and also exit backtrace option 2019-07-31 01:23:23 +01:00
9117f61109 GPU friendly 2019-07-31 01:22:54 +01:00
bca36d9bc3 Merge branch 'develop' of https://github.com/paboyle/Grid into develop 2019-07-30 22:51:23 +01:00
263dcbabab Simplify the comms benchmark 2019-07-30 22:51:04 +01:00
622d5eaa3e Merge branch 'feature/distil' of https://github.com/mmphys/Grid into feature/distil 2019-07-30 13:47:22 +01:00
e66d48c142 second way to compute baryons - qdp style 2019-07-30 13:46:59 +01:00
9dad7a0094 Reproducible reduction and axpy_norm offload from Gianluca.
Hopefully get CG running entirely on GPU
2019-07-30 00:14:12 +01:00
8c6016f717 Merge pull request #219 from mmphys/feature/include
Housekeeping. #include <Grid.h> ---> #include <Grid/Grid.h>
2019-07-29 23:08:01 +01:00
1282e1067f Do the force term on the accelerator too. Needed particularly because comms buffers
are device memory.
2019-07-29 22:58:35 +01:00
f5ad4f3de8 Added the ability to write a version of the validated XML file excluding any of the module IDs supplied in a separate exclude file 2019-07-26 19:46:55 +01:00
275c1c920f More info dump on error from CUDA 2019-07-26 12:18:53 +01:00
fe700a183a Getting HMC to run 2019-07-26 12:18:29 +01:00
34108296cd Merge branch 'develop' into feature/gpu-port
Conflicts:
	Grid/simd/Grid_avx512.h
2019-07-20 17:05:35 +01:00
76c704b84b Intrinsics for CLANG are now fixed in v6 2019-07-20 16:52:24 +01:00
ce255ec359 Relocate to fix build failure for comms none 2019-07-20 16:37:03 +01:00
1c096626cb Hypercube defaults to on if HPE detected, but override to off possible 2019-07-20 16:06:16 +01:00
ce8b247426 Compiles 2019-07-20 15:16:02 +01:00
80481f81be Constructor typo 2019-07-20 09:58:24 +01:00
d85dcc72df Multinode fix 2019-07-20 07:13:28 +01:00
3fedcd6d52 Compiles 2019-07-20 07:12:44 +01:00
e7050a7aed Support gamma structure names that have trailing white space 2019-07-19 11:58:56 +01:00
e138bc7204 debug output 2019-07-19 11:16:35 +01:00
25ba4c5f80 Merge branch 'develop' into feature/gpu-port
Conflicts:
	HMC/Mobius2p1fEOFA.cc
	tests/forces/Test_rect_force.cc
2019-07-19 11:01:55 +01:00
671bcbcccb Merge branch 'develop' of https://github.com/paboyle/Grid into develop 2019-07-19 10:48:22 +01:00
ff325376cb Fix single precision deriv test fail 2019-07-19 10:47:44 +01:00
6d4fb35d84 Ready for testing 2019-07-19 10:33:03 +01:00
9e926e3fc5 Build fix in develop 2019-07-19 10:01:52 +01:00
775eaee199 Fix for suspected Intel 2018.1 compiler bug under O3 2019-07-19 07:57:34 +01:00
0fd2827d5d Fix fail in single 2019-07-19 05:28:26 +01:00
bdd79f9ef8 TODO update 2019-07-18 22:04:28 +01:00
0695f8cec2 Single precision compile fix. Soon deprecate single precision 2019-07-18 22:02:31 +01:00
9fa705c5a0 comma fix 2019-07-18 21:38:11 +01:00
56cefadf9b gamma matrices as input 2019-07-18 17:46:43 +01:00
9d82855c5d bugfix in Baryonutils 2019-07-18 15:45:43 +01:00
97d61f2564 bugfix in Baryonutils 2019-07-18 14:57:10 +01:00
331f5a53dc New header 2019-07-18 14:51:09 +01:00
a23dc295ac Remove compiler errors and warnings 2019-07-18 14:47:02 +01:00
11a8668d19 bugfix in Baryonutils 2019-07-18 14:44:55 +01:00
cded7670d0 new utils for baryons 2019-07-18 14:29:04 +01:00
feb029fb66 new utils for baryons 2019-07-18 14:24:16 +01:00
08904f830e Merge develop 2019-07-16 11:59:56 +01:00
fa9cd50c5b Merge branch 'develop' into feature/gpu-port 2019-07-16 11:55:17 +01:00
5a62ebe7b1 general baryons case added 2019-07-15 15:26:30 +01:00
7c11525d1a Local stencil for complex wilson loops etc 2019-07-14 14:05:09 +01:00
42c1dbb1d1 General local stencil first cut for Patrick force term 2019-07-14 14:04:28 +01:00
6179acfda0 Put back a call that was required 2019-07-14 13:59:54 +01:00
fa747173d1 Debugging references were to l-values, so added const to stop errors 2019-07-14 11:08:00 +01:00
07601ac1f5 Replace instantiation of Gparity 2019-07-12 17:18:12 +01:00
705a8098b2 Merge branch 'feature/gpu-port' of https://github.com/paboyle/Grid into feature/gpu-port
Conflicts:
	Grid/stencil/Stencil.h
2019-07-12 17:14:11 +01:00
a29b43d755 Stencil comms cleaner 2019-07-12 17:12:25 +01:00
368c8369ce Merge branch 'feature/gpu-port' of https://github.com/paboyle/Grid into feature/gpu-port 2019-07-12 17:11:29 +01:00
c0d89a2dbb TODO updates 2019-07-12 17:11:15 +01:00
78ebd93281 Cuda 9.1 happy 2019-07-12 17:11:00 +01:00
3d58daf70f Safety check 2019-07-12 17:10:35 +01:00
bd155ca5c0 Overlap comms with comput now supported 2019-07-12 09:09:40 +01:00
91e2cf9b40 All axes can be used for comms now 2019-07-12 09:08:26 +01:00
3cc9947731 Better welcome printing 2019-07-12 06:47:51 +01:00
f15eeb0283 localise scope of variables declared in macro 2019-07-12 06:47:01 +01:00
0996ba9396 Pretty messaging 2019-07-12 06:45:31 +01:00
12afb0395f Debugging transposeSpin - seems just not to be implemented for Lattice<x> 2019-07-11 17:42:26 +01:00
ec4aa978ab why can't I spinTranspose 2019-07-11 14:01:41 +01:00
966a203dcb Interactions with GPU compilation 2019-07-11 03:16:17 +01:00
44170cc15f Initialise CUDA device prior to entering MPI.
This may or may not interact with Summit, which configures the MPI-CUDA mapping with jsrun.
TBD
Cases of OpenMPI and MVAPICH are covered, and default to cudaSetDevice(0) otherwise
2019-07-11 03:14:23 +01:00
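In rough outline, the pattern described above (a sketch, not Grid's actual start-up code; the environment variables are the ones OpenMPI and MVAPICH2 normally export for the local rank): pick the device from the launcher's environment before MPI_Init, and fall back to cudaSetDevice(0) when neither variable is present.
    #include <cstdlib>
    #include <cuda_runtime.h>

    // Hypothetical pre-MPI_Init device selection. OpenMPI exports
    // OMPI_COMM_WORLD_LOCAL_RANK and MVAPICH2 exports MV2_COMM_WORLD_LOCAL_RANK;
    // with neither set, default to device 0 as the commit describes.
    void selectCudaDeviceBeforeMPI(void)
    {
        int localRank = 0;
        if (const char *e = std::getenv("OMPI_COMM_WORLD_LOCAL_RANK"))
            localRank = std::atoi(e);
        else if (const char *e = std::getenv("MV2_COMM_WORLD_LOCAL_RANK"))
            localRank = std::atoi(e);

        int nDevices = 0;
        if (cudaGetDeviceCount(&nDevices) == cudaSuccess && nDevices > 0)
            cudaSetDevice(localRank % nDevices);
        else
            cudaSetDevice(0);
    }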
7bc4a06f3f This is probably what you want ... 2019-07-10 12:29:33 +01:00
cd659525e1 You probably want to add this to the build. And you may need to do a bootstrap 2019-07-10 12:08:37 +01:00
dc2240d2d8 why does sliceSum in Nucleon.hpp not work 2019-07-10 11:34:16 +01:00
98cf20cf06 continued work on baryons 2019-07-09 17:42:36 +01:00
cc3346073e continued work on baryons 2019-07-09 17:30:32 +01:00
3848da7c50 added nucleon module (non-distillation) 2019-07-08 17:43:14 +01:00
c3d0c176ab cleaning up Kl2 contraction 2019-07-05 16:29:46 +01:00
0a71f8bb10 Merge pull request #222 from guelpers/feature/kl2QEDseq
EMLepton: Multiple source-sink separations at once
2019-07-05 16:22:34 +01:00
b7d0cf6751 bugfix in diquark sum / baryons 2019-07-04 22:06:37 +01:00
3a31ba2ea2 Merge remote-tracking branch 'upstream/develop' into feature/kl2QEDseq 2019-07-03 14:37:56 +01:00
eac6337466 Hadrons: EMLepton: multiple source-sink separations at once 2019-07-03 14:36:34 +01:00
ab7537e002 Merge pull request #221 from fionnoh/bugfix/A2ALoop
Bugfix for A2ALoop module
2019-07-03 14:13:51 +01:00
2c1a077369 continued on baryons 2019-07-02 17:55:28 +01:00
6e3c3214a3 Offload loops 2019-07-02 17:25:40 +01:00
d6ffadb33b Coalesced write 2019-07-02 17:25:13 +01:00
ae3abbe53d Added the ability for Perambulator module to save unsmeared sinks through the addition of two optional parameters:
UnsmearedSinkFileName: If present, specifies the filename to write to
UnsmearedSinkMultiFile: defaults to true to write each sink vector to a different file, but can be set to 0 for a single file
2019-07-01 17:28:27 +01:00
5fc0188205 started saving sinks 2019-07-01 14:51:59 +01:00
4c3225412b Drop 5dVEC 2019-07-01 07:31:26 +01:00
b8f7bfbb26 Don't stream as poor perf in some cases 2019-07-01 07:30:25 +01:00
7b7c470917 Accelerator loop 2019-07-01 07:29:51 +01:00
532e226b22 cuda 9.1 fixes 2019-07-01 07:29:22 +01:00
6a13731818 Move GPU cuda call earlier 2019-07-01 07:28:41 +01:00
67690df3bd Changes needed to have a current insertion on every second time slice - avoids unnecessary contractions 2019-06-28 15:18:28 +08:00
1059189abf Bugfix for A2ALoop module 2019-06-27 13:49:55 +08:00
ce29b18dc9 New modules for loading in MFs as diskvectors and producing propagaotrs from 4 quark contractions 2019-06-27 13:46:06 +08:00
421a0a8a36 Changes to A2Autils, A2AMatirx and DiskVector code that is needed for Hadrons 4 quark contraction module 2019-06-27 13:45:20 +08:00
ac530636ca A2Aloop bugfix 2019-06-27 13:44:47 +08:00
2d940a598c Inserted four extra parameters just to make this test compile. Needs to be fixed properly 2019-06-19 10:37:50 +01:00
c28c5fc61b Inserted four extra parameters just to make this test compile. Needs to be fixed properly 2019-06-19 10:31:41 +01:00
015340d60c Elided superfluous copy on write 2019-06-19 09:37:03 +01:00
1cd4ee0706 Thrust used on GPU builds 2019-06-18 12:50:35 +01:00
b8f71b6777 Fix NVCC warning unused variable 2019-06-17 13:58:45 +01:00
703dc20377 Compile tests fix 2019-06-16 13:59:29 +01:00
d976e5c514 Pow is being awkward in thrust for reasons I don't understand. Possible thrust bug. 2019-06-16 12:05:11 +01:00
d7b3efe893 Compile fix 2019-06-15 17:03:15 +01:00
f710d7bd45 TODO list update 2019-06-15 12:54:27 +01:00
cb336aa8f8 Thread loop constructs changing a little 2019-06-15 12:54:11 +01:00
462900b48d Modified entire test directory to suit new GPU constructs for looping 2019-06-15 12:53:27 +01:00
0561c2edeb Benchmarks modified for new GPU constructs 2019-06-15 12:52:56 +01:00
0184719216 Change to predicate type 2019-06-15 12:52:26 +01:00
24202dbc51 Thread loop construct change 2019-06-15 12:52:07 +01:00
d763c303c5 Clean accelerator barrier 2019-06-15 12:51:45 +01:00
8e394d3bf9 New loop construct 2019-06-15 12:51:15 +01:00
b881d5489b Move SchurDiagTwoKappa to Algorithms 2019-06-15 12:50:45 +01:00
82306913a8 Move Schur operator into correct place 2019-06-15 12:49:22 +01:00
49f90cc7eb use pragma once 2019-06-15 12:45:22 +01:00
b77af0210b Thread loop. Probably deprecate this impl 2019-06-15 12:44:56 +01:00
5254ede2d8 New loops. Revisit as accelerator loop in future audit 2019-06-15 12:44:29 +01:00
16e5d7945e Hard to make 5D vec work with GPU code 2019-06-15 12:43:43 +01:00
decc99ca76 Accelerator version 2019-06-15 12:43:00 +01:00
464cd65931 Still to test this fully 2019-06-15 12:35:14 +01:00
a1ec2f4723 Still to test this routine fully 2019-06-15 12:33:55 +01:00
ea9662ec85 Thread loop changes 2019-06-15 09:09:57 +01:00
52c74f1cac Thread loop changes 2019-06-15 09:08:16 +01:00
9a13d2992c Clean up 2019-06-15 09:05:16 +01:00
b0449ae270 Thread loop changes 2019-06-15 09:04:19 +01:00
1299225105 Accelerator loop changes 2019-06-15 09:03:46 +01:00
5925e7f405 Thread for changes 2019-06-15 09:01:30 +01:00
be1fd4930f Template instantiation make happy changes 2019-06-15 08:37:34 +01:00
377fa5dec1 looping construct 2019-06-15 08:36:48 +01:00
e8b78f596e Looping construct changes 2019-06-15 08:35:57 +01:00
09720c40cd Coalesced loops 2019-06-15 08:35:26 +01:00
bb024dd114 Loop construct changed 2019-06-15 08:30:05 +01:00
52456b9ec7 New loop construct 2019-06-15 08:28:45 +01:00
b285138be4 Better checking on types 2019-06-15 08:27:48 +01:00
c7dbf4c87e Scalar support for GPU threads 2019-06-15 08:25:43 +01:00
1e889c93b8 Insert a GPU synchronise 2019-06-15 08:23:26 +01:00
7379047482 Threading and acceleration primitives further changes. accelerator_barrier() needed and used 2019-06-15 08:22:48 +01:00
d836ce3b78 Clean up of acceleration and threading primitives 2019-06-15 08:14:21 +01:00
cefaacbc07 Changing accelerator loop. Still have work to do for multi-GPU code 2019-06-15 08:10:24 +01:00
0074ef7f69 thread loops 2019-06-15 08:04:29 +01:00
20359ca15f Coalesced loops. 2019-06-15 08:03:57 +01:00
736358b0cb Coalesced loops 2019-06-15 08:03:13 +01:00
6b692aa726 Thread loops 2019-06-15 08:02:26 +01:00
7f99e1cd3b Coalesced loops 2019-06-15 08:01:39 +01:00
f3c89df948 Thread loop changes 2019-06-15 08:00:37 +01:00
b7e6d111d7 Thread loop changes. Need to offload this file 2019-06-15 07:59:10 +01:00
f39cf69c33 Accelerator loop change 2019-06-15 07:58:23 +01:00
8e27338df2 Rationalise number of loop macros 2019-06-15 07:57:40 +01:00
bcbb5e9d26 Remove assembly tests 2019-06-15 07:57:05 +01:00
0ea7f5279d Accelerator loop changes 2019-06-15 07:56:14 +01:00
18e5de426d There is a stray use of predicatedWhere introduced by Andrew Lawson in the conserved currents.
The conserved currents need to be rewritten using data parallel operations.
2019-06-15 07:53:58 +01:00
e896d81235 Accelerator loop redefine. Coalesce most accesses, but ET engine still to go clean. 2019-06-15 07:52:44 +01:00
7b8ccff4f4 Accelerated coalesced loops in most cases 2019-06-15 07:48:00 +01:00
68541606ab Thread loop changes. Soon try these with accelerator loops and benchmark 2019-06-15 07:46:42 +01:00
339ea10cc7 First touch only on CPU code 2019-06-15 07:45:43 +01:00
d0d8dc8042 Thread loop changes 2019-06-15 07:45:09 +01:00
81eb1fd9f2 Accelerator loop changes for coalesced access 2019-06-15 07:44:47 +01:00
cb93d32cd9 Thread loop changes 2019-06-15 07:44:08 +01:00
8f223962ff Thread loop changed 2019-06-15 07:43:42 +01:00
9a8a63467e BC2 now runs. setup() runs twice, which had resulted in doubling up of momenta. Also fixed initialisation of momentum phases. 2019-06-12 15:25:59 +01:00
36f06555a2 Simplify Impl 2019-06-09 22:26:27 +01:00
d6c0e0756d Remove GPU version 2019-06-09 11:23:42 +01:00
3e41b1055c Remove Gpu only kernels. 2019-06-09 11:20:01 +01:00
9fbcfe612c Update TODO list 2019-06-09 11:19:38 +01:00
e78a5e7838 ASM instantiation without link errors 2019-06-09 01:25:21 +01:00
da8d87e9da Cuda switch off 2019-06-08 17:11:38 +01:00
8e3a05d89b Moving the instantiation into a cleaner structure 2019-06-08 13:48:33 +01:00
8adc5da7dd Testing out approaches to kernel writing, introducing SIMT_loop temporarily 2019-06-08 13:47:04 +01:00
29a244e423 Test of using a lane variable instead of repeated reference to threadIdx.y 2019-06-08 13:46:26 +01:00
18cbfecf02 Use symlinks in find command 2019-06-08 13:45:46 +01:00
c933ac2248 Temporarily introduce a SIMT_loop to test out approaches prior to making a global change to
accelerator_loop
2019-06-08 13:44:27 +01:00
ad2c433574 Instantiations move. Tried using Gianluca's suggestion about avoiding threadIdx but doesn't
seem to make a difference. Will revisit this and probably remove the lane parameter from the coalescedRead
2019-06-08 13:43:12 +01:00
86e7fb6e86 Instantiation relocation 2019-06-08 13:42:46 +01:00
fb91dda7be Hand instantiation moved location 2019-06-08 13:42:26 +01:00
82cf7bc5ab Move instantiation into fermion/instantiation 2019-06-08 13:41:46 +01:00
e452cc0a22 Move static variables into instantiation .cc file 2019-06-08 13:41:20 +01:00
4d2b938166 Remove explict instantiation from here 2019-06-08 13:41:01 +01:00
10d16ab76c Remove explict instantiation from here 2019-06-08 13:40:32 +01:00
1f997fa484 Instantiate via explict .cc files for parallel make. 2019-06-08 13:39:51 +01:00
dc5024e88c The GPU reduction was not working for me and causing errors. Need to revisit.
Gianluca is working on deterministic reduction.
2019-06-08 13:39:11 +01:00
6d77941990 Drop the 5D vec actions 2019-06-08 13:38:05 +01:00
0ee6e77cbc Compiles GPU and CPU, still gives good performance on CPU 2019-06-05 13:28:16 +01:00
18d3cde29a Compile on GPU works 2019-06-05 00:14:58 +01:00
7323099966 Instantiation fix 2019-06-05 00:14:38 +01:00
6379651cdd Generic or GPU ready for benchmark test on GPU 2019-06-05 00:13:52 +01:00
ba4fd756b9 Fix signature, but deprecating this loops style 2019-06-05 00:12:36 +01:00
d185fc1ebf clean up instantiation 2019-06-05 00:11:52 +01:00
96b36d8367 Instantiation clean up 2019-06-05 00:11:27 +01:00
899f8b5065 Instantiation clean up 5d vec removal 2019-06-05 00:11:05 +01:00
c8d0483fe9 Remove 5d vectorisation 2019-06-05 00:10:37 +01:00
0f214e5f76 Clean up instantiation 2019-06-05 00:10:13 +01:00
8eea568426 GPU loop ; presently differentiated with ifdef, find a way to unify. 2019-06-05 00:09:28 +01:00
9636324069 GPU happy code 2019-06-05 00:08:54 +01:00
8a5489d9e6 Move the loop into a central kernel call. 2019-06-05 00:08:13 +01:00
8113845f9c coalesce loop. Need to rationalise this file 2019-06-04 23:49:29 +01:00
b47f73c222 GPU happy 2019-06-04 21:30:39 +01:00
5720ced0fd Simplifying 2019-06-04 21:30:08 +01:00
2c87b56b53 Making GPU happier 2019-06-04 21:29:44 +01:00
dbad48d802 Remove Ls vectorised DWF 2019-06-04 21:27:40 +01:00
4557a1365a Remove Ls vectorised DWF 2019-06-04 20:59:59 +01:00
16e9b87d98 Remove Ls vectorised DWF as unused and hard to maintain 2019-06-04 20:59:01 +01:00
685eea3d0f Small cosmetic 2019-06-04 20:58:14 +01:00
65b48831fb Simplify code 2019-06-04 20:56:30 +01:00
57396fc595 Simplify code 2019-06-04 20:56:23 +01:00
a2e199df50 Simplifying Cayley cases. 2019-06-04 20:54:52 +01:00
020346c848 Work list. Will have to clean up Fermion sector. 2019-06-04 20:54:00 +01:00
c2625a127e Non blocking loop. Want to change the naming here. 2019-06-04 20:52:59 +01:00
8794d35c78 GPU 2019-06-04 20:52:27 +01:00
24bff6dbe6 Minor improvements 2019-06-04 20:51:48 +01:00
45b15d10d3 GPU happy changes 2019-06-04 20:49:16 +01:00
33d6bbe32b GPU must use accelerator vectors 2019-06-04 20:48:52 +01:00
7a1569bd46 Annoying, cannot rely on equivalence of Grid ComplexD and Eigen Complex type on GPU.
Solve with ComplexD typecasts, but there must be a better way
2019-06-04 20:47:49 +01:00
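A guess at what the workaround amounts to, with stand-in types rather than the real Grid and Eigen scalars: when the device build's complex type is not the same class as Eigen's std::complex<double>, an explicit construction from real and imaginary parts replaces the implicit conversion that works on the host.
    #include <complex>

    // Stand-ins only: pretend the device complex type differs from Eigen's scalar.
    struct DeviceComplexD { double re, im; };     // plays the role of ComplexD in a GPU build
    using  EigenComplexD = std::complex<double>;  // Eigen's double-precision scalar

    // The explicit "typecast" the commit mentions: build one type from the other
    // component by component instead of relying on the types being equivalent.
    inline DeviceComplexD toDeviceComplex(const EigenComplexD &z)
    {
        return DeviceComplexD{z.real(), z.imag()};
    }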
6e2e904a0e NVCC compiles happy. Start to develop strategy for writing generic
code for GPU kernels and CPU kernels.
2019-06-04 20:46:35 +01:00
d92a17f359 Suppress NVCC warnings in pugixml with pragma 2019-06-04 20:45:53 +01:00
47c063f984 Remove Ls Vec cases from benchmarks 2019-06-04 20:45:35 +01:00
7e27a5213a Tests builds clean. 2019-06-04 20:45:20 +01:00
fe72dc099b Upgrade to Mojave forced me to reinstall MacPorts. These are the ports I installed to get Grid working 2019-06-04 16:12:24 +01:00
ade4a126da Getting closer on the GPU port, but will start deleting 5th dim vectorised variants
for code maintainability
2019-06-04 11:53:44 +01:00
7b59ab5bd7 Compiling after reorganisation 2019-06-03 15:46:26 +01:00
fcd8cfe257 Gparity in 2019-06-03 15:45:09 +01:00
b4b53812cb Move implementation to specific implementation headers 2019-06-03 15:43:01 +01:00
085cac583f Implementation in header 2019-06-03 15:42:36 +01:00
25e3b8640c Move to header 2019-06-03 15:42:05 +01:00
c81d3d422d Housekeeping. #include <Grid.h> ---> #include <Grid/Grid.h> 2019-06-03 15:25:05 +01:00
54edb9906e Housekeeping. #include <Grid.h> ---> #include <Grid/Grid.h> 2019-06-03 15:20:46 +01:00
44bbec50b0 Making GPU compile happy 2019-06-03 14:57:04 +01:00
ec68b67d5d Attempt at unified GPU and CPU kernel 2019-06-03 14:55:51 +01:00
778450e0c8 Move to implementation subdir 2019-06-03 14:53:56 +01:00
567aa5f366 Move to implementation subdir 2019-06-03 14:53:33 +01:00
2ab7e2b175 Force instantiation in .cc files.
Eventually move into multiple files
2019-06-03 14:52:59 +01:00
6f61be044d Dont instantiate in header 2019-06-03 14:52:01 +01:00
269e00509e Don't instantiate in header 2019-06-03 14:51:24 +01:00
a5e90b0ddc Making the kernels more GPU happy 2019-06-03 14:50:54 +01:00
5622faf226 pragma once ifdef guard 2019-06-03 14:50:26 +01:00
82ecd520c7 Macos happy fix under nvcc 2019-06-03 14:48:50 +01:00
620965781e MSource::Convolution remove test code 2019-06-02 13:44:19 +01:00
9c18638b24 MSource::Convolution let mom argument be Nd dimensional 2019-06-02 13:41:39 +01:00
4bfe678218 MSource::Gauss Integer is unsigned... 2019-06-02 12:36:57 +01:00
fc6e584f2c MSource::Gauss fix sign in exponent of normalization + use correct types 2019-06-02 11:52:05 +01:00
7c3f400fc5 MSource::Gauss add parameters tA and tB 2019-06-02 00:12:15 +01:00
4bca2c17ce MSource::Convolution rename parameters 2019-06-02 00:04:07 +01:00
8d540a4e85 MSource::Gauss add mom parameter + avoid Cshifts 2019-06-01 23:56:14 +01:00
9ff459816f ReadBinary needs to do case insensitive name comparison (since I changed the default case of perambulator column names) 2019-06-01 13:50:27 +01:00
eb737daeb5 Merge branch 'develop' into feature/distil
* develop: (34 commits)
  Hadrons: EMLepton: Wall source
  Revert "cleaning up Kl2 contraction"
  cleaning up Kl2 contraction
  possibility to save/load schedules directly from the application parameters
  moving VERSION file to the empty ChangeLog one, this creates compilation problems with #include <version> in recent versions of LLVM and case-insensitive FS (typically macOS)
  Added precision tuning to Hadrons parameterfile writing
  Kl2 QED cleanup
  Added ZFIMPL to SeqGamma
  Added ZFIMPL to SeqConserved module
  F1 ensemble running with 96%~ acceptance etc..
  Make detection of HPE 8600 automatic
  Added variables that were missing from wall source setup
  Exposed a coulomb/landau enum to the gauge fixing module
  Coulomb gauge added as an option
  More logging, timing, and 4d/5d logic for eigpack gauge transforms
  Added gauge transform option to eigpack IO
  Hadrons: Lepton Propagator for kl2, sign swap for antiperiodic boundary
  A2A Lepton-Meson Field contraction
  Verbose
  Iteration range fix
  ...
2019-05-31 18:20:43 +01:00
b120ef1fe4 Merge pull request #217 from guelpers/feature/EMlepwall
Hadrons: EMLepton: Wall source
2019-05-30 11:13:27 +02:00
166feb6483 Hadrons: EMLepton: Wall source 2019-05-30 10:07:08 +01:00
f569813b60 remove commented code 2019-05-29 17:07:07 +01:00
0190ada714 Merge branch 'develop' into feature/GaussianSmearing 2019-05-29 17:01:17 +01:00
de1a1dccb3 MSource::Gauss and MSource::Convolution: change LatticeComplex to ComplexField 2019-05-29 16:25:45 +01:00
0b3f40ce16 MSource::Convolution fix sign in Momentum 2019-05-29 16:06:10 +01:00
e35e8da111 Revert "cleaning up Kl2 contraction"
This reverts commit f244fed6ab.
2019-05-29 11:23:17 +02:00
6fdf93d695 move momentum phase from MSource::Gauss to MSource::Convolution 2019-05-28 17:26:55 +01:00
ffde81f22a Nsimd() and coalesced support 2019-05-25 12:44:07 +01:00
d8098f1ecd coalesced support 2019-05-25 12:43:31 +01:00
aca788cf4f Move coalesced read into tensors 2019-05-25 12:43:00 +01:00
6064f96fde MSource::Gauss remove superfluous comment 2019-05-24 20:18:37 +01:00
4e52e46a2c MSource::Gauss fix missing factor 2019-05-24 20:16:09 +01:00
6b27369ade MSource::Convolution use type PropagatorField 2019-05-24 16:07:08 +01:00
ab2e5f88cd add fields as input (for scheduler) 2019-05-24 15:57:30 +01:00
f244fed6ab cleaning up Kl2 contraction 2019-05-24 13:08:35 +01:00
9b3701ae27 possibility to save/load schedules directly from the application parameters 2019-05-24 13:08:20 +01:00
4ac27340b9 moving VERSION file to the empty ChangeLog one, this creates compilation problems with #include <version> in recent versions of LLVM and case-insensitive FS (typically macOS) 2019-05-24 13:05:17 +01:00
c7c0a1065f Merge pull request #214 from guelpers/feature/kl2QEDseq
Kl2 contraction with sequential propagators
2019-05-23 20:31:41 +01:00
80947130f9 Merge pull request #215 from fionnoh/develop
Added precision tuning to Hadrons parameterfile writing
2019-05-23 18:44:58 +01:00
0aee73ea6b Added precision tuning to Hadrons parameterfile writing 2019-05-23 18:43:25 +01:00
e43d59045e add option mom to MSource::Gauss 2019-05-23 17:33:32 +01:00
e553678599 add modules MSource::Gauss and MSource::Convolution 2019-05-23 16:38:13 +01:00
0290ee1f6d Merge pull request #213 from fionnoh/develop
Added ZFIMPL to SeqConserved module
2019-05-23 13:46:02 +01:00
9a34edcf9f Kl2 QED cleanup 2019-05-23 13:43:22 +01:00
246f10001e Added ZFIMPL to SeqGamma 2019-05-23 12:42:40 +01:00
e675c6a48c Merge remote-tracking branch 'upstream/develop' into feature/kl2QED 2019-05-23 12:41:54 +01:00
a66d110b88 Added ZFIMPL to SeqConserved module 2019-05-23 11:49:54 +01:00
918e673078 Merge branch 'develop' of https://github.com/paboyle/Grid into develop 2019-05-22 09:57:02 +01:00
44b53c3ba2 F1 ensemble running with 96%~ acceptance etc.. 2019-05-22 09:56:26 +01:00
2095c12eac Make detection of HPE 8600 automatic 2019-05-22 09:54:21 +01:00
a0e9f3b0a0 Plan for GPU port 2019-05-20 09:46:19 +01:00
ae5ad986e2 Merge remote-tracking branch 'upstream/develop' into feature/kl2QED 2019-05-19 14:35:46 +01:00
a9342c6ae5 Update TODO after Gianluca merge 2019-05-18 22:58:25 +01:00
ee6f96d85c Merge pull request #210 from grid-test-organisation/feature/gpu-port-develop
Cayley fermion functions for GPUs
2019-05-18 19:06:20 +01:00
77ca45ff49 Merge pull request #211 from fionnoh/develop
Enum for gaugefix and bug fix for wall source
2019-05-18 18:57:52 +01:00
4e9df9e93c GPU patches 2019-05-18 17:43:11 +01:00
9fe68857a9 Runs multiGPU with coalesced access on tesseract 2019-05-18 17:42:41 +01:00
37336c9e0c Allow compress to be either vector or scalar types 2019-05-18 17:41:13 +01:00
6c4da3bbc7 Stencil now runs with coalesced accesses 2019-05-18 17:40:35 +01:00
a584b16c4a Adding a non-blocking kernel launch 2019-05-18 17:39:54 +01:00
dbd7f3f0fc Added variables that were missing from wall source setup 2019-05-17 19:10:09 +01:00
d14512ee03 Exposed a coulomb/landau enum to the gauge fixing module 2019-05-17 19:01:52 +01:00
48b1c806ed Coulomb gauge added as an option 2019-05-17 17:36:32 +01:00
8ce7ebdca3 fixed contraction issue 2019-05-17 10:52:55 +01:00
435653490e fixed contraction issue 2019-05-17 10:50:15 +01:00
10a052d695 3 issues preventing compilation under clang. Marked these with FELIX_ISSUE and made minimal change to make compile (as fix not obvious) 2019-05-17 09:59:01 +01:00
acd5a01b65 some work on baryons 2019-05-16 15:11:50 +01:00
0a8b6724ef Merge pull request #209 from fionnoh/develop
Added gauge transform option to eigpack IO
2019-05-15 18:09:44 +02:00
ce102ac550 More logging, timing, and 4d/5d logic for eigpack gauge transforms 2019-05-15 14:31:25 +01:00
94accec311 Added gauge transform option to eigpack IO 2019-05-15 13:35:47 +01:00
1a82533d22 fix inner product with thrust reduction 2019-05-14 15:35:54 +01:00
ec7d96ce3b Merge branch 'develop' into feature/distil
* develop:
  Hadron WeakEye and A2ALoop bug fixes, and WWVVContraction bug fix
  DiskVector: fix of memory bug triggering segfault when the cache is accessed following a certain pattern
  MFermion::GaugeProp fix for 4d fields
2019-05-14 13:10:40 +01:00
e3c56fd9b3 CayleyZeroCounters before benchmark loop 2019-05-13 15:52:00 +01:00
955cc7790f MooeeInvDag offloaded to GPU 2019-05-13 14:25:29 +01:00
1179123ac2 MooeeInv offloaded to GPU 2019-05-13 12:37:12 +01:00
d8512b03f8 Merge pull request #195 from nils-asmussen/fix_GaugeProp_4d
MFermion::GaugeProp fix for 4d fields
2019-05-12 21:31:18 +02:00
d90cf9d022 Merge pull request #207 from fionnoh/develop
Weak Hamiltonian and contraction bug fixes
2019-05-12 21:30:20 +02:00
79e930ba12 Hadrons: Lepton Propagator for kl2, sign swap for antiperiodic boundary 2019-05-10 12:46:18 +01:00
22e35c9ddd M5Ddag offloaded to GPU 2019-05-10 12:23:39 +01:00
698b45e163 remove unused typedef 2019-05-09 11:19:39 +01:00
f1744b3f01 M5D offloaded to GPU 2019-05-09 11:17:55 +01:00
2b3c22f03d bandwidth dependent on grid default precision 2019-05-08 12:01:11 +01:00
8423a05940 duplicate CayleyFermion5D for gpu 2019-05-08 11:51:37 +01:00
2acd8ece65 Hadron WeakEye and A2ALoop bug fixes, and WWVVContraction bug fix 2019-05-08 10:57:36 +01:00
b638509c61 Merge branch 'develop' of https://github.com/paboyle/Grid into develop 2019-05-08 10:51:04 +01:00
c16916cc45 Multiple local slice fixes 2019-05-06 10:35:42 +01:00
a865caf0d2 Forgot a const in IndexName only version of NamedTensor constructor 2019-05-03 22:17:25 +01:00
9ae4d369f3 Use the definition of the Perambulator Index names given in Hadrons::MDistil 2019-05-03 22:00:50 +01:00
edeb590818 DiskVector: fix of memory bug triggering segfault when the cache is accessed following a certain pattern 2019-05-03 17:09:47 +01:00
ec24a1f828 Fixed 2 bugs in LapEvec: 1) InsertLocalSlice 2) ensure convergence assertion stops entire machine 2019-05-03 16:03:56 +01:00
0efe63f6fa 3D smearing fix 2019-05-02 19:37:59 +01:00
b7ead6c16a Fixed bug: iff stout smearing disabled then gauge field uninitialised 2019-05-02 18:20:49 +01:00
d9438627d9 M5D benchmark without vector copy overhead 2019-05-02 11:10:57 +01:00
b23305dbe2 fix M5D flop count 2019-05-02 11:08:21 +01:00
d3b5c02e2d measure M5D bandwidth and fix M5D flop count 2019-05-02 11:02:39 +01:00
8b6541fb60 Fix gpu MultRealPart and MaddRealPart bug 2019-05-02 10:58:17 +01:00
6da9aa9971 replace std::vector with Vector in benchmark 2019-05-02 10:56:22 +01:00
44e0360b97 replace std::vector with Vector 2019-05-02 10:55:36 +01:00
9003c4a07c allocator copy constructor (to be fixed) 2019-05-02 10:53:37 +01:00
b52fa38f8c seed initialisation of RNG5 2019-05-02 10:36:09 +01:00
3f1c4d8789 fix comment hash 2019-05-02 10:24:36 +01:00
62692b68b9 I'd forgotten that Intel '17 doesn't like auto var{value}; syntax 2019-05-01 20:45:16 +01:00
311c35a15c Looking for fixes for Intel '17 compiler errors. std::cout << complex number ? 2019-05-01 18:22:08 +01:00
a3fe57f430 NamedTensor writes to tag NamedTensor by default (not filename) - so still usable in case user renames file.
Also tweaked tensor index name checking (which is used to ensure tensor is correct type)
2019-05-01 18:11:37 +01:00
8dc0587621 Post Michael / Felix review. Ready for Peter / Antonin review 2019-05-01 13:04:51 +01:00
cfe5fa7a35 1) Don't write Laplacian eigenvectors to disk 2) Add a test that loads perambulators from disk 2019-05-01 09:50:23 +01:00
e72e26c899 Get rid of unnecessary multiFile options 2019-05-01 08:53:08 +01:00
334f29becb Fairly close to ready for release. Felix and I to review, then submit for release 2019-04-30 23:53:57 +01:00
e56ead55ef WIP 2019-04-30 14:41:48 +01:00
4f0631615f A2A Lepton-Meson Field contraction 2019-04-30 12:04:59 +01:00
d74d443d1b Pre-release cleanup in progress 2019-04-29 22:18:29 +01:00
4203105104 Part-way through release tidy-up 2019-04-29 18:40:38 +01:00
c2cd0e15d7 Merge remote-tracking branch 'upstream/develop' into feature/kl2QED
Conflicts:
	Grid/qcd/action/fermion/DomainWallFermion.h
	Grid/qcd/action/fermion/FermionOperator.h
2019-04-29 12:07:20 +01:00
ac19c0e04f This will need to be removed eventually, but should save us fiddling about with each release 2019-04-29 09:20:08 +01:00
b48ca8a6ef Merge branch 'develop' into feature/distil
* develop: (36 commits)
  Mobius 2+1f sign off.
  Integrator logging on by default
  RHMC for mobius
  HMC make file
  Update
  Simple check
  Simple checks
  Mobius HMC
  Changes locally
  Power method
  Momentum rescaling
  Bounds checking
  Bounds checking
  Scale momentum convention to CPS/UKQCD MD time
  Add bounds checking
  Updated documentation after Peter's review. 1) Removed version numbers from Grid dependencies 2) Explained in a little more detail how to use Xcode to build Grid and Hadrons libraries
  Remove bundled Eigen stuff
  Fix typo so it matches develop
  Remove bundled source from my local repository
  Slightly generalize interface to SchurRedBlackBase and derived solver classes so we can pass forecasted initial guesses in EOFA heatbath correctly
  ...
2019-04-29 08:37:39 +01:00
c48ae4f3ad 1) Only the boss should write the perambulator - possibly was a source of intermittent corruption?
2) Implemented and test a perambulator conversion utility in Test_distil (commented out near the start of main)
2019-04-28 23:24:57 +01:00
fb74de0798 Making sure Hdf5 is an optional dependency (default to binary writer if not present) 2019-04-28 20:23:44 +01:00
adc1eaee68 Switched to Hdf5 format for perambulators. Ready for first test on Tesseract. 2019-04-28 17:53:42 +01:00
60330e05a3 NVCC wacky compiler options frozen. Possibly Cuda 9.2 specific 2019-04-28 07:39:33 +01:00
f9b8c0cccf Vector changes for UVM 2019-04-28 07:38:57 +01:00
3cad67e569 Compile on tesseract 2019-04-28 07:38:09 +01:00
170ba4e619 Ensure different MPI ranks use different GPUs. The mapping works on Tesseract. 2019-04-28 07:32:30 +01:00
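
The mapping itself is not shown in the log; a common recipe, given here only as an illustrative sketch (not Grid's actual code), is to split the world communicator per node and bind each local rank to a distinct visible device.

    #include <mpi.h>
    #include <cuda_runtime.h>

    // Bind the calling MPI rank to one of the GPUs visible on its node.
    void bindRankToGpu()
    {
      MPI_Comm nodeComm;
      int localRank = 0, nDevices = 0;
      // Ranks sharing a node end up in the same sub-communicator.
      MPI_Comm_split_type(MPI_COMM_WORLD, MPI_COMM_TYPE_SHARED, 0,
                          MPI_INFO_NULL, &nodeComm);
      MPI_Comm_rank(nodeComm, &localRank);
      cudaGetDeviceCount(&nDevices);
      cudaSetDevice(localRank % nDevices);   // distinct device per local rank
      MPI_Comm_free(&nodeComm);
    }
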
204a090497 Inner product is not working on GPU. Why? 2019-04-28 07:31:56 +01:00
3c717c47ef GPU no compile on Wilson Multigrid fixed 2019-04-28 07:31:19 +01:00
5aca4e8670 Just realised that the trace is at every lattice site, so moved the check for no smearing further up 2019-04-26 17:23:18 +01:00
e223d0b99f Need to validate range about which exp^iQ is considered unity 2019-04-26 16:00:35 +01:00
2e220456d3 First attempt at minimising smearing 2019-04-26 15:54:05 +01:00
4333d97958 fixed parameter 2019-04-26 14:29:21 +01:00
55c9c45d4b Merge branch 'feature/distil' of https://github.com/mmphys/Grid into feature/distil 2019-04-26 14:28:01 +01:00
e70e03f560 started stout smearing for small w 2019-04-26 14:27:40 +01:00
ff5e2e0f47 Debug output fix. Meant to print the rho matrix for stout smearing ... not the address of the function that creates it 2019-04-26 12:30:41 +01:00
4f3d1ea6e8 Two heads are better than one. Combined effort and hopefully spatial smearing now fixed! 2019-04-26 12:18:11 +01:00
b1768ba820 Urgh! 2019-04-26 10:04:27 +01:00
3ac5a69a57 Ready to test spatial smearing (again) 2019-04-26 08:54:30 +01:00
50a74eaea3 Doesn't compile. Does it still need to be maintained? 2019-04-26 08:33:10 +01:00
8419fbb335 Renamed PerambLight module. Check with Felix whether Test_24 and Test_tesseract still need to be maintained 2019-04-26 08:23:15 +01:00
23a9b93cda More dependencies for Distil.hpp move and (C) 2019 only 2019-04-26 07:39:05 +01:00
ecdc3ddebf Moved Distil.hpp and added GNU license to all files 2019-04-26 07:24:56 +01:00
df41de4cb6 Merge branch 'develop' of https://github.com/paboyle/Grid into develop 2019-04-24 12:02:50 +01:00
6d0b985697 Verbose 2019-04-24 06:29:52 +01:00
94ebcf551c Iteration range fix 2019-04-24 06:28:14 +01:00
6fd4b0be91 Evolving HMC status 2019-04-23 21:54:45 +01:00
7894ea6263 Now have mixed precision solves in the 2f sector 2019-04-23 21:54:19 +01:00
73d4676997 Action and Deriv solvers allowed to differ 2019-04-23 21:53:44 +01:00
262a73c964 Comment improvement 2019-04-23 21:52:58 +01:00
5921b1d2b9 Layout/whitespace changes 2019-04-23 21:52:33 +01:00
6505efcb57 Set iteration count if guess is already good 2019-04-23 21:51:57 +01:00
b595f58e4c Allow HMC to acces matrix 2019-04-23 21:51:23 +01:00
b0de7ab7db Extra do nothing guesser 2019-04-23 21:50:45 +01:00
e1124d9572 Integrator verbosity updates 2019-04-23 21:50:15 +01:00
606698511c Seems we've not been keeping the test up-to-date 2019-04-22 19:03:24 +01:00
a97b814f0c Remove redundancy in LapEvec filename 2019-04-19 14:09:36 +01:00
7214681e11 Spatial smearing doesn't work yet. Fixed inconsistency in naming of perambulator in PerambLight.hpp 2019-04-19 13:54:25 +01:00
143b75956c Stout smearing 3D fixes. Changed LapEvec to perform spatial smearing only 2019-04-19 11:54:02 +01:00
d416156c16 Mobius 2+1f sign off. 2019-04-19 07:57:08 +01:00
cd8d939a1a Integrator logging on by default 2019-04-19 07:54:17 +01:00
760cfe294c RHMC for mobius 2019-04-19 07:53:54 +01:00
4a4203c610 fixed stout smearing for now 2019-04-18 19:10:49 +01:00
2b598294c9 added distil source module 2019-04-18 17:47:09 +01:00
13eaf21b5c HMC make file 2019-04-18 11:53:26 +01:00
1403ab231b Merge branch 'develop' of https://github.com/paboyle/Grid into develop 2019-04-18 11:06:02 +01:00
0368fbcde8 Update 2019-04-18 11:05:53 +01:00
2dd0ec7862 Merge pull request #186 from djm2131/feature/eofa-bug-fixes
Merge feature/eofa-bug-fixes into develop
2019-04-17 14:54:06 +01:00
f4241e59ba Merge pull request #200 from mmphys/feature/XcodeDoc
Updated documentation after Peter's review.
2019-04-17 14:51:19 +01:00
26b1d2df2d Merge branch 'develop' of https://github.com/paboyle/Grid into develop 2019-04-17 12:08:06 +01:00
bc14e86812 Simple check 2019-04-17 12:07:42 +01:00
780a67844e Simple checks 2019-04-17 12:07:17 +01:00
8b7805200f Merge branch 'develop' of https://github.com/paboyle/Grid into develop 2019-04-17 12:05:09 +01:00
2871dec6c0 Mobius HMC 2019-04-17 12:04:57 +01:00
abde12433e Changes locally 2019-04-17 12:03:20 +01:00
1f88ba4e39 Power method 2019-04-17 12:03:05 +01:00
ea5b3ed8a2 Momentum rescaling 2019-04-17 12:01:06 +01:00
a104115c7d Bounds checking 2019-04-17 11:56:46 +01:00
b899042d81 Bounds checking 2019-04-17 11:55:43 +01:00
3e712fe643 Scale momentum convention to CPS/UKQCD MD time 2019-04-17 11:54:17 +01:00
f4723e07c5 Add bounds checking 2019-04-17 11:52:23 +01:00
9ed2d02bb2 Merge branch 'develop' of https://github.com/paboyle/Grid into develop 2019-04-12 12:11:06 +01:00
d111c70c38 Merge branch 'develop' into feature/distil
* develop:
  Make sure Grid::Serializable can write Eigen Tensors to output streams. NB: 1) The Eigen package defines operator<< for Eigen tensors, but this format is different, hence Grid::Serializable::WriteMember 2) For simplification, the contents are written in memory order. I.e. Different results will be obtained depending on whether the tensor is row- or column-major
  ... this time without the new Distillation modules ...
  Eigen tensor serialisation fixes after Antonin's review
  Iterator added. Will wait for review comments before finalising.
  Fix build with Intel '17 compiler, i.e. workaround incorrect auto types for c++ style definitions. E.g. assuming T::rank is an int, then objects defined like so:     const auto rank{T::rank}; should also be int. Unfortunately, Intel '17 instead defines them to be std::initializer_list<int>, then proceeds to complain where these variables are used that they cannot be converted to int. NB: This was fixed under Intel '18
  Pushed paboyle's changes: Updates for clang happy
  Merge paboyle's no compile in single precision Intel 2019 fix
  Eigen::Tensor serialisation. Tested on single and double precision builds
2019-04-10 13:14:24 +01:00
50d016340c Merge pull request #190 from mmphys/feature/distil-checkin
Eigen::Tensor serialisation. Tested on single and double precision builds
2019-04-10 12:49:06 +01:00
f7b4fd0f69 Make sure Grid::Serializable can write Eigen Tensors to output streams. NB:
1) The Eigen package defines operator<< for Eigen tensors, but this format is different, hence Grid::Serializable::WriteMember
2) For simplification, the contents are written in memory order. I.e. Different results will be obtained depending on whether the tensor is row- or column-major
2019-04-06 15:40:23 +01:00
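
A hypothetical helper illustrating the "memory order" point above (this is not Grid::Serializable::WriteMember itself): the flat data() buffer is streamed linearly, so a RowMajor and a ColMajor tensor holding the same values produce different output.

    #include <unsupported/Eigen/CXX11/Tensor>
    #include <iostream>

    // Stream an Eigen::Tensor in memory order, whatever its storage layout.
    template <typename Scalar, int Rank, int Options>
    void writeFlat(std::ostream &os,
                   const Eigen::Tensor<Scalar, Rank, Options> &t)
    {
      const Scalar *p = t.data();
      for (Eigen::Index i = 0; i < t.size(); ++i)
        os << p[i] << " ";
      os << "\n";
    }
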
ed2427d5f7 Make sure Grid::Serializable can write Eigen Tensors to output streams. NB:
1) The Eigen package defines operator<< for Eigen tensors, but this format is different, hence Grid::Serializable::WriteMember
2) For simplification, the contents are written in memory order. I.e. Different results will be obtained depending on whether the tensor is row- or column-major
2019-04-06 15:37:53 +01:00
1f1aa92f14 Updated documentation after Peter's review.
1) Removed version numbers from Grid dependencies
2) Explained in a little more detail how to use Xcode to build Grid and Hadrons libraries
2019-04-06 13:42:39 +01:00
ea2f34de7b Updated documentation after Peter's review.
1) Removed version numbers from Grid dependencies
2) Explained in a little more detail how to use Xcode to build Grid and Hadrons libraries
2019-04-06 13:37:47 +01:00
00963a7499 twist and boundary conditions for free propagator 2019-04-05 10:08:27 +01:00
63dc0fa7e9 Fixed memory leak ... without breaking semantics of prior code. Possibly should change the semantics? For Peter / Antonin to comment 2019-04-04 16:00:17 +01:00
5e6104e683 Merge branch 'feature/distil' of https://github.com/mmphys/Grid into feature/distil 2019-04-04 12:13:35 +01:00
25e4ee3a49 3D Stout smearing added 2019-04-04 12:13:16 +01:00
82a77f9960 ... this time without the new Distillation modules ... 2019-04-03 23:02:26 +01:00
00b4139c16 Eigen tensor serialisation fixes after Antonin's review 2019-04-03 22:48:07 +01:00
4161429dcc Serialisation fixes after Antonin's review 2019-04-03 22:30:07 +01:00
3e9c757b3b Merge branch 'develop' of https://github.com/paboyle/Grid into develop 2019-04-03 16:56:48 +01:00
b5eb97206b Merge branch 'develop' into feature/distil
* develop:
  MGauge::GaugeFix use standard convention for fields
  fix bug: MGauge::GaugeFix should not modify its input
  add gauge transformation matrix as output to module MGauge/GaugeFix
2019-04-03 16:24:49 +01:00
ecf736e6bf Merge pull request #193 from nils-asmussen/fix_GaugeFix_inputmod
Fixes #192 and adds gauge transformation matrix as output.
2019-04-02 18:28:50 +01:00
f22ab5e1bc Merge branch 'develop' of https://github.com/paboyle/Grid into develop 2019-04-02 11:29:59 +01:00
72f959c0b8 MGauge::GaugeFix use standard convention for fields 2019-03-29 16:51:21 +00:00
63001d3fa6 fix bug: MGauge::GaugeFix should not modify its input 2019-03-29 16:51:11 +00:00
b1d3d1f1a9 add gauge transformation matrix as output to module MGauge/GaugeFix 2019-03-29 16:51:00 +00:00
c2250fa124 MFermion::GaugeProp fix for 4d fields 2019-03-29 16:36:56 +00:00
84940fbdf0 Hadrons: Lepton Propagator for kl2 2019-03-28 10:15:09 +00:00
0da906cf66 Merge branch 'develop' into feature/distil
* develop:
  Documentation for using Grid with Xcode on Mac OS
2019-03-27 23:08:29 +00:00
0a270b3e93 Merge pull request #191 from mmphys/GridXcode
Documentation for using Grid with Xcode on Mac OS
2019-03-27 22:53:35 +00:00
6536bed8a4 Documentation for using Grid with Xcode on Mac OS 2019-03-27 20:51:20 +00:00
3decb5f886 Merge branch 'develop' of github.com:paboyle/Grid into feature/distil
* 'develop' of github.com:paboyle/Grid:
  endianness fix in resilient IO
2019-03-27 20:39:23 +00:00
faa8bb9bc6 Fixed funny memory leak 2019-03-27 17:55:52 +00:00
4c02ed6d0c Updated GridXcode documentation 2019-03-27 13:54:39 +00:00
f757b80e1c tried to fix mem leak 2019-03-27 12:00:36 +00:00
b8581be1da : 2019-03-27 11:59:06 +00:00
79160011a1 endianness fix in resilient IO 2019-03-26 16:06:13 +00:00
9fce1263be Fixed bug in LapEvec if machine running spread-out in time 2019-03-26 13:24:39 +00:00
ae565b006a Compiling in single-precision now works 2019-03-25 22:56:01 +00:00
8502660023 Begin fixes for single precision 2019-03-25 20:40:05 +00:00
47f5b1e2b5 Iterator added. Will wait for review comments before finalising. 2019-03-25 18:19:55 +00:00
625a97a466 cosmetic 2019-03-25 18:16:04 +00:00
bce2766fef Merge branch 'feature/distil' of https://github.com/mmphys/Grid into feature/distil 2019-03-25 16:38:42 +00:00
ce501afec6 bugfix 2019-03-25 16:38:25 +00:00
1d10a3b3de Merge branch 'feature/distil' of github.com:mmphys/Grid into feature/distil
* 'feature/distil' of github.com:mmphys/Grid:
  bugfix
2019-03-25 15:50:57 +00:00
d1e02f50ff Added iterator for Eigen tensors 2019-03-25 15:50:29 +00:00
48b03c4590 bugfix 2019-03-25 15:45:35 +00:00
b3b9e608e1 added new module for noises 2019-03-25 14:13:03 +00:00
4e87cbd400 Fix build with Intel '17 compiler, i.e. workaround incorrect auto types for c++ style definitions.
E.g. assuming T::rank is an int, then objects defined like so:
const auto rank{T::rank};
should also be int. Unfortunately, Intel '17 instead defines them to be std::initializer_list<int>, then proceeds to complain where these variables are used that they cannot be converted to int. NB: This was fixed under Intel '18
2019-03-23 09:28:41 +00:00
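
A small self-contained illustration of the workaround, assuming a type T with a static int member rank as in the message above; either drop the braces or spell the type so Intel '17 cannot deduce std::initializer_list<int>.

    struct T { static constexpr int rank = 3; };

    void example()
    {
      // Problematic under Intel '17 (deduced as std::initializer_list<int>):
      //   const auto rank{T::rank};
      // Portable alternatives:
      const auto rank  = T::rank;   // copy-initialisation: deduced as int
      const int  rank2 {T::rank};   // spell the type, keep the braces
      (void)rank; (void)rank2;
    }
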
a381d34f37 Fix build with Intel '17 compiler, i.e. workaround incorrect auto types for c++ style definitions.
E.g. assuming T::rank is an int, then objects defined like so:
    const auto rank{T::rank};
should also be int. Unfortunately, Intel '17 instead defines them to be std::initializer_list<int>, then proceeds to complain where these variables are used that they cannot be converted to int. NB: This was fixed under Intel '18
2019-03-23 09:24:15 +00:00
4fc045b563 added module to load perambulators from disk 2019-03-22 13:50:47 +00:00
fbf286b0e3 added Spin dilution 2019-03-22 13:30:11 +00:00
9dc3fe9922 Merge branch 'feature/distil' of github.com:mmphys/Grid into feature/distil
* 'feature/distil' of github.com:mmphys/Grid:
  modules list
2019-03-22 13:00:06 +00:00
f0c2108acf Pushed paboyle's changes: Updates for clang happy 2019-03-22 12:59:14 +00:00
6c9029fab7 Merge branch 'feature/distil' of https://github.com/mmphys/Grid into feature/distil 2019-03-22 12:41:56 +00:00
8700dd4d0d modules list 2019-03-22 12:41:53 +00:00
9c16391e55 Merge branch 'develop' into feature/distil
* develop:
  Updates for clang happy
2019-03-22 12:08:50 +00:00
93a5fc083f Updates for clang happy 2019-03-22 11:39:22 +00:00
6d1de8ed2e Merge paboyle's no compile in single precision Intel 2019 fix 2019-03-21 16:48:08 +00:00
685d9bafef Merge branch 'develop' into feature/distil
* develop:
  No compile in single precision Intel 2019 fix
2019-03-21 16:36:48 +00:00
116dde31eb No compile in single precision Intel 2019 fix 2019-03-21 14:13:33 +00:00
d2d26b302d Removed the module we don't need from modules.inc (so make now works)
i.e. removed Modules/MDistil/PerambMultipleSolves.hpp from Hadrons/modules.inc
2019-03-20 22:59:20 +00:00
12d8bf1ced Eigen::Tensor serialisation. Tested on single and double precision builds 2019-03-20 22:27:41 +00:00
88cb004731 Fixed single-precision issues in Test_serialisation 2019-03-20 22:05:16 +00:00
a66bb8acba fixed possible memory leak 2019-03-20 14:41:36 +00:00
4ae35000a9 removed module which we do not need 2019-03-20 13:36:57 +00:00
02b96b4602 Fixed module list (messed up when I merged from develop) 2019-03-20 11:20:40 +00:00
11dded61e8 Merge branch 'develop' into feature/distil
* develop: (29 commits)
  precision fix
  Updates after review with Peter.
  Wilson clover multi grid for lime lattice
  Recommendations for Traits classes
  Hadrons: uninitialised pointer fix (might have been harmless)
  Hadrons: beware of the nasty uninitialised twists
  Smearing test. Test on free field.
  Smearing for quark observables
  Smearing
  Hadrons: XML validator utility
  display relative norm during field IO norm check
  possibility to set a build number
  IO norm check on relative norm
  Output field norm check during IO
  Hadrons: random vector utility module I/O
  quieter initialisation
  fix patch command for eigen in bootstrap.sh
  Mres changes and gauge xform mat changes
  Hadrons: 32 bit I/O directly in Lanczos module
  Hadrons: copyright update
  ...

# Conflicts:
#	Grid/tensors/Tensor_traits.h
#	Hadrons/Modules.hpp
#	Hadrons/modules.inc
2019-03-20 10:35:36 +00:00
d921a99b1a precision fix 2019-03-19 17:07:40 +00:00
9790926cc5 Merge branch 'develop' of https://github.com/paboyle/Grid into develop 2019-03-19 16:35:48 +00:00
24cf3b9df5 Ignore Version.h as it's created by automake/autoconf 2019-03-19 12:12:39 +00:00
9c8aa2047d Put GridXcode doc in subdirectory 2019-03-19 07:33:19 +00:00
204cfa1c5a Added documentation for Grid using Xcode 2019-03-19 07:28:29 +00:00
fe6845d38b Merge branch 'feature/distil' of github.com:mmphys/Grid into feature/distil 2019-03-18 14:44:18 +00:00
a6adb85a1b Merge pull request #185 from mmphys/feature/trait-recommend
Recommendations for Traits classes
2019-03-18 12:22:42 +00:00
bff4eeec41 Added disclaimer on half-precision types 2019-03-18 12:15:25 +00:00
98b5b61fea Remove bundled Eigen stuff 2019-03-15 19:44:28 -04:00
6896c57d7c Fix typo so it matches develop 2019-03-15 19:34:36 -04:00
b3d480a978 Remove bundled source from my local repository 2019-03-15 19:17:03 -04:00
bb731c97d6 Slightly generalize interface to SchurRedBlackBase and derived solver classes so we can pass forecasted initial guesses in EOFA heatbath correctly 2019-03-15 19:10:56 -04:00
974003ae96 Fix sign convention of ExactOneFlavourRatioPseudoFermionAction::deriv() to match force conventions for Integrator class 2019-03-15 19:04:29 -04:00
93348775af Resolved merge conflict 2019-03-15 19:01:37 -04:00
d1fe4dce33 new idea to get multiple perambulators 2019-03-15 10:28:02 +00:00
50ca3101de bug in multiSolves and new test prog 2019-03-13 17:25:55 +00:00
0faf40e207 last commit did not compile - fixed this 2019-03-13 13:24:18 +00:00
5313e44d11 some cleanup 2019-03-13 13:15:12 +00:00
6bb9b67c93 externalised gauge field reading to hadrons module 2019-03-13 12:09:12 +00:00
a0405c6d84 PerambMultipleSolves.hpp compiles (not had time to test) 2019-03-12 14:01:29 +00:00
c2a3231cdf added testing module for multiple perambulators 2019-03-11 18:05:39 +00:00
5fb2ee89bb modified test so that it runs 2019-03-08 16:50:21 +00:00
608a98d870 Merge branch 'feature/distil' of github.com:mmphys/Grid into feature/distil 2019-03-08 16:28:34 +00:00
2df396380d solver is now external 2019-03-08 16:28:21 +00:00
64ba664637 changed debug options 2019-03-08 12:25:00 +00:00
4a70b2ffd4 Aslash insertions work now? 2019-03-08 12:23:22 +00:00
2d659015ff Serialisation is fully functional. Ready for review. 2019-03-08 00:30:43 +00:00
e63019ac50 Tensor serialisation is fully functional 2019-03-08 00:01:45 +00:00
dde118fed9 added everything to compute sequential aslash fields 2019-03-07 17:36:53 +00:00
1538bf8c34 added everything to compute sequential aslash fields 2019-03-07 17:36:22 +00:00
4abc498ae3 Merge branch 'feature/distil' of https://github.com/mmphys/Grid into feature/distil 2019-03-07 15:34:10 +00:00
93dfbfbfcd added module to compute perambulator from a solve 2019-03-07 15:33:50 +00:00
f9e273d4bf Making sure same as Traits-recommend 2019-03-07 14:33:04 +00:00
91cffef883 Updates after review with Peter. 2019-03-07 14:30:35 +00:00
584fa0a633 Changes after review with Peter 2019-03-07 12:53:34 +00:00
d3935ae7fc Hadrons: some updates in WeakMesonDecayKl2 2019-03-06 15:27:59 +00:00
73cdca3973 Merge branch 'feature/distil' of github.com:mmphys/Grid into feature/distil 2019-03-06 13:55:51 +00:00
d716f8a0c9 new module for baryon contraction 2019-03-06 13:55:36 +00:00
aa24f04911 Changed EigenIO to use GridTypeMapper type traits 2019-03-06 12:55:05 +00:00
0b426bf9f6 Merge remote-tracking branch 'upstream/develop' into feature/kl2QED
Conflicts:
	Hadrons/Modules.hpp
	Hadrons/modules.inc
2019-03-06 11:28:59 +00:00
1880e6d12d Merge branch 'feature/distil' of github.com:mmphys/Grid into feature/distil 2019-03-06 11:16:34 +00:00
4a00513e65 Moving Eigen tensor utilities to separate (optional) header 2019-03-06 11:16:22 +00:00
7718ee199a efficient baryon test program 2019-03-05 17:16:42 +00:00
d7c7bff065 added output for source meson fields on all tsrc 2019-03-05 12:01:55 +00:00
802675f062 baryons should compile now... 2019-03-04 17:31:21 +00:00
acd25d0d01 Wilson clover multi grid for lime lattice 2019-03-04 11:30:15 +00:00
d56d8c923f Replaced an error in A2AUtils.h that was stopping the build with an assert() 2019-03-02 00:36:53 +00:00
00c3c6fc54 Merge branch 'feature/distil' of github.com:mmphys/Grid into feature/distil 2019-03-02 00:24:47 +00:00
b3d4ba8657 Fixed issues with Eigen Tensor serialisation. Fixed issues with precision to text streams 2019-03-02 00:24:37 +00:00
a4d578bd5d baryons work now??? 2019-03-01 14:44:39 +00:00
7653649389 baryons working now 2019-03-01 12:57:41 +00:00
a344a2227e Fixing build errors 2019-02-28 20:30:16 +00:00
b7db99967a Recommendations for Traits classes 2019-02-28 20:06:59 +00:00
4b9200b35c Merge branch 'feature/distil' of github.com:mmphys/Grid into feature/distil 2019-02-28 19:06:36 +00:00
91be028507 Still one issue on write 2019-02-28 19:06:25 +00:00
3b05f91f5c Prototype for template traits recommendations 2019-02-28 19:04:44 +00:00
8804271339 efficient baryons compile! 2019-02-28 16:32:40 +00:00
6d9f377913 added parity 2019-02-28 11:05:31 +00:00
18b603c5ae simple but hopefully efficient baryon field 2019-02-28 10:27:05 +00:00
e9784572af baryons... 2019-02-27 17:51:25 +00:00
b930eda69d Merge branch 'develop' of github.com:paboyle/Grid into develop 2019-02-27 02:27:46 +00:00
7852181c2c Hadrons: uninitialised pointer fix (might have been harmless) 2019-02-27 02:27:40 +00:00
bdf87bc994 Hadrons: beware of the nasty uninitialised twists 2019-02-27 02:27:09 +00:00
f168a9e7ee continued with baryons 2019-02-26 16:41:52 +00:00
50b6db75da Merge branch 'feature/distil' of https://github.com/mmphys/Grid into feature/distil 2019-02-26 15:57:09 +00:00
df065f1d57 first test configs 2019-02-26 15:57:01 +00:00
136e7b2314 Merge branch 'develop' of https://github.com/paboyle/Grid into develop 2019-02-26 11:31:36 +00:00
1ea64b24fe Smearing test. Test on free field. 2019-02-26 11:31:17 +00:00
8f661f6c05 Smearing for quark observables 2019-02-26 11:31:00 +00:00
ae9e248c95 Smearing 2019-02-26 11:29:12 +00:00
578eb177e7 Tweaked format and memory use on Xml format. Still crashes (out of memory) on large read on my laptop 2019-02-25 22:03:21 +00:00
81b3f3d2ca Merge branch 'feature/distil' of github.com:mmphys/Grid into feature/distil 2019-02-25 15:39:07 +00:00
7c7ffa3b10 Added text read/write 2019-02-25 15:38:47 +00:00
1f098ceecf Merge branch 'feature/distil' of github.com:mmphys/Grid into feature/distil 2019-02-25 15:36:23 +00:00
c47c1a2472 started working on baryons - this time efficiently 2019-02-25 15:36:11 +00:00
ec45b16840 Merge branch 'feature/distil' of github.com:mmphys/Grid into feature/distil 2019-02-25 14:10:34 +00:00
9288019789 Added Xml IO (has one deficiency: the format for multi-dimensional data is flat) 2019-02-25 14:10:24 +00:00
351ffe73cd Merge branch 'develop' of https://github.com/paboyle/Grid into develop 2019-02-25 14:06:09 +00:00
9c04139362 Merge branch 'feature/distil' of github.com:mmphys/Grid into feature/distil 2019-02-25 12:40:44 +00:00
cfc14a7432 more adjustments to test 2019-02-25 12:40:32 +00:00
31e40c26fa Oops. Forgot to delete SortNode (prevented linking) 2019-02-25 11:35:33 +00:00
3f2fe5c7e7 Merge branch 'feature/distil' of github.com:mmphys/Grid into feature/distil 2019-02-25 11:18:34 +00:00
76b6e8a01e first tesseract test 2019-02-25 11:18:25 +00:00
f9543982e4 Merge branch 'feature/distil' of github.com:mmphys/Grid into feature/distil 2019-02-25 11:07:43 +00:00
3c9f2d4106 Chunking layout reasonably efficient. Looks for small prime factors of each dimension, falling back to approximate size if needed. 2019-02-25 11:07:29 +00:00
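
A rough sketch of the heuristic as described (not the actual HDF5 writer code): peel small prime factors off each dimension until the chunk fits a byte cap, and cut approximately when no small factor divides it. Called with the default cap it also respects the 4 GB HDF5 chunk limit mentioned in a nearby commit.

    #include <algorithm>
    #include <cstddef>
    #include <vector>

    std::vector<std::size_t> chunkShape(std::vector<std::size_t> dims,
                                        std::size_t elemBytes,
                                        std::size_t capBytes = (std::size_t(1) << 32) - 1) // just under 4 GB
    {
      auto chunkBytes = [&]() {
        std::size_t n = elemBytes;
        for (auto d : dims) n *= d;
        return n;
      };
      const std::size_t primes[] = {2, 3, 5, 7};
      for (std::size_t i = 0; i < dims.size() && chunkBytes() > capBytes; ++i) {
        bool divided = true;
        while (divided && chunkBytes() > capBytes) {   // peel off small prime factors
          divided = false;
          for (auto p : primes)
            if (dims[i] % p == 0) { dims[i] /= p; divided = true; break; }
        }
        if (chunkBytes() > capBytes) {                 // approximate fallback
          std::size_t otherBytes = chunkBytes() / dims[i];
          dims[i] = std::max<std::size_t>(1, capBytes / otherBytes);
        }
      }
      return dims;
    }
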
6160795a43 dt^2 term comments 2019-02-24 15:23:20 +00:00
ded2d5c3ab HMC directory 2019-02-24 15:22:57 +00:00
04255128ef HMC directory 2019-02-24 15:22:17 +00:00
a9a3248cb5 More precision in rect force test 2019-02-24 15:21:19 +00:00
7c461dc664 Bounds checking plan setup 2019-02-24 15:19:48 +00:00
15fddde9bf ConstEE override in Clover 2019-02-24 14:44:43 +00:00
048397d880 Default tau spacing should be longer c.f. Zbigniew Srocinsky thesis 2019-02-24 14:43:22 +00:00
196c9e4a4a Better conformable check with message 2019-02-24 14:42:52 +00:00
6a0823718e Make ConstEE except override in clover 2019-02-24 14:41:59 +00:00
22476cc5a3 Power method estimator of spectral range 2019-02-24 14:37:56 +00:00
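
Purely as a reference sketch of the estimator named here (stand-in types, not the Grid implementation, and assuming the starting vector is already normalised): repeated application of the operator converges to its largest eigenvalue, which bounds the spectral range.

    #include <cmath>
    #include <functional>
    #include <vector>

    // Estimate the largest eigenvalue of a linear operator M by power iteration.
    double powerMethod(const std::function<std::vector<double>(const std::vector<double> &)> &M,
                       std::vector<double> v,      // assumed ||v|| = 1 on entry
                       int iterations = 50)
    {
      double lambda = 0.0;
      for (int k = 0; k < iterations; ++k) {
        std::vector<double> w = M(v);
        double norm = 0.0;
        for (double x : w) norm += x * x;
        norm = std::sqrt(norm);
        lambda = norm;                  // ||M v|| with ||v|| = 1
        for (double &x : w) x /= norm;  // renormalise for the next iteration
        v = std::move(w);
      }
      return lambda;
    }
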
cb16c96dc7 Hadrons: XML validator utility 2019-02-22 18:41:26 +00:00
cad26a736e quick&dirty fix for g5*field 2019-02-22 17:05:16 +00:00
4f2ac433f1 Merge branch 'feature/distil' of github.com:mmphys/Grid into feature/distil 2019-02-22 16:31:26 +00:00
f9e505108b test Aslash 2019-02-22 16:31:17 +00:00
d2aced13da Merge branch 'feature/distil' of https://github.com/mmphys/Grid into feature/distil 2019-02-22 16:30:40 +00:00
03d031d623 tesseract test 2019-02-22 16:30:22 +00:00
44a2d4854a Ensured Hdf5 chunk size always less than 4GB 2019-02-22 15:14:32 +00:00
292ff33f7f Removed issue with std::string_literal 2019-02-21 16:51:05 +00:00
55886cf9db ran make_module_list.sh 2019-02-21 16:14:13 +00:00
c640923159 Fixed reference to depth from test 2019-02-21 15:48:52 +00:00
752530f352 Gotten rid of c++17 in Test_serialisation.cc 2019-02-21 14:43:07 +00:00
34b9450fc9 Gotten rid of c++17 2019-02-21 14:22:48 +00:00
5d6462b706 bugfix 2019-02-21 11:13:10 +00:00
f70c5b004a some cleanup in Baryon2pt 2019-02-20 12:56:13 +00:00
5bb9de9242 Merge branch 'feature/distil' of github.com:mmphys/Grid into feature/distil 2019-02-19 17:37:37 +00:00
982a24514b Binary IO also implemented and tested 2019-02-19 17:37:21 +00:00
97c6f770b4 Merge branch 'feature/distil' of github.com:mmphys/Grid into feature/distil 2019-02-19 17:22:41 +00:00
4522f1e831 separated final 2pt Contraction 2019-02-19 17:22:30 +00:00
c14547ddbe EigenIO writing rationalised. All indices (trivial or not) written 2019-02-19 16:12:55 +00:00
63c97db414 Prior to rationalising 2 versions of BaseIO::write (scalar and vector) 2019-02-19 13:29:08 +00:00
6ebb32ffbf Rationalised Test_serialisation 2019-02-18 21:40:53 +00:00
07c97cb424 Merge branch 'feature/distil' of github.com:mmphys/Grid into feature/distil 2019-02-18 17:12:36 +00:00
04b58de5de Read-back working. 2019-02-18 17:12:27 +00:00
6e822b7201 added sign for contraction sum 2019-02-18 15:21:13 +00:00
625ccfcd72 continued baryon contraction code 2019-02-18 13:10:34 +00:00
c77069244d Nearly ready. Just finishing off readback and compare 2019-02-18 08:55:50 +00:00
9815ddb853 Started read routines. Introduced readMultiDim and tested I didn't break anything 2019-02-16 19:30:33 +00:00
74a3a5b825 Fixed existing bug in Hdf5Reader::readDefault for std::vector<U> 2019-02-16 18:45:46 +00:00
00e9416e0a Tweak to initialisation example 2019-02-16 17:08:22 +00:00
b6803a070a Making sure I understand row-major vs column-major ordering 2019-02-16 16:18:28 +00:00
bfd2770657 started on baryon flavour sums 2019-02-15 15:51:46 +00:00
668b1e77c7 small changes 2019-02-15 15:31:53 +00:00
e51744260f Merge branch 'feature/distil' of github.com:mmphys/Grid into feature/distil 2019-02-15 14:32:26 +00:00
e0987d7d81 first contraction version done 2019-02-15 14:32:17 +00:00
26b94d7bda Merge branch 'feature/distil' of github.com:mmphys/Grid into feature/distil 2019-02-15 13:53:00 +00:00
df0c8b5d84 Test of Eigen slices 2019-02-15 13:52:49 +00:00
a111d814db Merge branch 'feature/distil' of github.com:mmphys/Grid into feature/distil 2019-02-15 10:47:41 +00:00
e8bd8767c0 Get rid of declarations inside constexpr functions. if constexpr warning remains 2019-02-15 10:06:15 +00:00
8cb96cb693 Hmmm lots of warnings depending on compiler ... 2019-02-14 19:17:12 +00:00
b9bee45277 Merge branch 'feature/distil' of github.com:mmphys/Grid into feature/distil 2019-02-14 19:05:47 +00:00
bee24655cd Finalising traits 2019-02-14 19:05:35 +00:00
886c895f81 baryon field structure is now eigentensor - started on contractions for 2pt functions 2019-02-14 16:44:54 +00:00
e37614bde4 display relative norm during field IO norm check 2019-02-14 16:23:50 +00:00
042bad2ced possibility to set a build number 2019-02-14 13:58:17 +00:00
59c8cc1588 Minor bugfix 2019-02-13 22:11:24 +00:00
11467a994d Enough for tonight 2019-02-13 21:48:35 +00:00
9f2ca98dfc ensemble can now be specified in LapEvec 2019-02-13 13:54:31 +00:00
bf434b6bef Merge branch 'feature/distil' of github.com:mmphys/Grid into feature/distil 2019-02-13 12:14:18 +00:00
41ff592515 Moved serialisation tests into Test_serialisation 2019-02-13 12:14:01 +00:00
48ec937c55 Merge branch 'feature/distil' of github.com:mmphys/Grid into feature/distil 2019-02-13 11:48:57 +00:00
65731546b7 merge... 2019-02-13 11:48:34 +00:00
76c6a6772a Added rank_non_trivial 2019-02-12 22:15:55 +00:00
e7048231bc Working version with additional Grid traits pre: review by Antonin 2019-02-12 13:59:48 +00:00
49babeab19 Merge branch 'feature/distil' of github.com:mmphys/Grid into feature/distil 2019-02-11 23:26:46 +00:00
fb2cb3015e Writing of Eigen::Tensor of grid objects now works (for Hdf5) 2019-02-11 23:26:18 +00:00
53f45d2c7e Merge branch 'feature/distil' of github.com:mmphys/Grid into feature/distil 2019-02-11 17:39:55 +00:00
d889cebc60 unique string is now used 2019-02-11 17:39:42 +00:00
9a225235b6 Can write both fixed and dynamic sized tensors (small tidy) 2019-02-11 17:15:38 +00:00
dff7d9261d Can write both fixed and dynamic sized tensors 2019-02-11 15:47:40 +00:00
6f2663edf6 Serialisation of an object containing an Eigen::Tensor works for Hdf5. Still quite a lot of tidying up to do. 2019-02-10 23:19:20 +00:00
5bc0857412 IO norm check on relative norm 2019-02-10 22:12:47 +00:00
b540dc1cee Output field norm check during IO 2019-02-10 21:41:17 +00:00
7672bb6434 Hadrons: random vector utility module I/O 2019-02-10 21:25:25 +00:00
f80c548365 quieter initialisation 2019-02-10 20:47:35 +00:00
d5024bd07e Hdf5 writing of scalar (i.e. no Grid subtypes) Eigen::Tensor works. But issues when adding Eigen::Tensor to serialisable object. 2019-02-10 15:33:16 +00:00
9c4189484a Merge branch 'feature/distil' of github.com:mmphys/Grid into feature/distil 2019-02-09 17:12:43 +00:00
3720103f41 Adding Eigen::Tensor still WIP 2019-02-09 17:12:36 +00:00
c4d27ee30f added parity operator to baryon fields 2019-02-08 15:49:52 +00:00
d26a5dce12 bugfix 2019-02-08 14:37:09 +00:00
5843a943d9 Merge branch 'feature/distil' of github.com:mmphys/Grid into feature/distil 2019-02-08 14:36:08 +00:00
c1341b8ed2 bugfix 2019-02-08 14:33:06 +00:00
6a4515d0cd baryons now have the correct (?) structure - also easier! 2019-02-07 12:27:57 +00:00
a0a39e4b00 Fixed initialisation of vector of Complex 2019-02-06 21:56:44 +00:00
b9fb16077c Merge branch 'feature/distil' of github.com:mmphys/Grid into feature/distil 2019-02-06 21:37:54 +00:00
4b3c566c89 ../tests/hadrons/Test_hadrons_distil.cc 2019-02-06 21:36:46 +00:00
c8bcee6e97 Merge pull request #183 from nils-asmussen/fix-eigen-patch
fix patch command for eigen in bootstrap.sh
2019-02-06 14:36:36 +00:00
cbd2dfe53f Merge branch 'feature/distil' of github.com:mmphys/Grid into feature/distil 2019-02-06 12:56:49 +00:00
6cdb1eb62c BContraction now computes what might be a baryon function, but probably isn't 2019-02-06 12:23:52 +00:00
6e0d43aef5 fix patch command for eigen in bootstrap.sh 2019-02-06 11:25:51 +00:00
ed7175076b Turned off warning of unused variable line 150 2019-02-06 09:32:13 +00:00
27677b3870 Merge branch 'feature/distil' of github.com:mmphys/Grid into feature/distil 2019-02-06 09:25:39 +00:00
7423f5af1a Examples of how to access Grid Tensors 2019-02-06 09:25:24 +00:00
c1257208e2 Mres changes and gauge xform mat changes 2019-02-05 23:43:00 +00:00
74c38822ed Hadrons: 32 bit I/O directly in Lanczos module 2019-02-05 21:56:51 +00:00
318c64adc2 Hadrons: copyright update 2019-02-05 19:13:37 +00:00
d5b053f86f Hadrons: 1 propagator loop construction now using A2A vectors 2019-02-05 19:12:38 +00:00
c60e50e3cb Hadrons: copyright update 2019-02-05 18:55:24 +00:00
08d8b1d5fb Hadrons: 4-quark eye 3-pt contractions 2019-02-05 18:53:20 +00:00
21d6dbe0b6 Merge branch 'feature/distil' of github.com:mmphys/Grid into feature/distil 2019-02-05 17:32:39 +00:00
1ee84509b5 added baryons project - not working yet 2019-02-05 17:32:26 +00:00
57e57d162f Removed Eigen::DontAlign attribute 2019-02-05 12:50:28 +00:00
90d6d28547 Hadrons: non-eye weak 3pt fix 2019-02-05 11:35:10 +00:00
5b0870bb19 Added Scalar_ length and Scalar_Unit_Size to Perambulator file for validation 2019-02-05 09:07:05 +00:00
7f5354630a Updated perambulator binary format to save payload in big endian format on disk 2019-02-04 23:07:59 +00:00
9c31305b8d Hadrons: test cleaning 2019-02-04 21:26:25 +00:00
2eb584fdf0 Hadrons: 4-quark non-eye 3-pt contractions 2019-02-04 21:24:07 +00:00
6b46834af8 Hadrons: archiving unmaintained or exotic modules 2019-02-04 21:23:30 +00:00
3692c7f1ef Hadrons: type alias cleaning and global correlator class (need to propagate) 2019-02-04 21:21:51 +00:00
0cf94587cd array with all gammas for convenience 2019-02-04 21:20:16 +00:00
008ac6b5ae Perambulator is read back from disk if it exists instead of being created 2019-02-04 12:06:32 +00:00
c7aa4e0c1f Perambulator filename can be specified in xml. NB: Perambulator binary format now includes data size in bytes to avoid type mismatches. 2019-02-04 11:30:30 +00:00
43bd918a47 Logging tweak 2019-02-03 21:48:50 +00:00
7eda54bb87 Only write indices with dimension!=1 2019-02-03 20:58:58 +00:00
bd75b843fa Added checksum to data 2019-02-03 20:31:42 +00:00
8865bf5d7c Implemented perambulator read/write ... but in binary format. Will switch to Hdf5 when I have Antonins feedback 2019-02-03 17:05:19 +00:00
caabbcd951 minor change 2019-02-01 17:50:18 +00:00
48528c5b1d Merge branch 'feature/distil' of github.com:mmphys/Grid into feature/distil
Added index names to Perambulator
2019-02-01 15:31:27 +00:00
f7b90a0c14 Added index names to perambulator 2019-02-01 15:20:35 +00:00
a9848becb0 unsmeared sinks can now be computed - new test program available 2019-02-01 13:23:42 +00:00
7cc13f48d5 added some TODO comments; needs discussion 2019-01-31 16:54:11 +00:00
b6b267fd4b Fixed new test parameters 2019-01-31 15:11:12 +00:00
9671a61bb2 Merge branch 'feature/distil' of github.com:mmphys/Grid into feature/distil 2019-01-31 15:07:45 +00:00
d7dc617746 Switched perambulator to use Eigen::Tensor (file write temporarily excluded) 2019-01-31 15:06:52 +00:00
68868c83ff Merge branch 'develop' of https://github.com/paboyle/Grid into develop 2019-01-31 14:46:56 +00:00
32cb2e1a9a Merge branch 'feature/distil' of github.com:mmphys/Grid into feature/distil 2019-01-31 13:01:31 +00:00
3d31113337 added test t5 to compute meson fields of different quarks. Different nvec are allowed. 2019-01-31 13:01:16 +00:00
48b6f7e6ad Changed PerambLight<FIMPL> to PerambLight<GIMPL> 2019-01-31 12:37:00 +00:00
0da411fe60 LapEvec fixes 2019-01-31 12:28:38 +00:00
d7b9ed199d PerambLight fixes 2019-01-31 12:24:32 +00:00
7e74f7bec4 tsrc != 0 now works 2019-01-31 11:35:05 +00:00
dae7b30b92 Merge branch 'feature/distil' of github.com:mmphys/Grid into feature/distil 2019-01-30 21:16:30 +00:00
f7e4661ca0 Fixed grid3d leak in PerambLight 2019-01-30 21:16:09 +00:00
7b66197534 meson fields are now the same 2019-01-30 18:03:34 +00:00
c3273eff20 agreement up to laph vectors 2019-01-30 11:20:22 +00:00
67a3d7aeed added debug output, perambulators now agree up to 8 digits 2019-01-29 16:24:59 +00:00
d8831fe925 changed parameters to match Test_Distil 2019-01-29 13:40:26 +00:00
c7ceff6a21 Switched to Gauge field (GIMPL) 2019-01-28 12:28:35 +00:00
5580b3a7d1 bugfix in DistilVectors 2019-01-28 12:24:47 +00:00
33d8fb2dd9 Default 2019-01-25 19:21:12 +00:00
9f6f776460 ensured there is a default test to run 2019-01-25 19:14:22 +00:00
84fe36d084 meson functions work up to the point of being saved 2019-01-25 17:26:43 +00:00
3438dde8df test prog now computes everything up to meson fields 2019-01-25 15:19:18 +00:00
aea49bc349 Merge branch 'feature/distil' of github.com:mmphys/Grid into feature/distil 2019-01-25 13:44:30 +00:00
9ef6f9878e test works up to perambulators now 2019-01-25 13:44:19 +00:00
708ca8585a Merge branch 'feature/distil' of github.com:mmphys/Grid into feature/distil 2019-01-25 13:26:56 +00:00
d15bf4b8e1 Added trajectory number to output file 2019-01-25 13:26:48 +00:00
7496da0987 bugfix in PerambLight 2019-01-25 13:08:56 +00:00
2568f5b925 bugfix in PerambLight 2019-01-25 12:37:18 +00:00
577cdf1d72 Simplified tests 2019-01-24 18:50:18 +00:00
f92ed659a7 Merge branch 'feature/distil' of github.com:mmphys/Grid into feature/distil 2019-01-24 16:30:28 +00:00
dfb7fb1d9f LapEvec test works on --grid 4.4.4.8 2019-01-24 16:30:13 +00:00
a4c1ab6147 all modules linked in test prog 2019-01-24 16:12:19 +00:00
cf85f0388d Still debugging eigenvector parameters 2019-01-24 13:26:05 +00:00
00b0f75b0d Eigenvectors created. Still need to correctly set parameters for test. 2019-01-24 12:44:06 +00:00
b45586e81c Discovered bug root cause. setup() is called multiple times. Now ready to copy-paste the LapEvec code 2019-01-23 21:17:56 +00:00
2c7e6bf58b Merge branch 'feature/distil' of github.com:mmphys/Grid into feature/distil 2019-01-23 15:20:06 +00:00
7c5a06f6d0 Trying to work out why LapEvec constructor not being called 2019-01-23 15:19:51 +00:00
068ef85b05 Merge branch 'feature/distil' of github.com:mmphys/Grid into feature/distil 2019-01-23 15:08:24 +00:00
a6ab742fdb added perambs to test 2019-01-23 13:58:20 +00:00
2062a8d578 Merge branch 'feature/distil' of github.com:mmphys/Grid into feature/distil 2019-01-23 13:00:20 +00:00
3d3e8f4f9f Structured objects passed into LapEvec 2019-01-23 12:59:55 +00:00
2756f16a5e created test prog for perambs 2019-01-23 12:49:20 +00:00
d7908c33de moved hard-coded parameters in DistilVectors to module input 2019-01-23 11:32:53 +00:00
4cc2ebc9e4 moved hard-coded parameters to module input 2019-01-23 11:26:07 +00:00
b8afa7314c Merge branch 'feature/distil' of https://github.com/mmphys/Grid into feature/distil 2019-01-23 10:51:23 +00:00
be5605931c merge 2019-01-23 10:51:09 +00:00
09fa821510 Added remaining methods to Permabulator 2019-01-22 17:59:55 +00:00
f45d2d5dcc PerambLight done, but SliceShare and Write do not work yet 2019-01-22 15:52:26 +00:00
0a82fae45c moved perambulator definition to shared header file 2019-01-22 15:06:45 +00:00
46b05aa9c5 cleaned up, deleted commented out old code 2019-01-22 13:48:44 +00:00
813c1ab1f1 Merge branch 'feature/distil' of https://github.com/mmphys/Grid into feature/distil 2019-01-22 13:28:09 +00:00
b1c27a141d DistilVectors complete and compiling - not tested at all! 2019-01-22 13:27:51 +00:00
81bb361299 Test program ready 2019-01-22 13:19:39 +00:00
79d533550d continued on DistilVectors.hpp 2019-01-21 16:45:31 +00:00
b8c106f320 working on DistilVectors, initialisation done and compiles 2019-01-21 16:04:18 +00:00
b74492a805 Merge branch 'feature/distil' of github.com:mmphys/Grid into feature/distil 2019-01-21 10:40:01 +00:00
c93a43f158 Added test program 2019-01-21 10:39:28 +00:00
0ff410ae19 copied perambulator code into PerambLight.hpp 2019-01-18 17:47:41 +00:00
ced30b61e2 added phi vectors - still commented out and does not compile otherwise 2019-01-18 16:38:13 +00:00
2b782df290 Merge branch 'feature/distil' of https://github.com/mmphys/Grid into feature/distil 2019-01-18 15:58:51 +00:00
f0f1ba0307 uses evec4d now 2019-01-18 15:58:10 +00:00
2343e621e6 Bananas 2019-01-18 13:32:27 +00:00
2568504821 small change 2019-01-18 13:23:03 +00:00
b821dde020 Initial version 2019-01-18 13:14:28 +00:00
ae3b053334 Initial version 2019-01-18 13:10:02 +00:00
9b6ddb6e54 Adding a norm of a general field check, so that for things other than gauge configs there is an analogue of plaquette norm.
Improve argument checking in the BinaryIO.h, as there looks to be some corruption issue intermittently on tesseract jobs.
Not clear where the root bug is.
2019-01-16 22:35:58 +00:00
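
A minimal sketch of the idea with stand-in types (not Grid's BinaryIO API): store the global 2-norm next to the payload and compare it on read-back, much as the plaquette is checked for gauge configurations.

    #include <algorithm>
    #include <cmath>
    #include <complex>
    #include <vector>

    // Global 2-norm of a field, here a flat vector of complex numbers.
    double fieldNorm2(const std::vector<std::complex<double>> &field)
    {
      double n = 0.0;
      for (const auto &z : field) n += std::norm(z);
      return n;
    }

    // Relative comparison of the stored and recomputed norms.
    bool normCheck(double expected, double actual, double tol = 1.0e-8)
    {
      return std::abs(expected - actual) <= tol * std::max(std::abs(expected), 1.0);
    }
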
447b772136 Merge remote-tracking branch 'upstream/develop' into feature/kl2QED 2019-01-07 15:09:18 +00:00
c5e081d69c Re-Merge branch 'develop' into feature/gpu-port
Pull in Regensburg MultiGrid pull request
2019-01-03 01:50:16 +00:00
535a6aaf05 Update todo list 2019-01-02 22:07:51 +00:00
91a7fe247b Merge branch 'DanielRichtmann-feature/wilsonmg' into develop 2019-01-02 14:40:31 +00:00
8a1be021d3 Merge branch 'feature/wilsonmg' of https://github.com/DanielRichtmann/Grid into DanielRichtmann-feature/wilsonmg 2019-01-02 14:39:59 +00:00
e73b909a48 Make tests run past nvcc. Different NVCC versions proving tricky to keep happy. This is 9.2 2019-01-02 12:05:30 +00:00
a4d9200293 Fixing AVX 512 instantiation error. Need to move to extern templates urgently. 2019-01-02 00:27:07 +00:00
350508bdb3 pugixml problem 2019-01-01 16:38:54 +00:00
38852737e4 No compile fix on clang 2019-01-01 15:55:13 +00:00
802404c78c Remove warnings under NVCC and move parallel_for to thread-loop 2019-01-01 15:08:09 +00:00
0e9b591c1c NVCC warning suppression 2019-01-01 15:07:47 +00:00
c43a2b599a GPU support 2019-01-01 15:07:29 +00:00
8c91e82ee8 GPU clean up, remove parallel_for. Split into accelerator_loop, thread_loop
cases, and collides with parallel_for in thrust
2019-01-01 15:06:46 +00:00
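
An illustrative sketch of that split with hypothetical names (not Grid's actual macros): the same per-site body is driven either by host OpenMP threads or by a CUDA kernel launch. The device path assumes the body is __device__-callable and that nvcc's extended-lambda support is enabled.

    #include <cstddef>

    // Host path: plain threaded loop over sites.
    template <typename Body>
    void thread_loop(std::size_t N, Body body)
    {
    #pragma omp parallel for
      for (long i = 0; i < static_cast<long>(N); ++i) body(i);
    }

    #ifdef __CUDACC__
    // Device path: one thread per site.
    template <typename Body>
    __global__ void accelerator_kernel(std::size_t N, Body body)
    {
      std::size_t i = blockIdx.x * std::size_t(blockDim.x) + threadIdx.x;
      if (i < N) body(i);
    }

    template <typename Body>
    void accelerator_loop(std::size_t N, Body body)
    {
      const int threads = 128;
      const int blocks  = int((N + threads - 1) / threads);
      accelerator_kernel<<<blocks, threads>>>(N, body);
    }
    #endif
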
9d866d062a GPU support improvements 2019-01-01 15:05:03 +00:00
3a4e397e72 Deprecating JSON, too hard to support under NVCC 2019-01-01 15:04:33 +00:00
2b6cfe555f Disable JSON on NVCC. Maybe unsupport JSON full stop. XML and JSON are too many formats in my view. 2019-01-01 15:03:50 +00:00
7df58dd883 Photon syntax gave problems with NVCC 2019-01-01 15:03:29 +00:00
4bf86ae60a NVCC clean up 2019-01-01 15:02:50 +00:00
07ee87ff5a GPU happy. Still need to prevent hand kernels being callable under NVCC 2019-01-01 15:00:33 +00:00
0c2498fe2f Explicit instantiation needed for NVCC 2019-01-01 13:55:12 +00:00
ad2e65dad5 GPU related updates 2019-01-01 13:54:40 +00:00
715babeac8 GPU reductions first cut; use thrust, non-reproducible. Inclusive scan can fix this if desired.
Local reduction to LatticeComplex and then further reduction.
2019-01-01 13:53:37 +00:00
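
A minimal thrust-based sketch of such a per-site sum (assumed flat device storage, not Grid's lattice layout); because the parallel reduction reassociates floating-point additions, run-to-run bit-reproducibility is not guaranteed, which is the caveat noted above.

    #include <thrust/complex.h>
    #include <thrust/device_vector.h>
    #include <thrust/functional.h>
    #include <thrust/reduce.h>

    // Sum all site-local complex values on the device.
    thrust::complex<double>
    sumSites(const thrust::device_vector<thrust::complex<double>> &v)
    {
      return thrust::reduce(v.begin(), v.end(),
                            thrust::complex<double>(0.0, 0.0),
                            thrust::plus<thrust::complex<double>>());
    }
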
3eae9a9e3f update NVCC flags 2019-01-01 13:49:15 +00:00
186aad065f Roll forward Eigen in attempt to make CUDA happy 2019-01-01 13:48:32 +00:00
bf5685eb11 Update todo list 2019-01-01 13:48:06 +00:00
4a96c067ae Remove warnings from NVCC 2019-01-01 13:43:09 +00:00
ab063f33c0 Offload the linear combinations in CG 2019-01-01 13:42:13 +00:00
9efcc535bc Cleaner drop from CUDA mode around Eigen includes. Remains difficult to let Eigen compile under nvcc with version issues. 2019-01-01 13:39:10 +00:00
231b61d012 std::array by default 2019-01-01 13:37:35 +00:00
e898f4f0b0 Whitespace 2019-01-01 13:36:55 +00:00
d5db5f5242 Wrong dimension used in a temporary 2018-12-20 10:49:45 +00:00
2fcedb13dd Step size modification in HMC; ICC happy thread pragmas 2018-12-20 09:32:33 +00:00
35ed1defac Passes make check now single and double compile 2018-12-19 11:09:32 +00:00
4e95accf80 Namespace fix 2018-12-15 21:46:17 +00:00
fd66325321 pure QED test and copyright update 2018-12-14 17:39:11 +00:00
c637c0c48c James H.'s code for general size Wilson loops 2018-12-14 17:37:09 +00:00
c4b472176c Photon code fix 2018-12-14 17:36:38 +00:00
422764757d Updates in tests to make all of Grid compile 2018-12-14 16:55:54 +00:00
943fa48ce4 Hadrons: Kl2 contraction using sequential propagators 2018-12-14 13:45:30 +00:00
fa97a56fdd Hadrons: sequential Aslash insertion on propagator 2018-12-14 12:40:26 +00:00
856476a890 big cleanup of the Photon class + QED Coulomb gauge 2018-12-13 21:52:38 +00:00
afc462bd58 Bracketing issue in macro 2018-12-13 10:53:22 +00:00
b57a4d32aa Merge branch 'develop' into feature/gpu-port 2018-12-13 05:11:34 +00:00
c509bd3fe2 Merge branch 'feature/resilient-io' into develop 2018-12-01 12:57:43 +00:00
49b934310b resilient I/O fix 2018-11-27 20:17:09 +00:00
01e8cf5017 Merge branch 'develop' into feature/resilient-io 2018-11-27 19:09:59 +00:00
12f4499502 HDF5 serialiser fix 2018-11-27 19:09:50 +00:00
05aec72887 Hadrons: application parameter for resilient I/O 2018-11-27 18:46:43 +00:00
136d3802cb binary parallel IO can do read tests and eventually re-write in case of failure 2018-11-27 18:38:24 +00:00
a4c55406ed checksummed HDF5 IO 2018-11-27 17:43:19 +00:00
c7f33ca2a8 Revert "Hadrons: A2A vector write can fail and retry"
This reverts commit 10fc263675.
2018-11-27 17:27:26 +00:00
0e3035c51d Revert "optional non-fatal checksum fail in Lime lattice read (with error codes)"
This reverts commit bccfd4cbb3.
2018-11-27 17:27:20 +00:00
10fc263675 Hadrons: A2A vector write can fail and retry 2018-11-26 19:47:03 +00:00
bccfd4cbb3 optional non-fatal checksum fail in Lime lattice read (with error codes) 2018-11-26 19:45:51 +00:00
0b50d4a328 log time fix 2018-11-23 15:51:27 +00:00
b74940b3d4 Merge branch 'develop' of https://github.com/paboyle/Grid into develop 2018-11-23 14:08:29 +00:00
e232257cb6 Hadrons: A2AAslashVector module cleaning and renaming 2018-11-22 19:43:49 +00:00
09451b5e48 Merge branch 'develop' of github.com:paboyle/Grid into develop 2018-11-22 15:45:24 +00:00
6364aa8acf Merge branch 'feature/contractor' into develop 2018-11-22 15:44:46 +00:00
b9e84ecab7 Hadrons: minor code cleaning 2018-11-22 15:44:30 +00:00
41032fef44 Optional RW mode for Hdf5Reader 2018-11-21 18:36:50 +00:00
d77bc88170 Optional support for faster CRC32C checksum through Intel IPP 2018-11-19 17:21:53 +00:00
494b3c9e57 Hadrons: contractor more IO fix 2018-11-19 16:26:53 +00:00
2ba19a9e07 Hadrons: contractor IO fix 2018-11-19 16:17:51 +00:00
5d7cc29eaf Hadrons: contractor token @traj@ for trajectory number in input file 2018-11-19 16:04:01 +00:00
f22a27d7f9 Hadrons: contractor trajectory loop and file output 2018-11-19 15:45:04 +00:00
33a0bbb17b Const correctness 2018-11-19 11:27:57 +00:00
f592ec8baa Hadrons: contractor performance fix 2018-11-16 20:59:49 +00:00
8b007b5c24 Hadrons: remove the use of OpenMP reductions 2018-11-16 20:00:29 +00:00
17b3f47b1e Merge branch 'develop' of https://github.com/paboyle/Grid into develop 2018-11-16 16:32:12 +00:00
9bb170576d Merge pull request #177 from guelpers/develop
Hadrons module to electrify a gauge
2018-11-14 16:04:09 +00:00
a7e3977b75 Merge remote-tracking branch 'upstream/develop' into develop 2018-11-13 14:56:23 +00:00
995f20e45d Hadrons: some renamings 2018-11-13 14:54:48 +00:00
d058b4e681 Merge branch 'feature/seqA2A' into develop 2018-11-13 13:27:24 +00:00
8e0d2f3402 Hadrons: support for twisted boundary conditions 2018-11-12 17:16:18 +00:00
2ac57370f1 Hadrons: contractor translation average normalisation 2018-11-12 16:04:35 +00:00
344e832a4e Hadrons: contractor faster transpose and finer timings 2018-11-12 15:59:54 +00:00
cfe281f1a4 Hadrons: diskvectors measure hash performance in debug output 2018-11-12 15:59:11 +00:00
f5422c7334 Hadrons: more contractor instrumentation 2018-11-09 16:23:53 +00:00
68c76a410d Hadrons: more contractor improvements 2018-11-08 19:24:29 +00:00
69b6ba0a73 Hadrons: contractor fixes and improvements 2018-11-08 18:46:28 +00:00
65349b07a7 Hadrons: simpler A2A perf functions 2018-11-08 18:44:44 +00:00
7cd9914f0e Hadrons: automatically resize output in MKL A2A matrix kernels 2018-11-08 17:40:57 +00:00
f3f24b3017 Optional twisted BCs added, in "DoubleStore" for WilsonImpl.
Untested, but doesn't affect answers when twists are all zero; zero twists are the default behaviour
for ImplParams.
2018-11-08 12:55:25 +00:00
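Note: for context, the standard twisted boundary condition convention (not a quote of the WilsonImpl code) is

    \psi(x + L_\mu\hat{\mu}) = e^{i\theta_\mu}\,\psi(x)
    \quad\Longleftrightarrow\quad
    U_\mu(x) \;\to\; e^{\,i\theta_\mu/L_\mu}\, U_\mu(x) \quad \text{for every link in direction } \mu ,

so setting all \theta_\mu = 0, the ImplParams default, reproduces ordinary periodic boundary conditions, consistent with the statement that zero twists leave the answers unchanged.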
8ef4657805 Merge remote-tracking branch 'upstream/develop' into feature/seqA2A 2018-11-08 09:00:06 +00:00
78c1086f8b Hadrons: sequential Aslash insertion and propagator on A2A vector 2018-11-08 08:58:09 +00:00
68c13045d6 Added a test for Felix and Michael to look at 2018-11-07 23:40:15 +00:00
e9b6f58fdc Allow shrinking machine in orthog direction for extract slice local 2018-11-07 23:39:18 +00:00
839605c45c Verbose reduce 2018-11-07 23:38:46 +00:00
1ff1422e07 Hadrons: contractor lighter output 2018-11-07 20:02:53 +00:00
32376f0437 Hadrons: contractor performances 2018-11-07 19:59:11 +00:00
0c6e581336 Hadrons: first stab at general contraction code, needs serious testing 2018-11-07 19:16:55 +00:00
e0a79a5bbf Hadrons: PR#177: Electrify gauge: Single Precision fix 2018-11-07 15:01:22 +00:00
4c016cc1a4 Merge remote-tracking branch 'upstream/develop' into develop 2018-11-07 14:03:12 +00:00
2205b1e63e Add CXX to grid-config 2018-11-07 13:32:46 +00:00
6f421c7a6f Block solver in the SchurRedBlack plus timing report cleaner 2018-11-07 12:26:56 +00:00
b62b9ac214 Patch to broken assertion 2018-11-06 22:18:17 +00:00
88d9922e4f Hadrons: fast A2A matrix contraction kernels 2018-11-06 19:49:09 +00:00
9734e3ee58 Hadrons: (somewhat) faster build 2018-11-06 19:47:41 +00:00
8c3a599148 Block solver test 2018-11-06 16:44:58 +00:00
4a47b11876 Block CG improvements to develop 2018-11-06 12:49:05 +00:00
f1382cf81d Merge remote-tracking branch 'upstream/develop' into develop 2018-11-06 10:29:52 +00:00
85699daef2 Hadrons: Module to electrify a gauge field 2018-11-06 10:27:18 +00:00
1651111d18 Hadrons: final, portable form of the contractor benchmark 2018-11-05 21:29:13 +00:00
1ed4ea344d Merge branch 'develop' into feature/contractor 2018-11-05 11:42:02 +00:00
8f514ae550 Hadrons: Lanczos 32bit IO 2018-11-05 11:41:10 +00:00
4a7415e83c Hadrons: contractor benchmark update 2018-10-23 21:00:54 +01:00
0ffcfea724 Hadrons: contractor benchmark 2018-10-23 17:08:16 +01:00
febe41cc1d Hadrons: improvement on PR #176 2018-10-23 12:48:15 +01:00
62173395b8 Merge pull request #176 from guelpers/develop
Hadrons: full volume noise source for A2A
2018-10-23 12:29:35 +01:00
b48611b80f Merge branch 'develop' into feature/contractor 2018-10-22 18:27:18 +01:00
6b559d68aa Hadrons: eigenpack converter can do test reads 2018-10-22 11:10:18 +01:00
1982cc58dd Hadrons: A2A vectors I/O filename fix 2018-10-21 01:20:05 +01:00
2e2e5ce596 SciDAC I/O print data checksums 2018-10-19 20:36:32 +01:00
7d84dca8e9 Merge branch 'develop' into feature/contractor 2018-10-18 23:46:58 +01:00
2d3916418e Hadrons: more precision fix 2018-10-18 23:45:13 +01:00
21304e2139 Hadrons: fix to allow single-prec build again 2018-10-18 19:58:50 +01:00
7b850eb48b Merge branch 'develop' of github.com:paboyle/Grid into develop 2018-10-18 19:46:25 +01:00
a3ace57e01 Hadrons copyright update 2018-10-18 19:46:11 +01:00
b1c3cbe35e Hadrons: A2A vectors I/O 2018-10-18 19:44:58 +01:00
f31d6bfec2 Hadrons: contractor cleaning and better error check 2018-10-18 17:50:35 +01:00
a7cfa26901 Hadrons: reverse A2A matrix load for better DiskVector cache reuse 2018-10-18 17:50:16 +01:00
f333f3e575 Hadrons: DiskVector save-on-eviction and faster CRC32 for Eigen matrices 2018-10-18 17:48:25 +01:00
2b4e253473 Merge branch 'develop' of github.com:paboyle/Grid into develop 2018-10-17 20:28:20 +01:00
0ba3d469c7 Benchmark IO in single and double precision 2018-10-17 20:27:34 +01:00
f709329d96 Hadrons: first version of a contractor utility 2018-10-17 20:26:48 +01:00
f05b25dae4 Hadrons: A2AMatrix load 2018-10-17 20:26:26 +01:00
3e1d268fa3 Hadrons: DiskVector optimisation 2018-10-17 20:25:32 +01:00
109c74bed8 Hadrons: full volume noise source for A2A 2018-10-16 14:56:12 +01:00
3023287fd9 Hadrons: 3-index RO access to Eigen disk vector 2018-10-16 14:44:14 +01:00
b3d6805638 Merge branch 'feature/contractor' into develop 2018-10-16 11:29:37 +01:00
291bc2a1f0 IO benchmark on a list of directories 2018-10-15 17:25:08 +01:00
2f368c33fc Hadrons: copyright update 2018-10-15 15:51:45 +01:00
9592115341 Hadrons: NPR and gauge fixing linking fix 2018-10-15 15:49:42 +01:00
24c07694bc Mixed precision now supported in MADWF 2018-10-14 00:22:52 +01:00
f0229025e2 MADWF working across a range of actions 2018-10-13 19:55:03 +01:00
6de9a45a09 NPR first cut by Julia Kettle 2018-10-12 11:00:58 +01:00
03c3d495a2 First cut (non functional NPR code) developed by Julia Kettle 2018-10-12 10:59:33 +01:00
49f25e08e8 PauliVillars based 4D -> 5D reconstruction with Fourier Accelerated PV inverse
by Christoph. Differs from the one by Rudy in BFM since it vectorises the twisted
4D solves in pairs.
2018-10-11 12:35:32 +01:00
efc0c65056 Hadrons: DiskVector Eigen specialisation with binary I/O and sha256 correctness check 2018-10-08 19:02:00 +01:00
936eaac8e1 function to get the sha256 string 2018-10-08 19:00:50 +01:00
fe6a372f75 Hadrons: fixes and cleaning in the scalar SU(N) part 2018-10-08 15:14:08 +01:00
dac9f8622e Merge branch 'develop' of https://github.com/paboyle/Grid into develop 2018-10-08 10:12:11 +01:00
148fc052bd Hadrons: Aslash field, tested 2018-10-05 21:04:10 +01:00
c073341a10 Hadrons: more cleaning 2018-10-05 19:50:41 +01:00
78299daaac Hadrons: code cleaning 2018-10-05 16:47:52 +01:00
866449c804 Hadrons: integration of Peter's A2Autils 2018-10-05 16:42:44 +01:00
d69a52079f Merge remote-tracking branch 'gh/feature/a2a-integration' into feature/aslashfield 2018-10-05 15:39:09 +01:00
9f4f8a14a3 Hadrons: code cleaning 2018-10-05 15:38:01 +01:00
f6593dc881 Hadrons: A2A block performance counter fix 2018-10-05 15:11:01 +01:00
b46d31d4b6 MKL enable on Eigen if Grid is configured to use MKL 2018-10-05 11:29:40 +01:00
58567fc650 Hadrons: big update abstracting the block meson field routine, tested & working, performance counters broken and code dirty 2018-10-04 20:01:49 +01:00
7c57cac670 Adding A2A utils class for containing kernels. 2018-10-04 18:57:41 +01:00
d0b21bf1ff Merge branch 'feature/eigenpack-convert' into develop 2018-10-04 18:26:45 +01:00
a1825d1f59 Hadrons: final fix for multiprec eigenpacks 2018-10-04 18:25:26 +01:00
5a3e83ff7b Hadrons: new layer in eigenpacks class hierarchy 2018-10-03 14:45:01 +01:00
52569d98d8 Hadrons: multiprec eigenpack I/O fix 2018-10-03 14:24:43 +01:00
b351103c29 Hadrons: eigenpack load module with 32bit I/O 2018-10-02 21:07:56 +01:00
118cca4681 Hadrons: linking fix 2018-10-02 20:08:49 +01:00
44de727cd2 Hadrons: eigenpack support for multiprecision I/O 2018-10-02 19:51:09 +01:00
888ebc3cf9 Hadrons: better name for the EP converter 2018-10-02 15:22:18 +01:00
6c031a1b81 Merge branch 'feature/eigenpack-convert' into develop 2018-10-02 14:57:30 +01:00
02aa4bd762 Hadrons: cleaner eigenpack convert log 2018-10-02 13:43:25 +01:00
9aafa8ee60 Hadrons: eigenpack converter generalised for RB/5d grids 2018-10-02 13:34:17 +01:00
430b98b354 fix previous commit 2018-10-02 13:12:46 +01:00
84189867ef Hadrons: eigenpack converter with RB grids (to be generalised) 2018-10-02 13:05:05 +01:00
4ab8cfbe2a Hadrons: more verbose eigenpack convert 2018-10-02 12:24:45 +01:00
aadd9f4468 Eigenpack converter, to be tested, HadronsXmlRun moved to Utilities directory 2018-10-02 00:02:34 +01:00
8fbb27ce13 Hadrons: less code duplication in eigenpack IO 2018-10-01 20:15:21 +01:00
21bba95909 Hadrons: eigenpack metadata is no longer ignored when reading 2018-10-01 19:33:45 +00:00
6448fe7121 More flexible XML control in Lime files 2018-10-01 19:32:50 +01:00
2458a11d1d Hadrons: precision cast module 2018-09-29 18:00:08 +01:00
d0ca7c3fe6 Hadrons: big update for getGrid, grids are now created automatically 2018-09-29 17:55:19 +01:00
57f899d79c Merge branch 'develop' of github.com:paboyle/Grid into develop 2018-09-29 15:50:59 +01:00
e881a0c157 Merge commit 'beed527ea37c90fd5e19b82d326eb8adc8eba5ff' into develop 2018-09-29 15:50:21 +01:00
f411657118 JSON update 2018-09-29 15:48:05 +01:00
7458c6174b Use operator() for indexing internal indices 2018-09-27 06:42:02 +01:00
21b269d0f9 Move the Grid.pdf out of a deep directory 2018-09-27 06:36:25 +01:00
083af92ac2 Update from Chulwoo; high-level link for Grid.pdf in documentation 2018-09-27 06:30:40 +01:00
2c162577b5 HMC documentation 2018-09-25 23:28:17 +01:00
b1c4e96382 Updates to actions etc.. 2018-09-24 22:10:30 +01:00
a55c6f34f3 Updated docs 2018-09-24 15:44:35 +01:00
beed527ea3 Carleton's chapter 2018-09-24 15:09:51 +01:00
eaa633cf69 Merge branch 'develop' of github.com:paboyle/Grid into develop 2018-09-21 18:16:22 +01:00
c632455129 Hadrons: meson field IO fix 2018-09-21 18:16:01 +01:00
c012899ed5 Hadrons: big update after templating of get/createGrid 2018-09-21 18:15:33 +01:00
8bab544c2f Updated manual pdf 2018-09-20 18:51:11 +01:00
76fc06a5dc Updates with todo from Carleton 2018-09-20 18:50:11 +01:00
d9de8fd5c9 Merge branch 'develop' of https://github.com/paboyle/Grid into develop 2018-09-17 12:19:47 +01:00
7e3647246c Merge branch 'master' of https://github.com/paboyle/Grid into develop 2018-09-17 12:19:20 +01:00
4af6c7e7aa Hadrons: copyright update 2018-09-14 12:51:48 +01:00
f60fbcfc4d Hadrons: mixed precision CG, to be tested 2018-09-14 12:47:55 +01:00
464c81706e Hadrons: defaults Impls for different precisions 2018-09-14 12:46:43 +01:00
adbdc4e65b Half comms not working on GPU yet, so disable. 2018-09-11 05:15:22 +01:00
e4deea4b94 Weird bug appears with Vector<Vector<>>.
"fix" with std::vector<Vector<>>

It lies in the face table code, but I think there is some latent problem.
Possibly in my allocator since it is caching, but could simplify or eliminate the caching
option and retest. One to look at later.
2018-09-11 04:36:57 +01:00
94d721a20b Comments on further topology discovery work 2018-09-11 04:20:04 +01:00
408130b808 Hadrons: header list fix 2018-09-10 17:38:54 +01:00
375edd1370 file forgotten in last commit 2018-09-10 17:37:29 +01:00
6d912f6c67 Hadrons: general guesser factory 2018-09-10 17:36:54 +01:00
6d1d28955e Guesser class is redundant, switching to LinearFunction 2018-09-10 17:35:54 +01:00
920b471761 Hadrons tests update 2018-09-10 15:32:13 +01:00
63c21767ba Hadrons: grids stored with hash of SIMD type (for mixed-precision setups) 2018-09-10 15:31:39 +01:00
7b6b712565 function to convert std::vector to string 2018-09-10 15:17:32 +01:00
35abd05ee9 mute Version.h cache creation 2018-09-10 15:16:59 +01:00
7bf82f5b37 Offload the face handling to GPU 2018-09-10 11:28:42 +01:00
f02c7ea534 Peer to peer on GPU's setup 2018-09-10 11:26:20 +01:00
bc503b60e6 Offloadable gather code 2018-09-10 11:21:25 +01:00
704ca162c1 Offloadable compression 2018-09-10 11:20:50 +01:00
b5329d8852 Protect against zero length loops giving a kernel call failure 2018-09-10 11:20:07 +01:00
f27b9347ff Better unquiesce MPI coverage 2018-09-10 11:19:39 +01:00
b4967f0231 Verbose and error trapping cleaner 2018-09-09 14:28:02 +01:00
6d0f1aabb1 Fix the multi-node path 2018-09-09 14:27:37 +01:00
f4bfeb835d Drop back to smaller Ls 2018-09-09 14:25:06 +01:00
394b7b6276 Verbose decrease 2018-09-09 14:24:46 +01:00
dd36e60f6a compilation fix for hypercube optimal communicator 2018-09-08 18:07:29 +01:00
cb6c548e21 Hadrons: code cleaning 2018-09-07 20:40:55 +01:00
02c4ccf621 Hadrons: diskvector debug message for writes 2018-09-07 20:33:49 +01:00
fd24588212 Merge branch 'develop' of github.com:paboyle/Grid into develop 2018-09-07 20:25:11 +01:00
b800bb3ecb Hadrons: disk vector cache policy to last touch 2018-09-07 20:24:48 +01:00
f8abd0978b Hadrons copyright update 2018-09-07 20:10:07 +01:00
12c7c493bf Hadrons: disk-based container 2018-09-07 20:04:54 +01:00
c7c9072313 Documentation 2018-09-06 16:01:42 +01:00
2bf3be5fae Hadrons: copyright and code cleaning 2018-09-04 18:25:10 +01:00
3a40e4fc69 Hadrons: scalar SU(N) 2-pt guard against negative momenta components 2018-09-04 18:24:07 +01:00
2e69e03f6f Hadrons: CosmHol configs IO module 2018-09-04 18:23:28 +01:00
a09f9bb528 Hadrons: code cleaning 2018-09-04 18:22:21 +01:00
f0e341d726 Hadrons: module list generator fix 2018-09-04 18:22:04 +01:00
6f09df0daf Hadrons: A2A matrix IO fix 2018-09-02 01:46:22 +01:00
26cee605b8 Hadrons: copyright update 2018-09-01 21:30:30 +01:00
b3fa18c229 copyright script never removes authorship 2018-09-01 21:29:58 +01:00
2940c9bcfd Hadrons: dedicated IO class for A2A matrices 2018-09-01 21:09:01 +01:00
0bb532f72b more explicit clean git tree message 2018-09-01 20:02:18 +01:00
fada2aa0f7 Hadrons: precision fix 2018-09-01 20:00:12 +01:00
c193e4e675 Aslash expression in Mathematica notebook 2018-09-01 19:59:58 +01:00
3ee682f676 more Version.h fine tuning 2018-09-01 19:58:16 +01:00
d85ec3bac2 build system minor fix 2018-09-01 19:54:21 +01:00
b52d8eb1e3 better Version.h implementation 2018-09-01 19:49:13 +01:00
ee630d2e8b Hadrons: smearing plaquette output 2018-09-01 17:38:32 +01:00
2f0af79869 Hadrons: scalar SU(N) NPR update 2018-09-01 17:36:35 +01:00
1b7fb79ec0 CI fix 2018-08-28 18:26:37 +01:00
2db1a4628c build system minor fix 2018-08-28 18:26:30 +01:00
6aa047d842 Hadrons module template fix 2018-08-28 17:17:00 +01:00
8779c32ae1 Merge branch 'feature/hadrons' into develop 2018-08-28 17:10:33 +01:00
c527dc3358 CI fix 2018-08-28 17:10:08 +01:00
6b42577b6b gitignore update 2018-08-28 16:58:37 +01:00
fb3596f968 Hadrons: precision fixes 2018-08-28 16:58:23 +01:00
f3a0158213 code cleaning 2018-08-28 16:56:07 +01:00
0250aa9347 file committed in error 2018-08-28 16:55:48 +01:00
3df6743396 more build system cleaning and patch for bad include in Eigen 2018-08-28 16:54:57 +01:00
fb7d021b9d Hadrons: moving Hadrons to root directory, build system improvements 2018-08-28 15:00:40 +01:00
5f206df775 Hadrons: meson field cache friendly cache copy 2018-08-15 17:29:44 +01:00
7727e81113 Hadrons: slight improvement on previous commit 2018-08-14 20:18:47 +01:00
c4115544a5 Hadrons: application option to save graph 2018-08-14 20:03:53 +01:00
08c47328ba Hadrons: meson field kernel performance for each block 2018-08-14 17:35:42 +01:00
09001aedca Hadrons: meson fields saved in single precision 2018-08-14 17:19:38 +01:00
2c67304716 Hadrons: meson field code cleaning 2018-08-14 17:00:05 +01:00
dc6d8686de Hadrons: meson field chunked HDF5 IO 2018-08-14 16:40:29 +01:00
cc2780bea3 Hadrons: meson field parallel IO 2018-08-14 14:55:13 +01:00
6e5a2b7922 fix previous commit 2018-08-14 14:07:54 +01:00
f4878d3a13 Hadrons: meson field threaded cache copy 2018-08-14 14:02:37 +01:00
89d2fac92e Hadrons: copyright update 2018-08-14 12:19:14 +01:00
f2d3e41cf2 Hadrons: meson field: HDF5 perf, gamma input and Eigen tensors allocated by Grid 2018-08-13 20:18:33 +01:00
3c27bb36d4 Hadrons: direct timer access 2018-08-13 20:17:45 +01:00
603d59f389 Hadrons: code cleaning 2018-08-13 20:17:24 +01:00
07a0ef3f95 Hadrons: global measurement time profile 2018-08-13 16:44:57 +01:00
503259f9c9 Hadrons: meson field HDF5 IO done and tested 2018-08-12 16:52:12 +01:00
5be6a51044 Hadrons: meson fields code cleaning and momentum phases 2018-08-11 15:13:43 +01:00
ac69f042b1 Hadrons: module RNG uniquely seeded with <run id> + <module name> + <trajectory> 2018-08-10 18:27:00 +01:00
133d5c2e34 Merge branch 'develop' into feature/hadrons 2018-08-10 16:36:40 +01:00
2a94244890 configure: --with-openssl option and LIME is now mandatory 2018-08-10 16:36:11 +01:00
a15a2dfd29 Merge branch 'develop' into feature/hadrons 2018-08-10 16:08:22 +01:00
093bb02633 Hadrons: execute message for time diluted noise 2018-08-10 16:07:48 +01:00
99a85116f8 Hadrons: module and VM instrumentation 2018-08-10 16:07:30 +01:00
27cdb79063 SHA used to seed from a unique string 2018-08-10 15:11:01 +01:00
f4cbfd63ff Hadrons: more meson field cleaning, needs IO now 2018-08-09 18:39:58 +01:00
2b794b6aa7 Hadrons: module generating random lattices for testing purposes 2018-08-09 17:16:42 +01:00
d0244a059f Hadrons: cleaning cleaning... 2018-08-09 00:38:17 +01:00
dcdd891d7d Hadrons: precision fix 2018-08-09 00:13:53 +01:00
6d2df9de79 Hadrons: even more cleaning 2018-08-08 23:15:55 +01:00
41d4e37bae Hadrons: more cleaning 2018-08-08 19:04:44 +01:00
ee5c0cc9b6 Hadrons: code cleaning 2018-08-08 18:45:06 +01:00
0a4020eb4d Hadrons: copyright fix 2018-08-07 18:42:52 +01:00
b2de26589b Hadrons: code cleaning and copyright update 2018-08-07 18:40:48 +01:00
0677adb4dd Hadrons: overhaul of A2A for production 2018-08-07 18:27:59 +01:00
231cc95be6 Hadrons: eigenvalues precision fix 2018-08-07 18:27:19 +01:00
639f9cab82 Hadrons: schedule loading fix 2018-08-07 18:26:49 +01:00
4eac4e575e Hadrons: meson fields indentation fix 2018-08-06 12:42:25 +01:00
3f0f92cda6 Hadrons: first cleaning/integration of A2A/meson fields 2018-08-06 12:11:52 +01:00
d2650e89bd Hadrons: VM exception for object type (solves infinite loop in scheduler) 2018-08-06 12:11:00 +01:00
2962123cba Hadrons: diluted noise polish 2018-08-05 01:44:37 +01:00
830168ec37 Hadrons: first try at diluted noise class (tested) 2018-08-04 12:32:58 +01:00
584c921ca0 Eigen support fix (use of Grid as a library was broken) 2018-08-03 21:07:58 +01:00
81347b4d16 gitignore update 2018-08-03 19:58:52 +01:00
2cfa0b0e6b Merge pull request #174 from fionnoh/a2a_basics
A2A basics
2018-08-03 16:32:14 +01:00
fa5dee76b1 Included Peter's A2AMeson field and Eigen changes 2018-08-03 15:15:54 +01:00
8d1679c6b8 Merge branch 'feature/hadrons-a2a' of https://github.com/paboyle/Grid into a2a_basics 2018-08-03 15:12:24 +01:00
3791a38f7c Optimised the MesonField a bit more 2018-08-01 08:27:27 +01:00
142f7b0c86 Updated the A2A Meson Field module 2018-07-31 15:58:02 +01:00
891ad66eab Included changes to Hadrons RBPrecCG solver needed for subtraction of guess 2018-07-31 11:26:07 +01:00
60c43151c5 Merge branch 'feature/hadrons-a2a' of https://github.com/paboyle/Grid into feature/hadrons-a2a 2018-07-31 01:09:02 +01:00
e036800261 Eigen fix 2018-07-31 01:08:42 +01:00
62900def36 Merge branch 'feature/hadrons-a2a' of https://github.com/paboyle/Grid into feature/hadrons-a2a 2018-07-31 00:36:26 +01:00
e3a309a73f Eigen happiness 2018-07-31 00:35:17 +01:00
ad6c1c0c4e The basics of what is needed in Grid and Hadrons for the A2A class and module, with none of the contraction or MF code. 2018-07-30 18:40:50 +01:00
00b92a91b5 Optimising 2018-07-28 23:46:22 +01:00
65533741f7 7 moms 2018-07-28 16:17:47 +01:00
dc0259fbda Merge pull request #173 from fionnoh/feature/hadrons-a2a
Changes to meson field benchmark. Now includes the gammas in the fina…
2018-07-27 23:03:56 +01:00
131a6785d4 Merge branch 'feature/hadrons-a2a' into feature/hadrons-a2a 2018-07-27 23:03:42 +01:00
44f4f5c8e2 Momentum loop 2018-07-27 23:00:16 +01:00
2679df034f Changes to meson field benchmark. Now includes the gammas in the final part of the naive method; both methods compute
lhs^dag*Gamma*rhs (previously Gamma*lhs^dag*rhs), and the results are checked against each other.
2018-07-27 18:31:10 +01:00
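Note: schematically, the quantity both benchmark methods now compute (with the gamma insertion included in the naive reference as well) is

    M_{ij}(\Gamma) \;=\; \sum_{x} v_i^\dagger(x)\,\Gamma\,w_j(x),

i.e. lhs^dag * Gamma * rhs, replacing the earlier Gamma * lhs^dag * rhs ordering.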
bf71162b97 Hadrons: backtrace on abort 2018-07-26 19:20:12 +01:00
299e828d83 Merge branch 'develop' into feature/hadrons 2018-07-26 16:49:49 +01:00
ef5452cddf Hadrons: smarter memory profiler 2018-07-26 16:47:45 +01:00
80de748737 Hadrons: new exceptions which can save a integer 2018-07-26 16:47:25 +01:00
71e1006ba8 Updated meson field benchmark for dirac structures 2018-07-26 09:09:29 +01:00
00f31ae83f Merge pull request #163 from goracle/unstaged
Add printing of whether there are unstaged changes in the git hash print
2018-07-25 19:00:00 +00:00
cce339deaf Merge pull request #172 from fionnoh/feature/hadrons
feature/hadrons -> feature/hadrons-a2a
2018-07-25 17:20:19 +00:00
24128ff109 Changes needed for MF benchmark to work with comms correctly 2018-07-23 15:51:37 +01:00
da17a015c7 Pack the stencil smaller for 128 bit access 2018-07-23 06:12:45 -04:00
1fd08c21ac make simd width configure time option for GPU 2018-07-23 06:10:55 -04:00
28db0631ff Hack to force 128bit accesses 2018-07-23 06:10:27 -04:00
b35401b86b Fix CUDA_ARCH. Need to simplify. See when new eigen release happens 2018-07-23 06:09:33 -04:00
a0714de8ec Define vector length for GPU 2018-07-23 06:09:05 -04:00
21a1710b43 Verbose vector length 2018-07-23 06:08:39 -04:00
34e9d3f0ca Moved the creation and resizing of the v and w high modes from the A2A class to the A2A module and made them an output of the module. This means that they have to be inputs of the contraction modules and they will be freed from memory if they are no longer needed. 2018-07-22 14:40:31 +01:00
c995788259 Added ImportUnphysicalFermion and included appropriate logic for 5d w vectors in A2A code 2018-07-21 00:08:11 +01:00
94c7198001 Added ZFIMPL to A2AMeson contraction 2018-07-20 23:08:22 +01:00
04d86fe9f3 Removed overly verbose print statement 2018-07-20 21:38:19 +01:00
b78074b6a0 Removed a Dminus from high mode v and removed duplication of D_oo code 2018-07-20 16:55:24 +01:00
7dfd3cdae8 Inclusion of ExportPhysicalFermionSource that fixes a bug in the low mode w vectors 2018-07-20 15:45:43 +01:00
cecee1ef2c Merge branch 'develop' of github.com:paboyle/Grid into feature/hadrons 2018-07-20 13:37:50 +01:00
355d4b58be Merge branch 'feature/hadrons' of github.com:fionnoh/Grid into feature/hadrons 2018-07-19 16:07:54 +01:00
2c54a536f3 Moved the meson field inner product to its own header file 2018-07-19 15:56:52 +01:00
d868a45120 Cleaned up some stuff that was erroneously included in a previous "trash" commit. Leaving in the mySliceInnerProdct function for now as it speeds up mesonfield creation quite a lot for 24^3 tests 2018-07-16 16:19:59 +01:00
9deae8c962 A2A meson field contraction code 2018-07-16 14:18:45 +01:00
b2b5137d28 Finally starting to get decent performance on Volta 2018-07-13 12:06:18 -04:00
db86cdd7bd Possible trash commit 2018-07-10 13:30:45 +01:00
ec9939c1ba Test for faster implementation of meson field inner loop
It should be possible to cache-block this at outer levels; the global sum across nodes is not performed
here and is deferred to the caller, so that many of them can be blocked into one big all-reduce.
Nc=3 and Fermion is hard coded in an ugly way. We might think about benchmarking whether
a product without the conjugate should be made available by Grid.

It is not clear whether the explicit unroll or performing the conjugate on the left once
was the real source of the speed-up.

Gives 70-80 GF/s on my laptop in single precision, half that in double, and 70 GB/s to cache.

This is competitive with dslash and a reasonable stopping point for the optimisation. If necessary we can revisit.
2018-07-10 12:38:51 +01:00
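Note: a minimal, self-contained sketch of the pattern the commit above describes, not the actual Grid kernel; names are illustrative. The left field is conjugated once outside the site loop, the Nc = 3 colour dot product is unrolled by hand, and only the node-local partial sum is returned so the caller can batch many of them into one global all-reduce.

    #include <array>
    #include <complex>
    #include <vector>

    using Complex      = std::complex<double>;
    using ColourVector = std::array<Complex, 3>;      // Nc = 3 hard coded, as in the commit

    // Node-local sum of conj(lhs(x)) . rhs(x); the global sum is deferred to the caller.
    Complex localInnerProduct(const std::vector<ColourVector> &lhs,
                              const std::vector<ColourVector> &rhs)
    {
      // Conjugate the left field once, outside the site loop.
      std::vector<ColourVector> lhsConj(lhs.size());
      for (std::size_t s = 0; s < lhs.size(); ++s)
        for (int c = 0; c < 3; ++c)
          lhsConj[s][c] = std::conj(lhs[s][c]);

      Complex acc(0.0, 0.0);
      for (std::size_t s = 0; s < lhs.size(); ++s)
      {
        // Explicit Nc = 3 unroll of the colour dot product.
        acc += lhsConj[s][0] * rhs[s][0]
             + lhsConj[s][1] * rhs[s][1]
             + lhsConj[s][2] * rhs[s][2];
      }
      return acc;  // caller blocks many partial sums into one big all-reduce
    }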
2cc07450f4 Fastest option for the dslash 2018-07-05 09:57:55 -04:00
c0e8bc9da9 Current version gets 250 - 320 GF/s on Volta on the target 12^4 volume. 2018-07-05 07:10:25 -04:00
b1265ae867 Prettify code 2018-07-05 07:08:06 -04:00
32bb85ea4c Standard extractLane is fast 2018-07-05 07:07:30 -04:00
ca0607b6ef Clearer kernel call meaning 2018-07-05 07:06:15 -04:00
19b527e83f Better extract merge for GPU. Let the SIMD header files define the pointer type for
access. GPU redirects through builtin float2, double2 for complex
2018-07-05 07:05:13 -04:00
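Note: roughly what the commit above means, with hypothetical names (this is not the Grid trait): a small traits header decides which pointer type is used to address individual SIMD lanes, and under nvcc the complex lanes are routed through the builtin float2/double2 vector types.

    #include <complex>

    // Hypothetical trait mapping a lane's scalar type to the pointer type used for access.
    template <typename T> struct LanePointer { typedef T *type; };

    #ifdef __CUDACC__
    // On the GPU, redirect complex lanes through the builtin CUDA vector types.
    template <> struct LanePointer<std::complex<float>>  { typedef float2  *type; };
    template <> struct LanePointer<std::complex<double>> { typedef double2 *type; };
    #endif

    // Access lane l of a SIMD vector whose storage starts at base.
    template <typename T>
    inline typename LanePointer<T>::type lanePointer(T *base, int l)
    {
      return reinterpret_cast<typename LanePointer<T>::type>(base) + l;
    }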
4730d4692a Fast lane extract, saturates bandwidth on Volta for SU3 benchmarks 2018-07-05 07:03:33 -04:00
1bb456c0c5 Minor GPU vector width change 2018-07-05 07:02:04 -04:00
4b04ae3611 Printing improvement 2018-07-05 06:59:38 -04:00
2f776d51c6 GPU-specific benchmark saturates memory. Can enhance Grid to do this for expressions,
but that is a bit of (known) work.
2018-07-05 06:58:37 -04:00
f74617c124 Added ZFIMPL to meson field module 2018-07-03 14:04:53 +01:00
8c6a3921ed Merge remote-tracking branch 'upstream/feature/hadrons' into feature/hadrons 2018-07-03 11:35:14 +01:00
a8a15dd9d0 Hadrons: code cleaning 2018-07-02 17:52:39 +01:00
3ce68a751a Hadrons: stout smearing module 2018-07-02 17:52:04 +01:00
daa0977d01 Included a print statement that indicates that the guess is being subtracted from the solve. 2018-06-28 16:34:56 +01:00
a2929f4384 Removed A2A contraction module and replaced it with the beginnings of a meson field module 2018-06-28 16:17:26 +01:00
7fe3974c0a Included eigenPacks and action as references, not inputs, of A2A module. They now no longer need to be parameters in the meson field modules. 2018-06-28 16:14:49 +01:00
f7e86f81a0 Changes A2A class to make use of the new Solver class 2018-06-28 16:14:16 +01:00
fecec803d9 Merge branch 'feature/hadrons' of https://github.com/paboyle/Grid into feature/hadrons 2018-06-28 16:13:43 +01:00
8fe9a13cdd Merge branch 'feature/hadrons' of https://github.com/paboyle/Grid into feature/hadrons 2018-06-28 16:13:07 +01:00
3a50afe7e7 GPU dslash updates 2018-06-27 22:32:21 +01:00
f8e880b445 Loop for s and xyzt offload 2018-06-27 21:49:57 +01:00
3e947527cb Move looping over "s" and "site" into kernels for GPU optimisation 2018-06-27 21:29:43 +01:00
31f65beac8 Move site and Ls looping into the kernels 2018-06-27 21:28:48 +01:00
38e2a32ac9 Single SIMD lane operations for CUDA 2018-06-27 21:28:06 +01:00
efa84ca50a Keep Cuda 9.1 happy 2018-06-27 21:27:32 +01:00
5e96d6d04c Keep CUDA happy 2018-06-27 21:27:11 +01:00
df30bdc599 CUDA happy 2018-06-27 21:26:49 +01:00
7f45222924 Diagnostics on memory alloc fail 2018-06-27 21:26:20 +01:00
dd891f5e3b Use NVCC to suppress device Eigen 2018-06-27 21:25:17 +01:00
d2c42e6f42 Hadrons: scaled DWF action 2018-06-26 14:59:33 +01:00
2881b3e8e5 WilsonMG: Remove unnecessary static assertions 2018-06-26 14:42:30 +02:00
049cc518f4 Hadrons: introduction message 2 2018-06-25 19:08:39 +01:00
2e1c66897f Hadrons: introduction message 2018-06-25 19:08:22 +01:00
adcef36189 Hadrons: Möbius DWF action 2018-06-25 15:58:35 +01:00
2f121c41c9 Committing creation of meson field code before a merge with the upstream branch feature/hadrons 2018-06-25 12:20:46 +01:00
e0ed7e300f Hadrons: spurious Dminus removed 2018-06-22 16:33:43 +02:00
485207901b Merge branch 'develop' into feature/hadrons 2018-06-22 16:15:32 +02:00
c760f0a4c3 Hadrons: remove make_5D/4D functions and FreeProp fix 2018-06-22 16:12:46 +02:00
c84eeedec3 Hadrons: GaugeProp module for z-Wilson actions 2018-06-22 15:53:22 +02:00
1ac3526f33 Small changes to the A2A header and module 2018-06-22 12:29:42 +01:00
0de090ee74 Temporarily added in the contraction code that produced the working 2-pt function. This is committed for reference only and will be removed in the next push. 2018-06-22 12:28:41 +01:00
91405de3f7 Hadrons: new solver exposing fermion matrix and generic source/solve import/export 2018-06-22 12:14:37 +02:00
8fccda301a Fixed a bug where the guess was always subtracted after the solve and included appropriate weights for the sources in the one case we're looking at now. More work needs to be done to make the 5d/4d source logic less brittle. 2018-06-21 16:36:59 +01:00
7a0abfac89 Restructured the class that computes and returns the A2A vectors. 2018-06-21 16:36:06 +01:00
ae37fda699 A more elegant way to subtract guesses from solve and a bool check before verifying residual 2018-06-20 16:07:40 +01:00
b5fc5e2030 All-to-all module update that hit a promising milestone. Committing for reference for future changes. 2018-06-20 10:59:07 +01:00
cc5d025ea4 WilsonMG: Adapt staggered GMRES/MR tests to "new" constructor 2018-06-18 16:20:20 +02:00
6c97a6a071 Coalescing version of the kernel 2018-06-13 20:52:29 +01:00
73bb2d5128 Ugly hack to speed up compile on GPU; we don't use the hand kernels on GPU anyway so why compile 2018-06-13 20:35:28 +01:00
b710fec6ea Gpu code first version of specialised kernel 2018-06-13 20:34:39 +01:00
b2a8cd60f5 Doubled gauge field is useful 2018-06-13 20:27:47 +01:00
867ee364ab Explicit instantiation hooks 2018-06-13 20:27:12 +01:00
25becc9324 GPU tweaks for benchmarking; really necessary? 2018-06-13 20:26:07 +01:00
94d1ae4c82 Some prep work for GPU shared memory. Need to be careful, as we will try GPUDirect
RDMA and inter-GPU memory sharing on Summit later
2018-06-13 20:24:06 +01:00
2075b177ef CUDA_ARCH: more careful treatment 2018-06-13 20:22:34 +01:00
847c761ccc Move sfw IEEE fp16 into central location 2018-06-13 20:22:01 +01:00
8287ed8383 New GPU vector targets 2018-06-13 20:21:35 +01:00
e6be7416f4 Use managed memory 2018-06-13 20:14:00 +01:00
26863b6d95 User Managed memory 2018-06-13 20:13:42 +01:00
ebd730bd54 Adding 2D loops 2018-06-13 20:13:01 +01:00
066be31a3b Optional GPU target SIMD types; work in progress and trying experiments 2018-06-13 20:07:55 +01:00
7a4c142955 Add GPU specific simd targets 2018-06-13 19:55:30 +01:00
ddcb53bce2 Merge remote-tracking branch 'upstream/develop' into feature/wilsonmg 2018-06-13 09:50:37 +02:00
d1c80e1d46 WilsonMG: Correct years in copyright line 2018-06-13 09:44:09 +02:00
c73cc7d354 WilsonMG: Add tests with MG preconditioner running single precision, outer solver running in double 2018-06-12 16:10:48 +02:00
49fdc324a0 WilsonMG: Make MG correctness checks abort on failing tests 2018-06-12 16:10:48 +02:00
f32714a2d1 WilsonMG: Make running MG correctness checks optional via commandline 2018-06-12 16:10:48 +02:00
73a955be20 WilsonMG: Move tests for Wilson & WilsonClover into separate files 2018-06-12 16:10:48 +02:00
66b7a0f871 WilsonMG: Move multigrid class to separate file 2018-06-12 16:10:48 +02:00
2ab9d4bc56 WilsonMG: Fix random behavior in GMRES
From time to time I saw random behaviour since the basis vectors were not initialized
properly.
2018-06-12 15:01:31 +02:00
4f41cd114d WilsonMG: Add a mixed precision version of FGMRES
This version does everything in double prec but accepts a preconditioner working
in single precision.
2018-06-12 15:01:31 +02:00
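Note: a bare-bones illustration of the mixed-precision pattern described in the commit above, with a trivial placeholder for the inner single-precision solve (the real preconditioner is a single-precision MG cycle or solver); names and the placeholder are assumptions, not the Grid code. The outer double-precision FGMRES hands its residual down in float and receives the correction back in double.

    #include <cstddef>
    #include <vector>

    // Sketch only: apply a single-precision "preconditioner" inside a double-precision
    // outer iteration. Down-cast the residual, solve approximately in float, up-cast.
    std::vector<double> applySinglePrecPreconditioner(const std::vector<double> &residual,
                                                      float approxInverseDiagonal)
    {
      std::vector<float> rf(residual.size());
      for (std::size_t i = 0; i < residual.size(); ++i)
        rf[i] = static_cast<float>(residual[i]);

      // Placeholder inner solve in single precision (here: diagonal scaling).
      std::vector<float> zf(rf.size());
      for (std::size_t i = 0; i < rf.size(); ++i)
        zf[i] = approxInverseDiagonal * rf[i];

      // Up-cast so the outer FGMRES only ever works with double-precision corrections.
      std::vector<double> z(zf.size());
      for (std::size_t i = 0; i < zf.size(); ++i)
        z[i] = static_cast<double>(zf[i]);
      return z;
    }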
11c4f5e32c WilsonMG: Provide command line switch for reading in input xml + move default params to constructor of MultiGridParams 2018-06-12 15:01:31 +02:00
e9b9550298 WilsonMG: Fix incompatibility with single prec MG in construction of simd layout on coarser grids 2018-06-12 15:01:31 +02:00
7564fedf68 WilsonMG: Set subspace to zero to avoid random behavior 2018-06-12 15:01:31 +02:00
8db0ef9736 Merge pull request #168 from jch1g10/feature/qed-fvol
Feature/qed fvol
2018-06-08 20:09:06 +02:00
95d4b46446 Merge branch 'develop' of https://github.com/paboyle/Grid into develop 2018-06-08 11:30:29 +01:00
251b904a28 Merge branch 'release/ISC-freeze-2' 2018-06-04 21:09:48 +01:00
5dfd216a34 Better thread safety 2018-06-04 21:08:44 +01:00
5a112feac3 Merge branch 'release/ISC-freeze-1' 2018-06-04 18:49:40 +01:00
c2e8d0aa88 Solve g++ problem on the lanczos test 2018-06-04 18:34:15 +01:00
0fe5aeffbb Merge branch 'feature/hadrons' into feature/qed-fvol 2018-06-04 16:59:43 +01:00
7fbc469046 Merge branch 'develop' into feature/hadrons 2018-06-04 16:58:30 +01:00
bf96a4bdbf Merge branch 'master' into develop 2018-06-04 14:03:11 +01:00
84685c9bc3 Overflow fix 2018-06-04 13:42:07 +01:00
a8d4156997 Added a Hadrons module that computes the all-to-all v and w vectors 2018-05-31 17:18:58 +01:00
c18074869b Changes to Hadrons SchurRB solver to allow for a subtract_guess boolean to be passed 2018-05-31 17:17:16 +01:00
f4c6d39238 Changes made to SchurRB solvers to allow for the subtraction of a guess after solve 2018-05-31 17:16:20 +01:00
200d35b38a Merge branch 'develop' into feature/hadrons 2018-05-28 11:52:47 +02:00
eb52e84d09 Merge branch 'feature/hadrons' of github.com:paboyle/Grid into feature/hadrons 2018-05-28 11:50:27 +02:00
72abc34764 Merge pull request #166 from guelpers/feature/hadrons
Feature/hadrons
2018-05-28 11:49:46 +02:00
e3164d4c7b Hadrons: env function to get volume in double 2018-05-28 11:39:17 +02:00
f5db386c55 Change MODULE_REGISTER_NS -> MODULE_REGISTER in UnitEM, ScalarVP and VPCounterTerms 2018-05-22 16:16:21 +01:00
294ee70a7a Merge branch 'feature/hadrons' into feature/qed-fvol
# Conflicts:
#	extras/Hadrons/modules.inc
#	lib/qcd/action/gauge/Photon.h
2018-05-21 18:02:41 +01:00
013ea4e8d1 Merge branch 'feature/staggered-comms-compute' into develop 2018-05-21 13:11:56 +01:00
7fbbb31a50 Merge branch 'develop' into feature/staggered-comms-compute
Conflicts:
	lib/qcd/action/fermion/ImprovedStaggeredFermion.cc
2018-05-21 13:07:29 +01:00
0e127b1fc7 New file single prec test 2018-05-21 12:57:13 +01:00
68c028b0a6 Comment 2018-05-21 12:54:25 +01:00
255d4992e1 Hadrons: stochastic scalar SU(N) free field fix 2018-05-18 20:49:55 +01:00
a0d399e5ce Hadrons: yet other attempts at EMT NPR 2018-05-18 20:49:26 +01:00
fd3b2e945a Hadrons: don't write result with empty stem 2018-05-18 20:48:24 +01:00
6c27c72585 WilsonMG: Provide more sensible default values for MG parameters 2018-05-16 17:26:09 +02:00
9c003d2d72 WilsonMG: Base wilson mg preconditioner entirely on existing infrastructure 2018-05-16 17:26:09 +02:00
4b8710970c WilsonMG: Switch to Galerkin coarsening in CoarsenedMatrix 2018-05-16 17:26:09 +02:00
68d686ec38 WilsonMG: Add functionality for applying G5 on coarse grids 2018-05-16 16:17:14 +02:00
c48b69ca81 WilsonMG: Implement Mdir & Mdiag in CoarsenedMatrix 2018-05-16 16:08:05 +02:00
df8c208f5c WilsonMG: Revert CoarsenedMatrix.h and Lattice_transfer.h back to state of develop branch 2018-05-16 16:02:54 +02:00
61812ab7f1 Merge remote-tracking branch 'upstream/develop' into feature/wilsonmg 2018-05-15 14:57:18 +02:00
b999984501 Merge branch 'develop' into feature/hadrons 2018-05-15 13:53:57 +01:00
7836cc2d74 No checksum output on log for scidac 2018-05-15 10:10:08 +01:00
eb7d34a4cc GPU version 2018-05-14 19:41:47 -04:00
aab27a655a Start of GPU kernels 2018-05-14 19:41:17 -04:00
93280bae85 Gpu option 2018-05-14 19:40:58 -04:00
c5f93abcd7 GPU clean up 2018-05-14 19:40:33 -04:00
d5deef782d Useful debug comments 2018-05-14 19:39:52 -04:00
5f50473c0d Clean up 2018-05-14 19:39:11 -04:00
a61e0df54b Travis fix for Lime 2018-05-14 19:56:12 +01:00
9d835afa35 Attempt at solving the FP exception in the QED code 2018-05-14 19:05:54 +01:00
5e3be47117 Hadrons: scalar SU(N) various fixes 2018-05-14 18:58:39 +01:00
13f50406e3 Suppress print statement 2018-05-12 18:00:00 -04:00
09cd46d337 Lane by Lane operation 2018-05-12 17:59:35 -04:00
d3f51065c2 Give command line control of blocks/threads split 2018-05-12 17:58:56 -04:00
925ac4173d Thread count control for the warp scheduler 2018-05-12 17:58:22 -04:00
eb921041d0 Perf count control 2018-05-12 17:57:32 -04:00
48de706dd5 Merge branch 'develop' into feature/hadrons 2018-05-11 18:06:40 +01:00
f871fb0c6d check file is opened correctly in the Lime reader 2018-05-11 18:06:28 +01:00
93771f3099 Hadrons: scalar SU(N) stochastic free field 2018-05-10 22:29:48 +01:00
8cb205725b Merge branch 'develop' into feature/hadrons 2018-05-09 23:56:35 +01:00
9ad580d82f Hadrons: format fix 2018-05-07 21:38:15 +01:00
899f961d0d Hadrons: eigenvalue metadata saved with 16 significant digits 2018-05-07 21:37:03 +01:00
54d789204f more general implementation of the precision interface for serialisers 2018-05-07 21:17:46 +01:00
25828746f3 XML precision scientific with 16 digits by default 2018-05-07 21:04:31 +01:00
f362c00739 Hadrons: better handling of automatic directory creation 2018-05-07 19:43:40 +01:00
25d1cadd3b Merge branch 'develop' of https://github.com/paboyle/Grid into develop 2018-05-07 18:55:09 +01:00
c24d53bbd1 Further debug of RNG I/O 2018-05-07 18:55:05 +01:00
2017e4e3b4 Hadrons: more verbose directory creation error 2018-05-07 18:12:22 +01:00
27a4d4c951 Hadrons: multi-file eigenpack in separate directory 2018-05-07 17:52:54 +01:00
2f92721249 Merge branch 'develop' into feature/hadrons 2018-05-07 17:26:47 +01:00
3c7a4106ed Trap for deadly empty comm thread option 2018-05-07 17:26:39 +01:00
3252059daf Hadrons: multi-file support for eigenpacks 2018-05-07 17:25:36 +01:00
6eed167f0c Merge branch 'release/0.8.1' 2018-05-04 17:34:11 +01:00
4ad0df6fde Bump volume for Gerardo 2018-05-04 17:33:23 +01:00
661381e881 Merge branch 'develop' into feature/hadrons 2018-05-04 14:52:17 +01:00
68a5079f33 Merge branch 'develop' of https://github.com/paboyle/Grid into develop 2018-05-04 14:13:54 +01:00
8634e19f1b Update 2018-05-04 14:13:35 +01:00
9ada378e38 Add timing 2018-05-04 10:58:01 +01:00
9d9692d439 Fix double vs float in boundary phases 2018-05-03 16:40:16 +01:00
0659ae4014 Merge branch 'develop' into feature/hadrons 2018-05-03 16:20:22 +01:00
bfbf2f1fa0 no threaded stencil benchmark if OpenMP is not supported 2018-05-03 16:20:01 +01:00
dd6b796a01 Hadrons: scalar SU(N) volume factor fix 2018-05-03 16:19:17 +01:00
52a856b4a8 FreeProp module for Hadrons 2018-05-03 12:33:20 +01:00
04190ee7f3 5D free propagator for DWF and boundary conditions for free propagators 2018-05-03 12:31:36 +01:00
587bfcc0f4 Add Timing 2018-05-03 12:10:31 +01:00
2700992ef5 Merge remote-tracking branch 'upstream/feature/hadrons' into feature/hadrons 2018-05-03 10:01:52 +01:00
8c658de179 Compressor speed up (a little); streaming stores 2018-05-02 17:52:16 +01:00
ba37d51ee9 Debugging the RNG IO 2018-05-02 15:32:06 +01:00
4f4181c54a Merge branch 'feature/staggered-comms-compute' of https://github.com/paboyle/Grid into feature/staggered-comms-compute 2018-05-02 14:59:13 +01:00
4d4ac2517b Adding Scalar field theory example for Scidac format 2018-05-02 14:36:32 +01:00
e568c24d1d Merge branch 'develop' of https://github.com/paboyle/Grid into develop 2018-05-02 14:29:25 +01:00
b458326744 Checkpointer module update 2018-05-02 14:29:22 +01:00
6e7d5e2243 HMC: added Scidac checkpointer and support for metadata 2018-05-02 14:28:59 +01:00
b35169f1dd MultiShift for Staggered 2018-05-02 14:22:37 +01:00
441ad7498d add Iterative counter 2018-05-02 14:21:30 +01:00
6f6c5c549a Split off gparity 2018-05-02 14:11:23 +01:00
1584e17b54 Revert to fast version 2018-05-02 14:10:55 +01:00
12982a4455 Hypercube optimisation 2018-05-02 14:10:21 +01:00
172f412102 shmget reintroduce 2018-05-02 14:07:41 +01:00
a64497265d Timing 2018-05-02 14:07:28 +01:00
ca639c195f Merge branch 'develop' into feature/hadrons 2018-05-01 14:07:32 +01:00
edc28dcfbf Hadrons: scalar SU(N) 2-pt fix 2018-05-01 14:02:31 +01:00
c45f24a1b5 Improvements for tesseract 2018-04-30 21:50:00 +01:00
aaf37ee4d7 Merge branch 'develop' of https://github.com/paboyle/Grid into develop 2018-04-27 11:45:13 +01:00
1dddd17e3c Benchmark improvements from tesseract 2018-04-27 11:44:46 +01:00
661f1d3e8e Merge branch 'release/0.8.0' into develop 2018-04-27 11:22:33 +01:00
edcf9b9293 Merge branch 'release/0.8.0' 2018-04-27 11:13:19 +01:00
fe6860b4dd Update with LIME library guard 2018-04-27 08:57:34 +01:00
d6406b13e1 Merge branch 'develop' of https://github.com/paboyle/Grid into develop 2018-04-27 07:52:56 +01:00
e369d7306d Rename 2018-04-27 07:51:44 +01:00
9f8d63e104 Roll over version 2018-04-27 07:51:12 +01:00
9b0240d101 Hot start test 2018-04-27 07:50:51 +01:00
b27f0e5a53 Control over IO 2018-04-27 07:50:15 +01:00
75e4483407 Stronger convergence test 2018-04-27 07:49:57 +01:00
0734e9ddd4 Debugging Scatter_plane_simple 2018-04-27 14:39:01 +09:00
809b1cdd58 Bug fix for MPI running ; introduced last night 2018-04-27 05:19:10 +01:00
1be8089604 Clean compile 2018-04-26 23:42:45 +01:00
3e0eff6468 Merge branch 'develop' of https://github.com/paboyle/Grid into develop 2018-04-26 23:00:46 +01:00
7ecc47ac89 Quenched test compile 2018-04-26 23:00:28 +01:00
e9f1ac09de static 2018-04-26 23:00:08 +01:00
fa0d8feff4 Performance of CovariantCshift now non-embarrassing. 2018-04-26 17:56:27 +01:00
49b8501fd4 Merge branch 'develop' into feature/hadrons 2018-04-26 17:33:50 +01:00
d47484717e Hadrons: scalar SU(N) result handling improvement 2018-04-26 17:32:37 +01:00
05b44aef6b Merge branch 'develop' of https://github.com/paboyle/Grid into develop
Conflicts:
	benchmarks/Benchmark_su3.cc
2018-04-26 15:38:49 +01:00
03e9832efa Use macros for bare openmp 2018-04-26 14:50:02 +01:00
28a375d35d Force static 2018-04-26 14:49:42 +01:00
3b06381745 Guard bare openmp statement with ifdef 2018-04-26 14:48:57 +01:00
91a0a3f820 Improvement 2018-04-26 14:48:35 +01:00
8f44c799a6 Saving the benchmarking tests for Cshift 2018-04-26 14:48:03 +01:00
96272f3841 Merge staggered fix linear operator and reduction 2018-04-26 10:33:19 +01:00
5c936d88a0 Merge branch 'feature/staggered-comms-compute' of https://github.com/paboyle/Grid into feature/staggered-comms-compute 2018-04-26 10:18:37 +01:00
1c64ee926e Faster staggered operator, making use of the trivial m^2 term 2018-04-26 10:17:49 +01:00
2cbb72a81c Provide info if EE term is trivial (m^2 factor)
Better timing in staggered 4d case
2018-04-26 10:10:07 +01:00
31d83ee046 Enable special treatment of constEE cases 2018-04-26 10:08:46 +01:00
a9e8758a01 Improvements to staggered tests timings 2018-04-26 10:08:05 +01:00
3e125c5b61 Faster linalg on CG optimised against staggered
Sum overhead is bigger for staggered
2018-04-26 10:07:19 +01:00
eac6ec4b5e Faster reductions, important on single node staggered 2018-04-26 10:03:57 +01:00
213f8db6a2 Microsecond resolution 2018-04-26 10:01:39 +01:00
6358f35b7e Debug of previous commit 2018-04-26 14:18:11 +09:00
43f5a0df50 More timers in the integrator 2018-04-26 12:01:56 +09:00
c897878776 Merge branch 'develop' of https://github.com/paboyle/Grid into develop 2018-04-26 11:31:57 +09:00
cc6eb51e3e Hadrons: macro refactoring for library portability 2018-04-25 16:49:14 +01:00
507009089b Merge remote-tracking branch 'upstream/feature/hadrons' into feature/hadrons 2018-04-25 09:36:39 +01:00
2baf193031 Merge branch 'develop' of https://github.com/paboyle/Grid into develop 2018-04-25 00:14:03 +01:00
362ba0443a Cshift updates 2018-04-25 00:12:11 +01:00
276a2353df Move constructor 2018-04-25 00:11:07 +01:00
b234784c8e Hadrons: scalar SU(N) takes operator pairs now 2018-04-24 19:52:12 +01:00
6ea2a8b7ca Hadrons: scheduler shows starting value 2018-04-24 19:51:47 +01:00
c1d0359aaa Hadrons: scalar SU(N) kinetic term saves trace 2018-04-24 19:51:22 +01:00
047ee4ad0b Hadrons: scalar SU(N) cleanup 2018-04-24 19:50:58 +01:00
a13106da0c Hadrons: scalar SU(N) gradient 2018-04-24 19:50:30 +01:00
75113e6523 Hadrons: Scalar SU(N) variable name update 2018-04-24 19:49:27 +01:00
325c73d051 Hadrons: module template update 2018-04-24 19:48:54 +01:00
b25a59e95e Hadrons: mitigation of GCC/Intel compiler bug not generating defaulted destructors 2018-04-24 17:20:25 +01:00
c5b9147b53 Correction of a minor bug in the su3 benchmark 2018-04-24 08:03:57 -07:00
64ac815fd9 Merge branch 'develop' of https://github.com/paboyle/Grid into develop 2018-04-24 17:27:38 +09:00
a1be533329 Corrected Flop count in Benchmark su3 and expanded the Wilson flow output 2018-04-24 01:19:53 -07:00
7c4533797f Hadrons: scalar SU(N) EMT improvement term optional 2018-04-23 22:46:39 +01:00
af84fd65bb Hadrons: missing dependency message improvement 2018-04-23 22:46:17 +01:00
1a2613086a Fix print message. 2018-04-23 15:42:12 -04:00
4f110c09a5 Add printing of whether there are unstaged changes in the git hash print. 2018-04-23 15:38:23 -04:00
6764362237 Hadrons: automatic directory creation fix 2018-04-23 18:45:39 +01:00
2fa2b0e0b1 Hadrons: Application header does not include all the modules 2018-04-23 17:57:17 +01:00
b61292f735 Hadrons: recursive mkdir function 2018-04-23 17:36:43 +01:00
ce7720e221 Hadrons: copyright update 2018-04-23 17:36:20 +01:00
853a5528dc Hadrons: template modules compilation optimisation 2018-04-23 17:35:01 +01:00
169f405c9c Hadrons: tests repaired 2018-04-23 12:48:34 +01:00
c6125b01ce Hadrons: Error and Warning channels always on 2018-04-23 12:48:17 +01:00
b0b5b34bff Hadrons: custom abort with module trace 2018-04-23 12:48:00 +01:00
1c9722357d Merge branch 'develop' into feature/hadrons
# Conflicts:
#	lib/qcd/action/fermion/FermionOperator.h
2018-04-20 17:15:21 +01:00
141da3ae71 function to get tensor dimensions 2018-04-20 17:13:34 +01:00
94edf9cf8b HDF5: direct access to group for custom operations 2018-04-20 17:13:21 +01:00
c11a3ca0a7 vectorise/unvectorise in reverse order 2018-04-20 17:13:04 +01:00
870b1a85ae I think I have the physical prop interface to CF and PF overlap right, but it needs a strong check/regression.
Only Hw overlap is supported, not Ht, for now. Ht needs a new Dminus implemented.
2018-04-18 14:17:49 +01:00
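Note: for reference, using the standard overlap-kernel definitions (not quoted from the code), the two kernels mentioned above are

    H_w = \gamma_5\, D_W(-M), \qquad
    H_T = \gamma_5\, \frac{D_W(-M)}{\,2 + a\, D_W(-M)\,},

and it is the (2 + a D_W)^{-1} factor in H_T that calls for a Dminus-type operator, consistent with the note that only Hw is supported until a new Dminus is implemented.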
b5510427f9 physical fermion interface, cshift benchmark in SU3. 2018-04-18 01:43:29 +01:00
26ed65c8f8 Merge branch 'develop' of https://github.com/paboyle/Grid into develop 2018-04-17 12:03:32 +01:00
f7f043d8cf Merge branch 'develop' of https://github.com/paboyle/Grid into develop 2018-04-17 10:57:18 +01:00
ddcaa6ad29 Master does header on Nersc 2018-04-17 10:48:33 +01:00
87c5c0271b Fixing Eigen 2018-04-16 19:08:07 -04:00
a3f5a13591 Better Eigen handling 2018-04-16 18:02:55 -04:00
9fe28f00eb Eigen symlink off head revision 2018-04-16 17:54:46 -04:00
334da7f452 Hadrons: can trace which module is throwing an error 2018-04-13 18:45:31 +02:00
4669ecd4ba Hadrons: build improvement 2018-04-13 18:21:18 +02:00
4573b34cac Hadrons: scalar SU(N) 2-pt functions with momentum 2018-04-13 18:21:00 +02:00
a8a0bb85cc Control scalar or vector execution under generic. Disable Eigen vectorisation on PowerPC / Summit 2018-04-12 12:32:57 -04:00
6411caad67 work distribution 2018-04-12 11:41:41 -04:00
7533035a99 Control Eigen vectorisation 2018-04-12 11:40:56 -04:00
17f57e85d1 Merge branch 'develop' into feature/hadrons 2018-04-06 22:53:11 +01:00
c8d4d184ee XML push fragment fix 2018-04-06 22:53:01 +01:00
17f27b1ebd Hadrons: eigenpack writer fix 2018-04-06 22:52:11 +01:00
a16bbecb8a Hadrons: more feedback 2018-04-06 19:38:20 +01:00
7c9b0dd842 Hadrons: top level name for eigenpack metadata 2018-04-06 19:32:22 +01:00
6b7228b3e6 Hadrons: better metadata for eigenpack 2018-04-06 19:29:53 +01:00
f117552334 post-merge fix 2018-04-06 18:38:46 +01:00
a21a160029 Merge branch 'develop' into feature/hadrons
# Conflicts:
#	lib/serialisation/XmlIO.cc
2018-04-06 18:34:19 +01:00
1569a374a9 XML interface polish, XML fragments can be pushed into a writer 2018-04-06 18:32:14 +01:00
eddf023b8a pugixml 1.9 update 2018-04-06 16:17:22 +01:00
6b8ffbe735 Hadrons: genetic minimum value type fix 2018-04-06 15:41:31 +01:00
81050535a5 Hadrons: truncate eigenvalues when loading partial eigenpack 2018-04-06 13:48:58 +01:00
7dcf5c90e3 Hadrons: eigenpack must be referred by solver when used 2018-04-06 13:16:28 +01:00
9ce00f26f9 no special characters in std::vector operator<< 2018-04-04 17:44:56 +01:00
85c253ed4a Test_serialisation MPI fix 2018-04-04 17:19:34 +01:00
ccfc0a5a89 Hadrons: better string representation of module parameters 2018-04-04 17:19:22 +01:00
d3f857b1c9 Hadrons: proper metadata for eigenpacks 2018-04-04 16:36:37 +01:00
fb62035aa0 Hadrons: do not create RB coarse grids 2018-04-03 19:49:11 +01:00
0260bc7705 Hadrons: eigen pack writing only for boss node 2018-04-03 18:55:46 +01:00
68e6a58f12 Hadrons: several Lanczos fixes and improvements 2018-04-03 17:42:21 +01:00
73ced656eb Merge remote-tracking branch 'upstream/develop' into feature/wilsonmg 2018-04-03 17:51:11 +02:00
f69008edf1 WilsonMG: Add functionality to report timings to MG preconditioner 2018-04-03 17:26:49 +02:00
57a49ed22f WilsonMG: Read in MG parameters from xml in test 2018-04-03 16:03:11 +02:00
ff6413a764 WilsonMG: Make number of levels choosable at runtime
I don't like this solution though :(
2018-04-03 15:57:33 +02:00
2530bfed01 WilsonMG: Move params instance from global scope to test main function 2018-04-03 14:50:48 +02:00
640515e3d8 Merge branch 'develop' into feature/hadrons 2018-03-30 17:43:49 +01:00
f089bf5629 Merge branch 'develop' of https://github.com/paboyle/Grid into develop 2018-03-30 16:17:26 +01:00
276f113f28 IO uses master boss node for metadata. 2018-03-30 16:17:05 +01:00
97c579f637 Merge branch 'develop' into feature/hadrons 2018-03-30 16:04:44 +01:00
a13c109111 deterministic initialisation of field metadata 2018-03-30 16:03:01 +01:00
ab6afd18ac Still compile if no LIME 2018-03-30 13:39:20 +01:00
5bde64d48b Barrier required in parallel when we use ftell 2018-03-30 12:41:30 +01:00
2f5add4d5f Creation of file 2018-03-30 12:30:58 +01:00
c5a885dcd6 I/O benchmark 2018-03-29 19:57:41 +01:00
74f79c5ac7 Revert "Add function to return full type as std::string"
This reverts commit 1cb745c8dc.
2018-03-29 12:03:50 +02:00
58c30c0cb1 WilsonMG: Add conformability checks in MG preconditioner 2018-03-28 13:24:39 +02:00
917a92118a WilsonMG: Move operator test to MG testing routine 2018-03-28 12:19:25 +02:00
a4d8512fb8 Revert "Lattice serialisation, just HDF5 for the moment"
This reverts commit 8a0cf0194f.
2018-03-27 17:55:42 +01:00
5ec903044d Serial IO code cleaning for std:: convention 2018-03-27 17:11:50 +01:00
04f9cf088d WilsonMG: Add more parameters to MultiGridParams struct 2018-03-27 17:13:11 +02:00
99107038f9 WilsonMG: Rationalize the level counting strategy 2018-03-27 17:06:33 +02:00
8a0cf0194f Lattice serialisation, just HDF5 for the moment 2018-03-26 19:16:16 +01:00
b78456bdf4 WilsonMG: Get rid of explicit include of GCR header 2018-03-26 15:41:53 +02:00
08543b6b11 WilsonMG: Provide a switch between V- and K-cycle 2018-03-26 15:37:17 +02:00
63ba33371f WilsonMG: Some minor refactoring 2018-03-26 15:34:53 +02:00
683a7d2ddd WilsonMG: Move comment to make clang-format happy 2018-03-26 14:59:40 +02:00
1c680d4b7a Merge branch 'develop' into feature/hadrons 2018-03-26 13:52:44 +01:00
b15db11c60 Kernels -> pure static object to enable device execution 2018-03-24 19:35:20 -04:00
f6077f9d48 Kernels -> not instantiated, otherwise object ref on GPU 2018-03-24 19:33:44 -04:00
572954ef12 Kernels not an instantiated object, just static 2018-03-24 19:33:13 -04:00
cedeaae7db Lebesgue -> StencilView if necessary 2018-03-24 19:32:41 -04:00
e6cf0b1e17 View typedefs go to OperatorImpl 2018-03-24 19:32:11 -04:00
5412628ea6 begin/end lambda 2018-03-24 19:31:45 -04:00
1f70cedbab Have to make all kernel called routines static since object reference will be a host pointer on GPU 2018-03-24 19:29:26 -04:00
b50f37cfb4 Remove overlap comms flag 2018-03-24 19:28:53 -04:00
cb0d2a1b03 threaded rng init; I thought this was on 2018-03-24 19:28:17 -04:00
6fe9b28a82 Cosmetic 2018-03-24 19:27:14 -04:00
b002587d7c Simplify 2018-03-24 19:26:44 -04:00
6c08385782 Simplify 2018-03-24 19:26:19 -04:00
afdcbf79d1 Merge remote-tracking branch 'upstream/develop' into feature/wilsonmg 2018-03-23 21:13:50 +01:00
3c3ec4e267 WilsonMG: Move tests for Wilson & WilsonClover into the same file 2018-03-23 21:12:27 +01:00
bbe1d5b49e WilsonMG: Temporarily use GMRES in construction of basis vectors
This can go back to CG once Mdag in CoarsenedMatrix works.
2018-03-23 20:02:27 +01:00
0f6009a29f WilsonMG: Huge refactor into something that could be considered an algorithm 2018-03-23 19:55:43 +01:00
1cfed3de7c WilsonMG: Add new logger for MG 2018-03-23 19:55:16 +01:00
c9c073eee4 Changes in messages in test dwf mixedprec 2018-03-23 11:27:56 +00:00
f290b2e908 Fix to pass CI tests 2018-03-23 11:14:23 +00:00
5f8225461b Fencing mixedcg test propagator write. LIME is still optional in Grid 2018-03-23 10:37:58 +00:00
4e1272fabf Kernels need to be static to work on GPU. No reference to host resident data 2018-03-22 18:44:53 -04:00
607dc2d3c6 Remove lebesgue order 2018-03-22 18:23:09 -04:00
23c880b009 Remove lebesgue order; stick in stencil if need 2018-03-22 18:13:41 -04:00
334bb6792f Lebesgue order removed. Stick in the stencil view 2018-03-22 18:12:12 -04:00
a3690071b4 Warm up GPU 2018-03-22 18:05:20 -04:00
299d119013 GPU work allocation improved 2018-03-22 18:04:24 -04:00
55be842d23 Don't force l1p.h so early 2018-03-22 18:01:43 -04:00
edbc0d49d7 WilsonMG: Get rid of explicit GridTypeMappers in CoarsenedMatrix 2018-03-22 16:38:24 +01:00
e9323460c7 Merge branch 'develop' into feature/hadrons 2018-03-22 10:48:37 +00:00
20e186a1e0 Merge pull request #158 from goracle/dev-pull
Make compilation faster by moving print of git hash.
2018-03-22 10:45:17 +00:00
6ef4af989b Merge pull request #159 from goracle/dev-precsafe
Add dimension check to precisionChange.
2018-03-22 10:41:53 +00:00
ccde8b817f Add dimension check to precisionChange. 2018-03-21 20:58:04 -04:00
68168bf72d Revert "Add dimension match check to precisionChange."
This reverts commit 8f601d9b39.
2018-03-21 20:51:38 -04:00
e93d0feaa7 Merge branch 'dev-pull' of github.com:goracle/Grid into dev-pull 2018-03-21 20:39:30 -04:00
8f601d9b39 Add dimension match check to precisionChange. 2018-03-21 20:38:19 -04:00
5436308e4a Merge branch 'develop' of https://github.com/paboyle/Grid into develop 2018-03-21 14:26:29 +00:00
07fe7d0cbe Save file in current dir; print checksums 2018-03-21 14:26:04 +00:00
60b57706c4 Small bug fix in the shm file names 2018-03-21 13:57:30 +00:00
58c2f60b69 Merge branch 'feature/hadrons' into feature/qed-fvol 2018-03-20 20:19:18 +00:00
bfa3a7b3b0 Merge branch 'feature/hadrons' into feature/qed-fvol
# Conflicts:
#	extras/Hadrons/Modules.hpp
#	extras/Hadrons/Modules/MGauge/StochEm.cc
#	extras/Hadrons/modules.inc
2018-03-20 20:17:59 +00:00
954e38bebe Put a username in the path 2018-03-20 18:16:15 +00:00
b1a38bde7a Extra test for Gparity with plaquette action 2018-03-20 18:01:32 +00:00
9875c446c6 Clean up pragmas 2018-03-20 07:19:17 -04:00
9c25eb35ca Eigen develop branch for now 2018-03-20 07:18:56 -04:00
5ac96dbdc6 Warm-up behaviour in SU3 benchmark 2018-03-20 07:18:31 -04:00
5cc9aca85d Use 64bit index for looping 2018-03-20 06:34:52 -04:00
ac29ebcb95 Clean up debug prints 2018-03-20 06:33:59 -04:00
2581875edc Merge branch 'develop' of https://github.com/paboyle/Grid into develop 2018-03-19 18:00:08 +00:00
f212b0a963 Merge branch 'feature/hadrons' of https://github.com/paboyle/Grid into feature/hadrons 2018-03-19 17:57:13 +00:00
62702dbcb8 Fixing bug in the Point sink causing NaNs 2018-03-19 17:56:53 +00:00
41d6cab033 Merge branch 'develop' into feature/hadrons 2018-03-19 13:30:21 +00:00
5a31e747c9 Merge commit 'd5ce66f6ab2c44a12def7b6d26df80d6e646b1fb' into feature/hadrons 2018-03-19 13:19:09 +00:00
cbc73a3fd1 Hadrons: CG guesser fix 2018-03-19 13:11:38 +00:00
a5cfb89304 Update Eigen fetch process directly from develop on GitHub. Dangerous, but needed for GPU 2018-03-19 07:20:48 -04:00
f04a7251cc Gpu welcome message and device info 2018-03-19 07:12:12 -04:00
d4ce7d9905 GPU friendly Stencil needs a view 2018-03-19 07:11:21 -04:00
8a1d303ab9 GPU friendly stencil improvements 2018-03-19 07:11:03 -04:00
bf0a4de919 GPU friendly params object 2018-03-19 07:10:12 -04:00
6fe5885fe4 Warning suppress 2018-03-19 07:09:49 -04:00
17ac309e84 Fix the compile 2018-03-19 07:08:59 -04:00
7467a1c027 Latest eigen needed for GPU 2018-03-19 07:08:10 -04:00
fdfb8a26a8 Disable eigen vectorisation on GPU because of Summit compile issues 2018-03-19 07:07:30 -04:00
2df4e422ad Merge branch 'feature/gpu-port' of https://github.com/paboyle/Grid into feature/gpu-port 2018-03-18 14:45:41 +00:00
3a3e3cac40 Pull the trigger on offload 2018-03-18 14:45:29 +00:00
b1c02ec310 MallocManaged in GPU 2018-03-18 14:44:46 +00:00
38eadee2c9 Prettier code 2018-03-18 14:44:22 +00:00
42c70437be Views 2018-03-18 14:43:47 +00:00
65274b4d7f Tidy up 2018-03-18 14:43:16 +00:00
6c6d43eb4e Drop RB on coarse space ; that was a mistake 2018-03-17 09:35:01 +00:00
e1dcfd3553 typo fix 2018-03-16 23:10:47 +00:00
888838473a Make the offsets 4GB-clean in parallel IO for multifile records 2018-03-16 21:54:56 +00:00
01568b0e62 Add a new SHM option 2018-03-16 21:54:28 +00:00
d5ce66f6ab Extra SHM option 2018-03-16 21:37:03 +00:00
d86936a3de Eliminating deprecated lex_sites 2018-03-16 12:26:39 +00:00
ee5cf6c8c5 WilsonMG: Some minor changes to GMRES implementations 2018-03-16 13:10:45 +01:00
d516938707 Hadrons: eigen packs I/O and deflation interface 2018-03-14 14:55:47 +00:00
7e8be32755 Typo fix 2018-03-13 19:22:31 -04:00
72344d1418 Hadrons: change default Schur convention to DiagTwo 2018-03-13 17:10:54 +00:00
7ecf6ab38b Merge branch 'develop' into feature/hadrons 2018-03-13 16:11:59 +00:00
2d4d70d3ec Hadrons: LCL fixes 2018-03-13 16:10:36 +00:00
78f8d47528 Hadrons: environment access to derived objects 2018-03-13 16:10:16 +00:00
b85f987b0b Hadrons: error message channel verbose during profiling 2018-03-13 16:09:22 +00:00
f57afe2079 Hadrons: much cleaner eigenpack implementation, to be tested 2018-03-13 13:51:09 +00:00
0fb84fa34b Make compilation faster by moving print of git hash. 2018-03-12 17:03:48 -04:00
8462bbfe63 Gamma input for meson contraction with round brackets 2018-03-12 18:02:12 +00:00
229977c955 Hadrons: minor memory fix for ShiftProbe module 2018-03-09 21:56:27 +00:00
e485a07133 Hadrons: garbage collector debug output 2018-03-09 21:56:01 +00:00
0880747edb Merge branch 'develop' of https://github.com/paboyle/Grid into develop 2018-03-09 20:44:42 +00:00
b801e1fcd6 fclose should be called through a call to close() 2018-03-09 20:44:10 +00:00
ff761ea4e6 Bound check improvement 2018-03-09 20:00:46 +00:00
70ec2faa98 Hadrons: maximum iteration specified for tests and error if 0 2018-03-09 19:53:55 +00:00
a31d3e60d8 Better bounds check 2018-03-09 18:10:21 +00:00
a66cecc509 WilsonMG: Fix invalid call to MR ctor 2018-03-09 17:34:29 +01:00
0f6cdf3d4b WilsonMG: Implement missing parts of CoarsenedMatrix 2018-03-09 16:56:16 +01:00
1e63b73a14 WilsonMG: Some cleanup/formatting 2018-03-09 16:50:19 +01:00
2f849ee252 declaration fix 2018-03-08 23:34:00 +00:00
bb6ed44339 Merge branch 'develop' into feature/hadrons 2018-03-08 23:09:28 +00:00
4d60b92b7f Update oSites 2018-03-08 21:00:25 +00:00
360cface33 Grid tensor serialisation fully implemented and tested 2018-03-08 19:12:03 +00:00
80302e95a8 MILC Interface 2018-03-08 15:34:03 +00:00
c159c70c84 View introduced 2018-03-08 14:58:04 +00:00
28b5572755 Merge branch 'feature/gpu-port' of https://github.com/paboyle/Grid into feature/gpu-port 2018-03-08 13:01:42 +00:00
5fac7080bc Adding -fno-strict-aliasing by default 2018-03-08 13:01:24 +00:00
4548523ecc This modification eliminates what looks like a compiler bug
on Intel 2017.
2018-03-08 04:41:16 -08:00
caf2f6b274 Merge branch 'develop' of github.com:paboyle/Grid into develop 2018-03-08 09:52:25 +00:00
c49be8988b Grid tensor serialisation 2018-03-08 09:51:22 +00:00
971c2379bd std::vector to tensor conversion + test units 2018-03-08 09:50:39 +00:00
94b0d66e4c Merge pull request #157 from goracle/dev-pull
Add print of the current git hash on Grid init.
2018-03-08 16:09:28 +09:00
5e8af396fd Add print of the current git hash on Grid init. 2018-03-07 13:11:51 -05:00
4154fc6f44 Revert a change 2018-03-07 16:54:11 +00:00
4e3458516a Reverting after fixing issue with extract merge 2018-03-07 16:50:13 +00:00
90a2efb9b3 Hit an annoying strict-aliasing optimisation in GCC 4.9 through 6.3.
Chris K was correct. It appears that an additional memcpy (UGHHH) is enough
to suppress the compiler's optimisation.
2018-03-07 07:27:26 -08:00
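A minimal sketch of the memcpy workaround described in the commit above, for a simple type-punning case; the function name bits_of and the types are illustrative, not Grid code:

    #include <cstring>
    #include <cstdint>

    // Routing the reinterpretation through memcpy is well defined, so the
    // strict-aliasing optimiser cannot assume the two objects never alias.
    inline std::uint64_t bits_of(double x) {
      std::uint64_t u;
      std::memcpy(&u, &x, sizeof(u));
      return u;
    }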
9942723189 Merge branch 'develop' into feature/hadrons
# Conflicts:
#	lib/serialisation/BaseIO.h
2018-03-07 15:22:16 +00:00
a7d19dbb64 Merge branch 'develop' of github.com:paboyle/Grid into develop
# Conflicts:
#	lib/serialisation/BaseIO.h
2018-03-07 15:13:54 +00:00
90dbe03e17 Conversion of Grid tensors to std::vector made more elegant, also pair syntax changed to (x y) to avoid issues with JSON/XML 2018-03-07 15:12:32 +00:00
8b14096990 Conversion of Grid tensors to std::vector made more elegant, also pair syntax changed to (x y) to avoid issues with JSON/XML 2018-03-07 15:12:18 +00:00
b938202081 Overlapped Comm for Wilson DhopInternal 2018-03-07 14:08:43 +00:00
e79ef469ac Merge branch 'develop' into feature/hadrons
# Conflicts:
#	lib/serialisation/BaseIO.h
2018-03-06 19:25:51 +00:00
485c5db0fe conversion of Grid tensors to nested std::vector in preparation for tensor serialisation 2018-03-06 19:22:03 +00:00
c793947209 Add overloaded Photon constructors, with default parameters for IR improvements and infinite-volume G(x=0). 2018-03-06 16:27:26 +00:00
40699221e2 Dont alias lhs and rhs in a where statement 2018-03-06 04:14:13 -08:00
3cb1b545d0 Don't alias the variables with a where statement. 2018-03-06 04:13:26 -08:00
3e9ee053a1 Merge branch 'develop' into feature/hadrons 2018-03-05 20:01:38 +00:00
dda6c69d5b Hadrons: scalar SU(N) shift probes 2018-03-05 20:00:29 +00:00
cd51b9af99 Torture yourself with namespace lookup 101 2018-03-05 19:58:13 +00:00
e199ba7e88 Fix the Charge conjugate BC's 2018-03-05 13:59:02 +00:00
c399c2b44d Guido broke the charge conjugate plaquette action with premature optimisation.
This sector of the code does not matter for anything other than Guido's quenched HMC
studies, and any plaq specific optimisations should be retained in a private branch
instead of destroying the code simplicity.
2018-03-05 12:55:41 +00:00
af7de7a294 Merge branch 'develop' of https://github.com/paboyle/Grid into develop 2018-03-05 12:22:41 +00:00
1dc86efd26 Finalize protection 2018-03-05 12:22:18 +00:00
4d53703c67 Scalar type differing allowed, e.g. precision change 2018-03-05 11:39:52 +00:00
d506c59efa Warnings disabled 2018-03-05 11:39:20 +00:00
44188a5c6f AVX512 fix 2018-03-05 00:32:24 +00:00
2018077770 Make NVCC happy with the compile. This is warning free on 9.1 on my laptop (both make and make tests). 2018-03-05 00:28:24 +00:00
984e06e2b5 Introduce view objects that can safely be copied to GPU for access 2018-03-04 16:40:11 +00:00
aead94e9a7 View introduced 2018-03-04 16:39:29 +00:00
3277bda130 View introduction to prepare for accelerator offload.
Probably same problem exists for stencil object
2018-03-04 16:38:08 +00:00
442b0b406c View related changes 2018-03-04 16:34:14 +00:00
8824a54269 View related changes 2018-03-04 16:33:33 +00:00
c03423250f Indexable changes 2018-03-04 16:31:35 +00:00
317fd0da44 Views introduced. Need to accelerator offload these routines. 2018-03-04 16:30:45 +00:00
783795a44a Views introduced 2018-03-04 16:12:49 +00:00
0e6197fbed Introduce accelerator friendly expression template rewrite.
Must obtain and access lattice indexing through a view object that is safe
to copy construct in copy to GPU (without copying the lattice).
2018-03-04 16:03:19 +00:00
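A minimal sketch of the view idiom described in the commit above; the names LatticeView, Lattice and View here are illustrative assumptions, not Grid's actual interface. The owning object keeps the storage, while the trivially copyable view is what a device lambda captures by value:

    #include <cstddef>
    #include <vector>

    // Lightweight handle: safe to copy-construct into a GPU kernel/lambda.
    template<class vobj>
    struct LatticeView {
      vobj        *data;   // raw pointer into the lattice storage
      std::size_t  sites;  // number of sites visible through the view
      vobj &operator[](std::size_t i) const { return data[i]; }
    };

    // Heavyweight owner: never copied to the device, only its view is.
    template<class vobj>
    class Lattice {
      std::vector<vobj> odata;
    public:
      explicit Lattice(std::size_t sites) : odata(sites) {}
      LatticeView<vobj> View() { return LatticeView<vobj>{odata.data(), odata.size()}; }
    };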
dad7862f91 Go through a view object that can be copied to GPU 2018-03-04 16:02:02 +00:00
c89a883448 where was deprecated and integrated into the ET engine a long time ago. Remove the dead original code 2018-03-04 15:58:02 +00:00
c204288fbc Remove a couple of print statements 2018-03-04 15:57:15 +00:00
ad739f042a Introduce views for passing lattice indexing to accelerators. 2018-03-04 15:56:14 +00:00
db988301d0 Introduce view objects for indexing lattices. Used to pass the view to acccelerators 2018-03-04 15:55:16 +00:00
9b1f29c4c2 Support a view for passing to accelerator 2018-03-04 15:54:35 +00:00
e5ea04ee0c Need to support precision change, and real replication in multiple simd lanes 2018-03-04 15:53:04 +00:00
c92a3c6068 Need to support any vector type template and run on accelerator 2018-03-04 15:52:14 +00:00
03f8da8fbc enable-debug option for debug flags in compile 2018-03-04 15:51:47 +00:00
f32555dcc5 Merge branch 'develop' into feature/hadrons 2018-03-03 15:31:52 +00:00
30391cb2eb Merge pull request #155 from fionnoh/develop
Some changes needed for deflation interface
2018-03-03 13:43:59 +00:00
e93c883470 Hadrons: basic GraphViz visualisation 2018-03-03 13:42:36 +00:00
2e88408f5c Some changes needed for deflation interface 2018-03-02 22:27:41 +00:00
fcac5c0772 Hadrons: scalar SU(N) fixes 2018-03-02 19:20:23 +00:00
90f4000935 Hadrons: scheduler debug less verbose 2018-03-02 19:20:01 +00:00
480708b9a0 Hadrons: safer error handling for HadronsXmlRun 2018-03-02 19:19:37 +00:00
c4baf876d4 Hadrons: graph consistency check 2018-03-02 18:40:18 +00:00
2f4dac3531 Hadrons: legal update 2018-03-02 18:10:58 +00:00
3ec6890850 Merge branch 'feature/hadrons' of github.com:paboyle/Grid into feature/hadrons 2018-03-02 17:56:08 +00:00
018801d973 Hadrons: legal update 2018-03-02 17:56:00 +00:00
1d83521daa Hadrons: scalar SU(N) EMT 2018-03-02 17:55:18 +00:00
fc5670c6a4 Merge pull request #151 from guelpers/feature/hadrons
Feature/hadrons
2018-03-02 17:54:43 +00:00
d9c435e282 Hadrons: Scalar SU(N) transverse projection module 2018-03-02 17:35:12 +00:00
614a0e8277 Hadrons: Scalar SU(N) utility functions 2018-03-02 17:34:23 +00:00
aaf39222c3 Updated my fork and fixed conflicts 2018-03-02 17:08:08 +00:00
550142bd6a Hadrons: more code cleaning 2018-03-02 14:30:45 +00:00
c0a929aef7 Hadrons: code cleaning 2018-03-02 14:29:54 +00:00
37fe944224 Hadrons: scalar kinetic term 2018-03-02 14:14:11 +00:00
315a42843f changes requested for the pull request 2018-03-02 11:47:38 +00:00
83a101db83 Hadrons: more LCL fixes 2018-03-02 11:05:02 +00:00
c4274e1660 Hadrons: LCL cleaning 2018-03-02 10:18:33 +00:00
ba6db55cb0 Hadrons: reverse last commit 2018-03-01 23:30:58 +00:00
e5ea84d531 Hadrons: LCL: orthogonalise coarse evec 2018-03-01 19:33:11 +00:00
15767a1491 Hadrons: LCL fine convergence test 2018-03-01 18:04:08 +00:00
4d2a32ae7a Hadrons: z-Mobius message fix 2018-03-01 18:03:44 +00:00
5b937e3644 Hadrons: VM memory profiling fix 2018-03-01 17:28:38 +00:00
e418b044f7 Hadrons: code cleaning 2018-03-01 12:57:28 +00:00
b8b05f143f Hadrons: Lanczos more conservative type names 2018-03-01 12:53:16 +00:00
6ec42b4b82 LCL: external storage fix 2018-03-01 12:27:29 +00:00
abb7d4d2f5 Hadrons: z-Mobius action 2018-02-27 19:32:19 +00:00
16ebbfff29 Hadrons: Schur convention globally defined through a macro 2018-02-27 18:45:23 +00:00
4828226095 Hadrons: prettier log 2018-02-27 14:43:51 +00:00
8a049f27b8 Hadrons: Lanczos code improvement 2018-02-27 13:46:59 +00:00
43578a3eb4 Hadrons: copyright update 2018-02-26 19:24:19 +00:00
fdbd42e542 Hadrons: first implementation of local coherence Lanczos 2018-02-26 19:22:43 +00:00
e7e4cee4f3 Merge branch 'develop' into feature/hadrons 2018-02-26 15:05:05 +00:00
78a9e31ff0 options more obvious 2018-02-24 22:26:32 +00:00
c1fc947bb8 Coordinate handling GPU friendly + some GPU merge/extract improvements 2018-02-24 22:26:10 +00:00
ff7b19a71b Coordinate handling GPU ready avoid malloc 2018-02-24 22:25:39 +00:00
1c16ffa1c1 Coordinate GPU ready. No malloc 2018-02-24 22:25:09 +00:00
4962f59477 Eliminate both the GPU issue and the threading bottleneck by avoiding malloc in coordinate handling 2018-02-24 22:24:37 +00:00
e158b60bce GPU friendly coords 2018-02-24 22:23:47 +00:00
34820bec27 Coordinate handling GPU ready. No malloc 2018-02-24 22:23:18 +00:00
eed9aa9f0c Extract merge gpu ready 2018-02-24 22:23:01 +00:00
8792ff6439 Coordinate handling gpu ready 2018-02-24 22:22:43 +00:00
078901278c Coordinate handling gpu friendly 2018-02-24 22:22:02 +00:00
bf5fb89aff Coordinate handling GPU friendly 2018-02-24 22:21:36 +00:00
7574c18cef Massive clean-up of extract/merge.
Simpler and GPU friendly
2018-02-24 22:21:08 +00:00
36ea5f6b77 gpu friendly coordinates ; no std::vector on GPU 2018-02-24 22:20:14 +00:00
285deab432 Coordinate handling GPU friendly. Avoid std::vector 2018-02-24 22:19:28 +00:00
bb7d87d0a0 Coordinate handling gpu friendly 2018-02-24 22:18:33 +00:00
ec3954ff5f QedFVol: Add input parameter G(x=0) for infinite-volume photon 2018-02-23 14:53:05 +00:00
0f468e2179 OverlappedComm for Staggered 5D and 4D. 2018-02-22 12:50:09 +00:00
8e61286741 Merge branch 'develop' into feature/qed-fvol 2018-02-20 15:33:35 +00:00
4790e99817 Extra communicator free that I had missed.
Hard to audit them all as this is complex
2018-02-20 15:12:31 +00:00
2dd63aa7a4 Merge branch 'develop' of https://github.com/paboyle/Grid into develop 2018-02-20 14:29:26 +00:00
559a501140 Deflation interface for solvers 2018-02-20 14:29:08 +00:00
945684c470 updates for deflation in the RB solver 2018-02-20 14:28:38 +00:00
e30a80a234 Relaxed constraints on MPI thread mode when not using multiple comms threads 2018-02-15 17:13:36 +00:00
69e4ecc1d2 QedFVol: Fix single precision build error 2018-02-14 17:37:18 +00:00
5f483df16b Merge branch 'develop' into feature/qed-fvol 2018-02-14 16:35:04 +00:00
4680a977c3 QedFVol: set infinite-volume photon propagator to 1 at x=0,
so that the momentum-space photon propagator is non-negative.
Need to check whether this is sufficient for all volumes.
2018-02-14 16:30:09 +00:00
de42456171 Updated my fork and fixed conflicts 2018-02-14 13:57:56 +00:00
d55212c998 restructure SeqConservedCurrent for DWF to need less memory 2018-02-14 10:45:18 +00:00
c96483e3bd Whitespace only change 2018-02-13 11:39:07 +00:00
c6e1f64573 Test for QED 2018-02-13 09:30:23 +00:00
ae31a6a760 Move deflate to right class 2018-02-13 02:11:37 +00:00
dd8f2a64fe Interface to suit Hadrons on Lanczos 2018-02-13 02:08:49 +00:00
724cf02d4a QedFVol: Implement infinite-volume photon 2018-02-12 17:18:10 +00:00
7b8b2731e7 Conj error for complex coeffs 2018-02-12 16:06:31 +00:00
237a8ec918 Communicator leak fixed (I think) 2018-02-12 13:27:20 +00:00
49a0ae73eb Insertion of photon field in sequential conserved current 2018-02-12 09:36:08 +00:00
6ab60c5b70 Merge remote-tracking branch 'upstream/develop' into feature/wilsonmg 2018-02-08 23:59:07 +01:00
8c692b7ffd WilsonMG: Comment assertion on hermiticity of coarse operator for now
TODO: Think of a way to not break dwf_hdcr by doing that. It's only an assertion
but it still interferes with it.
2018-02-08 23:55:05 +01:00
2976132bdd Add first version of multigrid for wilson clover analogous to wilson one
Just like the wilson one, this algorithm

• is currently only a 2-level method since I don't have correct implementations
  for Mdir and Mdiag in CoarsenedMatrix yet (needed for further coarsening)
• needs levelization and refactoring into a proper algorithm
2018-02-08 23:52:10 +01:00
48177f2f2d Add tests for all MR|GMRES solvers with wilson clover action 2018-02-08 23:52:09 +01:00
c4ce70a821 WilsonMG: Major cleanup 2018-02-08 23:52:08 +01:00
315f1146cd QedFVol: Fix output of VPCounterTerms module. 2018-02-08 20:40:45 +00:00
a3e009ba54 Add tests for CAGMRES solvers with staggered action 2018-02-08 17:46:28 +01:00
eb7cf239d9 Print warning messages in CAGMRES solvers
Currently, the implementation of these algorithms doesn't differ from their non
communication-avoiding versions.
2018-02-08 17:43:47 +01:00
13ae371ef8 Make solver parameters match in all MR|GMRES solver tests 2018-02-08 17:33:10 +01:00
9f79a87102 Fix bugs in Flexible GMRES solvers
Somehow I got the left- and right-preconditioned versions of GMRES mixed up. As
of now this is the right-preconditioned version, which is what we want.
2018-02-08 16:00:31 +01:00
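For reference on the fix above, the standard distinction (stated here generically, not taken from the Grid sources) is: left preconditioning solves M^{-1} A x = M^{-1} b and the iteration monitors the preconditioned residual M^{-1}(b - A x), whereas right preconditioning solves A M^{-1} u = b with x = M^{-1} u, so the monitored residual b - A x is the true one. Flexible GMRES is built on the right-preconditioned form, since that is what allows the preconditioner M to change from iteration to iteration.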
4ded1ceeb0 Make GMRES solvers perform no more than MaxIterations steps
I noticed that it was possible to overrun this number.
2018-02-08 15:29:44 +01:00
9f202782c5 QedFVol: Change format of scalar VP output files, and save diagrams without charge factors for consistency with ChargedProp module. 2018-02-07 20:31:50 +00:00
8bc12e0ce1 Remove superfluous comments in MR solver 2018-02-07 18:09:09 +01:00
cc2f00f827 Remove test for MR solver with dwf action as it doesn't converge 2018-02-07 18:09:08 +01:00
cd61e2e6d6 Increase max iterations in test of MR solver with staggered action 2018-02-07 18:09:07 +01:00
323ed1a588 Add an overrelaxation parameter to the MR solver 2018-02-07 18:09:06 +01:00
68c66d2e4b Remove empty line in output of *Residual* solvers 2018-02-07 18:08:56 +01:00
1671adfd49 WilsonMG: Add some tests for linear operators 2018-02-07 17:15:22 +01:00
594a262dcc QedFVol: Remove redundant file Communicator_mpi.cc 2018-02-07 11:37:01 +00:00
7f8ca54285 Merge branch 'develop' into feature/qed-fvol 2018-02-07 10:11:00 +00:00
c5b23c367e QedFVol: Fix segmentation fault when multiple propagator modules are used. 2018-02-05 11:46:33 +00:00
b6fe03eb26 BugFix: now the stochastic EM potential weight is generated when called for the first time 2018-02-02 15:29:38 +00:00
f37ed4958b Implement IR improvement, with coefficients set in input file. 2018-02-02 11:56:51 +00:00
b9b5bdfc3a Proper offload (accelerator access) will require a mutable copy lambda. 2018-02-02 11:38:19 +00:00
51eb2c5dfc Make referencing the stencil and all info required to evaluate the kernel
accelerator marked up
2018-02-02 11:37:13 +00:00
ede0dff794 Mark up as an accelerator function 2018-02-02 11:36:44 +00:00
aa6de818e2 Copy data needed by Kernels out of the grid object to avoid host reference 2018-02-02 11:36:11 +00:00
dcf6517a93 Accelerator offload and copy Opt into the kernel for GPU host var safety 2018-02-02 11:35:35 +00:00
a308dff410 accelerator loop, copy Opt into the GPU 2018-02-02 11:34:37 +00:00
14ba20898a Accelerator loop the key kernel call 2018-02-02 11:30:07 +00:00
a53d3ee19a Add Opt to the lambda capture to get it into the GPU 2018-02-02 11:28:39 +00:00
5df435319d Use constexpr 2018-02-02 11:27:56 +00:00
0da2d3e222 Accelerator offload some more stuff 2018-02-02 11:27:35 +00:00
9c9dfbfa78 Force accelerator 2018-02-02 11:25:09 +00:00
e4df025d01 Accelerator related 2018-02-01 23:20:05 +00:00
cfeda9d536 constexpr on const ints 2018-02-01 22:59:12 +00:00
4450b1993a Offload 2018-02-01 22:45:47 +00:00
d03ce5c2a4 Provide a way to get around std::vector for a known type on device.
Use template specialisation to access a private member in the Clang++ STL implementation
2018-02-01 22:44:25 +00:00
7d6522c1ef Accelerator inline 2018-02-01 22:43:56 +00:00
b96832a922 Accelerator inline 2018-02-01 22:43:26 +00:00
5d7af47b05 accelerator_inline 2018-02-01 22:42:54 +00:00
053ef25c90 constexpr makes GPU happy 2018-02-01 22:42:29 +00:00
8ae77d3706 Small simplification of FermionOperatorImpl towards GPU but not there yet 2018-02-01 22:41:54 +00:00
896f3a8002 Fix to MPI for Hokusai system 2018-02-01 18:51:51 +00:00
5f85473d6b QedFVol: Move Projection class into Result class 2018-02-01 16:16:13 +00:00
871649238c WilsonMG: Stricter naming for linear operators 2018-02-01 14:43:08 +01:00
ac3b0ebc58 QedFVol: New structure for ChargedProp output files 2018-02-01 12:31:32 +00:00
7c86d2085b WilsonMG: Some minor cleanup 2018-02-01 12:24:16 +01:00
9292be0b69 WilsonMG: Add check for Mdiag + Σ Mdir == M
Need to test my implementations of CoarsenedMatrix::Mdiag &
CoarsenedMatrix::Mdir.
2018-01-31 14:03:30 +01:00
f0fcdf75b5 Update README.md 2018-01-30 12:44:20 +01:00
53bffb83d4 Updating README with new SKL target 2018-01-30 12:42:36 +01:00
10141f90c9 WilsonMG: Rename test file 2018-01-30 10:25:09 +01:00
cd44e851f1 Fixing compilation error in FundtoHirep 2018-01-30 06:04:30 +01:00
a414430817 Merge remote-tracking branch 'upstream/develop' into feature/ddalphaamg 2018-01-29 18:32:31 +01:00
f20728baa9 WilsonMG: Some further steps towards a three level method
Currently this is very "manual" as we are still testing stuff. Will refactor
and make it an algorithm once everything works.

What currently does work:

  - All tests in MultiGridPreconditioner::runChecks for the first coarse grid
  - The tests for the intergrid operators going from the first to the second
    coarse grid
    - (1 - P R) v   == 0
    - (1 - R P) v_c == 0
  - A full solve with VPGCR and a two-level MG preconditioner

What hinders the rest of the tests from passing with a three-level method is the
absence of implementations of CoarsenedMatrix::Mdir and CoarsenedMatrix::Mdiag.
2018-01-29 18:29:49 +01:00
d2e68c4355 WilsonMG: Perform some minor cleanup 2018-01-29 18:07:10 +01:00
1cb745c8dc Add function to return full type as std::string
Also works for nested templates. I find it useful for debugging.
Possible usage:

std::cout << "getTypename<AType>() = " << getTypename<Atype>() << std::endl;
std::cout << "getTypename<decltype(AnInstance)>() = " << getTypename<decltype(AnInstance)>() << std::endl;
2018-01-29 17:39:19 +01:00
faf4278019 Use 2 passes of GS in coarse operator construction 2018-01-29 17:21:42 +01:00
194e4b94bb Make MG checking function work level-wise 2018-01-29 17:18:20 +01:00
bfc1411c1f Use more iterations in subspace creation 2018-01-29 17:11:29 +01:00
161637e573 Turn on orthogonality checking temporarily 2018-01-29 17:10:05 +01:00
79b50feacf fixme updates 2018-01-29 16:00:40 +00:00
fb24e3a7d2 Adding utilities for perf profiling 2018-01-29 11:11:45 +01:00
655a69259a Added support for GCC compilation for Skylake AVX512 2018-01-28 17:02:46 +01:00
c67c1544cd Attempt to fix abs no-compile on Travis 2018-01-28 10:26:04 +00:00
e657f9a344 OMP collapse changes to make NVCC happy 2018-01-28 01:21:53 +00:00
b6ebf35af5 Intel compiler doesn't like Nvidia error disable pragmas 2018-01-28 01:03:10 +00:00
604c05f4b8 parallel_for elimination -> thread_loop 2018-01-28 01:01:36 +00:00
70e276e1ab parallel_for elimination -> thread_loop 2018-01-28 01:01:14 +00:00
9472b02771 Parallel_for elimination -> thread_loop. 2018-01-28 01:00:55 +00:00
9597ab94eb Zero changes, swap on lattice type. 2018-01-27 23:51:40 +00:00
ce4da83bc2 Zero changes, literally 2018-01-27 23:51:10 +00:00
d557f3ef77 Zero changes (literally) and also a warning elimination 2018-01-27 23:50:43 +00:00
f574c20118 Zero changes, __VA_ARGS__ and swap 2018-01-27 23:50:17 +00:00
f102897385 VA_ARGS to make comma safety automatic 2018-01-27 23:49:47 +00:00
d6fce3e498 Zero changes, literally 2018-01-27 23:48:01 +00:00
2d0bcc2606 Zero changes, accelerator on kernels and some thread loop changes 2018-01-27 23:47:38 +00:00
45df59720e Zero changes and VA_ARGS changes 2018-01-27 23:46:58 +00:00
44ef5bc207 Zero changes (literally speaking). 2018-01-27 23:46:28 +00:00
98af36217a Zero changes. (I mean literally) 2018-01-27 23:46:02 +00:00
4e0cf0cc28 QedFVol: Fix bug in ScalarVP.cc due to double use of temporary object. Still getting mpi3 errors when configured with enable-comms=mpi[-auto]. 2018-01-27 15:15:25 +00:00
507c4e9efc Correcting a missing semicolon in avx512 2018-01-27 10:59:55 +01:00
be7b37b9c9 Mistake on openmp 2018-01-27 00:05:11 +00:00
c4f82e072b _grid becomes private; use Grid() 2018-01-27 00:04:12 +00:00
3f9654e397 Hiding internals 2018-01-26 23:09:03 +00:00
912b50f6fa Hiding lattice internals 2018-01-26 23:08:45 +00:00
2a4a0e43c1 Hide internals 2018-01-26 23:08:27 +00:00
32523a229c Hide internals 2018-01-26 23:08:02 +00:00
1ebd56c3fb Hide internal data 2018-01-26 23:07:34 +00:00
8dccffdfd5 Hide internal data 2018-01-26 23:06:51 +00:00
5642ea270f Hide internal data 2018-01-26 23:06:28 +00:00
43cea62855 Hide internal data 2018-01-26 23:06:03 +00:00
2b4067bb71 Hide internal data 2018-01-26 23:05:32 +00:00
85771e97e9 Hide internal data 2018-01-26 23:04:46 +00:00
8b371ffa94 Hide internal data 2018-01-26 23:03:54 +00:00
bf659dfd92 Hide the ._odata 2018-01-26 22:27:47 +00:00
cdf550845f QedFVol: Fix bugs in StochEm.cc and ChargedProp.cc (still only works without MPI). 2018-01-26 21:25:20 +00:00
3db7a5387b BROKEN: Adapted scalarVP, UnitEm and VPCounterTerms modules to new Hadrons. Currently getting an assertion error from Communicator_mpi3.cc when I try to run. 2018-01-26 16:33:48 +00:00
76a4dd36d9 Fix no compile of test serialisation 2018-01-26 00:13:21 +00:00
f4010023ca Warning fixes 2018-01-25 23:46:47 +00:00
24a4589def Changes to interface a little 2018-01-25 23:37:34 +00:00
c904822e74 Warning removal 2018-01-25 23:37:15 +00:00
40ee1e1957 Zero() 2018-01-25 23:36:58 +00:00
461df78a3f Better to use Zero(), and not zero static data 2018-01-25 23:36:22 +00:00
db9c9475d4 const 2018-01-25 23:36:06 +00:00
214f7a6f13 Drop std::vector container for the lattice data 2018-01-25 23:35:04 +00:00
c844cfcda8 Remove commAllocator; simplify; option to switch off the pointer cache 2018-01-25 23:33:57 +00:00
a3e3034e6f Host compile 2018-01-25 23:33:00 +00:00
e7cba358c2 Temporary update to reflect the new dropping of std::vector in Lattice
Will update again to hide the internals in an interface
2018-01-25 23:31:41 +00:00
f8a5194c70 Merge branch 'develop' of https://github.com/paboyle/Grid into develop 2018-01-25 13:46:37 +01:00
cff3bae155 Adding support for general Nc in the benchmark outputs 2018-01-25 13:46:31 +01:00
90dffc73c8 Merge branch 'feature/hadrons' into feature/qed-fvol
# Conflicts:
#	extras/Hadrons/Modules.hpp
#	extras/Hadrons/Modules/MGauge/StochEm.cc
#	extras/Hadrons/Modules/MScalar/ChargedProp.cc
#	extras/Hadrons/Modules/MScalar/ChargedProp.hpp
#	extras/Hadrons/modules.inc
#	lib/communicator/Communicator_mpi.cc
2018-01-24 16:41:44 +00:00
99329197ee Rename header to .h 2018-01-24 14:10:09 +00:00
421401af55 Remove IMCI as we really don't support it 2018-01-24 13:53:21 +00:00
0626c1e39e Accelerator flagging and thrust complex for NVCC 2018-01-24 13:50:41 +00:00
725f03e2e2 Accelerator markup and thrust complex on nvcc 2018-01-24 13:50:10 +00:00
65f77112e0 Thread loops done properly 2018-01-24 13:49:39 +00:00
408b868475 Generic for GPU needs accelerator markup of functions 2018-01-24 13:49:12 +00:00
1c797deb04 Accelerator tweaks 2018-01-24 13:43:43 +00:00
b9d5a42b57 Should be able to eliminate COMMA_SAFE with the VA_ARGS trick; revisit this file 2018-01-24 13:42:06 +00:00
e737591918 Accelerator loops 2018-01-24 13:41:12 +00:00
ba5ea5830b Accelerator loops 2018-01-24 13:40:56 +00:00
43f244badf Thread loops for now; figure out what can be GPU accelerated later here 2018-01-24 13:40:30 +00:00
e9c8ba5ef7 Accelerator loops 2018-01-24 13:39:54 +00:00
d70709a8e8 Thread construct changes 2018-01-24 13:39:06 +00:00
733f8ff0b2 Still using parallel_for -- don't know how to implement reduction on GPU yet. Looking at some sample code is best. 2018-01-24 13:38:13 +00:00
0bfa5bb213 Accelerator loops 2018-01-24 13:37:26 +00:00
1f26a234f9 CPU loops explicit for peek poke 2018-01-24 13:36:31 +00:00
13f0116425 Accelerator loops 2018-01-24 13:35:55 +00:00
25f589b064 Accelerator loops 2018-01-24 13:35:36 +00:00
210c50a278 Accelerator prep work 2018-01-24 13:35:13 +00:00
549a143e78 Accelerator related 2018-01-24 13:34:46 +00:00
277301486d Simple warning elimination 2018-01-24 13:34:15 +00:00
c851b39a49 Nicer way of including aggregate 2018-01-24 13:33:34 +00:00
15cc12eb6c Delete the old non ET file 2018-01-24 13:33:07 +00:00
ae4f1f8c12 New file, split out two from Lattice_reduction 2018-01-24 13:32:43 +00:00
5609624b44 Threading constructs replaced 2018-01-24 13:32:24 +00:00
b5a947dd79 Change to make NVCC happy 2018-01-24 13:32:02 +00:00
ee16f62322 Stray semicolon elimination. NVCC is picky, but eventually picked up these diagnostics,
which were suppressed with a pragma.
2018-01-24 13:31:17 +00:00
3318de27d6 Thread macro changes 2018-01-24 13:30:23 +00:00
ac56965306 GPU changes and threading macros replaced 2018-01-24 13:28:30 +00:00
8e99264f40 Accelerator mark up of entire tensor space for offload 2018-01-24 13:27:30 +00:00
69327db9a9 Improvements for NVCC. Eigen is not compatible with CUDA 9 and we must hack to disable device
compilation
2018-01-24 13:25:07 +00:00
7331ee2d80 Warnings control to overpower the NVCC compiler 2018-01-24 13:24:36 +00:00
918c105c57 NVCC warning elimination 2018-01-24 13:23:59 +00:00
be1511d469 Remove old macros for threading 2018-01-24 13:23:24 +00:00
f1c31df9d2 Updated Eigen version. Still didn't fix the CUDA 9 no-compile.
Worked around by switching off __NVCC__ during the include of Eigen and switching it
back on after. Note that, as a result, no Eigen code can be offloaded. No harm done.
2018-01-24 13:19:29 +00:00
ff7b587fad Ugly... nvcc needs -x cu to compile .cc as CUDA.
Since CXXFLAGS is also passed to the linker, and -x cu breaks the link phase, we must replace
CXX and CXXLD with nvcc -x cu and nvcc -link respectively.
2018-01-24 13:18:19 +00:00
4e1135b214 Updated pugixml to v1.8; still didn't fix the no-compile under nvcc.
Turns out nvcc was right; an explicit template instantiation was missing,
yet gcc, icpc and clang were somehow happy without it.
Fix this.
2018-01-24 13:17:10 +00:00
acd4955a18 Remove rdtsc on __NVCC__ as it may be called on the device 2018-01-24 13:16:18 +00:00
bd08dc4f45 Pragma use for nvcc, warning elimination. 2018-01-24 13:15:43 +00:00
22d137d4e5 Namespace, nvcc warning elimination. 2018-01-24 13:14:43 +00:00
87ee592176 Pragma changes and layout and warning elimination for nvcc 2018-01-24 13:14:09 +00:00
063603b1ea Warning elimination 2018-01-24 13:12:14 +00:00
f292106db6 Split out pragmas from threads.h;
More work needed; rename threads directory to "parallelism" or something like that
2018-01-24 13:11:04 +00:00
9d08aebea9 Compile through nvcc ; warning elimination fixes 2018-01-24 13:09:53 +00:00
4e30739093 First compile OK through nvcc on host 2018-01-24 13:08:47 +00:00
a1151fc734 Hadrons: MPI-safe serial IO 2018-01-23 17:26:50 +00:00
ab3baeb38f Implement contractions and data output in functions; calculate diagrams S, X and 4C separately; output 2E and 2T instead of sunset_shifted, sunset_unshifted, tadpole_shifted, tadpole_unshifted; add comments. 2018-01-23 17:07:45 +00:00
389731d373 changed SeqConservedSummed.hpp to work with new hadrons interface 2018-01-23 10:11:33 +00:00
6e3ce7423e Hadrons: don't display module list at startup (too long) 2018-01-22 20:04:05 +00:00
15f15a7cfd Merge branch 'develop' into feature/hadrons
# Conflicts:
#	extras/Hadrons/Modules.hpp
#	extras/Hadrons/modules.inc
2018-01-22 20:03:36 +00:00
0e5f626226 Hadrons: module for scalar operator divergence 2018-01-22 19:38:19 +00:00
04f92ccddf WilsonMG: Provide a fix for the previous commit; compiles and runs successfully now
I don't like the solution with the temporary very much though ...
2018-01-22 14:56:48 +01:00
3b2d805398 WilsonMG: Some first steps towards coarse spin dofs; not compiling yet
A failing conversion from the innermost type (Grid::Simd<...>) to a coarse
scalar (triple iScalar) in blockPromote prohibits this commit from working.
2018-01-22 12:45:51 +01:00
97b9c6f03d No option for interior/exterior split of asm kernels since different directions get interleaved 2018-01-22 11:04:19 +00:00
63982819c6 No option to overlap comms and compute for asm implementation since different directions are interleaved
in the kernels; introducing an if/else structure would be too painful
2018-01-22 11:03:39 +00:00
6fec507bef merged new hadrons interface 2018-01-22 10:09:20 +00:00
219b3bd34f Remove freeVpTensor object 2018-01-19 17:14:11 +00:00
9dc885d297 Fix a bug in Wilson MG
The calculation of the lattice size of a second coarse level was incorrect.
2018-01-18 17:02:04 +01:00
a70c1feecc Remove some unnecessary stuff in Wilson MG 2018-01-18 15:48:28 +01:00
38328100c9 Implement correctness checks for Wilson MG 2018-01-18 15:43:15 +01:00
9732519c41 Apply clang-format to Wilson MG
I can provide the configuration file I used if people want that.
2018-01-18 15:14:37 +01:00
fa4eeb28c4 Save current state in Wilson MG test file 2018-01-17 17:56:34 +01:00
b00d2d2c39 Correction of Representations compilation and small compilation error for Intel 17 2018-01-17 13:46:12 +00:00
f1b3e21830 Merge branch 'feature/clover' into develop 2018-01-17 10:07:42 +00:00
90ea472411 Auto emacs format C++ no namespace indent 2018-01-15 11:44:54 +00:00
56999474e2 Indent 2018-01-15 11:44:45 +00:00
d74c21a386 Global edit for QCD namespace removal & NAMESPACE macros 2018-01-15 09:37:58 +00:00
ca6bdd7302 Useful drive to emacs C++ mode 2018-01-15 00:24:41 +00:00
6f20f1d224 Namespace 2018-01-15 00:24:20 +00:00
d0e357ef89 Cleanup and no QCD namespace 2018-01-15 00:23:51 +00:00
21251f2e1b Namespace and formatting changes 2018-01-15 00:21:27 +00:00
fcf1ccf669 Namespace, indent, badly formatted 2018-01-15 00:17:58 +00:00
49cce514f1 Namespace 2018-01-15 00:17:11 +00:00
695af98a1d Namespace, indent, tidy 2018-01-15 00:16:13 +00:00
f8cb46d360 Namespace, indent, badly formatted code fixed 2018-01-15 00:14:47 +00:00
0da64dea90 Namespace, indent 2018-01-15 00:13:32 +00:00
2cceebbf12 Namespace, indent 2018-01-15 00:12:20 +00:00
40232dcefe Namespace 2018-01-15 00:11:19 +00:00
dbd86bb95b Cleanup, namespace, indent 2018-01-15 00:10:11 +00:00
b8fd2c161f Indent, namespace 2018-01-15 00:09:33 +00:00
df9b979583 Indent, namespace 2018-01-15 00:08:40 +00:00
23ef0e3e19 Namespace and indentation 2018-01-15 00:07:46 +00:00
ae9175735a Indentation, Namespace 2018-01-15 00:07:10 +00:00
2d13ea1a22 Namespace and indentation emacs choices 2018-01-15 00:05:55 +00:00
8c675064bd Namespace and indentation 2018-01-15 00:04:43 +00:00
550b905bb8 Namespace nd indentation 2018-01-15 00:03:49 +00:00
edb79dc088 Namespace and indent 2018-01-15 00:02:33 +00:00
88e635c5d1 Namespace, format 2018-01-15 00:02:01 +00:00
ecb4a24de8 Namespace 2018-01-15 00:01:25 +00:00
c8c1d36710 Namespace, indent 2018-01-15 00:00:52 +00:00
b4bb428d9b Namespace, indent 2018-01-14 23:59:57 +00:00
e9ef7e3852 Namespace, indent 2018-01-14 23:59:23 +00:00
31cbbfc07e Namespace, indent 2018-01-14 23:58:44 +00:00
4eb0552d1d Namespace, indent 2018-01-14 23:58:03 +00:00
08f2a4564f Namespace, formatting 2018-01-14 23:56:33 +00:00
7e00f643f8 Namespace indent 2018-01-14 23:55:44 +00:00
c19ccdad7c Namespace, indent 2018-01-14 23:55:07 +00:00
8aed4181e1 Namespace, indent 2018-01-14 23:54:25 +00:00
06ab7f5661 Namespace 2018-01-14 23:53:31 +00:00
645ec8eba0 Namespace 2018-01-14 23:52:26 +00:00
72ffa8a88e Namespace 2018-01-14 23:51:38 +00:00
4c829b410e Namespace 2018-01-14 23:50:20 +00:00
eda4fd9912 Namespace 2018-01-14 23:49:11 +00:00
041d9137c0 Namespace 2018-01-14 23:48:27 +00:00
eeacdfe031 Namespace 2018-01-14 23:47:37 +00:00
e5535f4d72 Namespace, indent 2018-01-14 23:46:51 +00:00
044a292281 Namespace, indent 2018-01-14 23:46:07 +00:00
fe0467df1e Namespace, indenting 2018-01-14 23:45:19 +00:00
19234fb40e Namespace, format 2018-01-14 23:44:16 +00:00
f445257d28 Namespace, indenting 2018-01-14 23:43:36 +00:00
bdc2a987aa Namespace, indent 2018-01-14 23:42:47 +00:00
72acb0e48f Namespace, indent 2018-01-14 23:41:59 +00:00
b4e9211df7 Namespace, indent 2018-01-14 23:40:38 +00:00
97019d2997 Namespace, format 2018-01-14 23:39:57 +00:00
83c5f05094 Namespace, indent 2018-01-14 23:39:13 +00:00
1619e42d90 Indent and Namespace changes 2018-01-14 23:38:25 +00:00
9f6cebe5ff Namespace and format changes 2018-01-14 23:37:40 +00:00
a84ebe5624 Namespace, format change 2018-01-14 23:36:45 +00:00
c527e39881 Namespace, format indent change 2018-01-14 23:36:07 +00:00
a0f4687887 Namespace, formatting indent changes 2018-01-14 23:35:16 +00:00
3ef7b2389e Format emacs style after NAMESPACE change 2018-01-14 23:34:08 +00:00
7dfa3d0b50 Namespace, format 2018-01-14 23:33:16 +00:00
bf629dddce Namespace, format improved 2018-01-14 23:32:19 +00:00
7747b95430 Namespace, formatting emacs style 2018-01-14 23:31:28 +00:00
ccd75c039a Namespace, fmt 2018-01-14 23:30:34 +00:00
493ea80208 Namespace 2018-01-14 23:29:53 +00:00
229baf3aba Namespace, emacs fmt 2018-01-14 23:29:02 +00:00
0ce4ecfc84 Emacs format indent 2018-01-14 23:28:12 +00:00
ddfaae8ea6 Namespace 2018-01-14 23:27:49 +00:00
70c5b781e5 Namespace, clean up 2018-01-14 23:26:41 +00:00
901e359d28 Namespace changes; need to simplify the EOFA as too many cases and duplicated from Mobius 2018-01-14 23:25:51 +00:00
e857d4d4c8 Namespace, indent 2018-01-14 23:24:51 +00:00
e5b77c7fd8 Namespace, indent 2018-01-14 23:24:06 +00:00
3b5d629048 Namespace, format 2018-01-14 23:23:26 +00:00
08772d5e0c Namespace, indent 2018-01-14 23:22:42 +00:00
017dcd69a6 Namespace, indent 2018-01-14 23:21:40 +00:00
8178a17b88 Namespace, indent 2018-01-14 23:20:55 +00:00
c5c1b53e54 Namespace, indent 2018-01-14 23:20:08 +00:00
440f9e2013 Namespace, indent 2018-01-14 23:19:22 +00:00
c98657d588 Namespace 2018-01-14 23:18:46 +00:00
f450857716 Namespace, indent 2018-01-14 23:17:33 +00:00
9ec238df9e Namespace, indent 2018-01-14 23:16:49 +00:00
3ba8eb1500 Namespace, indent 2018-01-14 23:16:08 +00:00
8da49c5a34 Namespace 2018-01-14 23:15:26 +00:00
e04f61b1fa Namespace 2018-01-14 23:14:46 +00:00
115e13b227 Namespace 2018-01-14 23:13:49 +00:00
75f3062a80 Think this should move to the algorithms directory 2018-01-14 23:12:14 +00:00
b460cd3ef1 Namespace, format 2018-01-14 23:11:24 +00:00
0e6727a33b Namespace, format; possibly some conflict with Azusa beware 2018-01-14 23:10:21 +00:00
4c6745cb4c Namespace 2018-01-14 23:09:44 +00:00
efdd0e572c Namespace 2018-01-14 23:09:10 +00:00
ca60a218ac Namespace 2018-01-14 23:08:35 +00:00
03633d709e Namespace 2018-01-14 23:07:36 +00:00
4de58c4aab Namespace 2018-01-14 23:06:47 +00:00
4f8b1c1940 Namespace 2018-01-14 23:05:23 +00:00
dec39b313d Namespace and format 2018-01-14 23:04:37 +00:00
dc835ad1cb Namespace 2018-01-14 23:03:49 +00:00
71c8c9e4fb Pretty 2018-01-14 23:03:01 +00:00
a935ef7b39 Namespace 2018-01-14 23:01:07 +00:00
a97ad1a51d Namespace 2018-01-14 23:01:01 +00:00
5ab9129db3 Namespace 2018-01-14 22:58:42 +00:00
634943c11f Namespace 2018-01-14 22:57:59 +00:00
e598e65f69 Namespace 2018-01-14 22:57:10 +00:00
291407dc7f Namespace 2018-01-14 22:54:42 +00:00
641a28aa1d Namespace 2018-01-14 22:53:50 +00:00
75207fa010 Format 2018-01-14 22:53:13 +00:00
c2b0e0269a Namespace 2018-01-14 22:52:22 +00:00
7828887604 Namespace, indent 2018-01-14 22:51:18 +00:00
e6efc93a7c Namespace 2018-01-14 22:50:35 +00:00
ff7e773d5e Namespace 2018-01-14 22:49:48 +00:00
a0380fad72 Namespace 2018-01-14 22:48:57 +00:00
61e9a33777 Namespace 2018-01-14 22:48:08 +00:00
3e139b52d3 Namespace 2018-01-14 22:47:24 +00:00
fd6031b005 Namespace 2018-01-14 22:46:17 +00:00
fe44fc50d9 Namespace 2018-01-14 22:45:29 +00:00
2dd88cf3f8 Namespace 2018-01-14 22:44:41 +00:00
6b7e82f1a9 Namespace, indentation 2018-01-14 22:44:06 +00:00
be612b3931 Namespace, indentation 2018-01-14 22:43:27 +00:00
f5e74033f9 Namespace 2018-01-14 22:42:31 +00:00
8d52e0a349 Namespace 2018-01-14 22:41:23 +00:00
a60f6d353e Namespace 2018-01-14 22:40:29 +00:00
5d3b574325 Missing banner; should recreate globally 2018-01-14 22:39:24 +00:00
6ee5ea6b32 Namespace QCD gone 2018-01-14 22:38:22 +00:00
cc349c6512 Namespace 2018-01-14 22:36:59 +00:00
fde2e07bf4 Namespace 2018-01-14 22:36:15 +00:00
2f38fe8d45 Namespace 2018-01-14 22:35:24 +00:00
813af84ae8 Format emacs C++ mode 2018-01-14 22:34:12 +00:00
cfe6c6838f Namespace 2018-01-14 22:33:18 +00:00
12a7216dfe Namespace 2018-01-14 22:32:29 +00:00
71ebd61327 Namespace 2018-01-14 22:31:39 +00:00
2c2da60cc2 Namespace 2018-01-14 22:30:54 +00:00
7631ed9c56 Namespace 2018-01-14 22:30:09 +00:00
65669b116e Namespace 2018-01-14 22:29:18 +00:00
ae2a6cfc6e Namespace 2018-01-14 22:27:32 +00:00
c36223055e Namespace 2018-01-14 22:26:55 +00:00
e42de105c5 Namespace 2018-01-14 22:26:11 +00:00
b08dae0809 Namespace 2018-01-14 22:25:29 +00:00
3bf8fddbb5 Namespace 2018-01-14 22:24:47 +00:00
d29fa23ebc Namespace 2018-01-14 22:23:49 +00:00
c978c88521 Namespace 2018-01-14 22:22:27 +00:00
93f09818da Namespace 2018-01-14 22:21:40 +00:00
54a8ea93ec Namespace QCD gone 2018-01-14 22:20:42 +00:00
56e87d6e55 Namespace 2018-01-14 22:19:25 +00:00
df29cc19ab Namespace 2018-01-14 22:18:27 +00:00
e61189db3f Namespace 2018-01-14 22:17:43 +00:00
361ce948c3 Namespace 2018-01-14 22:16:33 +00:00
049b4a4631 Namespace 2018-01-14 22:15:55 +00:00
9f2f294a27 Namespace 2018-01-14 22:14:58 +00:00
81dcd0e6ea Namespace 2018-01-14 22:13:46 +00:00
34a788331f Namespace 2018-01-14 22:13:02 +00:00
e2c39945b3 Namespace 2018-01-14 22:11:03 +00:00
1591d391b9 Namespace 2018-01-14 22:09:42 +00:00
f4c06ed8c0 Namespace 2018-01-14 22:08:25 +00:00
1f49f781bf Namespace 2018-01-14 22:07:27 +00:00
3a9f746421 Namespace 2018-01-14 22:06:01 +00:00
4491d87766 Namespace 2018-01-14 22:04:21 +00:00
0e080a7abc Namespace 2018-01-14 22:03:14 +00:00
8bf78846ee Namespace 2018-01-14 22:02:09 +00:00
9aa34dc803 Namespace 2018-01-14 22:01:17 +00:00
fdcbe0a0d1 Namespace 2018-01-14 22:00:29 +00:00
6a62a9c6a5 Namespace 2018-01-14 21:59:48 +00:00
b331ecea78 Namespace 2018-01-14 21:58:47 +00:00
66f8a2f082 Namespace 2018-01-14 21:57:46 +00:00
d58b7cf9b9 Namespace changes 2018-01-14 21:56:55 +00:00
0d749becff Namespace 2018-01-14 21:55:47 +00:00
1dbea9aa69 Namespace 2018-01-14 21:54:28 +00:00
c1438cbbe3 Namespace 2018-01-14 21:53:39 +00:00
f4623fd551 Namespace 2018-01-14 21:53:05 +00:00
59ba9ff3bb NAMESPACE & format 2018-01-14 21:52:27 +00:00
1fbab4032b Namespace changes 2018-01-14 21:51:19 +00:00
c037244874 Tensor reformatted with NAMESPACE too 2018-01-13 00:31:02 +00:00
f4272aa6fd Clean up 2018-01-13 00:19:19 +00:00
8cb7a1a887 Format 2018-01-13 00:17:16 +00:00
b45bd8e097 NAMESPACE 2018-01-13 00:16:34 +00:00
5e48b701ec Formatting 2018-01-13 00:11:53 +00:00
7f6bffe5ad NAMESPACE 2018-01-13 00:11:30 +00:00
6bf5fb1924 Clean up and format NAMESPACE 2018-01-13 00:08:25 +00:00
086db7bd19 NAMESPACE and reformat 2018-01-13 00:05:33 +00:00
c0a9b38c02 C++ NAMESPACE format emacs happy 2018-01-13 00:03:57 +00:00
6d7bdfb5f5 Emacs happy 2018-01-13 00:02:53 +00:00
be5d70ae6e C++ happy 2018-01-13 00:02:10 +00:00
ab1068044e C++ emacs happy 2018-01-13 00:01:58 +00:00
dda151250f Emacs format 2018-01-12 23:59:58 +00:00
18daf85069 Emacs format 2018-01-12 23:58:23 +00:00
81cc28f6ca Format 2018-01-12 23:57:22 +00:00
c01a1e02fe Namespace, format 2018-01-12 23:55:38 +00:00
7e70f4ed9c Format, NAMESPACE 2018-01-12 23:55:03 +00:00
1056e36f11 Format, NAMESPACE 2018-01-12 23:49:46 +00:00
0b8a88978b Format, NAMESPACE 2018-01-12 23:47:24 +00:00
59b31b6bb8 Format, NAMESPACE 2018-01-12 23:43:44 +00:00
69496482fc Format, NAMESPACE 2018-01-12 23:42:22 +00:00
4be31ad1f6 C++ indentation 2018-01-12 23:39:49 +00:00
176a021ce9 Formatting, NAMESPACE 2018-01-12 23:38:15 +00:00
b673174b71 Format, NAMESPACE 2018-01-12 23:29:22 +00:00
e6f7a5a818 Namespace 2018-01-12 23:28:01 +00:00
68b69a2ac0 Namespace management 2018-01-12 23:26:14 +00:00
bd15c38ae8 Formatting emacs compliant 2018-01-12 23:25:02 +00:00
b815f5f764 Formatting 2018-01-12 23:23:21 +00:00
4da437431e Reformat 2018-01-12 23:22:46 +00:00
3c7bf211a9 Reformat 2018-01-12 23:22:18 +00:00
347d5404dd format 2018-01-12 23:21:25 +00:00
5e2cd0d07c Format 2018-01-12 23:18:22 +00:00
62fcee72c5 Format, NAMESPACE 2018-01-12 23:16:37 +00:00
0a6168eef0 Format emacs style 2018-01-12 23:11:22 +00:00
63865e4232 format 2018-01-12 23:10:48 +00:00
c64deedf74 Format 2018-01-12 23:09:35 +00:00
3281559ec3 Format 2018-01-12 23:09:01 +00:00
6a2eca2ec2 NAMESPACE 2018-01-12 23:00:03 +00:00
d8ff895e74 NAMESPACE and format 2018-01-12 18:27:22 +00:00
00c49d4c17 Format 2018-01-12 18:25:39 +00:00
ec89714cce NAMESPACE 2018-01-12 18:24:16 +00:00
6ab744c720 NAMESPACE and formatting 2018-01-12 18:11:04 +00:00
bbb657da5c NAMESPACE and formatting 2018-01-12 18:10:11 +00:00
fbc2380cb8 NAMESPACE & format 2018-01-12 18:05:36 +00:00
08682c5461 NAMESPACE and format to my liking 2018-01-12 18:03:57 +00:00
13bce2a6bf NAMESPACE 2018-01-12 17:58:53 +00:00
70e689900b NAMESPACE 2018-01-12 17:58:13 +00:00
b7f8c5b823 Modify test to merge with the new Lanczos interface 2018-01-12 14:38:27 +00:00
3923683e9b Updating the feature/clover branch with the newest Hadron package 2018-01-12 13:35:51 +00:00
e199fda9dc Merge pull request #136 from pretidav/feature/clover
Feature/clover
2018-01-12 11:57:08 +00:00
7bb405e790 Merge branch 'develop' into feature/hadrons
# Conflicts:
#	lib/communicator/Communicator_mpi3_leader.cc
#	lib/communicator/Communicator_shmem.cc
2018-01-11 18:50:15 +00:00
10f7a17ae4 Make timing in VPGCR more detailed 2018-01-11 13:42:18 +01:00
26f14d7dd7 Adapt output format of non-herm solvers to the one of VPGCR 2018-01-11 13:36:30 +01:00
ec16eacc6a Hadrons: scalar SU(N) 2-pt function 2018-01-10 22:12:21 +00:00
cf858deb16 Lanczos with 2 reps fixed (to be tested) 2018-01-10 18:43:02 +01:00
a3affac963 SU3 restored + output filename for mesons and baryons fixed. 2018-01-10 14:56:54 +01:00
d9d1f43ba2 Hadrons: code cleaning 2018-01-10 11:31:24 +00:00
b7cd721308 Hadrons: scalar SU(N) tr(mag^n) 2018-01-10 11:25:59 +00:00
29f026c375 Hadrons: scalar SU(N) tr(phi^n) 1-pt function 2018-01-10 11:01:03 +00:00
58c7a13d54 Hadrons: result file macro with trajectory number 2018-01-10 10:59:58 +00:00
24162c9ead Staggered overlap comms/compute 2018-01-09 13:02:52 +00:00
73434db636 Merge remote-tracking branch 'upstream/develop' into feature/ddalphaamg 2018-01-09 10:43:33 +01:00
e564d11687 Allow resize of the shared memory buffers 2018-01-08 15:20:26 +00:00
0b2162f375 Clean up 2018-01-08 14:06:53 +00:00
5610570182 Synthetic test of lanczos 2018-01-08 11:36:39 +00:00
44f65526e0 Simplify communicators 2018-01-08 11:35:43 +00:00
43e48542ab Merge branch 'develop' of https://github.com/paboyle/Grid into develop 2018-01-08 11:34:45 +00:00
0b85f1bfc8 Simplify the communicator proliferation: mpi and none. 2018-01-08 11:33:47 +00:00
9947cfbf14 Simplify number of communicator cases 2018-01-08 11:33:01 +00:00
357badce5e Simplify communicator case proliferation 2018-01-08 11:32:16 +00:00
0091eec23a Simplify communicator cases 2018-01-08 11:31:32 +00:00
9e9c2962df Simplify comms layer proliferation 2018-01-08 11:30:22 +00:00
bda97212a9 Simplify proliferation of comms layers 2018-01-08 11:29:20 +00:00
b91282ad46 Simplify comms layer proliferation 2018-01-08 11:28:52 +00:00
0a68470f9a Simplify comms layers 2018-01-08 11:28:30 +00:00
6ecf280723 Simplify comms layer proliferation 2018-01-08 11:28:04 +00:00
7eeab7f995 Simplify comms layers 2018-01-08 11:27:43 +00:00
9b32d51cd1 Simplify comms layer proliferation 2018-01-08 11:27:14 +00:00
7b3ed160aa Rationalise MPI options 2018-01-08 11:26:48 +00:00
1a0163f45c Updated to do list 2018-01-08 11:26:11 +00:00
c6411f8514 Merge remote-tracking branch 'upstream/develop' into feature/ddalphaamg 2018-01-08 10:37:10 +01:00
9028e278e4 Trying to fix a bug with SU4 mesons (still under investigation) 2018-01-06 15:57:38 +01:00
dd62f2f371 Hadrons: log message fix 2017-12-29 16:58:44 +01:00
0d612039ed Hadrons: prettier Grid logging (non-intrusive) 2017-12-29 16:58:23 +01:00
e8ac75055c Hadrons: binary configuration loader 2017-12-27 14:24:29 +01:00
8b30c5956c Hadrons: copyright update 2017-12-26 14:16:47 +01:00
185da83454 Hadrons: new MIO module namespace, NERSC loader moved there 2017-12-26 14:05:17 +01:00
6718fa8c4f Merge branch 'feature/scalar_adjointFT' into feature/hadrons 2017-12-26 12:59:33 +01:00
4ce63af7d5 Working on Hadrons with Hirep. (QCD is set for SU4) 2017-12-22 19:02:07 +01:00
6cf635d61c Remove some old code in Wilson MG 2017-12-22 13:20:09 +01:00
39558cce52 Multiply TVs in Wilson MG with G5 instead of G5R5 2017-12-22 13:07:56 +01:00
935cd1e173 conserved current insertion summed over Lorentzindex 2017-12-22 11:38:45 +00:00
55e39df30f tadpole insertion for DWF 2017-12-22 11:36:31 +00:00
67c3fa0f5f Hadrons: all modules are now ported, more tests need to be done 2017-12-21 11:39:07 +00:00
65d4f17976 Hadrons: no errors when trying to recreate a cache 2017-12-19 20:28:32 +00:00
e2fe97277b Hadrons: getReference use is rare, empty by default 2017-12-19 20:28:04 +00:00
84f9c37ed4 Merge branch 'feature/scalar_adjointFT' of https://github.com/paboyle/Grid into feature/scalar_adjointFT 2017-12-19 15:43:55 +00:00
bcf6f3890c Hadrons: more fixes after test 2017-12-14 21:14:10 +00:00
591a38c487 Hadrons: VM fixes 2017-12-14 19:42:16 +00:00
581be32ed2 Implement infrared improvement for v=0 on-shell self-energy 2017-12-14 13:42:41 +00:00
842754bea9 Hadrons: most modules ported to the new interface, compiles but untested 2017-12-13 19:41:41 +00:00
6bc136b1d0 Add module for calculating diagrams required for HVP counter-terms 2017-12-13 17:31:01 +00:00
0887566134 Hadrons: scheduler back! 2017-12-13 16:36:15 +00:00
61fc50d616 Hadrons: better organisation of the VM 2017-12-13 13:44:23 +00:00
a9c8d7dad0 Hadrons: code cleaning 2017-12-13 12:13:40 +00:00
259d504ef0 Hadrons: first full implementation of the module memory profiler 2017-12-12 19:32:58 +00:00
f3a77f4b7f Merge branch 'feature/hadrons' into feature/hadrons-new-memory-model 2017-12-12 14:05:23 +00:00
26d7b829a0 Hadrons: errors managed through exceptions 2017-12-12 14:04:28 +00:00
64161a8743 Hadrons: much simpler reference dependency 2017-12-12 13:08:01 +00:00
2401360784 Merge pull request #138 from guelpers/feature/hadrons
bug fix in sequential insertion of conserved vector current
2017-12-11 18:53:41 +01:00
2cfb50cbe5 bug fix in sequential insertion of conserved vector current 2017-12-08 11:13:39 +00:00
f9aa39e1c4 global memory debug through command line flag 2017-12-07 14:40:58 +01:00
df152648d6 Fix error in MR code when compiling for single precision 2017-12-06 18:00:58 +01:00
0fbf445edd Hadrons: object creation that get properly captured by the memory profiler 2017-12-06 16:51:48 +01:00
e78794688a memory profiler improvement 2017-12-06 16:50:25 +01:00
9e31307963 Merge branch 'feature/hadrons' into feature/hadrons-new-memory-model 2017-12-06 16:49:32 +01:00
29e2eddea8 Merge branch 'develop' into feature/hadrons-new-memory-model 2017-12-06 16:49:21 +01:00
0a038ea15a Merge branch 'develop' into feature/hadrons 2017-12-06 16:49:10 +01:00
62eb1f0e59 FermionOperator virtual destructor needed for polymorphism 2017-12-06 16:48:17 +01:00
5422251959 Hadrons: execution part moved in a new virtual machine class 2017-12-05 15:31:59 +01:00
9579c9c327 Threading improvement 2017-12-05 14:12:22 +00:00
3729c7a7a6 Clean up of test 2017-12-05 13:07:31 +00:00
c24d4c8d0e Improved parallel RNG init 2017-12-05 13:01:10 +00:00
a14038051f Improved AllToAll asserts 2017-12-05 11:43:25 +00:00
3e560b9462 Faster RNG init 2017-12-05 11:42:05 +00:00
d93c6760ec Faster code for split unsplit 2017-12-05 11:39:26 +00:00
ae3b7713a9 Cold start doesn't need RNG 2017-12-05 11:36:31 +00:00
cbd8fbe771 Merge branch 'feature/hadrons' into feature/hadrons-new-memory-model 2017-12-03 19:48:56 +01:00
d391f05cb7 Merge branch 'develop' into feature/hadrons 2017-12-03 19:48:46 +01:00
3127b52c90 bootstrap script does not destroy Eigen if working offline 2017-12-03 19:48:34 +01:00
01f00385a4 Hadrons: genetic pair selection based on exponential probability 2017-12-03 19:47:40 +01:00
59aae5f5ec Hadrons: garbage collector clean temporaries 2017-12-03 19:47:11 +01:00
624246409c Hadrons: module setup/execute protected to forbid user to bypass execution control 2017-12-03 19:46:18 +01:00
2a9ebddad5 Hadrons: scheduler offline, minimal code working again 2017-12-03 19:45:15 +01:00
ff7afe6e17 Merge branch 'feature/hadrons' into feature/hadrons-new-memory-model 2017-12-01 19:45:44 +00:00
33cb509d4b Merge branch 'develop' into feature/hadrons 2017-12-01 19:45:32 +00:00
456c78c233 Merge branch 'develop' into feature/hadrons-new-memory-model 2017-12-01 19:45:12 +00:00
2fd4989029 Merge branch 'develop' of github.com:paboyle/Grid into develop 2017-12-01 19:44:31 +00:00
2427a21428 minor serial IO fixes, XML now issues warning when trying to read absent nodes, these becomes 2017-12-01 19:44:07 +00:00
514993ed17 Hadrons: progress on the interface, genetic algorithm freezing 2017-12-01 19:38:23 +00:00
7a0c9e84f8 Fix HDF5 src 2017-11-29 18:03:03 -05:00
caf1a3c85d Also add HDF5 src 2017-11-29 17:58:02 -05:00
21ca730a49 Also import some dependencies 2017-11-29 17:34:40 -05:00
c6cd27e9b2 Also import Eigen.inc 2017-11-29 17:26:20 -05:00
6068411d61 Remove Eigen from gitignore 2017-11-29 17:24:40 -05:00
4e965c168e Implement analogue of test vector analysis in WMG codebase 2017-11-29 15:05:27 +01:00
f260af546e Save current state 2017-11-28 15:03:02 +01:00
28ceacec45 Split/Unsplit working 2017-11-27 15:13:29 +00:00
e6a3e375cf Debug 2017-11-27 15:10:22 +00:00
4987edbd44 Merge branch 'develop' of https://github.com/paboyle/Grid into develop 2017-11-27 12:34:56 +00:00
ad140bb6e7 Clean on multinode target after split 1 1 2 4 -> 1 1 2 2 2017-11-27 12:34:25 +00:00
1f04e56038 Believe split/unsplit works, but need to make pretty 2017-11-27 12:33:08 +00:00
4bfc8c85c3 Clean up verbose communicator create 2017-11-27 12:32:37 +00:00
e55397bc13 Staggered CG 2017-11-24 14:18:30 +00:00
649b8c9aca Save current state 2017-11-24 10:46:20 +01:00
0afa22747d Merge remote-tracking branch 'upstream/develop' into feature/new-solver-algorithms 2017-11-24 10:11:42 +01:00
a3fe874a5b Hadrons: everything is broken, repairing while implementing the new memory model 2017-11-22 23:27:19 +00:00
f403ab0133 gitignore update 2017-11-22 17:13:09 +00:00
94b8fb5686 Debug in progress 2017-11-19 01:39:04 +00:00
1f1d77b01a Performance metrics for the Scalar Action force term 2017-11-14 10:01:48 +00:00
6a15e2e8ef Added WilsonTwoIndexAntiSymmImpl instantiation in WilsonKernelsHand.cc (should not be necessary) 2017-11-12 14:16:19 +01:00
074d17429f Merge branch 'develop' into feature/scalar_adjointFT
# Conflicts:
#	lib/communicator/Communicator_mpi3.cc
2017-11-11 18:09:55 +00:00
fa43206c79 Remove some empty lines 2017-11-10 13:48:38 +01:00
25f73018f4 Merge pull request #135 from fionnoh/develop
Declaring virtual functions as pure virtual functions.
2017-11-09 23:19:08 +00:00
1d7ccc6b2c Declaring virtual functions as pure virtual functions. 2017-11-09 19:46:57 +00:00
a367835bf2 Set everything up for the implementation of FCAGMRES
The current implementation is the exact same code as normal FGMRES. This commit
only sets up the "framework" for the implementation of FCAGMRES, i.e., a test
and an include in the algorithms header file.
2017-11-09 17:30:41 +01:00
d7743591ea Fix some minor formatting errors 2017-11-09 17:28:19 +01:00
c6cbe533ea Set everything up for the implementation of CAGMRES
The current implementation is the exact same code as normal GMRES. This commit
only sets up the "framework" for the implementation of CAGMRES, i.e., a test and
an include in the algorithms header file.
2017-11-09 17:14:44 +01:00
8402ab6cf9 Some minor formatting improvements 2017-11-09 12:52:04 +01:00
c63095345e Remove some superfluous comments 2017-11-09 12:47:20 +01:00
59d9ccf70c restored WilsonKernelsHand.cc and added Qtop to production codes 2017-11-08 22:02:32 +01:00
a7ae46b61e Remove some comments 2017-11-08 16:58:20 +01:00
cd63052205 Remove everything preconditioner-related in GMRES code 2017-11-08 16:57:40 +01:00
699d537cd6 Add FGMRES test with staggered fermions 2017-11-08 16:56:42 +01:00
9031f0ed95 Fix a filename in a file header 2017-11-08 16:42:26 +01:00
26b3d441bb Check in forgotten FGMRES test with wilson Fermions 2017-11-08 16:39:11 +01:00
99bc4cde56 Fix an implementation error in FGMRES 2017-11-08 16:38:34 +01:00
e843d83d9d Make z in FGMRES a single Field 2017-11-08 16:38:16 +01:00
0f75ea52b7 First version of FGMRES; not working yet 2017-11-08 16:17:18 +01:00
8107b785cc Rename misunderstood "rsd_sq" to "rsq" in GMRES code 2017-11-08 14:40:03 +01:00
37b777d801 Add test for GMRES solver with staggered fermions 2017-11-08 14:28:48 +01:00
7382787856 Some minor changes 2017-11-08 14:23:55 +01:00
781c611ca0 Perform minor code style fix 2017-11-08 14:22:38 +01:00
b069090b52 Remove a superfluous comment 2017-11-08 13:58:02 +01:00
0c1c1d9900 Set precision and formatting only once in MR code 2017-11-08 13:57:06 +01:00
7f4ed6c2e5 First working version of GMRES + a test for Wilson fermions 2017-11-08 13:56:41 +01:00
56d32a4afb Rename misunderstood "rsd_sq" to "rsq" in MR code 2017-11-08 13:51:08 +01:00
b8ee496ed6 Print some info at start of GMRES 2017-11-08 13:23:41 +01:00
1860b1698c Fixed the bug on MPI_T at Cam 2017-11-08 09:03:01 +00:00
9b8d1cc3da Staggered Schur decomposed matrix norm changed to not be the Schur anymore :(
Carleton wanted this for multimass / multishift
2017-11-07 14:48:45 +00:00
0c668bf46a QedFVol: Write to output files from one process only. 2017-11-07 14:46:39 +00:00
149c3f9e9c Merge branch 'develop' of https://github.com/paboyle/Grid into develop 2017-11-07 14:01:13 +00:00
b87416dac4 Fix error with conformable 2017-11-07 15:00:08 +01:00
176bf37372 Remove some commented stuff 2017-11-07 14:57:36 +01:00
c519aab19d Fixing the MPI memory leak in the communicators 2017-11-07 13:55:37 +00:00
b3d342ca22 Remove old implementation of GMRES operator 2017-11-07 10:24:49 +01:00
e1f928398d Save current state 2017-11-07 10:22:41 +01:00
69929f20bb Destructor fix. Split Grid and MPI3 will not yet work without more effort from me. 2017-11-06 23:45:00 +00:00
8c579d2d4a Save current state 2017-11-06 18:09:48 +01:00
840814c776 QedFVol: Patch to fix MPI communicators error 2017-11-06 16:34:55 +00:00
fc7d07ade0 Correct function signature of body of GMRES outer loop 2017-11-06 17:12:38 +01:00
b3be9195b4 Save one lattice fermion in GMRES code 2017-11-06 17:12:23 +01:00
9e3c187a4d Save current state 2017-11-06 17:05:25 +01:00
8363edfcdb Perform some minor changes to GMRES code 2017-11-06 16:17:44 +01:00
74af31564f Adapt style of wilson GMRES test to style of wilson MR test 2017-11-06 14:06:45 +01:00
e0819d395f Merge remote-tracking branch 'upstream/develop' into feature/new-solver-algorithms 2017-11-06 13:09:36 +01:00
a493429218 Added production tests for MixedRep, Adj, 2S, 2AS. Still missing QObs. The HMC is not correctly printing all the actions and forces. 2017-11-04 18:16:54 +01:00
915f610da0 clover 2indexSymm hmc production test created. clover 2indexAsymm and clover mixed to be filled. 2017-11-04 01:17:06 +01:00
c79606a5dc Test production code wilson clover. Still missing QObs measurement on-the-fly. 2017-11-03 22:46:32 +01:00
95af55128e QedFVol: Redo optimisation of scalar VP (extra memory requirements were not the problem), and undo optimisation of charged propagator (which seemed to be causing HDF5 errors, although I don’t know why). 2017-11-03 18:46:16 +00:00
9f2a57e334 QedFVol: Undo optimisation of scalar VP, to reduce memory requirements 2017-11-03 13:10:11 +00:00
c645d33db5 QedFVol: Redo optimisation of charged propagator, and fix I/O bug 2017-11-03 10:59:26 +00:00
e0f1349524 QedFVol: Undo optimisation of charged propagator 2017-11-03 09:22:41 +00:00
360efd0088 Improved treatment of reverse, as asked for by Chris.
Truncate the basis.
Power method renormalises.
2017-11-02 22:05:31 +00:00
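A generic sketch of power iteration with per-step renormalisation, as mentioned above (illustrative only, not Grid's implementation; the std::vector and operator types are stand-ins for a lattice field and a linear operator):

```cpp
#include <cmath>
#include <functional>
#include <utility>
#include <vector>

// Power iteration with per-step renormalisation: apply the operator, rescale
// to unit norm, and use the norm as the running estimate of |lambda_max|
// (assumes the starting vector has a component along the dominant eigenvector).
double PowerMethodEstimate(const std::function<std::vector<double>(const std::vector<double>&)> &A,
                           std::vector<double> v, int iters) {
  double lambda = 0.0;
  for (int it = 0; it < iters; ++it) {
    std::vector<double> w = A(v);
    double n2 = 0.0;
    for (double x : w) n2 += x * x;
    lambda = std::sqrt(n2);            // estimate of |lambda_max| after this step
    for (double &x : w) x /= lambda;   // renormalise to unit norm
    v = std::move(w);
  }
  return lambda;
}
```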
7b42ac9982 added polyakov loop observable to the hmc 2017-11-02 21:58:16 +01:00
c5c647e35e Merge branch 'feature/lanczos-reorg' into develop 2017-11-02 15:23:11 +00:00
a4e5fd1000 Merge branch 'feature/hadrons' into feature/hadrons-new-memory-model 2017-11-01 19:24:51 +00:00
682e7d7839 Merge branch 'develop' into feature/hadrons 2017-11-01 19:24:38 +00:00
8e057721a9 Anisotropic Clover term written and tested 2017-11-01 12:50:54 +00:00
fa5e4add47 Added support for anisotropy to the WilsonFermion class 2017-10-31 18:20:38 +00:00
6f81906b00 Add test for the MR solver with staggered fermions; does not converge atm
TODO: Is this a property of staggered or did I do something wrong?
2017-10-30 16:57:55 +01:00
79b761f923 Merge branch 'develop' into feature/qed-fvol
# Conflicts:
#	lib/communicator/Communicator_base.cc
2017-10-30 15:53:18 +00:00
0d4e31ca58 QedFVol: Calculate phase factors for momentum projections once per configuration only. 2017-10-30 15:46:50 +00:00
a2d83d4f3d Add test for the MR solver with DW fermions; does not converge atm
TODO: Is this a property of DWF or did I do something wrong?
2017-10-30 16:39:30 +01:00
89bacb0470 Fix path in MR solver header commentary 2017-10-30 16:16:55 +01:00
b07a354a33 QedFVol: output scalar propagator before FFT in spatial directions. 2017-10-30 14:20:44 +00:00
19010ff66a Merge remote-tracking branch 'upstream/develop' into feature/new-solver-algorithms 2017-10-30 13:16:46 +01:00
27ea2afe86 No compile on comms == none fix 2017-10-30 01:14:11 +00:00
78e8704eac Shaking out 2017-10-30 00:25:31 +00:00
67131d82f2 Get subrank info from communicator constructor 2017-10-30 00:24:11 +00:00
615a9448b9 Extended sub comm supported 2017-10-30 00:23:34 +00:00
00164f5ce5 : 2017-10-30 00:22:52 +00:00
a7f72eb994 Shaking out 2017-10-30 00:22:06 +00:00
501fa1614a Communicator updates for split grid 2017-10-30 00:16:12 +00:00
5bf42e1e15 Update 2017-10-30 00:05:21 +00:00
fe4d9b003c More digits 2017-10-30 00:04:47 +00:00
4a699b4da3 New rank can be found out 2017-10-30 00:04:14 +00:00
689323f4ee Reverse dim ordering lexico support 2017-10-30 00:03:15 +00:00
749189fd72 Full clover force correct 2017-10-29 12:03:08 +00:00
f941c4ee18 Clover term force ok 2017-10-29 11:43:33 +00:00
84b441800f Merge branch 'develop' into feature/lanczos-reorg 2017-10-27 14:21:38 +01:00
1ef424b139 Split grid Y2K bug fix attempt 2017-10-27 14:20:35 +01:00
5a477ed29e Perform minor style correction 2017-10-27 14:46:18 +02:00
54128d579a Make MR a bit more verbose 2017-10-27 14:45:29 +02:00
e7b1933e88 Add a test for the MR solver 2017-10-27 14:38:57 +02:00
1bad64ac6a Some formatting 2017-10-27 14:35:04 +02:00
15dfa9f663 Change stopping criterion implementation in MR solver + some cleanup 2017-10-27 14:33:25 +02:00
2185b0d651 Correct author in the file 2017-10-27 14:32:38 +02:00
f61c0b5d03 Very early version of MR solver 2017-10-27 14:09:02 +02:00
074db32e54 Fix build of gmres test 2017-10-27 14:08:48 +02:00
aa66f41c69 Bug fix in the coarse restore...
Think this is nearly there
2017-10-27 10:29:34 +01:00
f96c800d25 Passes reload of coarse basis 2017-10-27 09:43:22 +01:00
32a52d7583 Move the local coherence lanczos into algorithms.
Keep the I/O in the tester. Other people can copy this method to write other I/O formats.
2017-10-27 09:04:31 +01:00
fa04b6d3c2 Finished ? Verifying coarse evec restore 2017-10-27 08:18:29 +01:00
7fab183c0e Better read test 2017-10-27 08:17:49 +01:00
9ec9850bdb 64bit ftello update 2017-10-26 23:34:31 +01:00
0c4ddaea0b Cleaning up 2017-10-26 23:31:46 +01:00
00ebc150ad Mistake in string parse; the interface is ambiguous and must be fixed. Is char * a file, or an XML buffer? 2017-10-26 23:30:37 +01:00
0f3e9ae57d Gsites error. Only appeared (so far) in I/O code for even odd fields 2017-10-26 23:29:59 +01:00
034de160bf Staggered updates : Schur fixed and added a unit test for Test_staggered_cg_schur.cc giving stronger check 2017-10-26 20:58:46 +01:00
76bcf6cd8c Deleting vscode settings file 2017-10-26 18:45:41 +01:00
91b8bf0613 Debugging force term 2017-10-26 18:23:55 +01:00
14507fd6e4 Final? candidate for push back on the lanczos reorg feature 2017-10-26 16:25:01 +01:00
2db05ac214 Test for split/unsplit in isolation 2017-10-26 07:48:03 +01:00
31f99574fa Moving these out of algorithms 2017-10-26 07:47:42 +01:00
a34c8a2961 Update to IRL; getting close to the structure I would like. 2017-10-26 07:45:56 +01:00
ccd20df827 Better IRL interface 2017-10-26 01:59:59 +01:00
e9be293444 Better messaging 2017-10-26 01:59:30 +01:00
d577211cc3 Relax stopping condition 2017-10-25 23:57:54 +01:00
f4336e480a Faster convergence time 2017-10-25 23:53:44 +01:00
e4d461cb03 Messaging 2017-10-25 23:53:19 +01:00
3d63b4894e Use existing functionality where possible 2017-10-25 23:52:47 +01:00
08583afaff Red black friendly coarsening 2017-10-25 23:51:18 +01:00
b395a312af Better error messaging 2017-10-25 23:50:37 +01:00
66295b99aa Bit less verbose SciDAC IO 2017-10-25 23:50:05 +01:00
b8654be0ef 64 bit safe offsets 2017-10-25 23:49:23 +01:00
a479325349 Rewrite of local coherence lanczos 2017-10-25 23:48:47 +01:00
f6c3f6bf2d XML serialisation of parms and initialise from parms object 2017-10-25 23:47:59 +01:00
d83868fdbb Identity linear op added -- useful in circumstances where a linear op may or may not be needed.
Supply a trivial one if not needed
2017-10-25 23:47:10 +01:00
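A minimal sketch of the idea behind the identity linear operator (names are illustrative, not Grid's actual interface): callers that optionally take a smoother, preconditioner or projector can always be handed one, with the trivial case simply copying input to output.

```cpp
// A "do nothing" linear operator: supply this when no real operator is needed.
template <class Field>
struct TrivialLinearOperator {
  void operator()(const Field &in, Field &out) const { out = in; }
};
```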
303e0b927d Improvements for coarse grid compressed lanczos 2017-10-25 23:46:33 +01:00
28ba8a0f48 Force spacing more nicely 2017-10-25 23:45:57 +01:00
f9e28577f3 Merge branch 'develop' of https://github.com/paboyle/Grid into develop 2017-10-25 21:07:56 +01:00
e0cae833da Merge branch 'develop' into feature/scalar_adjointFT 2017-10-25 10:49:50 +01:00
8a3aae98f6 Solving a minor compilation bug 2017-10-25 10:34:49 +01:00
8309f2364b Solving the MPI comm bug with FFTs again 2017-10-25 10:24:14 +01:00
d5f661ba70 Save intermediate state 2017-10-25 10:38:26 +02:00
cac1750078 Merge branch 'develop' of https://github.com/paboyle/Grid into develop 2017-10-24 23:30:36 +01:00
e17cd35151 Merge branch 'develop' into feature/scalar_adjointFT 2017-10-24 17:31:22 +01:00
ccdec7a7ab Merge branch 'develop' into feature/clover 2017-10-24 16:51:14 +01:00
93642d813d Merging 2017-10-24 16:48:05 +01:00
1ab8d5cc13 Save two more files 2017-10-24 16:58:05 +02:00
789e892865 Save current state 2017-10-24 16:58:04 +02:00
53cfa44d7a Save current state 2017-10-24 16:58:03 +02:00
0bc381f982 Merge pull request #133 from pretidav/feature/clover
Feature/clover
2017-10-24 15:15:21 +01:00
2986aa76f8 Restoring Perfcounts 2017-10-24 13:32:02 +01:00
657779374b Adding vscode to gitignore 2017-10-24 13:27:17 +01:00
ec8cd11c1f Cleanup and prepare for pull request 2017-10-24 13:21:17 +01:00
cbda4f66e0 Debug of the field strength 2017-10-24 10:20:13 +01:00
6579dd30ff More debug test 2017-10-23 18:47:00 +01:00
031c94e02e Debugging process for the clover term 2017-10-23 18:27:34 +01:00
6391b2a1d0 Added test for Wilson and Clover fermions 2017-10-23 14:42:35 +01:00
2e50b55ae4 Changes in the Makefile to compile against Chroma on Linux 2017-10-23 13:32:26 +01:00
c433939795 QedFVol: Temporarily remove incomplete implementation of infinite-volume photon 2017-10-20 16:27:58 +01:00
b6a4c31b48 Merge branch 'feature/qed-fvol' of https://github.com/jch1g10/Grid into feature/qed-fvol 2017-10-20 16:25:07 +01:00
98b1439ff9 QedFVol: pass arbitrary input values to photon constructor in UnitEm 2017-10-20 16:24:09 +01:00
27936900e6 Putting the FG verbosity in the Integrator level 2017-10-18 13:08:09 +01:00
564738b1ff Add module for unit EM field 2017-10-17 14:03:57 +01:00
cd3e810d25 Merge branch 'develop' into feature/scalar_adjointFT 2017-10-17 11:31:14 +01:00
317ddfedee Updated clover test + first attempt at derivative clover term (still missing spin part) 2017-10-16 02:47:33 +02:00
e325929851 ALl codes compile against the new Lanczos call signature 2017-10-13 14:02:43 +01:00
47af3565f4 Logging improvement; reunified the Lanczos codes 2017-10-13 13:23:07 +01:00
4b4d187935 Reunified the Lanczos implementations 2017-10-13 13:22:44 +01:00
9aff354ab5 Final version prior to reunification 2017-10-13 13:22:26 +01:00
cb9ff20249 Approx tests and lanczos improvement 2017-10-13 11:30:50 +01:00
a80e43dbcf Added infinite-volume photon in Photon.h (not checked yet) 2017-10-11 16:44:51 -04:00
9fe6ac71ea Starting reorg of Blocked lanczos 2017-10-11 10:12:07 +01:00
5c392a6ecc Merge commit 'bf58557fb1ec710c766e19c9a8809b0a352de239' into feature/scalar_adjointFT 2017-10-10 17:14:56 +01:00
f1fa00b71b Merge branch 'develop' of https://github.com/paboyle/Grid into develop 2017-10-10 14:26:44 +01:00
bf58557fb1 Block compressed Lanczos 2017-10-10 14:15:11 +01:00
10cb37f504 Merge branch 'develop' of https://github.com/paboyle/Grid into develop 2017-10-10 14:09:44 +01:00
1374c943d4 Correct Schur operator called 2017-10-10 13:59:50 +01:00
a1d80282ec cb factorise 2017-10-10 13:49:31 +01:00
4eb8bbbebe Christoph mods 2017-10-10 13:48:51 +01:00
d1c6288c5f Merge branch 'develop' of https://github.com/paboyle/Grid into develop 2017-10-10 13:38:40 +01:00
dd949bc428 Merge branch 'feature/staggering' into develop 2017-10-10 13:02:51 +01:00
bb7378cfc3 Schur for staggered 2017-10-10 12:02:18 +01:00
f0e084a88c Schur staggered 2017-10-10 10:00:43 +01:00
153672d8ec Split CG testing 2017-10-09 23:20:58 +01:00
08ca338875 Split grid communication 2017-10-09 23:19:45 +01:00
f7cbf82c04 Better stdout/err debug 2017-10-09 23:18:48 +01:00
07009c569a Comms splitting improvements 2017-10-09 23:16:51 +01:00
15d690e9b9 Adding the cartesian communicator destructor 2017-10-09 09:59:58 +01:00
63b2bc1936 Merge branch 'develop' into feature/hadrons
# Conflicts:
#	lib/qcd/action/fermion/FermionOperatorImpl.h
2017-10-05 14:16:23 +01:00
d810e8c8fb first attempt to write C terms in clover derivative. Some shifts to be fixed 2017-10-05 10:13:53 +02:00
09f4cdb11e Merge branch 'develop' of https://github.com/paboyle/Grid into feature/staggering 2017-10-04 10:51:16 +01:00
1e54882f71 Stagger 2017-10-04 10:51:06 +01:00
27caff92c6 Merge branch 'feature/scalar_adjointFT' of https://github.com/paboyle/Grid into feature/scalar_adjointFT 2017-10-04 09:44:27 +01:00
d38cee73bf Scalar: easier Fourier acceleration parametrisation through -D flags 2017-10-03 17:29:34 +01:00
8784f2a88d post-merge fix 2017-10-03 14:38:10 +01:00
c497864b5d Merge commit 'd54807b8c0cd1a7658ff8563bb00d1137b987e3e' into feature/scalar_adjointFT
# Conflicts:
#	lib/communicator/Communicator_base.h
#	lib/communicator/Communicator_mpi.cc
#	lib/communicator/Communicator_mpit.cc
2017-10-03 14:27:54 +01:00
05c1c88440 Scalar: more action generalisation 2017-10-03 14:26:20 +01:00
d54807b8c0 MPIT works with split grid now 2017-10-02 23:14:56 +01:00
f6ba2b95ce Merge branch 'develop' into feature/scalar_adjointFT 2017-10-02 15:19:20 +01:00
5625b47c7d Merge branch 'feature/dwf-multirhs' into develop 2017-10-02 12:42:32 +01:00
1edcf902b7 Macos ANON 2017-10-02 12:41:02 +01:00
e5c19e1fd7 RB constructor change 2017-10-02 12:25:52 +01:00
a11d0a33d1 Merge branch 'feature/dwf-multirhs' of https://github.com/paboyle/Grid into feature/dwf-multirhs 2017-10-02 11:42:07 +01:00
4f8b6f26b4 Merge branch 'develop' into feature/dwf-multirhs 2017-10-02 11:41:49 +01:00
073525c5b3 Small patch from Cori 2017-10-02 03:38:21 -07:00
eb6153080a Merge branch 'develop' of https://github.com/paboyle/Grid into feature/staggering 2017-10-02 08:56:33 +01:00
f7072d1ac2 Solving an annoying compilation error in json 2017-10-02 07:13:40 +01:00
a021933002 Scalar: SU(N) action change to 't Hooft scaling 2017-09-29 16:09:34 +01:00
b99622d9fb QedFVol: fix problem with JSON wanting gcc 4.9 2017-09-28 13:34:33 -04:00
937c77ead2 Merge branch 'develop' into feature/qed-fvol 2017-09-28 16:25:20 +01:00
95e5a2ade3 Merge pull request #116 from jch1g10/feature/qed-fvol
Feature/qed fvol
2017-09-25 15:08:33 +01:00
56478d63a5 clover + test (valence) 2017-09-24 19:32:15 +02:00
df21668f2c memory profiler update 2017-09-22 14:21:18 +01:00
482368e9de Merge branch 'develop' into feature/scalar_adjointFT 2017-09-21 13:44:08 +01:00
fddeb29d6b Bug fix with spreadout FFT 2017-09-21 11:10:08 +01:00
a9ec5cf564 Christoph bug report integrate 2017-09-21 10:32:41 +01:00
946a8671b9 Merge pull request #129 from djm2131/feature/eofa
Add support for DWF with the exact one flavor algorithm
2017-09-21 10:15:21 +01:00
a6eeea777b Merge branch 'develop' of https://github.com/paboyle/Grid into develop 2017-09-21 10:12:41 +01:00
771a1b8e79 Merge pull request #128 from paboyle/feature/CG-reliable-update
Feature/cg reliable update
2017-09-21 10:12:03 +01:00
bfb68e6f02 Merge pull request #130 from giltirn/gparity-handunroll
Gparity handunroll
2017-09-21 10:11:00 +01:00
77f7737ccc Merge branch 'develop' of https://github.com/paboyle/Grid into develop 2017-09-19 14:28:01 +01:00
9a827d0242 Fixing a compilation error 2017-09-18 14:55:51 +01:00
999c623590 Solving a memory leak in Communicator_mpi 2017-09-18 14:39:04 +01:00
18c335198a Merge branch 'hotfix/dirac-ITT-fix1' into develop 2017-09-16 18:19:02 +01:00
f9df685cde Merge branch 'hotfix/dirac-ITT-fix1' 2017-09-16 18:18:48 +01:00
17c5b0f152 Patching comparison point 2017-09-16 18:18:07 +01:00
5918769f97 Subtle Naik term bug fixed in Stencil; lesson on logical && with a function call on the right 2017-09-16 12:51:26 +01:00
b542d349b8 Minor cosmetic changes 2017-09-15 11:48:36 +01:00
91eaace19d Added support for FFT accelerated updates 2017-09-15 11:33:45 +01:00
bbaf1ada91 Merge branch 'feature/json-fix' into develop 2017-09-08 16:02:08 +01:00
1950ac9294 Fixed the Intel compiler problem with the JSON classes 2017-09-08 15:18:59 +01:00
13fa70ac1a Merge branch 'develop' into feature/json-fix 2017-09-08 13:42:20 +01:00
7cb2b11f26 Fixing Intel compiler error for the JSON parser 2017-09-08 13:41:53 +01:00
1184ed29ae Merge pull request #124 from nmeyer-ur/feature/arm-neon
Added integer reduce functionality
2017-09-08 10:54:35 +02:00
203c7bf6fa Merge branch 'hotfix/dirac-ITT-fix' into develop 2017-09-05 15:08:51 +01:00
c709883f3f Merge branch 'hotfix/dirac-ITT-fix' 2017-09-05 15:08:16 +01:00
aed5de4d50 Patching macos compile 2017-09-05 15:07:07 +01:00
ba27cc6571 Mac os happiness 2017-09-05 15:00:16 +01:00
d856327250 Merge branch 'release/dirac-ITT' into develop 2017-09-05 14:56:12 +01:00
d75369cb56 Merge branch 'release/dirac-ITT' 2017-09-05 14:55:54 +01:00
bf973d0d56 SHM complete 2017-09-05 14:30:29 +01:00
837bf8a5be Updating to control the SHM allocation scheme under configure time options 2017-09-05 12:51:02 +01:00
c05b2199f6 Improvements to huge memory 2017-09-04 10:41:21 -04:00
a5fe07c077 Merge branch 'develop' of https://github.com/paboyle/Grid into develop 2017-09-04 14:10:15 +01:00
b83b2b1415 Stability improvement to BCG. Force m_rr hermitian beyond rounding. 2017-09-04 14:09:47 +01:00
91676d1dda Fix “MAP_ANONYMOUS undefined” error on OSX. 2017-09-01 15:48:30 +01:00
b331be9101 Better reporting 2017-08-31 11:32:57 +01:00
49c20a9fa8 Patch to reporting 2017-08-31 11:32:21 +01:00
7359df3501 Full reporting for benchmark; save robustness factor 2017-08-31 10:42:35 +01:00
59bd1fe21b Fix for 'perm' and 'local' not being set for hand-unrolled external-site Dslash, which caused incorrect behavior of G-parity kernel 2017-08-29 13:07:37 -07:00
a56e3b40c4 Merge branch 'develop' into feature/hadrons 2017-08-29 11:03:53 -06:00
4e907fef2c Merge remote-tracking branch 'grid/develop' into feature/arm-neon 2017-08-29 17:47:36 +02:00
67888b657f Merge branch 'gparity-handunroll' of https://github.com/giltirn/Grid into gparity-handunroll 2017-08-29 09:52:05 -04:00
74af885d4e Removed some no-longer-needed code associated with the G-parity hand-unrolled kernel 2017-08-29 09:50:37 -04:00
ac3611bb19 Merge branch 'develop' of https://github.com/paboyle/Grid into feature/qed-fvol 2017-08-29 11:53:37 +01:00
d36d2fb40d Added ability to override default Ls in Benchmark_dwf 2017-08-28 06:53:56 -07:00
5b9267e88d Cleaner comms benchmark treatment for one node runs 2017-08-27 18:24:48 -04:00
15fd4003ef Improving presentation of results 2017-08-27 13:46:02 +01:00
4b4c2a715b fcntl.h needed 2017-08-26 11:38:04 +01:00
54a5e6c1d0 Check if we get huge pages on Linux. Larry Meadows' piece of magic. 2017-08-25 22:36:08 +01:00
73aeca7dea Merge branch 'feature/multi-communicator' into develop 2017-08-25 21:55:09 +01:00
ad89abb018 Fix 2017-08-25 20:43:37 +01:00
80c5bce5bb Merge branch 'develop' into feature/multi-communicator 2017-08-25 20:21:26 +01:00
f68b5de9c8 No compile fix on Clang 2017-08-25 19:35:21 +01:00
d0f3d525d5 Optimal block size for KNL 2017-08-25 19:33:54 +01:00
f365a83fae In G-parity unrolled kernel, replaced calls to permute and exchange with run-time-evaluated permute type with explicit calls to appropriate underlying functions 2017-08-25 14:24:11 -04:00
3a58217405 Updated 2017-08-25 14:29:53 +01:00
c289699d9a updated from cambridge mpi3 shakeout 2017-08-25 11:41:01 +01:00
c3b1263e75 Benchmark prep 2017-08-25 09:25:54 +01:00
34a9aeb331 Reduced number of if-statement evaluations in G-parity unrolled kernel 2017-08-24 13:53:50 -07:00
5846566728 Merge branch 'develop' into feature/hadrons 2017-08-24 18:20:52 +01:00
102ea9ae66 CI update 2017-08-24 18:17:09 +01:00
cc4afb978d Fix bug in non-zero momentum projection 2017-08-24 17:31:44 +01:00
21b02760c3 Merge branch 'develop' into feature/hadrons 2017-08-24 17:05:45 +01:00
2bcb704af2 Merge pull request #121 from Lanny91/feature/hadrons
Feature/hadrons
2017-08-24 12:59:08 +01:00
5fa386ddc9 FFT test compile fixed 2017-08-24 10:17:52 +01:00
edabb3577f Imported Benchmark_gparity 2017-08-23 16:54:06 -04:00
ce5df177ee Removed superfluous implementation of G-parity twist for hand-unrolled kernel from GparityWilsonImpl 2017-08-23 15:05:22 -04:00
a0bb8e5b46 Added hand-unrolled kernel implementations of all the other dslash precision / comms precision combinations with G-parity 2017-08-23 14:44:40 -04:00
46f88e6d72 G-parity hand-unrolled intrinsics twist now uses one less permute and one less temporary 2017-08-23 13:21:10 -04:00
dd8f1ea189 Vectorized Mobius EOFA Dperp + shift operation 2017-08-23 13:17:26 -04:00
b61835c1a5 Added inplace version of intrinsic G-parity twist to hand-unrolled kernel 2017-08-23 12:33:48 -04:00
d9cd4f0273 Staggered multinode block cg debugged. Missing global sum.
Code stalls and resumes on KNL at Cambridge. Curious.

CG iterations 23ms each, then 3200 ms pauses. Mean bandwidth reports
as 200MB/s. Comms dominant in the report. However, the time behaviour suggests it
is *bursty*.... Could be swap to disk?
2017-08-23 15:07:18 +01:00
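The "missing global sum" above is the usual multi-node pitfall: each rank only accumulates its local-volume contribution to the block inner products, so they must be reduced over the communicator before the residual test. A generic sketch with plain MPI (Grid uses its own communicator wrappers; the helper name is illustrative):

```cpp
#include <mpi.h>
#include <complex>
#include <vector>

// Reduce a locally accumulated Gram matrix G[i*Nrhs + j] = sum over local
// sites of conj(v_i) . v_j, so that every rank sees the full-volume result.
void GlobalSumGram(std::vector<std::complex<double>> &G, MPI_Comm comm) {
  MPI_Allreduce(MPI_IN_PLACE, G.data(), (int)G.size(),
                MPI_C_DOUBLE_COMPLEX, MPI_SUM, comm);
}
```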
459f70e8d4 Check-in of working Mobius EOFA class and tests 2017-08-22 22:38:30 -04:00
061e48fd73 Replaced slow unpack-repack in G-parity BC twist with intrinsics version 2017-08-22 18:12:12 -04:00
ab50145001 Implemented first, unoptimized version of hand-unrolled G-parity kernels
Improved Test_gparity
2017-08-22 17:12:25 -04:00
b49bec0cec MAP_HUGETLB portability fix 2017-08-20 03:08:54 +01:00
ae56e556c6 finalise issue on new OPA revert 2017-08-20 02:53:12 +01:00
1cdf999668 Moving multicommunicator into mpi3 also for threading 2017-08-20 02:39:10 +01:00
11062fb686 Comms none fail fix 2017-08-20 01:37:07 +01:00
383ca7d392 Switch off comms for now until feature/multi-communicator is merged 2017-08-20 01:27:48 +01:00
a446d95c33 Trying to pass TeamCity and Travis 2017-08-20 01:10:50 +01:00
be66e7dd95 Merge branch 'develop' into feature/multi-communicator 2017-08-19 23:12:38 +01:00
6d0d064a6c Update TODO 2017-08-19 23:11:30 +01:00
bfef525ed2 New benchmark prep 2017-08-19 23:10:12 +01:00
0b0cf62193 Fix mpi 3 interface change 2017-08-19 13:18:50 -04:00
7d88198387 Merge branch 'develop' into feature/multi-communicator 2017-08-19 13:03:35 -04:00
2f619482b8 Enable blocking stencil send 2017-08-19 12:53:59 -04:00
d6472eda8d Use mmap 2017-08-19 12:53:18 -04:00
9e658de238 Use Vector 2017-08-19 12:52:44 -04:00
bcefdd7c4e Align both allocator calls to 2MB 2017-08-19 12:49:02 -04:00
9d45fca8bc Implement MobiusEOFAFermioncache.cc 2017-08-17 23:45:36 -04:00
ac9e6b63c0 More re-import of Mobius EOFA 2017-08-17 19:28:53 -04:00
e140b3f802 Beginning to re-import Mobius EOFA 2017-08-16 23:36:23 -04:00
d9d3d30cc7 Minor clean-up 2017-08-16 20:57:51 -04:00
47a12ec7b5 Implement EOFA pseudofermion force and Shamir tests for G-parity and non G-parity cases 2017-08-16 19:50:08 -04:00
ec1e2f7a40 Add (mostly implemented) ExactOneFlavourRatio pseudofermion class and tests of Shamir heatbath and action 2017-08-16 12:38:59 -04:00
41f73ec083 Add ChronoForecast class for forecasting solutions across poles in the EOFA heatbath 2017-08-16 12:37:38 -04:00
fd367d8bfd Debugging the PointerCache 2017-08-16 09:42:57 +01:00
6d0786ff9d Typo fixes and check-in of G-parity action test for DWF 2017-08-15 22:47:00 -04:00
b7f93aeb4d Change CayleyFermion5D::SetCoefficientsInternal to virtual to allow overriding in derived EOFA classes 2017-08-15 14:18:51 -04:00
202a7fe900 Re-import DWF and abstract base EOFA fermion classes and tests 2017-08-15 13:36:08 -04:00
8d168ded4a Correction of the dagger version of the Clover 2017-08-15 10:50:44 +01:00
8a3fe60a27 Added more asserts at grid creation time 2017-08-08 11:36:20 +01:00
44051aecd1 Checking for integer divisions in cartesian full 2017-08-08 10:31:12 +01:00
06e6f8de00 Check that the reduced dim is an integer 2017-08-08 10:22:12 +01:00
dbe4d7850c Make a test file compatible with all architectures 2017-08-06 10:49:45 +01:00
4fe182e5a7 Added high level HMC support for overriding default SIMD lane decomposition 2017-08-06 10:46:19 +01:00
75ee6cfc86 Debugging the Clover term 2017-08-04 16:08:07 +01:00
fde71c3c52 Merge branch 'develop' into feature/clover 2017-08-04 12:19:57 +01:00
175f393f9d Binary IO error checking 2017-08-04 12:14:10 +01:00
7d867a8134 Merge branch 'develop' into feature/CG-reliable-update 2017-08-02 09:48:04 -04:00
9939b267d2 Added switching to fallback linear operator in reliable update CG, and added recalculation of b parameter on update. 2017-07-31 13:39:44 -04:00
323e9c439a Hadrons: Legal banner fixes 2017-07-31 12:26:34 +01:00
28396f1048 Merge branch 'feature/rare_kaon' of https://github.com/Lanny91/Grid into feature/hadrons 2017-07-31 12:19:54 +01:00
67b34e5789 Modified conserved current 5th dimension loop for compatibility with 5D vectorisation. 2017-07-31 11:35:01 +01:00
14d53e1c9e Threaded MPI calls patches 2017-07-29 13:08:10 -04:00
8bd869da37 Correcting a bug in the IO routines 2017-07-27 15:12:50 +01:00
c7036f6717 Adding checks for libm and libstdc++ 2017-07-27 11:15:09 +01:00
c0485d799d Explicit parameter declaration in the WilsonGauge test 2017-07-26 16:26:04 +01:00
7abc5613bd Added smearing to the topological charge observable 2017-07-26 16:21:17 +01:00
237cfd11ab Solving the spurious O2 flags 2017-07-26 12:08:51 +01:00
a4b7dddb67 Merge branch 'develop' of https://github.com/paboyle/Grid into develop 2017-07-26 12:07:38 +01:00
5696781862 Debug error in Tensor mult 2017-07-26 12:07:34 +01:00
8f4b3049cd Merge branch 'feature/CG-reliable-update' into ckelly_develop 2017-07-25 11:55:26 -04:00
2a6e673a91 Merge branch 'develop' into feature/CG-reliable-update 2017-07-25 11:54:43 -04:00
9b6cde173f Merge branch 'feature/CG-reliable-update' into ckelly_develop 2017-07-25 11:51:08 -04:00
9f280b82c4 Added mixed-precision CG with reliable updates 2017-07-25 11:30:41 -04:00
c3f0889eda Merge pull request #123 from giltirn/develop
Fix for 'using namespace' in lib/qcd/utils/GaugeFix.h
2017-07-25 11:32:02 -03:00
7a53dc3715 Added integer reduce functionality 2017-07-24 11:12:59 +02:00
0f214ad427 Moved FourierAcceleratedGaugeFixer into Grid::QCD namespace and removed 'using namespace' directives from header 2017-07-21 11:13:51 -04:00
fe4912880d Update README.md 2017-07-17 09:53:07 +01:00
875e1a841f Hadrons: updated Quark -> MFermion/GaugeProp module name in test. 2017-07-16 13:47:00 +01:00
0366288b1c Hadrons: added tests for 3pt contractions. 2017-07-16 13:45:55 +01:00
6293d438cd Hadrons: sink smearing compatibility for 3pt contraction modules. 2017-07-16 13:43:25 +01:00
852ade029a Hadrons: Added module to sink a propagator 2017-07-16 13:41:47 +01:00
f038c6babe Update README.md 2017-07-14 22:59:16 +01:00
169f4b2711 Update README.md 2017-07-14 22:56:06 +01:00
2d8aff36fe Update README.md 2017-07-14 22:52:16 +01:00
9fa07eecde Merge branch 'develop' into feature/json-fix 2017-07-12 15:47:22 +01:00
659d7d1a40 For test/solver: fixed
2017-07-12 15:01:48 +01:00
f64fb7bd77 Fix gcc error on JSON compilation 2017-07-12 14:55:42 +01:00
2a35449b91 Merge branch 'develop' into feature/json-fix 2017-07-12 14:47:00 +01:00
184af5bd05 Added support for std::pair in the JSON serialiser 2017-07-12 14:44:53 +01:00
097c9637ee Fixed the JSON parsing error 2017-07-11 14:31:57 +01:00
dc6f078246 fixed the header file for mpi3 2017-07-11 14:15:08 +01:00
8a4714a4a6 Update README.md 2017-07-09 00:11:54 +01:00
40e119c61c NUMA improvements worth preserving from AMD EPYC tests 2017-07-08 22:27:11 -04:00
d9593c4b81 Merge branch 'develop' into feature/json-fix 2017-07-07 14:17:50 +01:00
ac740f73ce Works on Cori 2017-07-02 16:47:58 -07:00
75dc7794b9 Working on Cori 2017-07-02 16:47:42 -07:00
dee68fc728 IO working on multiple nodes again. The strategy of all nodes writing metadata is unsafe.
Only one rank should do this, and we must identify that rank. That means passing the communicator to the
objects.
2017-07-02 23:33:48 +01:00
a2d3643634 Merge branch 'feature/dwf-multirhs' of https://github.com/paboyle/Grid into feature/dwf-multirhs 2017-07-02 14:59:22 -07:00
57002924bc NERSC shakeout of this 2017-07-02 14:58:30 -07:00
7b0237b081 Update README.md 2017-07-01 10:24:41 +01:00
b68ad0cc0b Update README.md 2017-07-01 10:20:07 +01:00
37263fd9b1 Update README.md 2017-07-01 10:06:24 +01:00
3d09e3e9e0 Update README.md 2017-07-01 10:05:46 +01:00
1354b46338 Update README.md 2017-07-01 10:04:32 +01:00
251a97fe1b Update README.md 2017-07-01 09:55:36 +01:00
e18929eaa0 Update README.md 2017-07-01 09:53:15 +01:00
f3b0a92e71 Update README.md 2017-07-01 09:48:00 +01:00
a0be3f7330 Merge branch 'develop' of https://github.com/paboyle/Grid into develop 2017-06-30 10:53:50 +01:00
b5a6e4f1fd Best option for Xeon cache blocking set 2017-06-30 10:53:22 +01:00
7a788db3dc Guard first touch 2017-06-30 10:49:08 +01:00
f20eceb6cd First touch once per page in a threaded loop 2017-06-30 10:48:27 +01:00
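For context, the first-touch trick mentioned here works because Linux allocates a page on the NUMA node of the thread that first writes it; touching one byte per page from the threads that will later use the data keeps it local. A minimal sketch (the 4 KiB page size and the helper name are illustrative assumptions):

```cpp
#include <cstddef>

// Touch one byte per page inside a threaded loop that matches the later
// access pattern, so each page lands on the corresponding thread's NUMA node.
void FirstTouch(char *buf, std::size_t bytes, std::size_t page = 4096) {
#pragma omp parallel for schedule(static)
  for (std::size_t i = 0; i < bytes; i += page) buf[i] = 0;
}
```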
38325ebbc6 Interleave code path; not enabled 2017-06-30 10:23:51 +01:00
b73bd151bb Switch off counters by default 2017-06-30 10:16:35 +01:00
694b305cab Update to reporting 2017-06-30 10:16:13 +01:00
2d3737a133 O3, KNL 2017-06-30 10:15:59 +01:00
ac1f1838bc KNL only 2017-06-30 10:15:32 +01:00
09d09d0fe5 Update README.md 2017-06-29 11:48:11 +01:00
bf630a6821 README file update 2017-06-29 11:42:25 +01:00
8859a151cc Small corrections to the NEON port 2017-06-29 11:30:29 +01:00
688a39cfd9 Merge pull request #114 from nmeyer-ur/feature/arm-neon
ARM neon intrinsics support
Guido: checked and approved
2017-06-29 09:57:17 +01:00
6f5a5cd9b3 Improved threaded comms benchmark 2017-06-28 23:27:02 +01:00
0933aeefd4 corrected Grid_neon.h 2017-06-28 20:22:22 +02:00
322f61acee Merge branch 'develop' of https://github.com/paboyle/Grid into develop 2017-06-28 15:30:35 +01:00
08e04b9676 Better benchmarks 2017-06-28 15:30:06 +01:00
feaa2ac947 Merge branch 'feature/scalar-hmc-update' into develop 2017-06-28 12:46:18 +01:00
07de925127 minor scalar action fixes 2017-06-28 12:45:44 +01:00
a9c816a268 moved file to correct folder 2017-06-27 21:39:15 +02:00
e43a8b6b8a removed comments 2017-06-27 20:58:48 +02:00
bf729766dd removed collision with QPX implementation 2017-06-27 20:32:24 +02:00
dafb351d38 Merge pull request #120 from paboyle/feature/scalar-hmc-update
Scalar HMC update. 
I agree with the changes.
2017-06-27 16:23:14 +01:00
0b707b861c Merge branch 'develop' into feature/scalar-hmc-update 2017-06-27 14:40:05 +01:00
15e87a4607 HDF5 IO fix 2017-06-27 14:39:27 +01:00
7d7220cbd7 scalar: lambda/4! convention 2017-06-27 14:38:45 +01:00
7d2d5e8d3d Merge branch 'develop' of https://github.com/paboyle/Grid into feature/hadrons 2017-06-26 15:19:46 +01:00
54e94360ad Experimental: Multiple communicators to see if we can avoid thread locks in --enable-comms=mpit 2017-06-24 23:10:24 +01:00
0af740dc15 minor scalar HMC code improvement 2017-06-24 23:04:05 +01:00
d2e8372df3 SU(N) algebra fix (was not working) 2017-06-24 23:03:39 +01:00
869b99ec1e Threaded calls to multiple communicators 2017-06-24 10:55:54 +01:00
4a29ab0d0a Merge branch 'feature/dwf-multirhs' of https://github.com/paboyle/Grid into feature/dwf-multirhs 2017-06-23 23:10:43 +01:00
0165bcb58e Added an update to TODO list 2017-06-23 23:10:24 +01:00
deca1ecc50 Merge branch 'develop' of https://github.com/paboyle/Grid into feature/rare_kaon 2017-06-23 19:35:19 +02:00
4372d04ad4 Merge pull request #118 from Lanny91/hotfix/bgq
Hotfix/bgq
2017-06-23 16:59:27 +01:00
349d75e483 Precision fix 2017-06-23 02:57:59 -07:00
56abbdf4c2 AVX512 integer reduce fix (for non-intel compiler) 2017-06-23 11:09:14 +02:00
af71c63f4c AVX2 fix 2017-06-23 11:03:12 +02:00
e51475703a Ticking off lots on the TODO list 2017-06-23 09:42:21 +01:00
1feddf4ba6 const fixes 2017-06-22 19:32:41 +01:00
600d7ddc2e Proof of concept : Multi RHS solver, running independent solves on different ranks 2017-06-22 18:54:34 +01:00
e504260f3d Able to run a test job splitting into multiple MPI subdomains. 2017-06-22 18:53:11 +01:00
0440d4ce66 Merge branch 'develop' of https://github.com/paboyle/Grid into hotfix/bgq 2017-06-22 17:09:42 +02:00
08b0e472aa Fixed hadrons tests after merge 2017-06-22 16:34:33 +02:00
c11d69787e Merge branch 'feature/hadrons' of https://github.com/paboyle/Grid into feature/rare_kaon
# Conflicts:
#	extras/Hadrons/Modules.hpp
#	extras/Hadrons/Modules/MFermion/GaugeProp.hpp
#	extras/Hadrons/modules.inc
#	tests/hadrons/Test_hadrons.hpp
#	tests/hadrons/Test_hadrons_meson_3pt.cc
2017-06-22 16:26:31 +02:00
dc6b2d30d2 Documentation fix 2017-06-22 16:09:45 +02:00
7a3bd5c66c Hadrons: new conserved current contraction test (for regression testing) 2017-06-22 16:06:15 +02:00
18211eb5b1 Hadrons: Fixed test to use new implementation of meson module. 2017-06-22 16:03:59 +02:00
863bb2ad10 Moving overly-specialised code out of Grid 2017-06-22 16:02:15 +02:00
5e4bea8f20 Benchmark DWF works 2017-06-22 08:38:54 +01:00
6ebf9f15b7 Splitting communicators first cut 2017-06-22 08:14:34 +01:00
1d7aa673a4 Include BlockCG by default 2017-06-21 21:08:53 +01:00
b9104f3072 Block CG 2017-06-21 21:08:03 +01:00
b22eab8c8b Merge commit 'a7d56523abee6c9030fdd9303c79954897b1086f' into feature/hadrons 2017-06-21 18:32:48 +01:00
a7d56523ab Merge branch 'feature/lanczos-simplify' into develop 2017-06-21 14:03:20 +01:00
9e56c65730 Updated TODO list 2017-06-21 14:02:58 +01:00
ef4f2b8c41 todo update 2017-06-21 09:22:20 +01:00
e8b95bd35b Clean up finished. Could shrink Lanczos to around 400 lines at a push 2017-06-21 02:50:09 +01:00
7e35286860 Simplified lanczos, added Eigen diagonalisation.
Curious if we can deprecate the dependency on BLAS.
Will see when we get 48^3 running on our BG/Q port
2017-06-21 02:26:03 +01:00
0486ff8e79 Improved the Lanczos 2017-06-20 18:46:01 +01:00
1e8a2e1621 various compatibility fixes after merge 2017-06-20 17:24:55 +01:00
7587df831a Merge branch 'develop' into feature/hadrons
# Conflicts:
#	lib/qcd/action/scalar/ScalarImpl.h
2017-06-20 15:50:39 +01:00
e9cc21900f Block solver complete for staggered. Now stable on mass 0.003 and
gives 8x (!) speed up on Haswell laptop vs. standard CG for 8 RHS solves.

166 iterations vs. 537 iterations so algorithmic gain + 2x in flop rate gain.

Better than a slap in the face with a wet kipper.
2017-06-20 12:37:41 +01:00
0a8faac271 Fix make tests compile 2017-06-19 22:54:18 +01:00
abc4de0fd2 No compile make tests fix 2017-06-19 22:03:03 +01:00
b672717096 Test_serialiation update for JSON 2017-06-19 14:38:39 +01:00
284ee194b1 JSON update 2017-06-19 14:38:15 +01:00
cfe3cd76d1 Block solver improvements 2017-06-19 14:04:21 +01:00
3fa5e3109f Merge branch 'develop' of https://github.com/paboyle/Grid into develop 2017-06-19 14:01:44 +01:00
8b7049f737 Improved detection of usqcdInfo for plaq/linktr 2017-06-19 08:46:07 +01:00
c85024683e Merge branch 'feature/parallelio' into develop 2017-06-19 01:39:48 +01:00
1300b0b04b Update to enable multiple records per file more consistent with SciDAC.
open, close, write records...
2017-06-19 01:01:48 +01:00
e6d984b484 ILDG tests 2017-06-18 00:13:22 +01:00
1d18d95d4f Class name return 2017-06-18 00:13:03 +01:00
ae39ec85a3 ComplexField defined 2017-06-18 00:12:48 +01:00
b96daf53a0 Query tensor structures 2017-06-18 00:12:15 +01:00
46879e1658 Complex defined in Impl even for gauge. 2017-06-18 00:11:45 +01:00
ae4de94798 SciDAC I/O support 2017-06-18 00:11:23 +01:00
0ab555b4f5 SciDAC I/O and ILDG improvements 2017-06-18 00:11:02 +01:00
8e9be9f84f Updates for SciDAC IO 2017-06-18 00:10:42 +01:00
d572170170 Update for SciDAC 2017-06-18 00:10:20 +01:00
81b18f843a Merge branch 'feature/scalar_adjointFT' into feature/hadrons
# Conflicts:
#	lib/qcd/action/scalar/ScalarImpl.h
2017-06-16 17:59:55 +01:00
1bd311ba9c Faster sequential conserved current implementation, now compatible with 5D vectorisation & G-parity. 2017-06-16 16:43:15 +01:00
41af8c12d7 Code cleaning for conserved current contractions. Will now be easier to implement mobius conserved current. 2017-06-16 16:38:59 +01:00
a833f88c32 Added missing SIMD integer reduction implementation for AVX, AVX-512, SSE4, IMCI 2017-06-16 15:58:47 +01:00
07b2c1b253 Placeholder precision change functions to allow Grid to compile with QPX (warning: no actual functionality) 2017-06-16 15:04:26 +01:00
735cbdb983 QPX Integer reduction (+ integer reduction test) 2017-06-14 10:55:10 +01:00
2ad54c5a02 QPX exchange support 2017-06-14 10:53:39 +01:00
12ccc73cf5 Serialisation no compile fix 2017-06-14 05:19:17 +01:00
3d04dc33c6 ARM neon intrinsics support 2017-06-13 13:26:59 +02:00
e7564f8330 Starting a test for reading an ILDG file. 2017-06-13 12:22:50 +01:00
91199a8ea0 openmpi is not const safe 2017-06-13 12:21:29 +01:00
0494feec98 Libz dependency 2017-06-13 12:00:23 +01:00
a16b1e134e gcc 4.9 fix 2017-06-13 10:48:43 +01:00
20e92a7009 QedFVol: Allow output of scalar propagator and vacuum polarisation projected to arbitrary lattice momentum, not just zero-momentum. 2017-06-12 18:27:32 +01:00
5633a2db20 Faster implementation of conserved current site contraction. Added 5D vectorised support, but not G-parity. 2017-06-12 10:41:02 +01:00
2d433ba307 Changed header include guards to match new convention 2017-06-12 10:32:14 +01:00
769ad578f5 Odd new error on G++ 4.9 on Travis 2017-06-12 00:41:21 +01:00
eaac0044b5 Compile fixes 2017-06-12 00:20:49 +01:00
56042f002c New files 2017-06-11 23:19:20 +01:00
3bfd1f13e6 I/O improvements 2017-06-11 23:14:10 +01:00
42f0afcbfa QedFVol: Output all scalar VP diagrams separately 2017-06-09 18:08:40 +01:00
70ab598c96 Move gfix into utils 2017-06-08 22:22:23 +01:00
1d0ca65e28 Move Gfix into utils 2017-06-08 22:21:50 +01:00
2bc4d0a20e Move code into utils 2017-06-08 22:21:25 +01:00
20ac13fdf3 QedFVol: add ChargedProp as an input to ScalarVP module, instead of calculating scalar propagator within ScalarVP. 2017-06-08 17:43:39 +01:00
2490816297 Hadrons: rare kaon program removed 2017-06-07 20:11:02 -05:00
5f55bca378 Hadrons: Quark module renamed MFermion::GaugeProp 2017-06-07 20:10:48 -05:00
e38612e6fa QedFVol: Update ScalarVP module for compatibility with new scalar action 2017-06-07 17:42:00 +01:00
c2b2b71c5d Merge branch 'feature/qed-fvol' of https://github.com/paboyle/Grid into feature/qed-fvol
# Conflicts:
#	extras/Hadrons/Modules.hpp
#	extras/Hadrons/modules.inc
2017-06-07 16:59:47 +01:00
009f48a904 QedFVol: Add missing factor of 2 in free vacuum polarisation 2017-06-07 16:34:09 +01:00
b8e45ae490 Fixed remaining fermion type aliases after merge. 2017-06-07 16:26:22 +01:00
b35fc4e7f9 Merge branch 'feature/hadrons' of https://github.com/paboyle/Grid into feature/rare_kaon
# Conflicts:
#	extras/Hadrons/Global.hpp
#	tests/hadrons/Test_hadrons_rarekaon.cc
2017-06-07 14:38:51 +01:00
60f11bfd72 Removed redundant test module 2017-06-07 12:34:47 +01:00
f6aa82b7f2 Merge branch 'develop' into feature/hadrons 2017-06-06 11:46:33 -05:00
22749699a3 Fixes after merge and point sink module 2017-06-06 11:45:30 -05:00
8d442b502d Sequential current fix for spatial indices. 2017-06-06 17:06:40 +01:00
e5c8b7369e Boundary condition option in quark actions for hadrons tests. 2017-06-06 14:19:10 +01:00
0503c028be Merge branch 'feature/qed-fvol' into feature/hadrons (non-trivial conflicts on scalar Impl)
# Conflicts:
#	configure.ac
#	lib/qcd/action/scalar/Scalar.h
2017-06-05 16:37:47 -05:00
c504b4dbad Code cleaning 2017-06-05 15:56:43 +01:00
622a21bec6 Improvements to sequential conserved current test and small bugfix. 2017-06-05 15:55:32 +01:00
eec79e0a1e Ward Identity test improvements and conserved current bug fixes 2017-06-05 11:55:41 +01:00
092dcd4e04 MPI I/O only if MPI compiled 2017-06-02 22:50:25 +01:00
4a8c4ccfba Test wilson flow, added maxTau for adaptive flow 2017-06-02 17:02:29 +01:00
9b44189d5a Merge branch 'develop' of https://github.com/paboyle/Grid into develop 2017-06-02 16:56:00 +01:00
7da4856e8e Wilson flow with adaptive steps 2017-06-02 16:55:53 +01:00
aaf1e33a77 Adding adaptive integration in the WilsonFlow 2017-06-02 16:32:35 +01:00
094c3d091a Improved; RNGs now survive checkpoint 2017-06-02 00:38:58 +01:00
4b98e524a0 Roll over to MPI version of I/O 2017-06-01 17:38:18 -04:00
1a1f6d55f9 Roll over to MPI IO for parallel IO 2017-06-01 17:37:26 -04:00
21421656ab Big changes improving the code to use MPI IO 2017-06-01 17:36:53 -04:00
6f687a67cd As local vols increase, use 64 bits for safety 2017-06-01 17:36:18 -04:00
b30754e762 Merge branch 'feature/parallelio' of https://github.com/paboyle/Grid into feature/parallelio 2017-05-30 23:41:28 +01:00
1e429a0d57 Added MPI version 2017-05-30 23:41:07 +01:00
d38a4de36c Beginning move to MPI IO 2017-05-30 23:40:39 +01:00
ef1b7db374 Diff comparison check 2017-05-30 23:40:11 +01:00
53a9aeb965 Cosmetic only 2017-05-30 23:39:53 +01:00
e30fa9f4b8 RankCount; need to clean up ambiguous ProcessCount 2017-05-30 23:39:16 +01:00
58e8d0a10d reverse direction lexico mapping 2017-05-30 23:38:30 +01:00
62cf9cf638 Cleaner code 2017-05-30 23:38:02 +01:00
0fb458879d Precision safe compile 2017-05-30 23:37:02 +01:00
725c513d94 Better MPI3 benchmarking 2017-05-29 16:47:32 -04:00
d8648307ff Merge branch 'develop' into feature/hadrons 2017-05-29 12:58:08 +01:00
064315c00b Hadrons: mesons gamma list fix 2017-05-29 12:57:33 +01:00
7c6cc85df6 Updating WilsonFlow test 2017-05-27 18:03:49 +01:00
a6691ef87c Merge pull request #110 from Lanny91/feature/hadrons
Hadrons: Fermion boundary conditions can now be set in measurement code.
2017-05-26 16:43:22 +01:00
23135aa58a Merge branch 'feature/hadrons' of https://github.com/paboyle/Grid into feature/rare_kaon 2017-05-26 16:00:50 +01:00
8e0ced627a Hadrons: Fermion boundary conditions can now be set in measurement code. 2017-05-26 15:59:15 +01:00
0de314870d Faster derivative for WilsonGauge 2017-05-26 14:31:49 +01:00
ffb91e53d2 Merge branch 'develop' of https://github.com/paboyle/Grid into develop 2017-05-26 12:46:02 +01:00
f4e8bf2858 Fixing the topological charge. Wilson Flow tested, ok 2017-05-26 12:45:59 +01:00
a74c34315c Bootstrap script fix 2017-05-25 14:27:49 +01:00
69470ccc10 Update to do list 2017-05-25 13:41:26 +01:00
b8b5934193 Attempts to speed up the parallel IO 2017-05-25 13:32:24 +01:00
75856f2945 Compilation fix in the Tensor_exp 2017-05-25 12:44:56 +01:00
3c112a7a25 Small correction to the general exp definition 2017-05-25 12:09:00 +01:00
ab3596d4d3 Using Cayley-Hamilton form for the exponential of SU(3) matrices 2017-05-25 12:07:47 +01:00
a8c10b1933 Use a global-X x Local-Y chunksize for parallel binary I/O.
Gives O(32 x 8 x 18*8*8) chunk size on configuration I/O.

At 150KB should be getting close to packet sizes and 4MB filesystem
block sizes that are reasonably (!?) performant. We shall see once I move
this off my laptop and over to BNL and time it.
2017-05-25 11:43:33 +01:00
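A rough sanity check of the quoted ~150KB figure, assuming double-precision SU(3) gauge links with 4 links per site and 18 reals per link (these assumptions are mine, not stated above):

```cpp
constexpr long globalX      = 32;                          // global-X extent of the chunk
constexpr long localY       = 8;                           // local-Y extent of the chunk
constexpr long bytesPerSite = 4 * 18 * 8;                  // links * reals * sizeof(double) = 576 B
constexpr long chunkBytes   = globalX * localY * bytesPerSite;
static_assert(chunkBytes == 147456, "~144 KiB, consistent with the ~150KB estimate");
```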
15e801af3f Fixing a compilation error for generic SIMD 2017-05-19 16:39:36 +01:00
0ffc235741 Adding more statistics to the Benchmark_comms. Min and max 2017-05-19 10:55:04 +01:00
8e19c99c7d Adding more statistical info in the Benchmark_comms 2017-05-18 19:07:35 +01:00
a0bc0ad06f Reverting change in Bechmark_comms. Keeping 300 iterations 2017-05-18 17:48:11 +01:00
a8fb2835ca Merge branch 'develop' of https://github.com/paboyle/Grid into develop 2017-05-18 14:45:00 +01:00
bc862ce3ab Fixing an allocation issue in Benchmark_comms 2017-05-18 14:44:56 +01:00
08b314fd0f Hadrons: conserved current test fixes. Axial current tests now also optional. 2017-05-18 13:16:14 +01:00
22f4feee7b Merge branch 'develop' into feature/scalar_adjointFT 2017-05-17 13:27:13 +02:00
3f858d6755 Scalar: phi^2 observable 2017-05-17 13:25:14 +02:00
3267683e22 Union workaround for g++ 2017-05-17 11:26:18 +01:00
f46a67ffb3 No compile issue on clang on mac fixed.
Compiler version was clang++-3.9 under mpicxx
2017-05-17 10:51:01 +01:00
f7b8383ef5 Half precision comms mixed prec test 2017-05-16 14:52:51 +01:00
10f2872aae Faster exponentiation for lattice fields 2017-05-15 15:51:16 +01:00
34332fe393 Improvement to sequential conserved current insertion tests 2017-05-12 16:30:43 +01:00
c2010f21ab Added sequential propagator test for gamma matrix insertion 2017-05-12 16:23:01 +01:00
98f610ce53 Reduced code duplication in hadron tests 2017-05-12 16:15:26 +01:00
d44cc204d1 Added test module for sequential gamma matrix insertion 2017-05-12 14:58:17 +01:00
35fa3d1dfd Merge branch 'master' into feature/scalar_adjointFT 2017-05-12 10:41:39 +01:00
cd73897b8d Merge branch 'release/v0.7.0' into develop 2017-05-12 01:16:02 +01:00
c4435e6beb Merge branch 'release/v0.7.0' 2017-05-12 01:15:59 +01:00
7a8f6af5f8 Drop verbose compiler predefine check 2017-05-11 12:48:40 +01:00
49a5d9bac7 Clang major, minor trailing underscore 2017-05-11 12:25:02 +01:00
2b3fdd4a58 Print CXX predefines 2017-05-11 12:05:50 +01:00
34502ec471 4.8 dropped as buggy. 2017-05-11 11:43:39 +01:00
8a43e88b4f Compiler check early in build 2017-05-11 11:43:06 +01:00
d1ece74137 HMC scalar test: magnetisation measurement 2017-05-11 11:40:44 +01:00
238df20370 Still working on the compiler compat checks 2017-05-11 11:30:14 +01:00
97a32a6145 Add 4.8 test 2017-05-11 11:24:21 +01:00
655492a443 Compiler detection 2017-05-11 11:21:11 +01:00
1cab06f6bd Compat checks for compilers 2017-05-11 10:20:24 +01:00
43c817cc67 Scalar action: const fix 2017-05-11 00:07:17 +01:00
f8024c262b Update Eigen 2017-05-10 13:30:09 +01:00
4cc5f01f4a Small change in the readme about the intel compiler 2017-05-09 15:38:59 +01:00
5cfc0180aa QedFVol: Output free VP along with charged VP. 2017-05-09 12:46:57 +01:00
914f180fa3 QedFVol: Implement exact O(alpha) vacuum polarisation. 2017-05-09 11:46:25 +01:00
9c12c37aaf Confirming the fix on the complex boundary conditions 2017-05-09 08:41:29 +01:00
806eaa0530 Adding back the IO tests in the list 2017-05-08 22:26:44 +01:00
01d0e54594 Merge branch 'release/v0.7.0' into develop 2017-05-08 22:02:51 +01:00
5aafa335fe Fixing JSON error for complex numbers 2017-05-08 21:56:44 +01:00
8ba0494485 Fixing JSON for complex numbers 2017-05-08 21:41:39 +01:00
d99d98d9fd Merge branch 'release/v0.7.0' of https://github.com/paboyle/Grid into release/v0.7.0 2017-05-08 15:08:20 -04:00
95a017a4ae Relax force constraints to pass in single precision. 2017-05-08 15:06:41 -04:00
92f92379e6 Adding Oliver's test version 2017-05-08 18:42:19 +01:00
529e78d43f Restart the v0.7.0 release 2017-05-08 18:20:04 +01:00
4ec746d262 Merge branch 'release/v0.7.0' into develop 2017-05-06 18:43:03 +01:00
51bf1501fc Merge branch 'release/v0.7.0' 2017-05-06 18:42:50 +01:00
66d819c054 More info on gcc bug 2017-05-06 18:42:11 +01:00
3f3686f869 Formatting 2017-05-06 18:41:27 +01:00
26bb829f8c Formatting 2017-05-06 18:40:55 +01:00
67cb04fc66 README update 2017-05-06 18:39:54 +01:00
a40bd68aed Version update 2017-05-06 17:00:14 +01:00
36495e0fd2 Merge branch 'develop' of https://github.com/paboyle/Grid into develop 2017-05-06 16:39:27 +01:00
93f6c15772 Warning squash 2017-05-06 16:38:58 +01:00
cb93eeff21 Update README 2017-05-06 16:28:12 +01:00
c7cc7e6101 Fix 2017-05-06 16:10:09 +01:00
c349aa6511 DEFINE warning elimination 2017-05-06 16:08:35 +01:00
3bae0a2d5c Drop a gcc warning 2017-05-06 15:51:42 +01:00
c1c7566089 GCC bug workaround in 5.0 through 6.2 inclusive. 2017-05-06 15:20:25 +01:00
2439999ec8 Warning elimination; drop to -O2 on bad G++ versions 2017-05-06 14:44:49 +01:00
1d96f662e3 Fixed 4d fermion gparity force. Put strong tests on make check force tests 2017-05-06 00:46:31 +01:00
41d1889941 trusty ubuntu 2017-05-05 21:25:35 +01:00
0c3981e0c3 Trying to force recent automake 2017-05-05 21:15:22 +01:00
c727bd4609 Trying to work around automake version 2017-05-05 21:00:00 +01:00
db23749b67 Adding travis to make check 2017-05-05 20:42:08 +01:00
751f2b9703 Better check and benchmark driving 2017-05-05 19:54:38 +01:00
741bc836f6 Exposing support for Ncolours and Ndimensions and JSON input file for the ScalarAction 2017-05-05 17:36:43 +01:00
6cb563a40c QedFVol: Access HVP tensor using a vector<vector<ScalarField>> instead of vector<vector<ScalarField*>> 2017-05-05 17:12:41 +01:00
697c0603ce SITMO I/O for NERSC working now bit repro 2017-05-05 16:54:44 +01:00
14bedebb11 Source pointed to 2017-05-05 16:17:27 +01:00
8546d01a4c Merge branch 'develop' into feature/scalar_adjointFT 2017-05-05 15:47:33 +01:00
47b5c07ffb Merge branch 'develop' of https://github.com/paboyle/Grid into develop 2017-05-05 14:27:02 +01:00
da86a2bf54 Merge branch 'feature/hmc_generalise' into develop 2017-05-05 14:23:02 +01:00
c1cb60a0b3 Merge branch 'develop' of https://github.com/paboyle/Grid into develop 2017-05-05 14:22:37 +01:00
5ed5b4bfbf Merge branch 'develop' into feature/hmc_generalise 2017-05-05 14:22:33 +01:00
de84aacdfd Fixing a configure error for the smearing tests 2017-05-05 13:59:10 +01:00
2888003765 Merge branch 'develop' of https://github.com/paboyle/Grid into develop 2017-05-05 13:02:24 +01:00
da06bf5b95 Zmobius force test added 2017-05-05 12:52:45 +01:00
20999c1370 Merge branch 'develop' into feature/hmc_generalise 2017-05-05 12:47:17 +01:00
77e0af9c2e Compilation fix after merge - conserved current code not yet operational for vectorised 5D or Gparity Impl. 2017-05-05 12:27:50 +01:00
33f0ed1a33 No compile fix 2017-05-05 11:04:30 +01:00
50be56433b Delete old and defunct tests 2017-05-04 23:41:16 +01:00
43924007db Merge branch 'develop' of https://github.com/paboyle/Grid into develop 2017-05-04 19:53:41 +01:00
78ef10e60f Mobius force improvement 2017-05-04 19:53:21 +01:00
ca1077c560 Merge branch 'develop' of https://github.com/paboyle/Grid into feature/rare_kaon
# Conflicts:
#	lib/qcd/action/fermion/WilsonFermion5D.cc
#	tests/hadrons/Test_hadrons_rarekaon.cc
2017-05-04 16:22:33 +01:00
679ae98b14 Merge branch 'feature/better-external-library' into develop 2017-05-04 15:42:12 +01:00
90f6bc16bb No compile clang fix 2017-05-04 12:15:06 +01:00
9b5b639546 Merge branch 'develop' of https://github.com/paboyle/Grid into develop 2017-05-03 20:51:40 -04:00
945767c6d8 More info 2017-05-03 20:26:35 -04:00
422cdf4979 Some checks 2017-05-03 18:37:38 -04:00
38db174f3b Print statement 2017-05-03 18:25:26 -04:00
92e364a35f Better reporting in benchmark for MPI3 2017-05-03 15:43:36 -04:00
db3837be22 QedFVol: Change “double” to “Real” in ScalarVP.cc 2017-05-03 13:26:49 +01:00
2f0dd83016 Calculate HVP using a single contraction of O(alpha) charged propagators. 2017-05-03 12:53:41 +01:00
58299b8ba2 Git info separated from version in git-config 2017-05-02 20:04:41 +01:00
124bf4d829 git ref in config summary 2017-05-02 19:41:01 +01:00
e8e56b3414 Config summary saved in git-config 2017-05-02 19:40:47 +01:00
89c430136d grid-config program 2017-05-02 19:13:13 +01:00
ea9aef7baa New header for standard headers (was an issue with Remez.h and external compilation) 2017-05-02 18:26:11 +01:00
c9e9e8061d Merge branch 'feature/hadrons' into develop 2017-05-02 18:23:47 +01:00
453cf2a1c6 Moving the topological charge outside the HMC related routines 2017-05-02 14:40:12 +01:00
de7bbfa5f9 Adding ParameterFile option for the HMC 2017-05-02 12:16:16 +01:00
dda8d77c87 Merge branch 'feature/hadrons' into feature/rare_kaon 2017-05-01 17:50:57 +01:00
aa29f4346a Hadrons: weird bus error with recent macOS clang 2017-05-01 17:49:08 +01:00
86116dbed6 Adding boundary condition switch (compile time) for the Mobius HMC example 2017-05-01 16:33:11 +01:00
7bd31e3f7c Adding external file support in the Mobius example (JSON) 2017-05-01 16:30:24 +01:00
74f451715f Fix for Mac compilation on the size_t uint64_t types 2017-05-01 15:12:07 +01:00
655be8ed76 Adding tests for the mobius operator 2017-05-01 14:42:16 +01:00
4063238943 Adding HMC test file example for Mobius + smearing 2017-05-01 13:44:00 +01:00
3344788fa1 Merge branch 'develop' into feature/hmc_generalise 2017-05-01 12:13:56 +01:00
62a64d9108 EO support, wip 2017-05-01 11:06:21 +01:00
49331a3e72 Minor improvements to Ward Identity checks 2017-04-28 16:50:17 +01:00
51d84ec057 Bugfixes in Wilson 5D sequential conserved current insertion 2017-04-28 16:49:14 +01:00
db14fb30df Hadrons: overhaul of conserved current test 2017-04-28 16:48:00 +01:00
b9356d3866 Added more complete test of sequential insertion of conserved current. 2017-04-28 16:46:40 +01:00
99a73f4287 Correcting the M and Mdag in the clover term 2017-04-28 15:51:05 +01:00
f302eea91e SitePropagator redefined to be a scalar object in TYPE_ALIASES. 2017-04-28 15:27:49 +01:00
5553b8d2b8 Clover term compiles, not tested 2017-04-28 15:23:34 +01:00
a6ccbbe108 Conserved current sequential source now registered properly and fixed module inputs. 2017-04-28 10:43:47 +01:00
3ac27e5596 QedFVol: remove unnecessary copies of free propagator from shifted sources in ScalarVP 2017-04-27 14:17:50 +01:00
99220f6531 Fixes and better timing 2017-04-26 17:24:11 -04:00
d2003f24f4 Corrected incorrect usage of ExtractSlice for conserved current code. 2017-04-26 17:25:28 +01:00
6299dd35f5 Hadrons: Added test of conserved current code. Tests Ward identities for conserved vector and partially conserved axial currents. 2017-04-26 12:41:39 +01:00
a39daecb62 Removed make_5D const declaration to avoid compilation error 2017-04-26 12:39:07 +01:00
159770e21b Legal Banners added 2017-04-26 09:32:57 +01:00
2a6d093749 Move the sudo: required to match location on Guido's branch 2017-04-26 09:15:34 +01:00
c947947fad sudo: required, as suggested by Guido 2017-04-26 08:45:36 +01:00
f555b50547 Merge branch 'feature/half-prec-comms' into develop 2017-04-26 08:43:40 +01:00
738c1a11c2 longer nloop 2017-04-26 08:43:20 +01:00
f8797e1e3e Bug fix. Works now, with great face performance 2017-04-26 03:14:02 -04:00
fd1eb7de13 Clean implementation of the exterior faces, listing only those points on the boundary 2017-04-26 02:34:52 -04:00
2ce898efa3 Pretty code 2017-04-26 02:34:25 -04:00
dc5a6404ea Hadrons: modules for testing conserved current contractions and sequential insertion. 2017-04-25 22:08:33 +01:00
44260643f6 First conserved current implementation for Wilson fermions only. Not implemented for Gparity or 5D-vectorised Wilson fermions. 2017-04-25 18:00:24 +01:00
1425afc72f Rare Kaon test fix 2017-04-25 17:26:56 +01:00
bd466a55a8 QedFVol: remove charge dependence in chargedProp function of ScalarVP 2017-04-25 10:04:03 +01:00
ab66bac4e6 Think I'm getting on top of the reduced cost exterior precomputed list of links 2017-04-25 08:50:26 +01:00
56277a11c8 Build a list of what's on the surface 2017-04-24 17:06:15 +01:00
752048f410 Merge branch 'develop' into feature/clover 2017-04-24 14:41:20 +01:00
916e9e1d3e Merge branch 'feature/half-prec-comms' of https://github.com/paboyle/Grid into feature/half-prec-comms 2017-04-24 10:39:19 +01:00
5b55867a7a Slightly cheaper Ext assembly 2017-04-24 05:36:11 -04:00
3accb1ef89 Debugged assembly split phase with interior suppression 2017-04-23 19:30:19 -04:00
e3d0e31525 Debugged assembly split phase with interior suppression 2017-04-23 19:29:27 -04:00
5812eb8a8c Partially fixed. But the comms-overlap does not work yet. 2017-04-22 18:50:25 -04:00
4dd3763294 Use OMP as much as possible 2017-04-22 20:35:20 +01:00
c429ace748 Cleaner OpenMP use 2017-04-22 20:28:42 +01:00
ac58565d0a Dangerous rewrite of the assembly. If I make a mistake the debug will be painful. 2017-04-22 19:31:04 +01:00
3703b718aa Mark up a table if a given site only receives from itself; including MPI3 splitting info. 2017-04-22 19:28:37 +01:00
b722889234 Try a better load balancing loop 2017-04-22 19:27:41 +01:00
abba44a837 Hand unrolled for overlapped comms 2017-04-22 17:45:17 +01:00
f301be94ce Fixed 2017-04-22 17:42:31 +01:00
1d1b225497 Hand unrolled Nc=3 kernels support split phase compute (on-node, off-node). 2017-04-22 09:05:28 -04:00
53a785a3dd Fixing the KNL compile 2017-04-22 08:11:51 -04:00
736bf3c866 Major rework of stencil. Half precision and MPI3 now working. 2017-04-22 11:33:50 +01:00
b9bbe5d188 L1p config bg/q 2017-04-22 11:33:09 +01:00
3844bcf800 If no F16C instructions are supported we must use software half-precision conversion.
This will also become useful on BG/Q, so it will move out from SSE4 into a general area.
Lifted the Eigen half-precision code from the web. Looks sensible, but not extensively regressed
against the intrinsics implementation yet.
2017-04-20 15:30:52 +01:00
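For orientation, a minimal software float-to-half conversion of the kind implied above (a simplified sketch, not the Eigen code referenced in the commit): it truncates the mantissa rather than rounding to nearest even, flushes half-precision subnormals to zero, and maps NaN to Inf for brevity.

```cpp
#include <cstdint>
#include <cstring>

uint16_t FloatToHalfBasic(float f) {
  uint32_t x; std::memcpy(&x, &f, sizeof(x));          // bit pattern of the float
  uint16_t sign = (x >> 16) & 0x8000;                  // keep the sign bit
  int32_t  e    = ((x >> 23) & 0xFF) - 127 + 15;       // rebias exponent 8-bit -> 5-bit
  uint32_t mant = x & 0x007FFFFF;
  if (e >= 31) return sign | 0x7C00;                   // overflow / Inf / NaN -> Inf
  if (e <= 0)  return sign;                            // underflow / subnormal -> signed zero
  return (uint16_t)(sign | (e << 10) | (mant >> 13));  // pack 5-bit exponent, 10-bit mantissa
}
```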
e1a2319d01 Simple compressor moved out of cshift into stencil 2017-04-20 13:18:15 +01:00
180c732b4c Move compressors out of Cshift.
Slice iterators would help
2017-04-20 13:17:55 +01:00
957a706d0b Useful script 2017-04-20 13:17:44 +01:00
d2312e9874 Drop compressor entirely from Cshift to only Stencil. 2017-04-20 13:16:55 +01:00
fc4ab9ccd5 Working half precision comms 2017-04-20 11:20:26 +01:00
4a340aa5ca Massive compressor rework to support reduced precision comms 2017-04-20 09:28:27 +01:00
3b7de792d5 Type comparison in the traits work 2017-04-18 13:28:04 +01:00
557c3fa109 Pretty change 2017-04-18 13:27:38 +01:00
ec18e9f7f6 Merge branch 'develop' into feature/half-prec-comms 2017-04-18 11:39:39 +01:00
a839d5bc55 Updated todo list 2017-04-18 11:22:17 +01:00
de41b84c5c Merge branch 'feature/normHP' into develop 2017-04-18 10:57:21 +01:00
8e161152e4 MultiRHS solver improvements with slice operations moved into lattice and sped up.
Block solver requires a lot of performance work.
2017-04-18 10:51:55 +01:00
3141ebac10 MultiRHS working, starting to optimise. Block doesn't and I thought it already was; puzzled. 2017-04-17 10:50:19 +01:00
7ede696126 Non compile of tests fixed 2017-04-16 23:40:00 +01:00
bf516c3b81 higher precision reduction variables in norm and inner product 2017-04-15 12:27:28 +01:00
441a52ee5d First cut at higher precision reduction 2017-04-15 10:57:21 +01:00
a8db024c92 Cleaning up the dense matrix and lanczos sector 2017-04-15 08:54:11 +01:00
a9c22d5f43 Verbose removal 2017-04-14 14:38:49 +01:00
3ca41458a3 Fix to no USE_FP16 case 2017-04-14 14:20:54 +01:00
9e2d29c644 USE_FP16 macro 2017-04-14 14:17:14 +01:00
b694996302 adding comments 2017-04-14 13:30:14 +01:00
951be75292 Half precision conversion working on AVX512 now too 2017-04-13 17:35:11 +01:00
c8e6f58e24 Fix typos in ScalarVP 2017-04-13 17:04:37 +01:00
b9113ed310 Patches for knl 2017-04-13 12:02:12 -04:00
888988ad37 Merge branch 'feature/qed-fvol' of https://github.com/paboyle/Grid into feature/qed-fvol
# Conflicts:
#	lib/qcd/action/fermion/Fermion.h
2017-04-13 15:54:40 +01:00
1407418755 Old qed-fvol program build disabled 2017-04-13 15:32:30 +01:00
a6a0da873f Merge branch 'feature/hadrons' into feature/qed-fvol 2017-04-13 15:31:06 +01:00
42fb49d3fd Merge branch 'develop' of https://github.com/paboyle/Grid into develop 2017-04-13 14:12:47 +01:00
2a54c9aaab Merge branch 'feature/block-cg' into develop 2017-04-13 14:12:24 +01:00
0957378679 Fixing the conditional in an ugly way 2017-04-13 13:47:56 +01:00
2ed6c76fc5 Getting multiline if then fi working 2017-04-13 13:43:13 +01:00
d3b9a7fa14 F16C apparently requires AVX, even if only the 128-bit forms are used.
Seems odd.
2017-04-13 13:19:11 +01:00
75ea306ce9 Another try at travis 2017-04-13 13:05:32 +01:00
4226c633c4 Default to FP16 off again 2017-04-13 12:51:39 +01:00
5a4eafbf7e .travis 2017-04-13 12:50:43 +01:00
eb8e26018b Travis update for macos 2017-04-13 12:35:11 +01:00
db5ea001a3 Update to use Xcode 8.3 since -mfp16 causes SIGILL 2017-04-13 12:22:40 +01:00
2846f079e5 Predicate tests on fp16 being enabled 2017-04-13 12:08:05 +01:00
1d502e4ed6 FP16 optional compile time 2017-04-13 11:55:24 +01:00
73cdf0fffe Drop f16c from SSE because of a macos compile error on travis 2017-04-13 11:23:41 +01:00
1c25773319 Trap illegal instructions 2017-04-13 10:51:40 +01:00
c38400b26f Trap signals 2017-04-13 10:35:20 +01:00
9c3065b860 Debug flags off again 2017-04-13 10:01:32 +01:00
94eb829d08 Align cast fixed for __m128i; gcc complained 2017-04-13 08:40:44 +01:00
68392ddb5b Exchange in generic
Precision change in AVX, SSE, AVX512, Generic. QPX still to do.
2017-04-13 08:38:12 +01:00
cb6b81ae82 Half precision conversion 2017-04-12 19:32:37 +01:00
c382c351a5 Quark test output correction. 2017-04-12 14:36:15 +01:00
af2d6ce2e0 Encapsulated 4D->5D and 5D->4D conversions in separate functions & added corresponding tests. 2017-04-12 14:36:02 +01:00
90ec6eda0c Rare K test solver name fix 2017-04-10 17:48:58 +01:00
ac1253bb76 Corrected solver in rare kaon test 2017-04-10 17:42:55 +01:00
fe8d625694 Merge commit '5e477ec553aa48d7d19b5a7c45d41acbb3392bcb' into feature/rare_kaon 2017-04-10 17:23:37 +01:00
53e76b41d2 Merge branch 'develop' into feature/hadrons 2017-04-10 17:00:53 +01:00
8ef4300412 spurious .dirstamp files removed 2017-04-10 17:00:22 +01:00
98a24ebf31 The macro “magics” is very intensive for the preprocessor in the measurement code, which has numerous serialisable classes. Reducing the number of serialisable fields to 64 (instead of 1024) helps a lot; this is enough for now and can be extended trivially if needed in the future. 2017-04-10 16:58:54 +01:00
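For anyone wondering why the maximum field count matters at all: serialisation macros of this kind typically rest on the variadic "argument counting" trick, and the dispatch machinery has to be spelled out up to the maximum supported arity, so lowering the maximum shrinks what the preprocessor must expand. A toy version with hypothetical names, not Grid's macro:

```cpp
// Toy version of the variadic argument-counting trick (max arity 8 here).
// Real serialisation macros expand a dispatch table up to the maximum field
// count, which is why lowering 1024 -> 64 lightens the preprocessor load.
#include <cstdio>

#define NARG(...)  NARG_IMPL(__VA_ARGS__, 8, 7, 6, 5, 4, 3, 2, 1)
#define NARG_IMPL(_1, _2, _3, _4, _5, _6, _7, _8, N, ...) N

int main() {
  std::printf("%d %d\n", NARG(a, b, c), NARG(v, w, x, y, z));  // prints "3 5"
  return 0;
}
```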
e4a105a30b Merge branch 'feature/qed-fvol' of https://github.com/paboyle/Grid into feature/qed-fvol 2017-04-10 16:35:01 +01:00
26ebe41fef QedFVol: Implement charged propagator calculation within ScalarVP module 2017-04-10 16:33:54 +01:00
b12dc89d26 Commenting and clean up 2017-04-10 20:38:20 +09:00
d80d802f9d MultiRHS solver test 2017-04-10 00:12:12 +09:00
3d99b09dba Start of blockCG 2017-04-09 23:42:10 +09:00
db5f6d3ae3 Verbose fix 2017-04-09 23:41:30 +09:00
683550f116 Const args improvement 2017-04-09 23:41:04 +09:00
5e477ec553 Merge branch 'feature/hadrons' of https://github.com/paboyle/Grid into feature/rare_kaon 2017-04-07 11:51:09 +01:00
55d0329624 Merge branch 'develop' of https://github.com/paboyle/Grid into develop 2017-04-07 11:08:14 +09:00
86aaa35294 Christoph needs SchurDiagTwoKappa which is mobius specific. 2017-04-07 11:07:40 +09:00
363611ae21 Merge branch 'develop' into feature/clover 2017-04-05 16:26:04 +01:00
172d3dc93a Correcting names in tests 2017-04-05 16:24:04 +01:00
3b8a791e28 Merge branch 'develop' into feature/clover 2017-04-05 16:20:28 +01:00
7b03d8d087 Fixing the remaining merge conflicts 2017-04-05 16:17:46 +01:00
4b759b8f2a Merge branch 'feature/hmc_generalise' into feature/scalar_adjointFT 2017-04-05 14:50:28 +01:00
8c540333d5 Merge branch 'develop' into feature/hmc_generalise 2017-04-05 14:41:04 +01:00
6fd82228bf Working on the derivative 2017-04-05 10:51:44 +01:00
5592f7b8c1 Creation mode better implementation 2017-04-05 02:35:34 +09:00
35da4ece0b UID fix 2017-04-05 02:18:15 +09:00
061b15b9e9 Merge branch 'feature/sitmo-skipahead' into develop 2017-04-05 01:24:49 +09:00
ca6efc685e Merge branch 'develop' into feature/clover 2017-04-04 10:19:02 +01:00
1e496fee74 Merge branch 'develop' into feature/qed-fvol
# Conflicts:
#	lib/qcd/action/fermion/Fermion.h
2017-04-03 19:02:57 +01:00
ff4e54ef80 Merge branch 'develop' into feature/hadrons 2017-04-03 18:56:21 +01:00
561426f6eb Clean up 2017-04-02 23:13:48 +09:00
83f6fab8fa Big/Small crush test, and fast SITMO rng init, faster but not ideal
MT and Ranlux init.
2017-04-02 12:10:51 +09:00
0fade84ab2 No random device 2017-04-02 00:29:40 +09:00
9dc7ca4c3b Sitmo fast init 2017-04-02 00:28:22 +09:00
935d82f5b1 sanity checks 2017-04-02 00:27:28 +09:00
9cbcdd65d7 No random device seed 2017-04-02 00:26:57 +09:00
f18f5ed926 Drop random device 2017-04-02 00:26:26 +09:00
d1d63a4f2d sitmo default 2017-04-02 00:26:05 +09:00
7e5faa0f34 Multiple RNGs 2017-04-02 00:25:44 +09:00
6af459cae4 Christoph's coefficients. 2017-03-31 17:07:43 +09:00
1c4bc7ed38 Debugged staggered conventions 2017-03-31 14:41:48 +09:00
cd1bd921bd Reduced code duplication for Weak Hamiltonian contraction modules 2017-03-30 18:02:14 +01:00
b8ae787b5e Correcting a simple typo 2017-03-30 11:33:15 +01:00
fbe2c3b5f9 Merge branch 'develop' into feature/clover 2017-03-30 11:18:31 +01:00
1ed69816b9 First steps for the force term 2017-03-30 11:14:27 +01:00
fff5751b1a HADRONS: Updated rare kaon test program, including all contractions. Sink smearing still to be implemented. 2017-03-30 10:57:01 +01:00
2c81696fdd HADRONS: 4pt Weak + current disconnected topology (e.g. for rare neutral kaon decays) 2017-03-30 10:37:17 +01:00
c9dc22efa1 HADRONS: Standalone disconnected loop contraction. 2017-03-30 10:33:18 +01:00
0ab04a000f HADRONS: 3pt contraction with gamma insertion between two propagators. 2017-03-30 10:30:58 +01:00
93ea5d9468 Pretty code 2017-03-30 15:00:03 +09:00
1ec5d32369 Chulwoo's test to zmobius helped me shake out 2017-03-30 13:45:13 +09:00
9fd23faadf Pretty layout 2017-03-30 13:44:45 +09:00
10e4fa0dc8 Template instantiation improvements 2017-03-30 13:44:25 +09:00
c4aca1dde4 Conjugate coefficients on adjoint 2017-03-30 13:44:05 +09:00
b9e8ea3aaa conjugate coefficient on the dagger 2017-03-30 13:43:13 +09:00
077aa728b9 Fix the ZMobius (I think) 2017-03-30 13:42:09 +09:00
a8d83d886e Macro controls 2017-03-30 13:31:34 +09:00
7fd46eeec4 Trailing whitespace removal 2017-03-30 13:31:10 +09:00
e0c4eeb3ec Compiles again 2017-03-30 13:30:45 +09:00
cb9a297a0a Chulwoo's Zmobius test 2017-03-30 13:30:25 +09:00
2b115929dc Small AVX512 asm ifdef patch 2017-03-29 18:51:23 +09:00
5c6571dab1 Merge branch 'feature/bgq-asm' into develop 2017-03-29 18:48:55 +09:00
417ec56cca Release candidate 2017-03-29 05:45:33 -04:00
756bc25008 Verbose header print by default 2017-03-29 04:44:17 -04:00
35695ba57a Bug fix in MPI3 2017-03-29 04:43:55 -04:00
81ead48850 Log any errors to a file 2017-03-29 04:39:52 -04:00
d805867e02 Better init 2017-03-28 13:25:05 -04:00
e55a751e23 Merge branch 'feature/bgq-asm' of https://github.com/paboyle/Grid into feature/bgq-asm 2017-03-28 12:20:12 -04:00
358eb75995 Shorten loop 2017-03-28 12:20:02 -04:00
98f9318279 Build on AVX2 and MPI passing with clang++ 2017-03-28 23:16:04 +09:00
4b17e8eba8 Merge branch 'develop' into feature/bgq-asm
Conflicts:
	lib/qcd/action/fermion/Fermion.h
	lib/qcd/action/fermion/WilsonFermion.cc
	lib/util/Init.cc
	tests/Test_cayley_even_odd_vec.cc
2017-03-28 04:49:30 -04:00
75112a632a IO improvements to fail on IO error 2017-03-28 02:28:04 -04:00
18bde08d1b Merge branch 'feature/staggering' into develop 2017-03-28 15:25:55 +09:00
9f755e0379 Add functions momD1 and momD2 to ScalarVP 2017-03-27 16:49:18 +01:00
4512dbdf58 Rename module ScalarFV to ScalarVP 2017-03-27 15:02:16 +01:00
483fd3cfa1 Add propagator expansion terms as inputs to ScalarFV 2017-03-27 13:24:51 +01:00
3750b9ffee Deleting MPI test for OSX in Travis 2017-03-27 16:53:32 +09:00
5e549ebd8b Adding force terms 2017-03-27 16:43:15 +09:00
fff484eca5 Populating Clover fermions methods 2017-03-27 15:12:57 +09:00
5fdc05782b More in the clover fermion class 2017-03-27 10:54:16 +09:00
d45cd7e677 Adding a simple read of NERSC test 2017-03-26 09:24:26 -04:00
4e96679797 Added a bnl log 2017-03-25 09:25:46 -04:00
85516e9c7c Output all terms of scalar propagator separately 2017-03-24 17:13:55 +00:00
0c006fbfaa Add ScalarFV inputs to ScalarFV.hpp 2017-03-24 11:59:09 +00:00
54c10a42cc Add source and emField inputs to ScalarFV module 2017-03-24 11:42:32 +00:00
a04eb7df5d Starting Clover term 2017-03-24 12:43:28 +09:00
4c1ea8677e Small cosmetic changes and vscode gitignore 2017-03-23 14:09:35 +09:00
fc93f0b2ec Save some code for static huge TLBs. It is ifdef'ed out but an interesting root-only experiment.
No gain from it.
2017-03-21 22:30:29 -04:00
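A minimal, Linux-specific sketch of the kind of root-only experiment described above (explicit huge pages via mmap); this is not the ifdef'ed code in Grid, and it needs huge pages reserved beforehand, e.g. through /proc/sys/vm/nr_hugepages:

```cpp
// Back a buffer with explicit 2 MB huge pages (sketch; Linux only).
#include <sys/mman.h>
#include <cstddef>
#include <cstdio>

int main() {
  const size_t bytes = 64UL << 20;                  // 64 MB, a multiple of 2 MB
  void *p = mmap(nullptr, bytes, PROT_READ | PROT_WRITE,
                 MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);
  if (p == MAP_FAILED) { std::perror("mmap(MAP_HUGETLB)"); return 1; }
  for (size_t i = 0; i < bytes; i += 4096)          // touch so the pages are really mapped
    static_cast<char *>(p)[i] = 0;
  munmap(p, bytes);
  return 0;
}
```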
8c8473998d Average over whole cluster the comm time. 2017-03-21 22:29:51 -04:00
ef0fe2bcc1 Added empty ScalarFV module 2017-03-21 11:39:46 +00:00
120fb59978 Adding tests for WilsonFlow classes 2017-03-21 16:11:35 +09:00
fd56b3ff38 Merge branch 'develop' into feature/hmc_generalise 2017-03-21 13:33:41 +09:00
0ec6829edc Fixing compilation errors for the WilsonFlow 2017-03-21 13:06:32 +09:00
18b7845b7b Adding WilsonFlow smearing 2017-03-21 11:52:05 +09:00
3d0fe15374 Added topological charge measurement 2017-03-17 16:14:57 +09:00
91886068fe Fixed seg fault for observable modules 2017-03-17 13:59:31 +09:00
6d1e9e5f92 Small cleanup of the observables 2017-03-17 11:42:55 +09:00
b640230b1e Moving hmc observables in a different directory 2017-03-17 11:40:17 +09:00
e7c36771ed ZMobius prep for asm 2017-03-15 14:23:33 -04:00
038b6ee9cd Fixing JSON compilation error 2017-03-16 01:09:24 +09:00
38806343a8 Improving efficiency of the force term 2017-03-15 15:16:16 +09:00
831ca4e3bf Added Scalar action for fields in the adjoint representation 2017-03-14 14:55:18 +09:00
8dc57a1e25 Layout change 2017-03-13 11:11:46 +00:00
f57bd770b0 Merge branch 'bugfix/dminus' into feature/bgq-asm 2017-03-13 11:11:03 +00:00
4ed10a3d06 Merge branch 'develop' into feature/bgq-asm 2017-03-13 11:10:10 +00:00
dfefc70b57 Merge pull request #93 from Lanny91/hotfix/qpx
Some fixes for QPX and generic SIMD types.
2017-03-13 09:31:26 +00:00
0b61f75c9e Adding ZMobius CG test 2017-03-13 00:12:43 -04:00
33edde245d Changing Dminus(Dag) to use full vectors to work correctly 2017-03-12 23:02:42 -04:00
b64e004555 MPI run fail on macos 2017-03-13 01:59:01 +00:00
447c5e6cd7 Z mobius hermiticity correction 2017-03-13 01:30:43 +00:00
8b99d80d8c Merge branch 'bgq-asm-shmemfixes' into feature/bgq-asm 2017-03-12 23:30:09 +00:00
b3dede4dd3 Merge branch 'develop' into feature/hmc_generalise 2017-03-10 23:57:37 +09:00
4e34132f4d Correcting modules use in test files 2017-03-10 23:54:53 +09:00
c07cb10247 Merge branch 'feature/hmc_generalise' of https://github.com/paboyle/Grid into feature/hmc_generalise 2017-03-10 22:37:25 +09:00
d7767a2a62 Few more tests 2017-03-10 22:33:48 +09:00
ec035983fd Fixing the implicit integration 2017-03-01 11:56:35 +00:00
3901b17ade timings from BNL 2017-02-28 17:06:45 -05:00
af230a1fb8 Average the time across the whole machine for outliers 2017-02-28 17:05:22 -05:00
06a132e3f9 Fixes to SHMEM comms 2017-02-28 13:31:54 -08:00
596dcd85b2 Auxiliary fields 2017-02-27 13:16:38 +00:00
96d44d5c55 Header fix 2017-02-24 19:12:11 -05:00
7270c6a150 Integrator works now 2017-02-24 17:03:42 +00:00
7fe797daf8 SIMD vector length sanity checks 2017-02-23 16:49:44 +00:00
486a01294a Corrected QPX SIMD width 2017-02-23 16:47:56 +00:00
586a7c90b7 Merge branch 'develop' into feature/bgq-asm 2017-02-23 00:26:59 +00:00
e099dcdae7 Merge branch 'develop' into feature/bgq-asm 2017-02-23 00:25:29 +00:00
4e7ab3166f Refactoring header layout 2017-02-22 18:09:33 +00:00
aac80cbb44 Bug fix from Chris K 2017-02-22 12:19:09 -05:00
c80948411b Added tRotate function and MaddRealPart struct for generic SIMD, bugfix in MultRealPart and minor cosmetic changes. 2017-02-22 14:57:10 +00:00
95625a7bd1 Use Grid Integer type 2017-02-22 13:09:32 +00:00
0796696733 Emulated integer vector type for QPX and generic SIMD instruction sets. 2017-02-22 12:01:36 +00:00
f8b9ad7d50 Merge pull request #91 from sunpho84/public_modules_memebers
making public same serializable parameters in HMC Module
2017-02-22 00:53:20 +00:00
04a1959895 Merge pull request #90 from sunpho84/liming
adding --with switch to pass lime path
2017-02-22 00:52:53 +00:00
cc773ae70c Merge pull request #89 from sunpho84/prepend_package_with_grid
Prepending PACKAGE_ with GRID_ in Config.h
2017-02-22 00:52:10 +00:00
d21c51b9be Merge pull request #88 from sunpho84/pickpoketting
now it is possible to pass {coords list} to a peek or poke
2017-02-22 00:51:33 +00:00
597a7b4b3a Merge pull request #81 from edbennett/develop
Fix misleading message: "doxygen-pdf requires doxygen-pdf"
2017-02-22 00:50:59 +00:00
1c30e9a961 Verified 2017-02-21 23:01:25 +00:00
93cc270016 making public same serializable parameters in HMC Module
RNGModuleParameters
GridModuleParameters
2017-02-21 23:11:56 +01:00
29b60f7e1a adding --with switch to pass lime path 2017-02-21 23:09:39 +01:00
041884acf0 Prepending PACKAGE_ with GRID_ in Config.h
Avoid polluting linking progr
2017-02-21 22:51:36 +01:00
15e668eef1 now it is possible to pass {coords list} to a peek or poke 2017-02-21 22:48:38 +01:00
bf7e3f20d4 Staggered fermion optimised version 2017-02-21 14:35:42 +00:00
902afcfbaf Adding metric and the implicit steps 2017-02-21 11:30:57 +00:00
3ae92fa2e6 Global changes to parallel_for structure.
Move the comms flags to more sensible names
2017-02-21 05:24:27 -05:00
3906cd2149 Stencil fix on BNL KNL system 2017-02-20 17:51:31 -05:00
5a1fb29db7 Useful debug code info to preserve 2017-02-20 17:49:23 -05:00
661fc4d3d1 Debug AVX512 exchange code paths 2017-02-20 17:48:36 -05:00
41009cc142 Move exchange into the stencil only; keep Cshift fully general 2017-02-20 17:48:04 -05:00
37720c4db7 Count bytes off node only 2017-02-20 17:47:40 -05:00
1a30455a10 1000 iters on bmark for more accurate timing 2017-02-20 17:47:01 -05:00
97a6b61551 Covariant laplacian and implicit integration 2017-02-20 11:17:27 +00:00
cd0da81196 Merge branch 'feature/bgq-asm' of https://github.com/paboyle/Grid into feature/bgq-asm 2017-02-16 18:52:30 -05:00
f246fe3304 Improvements to avx for invertible to avoid latent bug 2017-02-16 23:52:44 +00:00
8a29c16bde Faster gather exchange 2017-02-16 23:52:22 +00:00
d68907fc3e Debug temp 2017-02-16 18:51:35 -05:00
5c0adf7bf2 Make clang happy with parenthesis 2017-02-16 23:51:33 +00:00
be3a8249c6 Faster gather 2017-02-16 23:51:15 +00:00
bd600702cf Vectorise the XYZT face gathering better.
Hard coded for simd_layout <= 2 in any given spread out direction; full generality is inconsistent
with efficiency.
2017-02-15 11:11:04 +00:00
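Roughly what "face gathering" means here, as a scalar sketch; the real code vectorises over SIMD lanes, is specialised for simd_layout <= 2 as noted above, and loops only over the face (ideally via a cached index table) instead of testing every site. All names below are illustrative:

```cpp
// Gather one face of a 4D local lattice into a contiguous send buffer (sketch).
#include <array>
#include <vector>
#include <cstddef>

using Site = std::array<double, 12>;     // stand-in for a colour-spinor object

void gather_face(const std::vector<Site> &lattice, std::vector<Site> &sendbuf,
                 const std::array<int, 4> &L,   // local lattice dimensions
                 int dim, int coord)            // which direction / slice to gather
{
  sendbuf.clear();
  std::array<int, 4> x;
  for (x[3] = 0; x[3] < L[3]; ++x[3])
  for (x[2] = 0; x[2] < L[2]; ++x[2])
  for (x[1] = 0; x[1] < L[1]; ++x[1])
  for (x[0] = 0; x[0] < L[0]; ++x[0]) {
    if (x[dim] != coord) continue;       // keep only the requested face
    std::size_t idx = ((x[3] * L[2] + x[2]) * L[1] + x[1]) * (std::size_t)L[0] + x[0];
    sendbuf.push_back(lattice[idx]);     // lexicographic site -> packed buffer
  }
}

int main() {
  std::array<int, 4> L = {4, 4, 4, 4};
  std::vector<Site> lattice(4 * 4 * 4 * 4), face;
  gather_face(lattice, face, L, /*dim=*/0, /*coord=*/3);   // gather the x = 3 face
  return (int)face.size() == 4 * 4 * 4 ? 0 : 1;            // expect 64 sites
}
```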
f011bdb869 Fixed overwrite of pminus projection in construction of 4d propagator from 5d. 2017-02-14 14:07:17 +00:00
bafb101e4f Testing different versions of the Laplacian 2017-02-13 15:38:11 +00:00
08fdf05528 Added and tested the covariant laplacian + CG solver 2017-02-13 15:05:01 +00:00
aca7a3ef0a Optimisation control improvements 2017-02-10 18:22:31 -05:00
9e72a6b22e Reverting to Xcode 7.3 2017-02-10 12:57:03 +00:00
1c12c5612c Xcode 8.2 for travis 2017-02-10 12:12:01 +00:00
a8193c4bcb Correcting travis compilation on gcc 2017-02-10 10:59:30 +00:00
c3d7ec65fa All tests compile. 2017-02-10 10:27:51 +00:00
8b6a6c8236 Resolving small merge conflict 2017-02-09 16:20:24 +00:00
e0571c872b Merge branch 'develop' into feature/hmc_generalise 2017-02-09 16:12:00 +00:00
c67f41887b Reverting parameters to original 2017-02-09 15:59:56 +00:00
84687ccf1f Handling an Intel compiler warning for Json class 2017-02-09 15:33:33 +00:00
3274561cf8 Cleanup 2017-02-09 15:18:38 +00:00
e08fbb3771 Merge pull request #84 from Lanny91/feature/rare_kaon
Rare Kaon decay contraction code
2017-02-08 08:23:34 -08:00
d7464aa0fe Switched from XmlWriter to CorrWriter in contraction code 2017-02-08 16:13:44 +00:00
00d29153f0 Merge branch 'feature/hadrons' of https://github.com/paboyle/Grid into feature/rare_kaon 2017-02-08 16:11:15 +00:00
2ce989f220 Hadrons: default I/O to HDF5 2017-02-08 07:50:05 -08:00
d7a1dc85be Revert "Hadrons: test for rare kaon contraction code."
This reverts commit 1e257a1251.
2017-02-08 13:23:05 +00:00
fc19503673 Removed MSink namespace. 2017-02-08 13:17:39 +00:00
beba824136 Make use of GammaL class in Weak Hamiltonian contractions 2017-02-08 12:45:39 +00:00
6ebf8b12b6 Removed unnecessary repeat of write in Weak Hamiltonian contractions 2017-02-08 12:43:13 +00:00
e5a7ed4362 Moved write outside of loop, some physics corrections 2017-02-08 12:29:33 +00:00
b9f7ea47c3 Access hasModule function directly from Environment instance. 2017-02-08 10:10:06 +00:00
06f7ee202e Revert "Add function to say whether or not a module exists in application class"
This reverts commit 522f6bf91a.
2017-02-08 10:08:18 +00:00
2b2fc6453f Fixed single precision compatibility issues 2017-02-07 13:59:29 +00:00
bdd2765461 Added missing allocation of Weak Hamiltonian result vector 2017-02-07 13:06:42 +00:00
2c246551d0 Overlap comms and compute options in wilson kernels 2017-02-07 01:37:10 -05:00
71ac2e7940 Faster RNG init 2017-02-07 01:33:23 -05:00
2bf4688e83 Running on BNL KNL 2017-02-07 01:32:10 -05:00
a48ee6f0f2 Don't use MPI3_leader any more. No real gain and it adds complexity 2017-02-07 01:31:24 -05:00
73547cca66 MPI3 working i think 2017-02-07 01:30:02 -05:00
123c673db7 Policy to control async or sync SendRecv 2017-02-07 01:24:54 -05:00
61f82216e2 Communicator Policy, NodeCount distinct from Rank count 2017-02-07 01:22:53 -05:00
8e7ca92278 Debugged cshift case 2017-02-07 01:21:32 -05:00
485ad6fde0 Stencil working in SHM MPI3 2017-02-07 01:20:39 -05:00
6ea2184e18 OMP define change 2017-02-07 01:17:16 -05:00
fdc170b8a3 Parallel fors in lattice transfer 2017-02-07 01:16:39 -05:00
060da786e9 Comms benchmark improvements 2017-02-07 01:07:39 -05:00
85c7bc4321 Bug fixes for cases that the physics code couldn't hit but were latent,
discovered on KNL (long vector, y SIMD dir) with the checker dir set to y.
Remove the assertions on these code paths now that they are tested.
2017-02-07 01:01:15 -05:00
0883d6a7ce Overlap comms compute support; make reg naming consistent with bgq asm 2017-02-07 00:59:32 -05:00
9ff97b4711 Improved stencil tests passing all on KNL multinode 2017-02-07 00:58:34 -05:00
b5e9c900a4 Better printing and signal handling options 2017-02-07 00:57:55 -05:00
4bbdfb434c Overlap comms compute modifications 2017-02-07 00:57:01 -05:00
4a45c06dd7 Code cleaning and addition of Weak Hamiltonian contraction log message 2017-02-06 20:12:30 +00:00
d6a7d7d1e0 Hadrons: added missing momentum parameter in rare kaon contraction test 2017-02-06 18:15:49 +00:00
1a122a0dd8 Hadrons: corrected gamma matrix inputs in rare kaon test 2017-02-06 17:35:41 +00:00
20e20733e8 Merge branch 'feature/hadrons' into feature/rare_kaon 2017-02-06 14:12:21 +00:00
b7cd1a19e3 Utilities for reading and writing "pair" objects. 2017-02-06 14:08:59 +00:00
f510002a62 Merge remote-tracking branch 'paboyle/feature/hadrons' into feature/hadrons 2017-02-03 14:37:34 +00:00
c94133af49 Added iteration reporting to CG and mixed CG
Added ability to manually change the initial CG inner tolerance in mixed CG
Added .hpp files to filelist script
2017-02-02 17:04:42 -05:00
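A compact sketch of the defect-correction pattern behind a mixed-precision CG, with the kind of iteration reporting and tunable inner tolerance mentioned above. A toy SPD operator (tridiagonal 2,-1) stands in for the Dirac matrix; nothing here is Grid's implementation:

```cpp
// Outer loop keeps the true residual in double; inner CG solves for a correction
// in single precision to a looser, tunable tolerance (sketch only).
#include <cmath>
#include <cstdio>
#include <vector>

template <typename R> using Vec = std::vector<R>;

template <typename R> void apply(const Vec<R> &x, Vec<R> &y) {   // y = A x
  int n = (int)x.size();
  for (int i = 0; i < n; i++)
    y[i] = 2 * x[i] - (i > 0 ? x[i - 1] : R(0)) - (i + 1 < n ? x[i + 1] : R(0));
}

template <typename R> R dot(const Vec<R> &a, const Vec<R> &b) {
  R s = 0;
  for (size_t i = 0; i < a.size(); i++) s += a[i] * b[i];
  return s;
}

// Plain CG in precision R with simple iteration reporting.
template <typename R> void cg(const Vec<R> &b, Vec<R> &x, R tol, int maxit) {
  Vec<R> r(b), Ap(b.size());
  apply(x, Ap);
  for (size_t i = 0; i < b.size(); i++) r[i] -= Ap[i];
  Vec<R> p(r);
  R rr = dot(r, r), bb = dot(b, b);
  int k = 0;
  for (; k < maxit && rr > tol * tol * bb; k++) {
    apply(p, Ap);
    R alpha = rr / dot(p, Ap);
    for (size_t i = 0; i < x.size(); i++) { x[i] += alpha * p[i]; r[i] -= alpha * Ap[i]; }
    R rrnew = dot(r, r);
    for (size_t i = 0; i < x.size(); i++) p[i] = r[i] + (rrnew / rr) * p[i];
    rr = rrnew;
  }
  std::printf("  inner CG: %d iterations, |r|/|b| = %g\n", k, (double)std::sqrt(rr / bb));
}

int main() {
  const int n = 64;
  const double outer_tol = 1e-10, inner_tol = 1e-5;   // inner_tol is the tunable knob
  Vec<double> b(n, 1.0), x(n, 0.0), r(n), Ax(n);
  for (int restart = 0; restart < 50; restart++) {
    apply(x, Ax);
    for (int i = 0; i < n; i++) r[i] = b[i] - Ax[i];          // true residual in double
    double rnorm = std::sqrt(dot(r, r) / dot(b, b));
    std::printf("outer %d: |r|/|b| = %g\n", restart, rnorm);
    if (rnorm < outer_tol) break;
    Vec<float> rf(n), df(n, 0.0f);
    for (int i = 0; i < n; i++) rf[i] = (float)r[i];
    cg<float>(rf, df, (float)inner_tol, 1000);                // correction in single precision
    for (int i = 0; i < n; i++) x[i] += (double)df[i];        // accumulate in double
  }
  return 0;
}
```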
eedcaf6470 Merge branch 'feature/hadrons' into feature/qed-fvol 2017-02-01 15:53:10 -08:00
e7d8030a64 operator>> for serialisable enums 2017-02-01 15:51:08 -08:00
d775fbb2f9 Gammas: code cleaning and gamma_L implementation & test 2017-02-01 15:45:05 -08:00
863855f46f header fix 2017-02-01 11:59:44 -08:00
419af7610d New gamma matrices tidying: generated code is confined to Gamma.* for readability 2017-02-01 11:23:12 -08:00
1e257a1251 Hadrons: test for rare kaon contraction code. 2017-02-01 16:36:40 +00:00
522f6bf91a Add function to say whether or not a module exists in application class 2017-02-01 16:36:08 +00:00
d35d87d2c2 Weak Hamiltonian Eye-type contraction execution 2017-02-01 16:33:24 +00:00
74a5cda84b Removed unnecessary "3pt" labels 2017-02-01 15:03:49 +00:00
5be05d85b8 Fixed collision of Wall source and sink header ifdefs 2017-02-01 13:56:22 +00:00
35ac85aea8 Updated Weak Hamiltonian contractions to use zero-flop gamma matrices 2017-02-01 12:57:34 +00:00
fa237401ff Consistent variable name in macro 2017-02-01 12:56:55 +00:00
97053adcb5 Merge branch 'feature/hadrons' into feature/rare_kaon 2017-02-01 10:13:29 +00:00
f8fbe4d7a3 Merge remote-tracking branch 'paboyle/feature/hadrons' into feature/hadrons
# Conflicts:
#	extras/Hadrons/Modules/MContraction/Meson.hpp
#	tests/hadrons/Test_hadrons_meson_3pt.cc

Updated Meson.hpp to utilise zero-flop gamma matrices.
2017-02-01 09:27:00 +00:00
ef31c012bf Merge remote-tracking branch 'paboyle/develop' into feature/hadrons 2017-01-31 17:36:10 +00:00
7da7d263c4 typo 2017-01-30 10:53:13 -08:00
1140573027 Gamma adj fix: now in Grid namespace to avoid collisions 2017-01-30 10:53:04 -08:00
9e9f621d5d Hadrons: added Weak Hamiltonian module dependencies, some reformatting. 2017-01-30 17:54:21 +00:00
651e1a7cbc Hadrons: Momentum inserted as multiples of 2*pi/L 2017-01-30 17:14:33 +00:00
a0cfbb6e88 Merge branch 'feature/gammas' into feature/hadrons
# Conflicts:
#	.gitignore
#	lib/qcd/spin/Dirac.cc
#	scripts/filelist
2017-01-30 09:10:49 -08:00
c4d3672720 Hadrons: Momentum projection in meson module. 2017-01-30 17:09:04 +00:00
515a26b3c6 gammas: copyright update 2017-01-30 09:07:09 -08:00
16be6d378c Now action factory support different Fields (templated) 2017-01-30 14:22:41 +00:00
f05d0565aa Adding ScalarField theory 2017-01-30 10:59:28 +00:00
b39f0d1fb6 Hadrons: default I/O to HDF5 if possible, XML otherwise 2017-01-27 18:12:35 -08:00
9f1267dfe6 Merge branch 'feature/qed-fvol' of github.com:paboyle/Grid into feature/qed-fvol 2017-01-27 17:06:34 -08:00
2e90285232 Merge pull request #80 from jch1g10/feature/qed-fvol
ChargedProp: remove ScalarField fs
2017-01-27 17:06:13 -08:00
e254de982e Merge branch 'feature/qed-fvol' of github.com:paboyle/Grid into feature/qed-fvol 2017-01-27 17:02:35 -08:00
28d99b5297 Merge branch 'develop' into feature/qed-fvol 2017-01-27 16:59:53 -08:00
c946d3bf3f Merge branch 'develop' of github.com:edbennett/Grid into develop 2017-01-27 22:12:11 +00:00
1c68098780 fix misleading message: "doxygen-pdf requires doxygen-pdf" 2017-01-27 22:04:26 +00:00
9bf4108d1f Weak Hamiltonian contraction modules, for Eye and NonEye contraction topologies. Execution for NonEye type diagrams has been implemented, but not yet for Eye type. 2017-01-27 16:58:11 +00:00
899e685627 Merge branch 'feature/sitmo_rng' into develop 2017-01-27 14:15:56 +00:00
ee93f0218b ChargedProp: remove ScalarField fs 2017-01-27 12:22:48 +00:00
6929a84c70 Reformatting files 2017-01-27 11:54:44 +00:00
5c779a789b Moving registrations in an independent file 2017-01-27 11:23:51 +00:00
161ed102a5 Merge pull request #79 from jch1g10/feature/qed-fvol
Fixed bug in ChargedProp
2017-01-26 19:49:14 -08:00
3bf993d81a gitignore update 2017-01-26 17:00:59 -08:00
fad743fbb1 Build system sanity check: corrected several headers not in the <Grid/*> format 2017-01-26 17:00:41 -08:00
e863a948e3 Cleaning up files and directories 2017-01-26 15:24:49 +00:00
f65a585236 ChargedProp: Switch to HDF5 output 2017-01-26 15:02:30 +00:00
977f34dca6 Added missing typename 2017-01-26 13:18:33 +00:00
90ad956340 Merge branch 'develop' of https://github.com/paboyle/Grid into feature/rare_kaon 2017-01-26 12:08:41 +00:00
7996f06335 Commented out registrations.
Move to an independent file that is linked only for the factory managed HMC
2017-01-25 18:27:45 +00:00
ef8d3831eb Temporarily patch the threading error in InsertSlice and ExtractSlice
Find the source and fix the error
2017-01-25 18:12:04 +00:00
70ed9fc40c Updating the engine to the last version 2017-01-25 18:10:41 +00:00
7b40a3e3e5 Reorganizing files 2017-01-25 18:09:46 +00:00
4d3787db65 Hadrons fixed for new gammas, Meson only does one contraction but this’ll change in the future 2017-01-25 09:59:00 -08:00
677757cfeb Added and tested SITMO PRNG 2017-01-25 12:47:22 +00:00
f7fbbaaca3 Compiles after merging 2017-01-25 12:11:58 +00:00
17629b8d9e Merge branch 'develop' into feature/hmc_generalise 2017-01-25 11:33:53 +00:00
0baa20d292 Againg fixing compilation on Travis, no LIME lib present 2017-01-25 11:18:44 +00:00
4571c918a4 Fixing compilation error when compiling without LIME 2017-01-25 11:14:43 +00:00
5251ea4d30 Adding more fermion action modules, generalised DWF 2017-01-25 11:10:44 +00:00
05cb6d318a gammas: adjoint implemented as a symbolic operation 2017-01-24 18:07:43 -08:00
0432e30256 Gamma right multiply code fix (now passes consistency check) 2017-01-24 17:36:23 -08:00
2c3ebc1e07 .gitignore update 2017-01-24 17:35:42 -08:00
068b28af2d Extensive gamma test program 2017-01-24 17:35:29 -08:00
f7db342f49 Serialisable enums can be converted to int 2017-01-24 17:33:26 -08:00
d65e81518f Merge branch 'feature/hadrons' into develop 2017-01-24 09:21:44 -08:00
7f456b4173 👷 Added all pseudofermion actions to the serialiser 2017-01-24 13:57:32 +00:00
a37e71f362 New automatic implementation of gamma matrices, Meson and SeqGamma are broken 2017-01-23 19:13:43 -08:00
ae99e99da2 Fixed bug in ChargedProp 2017-01-23 17:27:50 +00:00
c291ef77b5 Merge branch 'feature/hadrons' of https://github.com/paboyle/Grid into feature/hadrons 2017-01-23 15:24:47 +00:00
7dd2764bb2 Wall sink smearing 2017-01-23 15:17:54 +00:00
244f8fb6dc Added JSON parser (without NextElement) 2017-01-23 14:57:38 +00:00
05c1924819 Timing loop change 2017-01-23 10:43:45 +00:00
f3ca29af6c Merge branch 'feature/hadrons' into feature/qed-fvol 2017-01-21 13:41:05 -08:00
b7da264b0a Hadrons: Application is not storing the environment ref but calling getInstance() each time, solving a very nasty set fault on Linux/KNL 2017-01-21 13:40:23 -08:00
37988221a8 Merge branch 'feature/serialisation-hdf5' into feature/qed-fvol 2017-01-20 14:04:20 -08:00
74ac2aa676 Merge branch 'feature/serialisation-hdf5' into feature/hadrons 2017-01-20 14:03:51 -08:00
4c75095c61 HDF5: header fix 2017-01-20 12:14:01 -08:00
afa095d33d HDF5: better complex number support 2017-01-20 12:10:41 -08:00
6b5259cc10 HDF5 detects if a name is a dataset or not without using exception catching 2017-01-20 11:03:19 -08:00
27dfe816fa Added TwoFlavorsEO
Had to remove a conformability check in the Derivative of SchurDiff,
see the comments in the file
2017-01-20 16:59:31 +00:00
af29be2c90 Simplified operation of meson module. Result has been modified to output one contraction at a time for each pair of gamma insertions at source and sink. 2017-01-20 16:38:50 +00:00
f96fac0aee All functionalities ready.
Todo: add all the fermion action modules
2017-01-20 12:56:20 +00:00
7423a352c5 HDF5: typos 2017-01-19 18:33:04 -08:00
81e66d6631 HDF5: revert back to native types 2017-01-19 18:24:53 -08:00
ade1058e5f Hdf5Type does not need to be a pointer anymore 2017-01-19 18:23:55 -08:00
6eea9e4da7 HDF5 types static initialisation is mysteriously buggy on BG/Q, changing strategy 2017-01-19 18:02:53 -08:00
2c673666da Standardisation of HDF5 types 2017-01-19 17:19:12 -08:00
7a327a3f28 Merge branch 'develop' into feature/qed-fvol 2017-01-19 14:22:36 -08:00
07f2ebea1b Meson module now takes list of gamma matrices to insert at source and sink. 2017-01-19 22:18:42 +00:00
d6401e6d2c Merge branch 'feature/hadrons' into develop 2017-01-19 14:10:01 -08:00
24d3d31b01 Genetic scheduler: uses insert instead of emplace for better compiler compatibility 2017-01-19 14:08:22 -08:00
851f2ad8ef Adding fermions actions support in the factories 2017-01-19 10:00:02 +00:00
5405526424 Code typo 2017-01-18 22:42:19 -08:00
f3f0b6fef9 serious rewriting of Test_serialisation, now crashes if IO inconsistent 2017-01-18 17:41:05 -08:00
654e0b0fd0 Serialisable object are now comparable with == 2017-01-18 17:40:32 -08:00
4be08ebccc debug code cleaning 2017-01-18 17:39:59 -08:00
f599cb5b17 HDF5 serial IO implemented and tested 2017-01-18 16:50:21 -08:00
23e0561dd6 Added all required functionalities, time for cleaning
All actions to be added
2017-01-18 16:31:51 +00:00
a4a509497a Merge branch 'develop' of github.com:paboyle/Grid into develop 2017-01-17 16:22:22 -08:00
5803933aea First implementation of HDF5 serial IO writer, reader is still empty 2017-01-17 16:21:18 -08:00
8ae1a95ec6 Legal banners and module descriptions 2017-01-17 18:14:20 +00:00
82b7d4eaf0 Added noise loop dependencies 2017-01-17 15:58:32 +00:00
78774fbdc0 Construct loop propagator 2017-01-17 15:29:45 +00:00
924130833e Moved more parameters to serialization 2017-01-17 13:22:18 +00:00
7cf833dfe9 Fixed compilation error in tests hadrons (capital letter in dir name) 2017-01-17 11:00:54 +00:00
0157274762 HMC factories 2017-01-17 10:46:49 +00:00
87e8aad5a0 Added support for input file HMC modules (missing the actions yet) 2017-01-16 16:07:12 +00:00
c6f59c2933 Adding factories 2017-01-16 10:18:09 +00:00
91a3534054 Lattice slice utilities now thread safe 2017-01-16 06:32:25 +00:00
16a8e3d0d4 gitignore update for ST3 2017-01-16 06:32:05 +00:00
b7f90aa011 Added momentum choice for wall source 2017-01-13 15:54:19 +00:00
92f8950a56 Charged scalar prop: cleaning and output 2017-01-13 13:30:56 +00:00
65987a8a58 First implementation of the scalar QED propagator, runs but absolutely not checked 2017-01-12 20:44:23 +00:00
889d828bc2 Code cleaning 2017-01-12 18:17:44 +00:00
f22b79da8f Added missing type aliases 2017-01-12 12:52:12 +00:00
3855673ebf Added header for wall source 2017-01-12 11:42:37 +00:00
4db82da0db Wall sources 2017-01-12 11:41:10 +00:00
0cdc3d2fa5 Merge remote-tracking branch 'refs/remotes/paboyle/feature/hadrons' into feature/hadrons 2017-01-12 11:26:55 +00:00
ad98b6193d creating the necessary caches for the FFT EM scalar propagator 2017-01-11 18:40:43 +00:00
fc760016b3 More uniform cache name for scalar momentum propagators 2017-01-11 18:39:58 +00:00
2da86f7dae Merge branch 'feature/hadrons' into feature/qed-fvol 2017-01-11 18:38:05 +00:00
41df1db811 Hadrons: number of dimensions entirely determined by the initial grid 2017-01-11 18:37:49 +00:00
0dfda4bb90 Working on the RNGModule 2017-01-09 11:06:18 +00:00
1189ebc8b5 Cleaning up the checkpointers interface 2017-01-05 15:52:52 +00:00
97843e2b58 Hadrons: free scalar buffer fix and output 2017-01-05 14:58:55 +00:00
82b3f54697 scalar free propagator fix 2017-01-05 14:58:07 +00:00
1bb8578173 Added module for checkpointers 2017-01-05 13:09:32 +00:00
c3b6d573b9 Merge branch 'feature/bgq-asm' of https://github.com/paboyle/Grid into feature/bgq-asm 2016-12-30 22:42:17 +00:00
673994b281 Hadrons: modules for scalar propagators 2016-12-29 22:44:58 +01:00
bbc0eff078 Hadrons: scalar sources 2016-12-29 22:44:22 +01:00
4c60e31070 Hadrons: code cleaning 2016-12-29 22:44:08 +01:00
afbf7d4c37 QED Gimpl moved in Photon.h 2016-12-29 22:43:38 +01:00
8c3cc32364 Scalar action 2016-12-29 22:42:58 +01:00
1e179c903d Worried about integer; suspect where statements are broken 2016-12-27 17:46:38 +00:00
669cfca9b7 No inline 2016-12-27 17:45:40 +00:00
ff2f559a57 Remove inline on gather optimised path 2016-12-27 17:45:19 +00:00
03c81bd902 Merge branch 'feature/bgq-asm' of https://github.com/paboyle/Grid into feature/bgq-asm 2016-12-27 11:25:35 +00:00
a869addef1 Stats switch off 2016-12-27 11:25:22 +00:00
1caa3fbc2d LOCK UNLOCK only 2016-12-27 11:24:45 +00:00
3d21297bbb Call the fast path compressor for wilson kernels to avoid if else on projector 2016-12-27 11:23:13 +00:00
25efefc5b4 Back to original thread policy post test 2016-12-23 09:49:04 +00:00
eabf316ed9 BGQ performance ASM 2016-12-22 21:56:08 +00:00
04ae7929a3 BGQ or KNL assembler now 2016-12-22 17:53:22 +00:00
caba0d42a5 L1p controls 2016-12-22 17:52:55 +00:00
9ae81c06d2 L1p controls for BG/Q 2016-12-22 17:52:21 +00:00
0903c48caa Hot start SU3 2016-12-22 17:51:45 +00:00
7dc36628a1 QPX finishing 2016-12-22 17:50:48 +00:00
b8cdb3e90a Debug hack; raises from 62 GF/s to 72 GF/s per node on BG/Q 2016-12-22 17:50:14 +00:00
5241245534 Default to static scheduling 2016-12-22 17:49:21 +00:00
960316e207 type conversion in printf 2016-12-22 17:27:01 +00:00
5214846341 Adding a resource manager 2016-12-22 12:41:56 +00:00
4c3fd9fa3f stochastic QED field module in Hadrons 2016-12-22 00:29:41 +01:00
17b3a10d46 stochastic QED: function to cache 1/sqrt(khat^2) 2016-12-22 00:29:19 +01:00
149a46b92c Merge branch 'feature/hadrons' into feature/qed-fvol 2016-12-22 00:26:43 +01:00
3215ae6b7e Hadrons: genetic scheduler crashes in multi-thread with 1 module, multi-threading deactivated for now 2016-12-22 00:26:30 +01:00
7a85fddc7e Hadrons: modification of registration mechanism to allow for persistent caches 2016-12-22 00:25:36 +01:00
ce1a115e0b Removing redundant arguments for integrator functions, step 1 2016-12-20 17:51:30 +00:00
db9c28a773 qed-fvol: Photon parameter name fix 2016-12-20 12:41:39 +01:00
9ac3ac41df serialisable Photon parameters 2016-12-20 12:41:01 +01:00
2af9ab9034 old Makefile cleaning 2016-12-20 12:40:26 +01:00
6f1ea96293 Merge branch 'develop' into feature/qed-fvol 2016-12-20 12:33:02 +01:00
f8d11ff673 better serialisable enums (can be encapsulated into classes) 2016-12-20 12:31:49 +01:00
3f2d53a994 BGQ assembler beginning 2016-12-20 10:21:26 +00:00
8a337f3070 Move cayley into mainstream tests 2016-12-18 02:35:31 +00:00
a59f5374d7 Evade warning 2016-12-18 02:23:55 +00:00
4b220972ac Warning fix 2016-12-18 02:14:17 +00:00
629f43e36c Return statement needed 2016-12-18 02:09:37 +00:00
a3172b3455 Precision error 2016-12-18 02:07:45 +00:00
3e6945cd65 Fixing AVX Z-mobius 2016-12-18 02:05:11 +00:00
87be03006a AVX 512 code broke other compiles; fixing 2016-12-18 01:45:09 +00:00
f17436fec2 Bad commit fixed 2016-12-18 01:27:34 +00:00
4d8b01b7ed Merge branch 'develop' of https://github.com/paboyle/Grid into develop 2016-12-18 00:56:57 +00:00
fa6acccf55 Zmobius asm 2016-12-18 00:56:19 +00:00
55cb22ad67 Z mobius bmark 2016-12-18 00:55:37 +00:00
df9108154d Debugged 2 versions of assembler; ls vectorised, xyzt vectorised 2016-12-17 23:47:51 +00:00
b3e7f600da Partial implementation of 4d vectorisation assembler 2016-12-16 23:50:30 +00:00
d4071daf2a Template specialise 2016-12-16 22:28:29 +00:00
a2a6329094 AVX512 only for ASM compilation 2016-12-16 22:03:29 +00:00
eabc577940 Assembler possibly working 2016-12-16 16:55:36 +00:00
2e3c5890b6 qed-fvol: build fix 2016-12-15 20:06:46 +00:00
bc6678732f Merge branch 'feature/hadrons' into feature/qed-fvol
# Conflicts:
#	Makefile.am
#	configure.ac
#	lib/qcd/action/gauge/Photon.h
2016-12-15 19:53:00 +00:00
b10ae00c8a Merge commit '6ad73145bc9754a5f26093eee5a34473ba0cff82' into feature/qed-fvol 2016-12-15 19:48:58 +00:00
67d72000e7 Hadrons: more legal banner fixes 2016-12-15 18:26:39 +00:00
80cef1c78f Hadrons: legal banner fix 2016-12-15 18:21:52 +00:00
91e98b1dd5 Merge branch 'feature/hadrons' into develop 2016-12-15 18:15:56 +00:00
b791c274b0 Revert "AVX: uninitialised variable fix"
This reverts commit c22c3db9ad.
2016-12-15 18:15:35 +00:00
596dd570c7 Linux linking fix 2016-12-15 12:26:53 +00:00
cad158e42f Hadrons: tests improvement 2016-12-14 19:41:51 +00:00
f63fac0c69 Hadrons: the XML runner can use a precomputed schedule 2016-12-14 19:41:30 +00:00
ab92de89ab Hadrons: utility to schedule a run 2016-12-14 19:41:04 +00:00
846272b037 Hadrons: option to save and load a schedule 2016-12-14 19:40:36 +00:00
f3e49e4b73 Hadrons: module templates update 2016-12-14 18:19:46 +00:00
decbb61ec1 Hadrons: XML driven program is again a binary installed with Grid 2016-12-14 18:19:24 +00:00
7e2482aad1 Hadrons: code cleaning 2016-12-14 18:04:21 +00:00
e1653a9f94 Hadrons: size fix in DWF module 2016-12-14 18:02:36 +00:00
ea40854e0b Hadrons: type names are demangled 2016-12-14 18:02:18 +00:00
34df71e755 Hadrons: function to save an application as an XML file 2016-12-14 18:01:56 +00:00
3af663e17b Hadrons: modules remember their factory registration name 2016-12-14 17:59:45 +00:00
0cd6b1858c Merge branch 'develop' of https://github.com/paboyle/Grid into feature/staggering 2016-12-14 09:23:22 +00:00
0bd296dda4 Adding check of the Dag part in the benchmark 2016-12-14 03:15:09 +00:00
af0ccdd8e9 Moving output order 2016-12-14 02:02:42 +00:00
c22c3db9ad AVX: uninitialised variable fix 2016-12-13 19:05:58 +00:00
013e710c7d Hadrons: 3pt function test improvement 2016-12-13 19:04:43 +00:00
16693bd69d Hadrons: scheduler heuristic benchmark 2016-12-13 19:02:32 +00:00
de8f80cf94 Hadrons: genetic operators improvement 2016-12-13 19:02:05 +00:00
2fb92dbc6e Cleaning up previous debug lines 2016-12-13 07:53:43 +00:00
5c74b6028b Commit for debugging, lot of IO 2016-12-13 06:35:30 +00:00
e0be2b6e6c Adding a new tests for the Ls vec CG 2016-12-13 04:59:18 +00:00
ef72f322d2 consistency of tests 2016-12-13 02:24:20 +00:00
426197e446 Nc=3 2016-12-12 09:10:54 +00:00
99e2c1e666 Kernels options 2016-12-12 09:08:53 +00:00
1440565a10 Decrease verbosity 2016-12-12 09:08:04 +00:00
e9f0c0ea39 Staggered kernels options 2016-12-12 09:07:38 +00:00
7bc2065113 Adding report at the end of the DWF HMC tests 2016-12-12 04:21:34 +00:00
4a87486365 Hadrons: a bit of cleaning in the scheduler 2016-12-10 21:14:13 +01:00
fe187e9ed3 Compiles and passes under ZMobius with assembler 2016-12-10 00:47:48 +00:00
0091b50f49 Zmobius working -- not asm yet 2016-12-09 22:51:32 +00:00
fb8d4b2357 Lots of debug on performance Mobius 2016-12-08 17:28:28 +00:00
ff71a8e847 Ready for sim 2016-12-08 17:00:32 +00:00
83fa038bdf Streaming stores 2016-12-08 16:58:42 +00:00
7a61feb6d3 Allocator added with caching for Linux VM subsystem optimisation 2016-12-08 16:58:01 +00:00
69ae817d1c Updates for supporting Mobius better 2016-12-08 16:43:28 +00:00
2bd4233919 Completed testing of the HMC for Ls vectorised version (on AVX2) 2016-12-07 04:56:37 +00:00
143c70e29f Debugged the threaded version. Cleaning up 2016-12-07 04:40:25 +00:00
51322da6f8 Hadrons: genetic scheduler improvement 2016-12-07 09:00:45 +09:00
49c3eeb378 Hadrons: more verbose genetic parameters 2016-12-07 08:59:58 +09:00
c56707e003 useless debug message removed 2016-12-07 08:59:20 +09:00
b812d5e39c Added single threaded version of the derivative for the Ls vectorised DWF 2016-12-06 16:31:13 +00:00
5b3edf08a4 Hadrons: sequential gamma source 2016-12-06 12:13:19 +09:00
bd1d1cca34 Hadrons: code cleaning 2016-12-06 12:12:59 +09:00
646b11f5c2 Hadrons: exposing scheduler settings 2016-12-06 12:12:05 +09:00
a683a0f55a Hadrons: meson tests renamed spectrum 2016-12-06 12:11:44 +09:00
e6effcfd95 Hadrons: more contractions in the spectrum test 2016-12-05 17:41:58 +09:00
aa016f61b9 Hadrons: empty baryon contractions 2016-12-05 17:26:57 +09:00
d42a1b73c4 Hadrons: code cleaning 2016-12-05 17:26:36 +09:00
d292657ef7 Hadrons: more module templates 2016-12-05 17:26:17 +09:00
d1f7c6b94e Hadrons: templatisation of the fermion implementation 2016-12-05 16:47:29 +09:00
7ae734103e Hadrons: namespace macro to tackle GCC 5 bug 2016-12-05 14:29:32 +09:00
01480da0a8 Merge branch 'develop' into feature/hmc_generalise 2016-12-05 05:10:27 +00:00
7a1ac45679 Hadrons: configure.ac Linux typo 2016-12-05 14:00:10 +09:00
320268fe93 Hadrons: code cleaning 2016-12-05 13:57:34 +09:00
dd6fb140c5 Hadrons: big module reorganisation 2016-12-05 13:53:31 +09:00
0b4f680d28 Hadrons: meson run test 2016-12-05 11:44:58 +09:00
a69086ba1f Hadrons: application run minor fixes 2016-12-05 11:44:36 +09:00
7433eed274 Hadrons: module creation fix 2016-12-05 11:44:16 +09:00
ee5b1fe043 Hadrons: freeing object message fix 2016-12-05 09:08:45 +09:00
1540616b22 Hadrons: integer types cleanup 2016-12-05 08:53:48 +09:00
8190523e4c Hadrons: type fix in module creation 2016-12-02 11:04:34 +09:00
b5555d85a7 Hadrons: generelalised FImpl for actions 2016-12-02 11:04:15 +09:00
e27c6b217c Updating 2016-12-01 12:42:53 +00:00
9ad3d3453e Hadrons is now a library, the previous XML driven program is now a test 2016-12-01 21:36:29 +09:00
f7a6b8e5ed Merge branch 'develop' of https://github.com/paboyle/Grid into develop 2016-12-01 11:39:52 +00:00
6adf35da54 Faster Mobius 2016-12-01 11:39:04 +00:00
d8b716d2cd Hadrons: static initialisation fixed 2016-12-01 15:43:16 +09:00
cd01c1dbe9 Ls 16 more relevant 2016-11-30 22:11:10 +00:00
6ad73145bc Calculate Wilson loop average over multiple configurations. 2016-11-30 15:17:22 +00:00
bd0430b34f Serialisation in malloc fixed 2016-11-29 22:27:55 +00:00
c097fd041a Merge branch 'develop' of https://github.com/paboyle/Grid into feature/staggering 2016-11-29 13:44:17 +00:00
77fb25fb29 Push 5d tests 2016-11-29 13:43:56 +00:00
389e0a77bd Staggered Fermion 5D 2016-11-29 13:13:56 +00:00
2f92b4860b Test the full Mooee sector 2016-11-29 00:15:08 +00:00
4704f2d009 Actions updated 2016-11-29 00:14:36 +00:00
ae9688e343 Reporting also the total mflops 2016-11-28 11:37:02 +00:00
43928846f2 first steps to make Hadrons a library 2016-11-28 16:02:15 +09:00
fabcd4179d Hadrons: propagator type coming from the fermion implementation 2016-11-28 14:02:10 +09:00
a8843c9af6 Code cleaning, the fermion implementation can be specified using the macro FIMPL 2016-11-27 16:47:22 +09:00
7a1a7a685e Merge branch 'feature/fft-opt' into feature/hadrons 2016-11-27 15:32:03 +09:00
1e44fd3094 Added some details on the mpi flags for Cray machines 2016-11-26 18:30:53 +00:00
d8258f0758 Merge branch 'develop' of https://github.com/paboyle/Grid into develop 2016-11-26 18:25:32 +00:00
6c0cc5676b Adding Eigen.inc to the gitignore 2016-11-26 18:25:12 +00:00
f7293f2ddb Merge pull request #69 from jch1g10/feature/qed-fvol 2016-11-26 07:04:04 +09:00
11dc0b398b Merge pull request #74 from Lanny91/develop 2016-11-26 07:01:51 +09:00
b18950f776 Added simd real divide test with QPX divide fixes 2016-11-25 13:21:33 +00:00
0acbf77bc6 Add QPX Div structure 2016-11-24 13:24:12 +00:00
3cdf945d84 Test_fftf fix 2016-11-24 09:10:03 +09:00
5833f247fa more FFT optimisations 2016-11-24 09:09:48 +09:00
95f43d27ae Merge branch 'develop' of https://github.com/paboyle/Grid into feature/staggering 2016-11-22 13:49:22 +00:00
668ca57702 Merge branch 'develop' of https://github.com/paboyle/Grid into feature/staggering 2016-11-22 13:49:11 +00:00
a2cffb0304 AVXFMA target fixed 2016-11-21 17:47:18 +01:00
bafbac6ac4 Merge branch 'feature/gen-simd' into develop 2016-11-19 13:45:30 +01:00
595f1ce371 GEN SIMD build fix 2016-11-19 13:45:12 +01:00
6d7cde4eb4 README update 2016-11-19 13:17:35 +01:00
97cddda49e Merge branch 'feature/gen-simd' into feature/doxygen
# Conflicts:
#	Makefile.am
#	configure.ac
2016-11-19 13:11:13 +01:00
433afd36f5 Makefile rule for simple_* objects 2016-11-19 01:33:13 +01:00
b873504b90 fully generic SIMD 2016-11-19 01:32:39 +01:00
62749d05a6 Naming the scalar action 2016-11-17 12:26:20 +00:00
3834feb4b7 Adding action names 2016-11-16 16:46:49 +00:00
6b8ee7bae0 Merge branch 'feature/feynman-rules' into feature/qed-fvol 2016-11-15 13:08:08 +00:00
739c2308b5 Set imaginary part of stochastic QED field to zero using real() instead of conjugate(). 2016-11-15 13:07:52 +00:00
454302414d Small modification to the test HMC 2016-11-15 12:31:13 +00:00
042ae5b87c generic 256bits SIMD 2016-11-15 12:16:15 +00:00
a71b69389b QedFVol: calculate square Wilson loops up to 10x10 2016-11-14 18:23:04 +00:00
d49e502f53 Merge branch 'feature/feynman-rules' into feature/qed-fvol 2016-11-14 18:00:33 +00:00
92ec3404f8 Set imaginary part of stochastic QED field to zero after FFT into position space 2016-11-14 17:59:02 +00:00
f4ebea3381 QedFVol: add functions for computing spatial and timelike Wilson loops 2016-11-14 17:51:53 +00:00
cf167d0cd1 QedFVol: implement exponentiation of photon field 2016-11-14 17:02:29 +00:00
6f8b771a37 Adding date of the last commit 2016-11-10 18:52:00 +00:00
4e1ffdd17c Adding git info to the configure output 2016-11-10 18:44:36 +00:00
1aa695cd78 Hadrons: merge typo 2016-11-10 18:38:30 +00:00
a783282b8b Merge branch 'develop' into feature/hmc_generalise 2016-11-10 18:13:07 +00:00
19b85d8486 Some comments in the hmc files 2016-11-10 17:55:58 +00:00
58f4950652 Merge branch 'release/v0.6.0' into develop 2016-11-09 12:44:00 +00:00
c363bdd784 Merge branch 'release/v0.6.0' 2016-11-09 12:43:14 +00:00
604f0ea2f6 Merge branch 'develop' into release/v0.6.0 2016-11-09 04:13:01 -08:00
42c912f608 Merge branch 'develop' of https://github.com/paboyle/Grid into develop 2016-11-09 04:12:15 -08:00
33dc1f51b5 Final sign off commits from Cori-1 2016-11-09 04:11:03 -08:00
c30d96ea50 QedFVol: x86intrin.h namespace fix 2016-11-09 11:06:20 +00:00
13a8997789 Merge branch 'release/v0.6.0' into feature/hadrons
# Conflicts:
#	Makefile.am
2016-11-08 20:43:39 +00:00
9576f0903d namespace fix 2016-11-08 19:07:47 +00:00
34cf702b24 README is now a symlink to README.md 2016-11-08 17:00:38 +00:00
8a5e3a917c Merge branch 'develop' into release/v0.6.0
# Conflicts:
#	tests/core/Test_fft_gfix.cc
2016-11-08 16:53:42 +00:00
65bcf281d0 Merge branch 'develop' of github.com:paboyle/Grid into develop 2016-11-08 16:51:19 +00:00
cd0be8cb24 Test_fft_gfix.c precision fix 2016-11-08 15:32:05 +00:00
3d2a22a14d include fix for MKL 2016-11-08 15:31:47 +00:00
7ffe17ada1 Merge branch 'feature/feynman-rules' into feature/qed-fvol 2016-11-08 14:52:43 +00:00
a26adfb090 README: only markdown 2016-11-08 14:11:18 +00:00
f6e1a5b348 building tests depends on building the library at the top level 2016-11-08 14:08:33 +00:00
c5a025d421 README typo 2016-11-08 14:07:59 +00:00
50d277d8d9 Merge branch 'develop' into release/v0.6.0 2016-11-08 13:44:14 +00:00
343f3e829f Fixes prerelease to make all tests 2016-11-08 13:42:12 +00:00
f85b35314d Fix a routine for single node processor coor from rank 2016-11-08 11:49:13 +00:00
3dc2e05d6e Time as well since MKL returns zero for Mflops 2016-11-08 11:36:18 +00:00
0cff8754d1 Usecs 2016-11-08 11:35:41 +00:00
afc8d3e524 Adding support for parallel recursive compilation for the tests 2016-11-07 11:13:43 +00:00
692b44dac1 Merge branch 'develop' into release/v0.6.0 2016-11-04 22:48:11 +00:00
96ba42a297 omm buf 2016-11-04 22:47:25 +00:00
7df940dc3e homemade test recursive target for old autotools versions 2016-11-04 22:32:25 +00:00
f7b60004f3 Merge branch 'develop' into release/v0.6.0 2016-11-04 16:08:07 +00:00
8af8b047fd tests is now a recursive target 2016-11-04 13:44:21 +00:00
6592078fef Make.inc removed, once again don't commit it! 2016-11-04 13:43:40 +00:00
ad971ca07b fftw3.h is now expected to be an external header 2016-11-04 13:12:35 +00:00
f2f16eb972 fftw3.h removed, please don't commit this file back 2016-11-04 13:11:05 +00:00
b7d55f7dfb Fix a typo in reorg of the --dslash-asm 2016-11-04 11:35:08 +00:00
6e548a8ad5 Linux compile needed 2016-11-04 11:34:16 +00:00
ee686a7d85 Compiles now 2016-11-03 16:58:23 +00:00
1c5b7a6be5 Staggered phases first cut, c1, c2, u0 2016-11-03 16:26:56 +00:00
a5dd4a9bab Merge branch 'feature/fft-opt' into develop 2016-11-03 14:34:46 +00:00
ec232af851 Photon.h references removed 2016-11-03 14:34:16 +00:00
17e30281e9 Merge branch 'develop' into feature/fft-opt
# Conflicts:
#	lib/FFT.h
2016-11-03 14:14:03 +00:00
2854e601e6 FFT test typo 2016-11-03 14:09:47 +00:00
aee44dc694 Photon.h removed from develop branch 2016-11-03 13:54:15 +00:00
75bbf6a0af Merge branch 'develop' into feature/feynman-rules 2016-11-03 13:52:11 +00:00
c65d23935a README update 2016-11-03 13:48:20 +00:00
92cd797636 MPI auto configure fix 2016-11-03 13:48:07 +00:00
111bfbc6bc notimestamp by default 2016-11-03 11:40:26 +00:00
f41a230b32 Decrease mpi3l verbose 2016-11-02 19:54:03 +00:00
c067051d5f Merge branch 'develop' into release/v0.6.0 2016-11-02 13:59:18 +00:00
afdeb2b13c Merge branch 'feature/mpi3-master-slave' into develop 2016-11-02 13:43:20 +00:00
9e2ec2719b Merge branch 'develop' into feature/mpi3-master-slave 2016-11-02 13:02:56 +00:00
757a928f9a Improvement to use own SHM_OPEN call to avoid openmpi bug. 2016-11-02 12:37:46 +00:00
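For context, the bare POSIX calls involved in rolling your own shared segment look roughly like this; segment name and size are illustrative, and older glibc needs -lrt at link time. Every rank on a node that opens the same name maps the same pages:

```cpp
// Bare-bones POSIX shared-memory segment (sketch only, not Grid's code).
#include <fcntl.h>
#include <sys/mman.h>
#include <unistd.h>
#include <cstddef>
#include <cstdio>

int main() {
  const char *name = "/grid_shm_demo";             // hypothetical segment name
  const size_t bytes = 1UL << 20;

  int fd = shm_open(name, O_CREAT | O_RDWR, 0600);
  if (fd < 0) { std::perror("shm_open"); return 1; }
  if (ftruncate(fd, (off_t)bytes) != 0) { std::perror("ftruncate"); return 1; }

  void *p = mmap(nullptr, bytes, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
  if (p == MAP_FAILED) { std::perror("mmap"); return 1; }

  static_cast<char *>(p)[0] = 42;                  // visible to every process mapping it

  munmap(p, bytes);
  close(fd);
  shm_unlink(name);                                // remove the segment when done
  return 0;
}
```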
bc248b6948 Merge branch 'release/v0.6.0' into feature/KNL_double_prec
Conflicts:
	lib/simd/Grid_avx512.h
2016-11-02 10:40:49 +00:00
ae8561892e Eliminating useless defines 2016-11-02 10:21:06 +00:00
32375aca65 Semaphore sleep/wake up on remote processes. 2016-11-02 09:27:20 +00:00
bb94ddd0eb Tidy up of mpi3; also some cleaning of the dslash controls. 2016-11-02 08:07:09 +00:00
330a9b3f4c Merge pull request #65 from jch1g10/feature/qed-fvol 2016-11-01 19:53:25 +00:00
c2d78493c8 Merge pull request #64 from jch1g10/feature/feynman-rules 2016-11-01 19:53:08 +00:00
28ff66a381 Merge branch 'feature/feynman-rules' into feature/qed-fvol 2016-11-01 16:07:46 +00:00
78c7bcee36 QedFVol: Change variables of type "double" to type "Real". 2016-11-01 16:06:05 +00:00
7f0fc0eff5 Remove explicit use of double-precision types in photon.h 2016-11-01 16:02:35 +00:00
164d3691db Staggered 2016-11-01 14:24:22 +00:00
791cb050c8 Comms improvements 2016-11-01 11:35:43 +00:00
00a7b95631 Merge remote-tracking branch 'gh-james/feature/qed-fvol' into feature/qed-fvol 2016-10-31 18:46:23 +00:00
94d8321d01 Merge branch 'feature/feynman-rules' into feature/qed-fvol 2016-10-31 18:41:30 +00:00
d5e95bc350 Merge branch 'release/v0.6.0' into feature/feynman-rules 2016-10-31 18:36:21 +00:00
6efac3a252 Merge pull request #61 from jch1g10/feature/feynman-rules
Add missing volume factor in stochastic QED field
2016-10-31 18:35:22 +00:00
7a84906b5f Merge branch 'release/v0.6.0' into feature/fft-opt 2016-10-31 18:31:49 +00:00
07416e4567 README update 2016-10-31 18:21:52 +00:00
66d832c733 FFTW header fix 2016-10-31 16:39:29 +00:00
e74417ca12 big build system polish 2016-10-31 16:31:27 +00:00
7bd0084b5d Merge branch 'develop' into release/v0.6.0 2016-10-31 16:30:22 +00:00
e8c3174ae2 Small change in the defines 2016-10-30 12:23:11 +00:00
9b066e94d0 Compilation with both single and double precision 2016-10-30 12:04:06 +00:00
ac24cc9f99 Merge branch 'feature/feynman-rules' into feature/qed-fvol 2016-10-29 11:05:26 +01:00
618abdf302 Add missing volume factor in stochastic QED field 2016-10-29 11:04:02 +01:00
e1042aef77 First version of the double prec for testing purposes
It does not compile single and double versions at the same time
2016-10-28 17:20:04 +01:00
aa6a839c60 avx512 build fix; detect clang/gcc intrinsics vs. ICPC 2016-10-28 09:13:09 +01:00
ac99a56237 Merge branch 'develop' into release/v0.6.0 2016-10-27 11:53:24 +01:00
b4d2af8c89 threaded FFT 2016-10-26 19:46:36 +01:00
434af6aeaa Merge branch 'develop' into feature/fft-opt 2016-10-26 18:50:38 +01:00
e90f8ac841 Merge branch 'develop' into feature/feynman-rules 2016-10-26 18:50:21 +01:00
a1705a8d53 debug message removed 2016-10-26 18:50:07 +01:00
ca21003f01 Merge branch 'feature/fft-opt' into feature/feynman-rules
# Conflicts:
#	lib/FFT.h
#	lib/qcd/action/fermion/WilsonFermion5D.h
#	tests/core/Test_fft.cc
2016-10-26 18:44:47 +01:00
14ddf2c234 more FFT optimisations 2016-10-26 17:36:26 +01:00
1d666771f9 Debugging the RNG, eliminate the barrier after broadcast 2016-10-26 16:08:23 +01:00
d50055cd96 Making the ILDG support optional 2016-10-26 09:48:01 +01:00
bca861e112 Note: FFT should be GridFFT (not changed yet).
Gauge fix with FFT is added (tests/core)
2016-10-25 14:21:48 +01:00
3ab4c8c0bb QedFVol: calculate plaquette and 2x2 Wilson loop of stochastic QED field 2016-10-25 13:32:02 +01:00
33d199a0ad temporary thread safety in FFT 2016-10-25 12:56:40 +01:00
93896ce59e Roll version number 2016-10-25 06:12:49 +01:00
b1508e4124 Merge branch 'feature/mpi3' into develop 2016-10-25 06:06:36 +01:00
b820076b91 Merge branch 'develop' into feature/mpi3 2016-10-25 06:02:33 +01:00
09f66100d3 MPI 3 compile on non-linux 2016-10-25 06:01:12 +01:00
d7d92af09d Travis fail fix attempt 2016-10-25 01:45:53 +01:00
460d0753a1 Merge branch 'develop' into feature/mpi3
Conflicts:
	lib/simd/Grid_avx512.h
2016-10-25 01:08:51 +01:00
8f8058f8a5 More random bits on parallel seeding 2016-10-25 01:05:52 +01:00
d97a27f483 Verbose 2016-10-25 01:05:31 +01:00
7c3363b91e Compiles all comms targets 2016-10-25 00:04:17 +01:00
b94478fa51 mpi, mpi3, shmem all compile.
mpi, mpi3 pass single node multi-rank
2016-10-24 23:45:31 +01:00
47c7159177 ILDG reader/writer works
Fill the xml header with the required information, todo.
2016-10-24 21:57:54 +01:00
13bf0482e3 FFT optimisation 2016-10-24 19:25:40 +01:00
a795b5705e memory optimisation 2016-10-24 19:25:15 +01:00
392e064513 fast local peek-poke 2016-10-24 19:24:21 +01:00
b6a65059a2 Update to use shared memory to contain the stencil comms buffers
Tested on 2.1.1.1 1.2.1.1 4.1.1.1 1.4.1.1 2.2.1.1 subnode decompositions
2016-10-24 17:30:43 +01:00
f415db583a Adding ILDG format 2016-10-24 15:48:22 +01:00
f55c16f984 Adding a barrier in the RNG save 2016-10-24 11:02:14 +01:00
ea25a4d9ac Works 2016-10-23 06:10:05 +01:00
c190221fd3 Internal SHM comms in non-simd directions working
Need to fix simd directions
2016-10-22 18:14:27 +01:00
df67e013ca More debug output for the RNG 2016-10-22 13:34:17 +01:00
3e990c9d0a Reverting the broadcast change 2016-10-22 13:26:43 +01:00
4b740fc8fd Debugging the RNG state save 2016-10-22 13:06:00 +01:00
0fcd2e7188 Simplify the comms structure prior to implementing Shared memory direct bounce 2016-10-21 22:44:10 +01:00
910b8dd6a1 use simd type 2016-10-21 22:35:29 +01:00
75ebd3a0d1 Typo fixes and rotate for CLANG 2016-10-21 22:34:29 +01:00
cccd14b09e Small cleanup 2016-10-21 17:20:54 +01:00
e6acffdfc2 Fixing the plaquette computation 2016-10-21 16:06:34 +01:00
26d124283e Merge branch 'feature/feynman-rules' into feature/qed-fvol 2016-10-21 15:23:31 +01:00
0d889b7041 QedFVol: first attempt at generating a QED field 2016-10-21 15:21:32 +01:00
7c8f79b147 more stochastic QED fixes 2016-10-21 15:20:12 +01:00
09fd5c43a7 Reasonably fast version 2016-10-21 15:17:39 +01:00
ab31ad006a Merge branch 'feature/feynman-rules' into feature/qed-fvol 2016-10-21 14:42:18 +01:00
462921e549 QED: fix stochastic field 2016-10-21 14:41:08 +01:00
392130a537 Working on the 5d 2016-10-21 14:22:25 +01:00
f22317748f Merge branch 'feature/mpi3' of https://github.com/paboyle/Grid into feature/mpi3 2016-10-21 13:36:35 +01:00
6a9eae6b6b Reporting improvements 2016-10-21 13:36:18 +01:00
fad96cf250 StencilBufs 2016-10-21 13:36:00 +01:00
f331809c27 Use variable type for loop 2016-10-21 13:35:37 +01:00
bd6a228af6 Merge commit '20a091c3eddfdb67a82ece6413740a93650a2f98' into feature/feynman-rules 2016-10-21 13:10:30 +01:00
63d219498b first (dirty) implementation of Feynman stochastic EM field 2016-10-21 13:10:13 +01:00
2c54a53d0a Reduce compile verbosity 2016-10-21 12:12:14 +01:00
306160ad9a bcopy threaded 2016-10-21 12:07:28 +01:00
20a091c3ed Intel vs. Clang intrinsics differences absorbed 2016-10-21 09:08:36 +01:00
202078eb1b Cray / OpenSHMEM ordering differs 2016-10-21 09:07:20 +01:00
a762b1fb71 MPI3 working with a bounce through shared memory on my laptop.
Longer term plan: make the "u_comm_buf" in Stencil point to the shared region and avoid the
send between ranks on the same node.
2016-10-21 09:03:26 +01:00
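The MPI-3 mechanism alluded to here, in sketch form: ranks on the same node map one another's comms buffers directly through a shared window instead of bouncing through send/recv. This is not Grid's code, and synchronisation is reduced to a barrier for the demo; a real implementation needs proper RMA synchronisation (e.g. MPI_Win_sync inside a lock epoch):

```cpp
// MPI-3 shared-memory window demo (sketch only).
#include <mpi.h>
#include <cstdio>

int main(int argc, char **argv) {
  MPI_Init(&argc, &argv);

  MPI_Comm nodecomm;                               // ranks sharing this node
  MPI_Comm_split_type(MPI_COMM_WORLD, MPI_COMM_TYPE_SHARED, 0, MPI_INFO_NULL, &nodecomm);
  int nrank, nsize;
  MPI_Comm_rank(nodecomm, &nrank);
  MPI_Comm_size(nodecomm, &nsize);

  // Each rank contributes one buffer to a node-wide shared window.
  double *mybuf = nullptr;
  MPI_Win win;
  MPI_Win_allocate_shared(1 << 20, sizeof(double), MPI_INFO_NULL, nodecomm, &mybuf, &win);
  mybuf[0] = 100.0 + nrank;
  MPI_Barrier(nodecomm);                           // simplified synchronisation for the demo

  // Peek directly at a neighbouring rank's buffer through the shared mapping.
  MPI_Aint qsize;
  int qdisp;
  double *neighbour = nullptr;
  MPI_Win_shared_query(win, (nrank + 1) % nsize, &qsize, &qdisp, &neighbour);
  std::printf("node rank %d sees neighbour value %g\n", nrank, neighbour[0]);

  MPI_Win_free(&win);
  MPI_Comm_free(&nodecomm);
  MPI_Finalize();
  return 0;
}
```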
deef2673b2 Separating the Lattice theories stub from the QCD.h file 2016-10-20 17:24:08 +01:00
5b5925b8e5 Forgot to add 2016-10-20 17:09:40 +01:00
977b0a6dd9 Merge branch 'develop' into feature/hmc_generalise 2016-10-20 17:04:41 +01:00
977d844394 Few modifications on stdout messages 2016-10-20 17:01:59 +01:00
b58adc6a4b commVector 2016-10-20 17:00:15 +01:00
f9d5e95d72 allocator template typedefs moved to AlignedAllocator 2016-10-20 16:59:39 +01:00
4f8e636a43 commVector 2016-10-20 16:59:16 +01:00
9b39f35ae6 commVector different for SHMEM compat 2016-10-20 16:58:53 +01:00
5fe2b85cbd MPI3 and shared memory support 2016-10-20 16:58:01 +01:00
c7cccaaa69 Comm vector for shmem 2016-10-20 16:57:31 +01:00
cbcfea466f MPI3 2016-10-20 16:57:14 +01:00
4955672fc3 MPI3 2016-10-20 16:57:00 +01:00
39f1c880b8 mpi3 2016-10-20 16:56:40 +01:00
8c043da5b7 SHMEM and comms allocator made different 2016-10-20 16:56:05 +01:00
3cbe974eb4 Layout 2016-10-20 16:55:21 +01:00
6e4a06e180 qed-fvol: initial commit 2016-10-20 15:04:00 +01:00
997fd882ff Merge branch 'develop' into feature/feynman-rules
# Conflicts:
#	lib/Threads.h
#	lib/qcd/action/fermion/WilsonFermion.cc
#	lib/qcd/action/fermion/WilsonFermion.h
#	lib/qcd/utils/SUn.h
#	lib/simd/Grid_avx.h
#	lib/simd/Intel512common.h
2016-10-19 18:35:18 +01:00
590675e2ca Csum in hex format 2016-10-19 17:26:25 +01:00
8c65bdf6d3 Printing checksum for the RNG file 2016-10-19 16:56:11 +01:00
74f1ed3bc5 Adding some documentation for HMC 2016-10-19 10:51:13 +01:00
7af9b87318 Cache face tables to improve performance.
Extract merge now looking poor.
2016-10-18 09:51:37 +01:00
811ca45473 GNU clang hack for AVX512 since there are missing reduce intrinsics in Clang 3.9 and GCC-6 AVX512 support 2016-10-17 16:23:21 +01:00
bc1a4d40ba Faster integer handling; avoid push_back 2016-10-17 16:16:44 +01:00
79270ef510 Added a test for EODWF Scaled Shamir with general HMC 2016-10-14 17:34:26 +01:00
e250e6b7bb Moving parameters outside of the HMCrunner 2016-10-14 17:22:32 +01:00
c8079e6621 Time the face gather in x-dir more carefully 2016-10-13 22:28:50 +01:00
261342c15f Adding gh-pages 2016-10-13 11:51:25 +01:00
8b0d171c9a 32bit issue on the KNL code variant where byte offsets were stored 2016-10-12 17:49:32 +01:00
1f293b76b4 Merge branch 'feature/knl-stats' into develop 2016-10-12 13:47:58 +01:00
8bbd9ebc27 Reversing changes to Stencil class 2016-10-12 13:47:20 +01:00
6472b431f0 __rdpmc needed for gcc, clang++ 2016-10-12 12:29:08 +01:00
bd205a3293 Fixing for non x86 and non KNL 2016-10-12 12:09:15 +01:00
496beffa88 Fix non-KNL build 2016-10-12 12:06:08 +01:00
9b63e97108 align not absolutely required and confuses clang++ 2016-10-12 11:51:21 +01:00
81f2aeaece KNL streaming stores, and KNL performance counters 2016-10-12 11:45:22 +01:00
2d4a45c758 Typecast pointer 2016-10-12 09:14:15 +01:00
a123dcd7e9 Static required for shmem. Reading same object twice requires csum reset 2016-10-12 00:29:57 +01:00
6b27c42dfe Cosmetic 2016-10-12 00:29:39 +01:00
f7c2aa3ba5 runtime by default 2016-10-12 00:29:13 +01:00
0f182f033b Drop macos with gcc 2016-10-11 22:29:06 +01:00
7240d73184 Parallelise the x faces; fix the segv on KNL with comms 2016-10-11 22:21:07 +01:00
42cd148f5e Base pointer for comms buffer under AVX512 assembly 2016-10-11 16:06:06 +01:00
eda4dd622e Some more edit 2016-10-11 15:45:20 +01:00
6e01264bb7 don't use static by default 2016-10-11 10:03:39 +01:00
6f408256bc FMA4 option moved on the align 2016-10-11 10:03:01 +01:00
8d11681aac verbose remove 2016-10-10 23:50:42 +01:00
3d5c9a1ee9 No compile fix on clang++ 3.9 2016-10-10 23:50:13 +01:00
db749f103f Add Wilson, DWF, Overlap feynman rule tests 2016-10-10 23:48:35 +01:00
dc389e467c axpy_ssp for any coeff type via template 2016-10-10 23:48:05 +01:00
3619167d62 Mass parameter 2016-10-10 23:47:33 +01:00
96f1d1b828 Debugged Domain wall and Overlap feynman rules (infinite Ls, finite mass). 2016-10-10 23:46:45 +01:00
657e0a8f4d Mass parameter 2016-10-10 23:46:10 +01:00
616e7cd83e Mass parameter 2016-10-10 23:45:48 +01:00
6f26d2e8d4 Overlap tree level feynman rule 2016-10-10 23:45:18 +01:00
c014574504 A "please implement me" feynman rule. If this were abstract virtual it would
require/force implementation
2016-10-10 23:44:00 +01:00
d7ce164e6e Feynman rule for DWF 2016-10-10 23:43:36 +01:00
c0d5b99016 Dminus 2016-10-10 23:43:19 +01:00
09ca32d678 Dminus added for Cayley 2016-10-10 23:42:55 +01:00
082ae350c6 static schedule by default 2016-10-10 23:42:30 +01:00
611b5d74ba Fix for AVX+FMA3 compilation 2016-10-10 15:26:17 +01:00
b56c9ffa52 Fix for AVXFMA 2016-10-10 14:43:37 +01:00
c68a2b9637 Minor fix 2016-10-10 11:54:58 +01:00
293df6cd20 Generalising the HMCRunner and moving parameters to the user level 2016-10-10 11:49:55 +01:00
65f61bb3bf Reset QCD colours to 3 2016-10-10 09:46:17 +01:00
26b9740d53 Some fix for the GenericHMCrunner 2016-10-10 09:43:05 +01:00
cb02b7088f Merge branch 'develop' into feature/doxygen
# Conflicts:
#	configure.ac
2016-10-09 13:35:44 +01:00
70c32fa49b Merge branch 'develop' of github.com:paboyle/Grid into develop 2016-10-09 12:55:46 +01:00
77c8a94dae AVXFMA4 flag fix for Intel Compiler 2016-10-09 12:55:12 +01:00
6eb873dd96 Added scalar action phi^4
Check Norm2 output (Complex type assumption)
2016-10-07 17:28:46 +01:00
11b4c80b27 Added support for hmc and binary IO for a general field 2016-10-07 13:37:29 +01:00
2e453dfbf5 Added some instrumentation to benchmark the force computation 2016-10-06 17:52:45 +01:00
c065e454c3 Adding Binary IO, untested 2016-10-06 10:12:11 +01:00
4089984431 Timing hooks 2016-10-06 09:25:12 +01:00
98439847cf configure portability fix 2016-10-05 14:57:20 +01:00
c78bbd0f8c Fix ASM compilation 2016-10-04 15:37:32 +01:00
d9b5fbd374 In the middle of adding a general binary writer 2016-10-04 11:24:08 +01:00
cfbc1a26b8 Now the gauge implementation has to take care of the Nexp 2016-10-03 16:20:06 +01:00
257f69f931 One more function to generalise the HMC integrator 2016-10-03 15:50:04 +01:00
e415260961 First cut on generalised HMC
Backward compatibility OK
2016-10-03 15:28:00 +01:00
7ea4b959a4 hopefully more portable configure output 2016-09-27 11:54:37 +01:00
536e2ff073 *.inc removed: please don't commit these files either! 2016-09-27 11:54:03 +01:00
798ff34d7e configure removed: please don't commit configure! 2016-09-27 11:29:31 +01:00
87acd06990 Use streaming stores 2016-09-26 10:11:34 +01:00
9353b6edfe Fenv out of grid namespace 2016-09-26 10:09:13 +01:00
167cc2650e GNU SOURCE problem on travis 2016-09-26 09:58:09 +01:00
34f887ca1c Test_fft not complete; preparing for tests of momentum space DWF and Overlap feynman rules but not there yet. 2016-09-26 09:44:36 +01:00
7089b6d5a5 Setting up but not implemented some QED rules 2016-09-26 09:43:40 +01:00
2ba7d43ddd Divide handling 2016-09-26 09:43:14 +01:00
836e929565 Divide handling improved 2016-09-26 09:42:22 +01:00
b6713ecb60 Momentum space rules for Overlap, DWF untested to date 2016-09-26 09:39:09 +01:00
52a39f0fcd Divide in ET 2016-09-26 09:38:38 +01:00
81a7a03076 Integer << 2016-09-26 09:38:17 +01:00
16b37b956c divide goes to ET 2016-09-26 09:37:59 +01:00
567b6cf23f demangle moves to logging 2016-09-26 09:36:51 +01:00
296396646d FPE's on macos set up 2016-09-26 09:36:14 +01:00
04a437c92c Minor modification to the filelist script 2016-09-23 11:12:45 +01:00
5c190a1b8c Merge branch 'develop' into feature/hirep 2016-09-23 11:06:06 +01:00
15d8f5c88c Small change to the configure.ac to include the canonical names 2016-09-23 11:05:36 +01:00
c4ac6e7e8f Consolidating HMC interface
Uniform interface for standard actions in the fundamental rep and Hirep
2016-09-23 10:47:42 +01:00
510e340e16 Debugged last commit for the Two index representation 2016-09-22 22:16:21 +01:00
6ffadca153 Restored number of colours to 3 2016-09-22 14:22:54 +01:00
b6597b74e7 Added support for the Two index Symmetric and Antisymmetric representations
Tested for HMC convergence: OK
Added also a test file showing an example for mixed representations
2016-09-22 14:17:37 +01:00
a034e9901b Merge branch 'develop' into feature/hadrons 2016-09-20 13:49:33 +01:00
d2573189d8 build system: FFTW fix 2016-09-20 12:30:24 +01:00
65ca174dbb gitignore update 2016-09-20 11:25:06 +01:00
0724f7af75 QPX single precision implementation 2016-09-19 18:09:12 +01:00
2e74520821 removed libtool use (BG/Q compatibility) 2016-09-16 15:25:49 +01:00
6dd75ad9e5 Merge branch 'develop' of github.com:paboyle/Grid into feature/bgq 2016-09-16 15:07:54 +01:00
fda408ee6f Added first lines for supporting Two Index representations 2016-09-13 10:43:30 +01:00
b9c80318a2 Merge branch 'develop' into feature/hirep 2016-09-13 10:01:51 +01:00
5df5d52d41 Fix for the Intel compiler 2016-09-12 17:17:20 +01:00
f76f281e58 Cleaning files after fix 2016-09-09 11:34:25 +01:00
aa20cc8b52 Fixing compilation error with AVX512 flag 2016-09-09 02:58:52 -07:00
0fd179fb33 Merge branch 'develop' into feature/hirep 2016-09-01 12:59:53 +01:00
f45ef8d114 Minor modification in ActionBase.h 2016-09-01 11:46:46 +01:00
7422953e36 Poisson solver example 2016-08-31 00:42:47 +01:00
8535d433a7 Cold or hot must support any precision 2016-08-31 00:27:53 +01:00
b573d1f35a Wilson tree level added 2016-08-31 00:27:04 +01:00
0c1d7e4daf Mom space prop for Wilson action 2016-08-31 00:26:36 +01:00
02e983a0cd Momentum space prop and free prop convolution 2016-08-31 00:26:02 +01:00
d15ab66aae FFT moves higher in include order 2016-08-31 00:25:22 +01:00
9005b82c6d Multi dim FFT, and normalisation fix 2016-08-31 00:24:52 +01:00
3475f45ce7 Demangle support for typeid stuff 2016-08-31 00:23:48 +01:00
0744f38866 Demangle support is useful 2016-08-31 00:23:28 +01:00
62febd2823 Wilson prop test 2016-08-31 00:23:09 +01:00
fd5614738d Merge branch 'develop' into feature/hirep 2016-08-30 18:21:36 +01:00
005dcc51aa Reset travis 2016-08-30 14:44:10 +01:00
655c893f86 Another test on travis 2016-08-30 14:38:42 +01:00
843f5783b4 Again travis test separating single and double 2016-08-30 14:29:09 +01:00
8986c9fedd Single and double precision travis matrix 2016-08-30 14:25:24 +01:00
c80a1d427c Retest original version of travis yaml 2016-08-30 14:05:05 +01:00
ae57032500 Separate single and double builds in travis 2016-08-30 14:00:34 +01:00
f75468728f Another error on travis 2016-08-30 13:56:23 +01:00
5acd856663 Correction of error in travis 2016-08-30 13:49:49 +01:00
b0d3e4bb2c Separating travis builds 2016-08-30 13:44:07 +01:00
b512ccbee6 HMC for Adjoint fermions works
Accepts and reproduces known results

Check initial instability of inverters
when starting from hot configurations
2016-08-30 11:31:25 +01:00
8c89391c02 FFTW unresolved fixed when no fftw3.h 2016-08-24 16:41:47 +01:00
bfac5195b8 tidy up 2016-08-24 16:38:36 +01:00
a782ca3238 Merge branch 'feature/fft-flop-count' into develop 2016-08-24 15:06:17 +01:00
744691097f Printing 2016-08-24 15:05:56 +01:00
ff6da364e8 FFT double and single precision gives good performance now in multithreaded code. 2016-08-24 15:05:00 +01:00
4d11a6f5f2 first commit for QPX intrinsics 2016-08-23 14:41:44 +01:00
88be3b39bb Merge branch 'develop' of https://github.com/paboyle/Grid into develop 2016-08-22 18:29:36 +01:00
8a02824e08 Merge branch 'feature/FFT' into develop 2016-08-22 16:25:04 +01:00
356e7940fd fftw can be switched off 2016-08-22 16:24:49 +01:00
73ce476890 Include fftw headers 2016-08-22 16:24:21 +01:00
29c4ef41de Adding a test for libfftw3 2016-08-22 16:21:01 +01:00
e423a09974 FFT improved and test_FFT passing under MPI 8 processes, 8^4 for LatticeComplexD and LatticeSpinMatrixD 2016-08-18 02:23:21 +01:00
17097a93ec FFTW test ran over 4 mpi processes. 2016-08-17 01:33:55 +01:00
94a6373a7f Merge branch 'feature/eigen-cleanup' into develop 2016-08-15 23:58:34 +01:00
4ab7dbfd57 Instantiate 2016-08-15 23:00:40 +01:00
90e70790f3 Feature for z-Mobius prep 2016-08-15 22:31:29 +01:00
9c2e8d5e28 Nc=3 just to let all the test pass in Travis 2016-08-09 15:46:57 +01:00
147e2025b9 Added unit tests on the representation transformations
Status: Passing all tests
2016-08-08 16:54:22 +01:00
573b8c6020 build system: -O3 is not overriden by env CXXFLAGS 2016-08-06 01:26:24 +01:00
15218ec57f more Travis MPI fix 2016-08-06 00:49:14 +01:00
ec68e08dd2 Travis MPI fix 2016-08-06 00:36:05 +01:00
fc25d2295c fftw download 2016-08-06 00:28:52 +01:00
8dc2cfcedb Adding fftw header pulling 2016-08-06 00:28:28 +01:00
17c843700e missing doxygen.inc added 2016-08-05 15:38:21 +01:00
7b56f63a5c configure Doxygen output fix 2016-08-05 15:35:29 +01:00
b1cfb4d661 first try at a nicer Doxygen implementation 2016-08-05 15:29:18 +01:00
836f93780c first try at including MPI tests in Travis 2016-08-05 13:41:52 +01:00
5a68715be3 Richards sweep test 2016-08-05 10:51:57 +01:00
32bc7a6ab8 MPI back out of change that hangs
AVX2 for clang, gcc needs the -mfma flag.
2016-08-05 10:36:00 +01:00
b65e72e521 Merge pull request #43 from rprollins/bench/output-format
Benchmark_dwf_sweep and Benchmark_zmm output formats
2016-08-04 16:47:01 +01:00
d1aaff65e8 README update 2016-08-04 16:27:02 +01:00
7ff7c7d90d Merge branch 'develop' into feature/hadrons 2016-08-04 16:22:10 +01:00
93d29bb699 build system improvements after discussion with Peter 2016-08-04 16:19:59 +01:00
a2e9430abe Hadrons: fix after build system update 2016-08-03 17:14:32 +01:00
2485ef9c9c Merge branch 'feature/new-build' into feature/hadrons
# Conflicts:
#	Makefile.am
#	scripts/copyright
2016-08-03 16:49:16 +01:00
3b376ed54e build system: error if MPI not found 2016-08-03 15:23:38 +01:00
d5c1f614ba gitignore update 2016-08-03 15:14:33 +01:00
2edc24225d untracking ltmain.sh 2016-08-03 15:12:44 +01:00
629283726b build system: local Grid link flag moved to configure.ac 2016-08-03 15:07:42 +01:00
6adb66dd08 build system: finer management of GMP/MPFR dependence 2016-08-03 15:06:45 +01:00
5be92bb708 link fix in README 2016-08-03 12:40:56 +01:00
f4c049ea6d README update 2016-08-03 12:38:54 +01:00
bc092ad30f build system fix 2016-08-03 11:47:38 +01:00
dad642ed1b various build system fixes and improvements 2016-08-03 11:39:20 +01:00
63ae39abc7 proper propagation of OpenMP flags 2016-08-02 17:41:32 +01:00
9e5b934d21 improved LAPACK configuration 2016-08-02 17:26:54 +01:00
a7b483d67a Tests in subdirectories are not built by default 2016-08-02 12:14:28 +01:00
bb99ce0680 bootstrap script fix 2016-08-01 09:51:06 +01:00
83307df1af travis update for new build system 2016-08-01 09:38:40 +01:00
49b5c49851 Checked the hermiticity of the op in derivative, ok
Still CG fails to converge
2016-07-31 12:37:33 +01:00
e9f30cab2c first working version for the new build system 2016-07-30 17:53:18 +01:00
089f0ab582 Debugged HMC for Creutz relation 2016-07-28 16:44:41 +01:00
df6c9f55d1 Use common benchmark output format for dwf_sweep and zmm 2016-07-20 17:38:56 +01:00
b93e18ed50 Modified the Dirac Kernel class to compile with a different number of colours
Added the general push_back functionality to accommodate all defined representations

Compiles, not tested
2016-07-18 16:36:28 +01:00
9c77bb69a5 Added all elements for Hirep HMC
TODO: Test and debug
2016-07-18 12:05:23 +01:00
27f3ecc833 Merge branch 'feature/bugfix-ck-cj' into develop 2016-07-16 01:59:52 +01:00
f9e90eeb1f Sign error on the force for 4d fields fixed 2016-07-16 01:52:44 +01:00
fad5c675eb sign error on the 4d gparity force 2016-07-16 01:51:56 +01:00
4908b77d46 Fixed conflicts. PLEASE avoid making wholesale cosmetic-only changes; this created
a HUGE amount of difficult-to-resolve and hard-to-understand conflicts.

Wholesale formatting, reordering functions etc. in a central file like Tensor_class
or Grid_vector_types, while others are also editing without making substantial functionality
changes, creates pain.
2016-07-15 20:59:07 +01:00
f4dd5062d7 Merge branch 'develop' of https://github.com/paboyle/Grid into develop 2016-07-15 19:26:06 +01:00
da34d75841 Merge branch 'feature/Ls-vectorised-actions' into develop 2016-07-15 19:09:47 +01:00
980ff18956 Solving the instantiation no-compile issue 2016-07-15 17:19:44 +01:00
7edf4c6c04 Added HMC utitities for the higher representations
TODO: Inherit types for the pseudofermions, Debugging, testing
2016-07-15 13:39:47 +01:00
1a6c7204ac Disable instantiation; Use cache version instead 2016-07-15 00:34:39 +01:00
49310fbab3 Done with red black change over 2016-07-15 00:08:43 +01:00
6049d5ac47 Update 2016-07-15 00:08:32 +01:00
35d0d35238 Updated file list 2016-07-15 00:02:53 +01:00
c0e878705e Updated file list 2016-07-15 00:02:39 +01:00
5c0c8efb9e Updated file list 2016-07-15 00:02:11 +01:00
dfd714e1ef Multiple implementations for the 5d hopping terms, depending on cache friendly
ops and/or the 5th direction being vectorised
All use 4d redblack.
2016-07-15 00:00:09 +01:00
79a8ca1a62 Rewrite for performance. Impl dependent instantiations give
4d linalg impls of the 5d hopping terms (and inverse)
Cache friendly loop orderings of the above
Dense matrix stored and apply to the above

-- Switch to Ls vectorised, and use dense matrix approach for the MooeeInv
   and rotate/shift of the Mooee M5D routines.
2016-07-14 23:58:15 +01:00
fb45eb2eb2 5d ls vec rename of impl class 2016-07-14 23:57:26 +01:00
a307274c96 Fermion impl rename for ls vectorised 5d approaches 2016-07-14 23:56:13 +01:00
3f2c44a5fe Updating the class to 5d selection based on impl type 2016-07-14 23:55:26 +01:00
48fb1cdc11 Update domain 5d vectorised impl type, move the type over to 4d redblack with
the dense OO inverse
2016-07-14 23:54:35 +01:00
8a79e93cc2 Rename the 5d domain wall fermion vectorised Ls impl class 2016-07-14 23:53:00 +01:00
3493b51879 Modest updates 2016-07-14 23:52:13 +01:00
de3e79d300 red black for Ls vectorised is 4d red black. Update accordingly now I've made this choice 2016-07-14 23:49:42 +01:00
dd62a61c5c Added broadcast and rotation of simd vectors 2016-07-14 23:49:00 +01:00
8f47d0b5ab Rotation needed for hopping term in fifth dim with Ls vectorised fields 2016-07-14 23:45:36 +01:00
42af132dab Fix for chris kellys request to peek poke on checkerboarded fields 2016-07-14 23:44:48 +01:00
9db2c6525d updating benchmarks for red black 4d for Ls vectorised code 2016-07-14 23:44:02 +01:00
adbc7c1188 Adding files for multiple implementations (cache-optimised) and Ls vectorisation
of the 5D Cayley-form chiral fermions for the 5d matrix. With Ls entirely
in the vector direction, s-hopping terms involve rotations.

The serial dependence of the LDU inversion for Mobius and 4d even-odd
checkerboarding is removed by simply applying Ls^2 operations (vectorised
many ways) as a dense matrix operation.

This should give similar throughput at a higher flop count (non-compulsory flops),
but enables use of the KNL cache-friendly kernels throughout the code.

Ls is still constrained to be a multiple of Nsimd, which is as much as 8 for AVX512
with single precision.
2016-07-14 22:59:21 +01:00
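The dense-matrix idea described in the commit message above is easiest to see outside Grid. Below is a minimal standalone C++ sketch, not Grid code; the names MooeeInvDense, Minv and volume4d are invented for illustration. It trades the serial forward/back substitution in s for an Ls x Ls matrix-vector product at every 4d site, so the site loop vectorises and threads trivially.

#include <complex>
#include <vector>

using Complex = std::complex<double>;

// Hypothetical sketch: apply a precomputed dense Ls x Ls inverse at each 4d site
// instead of a serial recursion in s. Ls^2 "non-compulsory" flops per site, but
// every site (and every s) is independent work.
void MooeeInvDense(const std::vector<Complex> &Minv,   // Ls*Ls dense inverse, row-major
                   const std::vector<Complex> &in,     // volume4d * Ls input field
                   std::vector<Complex>       &out,    // volume4d * Ls output field
                   int Ls, long volume4d)
{
  for (long x = 0; x < volume4d; ++x) {      // independent 4d sites
    for (int s = 0; s < Ls; ++s) {
      Complex acc(0.0, 0.0);
      for (int sp = 0; sp < Ls; ++sp)
        acc += Minv[s * Ls + sp] * in[x * Ls + sp];
      out[x * Ls + s] = acc;
    }
  }
}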
9dc345e8e8 Debugged smearing and adding HMC functions for hirep 2016-07-13 17:51:18 +01:00
8b9301a74c Merge branch 'feature/bugfixes' into develop 2016-07-13 12:31:34 -04:00
6f47fbb1e2 Disabled parallel for loops in ExtractSlice and InsertSlice due to race conditions. Likely will need to do so for localConvert too. 2016-07-13 10:49:18 -04:00
a9ae30f868 Added representations definitions for the HMC 2016-07-12 13:36:10 +01:00
a3c0fb79b6 Fix to iVector and iMatrix pokeIndex and checkerboard local site indexing. 2016-07-11 17:15:22 -04:00
62601bb649 Bug fix 2016-07-08 20:46:29 +01:00
ef97e32152 Adding persistent communicators 2016-07-08 17:16:08 +01:00
daea5297ee Wrote the projector in the adjoint representation algebra 2016-07-08 16:14:16 +01:00
5028969d4b Added generators for the adjoint representation 2016-07-08 15:40:11 +01:00
c667d9fdcc Trying to make compile clean on travis; seem to have a make -j 4 problem with fftw 2016-07-07 23:26:39 +01:00
7dbb94bab2 Update 2016-07-07 22:51:37 +01:00
236dcc820b typo fix 2016-07-07 22:46:11 +01:00
a42a441a6a Rename the reconfigure script to ./autogen.sh 2016-07-07 22:35:45 +01:00
a0676beeb1 Open up dependency on Eigen and FFTW 2016-07-07 22:31:07 +01:00
c5106d0c03 Bugfix 2016-07-07 16:06:30 -04:00
fbf96b1bbb Merge branch 'develop' into feature/hirep 2016-07-07 14:20:10 +01:00
3c49ddfaa4 Merge branch 'temporary-smearing' into develop 2016-07-07 14:04:59 +01:00
ffb8b3116c Tested smeared RHMC Wilson1p1, accepting 2016-07-07 11:49:36 +01:00
290493e162 Merge branch 'feature/multi_prec' into develop 2016-07-06 19:29:57 -04:00
dd8cfff111 Another fix for pedantic compilers 2016-07-06 18:22:15 -04:00
184642adb0 Fix for pedantic compilers 2016-07-06 18:15:15 -04:00
4774a3bcd2 Generalized HotConfiguration and functions it calls to accept gauge fields with precision other than the default. 2016-07-06 18:01:08 -04:00
25fafa9a89 Comment 2016-07-06 16:19:41 -04:00
713520d3d2 Added tester for mixed CG 2016-07-06 16:18:19 -04:00
85ed8175cb Implemented mixed precision CG. Fixed filelist to exclude lib/Old directory and include Config.h. 2016-07-06 15:57:04 -04:00
df5c788ef2 Merge branch 'develop' into feature/multi_prec 2016-07-06 14:52:28 -04:00
15f22425c8 Added option to prevent CG from exiting when it fails to converge 2016-07-06 14:50:01 -04:00
e87182cf98 Debugged the copy constructor of the Lattice class 2016-07-06 15:31:00 +01:00
e3d5319470 Debugged the real() and imag() functions and added tests to Test_Simd 2016-07-06 14:16:03 +01:00
ffedeb1c58 Minor modifications 2016-07-06 11:41:27 +01:00
3e3b367aa9 Small changes in the Log files 2016-07-05 15:05:28 +01:00
3e80947c2b Cleaned up HMC output. Tested smeared HMCs for single precision (OK) 2016-07-05 12:03:54 +01:00
fdfbf11c6d Merge branch 'develop' into temporary-smearing 2016-07-04 18:45:10 +01:00
9cb90f714e Merge remote-tracking branch 'origin/develop' into temporary-smearing 2016-07-04 17:28:40 +01:00
6ce174cd60 Testing smearing for RHMC routines 2016-07-04 16:36:49 +01:00
17ca5240f7 Tested smeared EOWilsonRatio, accepts 2016-07-04 16:25:15 +01:00
2daffdf95d Tested smeared WilsonRatio action, accepts 2016-07-04 16:17:28 +01:00
149f826601 Tested smearing for Nf2 WilsonFermionAction, non EO: accepts 2016-07-04 16:09:19 +01:00
cd8ee27080 Simple change in iGamma for smearing 2016-07-04 16:02:57 +01:00
0fa66e8f3c Debugged smearing for EOWilson, accepts 2016-07-04 15:35:37 +01:00
8dd099267d Corrected a bug in the Expression Templates (acos and asin were wrong) 2016-07-03 12:28:25 +01:00
1a6d65c6a4 Converted set_uw and set_fj to all complex functions 2016-07-03 10:27:43 +01:00
fc4a043663 Colors and banner clean up 2016-07-02 16:15:38 +01:00
61ba50665e Merge branch 'hotfix/v0.5.1' into develop 2016-07-01 16:34:30 +01:00
446c768cd3 Merge branch 'hotfix/v0.5.1'
Double precision compile fix
2016-07-01 16:33:59 +01:00
bfe14000a9 Double compile fix 2016-07-01 16:33:51 +01:00
092fa0d8da Debugged set_fj,
to be fixed: BUG in imag()
2016-07-01 16:06:20 +01:00
e0b7004f96 Merge branch 'master' into feature/hadrons 2016-07-01 15:54:34 +01:00
1ceff48133 Merge branch 'release/v0.5.0' into develop 2016-06-30 15:15:59 -07:00
680645f849 Merge branch 'release/v0.5.0' 2016-06-30 15:15:03 -07:00
3fc6e03ad1 Version file 2016-06-30 14:44:09 -07:00
2d6614f3a1 Merge branch 'feature/knl-cache-opt' into develop 2016-06-30 14:36:20 -07:00
4e041b5103 Merge branch 'feature/knl-cache-opt' of https://github.com/paboyle/Grid into feature/knl-cache-opt 2016-06-30 14:36:08 -07:00
712b9a3489 Asm only for avx512 2016-06-30 14:35:02 -07:00
bdaa5b1767 Updated to have perfect prefetching for the s-vectorised kernel with any cache blocking. 2016-06-30 14:35:02 -07:00
8fcefc021a Improved the prefetching when using cache blocking codes 2016-06-30 14:35:02 -07:00
1445189361 Control the prefetch strategy 2016-06-30 14:35:02 -07:00
05c884a62a Prefetch change 2016-06-30 14:35:01 -07:00
a25bec87d9 Prefetch during save 2016-06-30 14:35:01 -07:00
2d8bb4c594 Tweaks 2016-06-30 14:35:01 -07:00
51cb2d4328 update file lists 2016-06-30 14:35:01 -07:00
6d58cb2a68 Enable reordering of the loops in the assembler for cache friendly.
This gets in the way of L2 prefetching however. Do next next link in stencil
prefetching.
2016-06-30 14:35:01 -07:00
c8b35d960c Merge branch 'develop' of https://github.com/paboyle/Grid into feature/knl-cache-opt 2016-06-30 14:30:49 -07:00
532f41dd61 Asm only for avx512 2016-06-30 14:00:34 -07:00
661b0ab45d Updated to have perfect prefetching for the s-vectorised kernel with any cache blocking. 2016-06-30 13:07:42 -07:00
565e9329ba Changed the colouring classes 2016-06-30 16:51:03 +01:00
4bc08ed995 Improved the prefetching when using cache blocking codes 2016-06-26 12:54:14 -07:00
b2933a0557 Control the prefetch strategy 2016-06-25 12:55:25 -07:00
db057cc276 Prefetch change 2016-06-25 12:54:50 -07:00
22e88eaf54 Prefetch during save 2016-06-25 12:54:14 -07:00
09fe3caebd Tweaks 2016-06-25 11:08:05 -07:00
5e02392f9c Fixed compilation error for benchmark_dwf
Some parts were assuming floating point precision
2016-06-20 12:30:51 +01:00
17a8f51a9b update file lists 2016-06-19 11:59:10 -07:00
1b7f88dd00 Enable reordering of the loops in the assembler for cache friendly.
This gets in the way of L2 prefetching however. Do next next link in stencil
prefetching.
2016-06-19 11:45:58 -07:00
d6737e4bd8 Travis fix for Linux clang builds 2016-06-14 19:15:08 +01:00
75fc295f6e Merge branch 'hadrons' into feature/hadrons 2016-06-14 17:51:15 +01:00
d539888e57 Merge pull request #37 from rprollins/fix/mpi_communicator
Removed write to stdout in constructor for MPI CartesianCommunicator
2016-06-14 17:25:40 +01:00
86187d7cca Removed write to stdout in constructor for MPI CartesianCommunicator 2016-06-14 15:34:20 +01:00
87418e7df1 Slightly faster prefetching perf. 2016-06-13 02:32:52 -07:00
55f65b81b5 Improvements to the assembler interface that let us move chunks of the
site and s loop into the kernels. This will save on function call overhead and
guarantee L2 prefetching strategy is right since OMP can't distribute the
sub-chunks of work.
2016-06-09 01:12:36 -07:00
d9408893b3 Prefetching in the normal kernel implementation. 2016-06-08 05:43:48 -07:00
05acc22920 placeholder for non temporal loads optimisation 2016-06-07 13:18:21 -07:00
8ac021de73 Added a test and fixed it for red-black precon Ls-innermost vectorised DWF 2016-06-07 13:16:56 -07:00
e503ef5590 Cleaned up 2016-06-07 00:11:36 +01:00
a7682b0060 Only instantiate the one routine to avoid duplicate symbol under g++5/MacOS 2016-06-06 23:48:21 +01:00
0b731b5d80 Hadrons: genetic scheduler parameter fix 2016-06-06 17:46:53 +01:00
8e2078be71 Hadrons: environment with fully generic object store 2016-06-06 17:45:37 +01:00
d4c9d71fc8 Merge branch 'master' of https://github.com/paboyle/Grid 2016-06-06 07:06:54 -07:00
786ca52c43 Problems remain in the red black preconditioning of the Ls vectorisation 2016-06-06 07:05:51 -07:00
048ac04abc Update Benchmark_dwf.cc 2016-06-03 13:44:41 +01:00
f78d89bcbe Update Lebesgue.cc
kill verbose
2016-06-03 13:33:42 +01:00
53d06046b0 Compiling updates for KNL 2016-06-03 03:47:54 -07:00
5d3a1a025d timers flag 2016-06-03 03:25:38 -07:00
139cc5f1ae Large change with KNL preparation 2016-06-03 03:24:26 -07:00
1826ed06a3 Merge branch 'master' into hadrons 2016-05-27 16:50:31 +01:00
1c0e922585 Merge pull request #35 from aportelli/master
empty SIMD fix
2016-05-27 16:49:13 +01:00
9d5f693cbe empty SIMD fix 2016-05-24 10:56:27 +01:00
3ff96c502b Merge branch 'master' into hadrons 2016-05-12 19:24:18 +01:00
15a0908bfc Merge branch 'master' into hadrons 2016-05-12 18:35:46 +01:00
bb2125962b Hadrons: finished implementation of 5D quarks 2016-05-12 18:34:42 +01:00
232fda5fe1 Hadrons: DWF action 2016-05-12 18:34:10 +01:00
2b31bf61ff Hadrons: message fix 2016-05-12 18:33:49 +01:00
afe5a94745 Hadrons: getModule with upcast 2016-05-12 18:33:36 +01:00
7ae667c767 Hadrons: module template update 2016-05-12 18:33:08 +01:00
07f0b69784 Merge branch 'master' into hadrons 2016-05-12 13:02:18 +01:00
5c06e89d69 Hadrons: code cleaning 2016-05-12 12:49:49 +01:00
3d75e0f0d1 Hadrons: MQuark fix 2016-05-12 12:02:15 +01:00
362f255100 Hadrons: module parameters can now be accessed from outside 2016-05-12 11:59:28 +01:00
3d78ed03ef Merge branch 'master' into hadrons 2016-05-11 15:21:46 +01:00
835003b3c5 Hadrons: removed useless gauge global parameters 2016-05-11 15:01:52 +01:00
328d213c9e Hadrons: FS case sensitivity fix 2016-05-11 14:44:14 +01:00
56a8d7a5bc Hadrons: build system fix 2016-05-11 10:27:14 +01:00
78198d1b04 Hadrons: size fix for module graph with one vertex 2016-05-10 20:13:28 +01:00
84fa2bdce6 Hadrons: modules moved in their own directory & utility script to add new modules 2016-05-10 20:12:48 +01:00
29dfe99e7c Hadrons: more scheduler optimizations 2016-05-10 19:19:38 +01:00
d604580e5a Hadrons: all objects/modules mapped to an integer address system to remove string operations from scheduling 2016-05-10 19:07:41 +01:00
7dfdc9baa0 Hadrons: lattice dynamic cast fix 2016-05-10 10:41:20 +01:00
9e986654e6 Hadrons: first version of the genetic scheduler 2016-05-09 14:49:06 +01:00
df3fbc477e Hadrons: code cleaning 2016-05-07 13:26:56 -07:00
bb580ae077 Hadrons: significant overhaul of the object registration system, previous version didn't allow dry runs 2016-05-07 13:19:38 -07:00
2c226753ab Hadrons: comments on graph theory algorithm complexity 2016-05-06 06:35:11 -07:00
ea0cea668e Hadrons: minor code cleaning 2016-05-05 16:13:14 -07:00
75cd72a421 Hadrons: memory management for fermion matrices, dynamic ownership in garbage collector 2016-05-04 19:11:03 -07:00
cbe52b0659 Hadrons: debug message removed 2016-05-04 12:20:33 -07:00
3aa6463ede Hadrons: general lattice store & a lot of code cleaning 2016-05-04 12:17:27 -07:00
312637e5fb Merge branch 'master' into hadrons
# Conflicts:
#	lib/Log.h
2016-05-04 12:16:18 -07:00
798d8f7340 Hadrons: Modules: better log messages 2016-05-03 18:17:58 -07:00
ba878724ce Hadrons: sources are now independent modules 2016-05-03 18:17:28 -07:00
b865dd9da8 Hadrons: solver renaming 2016-05-03 18:16:57 -07:00
8b313a35ac Hadrons: random and NERSC gauge configurations 2016-05-03 17:08:42 -07:00
02ec23cdad Hadrons: Fermion actions and gauge fields are modules now 2016-05-03 17:08:42 -07:00
6e83b6a203 Hadrons: namespace reorganisation, now everything is in Grid::Hadrons, the 'using Grid::operator<<' statement is used to prevent a very nasty compilation error with GCC. 2016-05-02 19:31:21 -07:00
48fcc34d72 CMeson: first implementation, still need proper output 2016-05-01 18:31:40 -07:00
d08d93c44c Merge branch 'master' into hadrons 2016-05-01 18:30:44 -07:00
0ab10cdedb Merge branch 'master' into hadrons 2016-05-01 16:08:05 -07:00
22653edf12 Merge branch 'master' into hadrons 2016-05-01 15:55:58 -07:00
12d2a95846 Merge branch 'master' into hadrons 2016-05-01 15:05:02 -07:00
978cf52f6b Merge branch 'master' into hadrons 2016-05-01 14:53:38 -07:00
63b730de80 Hadrons: for the moment, test with unit gauge 2016-05-01 14:50:57 -07:00
7905c5b8e5 Hadrons: Z2 source code fix 2016-05-01 14:49:45 -07:00
5e4b58ac40 Hadrons: Z2 source expression fix 2016-05-01 12:49:26 -07:00
468d8dc682 Merge branch 'master' into hadrons 2016-05-01 12:03:24 -07:00
beb11fd4ef Merge branch 'master' into hadrons 2016-05-01 10:32:24 -07:00
d7662b5175 Merge branch 'master' into hadrons 2016-04-30 00:24:59 -07:00
dc5f32e5f0 Merge branch 'master' into hadrons 2016-04-30 00:18:31 -07:00
1869d28429 Hadrons: first prototype with working inversions 2016-04-30 00:17:04 -07:00
405b175665 Merge branch 'master' into hadrons 2016-04-30 00:16:06 -07:00
e33b0f6ff7 cleaner output 2016-04-16 08:41:53 +01:00
9ee54e0db7 debug output removed 2016-04-16 08:41:28 +01:00
feae35d92c Hadrons: pass strings by value 2016-04-16 08:41:12 +01:00
3834d81181 Merge branch 'master' into hadrons 2016-04-14 15:15:45 +01:00
339be37dba Debugging smeared HMC 2016-04-13 17:00:14 +09:00
a87b744621 HMC runs but does not accept with smearing on 2016-04-07 16:45:11 +09:00
97d0d56bcb Debugging Smearing routines (set_fj) 2016-04-06 17:58:43 +09:00
7c7ea35ffb Putting the Traceless Antihermitian part outside the deriv in pseudofermion actions 2016-04-05 16:28:09 +09:00
4b1cf580e0 Debugging the Smearing routines 2016-04-05 16:19:30 +09:00
179e82b5ca Merge branch 'master' into hadrons 2016-03-08 12:55:33 +00:00
f2c59c8730 Merge branch 'master' into hadrons 2016-03-02 17:15:05 +00:00
fdd0848593 Hadrons: license text update 2016-02-25 12:07:21 +00:00
92f666905f copyright script update to 80 column text 2016-02-25 12:06:24 +00:00
5980fa8640 test implementation of DWF inverter 2016-02-25 11:56:16 +00:00
2d8bb356e3 Smearing routines compile (still untested) 2016-02-25 02:43:59 +09:00
a7251f28c7 Stout smearing compiles (untested) 2016-02-24 03:16:50 +09:00
a0d8eb2c24 minor code cleaning 2016-02-23 16:33:00 +00:00
1e10b4571d fix after Grid update 2016-02-23 16:21:45 +00:00
02f8b84ac9 Merge branch 'master' into hadrons 2016-02-23 16:13:39 +00:00
cfd368596d Merge branch 'master' into hadrons 2016-02-22 15:25:02 +00:00
c1b1b89d17 More on smearing routines, writing APEsmear (dev) 2016-02-19 17:15:27 +09:00
771235017d Adding smearing routines (development) 2016-02-19 15:30:41 +09:00
ae682674e0 Hadrons: first full implementation of the scheduler 2016-01-13 20:23:51 -08:00
17c43f49ac Hadrons: application class now take parameter file name as argument 2016-01-13 20:22:37 -08:00
30146e977c gitignore update 2016-01-13 20:20:43 -08:00
54eacec261 Hadrons: namespace std not used anymore in compiled sources 2015-12-23 14:30:33 +00:00
76c78f04e2 Hadrons: first complete prototype for run loop 2015-12-23 14:21:35 +00:00
379580cd89 Merge branch 'master' into hadrons 2015-12-23 14:20:22 +00:00
14a80733f9 Merge branch 'master' into hadrons 2015-12-08 13:57:53 +00:00
d4db009a58 Hadrons: starting scheduler implementation 2015-12-07 18:26:38 +00:00
20ce7e0270 Hadrons: algorithm to determine all possible topological ordering 2015-12-07 15:46:36 +00:00
bb195607ab Hadrons: fix in topological sort algorithm name 2015-12-02 19:40:11 +00:00
6f090e22c0 Hadrons: graph topological sort 2015-12-02 19:33:34 +00:00
339e983172 Merge branch 'master' into hadrons 2015-12-02 14:38:04 +00:00
4a7f3d1b7b Merge branch 'master' into hadrons
# Conflicts:
#	configure
2015-12-02 10:57:51 +00:00
c4e2202550 First graph class implementation and test 2015-11-05 14:28:14 +00:00
538b16610b First commit for measurement software 'Hadrons' 2015-10-27 17:33:18 +00:00
1050 changed files with 165841 additions and 46354 deletions

.gitignore

@ -5,11 +5,11 @@
*.o
*.obj
# Editor files #
################
*~
*#
*.sublime-*
# Precompiled Headers #
#######################
@ -48,6 +48,9 @@ Config.h.in
config.log
config.status
.deps
Make.inc
eigen.inc
Eigen.inc
# http://www.gnu.org/software/autoconf #
########################################
@ -62,19 +65,8 @@ stamp-h1
config.sub
config.guess
INSTALL
# Packages #
############
# it's better to unpack these files and commit the raw source
# git has its own built in compression methods
*.7z
*.dmg
*.gz
*.iso
*.jar
*.rar
*.tar
*.zip
.dirstamp
ltmain.sh
# Logs and databases #
######################
@ -91,6 +83,7 @@ INSTALL
.Trashes
ehthumbs.db
Thumbs.db
.dirstamp
# build directory #
###################
@ -100,3 +93,25 @@ build*/*
#####################
*.xcodeproj/*
build.sh
.vscode
*.code-workspace
# Eigen source #
################
Grid/Eigen
Eigen/*
# libtool macros #
##################
m4/lt*
m4/libtool.m4
# github pages #
################
gh-pages/
# generated sources #
#####################
Grid/qcd/spin/gamma-gen/*.h
Grid/qcd/spin/gamma-gen/*.cc
Grid/util/Version.h


@ -1,79 +0,0 @@
language: cpp
matrix:
include:
- os: osx
osx_image: xcode7.2
compiler: clang
- os: osx
osx_image: xcode7.2
compiler: gcc
env: VERSION=-5
- compiler: gcc
addons:
apt:
sources:
- ubuntu-toolchain-r-test
packages:
- g++-4.9
- libmpfr-dev
- libgmp-dev
- libmpc-dev
- binutils-dev
env: VERSION=-4.9
- compiler: gcc
addons:
apt:
sources:
- ubuntu-toolchain-r-test
packages:
- g++-5
- libmpfr-dev
- libgmp-dev
- libmpc-dev
- binutils-dev
env: VERSION=-5
- compiler: clang
addons:
apt:
sources:
- ubuntu-toolchain-r-test
- llvm-toolchain-precise-3.7
packages:
- clang-3.7
- libmpfr-dev
- libgmp-dev
- libmpc-dev
- binutils-dev
env: VERSION=-3.7
- compiler: clang
addons:
apt:
sources:
- ubuntu-toolchain-r-test
- llvm-toolchain-precise-3.8
packages:
- clang-3.8
- libmpfr-dev
- libgmp-dev
- libmpc-dev
- binutils-dev
env: VERSION=-3.8
before_install:
- if [[ "$TRAVIS_OS_NAME" == "osx" ]]; then brew update; fi
- if [[ "$TRAVIS_OS_NAME" == "osx" ]]; then brew install libmpc; fi
- if [[ "$TRAVIS_OS_NAME" == "osx" ]] && [[ "$CC" == "gcc" ]]; then brew install gcc5; fi
install:
- export CC=$CC$VERSION
- export CXX=$CXX$VERSION
- if [[ "$TRAVIS_OS_NAME" == "osx" ]]; then export LDFLAGS='-L/usr/local/lib'; fi
script:
- ./scripts/reconfigure_script
- mkdir build
- cd build
- ../configure CXXFLAGS="-msse4.2 -O3 -std=c++11" LIBS="-lmpfr -lgmp" --enable-precision=single --enable-simd=SSE4 --enable-comms=none
- make -j4
- ./benchmarks/Benchmark_dwf --threads 1


@ -0,0 +1,5 @@
Version : 0.8.0
- Clang 3.5 and above, ICPC v16 and above, GCC 6.3 and above recommended
- MPI and MPI3 comms optimisations for KNL and OPA finished
- Half precision comms

Grid/DisableWarnings.h

@ -0,0 +1,65 @@
/*************************************************************************************
Grid physics library, www.github.com/paboyle/Grid
Source file: ./lib/DisableWarnings.h
Copyright (C) 2016
Author: Guido Cossu <guido.cossu@ed.ac.uk>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
See the full license in the file "LICENSE" in the top level distribution
directory
*************************************************************************************/
/* END LEGAL */
#ifndef DISABLE_WARNINGS_H
#define DISABLE_WARNINGS_H
#if defined __GNUC__ && __GNUC__>=6
#pragma GCC diagnostic ignored "-Wignored-attributes"
#endif
//disables an intel compiler specific warning (in json.hpp)
#ifdef __ICC
#pragma warning disable 488
#endif
#ifdef __NVCC__
//disables nvcc specific warning in json.hpp
#pragma clang diagnostic ignored "-Wdeprecated-register"
#pragma diag_suppress unsigned_compare_with_zero
#pragma diag_suppress cast_to_qualified_type
//disables nvcc specific warning in many files
#pragma diag_suppress esa_on_defaulted_function_ignored
#pragma diag_suppress extra_semicolon
//Eigen only
#endif
// Disable vectorisation in Eigen on the Power8/9 and PowerPC
#ifdef __ALTIVEC__
#define EIGEN_DONT_VECTORIZE
#endif
#ifdef __VSX__
#define EIGEN_DONT_VECTORIZE
#endif
#endif


@ -2,11 +2,13 @@
Grid physics library, www.github.com/paboyle/Grid
Source file: ./lib/Cshift.h
Source file: ./lib/Grid.h
Copyright (C) 2015
Author: Peter Boyle <paboyle@ph.ed.ac.uk>
Author: azusayamaguchi <ayamaguc@YAMAKAZE.local>
Author: paboyle <paboyle@ph.ed.ac.uk>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@ -25,20 +27,24 @@ Author: Peter Boyle <paboyle@ph.ed.ac.uk>
See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */
#ifndef _GRID_CSHIFT_H_
#define _GRID_CSHIFT_H_
//
// Grid.h
// simd
//
// Created by Peter Boyle on 09/05/2014.
// Copyright (c) 2014 University of Edinburgh. All rights reserved.
//
#include <cshift/Cshift_common.h>
#ifndef GRID_H
#define GRID_H
#include <Grid/GridCore.h>
#include <Grid/GridQCDcore.h>
#include <Grid/qcd/action/Action.h>
#include <Grid/qcd/utils/GaugeFix.h>
#include <Grid/qcd/utils/CovariantSmearing.h>
#include <Grid/qcd/smearing/Smearing.h>
#include <Grid/parallelIO/MetaData.h>
#include <Grid/qcd/hmc/HMC_aggregate.h>
#ifdef GRID_COMMS_NONE
#include <cshift/Cshift_none.h>
#endif
#ifdef GRID_COMMS_MPI
#include <cshift/Cshift_mpi.h>
#endif
#ifdef GRID_COMMS_SHMEM
#include <cshift/Cshift_mpi.h> // uses same implementation of communicator
#endif
#endif


@ -35,54 +35,31 @@ Author: paboyle <paboyle@ph.ed.ac.uk>
// Copyright (c) 2014 University of Edinburgh. All rights reserved.
//
#ifndef GRID_H
#define GRID_H
///////////////////
// Std C++ dependencies
///////////////////
#include <cassert>
#include <complex>
#include <vector>
#include <iostream>
#include <iomanip>
#include <random>
#include <functional>
#include <stdio.h>
#include <stdlib.h>
#include <stdio.h>
#include <signal.h>
#include <ctime>
#include <sys/time.h>
#include <chrono>
///////////////////
// Grid headers
///////////////////
#include <serialisation/Serialisation.h>
#include <Config.h>
#include <Timer.h>
#include <PerfCount.h>
#include <Log.h>
#include <AlignedAllocator.h>
#include <Simd.h>
#include <Threads.h>
#include <Lexicographic.h>
#include <Communicator.h>
#include <Cartesian.h>
#include <Tensors.h>
#include <Lattice.h>
#include <Cshift.h>
#include <Stencil.h>
#include <Algorithms.h>
#include <parallelIO/BinaryIO.h>
#include <qcd/QCD.h>
#include <parallelIO/NerscIO.h>
#include <Init.h>
#include <qcd/hmc/NerscCheckpointer.h>
#include <qcd/hmc/HmcRunner.h>
#ifndef GRID_BASE_H
#define GRID_BASE_H
#include <Grid/DisableWarnings.h>
#include <Grid/Namespace.h>
#include <Grid/GridStd.h>
#include <Grid/threads/Pragmas.h>
#include <Grid/perfmon/Timer.h>
#include <Grid/perfmon/PerfCount.h>
#include <Grid/util/Util.h>
#include <Grid/log/Log.h>
#include <Grid/allocator/Allocator.h>
#include <Grid/simd/Simd.h>
#include <Grid/threads/ThreadReduction.h>
#include <Grid/serialisation/Serialisation.h>
#include <Grid/util/Sha.h>
#include <Grid/communicator/Communicator.h>
#include <Grid/cartesian/Cartesian.h>
#include <Grid/tensors/Tensors.h>
#include <Grid/lattice/Lattice.h>
#include <Grid/cshift/Cshift.h>
#include <Grid/stencil/Stencil.h>
#include <Grid/parallelIO/BinaryIO.h>
#include <Grid/algorithms/Algorithms.h>
NAMESPACE_CHECK(GridCore)
#endif


@ -2,11 +2,13 @@
Grid physics library, www.github.com/paboyle/Grid
Source file: ./lib/Lattice.h
Source file: ./lib/Grid.h
Copyright (C) 2015
Author: Peter Boyle <paboyle@ph.ed.ac.uk>
Author: azusayamaguchi <ayamaguc@YAMAKAZE.local>
Author: paboyle <paboyle@ph.ed.ac.uk>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@ -25,9 +27,17 @@ Author: Peter Boyle <paboyle@ph.ed.ac.uk>
See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */
#ifndef GRID_LATTICE_H
#define GRID_LATTICE_H
#ifndef GRID_QCD_CORE_H
#define GRID_QCD_CORE_H
#include <lattice/Lattice_base.h>
/////////////////////////
// Core Grid QCD headers
/////////////////////////
#include <Grid/GridCore.h>
#include <Grid/qcd/QCD.h>
#include <Grid/qcd/spin/Spin.h>
#include <Grid/qcd/utils/Utils.h>
#include <Grid/qcd/representations/Representations.h>
NAMESPACE_CHECK(GridQCDCore);
#endif

Grid/GridStd.h

@ -0,0 +1,34 @@
#ifndef GRID_STD_H
#define GRID_STD_H
///////////////////
// Std C++ dependencies
///////////////////
#include <cassert>
#include <complex>
#include <memory>
#include <vector>
#include <array>
#include <string>
#include <iostream>
#include <iomanip>
#include <random>
#include <functional>
#include <stdio.h>
#include <stdlib.h>
#include <stdio.h>
#include <signal.h>
#include <ctime>
#include <sys/time.h>
#include <chrono>
#include <zlib.h>
///////////////////
// Grid config
///////////////////
#include "Config.h"
#ifdef TOFU
#undef GRID_COMMS_THREADS
#endif
#endif /* GRID_STD_H */

Grid/Grid_Eigen_Dense.h

@ -0,0 +1,71 @@
#include <Grid/GridCore.h>
#pragma once
// Force Eigen to use MKL if Grid has been configured with --enable-mkl
#ifdef USE_MKL
#define EIGEN_USE_MKL_ALL
#endif
#if defined __GNUC__
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wdeprecated-declarations"
#endif
/* NVCC save and restore compile environment*/
#ifdef __NVCC__
#pragma push
#pragma diag_suppress code_is_unreachable
#pragma push_macro("__CUDA_ARCH__")
#pragma push_macro("__NVCC__")
#pragma push_macro("__CUDACC__")
#undef __CUDA_ARCH__
#undef __NVCC__
#undef __CUDACC__
#define __NVCC__REDEFINE__
#endif
/* SYCL save and restore compile environment*/
#ifdef GRID_SYCL
#pragma push
#pragma push_macro("__SYCL_DEVICE_ONLY__")
#undef __SYCL_DEVICE_ONLY__
#define EIGEN_DONT_VECTORIZE
//#undef EIGEN_USE_SYCL
#define __SYCL__REDEFINE__
#endif
/* HIP save and restore compile environment*/
#ifdef GRID_HIP
#pragma push
#pragma push_macro("__HIP_DEVICE_COMPILE__")
#endif
#define EIGEN_NO_HIP
#include <Grid/Eigen/Dense>
#include <Grid/Eigen/unsupported/CXX11/Tensor>
/* NVCC restore */
#ifdef __NVCC__REDEFINE__
#pragma pop_macro("__CUDACC__")
#pragma pop_macro("__NVCC__")
#pragma pop_macro("__CUDA_ARCH__")
#pragma pop
#endif
/*SYCL restore*/
#ifdef __SYCL__REDEFINE__
#pragma pop_macro("__SYCL_DEVICE_ONLY__")
#pragma pop
#endif
/*HIP restore*/
#ifdef __HIP__REDEFINE__
#pragma pop_macro("__HIP_DEVICE_COMPILE__")
#pragma pop
#endif
#if defined __GNUC__
#pragma GCC diagnostic pop
#endif

Grid/Grid_Eigen_Tensor.h

@ -0,0 +1 @@
#include <Grid/Grid_Eigen_Dense.h>

Grid/Makefile.am

@ -0,0 +1,77 @@
extra_sources=
extra_headers=
if BUILD_COMMS_MPI3
extra_sources+=communicator/Communicator_mpi3.cc
extra_sources+=communicator/Communicator_base.cc
extra_sources+=communicator/SharedMemoryMPI.cc
extra_sources+=communicator/SharedMemory.cc
endif
if BUILD_COMMS_NONE
extra_sources+=communicator/Communicator_none.cc
extra_sources+=communicator/Communicator_base.cc
extra_sources+=communicator/SharedMemoryNone.cc
extra_sources+=communicator/SharedMemory.cc
endif
if BUILD_HDF5
extra_sources+=serialisation/Hdf5IO.cc
extra_headers+=serialisation/Hdf5IO.h
extra_headers+=serialisation/Hdf5Type.h
endif
all: version-cache Version.h
version-cache:
@if [ `git status --porcelain | grep -v '??' | wc -l` -gt 0 ]; then\
a="uncommited changes";\
else\
a="clean";\
fi;\
echo "`git log -n 1 --format=format:"#define GITHASH \\"%H:%d $$a\\"%n" HEAD`" > vertmp;\
if [ -e version-cache ]; then\
d=`diff vertmp version-cache`;\
if [ "$${d}" != "" ]; then\
mv vertmp version-cache;\
rm -f Version.h;\
fi;\
else\
mv vertmp version-cache;\
rm -f Version.h;\
fi;\
rm -f vertmp
Version.h: version-cache
cp version-cache Version.h
.PHONY: version-cache
#
# Libraries
#
include Make.inc
include Eigen.inc
extra_sources+=$(WILS_FERMION_FILES)
extra_sources+=$(STAG_FERMION_FILES)
if BUILD_ZMOBIUS
extra_sources+=$(ZWILS_FERMION_FILES)
endif
if BUILD_GPARITY
extra_sources+=$(GP_FERMION_FILES)
endif
if BUILD_FERMION_REPS
extra_sources+=$(ADJ_FERMION_FILES)
extra_sources+=$(TWOIND_FERMION_FILES)
endif
lib_LIBRARIES = libGrid.a
CCFILES += $(extra_sources)
HFILES += $(extra_headers) Config.h Version.h
libGrid_a_SOURCES = $(CCFILES)
libGrid_adir = $(includedir)/Grid
nobase_dist_pkginclude_HEADERS = $(HFILES) $(eigen_files) $(eigen_unsupp_files)

Grid/Namespace.h

@ -0,0 +1,38 @@
/*************************************************************************************
Grid physics library, www.github.com/paboyle/Grid
Source file: ./lib/Namespace.h
Copyright (C) 2016
Author: Peter Boyle <paboyle@ph.ed.ac.uk>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
See the full license in the file "LICENSE" in the top level distribution
directory
*************************************************************************************/
/* END LEGAL */
#pragma once
#include <type_traits>
#include <cassert>
#define NAMESPACE_BEGIN(A) namespace A {
#define NAMESPACE_END(A) }
#define GRID_NAMESPACE_BEGIN NAMESPACE_BEGIN(Grid)
#define GRID_NAMESPACE_END NAMESPACE_END(Grid)
#define NAMESPACE_CHECK(x) struct namespaceTEST##x {}; static_assert(std::is_same<namespaceTEST##x, ::namespaceTEST##x>::value,"Not in :: at" );
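As a rough, hedged sketch of how the macros above are meant to be used in the headers that follow (not a quote of any particular file; the header body and the check label are invented for illustration):

#include <Grid/Namespace.h>

NAMESPACE_BEGIN(Grid);                 // expands to: namespace Grid {

template <class T> T twice(const T &x) { return x + x; }   // hypothetical header content

NAMESPACE_END(Grid);                   // expands to: }

// NAMESPACE_CHECK declares a struct in the current scope and static_asserts that it
// is the same type as the identically named struct at global scope (::...). It
// therefore only compiles when expanded at global scope, catching unbalanced
// NAMESPACE_BEGIN/NAMESPACE_END pairs in the preceding headers.
NAMESPACE_CHECK(ExampleHeader);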


@ -0,0 +1,74 @@
/*************************************************************************************
Grid physics library, www.github.com/paboyle/Grid
Source file: ./lib/Algorithms.h
Copyright (C) 2015
Author: Azusa Yamaguchi <ayamaguc@staffmail.ed.ac.uk>
Author: Peter Boyle <paboyle@ph.ed.ac.uk>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */
#ifndef GRID_ALGORITHMS_H
#define GRID_ALGORITHMS_H
NAMESPACE_CHECK(algorithms);
#include <Grid/algorithms/SparseMatrix.h>
#include <Grid/algorithms/LinearOperator.h>
#include <Grid/algorithms/Preconditioner.h>
NAMESPACE_CHECK(SparseMatrix);
#include <Grid/algorithms/approx/Zolotarev.h>
#include <Grid/algorithms/approx/Chebyshev.h>
#include <Grid/algorithms/approx/JacobiPolynomial.h>
#include <Grid/algorithms/approx/Remez.h>
#include <Grid/algorithms/approx/MultiShiftFunction.h>
#include <Grid/algorithms/approx/Forecast.h>
#include <Grid/algorithms/approx/RemezGeneral.h>
#include <Grid/algorithms/approx/ZMobius.h>
NAMESPACE_CHECK(approx);
#include <Grid/algorithms/iterative/Deflation.h>
#include <Grid/algorithms/iterative/ConjugateGradient.h>
NAMESPACE_CHECK(ConjGrad);
#include <Grid/algorithms/iterative/BiCGSTAB.h>
NAMESPACE_CHECK(BiCGSTAB);
#include <Grid/algorithms/iterative/ConjugateResidual.h>
#include <Grid/algorithms/iterative/NormalEquations.h>
#include <Grid/algorithms/iterative/SchurRedBlack.h>
#include <Grid/algorithms/iterative/ConjugateGradientMultiShift.h>
#include <Grid/algorithms/iterative/ConjugateGradientMixedPrec.h>
#include <Grid/algorithms/iterative/BiCGSTABMixedPrec.h>
#include <Grid/algorithms/iterative/BlockConjugateGradient.h>
#include <Grid/algorithms/iterative/ConjugateGradientReliableUpdate.h>
#include <Grid/algorithms/iterative/MinimalResidual.h>
#include <Grid/algorithms/iterative/GeneralisedMinimalResidual.h>
#include <Grid/algorithms/iterative/CommunicationAvoidingGeneralisedMinimalResidual.h>
#include <Grid/algorithms/iterative/FlexibleGeneralisedMinimalResidual.h>
#include <Grid/algorithms/iterative/FlexibleCommunicationAvoidingGeneralisedMinimalResidual.h>
#include <Grid/algorithms/iterative/MixedPrecisionFlexibleGeneralisedMinimalResidual.h>
#include <Grid/algorithms/iterative/ImplicitlyRestartedLanczos.h>
#include <Grid/algorithms/iterative/PowerMethod.h>
NAMESPACE_CHECK(PowerMethod);
#include <Grid/algorithms/CoarsenedMatrix.h>
NAMESPACE_CHECK(CoarsendMatrix);
#include <Grid/algorithms/FFT.h>
#endif
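The header above mainly aggregates the operator, approximation and solver headers. As a rough orientation only, a driver combining the linear-operator wrappers and the conjugate gradient pulled in here typically looks like the following sketch (assumed, not taken from this diff; the parameter values are arbitrary, and names such as DomainWallFermionR and SU<Nc> follow this tree's conventions and may differ in other Grid versions):

#include <Grid/Grid.h>
using namespace Grid;

int main(int argc, char **argv)
{
  Grid_init(&argc, &argv);
  const int Ls = 8;

  // 4d and 5d grids, plus their red-black (even-odd) counterparts
  GridCartesian         *UGrid   = SpaceTimeGrid::makeFourDimGrid(GridDefaultLatt(),
                                     GridDefaultSimd(Nd, vComplexD::Nsimd()), GridDefaultMpi());
  GridRedBlackCartesian *UrbGrid = SpaceTimeGrid::makeFourDimRedBlackGrid(UGrid);
  GridCartesian         *FGrid   = SpaceTimeGrid::makeFiveDimGrid(Ls, UGrid);
  GridRedBlackCartesian *FrbGrid = SpaceTimeGrid::makeFiveDimRedBlackGrid(Ls, UGrid);

  GridParallelRNG RNG4(UGrid); RNG4.SeedFixedIntegers({1, 2, 3, 4});
  GridParallelRNG RNG5(FGrid); RNG5.SeedFixedIntegers({5, 6, 7, 8});

  // Random gauge field and a Gaussian source
  LatticeGaugeField Umu(UGrid);
  SU<Nc>::HotConfiguration(RNG4, Umu);
  LatticeFermion src(FGrid); gaussian(RNG5, src);
  LatticeFermion sol(FGrid); sol = Zero();

  // Domain wall action, wrapped as the Hermitian operator MdagM, solved with CG
  RealD mass = 0.01, M5 = 1.8;
  DomainWallFermionR Ddwf(Umu, *FGrid, *FrbGrid, *UGrid, *UrbGrid, mass, M5);
  MdagMLinearOperator<DomainWallFermionR, LatticeFermion> HermOp(Ddwf);
  ConjugateGradient<LatticeFermion> CG(1.0e-8, 10000);
  CG(HermOp, src, sol);

  Grid_finalize();
  return 0;
}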

File diff suppressed because it is too large

Grid/algorithms/FFT.h

@ -0,0 +1,296 @@
/*************************************************************************************
Grid physics library, www.github.com/paboyle/Grid
Source file: ./lib/Cshift.h
Copyright (C) 2015
Author: Peter Boyle <paboyle@ph.ed.ac.uk>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */
#ifndef _GRID_FFT_H_
#define _GRID_FFT_H_
#ifdef HAVE_FFTW
#ifdef USE_MKL
#include <fftw/fftw3.h>
#else
#include <fftw3.h>
#endif
#endif
NAMESPACE_BEGIN(Grid);
template<class scalar> struct FFTW { };
#ifdef HAVE_FFTW
template<> struct FFTW<ComplexD> {
public:
typedef fftw_complex FFTW_scalar;
typedef fftw_plan FFTW_plan;
static FFTW_plan fftw_plan_many_dft(int rank, const int *n,int howmany,
FFTW_scalar *in, const int *inembed,
int istride, int idist,
FFTW_scalar *out, const int *onembed,
int ostride, int odist,
int sign, unsigned flags) {
return ::fftw_plan_many_dft(rank,n,howmany,in,inembed,istride,idist,out,onembed,ostride,odist,sign,flags);
}
static void fftw_flops(const FFTW_plan p,double *add, double *mul, double *fmas){
::fftw_flops(p,add,mul,fmas);
}
inline static void fftw_execute_dft(const FFTW_plan p,FFTW_scalar *in,FFTW_scalar *out) {
::fftw_execute_dft(p,in,out);
}
inline static void fftw_destroy_plan(const FFTW_plan p) {
::fftw_destroy_plan(p);
}
};
template<> struct FFTW<ComplexF> {
public:
typedef fftwf_complex FFTW_scalar;
typedef fftwf_plan FFTW_plan;
static FFTW_plan fftw_plan_many_dft(int rank, const int *n,int howmany,
FFTW_scalar *in, const int *inembed,
int istride, int idist,
FFTW_scalar *out, const int *onembed,
int ostride, int odist,
int sign, unsigned flags) {
return ::fftwf_plan_many_dft(rank,n,howmany,in,inembed,istride,idist,out,onembed,ostride,odist,sign,flags);
}
static void fftw_flops(const FFTW_plan p,double *add, double *mul, double *fmas){
::fftwf_flops(p,add,mul,fmas);
}
inline static void fftw_execute_dft(const FFTW_plan p,FFTW_scalar *in,FFTW_scalar *out) {
::fftwf_execute_dft(p,in,out);
}
inline static void fftw_destroy_plan(const FFTW_plan p) {
::fftwf_destroy_plan(p);
}
};
#endif
#ifndef FFTW_FORWARD
#define FFTW_FORWARD (-1)
#define FFTW_BACKWARD (+1)
#endif
class FFT {
private:
GridCartesian *vgrid;
GridCartesian *sgrid;
int Nd;
double flops;
double flops_call;
uint64_t usec;
Coordinate dimensions;
Coordinate processors;
Coordinate processor_coor;
public:
static const int forward=FFTW_FORWARD;
static const int backward=FFTW_BACKWARD;
double Flops(void) {return flops;}
double MFlops(void) {return flops/usec;}
double USec(void) {return (double)usec;}
FFT ( GridCartesian * grid ) :
vgrid(grid),
Nd(grid->_ndimension),
dimensions(grid->_fdimensions),
processors(grid->_processors),
processor_coor(grid->_processor_coor)
{
flops=0;
usec =0;
Coordinate layout(Nd,1);
sgrid = new GridCartesian(dimensions,layout,processors);
};
~FFT ( void) {
delete sgrid;
}
template<class vobj>
void FFT_dim_mask(Lattice<vobj> &result,const Lattice<vobj> &source,Coordinate mask,int sign){
conformable(result.Grid(),vgrid);
conformable(source.Grid(),vgrid);
Lattice<vobj> tmp(vgrid);
tmp = source;
for(int d=0;d<Nd;d++){
if( mask[d] ) {
FFT_dim(result,tmp,d,sign);
tmp=result;
}
}
}
template<class vobj>
void FFT_all_dim(Lattice<vobj> &result,const Lattice<vobj> &source,int sign){
Coordinate mask(Nd,1);
FFT_dim_mask(result,source,mask,sign);
}
template<class vobj>
void FFT_dim(Lattice<vobj> &result,const Lattice<vobj> &source,int dim, int sign){
#ifndef HAVE_FFTW
assert(0);
#else
conformable(result.Grid(),vgrid);
conformable(source.Grid(),vgrid);
int L = vgrid->_ldimensions[dim];
int G = vgrid->_fdimensions[dim];
Coordinate layout(Nd,1);
Coordinate pencil_gd(vgrid->_fdimensions);
pencil_gd[dim] = G*processors[dim];
// Pencil global vol LxLxGxLxL per node
GridCartesian pencil_g(pencil_gd,layout,processors);
// Construct pencils
typedef typename vobj::scalar_object sobj;
typedef typename sobj::scalar_type scalar;
Lattice<sobj> pgbuf(&pencil_g);
autoView(pgbuf_v , pgbuf, CpuWrite);
typedef typename FFTW<scalar>::FFTW_scalar FFTW_scalar;
typedef typename FFTW<scalar>::FFTW_plan FFTW_plan;
int Ncomp = sizeof(sobj)/sizeof(scalar);
int Nlow = 1;
for(int d=0;d<dim;d++){
Nlow*=vgrid->_ldimensions[d];
}
int rank = 1; /* 1d transforms */
int n[] = {G}; /* 1d transforms of length G */
int howmany = Ncomp;
int odist,idist,istride,ostride;
idist = odist = 1; /* Distance between consecutive FT's */
istride = ostride = Ncomp*Nlow; /* distance between two elements in the same FT */
int *inembed = n, *onembed = n;
scalar div;
if ( sign == backward ) div = 1.0/G;
else if ( sign == forward ) div = 1.0;
else assert(0);
FFTW_plan p;
{
FFTW_scalar *in = (FFTW_scalar *)&pgbuf_v[0];
FFTW_scalar *out= (FFTW_scalar *)&pgbuf_v[0];
p = FFTW<scalar>::fftw_plan_many_dft(rank,n,howmany,
in,inembed,
istride,idist,
out,onembed,
ostride, odist,
sign,FFTW_ESTIMATE);
}
// Barrel shift and collect global pencil
Coordinate lcoor(Nd), gcoor(Nd);
result = source;
int pc = processor_coor[dim];
for(int p=0;p<processors[dim];p++) {
{
autoView(r_v,result,CpuRead);
autoView(p_v,pgbuf,CpuWrite);
thread_for(idx, sgrid->lSites(),{
Coordinate cbuf(Nd);
sobj s;
sgrid->LocalIndexToLocalCoor(idx,cbuf);
peekLocalSite(s,r_v,cbuf);
cbuf[dim]+=((pc+p) % processors[dim])*L;
pokeLocalSite(s,p_v,cbuf);
});
}
if (p != processors[dim] - 1) {
result = Cshift(result,dim,L);
}
}
// Loop over orthog coords
int NN=pencil_g.lSites();
GridStopWatch timer;
timer.Start();
thread_for( idx,NN,{
Coordinate cbuf(Nd);
pencil_g.LocalIndexToLocalCoor(idx, cbuf);
if ( cbuf[dim] == 0 ) { // restricts loop to plane at lcoor[dim]==0
FFTW_scalar *in = (FFTW_scalar *)&pgbuf_v[idx];
FFTW_scalar *out= (FFTW_scalar *)&pgbuf_v[idx];
FFTW<scalar>::fftw_execute_dft(p,in,out);
}
});
timer.Stop();
// performance counting
double add,mul,fma;
FFTW<scalar>::fftw_flops(p,&add,&mul,&fma);
flops_call = add+mul+2.0*fma;
usec += timer.useconds();
flops+= flops_call*NN;
// writing out result
{
autoView(pgbuf_v,pgbuf,CpuRead);
autoView(result_v,result,CpuWrite);
thread_for(idx,sgrid->lSites(),{
Coordinate clbuf(Nd), cgbuf(Nd);
sobj s;
sgrid->LocalIndexToLocalCoor(idx,clbuf);
cgbuf = clbuf;
cgbuf[dim] = clbuf[dim]+L*pc;
peekLocalSite(s,pgbuf_v,cgbuf);
pokeLocalSite(s,result_v,clbuf);
});
}
result = result*div;
// destroying plan
FFTW<scalar>::fftw_destroy_plan(p);
#endif
}
};
NAMESPACE_END(Grid);
#endif
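A minimal driver for the FFT class above, assuming Grid was configured with FFTW support (so HAVE_FFTW is defined; otherwise FFT_dim asserts). The input field and the printed counters are arbitrary choices for the sketch:

#include <Grid/Grid.h>
using namespace Grid;

int main(int argc, char **argv)
{
  Grid_init(&argc, &argv);

  GridCartesian grid(GridDefaultLatt(),
                     GridDefaultSimd(Nd, vComplexD::Nsimd()),
                     GridDefaultMpi());

  LatticeComplexD in(&grid), out(&grid);
  in = ComplexD(1.0, 0.0);                  // arbitrary constant input field

  FFT fft(&grid);
  fft.FFT_all_dim(out, in, FFT::forward);   // transform every dimension in turn

  std::cout << GridLogMessage << "FFT flops " << fft.Flops()
            << "  MFlop/s " << fft.MFlops() << std::endl;

  Grid_finalize();
  return 0;
}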


@ -0,0 +1,626 @@
/*************************************************************************************
Grid physics library, www.github.com/paboyle/Grid
Source file: ./lib/algorithms/LinearOperator.h
Copyright (C) 2015
Author: Azusa Yamaguchi <ayamaguc@staffmail.ed.ac.uk>
Author: Peter Boyle <paboyle@ph.ed.ac.uk>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */
#pragma once
NAMESPACE_BEGIN(Grid);
/////////////////////////////////////////////////////////////////////////////////////////////
// LinearOperators Take a something and return a something.
/////////////////////////////////////////////////////////////////////////////////////////////
//
// Hopefully linearity is satisfied and the AdjOp is indeed the Hermitian conjugate (transpose if real):
//
// i) F(a x + b y) = aF(x) + b F(y).
// ii) <x|Op|y> = <y|AdjOp|x>^\ast
//
// Would be fun to have a test linearity & Herm Conj function!
/////////////////////////////////////////////////////////////////////////////////////////////
template<class Field> class LinearOperatorBase {
public:
// Support for coarsening to a multigrid
virtual void OpDiag (const Field &in, Field &out) = 0; // Abstract base
virtual void OpDir (const Field &in, Field &out,int dir,int disp) = 0; // Abstract base
virtual void OpDirAll (const Field &in, std::vector<Field> &out) = 0; // Abstract base
virtual void Op (const Field &in, Field &out) = 0; // Abstract base
virtual void AdjOp (const Field &in, Field &out) = 0; // Abstract base
virtual void HermOpAndNorm(const Field &in, Field &out,RealD &n1,RealD &n2)=0;
virtual void HermOp(const Field &in, Field &out)=0;
};
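// A possible sketch of the linearity / Hermitian-conjugate check wished for
// above (illustrative only, not part of the original interface). It verifies
// <x|Op|y> == conj(<y|AdjOp|x>) on two user-supplied fields; the tolerance
// and the normalisation convention are arbitrary choices.
template<class Field>
void TestAdjointConsistency(LinearOperatorBase<Field> &Linop,
                            const Field &x, const Field &y, RealD tol=1.0e-8)
{
  Field Opy(y.Grid());  Linop.Op   (y, Opy);      // Op |y>
  Field Adx(x.Grid());  Linop.AdjOp(x, Adx);      // AdjOp |x>
  ComplexD lhs = innerProduct(x, Opy);            // <x| Op |y>
  ComplexD rhs = conjugate(innerProduct(y, Adx)); // <y| AdjOp |x>^*
  RealD scale = std::sqrt(norm2(x) * norm2(Opy)) + 1.0e-300; // avoid 0/0
  assert( abs(lhs - rhs) / scale < tol );
}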
/////////////////////////////////////////////////////////////////////////////////////////////
// By sharing the class for Sparse Matrix across multiple operator wrappers, we can share code
// between RB and non-RB variants. Sparse matrix is like the fermion action def, and then
// the wrappers implement the specialisation of "Op" and "AdjOp" to the cases minimising
// replication of code.
//
// I'm not entirely happy with the implementation; to share the Schur code between herm and non-herm
// while still having an "OpAndNorm" in the abstract base I had to implement it in both cases,
// with an assert trap in the non-herm case. This isn't right; there must be a better C++ way to
// do it, but I fear it requires multiple inheritance and mixed-in abstract base classes.
/////////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////
// Construct herm op from non-herm matrix
////////////////////////////////////////////////////////////////////
template<class Matrix,class Field>
class MdagMLinearOperator : public LinearOperatorBase<Field> {
Matrix &_Mat;
public:
MdagMLinearOperator(Matrix &Mat): _Mat(Mat){};
// Support for coarsening to a multigrid
void OpDiag (const Field &in, Field &out) {
_Mat.Mdiag(in,out);
}
void OpDir (const Field &in, Field &out,int dir,int disp) {
_Mat.Mdir(in,out,dir,disp);
}
void OpDirAll (const Field &in, std::vector<Field> &out){
_Mat.MdirAll(in,out);
};
void Op (const Field &in, Field &out){
_Mat.M(in,out);
}
void AdjOp (const Field &in, Field &out){
_Mat.Mdag(in,out);
}
void HermOpAndNorm(const Field &in, Field &out,RealD &n1,RealD &n2){
_Mat.MdagM(in,out);
ComplexD dot = innerProduct(in,out);
n1=real(dot);
n2=norm2(out);
}
void HermOp(const Field &in, Field &out){
_Mat.MdagM(in,out);
}
};
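// Illustrative use (a sketch, not part of this header): wrap a fermion matrix
// so a Krylov solver sees the Hermitian positive (semi-)definite combination
// Mdag M. ConjugateGradient comes from the iterative-solver headers; the
// tolerance and iteration cap below are arbitrary example values.
template<class Matrix,class Field>
void ExampleSolveMdagM(Matrix &Mat, const Field &src, Field &psi)
{
  MdagMLinearOperator<Matrix,Field> HermOp(Mat);
  ConjugateGradient<Field> CG(1.0e-8, 10000);
  CG(HermOp, src, psi);   // solves Mdag M psi = src
}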
////////////////////////////////////////////////////////////////////
// Construct herm op and shift it for mgrid smoother
////////////////////////////////////////////////////////////////////
template<class Matrix,class Field>
class ShiftedMdagMLinearOperator : public LinearOperatorBase<Field> {
Matrix &_Mat;
RealD _shift;
public:
ShiftedMdagMLinearOperator(Matrix &Mat,RealD shift): _Mat(Mat), _shift(shift){};
// Support for coarsening to a multigrid
void OpDiag (const Field &in, Field &out) {
_Mat.Mdiag(in,out);
assert(0);
}
void OpDir (const Field &in, Field &out,int dir,int disp) {
_Mat.Mdir(in,out,dir,disp);
assert(0);
}
void OpDirAll (const Field &in, std::vector<Field> &out){
assert(0);
};
void Op (const Field &in, Field &out){
_Mat.M(in,out);
assert(0);
}
void AdjOp (const Field &in, Field &out){
_Mat.Mdag(in,out);
assert(0);
}
void HermOpAndNorm(const Field &in, Field &out,RealD &n1,RealD &n2){
HermOp(in,out);
ComplexD dot = innerProduct(in,out);
n1=real(dot);
n2=norm2(out);
}
void HermOp(const Field &in, Field &out){
_Mat.MdagM(in,out);
out = out + _shift*in;
}
};
////////////////////////////////////////////////////////////////////
// Wrap an already herm matrix
////////////////////////////////////////////////////////////////////
template<class Matrix,class Field>
class HermitianLinearOperator : public LinearOperatorBase<Field> {
Matrix &_Mat;
public:
HermitianLinearOperator(Matrix &Mat): _Mat(Mat){};
// Support for coarsening to a multigrid
void OpDiag (const Field &in, Field &out) {
_Mat.Mdiag(in,out);
}
void OpDir (const Field &in, Field &out,int dir,int disp) {
_Mat.Mdir(in,out,dir,disp);
}
void OpDirAll (const Field &in, std::vector<Field> &out){
_Mat.MdirAll(in,out);
};
void Op (const Field &in, Field &out){
_Mat.M(in,out);
}
void AdjOp (const Field &in, Field &out){
_Mat.M(in,out);
}
void HermOpAndNorm(const Field &in, Field &out,RealD &n1,RealD &n2){
HermOp(in,out);
ComplexD dot= innerProduct(in,out); n1=real(dot);
n2=norm2(out);
}
void HermOp(const Field &in, Field &out){
_Mat.M(in,out);
}
};
template<class Matrix,class Field>
class NonHermitianLinearOperator : public LinearOperatorBase<Field> {
Matrix &_Mat;
public:
NonHermitianLinearOperator(Matrix &Mat): _Mat(Mat){};
// Support for coarsening to a multigrid
void OpDiag (const Field &in, Field &out) {
_Mat.Mdiag(in,out);
}
void OpDir (const Field &in, Field &out,int dir,int disp) {
_Mat.Mdir(in,out,dir,disp);
}
void OpDirAll (const Field &in, std::vector<Field> &out){
_Mat.MdirAll(in,out);
};
void Op (const Field &in, Field &out){
_Mat.M(in,out);
}
void AdjOp (const Field &in, Field &out){
_Mat.Mdag(in,out);
}
void HermOpAndNorm(const Field &in, Field &out,RealD &n1,RealD &n2){
assert(0);
}
void HermOp(const Field &in, Field &out){
assert(0);
}
};
//////////////////////////////////////////////////////////
// Even Odd Schur decomp operators; there are several
// ways to introduce the even odd checkerboarding
//////////////////////////////////////////////////////////
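//
// In block form   M = ( Mee  Meo )
//                     ( Moe  Moo )
// the Hermitian wrappers below implement (cf. the left/right-handed comment further down):
//
//   SchurDiagMooee :  Mpc = Moo - Moe Mee^{-1} Meo
//   SchurDiagOne   :  Mpc = 1 - Moo^{-1} Moe Mee^{-1} Meo    (left-handed  Moo^{-1})
//   SchurDiagTwo   :  Mpc = 1 - Moe Mee^{-1} Meo Moo^{-1}    (right-handed Moo^{-1})
//
// This is a summary for orientation; the definitive statement is the Mpc code itself.
//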
template<class Field>
class SchurOperatorBase : public LinearOperatorBase<Field> {
public:
virtual void Mpc (const Field &in, Field &out) =0;
virtual void MpcDag (const Field &in, Field &out) =0;
virtual void MpcDagMpc(const Field &in, Field &out) {
Field tmp(in.Grid());
tmp.Checkerboard() = in.Checkerboard();
Mpc(in,tmp);
MpcDag(tmp,out);
}
virtual void HermOpAndNorm(const Field &in, Field &out,RealD &n1,RealD &n2){
out.Checkerboard() = in.Checkerboard();
MpcDagMpc(in,out);
ComplexD dot= innerProduct(in,out);
n1=real(dot);
n2=norm2(out);
}
virtual void HermOp(const Field &in, Field &out){
out.Checkerboard() = in.Checkerboard();
MpcDagMpc(in,out);
}
void Op (const Field &in, Field &out){
Mpc(in,out);
}
void AdjOp (const Field &in, Field &out){
MpcDag(in,out);
}
// Support for coarsening to a multigrid
void OpDiag (const Field &in, Field &out) {
assert(0); // must coarsen the unpreconditioned system
}
void OpDir (const Field &in, Field &out,int dir,int disp) {
assert(0);
}
void OpDirAll (const Field &in, std::vector<Field> &out){
assert(0);
};
};
template<class Matrix,class Field>
class SchurDiagMooeeOperator : public SchurOperatorBase<Field> {
public:
Matrix &_Mat;
SchurDiagMooeeOperator (Matrix &Mat): _Mat(Mat){};
virtual void Mpc (const Field &in, Field &out) {
Field tmp(in.Grid());
tmp.Checkerboard() = !in.Checkerboard();
_Mat.Meooe(in,tmp);
_Mat.MooeeInv(tmp,out);
_Mat.Meooe(out,tmp);
_Mat.Mooee(in,out);
axpy(out,-1.0,tmp,out);
}
virtual void MpcDag (const Field &in, Field &out){
Field tmp(in.Grid());
_Mat.MeooeDag(in,tmp);
_Mat.MooeeInvDag(tmp,out);
_Mat.MeooeDag(out,tmp);
_Mat.MooeeDag(in,out);
axpy(out,-1.0,tmp,out);
}
};
template<class Matrix,class Field>
class SchurDiagOneOperator : public SchurOperatorBase<Field> {
protected:
Matrix &_Mat;
public:
SchurDiagOneOperator (Matrix &Mat): _Mat(Mat){};
virtual void Mpc (const Field &in, Field &out) {
Field tmp(in.Grid());
_Mat.Meooe(in,out);
_Mat.MooeeInv(out,tmp);
_Mat.Meooe(tmp,out);
_Mat.MooeeInv(out,tmp);
axpy(out,-1.0,tmp,in);
}
virtual void MpcDag (const Field &in, Field &out){
Field tmp(in.Grid());
_Mat.MooeeInvDag(in,out);
_Mat.MeooeDag(out,tmp);
_Mat.MooeeInvDag(tmp,out);
_Mat.MeooeDag(out,tmp);
axpy(out,-1.0,tmp,in);
}
};
template<class Matrix,class Field>
class SchurDiagTwoOperator : public SchurOperatorBase<Field> {
protected:
Matrix &_Mat;
public:
SchurDiagTwoOperator (Matrix &Mat): _Mat(Mat){};
virtual void Mpc (const Field &in, Field &out) {
Field tmp(in.Grid());
_Mat.MooeeInv(in,out);
_Mat.Meooe(out,tmp);
_Mat.MooeeInv(tmp,out);
_Mat.Meooe(out,tmp);
axpy(out,-1.0,tmp,in);
}
virtual void MpcDag (const Field &in, Field &out){
Field tmp(in.Grid());
_Mat.MeooeDag(in,out);
_Mat.MooeeInvDag(out,tmp);
_Mat.MeooeDag(tmp,out);
_Mat.MooeeInvDag(out,tmp);
axpy(out,-1.0,tmp,in);
}
};
template<class Field>
class NonHermitianSchurOperatorBase : public LinearOperatorBase<Field>
{
public:
virtual void Mpc (const Field& in, Field& out) = 0;
virtual void MpcDag (const Field& in, Field& out) = 0;
virtual void MpcDagMpc(const Field& in, Field& out) {
Field tmp(in.Grid());
tmp.Checkerboard() = in.Checkerboard();
Mpc(in,tmp);
MpcDag(tmp,out);
}
virtual void HermOpAndNorm(const Field& in, Field& out, RealD& n1, RealD& n2) {
assert(0);
}
virtual void HermOp(const Field& in, Field& out) {
assert(0);
}
void Op(const Field& in, Field& out) {
Mpc(in, out);
}
void AdjOp(const Field& in, Field& out) {
MpcDag(in, out);
}
// Support for coarsening to a multigrid
void OpDiag(const Field& in, Field& out) {
assert(0); // must coarsen the unpreconditioned system
}
void OpDir(const Field& in, Field& out, int dir, int disp) {
assert(0);
}
void OpDirAll(const Field& in, std::vector<Field>& out){
assert(0);
};
};
template<class Matrix, class Field>
class NonHermitianSchurDiagMooeeOperator : public NonHermitianSchurOperatorBase<Field>
{
public:
Matrix& _Mat;
NonHermitianSchurDiagMooeeOperator(Matrix& Mat): _Mat(Mat){};
virtual void Mpc(const Field& in, Field& out) {
Field tmp(in.Grid());
tmp.Checkerboard() = !in.Checkerboard();
_Mat.Meooe(in, tmp);
_Mat.MooeeInv(tmp, out);
_Mat.Meooe(out, tmp);
_Mat.Mooee(in, out);
axpy(out, -1.0, tmp, out);
}
virtual void MpcDag(const Field& in, Field& out) {
Field tmp(in.Grid());
_Mat.MeooeDag(in, tmp);
_Mat.MooeeInvDag(tmp, out);
_Mat.MeooeDag(out, tmp);
_Mat.MooeeDag(in, out);
axpy(out, -1.0, tmp, out);
}
};
template<class Matrix,class Field>
class NonHermitianSchurDiagOneOperator : public NonHermitianSchurOperatorBase<Field>
{
protected:
Matrix &_Mat;
public:
NonHermitianSchurDiagOneOperator (Matrix& Mat): _Mat(Mat){};
virtual void Mpc(const Field& in, Field& out) {
Field tmp(in.Grid());
_Mat.Meooe(in, out);
_Mat.MooeeInv(out, tmp);
_Mat.Meooe(tmp, out);
_Mat.MooeeInv(out, tmp);
axpy(out, -1.0, tmp, in);
}
virtual void MpcDag(const Field& in, Field& out) {
Field tmp(in.Grid());
_Mat.MooeeInvDag(in, out);
_Mat.MeooeDag(out, tmp);
_Mat.MooeeInvDag(tmp, out);
_Mat.MeooeDag(out, tmp);
axpy(out, -1.0, tmp, in);
}
};
template<class Matrix, class Field>
class NonHermitianSchurDiagTwoOperator : public NonHermitianSchurOperatorBase<Field>
{
protected:
Matrix& _Mat;
public:
NonHermitianSchurDiagTwoOperator(Matrix& Mat): _Mat(Mat){};
virtual void Mpc(const Field& in, Field& out) {
Field tmp(in.Grid());
_Mat.MooeeInv(in, out);
_Mat.Meooe(out, tmp);
_Mat.MooeeInv(tmp, out);
_Mat.Meooe(out, tmp);
axpy(out, -1.0, tmp, in);
}
virtual void MpcDag(const Field& in, Field& out) {
Field tmp(in.Grid());
_Mat.MeooeDag(in, out);
_Mat.MooeeInvDag(out, tmp);
_Mat.MeooeDag(tmp, out);
_Mat.MooeeInvDag(out, tmp);
axpy(out, -1.0, tmp, in);
}
};
///////////////////////////////////////////////////////////////////////////////////////////////////
// Left handed Moo^-1 ; (Moo - Moe Mee^-1 Meo) psi = eta --> ( 1 - Moo^-1 Moe Mee^-1 Meo ) psi = Moo^-1 eta
// Right handed Moo^-1 ; (Moo - Moe Mee^-1 Meo) Moo^-1 Moo psi = eta --> ( 1 - Moe Mee^-1 Meo Moo^-1) phi=eta ; psi = Moo^-1 phi
///////////////////////////////////////////////////////////////////////////////////////////////////
template<class Matrix,class Field> using SchurDiagOneRH = SchurDiagTwoOperator<Matrix,Field> ;
template<class Matrix,class Field> using SchurDiagOneLH = SchurDiagOneOperator<Matrix,Field> ;
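// Illustrative red-black use of the wrappers above (a sketch, not part of this
// header): solve MpcDag Mpc psi_o = src_o on the odd checkerboard with CG.
// ConjugateGradient and its tolerance/iteration cap are assumptions taken from
// the usual Grid solver headers.
template<class Matrix,class Field>
void ExampleSchurSolveOdd(Matrix &Mat, const Field &src_o, Field &psi_o)
{
  SchurDiagMooeeOperator<Matrix,Field> HermOpEO(Mat); // Mpc = Moo - Moe Mee^{-1} Meo
  ConjugateGradient<Field> CG(1.0e-8, 10000);
  psi_o.Checkerboard() = Odd;                         // the solve lives on the odd sites
  CG(HermOpEO, src_o, psi_o);                         // CG on MpcDag Mpc via HermOp
}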
///////////////////////////////////////////////////////////////////////////////////////////////////
// Staggered use
///////////////////////////////////////////////////////////////////////////////////////////////////
template<class Matrix,class Field>
class SchurStaggeredOperator : public SchurOperatorBase<Field> {
protected:
Matrix &_Mat;
Field tmp;
RealD mass;
public:
SchurStaggeredOperator (Matrix &Mat): _Mat(Mat), tmp(_Mat.RedBlackGrid())
{
assert( _Mat.isTrivialEE() );
mass = _Mat.Mass();
}
virtual void HermOpAndNorm(const Field &in, Field &out,RealD &n1,RealD &n2){
Mpc(in,out);
ComplexD dot= innerProduct(in,out);
n1 = real(dot);
n2 =0.0;
}
virtual void HermOp(const Field &in, Field &out){
Mpc(in,out);
// _Mat.Meooe(in,out);
// _Mat.Meooe(out,tmp);
// axpby(out,-1.0,mass*mass,tmp,in);
}
virtual void Mpc (const Field &in, Field &out)
{
Field tmp(in.Grid());
Field tmp2(in.Grid());
// _Mat.Mooee(in,out);
// _Mat.Mooee(out,tmp);
_Mat.Meooe(in,out);
_Mat.Meooe(out,tmp);
axpby(out,-1.0,mass*mass,tmp,in);
}
virtual void MpcDag (const Field &in, Field &out){
Mpc(in,out);
}
virtual void MpcDagMpc(const Field &in, Field &out,RealD &ni,RealD &no) {
assert(0);// Never need with staggered
}
};
template<class Matrix,class Field> using SchurStagOperator = SchurStaggeredOperator<Matrix,Field>;
/////////////////////////////////////////////////////////////
// Base classes for functions of operators
/////////////////////////////////////////////////////////////
template<class Field> class OperatorFunction {
public:
virtual void operator() (LinearOperatorBase<Field> &Linop, const Field &in, Field &out) = 0;
virtual void operator() (LinearOperatorBase<Field> &Linop, const std::vector<Field> &in,std::vector<Field> &out) {
assert(in.size()==out.size());
for(int k=0;k<in.size();k++){
(*this)(Linop,in[k],out[k]);
}
};
};
template<class Field> class LinearFunction {
public:
virtual void operator() (const Field &in, Field &out) = 0;
};
template<class Field> class IdentityLinearFunction : public LinearFunction<Field> {
public:
void operator() (const Field &in, Field &out){
out = in;
};
};
/////////////////////////////////////////////////////////////
// Base classes for Multishift solvers for operators
/////////////////////////////////////////////////////////////
template<class Field> class OperatorMultiFunction {
public:
virtual void operator() (LinearOperatorBase<Field> &Linop, const Field &in, std::vector<Field> &out) = 0;
};
// FIXME : To think about
// Chroma functionality list defining LinearOperator
/*
virtual void operator() (T& chi, const T& psi, enum PlusMinus isign) const = 0;
virtual void operator() (T& chi, const T& psi, enum PlusMinus isign, Real epsilon) const
virtual const Subset& subset() const = 0;
virtual unsigned long nFlops() const { return 0; }
virtual void deriv(P& ds_u, const T& chi, const T& psi, enum PlusMinus isign) const
class UnprecLinearOperator : public DiffLinearOperator<T,P,Q>
const Subset& subset() const {return all;}
};
*/
////////////////////////////////////////////////////////////////////////////////////////////
// Hermitian operator Linear function and operator function
////////////////////////////////////////////////////////////////////////////////////////////
template<class Field>
class HermOpOperatorFunction : public OperatorFunction<Field> {
void operator() (LinearOperatorBase<Field> &Linop, const Field &in, Field &out) {
Linop.HermOp(in,out);
};
};
template<typename Field>
class PlainHermOp : public LinearFunction<Field> {
public:
LinearOperatorBase<Field> &_Linop;
PlainHermOp(LinearOperatorBase<Field>& linop) : _Linop(linop)
{}
void operator()(const Field& in, Field& out) {
_Linop.HermOp(in,out);
}
};
template<typename Field>
class FunctionHermOp : public LinearFunction<Field> {
public:
OperatorFunction<Field> & _poly;
LinearOperatorBase<Field> &_Linop;
FunctionHermOp(OperatorFunction<Field> & poly,LinearOperatorBase<Field>& linop)
: _poly(poly), _Linop(linop) {};
void operator()(const Field& in, Field& out) {
_poly(_Linop,in,out);
}
};
template<class Field>
class Polynomial : public OperatorFunction<Field> {
private:
std::vector<RealD> Coeffs;
public:
using OperatorFunction<Field>::operator();
Polynomial(std::vector<RealD> &_Coeffs) : Coeffs(_Coeffs) { };
// Implement the required interface
void operator() (LinearOperatorBase<Field> &Linop, const Field &in, Field &out) {
Field AtoN(in.Grid());
Field Mtmp(in.Grid());
AtoN = in;
out = AtoN*Coeffs[0];
for(int n=1;n<Coeffs.size();n++){
Mtmp = AtoN;
Linop.HermOp(Mtmp,AtoN);
out=out+AtoN*Coeffs[n];
}
};
};
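// Small illustrative use of Polynomial (a sketch; "M" here is whatever the
// supplied operator's HermOp applies, typically Mdag M):
//   out = (1 - 2 M + M^2) in = (1 - M)^2 in   for Coeffs = {1,-2,1}.
template<class Field>
void ExampleApplyPolynomial(LinearOperatorBase<Field> &HermOp, const Field &in, Field &out)
{
  std::vector<RealD> coeffs({1.0, -2.0, 1.0});
  Polynomial<Field> P(coeffs);
  P(HermOp, in, out);
}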
NAMESPACE_END(Grid);


@ -1,4 +1,4 @@
/*************************************************************************************
/*************************************************************************************
Grid physics library, www.github.com/paboyle/Grid
@ -23,24 +23,24 @@ Author: Azusa Yamaguchi <ayamaguc@staffmail.ed.ac.uk>
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */
*************************************************************************************/
/* END LEGAL */
#ifndef GRID_PRECONDITIONER_H
#define GRID_PRECONDITIONER_H
namespace Grid {
NAMESPACE_BEGIN(Grid);
template<class Field> class Preconditioner : public LinearFunction<Field> {
virtual void operator()(const Field &src, Field & psi)=0;
};
template<class Field> class Preconditioner : public LinearFunction<Field> {
virtual void operator()(const Field &src, Field & psi)=0;
};
template<class Field> class TrivialPrecon : public Preconditioner<Field> {
public:
void operator()(const Field &src, Field & psi){
psi = src;
}
TrivialPrecon(void){};
};
template<class Field> class TrivialPrecon : public Preconditioner<Field> {
public:
void operator()(const Field &src, Field & psi){
psi = src;
}
TrivialPrecon(void){};
};
}
NAMESPACE_END(Grid);
#endif


@ -0,0 +1,80 @@
/*************************************************************************************
Grid physics library, www.github.com/paboyle/Grid
Source file: ./lib/algorithms/SparseMatrix.h
Copyright (C) 2015
Author: Peter Boyle <paboyle@ph.ed.ac.uk>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */
#ifndef GRID_ALGORITHM_SPARSE_MATRIX_H
#define GRID_ALGORITHM_SPARSE_MATRIX_H
NAMESPACE_BEGIN(Grid);
/////////////////////////////////////////////////////////////////////////////////////////////
// Interface defining what I expect of a general sparse matrix, such as a Fermion action
/////////////////////////////////////////////////////////////////////////////////////////////
template<class Field> class SparseMatrixBase {
public:
virtual GridBase *Grid(void) =0;
// Full checkerboard operations
virtual void M (const Field &in, Field &out)=0;
virtual void Mdag (const Field &in, Field &out)=0;
virtual void MdagM(const Field &in, Field &out) {
Field tmp (in.Grid());
M(in,tmp);
Mdag(tmp,out);
}
virtual void Mdiag (const Field &in, Field &out)=0;
virtual void Mdir (const Field &in, Field &out,int dir, int disp)=0;
virtual void MdirAll (const Field &in, std::vector<Field> &out)=0;
};
/////////////////////////////////////////////////////////////////////////////////////////////
// Interface augmented by a red black sparse matrix, such as a Fermion action
/////////////////////////////////////////////////////////////////////////////////////////////
template<class Field> class CheckerBoardedSparseMatrixBase : public SparseMatrixBase<Field> {
public:
virtual GridBase *RedBlackGrid(void)=0;
//////////////////////////////////////////////////////////////////////
// Query the even even properties to make algorithmic decisions
//////////////////////////////////////////////////////////////////////
virtual RealD Mass(void) { return 0.0; };
virtual int ConstEE(void) { return 1; }; // Disable assumptions unless overridden
virtual int isTrivialEE(void) { return 0; }; // by a derived class that knows better
// Half checkerboard operations
virtual void Meooe (const Field &in, Field &out)=0;
virtual void Mooee (const Field &in, Field &out)=0;
virtual void MooeeInv (const Field &in, Field &out)=0;
virtual void MeooeDag (const Field &in, Field &out)=0;
virtual void MooeeDag (const Field &in, Field &out)=0;
virtual void MooeeInvDag (const Field &in, Field &out)=0;
};
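// Minimal illustrative implementation of SparseMatrixBase (a sketch, not part
// of Grid): a site-diagonal matrix D = lambda * identity, mainly useful for
// wiring up and testing the operator/solver interfaces above.
template<class Field>
class ScaledIdentityMatrix : public SparseMatrixBase<Field> {
  GridBase *_grid;
  ComplexD  _lambda;
public:
  ScaledIdentityMatrix(GridBase *grid, ComplexD lambda) : _grid(grid), _lambda(lambda) {};
  GridBase *Grid(void) { return _grid; }
  void M    (const Field &in, Field &out) { out = _lambda*in; }
  void Mdag (const Field &in, Field &out) { out = conjugate(_lambda)*in; }
  void Mdiag(const Field &in, Field &out) { M(in,out); }                     // purely diagonal
  void Mdir (const Field &in, Field &out,int dir,int disp) { out = Zero(); } // no off-diagonal hops
  void MdirAll(const Field &in, std::vector<Field> &out) { for(auto &o : out) o = Zero(); }
};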
NAMESPACE_END(Grid);
#endif


@ -0,0 +1,409 @@
/*************************************************************************************
Grid physics library, www.github.com/paboyle/Grid
Source file: ./lib/algorithms/approx/Chebyshev.h
Copyright (C) 2015
Author: Peter Boyle <paboyle@ph.ed.ac.uk>
Author: paboyle <paboyle@ph.ed.ac.uk>
Author: Christoph Lehner <clehner@bnl.gov>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */
#ifndef GRID_CHEBYSHEV_H
#define GRID_CHEBYSHEV_H
#include <Grid/algorithms/LinearOperator.h>
NAMESPACE_BEGIN(Grid);
struct ChebyParams : Serializable {
GRID_SERIALIZABLE_CLASS_MEMBERS(ChebyParams,
RealD, alpha,
RealD, beta,
int, Npoly);
};
////////////////////////////////////////////////////////////////////////////////////////////
// Generic Chebyshev approximations
////////////////////////////////////////////////////////////////////////////////////////////
template<class Field>
class Chebyshev : public OperatorFunction<Field> {
private:
using OperatorFunction<Field>::operator();
std::vector<RealD> Coeffs;
int order;
RealD hi;
RealD lo;
public:
void csv(std::ostream &out){
RealD diff = hi-lo;
RealD delta = diff*1.0e-9;
for (RealD x=lo; x<hi; x+=delta) {
delta*=1.1;
RealD f = approx(x);
out<< x<<" "<<f<<std::endl;
}
return;
}
// Convenience for plotting the approximation
void PlotApprox(std::ostream &out) {
out<<"Polynomial approx ["<<lo<<","<<hi<<"]"<<std::endl;
for(RealD x=lo;x<hi;x+=(hi-lo)/50.0){
out <<x<<"\t"<<approx(x)<<std::endl;
}
};
Chebyshev(){};
Chebyshev(ChebyParams p){ Init(p.alpha,p.beta,p.Npoly);};
Chebyshev(RealD _lo,RealD _hi,int _order, RealD (* func)(RealD) ) {Init(_lo,_hi,_order,func);};
Chebyshev(RealD _lo,RealD _hi,int _order) {Init(_lo,_hi,_order);};
////////////////////////////////////////////////////////////////////////////////////////////////////
// c.f. numerical recipes "chebft"/"chebev". This is sec 5.8 "Chebyshev approximation".
////////////////////////////////////////////////////////////////////////////////////////////////////
// CJ: the one we need for Lanczos
void Init(RealD _lo,RealD _hi,int _order)
{
lo=_lo;
hi=_hi;
order=_order;
if(order < 2) exit(-1);
Coeffs.resize(order);
Coeffs.assign(order,0.);  // order entries, all zero
Coeffs[order-1] = 1.;
};
// PB - a more efficient low-pass filter: drops the high modes above lo, whereas 1/x uses all Chebyshevs.
// Similar kick effect below the threshold to the Lanczos filter approach.
void InitLowPass(RealD _lo,RealD _hi,int _order)
{
lo=_lo;
hi=_hi;
order=_order;
if(order < 2) exit(-1);
Coeffs.resize(order);
for(int j=0;j<order;j++){
RealD k=(order-1.0);
RealD s=std::cos( j*M_PI*(k+0.5)/order );
Coeffs[j] = s * 2.0/order;
}
};
void Init(RealD _lo,RealD _hi,int _order, RealD (* func)(RealD))
{
lo=_lo;
hi=_hi;
order=_order;
if(order < 2) exit(-1);
Coeffs.resize(order);
for(int j=0;j<order;j++){
RealD s=0;
for(int k=0;k<order;k++){
RealD y=std::cos(M_PI*(k+0.5)/order);
RealD x=0.5*(y*(hi-lo)+(hi+lo));
RealD f=func(x);
s=s+f*std::cos( j*M_PI*(k+0.5)/order );
}
Coeffs[j] = s * 2.0/order;
}
};
void JacksonSmooth(void){
RealD M=order;
RealD alpha = M_PI/(M+2);
RealD lmax = std::cos(alpha);
RealD sumUsq =0;
std::vector<RealD> U(M+1);  // indices run 0..M inclusive below
std::vector<RealD> a(M+1);
std::vector<RealD> g(M+1);
for(int n=0;n<=M;n++){
U[n] = std::sin((n+1)*std::acos(lmax))/std::sin(std::acos(lmax));
sumUsq += U[n]*U[n];
}
sumUsq = std::sqrt(sumUsq);
for(int i=0;i<=M;i++){  // include a[0]; it is used in the autocorrelation below
a[i] = U[i]/sumUsq;
}
g[0] = 1.0;
for(int m=1;m<=M;m++){
g[m] = 0;
for(int i=0;i<=M-m;i++){
g[m]+= a[i]*a[m+i];
}
}
for(int m=1;m<M;m++){  // Coeffs has order entries: indices 0..order-1
Coeffs[m]*=g[m];
}
}
RealD approx(RealD x) // Convenience for plotting the approximation
{
RealD Tn;
RealD Tnm;
RealD Tnp;
RealD y=( x-0.5*(hi+lo))/(0.5*(hi-lo));
RealD T0=1;
RealD T1=y;
RealD sum;
sum = 0.5*Coeffs[0]*T0;
sum+= Coeffs[1]*T1;
Tn =T1;
Tnm=T0;
for(int i=2;i<order;i++){
Tnp=2*y*Tn-Tnm;
Tnm=Tn;
Tn =Tnp;
sum+= Tn*Coeffs[i];
}
return sum;
};
RealD approxD(RealD x)
{
RealD Un;
RealD Unm;
RealD Unp;
RealD y=( x-0.5*(hi+lo))/(0.5*(hi-lo));
RealD U0=1;
RealD U1=2*y;
RealD sum;
sum = Coeffs[1]*U0;
sum+= Coeffs[2]*U1*2.0;
Un =U1;
Unm=U0;
for(int i=2;i<order-1;i++){
Unp=2*y*Un-Unm;
Unm=Un;
Un =Unp;
sum+= Un*Coeffs[i+1]*(i+1.0);
}
return sum/(0.5*(hi-lo));
};
RealD approxInv(RealD z, RealD x0, int maxiter, RealD resid) {
RealD x = x0;
RealD eps;
int i;
for (i=0;i<maxiter;i++) {
eps = approx(x) - z;
if (fabs(eps / z) < resid)
return x;
x = x - eps / approxD(x);
}
return std::numeric_limits<double>::quiet_NaN();
}
// Implement the required interface
void operator() (LinearOperatorBase<Field> &Linop, const Field &in, Field &out) {
GridBase *grid=in.Grid();
int vol=grid->gSites();
typedef typename Field::vector_type vector_type;
Field T0(grid); T0 = in;
Field T1(grid);
Field T2(grid);
Field y(grid);
Field *Tnm = &T0;
Field *Tn = &T1;
Field *Tnp = &T2;
// Tn=T1 = (xscale M + mscale)in
RealD xscale = 2.0/(hi-lo);
RealD mscale = -(hi+lo)/(hi-lo);
Linop.HermOp(T0,y);
axpby(T1,xscale,mscale,y,in);
// sum = .5 c[0] T0 + c[1] T1
// out = ()*T0 + Coeffs[1]*T1;
axpby(out,0.5*Coeffs[0],Coeffs[1],T0,T1);
for(int n=2;n<order;n++){
Linop.HermOp(*Tn,y);
#if 0
auto y_v = y.View();
auto Tn_v = Tn->View();
auto Tnp_v = Tnp->View();
auto Tnm_v = Tnm->View();
constexpr int Nsimd = vector_type::Nsimd();
accelerator_forNB(ss, in.Grid()->oSites(), Nsimd, {
coalescedWrite(y_v[ss],xscale*y_v(ss)+mscale*Tn_v(ss));
coalescedWrite(Tnp_v[ss],2.0*y_v(ss)-Tnm_v(ss));
});
if ( Coeffs[n] != 0.0) {
axpy(out,Coeffs[n],*Tnp,out);
}
#else
axpby(y,xscale,mscale,y,(*Tn));
axpby(*Tnp,2.0,-1.0,y,(*Tnm));
if ( Coeffs[n] != 0.0) {
axpy(out,Coeffs[n],*Tnp,out);
}
#endif
// Cycle pointers to avoid copies
Field *swizzle = Tnm;
Tnm =Tn;
Tn =Tnp;
Tnp =swizzle;
}
}
};
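// Illustrative use (a sketch; the spectral bounds, order and operator are
// assumptions): approximate f(x)=1/x on [0.5,60] -- the spectrum of the
// operator must lie inside [lo,hi] -- and apply it to a field, giving an
// approximate inverse of whatever HermOp applies (typically Mdag M).
inline RealD ChebyInverseFunc(RealD x){ return 1.0/x; }

template<class Field>
void ExampleChebyInverse(LinearOperatorBase<Field> &HermOp, const Field &in, Field &out)
{
  Chebyshev<Field> Cheby(0.5, 60.0, 128, ChebyInverseFunc);  // lo, hi, order, target function
  Cheby(HermOp, in, out);                                    // out ~ HermOp^{-1} in
}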
template<class Field>
class ChebyshevLanczos : public Chebyshev<Field> {
private:
std::vector<RealD> Coeffs;
int order;
RealD alpha;
RealD beta;
RealD mu;
public:
ChebyshevLanczos(RealD _alpha,RealD _beta,RealD _mu,int _order) :
alpha(_alpha),
beta(_beta),
mu(_mu)
{
order=_order;
Coeffs.resize(order);
for(int i=0;i<_order;i++){
Coeffs[i] = 0.0;
}
Coeffs[order-1]=1.0;
};
void csv(std::ostream &out){
for (RealD x=-1.2*alpha; x<1.2*alpha; x+=(2.0*alpha)/10000) {
RealD f = approx(x);
out<< x<<" "<<f<<std::endl;
}
return;
}
RealD approx(RealD xx) // Convenience for plotting the approximation
{
RealD Tn;
RealD Tnm;
RealD Tnp;
Real aa = alpha * alpha;
Real bb = beta * beta;
RealD x = ( 2.0 * (xx-mu)*(xx-mu) - (aa+bb) ) / (aa-bb);
RealD y= x;
RealD T0=1;
RealD T1=y;
RealD sum;
sum = 0.5*Coeffs[0]*T0;
sum+= Coeffs[1]*T1;
Tn =T1;
Tnm=T0;
for(int i=2;i<order;i++){
Tnp=2*y*Tn-Tnm;
Tnm=Tn;
Tn =Tnp;
sum+= Tn*Coeffs[i];
}
return sum;
};
// shift_Multiply in Rudy's code
void AminusMuSq(LinearOperatorBase<Field> &Linop, const Field &in, Field &out)
{
GridBase *grid=in.Grid();
Field tmp(grid);
RealD aa= alpha*alpha;
RealD bb= beta * beta;
Linop.HermOp(in,out);
out = out - mu*in;
Linop.HermOp(out,tmp);
tmp = tmp - mu * out;
out = (2.0/ (aa-bb) ) * tmp - ((aa+bb)/(aa-bb))*in;
};
// Implement the required interface
void operator() (LinearOperatorBase<Field> &Linop, const Field &in, Field &out) {
GridBase *grid=in.Grid();
int vol=grid->gSites();
Field T0(grid); T0 = in;
Field T1(grid);
Field T2(grid);
Field y(grid);
Field *Tnm = &T0;
Field *Tn = &T1;
Field *Tnp = &T2;
// Tn=T1 = (xscale M )*in
AminusMuSq(Linop,T0,T1);
// sum = .5 c[0] T0 + c[1] T1
out = (0.5*Coeffs[0])*T0 + Coeffs[1]*T1;
for(int n=2;n<order;n++){
AminusMuSq(Linop,*Tn,y);
*Tnp=2.0*y-(*Tnm);
out=out+Coeffs[n]* (*Tnp);
// Cycle pointers to avoid copies
Field *swizzle = Tnm;
Tnm =Tn;
Tn =Tnp;
Tnp =swizzle;
}
}
};
NAMESPACE_END(Grid);
#endif


@ -0,0 +1,152 @@
/*************************************************************************************
Grid physics library, www.github.com/paboyle/Grid
Source file: ./lib/algorithms/approx/Forecast.h
Copyright (C) 2015
Author: Peter Boyle <paboyle@ph.ed.ac.uk>
Author: paboyle <paboyle@ph.ed.ac.uk>
Author: David Murphy <dmurphy@phys.columbia.edu>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */
#ifndef INCLUDED_FORECAST_H
#define INCLUDED_FORECAST_H
NAMESPACE_BEGIN(Grid);
// Abstract base class.
// Takes a matrix (Mat), a source (phi), and a vector of Fields (chi)
// and returns a forecasted solution psi to the system D*psi = phi.
template<class Matrix, class Field>
class Forecast
{
public:
virtual Field operator()(Matrix &Mat, const Field& phi, const std::vector<Field>& chi) = 0;
};
// Implementation of Brower et al.'s chronological inverter (arXiv:hep-lat/9509012),
// used to forecast solutions across poles of the EOFA heatbath.
//
// Modified from CPS (cps_pp/src/util/dirac_op/d_op_base/comsrc/minresext.C)
template<class Matrix, class Field>
class ChronoForecast : public Forecast<Matrix,Field>
{
public:
Field operator()(Matrix &Mat, const Field& phi, const std::vector<Field>& prev_solns)
{
int degree = prev_solns.size();
Field chi(phi); // forecasted solution
// Trivial cases
if(degree == 0){ chi = Zero(); return chi; }
else if(degree == 1){ return prev_solns[0]; }
// RealD dot;
ComplexD xp;
Field r(phi); // residual
Field Mv(phi);
std::vector<Field> v(prev_solns); // orthonormalized previous solutions
std::vector<Field> MdagMv(degree,phi);
// Array to hold the matrix elements
std::vector<std::vector<ComplexD>> G(degree, std::vector<ComplexD>(degree));
// Solution and source vectors
std::vector<ComplexD> a(degree);
std::vector<ComplexD> b(degree);
// Orthonormalize the vector basis
for(int i=0; i<degree; i++){
v[i] *= 1.0/std::sqrt(norm2(v[i]));
for(int j=i+1; j<degree; j++){ v[j] -= innerProduct(v[i],v[j]) * v[i]; }
}
// Perform sparse matrix multiplication and construct rhs
for(int i=0; i<degree; i++){
b[i] = innerProduct(v[i],phi);
Mat.M(v[i],Mv);
Mat.Mdag(Mv,MdagMv[i]);
G[i][i] = innerProduct(v[i],MdagMv[i]);
}
// Construct the matrix
for(int j=0; j<degree; j++){
for(int k=j+1; k<degree; k++){
G[j][k] = innerProduct(v[j],MdagMv[k]);
G[k][j] = conjugate(G[j][k]);
}}
// Gauss-Jordan elimination with partial pivoting
for(int i=0; i<degree; i++){
// Perform partial pivoting
int k = i;
for(int j=i+1; j<degree; j++){ if(abs(G[j][j]) > abs(G[k][k])){ k = j; } }
if(k != i){
xp = b[k];
b[k] = b[i];
b[i] = xp;
for(int j=0; j<degree; j++){
xp = G[k][j];
G[k][j] = G[i][j];
G[i][j] = xp;
}
}
// Convert matrix to upper triangular form
for(int j=i+1; j<degree; j++){
xp = G[j][i]/G[i][i];
b[j] -= xp * b[i];
for(int k=0; k<degree; k++){ G[j][k] -= xp*G[i][k]; }
}
}
// Use Gaussian elimination to solve equations and calculate initial guess
chi = Zero();
r = phi;
for(int i=degree-1; i>=0; i--){
a[i] = 0.0;
for(int j=i+1; j<degree; j++){ a[i] += G[i][j] * a[j]; }
a[i] = (b[i]-a[i])/G[i][i];
chi += a[i]*v[i];
r -= a[i]*MdagMv[i];
}
RealD true_r(0.0);
ComplexD tmp;
for(int i=0; i<degree; i++){
tmp = -b[i];
for(int j=0; j<degree; j++){ tmp += G[i][j]*a[j]; }
tmp = conjugate(tmp)*tmp;
true_r += std::sqrt(tmp.real());
}
RealD error = std::sqrt(norm2(r)/norm2(phi));
std::cout << GridLogMessage << "ChronoForecast: |res|/|src| = " << error << std::endl;
return chi;
};
};
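// Illustrative use (a sketch, not part of this header): seed a solve of
// Mdag M psi = phi from a history of previous solutions, then grow the
// history. MdagMLinearOperator and ConjugateGradient come from the operator
// and iterative-solver headers; the tolerance and iteration cap are arbitrary.
template<class Matrix, class Field>
void ExampleForecastedSolve(Matrix &Mat, const Field &phi, Field &psi,
                            std::vector<Field> &prev_solns)
{
  ChronoForecast<Matrix,Field> forecast;
  psi = forecast(Mat, phi, prev_solns);            // least-squares initial guess
  MdagMLinearOperator<Matrix,Field> HermOp(Mat);
  ConjugateGradient<Field> CG(1.0e-8, 10000);
  CG(HermOp, phi, psi);                            // refine the guess
  prev_solns.push_back(psi);                       // history for the next call
}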
NAMESPACE_END(Grid);
#endif


@ -0,0 +1,129 @@
#ifndef GRID_JACOBIPOLYNOMIAL_H
#define GRID_JACOBIPOLYNOMIAL_H
#include <Grid/algorithms/LinearOperator.h>
NAMESPACE_BEGIN(Grid);
template<class Field>
class JacobiPolynomial : public OperatorFunction<Field> {
private:
using OperatorFunction<Field>::operator();
int order;
RealD hi;
RealD lo;
RealD alpha;
RealD beta;
public:
void csv(std::ostream &out){
csv(out,lo,hi);
}
void csv(std::ostream &out,RealD llo,RealD hhi){
RealD diff = hhi-llo;
RealD delta = diff*1.0e-5;
for (RealD x=llo-delta; x<=hhi; x+=delta) {
RealD f = approx(x);
out<< x<<" "<<f <<std::endl;
}
return;
}
JacobiPolynomial(){};
JacobiPolynomial(RealD _lo,RealD _hi,int _order,RealD _alpha, RealD _beta)
{
lo=_lo;
hi=_hi;
alpha=_alpha;
beta=_beta;
order=_order;
};
RealD approx(RealD x) // Convenience for plotting the approximation
{
RealD Tn;
RealD Tnm;
RealD Tnp;
RealD y=( x-0.5*(hi+lo))/(0.5*(hi-lo));
RealD T0=1.0;
RealD T1=(alpha-beta)*0.5+(alpha+beta+2.0)*0.5*y;
Tn =T1;
Tnm=T0;
for(int n=2;n<=order;n++){
RealD cnp = 2.0*n*(n+alpha+beta)*(2.0*n-2.0+alpha+beta);
RealD cny = (2.0*n-2.0+alpha+beta)*(2.0*n-1.0+alpha+beta)*(2.0*n+alpha+beta);
RealD cn1 = (2.0*n+alpha+beta-1.0)*(alpha*alpha-beta*beta);
RealD cnm = - 2.0*(n+alpha-1.0)*(n+beta-1.0)*(2.0*n+alpha+beta);
Tnp= ( cny * y *Tn + cn1 * Tn + cnm * Tnm )/ cnp;
Tnm=Tn;
Tn =Tnp;
}
return Tnp;
};
// Implement the required interface
void operator() (LinearOperatorBase<Field> &Linop, const Field &in, Field &out) {
GridBase *grid=in.Grid();
int vol=grid->gSites();
Field T0(grid);
Field T1(grid);
Field T2(grid);
Field y(grid);
Field *Tnm = &T0;
Field *Tn = &T1;
Field *Tnp = &T2;
// RealD T0=1.0;
T0=in;
// RealD y=( x-0.5*(hi+lo))/(0.5*(hi-lo));
// = x * 2/(hi-lo) - (hi+lo)/(hi-lo)
RealD xscale = 2.0/(hi-lo);
RealD mscale = -(hi+lo)/(hi-lo);
Linop.HermOp(T0,y);
y=y*xscale+in*mscale;
// RealD T1=(alpha-beta)*0.5+(alpha+beta+2.0)*0.5*y;
RealD halfAmB = (alpha-beta)*0.5;
RealD halfApBp2= (alpha+beta+2.0)*0.5;
T1 = halfAmB * in + halfApBp2*y;
for(int n=2;n<=order;n++){
Linop.HermOp(*Tn,y);
y=xscale*y+mscale*(*Tn);
RealD cnp = 2.0*n*(n+alpha+beta)*(2.0*n-2.0+alpha+beta);
RealD cny = (2.0*n-2.0+alpha+beta)*(2.0*n-1.0+alpha+beta)*(2.0*n+alpha+beta);
RealD cn1 = (2.0*n+alpha+beta-1.0)*(alpha*alpha-beta*beta);
RealD cnm = - 2.0*(n+alpha-1.0)*(n+beta-1.0)*(2.0*n+alpha+beta);
// Tnp= ( cny * y *Tn + cn1 * Tn + cnm * Tnm )/ cnp;
cny=cny/cnp;
cn1=cn1/cnp;
cnm=cnm/cnp;
*Tnp=cny*y + cn1 *(*Tn) + cnm * (*Tnm);
// Cycle pointers to avoid copies
Field *swizzle = Tnm;
Tnm =Tn;
Tn =Tnp;
Tnp =swizzle;
}
out=*Tnp;
}
};
NAMESPACE_END(Grid);
#endif


@ -25,9 +25,10 @@ Author: Azusa Yamaguchi <ayamaguc@staffmail.ed.ac.uk>
See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */
#include <Grid.h>
#include <Grid/GridCore.h>
NAMESPACE_BEGIN(Grid);
namespace Grid {
double MultiShiftFunction::approx(double x)
{
double a = norm;
@ -53,4 +54,4 @@ void MultiShiftFunction::csv(std::ostream &out)
}
return;
}
}
NAMESPACE_END(Grid);


@ -29,7 +29,7 @@ Author: Peter Boyle <paboyle@ph.ed.ac.uk>
#ifndef MULTI_SHIFT_FUNCTION
#define MULTI_SHIFT_FUNCTION
namespace Grid {
NAMESPACE_BEGIN(Grid);
class MultiShiftFunction {
public:
@ -63,5 +63,5 @@ public:
}
};
}
NAMESPACE_END(Grid);
#endif


@ -20,7 +20,7 @@
#include<iomanip>
#include<cassert>
#include<algorithms/approx/Remez.h>
#include<Grid/algorithms/approx/Remez.h>
// Constructor
AlgRemez::AlgRemez(double lower, double upper, long precision)
@ -298,7 +298,7 @@ void AlgRemez::stpini(bigfloat *step) {
// Search for error maxima and minima
void AlgRemez::search(bigfloat *step) {
bigfloat a, q, xm, ym, xn, yn, xx0, xx1;
int i, j, meq, emsign, ensign, steps;
int i, meq, emsign, ensign, steps;
meq = neq + 1;
bigfloat *yy = new bigfloat[meq];
@ -306,7 +306,6 @@ void AlgRemez::search(bigfloat *step) {
bigfloat eclose = 1.0e30;
bigfloat farther = 0l;
j = 1;
xx0 = apstrt;
for (i = 0; i < meq; i++) {


@ -16,12 +16,12 @@
#define INCLUDED_ALG_REMEZ_H
#include <stddef.h>
#include <Config.h>
#include <Grid/GridStd.h>
#ifdef HAVE_GMP_H
#include <algorithms/approx/bigfloat.h>
#ifdef HAVE_LIBGMP
#include "bigfloat.h"
#else
#include <algorithms/approx/bigfloat_double.h>
#include "bigfloat_double.h"
#endif
#define JMAX 10000 //Maximum number of iterations of Newton's approximation


@ -0,0 +1,473 @@
#include<math.h>
#include<stdio.h>
#include<stdlib.h>
#include<string>
#include<iostream>
#include<iomanip>
#include<cassert>
#include<Grid/algorithms/approx/RemezGeneral.h>
// Constructor
AlgRemezGeneral::AlgRemezGeneral(double lower, double upper, long precision,
bigfloat (*f)(bigfloat x, void *data), void *data): f(f),
data(data),
prec(precision),
apstrt(lower), apend(upper), apwidt(upper - lower),
n(0), d(0), pow_n(0), pow_d(0)
{
bigfloat::setDefaultPrecision(prec);
std::cout<<"Approximation bounds are ["<<apstrt<<","<<apend<<"]\n";
std::cout<<"Precision of arithmetic is "<<precision<<std::endl;
}
//Determine the properties of the numerator and denominator polynomials
void AlgRemezGeneral::setupPolyProperties(int num_degree, int den_degree, PolyType num_type_in, PolyType den_type_in){
pow_n = num_degree;
pow_d = den_degree;
if(pow_n % 2 == 0 && num_type_in == PolyType::Odd) assert(0);
if(pow_n % 2 == 1 && num_type_in == PolyType::Even) assert(0);
if(pow_d % 2 == 0 && den_type_in == PolyType::Odd) assert(0);
if(pow_d % 2 == 1 && den_type_in == PolyType::Even) assert(0);
num_type = num_type_in;
den_type = den_type_in;
num_pows.resize(pow_n+1);
den_pows.resize(pow_d+1);
int n_in = 0;
bool odd = num_type == PolyType::Full || num_type == PolyType::Odd;
bool even = num_type == PolyType::Full || num_type == PolyType::Even;
for(int i=0;i<=pow_n;i++){
num_pows[i] = -1;
if(i % 2 == 0 && even) num_pows[i] = n_in++;
if(i % 2 == 1 && odd) num_pows[i] = n_in++;
}
std::cout << n_in << " terms in numerator" << std::endl;
--n_in; //power is 1 less than the number of terms, eg pow=1 a x^1 + b x^0
int d_in = 0;
odd = den_type == PolyType::Full || den_type == PolyType::Odd;
even = den_type == PolyType::Full || den_type == PolyType::Even;
for(int i=0;i<=pow_d;i++){
den_pows[i] = -1;
if(i % 2 == 0 && even) den_pows[i] = d_in++;
if(i % 2 == 1 && odd) den_pows[i] = d_in++;
}
std::cout << d_in << " terms in denominator" << std::endl;
--d_in;
n = n_in;
d = d_in;
}
//Setup algorithm
void AlgRemezGeneral::reinitializeAlgorithm(){
spread = 1.0e37;
iter = 0;
neq = n + d + 1; //not +2 because highest-power term in denominator is fixed to 1
param.resize(neq);
yy.resize(neq+1);
//Initialize linear equation temporaries
A.resize(neq*neq);
B.resize(neq);
IPS.resize(neq);
//Initialize maximum and minimum errors
xx.resize(neq+2);
mm.resize(neq+1);
initialGuess();
//Initialize search steps
step.resize(neq+1);
stpini();
}
double AlgRemezGeneral::generateApprox(const int num_degree, const int den_degree,
const PolyType num_type_in, const PolyType den_type_in,
const double _tolerance, const int report_freq){
//Setup the properties of the polynomial
setupPolyProperties(num_degree, den_degree, num_type_in, den_type_in);
//Setup the algorithm
reinitializeAlgorithm();
bigfloat tolerance = _tolerance;
//Iterate until convergence
while (spread > tolerance) {
if (iter++ % report_freq==0)
std::cout<<"Iteration " <<iter-1<<" spread "<<(double)spread<<" delta "<<(double)delta << std::endl;
equations();
if (delta < tolerance) {
std::cout<<"Iteration " << iter-1 << " delta too small (" << delta << "<" << tolerance << "), try increasing precision\n";
assert(0);
};
assert( delta>= tolerance );
search();
}
int sign;
double error = (double)getErr(mm[0],&sign);
std::cout<<"Converged at "<<iter<<" iterations; error = "<<error<<std::endl;
// Return the maximum error in the approximation
return error;
}
// Initial values of maximal and minimal errors
void AlgRemezGeneral::initialGuess(){
// Supply initial guesses for solution points
long ncheb = neq; // Degree of Chebyshev error estimate
// Find ncheb+1 extrema of Chebyshev polynomial
bigfloat a = ncheb;
bigfloat r;
mm[0] = apstrt;
for (long i = 1; i < ncheb; i++) {
r = 0.5 * (1 - cos((M_PI * i)/(double) a));
//r *= sqrt_bf(r);
r = (exp((double)r)-1.0)/(exp(1.0)-1.0);
mm[i] = apstrt + r * apwidt;
}
mm[ncheb] = apend;
a = 2.0 * ncheb;
for (long i = 0; i <= ncheb; i++) {
r = 0.5 * (1 - cos(M_PI * (2*i+1)/(double) a));
//r *= sqrt_bf(r); // Squeeze to low end of interval
r = (exp((double)r)-1.0)/(exp(1.0)-1.0);
xx[i] = apstrt + r * apwidt;
}
}
// Initialise step sizes
void AlgRemezGeneral::stpini(){
xx[neq+1] = apend;
delta = 0.25;
step[0] = xx[0] - apstrt;
for (int i = 1; i < neq; i++) step[i] = xx[i] - xx[i-1];
step[neq] = step[neq-1];
}
// Search for error maxima and minima
void AlgRemezGeneral::search(){
bigfloat a, q, xm, ym, xn, yn, xx1;
int emsign, ensign, steps;
int meq = neq + 1;
bigfloat eclose = 1.0e30;
bigfloat farther = 0l;
bigfloat xx0 = apstrt;
for (int i = 0; i < meq; i++) {
steps = 0;
xx1 = xx[i]; // Next zero
if (i == meq-1) xx1 = apend;
xm = mm[i];
ym = getErr(xm,&emsign);
q = step[i];
xn = xm + q;
if (xn < xx0 || xn >= xx1) { // Cannot skip over adjacent boundaries
q = -q;
xn = xm;
yn = ym;
ensign = emsign;
} else {
yn = getErr(xn,&ensign);
if (yn < ym) {
q = -q;
xn = xm;
yn = ym;
ensign = emsign;
}
}
while(yn >= ym) { // March until error becomes smaller.
if (++steps > 10)
break;
ym = yn;
xm = xn;
emsign = ensign;
a = xm + q;
if (a == xm || a <= xx0 || a >= xx1)
break;// Must not skip over the zeros either side.
xn = a;
yn = getErr(xn,&ensign);
}
mm[i] = xm; // Position of maximum
yy[i] = ym; // Value of maximum
if (eclose > ym) eclose = ym;
if (farther < ym) farther = ym;
xx0 = xx1; // Walk to next zero.
} // end of search loop
q = (farther - eclose); // Decrease step size if error spread increased
if (eclose != 0.0) q /= eclose; // Relative error spread
if (q >= spread)
delta *= 0.5; // Spread is increasing; decrease step size
spread = q;
for (int i = 0; i < neq; i++) {
q = yy[i+1];
if (q != 0.0) q = yy[i] / q - (bigfloat)1l;
else q = 0.0625;
if (q > (bigfloat)0.25) q = 0.25;
q *= mm[i+1] - mm[i];
step[i] = q * delta;
}
step[neq] = step[neq-1];
for (int i = 0; i < neq; i++) { // Insert new locations for the zeros.
xm = xx[i] - step[i];
if (xm <= apstrt)
continue;
if (xm >= apend)
continue;
if (xm <= mm[i])
xm = (bigfloat)0.5 * (mm[i] + xx[i]);
if (xm >= mm[i+1])
xm = (bigfloat)0.5 * (mm[i+1] + xx[i]);
xx[i] = xm;
}
}
// Solve the equations
void AlgRemezGeneral::equations(){
bigfloat x, y, z;
bigfloat *aa;
for (int i = 0; i < neq; i++) { // set up the equations for solution by simq()
int ip = neq * i; // offset to 1st element of this row of matrix
x = xx[i]; // the guess for this row
y = func(x); // right-hand-side vector
z = (bigfloat)1l;
aa = A.data()+ip;
int t = 0;
for (int j = 0; j <= pow_n; j++) {
if(num_pows[j] != -1){ *aa++ = z; t++; }
z *= x;
}
assert(t == n+1);
z = (bigfloat)1l;
t = 0;
for (int j = 0; j < pow_d; j++) {
if(den_pows[j] != -1){ *aa++ = -y * z; t++; }
z *= x;
}
assert(t == d);
B[i] = y * z; // Right hand side vector
}
// Solve the simultaneous linear equations.
if (simq()){
std::cout<<"simq failed\n";
exit(0);
}
}
// Evaluate the rational form P(x)/Q(x) using coefficients
// from the solution vector param
bigfloat AlgRemezGeneral::approx(const bigfloat x) const{
// Work backwards toward the constant term.
int c = n;
bigfloat yn = param[c--]; // Highest order numerator coefficient
for (int i = pow_n-1; i >= 0; i--) yn = x * yn + (num_pows[i] != -1 ? param[c--] : bigfloat(0l));
c = n+d;
bigfloat yd = 1l; //Highest degree coefficient is 1.0
for (int i = pow_d-1; i >= 0; i--) yd = x * yd + (den_pows[i] != -1 ? param[c--] : bigfloat(0l));
return(yn/yd);
}
// Compute size and sign of the approximation error at x
bigfloat AlgRemezGeneral::getErr(bigfloat x, int *sign) const{
bigfloat f = func(x);
bigfloat e = approx(x) - f;
if (f != 0) e /= f;
if (e < (bigfloat)0.0) {
*sign = -1;
e = -e;
}
else *sign = 1;
return(e);
}
// Solve the system AX=B
int AlgRemezGeneral::simq(){
int ip, ipj, ipk, ipn;
int idxpiv;
int kp, kp1, kpk, kpn;
int nip, nkp;
bigfloat em, q, rownrm, big, size, pivot, sum;
bigfloat *aa;
bigfloat *X = param.data();
int n = neq;
int nm1 = n - 1;
// Initialize IPS and X
int ij = 0;
for (int i = 0; i < n; i++) {
IPS[i] = i;
rownrm = 0.0;
for(int j = 0; j < n; j++) {
q = abs_bf(A[ij]);
if(rownrm < q) rownrm = q;
++ij;
}
if (rownrm == (bigfloat)0l) {
std::cout<<"simq rownrm=0\n";
return(1);
}
X[i] = (bigfloat)1.0 / rownrm;
}
for (int k = 0; k < nm1; k++) {
big = 0.0;
idxpiv = 0;
for (int i = k; i < n; i++) {
ip = IPS[i];
ipk = n*ip + k;
size = abs_bf(A[ipk]) * X[ip];
if (size > big) {
big = size;
idxpiv = i;
}
}
if (big == (bigfloat)0l) {
std::cout<<"simq big=0\n";
return(2);
}
if (idxpiv != k) {
int j = IPS[k];
IPS[k] = IPS[idxpiv];
IPS[idxpiv] = j;
}
kp = IPS[k];
kpk = n*kp + k;
pivot = A[kpk];
kp1 = k+1;
for (int i = kp1; i < n; i++) {
ip = IPS[i];
ipk = n*ip + k;
em = -A[ipk] / pivot;
A[ipk] = -em;
nip = n*ip;
nkp = n*kp;
aa = A.data()+nkp+kp1;
for (int j = kp1; j < n; j++) {
ipj = nip + j;
A[ipj] = A[ipj] + em * *aa++;
}
}
}
kpn = n * IPS[n-1] + n - 1; // last element of row IPS[n-1]
if (A[kpn] == (bigfloat)0l) {
std::cout<<"simq A[kpn]=0\n";
return(3);
}
ip = IPS[0];
X[0] = B[ip];
for (int i = 1; i < n; i++) {
ip = IPS[i];
ipj = n * ip;
sum = 0.0;
for (int j = 0; j < i; j++) {
sum += A[ipj] * X[j];
++ipj;
}
X[i] = B[ip] - sum;
}
ipn = n * IPS[n-1] + n - 1;
X[n-1] = X[n-1] / A[ipn];
for (int iback = 1; iback < n; iback++) {
//i goes (n-2),...,0
int i = nm1 - iback;
ip = IPS[i];
nip = n*ip;
sum = 0.0;
aa = A.data()+nip+i+1;
for (int j= i + 1; j < n; j++)
sum += *aa++ * X[j];
X[i] = (X[i] - sum) / A[nip+i];
}
return(0);
}
void AlgRemezGeneral::csv(std::ostream & os) const{
os << "Numerator" << std::endl;
for(int i=0;i<=pow_n;i++){
os << getCoeffNum(i) << "*x^" << i;
if(i!=pow_n) os << " + ";
}
os << std::endl;
os << "Denominator" << std::endl;
for(int i=0;i<=pow_d;i++){
os << getCoeffDen(i) << "*x^" << i;
if(i!=pow_d) os << " + ";
}
os << std::endl;
//For a true minimax solution the errors should all be equal and the signs should oscillate +-+-+- etc
int sign;
os << "Errors at maxima: coordinate, error, (sign)" << std::endl;
for(int i=0;i<neq+1;i++){
os << mm[i] << " " << getErr(mm[i],&sign) << " (" << sign << ")" << std::endl;
}
os << "Scan over range:" << std::endl;
int npt = 60;
bigfloat dlt = (apend - apstrt)/bigfloat(npt-1);
for (bigfloat x=apstrt; x<=apend; x = x + dlt) {
double f = evaluateFunc(x);
double r = evaluateApprox(x);
os<< x<<","<<r<<","<<f<<","<<r-f<<std::endl;
}
return;
}


@ -0,0 +1,170 @@
/*
C.Kelly Jan 2020 based on implementation by M. Clark May 2005
AlgRemezGeneral is an implementation of the Remez algorithm for approximating an arbitrary function by a rational polynomial
It includes optional restriction to odd/even polynomials for the numerator and/or denominator
*/
#ifndef INCLUDED_ALG_REMEZ_GENERAL_H
#define INCLUDED_ALG_REMEZ_GENERAL_H
#include <stddef.h>
#include <Grid/GridStd.h>
#ifdef HAVE_LIBGMP
#include "bigfloat.h"
#else
#include "bigfloat_double.h"
#endif
class AlgRemezGeneral{
public:
enum PolyType { Even, Odd, Full };
private:
// In GSL-style, pass the function as a function pointer. Any data required to evaluate the function is passed in as a void pointer
bigfloat (*f)(bigfloat x, void *data);
void *data;
// The approximation parameters
std::vector<bigfloat> param;
bigfloat norm;
// The number of non-zero terms in the numerator and denominator
int n, d;
// The numerator and denominator degree (i.e. the largest power)
int pow_n, pow_d;
// Specify if the numerator and/or denominator are odd/even polynomials
PolyType num_type;
PolyType den_type;
std::vector<int> num_pows; //contains the mapping, with -1 if not present
std::vector<int> den_pows;
// The bounds of the approximation
bigfloat apstrt, apwidt, apend;
// Variables used to calculate the approximation
int nd1, iter;
std::vector<bigfloat> xx;
std::vector<bigfloat> mm;
std::vector<bigfloat> step;
bigfloat delta, spread;
// Variables used in search
std::vector<bigfloat> yy;
// Variables used in solving linear equations
std::vector<bigfloat> A;
std::vector<bigfloat> B;
std::vector<int> IPS;
// The number of equations we must solve at each iteration (n+d+1)
int neq;
// The precision of the GNU MP library
long prec;
// Initialize member variables associated with the polynomial's properties
void setupPolyProperties(int num_degree, int den_degree, PolyType num_type_in, PolyType den_type_in);
// Initial values of maximal and minimal errors
void initialGuess();
// Initialise step sizes
void stpini();
// Initialize the algorithm
void reinitializeAlgorithm();
// Solve the equations
void equations();
// Search for error maxima and minima
void search();
// Calculate function required for the approximation
inline bigfloat func(bigfloat x) const{
return f(x, data);
}
// Compute size and sign of the approximation error at x
bigfloat getErr(bigfloat x, int *sign) const;
// Solve the system AX=B where X = param
int simq();
// Evaluate the rational form P(x)/Q(x) using coefficients from the solution vector param
bigfloat approx(bigfloat x) const;
public:
AlgRemezGeneral(double lower, double upper, long prec,
bigfloat (*f)(bigfloat x, void *data), void *data);
inline int getDegree(void) const{
assert(n==d);
return n;
}
// Reset the bounds of the approximation
inline void setBounds(double lower, double upper) {
apstrt = lower;
apend = upper;
apwidt = apend - apstrt;
}
// Get the bounds of the approximation
inline void getBounds(double &lower, double &upper) const{
lower=(double)apstrt;
upper=(double)apend;
}
// Run the algorithm to generate the rational approximation
double generateApprox(int num_degree, int den_degree,
PolyType num_type, PolyType den_type,
const double tolerance = 1e-15, const int report_freq = 1000);
inline double generateApprox(int num_degree, int den_degree,
const double tolerance = 1e-15, const int report_freq = 1000){
return generateApprox(num_degree, den_degree, Full, Full, tolerance, report_freq);
}
// Evaluate the rational form P(x)/Q(x) using coefficients from the
// solution vector param
inline double evaluateApprox(double x) const{
return (double)approx((bigfloat)x);
}
// Evaluate the rational form Q(x)/P(x) using coefficients from the solution vector param
inline double evaluateInverseApprox(double x) const{
return 1.0/(double)approx((bigfloat)x);
}
// Calculate function required for the approximation
inline double evaluateFunc(double x) const{
return (double)func((bigfloat)x);
}
// Calculate inverse function required for the approximation
inline double evaluateInverseFunc(double x) const{
return 1.0/(double)func((bigfloat)x);
}
// Dump csv of function, approx and error
void csv(std::ostream &os = std::cout) const;
// Get the coefficient of the term x^i in the numerator
inline double getCoeffNum(const int i) const{
return num_pows[i] == -1 ? 0. : double(param[num_pows[i]]);
}
// Get the coefficient of the term x^i in the denominator
inline double getCoeffDen(const int i) const{
if(i == pow_d) return 1.0;
else return den_pows[i] == -1 ? 0. : double(param[den_pows[i]+n+1]);
}
};
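// Illustrative driver (a sketch, not part of the header): build a (12,12)
// rational approximation to 1/sqrt(x) on [1e-3,1] and dump it. The degrees,
// bounds, tolerance and 64-bit precision are arbitrary example choices;
// sqrt_bf comes from the bigfloat layer included above.
inline bigfloat example_inv_sqrt(bigfloat x, void *data){ return bigfloat(1l)/sqrt_bf(x); }

inline void ExampleRemezGeneral(void)
{
  AlgRemezGeneral remez(1e-3, 1.0, 64, &example_inv_sqrt, nullptr);
  double err = remez.generateApprox(12, 12, 1e-15, 1000);  // Full/Full numerator and denominator
  std::cout << "max relative error " << err << std::endl;
  remez.csv(std::cout);
}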
#endif


@ -0,0 +1,183 @@
/*************************************************************************************
Grid physics library, www.github.com/paboyle/Grid
Source file: ./lib/algorithms/approx/ZMobius.cc
Copyright (C) 2015
Author: Christopher Kelly <ckelly@phys.columbia.edu>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */
#include <Grid/algorithms/approx/ZMobius.h>
#include <Grid/algorithms/approx/RemezGeneral.h>
NAMESPACE_BEGIN(Grid);
NAMESPACE_BEGIN(Approx);
//Compute the tanh approximation
inline double epsilonMobius(const double x, const std::vector<ComplexD> &w){
int Ls = w.size();
ComplexD fxp = 1., fmp = 1.;
for(int i=0;i<Ls;i++){
fxp = fxp * ( w[i] + x );
fmp = fmp * ( w[i] - x );
}
return ((fxp - fmp)/(fxp + fmp)).real();
}
inline double epsilonMobius(const double x, const std::vector<RealD> &w){
int Ls = w.size();
double fxp = 1., fmp = 1.;
for(int i=0;i<Ls;i++){
fxp = fxp * ( w[i] + x );
fmp = fmp * ( w[i] - x );
}
return (fxp - fmp)/(fxp + fmp);
}
//Compute the tanh approximation in a form suitable for the Remez
bigfloat epsilonMobius(bigfloat x, void* data){
const std::vector<RealD> &omega = *( (std::vector<RealD> const*)data );
bigfloat fxp(1.0);
bigfloat fmp(1.0);
for(int i=0;i<omega.size();i++){
fxp = fxp * ( bigfloat(omega[i]) + x);
fmp = fmp * ( bigfloat(omega[i]) - x);
}
return (fxp - fmp)/(fxp + fmp);
}
//Compute the Zmobius Omega parameters suitable for eigenvalue range -lambda_bound <= lambda <= lambda_bound
//Note omega_i = 1/(b_i + c_i) where b_i and c_i are the Mobius parameters
void computeZmobiusOmega(std::vector<ComplexD> &omega_out, const int Ls_out,
const std::vector<RealD> &omega_in, const int Ls_in,
const RealD lambda_bound){
assert(omega_in.size() == Ls_in);
omega_out.resize(Ls_out);
//Use the Remez algorithm to generate the appropriate rational polynomial
//For an odd polynomial, to satisfy the Haar condition we must take either the positive or the negative half of the range (cf https://arxiv.org/pdf/0803.0439.pdf page 6)
AlgRemezGeneral remez(0, lambda_bound, 64, &epsilonMobius, (void*)&omega_in);
remez.generateApprox(Ls_out-1, Ls_out,AlgRemezGeneral::Odd, AlgRemezGeneral::Even, 1e-15, 100);
remez.csv(std::cout);
//The rational approximation has the form [ f(x) - f(-x) ] / [ f(x) + f(-x) ] where f(x) = \Prod_{i=0}^{L_s-1} ( \omega_i + x )
//cf https://academiccommons.columbia.edu/doi/10.7916/D8T72HD7 pg 102
//omega_i are therefore the negative of the complex roots of f(x)
//We can find the roots by recognizing that the eigenvalues of a matrix A are the roots of the characteristic polynomial
// \rho(\lambda) = det( A - \lambda I ) where I is the unit matrix
//The matrix whose characteristic polynomial is an arbitrary monic polynomial a0 + a1 x + a2 x^2 + ... x^n is the companion matrix
// A = | 0 1 0 0 0 .... 0 |
// | 0 0 1 0 0 .... 0 |
// | : : : : : : |
// | 0 0 0 0 0 .... 1 |
// | -a0 -a1 -a2 ... ... -an|
//Note the Remez defines the largest power to have unit coefficient
std::vector<RealD> coeffs(Ls_out+1);
for(int i=0;i<Ls_out+1;i+=2) coeffs[i] = remez.getCoeffDen(i); //even powers
for(int i=1;i<Ls_out+1;i+=2) coeffs[i] = remez.getCoeffNum(i); //odd powers
std::vector<std::complex<RealD> > roots(Ls_out);
//Form the companion matrix
Eigen::MatrixXd compn(Ls_out,Ls_out);
for(int i=0;i<Ls_out-1;i++) compn(i,0) = 0.;
compn(Ls_out - 1, 0) = -coeffs[0];
for(int j=1;j<Ls_out;j++){
for(int i=0;i<Ls_out-1;i++) compn(i,j) = i == j-1 ? 1. : 0.;
compn(Ls_out - 1, j) = -coeffs[j];
}
//Eigensolve
Eigen::EigenSolver<Eigen::MatrixXd> slv(compn, false);
const auto & ev = slv.eigenvalues();
for(int i=0;i<Ls_out;i++)
omega_out[i] = -ev(i);
//Sort ascending (smallest at start of vector!)
std::sort(omega_out.begin(), omega_out.end(),
[&](const ComplexD &a, const ComplexD &b){ return a.real() < b.real() || (a.real() == b.real() && a.imag() < b.imag()); });
//McGlynn thesis pg 122 suggests improved iteration counts if the magnitude of omega diminishes towards the center of the 5th dimension
std::vector<ComplexD> omega_tmp = omega_out;
int s_low=0, s_high=Ls_out-1, ss=0;
for(int s_from = Ls_out-1; s_from >= 0; s_from--){ //loop from largest omega
int s_to;
if(ss % 2 == 0){
s_to = s_low++;
}else{
s_to = s_high--;
}
omega_out[s_to] = omega_tmp[s_from];
++ss;
}
std::cout << "Resulting omega_i:" << std::endl;
for(int i=0;i<Ls_out;i++)
std::cout << omega_out[i] << std::endl;
std::cout << "Test result matches the approximate polynomial found by the Remez" << std::endl;
std::cout << "<x> <remez approx> <poly approx> <diff poly approx remez approx> <exact> <diff poly approx exact>\n";
int npt = 60;
double dlt = lambda_bound/double(npt-1);
for (int i =0; i<npt; i++){
double x = i*dlt;
double r = remez.evaluateApprox(x);
double p = epsilonMobius(x, omega_out);
double e = epsilonMobius(x, omega_in);
std::cout << x<< " " << r << " " << p <<" " <<r-p << " " << e << " " << e-p << std::endl;
}
}
//mobius_param = b+c with b-c=1
void computeZmobiusOmega(std::vector<ComplexD> &omega_out, const int Ls_out, const RealD mobius_param, const int Ls_in, const RealD lambda_bound){
std::vector<RealD> omega_in(Ls_in, 1./mobius_param);
computeZmobiusOmega(omega_out, Ls_out, omega_in, Ls_in, lambda_bound);
}
//ZMobius class takes gamma_i = (b+c) omega_i as its input, where b, c are factored out
void computeZmobiusGamma(std::vector<ComplexD> &gamma_out,
const RealD mobius_param_out, const int Ls_out,
const RealD mobius_param_in, const int Ls_in,
const RealD lambda_bound){
computeZmobiusOmega(gamma_out, Ls_out, mobius_param_in, Ls_in, lambda_bound);
for(int i=0;i<Ls_out;i++) gamma_out[i] = gamma_out[i] * mobius_param_out;
}
//Assumes mobius_param_out == mobius_param_in
void computeZmobiusGamma(std::vector<ComplexD> &gamma_out, const int Ls_out, const RealD mobius_param, const int Ls_in, const RealD lambda_bound){
computeZmobiusGamma(gamma_out, mobius_param, Ls_out, mobius_param, Ls_in, lambda_bound);
}
NAMESPACE_END(Approx);
NAMESPACE_END(Grid);
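The companion-matrix trick described in the comments above is self-contained and easy to check outside Grid. A minimal Eigen-only sketch using the same layout as compn (ones on the superdiagonal, -a_j in the last row), applied to an illustrative cubic whose roots are known:

// Roots of the monic polynomial a[0] + a[1]*x + ... + a[n-1]*x^{n-1} + x^n
// via the eigenvalues of its companion matrix.  Needs only Eigen.
#include <Eigen/Dense>
#include <iostream>
#include <vector>

int main(){
  // x^3 - 6x^2 + 11x - 6 = (x-1)(x-2)(x-3): coefficients below the leading x^3 term
  std::vector<double> a = {-6.0, 11.0, -6.0};
  const int n = (int)a.size();

  Eigen::MatrixXd compn = Eigen::MatrixXd::Zero(n, n);
  for(int i=0;i<n-1;i++) compn(i, i+1) = 1.0;   // superdiagonal of ones
  for(int j=0;j<n;j++)   compn(n-1, j) = -a[j]; // last row holds -a_j

  Eigen::EigenSolver<Eigen::MatrixXd> slv(compn, false); // eigenvalues only
  for(int i=0;i<n;i++) std::cout << slv.eigenvalues()(i) << std::endl; // expect 1, 2, 3
  return 0;
}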

View File

@ -0,0 +1,57 @@
/*************************************************************************************
Grid physics library, www.github.com/paboyle/Grid
Source file: ./lib/algorithms/approx/ZMobius.h
Copyright (C) 2015
Author: Christopher Kelly <ckelly@phys.columbia.edu>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */
#ifndef GRID_ZMOBIUS_APPROX_H
#define GRID_ZMOBIUS_APPROX_H
#include <Grid/GridCore.h>
NAMESPACE_BEGIN(Grid);
NAMESPACE_BEGIN(Approx);
//Compute the Zmobius Omega parameters suitable for eigenvalue range -lambda_bound <= lambda <= lambda_bound
//Note omega_i = 1/(b_i + c_i) where b_i and c_i are the Mobius parameters
void computeZmobiusOmega(std::vector<ComplexD> &omega_out, const int Ls_out,
const std::vector<RealD> &omega_in, const int Ls_in,
const RealD lambda_bound);
//mobius_param = b+c with b-c=1
void computeZmobiusOmega(std::vector<ComplexD> &omega_out, const int Ls_out, const RealD mobius_param, const int Ls_in, const RealD lambda_bound);
//ZMobius class takes gamma_i = (b+c) omega_i as its input, where b, c are factored out
void computeZmobiusGamma(std::vector<ComplexD> &gamma_out,
const RealD mobius_param_out, const int Ls_out,
const RealD mobius_param_in, const int Ls_in,
const RealD lambda_bound);
//Assumes mobius_param_out == mobius_param_in
void computeZmobiusGamma(std::vector<ComplexD> &gamma_out, const int Ls_out, const RealD mobius_param, const int Ls_in, const RealD lambda_bound);
NAMESPACE_END(Approx);
NAMESPACE_END(Grid);
#endif
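A hypothetical driver for the interface declared above, assuming the program is compiled and linked against Grid and that the standard Grid_init/Grid_finalize entry points are used; the Ls values, b+c and lambda_bound below are illustrative numbers only, not recommendations:

#include <Grid/Grid.h>
#include <Grid/algorithms/approx/ZMobius.h>
#include <iostream>
#include <vector>

int main(int argc, char **argv)
{
  Grid::Grid_init(&argc, &argv);

  const int Ls_in  = 16;                  // input (Mobius) Ls, illustrative
  const int Ls_out = 10;                  // compressed (ZMobius) Ls, illustrative
  const Grid::RealD mobius_param = 1.5;   // b+c with b-c=1, illustrative
  const Grid::RealD lambda_bound = 1.42;  // eigenvalue range bound, illustrative

  std::vector<Grid::ComplexD> gamma;
  Grid::Approx::computeZmobiusGamma(gamma, Ls_out, mobius_param, Ls_in, lambda_bound);

  for(int i=0;i<Ls_out;i++) std::cout << gamma[i] << std::endl;

  Grid::Grid_finalize();
  return 0;
}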

View File

@ -58,8 +58,8 @@
/* Compute the partial fraction expansion coefficients (alpha) from the
* factored form */
namespace Grid {
namespace Approx {
NAMESPACE_BEGIN(Grid);
NAMESPACE_BEGIN(Approx);
static void construct_partfrac(izd *z) {
int dn = z -> dn, dd = z -> dd, type = z -> type;
@ -516,7 +516,9 @@ zolotarev_data* higham(PRECISION epsilon, int n) {
free(d);
return zd;
}
}}
NAMESPACE_END(Approx);
NAMESPACE_END(Grid);
#ifdef TEST
@ -585,6 +587,7 @@ static PRECISION zolotarev_cayley_eval(PRECISION x, zolotarev_data* rdata) {
return (ONE - T) / (ONE + T);
}
/* Test program. Apart from printing out the parameters for R(x) it produces
* the following data files for plotting (unless NPLOT is defined):
*
@ -723,5 +726,5 @@ int main(int argc, char** argv) {
return EXIT_SUCCESS;
}
#endif /* TEST */

View File

@ -1,13 +1,13 @@
/* -*- Mode: C; comment-column: 22; fill-column: 79; -*- */
#ifdef __cplusplus
namespace Grid {
namespace Approx {
#include <Grid/Namespace.h>
NAMESPACE_BEGIN(Grid);
NAMESPACE_BEGIN(Approx);
#endif
#define HVERSION Header Time-stamp: <14-OCT-2004 09:26:51.00 adk@MISSCONTRARY>
#ifndef ZOLOTAREV_INTERNAL
#ifndef PRECISION
#define PRECISION double
@ -83,5 +83,6 @@ void zolotarev_free(zolotarev_data *zdata);
#endif
#ifdef __cplusplus
}}
NAMESPACE_END(Approx);
NAMESPACE_END(Grid);
#endif

View File

@ -10,10 +10,12 @@
#ifndef INCLUDED_BIGFLOAT_H
#define INCLUDED_BIGFLOAT_H
#define __GMP_WITHIN_CONFIGURE
#include <gmp.h>
#include <mpf2mpfr.h>
#include <mpfr.h>
#undef __GMP_WITHIN_CONFIGURE
class bigfloat {
private:

View File

@ -25,6 +25,10 @@ Author: Peter Boyle <paboyle@ph.ed.ac.uk>
See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */
#ifndef INCLUDED_BIGFLOAT_DOUBLE_H
#define INCLUDED_BIGFLOAT_DOUBLE_H
#include <math.h>
typedef double mfloat;
@ -186,4 +190,6 @@ public:
// friend bigfloat& random(void);
};
#endif

View File

@ -90,8 +90,8 @@ class TwoLevelFlexiblePcg : public LinearFunction<Field>
void operator() (const Field &src, Field &psi){
void operator() (const Field &src, Field &psi){
psi.checkerboard = src.checkerboard;
grid = src._grid;
psi.Checkerboard() = src.Checkerboard();
grid = src.Grid();
RealD f;
RealD rtzp,rtz,a,d,b;

View File

@ -0,0 +1,234 @@
/*************************************************************************************
Grid physics library, www.github.com/paboyle/Grid
Source file: ./lib/algorithms/iterative/BiCGSTAB.h
Copyright (C) 2015
Author: Azusa Yamaguchi <ayamaguc@staffmail.ed.ac.uk>
Author: Peter Boyle <paboyle@ph.ed.ac.uk>
Author: paboyle <paboyle@ph.ed.ac.uk>
Author: juettner <juettner@soton.ac.uk>
Author: David Murphy <djmurphy@mit.edu>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
See the full license in the file "LICENSE" in the top level distribution
directory
*************************************************************************************/
/* END LEGAL */
#ifndef GRID_BICGSTAB_H
#define GRID_BICGSTAB_H
NAMESPACE_BEGIN(Grid);
/////////////////////////////////////////////////////////////
// Base classes for iterative processes based on operators
// single input vec, single output vec.
/////////////////////////////////////////////////////////////
template <class Field>
class BiCGSTAB : public OperatorFunction<Field>
{
public:
using OperatorFunction<Field>::operator();
bool ErrorOnNoConverge; // throw an assert when the solver fails to converge.
// Defaults to true.
RealD Tolerance;
Integer MaxIterations;
Integer IterationsToComplete; //Number of iterations the CG took to finish. Filled in upon completion
BiCGSTAB(RealD tol, Integer maxit, bool err_on_no_conv = true) :
Tolerance(tol), MaxIterations(maxit), ErrorOnNoConverge(err_on_no_conv){};
void operator()(LinearOperatorBase<Field>& Linop, const Field& src, Field& psi)
{
psi.Checkerboard() = src.Checkerboard();
conformable(psi, src);
RealD cp(0), rho(1), rho_prev(0), alpha(1), beta(0), omega(1);
RealD a(0), bo(0), b(0), ssq(0);
Field p(src);
Field r(src);
Field rhat(src);
Field v(src);
Field s(src);
Field t(src);
Field h(src);
v = Zero();
p = Zero();
// Initial residual computation & set up
RealD guess = norm2(psi);
assert(std::isnan(guess) == 0);
Linop.Op(psi, v);
b = norm2(v);
r = src - v;
rhat = r;
a = norm2(r);
ssq = norm2(src);
std::cout << GridLogIterative << std::setprecision(8) << "BiCGSTAB: guess " << guess << std::endl;
std::cout << GridLogIterative << std::setprecision(8) << "BiCGSTAB: src " << ssq << std::endl;
std::cout << GridLogIterative << std::setprecision(8) << "BiCGSTAB: mp " << b << std::endl;
std::cout << GridLogIterative << std::setprecision(8) << "BiCGSTAB: r " << a << std::endl;
RealD rsq = Tolerance * Tolerance * ssq;
// Check if guess is really REALLY good :)
if(a <= rsq){ return; }
std::cout << GridLogIterative << std::setprecision(8) << "BiCGSTAB: k=0 residual " << a << " target " << rsq << std::endl;
GridStopWatch LinalgTimer;
GridStopWatch InnerTimer;
GridStopWatch AxpyNormTimer;
GridStopWatch LinearCombTimer;
GridStopWatch MatrixTimer;
GridStopWatch SolverTimer;
SolverTimer.Start();
int k;
for (k = 1; k <= MaxIterations; k++)
{
rho_prev = rho;
LinalgTimer.Start();
InnerTimer.Start();
ComplexD Crho = innerProduct(rhat,r);
InnerTimer.Stop();
rho = Crho.real();
beta = (rho / rho_prev) * (alpha / omega);
LinearCombTimer.Start();
bo = beta * omega;
{
autoView( p_v , p, AcceleratorWrite);
autoView( r_v , r, AcceleratorRead);
autoView( v_v , v, AcceleratorRead);
accelerator_for(ss, p_v.size(), Field::vector_object::Nsimd(),{
coalescedWrite(p_v[ss], beta*p_v(ss) - bo*v_v(ss) + r_v(ss));
});
}
LinearCombTimer.Stop();
LinalgTimer.Stop();
MatrixTimer.Start();
Linop.Op(p,v);
MatrixTimer.Stop();
LinalgTimer.Start();
InnerTimer.Start();
ComplexD Calpha = innerProduct(rhat,v);
InnerTimer.Stop();
alpha = rho / Calpha.real();
LinearCombTimer.Start();
{
autoView( p_v , p, AcceleratorRead);
autoView( r_v , r, AcceleratorRead);
autoView( v_v , v, AcceleratorRead);
autoView( psi_v,psi, AcceleratorRead);
autoView( h_v , h, AcceleratorWrite);
autoView( s_v , s, AcceleratorWrite);
accelerator_for(ss, h_v.size(), Field::vector_object::Nsimd(),{
coalescedWrite(h_v[ss], alpha*p_v(ss) + psi_v(ss));
});
accelerator_for(ss, s_v.size(), Field::vector_object::Nsimd(),{
coalescedWrite(s_v[ss], -alpha*v_v(ss) + r_v(ss));
});
}
LinearCombTimer.Stop();
LinalgTimer.Stop();
MatrixTimer.Start();
Linop.Op(s,t);
MatrixTimer.Stop();
LinalgTimer.Start();
InnerTimer.Start();
ComplexD Comega = innerProduct(t,s);
InnerTimer.Stop();
omega = Comega.real() / norm2(t);
LinearCombTimer.Start();
{
autoView( psi_v,psi, AcceleratorWrite);
autoView( r_v , r, AcceleratorWrite);
autoView( h_v , h, AcceleratorRead);
autoView( s_v , s, AcceleratorRead);
autoView( t_v , t, AcceleratorRead);
accelerator_for(ss, psi_v.size(), Field::vector_object::Nsimd(),{
coalescedWrite(psi_v[ss], h_v(ss) + omega * s_v(ss));
coalescedWrite(r_v[ss], -omega * t_v(ss) + s_v(ss));
});
}
LinearCombTimer.Stop();
cp = norm2(r);
LinalgTimer.Stop();
std::cout << GridLogIterative << "BiCGSTAB: Iteration " << k << " residual " << sqrt(cp/ssq) << " target " << Tolerance << std::endl;
// Stopping condition
if(cp <= rsq)
{
SolverTimer.Stop();
Linop.Op(psi, v);
p = v - src;
RealD srcnorm = sqrt(norm2(src));
RealD resnorm = sqrt(norm2(p));
RealD true_residual = resnorm / srcnorm;
std::cout << GridLogMessage << "BiCGSTAB Converged on iteration " << k << std::endl;
std::cout << GridLogMessage << "\tComputed residual " << sqrt(cp/ssq) << std::endl;
std::cout << GridLogMessage << "\tTrue residual " << true_residual << std::endl;
std::cout << GridLogMessage << "\tTarget " << Tolerance << std::endl;
std::cout << GridLogMessage << "Time breakdown " << std::endl;
std::cout << GridLogMessage << "\tElapsed " << SolverTimer.Elapsed() << std::endl;
std::cout << GridLogMessage << "\tMatrix " << MatrixTimer.Elapsed() << std::endl;
std::cout << GridLogMessage << "\tLinalg " << LinalgTimer.Elapsed() << std::endl;
std::cout << GridLogMessage << "\tInner " << InnerTimer.Elapsed() << std::endl;
std::cout << GridLogMessage << "\tAxpyNorm " << AxpyNormTimer.Elapsed() << std::endl;
std::cout << GridLogMessage << "\tLinearComb " << LinearCombTimer.Elapsed() << std::endl;
if(ErrorOnNoConverge){ assert(true_residual / Tolerance < 10000.0); }
IterationsToComplete = k;
return;
}
}
std::cout << GridLogMessage << "BiCGSTAB did NOT converge" << std::endl;
if(ErrorOnNoConverge){ assert(0); }
IterationsToComplete = k;
}
};
NAMESPACE_END(Grid);
#endif
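The accelerator kernels above fuse the textbook BiCGSTAB updates (p, h, s, psi, r) into coalesced writes. A minimal dense-matrix sketch of the same recurrences in plain double precision, useful for checking the update order; the small non-symmetric test system is illustrative and not Grid code, and the early exit on a tiny s is a textbook guard that the Grid version above omits:

#include <iostream>
#include <vector>

using Vec = std::vector<double>;
using Mat = std::vector<Vec>;

static double dot(const Vec &a, const Vec &b){
  double s = 0.0; for(int i=0;i<(int)a.size();i++) s += a[i]*b[i]; return s;
}
static Vec matvec(const Mat &A, const Vec &x){
  Vec y(x.size(), 0.0);
  for(int i=0;i<(int)A.size();i++) y[i] = dot(A[i], x);
  return y;
}

// Solve A x = b with BiCGSTAB; returns the iteration count
static int bicgstab(const Mat &A, const Vec &b, Vec &x, double tol, int maxit){
  const int N = (int)b.size();
  Vec Ax = matvec(A, x), r(N), rhat, p(N,0.0), v(N,0.0), h(N), s(N), t(N);
  for(int i=0;i<N;i++) r[i] = b[i] - Ax[i];
  rhat = r;
  double rho = 1.0, alpha = 1.0, omega = 1.0;
  const double rsq = tol*tol*dot(b,b);
  for(int k=1;k<=maxit;k++){
    double rho_prev = rho;
    rho = dot(rhat, r);
    double beta = (rho/rho_prev)*(alpha/omega);
    for(int i=0;i<N;i++) p[i] = r[i] + beta*(p[i] - omega*v[i]);      // p update
    v = matvec(A, p);
    alpha = rho / dot(rhat, v);
    for(int i=0;i<N;i++){ h[i] = x[i] + alpha*p[i]; s[i] = r[i] - alpha*v[i]; }
    if(dot(s,s) <= rsq){ x = h; return k; }                           // h is already good enough
    t = matvec(A, s);
    omega = dot(t,s) / dot(t,t);
    for(int i=0;i<N;i++){ x[i] = h[i] + omega*s[i]; r[i] = s[i] - omega*t[i]; }
    if(dot(r,r) <= rsq) return k;
  }
  return maxit;
}

int main(){
  Mat A = {{4,1},{2,3}};   // small non-symmetric test matrix
  Vec b = {1,2}, x = {0,0};
  int k = bicgstab(A, b, x, 1e-10, 100);
  std::cout << "iterations " << k << "  x = " << x[0] << " " << x[1]
            << "  (expect 0.1, 0.6)" << std::endl;
  return 0;
}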

View File

@ -0,0 +1,158 @@
/*************************************************************************************
Grid physics library, www.github.com/paboyle/Grid
Source file: ./lib/algorithms/iterative/BiCGSTABMixedPrec.h
Copyright (C) 2015
Author: Christopher Kelly <ckelly@phys.columbia.edu>
Author: David Murphy <djmurphy@mit.edu>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */
#ifndef GRID_BICGSTAB_MIXED_PREC_H
#define GRID_BICGSTAB_MIXED_PREC_H
NAMESPACE_BEGIN(Grid);
// Mixed precision restarted defect correction BiCGSTAB
template<class FieldD, class FieldF, typename std::enable_if< getPrecision<FieldD>::value == 2, int>::type = 0, typename std::enable_if< getPrecision<FieldF>::value == 1, int>::type = 0>
class MixedPrecisionBiCGSTAB : public LinearFunction<FieldD>
{
public:
RealD Tolerance;
RealD InnerTolerance; // Initial tolerance for inner CG. Defaults to Tolerance but can be changed
Integer MaxInnerIterations;
Integer MaxOuterIterations;
GridBase* SinglePrecGrid; // Grid for single-precision fields
RealD OuterLoopNormMult; // Stop the outer loop and move to a final double prec solve when the residual is OuterLoopNormMult * Tolerance
LinearOperatorBase<FieldF> &Linop_f;
LinearOperatorBase<FieldD> &Linop_d;
Integer TotalInnerIterations; //Number of inner CG iterations
Integer TotalOuterIterations; //Number of restarts
Integer TotalFinalStepIterations; //Number of CG iterations in final patch-up step
//Option to speed up *inner single precision* solves using a LinearFunction that produces a guess
LinearFunction<FieldF> *guesser;
MixedPrecisionBiCGSTAB(RealD tol, Integer maxinnerit, Integer maxouterit, GridBase* _sp_grid,
LinearOperatorBase<FieldF>& _Linop_f, LinearOperatorBase<FieldD>& _Linop_d) :
Linop_f(_Linop_f), Linop_d(_Linop_d), Tolerance(tol), InnerTolerance(tol), MaxInnerIterations(maxinnerit),
MaxOuterIterations(maxouterit), SinglePrecGrid(_sp_grid), OuterLoopNormMult(100.), guesser(NULL) {};
void useGuesser(LinearFunction<FieldF>& g){
guesser = &g;
}
void operator() (const FieldD& src_d_in, FieldD& sol_d)
{
TotalInnerIterations = 0;
GridStopWatch TotalTimer;
TotalTimer.Start();
int cb = src_d_in.Checkerboard();
sol_d.Checkerboard() = cb;
RealD src_norm = norm2(src_d_in);
RealD stop = src_norm * Tolerance*Tolerance;
GridBase* DoublePrecGrid = src_d_in.Grid();
FieldD tmp_d(DoublePrecGrid);
tmp_d.Checkerboard() = cb;
FieldD tmp2_d(DoublePrecGrid);
tmp2_d.Checkerboard() = cb;
FieldD src_d(DoublePrecGrid);
src_d = src_d_in; //source for next inner iteration, computed from residual during operation
RealD inner_tol = InnerTolerance;
FieldF src_f(SinglePrecGrid);
src_f.Checkerboard() = cb;
FieldF sol_f(SinglePrecGrid);
sol_f.Checkerboard() = cb;
BiCGSTAB<FieldF> CG_f(inner_tol, MaxInnerIterations);
CG_f.ErrorOnNoConverge = false;
GridStopWatch InnerCGtimer;
GridStopWatch PrecChangeTimer;
Integer &outer_iter = TotalOuterIterations; //so it will be equal to the final iteration count
for(outer_iter = 0; outer_iter < MaxOuterIterations; outer_iter++)
{
// Compute double precision rsd and also new RHS vector.
Linop_d.Op(sol_d, tmp_d);
RealD norm = axpy_norm(src_d, -1., tmp_d, src_d_in); //src_d is residual vector
std::cout << GridLogMessage << "MixedPrecisionBiCGSTAB: Outer iteration " << outer_iter << " residual " << norm << " target " << stop << std::endl;
if(norm < OuterLoopNormMult * stop){
std::cout << GridLogMessage << "MixedPrecisionBiCGSTAB: Outer iteration converged on iteration " << outer_iter << std::endl;
break;
}
while(norm * inner_tol * inner_tol < stop){ inner_tol *= 2; } // inner_tol = sqrt(stop/norm) ??
PrecChangeTimer.Start();
precisionChange(src_f, src_d);
PrecChangeTimer.Stop();
sol_f = Zero();
//Optionally improve inner solver guess (eg using known eigenvectors)
if(guesser != NULL){ (*guesser)(src_f, sol_f); }
//Inner CG
CG_f.Tolerance = inner_tol;
InnerCGtimer.Start();
CG_f(Linop_f, src_f, sol_f);
InnerCGtimer.Stop();
TotalInnerIterations += CG_f.IterationsToComplete;
//Convert sol back to double and add to double prec solution
PrecChangeTimer.Start();
precisionChange(tmp_d, sol_f);
PrecChangeTimer.Stop();
axpy(sol_d, 1.0, tmp_d, sol_d);
}
//Final trial CG
std::cout << GridLogMessage << "MixedPrecisionBiCGSTAB: Starting final patch-up double-precision solve" << std::endl;
BiCGSTAB<FieldD> CG_d(Tolerance, MaxInnerIterations);
CG_d(Linop_d, src_d_in, sol_d);
TotalFinalStepIterations = CG_d.IterationsToComplete;
TotalTimer.Stop();
std::cout << GridLogMessage << "MixedPrecisionBiCGSTAB: Inner CG iterations " << TotalInnerIterations << " Restarts " << TotalOuterIterations << " Final CG iterations " << TotalFinalStepIterations << std::endl;
std::cout << GridLogMessage << "MixedPrecisionBiCGSTAB: Total time " << TotalTimer.Elapsed() << " Precision change " << PrecChangeTimer.Elapsed() << " Inner CG total " << InnerCGtimer.Elapsed() << std::endl;
}
};
NAMESPACE_END(Grid);
#endif
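The outer loop above is a standard restarted defect correction: form the residual in double, solve the defect equation approximately in single precision, and accumulate the correction into the double-precision solution. A minimal sketch of that pattern with a deliberately crude single-precision inner solve; the 2x2 SPD system and the one-sweep Jacobi "inner solver" are illustrative stand-ins, not Grid code:

#include <iostream>

int main(){
  const double A[2][2] = {{4,1},{1,3}};
  const double b[2]    = {1,2};
  double x[2]          = {0,0};
  const double tol = 1e-12;
  const double bb  = b[0]*b[0] + b[1]*b[1];

  for(int outer=0; outer<100; outer++){
    // double-precision residual r = b - A x   (cf. Linop_d.Op + axpy_norm above)
    double r0 = b[0] - (A[0][0]*x[0] + A[0][1]*x[1]);
    double r1 = b[1] - (A[1][0]*x[0] + A[1][1]*x[1]);
    double rr = r0*r0 + r1*r1;
    if(rr < tol*tol*bb){
      std::cout << "converged after " << outer << " restarts" << std::endl;
      break;
    }
    // single-precision inner solve of A e = r (here: one Jacobi sweep)
    float e0 = float(r0) / float(A[0][0]);
    float e1 = float(r1) / float(A[1][1]);
    // accumulate the correction in double   (cf. precisionChange + axpy above)
    x[0] += double(e0);
    x[1] += double(e1);
  }
  std::cout << "x = " << x[0] << " " << x[1] << "  (expect 1/11, 7/11)" << std::endl;
  return 0;
}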

View File

@ -0,0 +1,698 @@
/*************************************************************************************
Grid physics library, www.github.com/paboyle/Grid
Source file: ./lib/algorithms/iterative/BlockConjugateGradient.h
Copyright (C) 2017
Author: Azusa Yamaguchi <ayamaguc@staffmail.ed.ac.uk>
Author: Peter Boyle <paboyle@ph.ed.ac.uk>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
See the full license in the file "LICENSE" in the top level distribution
directory
*************************************************************************************/
/* END LEGAL */
#pragma once
NAMESPACE_BEGIN(Grid);
enum BlockCGtype { BlockCG, BlockCGrQ, CGmultiRHS, BlockCGVec, BlockCGrQVec };
//////////////////////////////////////////////////////////////////////////
// Block conjugate gradient. Dimension zero should be the block direction
//////////////////////////////////////////////////////////////////////////
template <class Field>
class BlockConjugateGradient : public OperatorFunction<Field> {
public:
typedef typename Field::scalar_type scomplex;
int blockDim ;
int Nblock;
BlockCGtype CGtype;
bool ErrorOnNoConverge; // throw an assert when the CG fails to converge.
// Defaults true.
RealD Tolerance;
Integer MaxIterations;
Integer IterationsToComplete; //Number of iterations the CG took to finish. Filled in upon completion
Integer PrintInterval; //GridLogMessages or Iterative
RealD TrueResidual;
BlockConjugateGradient(BlockCGtype cgtype,int _Orthog,RealD tol, Integer maxit, bool err_on_no_conv = true)
: Tolerance(tol), CGtype(cgtype), blockDim(_Orthog), MaxIterations(maxit), ErrorOnNoConverge(err_on_no_conv),PrintInterval(100)
{};
////////////////////////////////////////////////////////////////////////////////////////////////////
// Thin QR factorisation (google it)
////////////////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////////////////////
//Dimensions
// R_{ferm x Nblock} = Q_{ferm x Nblock} x C_{Nblock x Nblock} -> ferm x Nblock
//
// Rdag R = m_rr = Herm = L L^dag <-- Cholesky decomposition (LLT routine in Eigen)
//
// Q C = R => Q = R C^{-1}
//
// Want Ident = Q^dag Q = C^{-dag} R^dag R C^{-1} = C^{-dag} L L^dag C^{-1} = 1_{Nblock x Nblock}
//
// Set C = L^{dag}, and then Q^dag Q = ident
//
// Checks:
// Cdag C = Rdag R ; passes.
// QdagQ = 1 ; passes
////////////////////////////////////////////////////////////////////////////////////////////////////
void ThinQRfact (Eigen::MatrixXcd &m_rr,
Eigen::MatrixXcd &C,
Eigen::MatrixXcd &Cinv,
Field & Q,
const Field & R)
{
int Orthog = blockDim; // First dimension is block dim; this is an assumption
sliceInnerProductMatrix(m_rr,R,R,Orthog);
// Force manifest hermitian to avoid rounding related
m_rr = 0.5*(m_rr+m_rr.adjoint());
Eigen::MatrixXcd L = m_rr.llt().matrixL();
C = L.adjoint();
Cinv = C.inverse();
////////////////////////////////////////////////////////////////////////////////////////////////////
// Q = R C^{-1}
//
// Q_j = R_i Cinv(i,j)
//
// NB maddMatrix conventions are Right multiplication X[j] a[j,i] already
////////////////////////////////////////////////////////////////////////////////////////////////////
sliceMulMatrix(Q,Cinv,R,Orthog);
}
// see comments above
void ThinQRfact (Eigen::MatrixXcd &m_rr,
Eigen::MatrixXcd &C,
Eigen::MatrixXcd &Cinv,
std::vector<Field> & Q,
const std::vector<Field> & R)
{
InnerProductMatrix(m_rr,R,R);
m_rr = 0.5*(m_rr+m_rr.adjoint());
Eigen::MatrixXcd L = m_rr.llt().matrixL();
C = L.adjoint();
Cinv = C.inverse();
MulMatrix(Q,Cinv,R);
}
////////////////////////////////////////////////////////////////////////////////////////////////////
// Call one of several implementations
////////////////////////////////////////////////////////////////////////////////////////////////////
void operator()(LinearOperatorBase<Field> &Linop, const Field &Src, Field &Psi)
{
if ( CGtype == BlockCGrQ ) {
BlockCGrQsolve(Linop,Src,Psi);
} else if (CGtype == CGmultiRHS ) {
CGmultiRHSsolve(Linop,Src,Psi);
} else {
assert(0);
}
}
virtual void operator()(LinearOperatorBase<Field> &Linop, const std::vector<Field> &Src, std::vector<Field> &Psi)
{
if ( CGtype == BlockCGrQVec ) {
BlockCGrQsolveVec(Linop,Src,Psi);
} else {
assert(0);
}
}
////////////////////////////////////////////////////////////////////////////
// BlockCGrQ implementation:
//--------------------------
// X is guess/Solution
// B is RHS
// Solve A X_i = B_i ; i refers to Nblock index
////////////////////////////////////////////////////////////////////////////
void BlockCGrQsolve(LinearOperatorBase<Field> &Linop, const Field &B, Field &X)
{
int Orthog = blockDim; // First dimension is block dim; this is an assumption
Nblock = B.Grid()->_fdimensions[Orthog];
/* FAKE */
Nblock=8;
std::cout<<GridLogMessage<<" Block Conjugate Gradient : Orthog "<<Orthog<<" Nblock "<<Nblock<<std::endl;
X.Checkerboard() = B.Checkerboard();
conformable(X, B);
Field tmp(B);
Field Q(B);
Field D(B);
Field Z(B);
Field AD(B);
Eigen::MatrixXcd m_DZ = Eigen::MatrixXcd::Identity(Nblock,Nblock);
Eigen::MatrixXcd m_M = Eigen::MatrixXcd::Identity(Nblock,Nblock);
Eigen::MatrixXcd m_rr = Eigen::MatrixXcd::Zero(Nblock,Nblock);
Eigen::MatrixXcd m_C = Eigen::MatrixXcd::Zero(Nblock,Nblock);
Eigen::MatrixXcd m_Cinv = Eigen::MatrixXcd::Zero(Nblock,Nblock);
Eigen::MatrixXcd m_S = Eigen::MatrixXcd::Zero(Nblock,Nblock);
Eigen::MatrixXcd m_Sinv = Eigen::MatrixXcd::Zero(Nblock,Nblock);
Eigen::MatrixXcd m_tmp = Eigen::MatrixXcd::Identity(Nblock,Nblock);
Eigen::MatrixXcd m_tmp1 = Eigen::MatrixXcd::Identity(Nblock,Nblock);
// Initial residual computation & set up
std::vector<RealD> residuals(Nblock);
std::vector<RealD> ssq(Nblock);
sliceNorm(ssq,B,Orthog);
RealD sssum=0;
for(int b=0;b<Nblock;b++) sssum+=ssq[b];
sliceNorm(residuals,B,Orthog);
for(int b=0;b<Nblock;b++){ assert(std::isnan(residuals[b])==0); }
sliceNorm(residuals,X,Orthog);
for(int b=0;b<Nblock;b++){ assert(std::isnan(residuals[b])==0); }
/************************************************************************
* Block conjugate gradient rQ (Sebastien Birk Thesis, after Dubrulle 2001)
************************************************************************
* Dimensions:
*
* X,B==(Nferm x Nblock)
* A==(Nferm x Nferm)
*
* Nferm = Nspin x Ncolour x Ncomplex x Nlattice_site
*
* QC = R = B-AX, D = Q ; QC => Thin QR factorisation (google it)
* for k:
* Z = AD
* M = [D^dag Z]^{-1}
* X = X + D MC
* QS = Q - ZM
* D = Q + D S^dag
* C = S C
*/
///////////////////////////////////////
// Initial block: initial search dir is guess
///////////////////////////////////////
std::cout << GridLogMessage<<"BlockCGrQ algorithm initialisation " <<std::endl;
//1. QC = R = B-AX, D = Q ; QC => Thin QR factorisation (google it)
Linop.HermOp(X, AD);
tmp = B - AD;
ThinQRfact (m_rr, m_C, m_Cinv, Q, tmp);
D=Q;
std::cout << GridLogMessage<<"BlockCGrQ computed initial residual and QR fact " <<std::endl;
///////////////////////////////////////
// Timers
///////////////////////////////////////
GridStopWatch sliceInnerTimer;
GridStopWatch sliceMaddTimer;
GridStopWatch QRTimer;
GridStopWatch MatrixTimer;
GridStopWatch SolverTimer;
SolverTimer.Start();
int k;
for (k = 1; k <= MaxIterations; k++) {
//3. Z = AD
MatrixTimer.Start();
Linop.HermOp(D, Z);
MatrixTimer.Stop();
//4. M = [D^dag Z]^{-1}
sliceInnerTimer.Start();
sliceInnerProductMatrix(m_DZ,D,Z,Orthog);
sliceInnerTimer.Stop();
m_M = m_DZ.inverse();
//5. X = X + D MC
m_tmp = m_M * m_C;
sliceMaddTimer.Start();
sliceMaddMatrix(X,m_tmp, D,X,Orthog);
sliceMaddTimer.Stop();
//6. QS = Q - ZM
sliceMaddTimer.Start();
sliceMaddMatrix(tmp,m_M,Z,Q,Orthog,-1.0);
sliceMaddTimer.Stop();
QRTimer.Start();
ThinQRfact (m_rr, m_S, m_Sinv, Q, tmp);
QRTimer.Stop();
//7. D = Q + D S^dag
m_tmp = m_S.adjoint();
sliceMaddTimer.Start();
sliceMaddMatrix(D,m_tmp,D,Q,Orthog);
sliceMaddTimer.Stop();
//8. C = S C
m_C = m_S*m_C;
/*********************
* convergence monitor
*********************
*/
m_rr = m_C.adjoint() * m_C;
RealD max_resid=0;
RealD rrsum=0;
RealD rr;
for(int b=0;b<Nblock;b++) {
rrsum+=real(m_rr(b,b));
rr = real(m_rr(b,b))/ssq[b];
if ( rr > max_resid ) max_resid = rr;
}
std::cout << GridLogIterative << "\titeration "<<k<<" rr_sum "<<rrsum<<" ssq_sum "<< sssum
<<" ave "<<std::sqrt(rrsum/sssum) << " max "<< max_resid <<std::endl;
if ( max_resid < Tolerance*Tolerance ) {
SolverTimer.Stop();
std::cout << GridLogMessage<<"BlockCGrQ converged in "<<k<<" iterations"<<std::endl;
for(int b=0;b<Nblock;b++){
std::cout << GridLogMessage<< "\t\tblock "<<b<<" computed resid "
<< std::sqrt(real(m_rr(b,b))/ssq[b])<<std::endl;
}
std::cout << GridLogMessage<<"\tMax residual is "<<std::sqrt(max_resid)<<std::endl;
Linop.HermOp(X, AD);
AD = AD-B;
TrueResidual = std::sqrt(norm2(AD)/norm2(B));
std::cout << GridLogMessage <<"\tTrue residual is " << TrueResidual <<std::endl;
std::cout << GridLogMessage << "Time Breakdown "<<std::endl;
std::cout << GridLogMessage << "\tElapsed " << SolverTimer.Elapsed() <<std::endl;
std::cout << GridLogMessage << "\tMatrix " << MatrixTimer.Elapsed() <<std::endl;
std::cout << GridLogMessage << "\tInnerProd " << sliceInnerTimer.Elapsed() <<std::endl;
std::cout << GridLogMessage << "\tMaddMatrix " << sliceMaddTimer.Elapsed() <<std::endl;
std::cout << GridLogMessage << "\tThinQRfact " << QRTimer.Elapsed() <<std::endl;
IterationsToComplete = k;
return;
}
}
std::cout << GridLogMessage << "BlockConjugateGradient(rQ) did NOT converge" << std::endl;
if (ErrorOnNoConverge) assert(0);
IterationsToComplete = k;
}
//////////////////////////////////////////////////////////////////////////
// multiRHS conjugate gradient. Dimension zero should be the block direction
// Use this for spread out across nodes
//////////////////////////////////////////////////////////////////////////
void CGmultiRHSsolve(LinearOperatorBase<Field> &Linop, const Field &Src, Field &Psi)
{
int Orthog = blockDim; // First dimension is block dim
Nblock = Src.Grid()->_fdimensions[Orthog];
std::cout<<GridLogMessage<<"MultiRHS Conjugate Gradient : Orthog "<<Orthog<<" Nblock "<<Nblock<<std::endl;
Psi.Checkerboard() = Src.Checkerboard();
conformable(Psi, Src);
Field P(Src);
Field AP(Src);
Field R(Src);
std::vector<ComplexD> v_pAp(Nblock);
std::vector<RealD> v_rr (Nblock);
std::vector<RealD> v_rr_inv(Nblock);
std::vector<RealD> v_alpha(Nblock);
std::vector<RealD> v_beta(Nblock);
// Initial residual computation & set up
std::vector<RealD> residuals(Nblock);
std::vector<RealD> ssq(Nblock);
sliceNorm(ssq,Src,Orthog);
RealD sssum=0;
for(int b=0;b<Nblock;b++) sssum+=ssq[b];
sliceNorm(residuals,Src,Orthog);
for(int b=0;b<Nblock;b++){ assert(std::isnan(residuals[b])==0); }
sliceNorm(residuals,Psi,Orthog);
for(int b=0;b<Nblock;b++){ assert(std::isnan(residuals[b])==0); }
// Initial search dir is guess
Linop.HermOp(Psi, AP);
R = Src - AP;
P = R;
sliceNorm(v_rr,R,Orthog);
GridStopWatch sliceInnerTimer;
GridStopWatch sliceMaddTimer;
GridStopWatch sliceNormTimer;
GridStopWatch MatrixTimer;
GridStopWatch SolverTimer;
SolverTimer.Start();
int k;
for (k = 1; k <= MaxIterations; k++) {
RealD rrsum=0;
for(int b=0;b<Nblock;b++) rrsum+=real(v_rr[b]);
std::cout << GridLogIterative << "\titeration "<<k<<" rr_sum "<<rrsum<<" ssq_sum "<< sssum
<<" / "<<std::sqrt(rrsum/sssum) <<std::endl;
MatrixTimer.Start();
Linop.HermOp(P, AP);
MatrixTimer.Stop();
// Alpha
sliceInnerTimer.Start();
sliceInnerProductVector(v_pAp,P,AP,Orthog);
sliceInnerTimer.Stop();
for(int b=0;b<Nblock;b++){
v_alpha[b] = v_rr[b]/real(v_pAp[b]);
}
// Psi, R update
sliceMaddTimer.Start();
sliceMaddVector(Psi,v_alpha, P,Psi,Orthog); // add alpha * P to psi
sliceMaddVector(R ,v_alpha,AP, R,Orthog,-1.0);// sub alpha * AP to resid
sliceMaddTimer.Stop();
// Beta
for(int b=0;b<Nblock;b++){
v_rr_inv[b] = 1.0/v_rr[b];
}
sliceNormTimer.Start();
sliceNorm(v_rr,R,Orthog);
sliceNormTimer.Stop();
for(int b=0;b<Nblock;b++){
v_beta[b] = v_rr_inv[b] *v_rr[b];
}
// Search update
sliceMaddTimer.Start();
sliceMaddVector(P,v_beta,P,R,Orthog);
sliceMaddTimer.Stop();
/*********************
* convergence monitor
*********************
*/
RealD max_resid=0;
for(int b=0;b<Nblock;b++){
RealD rr = v_rr[b]/ssq[b];
if ( rr > max_resid ) max_resid = rr;
}
if ( max_resid < Tolerance*Tolerance ) {
SolverTimer.Stop();
std::cout << GridLogMessage<<"MultiRHS solver converged in " <<k<<" iterations"<<std::endl;
for(int b=0;b<Nblock;b++){
std::cout << GridLogMessage<< "\t\tBlock "<<b<<" computed resid "<< std::sqrt(v_rr[b]/ssq[b])<<std::endl;
}
std::cout << GridLogMessage<<"\tMax residual is "<<std::sqrt(max_resid)<<std::endl;
Linop.HermOp(Psi, AP);
AP = AP-Src;
TrueResidual = std::sqrt(norm2(AP)/norm2(Src));
std::cout <<GridLogMessage << "\tTrue residual is " << TrueResidual <<std::endl;
std::cout << GridLogMessage << "Time Breakdown "<<std::endl;
std::cout << GridLogMessage << "\tElapsed " << SolverTimer.Elapsed() <<std::endl;
std::cout << GridLogMessage << "\tMatrix " << MatrixTimer.Elapsed() <<std::endl;
std::cout << GridLogMessage << "\tInnerProd " << sliceInnerTimer.Elapsed() <<std::endl;
std::cout << GridLogMessage << "\tNorm " << sliceNormTimer.Elapsed() <<std::endl;
std::cout << GridLogMessage << "\tMaddMatrix " << sliceMaddTimer.Elapsed() <<std::endl;
IterationsToComplete = k;
return;
}
}
std::cout << GridLogMessage << "MultiRHSConjugateGradient did NOT converge" << std::endl;
if (ErrorOnNoConverge) assert(0);
IterationsToComplete = k;
}
void InnerProductMatrix(Eigen::MatrixXcd &m , const std::vector<Field> &X, const std::vector<Field> &Y){
for(int b=0;b<Nblock;b++){
for(int bp=0;bp<Nblock;bp++) {
m(b,bp) = innerProduct(X[b],Y[bp]);
}}
}
void MaddMatrix(std::vector<Field> &AP, Eigen::MatrixXcd &m , const std::vector<Field> &X,const std::vector<Field> &Y,RealD scale=1.0){
// Should make this cache friendly with site outermost, parallel_for
// Deal with case AP aliases with either Y or X
std::vector<Field> tmp(Nblock,X[0]);
for(int b=0;b<Nblock;b++){
tmp[b] = Y[b];
for(int bp=0;bp<Nblock;bp++) {
tmp[b] = tmp[b] + scomplex(scale*m(bp,b))*X[bp];
}
}
for(int b=0;b<Nblock;b++){
AP[b] = tmp[b];
}
}
void MulMatrix(std::vector<Field> &AP, Eigen::MatrixXcd &m , const std::vector<Field> &X){
// Should make this cache friendly with site outermost, parallel_for
for(int b=0;b<Nblock;b++){
AP[b] = Zero();
for(int bp=0;bp<Nblock;bp++) {
AP[b] += scomplex(m(bp,b))*X[bp];
}
}
}
double normv(const std::vector<Field> &P){
double nn = 0.0;
for(int b=0;b<Nblock;b++) {
nn+=norm2(P[b]);
}
return nn;
}
////////////////////////////////////////////////////////////////////////////
// BlockCGrQvec implementation:
//--------------------------
// X is guess/Solution
// B is RHS
// Solve A X_i = B_i ; i refers to Nblock index
////////////////////////////////////////////////////////////////////////////
void BlockCGrQsolveVec(LinearOperatorBase<Field> &Linop, const std::vector<Field> &B, std::vector<Field> &X)
{
Nblock = B.size();
assert(Nblock == X.size());
std::cout<<GridLogMessage<<" Block Conjugate Gradient Vec rQ : Nblock "<<Nblock<<std::endl;
for(int b=0;b<Nblock;b++){
X[b].Checkerboard() = B[b].Checkerboard();
conformable(X[b], B[b]);
conformable(X[b], X[0]);
}
Field Fake(B[0]);
std::vector<Field> tmp(Nblock,Fake);
std::vector<Field> Q(Nblock,Fake);
std::vector<Field> D(Nblock,Fake);
std::vector<Field> Z(Nblock,Fake);
std::vector<Field> AD(Nblock,Fake);
Eigen::MatrixXcd m_DZ = Eigen::MatrixXcd::Identity(Nblock,Nblock);
Eigen::MatrixXcd m_M = Eigen::MatrixXcd::Identity(Nblock,Nblock);
Eigen::MatrixXcd m_rr = Eigen::MatrixXcd::Zero(Nblock,Nblock);
Eigen::MatrixXcd m_C = Eigen::MatrixXcd::Zero(Nblock,Nblock);
Eigen::MatrixXcd m_Cinv = Eigen::MatrixXcd::Zero(Nblock,Nblock);
Eigen::MatrixXcd m_S = Eigen::MatrixXcd::Zero(Nblock,Nblock);
Eigen::MatrixXcd m_Sinv = Eigen::MatrixXcd::Zero(Nblock,Nblock);
Eigen::MatrixXcd m_tmp = Eigen::MatrixXcd::Identity(Nblock,Nblock);
Eigen::MatrixXcd m_tmp1 = Eigen::MatrixXcd::Identity(Nblock,Nblock);
// Initial residual computation & set up
std::vector<RealD> residuals(Nblock);
std::vector<RealD> ssq(Nblock);
RealD sssum=0;
for(int b=0;b<Nblock;b++){ ssq[b] = norm2(B[b]);}
for(int b=0;b<Nblock;b++) sssum+=ssq[b];
for(int b=0;b<Nblock;b++){ residuals[b] = norm2(B[b]);}
for(int b=0;b<Nblock;b++){ assert(std::isnan(residuals[b])==0); }
for(int b=0;b<Nblock;b++){ residuals[b] = norm2(X[b]);}
for(int b=0;b<Nblock;b++){ assert(std::isnan(residuals[b])==0); }
/************************************************************************
* Block conjugate gradient rQ (Sebastien Birk Thesis, after Dubrulle 2001)
************************************************************************
* Dimensions:
*
* X,B==(Nferm x Nblock)
* A==(Nferm x Nferm)
*
* Nferm = Nspin x Ncolour x Ncomplex x Nlattice_site
*
* QC = R = B-AX, D = Q ; QC => Thin QR factorisation (google it)
* for k:
* Z = AD
* M = [D^dag Z]^{-1}
* X = X + D MC
* QS = Q - ZM
* D = Q + D S^dag
* C = S C
*/
///////////////////////////////////////
// Initial block: initial search dir is guess
///////////////////////////////////////
std::cout << GridLogMessage<<"BlockCGrQvec algorithm initialisation " <<std::endl;
//1. QC = R = B-AX, D = Q ; QC => Thin QR factorisation (google it)
for(int b=0;b<Nblock;b++) {
Linop.HermOp(X[b], AD[b]);
tmp[b] = B[b] - AD[b];
}
ThinQRfact (m_rr, m_C, m_Cinv, Q, tmp);
for(int b=0;b<Nblock;b++) D[b]=Q[b];
std::cout << GridLogMessage<<"BlockCGrQ vec computed initial residual and QR fact " <<std::endl;
///////////////////////////////////////
// Timers
///////////////////////////////////////
GridStopWatch sliceInnerTimer;
GridStopWatch sliceMaddTimer;
GridStopWatch QRTimer;
GridStopWatch MatrixTimer;
GridStopWatch SolverTimer;
SolverTimer.Start();
int k;
for (k = 1; k <= MaxIterations; k++) {
//3. Z = AD
MatrixTimer.Start();
for(int b=0;b<Nblock;b++) Linop.HermOp(D[b], Z[b]);
MatrixTimer.Stop();
//4. M = [D^dag Z]^{-1}
sliceInnerTimer.Start();
InnerProductMatrix(m_DZ,D,Z);
sliceInnerTimer.Stop();
m_M = m_DZ.inverse();
//5. X = X + D MC
m_tmp = m_M * m_C;
sliceMaddTimer.Start();
MaddMatrix(X,m_tmp, D,X);
sliceMaddTimer.Stop();
//6. QS = Q - ZM
sliceMaddTimer.Start();
MaddMatrix(tmp,m_M,Z,Q,-1.0);
sliceMaddTimer.Stop();
QRTimer.Start();
ThinQRfact (m_rr, m_S, m_Sinv, Q, tmp);
QRTimer.Stop();
//7. D = Q + D S^dag
m_tmp = m_S.adjoint();
sliceMaddTimer.Start();
MaddMatrix(D,m_tmp,D,Q);
sliceMaddTimer.Stop();
//8. C = S C
m_C = m_S*m_C;
/*********************
* convergence monitor
*********************
*/
m_rr = m_C.adjoint() * m_C;
RealD max_resid=0;
RealD rrsum=0;
RealD rr;
for(int b=0;b<Nblock;b++) {
rrsum+=real(m_rr(b,b));
rr = real(m_rr(b,b))/ssq[b];
if ( rr > max_resid ) max_resid = rr;
}
std::cout << GridLogIterative << "\t Block Iteration "<<k<<" ave resid "<< std::sqrt(rrsum/sssum) << " max "<< std::sqrt(max_resid) <<std::endl;
if ( max_resid < Tolerance*Tolerance ) {
SolverTimer.Stop();
std::cout << GridLogMessage<<"BlockCGrQ converged in "<<k<<" iterations"<<std::endl;
for(int b=0;b<Nblock;b++){
std::cout << GridLogMessage<< "\t\tblock "<<b<<" computed resid "<< std::sqrt(real(m_rr(b,b))/ssq[b])<<std::endl;
}
std::cout << GridLogMessage<<"\tMax residual is "<<std::sqrt(max_resid)<<std::endl;
for(int b=0;b<Nblock;b++) Linop.HermOp(X[b], AD[b]);
for(int b=0;b<Nblock;b++) AD[b] = AD[b]-B[b];
TrueResidual = std::sqrt(normv(AD)/normv(B));
std::cout << GridLogMessage << "\tTrue residual is " << TrueResidual <<std::endl;
std::cout << GridLogMessage << "Time Breakdown "<<std::endl;
std::cout << GridLogMessage << "\tElapsed " << SolverTimer.Elapsed() <<std::endl;
std::cout << GridLogMessage << "\tMatrix " << MatrixTimer.Elapsed() <<std::endl;
std::cout << GridLogMessage << "\tInnerProd " << sliceInnerTimer.Elapsed() <<std::endl;
std::cout << GridLogMessage << "\tMaddMatrix " << sliceMaddTimer.Elapsed() <<std::endl;
std::cout << GridLogMessage << "\tThinQRfact " << QRTimer.Elapsed() <<std::endl;
IterationsToComplete = k;
return;
}
}
std::cout << GridLogMessage << "BlockConjugateGradient(rQ) did NOT converge" << std::endl;
if (ErrorOnNoConverge) assert(0);
IterationsToComplete = k;
}
};
NAMESPACE_END(Grid);
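The ThinQRfact construction above (Cholesky of R^dag R, C = L^dag, Q = R C^{-1}) can be checked with Eigen alone. A small sketch that reproduces the two checks quoted in the comments, Cdag C = Rdag R and Qdag Q = 1; the sizes are illustrative and the code is not part of Grid:

#include <Eigen/Dense>
#include <iostream>

int main(){
  const int Nferm = 1000, Nblock = 4;
  Eigen::MatrixXcd R = Eigen::MatrixXcd::Random(Nferm, Nblock);

  Eigen::MatrixXcd m_rr = R.adjoint() * R;        // cf. sliceInnerProductMatrix
  m_rr = 0.5*(m_rr + m_rr.adjoint());             // force manifest hermiticity
  Eigen::MatrixXcd L    = m_rr.llt().matrixL();   // Cholesky: m_rr = L L^dag
  Eigen::MatrixXcd C    = L.adjoint();
  Eigen::MatrixXcd Cinv = C.inverse();
  Eigen::MatrixXcd Q    = R * Cinv;               // cf. sliceMulMatrix

  std::cout << "|| Cdag C - Rdag R || = " << (C.adjoint()*C - R.adjoint()*R).norm() << std::endl;
  std::cout << "|| Qdag Q - 1 ||      = "
            << (Q.adjoint()*Q - Eigen::MatrixXcd::Identity(Nblock,Nblock)).norm() << std::endl;
  return 0;
}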

View File

@ -0,0 +1,248 @@
/*************************************************************************************
Grid physics library, www.github.com/paboyle/Grid
Source file: ./lib/algorithms/iterative/CommunicationAvoidingGeneralisedMinimalResidual.h
Copyright (C) 2015
Author: Daniel Richtmann <daniel.richtmann@ur.de>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
See the full license in the file "LICENSE" in the top level distribution
directory
*************************************************************************************/
/* END LEGAL */
#ifndef GRID_COMMUNICATION_AVOIDING_GENERALISED_MINIMAL_RESIDUAL_H
#define GRID_COMMUNICATION_AVOIDING_GENERALISED_MINIMAL_RESIDUAL_H
namespace Grid {
template<class Field>
class CommunicationAvoidingGeneralisedMinimalResidual : public OperatorFunction<Field> {
public:
using OperatorFunction<Field>::operator();
bool ErrorOnNoConverge; // Throw an assert when CAGMRES fails to converge,
// defaults to true
RealD Tolerance;
Integer MaxIterations;
Integer RestartLength;
Integer MaxNumberOfRestarts;
Integer IterationCount; // Number of iterations the CAGMRES took to finish,
// filled in upon completion
GridStopWatch MatrixTimer;
GridStopWatch LinalgTimer;
GridStopWatch QrTimer;
GridStopWatch CompSolutionTimer;
Eigen::MatrixXcd H;
std::vector<ComplexD> y;
std::vector<ComplexD> gamma;
std::vector<ComplexD> c;
std::vector<ComplexD> s;
CommunicationAvoidingGeneralisedMinimalResidual(RealD tol,
Integer maxit,
Integer restart_length,
bool err_on_no_conv = true)
: Tolerance(tol)
, MaxIterations(maxit)
, RestartLength(restart_length)
, MaxNumberOfRestarts(MaxIterations/RestartLength + ((MaxIterations%RestartLength == 0) ? 0 : 1))
, ErrorOnNoConverge(err_on_no_conv)
, H(Eigen::MatrixXcd::Zero(RestartLength, RestartLength + 1)) // sizes taken from DD-αAMG code base
, y(RestartLength + 1, 0.)
, gamma(RestartLength + 1, 0.)
, c(RestartLength + 1, 0.)
, s(RestartLength + 1, 0.) {};
void operator()(LinearOperatorBase<Field> &LinOp, const Field &src, Field &psi) {
std::cout << GridLogWarning << "This algorithm currently doesn't differ from regular GMRES" << std::endl;
psi.Checkerboard() = src.Checkerboard();
conformable(psi, src);
RealD guess = norm2(psi);
assert(std::isnan(guess) == 0);
RealD cp;
RealD ssq = norm2(src);
RealD rsq = Tolerance * Tolerance * ssq;
Field r(src.Grid());
std::cout << std::setprecision(4) << std::scientific;
std::cout << GridLogIterative << "CommunicationAvoidingGeneralisedMinimalResidual: guess " << guess << std::endl;
std::cout << GridLogIterative << "CommunicationAvoidingGeneralisedMinimalResidual: src " << ssq << std::endl;
MatrixTimer.Reset();
LinalgTimer.Reset();
QrTimer.Reset();
CompSolutionTimer.Reset();
GridStopWatch SolverTimer;
SolverTimer.Start();
IterationCount = 0;
for (int k=0; k<MaxNumberOfRestarts; k++) {
cp = outerLoopBody(LinOp, src, psi, rsq);
// Stopping condition
if (cp <= rsq) {
SolverTimer.Stop();
LinOp.Op(psi,r);
axpy(r,-1.0,src,r);
RealD srcnorm = sqrt(ssq);
RealD resnorm = sqrt(norm2(r));
RealD true_residual = resnorm / srcnorm;
std::cout << GridLogMessage << "CommunicationAvoidingGeneralisedMinimalResidual: Converged on iteration " << IterationCount
<< " computed residual " << sqrt(cp / ssq)
<< " true residual " << true_residual
<< " target " << Tolerance << std::endl;
std::cout << GridLogMessage << "CAGMRES Time elapsed: Total " << SolverTimer.Elapsed() << std::endl;
std::cout << GridLogMessage << "CAGMRES Time elapsed: Matrix " << MatrixTimer.Elapsed() << std::endl;
std::cout << GridLogMessage << "CAGMRES Time elapsed: Linalg " << LinalgTimer.Elapsed() << std::endl;
std::cout << GridLogMessage << "CAGMRES Time elapsed: QR " << QrTimer.Elapsed() << std::endl;
std::cout << GridLogMessage << "CAGMRES Time elapsed: CompSol " << CompSolutionTimer.Elapsed() << std::endl;
return;
}
}
std::cout << GridLogMessage << "CommunicationAvoidingGeneralisedMinimalResidual did NOT converge" << std::endl;
if (ErrorOnNoConverge)
assert(0);
}
RealD outerLoopBody(LinearOperatorBase<Field> &LinOp, const Field &src, Field &psi, RealD rsq) {
RealD cp = 0;
Field w(src.Grid());
Field r(src.Grid());
// this should probably be made a class member so that it is only allocated once, not in every restart
std::vector<Field> v(RestartLength + 1, src.Grid()); for (auto &elem : v) elem = Zero();
MatrixTimer.Start();
LinOp.Op(psi, w);
MatrixTimer.Stop();
LinalgTimer.Start();
r = src - w;
gamma[0] = sqrt(norm2(r));
ComplexD scale = 1.0/gamma[0];
v[0] = scale * r;
LinalgTimer.Stop();
for (int i=0; i<RestartLength; i++) {
IterationCount++;
arnoldiStep(LinOp, v, w, i);
qrUpdate(i);
cp = norm(gamma[i+1]);
std::cout << GridLogIterative << "CommunicationAvoidingGeneralisedMinimalResidual: Iteration " << IterationCount
<< " residual " << cp << " target " << rsq << std::endl;
if ((i == RestartLength - 1) || (IterationCount == MaxIterations) || (cp <= rsq)) {
computeSolution(v, psi, i);
return cp;
}
}
assert(0); // Never reached
return cp;
}
void arnoldiStep(LinearOperatorBase<Field> &LinOp, std::vector<Field> &v, Field &w, int iter) {
MatrixTimer.Start();
LinOp.Op(v[iter], w);
MatrixTimer.Stop();
LinalgTimer.Start();
for (int i = 0; i <= iter; ++i) {
H(iter, i) = innerProduct(v[i], w);
w = w - ComplexD(H(iter, i)) * v[i];
}
H(iter, iter + 1) = sqrt(norm2(w));
v[iter + 1] = ComplexD(1. / H(iter, iter + 1)) * w;
LinalgTimer.Stop();
}
void qrUpdate(int iter) {
QrTimer.Start();
for (int i = 0; i < iter ; ++i) {
auto tmp = -s[i] * ComplexD(H(iter, i)) + c[i] * ComplexD(H(iter, i + 1));
H(iter, i) = conjugate(c[i]) * ComplexD(H(iter, i)) + conjugate(s[i]) * ComplexD(H(iter, i + 1));
H(iter, i + 1) = tmp;
}
// Compute new Givens Rotation
auto nu = sqrt(std::norm(H(iter, iter)) + std::norm(H(iter, iter + 1)));
c[iter] = H(iter, iter) / nu;
s[iter] = H(iter, iter + 1) / nu;
// Apply new Givens rotation
H(iter, iter) = nu;
H(iter, iter + 1) = 0.;
gamma[iter + 1] = -s[iter] * gamma[iter];
gamma[iter] = conjugate(c[iter]) * gamma[iter];
QrTimer.Stop();
}
void computeSolution(std::vector<Field> const &v, Field &psi, int iter) {
CompSolutionTimer.Start();
for (int i = iter; i >= 0; i--) {
y[i] = gamma[i];
for (int k = i + 1; k <= iter; k++)
y[i] = y[i] - ComplexD(H(k, i)) * y[k];
y[i] = y[i] / ComplexD(H(i, i));
}
for (int i = 0; i <= iter; i++)
psi = psi + v[i] * y[i];
CompSolutionTimer.Stop();
}
};
}
#endif
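A tiny standalone illustration of the Givens rotation applied in qrUpdate above: build c and s from one Hessenberg column so the rotation zeroes the subdiagonal entry, and apply the same rotation to the right-hand side gamma. The complex entry values are illustrative only:

#include <cmath>
#include <complex>
#include <iostream>

int main(){
  std::complex<double> hkk(3.0, 1.0), hk1(4.0, 0.0);   // H(k,k) and H(k,k+1), illustrative
  std::complex<double> gk(1.0, 0.0), gk1;              // gamma[k] and gamma[k+1]

  double nu = std::sqrt(std::norm(hkk) + std::norm(hk1));
  std::complex<double> c = hkk / nu;
  std::complex<double> s = hk1 / nu;

  // Rotate the column: (H(k,k), H(k,k+1)) -> (nu, 0)
  hkk = nu;
  hk1 = 0.0;

  // Rotate the rhs exactly as in qrUpdate: gamma[k+1] = -s*gamma[k], gamma[k] = conj(c)*gamma[k]
  gk1 = -s * gk;
  gk  = std::conj(c) * gk;

  std::cout << "nu = " << nu << "  gamma_k = " << gk << "  gamma_k+1 = " << gk1 << std::endl;
  return 0;
}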

View File

@ -0,0 +1,203 @@
/*************************************************************************************
Grid physics library, www.github.com/paboyle/Grid
Source file: ./lib/algorithms/iterative/ConjugateGradient.h
Copyright (C) 2015
Author: Azusa Yamaguchi <ayamaguc@staffmail.ed.ac.uk>
Author: Peter Boyle <paboyle@ph.ed.ac.uk>
Author: paboyle <paboyle@ph.ed.ac.uk>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
See the full license in the file "LICENSE" in the top level distribution
directory
*************************************************************************************/
/* END LEGAL */
#ifndef GRID_CONJUGATE_GRADIENT_H
#define GRID_CONJUGATE_GRADIENT_H
NAMESPACE_BEGIN(Grid);
/////////////////////////////////////////////////////////////
// Base classes for iterative processes based on operators
// single input vec, single output vec.
/////////////////////////////////////////////////////////////
template <class Field>
class ConjugateGradient : public OperatorFunction<Field> {
public:
using OperatorFunction<Field>::operator();
bool ErrorOnNoConverge; // throw an assert when the CG fails to converge.
// Defaults true.
RealD Tolerance;
Integer MaxIterations;
Integer IterationsToComplete; //Number of iterations the CG took to finish. Filled in upon completion
RealD TrueResidual;
ConjugateGradient(RealD tol, Integer maxit, bool err_on_no_conv = true)
: Tolerance(tol),
MaxIterations(maxit),
ErrorOnNoConverge(err_on_no_conv){};
void operator()(LinearOperatorBase<Field> &Linop, const Field &src, Field &psi) {
psi.Checkerboard() = src.Checkerboard();
conformable(psi, src);
RealD cp, c, a, d, b, ssq, qq;
//RealD b_pred;
Field p(src);
Field mmp(src);
Field r(src);
// Initial residual computation & set up
RealD guess = norm2(psi);
assert(std::isnan(guess) == 0);
Linop.HermOpAndNorm(psi, mmp, d, b);
r = src - mmp;
p = r;
a = norm2(p);
cp = a;
ssq = norm2(src);
// Handle trivial case of zero src
if (ssq == 0.){
psi = Zero();
IterationsToComplete = 1;
TrueResidual = 0.;
return;
}
std::cout << GridLogIterative << std::setprecision(8) << "ConjugateGradient: guess " << guess << std::endl;
std::cout << GridLogIterative << std::setprecision(8) << "ConjugateGradient: src " << ssq << std::endl;
std::cout << GridLogIterative << std::setprecision(8) << "ConjugateGradient: mp " << d << std::endl;
std::cout << GridLogIterative << std::setprecision(8) << "ConjugateGradient: mmp " << b << std::endl;
std::cout << GridLogIterative << std::setprecision(8) << "ConjugateGradient: cp,r " << cp << std::endl;
std::cout << GridLogIterative << std::setprecision(8) << "ConjugateGradient: p " << a << std::endl;
RealD rsq = Tolerance * Tolerance * ssq;
// Check if guess is really REALLY good :)
if (cp <= rsq) {
TrueResidual = std::sqrt(a/ssq);
std::cout << GridLogMessage << "ConjugateGradient guess is converged already " << std::endl;
IterationsToComplete = 0;
return;
}
std::cout << GridLogIterative << std::setprecision(8)
<< "ConjugateGradient: k=0 residual " << cp << " target " << rsq << std::endl;
GridStopWatch LinalgTimer;
GridStopWatch InnerTimer;
GridStopWatch AxpyNormTimer;
GridStopWatch LinearCombTimer;
GridStopWatch MatrixTimer;
GridStopWatch SolverTimer;
SolverTimer.Start();
int k;
for (k = 1; k <= MaxIterations; k++) {
c = cp;
MatrixTimer.Start();
Linop.HermOp(p, mmp);
MatrixTimer.Stop();
LinalgTimer.Start();
InnerTimer.Start();
ComplexD dc = innerProduct(p,mmp);
InnerTimer.Stop();
d = dc.real();
a = c / d;
AxpyNormTimer.Start();
cp = axpy_norm(r, -a, mmp, r);
AxpyNormTimer.Stop();
b = cp / c;
LinearCombTimer.Start();
{
autoView( psi_v , psi, AcceleratorWrite);
autoView( p_v , p, AcceleratorWrite);
autoView( r_v , r, AcceleratorWrite);
accelerator_for(ss,p_v.size(), Field::vector_object::Nsimd(),{
coalescedWrite(psi_v[ss], a * p_v(ss) + psi_v(ss));
coalescedWrite(p_v[ss] , b * p_v(ss) + r_v (ss));
});
}
LinearCombTimer.Stop();
LinalgTimer.Stop();
std::cout << GridLogIterative << "ConjugateGradient: Iteration " << k
<< " residual " << sqrt(cp/ssq) << " target " << Tolerance << std::endl;
// Stopping condition
if (cp <= rsq) {
SolverTimer.Stop();
Linop.HermOpAndNorm(psi, mmp, d, qq);
p = mmp - src;
RealD srcnorm = std::sqrt(norm2(src));
RealD resnorm = std::sqrt(norm2(p));
RealD true_residual = resnorm / srcnorm;
std::cout << GridLogMessage << "ConjugateGradient Converged on iteration " << k
<< "\tComputed residual " << std::sqrt(cp / ssq)
<< "\tTrue residual " << true_residual
<< "\tTarget " << Tolerance << std::endl;
std::cout << GridLogIterative << "Time breakdown "<<std::endl;
std::cout << GridLogIterative << "\tElapsed " << SolverTimer.Elapsed() <<std::endl;
std::cout << GridLogIterative << "\tMatrix " << MatrixTimer.Elapsed() <<std::endl;
std::cout << GridLogIterative << "\tLinalg " << LinalgTimer.Elapsed() <<std::endl;
std::cout << GridLogIterative << "\tInner " << InnerTimer.Elapsed() <<std::endl;
std::cout << GridLogIterative << "\tAxpyNorm " << AxpyNormTimer.Elapsed() <<std::endl;
std::cout << GridLogIterative << "\tLinearComb " << LinearCombTimer.Elapsed() <<std::endl;
if (ErrorOnNoConverge) assert(true_residual / Tolerance < 10000.0);
IterationsToComplete = k;
TrueResidual = true_residual;
return;
}
}
// Failed. Calculate true residual before giving up
Linop.HermOpAndNorm(psi, mmp, d, qq);
p = mmp - src;
TrueResidual = sqrt(norm2(p)/ssq);
std::cout << GridLogMessage << "ConjugateGradient did NOT converge "<<k<<" / "<< MaxIterations<< std::endl;
if (ErrorOnNoConverge) assert(0);
IterationsToComplete = k;
}
};
NAMESPACE_END(Grid);
#endif
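A minimal dense-matrix sketch of the same CG recurrences the fused kernel above implements: a = <r,r>/<p,Ap>, r -= a*Ap, b = <r,r>_new/<r,r>_old, psi += a*p, p = r + b*p. The 2x2 SPD test system is illustrative and not Grid code:

#include <iostream>

int main(){
  const double A[2][2] = {{4,1},{1,3}};
  const double src[2]  = {1,2};
  double psi[2] = {0,0};
  double r[2]   = {src[0], src[1]};   // r = src - A*psi with psi = 0
  double p[2]   = {r[0], r[1]};
  double cp     = r[0]*r[0] + r[1]*r[1];
  const double rsq = 1e-24 * (src[0]*src[0] + src[1]*src[1]);

  for(int k=1; k<=100 && cp>rsq; k++){
    double mmp[2] = { A[0][0]*p[0] + A[0][1]*p[1],
                      A[1][0]*p[0] + A[1][1]*p[1] };        // mmp = A p
    double d = p[0]*mmp[0] + p[1]*mmp[1];                   // <p, Ap>
    double a = cp / d;                                      // alpha
    double c = cp;
    r[0] -= a*mmp[0];  r[1] -= a*mmp[1];
    cp = r[0]*r[0] + r[1]*r[1];
    double b = cp / c;                                      // beta
    psi[0] += a*p[0];          psi[1] += a*p[1];            // solution update
    p[0]    = r[0] + b*p[0];   p[1]    = r[1] + b*p[1];     // search direction update
    std::cout << "iteration " << k << " residual^2 " << cp << std::endl;
  }
  std::cout << "psi = " << psi[0] << " " << psi[1] << "  (expect 1/11, 7/11)" << std::endl;
  return 0;
}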

View File

@ -0,0 +1,161 @@
/*************************************************************************************
Grid physics library, www.github.com/paboyle/Grid
Source file: ./lib/algorithms/iterative/ConjugateGradientMixedPrec.h
Copyright (C) 2015
Author: Christopher Kelly <ckelly@phys.columbia.edu>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */
#ifndef GRID_CONJUGATE_GRADIENT_MIXED_PREC_H
#define GRID_CONJUGATE_GRADIENT_MIXED_PREC_H
NAMESPACE_BEGIN(Grid);
//Mixed precision restarted defect correction CG
template<class FieldD,class FieldF,
typename std::enable_if< getPrecision<FieldD>::value == 2, int>::type = 0,
typename std::enable_if< getPrecision<FieldF>::value == 1, int>::type = 0>
class MixedPrecisionConjugateGradient : public LinearFunction<FieldD> {
public:
RealD Tolerance;
RealD InnerTolerance; //Initial tolerance for inner CG. Defaults to Tolerance but can be changed
Integer MaxInnerIterations;
Integer MaxOuterIterations;
GridBase* SinglePrecGrid; //Grid for single-precision fields
RealD OuterLoopNormMult; //Stop the outer loop and move to a final double prec solve when the residual is OuterLoopNormMult * Tolerance
LinearOperatorBase<FieldF> &Linop_f;
LinearOperatorBase<FieldD> &Linop_d;
Integer TotalInnerIterations; //Number of inner CG iterations
Integer TotalOuterIterations; //Number of restarts
Integer TotalFinalStepIterations; //Number of CG iterations in final patch-up step
//Option to speed up *inner single precision* solves using a LinearFunction that produces a guess
LinearFunction<FieldF> *guesser;
MixedPrecisionConjugateGradient(RealD tol,
Integer maxinnerit,
Integer maxouterit,
GridBase* _sp_grid,
LinearOperatorBase<FieldF> &_Linop_f,
LinearOperatorBase<FieldD> &_Linop_d) :
Linop_f(_Linop_f), Linop_d(_Linop_d),
Tolerance(tol), InnerTolerance(tol), MaxInnerIterations(maxinnerit), MaxOuterIterations(maxouterit), SinglePrecGrid(_sp_grid),
OuterLoopNormMult(100.), guesser(NULL){ };
void useGuesser(LinearFunction<FieldF> &g){
guesser = &g;
}
void operator() (const FieldD &src_d_in, FieldD &sol_d){
TotalInnerIterations = 0;
GridStopWatch TotalTimer;
TotalTimer.Start();
int cb = src_d_in.Checkerboard();
sol_d.Checkerboard() = cb;
RealD src_norm = norm2(src_d_in);
RealD stop = src_norm * Tolerance*Tolerance;
GridBase* DoublePrecGrid = src_d_in.Grid();
FieldD tmp_d(DoublePrecGrid);
tmp_d.Checkerboard() = cb;
FieldD tmp2_d(DoublePrecGrid);
tmp2_d.Checkerboard() = cb;
FieldD src_d(DoublePrecGrid);
src_d = src_d_in; //source for next inner iteration, computed from residual during operation
RealD inner_tol = InnerTolerance;
FieldF src_f(SinglePrecGrid);
src_f.Checkerboard() = cb;
FieldF sol_f(SinglePrecGrid);
sol_f.Checkerboard() = cb;
ConjugateGradient<FieldF> CG_f(inner_tol, MaxInnerIterations);
CG_f.ErrorOnNoConverge = false;
GridStopWatch InnerCGtimer;
GridStopWatch PrecChangeTimer;
Integer &outer_iter = TotalOuterIterations; //so it will be equal to the final iteration count
for(outer_iter = 0; outer_iter < MaxOuterIterations; outer_iter++){
//Compute the double-precision residual; it also serves as the RHS for the next inner solve.
Linop_d.HermOp(sol_d, tmp_d);
RealD norm = axpy_norm(src_d, -1., tmp_d, src_d_in); //src_d is residual vector
std::cout<<GridLogMessage<<"MixedPrecisionConjugateGradient: Outer iteration " <<outer_iter<<" residual "<< norm<< " target "<< stop<<std::endl;
if(norm < OuterLoopNormMult * stop){
std::cout<<GridLogMessage<<"MixedPrecisionConjugateGradient: Outer iteration converged on iteration " <<outer_iter <<std::endl;
break;
}
while(norm * inner_tol * inner_tol < stop) inner_tol *= 2;  // raise inner_tol until norm*inner_tol^2 >= stop, i.e. inner_tol of order sqrt(stop/norm)
PrecChangeTimer.Start();
precisionChange(src_f, src_d);
PrecChangeTimer.Stop();
sol_f = Zero();
//Optionally improve inner solver guess (eg using known eigenvectors)
if(guesser != NULL)
(*guesser)(src_f, sol_f);
//Inner CG
CG_f.Tolerance = inner_tol;
InnerCGtimer.Start();
CG_f(Linop_f, src_f, sol_f);
InnerCGtimer.Stop();
TotalInnerIterations += CG_f.IterationsToComplete;
//Convert sol back to double and add to double prec solution
PrecChangeTimer.Start();
precisionChange(tmp_d, sol_f);
PrecChangeTimer.Stop();
axpy(sol_d, 1.0, tmp_d, sol_d);
}
//Final trial CG
std::cout<<GridLogMessage<<"MixedPrecisionConjugateGradient: Starting final patch-up double-precision solve"<<std::endl;
ConjugateGradient<FieldD> CG_d(Tolerance, MaxInnerIterations);
CG_d(Linop_d, src_d_in, sol_d);
TotalFinalStepIterations = CG_d.IterationsToComplete;
TotalTimer.Stop();
std::cout<<GridLogMessage<<"MixedPrecisionConjugateGradient: Inner CG iterations " << TotalInnerIterations << " Restarts " << TotalOuterIterations << " Final CG iterations " << TotalFinalStepIterations << std::endl;
std::cout<<GridLogMessage<<"MixedPrecisionConjugateGradient: Total time " << TotalTimer.Elapsed() << " Precision change " << PrecChangeTimer.Elapsed() << " Inner CG total " << InnerCGtimer.Elapsed() << std::endl;
}
};
NAMESPACE_END(Grid);
#endif
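
An illustrative construction, not taken from this file: LatticeFermionD/LatticeFermionF, sp_grid, Linop_d and Linop_f stand for the double/single precision field types, the single-precision grid and the two operator wrappers built elsewhere in an application.

MixedPrecisionConjugateGradient<LatticeFermionD,LatticeFermionF>
  MPCG(1.0e-10,   // outer tolerance on the double-precision defect
       30000,     // MaxInnerIterations for each single-precision solve
       100,       // MaxOuterIterations (restarts)
       sp_grid,   // GridBase* for the single-precision fields
       Linop_f,   // LinearOperatorBase<LatticeFermionF>
       Linop_d);  // LinearOperatorBase<LatticeFermionD>
MPCG.InnerTolerance = 1.0e-5;  // optional: tolerance of the inner solves (defaults to the outer Tolerance)
MPCG(src_d, sol_d);            // restarted defect correction, then the final double-precision patch-up CG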


@ -0,0 +1,342 @@
/*************************************************************************************
Grid physics library, www.github.com/paboyle/Grid
Source file: ./lib/algorithms/iterative/ConjugateGradientMultiShift.h
Copyright (C) 2015
Author: Azusa Yamaguchi <ayamaguc@staffmail.ed.ac.uk>
Author: Peter Boyle <paboyle@ph.ed.ac.uk>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */
#ifndef GRID_CONJUGATE_MULTI_SHIFT_GRADIENT_H
#define GRID_CONJUGATE_MULTI_SHIFT_GRADIENT_H
NAMESPACE_BEGIN(Grid);
/////////////////////////////////////////////////////////////
// Base classes for iterative processes based on operators
// single input vec, single output vec.
/////////////////////////////////////////////////////////////
template<class Field>
class ConjugateGradientMultiShift : public OperatorMultiFunction<Field>,
public OperatorFunction<Field>
{
public:
using OperatorFunction<Field>::operator();
RealD Tolerance;
Integer MaxIterations;
Integer IterationsToComplete; //Number of iterations the CG took to finish. Filled in upon completion
std::vector<int> IterationsToCompleteShift; // Iterations for this shift
int verbose;
MultiShiftFunction shifts;
std::vector<RealD> TrueResidualShift;
ConjugateGradientMultiShift(Integer maxit,MultiShiftFunction &_shifts) :
MaxIterations(maxit),
shifts(_shifts)
{
verbose=1;
IterationsToCompleteShift.resize(_shifts.order);
TrueResidualShift.resize(_shifts.order);
}
void operator() (LinearOperatorBase<Field> &Linop, const Field &src, Field &psi)
{
GridBase *grid = src.Grid();
int nshift = shifts.order;
std::vector<Field> results(nshift,grid);
(*this)(Linop,src,results,psi);
}
void operator() (LinearOperatorBase<Field> &Linop, const Field &src, std::vector<Field> &results, Field &psi)
{
int nshift = shifts.order;
(*this)(Linop,src,results);
psi = shifts.norm*src;
for(int i=0;i<nshift;i++){
psi = psi + shifts.residues[i]*results[i];
}
return;
}
void operator() (LinearOperatorBase<Field> &Linop, const Field &src, std::vector<Field> &psi)
{
GridBase *grid = src.Grid();
////////////////////////////////////////////////////////////////////////
// Convenience references to the info stored in "MultiShiftFunction"
////////////////////////////////////////////////////////////////////////
int nshift = shifts.order;
std::vector<RealD> &mass(shifts.poles); // Make references to array in "shifts"
std::vector<RealD> &mresidual(shifts.tolerances);
std::vector<RealD> alpha(nshift,1.0);
std::vector<Field> ps(nshift,grid);// Search directions
assert(psi.size()==nshift);
assert(mass.size()==nshift);
assert(mresidual.size()==nshift);
// dynamic sized arrays on stack; 2d is a pain with vector
RealD bs[nshift];
RealD rsq[nshift];
RealD z[nshift][2];
int converged[nshift];
const int primary =0;
//Primary shift fields CG iteration
RealD a,b,c,d;
RealD cp,bp,qq; //prev
// Matrix mult fields
Field r(grid);
Field p(grid);
Field tmp(grid);
Field mmp(grid);
// Check lightest mass
for(int s=0;s<nshift;s++){
assert( mass[s]>= mass[primary] );
converged[s]=0;
}
// Wire guess to zero
// Residuals "r" are src
// First search direction "p" is also src
cp = norm2(src);
// Handle trivial case of zero src.
if( cp == 0. ){
for(int s=0;s<nshift;s++){
psi[s] = Zero();
IterationsToCompleteShift[s] = 1;
TrueResidualShift[s] = 0.;
}
return;
}
for(int s=0;s<nshift;s++){
rsq[s] = cp * mresidual[s] * mresidual[s];
std::cout<<GridLogMessage<<"ConjugateGradientMultiShift: shift "<<s
<<" target resid "<<rsq[s]<<std::endl;
ps[s] = src;
}
// r and p for primary
r=src;
p=src;
//MdagM+m[0]
Linop.HermOpAndNorm(p,mmp,d,qq);
axpy(mmp,mass[0],p,mmp);
RealD rn = norm2(p);
d += rn*mass[0];
// We have verified that the inner product of p and mmp equals d
// after this step, since the computation of d is tricky:
// qq = real(innerProduct(p,mmp));
// std::cout<<GridLogMessage << "debug equal ? qq "<<qq<<" d "<< d<<std::endl;
b = -cp /d;
// Set up the various shift variables
int iz=0;
z[0][1-iz] = 1.0;
z[0][iz] = 1.0;
bs[0] = b;
for(int s=1;s<nshift;s++){
z[s][1-iz] = 1.0;
z[s][iz] = 1.0/( 1.0 - b*(mass[s]-mass[0]));
bs[s] = b*z[s][iz];
}
// r += b[0] A.p[0]
// c= norm(r)
c=axpy_norm(r,b,mmp,r);
for(int s=0;s<nshift;s++) {
axpby(psi[s],0.,-bs[s]*alpha[s],src,src);
}
///////////////////////////////////////
// Timers
///////////////////////////////////////
GridStopWatch AXPYTimer;
GridStopWatch ShiftTimer;
GridStopWatch QRTimer;
GridStopWatch MatrixTimer;
GridStopWatch SolverTimer;
SolverTimer.Start();
// Iteration loop
int k;
for (k=1;k<=MaxIterations;k++){
a = c /cp;
AXPYTimer.Start();
axpy(p,a,p,r);
AXPYTimer.Stop();
// Note to self - direction ps is iterated separately
// for each shift. Does not appear to have any scope
// for avoiding linear algebra in "single" case.
//
// However SAME r is used. Could load "r" and update
// ALL ps[s]. 2/3 Bandwidth saving
// New Kernel: Load r, vector of coeffs, vector of pointers ps
AXPYTimer.Start();
for(int s=0;s<nshift;s++){
if ( ! converged[s] ) {
if (s==0){
axpy(ps[s],a,ps[s],r);
} else{
RealD as =a *z[s][iz]*bs[s] /(z[s][1-iz]*b);
axpby(ps[s],z[s][iz],as,r,ps[s]);
}
}
}
AXPYTimer.Stop();
cp=c;
MatrixTimer.Start();
//Linop.HermOpAndNorm(p,mmp,d,qq); // d is used
// The below is faster on KNL
Linop.HermOp(p,mmp);
d=real(innerProduct(p,mmp));
MatrixTimer.Stop();
AXPYTimer.Start();
axpy(mmp,mass[0],p,mmp);
AXPYTimer.Stop();
RealD rn = norm2(p);
d += rn*mass[0];
bp=b;
b=-cp/d;
AXPYTimer.Start();
c=axpy_norm(r,b,mmp,r);
AXPYTimer.Stop();
// Toggle the recurrence history
bs[0] = b;
iz = 1-iz;
ShiftTimer.Start();
for(int s=1;s<nshift;s++){
if((!converged[s])){
RealD z0 = z[s][1-iz];
RealD z1 = z[s][iz];
z[s][iz] = z0*z1*bp
/ (b*a*(z1-z0) + z1*bp*(1- (mass[s]-mass[0])*b));
bs[s] = b*z[s][iz]/z0; // NB sign rel to Mike
}
}
ShiftTimer.Stop();
for(int s=0;s<nshift;s++){
int ss = s;
// Scope for optimisation here in case of "single".
// Could load psi[0] and pull all ps[s] in.
// if ( single ) ss=primary;
// Bandwidth saving in single case is Ls * 3 -> 2+Ls, so ~ 3x saving
// Pipelined CG gain:
//
// New Kernel: Load r, vector of coeffs, vector of pointers ps
// New Kernel: Load psi[0], vector of coeffs, vector of pointers ps
// If we can predict the coefficient bs then we can fuse these and avoid the write-reread cycle
// on ps[s].
// Before: 3 x npole + 3 x npole
// After : 2 x npole (ps[s]) => 3x speed up of multishift CG.
if( (!converged[s]) ) {
axpy(psi[ss],-bs[s]*alpha[s],ps[s],psi[ss]);
}
}
// Convergence checks
int all_converged = 1;
for(int s=0;s<nshift;s++){
if ( (!converged[s]) ){
IterationsToCompleteShift[s] = k;
RealD css = c * z[s][iz]* z[s][iz];
if(css<rsq[s]){
if ( ! converged[s] )
std::cout<<GridLogMessage<<"ConjugateGradientMultiShift k="<<k<<" Shift "<<s<<" has converged"<<std::endl;
converged[s]=1;
} else {
all_converged=0;
}
}
}
if ( all_converged ){
SolverTimer.Stop();
std::cout<<GridLogMessage<< "CGMultiShift: All shifts have converged iteration "<<k<<std::endl;
std::cout<<GridLogMessage<< "CGMultiShift: Checking solutions"<<std::endl;
// Check answers
for(int s=0; s < nshift; s++) {
Linop.HermOpAndNorm(psi[s],mmp,d,qq);
axpy(tmp,mass[s],psi[s],mmp);
axpy(r,-alpha[s],src,tmp);
RealD rn = norm2(r);
RealD cn = norm2(src);
TrueResidualShift[s] = std::sqrt(rn/cn);
std::cout<<GridLogMessage<<"CGMultiShift: shift["<<s<<"] true residual "<< TrueResidualShift[s] <<std::endl;
}
std::cout << GridLogMessage << "Time Breakdown "<<std::endl;
std::cout << GridLogMessage << "\tElapsed " << SolverTimer.Elapsed() <<std::endl;
std::cout << GridLogMessage << "\tAXPY " << AXPYTimer.Elapsed() <<std::endl;
std::cout << GridLogMessage << "\tMarix " << MatrixTimer.Elapsed() <<std::endl;
std::cout << GridLogMessage << "\tShift " << ShiftTimer.Elapsed() <<std::endl;
IterationsToComplete = k;
return;
}
}
// ugly hack
std::cout<<GridLogMessage<<"CG multi shift did not converge"<<std::endl;
// assert(0);
}
};
NAMESPACE_END(Grid);
#endif
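
A usage sketch under assumed names (HermOp, FGrid, src and LatticeFermion are placeholders for the operator wrapper, grid, source and field type of the application; the MultiShiftFunction is filled elsewhere):

MultiShiftFunction shifts;   // poles, residues, per-pole tolerances and overall norm set elsewhere
ConjugateGradientMultiShift<LatticeFermion> MSCG(10000, shifts);
std::vector<LatticeFermion> sols(shifts.order, FGrid);
MSCG(HermOp, src, sols);     // sols[s] approximates (M + poles[s])^{-1} src, M being the operator applied by HermOp
// The four-argument overload additionally sums the partial fractions:
// MSCG(HermOp, src, sols, psi);   // psi = norm*src + sum_s residues[s]*sols[s]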


@ -0,0 +1,258 @@
/*************************************************************************************
Grid physics library, www.github.com/paboyle/Grid
Source file: ./lib/algorithms/iterative/ConjugateGradientReliableUpdate.h
Copyright (C) 2015
Author: Christopher Kelly <ckelly@phys.columbia.edu>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */
#ifndef GRID_CONJUGATE_GRADIENT_RELIABLE_UPDATE_H
#define GRID_CONJUGATE_GRADIENT_RELIABLE_UPDATE_H
NAMESPACE_BEGIN(Grid);
template<class FieldD,class FieldF,
typename std::enable_if< getPrecision<FieldD>::value == 2, int>::type = 0,
typename std::enable_if< getPrecision<FieldF>::value == 1, int>::type = 0>
class ConjugateGradientReliableUpdate : public LinearFunction<FieldD> {
public:
bool ErrorOnNoConverge; // throw an assert when the CG fails to converge.
// Defaults true.
RealD Tolerance;
Integer MaxIterations;
Integer IterationsToComplete; //Number of iterations the CG took to finish. Filled in upon completion
Integer ReliableUpdatesPerformed;
bool DoFinalCleanup; //Final DP cleanup, defaults to true
Integer IterationsToCleanup; //Final DP cleanup step iterations
LinearOperatorBase<FieldF> &Linop_f;
LinearOperatorBase<FieldD> &Linop_d;
GridBase* SinglePrecGrid;
RealD Delta; //reliable update parameter
//Optional ability to switch to a different linear operator once the tolerance reaches a certain point. Useful for single/half -> single/single
LinearOperatorBase<FieldF> *Linop_fallback;
RealD fallback_transition_tol;
ConjugateGradientReliableUpdate(RealD tol, Integer maxit, RealD _delta, GridBase* _sp_grid, LinearOperatorBase<FieldF> &_Linop_f, LinearOperatorBase<FieldD> &_Linop_d, bool err_on_no_conv = true)
: Tolerance(tol),
MaxIterations(maxit),
Delta(_delta),
Linop_f(_Linop_f),
Linop_d(_Linop_d),
SinglePrecGrid(_sp_grid),
ErrorOnNoConverge(err_on_no_conv),
DoFinalCleanup(true),
Linop_fallback(NULL)
{};
void setFallbackLinop(LinearOperatorBase<FieldF> &_Linop_fallback, const RealD _fallback_transition_tol){
Linop_fallback = &_Linop_fallback;
fallback_transition_tol = _fallback_transition_tol;
}
void operator()(const FieldD &src, FieldD &psi) {
LinearOperatorBase<FieldF> *Linop_f_use = &Linop_f;
bool using_fallback = false;
psi.Checkerboard() = src.Checkerboard();
conformable(psi, src);
RealD cp, c, a, d, b, ssq, qq, b_pred;
FieldD p(src);
FieldD mmp(src);
FieldD r(src);
// Initial residual computation & set up
RealD guess = norm2(psi);
assert(std::isnan(guess) == 0);
Linop_d.HermOpAndNorm(psi, mmp, d, b);
r = src - mmp;
p = r;
a = norm2(p);
cp = a;
ssq = norm2(src);
std::cout << GridLogIterative << std::setprecision(4) << "ConjugateGradientReliableUpdate: guess " << guess << std::endl;
std::cout << GridLogIterative << std::setprecision(4) << "ConjugateGradientReliableUpdate: src " << ssq << std::endl;
std::cout << GridLogIterative << std::setprecision(4) << "ConjugateGradientReliableUpdate: mp " << d << std::endl;
std::cout << GridLogIterative << std::setprecision(4) << "ConjugateGradientReliableUpdate: mmp " << b << std::endl;
std::cout << GridLogIterative << std::setprecision(4) << "ConjugateGradientReliableUpdate: cp,r " << cp << std::endl;
std::cout << GridLogIterative << std::setprecision(4) << "ConjugateGradientReliableUpdate: p " << a << std::endl;
RealD rsq = Tolerance * Tolerance * ssq;
// Check if guess is really REALLY good :)
if (cp <= rsq) {
std::cout << GridLogMessage << "ConjugateGradientReliableUpdate guess was REALLY good\n";
std::cout << GridLogMessage << "\tComputed residual " << std::sqrt(cp / ssq)<<std::endl;
return;
}
//Single prec initialization
FieldF r_f(SinglePrecGrid);
r_f.Checkerboard() = r.Checkerboard();
precisionChange(r_f, r);
FieldF psi_f(r_f);
psi_f = Zero();
FieldF p_f(r_f);
FieldF mmp_f(r_f);
RealD MaxResidSinceLastRelUp = cp; //initial residual
std::cout << GridLogIterative << std::setprecision(4)
<< "ConjugateGradient: k=0 residual " << cp << " target " << rsq << std::endl;
GridStopWatch LinalgTimer;
GridStopWatch MatrixTimer;
GridStopWatch SolverTimer;
SolverTimer.Start();
int k = 0;
int l = 0;
for (k = 1; k <= MaxIterations; k++) {
c = cp;
MatrixTimer.Start();
Linop_f_use->HermOpAndNorm(p_f, mmp_f, d, qq);
MatrixTimer.Stop();
LinalgTimer.Start();
a = c / d;
b_pred = a * (a * qq - d) / c;
cp = axpy_norm(r_f, -a, mmp_f, r_f);
b = cp / c;
// Fuse these loops ; should be really easy
psi_f = a * p_f + psi_f;
//p_f = p_f * b + r_f;
LinalgTimer.Stop();
std::cout << GridLogIterative << "ConjugateGradientReliableUpdate: Iteration " << k
<< " residual " << cp << " target " << rsq << std::endl;
std::cout << GridLogDebug << "a = "<< a << " b_pred = "<< b_pred << " b = "<< b << std::endl;
std::cout << GridLogDebug << "qq = "<< qq << " d = "<< d << " c = "<< c << std::endl;
if(cp > MaxResidSinceLastRelUp){
std::cout << GridLogIterative << "ConjugateGradientReliableUpdate: updating MaxResidSinceLastRelUp : " << MaxResidSinceLastRelUp << " -> " << cp << std::endl;
MaxResidSinceLastRelUp = cp;
}
// Stopping condition
if (cp <= rsq) {
//Although not written in the paper, I assume that I have to add on the final solution
precisionChange(mmp, psi_f);
psi = psi + mmp;
SolverTimer.Stop();
Linop_d.HermOpAndNorm(psi, mmp, d, qq);
p = mmp - src;
RealD srcnorm = std::sqrt(norm2(src));
RealD resnorm = std::sqrt(norm2(p));
RealD true_residual = resnorm / srcnorm;
std::cout << GridLogMessage << "ConjugateGradientReliableUpdate Converged on iteration " << k << " after " << l << " reliable updates" << std::endl;
std::cout << GridLogMessage << "\tComputed residual " << std::sqrt(cp / ssq)<<std::endl;
std::cout << GridLogMessage << "\tTrue residual " << true_residual<<std::endl;
std::cout << GridLogMessage << "\tTarget " << Tolerance << std::endl;
std::cout << GridLogMessage << "Time breakdown "<<std::endl;
std::cout << GridLogMessage << "\tElapsed " << SolverTimer.Elapsed() <<std::endl;
std::cout << GridLogMessage << "\tMatrix " << MatrixTimer.Elapsed() <<std::endl;
std::cout << GridLogMessage << "\tLinalg " << LinalgTimer.Elapsed() <<std::endl;
IterationsToComplete = k;
ReliableUpdatesPerformed = l;
if(DoFinalCleanup){
//Do a final CG to cleanup
std::cout << GridLogMessage << "ConjugateGradientReliableUpdate performing final cleanup.\n";
ConjugateGradient<FieldD> CG(Tolerance,MaxIterations);
CG.ErrorOnNoConverge = ErrorOnNoConverge;
CG(Linop_d,src,psi);
IterationsToCleanup = CG.IterationsToComplete;
}
else if (ErrorOnNoConverge) assert(true_residual / Tolerance < 10000.0);
std::cout << GridLogMessage << "ConjugateGradientReliableUpdate complete.\n";
return;
}
else if(cp < Delta * MaxResidSinceLastRelUp) { //reliable update
std::cout << GridLogMessage << "ConjugateGradientReliableUpdate "
<< cp << "(residual) < " << Delta << "(Delta) * " << MaxResidSinceLastRelUp << "(MaxResidSinceLastRelUp) on iteration " << k << " : performing reliable update\n";
precisionChange(mmp, psi_f);
psi = psi + mmp;
Linop_d.HermOpAndNorm(psi, mmp, d, qq);
r = src - mmp;
psi_f = Zero();
precisionChange(r_f, r);
cp = norm2(r);
MaxResidSinceLastRelUp = cp;
b = cp/c;
std::cout << GridLogMessage << "ConjugateGradientReliableUpdate new residual " << cp << std::endl;
l = l+1;
}
p_f = p_f * b + r_f; //update search vector after reliable update appears to help convergence
if(!using_fallback && Linop_fallback != NULL && cp < fallback_transition_tol){
std::cout << GridLogMessage << "ConjugateGradientReliableUpdate switching to fallback linear operator on iteration " << k << " at residual " << cp << std::endl;
Linop_f_use = Linop_fallback;
using_fallback = true;
}
}
std::cout << GridLogMessage << "ConjugateGradientReliableUpdate did NOT converge"
<< std::endl;
if (ErrorOnNoConverge) assert(0);
IterationsToComplete = k;
ReliableUpdatesPerformed = l;
}
};
NAMESPACE_END(Grid);
#endif
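
A construction sketch using the same placeholder names as the mixed-precision example further up (sp_grid, Linop_f, Linop_d, src_d, sol_d):

ConjugateGradientReliableUpdate<LatticeFermionD,LatticeFermionF>
  RUCG(1.0e-10, 30000, 0.1, sp_grid, Linop_f, Linop_d);
  // Delta = 0.1: a reliable update is triggered whenever the (squared) residual drops below
  // 10% of its maximum since the last update.
RUCG.DoFinalCleanup = true;                  // default: finish with a double-precision cleanup CG
// RUCG.setFallbackLinop(Linop_f2, 1.0e-6);  // optional: switch single-precision operator below this residual
RUCG(src_d, sol_d);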


@ -0,0 +1,113 @@
/*************************************************************************************
Grid physics library, www.github.com/paboyle/Grid
Source file: ./lib/algorithms/iterative/ConjugateResidual.h
Copyright (C) 2015
Author: Azusa Yamaguchi <ayamaguc@staffmail.ed.ac.uk>
Author: Peter Boyle <paboyle@ph.ed.ac.uk>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */
#ifndef GRID_CONJUGATE_RESIDUAL_H
#define GRID_CONJUGATE_RESIDUAL_H
NAMESPACE_BEGIN(Grid);
/////////////////////////////////////////////////////////////
// Base classes for iterative processes based on operators
// single input vec, single output vec.
/////////////////////////////////////////////////////////////
template<class Field>
class ConjugateResidual : public OperatorFunction<Field> {
public:
using OperatorFunction<Field>::operator();
RealD Tolerance;
Integer MaxIterations;
int verbose;
ConjugateResidual(RealD tol,Integer maxit) : Tolerance(tol), MaxIterations(maxit) {
verbose=0;
};
void operator() (LinearOperatorBase<Field> &Linop,const Field &src, Field &psi){
RealD a, b; // c, d;
RealD cp, ssq,rsq;
RealD rAr, rAAr, rArp;
RealD pAp, pAAp;
GridBase *grid = src.Grid();
psi=Zero();
Field r(grid), p(grid), Ap(grid), Ar(grid);
r=src;
p=src;
Linop.HermOpAndNorm(p,Ap,pAp,pAAp);
Linop.HermOpAndNorm(r,Ar,rAr,rAAr);
cp =norm2(r);
ssq=norm2(src);
rsq=Tolerance*Tolerance*ssq;
if (verbose) std::cout<<GridLogMessage<<"ConjugateResidual: iteration " <<0<<" residual "<<cp<< " target "<< rsq<<std::endl;
for(int k=1;k<MaxIterations;k++){
a = rAr/pAAp;
axpy(psi,a,p,psi);
cp = axpy_norm(r,-a,Ap,r);
rArp=rAr;
Linop.HermOpAndNorm(r,Ar,rAr,rAAr);
b =rAr/rArp;
axpy(p,b,p,r);
pAAp=axpy_norm(Ap,b,Ap,Ar);
if(verbose) std::cout<<GridLogMessage<<"ConjugateResidual: iteration " <<k<<" residual "<<cp<< " target "<< rsq<<std::endl;
if(cp<rsq) {
Linop.HermOp(psi,Ap);
axpy(r,-1.0,src,Ap);
RealD true_resid = norm2(r)/ssq;
std::cout<<GridLogMessage<<"ConjugateResidual: Converged on iteration " <<k
<< " computed residual "<<std::sqrt(cp/ssq)
<< " true residual "<<std::sqrt(true_resid)
<< " target " <<Tolerance <<std::endl;
return;
}
}
std::cout<<GridLogMessage<<"ConjugateResidual did NOT converge"<<std::endl;
assert(0);
}
};
NAMESPACE_END(Grid);
#endif
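
Usage mirrors the CG classes above (HermOp, src, psi are placeholders for objects built elsewhere):

ConjugateResidual<LatticeFermion> CR(1.0e-8, 10000);
CR.verbose = 1;          // print the residual every iteration
CR(HermOp, src, psi);    // note: this class asserts on non-convergence; there is no ErrorOnNoConverge switch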


@ -0,0 +1,108 @@
/*************************************************************************************
Grid physics library, www.github.com/paboyle/Grid
Source file: ./lib/algorithms/iterative/ImplicitlyRestartedLanczos.h
Copyright (C) 2015
Author: Peter Boyle <paboyle@ph.ed.ac.uk>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */
#ifndef GRID_DEFLATION_H
#define GRID_DEFLATION_H
namespace Grid {
template<class Field>
class ZeroGuesser: public LinearFunction<Field> {
public:
virtual void operator()(const Field &src, Field &guess) { guess = Zero(); };
};
template<class Field>
class DoNothingGuesser: public LinearFunction<Field> {
public:
virtual void operator()(const Field &src, Field &guess) { };
};
template<class Field>
class SourceGuesser: public LinearFunction<Field> {
public:
virtual void operator()(const Field &src, Field &guess) { guess = src; };
};
////////////////////////////////
// Fine grid deflation
////////////////////////////////
template<class Field>
class DeflatedGuesser: public LinearFunction<Field> {
private:
const std::vector<Field> &evec;
const std::vector<RealD> &eval;
public:
DeflatedGuesser(const std::vector<Field> & _evec,const std::vector<RealD> & _eval) : evec(_evec), eval(_eval) {};
virtual void operator()(const Field &src,Field &guess) {
guess = Zero();
assert(evec.size()==eval.size());
auto N = evec.size();
for (int i=0;i<N;i++) {
const Field& tmp = evec[i];
axpy(guess,TensorRemove(innerProduct(tmp,src)) / eval[i],tmp,guess);
}
guess.Checkerboard() = src.Checkerboard();
}
};
template<class FineField, class CoarseField>
class LocalCoherenceDeflatedGuesser: public LinearFunction<FineField> {
private:
const std::vector<FineField> &subspace;
const std::vector<CoarseField> &evec_coarse;
const std::vector<RealD> &eval_coarse;
public:
LocalCoherenceDeflatedGuesser(const std::vector<FineField> &_subspace,
const std::vector<CoarseField> &_evec_coarse,
const std::vector<RealD> &_eval_coarse)
: subspace(_subspace),
evec_coarse(_evec_coarse),
eval_coarse(_eval_coarse)
{
}
void operator()(const FineField &src,FineField &guess) {
int N = (int)evec_coarse.size();
CoarseField src_coarse(evec_coarse[0].Grid());
CoarseField guess_coarse(evec_coarse[0].Grid()); guess_coarse = Zero();
blockProject(src_coarse,src,subspace);
for (int i=0;i<N;i++) {
const CoarseField & tmp = evec_coarse[i];
axpy(guess_coarse,TensorRemove(innerProduct(tmp,src_coarse)) / eval_coarse[i],tmp,guess_coarse);
}
blockPromote(guess_coarse,guess,subspace);
guess.Checkerboard() = src.Checkerboard();
};
};
}
#endif
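
These guessers are LinearFunctions, so they plug straight into hooks such as MixedPrecisionConjugateGradient::useGuesser shown further up. A sketch, assuming single-precision eigenpairs (evecs_f, evals) and an MPCG instance built elsewhere:

DeflatedGuesser<LatticeFermionF> guesser(evecs_f, evals);
MPCG.useGuesser(guesser);   // inner single-precision solves now start from the deflated projection of the source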


@ -0,0 +1,258 @@
/*************************************************************************************
Grid physics library, www.github.com/paboyle/Grid
Source file: ./lib/algorithms/iterative/FlexibleCommunicationAvoidingGeneralisedMinimalResidual.h
Copyright (C) 2015
Author: Daniel Richtmann <daniel.richtmann@ur.de>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
See the full license in the file "LICENSE" in the top level distribution
directory
*************************************************************************************/
/* END LEGAL */
#ifndef GRID_FLEXIBLE_COMMUNICATION_AVOIDING_GENERALISED_MINIMAL_RESIDUAL_H
#define GRID_FLEXIBLE_COMMUNICATION_AVOIDING_GENERALISED_MINIMAL_RESIDUAL_H
namespace Grid {
template<class Field>
class FlexibleCommunicationAvoidingGeneralisedMinimalResidual : public OperatorFunction<Field> {
public:
using OperatorFunction<Field>::operator();
bool ErrorOnNoConverge; // Throw an assert when FCAGMRES fails to converge,
// defaults to true
RealD Tolerance;
Integer MaxIterations;
Integer RestartLength;
Integer MaxNumberOfRestarts;
Integer IterationCount; // Number of iterations the FCAGMRES took to finish,
// filled in upon completion
GridStopWatch MatrixTimer;
GridStopWatch PrecTimer;
GridStopWatch LinalgTimer;
GridStopWatch QrTimer;
GridStopWatch CompSolutionTimer;
Eigen::MatrixXcd H;
std::vector<ComplexD> y;
std::vector<ComplexD> gamma;
std::vector<ComplexD> c;
std::vector<ComplexD> s;
LinearFunction<Field> &Preconditioner;
FlexibleCommunicationAvoidingGeneralisedMinimalResidual(RealD tol,
Integer maxit,
LinearFunction<Field> &Prec,
Integer restart_length,
bool err_on_no_conv = true)
: Tolerance(tol)
, MaxIterations(maxit)
, RestartLength(restart_length)
, MaxNumberOfRestarts(MaxIterations/RestartLength + ((MaxIterations%RestartLength == 0) ? 0 : 1))
, ErrorOnNoConverge(err_on_no_conv)
, H(Eigen::MatrixXcd::Zero(RestartLength, RestartLength + 1)) // sizes taken from DD-αAMG code base
, y(RestartLength + 1, 0.)
, gamma(RestartLength + 1, 0.)
, c(RestartLength + 1, 0.)
, s(RestartLength + 1, 0.)
, Preconditioner(Prec) {};
void operator()(LinearOperatorBase<Field> &LinOp, const Field &src, Field &psi) {
std::cout << GridLogWarning << "This algorithm currently doesn't differ from regular FGMRES" << std::endl;
psi.Checkerboard() = src.Checkerboard();
conformable(psi, src);
RealD guess = norm2(psi);
assert(std::isnan(guess) == 0);
RealD cp;
RealD ssq = norm2(src);
RealD rsq = Tolerance * Tolerance * ssq;
Field r(src.Grid());
std::cout << std::setprecision(4) << std::scientific;
std::cout << GridLogIterative << "FlexibleCommunicationAvoidingGeneralisedMinimalResidual: guess " << guess << std::endl;
std::cout << GridLogIterative << "FlexibleCommunicationAvoidingGeneralisedMinimalResidual: src " << ssq << std::endl;
PrecTimer.Reset();
MatrixTimer.Reset();
LinalgTimer.Reset();
QrTimer.Reset();
CompSolutionTimer.Reset();
GridStopWatch SolverTimer;
SolverTimer.Start();
IterationCount = 0;
for (int k=0; k<MaxNumberOfRestarts; k++) {
cp = outerLoopBody(LinOp, src, psi, rsq);
// Stopping condition
if (cp <= rsq) {
SolverTimer.Stop();
LinOp.Op(psi,r);
axpy(r,-1.0,src,r);
RealD srcnorm = sqrt(ssq);
RealD resnorm = sqrt(norm2(r));
RealD true_residual = resnorm / srcnorm;
std::cout << GridLogMessage << "FlexibleCommunicationAvoidingGeneralisedMinimalResidual: Converged on iteration " << IterationCount
<< " computed residual " << sqrt(cp / ssq)
<< " true residual " << true_residual
<< " target " << Tolerance << std::endl;
std::cout << GridLogMessage << "FCAGMRES Time elapsed: Total " << SolverTimer.Elapsed() << std::endl;
std::cout << GridLogMessage << "FCAGMRES Time elapsed: Precon " << PrecTimer.Elapsed() << std::endl;
std::cout << GridLogMessage << "FCAGMRES Time elapsed: Matrix " << MatrixTimer.Elapsed() << std::endl;
std::cout << GridLogMessage << "FCAGMRES Time elapsed: Linalg " << LinalgTimer.Elapsed() << std::endl;
std::cout << GridLogMessage << "FCAGMRES Time elapsed: QR " << QrTimer.Elapsed() << std::endl;
std::cout << GridLogMessage << "FCAGMRES Time elapsed: CompSol " << CompSolutionTimer.Elapsed() << std::endl;
return;
}
}
std::cout << GridLogMessage << "FlexibleCommunicationAvoidingGeneralisedMinimalResidual did NOT converge" << std::endl;
if (ErrorOnNoConverge)
assert(0);
}
RealD outerLoopBody(LinearOperatorBase<Field> &LinOp, const Field &src, Field &psi, RealD rsq) {
RealD cp = 0;
Field w(src.Grid());
Field r(src.Grid());
// these should probably be made class members so that they are only allocated once, not in every restart
std::vector<Field> v(RestartLength + 1, src.Grid()); for (auto &elem : v) elem = Zero();
std::vector<Field> z(RestartLength + 1, src.Grid()); for (auto &elem : z) elem = Zero();
MatrixTimer.Start();
LinOp.Op(psi, w);
MatrixTimer.Stop();
LinalgTimer.Start();
r = src - w;
gamma[0] = sqrt(norm2(r));
v[0] = (1. / gamma[0]) * r;
LinalgTimer.Stop();
for (int i=0; i<RestartLength; i++) {
IterationCount++;
arnoldiStep(LinOp, v, z, w, i);
qrUpdate(i);
cp = norm(gamma[i+1]);
std::cout << GridLogIterative << "FlexibleCommunicationAvoidingGeneralisedMinimalResidual: Iteration " << IterationCount
<< " residual " << cp << " target " << rsq << std::endl;
if ((i == RestartLength - 1) || (IterationCount == MaxIterations) || (cp <= rsq)) {
computeSolution(z, psi, i);
return cp;
}
}
assert(0); // Never reached
return cp;
}
void arnoldiStep(LinearOperatorBase<Field> &LinOp, std::vector<Field> &v, std::vector<Field> &z, Field &w, int iter) {
PrecTimer.Start();
Preconditioner(v[iter], z[iter]);
PrecTimer.Stop();
MatrixTimer.Start();
LinOp.Op(z[iter], w);
MatrixTimer.Stop();
LinalgTimer.Start();
for (int i = 0; i <= iter; ++i) {
H(iter, i) = innerProduct(v[i], w);
w = w - ComplexD(H(iter, i)) * v[i];
}
H(iter, iter + 1) = sqrt(norm2(w));
v[iter + 1] = ComplexD(1. / H(iter, iter + 1)) * w;
LinalgTimer.Stop();
}
void qrUpdate(int iter) {
QrTimer.Start();
for (int i = 0; i < iter ; ++i) {
auto tmp = -s[i] * ComplexD(H(iter, i)) + c[i] * ComplexD(H(iter, i + 1));
H(iter, i) = conjugate(c[i]) * ComplexD(H(iter, i)) + conjugate(s[i]) * ComplexD(H(iter, i + 1));
H(iter, i + 1) = tmp;
}
// Compute new Givens Rotation
auto nu = sqrt(std::norm(H(iter, iter)) + std::norm(H(iter, iter + 1)));
c[iter] = H(iter, iter) / nu;
s[iter] = H(iter, iter + 1) / nu;
// Apply new Givens rotation
H(iter, iter) = nu;
H(iter, iter + 1) = 0.;
gamma[iter + 1] = -s[iter] * gamma[iter];
gamma[iter] = conjugate(c[iter]) * gamma[iter];
QrTimer.Stop();
}
void computeSolution(std::vector<Field> const &z, Field &psi, int iter) {
CompSolutionTimer.Start();
for (int i = iter; i >= 0; i--) {
y[i] = gamma[i];
for (int k = i + 1; k <= iter; k++)
y[i] = y[i] - ComplexD(H(k, i)) * y[k];
y[i] = y[i] / ComplexD(H(i, i));
}
for (int i = 0; i <= iter; i++)
psi = psi + z[i] * y[i];
CompSolutionTimer.Stop();
}
};
}
#endif
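
As the warning in operator() states, this class currently behaves like regular FGMRES and is constructed identically. A sketch with a do-nothing preconditioner (IdentityPrecon is illustrative only, modelled on the guesser classes in the deflation header above; LatticeFermion, LinOp, src and psi are placeholders):

template<class Field>
class IdentityPrecon : public LinearFunction<Field> {
public:
  virtual void operator()(const Field &in, Field &out) { out = in; };  // no preconditioning
};

IdentityPrecon<LatticeFermion> Prec;
FlexibleCommunicationAvoidingGeneralisedMinimalResidual<LatticeFermion>
  FCAGMRES(1.0e-8, 10000, Prec, 25);   // tolerance, max iterations, preconditioner, restart length
FCAGMRES(LinOp, src, psi);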


@ -0,0 +1,256 @@
/*************************************************************************************
Grid physics library, www.github.com/paboyle/Grid
Source file: ./lib/algorithms/iterative/FlexibleGeneralisedMinimalResidual.h
Copyright (C) 2015
Author: Daniel Richtmann <daniel.richtmann@ur.de>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
See the full license in the file "LICENSE" in the top level distribution
directory
*************************************************************************************/
/* END LEGAL */
#ifndef GRID_FLEXIBLE_GENERALISED_MINIMAL_RESIDUAL_H
#define GRID_FLEXIBLE_GENERALISED_MINIMAL_RESIDUAL_H
namespace Grid {
template<class Field>
class FlexibleGeneralisedMinimalResidual : public OperatorFunction<Field> {
public:
using OperatorFunction<Field>::operator();
bool ErrorOnNoConverge; // Throw an assert when FGMRES fails to converge,
// defaults to true
RealD Tolerance;
Integer MaxIterations;
Integer RestartLength;
Integer MaxNumberOfRestarts;
Integer IterationCount; // Number of iterations the FGMRES took to finish,
// filled in upon completion
GridStopWatch MatrixTimer;
GridStopWatch PrecTimer;
GridStopWatch LinalgTimer;
GridStopWatch QrTimer;
GridStopWatch CompSolutionTimer;
Eigen::MatrixXcd H;
std::vector<ComplexD> y;
std::vector<ComplexD> gamma;
std::vector<ComplexD> c;
std::vector<ComplexD> s;
LinearFunction<Field> &Preconditioner;
FlexibleGeneralisedMinimalResidual(RealD tol,
Integer maxit,
LinearFunction<Field> &Prec,
Integer restart_length,
bool err_on_no_conv = true)
: Tolerance(tol)
, MaxIterations(maxit)
, RestartLength(restart_length)
, MaxNumberOfRestarts(MaxIterations/RestartLength + ((MaxIterations%RestartLength == 0) ? 0 : 1))
, ErrorOnNoConverge(err_on_no_conv)
, H(Eigen::MatrixXcd::Zero(RestartLength, RestartLength + 1)) // sizes taken from DD-αAMG code base
, y(RestartLength + 1, 0.)
, gamma(RestartLength + 1, 0.)
, c(RestartLength + 1, 0.)
, s(RestartLength + 1, 0.)
, Preconditioner(Prec) {};
void operator()(LinearOperatorBase<Field> &LinOp, const Field &src, Field &psi) {
psi.Checkerboard() = src.Checkerboard();
conformable(psi, src);
RealD guess = norm2(psi);
assert(std::isnan(guess) == 0);
RealD cp;
RealD ssq = norm2(src);
RealD rsq = Tolerance * Tolerance * ssq;
Field r(src.Grid());
std::cout << std::setprecision(4) << std::scientific;
std::cout << GridLogIterative << "FlexibleGeneralisedMinimalResidual: guess " << guess << std::endl;
std::cout << GridLogIterative << "FlexibleGeneralisedMinimalResidual: src " << ssq << std::endl;
PrecTimer.Reset();
MatrixTimer.Reset();
LinalgTimer.Reset();
QrTimer.Reset();
CompSolutionTimer.Reset();
GridStopWatch SolverTimer;
SolverTimer.Start();
IterationCount = 0;
for (int k=0; k<MaxNumberOfRestarts; k++) {
cp = outerLoopBody(LinOp, src, psi, rsq);
// Stopping condition
if (cp <= rsq) {
SolverTimer.Stop();
LinOp.Op(psi,r);
axpy(r,-1.0,src,r);
RealD srcnorm = sqrt(ssq);
RealD resnorm = sqrt(norm2(r));
RealD true_residual = resnorm / srcnorm;
std::cout << GridLogMessage << "FlexibleGeneralisedMinimalResidual: Converged on iteration " << IterationCount
<< " computed residual " << sqrt(cp / ssq)
<< " true residual " << true_residual
<< " target " << Tolerance << std::endl;
std::cout << GridLogMessage << "FGMRES Time elapsed: Total " << SolverTimer.Elapsed() << std::endl;
std::cout << GridLogMessage << "FGMRES Time elapsed: Precon " << PrecTimer.Elapsed() << std::endl;
std::cout << GridLogMessage << "FGMRES Time elapsed: Matrix " << MatrixTimer.Elapsed() << std::endl;
std::cout << GridLogMessage << "FGMRES Time elapsed: Linalg " << LinalgTimer.Elapsed() << std::endl;
std::cout << GridLogMessage << "FGMRES Time elapsed: QR " << QrTimer.Elapsed() << std::endl;
std::cout << GridLogMessage << "FGMRES Time elapsed: CompSol " << CompSolutionTimer.Elapsed() << std::endl;
return;
}
}
std::cout << GridLogMessage << "FlexibleGeneralisedMinimalResidual did NOT converge" << std::endl;
if (ErrorOnNoConverge)
assert(0);
}
RealD outerLoopBody(LinearOperatorBase<Field> &LinOp, const Field &src, Field &psi, RealD rsq) {
RealD cp = 0;
Field w(src.Grid());
Field r(src.Grid());
// these should probably be made class members so that they are only allocated once, not in every restart
std::vector<Field> v(RestartLength + 1, src.Grid()); for (auto &elem : v) elem = Zero();
std::vector<Field> z(RestartLength + 1, src.Grid()); for (auto &elem : z) elem = Zero();
MatrixTimer.Start();
LinOp.Op(psi, w);
MatrixTimer.Stop();
LinalgTimer.Start();
r = src - w;
gamma[0] = sqrt(norm2(r));
v[0] = (1. / gamma[0]) * r;
LinalgTimer.Stop();
for (int i=0; i<RestartLength; i++) {
IterationCount++;
arnoldiStep(LinOp, v, z, w, i);
qrUpdate(i);
cp = norm(gamma[i+1]);
std::cout << GridLogIterative << "FlexibleGeneralisedMinimalResidual: Iteration " << IterationCount
<< " residual " << cp << " target " << rsq << std::endl;
if ((i == RestartLength - 1) || (IterationCount == MaxIterations) || (cp <= rsq)) {
computeSolution(z, psi, i);
return cp;
}
}
assert(0); // Never reached
return cp;
}
void arnoldiStep(LinearOperatorBase<Field> &LinOp, std::vector<Field> &v, std::vector<Field> &z, Field &w, int iter) {
PrecTimer.Start();
Preconditioner(v[iter], z[iter]);
PrecTimer.Stop();
MatrixTimer.Start();
LinOp.Op(z[iter], w);
MatrixTimer.Stop();
LinalgTimer.Start();
for (int i = 0; i <= iter; ++i) {
H(iter, i) = innerProduct(v[i], w);
w = w - ComplexD(H(iter, i)) * v[i];
}
H(iter, iter + 1) = sqrt(norm2(w));
v[iter + 1] = ComplexD(1. / H(iter, iter + 1)) * w;
LinalgTimer.Stop();
}
void qrUpdate(int iter) {
QrTimer.Start();
for (int i = 0; i < iter ; ++i) {
auto tmp = -s[i] * ComplexD(H(iter, i)) + c[i] * ComplexD(H(iter, i + 1));
H(iter, i) = conjugate(c[i]) * ComplexD(H(iter, i)) + conjugate(s[i]) * ComplexD(H(iter, i + 1));
H(iter, i + 1) = tmp;
}
// Compute new Givens Rotation
auto nu = sqrt(std::norm(H(iter, iter)) + std::norm(H(iter, iter + 1)));
c[iter] = H(iter, iter) / nu;
s[iter] = H(iter, iter + 1) / nu;
// Apply new Givens rotation
H(iter, iter) = nu;
H(iter, iter + 1) = 0.;
gamma[iter + 1] = -s[iter] * gamma[iter];
gamma[iter] = conjugate(c[iter]) * gamma[iter];
QrTimer.Stop();
}
void computeSolution(std::vector<Field> const &z, Field &psi, int iter) {
CompSolutionTimer.Start();
for (int i = iter; i >= 0; i--) {
y[i] = gamma[i];
for (int k = i + 1; k <= iter; k++)
y[i] = y[i] - ComplexD(H(k, i)) * y[k];
y[i] = y[i] / ComplexD(H(i, i));
}
for (int i = 0; i <= iter; i++)
psi = psi + z[i] * y[i];
CompSolutionTimer.Stop();
}
};
}
#endif
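
FGMRES takes exactly the same constructor arguments, so the Prec object from the FCAGMRES sketch above (or any non-trivial LinearFunction, e.g. a coarse-grid solve) can be reused unchanged:

FlexibleGeneralisedMinimalResidual<LatticeFermion> FGMRES(1.0e-8, 10000, Prec, 25);
FGMRES(LinOp, src, psi);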


@ -0,0 +1,244 @@
/*************************************************************************************
Grid physics library, www.github.com/paboyle/Grid
Source file: ./lib/algorithms/iterative/GeneralisedMinimalResidual.h
Copyright (C) 2015
Author: Daniel Richtmann <daniel.richtmann@ur.de>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
See the full license in the file "LICENSE" in the top level distribution
directory
*************************************************************************************/
/* END LEGAL */
#ifndef GRID_GENERALISED_MINIMAL_RESIDUAL_H
#define GRID_GENERALISED_MINIMAL_RESIDUAL_H
namespace Grid {
template<class Field>
class GeneralisedMinimalResidual : public OperatorFunction<Field> {
public:
using OperatorFunction<Field>::operator();
bool ErrorOnNoConverge; // Throw an assert when GMRES fails to converge,
// defaults to true
RealD Tolerance;
Integer MaxIterations;
Integer RestartLength;
Integer MaxNumberOfRestarts;
Integer IterationCount; // Number of iterations the GMRES took to finish,
// filled in upon completion
GridStopWatch MatrixTimer;
GridStopWatch LinalgTimer;
GridStopWatch QrTimer;
GridStopWatch CompSolutionTimer;
Eigen::MatrixXcd H;
std::vector<ComplexD> y;
std::vector<ComplexD> gamma;
std::vector<ComplexD> c;
std::vector<ComplexD> s;
GeneralisedMinimalResidual(RealD tol,
Integer maxit,
Integer restart_length,
bool err_on_no_conv = true)
: Tolerance(tol)
, MaxIterations(maxit)
, RestartLength(restart_length)
, MaxNumberOfRestarts(MaxIterations/RestartLength + ((MaxIterations%RestartLength == 0) ? 0 : 1))
, ErrorOnNoConverge(err_on_no_conv)
, H(Eigen::MatrixXcd::Zero(RestartLength, RestartLength + 1)) // sizes taken from DD-αAMG code base
, y(RestartLength + 1, 0.)
, gamma(RestartLength + 1, 0.)
, c(RestartLength + 1, 0.)
, s(RestartLength + 1, 0.) {};
void operator()(LinearOperatorBase<Field> &LinOp, const Field &src, Field &psi) {
psi.Checkerboard() = src.Checkerboard();
conformable(psi, src);
RealD guess = norm2(psi);
assert(std::isnan(guess) == 0);
RealD cp;
RealD ssq = norm2(src);
RealD rsq = Tolerance * Tolerance * ssq;
Field r(src.Grid());
std::cout << std::setprecision(4) << std::scientific;
std::cout << GridLogIterative << "GeneralisedMinimalResidual: guess " << guess << std::endl;
std::cout << GridLogIterative << "GeneralisedMinimalResidual: src " << ssq << std::endl;
MatrixTimer.Reset();
LinalgTimer.Reset();
QrTimer.Reset();
CompSolutionTimer.Reset();
GridStopWatch SolverTimer;
SolverTimer.Start();
IterationCount = 0;
for (int k=0; k<MaxNumberOfRestarts; k++) {
cp = outerLoopBody(LinOp, src, psi, rsq);
// Stopping condition
if (cp <= rsq) {
SolverTimer.Stop();
LinOp.Op(psi,r);
axpy(r,-1.0,src,r);
RealD srcnorm = sqrt(ssq);
RealD resnorm = sqrt(norm2(r));
RealD true_residual = resnorm / srcnorm;
std::cout << GridLogMessage << "GeneralisedMinimalResidual: Converged on iteration " << IterationCount
<< " computed residual " << sqrt(cp / ssq)
<< " true residual " << true_residual
<< " target " << Tolerance << std::endl;
std::cout << GridLogMessage << "GMRES Time elapsed: Total " << SolverTimer.Elapsed() << std::endl;
std::cout << GridLogMessage << "GMRES Time elapsed: Matrix " << MatrixTimer.Elapsed() << std::endl;
std::cout << GridLogMessage << "GMRES Time elapsed: Linalg " << LinalgTimer.Elapsed() << std::endl;
std::cout << GridLogMessage << "GMRES Time elapsed: QR " << QrTimer.Elapsed() << std::endl;
std::cout << GridLogMessage << "GMRES Time elapsed: CompSol " << CompSolutionTimer.Elapsed() << std::endl;
return;
}
}
std::cout << GridLogMessage << "GeneralisedMinimalResidual did NOT converge" << std::endl;
if (ErrorOnNoConverge)
assert(0);
}
RealD outerLoopBody(LinearOperatorBase<Field> &LinOp, const Field &src, Field &psi, RealD rsq) {
RealD cp = 0;
Field w(src.Grid());
Field r(src.Grid());
// this should probably be made a class member so that it is only allocated once, not in every restart
std::vector<Field> v(RestartLength + 1, src.Grid()); for (auto &elem : v) elem = Zero();
MatrixTimer.Start();
LinOp.Op(psi, w);
MatrixTimer.Stop();
LinalgTimer.Start();
r = src - w;
gamma[0] = sqrt(norm2(r));
v[0] = (1. / gamma[0]) * r;
LinalgTimer.Stop();
for (int i=0; i<RestartLength; i++) {
IterationCount++;
arnoldiStep(LinOp, v, w, i);
qrUpdate(i);
cp = norm(gamma[i+1]);
std::cout << GridLogIterative << "GeneralisedMinimalResidual: Iteration " << IterationCount
<< " residual " << cp << " target " << rsq << std::endl;
if ((i == RestartLength - 1) || (IterationCount == MaxIterations) || (cp <= rsq)) {
computeSolution(v, psi, i);
return cp;
}
}
assert(0); // Never reached
return cp;
}
void arnoldiStep(LinearOperatorBase<Field> &LinOp, std::vector<Field> &v, Field &w, int iter) {
MatrixTimer.Start();
LinOp.Op(v[iter], w);
MatrixTimer.Stop();
LinalgTimer.Start();
for (int i = 0; i <= iter; ++i) {
H(iter, i) = innerProduct(v[i], w);
w = w - ComplexD(H(iter, i)) * v[i];
}
H(iter, iter + 1) = sqrt(norm2(w));
v[iter + 1] = ComplexD(1. / H(iter, iter + 1)) * w;
LinalgTimer.Stop();
}
void qrUpdate(int iter) {
QrTimer.Start();
for (int i = 0; i < iter ; ++i) {
auto tmp = -s[i] * ComplexD(H(iter, i)) + c[i] * ComplexD(H(iter, i + 1));
H(iter, i) = conjugate(c[i]) * ComplexD(H(iter, i)) + conjugate(s[i]) * ComplexD(H(iter, i + 1));
H(iter, i + 1) = tmp;
}
// Compute new Givens Rotation
auto nu = sqrt(std::norm(H(iter, iter)) + std::norm(H(iter, iter + 1)));
c[iter] = H(iter, iter) / nu;
s[iter] = H(iter, iter + 1) / nu;
// Apply new Givens rotation
H(iter, iter) = nu;
H(iter, iter + 1) = 0.;
gamma[iter + 1] = -s[iter] * gamma[iter];
gamma[iter] = conjugate(c[iter]) * gamma[iter];
QrTimer.Stop();
}
void computeSolution(std::vector<Field> const &v, Field &psi, int iter) {
CompSolutionTimer.Start();
for (int i = iter; i >= 0; i--) {
y[i] = gamma[i];
for (int k = i + 1; k <= iter; k++)
y[i] = y[i] - ComplexD(H(k, i)) * y[k];
y[i] = y[i] / ComplexD(H(i, i));
}
for (int i = 0; i <= iter; i++)
psi = psi + v[i] * y[i];
CompSolutionTimer.Stop();
}
};
}
#endif
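
A note on the linear algebra shared by the three GMRES variants above, stated in the code's transposed storage convention (H(col,row) holds the Hessenberg entry h_{row,col}): qrUpdate eliminates the sub-diagonal with a Givens rotation,

    nu = sqrt(|H(i,i)|^2 + |H(i,i+1)|^2),   c[i] = H(i,i)/nu,   s[i] = H(i,i+1)/nu,

and rotates the right-hand side as gamma[i+1] = -s[i]*gamma[i] followed by gamma[i] = conj(c[i])*gamma[i]. In exact arithmetic |gamma[i+1]| equals the residual norm ||b - A*x_i|| of the current restart cycle, which is why cp = norm(gamma[i+1]) (a squared modulus) can be compared directly against rsq = Tolerance^2 * norm2(src). computeSolution then back-substitutes y[i] = (gamma[i] - sum_{k>i} H(k,i)*y[k]) / H(i,i) and accumulates psi += sum_i y[i]*v[i] (y[i]*z[i] in the flexible variants).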


@ -0,0 +1,736 @@
/*************************************************************************************
Grid physics library, www.github.com/paboyle/Grid
Source file: ./lib/algorithms/iterative/ImplicitlyRestartedLanczos.h
Copyright (C) 2015
Author: Peter Boyle <paboyle@ph.ed.ac.uk>
Author: paboyle <paboyle@ph.ed.ac.uk>
Author: Chulwoo Jung <chulwoo@bnl.gov>
Author: Christoph Lehner <clehner@bnl.gov>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */
#ifndef GRID_BIRL_H
#define GRID_BIRL_H
#include <string.h> //memset
//#include <zlib.h>
#include <sys/stat.h>
NAMESPACE_BEGIN(Grid);
/////////////////////////////////////////////////////////////
// Implicitly restarted lanczos
/////////////////////////////////////////////////////////////
template<class Field> class ImplicitlyRestartedLanczosTester
{
public:
virtual int TestConvergence(int j,RealD resid,Field &evec, RealD &eval,RealD evalMaxApprox)=0;
virtual int ReconstructEval(int j,RealD resid,Field &evec, RealD &eval,RealD evalMaxApprox)=0;
};
enum IRLdiagonalisation {
IRLdiagonaliseWithDSTEGR,
IRLdiagonaliseWithQR,
IRLdiagonaliseWithEigen
};
template<class Field> class ImplicitlyRestartedLanczosHermOpTester : public ImplicitlyRestartedLanczosTester<Field>
{
public:
LinearFunction<Field> &_HermOp;
ImplicitlyRestartedLanczosHermOpTester(LinearFunction<Field> &HermOp) : _HermOp(HermOp) { };
int ReconstructEval(int j,RealD resid,Field &B, RealD &eval,RealD evalMaxApprox)
{
return TestConvergence(j,resid,B,eval,evalMaxApprox);
}
int TestConvergence(int j,RealD eresid,Field &B, RealD &eval,RealD evalMaxApprox)
{
Field v(B);
RealD eval_poly = eval;
// Apply operator
_HermOp(B,v);
RealD vnum = real(innerProduct(B,v)); // HermOp.
RealD vden = norm2(B);
RealD vv0 = norm2(v);
eval = vnum/vden;
v -= eval*B;
RealD vv = norm2(v) / ::pow(evalMaxApprox,2.0);
std::cout.precision(13);
std::cout<<GridLogIRL << "[" << std::setw(3)<<j<<"] "
<<"eval = "<<std::setw(25)<< eval << " (" << eval_poly << ")"
<<" |H B[i] - eval[i]B[i]|^2 / evalMaxApprox^2 " << std::setw(25) << vv
<<std::endl;
int conv=0;
if( (vv<eresid*eresid) ) conv = 1;
return conv;
}
};
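// TestConvergence above, in equation form (assuming _HermOp applies a Hermitian operator H):
//   eval = Re<B, H B> / <B, B>                       (Rayleigh quotient of the candidate vector B)
//   vv   = || H B - eval * B ||^2 / evalMaxApprox^2  (eigenpair residual, normalised by the largest eigenvalue)
// and the vector is accepted once vv < eresid^2.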
template<class Field>
class ImplicitlyRestartedLanczos {
private:
const RealD small = 1.0e-8;
int MaxIter;
int MinRestart; // Minimum number of restarts; convergence is only checked after this many
int Nstop; // Number of evecs checked for convergence
int Nk; // Number of eigenvectors sought
// int Np; // Np -- Number of spare vecs in krylov space // == Nm - Nk
int Nm; // Nm -- total number of vectors
IRLdiagonalisation diagonalisation;
int orth_period;
RealD OrthoTime;
RealD eresid, betastp;
////////////////////////////////
// Embedded objects
////////////////////////////////
LinearFunction<Field> &_PolyOp;
LinearFunction<Field> &_HermOp;
ImplicitlyRestartedLanczosTester<Field> &_Tester;
// Default tester provided (we need a ref to something in default case)
ImplicitlyRestartedLanczosHermOpTester<Field> SimpleTester;
/////////////////////////
// Constructor
/////////////////////////
public:
//////////////////////////////////////////////////////////////////
// PAB:
//////////////////////////////////////////////////////////////////
// Too many options & knobs.
// Eliminate:
// orth_period
// betastp
// MinRestart
//
// Do we really need orth_period
// What is the theoretical basis & guarantees of betastp ?
// Nstop=Nk viable?
// MinRestart avoidable with new convergence test?
// Could cut to PolyOp, HermOp, Tester, Nk, Nm, resid, maxiter (+diagonalisation)
// HermOp could be eliminated if we dropped the Power method for max eval.
// -- also: The eval, eval2, eval2_copy stuff is still unnecessarily unclear
//////////////////////////////////////////////////////////////////
ImplicitlyRestartedLanczos(LinearFunction<Field> & PolyOp,
LinearFunction<Field> & HermOp,
ImplicitlyRestartedLanczosTester<Field> & Tester,
int _Nstop, // sought vecs
int _Nk, // sought vecs
int _Nm, // total vecs (Nk plus spares)
RealD _eresid, // eigenvector residual tolerance (relative to the largest-eigenvalue estimate)
int _MaxIter, // Max iterations
RealD _betastp=0.0, // if beta(k) < betastp: converged
int _MinRestart=0, int _orth_period = 1,
IRLdiagonalisation _diagonalisation= IRLdiagonaliseWithEigen) :
SimpleTester(HermOp), _PolyOp(PolyOp), _HermOp(HermOp), _Tester(Tester),
Nstop(_Nstop) , Nk(_Nk), Nm(_Nm),
eresid(_eresid), betastp(_betastp),
MaxIter(_MaxIter) , MinRestart(_MinRestart),
orth_period(_orth_period), diagonalisation(_diagonalisation) { };
ImplicitlyRestartedLanczos(LinearFunction<Field> & PolyOp,
LinearFunction<Field> & HermOp,
int _Nstop, // sought vecs
int _Nk, // sought vecs
int _Nm, // total vecs (Nk plus spares)
RealD _eresid, // eigenvector residual tolerance (relative to the largest-eigenvalue estimate)
int _MaxIter, // Max iterations
RealD _betastp=0.0, // if beta(k) < betastp: converged
int _MinRestart=0, int _orth_period = 1,
IRLdiagonalisation _diagonalisation= IRLdiagonaliseWithEigen) :
SimpleTester(HermOp), _PolyOp(PolyOp), _HermOp(HermOp), _Tester(SimpleTester),
Nstop(_Nstop) , Nk(_Nk), Nm(_Nm),
eresid(_eresid), betastp(_betastp),
MaxIter(_MaxIter) , MinRestart(_MinRestart),
orth_period(_orth_period), diagonalisation(_diagonalisation) { };
////////////////////////////////
// Helpers
////////////////////////////////
template<typename T> static RealD normalise(T& v)
{
RealD nn = norm2(v);
nn = std::sqrt(nn);
v = v * (1.0/nn);
return nn;
}
void orthogonalize(Field& w, std::vector<Field>& evec,int k)
{
OrthoTime-=usecond()/1e6;
basisOrthogonalize(evec,w,k);
normalise(w);
OrthoTime+=usecond()/1e6;
}
/* Rudy Arthur's thesis pp.137
   ------------------------
   Require: M > K,  P = M - K
   Compute the factorization  A V_M = V_M H_M + f_M e_M^dag
   repeat
     Q = I
     for i = 1,...,P do
       Q_i R_i = H_M - theta_i I ;   Q = Q Q_i
       H_M = Q_i^dag H_M Q_i
     end for
     beta_K  = H_M(K+1,K) ;   sigma_K = Q(M,K)
     r   = v_{K+1} beta_K + r sigma_K
     V_K = V_M(1:M) Q(1:M,1:K)
     H_K = H_M(1:K,1:K)
     => A V_K = V_K H_K + f_K e_K^dag ;  extend to an M = K + P step factorization  A V_M = V_M H_M + f_M e_M^dag
   until convergence
*/
void calc(std::vector<RealD>& eval, std::vector<Field>& evec, const Field& src, int& Nconv, bool reverse=false)
{
GridBase *grid = src.Grid();
assert(grid == evec[0].Grid());
// GridLogIRL.TimingMode(1);
std::cout << GridLogIRL <<"**************************************************************************"<< std::endl;
std::cout << GridLogIRL <<" ImplicitlyRestartedLanczos::calc() starting iteration 0 / "<< MaxIter<< std::endl;
std::cout << GridLogIRL <<"**************************************************************************"<< std::endl;
std::cout << GridLogIRL <<" -- seek Nk = " << Nk <<" vectors"<< std::endl;
std::cout << GridLogIRL <<" -- accept Nstop = " << Nstop <<" vectors"<< std::endl;
std::cout << GridLogIRL <<" -- total Nm = " << Nm <<" vectors"<< std::endl;
std::cout << GridLogIRL <<" -- size of eval = " << eval.size() << std::endl;
std::cout << GridLogIRL <<" -- size of evec = " << evec.size() << std::endl;
if ( diagonalisation == IRLdiagonaliseWithDSTEGR ) {
std::cout << GridLogIRL << "Diagonalisation is DSTEGR "<<std::endl;
} else if ( diagonalisation == IRLdiagonaliseWithQR ) {
std::cout << GridLogIRL << "Diagonalisation is QR "<<std::endl;
} else if ( diagonalisation == IRLdiagonaliseWithEigen ) {
std::cout << GridLogIRL << "Diagonalisation is Eigen "<<std::endl;
}
std::cout << GridLogIRL <<"**************************************************************************"<< std::endl;
assert(Nm <= evec.size() && Nm <= eval.size());
// quickly get an idea of the largest eigenvalue to more properly normalize the residuum
RealD evalMaxApprox = 0.0;
{
auto src_n = src;
auto tmp = src;
std::cout << GridLogIRL << " IRL source norm " << norm2(src) << std::endl;
const int _MAX_ITER_IRL_MEVAPP_ = 50;
for (int i=0;i<_MAX_ITER_IRL_MEVAPP_;i++) {
normalise(src_n);
_HermOp(src_n,tmp);
// std::cout << GridLogMessage<< tmp<<std::endl; exit(0);
// std::cout << GridLogIRL << " _HermOp " << norm2(tmp) << std::endl;
RealD vnum = real(innerProduct(src_n,tmp)); // HermOp.
RealD vden = norm2(src_n);
RealD na = vnum/vden;
if (fabs(evalMaxApprox/na - 1.0) < 0.0001)
i=_MAX_ITER_IRL_MEVAPP_; // converged to 0.01%: force exit from the power-iteration loop
evalMaxApprox = na;
std::cout << GridLogIRL << " Approximation of largest eigenvalue: " << evalMaxApprox << std::endl;
src_n = tmp;
}
}
std::vector<RealD> lme(Nm);
std::vector<RealD> lme2(Nm);
std::vector<RealD> eval2(Nm);
std::vector<RealD> eval2_copy(Nm);
Eigen::MatrixXd Qt = Eigen::MatrixXd::Zero(Nm,Nm);
Field f(grid);
Field v(grid);
int k1 = 1;
int k2 = Nk;
RealD beta_k;
Nconv = 0;
// Set initial vector
evec[0] = src;
normalise(evec[0]);
// Initial Nk steps
OrthoTime=0.;
for(int k=0; k<Nk; ++k) step(eval,lme,evec,f,Nm,k);
std::cout<<GridLogIRL <<"Initial "<< Nk <<"steps done "<<std::endl;
std::cout<<GridLogIRL <<"Initial steps:OrthoTime "<<OrthoTime<< "seconds"<<std::endl;
//////////////////////////////////
// Restarting loop begins
//////////////////////////////////
int iter;
for(iter = 0; iter<MaxIter; ++iter){
OrthoTime=0.;
std::cout<< GridLogMessage <<" **********************"<< std::endl;
std::cout<< GridLogMessage <<" Restart iteration = "<< iter << std::endl;
std::cout<< GridLogMessage <<" **********************"<< std::endl;
std::cout<<GridLogIRL <<" running "<<Nm-Nk <<" steps: "<<std::endl;
for(int k=Nk; k<Nm; ++k) step(eval,lme,evec,f,Nm,k);
f *= lme[Nm-1];
std::cout<<GridLogIRL <<" "<<Nm-Nk <<" steps done "<<std::endl;
std::cout<<GridLogIRL <<"Initial steps:OrthoTime "<<OrthoTime<< "seconds"<<std::endl;
//////////////////////////////////
// getting eigenvalues
//////////////////////////////////
for(int k=0; k<Nm; ++k){
eval2[k] = eval[k+k1-1];
lme2[k] = lme[k+k1-1];
}
Qt = Eigen::MatrixXd::Identity(Nm,Nm);
diagonalize(eval2,lme2,Nm,Nm,Qt,grid);
std::cout<<GridLogIRL <<" diagonalized "<<std::endl;
//////////////////////////////////
// sorting
//////////////////////////////////
eval2_copy = eval2;
std::partial_sort(eval2.begin(),eval2.begin()+Nm,eval2.end(),std::greater<RealD>());
std::cout<<GridLogIRL <<" evals sorted "<<std::endl;
const int chunk=8;
for(int io=0; io<k2;io+=chunk){
std::cout<<GridLogIRL << "eval "<< std::setw(3) << io ;
for(int ii=0;ii<chunk;ii++){
if ( (io+ii)<k2 )
std::cout<< " "<< std::setw(12)<< eval2[io+ii];
}
std::cout << std::endl;
}
//////////////////////////////////
// Implicitly shifted QR transformations
//////////////////////////////////
Qt = Eigen::MatrixXd::Identity(Nm,Nm);
for(int ip=k2; ip<Nm; ++ip){
QR_decomp(eval,lme,Nm,Nm,Qt,eval2[ip],k1,Nm);
}
std::cout<<GridLogIRL <<"QR decomposed "<<std::endl;
assert(k1>0); assert(k2<Nm);
basisRotate(evec,Qt,k1-1,k2+1,0,Nm,Nm); /// big constraint on the basis
std::cout<<GridLogIRL <<"basisRotated by Qt *"<<k1-1<<","<<k2+1<<")"<<std::endl;
////////////////////////////////////////////////////
// Compressed vector f and beta(k2)
////////////////////////////////////////////////////
f *= Qt(k2-1,Nm-1);
f += lme[k2-1] * evec[k2];
beta_k = norm2(f);
beta_k = std::sqrt(beta_k);
std::cout<<GridLogIRL<<" beta(k) = "<<beta_k<<std::endl;
RealD betar = 1.0/beta_k;
evec[k2] = betar * f;
lme[k2-1] = beta_k;
////////////////////////////////////////////////////
// Convergence test
////////////////////////////////////////////////////
for(int k=0; k<Nm; ++k){
eval2[k] = eval[k];
lme2[k] = lme[k];
}
Qt = Eigen::MatrixXd::Identity(Nm,Nm);
diagonalize(eval2,lme2,Nk,Nm,Qt,grid);
std::cout<<GridLogIRL <<" Diagonalized "<<std::endl;
Nconv = 0;
if (iter >= MinRestart) {
std::cout << GridLogIRL << "Test convergence: rotate subset of vectors to test convergence " << std::endl;
Field B(grid); B.Checkerboard() = evec[0].Checkerboard();
// power-of-two search pattern; not every eigenvalue in eval2 is assessed.
int allconv =1;
for(int jj = 1; jj<=Nstop; jj*=2){
int j = Nstop-jj;
RealD e = eval2_copy[j]; // eigenvalue estimate; the tester recomputes and overwrites it
basisRotateJ(B,evec,Qt,j,0,Nk,Nm);
if( !_Tester.TestConvergence(j,eresid,B,e,evalMaxApprox) ) {
allconv=0;
}
}
// Do evec[0] for good measure
{
int j=0;
RealD e = eval2_copy[0];
basisRotateJ(B,evec,Qt,j,0,Nk,Nm);
if( !_Tester.TestConvergence(j,eresid,B,e,evalMaxApprox) ) allconv=0;
}
if ( allconv ) Nconv = Nstop;
// test if we converged, if so, terminate
std::cout<<GridLogIRL<<" #modes converged: >= "<<Nconv<<"/"<<Nstop<<std::endl;
// if( Nconv>=Nstop || beta_k < betastp){
if( Nconv>=Nstop){
goto converged;
}
} else {
std::cout << GridLogIRL << "iter < MinRestart: do not yet test for convergence\n";
}
} // end of restart (iter) loop
std::cout<<GridLogError<<"\n NOT converged.\n";
abort();
converged:
{
Field B(grid); B.Checkerboard() = evec[0].Checkerboard();
basisRotate(evec,Qt,0,Nk,0,Nk,Nm);
std::cout << GridLogIRL << " Rotated basis"<<std::endl;
Nconv=0;
//////////////////////////////////////////////////////////////////////
// Full final convergence test; unconditionally applied
//////////////////////////////////////////////////////////////////////
for(int j = 0; j<=Nk; j++){
B=evec[j];
if( _Tester.ReconstructEval(j,eresid,B,eval2[j],evalMaxApprox) ) {
Nconv++;
}
}
if ( Nconv < Nstop )
std::cout << GridLogIRL << "Nconv ("<<Nconv<<") < Nstop ("<<Nstop<<")"<<std::endl;
eval=eval2;
//Keep only converged
eval.resize(Nconv);// Nstop?
evec.resize(Nconv,grid);// Nstop?
basisSortInPlace(evec,eval,reverse);
}
std::cout << GridLogIRL <<"**************************************************************************"<< std::endl;
std::cout << GridLogIRL << "ImplicitlyRestartedLanczos CONVERGED ; Summary :\n";
std::cout << GridLogIRL <<"**************************************************************************"<< std::endl;
std::cout << GridLogIRL << " -- Iterations = "<< iter << "\n";
std::cout << GridLogIRL << " -- beta(k) = "<< beta_k << "\n";
std::cout << GridLogIRL << " -- Nconv = "<< Nconv << "\n";
std::cout << GridLogIRL <<"**************************************************************************"<< std::endl;
}
private:
/* Saad PP. 195
1. Choose an initial vector v_1 of 2-norm unity. Set β_1 ≡ 0, v_0 ≡ 0
2. For k = 1,2,...,m Do:
3.   w_k := A v_k - β_k v_{k-1}
4.   α_k := (w_k, v_k)
5.   w_k := w_k - α_k v_k           // w_k orthogonal to v_k
6.   β_{k+1} := ||w_k||_2. If β_{k+1} = 0 then Stop
7.   v_{k+1} := w_k / β_{k+1}
8. EndDo
*/
void step(std::vector<RealD>& lmd,
std::vector<RealD>& lme,
std::vector<Field>& evec,
Field& w,int Nm,int k)
{
std::cout<<GridLogIRL << "Lanczos step " <<k<<std::endl;
const RealD tiny = 1.0e-20;
assert( k< Nm );
GridStopWatch gsw_op,gsw_o;
Field& evec_k = evec[k];
_PolyOp(evec_k,w); std::cout<<GridLogIRL << "PolyOp" <<std::endl;
if(k>0) w -= lme[k-1] * evec[k-1];
ComplexD zalph = innerProduct(evec_k,w);
RealD alph = real(zalph);
w = w - alph * evec_k;
RealD beta = normalise(w);
lmd[k] = alph;
lme[k] = beta;
if ( (k>0) && ( (k % orth_period) == 0 )) {
std::cout<<GridLogIRL << "Orthogonalising " <<k<<std::endl;
orthogonalize(w,evec,k); // orthonormalise
std::cout<<GridLogIRL << "Orthogonalised " <<k<<std::endl;
}
if(k < Nm-1) evec[k+1] = w;
std::cout<<GridLogIRL << "alpha[" << k << "] = " << zalph << " beta[" << k << "] = "<<beta<<std::endl;
if ( beta < tiny )
std::cout<<GridLogIRL << " beta is tiny "<<beta<<std::endl;
std::cout<<GridLogIRL << "Lanczos step complete " <<k<<std::endl;
}
void diagonalize_Eigen(std::vector<RealD>& lmd, std::vector<RealD>& lme,
int Nk, int Nm,
Eigen::MatrixXd & Qt, // Nm x Nm
GridBase *grid)
{
Eigen::MatrixXd TriDiag = Eigen::MatrixXd::Zero(Nk,Nk);
for(int i=0;i<Nk;i++) TriDiag(i,i) = lmd[i];
for(int i=0;i<Nk-1;i++) TriDiag(i,i+1) = lme[i];
for(int i=0;i<Nk-1;i++) TriDiag(i+1,i) = lme[i];
Eigen::SelfAdjointEigenSolver<Eigen::MatrixXd> eigensolver(TriDiag);
for (int i = 0; i < Nk; i++) {
lmd[Nk-1-i] = eigensolver.eigenvalues()(i);
}
for (int i = 0; i < Nk; i++) {
for (int j = 0; j < Nk; j++) {
Qt(Nk-1-i,j) = eigensolver.eigenvectors()(j,i);
}
}
}
///////////////////////////////////////////////////////////////////////////
// File could end here if we settle on Eigen as the only diagonalisation option
///////////////////////////////////////////////////////////////////////////
void QR_decomp(std::vector<RealD>& lmd, // Nm
std::vector<RealD>& lme, // Nm
int Nk, int Nm, // Nk, Nm
Eigen::MatrixXd& Qt, // Nm x Nm matrix
RealD Dsh, int kmin, int kmax)
{
int k = kmin-1;
RealD x;
RealD Fden = 1.0/hypot(lmd[k]-Dsh,lme[k]);
RealD c = ( lmd[k] -Dsh) *Fden;
RealD s = -lme[k] *Fden;
RealD tmpa1 = lmd[k];
RealD tmpa2 = lmd[k+1];
RealD tmpb = lme[k];
lmd[k] = c*c*tmpa1 +s*s*tmpa2 -2.0*c*s*tmpb;
lmd[k+1] = s*s*tmpa1 +c*c*tmpa2 +2.0*c*s*tmpb;
lme[k] = c*s*(tmpa1-tmpa2) +(c*c-s*s)*tmpb;
x =-s*lme[k+1];
lme[k+1] = c*lme[k+1];
for(int i=0; i<Nk; ++i){
RealD Qtmp1 = Qt(k,i);
RealD Qtmp2 = Qt(k+1,i);
Qt(k,i) = c*Qtmp1 - s*Qtmp2;
Qt(k+1,i)= s*Qtmp1 + c*Qtmp2;
}
// Givens transformations
for(int k = kmin; k < kmax-1; ++k){
RealD Fden = 1.0/hypot(x,lme[k-1]);
RealD c = lme[k-1]*Fden;
RealD s = - x*Fden;
RealD tmpa1 = lmd[k];
RealD tmpa2 = lmd[k+1];
RealD tmpb = lme[k];
lmd[k] = c*c*tmpa1 +s*s*tmpa2 -2.0*c*s*tmpb;
lmd[k+1] = s*s*tmpa1 +c*c*tmpa2 +2.0*c*s*tmpb;
lme[k] = c*s*(tmpa1-tmpa2) +(c*c-s*s)*tmpb;
lme[k-1] = c*lme[k-1] -s*x;
if(k != kmax-2){
x = -s*lme[k+1];
lme[k+1] = c*lme[k+1];
}
for(int i=0; i<Nk; ++i){
RealD Qtmp1 = Qt(k,i);
RealD Qtmp2 = Qt(k+1,i);
Qt(k,i) = c*Qtmp1 -s*Qtmp2;
Qt(k+1,i) = s*Qtmp1 +c*Qtmp2;
}
}
}
void diagonalize(std::vector<RealD>& lmd, std::vector<RealD>& lme,
int Nk, int Nm,
Eigen::MatrixXd & Qt,
GridBase *grid)
{
Qt = Eigen::MatrixXd::Identity(Nm,Nm);
if ( diagonalisation == IRLdiagonaliseWithDSTEGR ) {
diagonalize_lapack(lmd,lme,Nk,Nm,Qt,grid);
} else if ( diagonalisation == IRLdiagonaliseWithQR ) {
diagonalize_QR(lmd,lme,Nk,Nm,Qt,grid);
} else if ( diagonalisation == IRLdiagonaliseWithEigen ) {
diagonalize_Eigen(lmd,lme,Nk,Nm,Qt,grid);
} else {
assert(0);
}
}
#ifdef USE_LAPACK
void LAPACK_dstegr(char *jobz, char *range, int *n, double *d, double *e,
double *vl, double *vu, int *il, int *iu, double *abstol,
int *m, double *w, double *z, int *ldz, int *isuppz,
double *work, int *lwork, int *iwork, int *liwork,
int *info);
#endif
void diagonalize_lapack(std::vector<RealD>& lmd,
std::vector<RealD>& lme,
int Nk, int Nm,
Eigen::MatrixXd& Qt,
GridBase *grid)
{
#ifdef USE_LAPACK
const int size = Nm;
int NN = Nk;
double evals_tmp[NN];
double evec_tmp[NN][NN];
memset(evec_tmp[0],0,sizeof(double)*NN*NN);
double DD[NN];
double EE[NN];
for (int i = 0; i< NN; i++) {
for (int j = i - 1; j <= i + 1; j++) {
if ( j < NN && j >= 0 ) {
if (i==j) DD[i] = lmd[i];
if (i==j) evals_tmp[i] = lmd[i];
if (j==(i-1)) EE[j] = lme[j];
}
}
}
int evals_found;
int lwork = ( (18*NN) > (1+4*NN+NN*NN)? (18*NN):(1+4*NN+NN*NN)) ;
int liwork = 3+NN*10 ;
int iwork[liwork];
double work[lwork];
int isuppz[2*NN];
char jobz = 'V'; // calculate evals & evecs
char range = 'I'; // calculate evals in the index interval [il,iu]
// char range = 'A'; // calculate all evals
char uplo = 'U'; // refer to upper half of original matrix
char compz = 'I'; // Compute eigenvectors of tridiagonal matrix
int ifail[NN];
int info;
int total = grid->_Nprocessors;
int node = grid->_processor;
int interval = (NN/total)+1;
double vl = 0.0, vu = 0.0;
int il = interval*node+1 , iu = interval*(node+1);
if (iu > NN) iu=NN;
double tol = 0.0;
if (1) {
memset(evals_tmp,0,sizeof(double)*NN);
if ( il <= NN){
LAPACK_dstegr(&jobz, &range, &NN,
(double*)DD, (double*)EE,
&vl, &vu, &il, &iu, // these four are ignored if the range parameter is 'A'
&tol, // tolerance
&evals_found, evals_tmp, (double*)evec_tmp, &NN,
isuppz,
work, &lwork, iwork, &liwork,
&info);
for (int i = iu-1; i>= il-1; i--){
evals_tmp[i] = evals_tmp[i - (il-1)];
if (il>1) evals_tmp[i-(il-1)]=0.;
for (int j = 0; j< NN; j++){
evec_tmp[i][j] = evec_tmp[i - (il-1)][j];
if (il>1) evec_tmp[i-(il-1)][j]=0.;
}
}
}
{
grid->GlobalSumVector(evals_tmp,NN);
grid->GlobalSumVector((double*)evec_tmp,NN*NN);
}
}
// Safer to sort instead of just reversing,
// but the routine's documentation says the evals are returned in increasing order,
// while the QR path gives evals in decreasing order.
for(int i=0;i<NN;i++){
lmd [NN-1-i]=evals_tmp[i];
for(int j=0;j<NN;j++){
Qt((NN-1-i),j)=evec_tmp[i][j];
}
}
#else
assert(0);
#endif
}
void diagonalize_QR(std::vector<RealD>& lmd, std::vector<RealD>& lme,
int Nk, int Nm,
Eigen::MatrixXd & Qt,
GridBase *grid)
{
int QRiter = 100*Nm;
int kmin = 1;
int kmax = Nk;
// (this should be more sophisticated)
for(int iter=0; iter<QRiter; ++iter){
// determination of 2x2 leading submatrix
RealD dsub = lmd[kmax-1]-lmd[kmax-2];
RealD dd = std::sqrt(dsub*dsub + 4.0*lme[kmax-2]*lme[kmax-2]);
RealD Dsh = 0.5*(lmd[kmax-2]+lmd[kmax-1] +dd*(dsub/fabs(dsub)));
// (Dsh: shift)
// transformation
QR_decomp(lmd,lme,Nk,Nm,Qt,Dsh,kmin,kmax); // Nk, Nm
// Convergence criterion (redefinition of kmin and kmax)
for(int j=kmax-1; j>= kmin; --j){
RealD dds = fabs(lmd[j-1])+fabs(lmd[j]);
if(fabs(lme[j-1])+dds > dds){
kmax = j+1;
goto continued;
}
}
QRiter = iter;
return;
continued:
for(int j=0; j<kmax-1; ++j){
RealD dds = fabs(lmd[j])+fabs(lmd[j+1]);
if(fabs(lme[j])+dds > dds){
kmin = j+1;
break;
}
}
}
std::cout << GridLogError << "[QL method] Error - Too many iteration: "<<QRiter<<"\n";
abort();
}
};
NAMESPACE_END(Grid);
#endif
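A minimal driver for the class above might look as follows; it mirrors the calcFine() call in LocalCoherenceLanczos further down. LatticeFermion, HermOp (a Hermitian linear operator on that field) and grid are assumed to exist in the calling code, and the Chebyshev window is purely illustrative.
// Sketch only: polynomial-accelerated IRL on an assumed HermOp / grid.
Chebyshev<LatticeFermion> Cheby(0.2,60.0,201);        // assumed spectral window [0.2,60]
FunctionHermOp<LatticeFermion> PolyOp(Cheby,HermOp);  // Cheby(HermOp) drives the Lanczos steps
PlainHermOp<LatticeFermion>    Op(HermOp);            // plain HermOp used for the max-eval estimate
const int Nstop=30, Nk=40, Nm=80;
ImplicitlyRestartedLanczos<LatticeFermion> IRL(PolyOp,Op,Nstop,Nk,Nm,1.0e-8,50);
std::vector<RealD>          eval(Nm);
std::vector<LatticeFermion> evec(Nm,grid);
LatticeFermion src(grid);
src = LatticeFermion::scalar_type(1.0);               // simple constant source
int Nconv;
IRL.calc(eval,evec,src,Nconv);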

@ -0,0 +1,409 @@
/*************************************************************************************
Grid physics library, www.github.com/paboyle/Grid
Source file: ./lib/algorithms/iterative/LocalCoherenceLanczos.h
Copyright (C) 2015
Author: Christoph Lehner <clehner@bnl.gov>
Author: paboyle <paboyle@ph.ed.ac.uk>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */
#ifndef GRID_LOCAL_COHERENCE_IRL_H
#define GRID_LOCAL_COHERENCE_IRL_H
NAMESPACE_BEGIN(Grid);
struct LanczosParams : Serializable {
public:
GRID_SERIALIZABLE_CLASS_MEMBERS(LanczosParams,
ChebyParams, Cheby,/*Chebyshev*/
int, Nstop, /*Vecs in Lanczos must converge Nstop < Nk < Nm*/
int, Nk, /*Vecs in Lanczos seek converge*/
int, Nm, /*Total vecs in Lanczos include restart*/
RealD, resid, /*residual*/
int, MaxIt,
RealD, betastp, /* beta stopping threshold; 0 disables the test */
int, MinRes); // Minimum number of restarts before convergence is tested
};
struct LocalCoherenceLanczosParams : Serializable {
public:
GRID_SERIALIZABLE_CLASS_MEMBERS(LocalCoherenceLanczosParams,
bool, saveEvecs,
bool, doFine,
bool, doFineRead,
bool, doCoarse,
bool, doCoarseRead,
LanczosParams, FineParams,
LanczosParams, CoarseParams,
ChebyParams, Smoother,
RealD , coarse_relax_tol,
std::vector<int>, blockSize,
std::string, config,
std::vector < ComplexD >, omega,
RealD, mass,
RealD, M5);
};
// Duplicate functionality; ProjectedFunctionHermOp could be used with the trivial function
template<class Fobj,class CComplex,int nbasis>
class ProjectedHermOp : public LinearFunction<Lattice<iVector<CComplex,nbasis > > > {
public:
typedef iVector<CComplex,nbasis > CoarseSiteVector;
typedef Lattice<CoarseSiteVector> CoarseField;
typedef Lattice<CComplex> CoarseScalar; // used for inner products on fine field
typedef Lattice<Fobj> FineField;
LinearOperatorBase<FineField> &_Linop;
std::vector<FineField> &subspace;
ProjectedHermOp(LinearOperatorBase<FineField>& linop, std::vector<FineField> & _subspace) :
_Linop(linop), subspace(_subspace)
{
assert(subspace.size() >0);
};
void operator()(const CoarseField& in, CoarseField& out) {
GridBase *FineGrid = subspace[0].Grid();
int checkerboard = subspace[0].Checkerboard();
FineField fin (FineGrid); fin.Checkerboard()= checkerboard;
FineField fout(FineGrid); fout.Checkerboard() = checkerboard;
blockPromote(in,fin,subspace); std::cout<<GridLogIRL<<"ProjectedHermop : Promote to fine"<<std::endl;
_Linop.HermOp(fin,fout); std::cout<<GridLogIRL<<"ProjectedHermop : HermOp (fine) "<<std::endl;
blockProject(out,fout,subspace); std::cout<<GridLogIRL<<"ProjectedHermop : Project to coarse "<<std::endl;
}
};
template<class Fobj,class CComplex,int nbasis>
class ProjectedFunctionHermOp : public LinearFunction<Lattice<iVector<CComplex,nbasis > > > {
public:
typedef iVector<CComplex,nbasis > CoarseSiteVector;
typedef Lattice<CoarseSiteVector> CoarseField;
typedef Lattice<CComplex> CoarseScalar; // used for inner products on fine field
typedef Lattice<Fobj> FineField;
OperatorFunction<FineField> & _poly;
LinearOperatorBase<FineField> &_Linop;
std::vector<FineField> &subspace;
ProjectedFunctionHermOp(OperatorFunction<FineField> & poly,
LinearOperatorBase<FineField>& linop,
std::vector<FineField> & _subspace) :
_poly(poly),
_Linop(linop),
subspace(_subspace)
{ };
void operator()(const CoarseField& in, CoarseField& out) {
GridBase *FineGrid = subspace[0].Grid();
int checkerboard = subspace[0].Checkerboard();
FineField fin (FineGrid); fin.Checkerboard() =checkerboard;
FineField fout(FineGrid);fout.Checkerboard() =checkerboard;
blockPromote(in,fin,subspace); std::cout<<GridLogIRL<<"ProjectedFunctionHermop : Promote to fine"<<std::endl;
_poly(_Linop,fin,fout); std::cout<<GridLogIRL<<"ProjectedFunctionHermop : Poly "<<std::endl;
blockProject(out,fout,subspace); std::cout<<GridLogIRL<<"ProjectedFunctionHermop : Project to coarse "<<std::endl;
}
};
template<class Fobj,class CComplex,int nbasis>
class ImplicitlyRestartedLanczosSmoothedTester : public ImplicitlyRestartedLanczosTester<Lattice<iVector<CComplex,nbasis > > >
{
public:
typedef iVector<CComplex,nbasis > CoarseSiteVector;
typedef Lattice<CoarseSiteVector> CoarseField;
typedef Lattice<CComplex> CoarseScalar; // used for inner products on fine field
typedef Lattice<Fobj> FineField;
LinearFunction<CoarseField> & _Poly;
OperatorFunction<FineField> & _smoother;
LinearOperatorBase<FineField> &_Linop;
RealD _coarse_relax_tol;
std::vector<FineField> &_subspace;
ImplicitlyRestartedLanczosSmoothedTester(LinearFunction<CoarseField> &Poly,
OperatorFunction<FineField> &smoother,
LinearOperatorBase<FineField> &Linop,
std::vector<FineField> &subspace,
RealD coarse_relax_tol=5.0e3)
: _smoother(smoother), _Linop(Linop), _Poly(Poly), _subspace(subspace),
_coarse_relax_tol(coarse_relax_tol)
{ };
int TestConvergence(int j,RealD eresid,CoarseField &B, RealD &eval,RealD evalMaxApprox)
{
CoarseField v(B);
RealD eval_poly = eval;
// Apply operator
_Poly(B,v);
RealD vnum = real(innerProduct(B,v)); // HermOp.
RealD vden = norm2(B);
RealD vv0 = norm2(v);
eval = vnum/vden;
v -= eval*B;
RealD vv = norm2(v) / ::pow(evalMaxApprox,2.0);
std::cout.precision(13);
std::cout<<GridLogIRL << "[" << std::setw(3)<<j<<"] "
<<"eval = "<<std::setw(25)<< eval << " (" << eval_poly << ")"
<<" |H B[i] - eval[i]B[i]|^2 / evalMaxApprox^2 " << std::setw(25) << vv
<<std::endl;
int conv=0;
if( (vv<eresid*eresid) ) conv = 1;
return conv;
}
int ReconstructEval(int j,RealD eresid,CoarseField &B, RealD &eval,RealD evalMaxApprox)
{
GridBase *FineGrid = _subspace[0].Grid();
int checkerboard = _subspace[0].Checkerboard();
FineField fB(FineGrid);fB.Checkerboard() =checkerboard;
FineField fv(FineGrid);fv.Checkerboard() =checkerboard;
blockPromote(B,fv,_subspace);
_smoother(_Linop,fv,fB);
RealD eval_poly = eval;
_Linop.HermOp(fB,fv);
RealD vnum = real(innerProduct(fB,fv)); // HermOp.
RealD vden = norm2(fB);
RealD vv0 = norm2(fv);
eval = vnum/vden;
fv -= eval*fB;
RealD vv = norm2(fv) / ::pow(evalMaxApprox,2.0);
std::cout.precision(13);
std::cout<<GridLogIRL << "[" << std::setw(3)<<j<<"] "
<<"eval = "<<std::setw(25)<< eval << " (" << eval_poly << ")"
<<" |H B[i] - eval[i]B[i]|^2 / evalMaxApprox^2 " << std::setw(25) << vv
<<std::endl;
if ( j > nbasis ) eresid = eresid*_coarse_relax_tol;
if( (vv<eresid*eresid) ) return 1;
return 0;
}
};
////////////////////////////////////////////
// Make serializable Lanczos params
////////////////////////////////////////////
template<class Fobj,class CComplex,int nbasis>
class LocalCoherenceLanczos
{
public:
typedef iVector<CComplex,nbasis > CoarseSiteVector;
typedef Lattice<CComplex> CoarseScalar; // used for inner products on fine field
typedef Lattice<CoarseSiteVector> CoarseField;
typedef Lattice<Fobj> FineField;
protected:
GridBase *_CoarseGrid;
GridBase *_FineGrid;
int _checkerboard;
LinearOperatorBase<FineField> & _FineOp;
std::vector<RealD> &evals_fine;
std::vector<RealD> &evals_coarse;
std::vector<FineField> &subspace;
std::vector<CoarseField> &evec_coarse;
private:
std::vector<RealD> _evals_fine;
std::vector<RealD> _evals_coarse;
std::vector<FineField> _subspace;
std::vector<CoarseField> _evec_coarse;
public:
LocalCoherenceLanczos(GridBase *FineGrid,
GridBase *CoarseGrid,
LinearOperatorBase<FineField> &FineOp,
int checkerboard) :
_CoarseGrid(CoarseGrid),
_FineGrid(FineGrid),
_FineOp(FineOp),
_checkerboard(checkerboard),
evals_fine (_evals_fine),
evals_coarse(_evals_coarse),
subspace (_subspace),
evec_coarse(_evec_coarse)
{
evals_fine.resize(0);
evals_coarse.resize(0);
};
//////////////////////////////////////////////////////////////////////////
// Alternate constructor; external storage for use by the Hadrons module
//////////////////////////////////////////////////////////////////////////
LocalCoherenceLanczos(GridBase *FineGrid,
GridBase *CoarseGrid,
LinearOperatorBase<FineField> &FineOp,
int checkerboard,
std::vector<FineField> &ext_subspace,
std::vector<CoarseField> &ext_coarse,
std::vector<RealD> &ext_eval_fine,
std::vector<RealD> &ext_eval_coarse
) :
_CoarseGrid(CoarseGrid),
_FineGrid(FineGrid),
_FineOp(FineOp),
_checkerboard(checkerboard),
evals_fine (ext_eval_fine),
evals_coarse(ext_eval_coarse),
subspace (ext_subspace),
evec_coarse (ext_coarse)
{
evals_fine.resize(0);
evals_coarse.resize(0);
};
void Orthogonalise(void ) {
CoarseScalar InnerProd(_CoarseGrid);
std::cout << GridLogMessage <<" Gramm-Schmidt pass 1"<<std::endl;
blockOrthogonalise(InnerProd,subspace);
std::cout << GridLogMessage <<" Gramm-Schmidt pass 2"<<std::endl;
blockOrthogonalise(InnerProd,subspace);
};
template<typename T> static RealD normalise(T& v)
{
RealD nn = norm2(v);
nn = ::sqrt(nn);
v = v * (1.0/nn);
return nn;
}
/*
void fakeFine(void)
{
int Nk = nbasis;
subspace.resize(Nk,_FineGrid);
subspace[0]=1.0;
subspace[0].Checkerboard()=_checkerboard;
normalise(subspace[0]);
PlainHermOp<FineField> Op(_FineOp);
for(int k=1;k<Nk;k++){
subspace[k].Checkerboard()=_checkerboard;
Op(subspace[k-1],subspace[k]);
normalise(subspace[k]);
}
}
*/
void testFine(RealD resid)
{
assert(evals_fine.size() == nbasis);
assert(subspace.size() == nbasis);
PlainHermOp<FineField> Op(_FineOp);
ImplicitlyRestartedLanczosHermOpTester<FineField> SimpleTester(Op);
for(int k=0;k<nbasis;k++){
assert(SimpleTester.ReconstructEval(k,resid,subspace[k],evals_fine[k],1.0)==1);
}
}
void testCoarse(RealD resid,ChebyParams cheby_smooth,RealD relax)
{
assert(evals_fine.size() == nbasis);
assert(subspace.size() == nbasis);
//////////////////////////////////////////////////////////////////////////////////////////////////
// create a smoother and see if we can get a cheap convergence test and smooth inside the IRL
//////////////////////////////////////////////////////////////////////////////////////////////////
Chebyshev<FineField> ChebySmooth(cheby_smooth);
ProjectedFunctionHermOp<Fobj,CComplex,nbasis> ChebyOp (ChebySmooth,_FineOp,subspace);
ImplicitlyRestartedLanczosSmoothedTester<Fobj,CComplex,nbasis> ChebySmoothTester(ChebyOp,ChebySmooth,_FineOp,subspace,relax);
for(int k=0;k<evec_coarse.size();k++){
if ( k < nbasis ) {
assert(ChebySmoothTester.ReconstructEval(k,resid,evec_coarse[k],evals_coarse[k],1.0)==1);
} else {
assert(ChebySmoothTester.ReconstructEval(k,resid*relax,evec_coarse[k],evals_coarse[k],1.0)==1);
}
}
}
void calcFine(ChebyParams cheby_parms,int Nstop,int Nk,int Nm,RealD resid,
RealD MaxIt, RealD betastp, int MinRes)
{
assert(nbasis<=Nm);
Chebyshev<FineField> Cheby(cheby_parms);
FunctionHermOp<FineField> ChebyOp(Cheby,_FineOp);
PlainHermOp<FineField> Op(_FineOp);
evals_fine.resize(Nm);
subspace.resize(Nm,_FineGrid);
ImplicitlyRestartedLanczos<FineField> IRL(ChebyOp,Op,Nstop,Nk,Nm,resid,MaxIt,betastp,MinRes);
FineField src(_FineGrid);
typedef typename FineField::scalar_type Scalar;
// src=1.0;
src=Scalar(1.0);
src.Checkerboard() = _checkerboard;
int Nconv;
IRL.calc(evals_fine,subspace,src,Nconv,false);
// Shrink down to number saved
assert(Nstop>=nbasis);
assert(Nconv>=nbasis);
evals_fine.resize(nbasis);
subspace.resize(nbasis,_FineGrid);
}
void calcCoarse(ChebyParams cheby_op,ChebyParams cheby_smooth,RealD relax,
int Nstop, int Nk, int Nm,RealD resid,
RealD MaxIt, RealD betastp, int MinRes)
{
Chebyshev<FineField> Cheby(cheby_op);
ProjectedHermOp<Fobj,CComplex,nbasis> Op(_FineOp,subspace);
ProjectedFunctionHermOp<Fobj,CComplex,nbasis> ChebyOp (Cheby,_FineOp,subspace);
//////////////////////////////////////////////////////////////////////////////////////////////////
// create a smoother and see if we can get a cheap convergence test and smooth inside the IRL
//////////////////////////////////////////////////////////////////////////////////////////////////
Chebyshev<FineField> ChebySmooth(cheby_smooth);
ImplicitlyRestartedLanczosSmoothedTester<Fobj,CComplex,nbasis> ChebySmoothTester(ChebyOp,ChebySmooth,_FineOp,subspace,relax);
evals_coarse.resize(Nm);
evec_coarse.resize(Nm,_CoarseGrid);
CoarseField src(_CoarseGrid); src=1.0;
ImplicitlyRestartedLanczos<CoarseField> IRL(ChebyOp,ChebyOp,ChebySmoothTester,Nstop,Nk,Nm,resid,MaxIt,betastp,MinRes);
int Nconv=0;
IRL.calc(evals_coarse,evec_coarse,src,Nconv,false);
assert(Nconv>=Nstop);
evals_coarse.resize(Nstop);
evec_coarse.resize (Nstop,_CoarseGrid);
for (int i=0;i<Nstop;i++){
std::cout << i << " Coarse eval = " << evals_coarse[i] << std::endl;
}
}
};
NAMESPACE_END(Grid);
#endif
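A sketch of the intended call sequence, with the fine operator, grids, checkerboard and Chebyshev parameters assumed to be set up by the caller; the template arguments (vSpinColourVector fine site object, vTComplex coarse scalar) are a common but assumed choice.
// Sketch: fine Lanczos -> block orthogonalise -> coarse Lanczos.
const int nbasis = 60;   // must equal the nbasis template parameter
LocalCoherenceLanczos<vSpinColourVector,vTComplex,nbasis> LCL(FineGrid,CoarseGrid,HermOp,Odd);
LCL.calcFine (ChebyFine, Nstop,Nk,Nm, resid, MaxIt, betastp, MinRes);
LCL.Orthogonalise();
LCL.calcCoarse(ChebyOp, ChebySmooth, relax, NstopC,NkC,NmC, residC, MaxIt, betastp, MinRes);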

@ -0,0 +1,157 @@
/*************************************************************************************
Grid physics library, www.github.com/paboyle/Grid
Source file: ./lib/algorithms/iterative/MinimalResidual.h
Copyright (C) 2015
Author: Daniel Richtmann <daniel.richtmann@ur.de>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
See the full license in the file "LICENSE" in the top level distribution
directory
*************************************************************************************/
/* END LEGAL */
#ifndef GRID_MINIMAL_RESIDUAL_H
#define GRID_MINIMAL_RESIDUAL_H
namespace Grid {
template<class Field> class MinimalResidual : public OperatorFunction<Field> {
public:
using OperatorFunction<Field>::operator();
bool ErrorOnNoConverge; // throw an assert when the MR fails to converge.
// Defaults true.
RealD Tolerance;
Integer MaxIterations;
RealD overRelaxParam;
Integer IterationsToComplete; // Number of iterations the MR took to finish.
// Filled in upon completion
MinimalResidual(RealD tol, Integer maxit, Real ovrelparam = 1.0, bool err_on_no_conv = true)
: Tolerance(tol), MaxIterations(maxit), overRelaxParam(ovrelparam), ErrorOnNoConverge(err_on_no_conv){};
void operator()(LinearOperatorBase<Field> &Linop, const Field &src, Field &psi) {
psi.Checkerboard() = src.Checkerboard();
conformable(psi, src);
ComplexD a, c;
RealD d;
Field Mr(src);
Field r(src);
// Initial residual computation & set up
RealD guess = norm2(psi);
assert(std::isnan(guess) == 0);
RealD ssq = norm2(src);
RealD rsq = Tolerance * Tolerance * ssq;
Linop.Op(psi, Mr);
r = src - Mr;
RealD cp = norm2(r);
std::cout << std::setprecision(4) << std::scientific;
std::cout << GridLogIterative << "MinimalResidual: guess " << guess << std::endl;
std::cout << GridLogIterative << "MinimalResidual: src " << ssq << std::endl;
std::cout << GridLogIterative << "MinimalResidual: cp,r " << cp << std::endl;
if (cp <= rsq) {
return;
}
std::cout << GridLogIterative << "MinimalResidual: k=0 residual " << cp << " target " << rsq << std::endl;
GridStopWatch LinalgTimer;
GridStopWatch MatrixTimer;
GridStopWatch SolverTimer;
SolverTimer.Start();
int k;
for (k = 1; k <= MaxIterations; k++) {
MatrixTimer.Start();
Linop.Op(r, Mr);
MatrixTimer.Stop();
LinalgTimer.Start();
c = innerProduct(Mr, r);
d = norm2(Mr);
a = c / d;
a = a * overRelaxParam;
psi = psi + r * a;
r = r - Mr * a;
cp = norm2(r);
LinalgTimer.Stop();
std::cout << GridLogIterative << "MinimalResidual: Iteration " << k
<< " residual " << cp << " target " << rsq << std::endl;
std::cout << GridLogDebug << "a = " << a << " c = " << c << " d = " << d << std::endl;
// Stopping condition
if (cp <= rsq) {
SolverTimer.Stop();
Linop.Op(psi, Mr);
r = src - Mr;
RealD srcnorm = sqrt(ssq);
RealD resnorm = sqrt(norm2(r));
RealD true_residual = resnorm / srcnorm;
std::cout << GridLogMessage << "MinimalResidual Converged on iteration " << k
<< " computed residual " << sqrt(cp / ssq)
<< " true residual " << true_residual
<< " target " << Tolerance << std::endl;
std::cout << GridLogMessage << "MR Time elapsed: Total " << SolverTimer.Elapsed() << std::endl;
std::cout << GridLogMessage << "MR Time elapsed: Matrix " << MatrixTimer.Elapsed() << std::endl;
std::cout << GridLogMessage << "MR Time elapsed: Linalg " << LinalgTimer.Elapsed() << std::endl;
if (ErrorOnNoConverge)
assert(true_residual / Tolerance < 10000.0);
IterationsToComplete = k;
return;
}
}
std::cout << GridLogMessage << "MinimalResidual did NOT converge"
<< std::endl;
if (ErrorOnNoConverge)
assert(0);
IterationsToComplete = k;
}
};
} // namespace Grid
#endif
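A usage sketch; Linop is assumed to wrap the (possibly non-Hermitian) matrix as a LinearOperatorBase, and the over-relaxation value is illustrative.
// Sketch: over-relaxed MR solve of Linop psi = src.
MinimalResidual<LatticeFermion> MR(1.0e-8,10000,0.8);
psi = Zero();
MR(Linop,src,psi);
std::cout << GridLogMessage << "MR took " << MR.IterationsToComplete << " iterations" << std::endl;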

@ -0,0 +1,276 @@
/*************************************************************************************
Grid physics library, www.github.com/paboyle/Grid
Source file: ./lib/algorithms/iterative/MixedPrecisionFlexibleGeneralisedMinimalResidual.h
Copyright (C) 2015
Author: Daniel Richtmann <daniel.richtmann@ur.de>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
See the full license in the file "LICENSE" in the top level distribution
directory
*************************************************************************************/
/* END LEGAL */
#ifndef GRID_MIXED_PRECISION_FLEXIBLE_GENERALISED_MINIMAL_RESIDUAL_H
#define GRID_MIXED_PRECISION_FLEXIBLE_GENERALISED_MINIMAL_RESIDUAL_H
namespace Grid {
template<class FieldD, class FieldF, typename std::enable_if<getPrecision<FieldD>::value == 2, int>::type = 0, typename std::enable_if< getPrecision<FieldF>::value == 1, int>::type = 0>
class MixedPrecisionFlexibleGeneralisedMinimalResidual : public OperatorFunction<FieldD> {
public:
using OperatorFunction<FieldD>::operator();
bool ErrorOnNoConverge; // Throw an assert when MPFGMRES fails to converge,
// defaults to true
RealD Tolerance;
Integer MaxIterations;
Integer RestartLength;
Integer MaxNumberOfRestarts;
Integer IterationCount; // Number of iterations the MPFGMRES took to finish,
// filled in upon completion
GridStopWatch MatrixTimer;
GridStopWatch PrecTimer;
GridStopWatch LinalgTimer;
GridStopWatch QrTimer;
GridStopWatch CompSolutionTimer;
GridStopWatch ChangePrecTimer;
Eigen::MatrixXcd H;
std::vector<ComplexD> y;
std::vector<ComplexD> gamma;
std::vector<ComplexD> c;
std::vector<ComplexD> s;
GridBase* SinglePrecGrid;
LinearFunction<FieldF> &Preconditioner;
MixedPrecisionFlexibleGeneralisedMinimalResidual(RealD tol,
Integer maxit,
GridBase * sp_grid,
LinearFunction<FieldF> &Prec,
Integer restart_length,
bool err_on_no_conv = true)
: Tolerance(tol)
, MaxIterations(maxit)
, RestartLength(restart_length)
, MaxNumberOfRestarts(MaxIterations/RestartLength + ((MaxIterations%RestartLength == 0) ? 0 : 1))
, ErrorOnNoConverge(err_on_no_conv)
, H(Eigen::MatrixXcd::Zero(RestartLength, RestartLength + 1)) // sizes taken from DD-αAMG code base
, y(RestartLength + 1, 0.)
, gamma(RestartLength + 1, 0.)
, c(RestartLength + 1, 0.)
, s(RestartLength + 1, 0.)
, SinglePrecGrid(sp_grid)
, Preconditioner(Prec) {};
void operator()(LinearOperatorBase<FieldD> &LinOp, const FieldD &src, FieldD &psi) {
psi.Checkerboard() = src.Checkerboard();
conformable(psi, src);
RealD guess = norm2(psi);
assert(std::isnan(guess) == 0);
RealD cp;
RealD ssq = norm2(src);
RealD rsq = Tolerance * Tolerance * ssq;
FieldD r(src.Grid());
std::cout << std::setprecision(4) << std::scientific;
std::cout << GridLogIterative << "MPFGMRES: guess " << guess << std::endl;
std::cout << GridLogIterative << "MPFGMRES: src " << ssq << std::endl;
PrecTimer.Reset();
MatrixTimer.Reset();
LinalgTimer.Reset();
QrTimer.Reset();
CompSolutionTimer.Reset();
ChangePrecTimer.Reset();
GridStopWatch SolverTimer;
SolverTimer.Start();
IterationCount = 0;
for (int k=0; k<MaxNumberOfRestarts; k++) {
cp = outerLoopBody(LinOp, src, psi, rsq);
// Stopping condition
if (cp <= rsq) {
SolverTimer.Stop();
LinOp.Op(psi,r);
axpy(r,-1.0,src,r);
RealD srcnorm = sqrt(ssq);
RealD resnorm = sqrt(norm2(r));
RealD true_residual = resnorm / srcnorm;
std::cout << GridLogMessage << "MPFGMRES: Converged on iteration " << IterationCount
<< " computed residual " << sqrt(cp / ssq)
<< " true residual " << true_residual
<< " target " << Tolerance << std::endl;
std::cout << GridLogMessage << "MPFGMRES Time elapsed: Total " << SolverTimer.Elapsed() << std::endl;
std::cout << GridLogMessage << "MPFGMRES Time elapsed: Precon " << PrecTimer.Elapsed() << std::endl;
std::cout << GridLogMessage << "MPFGMRES Time elapsed: Matrix " << MatrixTimer.Elapsed() << std::endl;
std::cout << GridLogMessage << "MPFGMRES Time elapsed: Linalg " << LinalgTimer.Elapsed() << std::endl;
std::cout << GridLogMessage << "MPFGMRES Time elapsed: QR " << QrTimer.Elapsed() << std::endl;
std::cout << GridLogMessage << "MPFGMRES Time elapsed: CompSol " << CompSolutionTimer.Elapsed() << std::endl;
std::cout << GridLogMessage << "MPFGMRES Time elapsed: PrecChange " << ChangePrecTimer.Elapsed() << std::endl;
return;
}
}
std::cout << GridLogMessage << "MPFGMRES did NOT converge" << std::endl;
if (ErrorOnNoConverge)
assert(0);
}
RealD outerLoopBody(LinearOperatorBase<FieldD> &LinOp, const FieldD &src, FieldD &psi, RealD rsq) {
RealD cp = 0;
FieldD w(src.Grid());
FieldD r(src.Grid());
// these should probably be made class members so that they are only allocated once, not in every restart
std::vector<FieldD> v(RestartLength + 1, src.Grid()); for (auto &elem : v) elem = Zero();
std::vector<FieldD> z(RestartLength + 1, src.Grid()); for (auto &elem : z) elem = Zero();
MatrixTimer.Start();
LinOp.Op(psi, w);
MatrixTimer.Stop();
LinalgTimer.Start();
r = src - w;
gamma[0] = sqrt(norm2(r));
v[0] = (1. / gamma[0]) * r;
LinalgTimer.Stop();
for (int i=0; i<RestartLength; i++) {
IterationCount++;
arnoldiStep(LinOp, v, z, w, i);
qrUpdate(i);
cp = norm(gamma[i+1]);
std::cout << GridLogIterative << "MPFGMRES: Iteration " << IterationCount
<< " residual " << cp << " target " << rsq << std::endl;
if ((i == RestartLength - 1) || (IterationCount == MaxIterations) || (cp <= rsq)) {
computeSolution(z, psi, i);
return cp;
}
}
assert(0); // Never reached
return cp;
}
void arnoldiStep(LinearOperatorBase<FieldD> &LinOp, std::vector<FieldD> &v, std::vector<FieldD> &z, FieldD &w, int iter) {
FieldF v_f(SinglePrecGrid);
FieldF z_f(SinglePrecGrid);
ChangePrecTimer.Start();
precisionChange(v_f, v[iter]);
precisionChange(z_f, z[iter]);
ChangePrecTimer.Stop();
PrecTimer.Start();
Preconditioner(v_f, z_f);
PrecTimer.Stop();
ChangePrecTimer.Start();
precisionChange(z[iter], z_f);
ChangePrecTimer.Stop();
MatrixTimer.Start();
LinOp.Op(z[iter], w);
MatrixTimer.Stop();
LinalgTimer.Start();
for (int i = 0; i <= iter; ++i) {
H(iter, i) = innerProduct(v[i], w);
w = w - ComplexD(H(iter, i)) * v[i];
}
H(iter, iter + 1) = sqrt(norm2(w));
v[iter + 1] = ComplexD(1. / H(iter, iter + 1)) * w;
LinalgTimer.Stop();
}
void qrUpdate(int iter) {
QrTimer.Start();
for (int i = 0; i < iter ; ++i) {
auto tmp = -s[i] * ComplexD(H(iter, i)) + c[i] * ComplexD(H(iter, i + 1));
H(iter, i) = conjugate(c[i]) * ComplexD(H(iter, i)) + conjugate(s[i]) * ComplexD(H(iter, i + 1));
H(iter, i + 1) = tmp;
}
// Compute new Givens Rotation
auto nu = sqrt(std::norm(H(iter, iter)) + std::norm(H(iter, iter + 1)));
c[iter] = H(iter, iter) / nu;
s[iter] = H(iter, iter + 1) / nu;
// Apply new Givens rotation
H(iter, iter) = nu;
H(iter, iter + 1) = 0.;
gamma[iter + 1] = -s[iter] * gamma[iter];
gamma[iter] = conjugate(c[iter]) * gamma[iter];
QrTimer.Stop();
}
void computeSolution(std::vector<FieldD> const &z, FieldD &psi, int iter) {
CompSolutionTimer.Start();
for (int i = iter; i >= 0; i--) {
y[i] = gamma[i];
for (int k = i + 1; k <= iter; k++)
y[i] = y[i] - ComplexD(H(k, i)) * y[k];
y[i] = y[i] / ComplexD(H(i, i));
}
for (int i = 0; i <= iter; i++)
psi = psi + z[i] * y[i];
CompSolutionTimer.Stop();
}
};
}
#endif
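An illustrative construction; the single-precision grid and the inner preconditioner (any LinearFunction on the single-precision field) are assumed to be provided by the caller.
// Sketch: outer double-precision FGMRES(25) with an assumed single-precision preconditioner.
TrivialPrecon<LatticeFermionF> Prec;   // placeholder inner preconditioner
MixedPrecisionFlexibleGeneralisedMinimalResidual<LatticeFermionD,LatticeFermionF>
MPFGMRES(1.0e-12,50000,SinglePrecGrid,Prec,25);
MPFGMRES(LinOpD,src,psi);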

@ -0,0 +1,112 @@
/*************************************************************************************
Grid physics library, www.github.com/paboyle/Grid
Source file: ./lib/algorithms/iterative/NormalEquations.h
Copyright (C) 2015
Author: Peter Boyle <paboyle@ph.ed.ac.uk>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */
#ifndef GRID_NORMAL_EQUATIONS_H
#define GRID_NORMAL_EQUATIONS_H
NAMESPACE_BEGIN(Grid);
///////////////////////////////////////////////////////////////////////////////////////////////////////
// Take a matrix and form an NE solver calling a Herm solver
///////////////////////////////////////////////////////////////////////////////////////////////////////
template<class Field> class NormalEquations {
private:
SparseMatrixBase<Field> & _Matrix;
OperatorFunction<Field> & _HermitianSolver;
LinearFunction<Field> & _Guess;
public:
/////////////////////////////////////////////////////
// Wrap the usual normal equations trick
/////////////////////////////////////////////////////
NormalEquations(SparseMatrixBase<Field> &Matrix, OperatorFunction<Field> &HermitianSolver,
LinearFunction<Field> &Guess)
: _Matrix(Matrix), _HermitianSolver(HermitianSolver), _Guess(Guess) {};
void operator() (const Field &in, Field &out){
Field src(in.Grid());
Field tmp(in.Grid());
MdagMLinearOperator<SparseMatrixBase<Field>,Field> MdagMOp(_Matrix);
_Matrix.Mdag(in,src);
_Guess(src,out);
_HermitianSolver(MdagMOp,src,out); // Mdag M out = Mdag in
}
};
template<class Field> class HPDSolver {
private:
LinearOperatorBase<Field> & _Matrix;
OperatorFunction<Field> & _HermitianSolver;
LinearFunction<Field> & _Guess;
public:
/////////////////////////////////////////////////////
// Wrap the usual normal equations trick
/////////////////////////////////////////////////////
HPDSolver(LinearOperatorBase<Field> &Matrix,
OperatorFunction<Field> &HermitianSolver,
LinearFunction<Field> &Guess)
: _Matrix(Matrix), _HermitianSolver(HermitianSolver), _Guess(Guess) {};
void operator() (const Field &in, Field &out){
_Guess(in,out);
_HermitianSolver(_Matrix,in,out); // Mdag M out = Mdag in
}
};
template<class Field> class MdagMSolver {
private:
SparseMatrixBase<Field> & _Matrix;
OperatorFunction<Field> & _HermitianSolver;
LinearFunction<Field> & _Guess;
public:
/////////////////////////////////////////////////////
// Wrap the usual normal equations trick
/////////////////////////////////////////////////////
MdagMSolver(SparseMatrixBase<Field> &Matrix, OperatorFunction<Field> &HermitianSolver,
LinearFunction<Field> &Guess)
: _Matrix(Matrix), _HermitianSolver(HermitianSolver), _Guess(Guess) {};
void operator() (const Field &in, Field &out){
MdagMLinearOperator<SparseMatrixBase<Field>,Field> MdagMOp(_Matrix);
_Guess(in,out);
_HermitianSolver(MdagMOp,in,out); // Mdag M out = Mdag in
}
};
NAMESPACE_END(Grid);
#endif
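A sketch of the normal-equations wrapper in use; DiracMatrix is assumed to be a SparseMatrixBase implementation and a zero guess stands in for the deflated guesser one would normally pass.
// Sketch: solve M sol = src by running CG on Mdag M sol = Mdag src.
ConjugateGradient<LatticeFermion> CG(1.0e-10,10000);
ZeroGuesser<LatticeFermion>       Guess;
NormalEquations<LatticeFermion>   NE(DiracMatrix,CG,Guess);
NE(src,sol);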

@ -0,0 +1,45 @@
#pragma once
namespace Grid {
template<class Field> class PowerMethod
{
public:
template<typename T> static RealD normalise(T& v)
{
RealD nn = norm2(v);
nn = sqrt(nn);
v = v * (1.0/nn);
return nn;
}
RealD operator()(LinearOperatorBase<Field> &HermOp, const Field &src)
{
GridBase *grid = src.Grid();
// quickly get an idea of the largest eigenvalue to more properly normalize the residuum
RealD evalMaxApprox = 0.0;
auto src_n = src;
auto tmp = src;
const int _MAX_ITER_EST_ = 50;
for (int i=0;i<_MAX_ITER_EST_;i++) {
normalise(src_n);
HermOp.HermOp(src_n,tmp);
RealD vnum = real(innerProduct(src_n,tmp)); // HermOp.
RealD vden = norm2(src_n);
RealD na = vnum/vden;
if ( (fabs(evalMaxApprox/na - 1.0) < 0.001) || (i==_MAX_ITER_EST_-1) ) {
evalMaxApprox = na;
std::cout << GridLogMessage << " Approximation of largest eigenvalue: " << evalMaxApprox << std::endl;
return evalMaxApprox;
}
evalMaxApprox = na;
src_n = tmp;
}
assert(0);
return 0;
}
};
}
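A usage sketch: the estimate is handy for setting the upper edge of a Chebyshev window before a Lanczos run; HermOp and src are assumed to exist in the calling code.
// Sketch: crude largest-eigenvalue estimate of the Hermitian operator.
PowerMethod<LatticeFermion> PM;
RealD lambda_max = PM(HermOp,src);
std::cout << GridLogMessage << "lambda_max estimate " << lambda_max << std::endl;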

@ -0,0 +1,119 @@
/*************************************************************************************
Grid physics library, www.github.com/paboyle/Grid
Source file: ./lib/algorithms/iterative/PrecConjugateResidual.h
Copyright (C) 2015
Author: Peter Boyle <paboyle@ph.ed.ac.uk>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */
#ifndef GRID_PREC_CONJUGATE_RESIDUAL_H
#define GRID_PREC_CONJUGATE_RESIDUAL_H
NAMESPACE_BEGIN(Grid);
/////////////////////////////////////////////////////////////
// Base classes for iterative processes based on operators
// single input vec, single output vec.
/////////////////////////////////////////////////////////////
template<class Field>
class PrecConjugateResidual : public OperatorFunction<Field> {
public:
RealD Tolerance;
Integer MaxIterations;
int verbose;
LinearFunction<Field> &Preconditioner;
PrecConjugateResidual(RealD tol,Integer maxit,LinearFunction<Field> &Prec) : Tolerance(tol), MaxIterations(maxit), Preconditioner(Prec)
{
verbose=1;
};
void operator() (LinearOperatorBase<Field> &Linop,const Field &src, Field &psi){
RealD a, b, c, d;
RealD cp, ssq,rsq;
RealD rAr, rAAr, rArp;
RealD pAp, pAAp;
GridBase *grid = src.Grid();
Field r(grid), p(grid), Ap(grid), Ar(grid), z(grid);
psi=Zero();
r = src;
Preconditioner(r,p);
Linop.HermOpAndNorm(p,Ap,pAp,pAAp);
Ar=Ap;
rAr=pAp;
rAAr=pAAp;
cp =norm2(r);
ssq=norm2(src);
rsq=Tolerance*Tolerance*ssq;
if (verbose) std::cout<<GridLogMessage<<"PrecConjugateResidual: iteration " <<0<<" residual "<<cp<< " target"<< rsq<<std::endl;
for(int k=0;k<MaxIterations;k++){
Preconditioner(Ap,z);
RealD rq= real(innerProduct(Ap,z));
a = rAr/rq;
axpy(psi,a,p,psi);
cp = axpy_norm(r,-a,z,r);
rArp=rAr;
Linop.HermOpAndNorm(r,Ar,rAr,rAAr);
b =rAr/rArp;
axpy(p,b,p,r);
pAAp=axpy_norm(Ap,b,Ap,Ar);
if(verbose) std::cout<<GridLogMessage<<"PrecConjugateResidual: iteration " <<k<<" residual "<<cp<< " target"<< rsq<<std::endl;
if(cp<rsq) {
Linop.HermOp(psi,Ap);
axpy(r,-1.0,src,Ap);
RealD true_resid = norm2(r)/ssq;
std::cout<<GridLogMessage<<"PrecConjugateResidual: Converged on iteration " <<k
<< " computed residual "<<sqrt(cp/ssq)
<< " true residual "<<sqrt(true_resid)
<< " target " <<Tolerance <<std::endl;
return;
}
}
std::cout<<GridLogMessage<<"PrecConjugateResidual did NOT converge"<<std::endl;
assert(0);
}
};
NAMESPACE_END(Grid);
#endif
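Usage sketch; any LinearFunction<Field> works as the preconditioner, and a trivial (identity) preconditioner is assumed here to keep the example self-contained.
// Sketch: preconditioned CR on a Hermitian positive operator.
TrivialPrecon<LatticeFermion> Simple;
PrecConjugateResidual<LatticeFermion> PCR(1.0e-8,10000,Simple);
PCR(HermOp,src,psi);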

@ -0,0 +1,239 @@
/*************************************************************************************
Grid physics library, www.github.com/paboyle/Grid
Source file: ./lib/algorithms/iterative/PrecGeneralisedConjugateResidual.h
Copyright (C) 2015
Author: Azusa Yamaguchi <ayamaguc@staffmail.ed.ac.uk>
Author: Peter Boyle <paboyle@ph.ed.ac.uk>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */
#ifndef GRID_PREC_GCR_H
#define GRID_PREC_GCR_H
///////////////////////////////////////////////////////////////////////////////////////////////////////
//VPGCR Abe and Zhang, 2005.
//INTERNATIONAL JOURNAL OF NUMERICAL ANALYSIS AND MODELING
//Computing and Information Volume 2, Number 2, Pages 147-161
//NB. Likely not original reference since they are focussing on a preconditioner variant.
// but VPGCR was nicely written up in their paper
///////////////////////////////////////////////////////////////////////////////////////////////////////
NAMESPACE_BEGIN(Grid);
#define GCRLogLevel std::cout << GridLogMessage <<std::string(level,'\t')<< " Level "<<level<<" "
template<class Field>
class PrecGeneralisedConjugateResidual : public LinearFunction<Field> {
public:
RealD Tolerance;
Integer MaxIterations;
int verbose;
int mmax;
int nstep;
int steps;
int level;
GridStopWatch PrecTimer;
GridStopWatch MatTimer;
GridStopWatch LinalgTimer;
LinearFunction<Field> &Preconditioner;
LinearOperatorBase<Field> &Linop;
void Level(int lv) { level=lv; };
PrecGeneralisedConjugateResidual(RealD tol,Integer maxit,LinearOperatorBase<Field> &_Linop,LinearFunction<Field> &Prec,int _mmax,int _nstep) :
Tolerance(tol),
MaxIterations(maxit),
Linop(_Linop),
Preconditioner(Prec),
mmax(_mmax),
nstep(_nstep)
{
level=1;
verbose=1;
};
void operator() (const Field &src, Field &psi){
psi=Zero();
RealD cp, ssq,rsq;
ssq=norm2(src);
rsq=Tolerance*Tolerance*ssq;
Field r(src.Grid());
PrecTimer.Reset();
MatTimer.Reset();
LinalgTimer.Reset();
GridStopWatch SolverTimer;
SolverTimer.Start();
steps=0;
for(int k=0;k<MaxIterations;k++){
cp=GCRnStep(src,psi,rsq);
GCRLogLevel <<"PGCR("<<mmax<<","<<nstep<<") "<< steps <<" steps cp = "<<cp<<" target "<<rsq <<std::endl;
if(cp<rsq) {
SolverTimer.Stop();
Linop.HermOp(psi,r);
axpy(r,-1.0,src,r);
RealD tr = norm2(r);
GCRLogLevel<<"PGCR: Converged on iteration " <<steps
<< " computed residual "<<sqrt(cp/ssq)
<< " true residual " <<sqrt(tr/ssq)
<< " target " <<Tolerance <<std::endl;
GCRLogLevel<<"PGCR Time elapsed: Total "<< SolverTimer.Elapsed() <<std::endl;
/*
GCRLogLevel<<"PGCR Time elapsed: Precon "<< PrecTimer.Elapsed() <<std::endl;
GCRLogLevel<<"PGCR Time elapsed: Matrix "<< MatTimer.Elapsed() <<std::endl;
GCRLogLevel<<"PGCR Time elapsed: Linalg "<< LinalgTimer.Elapsed() <<std::endl;
*/
return;
}
}
GCRLogLevel<<"Variable Preconditioned GCR did not converge"<<std::endl;
// assert(0);
}
RealD GCRnStep(const Field &src, Field &psi,RealD rsq){
RealD cp;
RealD a, b;
RealD zAz, zAAz;
RealD rq;
GridBase *grid = src.Grid();
Field r(grid);
Field z(grid);
Field tmp(grid);
Field ttmp(grid);
Field Az(grid);
////////////////////////////////
// history for flexible orthog
////////////////////////////////
std::vector<Field> q(mmax,grid);
std::vector<Field> p(mmax,grid);
std::vector<RealD> qq(mmax);
GCRLogLevel<< "PGCR nStep("<<nstep<<")"<<std::endl;
//////////////////////////////////
// initial guess x0 is taken as nonzero.
// r0=src-A x0 = src
//////////////////////////////////
MatTimer.Start();
Linop.HermOpAndNorm(psi,Az,zAz,zAAz);
MatTimer.Stop();
LinalgTimer.Start();
r=src-Az;
LinalgTimer.Stop();
GCRLogLevel<< "PGCR true residual r = src - A psi "<<norm2(r) <<std::endl;
/////////////////////
// p = Prec(r)
/////////////////////
PrecTimer.Start();
Preconditioner(r,z);
PrecTimer.Stop();
MatTimer.Start();
Linop.HermOpAndNorm(z,Az,zAz,zAAz);
MatTimer.Stop();
LinalgTimer.Start();
//p[0],q[0],qq[0]
p[0]= z;
q[0]= Az;
qq[0]= zAAz;
cp =norm2(r);
LinalgTimer.Stop();
for(int k=0;k<nstep;k++){
steps++;
int kp = k+1;
int peri_k = k %mmax;
int peri_kp= kp%mmax;
LinalgTimer.Start();
rq= real(innerProduct(r,q[peri_k])); // what if rAr not real?
a = rq/qq[peri_k];
axpy(psi,a,p[peri_k],psi);
cp = axpy_norm(r,-a,q[peri_k],r);
LinalgTimer.Stop();
GCRLogLevel<< "PGCR step["<<steps<<"] resid " << cp << " target " <<rsq<<std::endl;
if((k==nstep-1)||(cp<rsq)){
return cp;
}
PrecTimer.Start();
Preconditioner(r,z);// solve Az = r
PrecTimer.Stop();
MatTimer.Start();
Linop.HermOpAndNorm(z,Az,zAz,zAAz);
MatTimer.Stop();
LinalgTimer.Start();
q[peri_kp]=Az;
p[peri_kp]=z;
int northog = ((kp)>(mmax-1))?(mmax-1):(kp); // if more than mmax done, we orthog all mmax history.
for(int back=0;back<northog;back++){
int peri_back=(k-back)%mmax; assert((k-back)>=0);
b=-real(innerProduct(q[peri_back],Az))/qq[peri_back];
p[peri_kp]=p[peri_kp]+b*p[peri_back];
q[peri_kp]=q[peri_kp]+b*q[peri_back];
}
qq[peri_kp]=norm2(q[peri_kp]); // could use axpy_norm
LinalgTimer.Stop();
}
assert(0); // never reached
return cp;
}
};
NAMESPACE_END(Grid);
#endif
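Usage sketch; the mmax/nstep values are illustrative, and a trivial preconditioner stands in for the multigrid or smoother preconditioner one would normally supply.
// Sketch: PGCR(mmax=20, nstep=20) with an assumed preconditioner.
TrivialPrecon<LatticeFermion> Simple;
PrecGeneralisedConjugateResidual<LatticeFermion> PGCR(1.0e-8,10000,HermOp,Simple,20,20);
PGCR(src,psi);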

@ -0,0 +1,241 @@
/*************************************************************************************
Grid physics library, www.github.com/paboyle/Grid
Source file: ./lib/algorithms/iterative/PrecGeneralisedConjugateResidual.h
Copyright (C) 2015
Author: Azusa Yamaguchi <ayamaguc@staffmail.ed.ac.uk>
Author: Peter Boyle <paboyle@ph.ed.ac.uk>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */
#ifndef GRID_PREC_GCR_NON_HERM_H
#define GRID_PREC_GCR_NON_HERM_H
///////////////////////////////////////////////////////////////////////////////////////////////////////
//VPGCR Abe and Zhang, 2005.
//INTERNATIONAL JOURNAL OF NUMERICAL ANALYSIS AND MODELING
//Computing and Information Volume 2, Number 2, Pages 147-161
//NB. Likely not original reference since they are focussing on a preconditioner variant.
// but VPGCR was nicely written up in their paper
///////////////////////////////////////////////////////////////////////////////////////////////////////
NAMESPACE_BEGIN(Grid);
#define GCRLogLevel std::cout << GridLogMessage <<std::string(level,'\t')<< " Level "<<level<<" "
template<class Field>
class PrecGeneralisedConjugateResidualNonHermitian : public LinearFunction<Field> {
public:
RealD Tolerance;
Integer MaxIterations;
int verbose;
int mmax;
int nstep;
int steps;
int level;
GridStopWatch PrecTimer;
GridStopWatch MatTimer;
GridStopWatch LinalgTimer;
LinearFunction<Field> &Preconditioner;
LinearOperatorBase<Field> &Linop;
void Level(int lv) { level=lv; };
PrecGeneralisedConjugateResidualNonHermitian(RealD tol,Integer maxit,LinearOperatorBase<Field> &_Linop,LinearFunction<Field> &Prec,int _mmax,int _nstep) :
Tolerance(tol),
MaxIterations(maxit),
Linop(_Linop),
Preconditioner(Prec),
mmax(_mmax),
nstep(_nstep)
{
level=1;
verbose=1;
};
void operator() (const Field &src, Field &psi){
psi=Zero();
RealD cp, ssq,rsq;
ssq=norm2(src);
rsq=Tolerance*Tolerance*ssq;
Field r(src.Grid());
PrecTimer.Reset();
MatTimer.Reset();
LinalgTimer.Reset();
GridStopWatch SolverTimer;
SolverTimer.Start();
steps=0;
for(int k=0;k<MaxIterations;k++){
cp=GCRnStep(src,psi,rsq);
GCRLogLevel <<"PGCR("<<mmax<<","<<nstep<<") "<< steps <<" steps cp = "<<cp<<" target "<<rsq <<std::endl;
if(cp<rsq) {
SolverTimer.Stop();
Linop.Op(psi,r);
axpy(r,-1.0,src,r);
RealD tr = norm2(r);
GCRLogLevel<<"PGCR: Converged on iteration " <<steps
<< " computed residual "<<sqrt(cp/ssq)
<< " true residual " <<sqrt(tr/ssq)
<< " target " <<Tolerance <<std::endl;
GCRLogLevel<<"PGCR Time elapsed: Total "<< SolverTimer.Elapsed() <<std::endl;
return;
}
}
GCRLogLevel<<"Variable Preconditioned GCR did not converge"<<std::endl;
// assert(0);
}
RealD GCRnStep(const Field &src, Field &psi,RealD rsq){
RealD cp;
ComplexD a, b, zAz;
RealD zAAz;
ComplexD rq;
GridBase *grid = src.Grid();
Field r(grid);
Field z(grid);
Field tmp(grid);
Field ttmp(grid);
Field Az(grid);
////////////////////////////////
// history for flexible orthog
////////////////////////////////
std::vector<Field> q(mmax,grid);
std::vector<Field> p(mmax,grid);
std::vector<RealD> qq(mmax);
GCRLogLevel<< "PGCR nStep("<<nstep<<")"<<std::endl;
//////////////////////////////////
// initial guess x0 is taken as nonzero.
// r0=src-A x0 = src
//////////////////////////////////
MatTimer.Start();
Linop.Op(psi,Az);
zAz = innerProduct(Az,psi);
zAAz= norm2(Az);
MatTimer.Stop();
LinalgTimer.Start();
r=src-Az;
LinalgTimer.Stop();
GCRLogLevel<< "PGCR true residual r = src - A psi "<<norm2(r) <<std::endl;
/////////////////////
// p = Prec(r)
/////////////////////
PrecTimer.Start();
Preconditioner(r,z);
PrecTimer.Stop();
MatTimer.Start();
Linop.Op(z,Az);
MatTimer.Stop();
LinalgTimer.Start();
zAz = innerProduct(Az,psi);
zAAz= norm2(Az);
//p[0],q[0],qq[0]
p[0]= z;
q[0]= Az;
qq[0]= zAAz;
cp =norm2(r);
LinalgTimer.Stop();
for(int k=0;k<nstep;k++){
steps++;
int kp = k+1;
int peri_k = k %mmax;
int peri_kp= kp%mmax;
LinalgTimer.Start();
rq= innerProduct(q[peri_k],r); // what if rAr not real?
a = rq/qq[peri_k];
axpy(psi,a,p[peri_k],psi);
cp = axpy_norm(r,-a,q[peri_k],r);
LinalgTimer.Stop();
GCRLogLevel<< "PGCR step["<<steps<<"] resid " << cp << " target " <<rsq<<std::endl;
if((k==nstep-1)||(cp<rsq)){
return cp;
}
PrecTimer.Start();
Preconditioner(r,z);// solve Az = r
PrecTimer.Stop();
MatTimer.Start();
Linop.Op(z,Az);
MatTimer.Stop();
zAz = innerProduct(Az,psi);
zAAz= norm2(Az);
LinalgTimer.Start();
q[peri_kp]=Az;
p[peri_kp]=z;
int northog = ((kp)>(mmax-1))?(mmax-1):(kp); // if more than mmax done, we orthog all mmax history.
for(int back=0;back<northog;back++){
int peri_back=(k-back)%mmax; assert((k-back)>=0);
b=-real(innerProduct(q[peri_back],Az))/qq[peri_back];
p[peri_kp]=p[peri_kp]+b*p[peri_back];
q[peri_kp]=q[peri_kp]+b*q[peri_back];
}
qq[peri_kp]=norm2(q[peri_kp]); // could use axpy_norm
LinalgTimer.Stop();
}
assert(0); // never reached
return cp;
}
};
NAMESPACE_END(Grid);
#endif


@ -0,0 +1,371 @@
/*************************************************************************************
Grid physics library, www.github.com/paboyle/Grid
Source file: ./lib/algorithms/iterative/QuasiMinimalResidual.h
Copyright (C) 2019
Author: Peter Boyle <paboyle@ph.ed.ac.uk>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
See the full license in the file "LICENSE" in the top level distribution
directory
*************************************************************************************/
/* END LEGAL */
#pragma once
NAMESPACE_BEGIN(Grid);
template<class Field>
RealD innerG5ProductReal(Field &l, Field &r)
{
Gamma G5(Gamma::Algebra::Gamma5);
Field tmp(l.Grid());
// tmp = G5*r;
G5R5(tmp,r);
ComplexD ip =innerProduct(l,tmp);
std::cout << "innerProductRealG5R5 "<<ip<<std::endl;
return ip.real();
}
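// Discarding the imaginary part is justified here: <v, g5 v> is real because gamma_5 is Hermitian,
// and <v, g5 A v> is real for a gamma_5-Hermitian operator (g5 A g5 = A^dag), which is the property
// the QMRg5 variant below relies on.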
template<class Field>
class QuasiMinimalResidual : public OperatorFunction<Field> {
public:
using OperatorFunction<Field>::operator();
bool ErrorOnNoConverge;
RealD Tolerance;
Integer MaxIterations;
Integer IterationCount;
QuasiMinimalResidual(RealD tol,
Integer maxit,
bool err_on_no_conv = true)
: Tolerance(tol)
, MaxIterations(maxit)
, ErrorOnNoConverge(err_on_no_conv)
{};
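// Usage sketch (illustrative; the operator and field names are assumptions):
//
//   QuasiMinimalResidual<LatticeFermion> QMR(1.0e-8,10000);
//   QMR(NonHermitianLinOp,src,sol);  // solve LinOp sol = src directly, without forming normal equations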
#if 1
void operator()(LinearOperatorBase<Field> &LinOp, const Field &b, Field &x)
{
RealD resid;
IterationCount=0;
RealD rho, rho_1, xi, gamma, gamma_1, theta, theta_1;
RealD eta, delta, ep, beta;
GridBase *Grid = b.Grid();
Field r(Grid), d(Grid), s(Grid);
Field v(Grid), w(Grid), y(Grid), z(Grid);
Field v_tld(Grid), w_tld(Grid), y_tld(Grid), z_tld(Grid);
Field p(Grid), q(Grid), p_tld(Grid);
Real normb = norm2(b);
LinOp.Op(x,r); r = b - r;
assert(normb> 0.0);
resid = norm2(r)/normb;
if (resid <= Tolerance) {
return;
}
v_tld = r;
y = v_tld;
rho = norm2(y);
// Take Gamma5 conjugate
// Gamma G5(Gamma::Algebra::Gamma5);
// G5R5(w_tld,r);
// w_tld = G5* v_tld;
w_tld=v_tld;
z = w_tld;
xi = norm2(z);
gamma = 1.0;
eta = -1.0;
theta = 0.0;
for (int i = 1; i <= MaxIterations; i++) {
// Breakdown tests
assert( rho != 0.0);
assert( xi != 0.0);
v = (1. / rho) * v_tld;
y = (1. / rho) * y;
w = (1. / xi) * w_tld;
z = (1. / xi) * z;
ComplexD Zdelta = innerProduct(z, y); // Complex?
std::cout << "Zdelta "<<Zdelta<<std::endl;
delta = Zdelta.real();
y_tld = y;
z_tld = z;
if (i > 1) {
p = y_tld - (xi * delta / ep) * p;
q = z_tld - (rho * delta / ep) * q;
} else {
p = y_tld;
q = z_tld;
}
LinOp.Op(p,p_tld); // p_tld = A * p;
ComplexD Zep = innerProduct(q, p_tld);
ep=Zep.real();
std::cout << "Zep "<<Zep <<std::endl;
// Complex Audit
assert(abs(ep)>0);
beta = ep / delta;
assert(abs(beta)>0);
v_tld = p_tld - beta * v;
y = v_tld;
rho_1 = rho;
rho = norm2(y);
LinOp.AdjOp(q,w_tld);
w_tld = w_tld - beta * w;
z = w_tld;
xi = norm2(z);
gamma_1 = gamma;
theta_1 = theta;
theta = rho / (gamma_1 * beta);
gamma = 1.0 / sqrt(1.0 + theta * theta);
std::cout << "theta "<<theta<<std::endl;
std::cout << "gamma "<<gamma<<std::endl;
assert(abs(gamma)> 0.0);
eta = -eta * rho_1 * gamma* gamma / (beta * gamma_1 * gamma_1);
if (i > 1) {
d = eta * p + (theta_1 * theta_1 * gamma * gamma) * d;
s = eta * p_tld + (theta_1 * theta_1 * gamma * gamma) * s;
} else {
d = eta * p;
s = eta * p_tld;
}
x =x+d; // update approximation vector
r =r-s; // compute residual
if ((resid = norm2(r) / normb) <= Tolerance) {
return;
}
std::cout << "Iteration "<<i<<" resid " << resid<<std::endl;
}
assert(0);
return; // no convergence
}
#else
// QMRg5 SMP thesis
void operator()(LinearOperatorBase<Field> &LinOp, const Field &b, Field &x)
{
// Real scalars
GridBase *grid = b.Grid();
Field r(grid);
Field p_m(grid), p_m_minus_1(grid), p_m_minus_2(grid);
Field v_m(grid), v_m_minus_1(grid), v_m_plus_1(grid);
Field tmp(grid);
RealD w;
RealD z1, z2;
RealD delta_m, delta_m_minus_1;
RealD c_m_plus_1, c_m, c_m_minus_1;
RealD s_m_plus_1, s_m, s_m_minus_1;
RealD alpha, beta, gamma, epsilon;
RealD mu, nu, rho, theta, xi, chi;
RealD mod2r, mod2b;
RealD tau2, target2;
mod2b=norm2(b);
/////////////////////////
// Initial residual
/////////////////////////
LinOp.Op(x,tmp);
r = b - tmp;
/////////////////////////
// \mu = \rho = |r_0|
/////////////////////////
mod2r = norm2(r);
rho = sqrt( mod2r);
mu=rho;
std::cout << "QuasiMinimalResidual rho "<< rho<<std::endl;
/////////////////////////
// Zero negative history
/////////////////////////
v_m_plus_1 = Zero();
v_m_minus_1 = Zero();
p_m_minus_1 = Zero();
p_m_minus_2 = Zero();
// v0
v_m = (1.0/rho)*r;
/////////////////////////
// Initial coeffs
/////////////////////////
delta_m_minus_1 = 1.0;
c_m_minus_1 = 1.0;
c_m = 1.0;
s_m_minus_1 = 0.0;
s_m = 0.0;
/////////////////////////
// Set up convergence check
/////////////////////////
tau2 = mod2r;
target2 = mod2b * Tolerance*Tolerance;
for(int iter = 0 ; iter < MaxIterations; iter++){
/////////////////////////
// \delta_m = (v_m, \gamma_5 v_m)
/////////////////////////
delta_m = innerG5ProductReal(v_m,v_m);
std::cout << "QuasiMinimalResidual delta_m "<< delta_m<<std::endl;
/////////////////////////
// tmp = A v_m
/////////////////////////
LinOp.Op(v_m,tmp);
/////////////////////////
// \alpha = (v_m, \gamma_5 temp) / \delta_m
/////////////////////////
alpha = innerG5ProductReal(v_m,tmp);
alpha = alpha/delta_m ;
std::cout << "QuasiMinimalResidual alpha "<< alpha<<std::endl;
/////////////////////////
// \beta = \rho \delta_m / \delta_{m-1}
/////////////////////////
beta = rho * delta_m / delta_m_minus_1;
std::cout << "QuasiMinimalResidual beta "<< beta<<std::endl;
/////////////////////////
// \tilde{v}_{m+1} = temp - \alpha v_m - \beta v_{m-1}
/////////////////////////
v_m_plus_1 = tmp - alpha*v_m - beta*v_m_minus_1;
///////////////////////////////
// \rho = || \tilde{v}_{m+1} ||
///////////////////////////////
rho = sqrt( norm2(v_m_plus_1) );
std::cout << "QuasiMinimalResidual rho "<< rho<<std::endl;
///////////////////////////////
// v_{m+1} = \tilde{v}_{m+1}
///////////////////////////////
v_m_plus_1 = (1.0 / rho) * v_m_plus_1;
////////////////////////////////
// QMR recurrence coefficients.
////////////////////////////////
theta = s_m_minus_1 * beta;
gamma = c_m_minus_1 * beta;
epsilon = c_m * gamma + s_m * alpha;
xi = -s_m * gamma + c_m * alpha;
nu = sqrt( xi*xi + rho*rho );
c_m_plus_1 = fabs(xi) / nu;
if ( xi == 0.0 ) {
s_m_plus_1 = 1.0;
} else {
s_m_plus_1 = c_m_plus_1 * rho / xi;
}
chi = c_m_plus_1 * xi + s_m_plus_1 * rho;
std::cout << "QuasiMinimalResidual coeffs "<< theta <<" "<<gamma<<" "<< epsilon<<" "<< xi<<" "<< nu<<std::endl;
std::cout << "QuasiMinimalResidual coeffs "<< chi <<std::endl;
////////////////////////////////
//p_m=(v_m - \epsilon p_{m-1} - \theta p_{m-2}) / \chi
////////////////////////////////
p_m = (1.0/chi) * v_m - (epsilon/chi) * p_m_minus_1 - (theta/chi) * p_m_minus_2;
////////////////////////////////////////////////////////////////
// \psi = \psi + c_{m+1} \mu p_m
////////////////////////////////////////////////////////////////
x = x + ( c_m_plus_1 * mu ) * p_m;
////////////////////////////////////////
//
////////////////////////////////////////
mu = -s_m_plus_1 * mu;
delta_m_minus_1 = delta_m;
c_m_minus_1 = c_m;
c_m = c_m_plus_1;
s_m_minus_1 = s_m;
s_m = s_m_plus_1;
////////////////////////////////////
// Could use pointer swizzle games.
////////////////////////////////////
v_m_minus_1 = v_m;
v_m = v_m_plus_1;
p_m_minus_2 = p_m_minus_1;
p_m_minus_1 = p_m;
/////////////////////////////////////
// Convergence checks
/////////////////////////////////////
z1 = RealD(iter+1.0);
z2 = z1 + 1.0;
tau2 = tau2 *( z2 / z1 ) * s_m * s_m;
std::cout << " QuasiMinimumResidual iteration "<< iter<<std::endl;
std::cout << " QuasiMinimumResidual tau bound "<< tau2<<std::endl;
// Compute true residual
mod2r = tau2;
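// The "1 ||" below deliberately forces the true residual to be recomputed on every iteration,
// rather than only once the tau bound comes within a factor of 100 of the target.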
if ( 1 || (tau2 < (100.0 * target2)) ) {
LinOp.Op(x,tmp);
r = b - tmp;
mod2r = norm2(r);
std::cout << " QuasiMinimumResidual true residual is "<< mod2r<<std::endl;
}
if ( mod2r < target2 ) {
std::cout << " QuasiMinimumResidual has converged"<<std::endl;
return;
}
}
}
#endif
};
NAMESPACE_END(Grid);


@ -0,0 +1,621 @@
/*************************************************************************************
Grid physics library, www.github.com/paboyle/Grid
Source file: ./lib/algorithms/iterative/SchurRedBlack.h
Copyright (C) 2015
Author: Peter Boyle <paboyle@ph.ed.ac.uk>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */
#ifndef GRID_SCHUR_RED_BLACK_H
#define GRID_SCHUR_RED_BLACK_H
/*
* Red black Schur decomposition
*
* M = (Mee Meo) = (1 0 ) (Mee 0 ) (1 Mee^{-1} Meo)
* (Moe Moo) (Moe Mee^-1 1 ) (0 Moo-Moe Mee^-1 Meo) (0 1 )
* = L D U
*
* L^-1 = (1 0 )
* (-MoeMee^{-1} 1 )
* L^{dag} = ( 1 Mee^{-dag} Moe^{dag} )
* ( 0 1 )
* L^{-d} = ( 1 -Mee^{-dag} Moe^{dag} )
* ( 0 1 )
*
* U^-1 = (1 -Mee^{-1} Meo)
* (0 1 )
* U^{dag} = ( 1 0)
* (Meo^dag Mee^{-dag} 1)
* U^{-dag} = ( 1 0)
* (-Meo^dag Mee^{-dag} 1)
***********************
* M psi = eta
***********************
*Odd
* i) D_oo psi_o = L^{-1} eta_o
* eta_o' = (D_oo)^dag (eta_o - Moe Mee^{-1} eta_e)
*
* Wilson:
* (D_oo)^{\dag} D_oo psi_o = (D_oo)^dag L^{-1} eta_o
* Stag:
* D_oo psi_o = L^{-1} eta = (eta_o - Moe Mee^{-1} eta_e)
*
* L^-1 eta = (1              0 ) (eta_e)   =>   (L^-1 eta)_o = eta_o - Moe Mee^{-1} eta_e
*            (-Moe Mee^{-1}  1 ) (eta_o)
*
*Even
* ii) Mee psi_e + Meo psi_o = src_e
*
* => sol_e = M_ee^-1 * ( src_e - Meo sol_o )...
*
*
* TODO: Other options:
*
* a) change checkerboards for Schur e<->o
*
* Left precon by Moo^-1
* b) Doo^{dag} M_oo^-dag Moo^-1 Doo psi_0 = (D_oo)^dag M_oo^-dag Moo^-1 L^{-1} eta_o
* eta_o' = (D_oo)^dag M_oo^-dag Moo^-1 (eta_o - Moe Mee^{-1} eta_e)
*
* Right precon by Moo^-1
* c) M_oo^-dag Doo^{dag} Doo Moo^-1 phi_0 = M_oo^-dag (D_oo)^dag L^{-1} eta_o
* eta_o' = M_oo^-dag (D_oo)^dag (eta_o - Moe Mee^{-1} eta_e)
* psi_o = M_oo^-1 phi_o
* TODO: Deflation
*/
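/*
 * Usage sketch (illustrative only; the action, field type and inner solver are assumptions,
 * not prescribed by this header):
 *
 *   ConjugateGradient<LatticeFermion> CG(1.0e-10,10000);
 *   SchurRedBlackDiagMooeeSolve<LatticeFermion> SchurSolver(CG);
 *   SchurSolver(FermionAction,src,sol);  // src,sol live on the full grid; the odd-odd
 *                                        // Schur system is solved internally by CG and the
 *                                        // even checkerboard is reconstructed afterwards.
 */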
namespace Grid {
///////////////////////////////////////////////////////////////////////////////////////////////////////
// Use base class to share code
///////////////////////////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////////////////////
// Take a matrix and form a Red Black solver calling a Herm solver
// Use of RB info prevents making SchurRedBlackSolve conform to standard interface
///////////////////////////////////////////////////////////////////////////////////////////////////////
template<class Field> class SchurRedBlackBase {
protected:
typedef CheckerBoardedSparseMatrixBase<Field> Matrix;
OperatorFunction<Field> & _HermitianRBSolver;
int CBfactorise;
bool subGuess;
bool useSolnAsInitGuess; // if true user-supplied solution vector is used as initial guess for solver
public:
SchurRedBlackBase(OperatorFunction<Field> &HermitianRBSolver, const bool initSubGuess = false,
const bool _solnAsInitGuess = false) :
_HermitianRBSolver(HermitianRBSolver),
useSolnAsInitGuess(_solnAsInitGuess)
{
CBfactorise = 0;
subtractGuess(initSubGuess);
};
void subtractGuess(const bool initSubGuess)
{
subGuess = initSubGuess;
}
bool isSubtractGuess(void)
{
return subGuess;
}
/////////////////////////////////////////////////////////////
// Shared code
/////////////////////////////////////////////////////////////
void operator() (Matrix & _Matrix,const Field &in, Field &out){
ZeroGuesser<Field> guess;
(*this)(_Matrix,in,out,guess);
}
void operator()(Matrix &_Matrix, const std::vector<Field> &in, std::vector<Field> &out)
{
ZeroGuesser<Field> guess;
(*this)(_Matrix,in,out,guess);
}
template<class Guesser>
void operator()(Matrix &_Matrix, const std::vector<Field> &in, std::vector<Field> &out,Guesser &guess)
{
GridBase *grid = _Matrix.RedBlackGrid();
GridBase *fgrid= _Matrix.Grid();
int nblock = in.size();
std::vector<Field> src_o(nblock,grid);
std::vector<Field> sol_o(nblock,grid);
std::vector<Field> guess_save;
Field resid(fgrid);
Field tmp(grid);
////////////////////////////////////////////////
// Prepare RedBlack source
////////////////////////////////////////////////
for(int b=0;b<nblock;b++){
RedBlackSource(_Matrix,in[b],tmp,src_o[b]);
}
////////////////////////////////////////////////
// Make the guesses
////////////////////////////////////////////////
if ( subGuess ) guess_save.resize(nblock,grid);
for(int b=0;b<nblock;b++){
if(useSolnAsInitGuess) {
pickCheckerboard(Odd, sol_o[b], out[b]);
} else {
guess(src_o[b],sol_o[b]);
}
if ( subGuess ) {
guess_save[b] = sol_o[b];
}
}
//////////////////////////////////////////////////////////////
// Call the block solver
//////////////////////////////////////////////////////////////
std::cout<<GridLogMessage << "SchurRedBlackBase calling the solver for "<<nblock<<" RHS" <<std::endl;
RedBlackSolve(_Matrix,src_o,sol_o);
////////////////////////////////////////////////
// A2A boolean behavioural control & reconstruct other checkerboard
////////////////////////////////////////////////
for(int b=0;b<nblock;b++) {
if (subGuess) sol_o[b] = sol_o[b] - guess_save[b];
///////// Needs even source //////////////
pickCheckerboard(Even,tmp,in[b]);
RedBlackSolution(_Matrix,sol_o[b],tmp,out[b]);
/////////////////////////////////////////////////
// Check unprec residual if possible
/////////////////////////////////////////////////
if ( ! subGuess ) {
_Matrix.M(out[b],resid);
resid = resid-in[b];
RealD ns = norm2(in[b]);
RealD nr = norm2(resid);
std::cout<<GridLogMessage<< "SchurRedBlackBase solver true unprec resid["<<b<<"] "<<std::sqrt(nr/ns) << std::endl;
} else {
std::cout<<GridLogMessage<< "SchurRedBlackBase Guess subtracted after solve["<<b<<"] " << std::endl;
}
}
}
template<class Guesser>
void operator() (Matrix & _Matrix,const Field &in, Field &out,Guesser &guess){
// FIXME CGdiagonalMee not implemented virtual function
// FIXME use CBfactorise to control schur decomp
GridBase *grid = _Matrix.RedBlackGrid();
GridBase *fgrid= _Matrix.Grid();
Field resid(fgrid);
Field src_o(grid);
Field src_e(grid);
Field sol_o(grid);
////////////////////////////////////////////////
// RedBlack source
////////////////////////////////////////////////
RedBlackSource(_Matrix,in,src_e,src_o);
////////////////////////////////
// Construct the guess
////////////////////////////////
if(useSolnAsInitGuess) {
pickCheckerboard(Odd, sol_o, out);
} else {
guess(src_o,sol_o);
}
Field guess_save(grid);
guess_save = sol_o;
//////////////////////////////////////////////////////////////
// Call the red-black solver
//////////////////////////////////////////////////////////////
RedBlackSolve(_Matrix,src_o,sol_o);
////////////////////////////////////////////////
// Fionn A2A boolean behavioural control
////////////////////////////////////////////////
if (subGuess) sol_o= sol_o-guess_save;
///////////////////////////////////////////////////
// RedBlack solution needs the even source
///////////////////////////////////////////////////
RedBlackSolution(_Matrix,sol_o,src_e,out);
// Verify the unprec residual
if ( ! subGuess ) {
_Matrix.M(out,resid);
resid = resid-in;
RealD ns = norm2(in);
RealD nr = norm2(resid);
std::cout<<GridLogMessage << "SchurRedBlackBase solver true unprec resid "<< std::sqrt(nr/ns) << std::endl;
} else {
std::cout << GridLogMessage << "SchurRedBlackBase Guess subtracted after solve." << std::endl;
}
}
/////////////////////////////////////////////////////////////
// Override in derived.
/////////////////////////////////////////////////////////////
virtual void RedBlackSource (Matrix & _Matrix,const Field &src, Field &src_e,Field &src_o) =0;
virtual void RedBlackSolution(Matrix & _Matrix,const Field &sol_o, const Field &src_e,Field &sol) =0;
virtual void RedBlackSolve (Matrix & _Matrix,const Field &src_o, Field &sol_o) =0;
virtual void RedBlackSolve (Matrix & _Matrix,const std::vector<Field> &src_o, std::vector<Field> &sol_o)=0;
};
template<class Field> class SchurRedBlackStaggeredSolve : public SchurRedBlackBase<Field> {
public:
typedef CheckerBoardedSparseMatrixBase<Field> Matrix;
SchurRedBlackStaggeredSolve(OperatorFunction<Field> &HermitianRBSolver, const bool initSubGuess = false,
const bool _solnAsInitGuess = false)
: SchurRedBlackBase<Field> (HermitianRBSolver,initSubGuess,_solnAsInitGuess)
{
}
//////////////////////////////////////////////////////
// Override RedBlack specialisation
//////////////////////////////////////////////////////
virtual void RedBlackSource(Matrix & _Matrix,const Field &src, Field &src_e,Field &src_o)
{
GridBase *grid = _Matrix.RedBlackGrid();
GridBase *fgrid= _Matrix.Grid();
Field tmp(grid);
Field Mtmp(grid);
pickCheckerboard(Even,src_e,src);
pickCheckerboard(Odd ,src_o,src);
/////////////////////////////////////////////////////
// src_o = (source_o - Moe MeeInv source_e)
/////////////////////////////////////////////////////
_Matrix.MooeeInv(src_e,tmp); assert( tmp.Checkerboard() ==Even);
_Matrix.Meooe (tmp,Mtmp); assert( Mtmp.Checkerboard() ==Odd);
tmp=src_o-Mtmp; assert( tmp.Checkerboard() ==Odd);
_Matrix.Mooee(tmp,src_o); // Extra factor of "m" in source from dumb choice of matrix norm.
}
virtual void RedBlackSolution(Matrix & _Matrix,const Field &sol_o, const Field &src_e_c,Field &sol)
{
GridBase *grid = _Matrix.RedBlackGrid();
GridBase *fgrid= _Matrix.Grid();
Field tmp(grid);
Field sol_e(grid);
Field src_e(grid);
src_e = src_e_c; // Const correctness
///////////////////////////////////////////////////
// sol_e = M_ee^-1 * ( src_e - Meo sol_o )...
///////////////////////////////////////////////////
_Matrix.Meooe(sol_o,tmp); assert( tmp.Checkerboard() ==Even);
src_e = src_e-tmp; assert( src_e.Checkerboard() ==Even);
_Matrix.MooeeInv(src_e,sol_e); assert( sol_e.Checkerboard() ==Even);
setCheckerboard(sol,sol_e); assert( sol_e.Checkerboard() ==Even);
setCheckerboard(sol,sol_o); assert( sol_o.Checkerboard() ==Odd );
}
virtual void RedBlackSolve (Matrix & _Matrix,const Field &src_o, Field &sol_o)
{
SchurStaggeredOperator<Matrix,Field> _HermOpEO(_Matrix);
this->_HermitianRBSolver(_HermOpEO,src_o,sol_o); assert(sol_o.Checkerboard()==Odd);
};
virtual void RedBlackSolve (Matrix & _Matrix,const std::vector<Field> &src_o, std::vector<Field> &sol_o)
{
SchurStaggeredOperator<Matrix,Field> _HermOpEO(_Matrix);
this->_HermitianRBSolver(_HermOpEO,src_o,sol_o);
}
};
template<class Field> using SchurRedBlackStagSolve = SchurRedBlackStaggeredSolve<Field>;
///////////////////////////////////////////////////////////////////////////////////////////////////////
// Site diagonal has Mooee on it.
///////////////////////////////////////////////////////////////////////////////////////////////////////
template<class Field> class SchurRedBlackDiagMooeeSolve : public SchurRedBlackBase<Field> {
public:
typedef CheckerBoardedSparseMatrixBase<Field> Matrix;
SchurRedBlackDiagMooeeSolve(OperatorFunction<Field> &HermitianRBSolver, const bool initSubGuess = false,
const bool _solnAsInitGuess = false)
: SchurRedBlackBase<Field> (HermitianRBSolver,initSubGuess,_solnAsInitGuess) {};
//////////////////////////////////////////////////////
// Override RedBlack specialisation
//////////////////////////////////////////////////////
virtual void RedBlackSource(Matrix & _Matrix,const Field &src, Field &src_e,Field &src_o)
{
GridBase *grid = _Matrix.RedBlackGrid();
GridBase *fgrid= _Matrix.Grid();
Field tmp(grid);
Field Mtmp(grid);
pickCheckerboard(Even,src_e,src);
pickCheckerboard(Odd ,src_o,src);
/////////////////////////////////////////////////////
// src_o = Mdag * (source_o - Moe MeeInv source_e)
/////////////////////////////////////////////////////
_Matrix.MooeeInv(src_e,tmp); assert( tmp.Checkerboard() ==Even);
_Matrix.Meooe (tmp,Mtmp); assert( Mtmp.Checkerboard() ==Odd);
tmp=src_o-Mtmp; assert( tmp.Checkerboard() ==Odd);
// get the right MpcDag
SchurDiagMooeeOperator<Matrix,Field> _HermOpEO(_Matrix);
_HermOpEO.MpcDag(tmp,src_o); assert(src_o.Checkerboard() ==Odd);
}
virtual void RedBlackSolution(Matrix & _Matrix,const Field &sol_o, const Field &src_e,Field &sol)
{
GridBase *grid = _Matrix.RedBlackGrid();
GridBase *fgrid= _Matrix.Grid();
Field tmp(grid);
Field sol_e(grid);
Field src_e_i(grid);
///////////////////////////////////////////////////
// sol_e = M_ee^-1 * ( src_e - Meo sol_o )...
///////////////////////////////////////////////////
_Matrix.Meooe(sol_o,tmp); assert( tmp.Checkerboard() ==Even);
src_e_i = src_e-tmp; assert( src_e_i.Checkerboard() ==Even);
_Matrix.MooeeInv(src_e_i,sol_e); assert( sol_e.Checkerboard() ==Even);
setCheckerboard(sol,sol_e); assert( sol_e.Checkerboard() ==Even);
setCheckerboard(sol,sol_o); assert( sol_o.Checkerboard() ==Odd );
}
virtual void RedBlackSolve (Matrix & _Matrix,const Field &src_o, Field &sol_o)
{
SchurDiagMooeeOperator<Matrix,Field> _HermOpEO(_Matrix);
this->_HermitianRBSolver(_HermOpEO,src_o,sol_o); assert(sol_o.Checkerboard()==Odd);
};
virtual void RedBlackSolve (Matrix & _Matrix,const std::vector<Field> &src_o, std::vector<Field> &sol_o)
{
SchurDiagMooeeOperator<Matrix,Field> _HermOpEO(_Matrix);
this->_HermitianRBSolver(_HermOpEO,src_o,sol_o);
}
};
template<class Field> class NonHermitianSchurRedBlackDiagMooeeSolve : public SchurRedBlackBase<Field>
{
public:
typedef CheckerBoardedSparseMatrixBase<Field> Matrix;
NonHermitianSchurRedBlackDiagMooeeSolve(OperatorFunction<Field>& RBSolver, const bool initSubGuess = false,
const bool _solnAsInitGuess = false)
: SchurRedBlackBase<Field>(RBSolver, initSubGuess, _solnAsInitGuess) {};
//////////////////////////////////////////////////////
// Override RedBlack specialisation
//////////////////////////////////////////////////////
virtual void RedBlackSource(Matrix& _Matrix, const Field& src, Field& src_e, Field& src_o)
{
GridBase* grid = _Matrix.RedBlackGrid();
GridBase* fgrid = _Matrix.Grid();
Field tmp(grid);
Field Mtmp(grid);
pickCheckerboard(Even, src_e, src);
pickCheckerboard(Odd , src_o, src);
/////////////////////////////////////////////////////
// src_o = Mdag * (source_o - Moe MeeInv source_e)
/////////////////////////////////////////////////////
_Matrix.MooeeInv(src_e, tmp); assert( tmp.Checkerboard() == Even );
_Matrix.Meooe (tmp, Mtmp); assert( Mtmp.Checkerboard() == Odd );
src_o -= Mtmp; assert( src_o.Checkerboard() == Odd );
}
virtual void RedBlackSolution(Matrix& _Matrix, const Field& sol_o, const Field& src_e, Field& sol)
{
GridBase* grid = _Matrix.RedBlackGrid();
GridBase* fgrid = _Matrix.Grid();
Field tmp(grid);
Field sol_e(grid);
Field src_e_i(grid);
///////////////////////////////////////////////////
// sol_e = M_ee^-1 * ( src_e - Meo sol_o )...
///////////////////////////////////////////////////
_Matrix.Meooe(sol_o, tmp); assert( tmp.Checkerboard() == Even );
src_e_i = src_e - tmp; assert( src_e_i.Checkerboard() == Even );
_Matrix.MooeeInv(src_e_i, sol_e); assert( sol_e.Checkerboard() == Even );
setCheckerboard(sol, sol_e); assert( sol_e.Checkerboard() == Even );
setCheckerboard(sol, sol_o); assert( sol_o.Checkerboard() == Odd );
}
virtual void RedBlackSolve(Matrix& _Matrix, const Field& src_o, Field& sol_o)
{
NonHermitianSchurDiagMooeeOperator<Matrix,Field> _OpEO(_Matrix);
this->_HermitianRBSolver(_OpEO, src_o, sol_o); assert(sol_o.Checkerboard() == Odd);
}
virtual void RedBlackSolve(Matrix& _Matrix, const std::vector<Field>& src_o, std::vector<Field>& sol_o)
{
NonHermitianSchurDiagMooeeOperator<Matrix,Field> _OpEO(_Matrix);
this->_HermitianRBSolver(_OpEO, src_o, sol_o);
}
};
///////////////////////////////////////////////////////////////////////////////////////////////////////
// Site diagonal is identity, right preconditioned by Mee^inv
// ( 1 - Meo Moo^inv Moe Mee^inv ) phi = ( 1 - Meo Moo^inv Moe Mee^inv ) Mee psi = eta
//=> psi = MeeInv phi
///////////////////////////////////////////////////////////////////////////////////////////////////////
template<class Field> class SchurRedBlackDiagTwoSolve : public SchurRedBlackBase<Field> {
public:
typedef CheckerBoardedSparseMatrixBase<Field> Matrix;
/////////////////////////////////////////////////////
// Wrap the usual normal equations Schur trick
/////////////////////////////////////////////////////
SchurRedBlackDiagTwoSolve(OperatorFunction<Field> &HermitianRBSolver, const bool initSubGuess = false,
const bool _solnAsInitGuess = false)
: SchurRedBlackBase<Field>(HermitianRBSolver,initSubGuess,_solnAsInitGuess) {};
virtual void RedBlackSource(Matrix & _Matrix,const Field &src, Field &src_e,Field &src_o)
{
GridBase *grid = _Matrix.RedBlackGrid();
GridBase *fgrid= _Matrix.Grid();
SchurDiagTwoOperator<Matrix,Field> _HermOpEO(_Matrix);
Field tmp(grid);
Field Mtmp(grid);
pickCheckerboard(Even,src_e,src);
pickCheckerboard(Odd ,src_o,src);
/////////////////////////////////////////////////////
// src_o = Mdag * (source_o - Moe MeeInv source_e)
/////////////////////////////////////////////////////
_Matrix.MooeeInv(src_e,tmp); assert( tmp.Checkerboard() ==Even);
_Matrix.Meooe (tmp,Mtmp); assert( Mtmp.Checkerboard() ==Odd);
tmp=src_o-Mtmp; assert( tmp.Checkerboard() ==Odd);
// get the right MpcDag
_HermOpEO.MpcDag(tmp,src_o); assert(src_o.Checkerboard() ==Odd);
}
virtual void RedBlackSolution(Matrix & _Matrix,const Field &sol_o, const Field &src_e,Field &sol)
{
GridBase *grid = _Matrix.RedBlackGrid();
GridBase *fgrid= _Matrix.Grid();
Field sol_o_i(grid);
Field tmp(grid);
Field sol_e(grid);
////////////////////////////////////////////////
// MooeeInv due to precond
////////////////////////////////////////////////
_Matrix.MooeeInv(sol_o,tmp);
sol_o_i = tmp;
///////////////////////////////////////////////////
// sol_e = M_ee^-1 * ( src_e - Meo sol_o )...
///////////////////////////////////////////////////
_Matrix.Meooe(sol_o_i,tmp); assert( tmp.Checkerboard() ==Even);
tmp = src_e-tmp; assert( src_e.Checkerboard() ==Even);
_Matrix.MooeeInv(tmp,sol_e); assert( sol_e.Checkerboard() ==Even);
setCheckerboard(sol,sol_e); assert( sol_e.Checkerboard() ==Even);
setCheckerboard(sol,sol_o_i); assert( sol_o_i.Checkerboard() ==Odd );
};
virtual void RedBlackSolve (Matrix & _Matrix,const Field &src_o, Field &sol_o)
{
SchurDiagTwoOperator<Matrix,Field> _HermOpEO(_Matrix);
this->_HermitianRBSolver(_HermOpEO,src_o,sol_o);
};
virtual void RedBlackSolve (Matrix & _Matrix,const std::vector<Field> &src_o, std::vector<Field> &sol_o)
{
SchurDiagTwoOperator<Matrix,Field> _HermOpEO(_Matrix);
this->_HermitianRBSolver(_HermOpEO,src_o,sol_o);
}
};
template<class Field> class NonHermitianSchurRedBlackDiagTwoSolve : public SchurRedBlackBase<Field>
{
public:
typedef CheckerBoardedSparseMatrixBase<Field> Matrix;
/////////////////////////////////////////////////////
// Wrap the usual normal equations Schur trick
/////////////////////////////////////////////////////
NonHermitianSchurRedBlackDiagTwoSolve(OperatorFunction<Field>& RBSolver, const bool initSubGuess = false,
const bool _solnAsInitGuess = false)
: SchurRedBlackBase<Field>(RBSolver, initSubGuess, _solnAsInitGuess) {};
virtual void RedBlackSource(Matrix& _Matrix, const Field& src, Field& src_e, Field& src_o)
{
GridBase* grid = _Matrix.RedBlackGrid();
GridBase* fgrid = _Matrix.Grid();
Field tmp(grid);
Field Mtmp(grid);
pickCheckerboard(Even, src_e, src);
pickCheckerboard(Odd , src_o, src);
/////////////////////////////////////////////////////
// src_o = Mdag * (source_o - Moe MeeInv source_e)
/////////////////////////////////////////////////////
_Matrix.MooeeInv(src_e, tmp); assert( tmp.Checkerboard() == Even );
_Matrix.Meooe (tmp, Mtmp); assert( Mtmp.Checkerboard() == Odd );
src_o -= Mtmp; assert( src_o.Checkerboard() == Odd );
}
virtual void RedBlackSolution(Matrix& _Matrix, const Field& sol_o, const Field& src_e, Field& sol)
{
GridBase* grid = _Matrix.RedBlackGrid();
GridBase* fgrid = _Matrix.Grid();
Field sol_o_i(grid);
Field tmp(grid);
Field sol_e(grid);
////////////////////////////////////////////////
// MooeeInv due to precond
////////////////////////////////////////////////
_Matrix.MooeeInv(sol_o, tmp);
sol_o_i = tmp;
///////////////////////////////////////////////////
// sol_e = M_ee^-1 * ( src_e - Meo sol_o )...
///////////////////////////////////////////////////
_Matrix.Meooe(sol_o_i, tmp); assert( tmp.Checkerboard() == Even );
tmp = src_e - tmp; assert( src_e.Checkerboard() == Even );
_Matrix.MooeeInv(tmp, sol_e); assert( sol_e.Checkerboard() == Even );
setCheckerboard(sol, sol_e); assert( sol_e.Checkerboard() == Even );
setCheckerboard(sol, sol_o_i); assert( sol_o_i.Checkerboard() == Odd );
};
virtual void RedBlackSolve(Matrix& _Matrix, const Field& src_o, Field& sol_o)
{
NonHermitianSchurDiagTwoOperator<Matrix,Field> _OpEO(_Matrix);
this->_HermitianRBSolver(_OpEO, src_o, sol_o);
};
virtual void RedBlackSolve(Matrix& _Matrix, const std::vector<Field>& src_o, std::vector<Field>& sol_o)
{
NonHermitianSchurDiagTwoOperator<Matrix,Field> _OpEO(_Matrix);
this->_HermitianRBSolver(_OpEO, src_o, sol_o);
}
};
}
#endif


@ -0,0 +1,183 @@
/*************************************************************************************
Grid physics library, www.github.com/paboyle/Grid
Source file: ./lib/AlignedAllocator.h
Copyright (C) 2015
Author: Azusa Yamaguchi <ayamaguc@staffmail.ed.ac.uk>
Author: Peter Boyle <paboyle@ph.ed.ac.uk>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */
#pragma once
NAMESPACE_BEGIN(Grid);
template<typename _Tp>
class alignedAllocator {
public:
typedef std::size_t size_type;
typedef std::ptrdiff_t difference_type;
typedef _Tp* pointer;
typedef const _Tp* const_pointer;
typedef _Tp& reference;
typedef const _Tp& const_reference;
typedef _Tp value_type;
template<typename _Tp1> struct rebind { typedef alignedAllocator<_Tp1> other; };
alignedAllocator() throw() { }
alignedAllocator(const alignedAllocator&) throw() { }
template<typename _Tp1> alignedAllocator(const alignedAllocator<_Tp1>&) throw() { }
~alignedAllocator() throw() { }
pointer address(reference __x) const { return &__x; }
size_type max_size() const throw() { return size_t(-1) / sizeof(_Tp); }
pointer allocate(size_type __n, const void* _p= 0)
{
size_type bytes = __n*sizeof(_Tp);
profilerAllocate(bytes);
_Tp *ptr = (_Tp*) MemoryManager::CpuAllocate(bytes);
assert( ( (_Tp*)ptr != (_Tp *)NULL ) );
return ptr;
}
void deallocate(pointer __p, size_type __n)
{
size_type bytes = __n * sizeof(_Tp);
profilerFree(bytes);
MemoryManager::CpuFree((void *)__p,bytes);
}
// FIXME: hack for the copy constructor: it must not be called, so that std::vector's single-threaded element-construction loop is avoided
void construct(pointer __p, const _Tp& __val) { assert(0);};
void construct(pointer __p) { };
void destroy(pointer __p) { };
};
template<typename _Tp> inline bool operator==(const alignedAllocator<_Tp>&, const alignedAllocator<_Tp>&){ return true; }
template<typename _Tp> inline bool operator!=(const alignedAllocator<_Tp>&, const alignedAllocator<_Tp>&){ return false; }
//////////////////////////////////////////////////////////////////////////////////////
// Unified virtual memory
//////////////////////////////////////////////////////////////////////////////////////
template<typename _Tp>
class uvmAllocator {
public:
typedef std::size_t size_type;
typedef std::ptrdiff_t difference_type;
typedef _Tp* pointer;
typedef const _Tp* const_pointer;
typedef _Tp& reference;
typedef const _Tp& const_reference;
typedef _Tp value_type;
template<typename _Tp1> struct rebind { typedef uvmAllocator<_Tp1> other; };
uvmAllocator() throw() { }
uvmAllocator(const uvmAllocator&) throw() { }
template<typename _Tp1> uvmAllocator(const uvmAllocator<_Tp1>&) throw() { }
~uvmAllocator() throw() { }
pointer address(reference __x) const { return &__x; }
size_type max_size() const throw() { return size_t(-1) / sizeof(_Tp); }
pointer allocate(size_type __n, const void* _p= 0)
{
size_type bytes = __n*sizeof(_Tp);
profilerAllocate(bytes);
_Tp *ptr = (_Tp*) MemoryManager::SharedAllocate(bytes);
assert( ( (_Tp*)ptr != (_Tp *)NULL ) );
return ptr;
}
void deallocate(pointer __p, size_type __n)
{
size_type bytes = __n * sizeof(_Tp);
profilerFree(bytes);
MemoryManager::SharedFree((void *)__p,bytes);
}
void construct(pointer __p, const _Tp& __val) { new((void *)__p) _Tp(__val); };
void construct(pointer __p) { };
void destroy(pointer __p) { };
};
template<typename _Tp> inline bool operator==(const uvmAllocator<_Tp>&, const uvmAllocator<_Tp>&){ return true; }
template<typename _Tp> inline bool operator!=(const uvmAllocator<_Tp>&, const uvmAllocator<_Tp>&){ return false; }
////////////////////////////////////////////////////////////////////////////////
// Device memory
////////////////////////////////////////////////////////////////////////////////
template<typename _Tp>
class devAllocator {
public:
typedef std::size_t size_type;
typedef std::ptrdiff_t difference_type;
typedef _Tp* pointer;
typedef const _Tp* const_pointer;
typedef _Tp& reference;
typedef const _Tp& const_reference;
typedef _Tp value_type;
template<typename _Tp1> struct rebind { typedef devAllocator<_Tp1> other; };
devAllocator() throw() { }
devAllocator(const devAllocator&) throw() { }
template<typename _Tp1> devAllocator(const devAllocator<_Tp1>&) throw() { }
~devAllocator() throw() { }
pointer address(reference __x) const { return &__x; }
size_type max_size() const throw() { return size_t(-1) / sizeof(_Tp); }
pointer allocate(size_type __n, const void* _p= 0)
{
size_type bytes = __n*sizeof(_Tp);
profilerAllocate(bytes);
_Tp *ptr = (_Tp*) MemoryManager::AcceleratorAllocate(bytes);
assert( ( (_Tp*)ptr != (_Tp *)NULL ) );
return ptr;
}
void deallocate(pointer __p, size_type __n)
{
size_type bytes = __n * sizeof(_Tp);
profilerFree(bytes);
MemoryManager::AcceleratorFree((void *)__p,bytes);
}
void construct(pointer __p, const _Tp& __val) { };
void construct(pointer __p) { };
void destroy(pointer __p) { };
};
template<typename _Tp> inline bool operator==(const devAllocator<_Tp>&, const devAllocator<_Tp>&){ return true; }
template<typename _Tp> inline bool operator!=(const devAllocator<_Tp>&, const devAllocator<_Tp>&){ return false; }
////////////////////////////////////////////////////////////////////////////////
// Template typedefs
////////////////////////////////////////////////////////////////////////////////
#ifdef ACCELERATOR_CSHIFT
// Cshift on device
template<class T> using cshiftAllocator = devAllocator<T>;
#else
// Cshift on host
template<class T> using cshiftAllocator = std::allocator<T>;
#endif
template<class T> using Vector = std::vector<T,uvmAllocator<T> >;
template<class T> using stencilVector = std::vector<T,alignedAllocator<T> >;
template<class T> using commVector = std::vector<T,devAllocator<T> >;
template<class T> using cshiftVector = std::vector<T,cshiftAllocator<T> >;
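// Memory space behind each container, per the allocators above:
//   Vector<T>        - MemoryManager::SharedAllocate      (unified/shared memory)
//   stencilVector<T> - MemoryManager::CpuAllocate         (host pool)
//   commVector<T>    - MemoryManager::AcceleratorAllocate (dedicated device memory)
//   cshiftVector<T>  - device memory when ACCELERATOR_CSHIFT is defined, otherwise std::allocator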
NAMESPACE_END(Grid);


@ -0,0 +1,4 @@
#pragma once
#include <Grid/allocator/MemoryStats.h>
#include <Grid/allocator/MemoryManager.h>
#include <Grid/allocator/AlignedAllocator.h>


@ -0,0 +1,254 @@
#include <Grid/GridCore.h>
NAMESPACE_BEGIN(Grid);
/*Allocation types, saying which pointer cache should be used*/
#define Cpu (0)
#define CpuSmall (1)
#define Acc (2)
#define AccSmall (3)
#define Shared (4)
#define SharedSmall (5)
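// Note: each "Small" variant must keep the value (type+1), since Insert()/Lookup() compute the
// cache index as (type + small) with small = 0 or 1.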
uint64_t total_shared;
uint64_t total_device;
uint64_t total_host;
void MemoryManager::PrintBytes(void)
{
std::cout << " MemoryManager : "<<total_shared<<" shared bytes "<<std::endl;
std::cout << " MemoryManager : "<<total_device<<" accelerator bytes "<<std::endl;
std::cout << " MemoryManager : "<<total_host <<" cpu bytes "<<std::endl;
}
//////////////////////////////////////////////////////////////////////
// Data tables for recently freed pointer caches
//////////////////////////////////////////////////////////////////////
MemoryManager::AllocationCacheEntry MemoryManager::Entries[MemoryManager::NallocType][MemoryManager::NallocCacheMax];
int MemoryManager::Victim[MemoryManager::NallocType];
int MemoryManager::Ncache[MemoryManager::NallocType] = { 8, 32, 8, 32, 8, 32 };
//////////////////////////////////////////////////////////////////////
// Actual allocation and deallocation utils
//////////////////////////////////////////////////////////////////////
void *MemoryManager::AcceleratorAllocate(size_t bytes)
{
void *ptr = (void *) Lookup(bytes,Acc);
if ( ptr == (void *) NULL ) {
ptr = (void *) acceleratorAllocDevice(bytes);
total_device+=bytes;
}
return ptr;
}
void MemoryManager::AcceleratorFree (void *ptr,size_t bytes)
{
void *__freeme = Insert(ptr,bytes,Acc);
if ( __freeme ) {
acceleratorFreeDevice(__freeme);
total_device-=bytes;
// PrintBytes();
}
}
void *MemoryManager::SharedAllocate(size_t bytes)
{
void *ptr = (void *) Lookup(bytes,Shared);
if ( ptr == (void *) NULL ) {
ptr = (void *) acceleratorAllocShared(bytes);
total_shared+=bytes;
// std::cout <<"AcceleratorAllocate: allocated Shared pointer "<<std::hex<<ptr<<std::dec<<std::endl;
// PrintBytes();
}
return ptr;
}
void MemoryManager::SharedFree (void *ptr,size_t bytes)
{
void *__freeme = Insert(ptr,bytes,Shared);
if ( __freeme ) {
acceleratorFreeShared(__freeme);
total_shared-=bytes;
// PrintBytes();
}
}
#ifdef GRID_UVM
void *MemoryManager::CpuAllocate(size_t bytes)
{
void *ptr = (void *) Lookup(bytes,Cpu);
if ( ptr == (void *) NULL ) {
ptr = (void *) acceleratorAllocShared(bytes);
total_host+=bytes;
}
return ptr;
}
void MemoryManager::CpuFree (void *_ptr,size_t bytes)
{
NotifyDeletion(_ptr);
void *__freeme = Insert(_ptr,bytes,Cpu);
if ( __freeme ) {
acceleratorFreeShared(__freeme);
total_host-=bytes;
}
}
#else
void *MemoryManager::CpuAllocate(size_t bytes)
{
void *ptr = (void *) Lookup(bytes,Cpu);
if ( ptr == (void *) NULL ) {
ptr = (void *) acceleratorAllocCpu(bytes);
total_host+=bytes;
}
return ptr;
}
void MemoryManager::CpuFree (void *_ptr,size_t bytes)
{
NotifyDeletion(_ptr);
void *__freeme = Insert(_ptr,bytes,Cpu);
if ( __freeme ) {
acceleratorFreeCpu(__freeme);
total_host-=bytes;
}
}
#endif
//////////////////////////////////////////
// call only once
//////////////////////////////////////////
void MemoryManager::Init(void)
{
char * str;
int Nc;
int NcS;
str= getenv("GRID_ALLOC_NCACHE_LARGE");
if ( str ) {
Nc = atoi(str);
if ( (Nc>=0) && (Nc < NallocCacheMax)) {
Ncache[Cpu]=Nc;
Ncache[Acc]=Nc;
Ncache[Shared]=Nc;
}
}
str= getenv("GRID_ALLOC_NCACHE_SMALL");
if ( str ) {
Nc = atoi(str);
if ( (Nc>=0) && (Nc < NallocCacheMax)) {
Ncache[CpuSmall]=Nc;
Ncache[AccSmall]=Nc;
Ncache[SharedSmall]=Nc;
}
}
}
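// Example (shell): the cache depths can be tuned at run time via the environment variables read
// above, e.g.
//   export GRID_ALLOC_NCACHE_LARGE=16
//   export GRID_ALLOC_NCACHE_SMALL=64
// Values outside the range [0, NallocCacheMax) are ignored and the compiled-in defaults are kept.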
void MemoryManager::InitMessage(void) {
#ifndef GRID_UVM
std::cout << GridLogMessage << "MemoryManager Cache "<< MemoryManager::DeviceMaxBytes <<" bytes "<<std::endl;
#endif
std::cout << GridLogMessage<< "MemoryManager::Init() setting up"<<std::endl;
#ifdef ALLOCATION_CACHE
std::cout << GridLogMessage<< "MemoryManager::Init() cache pool for recent allocations: SMALL "<<Ncache[CpuSmall]<<" LARGE "<<Ncache[Cpu]<<std::endl;
#endif
#ifdef GRID_UVM
std::cout << GridLogMessage<< "MemoryManager::Init() Unified memory space"<<std::endl;
#ifdef GRID_CUDA
std::cout << GridLogMessage<< "MemoryManager::Init() Using cudaMallocManaged"<<std::endl;
#endif
#ifdef GRID_HIP
std::cout << GridLogMessage<< "MemoryManager::Init() Using hipMallocManaged"<<std::endl;
#endif
#ifdef GRID_SYCL
std::cout << GridLogMessage<< "MemoryManager::Init() Using SYCL malloc_shared"<<std::endl;
#endif
#else
std::cout << GridLogMessage<< "MemoryManager::Init() Non unified: Caching accelerator data in dedicated memory"<<std::endl;
#ifdef GRID_CUDA
std::cout << GridLogMessage<< "MemoryManager::Init() Using cudaMalloc"<<std::endl;
#endif
#ifdef GRID_HIP
std::cout << GridLogMessage<< "MemoryManager::Init() Using hipMalloc"<<std::endl;
#endif
#ifdef GRID_SYCL
std::cout << GridLogMessage<< "MemoryManager::Init() Using SYCL malloc_device"<<std::endl;
#endif
#endif
}
void *MemoryManager::Insert(void *ptr,size_t bytes,int type)
{
#ifdef ALLOCATION_CACHE
bool small = (bytes < GRID_ALLOC_SMALL_LIMIT);
int cache = type + small;
return Insert(ptr,bytes,Entries[cache],Ncache[cache],Victim[cache]);
#else
return ptr;
#endif
}
void *MemoryManager::Insert(void *ptr,size_t bytes,AllocationCacheEntry *entries,int ncache,int &victim)
{
assert(ncache>0);
#ifdef GRID_OMP
assert(omp_in_parallel()==0);
#endif
void * ret = NULL;
int v = -1;
for(int e=0;e<ncache;e++) {
if ( entries[e].valid==0 ) {
v=e;
break;
}
}
if ( v==-1 ) {
v=victim;
victim = (victim+1)%ncache;
}
if ( entries[v].valid ) {
ret = entries[v].address;
entries[v].valid = 0;
entries[v].address = NULL;
entries[v].bytes = 0;
}
entries[v].address=ptr;
entries[v].bytes =bytes;
entries[v].valid =1;
return ret;
}
void *MemoryManager::Lookup(size_t bytes,int type)
{
#ifdef ALLOCATION_CACHE
bool small = (bytes < GRID_ALLOC_SMALL_LIMIT);
int cache = type+small;
return Lookup(bytes,Entries[cache],Ncache[cache]);
#else
return NULL;
#endif
}
void *MemoryManager::Lookup(size_t bytes,AllocationCacheEntry *entries,int ncache)
{
assert(ncache>0);
#ifdef GRID_OMP
assert(omp_in_parallel()==0);
#endif
for(int e=0;e<ncache;e++){
if ( entries[e].valid && ( entries[e].bytes == bytes ) ) {
entries[e].valid = 0;
return entries[e].address;
}
}
return NULL;
}
NAMESPACE_END(Grid);


@ -0,0 +1,180 @@
/*************************************************************************************
Grid physics library, www.github.com/paboyle/Grid
Source file: ./lib/MemoryManager.h
Copyright (C) 2015
Author: Azusa Yamaguchi <ayamaguc@staffmail.ed.ac.uk>
Author: Peter Boyle <paboyle@ph.ed.ac.uk>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */
#pragma once
#include <list>
#include <unordered_map>
NAMESPACE_BEGIN(Grid);
// Move control to configure.ac and Config.h?
#define GRID_ALLOC_SMALL_LIMIT (4096)
/*Pinning pages is costly*/
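// Allocations smaller than GRID_ALLOC_SMALL_LIMIT are routed to the dedicated "Small" pointer
// caches in MemoryManager.cc; larger allocations use the main caches.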
////////////////////////////////////////////////////////////////////////////
// Advise the LatticeAccelerator class
////////////////////////////////////////////////////////////////////////////
enum ViewAdvise {
AdviseDefault = 0x0, // Regular data
AdviseInfrequentUse = 0x1 // Advise that the data is used infrequently. This can
// significantly influence performance of bulk storage.
// AdviseTransient = 0x2, // Data will mostly be read. On some architectures
// enables read-only copies of memory to be kept on
// host and device.
// AdviseAcceleratorWriteDiscard = 0x4 // Field will be written in entirety on device
};
////////////////////////////////////////////////////////////////////////////
// View Access Mode
////////////////////////////////////////////////////////////////////////////
enum ViewMode {
AcceleratorRead = 0x01,
AcceleratorWrite = 0x02,
AcceleratorWriteDiscard = 0x04,
CpuRead = 0x08,
CpuWrite = 0x10,
CpuWriteDiscard = 0x10 // same for now
};
class MemoryManager {
private:
////////////////////////////////////////////////////////////
// For caching recently freed allocations
////////////////////////////////////////////////////////////
typedef struct {
void *address;
size_t bytes;
int valid;
} AllocationCacheEntry;
static const int NallocCacheMax=128;
static const int NallocType=6;
static AllocationCacheEntry Entries[NallocType][NallocCacheMax];
static int Victim[NallocType];
static int Ncache[NallocType];
/////////////////////////////////////////////////
// Free pool
/////////////////////////////////////////////////
static void *Insert(void *ptr,size_t bytes,int type) ;
static void *Lookup(size_t bytes,int type) ;
static void *Insert(void *ptr,size_t bytes,AllocationCacheEntry *entries,int ncache,int &victim) ;
static void *Lookup(size_t bytes,AllocationCacheEntry *entries,int ncache) ;
static void PrintBytes(void);
public:
static void Init(void);
static void InitMessage(void);
static void *AcceleratorAllocate(size_t bytes);
static void AcceleratorFree (void *ptr,size_t bytes);
static void *SharedAllocate(size_t bytes);
static void SharedFree (void *ptr,size_t bytes);
static void *CpuAllocate(size_t bytes);
static void CpuFree (void *ptr,size_t bytes);
////////////////////////////////////////////////////////
// Footprint tracking
////////////////////////////////////////////////////////
static uint64_t DeviceBytes;
static uint64_t DeviceLRUBytes;
static uint64_t DeviceMaxBytes;
static uint64_t HostToDeviceBytes;
static uint64_t DeviceToHostBytes;
static uint64_t HostToDeviceXfer;
static uint64_t DeviceToHostXfer;
private:
#ifndef GRID_UVM
//////////////////////////////////////////////////////////////////////
// Data tables for ViewCache
//////////////////////////////////////////////////////////////////////
typedef std::list<uint64_t> LRU_t;
typedef typename LRU_t::iterator LRUiterator;
typedef struct {
int LRU_valid;
LRUiterator LRU_entry;
uint64_t CpuPtr;
uint64_t AccPtr;
size_t bytes;
uint32_t transient;
uint32_t state;
uint32_t accLock;
uint32_t cpuLock;
} AcceleratorViewEntry;
typedef std::unordered_map<uint64_t,AcceleratorViewEntry> AccViewTable_t;
typedef typename AccViewTable_t::iterator AccViewTableIterator ;
static AccViewTable_t AccViewTable;
static LRU_t LRU;
/////////////////////////////////////////////////
// Device motion
/////////////////////////////////////////////////
static void Create(uint64_t CpuPtr,size_t bytes,ViewMode mode,ViewAdvise hint);
static void EvictVictims(uint64_t bytes); // Frees up <bytes>
static void Evict(AcceleratorViewEntry &AccCache);
static void Flush(AcceleratorViewEntry &AccCache);
static void Clone(AcceleratorViewEntry &AccCache);
static void AccDiscard(AcceleratorViewEntry &AccCache);
static void CpuDiscard(AcceleratorViewEntry &AccCache);
// static void LRUupdate(AcceleratorViewEntry &AccCache);
static void LRUinsert(AcceleratorViewEntry &AccCache);
static void LRUremove(AcceleratorViewEntry &AccCache);
// manage entries in the table
static int EntryPresent(uint64_t CpuPtr);
static void EntryCreate(uint64_t CpuPtr,size_t bytes,ViewMode mode,ViewAdvise hint);
static void EntryErase (uint64_t CpuPtr);
static AccViewTableIterator EntryLookup(uint64_t CpuPtr);
static void EntrySet (uint64_t CpuPtr,AcceleratorViewEntry &entry);
static void AcceleratorViewClose(uint64_t AccPtr);
static uint64_t AcceleratorViewOpen(uint64_t CpuPtr,size_t bytes,ViewMode mode,ViewAdvise hint);
static void CpuViewClose(uint64_t Ptr);
static uint64_t CpuViewOpen(uint64_t CpuPtr,size_t bytes,ViewMode mode,ViewAdvise hint);
#endif
static void NotifyDeletion(void * CpuPtr);
public:
static void Print(void);
static int isOpen (void* CpuPtr);
static void ViewClose(void* CpuPtr,ViewMode mode);
static void *ViewOpen (void* CpuPtr,size_t bytes,ViewMode mode,ViewAdvise hint);
};
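/*
 * Minimal sketch of the view protocol (illustrative; in Grid this is normally driven by the
 * Lattice view machinery rather than called by hand, and hostPtr/bytes below are assumptions):
 *
 *   void *devPtr = MemoryManager::ViewOpen(hostPtr,bytes,AcceleratorRead,AdviseDefault);
 *   // ... launch a kernel reading from devPtr ...
 *   MemoryManager::ViewClose(hostPtr,AcceleratorRead);
 *
 * The cache-management members above are only compiled when GRID_UVM is not defined; the
 * corresponding state machine lives in the explicit-copy implementation file that follows.
 */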
NAMESPACE_END(Grid);


@ -0,0 +1,478 @@
#include <Grid/GridCore.h>
#ifndef GRID_UVM
#warning "Using explicit device memory copies"
NAMESPACE_BEGIN(Grid);
//#define dprintf(...) printf ( __VA_ARGS__ ); fflush(stdout);
#define dprintf(...)
////////////////////////////////////////////////////////////
// For caching copies of data on device
////////////////////////////////////////////////////////////
MemoryManager::AccViewTable_t MemoryManager::AccViewTable;
MemoryManager::LRU_t MemoryManager::LRU;
////////////////////////////////////////////////////////
// Footprint tracking
////////////////////////////////////////////////////////
uint64_t MemoryManager::DeviceBytes;
uint64_t MemoryManager::DeviceLRUBytes;
uint64_t MemoryManager::DeviceMaxBytes = 1024*1024*128;
uint64_t MemoryManager::HostToDeviceBytes;
uint64_t MemoryManager::DeviceToHostBytes;
uint64_t MemoryManager::HostToDeviceXfer;
uint64_t MemoryManager::DeviceToHostXfer;
////////////////////////////////////
// Priority ordering for unlocked entries
// Empty
// CpuDirty
// Consistent
// AccDirty
////////////////////////////////////
#define Empty (0x0) /*Entry unoccupied */
#define CpuDirty (0x1) /*CPU copy is golden, Acc buffer MAY not be allocated*/
#define Consistent (0x2) /*ACC copy AND CPU copy are valid */
#define AccDirty (0x4) /*ACC copy is golden */
#define EvictNext (0x8) /*Priority for eviction*/
/////////////////////////////////////////////////
// Mechanics of data table maintenance
/////////////////////////////////////////////////
int MemoryManager::EntryPresent(uint64_t CpuPtr)
{
if(AccViewTable.empty()) return 0;
auto count = AccViewTable.count(CpuPtr); assert((count==0)||(count==1));
return count;
}
void MemoryManager::EntryCreate(uint64_t CpuPtr,size_t bytes,ViewMode mode,ViewAdvise hint)
{
assert(!EntryPresent(CpuPtr));
AcceleratorViewEntry AccCache;
AccCache.CpuPtr = CpuPtr;
AccCache.AccPtr = (uint64_t)NULL;
AccCache.bytes = bytes;
AccCache.state = CpuDirty;
AccCache.LRU_valid=0;
AccCache.transient=0;
AccCache.accLock=0;
AccCache.cpuLock=0;
AccViewTable[CpuPtr] = AccCache;
}
MemoryManager::AccViewTableIterator MemoryManager::EntryLookup(uint64_t CpuPtr)
{
assert(EntryPresent(CpuPtr));
auto AccCacheIterator = AccViewTable.find(CpuPtr);
assert(AccCacheIterator!=AccViewTable.end());
return AccCacheIterator;
}
void MemoryManager::EntryErase(uint64_t CpuPtr)
{
auto AccCache = EntryLookup(CpuPtr);
AccViewTable.erase(CpuPtr);
}
void MemoryManager::LRUinsert(AcceleratorViewEntry &AccCache)
{
assert(AccCache.LRU_valid==0);
if (AccCache.transient) {
LRU.push_back(AccCache.CpuPtr);
AccCache.LRU_entry = --LRU.end();
} else {
LRU.push_front(AccCache.CpuPtr);
AccCache.LRU_entry = LRU.begin();
}
AccCache.LRU_valid = 1;
DeviceLRUBytes+=AccCache.bytes;
}
void MemoryManager::LRUremove(AcceleratorViewEntry &AccCache)
{
assert(AccCache.LRU_valid==1);
LRU.erase(AccCache.LRU_entry);
AccCache.LRU_valid = 0;
DeviceLRUBytes-=AccCache.bytes;
}
/////////////////////////////////////////////////
// Accelerator cache motion & consistency logic
/////////////////////////////////////////////////
void MemoryManager::AccDiscard(AcceleratorViewEntry &AccCache)
{
///////////////////////////////////////////////////////////
// Remove from Accelerator, remove entry, without flush
// Cannot be locked. If allocated Must be in LRU pool.
///////////////////////////////////////////////////////////
assert(AccCache.state!=Empty);
dprintf("MemoryManager: Discard(%llx) %llx\n",(uint64_t)AccCache.CpuPtr,(uint64_t)AccCache.AccPtr);
assert(AccCache.accLock==0);
assert(AccCache.cpuLock==0);
assert(AccCache.CpuPtr!=(uint64_t)NULL);
if(AccCache.AccPtr) {
AcceleratorFree((void *)AccCache.AccPtr,AccCache.bytes);
DeviceBytes -=AccCache.bytes;
LRUremove(AccCache);
dprintf("MemoryManager: Free(%llx) LRU %lld Total %lld\n",(uint64_t)AccCache.AccPtr,DeviceLRUBytes,DeviceBytes);
}
uint64_t CpuPtr = AccCache.CpuPtr;
EntryErase(CpuPtr);
}
void MemoryManager::Evict(AcceleratorViewEntry &AccCache)
{
///////////////////////////////////////////////////////////////////////////
// Make CPU consistent, remove from Accelerator, remove entry
// Cannot be locked. If allocated must be in LRU pool.
///////////////////////////////////////////////////////////////////////////
assert(AccCache.state!=Empty);
dprintf("MemoryManager: Evict(%llx) %llx\n",(uint64_t)AccCache.CpuPtr,(uint64_t)AccCache.AccPtr);
assert(AccCache.accLock==0);
assert(AccCache.cpuLock==0);
if(AccCache.state==AccDirty) {
Flush(AccCache);
}
assert(AccCache.CpuPtr!=(uint64_t)NULL);
if(AccCache.AccPtr) {
AcceleratorFree((void *)AccCache.AccPtr,AccCache.bytes);
DeviceBytes -=AccCache.bytes;
LRUremove(AccCache);
dprintf("MemoryManager: Free(%llx) footprint now %lld \n",(uint64_t)AccCache.AccPtr,DeviceBytes);
}
uint64_t CpuPtr = AccCache.CpuPtr;
EntryErase(CpuPtr);
}
void MemoryManager::Flush(AcceleratorViewEntry &AccCache)
{
assert(AccCache.state==AccDirty);
assert(AccCache.cpuLock==0);
assert(AccCache.accLock==0);
assert(AccCache.AccPtr!=(uint64_t)NULL);
assert(AccCache.CpuPtr!=(uint64_t)NULL);
acceleratorCopyFromDevice((void *)AccCache.AccPtr,(void *)AccCache.CpuPtr,AccCache.bytes);
dprintf("MemoryManager: Flush %llx -> %llx\n",(uint64_t)AccCache.AccPtr,(uint64_t)AccCache.CpuPtr); fflush(stdout);
DeviceToHostBytes+=AccCache.bytes;
DeviceToHostXfer++;
AccCache.state=Consistent;
}
void MemoryManager::Clone(AcceleratorViewEntry &AccCache)
{
assert(AccCache.state==CpuDirty);
assert(AccCache.cpuLock==0);
assert(AccCache.accLock==0);
assert(AccCache.CpuPtr!=(uint64_t)NULL);
if(AccCache.AccPtr==(uint64_t)NULL){
AccCache.AccPtr=(uint64_t)AcceleratorAllocate(AccCache.bytes);
DeviceBytes+=AccCache.bytes;
}
dprintf("MemoryManager: Clone %llx <- %llx\n",(uint64_t)AccCache.AccPtr,(uint64_t)AccCache.CpuPtr); fflush(stdout);
acceleratorCopyToDevice((void *)AccCache.CpuPtr,(void *)AccCache.AccPtr,AccCache.bytes);
HostToDeviceBytes+=AccCache.bytes;
HostToDeviceXfer++;
AccCache.state=Consistent;
}
void MemoryManager::CpuDiscard(AcceleratorViewEntry &AccCache)
{
assert(AccCache.state!=Empty);
assert(AccCache.cpuLock==0);
assert(AccCache.accLock==0);
assert(AccCache.CpuPtr!=(uint64_t)NULL);
if(AccCache.AccPtr==(uint64_t)NULL){
AccCache.AccPtr=(uint64_t)AcceleratorAllocate(AccCache.bytes);
DeviceBytes+=AccCache.bytes;
}
AccCache.state=AccDirty;
}
/////////////////////////////////////////////////////////////////////////////////
// View management
/////////////////////////////////////////////////////////////////////////////////
void MemoryManager::ViewClose(void* Ptr,ViewMode mode)
{
if( (mode==AcceleratorRead)||(mode==AcceleratorWrite)||(mode==AcceleratorWriteDiscard) ){
AcceleratorViewClose((uint64_t)Ptr);
} else if( (mode==CpuRead)||(mode==CpuWrite)){
CpuViewClose((uint64_t)Ptr);
} else {
assert(0);
}
}
void *MemoryManager::ViewOpen(void* _CpuPtr,size_t bytes,ViewMode mode,ViewAdvise hint)
{
uint64_t CpuPtr = (uint64_t)_CpuPtr;
if( (mode==AcceleratorRead)||(mode==AcceleratorWrite)||(mode==AcceleratorWriteDiscard) ){
return (void *) AcceleratorViewOpen(CpuPtr,bytes,mode,hint);
} else if( (mode==CpuRead)||(mode==CpuWrite)){
return (void *)CpuViewOpen(CpuPtr,bytes,mode,hint);
} else {
assert(0);
return NULL;
}
}
void MemoryManager::EvictVictims(uint64_t bytes)
{
while(bytes+DeviceLRUBytes > DeviceMaxBytes){
// Evict least-recently-used entries until the request fits.
// Note: assumes the LRU pool can always release enough device memory.
if ( DeviceLRUBytes > 0){
assert(LRU.size()>0);
uint64_t victim = LRU.back();
auto AccCacheIterator = EntryLookup(victim);
auto & AccCache = AccCacheIterator->second;
Evict(AccCache);
}
}
}
uint64_t MemoryManager::AcceleratorViewOpen(uint64_t CpuPtr,size_t bytes,ViewMode mode,ViewAdvise hint)
{
////////////////////////////////////////////////////////////////////////////
// Find if present, otherwise get or force an empty
////////////////////////////////////////////////////////////////////////////
if ( EntryPresent(CpuPtr)==0 ){
EntryCreate(CpuPtr,bytes,mode,hint);
}
auto AccCacheIterator = EntryLookup(CpuPtr);
auto & AccCache = AccCacheIterator->second;
if (!AccCache.AccPtr) {
EvictVictims(bytes);
}
assert((mode==AcceleratorRead)||(mode==AcceleratorWrite)||(mode==AcceleratorWriteDiscard));
assert(AccCache.cpuLock==0); // Programming error
if(AccCache.state!=Empty) {
dprintf("ViewOpen found entry %llx %llx : %lld %lld\n",
(uint64_t)AccCache.CpuPtr,
(uint64_t)CpuPtr,
(uint64_t)AccCache.bytes,
(uint64_t)bytes);
assert(AccCache.CpuPtr == CpuPtr);
assert(AccCache.bytes ==bytes);
}
/*
* State transitions and actions
*
* Action State StateNext Flush Clone
*
* AccRead Empty Consistent - Y
* AccWrite Empty AccDirty - Y
* AccRead CpuDirty Consistent - Y
* AccWrite CpuDirty AccDirty - Y
* AccRead Consistent Consistent - -
* AccWrite Consistent AccDirty - -
* AccRead AccDirty AccDirty - -
* AccWrite AccDirty AccDirty - -
*/
if(AccCache.state==Empty) {
assert(AccCache.LRU_valid==0);
AccCache.CpuPtr = CpuPtr;
AccCache.AccPtr = (uint64_t)NULL;
AccCache.bytes = bytes;
AccCache.state = CpuDirty; // Cpu starts primary
if(mode==AcceleratorWriteDiscard){
CpuDiscard(AccCache);
AccCache.state = AccDirty; // Empty + AcceleratorWriteDiscard => AccDirty
} else if(mode==AcceleratorWrite){
Clone(AccCache);
AccCache.state = AccDirty; // Empty + AcceleratorWrite=> AccDirty
} else {
Clone(AccCache);
AccCache.state = Consistent; // Empty + AccRead => Consistent
}
AccCache.accLock= 1;
} else if(AccCache.state==CpuDirty ){
if(mode==AcceleratorWriteDiscard) {
CpuDiscard(AccCache);
AccCache.state = AccDirty; // CpuDirty + AcceleratorWriteDiscard => AccDirty
} else if(mode==AcceleratorWrite) {
Clone(AccCache);
AccCache.state = AccDirty; // CpuDirty + AcceleratorWrite=> AccDirty
} else {
Clone(AccCache);
AccCache.state = Consistent; // CpuDirty + AccRead => Consistent
}
AccCache.accLock++;
dprintf("Copied CpuDirty entry into device accLock %d\n",AccCache.accLock);
} else if(AccCache.state==Consistent) {
if((mode==AcceleratorWrite)||(mode==AcceleratorWriteDiscard))
AccCache.state = AccDirty; // Consistent + AcceleratorWrite=> AccDirty
else
AccCache.state = Consistent; // Consistent + AccRead => Consistent
AccCache.accLock++;
dprintf("Consistent entry into device accLock %d\n",AccCache.accLock);
} else if(AccCache.state==AccDirty) {
if((mode==AcceleratorWrite)||(mode==AcceleratorWriteDiscard))
AccCache.state = AccDirty; // AccDirty + AcceleratorWrite=> AccDirty
else
AccCache.state = AccDirty; // AccDirty + AccRead => AccDirty
AccCache.accLock++;
dprintf("AccDirty entry into device accLock %d\n",AccCache.accLock);
} else {
assert(0);
}
// If view is opened on device remove from LRU
if(AccCache.LRU_valid==1){
// must possibly remove from LRU as now locked on GPU
LRUremove(AccCache);
}
int transient =hint;
AccCache.transient= transient? EvictNext : 0;
return AccCache.AccPtr;
}
////////////////////////////////////
// look up & decrement lock count
////////////////////////////////////
void MemoryManager::AcceleratorViewClose(uint64_t CpuPtr)
{
auto AccCacheIterator = EntryLookup(CpuPtr);
auto & AccCache = AccCacheIterator->second;
assert(AccCache.cpuLock==0);
assert(AccCache.accLock>0);
AccCache.accLock--;
// Move to LRU queue if not locked and close on device
if(AccCache.accLock==0) {
LRUinsert(AccCache);
}
}
void MemoryManager::CpuViewClose(uint64_t CpuPtr)
{
auto AccCacheIterator = EntryLookup(CpuPtr);
auto & AccCache = AccCacheIterator->second;
assert(AccCache.cpuLock>0);
assert(AccCache.accLock==0);
AccCache.cpuLock--;
}
/*
* Action State StateNext Flush Clone
*
* CpuRead Empty CpuDirty - -
* CpuWrite Empty CpuDirty - -
* CpuRead CpuDirty CpuDirty - -
* CpuWrite CpuDirty CpuDirty - -
* CpuRead Consistent Consistent - -
* CpuWrite Consistent CpuDirty - -
* CpuRead AccDirty Consistent Y -
* CpuWrite AccDirty CpuDirty Y -
*/
uint64_t MemoryManager::CpuViewOpen(uint64_t CpuPtr,size_t bytes,ViewMode mode,ViewAdvise transient)
{
////////////////////////////////////////////////////////////////////////////
// Find if present, otherwise get or force an empty
////////////////////////////////////////////////////////////////////////////
if ( EntryPresent(CpuPtr)==0 ){
EntryCreate(CpuPtr,bytes,mode,transient);
}
auto AccCacheIterator = EntryLookup(CpuPtr);
auto & AccCache = AccCacheIterator->second;
if (!AccCache.AccPtr) {
EvictVictims(bytes);
}
assert((mode==CpuRead)||(mode==CpuWrite));
assert(AccCache.accLock==0); // Programming error
if(AccCache.state!=Empty) {
assert(AccCache.CpuPtr == CpuPtr);
assert(AccCache.bytes==bytes);
}
if(AccCache.state==Empty) {
AccCache.CpuPtr = CpuPtr;
AccCache.AccPtr = (uint64_t)NULL;
AccCache.bytes = bytes;
AccCache.state = CpuDirty; // Empty + CpuRead/CpuWrite => CpuDirty
AccCache.accLock= 0;
AccCache.cpuLock= 1;
} else if(AccCache.state==CpuDirty ){
// AccPtr don't care; allocation is deferred
AccCache.state = CpuDirty; // CpuDirty +CpuRead/CpuWrite => CpuDirty
AccCache.cpuLock++;
} else if(AccCache.state==Consistent) {
assert(AccCache.AccPtr != (uint64_t)NULL);
if(mode==CpuWrite)
AccCache.state = CpuDirty; // Consistent +CpuWrite => CpuDirty
else
AccCache.state = Consistent; // Consistent +CpuRead => Consistent
AccCache.cpuLock++;
} else if(AccCache.state==AccDirty) {
assert(AccCache.AccPtr != (uint64_t)NULL);
Flush(AccCache);
if(mode==CpuWrite) AccCache.state = CpuDirty; // AccDirty +CpuWrite => CpuDirty, Flush
else AccCache.state = Consistent; // AccDirty +CpuRead => Consistent, Flush
AccCache.cpuLock++;
} else {
assert(0); // should be unreachable
}
AccCache.transient= transient? EvictNext : 0;
return AccCache.CpuPtr;
}
void MemoryManager::NotifyDeletion(void *_ptr)
{
// Look up in ViewCache
uint64_t ptr = (uint64_t)_ptr;
if(EntryPresent(ptr)) {
auto e = EntryLookup(ptr);
AccDiscard(e->second);
}
}
void MemoryManager::Print(void)
{
std::cout << GridLogDebug << "--------------------------------------------" << std::endl;
std::cout << GridLogDebug << "Memory Manager " << std::endl;
std::cout << GridLogDebug << "--------------------------------------------" << std::endl;
std::cout << GridLogDebug << DeviceBytes << " bytes allocated on device " << std::endl;
std::cout << GridLogDebug << DeviceLRUBytes<< " bytes evictable on device " << std::endl;
std::cout << GridLogDebug << DeviceMaxBytes<< " bytes max on device " << std::endl;
std::cout << GridLogDebug << HostToDeviceXfer << " transfers to device " << std::endl;
std::cout << GridLogDebug << DeviceToHostXfer << " transfers from device " << std::endl;
std::cout << GridLogDebug << HostToDeviceBytes<< " bytes transferred to device " << std::endl;
std::cout << GridLogDebug << DeviceToHostBytes<< " bytes transferred from device " << std::endl;
std::cout << GridLogDebug << AccViewTable.size()<< " vectors " << LRU.size()<<" evictable"<< std::endl;
std::cout << GridLogDebug << "--------------------------------------------" << std::endl;
std::cout << GridLogDebug << "CpuAddr\t\tAccAddr\t\tState\t\tcpuLock\taccLock\tLRU_valid "<<std::endl;
std::cout << GridLogDebug << "--------------------------------------------" << std::endl;
for(auto it=AccViewTable.begin();it!=AccViewTable.end();it++){
auto &AccCache = it->second;
std::string str;
if ( AccCache.state==Empty ) str = std::string("Empty");
if ( AccCache.state==CpuDirty ) str = std::string("CpuDirty");
if ( AccCache.state==AccDirty ) str = std::string("AccDirty");
if ( AccCache.state==Consistent)str = std::string("Consistent");
std::cout << GridLogDebug << "0x"<<std::hex<<AccCache.CpuPtr<<std::dec
<< "\t0x"<<std::hex<<AccCache.AccPtr<<std::dec<<"\t" <<str
<< "\t" << AccCache.cpuLock
<< "\t" << AccCache.accLock
<< "\t" << AccCache.LRU_valid<<std::endl;
}
std::cout << GridLogDebug << "--------------------------------------------" << std::endl;
};
int MemoryManager::isOpen (void* _CpuPtr)
{
uint64_t CpuPtr = (uint64_t)_CpuPtr;
if ( EntryPresent(CpuPtr) ){
auto AccCacheIterator = EntryLookup(CpuPtr);
auto & AccCache = AccCacheIterator->second;
return AccCache.cpuLock+AccCache.accLock;
} else {
return 0;
}
}
NAMESPACE_END(Grid);
#endif
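
The state machine above is the heart of the cached back end: every AcceleratorViewOpen consults the Action/State table and either clones host data to the device, discards the host copy, or just bumps a lock count. Below is a minimal standalone sketch (not Grid code; the enum and function names are invented for illustration) that encodes the accelerator-side transition table as a pure function and checks a few rows against the comments.

// Standalone sketch (not part of Grid): the accelerator-view transition table
// from AcceleratorViewOpen above, reduced to a pure function for illustration.
#include <cassert>
#include <iostream>

enum State { Empty, CpuDirty, AccDirty, Consistent };
enum Mode  { AccRead, AccWrite, AccWriteDiscard };

// Returns the next state when an accelerator view is opened in mode 'm'.
State acceleratorOpenNext(State s, Mode m)
{
  if (s == Empty || s == CpuDirty)          // data lives on host (or nowhere yet)
    return (m == AccRead) ? Consistent : AccDirty;
  if (s == Consistent)                      // both copies valid
    return (m == AccRead) ? Consistent : AccDirty;
  return AccDirty;                          // AccDirty stays AccDirty
}

int main()
{
  assert(acceleratorOpenNext(Empty,      AccRead)  == Consistent);
  assert(acceleratorOpenNext(CpuDirty,   AccWrite) == AccDirty);
  assert(acceleratorOpenNext(Consistent, AccWrite) == AccDirty);
  assert(acceleratorOpenNext(AccDirty,   AccRead)  == AccDirty);
  std::cout << "transition table agrees with the comments above\n";
  return 0;
}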


@ -0,0 +1,23 @@
#include <Grid/GridCore.h>
#ifdef GRID_UVM
NAMESPACE_BEGIN(Grid);
/////////////////////////////////////////////////////////////////////////////////
// View management is 1:1 address space mapping
/////////////////////////////////////////////////////////////////////////////////
uint64_t MemoryManager::DeviceBytes;
uint64_t MemoryManager::DeviceLRUBytes;
uint64_t MemoryManager::DeviceMaxBytes = 1024*1024*128;
uint64_t MemoryManager::HostToDeviceBytes;
uint64_t MemoryManager::DeviceToHostBytes;
uint64_t MemoryManager::HostToDeviceXfer;
uint64_t MemoryManager::DeviceToHostXfer;
void MemoryManager::ViewClose(void* AccPtr,ViewMode mode){};
void *MemoryManager::ViewOpen(void* CpuPtr,size_t bytes,ViewMode mode,ViewAdvise hint){ return CpuPtr; };
int MemoryManager::isOpen (void* CpuPtr) { return 0;}
void MemoryManager::Print(void){};
void MemoryManager::NotifyDeletion(void *ptr){};
NAMESPACE_END(Grid);
#endif
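
Under GRID_UVM the manager above is a pass-through: views are a 1:1 address-space mapping, so ViewOpen hands back the CPU pointer unchanged and ViewClose is a no-op. A tiny standalone illustration of that contract follows (hypothetical helper names, not Grid's API).

// Standalone sketch (illustrative only, not Grid's API surface): with unified
// memory a "view" degenerates to the identity mapping, so the cached and
// non-cached back ends can share one calling convention.
#include <cassert>
#include <cstddef>

void *ViewOpenUVM(void *CpuPtr, size_t /*bytes*/) { return CpuPtr; } // 1:1 mapping
void  ViewCloseUVM(void * /*Ptr*/)                {}                 // nothing to sync

int main()
{
  int data[4] = {1, 2, 3, 4};
  void *v = ViewOpenUVM(data, sizeof(data));
  assert(v == data);   // same address space: no copy, no state tracking
  ViewCloseUVM(v);
  return 0;
}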


@ -0,0 +1,67 @@
#include <Grid/GridCore.h>
#include <fcntl.h>
#include <unistd.h>
NAMESPACE_BEGIN(Grid);
MemoryStats *MemoryProfiler::stats = nullptr;
bool MemoryProfiler::debug = false;
void check_huge_pages(void *Buf,uint64_t BYTES)
{
#ifdef __linux__
int fd = open("/proc/self/pagemap", O_RDONLY);
assert(fd >= 0);
const int page_size = 4096;
uint64_t virt_pfn = (uint64_t)Buf / page_size;
off_t offset = sizeof(uint64_t) * virt_pfn;
uint64_t npages = (BYTES + page_size-1) / page_size;
std::vector<uint64_t> pagedata(npages); // avoid a variable-length array (non-standard in C++)
uint64_t ret = lseek(fd, offset, SEEK_SET);
assert(ret == offset);
ret = ::read(fd, &pagedata[0], sizeof(uint64_t)*npages);
assert(ret == sizeof(uint64_t) * npages);
int nhugepages = npages / 512;
int n4ktotal, nnothuge;
n4ktotal = 0;
nnothuge = 0;
for (int i = 0; i < nhugepages; ++i) {
uint64_t baseaddr = (pagedata[i*512] & 0x7fffffffffffffULL) * page_size;
for (int j = 0; j < 512; ++j) {
uint64_t pageaddr = (pagedata[i*512+j] & 0x7fffffffffffffULL) * page_size;
++n4ktotal;
if (pageaddr != baseaddr + j * page_size)
++nnothuge;
}
}
int rank = CartesianCommunicator::RankWorld();
printf("rank %d Allocated %d 4k pages, %d not in huge pages\n", rank, n4ktotal, nnothuge);
#endif
}
std::string sizeString(const size_t bytes)
{
constexpr unsigned int bufSize = 256;
const char *suffixes[7] = {"", "K", "M", "G", "T", "P", "E"};
char buf[bufSize];
size_t s = 0;
double count = bytes;
while (count >= 1024 && s < 7)
{
s++;
count /= 1024;
}
if (count - floor(count) == 0.0)
{
snprintf(buf, bufSize, "%d %sB", (int)count, suffixes[s]);
}
else
{
snprintf(buf, bufSize, "%.1f %sB", count, suffixes[s]);
}
return std::string(buf);
}
NAMESPACE_END(Grid);
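
sizeString above formats a byte count by repeatedly dividing by 1024 and choosing a suffix, printing whole numbers without a decimal point. A standalone restatement of that rule follows (illustrative only; fmtBytes is not a Grid function).

// Standalone check of the formatting rule used by sizeString above.
#include <cmath>
#include <cstdio>
#include <string>

std::string fmtBytes(size_t bytes)
{
  const char *suffixes[7] = {"", "K", "M", "G", "T", "P", "E"};
  double count = bytes;
  size_t s = 0;
  while (count >= 1024 && s < 6) { s++; count /= 1024; }  // pick the largest suffix
  char buf[64];
  if (count == std::floor(count)) snprintf(buf, sizeof(buf), "%d %sB", (int)count, suffixes[s]);
  else                            snprintf(buf, sizeof(buf), "%.1f %sB", count, suffixes[s]);
  return std::string(buf);
}

int main()
{
  std::printf("%s\n", fmtBytes(512).c_str());       // "512 B"
  std::printf("%s\n", fmtBytes(3UL << 30).c_str()); // "3 GB"
  std::printf("%s\n", fmtBytes(1536).c_str());      // "1.5 KB"
  return 0;
}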


@ -0,0 +1,95 @@
/*************************************************************************************
Grid physics library, www.github.com/paboyle/Grid
Source file: ./lib/MemoryStats.h
Copyright (C) 2015
Author: Azusa Yamaguchi <ayamaguc@staffmail.ed.ac.uk>
Author: Peter Boyle <paboyle@ph.ed.ac.uk>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */
#pragma once
NAMESPACE_BEGIN(Grid);
std::string sizeString(size_t bytes);
struct MemoryStats
{
size_t totalAllocated{0}, maxAllocated{0},
currentlyAllocated{0}, totalFreed{0};
};
class MemoryProfiler
{
public:
static MemoryStats *stats;
static bool debug;
};
#define memString(bytes) std::to_string(bytes) + " (" + sizeString(bytes) + ")"
#define profilerDebugPrint \
if (MemoryProfiler::stats) \
{ \
auto s = MemoryProfiler::stats; \
std::cout << GridLogDebug << "[Memory debug] Stats " << MemoryProfiler::stats << std::endl; \
std::cout << GridLogDebug << "[Memory debug] total : " << memString(s->totalAllocated) \
<< std::endl; \
std::cout << GridLogDebug << "[Memory debug] max : " << memString(s->maxAllocated) \
<< std::endl; \
std::cout << GridLogDebug << "[Memory debug] current: " << memString(s->currentlyAllocated) \
<< std::endl; \
std::cout << GridLogDebug << "[Memory debug] freed : " << memString(s->totalFreed) \
<< std::endl; \
}
#define profilerAllocate(bytes) \
if (MemoryProfiler::stats) \
{ \
auto s = MemoryProfiler::stats; \
s->totalAllocated += (bytes); \
s->currentlyAllocated += (bytes); \
s->maxAllocated = std::max(s->maxAllocated, s->currentlyAllocated); \
} \
if (MemoryProfiler::debug) \
{ \
std::cout << GridLogDebug << "[Memory debug] allocating " << memString(bytes) << std::endl; \
profilerDebugPrint; \
}
#define profilerFree(bytes) \
if (MemoryProfiler::stats) \
{ \
auto s = MemoryProfiler::stats; \
s->totalFreed += (bytes); \
s->currentlyAllocated -= (bytes); \
} \
if (MemoryProfiler::debug) \
{ \
std::cout << GridLogDebug << "[Memory debug] freeing " << memString(bytes) << std::endl; \
profilerDebugPrint; \
}
void check_huge_pages(void *Buf,uint64_t BYTES);
NAMESPACE_END(Grid);
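
The profilerAllocate / profilerFree macros above only do accounting when a MemoryStats object is attached to MemoryProfiler::stats. The sketch below shows how an allocation path might bracket raw allocations with them; trackedAlloc and trackedFree are hypothetical wrappers, not part of Grid, and the example assumes the Grid headers are available.

// Usage sketch assuming Grid's MemoryProfiler as declared above.
#include <Grid/GridCore.h>
#include <cstdlib>

using namespace Grid;

void *trackedAlloc(size_t bytes)      // hypothetical helper, not part of Grid
{
  profilerAllocate(bytes);            // updates totalAllocated / currentlyAllocated / maxAllocated
  return std::malloc(bytes);
}
void trackedFree(void *p, size_t bytes)
{
  profilerFree(bytes);                // updates totalFreed / currentlyAllocated
  std::free(p);
}

int main()
{
  MemoryStats stats;
  MemoryProfiler::stats = &stats;     // enable accounting
  void *p = trackedAlloc(1 << 20);
  trackedFree(p, 1 << 20);
  return stats.currentlyAllocated == 0 ? 0 : 1;
}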


@ -1,4 +1,4 @@
/*************************************************************************************
/*************************************************************************************
Grid physics library, www.github.com/paboyle/Grid
@ -23,13 +23,13 @@ Author: Peter Boyle <paboyle@ph.ed.ac.uk>
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */
*************************************************************************************/
/* END LEGAL */
#ifndef GRID_CARTESIAN_H
#define GRID_CARTESIAN_H
#include <cartesian/Cartesian_base.h>
#include <cartesian/Cartesian_full.h>
#include <cartesian/Cartesian_red_black.h>
#include <Grid/cartesian/Cartesian_base.h>
#include <Grid/cartesian/Cartesian_full.h>
#include <Grid/cartesian/Cartesian_red_black.h>
#endif


@ -0,0 +1,291 @@
/*************************************************************************************
Grid physics library, www.github.com/paboyle/Grid
Source file: ./lib/cartesian/Cartesian_base.h
Copyright (C) 2015
Author: Peter Boyle <paboyle@ph.ed.ac.uk>
Author: paboyle <paboyle@ph.ed.ac.uk>
Author: Guido Cossu <guido.cossu@ed.ac.uk>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */
#ifndef GRID_CARTESIAN_BASE_H
#define GRID_CARTESIAN_BASE_H
NAMESPACE_BEGIN(Grid);
//////////////////////////////////////////////////////////////////////
// Communicator provides information on the processor grid
//////////////////////////////////////////////////////////////////////
// unsigned long _ndimension;
// Coordinate _processors; // processor grid
// int _processor; // linear processor rank
// Coordinate _processor_coor; // processor coordinate within the grid
//////////////////////////////////////////////////////////////////////
class GridBase : public CartesianCommunicator , public GridThread {
public:
int dummy;
// Give Lattice access
template<class object> friend class Lattice;
GridBase(const Coordinate & processor_grid) : CartesianCommunicator(processor_grid) { LocallyPeriodic=0;};
GridBase(const Coordinate & processor_grid,
const CartesianCommunicator &parent,
int &split_rank)
: CartesianCommunicator(processor_grid,parent,split_rank) {LocallyPeriodic=0;};
GridBase(const Coordinate & processor_grid,
const CartesianCommunicator &parent)
: CartesianCommunicator(processor_grid,parent,dummy) {LocallyPeriodic=0;};
virtual ~GridBase() = default;
// Physics Grid information.
Coordinate _simd_layout;// Which dimensions get laid out over simd lanes.
Coordinate _fdimensions;// (full) Global dimensions of array prior to cb removal
Coordinate _gdimensions;// Global dimensions of array after cb removal
Coordinate _ldimensions;// local dimensions of array with processor images removed
Coordinate _rdimensions;// Reduced local dimensions with simd lane images and processor images removed
Coordinate _ostride; // Outer stride for each dimension
Coordinate _istride; // Inner stride i.e. within simd lane
int _osites; // _isites*_osites = product(dimensions).
int _isites; // SIMD lanes per outer site: product of _simd_layout
int _fsites; // Full-lattice site count: product of _fdimensions
int _gsites; // Global site count (after checkerboard reduction): product of _gdimensions
Coordinate _slice_block;// subslice information
Coordinate _slice_stride;
Coordinate _slice_nblock;
Coordinate _lstart; // local start of array in gcoors _processor_coor[d]*_ldimensions[d]
Coordinate _lend ; // local end of array in gcoors _processor_coor[d]*_ldimensions[d]+_ldimensions[d]-1
bool _isCheckerBoarded;
int LocallyPeriodic;
Coordinate _checker_dim_mask;
public:
////////////////////////////////////////////////////////////////
// Checkerboarding interface is virtual and overridden by
// GridCartesian / GridRedBlackCartesian
////////////////////////////////////////////////////////////////
virtual int CheckerBoarded(int dim)=0;
virtual int CheckerBoard(const Coordinate &site)=0;
virtual int CheckerBoardDestination(int source_cb,int shift,int dim)=0;
virtual int CheckerBoardShift(int source_cb,int dim,int shift,int osite)=0;
virtual int CheckerBoardShiftForCB(int source_cb,int dim,int shift,int cb)=0;
virtual int CheckerBoardFromOindex (int Oindex)=0;
virtual int CheckerBoardFromOindexTable (int Oindex)=0;
//////////////////////////////////////////////////////////////////////////////////////////////
// Local layout calculations
//////////////////////////////////////////////////////////////////////////////////////////////
// These routines are key. Subdivide the linearised cartesian index into
// "inner" index identifying which simd lane of object<vFcomplex> is associated with coord
// "outer" index identifying which element of _odata in class "Lattice" is associated with coord.
//
// Compared to, say, Blitz++ we simply need to store BOTH an inner stride and an outer
// stride per dimension. The cost of evaluating the indexing information is doubled for an n-dimensional
// coordinate. Note, however, for data parallel operations the "inner" indexing cost is not paid and all
// lanes are operated upon simultaneously.
virtual int oIndex(Coordinate &coor)
{
int idx=0;
// Works with either global or local coordinates
for(int d=0;d<_ndimension;d++) idx+=_ostride[d]*(coor[d]%_rdimensions[d]);
return idx;
}
virtual int iIndex(Coordinate &lcoor)
{
int idx=0;
for(int d=0;d<_ndimension;d++) idx+=_istride[d]*(lcoor[d]/_rdimensions[d]);
return idx;
}
inline int oIndexReduced(Coordinate &ocoor)
{
int idx=0;
// ocoor is already reduced so can eliminate the modulo operation
// for fast indexing and inline the routine
for(int d=0;d<_ndimension;d++) idx+=_ostride[d]*ocoor[d];
return idx;
}
inline void oCoorFromOindex (Coordinate& coor,int Oindex){
Lexicographic::CoorFromIndex(coor,Oindex,_rdimensions);
}
inline void InOutCoorToLocalCoor (Coordinate &ocoor, Coordinate &icoor, Coordinate &lcoor) {
lcoor.resize(_ndimension);
for (int d = 0; d < _ndimension; d++)
lcoor[d] = ocoor[d] + _rdimensions[d] * icoor[d];
}
//////////////////////////////////////////////////////////
// SIMD lane addressing
//////////////////////////////////////////////////////////
inline void iCoorFromIindex(Coordinate &coor,int lane)
{
Lexicographic::CoorFromIndex(coor,lane,_simd_layout);
}
inline int PermuteDim(int dimension){
return _simd_layout[dimension]>1;
}
inline int PermuteType(int dimension){
int permute_type=0;
//
// Best way to encode this would be to present a mask
// for which simd dimensions are rotated, and the rotation
// size. If there is only one simd dimension rotated, this is just
// a permute.
//
// Cases: PermuteType == 1,2,4,8
// Distance should be either 0,1,2..
//
if ( _simd_layout[dimension] > 2 ) {
for(int d=0;d<_ndimension;d++){
if ( d != dimension ) assert ( (_simd_layout[d]==1) );
}
permute_type = RotateBit; // How to specify distance; this is not just direction.
return permute_type;
}
for(int d=_ndimension-1;d>dimension;d--){
if (_simd_layout[d]>1 ) permute_type++;
}
return permute_type;
}
////////////////////////////////////////////////////////////////
// Array sizing queries
////////////////////////////////////////////////////////////////
inline int iSites(void) const { return _isites; };
inline int Nsimd(void) const { return _isites; };// Synonymous with iSites
inline int oSites(void) const { return _osites; };
inline int lSites(void) const { return _isites*_osites; };
inline int gSites(void) const { return _isites*_osites*_Nprocessors; };
inline int Nd (void) const { return _ndimension;};
inline const Coordinate LocalStarts(void) { return _lstart; };
inline const Coordinate &FullDimensions(void) { return _fdimensions;};
inline const Coordinate &GlobalDimensions(void) { return _gdimensions;};
inline const Coordinate &LocalDimensions(void) { return _ldimensions;};
inline const Coordinate &VirtualLocalDimensions(void) { return _ldimensions;};
////////////////////////////////////////////////////////////////
// Utility to print the full decomposition details
////////////////////////////////////////////////////////////////
void show_decomposition(){
std::cout << GridLogMessage << "\tFull Dimensions : " << _fdimensions << std::endl;
std::cout << GridLogMessage << "\tSIMD layout : " << _simd_layout << std::endl;
std::cout << GridLogMessage << "\tGlobal Dimensions : " << _gdimensions << std::endl;
std::cout << GridLogMessage << "\tLocal Dimensions : " << _ldimensions << std::endl;
std::cout << GridLogMessage << "\tReduced Dimensions : " << _rdimensions << std::endl;
std::cout << GridLogMessage << "\tOuter strides : " << _ostride << std::endl;
std::cout << GridLogMessage << "\tInner strides : " << _istride << std::endl;
std::cout << GridLogMessage << "\tiSites : " << _isites << std::endl;
std::cout << GridLogMessage << "\toSites : " << _osites << std::endl;
std::cout << GridLogMessage << "\tlSites : " << lSites() << std::endl;
std::cout << GridLogMessage << "\tgSites : " << gSites() << std::endl;
std::cout << GridLogMessage << "\tNd : " << _ndimension << std::endl;
}
////////////////////////////////////////////////////////////////
// Global addressing
////////////////////////////////////////////////////////////////
void GlobalIndexToGlobalCoor(int gidx,Coordinate &gcoor){
assert(gidx< gSites());
Lexicographic::CoorFromIndex(gcoor,gidx,_gdimensions);
}
void LocalIndexToLocalCoor(int lidx,Coordinate &lcoor){
assert(lidx<lSites());
Lexicographic::CoorFromIndex(lcoor,lidx,_ldimensions);
}
void GlobalCoorToGlobalIndex(const Coordinate & gcoor,int & gidx){
gidx=0;
int mult=1;
for(int mu=0;mu<_ndimension;mu++) {
gidx+=mult*gcoor[mu];
mult*=_gdimensions[mu];
}
}
void GlobalCoorToProcessorCoorLocalCoor(Coordinate &pcoor,Coordinate &lcoor,const Coordinate &gcoor)
{
pcoor.resize(_ndimension);
lcoor.resize(_ndimension);
for(int mu=0;mu<_ndimension;mu++){
int _fld = _fdimensions[mu]/_processors[mu];
pcoor[mu] = gcoor[mu]/_fld;
lcoor[mu] = gcoor[mu]%_fld;
}
}
void GlobalCoorToRankIndex(int &rank, int &o_idx, int &i_idx ,const Coordinate &gcoor)
{
Coordinate pcoor;
Coordinate lcoor;
GlobalCoorToProcessorCoorLocalCoor(pcoor,lcoor,gcoor);
rank = RankFromProcessorCoor(pcoor);
/*
Coordinate cblcoor(lcoor);
for(int d=0;d<cblcoor.size();d++){
if( this->CheckerBoarded(d) ) {
cblcoor[d] = lcoor[d]/2;
}
}
*/
i_idx= iIndex(lcoor);
o_idx= oIndex(lcoor);
}
void RankIndexToGlobalCoor(int rank, int o_idx, int i_idx , Coordinate &gcoor)
{
gcoor.resize(_ndimension);
Coordinate coor(_ndimension);
ProcessorCoorFromRank(rank,coor);
for(int mu=0;mu<_ndimension;mu++) gcoor[mu] = _ldimensions[mu]*coor[mu];
iCoorFromIindex(coor,i_idx);
for(int mu=0;mu<_ndimension;mu++) gcoor[mu] += _rdimensions[mu]*coor[mu];
oCoorFromOindex (coor,o_idx);
for(int mu=0;mu<_ndimension;mu++) gcoor[mu] += coor[mu];
}
void RankIndexCbToFullGlobalCoor(int rank, int o_idx, int i_idx, int cb,Coordinate &fcoor)
{
RankIndexToGlobalCoor(rank,o_idx,i_idx ,fcoor);
if(CheckerBoarded(0)){
fcoor[0] = fcoor[0]*2+cb;
}
}
void ProcessorCoorLocalCoorToGlobalCoor(Coordinate &Pcoor,Coordinate &Lcoor,Coordinate &gcoor)
{
gcoor.resize(_ndimension);
for(int mu=0;mu<_ndimension;mu++) gcoor[mu] = Pcoor[mu]*_ldimensions[mu]+Lcoor[mu];
}
};
NAMESPACE_END(Grid);
#endif
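
The key layout idea in GridBase above is the split of a local coordinate into an "outer" site index (ostride, coordinate modulo rdimensions) and an "inner" SIMD-lane index (istride, coordinate divided by rdimensions). The small standalone example below (plain C++, no Grid types) reproduces that arithmetic for a 2-d toy layout.

// Standalone sketch of the inner/outer index split described above:
// a local coordinate maps to (outer site, simd lane) via per-dimension strides.
#include <cassert>
#include <vector>

int main()
{
  const int nd = 2;
  std::vector<int> ldims = {8, 8};   // local lattice
  std::vector<int> simd  = {2, 1};   // SIMD lanes folded into dimension 0
  std::vector<int> rdims(nd), ostride(nd), istride(nd);
  for (int d = 0; d < nd; d++) rdims[d] = ldims[d] / simd[d];
  for (int d = 0; d < nd; d++) {
    ostride[d] = (d == 0) ? 1 : ostride[d-1] * rdims[d-1];
    istride[d] = (d == 0) ? 1 : istride[d-1] * simd[d-1];
  }
  // coordinate (5,3): outer index uses coor % rdim, inner index uses coor / rdim
  std::vector<int> coor = {5, 3};
  int oidx = 0, iidx = 0;
  for (int d = 0; d < nd; d++) {
    oidx += ostride[d] * (coor[d] % rdims[d]);
    iidx += istride[d] * (coor[d] / rdims[d]);
  }
  assert(oidx == 1 + 4*3);  // (5%4)=1 in dim 0, plus 3 in dim 1 with outer stride 4
  assert(iidx == 1);        // lane 1 of the two-lane SIMD split in dim 0
  return 0;
}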


@ -0,0 +1,178 @@
/*************************************************************************************
Grid physics library, www.github.com/paboyle/Grid
Source file: ./lib/cartesian/Cartesian_full.h
Copyright (C) 2015
Author: Peter Boyle <paboyle@ph.ed.ac.uk>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */
#ifndef GRID_CARTESIAN_FULL_H
#define GRID_CARTESIAN_FULL_H
NAMESPACE_BEGIN(Grid);
/////////////////////////////////////////////////////////////////////////////////////////
// Grid Support.
/////////////////////////////////////////////////////////////////////////////////////////
class GridCartesian: public GridBase {
public:
int dummy;
Coordinate _checker_dim_mask;
virtual int CheckerBoardFromOindexTable (int Oindex) {
return 0;
}
virtual int CheckerBoardFromOindex (int Oindex)
{
return 0;
}
virtual int CheckerBoarded(int dim){
return 0;
}
virtual int CheckerBoard(const Coordinate &site){
return 0;
}
virtual int CheckerBoardDestination(int cb,int shift,int dim){
return 0;
}
virtual int CheckerBoardShiftForCB(int source_cb,int dim,int shift, int ocb){
return shift;
}
virtual int CheckerBoardShift(int source_cb,int dim,int shift, int osite){
return shift;
}
/////////////////////////////////////////////////////////////////////////
// Constructor takes a parent grid and possibly subdivides communicator.
/////////////////////////////////////////////////////////////////////////
GridCartesian(const Coordinate &dimensions,
const Coordinate &simd_layout,
const Coordinate &processor_grid,
const GridCartesian &parent) : GridBase(processor_grid,parent,dummy)
{
Init(dimensions,simd_layout,processor_grid);
}
GridCartesian(const Coordinate &dimensions,
const Coordinate &simd_layout,
const Coordinate &processor_grid,
const GridCartesian &parent,int &split_rank) : GridBase(processor_grid,parent,split_rank)
{
Init(dimensions,simd_layout,processor_grid);
}
/////////////////////////////////////////////////////////////////////////
// Construct from comm world
/////////////////////////////////////////////////////////////////////////
GridCartesian(const Coordinate &dimensions,
const Coordinate &simd_layout,
const Coordinate &processor_grid) : GridBase(processor_grid)
{
Init(dimensions,simd_layout,processor_grid);
}
virtual ~GridCartesian() = default;
void Init(const Coordinate &dimensions,
const Coordinate &simd_layout,
const Coordinate &processor_grid)
{
///////////////////////
// Grid information
///////////////////////
_isCheckerBoarded = false;
_ndimension = dimensions.size();
_fdimensions.resize(_ndimension);
_gdimensions.resize(_ndimension);
_ldimensions.resize(_ndimension);
_rdimensions.resize(_ndimension);
_simd_layout.resize(_ndimension);
_checker_dim_mask.resize(_ndimension);
_lstart.resize(_ndimension);
_lend.resize(_ndimension);
_ostride.resize(_ndimension);
_istride.resize(_ndimension);
_fsites = _gsites = _osites = _isites = 1;
for (int d = 0; d < _ndimension; d++)
{
_checker_dim_mask[d]=0;
_fdimensions[d] = dimensions[d]; // Global dimensions
_gdimensions[d] = _fdimensions[d]; // Global dimensions
_simd_layout[d] = simd_layout[d];
_fsites = _fsites * _fdimensions[d];
_gsites = _gsites * _gdimensions[d];
// Use a reduced simd grid
_ldimensions[d] = _gdimensions[d] / _processors[d]; //local dimensions
//std::cout << _ldimensions[d] << " " << _gdimensions[d] << " " << _processors[d] << std::endl;
assert(_ldimensions[d] * _processors[d] == _gdimensions[d]);
_rdimensions[d] = _ldimensions[d] / _simd_layout[d]; //overdecomposition
assert(_rdimensions[d] * _simd_layout[d] == _ldimensions[d]);
_lstart[d] = _processor_coor[d] * _ldimensions[d];
_lend[d] = _processor_coor[d] * _ldimensions[d] + _ldimensions[d] - 1;
_osites *= _rdimensions[d];
_isites *= _simd_layout[d];
// Addressing support
if (d == 0)
{
_ostride[d] = 1;
_istride[d] = 1;
}
else
{
_ostride[d] = _ostride[d - 1] * _rdimensions[d - 1];
_istride[d] = _istride[d - 1] * _simd_layout[d - 1];
}
}
///////////////////////
// subplane information
///////////////////////
_slice_block.resize(_ndimension);
_slice_stride.resize(_ndimension);
_slice_nblock.resize(_ndimension);
int block = 1;
int nblock = 1;
for (int d = 0; d < _ndimension; d++)
nblock *= _rdimensions[d];
for (int d = 0; d < _ndimension; d++)
{
nblock /= _rdimensions[d];
_slice_block[d] = block;
_slice_stride[d] = _ostride[d] * _rdimensions[d];
_slice_nblock[d] = nblock;
block = block * _rdimensions[d];
}
};
};
NAMESPACE_END(Grid);
#endif
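
A usage sketch for GridCartesian follows; it assumes Grid's top-level header plus the Grid_init / GridDefaultSimd / GridDefaultMpi helpers from elsewhere in the library, and the lattice extent is purely illustrative. It builds a full 4-d grid and prints its decomposition with show_decomposition().

#include <Grid/Grid.h>
using namespace Grid;

int main(int argc, char **argv)
{
  Grid_init(&argc, &argv);

  Coordinate latt({8, 8, 8, 8});                            // global lattice extent
  Coordinate simd = GridDefaultSimd(4, vComplex::Nsimd());  // SIMD lanes folded into the 4 dims
  Coordinate mpi  = GridDefaultMpi();                       // processor grid from the command line

  GridCartesian Fine(latt, simd, mpi);                      // full (non-checkerboarded) grid
  Fine.show_decomposition();

  Grid_finalize();
  return 0;
}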


@ -0,0 +1,305 @@
/*************************************************************************************
Grid physics library, www.github.com/paboyle/Grid
Source file: ./lib/cartesian/Cartesian_red_black.h
Copyright (C) 2015
Author: Azusa Yamaguchi <ayamaguc@staffmail.ed.ac.uk>
Author: Peter Boyle <paboyle@ph.ed.ac.uk>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */
#ifndef GRID_CARTESIAN_RED_BLACK_H
#define GRID_CARTESIAN_RED_BLACK_H
NAMESPACE_BEGIN(Grid);
static const int CbRed =0;
static const int CbBlack=1;
static const int Even =CbRed;
static const int Odd =CbBlack;
accelerator_inline int RedBlackCheckerBoardFromOindex (int oindex,const Coordinate &rdim,const Coordinate &chk_dim_msk)
{
int nd=rdim.size();
Coordinate coor(nd);
Lexicographic::CoorFromIndex(coor,oindex,rdim);
int linear=0;
for(int d=0;d<nd;d++){
if(chk_dim_msk[d])
linear=linear+coor[d];
}
return (linear&0x1);
}
// Specialise this for red black grids storing half the data like a chess board.
class GridRedBlackCartesian : public GridBase
{
public:
// Coordinate _checker_dim_mask;
int _checker_dim;
std::vector<int> _checker_board;
virtual int CheckerBoarded(int dim){
if( dim==_checker_dim) return 1;
else return 0;
}
virtual int CheckerBoard(const Coordinate &site){
int linear=0;
assert(site.size()==_ndimension);
for(int d=0;d<_ndimension;d++){
if(_checker_dim_mask[d])
linear=linear+site[d];
}
return (linear&0x1);
}
// Depending on the cb of site, we toggle source cb.
// for block #b, element #e = (b, e)
// we need
virtual int CheckerBoardShiftForCB(int source_cb,int dim,int shift,int ocb){
if(dim != _checker_dim) return shift;
int fulldim =_fdimensions[dim];
shift = (shift+fulldim)%fulldim;
// Probably faster with table lookup;
// or by looping over x,y,z and multiply rather than computing checkerboard.
if ( (source_cb+ocb)&1 ) {
return (shift)/2;
} else {
return (shift+1)/2;
}
}
virtual int CheckerBoardFromOindexTable (int Oindex) {
return _checker_board[Oindex];
}
virtual int CheckerBoardFromOindex (int Oindex)
{
Coordinate ocoor;
oCoorFromOindex(ocoor,Oindex);
return CheckerBoard(ocoor);
}
virtual int CheckerBoardShift(int source_cb,int dim,int shift,int osite){
if(dim != _checker_dim) return shift;
int ocb=CheckerBoardFromOindex(osite);
return CheckerBoardShiftForCB(source_cb,dim,shift,ocb);
}
virtual int CheckerBoardDestination(int source_cb,int shift,int dim){
if ( _checker_dim_mask[dim] ) {
// If _fdimensions[checker_dim] is odd, then shifting by 1 in other dims
// does NOT cause a parity hop.
int add=(dim==_checker_dim) ? 0 : _fdimensions[_checker_dim];
if ( (shift+add) &0x1) {
return 1-source_cb;
} else {
return source_cb;
}
} else {
return source_cb;
}
};
////////////////////////////////////////////////////////////
// Create Redblack from original grid; require full grid pointer ?
////////////////////////////////////////////////////////////
GridRedBlackCartesian(const GridBase *base) : GridBase(base->_processors,*base)
{
int dims = base->_ndimension;
Coordinate checker_dim_mask(dims,1);
int checker_dim = 0;
Init(base->_fdimensions,base->_simd_layout,base->_processors,checker_dim_mask,checker_dim);
};
////////////////////////////////////////////////////////////
// Create redblack from original grid, with non-trivial checker dim mask
////////////////////////////////////////////////////////////
GridRedBlackCartesian(const GridBase *base,
const Coordinate &checker_dim_mask,
int checker_dim
) : GridBase(base->_processors,*base)
{
Init(base->_fdimensions,base->_simd_layout,base->_processors,checker_dim_mask,checker_dim) ;
}
virtual ~GridRedBlackCartesian() = default;
void Init(const Coordinate &dimensions,
const Coordinate &simd_layout,
const Coordinate &processor_grid,
const Coordinate &checker_dim_mask,
int checker_dim)
{
_isCheckerBoarded = true;
_checker_dim = checker_dim;
assert(checker_dim_mask[checker_dim] == 1);
_ndimension = dimensions.size();
assert(checker_dim_mask.size() == _ndimension);
assert(processor_grid.size() == _ndimension);
assert(simd_layout.size() == _ndimension);
_fdimensions.resize(_ndimension);
_gdimensions.resize(_ndimension);
_ldimensions.resize(_ndimension);
_rdimensions.resize(_ndimension);
_simd_layout.resize(_ndimension);
_lstart.resize(_ndimension);
_lend.resize(_ndimension);
_ostride.resize(_ndimension);
_istride.resize(_ndimension);
_fsites = _gsites = _osites = _isites = 1;
_checker_dim_mask = checker_dim_mask;
for (int d = 0; d < _ndimension; d++)
{
_fdimensions[d] = dimensions[d];
_gdimensions[d] = _fdimensions[d];
_fsites = _fsites * _fdimensions[d];
_gsites = _gsites * _gdimensions[d];
if (d == _checker_dim)
{
assert((_gdimensions[d] & 0x1) == 0);
_gdimensions[d] = _gdimensions[d] / 2; // Remove a checkerboard
_gsites /= 2;
}
_ldimensions[d] = _gdimensions[d] / _processors[d];
assert(_ldimensions[d] * _processors[d] == _gdimensions[d]);
_lstart[d] = _processor_coor[d] * _ldimensions[d];
_lend[d] = _processor_coor[d] * _ldimensions[d] + _ldimensions[d] - 1;
// Use a reduced simd grid
_simd_layout[d] = simd_layout[d];
_rdimensions[d] = _ldimensions[d] / _simd_layout[d]; // integer division; divisibility is enforced by the assert below
assert(_rdimensions[d] * _simd_layout[d] == _ldimensions[d]);
assert(_rdimensions[d] > 0);
// all elements of a simd vector must have same checkerboard.
// If Ls vectorised, this must still be the case; e.g. dwf rb5d
if (_simd_layout[d] > 1)
{
if (checker_dim_mask[d])
{
assert((_rdimensions[d] & 0x1) == 0);
}
}
_osites *= _rdimensions[d];
_isites *= _simd_layout[d];
// Addressing support
if (d == 0)
{
_ostride[d] = 1;
_istride[d] = 1;
}
else
{
_ostride[d] = _ostride[d - 1] * _rdimensions[d - 1];
_istride[d] = _istride[d - 1] * _simd_layout[d - 1];
}
}
////////////////////////////////////////////////////////////////////////////////////////////
// subplane information
////////////////////////////////////////////////////////////////////////////////////////////
_slice_block.resize(_ndimension);
_slice_stride.resize(_ndimension);
_slice_nblock.resize(_ndimension);
int block = 1;
int nblock = 1;
for (int d = 0; d < _ndimension; d++)
nblock *= _rdimensions[d];
for (int d = 0; d < _ndimension; d++)
{
nblock /= _rdimensions[d];
_slice_block[d] = block;
_slice_stride[d] = _ostride[d] * _rdimensions[d];
_slice_nblock[d] = nblock;
block = block * _rdimensions[d];
}
////////////////////////////////////////////////
// Create a checkerboard lookup table
////////////////////////////////////////////////
int rvol = 1;
for (int d = 0; d < _ndimension; d++)
{
rvol = rvol * _rdimensions[d];
}
_checker_board.resize(rvol);
for (int osite = 0; osite < _osites; osite++)
{
_checker_board[osite] = CheckerBoardFromOindex(osite);
}
};
protected:
virtual int oIndex(Coordinate &coor)
{
int idx = 0;
for (int d = 0; d < _ndimension; d++)
{
if (d == _checker_dim)
{
idx += _ostride[d] * ((coor[d] / 2) % _rdimensions[d]);
}
else
{
idx += _ostride[d] * (coor[d] % _rdimensions[d]);
}
}
return idx;
};
virtual int iIndex(Coordinate &lcoor)
{
int idx = 0;
for (int d = 0; d < _ndimension; d++)
{
if (d == _checker_dim)
{
idx += _istride[d] * (lcoor[d] / (2 * _rdimensions[d]));
}
else
{
idx += _istride[d] * (lcoor[d] / _rdimensions[d]);
}
}
return idx;
}
};
NAMESPACE_END(Grid);
#endif
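
GridRedBlackCartesian above assigns each site a parity by summing the coordinates along the masked (checkerboarded) dimensions and taking the result modulo 2, with Even=CbRed=0 and Odd=CbBlack=1. A standalone restatement of that rule (not Grid code):

// Standalone sketch of the checkerboard rule used by CheckerBoard() above.
#include <cassert>
#include <vector>

int parity(const std::vector<int> &site, const std::vector<int> &mask)
{
  int linear = 0;
  for (size_t d = 0; d < site.size(); d++)
    if (mask[d]) linear += site[d];
  return linear & 0x1;   // 0 = Even (red), 1 = Odd (black)
}

int main()
{
  std::vector<int> mask = {1, 1, 1, 1};   // all four dimensions checkerboarded
  assert(parity({0, 0, 0, 0}, mask) == 0);  // origin is Even
  assert(parity({1, 0, 0, 0}, mask) == 1);  // one hop flips parity
  assert(parity({1, 1, 0, 0}, mask) == 0);  // two hops restore it
  return 0;
}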


@ -1,4 +1,4 @@
/*************************************************************************************
/*************************************************************************************
Grid physics library, www.github.com/paboyle/Grid
@ -23,11 +23,13 @@ Author: Peter Boyle <paboyle@ph.ed.ac.uk>
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */
*************************************************************************************/
/* END LEGAL */
#ifndef GRID_COMMUNICATOR_H
#define GRID_COMMUNICATOR_H
#include <communicator/Communicator_base.h>
#include <Grid/util/Coordinate.h>
#include <Grid/communicator/SharedMemory.h>
#include <Grid/communicator/Communicator_base.h>
#endif


@ -0,0 +1,77 @@
/*************************************************************************************
Grid physics library, www.github.com/paboyle/Grid
Source file: ./lib/communicator/Communicator_none.cc
Copyright (C) 2015
Author: Peter Boyle <paboyle@ph.ed.ac.uk>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */
#include <Grid/GridCore.h>
#include <fcntl.h>
#include <unistd.h>
#include <limits.h>
#include <sys/mman.h>
NAMESPACE_BEGIN(Grid);
///////////////////////////////////////////////////////////////
// Info that is set up once and independent of the cartesian layout
///////////////////////////////////////////////////////////////
CartesianCommunicator::CommunicatorPolicy_t
CartesianCommunicator::CommunicatorPolicy= CartesianCommunicator::CommunicatorPolicyConcurrent;
int CartesianCommunicator::nCommThreads = -1;
/////////////////////////////////
// Grid information queries
/////////////////////////////////
int CartesianCommunicator::Dimensions(void) { return _ndimension; };
int CartesianCommunicator::IsBoss(void) { return _processor==0; };
int CartesianCommunicator::BossRank(void) { return 0; };
int CartesianCommunicator::ThisRank(void) { return _processor; };
const Coordinate & CartesianCommunicator::ThisProcessorCoor(void) { return _processor_coor; };
const Coordinate & CartesianCommunicator::ProcessorGrid(void) { return _processors; };
int CartesianCommunicator::ProcessorCount(void) { return _Nprocessors; };
////////////////////////////////////////////////////////////////////////////////
// very VERY rarely (Log, serial RNG) we need world without a grid
////////////////////////////////////////////////////////////////////////////////
void CartesianCommunicator::GlobalSum(ComplexF &c)
{
GlobalSumVector((float *)&c,2);
}
void CartesianCommunicator::GlobalSumVector(ComplexF *c,int N)
{
GlobalSumVector((float *)c,2*N);
}
void CartesianCommunicator::GlobalSum(ComplexD &c)
{
GlobalSumVector((double *)&c,2);
}
void CartesianCommunicator::GlobalSumVector(ComplexD *c,int N)
{
GlobalSumVector((double *)c,2*N);
}
NAMESPACE_END(Grid);


@ -0,0 +1,195 @@
/*************************************************************************************
Grid physics library, www.github.com/paboyle/Grid
Source file: ./lib/communicator/Communicator_base.h
Copyright (C) 2015
Author: Peter Boyle <paboyle@ph.ed.ac.uk>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */
#ifndef GRID_COMMUNICATOR_BASE_H
#define GRID_COMMUNICATOR_BASE_H
///////////////////////////////////
// Processor layout information
///////////////////////////////////
#include <Grid/communicator/SharedMemory.h>
NAMESPACE_BEGIN(Grid);
class CartesianCommunicator : public SharedMemory {
public:
////////////////////////////////////////////
// Policies
////////////////////////////////////////////
enum CommunicatorPolicy_t { CommunicatorPolicyConcurrent, CommunicatorPolicySequential };
static CommunicatorPolicy_t CommunicatorPolicy;
static void SetCommunicatorPolicy(CommunicatorPolicy_t policy ) { CommunicatorPolicy = policy; }
static int nCommThreads;
////////////////////////////////////////////
// Communicator should know nothing of the physics grid, only processor grid.
////////////////////////////////////////////
int _Nprocessors; // How many in all
Coordinate _processors; // Which dimensions get laid out over processor lanes.
int _processor; // linear processor rank
Coordinate _processor_coor; // linear processor coordinate
unsigned long _ndimension;
static Grid_MPI_Comm communicator_world;
Grid_MPI_Comm communicator;
std::vector<Grid_MPI_Comm> communicator_halo;
////////////////////////////////////////////////
// Must call in Grid startup
////////////////////////////////////////////////
static void Init(int *argc, char ***argv);
////////////////////////////////////////////////
// Constructors to sub-divide a parent communicator
// and default to comm world
////////////////////////////////////////////////
CartesianCommunicator(const Coordinate &processors,const CartesianCommunicator &parent,int &srank);
CartesianCommunicator(const Coordinate &pdimensions_in);
virtual ~CartesianCommunicator();
private:
////////////////////////////////////////////////
// Private initialise from an MPI communicator
// Can use after an MPI_Comm_split, but hidden from user so private
////////////////////////////////////////////////
void InitFromMPICommunicator(const Coordinate &processors, Grid_MPI_Comm communicator_base);
public:
////////////////////////////////////////////////////////////////////////////////////////
// Wraps MPI_Cart routines, or implements equivalent on other impls
////////////////////////////////////////////////////////////////////////////////////////
void ShiftedRanks(int dim,int shift,int & source, int & dest);
int RankFromProcessorCoor(Coordinate &coor);
void ProcessorCoorFromRank(int rank,Coordinate &coor);
int Dimensions(void) ;
int IsBoss(void) ;
int BossRank(void) ;
int ThisRank(void) ;
const Coordinate & ThisProcessorCoor(void) ;
const Coordinate & ProcessorGrid(void) ;
int ProcessorCount(void) ;
////////////////////////////////////////////////////////////////////////////////
// very VERY rarely (Log, serial RNG) we need world without a grid
////////////////////////////////////////////////////////////////////////////////
static int RankWorld(void) ;
static void BroadcastWorld(int root,void* data, int bytes);
////////////////////////////////////////////////////////////
// Reduction
////////////////////////////////////////////////////////////
void GlobalMax(RealD &);
void GlobalMax(RealF &);
void GlobalSum(RealF &);
void GlobalSumVector(RealF *,int N);
void GlobalSum(RealD &);
void GlobalSumVector(RealD *,int N);
void GlobalSum(uint32_t &);
void GlobalSum(uint64_t &);
void GlobalSumVector(uint64_t*,int N);
void GlobalSum(ComplexF &c);
void GlobalSumVector(ComplexF *c,int N);
void GlobalSum(ComplexD &c);
void GlobalSumVector(ComplexD *c,int N);
void GlobalXOR(uint32_t &);
void GlobalXOR(uint64_t &);
template<class obj> void GlobalSum(obj &o){
typedef typename obj::scalar_type scalar_type;
int words = sizeof(obj)/sizeof(scalar_type);
scalar_type * ptr = (scalar_type *)& o;
GlobalSumVector(ptr,words);
}
////////////////////////////////////////////////////////////
// Face exchange, buffer swap in translational invariant way
////////////////////////////////////////////////////////////
void SendToRecvFrom(void *xmit,
int xmit_to_rank,
void *recv,
int recv_from_rank,
int bytes);
double StencilSendToRecvFrom(void *xmit,
int xmit_to_rank,
void *recv,
int recv_from_rank,
int bytes,int dir);
double StencilSendToRecvFromBegin(std::vector<CommsRequest_t> &list,
void *xmit,
int xmit_to_rank,
void *recv,
int recv_from_rank,
int bytes,int dir);
void StencilSendToRecvFromComplete(std::vector<CommsRequest_t> &waitall,int i);
void StencilBarrier(void);
////////////////////////////////////////////////////////////
// Barrier
////////////////////////////////////////////////////////////
void Barrier(void);
////////////////////////////////////////////////////////////
// Broadcast a buffer and composite larger
////////////////////////////////////////////////////////////
void Broadcast(int root,void* data, int bytes);
////////////////////////////////////////////////////////////
// All2All down one dimension
////////////////////////////////////////////////////////////
template<class T> void AllToAll(int dim,std::vector<T> &in, std::vector<T> &out){
assert(dim>=0);
assert(dim<_ndimension);
assert(in.size()==out.size());
int numnode = _processors[dim];
uint64_t bytes=sizeof(T);
uint64_t words=in.size()/numnode;
assert(numnode * words == in.size());
assert(words < (1ULL<<31));
AllToAll(dim,(void *)&in[0],(void *)&out[0],words,bytes);
}
void AllToAll(int dim ,void *in,void *out,uint64_t words,uint64_t bytes);
void AllToAll(void *in,void *out,uint64_t words ,uint64_t bytes);
template<class obj> void Broadcast(int root,obj &data)
{
Broadcast(root,(void *)&data,sizeof(data));
}
};
NAMESPACE_END(Grid);
#endif
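
The templated GlobalSum in the class above reduces any object that exposes a scalar_type by viewing it as sizeof(obj)/sizeof(scalar_type) contiguous words. A standalone sketch with a toy type and a fake reduction standing in for the MPI call; ToySpinor and the fakeGlobalSum helpers are invented for illustration.

// Standalone sketch of the word decomposition used by the templated GlobalSum above.
#include <cassert>

struct ToySpinor {                 // hypothetical object, not a Grid type
  typedef double scalar_type;
  double v[4];
};

// stand-in for the rank-wide reduction: doubles each word ("two identical ranks")
void fakeGlobalSumVector(double *ptr, int words) { for (int i = 0; i < words; i++) ptr[i] *= 2.0; }

template <class obj> void fakeGlobalSum(obj &o)
{
  typedef typename obj::scalar_type scalar_type;
  int words = sizeof(obj) / sizeof(scalar_type);   // flatten the object into scalar words
  fakeGlobalSumVector((scalar_type *)&o, words);
}

int main()
{
  ToySpinor s = {{1, 2, 3, 4}};
  fakeGlobalSum(s);               // words == 4
  assert(s.v[2] == 6.0);
  return 0;
}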


@ -0,0 +1,473 @@
/*************************************************************************************
Grid physics library, www.github.com/paboyle/Grid
Source file: ./lib/communicator/Communicator_mpi.cc
Copyright (C) 2015
Author: Peter Boyle <paboyle@ph.ed.ac.uk>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */
#include <Grid/GridCore.h>
#include <Grid/communicator/SharedMemory.h>
NAMESPACE_BEGIN(Grid);
Grid_MPI_Comm CartesianCommunicator::communicator_world;
////////////////////////////////////////////
// First initialise of comms system
////////////////////////////////////////////
void CartesianCommunicator::Init(int *argc, char ***argv)
{
int flag;
int provided;
MPI_Initialized(&flag); // needed to coexist with other libs apparently
if ( !flag ) {
#ifndef GRID_COMMS_THREADS
nCommThreads=1;
// For now: comms-overlap leads to wrong results in Benchmark_wilson even on single-node MPI runs;
// other comms schemes are ok, so fall back to MPI_THREAD_SERIALIZED here.
MPI_Init_thread(argc,argv,MPI_THREAD_SERIALIZED,&provided);
#else
MPI_Init_thread(argc,argv,MPI_THREAD_MULTIPLE,&provided);
#endif
//If only 1 comms thread we require any threading mode other than SINGLE, but for multiple comms threads we need MULTIPLE
if( (nCommThreads == 1) && (provided == MPI_THREAD_SINGLE) ) {
assert(0);
}
if( (nCommThreads > 1) && (provided != MPI_THREAD_MULTIPLE) ) {
assert(0);
}
}
// Never cleaned up, since this is done only once.
MPI_Comm_dup (MPI_COMM_WORLD,&communicator_world);
Grid_quiesce_nodes();
GlobalSharedMemory::Init(communicator_world);
GlobalSharedMemory::SharedMemoryAllocate(
GlobalSharedMemory::MAX_MPI_SHM_BYTES,
GlobalSharedMemory::Hugepages);
Grid_unquiesce_nodes();
}
///////////////////////////////////////////////////////////////////////////
// Use cartesian communicators now even in MPI3
///////////////////////////////////////////////////////////////////////////
void CartesianCommunicator::ShiftedRanks(int dim,int shift,int &source,int &dest)
{
int ierr=MPI_Cart_shift(communicator,dim,shift,&source,&dest);
assert(ierr==0);
}
int CartesianCommunicator::RankFromProcessorCoor(Coordinate &coor)
{
int rank;
int ierr=MPI_Cart_rank (communicator, &coor[0], &rank);
assert(ierr==0);
return rank;
}
void CartesianCommunicator::ProcessorCoorFromRank(int rank, Coordinate &coor)
{
coor.resize(_ndimension);
int ierr=MPI_Cart_coords (communicator, rank, _ndimension,&coor[0]);
assert(ierr==0);
}
////////////////////////////////////////////////////////////////////////////////////////////////////////
// Initialises from communicator_world
////////////////////////////////////////////////////////////////////////////////////////////////////////
CartesianCommunicator::CartesianCommunicator(const Coordinate &processors)
{
MPI_Comm optimal_comm;
////////////////////////////////////////////////////
// Remap using the shared memory optimising routine
// The remap creates a comm which must be freed
////////////////////////////////////////////////////
GlobalSharedMemory::OptimalCommunicator (processors,optimal_comm);
InitFromMPICommunicator(processors,optimal_comm);
SetCommunicator(optimal_comm);
///////////////////////////////////////////////////
// Free the temp communicator
///////////////////////////////////////////////////
MPI_Comm_free(&optimal_comm);
}
//////////////////////////////////
// Try to subdivide communicator
//////////////////////////////////
CartesianCommunicator::CartesianCommunicator(const Coordinate &processors,const CartesianCommunicator &parent,int &srank)
{
_ndimension = processors.size(); assert(_ndimension>=1);
int parent_ndimension = parent._ndimension; assert(_ndimension >= parent._ndimension);
Coordinate parent_processor_coor(_ndimension,0);
Coordinate parent_processors (_ndimension,1);
// Can make 5d grid from 4d etc...
int pad = _ndimension-parent_ndimension;
for(int d=0;d<parent_ndimension;d++){
parent_processor_coor[pad+d]=parent._processor_coor[d];
parent_processors [pad+d]=parent._processors[d];
}
//////////////////////////////////////////////////////////////////////////////////////////////////////
// split the communicator
//////////////////////////////////////////////////////////////////////////////////////////////////////
// int Nparent = parent._processors ;
int Nparent;
MPI_Comm_size(parent.communicator,&Nparent);
int childsize=1;
for(int d=0;d<processors.size();d++) {
childsize *= processors[d];
}
int Nchild = Nparent/childsize;
assert (childsize * Nchild == Nparent);
Coordinate ccoor(_ndimension); // coor within subcommunicator
Coordinate scoor(_ndimension); // coor of split within parent
Coordinate ssize(_ndimension); // coor of split within parent
for(int d=0;d<_ndimension;d++){
ccoor[d] = parent_processor_coor[d] % processors[d];
scoor[d] = parent_processor_coor[d] / processors[d];
ssize[d] = parent_processors[d] / processors[d];
}
// rank within subcomm ; srank is rank of subcomm within blocks of subcomms
int crank;
// MPI uses the reverse lexicographic convention to ours, so the reversed routines are called
Lexicographic::IndexFromCoorReversed(ccoor,crank,processors); // processors is the split grid dimensions
Lexicographic::IndexFromCoorReversed(scoor,srank,ssize); // ssize is the number of split grids
MPI_Comm comm_split;
if ( Nchild > 1 ) {
////////////////////////////////////////////////////////////////
// Split the communicator
////////////////////////////////////////////////////////////////
int ierr= MPI_Comm_split(parent.communicator,srank,crank,&comm_split);
assert(ierr==0);
} else {
srank = 0;
int ierr = MPI_Comm_dup (parent.communicator,&comm_split);
assert(ierr==0);
}
//////////////////////////////////////////////////////////////////////////////////////////////////////
// Set up from the new split communicator
//////////////////////////////////////////////////////////////////////////////////////////////////////
InitFromMPICommunicator(processors,comm_split);
//////////////////////////////////////////////////////////////////////////////////////////////////////
// Take the right SHM buffers
//////////////////////////////////////////////////////////////////////////////////////////////////////
SetCommunicator(comm_split);
///////////////////////////////////////////////
// Free the temp communicator
///////////////////////////////////////////////
MPI_Comm_free(&comm_split);
if(0){
std::cout << " ndim " <<_ndimension<<" " << parent._ndimension << std::endl;
for(int d=0;d<processors.size();d++){
std::cout << d<< " " << _processor_coor[d] <<" " << ccoor[d]<<std::endl;
}
}
for(int d=0;d<processors.size();d++){
assert(_processor_coor[d] == ccoor[d] );
}
}
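// Illustrative usage sketch (hypothetical variable names): split an 8-rank
// parent laid out as {2,2,2} into four 2-rank children laid out as {1,1,2};
// srank then identifies which of the four child grids this rank joined.
//
//   Coordinate parent_layout({2,2,2});
//   CartesianCommunicator parent(parent_layout);
//   Coordinate child_layout({1,1,2});
//   int srank;
//   CartesianCommunicator child(child_layout,parent,srank);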
void CartesianCommunicator::InitFromMPICommunicator(const Coordinate &processors, MPI_Comm communicator_base)
{
////////////////////////////////////////////////////
// Creates communicator, and the communicator_halo
////////////////////////////////////////////////////
_ndimension = processors.size();
_processor_coor.resize(_ndimension);
/////////////////////////////////
// Count the requested nodes
/////////////////////////////////
_Nprocessors=1;
_processors = processors;
for(int i=0;i<_ndimension;i++){
_Nprocessors*=_processors[i];
}
Coordinate periodic(_ndimension,1);
MPI_Cart_create(communicator_base, _ndimension,&_processors[0],&periodic[0],0,&communicator);
MPI_Comm_rank(communicator,&_processor);
MPI_Cart_coords(communicator,_processor,_ndimension,&_processor_coor[0]);
if ( 0 && (communicator_base != communicator_world) ) {
std::cout << "InitFromMPICommunicator Cartesian communicator created with a non-world communicator"<<std::endl;
std::cout << " new communicator rank "<<_processor<< " coor ["<<_ndimension<<"] ";
for(int d=0;d<_processors.size();d++){
std::cout << _processor_coor[d]<<" ";
}
std::cout << std::endl;
}
int Size;
MPI_Comm_size(communicator,&Size);
communicator_halo.resize (2*_ndimension);
for(int i=0;i<_ndimension*2;i++){
MPI_Comm_dup(communicator,&communicator_halo[i]);
}
assert(Size==_Nprocessors);
}
CartesianCommunicator::~CartesianCommunicator()
{
int MPI_is_finalised;
MPI_Finalized(&MPI_is_finalised);
if (communicator && !MPI_is_finalised) {
MPI_Comm_free(&communicator);
for(int i=0;i<communicator_halo.size();i++){
MPI_Comm_free(&communicator_halo[i]);
}
}
}
void CartesianCommunicator::GlobalSum(uint32_t &u){
int ierr=MPI_Allreduce(MPI_IN_PLACE,&u,1,MPI_UINT32_T,MPI_SUM,communicator);
assert(ierr==0);
}
void CartesianCommunicator::GlobalSum(uint64_t &u){
int ierr=MPI_Allreduce(MPI_IN_PLACE,&u,1,MPI_UINT64_T,MPI_SUM,communicator);
assert(ierr==0);
}
void CartesianCommunicator::GlobalSumVector(uint64_t* u,int N){
int ierr=MPI_Allreduce(MPI_IN_PLACE,u,N,MPI_UINT64_T,MPI_SUM,communicator);
assert(ierr==0);
}
void CartesianCommunicator::GlobalXOR(uint32_t &u){
int ierr=MPI_Allreduce(MPI_IN_PLACE,&u,1,MPI_UINT32_T,MPI_BXOR,communicator);
assert(ierr==0);
}
void CartesianCommunicator::GlobalXOR(uint64_t &u){
int ierr=MPI_Allreduce(MPI_IN_PLACE,&u,1,MPI_UINT64_T,MPI_BXOR,communicator);
assert(ierr==0);
}
void CartesianCommunicator::GlobalMax(float &f)
{
int ierr=MPI_Allreduce(MPI_IN_PLACE,&f,1,MPI_FLOAT,MPI_MAX,communicator);
assert(ierr==0);
}
void CartesianCommunicator::GlobalMax(double &d)
{
int ierr = MPI_Allreduce(MPI_IN_PLACE,&d,1,MPI_DOUBLE,MPI_MAX,communicator);
assert(ierr==0);
}
void CartesianCommunicator::GlobalSum(float &f){
int ierr=MPI_Allreduce(MPI_IN_PLACE,&f,1,MPI_FLOAT,MPI_SUM,communicator);
assert(ierr==0);
}
void CartesianCommunicator::GlobalSumVector(float *f,int N)
{
int ierr=MPI_Allreduce(MPI_IN_PLACE,f,N,MPI_FLOAT,MPI_SUM,communicator);
assert(ierr==0);
}
void CartesianCommunicator::GlobalSum(double &d)
{
int ierr = MPI_Allreduce(MPI_IN_PLACE,&d,1,MPI_DOUBLE,MPI_SUM,communicator);
assert(ierr==0);
}
void CartesianCommunicator::GlobalSumVector(double *d,int N)
{
int ierr = MPI_Allreduce(MPI_IN_PLACE,d,N,MPI_DOUBLE,MPI_SUM,communicator);
assert(ierr==0);
}
// Basic Halo comms primitive
void CartesianCommunicator::SendToRecvFrom(void *xmit,
int dest,
void *recv,
int from,
int bytes)
{
std::vector<CommsRequest_t> reqs(0);
unsigned long xcrc = crc32(0L, Z_NULL, 0);
unsigned long rcrc = crc32(0L, Z_NULL, 0);
int myrank = _processor;
int ierr;
// Enforce no UVM in comms, device or host OK
assert(acceleratorIsCommunicable(xmit));
assert(acceleratorIsCommunicable(recv));
// Give the CPU to MPI immediately; can use threads to overlap optionally
// printf("proc %d SendToRecvFrom %d bytes Sendrecv \n",_processor,bytes);
ierr=MPI_Sendrecv(xmit,bytes,MPI_CHAR,dest,myrank,
recv,bytes,MPI_CHAR,from, from,
communicator,MPI_STATUS_IGNORE);
assert(ierr==0);
// xcrc = crc32(xcrc,(unsigned char *)xmit,bytes);
// rcrc = crc32(rcrc,(unsigned char *)recv,bytes);
// printf("proc %d SendToRecvFrom %d bytes xcrc %lx rcrc %lx\n",_processor,bytes,xcrc,rcrc); fflush
}
// Basic Halo comms primitive
double CartesianCommunicator::StencilSendToRecvFrom( void *xmit,
int dest,
void *recv,
int from,
int bytes,int dir)
{
std::vector<CommsRequest_t> list;
double offbytes = StencilSendToRecvFromBegin(list,xmit,dest,recv,from,bytes,dir);
StencilSendToRecvFromComplete(list,dir);
return offbytes;
}
double CartesianCommunicator::StencilSendToRecvFromBegin(std::vector<CommsRequest_t> &list,
void *xmit,
int dest,
void *recv,
int from,
int bytes,int dir)
{
int ncomm =communicator_halo.size();
int commdir=dir%ncomm;
MPI_Request xrq;
MPI_Request rrq;
int ierr;
int gdest = ShmRanks[dest];
int gfrom = ShmRanks[from];
int gme = ShmRanks[_processor];
assert(dest != _processor);
assert(from != _processor);
assert(gme == ShmRank);
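// ShmRanks[] maps each rank of this communicator to its rank within our node's
// shared-memory communicator, or MPI_UNDEFINED if it lives on another node
// (or if GRID_SHM_FORCE_MPI hides the SHM path). Only those off-node partners
// need real MPI messages; on-node traffic goes through the shared-memory windows.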
double off_node_bytes=0.0;
int tag;
if ( gfrom ==MPI_UNDEFINED) {
tag= dir+from*32;
ierr=MPI_Irecv(recv, bytes, MPI_CHAR,from,tag,communicator_halo[commdir],&rrq);
assert(ierr==0);
list.push_back(rrq);
off_node_bytes+=bytes;
}
if ( gdest == MPI_UNDEFINED ) {
tag= dir+_processor*32;
ierr =MPI_Isend(xmit, bytes, MPI_CHAR,dest,tag,communicator_halo[commdir],&xrq);
assert(ierr==0);
list.push_back(xrq);
off_node_bytes+=bytes;
}
if ( CommunicatorPolicy == CommunicatorPolicySequential ) {
this->StencilSendToRecvFromComplete(list,dir);
}
return off_node_bytes;
}
void CartesianCommunicator::StencilSendToRecvFromComplete(std::vector<CommsRequest_t> &list,int dir)
{
int nreq=list.size();
if (nreq==0) return;
std::vector<MPI_Status> status(nreq);
int ierr = MPI_Waitall(nreq,&list[0],&status[0]);
assert(ierr==0);
list.resize(0);
}
void CartesianCommunicator::StencilBarrier(void)
{
MPI_Barrier (ShmComm);
}
//void CartesianCommunicator::SendToRecvFromComplete(std::vector<CommsRequest_t> &list)
//{
//}
void CartesianCommunicator::Barrier(void)
{
int ierr = MPI_Barrier(communicator);
assert(ierr==0);
}
void CartesianCommunicator::Broadcast(int root,void* data, int bytes)
{
int ierr=MPI_Bcast(data,
bytes,
MPI_BYTE,
root,
communicator);
assert(ierr==0);
}
int CartesianCommunicator::RankWorld(void){
int r;
MPI_Comm_rank(communicator_world,&r);
return r;
}
void CartesianCommunicator::BroadcastWorld(int root,void* data, int bytes)
{
int ierr= MPI_Bcast(data,
bytes,
MPI_BYTE,
root,
communicator_world);
assert(ierr==0);
}
void CartesianCommunicator::AllToAll(int dim,void *in,void *out,uint64_t words,uint64_t bytes)
{
Coordinate row(_ndimension,1);
assert(dim>=0 && dim<_ndimension);
// Split the communicator
row[dim] = _processors[dim];
int me;
CartesianCommunicator Comm(row,*this,me);
Comm.AllToAll(in,out,words,bytes);
}
void CartesianCommunicator::AllToAll(void *in,void *out,uint64_t words,uint64_t bytes)
{
// MPI insists on "int" count arguments.
// 64*64*64*128*16 == ~500 million elements of data;
// at 24*4 = 96 bytes per element that is ~5x10^10 bytes >> the 2x10^9 int limit.
// (Turns up on 32^3 x 64 Gparity too)
MPI_Datatype object;
int iwords;
int ibytes;
iwords = words;
ibytes = bytes;
assert(words == iwords); // safe to cast to int ?
assert(bytes == ibytes); // safe to cast to int ?
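// Wrapping the per-element payload in a contiguous derived type keeps the
// Alltoall count at 'words', so the int count cannot overflow even when
// words*bytes greatly exceeds 2^31.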
MPI_Type_contiguous(ibytes,MPI_BYTE,&object);
MPI_Type_commit(&object);
MPI_Alltoall(in,iwords,object,out,iwords,object,communicator);
MPI_Type_free(&object);
}
NAMESPACE_END(Grid);

@@ -0,0 +1,138 @@
/*************************************************************************************
Grid physics library, www.github.com/paboyle/Grid
Source file: ./lib/communicator/Communicator_none.cc
Copyright (C) 2015
Author: Peter Boyle <paboyle@ph.ed.ac.uk>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */
#include <Grid/GridCore.h>
NAMESPACE_BEGIN(Grid);
///////////////////////////////////////////////////////////////////////////////////////////////////
// Info that is set up once and independent of the Cartesian layout
///////////////////////////////////////////////////////////////////////////////////////////////////
Grid_MPI_Comm CartesianCommunicator::communicator_world;
void CartesianCommunicator::Init(int *argc, char *** argv)
{
GlobalSharedMemory::Init(communicator_world);
GlobalSharedMemory::SharedMemoryAllocate(
GlobalSharedMemory::MAX_MPI_SHM_BYTES,
GlobalSharedMemory::Hugepages);
}
CartesianCommunicator::CartesianCommunicator(const Coordinate &processors,const CartesianCommunicator &parent,int &srank)
: CartesianCommunicator(processors)
{
srank=0;
SetCommunicator(communicator_world);
}
CartesianCommunicator::CartesianCommunicator(const Coordinate &processors)
{
_processors = processors;
_ndimension = processors.size(); assert(_ndimension>=1);
_processor_coor.resize(_ndimension);
// Require 1^N processor grid for fake
_Nprocessors=1;
_processor = 0;
for(int d=0;d<_ndimension;d++) {
assert(_processors[d]==1);
_processor_coor[d] = 0;
}
SetCommunicator(communicator_world);
}
CartesianCommunicator::~CartesianCommunicator(){}
void CartesianCommunicator::GlobalMax(float &){}
void CartesianCommunicator::GlobalMax(double &){}
void CartesianCommunicator::GlobalSum(float &){}
void CartesianCommunicator::GlobalSumVector(float *,int N){}
void CartesianCommunicator::GlobalSum(double &){}
void CartesianCommunicator::GlobalSumVector(double *,int N){}
void CartesianCommunicator::GlobalSum(uint32_t &){}
void CartesianCommunicator::GlobalSum(uint64_t &){}
void CartesianCommunicator::GlobalSumVector(uint64_t *,int N){}
void CartesianCommunicator::GlobalXOR(uint32_t &){}
void CartesianCommunicator::GlobalXOR(uint64_t &){}
// Basic halo comms primitive -- should never be called in a single-node build
void CartesianCommunicator::SendToRecvFrom(void *xmit,
int dest,
void *recv,
int from,
int bytes)
{
assert(0);
}
void CartesianCommunicator::AllToAll(int dim,void *in,void *out,uint64_t words,uint64_t bytes)
{
bcopy(in,out,bytes*words);
}
void CartesianCommunicator::AllToAll(void *in,void *out,uint64_t words,uint64_t bytes)
{
bcopy(in,out,bytes*words);
}
int CartesianCommunicator::RankWorld(void){return 0;}
void CartesianCommunicator::Barrier(void){}
void CartesianCommunicator::Broadcast(int root,void* data, int bytes) {}
void CartesianCommunicator::BroadcastWorld(int root,void* data, int bytes) { }
int CartesianCommunicator::RankFromProcessorCoor(Coordinate &coor) { return 0;}
void CartesianCommunicator::ProcessorCoorFromRank(int rank, Coordinate &coor){ coor = _processor_coor; }
void CartesianCommunicator::ShiftedRanks(int dim,int shift,int &source,int &dest)
{
source =0;
dest=0;
}
double CartesianCommunicator::StencilSendToRecvFrom( void *xmit,
int xmit_to_rank,
void *recv,
int recv_from_rank,
int bytes, int dir)
{
return 2.0*bytes;
}
double CartesianCommunicator::StencilSendToRecvFromBegin(std::vector<CommsRequest_t> &list,
void *xmit,
int xmit_to_rank,
void *recv,
int recv_from_rank,
int bytes, int dir)
{
return 2.0*bytes;
}
void CartesianCommunicator::StencilSendToRecvFromComplete(std::vector<CommsRequest_t> &waitall,int dir)
{
}
void CartesianCommunicator::StencilBarrier(void){};
NAMESPACE_END(Grid);

@@ -0,0 +1,96 @@
/*************************************************************************************
Grid physics library, www.github.com/paboyle/Grid
Source file: ./lib/communicator/SharedMemory.cc
Copyright (C) 2015
Author: Peter Boyle <paboyle@ph.ed.ac.uk>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */
#include <Grid/GridCore.h>
NAMESPACE_BEGIN(Grid);
// static data
int GlobalSharedMemory::HPEhypercube = 1;
uint64_t GlobalSharedMemory::MAX_MPI_SHM_BYTES = 1024LL*1024LL*1024LL;
int GlobalSharedMemory::Hugepages = 0;
int GlobalSharedMemory::_ShmSetup;
int GlobalSharedMemory::_ShmAlloc;
uint64_t GlobalSharedMemory::_ShmAllocBytes;
std::vector<void *> GlobalSharedMemory::WorldShmCommBufs;
Grid_MPI_Comm GlobalSharedMemory::WorldShmComm;
int GlobalSharedMemory::WorldShmRank;
int GlobalSharedMemory::WorldShmSize;
std::vector<int> GlobalSharedMemory::WorldShmRanks;
Grid_MPI_Comm GlobalSharedMemory::WorldComm;
int GlobalSharedMemory::WorldSize;
int GlobalSharedMemory::WorldRank;
int GlobalSharedMemory::WorldNodes;
int GlobalSharedMemory::WorldNode;
void GlobalSharedMemory::SharedMemoryFree(void)
{
assert(_ShmAlloc);
assert(_ShmAllocBytes>0);
for(int r=0;r<WorldShmSize;r++){
munmap(WorldShmCommBufs[r],_ShmAllocBytes);
}
_ShmAlloc = 0;
_ShmAllocBytes = 0;
}
/////////////////////////////////
// Alloc, free shmem region
/////////////////////////////////
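// ShmBufferMalloc is a simple bump allocator over this rank's shared window:
// heap_top only moves forward, and allocations are released en masse by
// ShmBufferFreeAll(), which resets heap_top to the base of our buffer.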
void *SharedMemory::ShmBufferMalloc(size_t bytes){
// bytes = (bytes+sizeof(vRealD))&(~(sizeof(vRealD)-1));// align up bytes
void *ptr = (void *)heap_top;
heap_top += bytes;
heap_bytes+= bytes;
if (heap_bytes >= heap_size) {
std::cout<< " ShmBufferMalloc exceeded shared heap size -- try increasing with --shm <MB> flag" <<std::endl;
std::cout<< " Parameter specified in units of MB (megabytes) " <<std::endl;
std::cout<< " Current alloc is " << (bytes/(1024*1024)) <<"MB"<<std::endl;
std::cout<< " Current bytes is " << (heap_bytes/(1024*1024)) <<"MB"<<std::endl;
std::cout<< " Current heap is " << (heap_size/(1024*1024)) <<"MB"<<std::endl;
assert(heap_bytes<heap_size);
}
//std::cerr << "ShmBufferMalloc "<<std::hex<< ptr<<" - "<<((uint64_t)ptr+bytes)<<std::dec<<std::endl;
return ptr;
}
void SharedMemory::ShmBufferFreeAll(void) {
heap_top =(size_t)ShmBufferSelf();
heap_bytes=0;
}
void *SharedMemory::ShmBufferSelf(void)
{
//std::cerr << "ShmBufferSelf "<<ShmRank<<" "<<std::hex<< ShmCommBufs[ShmRank] <<std::dec<<std::endl;
return ShmCommBufs[ShmRank];
}
NAMESPACE_END(Grid);

@@ -0,0 +1,163 @@
/*************************************************************************************
Grid physics library, www.github.com/paboyle/Grid
Source file: ./lib/communicator/SharedMemory.h
Copyright (C) 2015
Author: Peter Boyle <paboyle@ph.ed.ac.uk>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */
#pragma once
#include <Grid/GridCore.h>
#if defined (GRID_COMMS_MPI3)
#include <mpi.h>
#endif
#include <semaphore.h>
#include <fcntl.h>
#include <unistd.h>
#include <limits.h>
#include <sys/types.h>
#include <sys/ipc.h>
#include <sys/shm.h>
#include <sys/mman.h>
#include <zlib.h>
NAMESPACE_BEGIN(Grid);
#if defined (GRID_COMMS_MPI3)
typedef MPI_Comm Grid_MPI_Comm;
typedef MPI_Request CommsRequest_t;
#else
typedef int CommsRequest_t;
typedef int Grid_MPI_Comm;
#endif
class GlobalSharedMemory {
private:
static const int MAXLOG2RANKSPERNODE = 16;
// Init once lock on the buffer allocation
static int _ShmSetup;
static int _ShmAlloc;
static uint64_t _ShmAllocBytes;
public:
///////////////////////////////////////
// HPE 8600 hypercube optimisation
///////////////////////////////////////
static int HPEhypercube;
static int ShmSetup(void) { return _ShmSetup; }
static int ShmAlloc(void) { return _ShmAlloc; }
static uint64_t ShmAllocBytes(void) { return _ShmAllocBytes; }
static uint64_t MAX_MPI_SHM_BYTES;
static int Hugepages;
static std::vector<void *> WorldShmCommBufs;
static Grid_MPI_Comm WorldComm;
static int WorldRank;
static int WorldSize;
static Grid_MPI_Comm WorldShmComm;
static int WorldShmRank;
static int WorldShmSize;
static int WorldNodes;
static int WorldNode;
static std::vector<int> WorldShmRanks;
//////////////////////////////////////////////////////////////////////////////////////
// Create an optimal reordered communicator that makes MPI_Cart_create get it right
//////////////////////////////////////////////////////////////////////////////////////
static void Init(Grid_MPI_Comm comm); // Typically MPI_COMM_WORLD
static void OptimalCommunicator (const Coordinate &processors,Grid_MPI_Comm & optimal_comm); // Turns MPI_COMM_WORLD into right layout for Cartesian
static void OptimalCommunicatorHypercube (const Coordinate &processors,Grid_MPI_Comm & optimal_comm); // Turns MPI_COMM_WORLD into right layout for Cartesian
static void OptimalCommunicatorSharedMemory(const Coordinate &processors,Grid_MPI_Comm & optimal_comm); // Turns MPI_COMM_WORLD into right layout for Cartesian
static void GetShmDims(const Coordinate &WorldDims,Coordinate &ShmDims);
///////////////////////////////////////////////////
// Provide shared memory facilities off comm world
///////////////////////////////////////////////////
static void SharedMemoryAllocate(uint64_t bytes, int flags);
static void SharedMemoryFree(void);
static void SharedMemoryCopy(void *dest,void *src,size_t bytes);
static void SharedMemoryZero(void *dest,size_t bytes);
};
//////////////////////////////
// one per communicator
//////////////////////////////
class SharedMemory
{
private:
static const int MAXLOG2RANKSPERNODE = 16;
size_t heap_top;
size_t heap_bytes;
size_t heap_size;
protected:
Grid_MPI_Comm ShmComm; // for barriers
int ShmRank;
int ShmSize;
std::vector<void *> ShmCommBufs;
std::vector<int> ShmRanks;// Mapping comm ranks to Shm ranks
public:
SharedMemory() {};
~SharedMemory();
///////////////////////////////////////////////////////////////////////////////////////
// set the buffers & sizes
///////////////////////////////////////////////////////////////////////////////////////
void SetCommunicator(Grid_MPI_Comm comm);
////////////////////////////////////////////////////////////////////////
// For this instance ; disjoint buffer sets between splits if split grid
////////////////////////////////////////////////////////////////////////
void ShmBarrier(void);
///////////////////////////////////////////////////
// Call on any instance
///////////////////////////////////////////////////
void SharedMemoryTest(void);
void *ShmBufferSelf(void);
void *ShmBuffer (int rank);
void *ShmBufferTranslate(int rank,void * local_p);
void *ShmBufferMalloc(size_t bytes);
void ShmBufferFreeAll(void) ;
//////////////////////////////////////////////////////////////////////////
// Make info on Nodes & ranks and Shared memory available
//////////////////////////////////////////////////////////////////////////
int NodeCount(void) { return GlobalSharedMemory::WorldNodes;};
int RankCount(void) { return GlobalSharedMemory::WorldSize;};
};
NAMESPACE_END(Grid);

@@ -0,0 +1,870 @@
/*************************************************************************************
Grid physics library, www.github.com/paboyle/Grid
Source file: ./lib/communicator/SharedMemoryMPI.cc
Copyright (C) 2015
Author: Peter Boyle <paboyle@ph.ed.ac.uk>
Author: Christoph Lehner <christoph@lhnr.de>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */
#include <Grid/GridCore.h>
#include <pwd.h>
#ifdef GRID_CUDA
#include <cuda_runtime_api.h>
#endif
#ifdef GRID_HIP
#include <hip/hip_runtime_api.h>
#endif
NAMESPACE_BEGIN(Grid);
#define header "SharedMemoryMpi: "
/*Construct from an MPI communicator*/
void GlobalSharedMemory::Init(Grid_MPI_Comm comm)
{
assert(_ShmSetup==0);
WorldComm = comm;
MPI_Comm_rank(WorldComm,&WorldRank);
MPI_Comm_size(WorldComm,&WorldSize);
// WorldComm, WorldSize, WorldRank
/////////////////////////////////////////////////////////////////////
// Split into groups that can share memory
/////////////////////////////////////////////////////////////////////
#ifndef GRID_MPI3_SHM_NONE
MPI_Comm_split_type(comm, MPI_COMM_TYPE_SHARED, 0, MPI_INFO_NULL,&WorldShmComm);
#else
MPI_Comm_split(comm, WorldRank, 0, &WorldShmComm);
#endif
MPI_Comm_rank(WorldShmComm ,&WorldShmRank);
MPI_Comm_size(WorldShmComm ,&WorldShmSize);
if ( WorldRank == 0) {
std::cout << header " World communicator of size " <<WorldSize << std::endl;
std::cout << header " Node communicator of size " <<WorldShmSize << std::endl;
}
// WorldShmComm, WorldShmSize, WorldShmRank
// WorldNodes
WorldNodes = WorldSize/WorldShmSize;
assert( (WorldNodes * WorldShmSize) == WorldSize );
// FIXME: Check all WorldShmSize are the same ?
/////////////////////////////////////////////////////////////////////
// find world ranks in our SHM group (i.e. which ranks are on our node)
/////////////////////////////////////////////////////////////////////
MPI_Group WorldGroup, ShmGroup;
MPI_Comm_group (WorldComm, &WorldGroup);
MPI_Comm_group (WorldShmComm, &ShmGroup);
std::vector<int> world_ranks(WorldSize); for(int r=0;r<WorldSize;r++) world_ranks[r]=r;
WorldShmRanks.resize(WorldSize);
MPI_Group_translate_ranks (WorldGroup,WorldSize,&world_ranks[0],ShmGroup, &WorldShmRanks[0]);
///////////////////////////////////////////////////////////////////
// Identify who is in my group and nominate the leader
///////////////////////////////////////////////////////////////////
int g=0;
std::vector<int> MyGroup;
MyGroup.resize(WorldShmSize);
for(int rank=0;rank<WorldSize;rank++){
if(WorldShmRanks[rank]!=MPI_UNDEFINED){
assert(g<WorldShmSize);
MyGroup[g++] = rank;
}
}
std::sort(MyGroup.begin(),MyGroup.end(),std::less<int>());
int myleader = MyGroup[0];
std::vector<int> leaders_1hot(WorldSize,0);
std::vector<int> leaders_group(WorldNodes,0);
leaders_1hot [ myleader ] = 1;
///////////////////////////////////////////////////////////////////
// global sum leaders over comm world
///////////////////////////////////////////////////////////////////
int ierr=MPI_Allreduce(MPI_IN_PLACE,&leaders_1hot[0],WorldSize,MPI_INT,MPI_SUM,WorldComm);
assert(ierr==0);
///////////////////////////////////////////////////////////////////
// find the group leaders world rank
///////////////////////////////////////////////////////////////////
int group=0;
for(int l=0;l<WorldSize;l++){
if(leaders_1hot[l]){
leaders_group[group++] = l;
}
}
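// Illustrative example: 8 world ranks on 2 nodes of 4, with ranks 0-3 sharing
// the first node. Each rank adds a 1 at its leader's world rank, so after the
// allreduce leaders_1hot = {4,0,0,0,4,0,0,0}; every non-zero entry marks a
// node leader and leaders_group = {0,4}.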
///////////////////////////////////////////////////////////////////
// Identify the node of the group in which I (and my leader) live
///////////////////////////////////////////////////////////////////
WorldNode=-1;
for(int g=0;g<WorldNodes;g++){
if (myleader == leaders_group[g]){
WorldNode=g;
}
}
assert(WorldNode!=-1);
_ShmSetup=1;
}
// Gray encode support
int BinaryToGray (int binary) {
int gray = (binary>>1)^binary;
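// e.g. 0,1,2,3 -> 0,1,3,2 : consecutive integers differ in exactly one bit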
return gray;
}
int Log2Size(int TwoToPower,int MAXLOG2)
{
int log2size = -1;
for(int i=0;i<=MAXLOG2;i++){
if ( (0x1<<i) == TwoToPower ) {
log2size = i;
break;
}
}
return log2size;
}
void GlobalSharedMemory::OptimalCommunicator(const Coordinate &processors,Grid_MPI_Comm & optimal_comm)
{
//////////////////////////////////////////////////////////////////////////////
// Look and see if it looks like an HPE 8600 based on hostname conventions
//////////////////////////////////////////////////////////////////////////////
const int namelen = _POSIX_HOST_NAME_MAX;
char name[namelen];
int R;
int I;
int N;
gethostname(name,namelen);
int nscan = sscanf(name,"r%di%dn%d",&R,&I,&N) ;
if(nscan==3 && HPEhypercube ) OptimalCommunicatorHypercube(processors,optimal_comm);
else OptimalCommunicatorSharedMemory(processors,optimal_comm);
}
static inline int divides(int a,int b)
{
return ( b == ( (b/a)*a ) );
}
void GlobalSharedMemory::GetShmDims(const Coordinate &WorldDims,Coordinate &ShmDims)
{
////////////////////////////////////////////////////////////////
// Allow user to configure through environment variable
////////////////////////////////////////////////////////////////
char* str = getenv(("GRID_SHM_DIMS_" + std::to_string(ShmDims.size())).c_str());
if ( str ) {
std::vector<int> IntShmDims;
GridCmdOptionIntVector(std::string(str),IntShmDims);
assert(IntShmDims.size() == WorldDims.size());
long ShmSize = 1;
for (int dim=0;dim<WorldDims.size();dim++) {
ShmSize *= (ShmDims[dim] = IntShmDims[dim]);
assert(divides(ShmDims[dim],WorldDims[dim]));
}
assert(ShmSize == WorldShmSize);
return;
}
////////////////////////////////////////////////////////////////
// Powers of 2,3,5 only in prime decomposition for now
////////////////////////////////////////////////////////////////
int ndimension = WorldDims.size();
ShmDims=Coordinate(ndimension,1);
std::vector<int> primes({2,3,5});
int dim = 0;
int last_dim = ndimension - 1;
int AutoShmSize = 1;
while(AutoShmSize != WorldShmSize) {
int p;
for(p=0;p<primes.size();p++) {
int prime=primes[p];
if ( divides(prime,WorldDims[dim]/ShmDims[dim])
&& divides(prime,WorldShmSize/AutoShmSize) ) {
AutoShmSize*=prime;
ShmDims[dim]*=prime;
last_dim = dim;
break;
}
}
if (p == primes.size() && last_dim == dim) {
std::cerr << "GlobalSharedMemory::GetShmDims failed" << std::endl;
exit(EXIT_FAILURE);
}
dim=(dim+1) %ndimension;
}
}
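// Illustrative example: WorldDims={4,4,4,8} with WorldShmSize=8 yields
// ShmDims={2,2,2,1}; factors of 2 are peeled off round-robin across the
// dimensions until the node size is reached. Setting GRID_SHM_DIMS_4 (parsed
// by GridCmdOptionIntVector) overrides the automatic choice.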
void GlobalSharedMemory::OptimalCommunicatorHypercube(const Coordinate &processors,Grid_MPI_Comm & optimal_comm)
{
////////////////////////////////////////////////////////////////
// Assert power of two shm_size.
////////////////////////////////////////////////////////////////
int log2size = Log2Size(WorldShmSize,MAXLOG2RANKSPERNODE);
assert(log2size != -1);
////////////////////////////////////////////////////////////////
// Identify the hypercube coordinate of this node using hostname
////////////////////////////////////////////////////////////////
// n runs 0...7 9...16 18...25 27...34 (8*4) 5 bits
// i runs 0..7 3 bits
// r runs 0..3 2 bits
// 2^10 = 1024 nodes
const int maxhdim = 10;
std::vector<int> HyperCubeCoords(maxhdim,0);
std::vector<int> RootHyperCubeCoords(maxhdim,0);
int R;
int I;
int N;
const int namelen = _POSIX_HOST_NAME_MAX;
char name[namelen];
// Parse ICE-XA hostname to get hypercube location
gethostname(name,namelen);
int nscan = sscanf(name,"r%di%dn%d",&R,&I,&N) ;
assert(nscan==3);
int nlo = N%9;
int nhi = N/9;
uint32_t hypercoor = (R<<8)|(I<<5)|(nhi<<3)|nlo ;
uint32_t rootcoor = hypercoor;
//////////////////////////////////////////////////////////////////
// Print debug info
//////////////////////////////////////////////////////////////////
for(int d=0;d<maxhdim;d++){
HyperCubeCoords[d] = (hypercoor>>d)&0x1;
}
std::string hname(name);
// std::cout << "hostname "<<hname<<std::endl;
// std::cout << "R " << R << " I " << I << " N "<< N
// << " hypercoor 0x"<<std::hex<<hypercoor<<std::dec<<std::endl;
//////////////////////////////////////////////////////////////////
// broadcast node 0's base coordinate for this partition.
//////////////////////////////////////////////////////////////////
MPI_Bcast(&rootcoor, sizeof(rootcoor), MPI_BYTE, 0, WorldComm);
hypercoor=hypercoor-rootcoor;
assert(hypercoor<WorldSize);
assert(hypercoor>=0);
//////////////////////////////////////
// Printing
//////////////////////////////////////
for(int d=0;d<maxhdim;d++){
HyperCubeCoords[d] = (hypercoor>>d)&0x1;
}
////////////////////////////////////////////////////////////////
// Identify subblock of ranks on node spreading across dims
// in a maximally symmetrical way
////////////////////////////////////////////////////////////////
int ndimension = processors.size();
Coordinate processor_coor(ndimension);
Coordinate WorldDims = processors;
Coordinate ShmDims (ndimension); Coordinate NodeDims (ndimension);
Coordinate ShmCoor (ndimension); Coordinate NodeCoor (ndimension); Coordinate WorldCoor(ndimension);
Coordinate HyperCoor(ndimension);
GetShmDims(WorldDims,ShmDims);
////////////////////////////////////////////////////////////////
// Establish torus of processes and nodes with sub-blockings
////////////////////////////////////////////////////////////////
for(int d=0;d<ndimension;d++){
NodeDims[d] = WorldDims[d]/ShmDims[d];
}
////////////////////////////////////////////////////////////////
// Map Hcube according to physical lattice
// must partition. Loop over dims and find out who would join.
////////////////////////////////////////////////////////////////
int hcoor = hypercoor;
for(int d=0;d<ndimension;d++){
int bits = Log2Size(NodeDims[d],MAXLOG2RANKSPERNODE);
int msk = (0x1<<bits)-1;
HyperCoor[d]=hcoor & msk;
HyperCoor[d]=BinaryToGray(HyperCoor[d]); // Space filling curve magic
hcoor = hcoor >> bits;
}
////////////////////////////////////////////////////////////////
// Check processor counts match
////////////////////////////////////////////////////////////////
int Nprocessors=1;
for(int i=0;i<ndimension;i++){
Nprocessors*=processors[i];
}
assert(WorldSize==Nprocessors);
////////////////////////////////////////////////////////////////
// Establish mapping between lexico physics coord and WorldRank
////////////////////////////////////////////////////////////////
int rank;
Lexicographic::CoorFromIndexReversed(NodeCoor,WorldNode ,NodeDims);
for(int d=0;d<ndimension;d++) NodeCoor[d]=HyperCoor[d];
Lexicographic::CoorFromIndexReversed(ShmCoor ,WorldShmRank,ShmDims);
for(int d=0;d<ndimension;d++) WorldCoor[d] = NodeCoor[d]*ShmDims[d]+ShmCoor[d];
Lexicographic::IndexFromCoorReversed(WorldCoor,rank,WorldDims);
/////////////////////////////////////////////////////////////////
// Build the new communicator
/////////////////////////////////////////////////////////////////
int ierr= MPI_Comm_split(WorldComm,0,rank,&optimal_comm);
assert(ierr==0);
}
void GlobalSharedMemory::OptimalCommunicatorSharedMemory(const Coordinate &processors,Grid_MPI_Comm & optimal_comm)
{
////////////////////////////////////////////////////////////////
// Identify subblock of ranks on node spreading across dims
// in a maximally symmetrical way
////////////////////////////////////////////////////////////////
int ndimension = processors.size();
Coordinate processor_coor(ndimension);
Coordinate WorldDims = processors; Coordinate ShmDims(ndimension); Coordinate NodeDims (ndimension);
Coordinate ShmCoor(ndimension); Coordinate NodeCoor(ndimension); Coordinate WorldCoor(ndimension);
GetShmDims(WorldDims,ShmDims);
////////////////////////////////////////////////////////////////
// Establish torus of processes and nodes with sub-blockings
////////////////////////////////////////////////////////////////
for(int d=0;d<ndimension;d++){
NodeDims[d] = WorldDims[d]/ShmDims[d];
}
////////////////////////////////////////////////////////////////
// Check processor counts match
////////////////////////////////////////////////////////////////
int Nprocessors=1;
for(int i=0;i<ndimension;i++){
Nprocessors*=processors[i];
}
assert(WorldSize==Nprocessors);
////////////////////////////////////////////////////////////////
// Establish mapping between lexico physics coord and WorldRank
////////////////////////////////////////////////////////////////
int rank;
Lexicographic::CoorFromIndexReversed(NodeCoor,WorldNode ,NodeDims);
Lexicographic::CoorFromIndexReversed(ShmCoor ,WorldShmRank,ShmDims);
for(int d=0;d<ndimension;d++) WorldCoor[d] = NodeCoor[d]*ShmDims[d]+ShmCoor[d];
Lexicographic::IndexFromCoorReversed(WorldCoor,rank,WorldDims);
/////////////////////////////////////////////////////////////////
// Build the new communicator
/////////////////////////////////////////////////////////////////
int ierr= MPI_Comm_split(WorldComm,0,rank,&optimal_comm);
assert(ierr==0);
}
////////////////////////////////////////////////////////////////////////////////////////////
// SHMGET
////////////////////////////////////////////////////////////////////////////////////////////
#ifdef GRID_MPI3_SHMGET
void GlobalSharedMemory::SharedMemoryAllocate(uint64_t bytes, int flags)
{
std::cout << header "SharedMemoryAllocate "<< bytes<< " shmget implementation "<<std::endl;
assert(_ShmSetup==1);
assert(_ShmAlloc==0);
//////////////////////////////////////////////////////////////////////////////////////////////////////////
// allocate the shared windows for our group
//////////////////////////////////////////////////////////////////////////////////////////////////////////
MPI_Barrier(WorldShmComm);
WorldShmCommBufs.resize(WorldShmSize);
std::vector<int> shmids(WorldShmSize);
if ( WorldShmRank == 0 ) {
for(int r=0;r<WorldShmSize;r++){
size_t size = bytes;
key_t key = IPC_PRIVATE;
int flags = IPC_CREAT | SHM_R | SHM_W;
#ifdef SHM_HUGETLB
if (Hugepages) flags|=SHM_HUGETLB;
#endif
if ((shmids[r]= shmget(key,size, flags)) ==-1) {
int errsv = errno;
printf("Errno %d\n",errsv);
printf("key %d\n",key);
printf("size %ld\n",size);
printf("flags %d\n",flags);
perror("shmget");
exit(1);
}
}
}
MPI_Barrier(WorldShmComm);
MPI_Bcast(&shmids[0],WorldShmSize*sizeof(int),MPI_BYTE,0,WorldShmComm);
MPI_Barrier(WorldShmComm);
for(int r=0;r<WorldShmSize;r++){
WorldShmCommBufs[r] = (uint64_t *)shmat(shmids[r], NULL,0);
if (WorldShmCommBufs[r] == (uint64_t *)-1) {
perror("Shared memory attach failure");
shmctl(shmids[r], IPC_RMID, NULL);
exit(2);
}
}
MPI_Barrier(WorldShmComm);
///////////////////////////////////
// Mark for clean up
///////////////////////////////////
for(int r=0;r<WorldShmSize;r++){
shmctl(shmids[r], IPC_RMID,(struct shmid_ds *)NULL);
}
MPI_Barrier(WorldShmComm);
_ShmAlloc=1;
_ShmAllocBytes = bytes;
}
#endif
////////////////////////////////////////////////////////////////////////////////////////////
// Hugetlbfs mapping intended
////////////////////////////////////////////////////////////////////////////////////////////
#if defined(GRID_CUDA) ||defined(GRID_HIP)
void GlobalSharedMemory::SharedMemoryAllocate(uint64_t bytes, int flags)
{
void * ShmCommBuf ;
assert(_ShmSetup==1);
assert(_ShmAlloc==0);
//////////////////////////////////////////////////////////////////////////////////////////////////////////
// allocate the pointer array for shared windows for our group
//////////////////////////////////////////////////////////////////////////////////////////////////////////
MPI_Barrier(WorldShmComm);
WorldShmCommBufs.resize(WorldShmSize);
//////////////////////////////////////////////////////////////////////////////////////////////////////////
// TODO/FIXME : NOT ALL NVLINK BOARDS have full Peer to peer connectivity.
// The annoyance is that they have partial peer 2 peer. This occurs on the 8 GPU blades.
// e.g. DGX1, supermicro board,
//////////////////////////////////////////////////////////////////////////////////////////////////////////
// cudaDeviceGetP2PAttribute(&perfRank, cudaDevP2PAttrPerformanceRank, device1, device2);
///////////////////////////////////////////////////////////////////////////////////////////////////////////
// Each MPI rank should allocate our own buffer
///////////////////////////////////////////////////////////////////////////////////////////////////////////
ShmCommBuf = acceleratorAllocDevice(bytes);
if (ShmCommBuf == (void *)NULL ) {
std::cerr << " SharedMemoryMPI.cc acceleratorAllocDevice failed NULL pointer for " << bytes<<" bytes " << std::endl;
exit(EXIT_FAILURE);
}
// if ( WorldRank == 0 ){
if ( 1 ){
std::cout << WorldRank << header " SharedMemoryMPI.cc acceleratorAllocDevice "<< bytes
<< "bytes at "<< std::hex<< ShmCommBuf <<std::dec<<" for comms buffers " <<std::endl;
}
SharedMemoryZero(ShmCommBuf,bytes);
///////////////////////////////////////////////////////////////////////////////////////////////////////////
// Loop over ranks/gpu's on our node
///////////////////////////////////////////////////////////////////////////////////////////////////////////
for(int r=0;r<WorldShmSize;r++){
#ifndef GRID_MPI3_SHM_NONE
//////////////////////////////////////////////////
// If it is me, pass around the IPC access key
//////////////////////////////////////////////////
#ifdef GRID_CUDA
cudaIpcMemHandle_t handle;
if ( r==WorldShmRank ) {
auto err = cudaIpcGetMemHandle(&handle,ShmCommBuf);
if ( err != cudaSuccess) {
std::cerr << " SharedMemoryMPI.cc cudaIpcGetMemHandle failed for rank" << r <<" "<<cudaGetErrorString(err)<< std::endl;
exit(EXIT_FAILURE);
}
}
#endif
#ifdef GRID_HIP
hipIpcMemHandle_t handle;
if ( r==WorldShmRank ) {
auto err = hipIpcGetMemHandle(&handle,ShmCommBuf);
if ( err != hipSuccess) {
std::cerr << " SharedMemoryMPI.cc hipIpcGetMemHandle failed for rank" << r <<" "<<hipGetErrorString(err)<< std::endl;
exit(EXIT_FAILURE);
}
}
#endif
//////////////////////////////////////////////////
// Share this IPC handle across the Shm Comm
//////////////////////////////////////////////////
{
int ierr=MPI_Bcast(&handle,
sizeof(handle),
MPI_BYTE,
r,
WorldShmComm);
assert(ierr==0);
}
///////////////////////////////////////////////////////////////
// If I am not the source, overwrite thisBuf with remote buffer
///////////////////////////////////////////////////////////////
void * thisBuf = ShmCommBuf;
#ifdef GRID_CUDA
if ( r!=WorldShmRank ) {
auto err = cudaIpcOpenMemHandle(&thisBuf,handle,cudaIpcMemLazyEnablePeerAccess);
if ( err != cudaSuccess) {
std::cerr << " SharedMemoryMPI.cc cudaIpcOpenMemHandle failed for rank" << r <<" "<<cudaGetErrorString(err)<< std::endl;
exit(EXIT_FAILURE);
}
}
#endif
#ifdef GRID_HIP
if ( r!=WorldShmRank ) {
auto err = hipIpcOpenMemHandle(&thisBuf,handle,hipIpcMemLazyEnablePeerAccess);
if ( err != hipSuccess) {
std::cerr << " SharedMemoryMPI.cc hipIpcOpenMemHandle failed for rank" << r <<" "<<hipGetErrorString(err)<< std::endl;
exit(EXIT_FAILURE);
}
}
#endif
///////////////////////////////////////////////////////////////
// Save a copy of the device buffers
///////////////////////////////////////////////////////////////
WorldShmCommBufs[r] = thisBuf;
#else
WorldShmCommBufs[r] = ShmCommBuf;
#endif
}
_ShmAllocBytes=bytes;
_ShmAlloc=1;
}
#else
#ifdef GRID_MPI3_SHMMMAP
void GlobalSharedMemory::SharedMemoryAllocate(uint64_t bytes, int flags)
{
std::cout << header "SharedMemoryAllocate "<< bytes<< " MMAP implementation "<< GRID_SHM_PATH <<std::endl;
assert(_ShmSetup==1);
assert(_ShmAlloc==0);
//////////////////////////////////////////////////////////////////////////////////////////////////////////
// allocate the shared windows for our group
//////////////////////////////////////////////////////////////////////////////////////////////////////////
MPI_Barrier(WorldShmComm);
WorldShmCommBufs.resize(WorldShmSize);
////////////////////////////////////////////////////////////////////////////////////////////
// Hugetlbfs and others map filesystems as mappable huge pages
////////////////////////////////////////////////////////////////////////////////////////////
char shm_name [NAME_MAX];
for(int r=0;r<WorldShmSize;r++){
sprintf(shm_name,GRID_SHM_PATH "/Grid_mpi3_shm_%d_%d",WorldNode,r);
int fd=open(shm_name,O_RDWR|O_CREAT,0666);
if ( fd == -1) {
printf("open %s failed\n",shm_name);
perror("open hugetlbfs");
exit(EXIT_FAILURE);
}
int mmap_flag = MAP_SHARED ;
#ifdef MAP_POPULATE
mmap_flag|=MAP_POPULATE;
#endif
#ifdef MAP_HUGETLB
if ( flags ) mmap_flag |= MAP_HUGETLB;
#endif
void *ptr = (void *) mmap(NULL, bytes, PROT_READ | PROT_WRITE, mmap_flag,fd, 0);
if ( ptr == (void *)MAP_FAILED ) {
printf("mmap %s failed\n",shm_name);
perror("failed mmap"); assert(0);
}
assert(((uint64_t)ptr&0x3F)==0);
close(fd);
WorldShmCommBufs[r] =ptr;
// std::cout << header "Set WorldShmCommBufs["<<r<<"]="<<ptr<< "("<< bytes<< "bytes)"<<std::endl;
}
_ShmAlloc=1;
_ShmAllocBytes = bytes;
};
#endif // MMAP
#ifdef GRID_MPI3_SHM_NONE
void GlobalSharedMemory::SharedMemoryAllocate(uint64_t bytes, int flags)
{
std::cout << header "SharedMemoryAllocate "<< bytes<< " MMAP anonymous implementation "<<std::endl;
assert(_ShmSetup==1);
assert(_ShmAlloc==0);
//////////////////////////////////////////////////////////////////////////////////////////////////////////
// allocate the shared windows for our group
//////////////////////////////////////////////////////////////////////////////////////////////////////////
MPI_Barrier(WorldShmComm);
WorldShmCommBufs.resize(WorldShmSize);
////////////////////////////////////////////////////////////////////////////////////////////
// Hugetlbfs and others map filesystems as mappable huge pages
////////////////////////////////////////////////////////////////////////////////////////////
char shm_name [NAME_MAX];
assert(WorldShmSize == 1);
for(int r=0;r<WorldShmSize;r++){
int fd=-1;
int mmap_flag = MAP_SHARED |MAP_ANONYMOUS ;
#ifdef MAP_POPULATE
mmap_flag|=MAP_POPULATE;
#endif
#ifdef MAP_HUGETLB
if ( flags ) mmap_flag |= MAP_HUGETLB;
#endif
void *ptr = (void *) mmap(NULL, bytes, PROT_READ | PROT_WRITE, mmap_flag,fd, 0);
if ( ptr == (void *)MAP_FAILED ) {
printf("mmap %s failed\n",shm_name);
perror("failed mmap"); assert(0);
}
assert(((uint64_t)ptr&0x3F)==0);
close(fd);
WorldShmCommBufs[r] =ptr;
// std::cout << "Set WorldShmCommBufs["<<r<<"]="<<ptr<< "("<< bytes<< "bytes)"<<std::endl;
}
_ShmAlloc=1;
_ShmAllocBytes = bytes;
};
#endif // MMAP
#ifdef GRID_MPI3_SHMOPEN
////////////////////////////////////////////////////////////////////////////////////////////
// POSIX SHMOPEN ; as far as I know Linux does not allow EXPLICIT HugePages with this case
// tmpfs (Larry Meadows says) does not support explicit huge page, and this is used for
// the posix shm virtual file system
////////////////////////////////////////////////////////////////////////////////////////////
void GlobalSharedMemory::SharedMemoryAllocate(uint64_t bytes, int flags)
{
std::cout << header "SharedMemoryAllocate "<< bytes<< " SHMOPEN implementation "<<std::endl;
assert(_ShmSetup==1);
assert(_ShmAlloc==0);
MPI_Barrier(WorldShmComm);
WorldShmCommBufs.resize(WorldShmSize);
char shm_name [NAME_MAX];
if ( WorldShmRank == 0 ) {
for(int r=0;r<WorldShmSize;r++){
size_t size = bytes;
struct passwd *pw = getpwuid (getuid());
sprintf(shm_name,"/Grid_%s_mpi3_shm_%d_%d",pw->pw_name,WorldNode,r);
shm_unlink(shm_name);
int fd=shm_open(shm_name,O_RDWR|O_CREAT,0666);
if ( fd < 0 ) { perror("failed shm_open"); assert(0); }
ftruncate(fd, size);
int mmap_flag = MAP_SHARED;
#ifdef MAP_POPULATE
mmap_flag |= MAP_POPULATE;
#endif
#ifdef MAP_HUGETLB
if (flags) mmap_flag |= MAP_HUGETLB;
#endif
void * ptr = mmap(NULL,size, PROT_READ | PROT_WRITE, mmap_flag, fd, 0);
if ( ptr == (void * )MAP_FAILED ) {
perror("failed mmap");
assert(0);
}
assert(((uint64_t)ptr&0x3F)==0);
WorldShmCommBufs[r] =ptr;
close(fd);
}
}
MPI_Barrier(WorldShmComm);
if ( WorldShmRank != 0 ) {
for(int r=0;r<WorldShmSize;r++){
size_t size = bytes ;
struct passwd *pw = getpwuid (getuid());
sprintf(shm_name,"/Grid_%s_mpi3_shm_%d_%d",pw->pw_name,WorldNode,r);
int fd=shm_open(shm_name,O_RDWR,0666);
if ( fd<0 ) { perror("failed shm_open"); assert(0); }
void * ptr = mmap(NULL,size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
if ( ptr == MAP_FAILED ) { perror("failed mmap"); assert(0); }
assert(((uint64_t)ptr&0x3F)==0);
WorldShmCommBufs[r] =ptr;
close(fd);
}
}
_ShmAlloc=1;
_ShmAllocBytes = bytes;
}
#endif
#endif // End NVCC case for GPU device buffers
/////////////////////////////////////////////////////////////////////////
// Routines accessing shared memory should route through for GPU safety
/////////////////////////////////////////////////////////////////////////
void GlobalSharedMemory::SharedMemoryZero(void *dest,size_t bytes)
{
#ifdef GRID_CUDA
cudaMemset(dest,0,bytes);
#else
bzero(dest,bytes);
#endif
}
void GlobalSharedMemory::SharedMemoryCopy(void *dest,void *src,size_t bytes)
{
#ifdef GRID_CUDA
cudaMemcpy(dest,src,bytes,cudaMemcpyDefault);
#else
bcopy(src,dest,bytes);
#endif
}
////////////////////////////////////////////////////////
// Global shared functionality finished
// Now move to per communicator functionality
////////////////////////////////////////////////////////
void SharedMemory::SetCommunicator(Grid_MPI_Comm comm)
{
int rank, size;
MPI_Comm_rank(comm,&rank);
MPI_Comm_size(comm,&size);
ShmRanks.resize(size);
/////////////////////////////////////////////////////////////////////
// Split into groups that can share memory
/////////////////////////////////////////////////////////////////////
#ifndef GRID_MPI3_SHM_NONE
MPI_Comm_split_type(comm, MPI_COMM_TYPE_SHARED, 0, MPI_INFO_NULL,&ShmComm);
#else
MPI_Comm_split(comm, rank, 0, &ShmComm);
#endif
MPI_Comm_rank(ShmComm ,&ShmRank);
MPI_Comm_size(ShmComm ,&ShmSize);
ShmCommBufs.resize(ShmSize);
//////////////////////////////////////////////////////////////////////
// Map ShmRank to WorldShmRank and use the right buffer
//////////////////////////////////////////////////////////////////////
assert (GlobalSharedMemory::ShmAlloc()==1);
heap_size = GlobalSharedMemory::ShmAllocBytes();
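// Gather each split-comm rank's node-local (WorldShm) rank: only rank r
// contributes a non-zero value, so the in-place sum leaves every rank holding
// rank r's WorldShmRank, which selects the shared buffer owned by that rank.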
for(int r=0;r<ShmSize;r++){
uint32_t wsr = (r==ShmRank) ? GlobalSharedMemory::WorldShmRank : 0 ;
MPI_Allreduce(MPI_IN_PLACE,&wsr,1,MPI_UINT32_T,MPI_SUM,ShmComm);
ShmCommBufs[r] = GlobalSharedMemory::WorldShmCommBufs[wsr];
}
ShmBufferFreeAll();
/////////////////////////////////////////////////////////////////////
// find comm ranks in our SHM group (i.e. which ranks are on our node)
/////////////////////////////////////////////////////////////////////
MPI_Group FullGroup, ShmGroup;
MPI_Comm_group (comm , &FullGroup);
MPI_Comm_group (ShmComm, &ShmGroup);
std::vector<int> ranks(size); for(int r=0;r<size;r++) ranks[r]=r;
MPI_Group_translate_ranks (FullGroup,size,&ranks[0],ShmGroup, &ShmRanks[0]);
#ifdef GRID_SHM_FORCE_MPI
// Hide the shared memory path between ranks
{
for(int r=0;r<size;r++){
if ( r!=rank ) {
ShmRanks[r] = MPI_UNDEFINED;
}
}
}
#endif
SharedMemoryTest();
}
//////////////////////////////////////////////////////////////////
// On node barrier
//////////////////////////////////////////////////////////////////
void SharedMemory::ShmBarrier(void)
{
MPI_Barrier (ShmComm);
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////
// Test the shared memory is working
//////////////////////////////////////////////////////////////////////////////////////////////////////////
void SharedMemory::SharedMemoryTest(void)
{
ShmBarrier();
uint64_t check[3];
uint64_t magic = 0x5A5A5A;
if ( ShmRank == 0 ) {
for(uint64_t r=0;r<ShmSize;r++){
check[0]=GlobalSharedMemory::WorldNode;
check[1]=r;
check[2]=magic;
GlobalSharedMemory::SharedMemoryCopy( ShmCommBufs[r], check, 3*sizeof(uint64_t));
}
}
ShmBarrier();
for(uint64_t r=0;r<ShmSize;r++){
ShmBarrier();
GlobalSharedMemory::SharedMemoryCopy(check,ShmCommBufs[r], 3*sizeof(uint64_t));
ShmBarrier();
assert(check[0]==GlobalSharedMemory::WorldNode);
assert(check[1]==r);
assert(check[2]==magic);
ShmBarrier();
}
}
void *SharedMemory::ShmBuffer(int rank)
{
int gpeer = ShmRanks[rank];
if (gpeer == MPI_UNDEFINED){
return NULL;
} else {
return ShmCommBufs[gpeer];
}
}
void *SharedMemory::ShmBufferTranslate(int rank,void * local_p)
{
int gpeer = ShmRanks[rank];
assert(gpeer!=ShmRank); // never send to self
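// Every rank's window has the same size and the comms buffers are laid out
// identically across ranks, so the offset of local_p within our own buffer can
// be applied to the peer's base pointer to reach the corresponding location in
// the peer's window.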
if (gpeer == MPI_UNDEFINED){
return NULL;
} else {
uint64_t offset = (uint64_t)local_p - (uint64_t)ShmCommBufs[ShmRank];
uint64_t remote = (uint64_t)ShmCommBufs[gpeer]+offset;
return (void *) remote;
}
}
SharedMemory::~SharedMemory()
{
int MPI_is_finalised; MPI_Finalized(&MPI_is_finalised);
if ( !MPI_is_finalised ) {
MPI_Comm_free(&ShmComm);
}
};
NAMESPACE_END(Grid);

@@ -0,0 +1,170 @@
/*************************************************************************************
Grid physics library, www.github.com/paboyle/Grid
Source file: ./lib/communicator/SharedMemoryNone.cc
Copyright (C) 2015
Author: Peter Boyle <paboyle@ph.ed.ac.uk>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */
#include <Grid/GridCore.h>
NAMESPACE_BEGIN(Grid);
#define header "SharedMemoryNone: "
/*Construct from an MPI communicator*/
void GlobalSharedMemory::Init(Grid_MPI_Comm comm)
{
assert(_ShmSetup==0);
WorldComm = 0;
WorldRank = 0;
WorldSize = 1;
WorldShmComm = 0 ;
WorldShmRank = 0 ;
WorldShmSize = 1 ;
WorldNodes = 1 ;
WorldNode = 0 ;
WorldShmRanks.resize(WorldSize); WorldShmRanks[0] = 0;
WorldShmCommBufs.resize(1);
_ShmSetup=1;
}
void GlobalSharedMemory::OptimalCommunicator(const Coordinate &processors,Grid_MPI_Comm & optimal_comm)
{
optimal_comm = WorldComm;
}
////////////////////////////////////////////////////////////////////////////////////////////
// Hugetlbfs mapping intended, use anonymous mmap
////////////////////////////////////////////////////////////////////////////////////////////
#if 1
void GlobalSharedMemory::SharedMemoryAllocate(uint64_t bytes, int flags)
{
std::cout << header "SharedMemoryAllocate "<< bytes<< " GPU implementation "<<std::endl;
void * ShmCommBuf ;
assert(_ShmSetup==1);
assert(_ShmAlloc==0);
///////////////////////////////////////////////////////////////////////////////////////////////////////////
// Each MPI rank should allocate our own buffer
///////////////////////////////////////////////////////////////////////////////////////////////////////////
ShmCommBuf = acceleratorAllocDevice(bytes);
if (ShmCommBuf == (void *)NULL ) {
std::cerr << " SharedMemoryNone.cc acceleratorAllocDevice failed NULL pointer for " << bytes<<" bytes " << std::endl;
exit(EXIT_FAILURE);
}
if ( WorldRank == 0 ){
std::cout << WorldRank << header " SharedMemoryNone.cc acceleratorAllocDevice "<< bytes
<< "bytes at "<< std::hex<< ShmCommBuf <<std::dec<<" for comms buffers " <<std::endl;
}
SharedMemoryZero(ShmCommBuf,bytes);
///////////////////////////////////////////////////////////////////////////////////////////////////////////
// Loop over ranks/gpu's on our node
///////////////////////////////////////////////////////////////////////////////////////////////////////////
WorldShmCommBufs[0] = ShmCommBuf;
_ShmAllocBytes=bytes;
_ShmAlloc=1;
}
#else
void GlobalSharedMemory::SharedMemoryAllocate(uint64_t bytes, int flags)
{
void * ShmCommBuf ;
assert(_ShmSetup==1);
assert(_ShmAlloc==0);
int mmap_flag =0;
#ifdef MAP_ANONYMOUS
mmap_flag = mmap_flag| MAP_SHARED | MAP_ANONYMOUS;
#endif
#ifdef MAP_ANON
mmap_flag = mmap_flag| MAP_SHARED | MAP_ANON;
#endif
#ifdef MAP_HUGETLB
if ( flags ) mmap_flag |= MAP_HUGETLB;
#endif
ShmCommBuf =(void *) mmap(NULL, bytes, PROT_READ | PROT_WRITE, mmap_flag, -1, 0);
if (ShmCommBuf == (void *)MAP_FAILED) {
perror("mmap failed ");
exit(EXIT_FAILURE);
}
#ifdef MADV_HUGEPAGE
if (!Hugepages ) madvise(ShmCommBuf,bytes,MADV_HUGEPAGE);
#endif
bzero(ShmCommBuf,bytes);
WorldShmCommBufs[0] = ShmCommBuf;
_ShmAllocBytes=bytes;
_ShmAlloc=1;
};
#endif
void GlobalSharedMemory::SharedMemoryZero(void *dest,size_t bytes)
{
acceleratorMemSet(dest,0,bytes);
}
void GlobalSharedMemory::SharedMemoryCopy(void *dest,void *src,size_t bytes)
{
acceleratorCopyToDevice(src,dest,bytes);
}
////////////////////////////////////////////////////////
// Global shared functionality finished
// Now move to per communicator functionality
////////////////////////////////////////////////////////
void SharedMemory::SetCommunicator(Grid_MPI_Comm comm)
{
assert(GlobalSharedMemory::ShmAlloc()==1);
ShmRanks.resize(1);
ShmCommBufs.resize(1);
ShmRanks[0] = 0;
ShmRank = 0;
ShmSize = 1;
//////////////////////////////////////////////////////////////////////
// Map ShmRank to WorldShmRank and use the right buffer
//////////////////////////////////////////////////////////////////////
ShmCommBufs[0] = GlobalSharedMemory::WorldShmCommBufs[0];
heap_size = GlobalSharedMemory::ShmAllocBytes();
ShmBufferFreeAll();
return;
}
//////////////////////////////////////////////////////////////////
// On node barrier
//////////////////////////////////////////////////////////////////
void SharedMemory::ShmBarrier(void){ return ; }
//////////////////////////////////////////////////////////////////////////////////////////////////////////
// Test the shared memory is working
//////////////////////////////////////////////////////////////////////////////////////////////////////////
void SharedMemory::SharedMemoryTest(void) { return; }
void *SharedMemory::ShmBuffer(int rank)
{
return NULL;
}
void *SharedMemory::ShmBufferTranslate(int rank,void * local_p)
{
return NULL;
}
SharedMemory::~SharedMemory()
{};
NAMESPACE_END(Grid);

Grid/cshift/Cshift.h Normal file
@@ -0,0 +1,62 @@
/*************************************************************************************
Grid physics library, www.github.com/paboyle/Grid
Source file: ./lib/Cshift.h
Copyright (C) 2015
Author: Peter Boyle <paboyle@ph.ed.ac.uk>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */
#ifndef _GRID_CSHIFT_H_
#define _GRID_CSHIFT_H_
#include <Grid/cshift/Cshift_common.h>
#ifdef GRID_COMMS_NONE
#include <Grid/cshift/Cshift_none.h>
#endif
#ifdef GRID_COMMS_MPI
#include <Grid/cshift/Cshift_mpi.h>
#endif
#ifdef GRID_COMMS_MPI3
#include <Grid/cshift/Cshift_mpi.h>
#endif
#ifdef GRID_COMMS_MPIT
#include <Grid/cshift/Cshift_mpi.h>
#endif
#ifdef GRID_COMMS_SHMEM
#include <Grid/cshift/Cshift_mpi.h> // uses same implementation of communicator
#endif
NAMESPACE_BEGIN(Grid);
template<class Expression,typename std::enable_if<is_lattice_expr<Expression>::value,void>::type * = nullptr>
auto Cshift(const Expression &expr,int dim,int shift) -> decltype(closure(expr))
{
return Cshift(closure(expr),dim,shift);
}
NAMESPACE_END(Grid);
#endif
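
The enable_if overload above lets Cshift accept an unevaluated lattice expression: the expression is first forced into a temporary lattice with closure() and the result is then shifted. A brief usage sketch, assuming an application with an already-constructed GridCartesian named grid (illustrative, not part of the diff):

// Usage sketch only; 'grid' is an assumed, already-constructed GridCartesian.
LatticeComplex A(&grid), B(&grid), C(&grid);
B = Cshift(A, 0, 1);        // B(x) = A(x + 1) along dimension 0
C = Cshift(A + B, 0, -1);   // expression argument: closure(A + B) is formed, then shifted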

Grid/cshift/Cshift_common.h (new file, 496 lines)

@@ -0,0 +1,496 @@
/*************************************************************************************
Grid physics library, www.github.com/paboyle/Grid
Source file: ./lib/cshift/Cshift_common.h
Copyright (C) 2015
Author: Peter Boyle <paboyle@ph.ed.ac.uk>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */
#pragma once
NAMESPACE_BEGIN(Grid);
extern Vector<std::pair<int,int> > Cshift_table;
///////////////////////////////////////////////////////////////////
// Gather for when there is no need to SIMD split
///////////////////////////////////////////////////////////////////
template<class vobj> void
Gather_plane_simple (const Lattice<vobj> &rhs,cshiftVector<vobj> &buffer,int dimension,int plane,int cbmask, int off=0)
{
int rd = rhs.Grid()->_rdimensions[dimension];
if ( !rhs.Grid()->CheckerBoarded(dimension) ) {
cbmask = 0x3;
}
int so=plane*rhs.Grid()->_ostride[dimension]; // base offset for start of plane
int e1=rhs.Grid()->_slice_nblock[dimension];
int e2=rhs.Grid()->_slice_block[dimension];
int ent = 0;
if(Cshift_table.size()<e1*e2) Cshift_table.resize(e1*e2); // Let it grow to biggest
int stride=rhs.Grid()->_slice_stride[dimension];
if ( cbmask == 0x3 ) {
for(int n=0;n<e1;n++){
for(int b=0;b<e2;b++){
int o = n*stride;
int bo = n*e2;
Cshift_table[ent++] = std::pair<int,int>(off+bo+b,so+o+b);
}
}
} else {
int bo=0;
for(int n=0;n<e1;n++){
for(int b=0;b<e2;b++){
int o = n*stride;
int ocb=1<<rhs.Grid()->CheckerBoardFromOindex(o+b);
if ( ocb &cbmask ) {
Cshift_table[ent++]=std::pair<int,int> (off+bo++,so+o+b);
}
}
}
}
{
auto buffer_p = & buffer[0];
auto table = &Cshift_table[0];
#ifdef ACCELERATOR_CSHIFT
autoView(rhs_v , rhs, AcceleratorRead);
accelerator_for(i,ent,vobj::Nsimd(),{
coalescedWrite(buffer_p[table[i].first],coalescedRead(rhs_v[table[i].second]));
});
#else
autoView(rhs_v , rhs, CpuRead);
thread_for(i,ent,{
buffer_p[table[i].first]=rhs_v[table[i].second];
});
#endif
}
}
///////////////////////////////////////////////////////////////////
// Gather for when there *is* need to SIMD split
///////////////////////////////////////////////////////////////////
template<class vobj> void
Gather_plane_extract(const Lattice<vobj> &rhs,
ExtractPointerArray<typename vobj::scalar_object> pointers,
int dimension,int plane,int cbmask)
{
int rd = rhs.Grid()->_rdimensions[dimension];
if ( !rhs.Grid()->CheckerBoarded(dimension) ) {
cbmask = 0x3;
}
int so = plane*rhs.Grid()->_ostride[dimension]; // base offset for start of plane
int e1=rhs.Grid()->_slice_nblock[dimension];
int e2=rhs.Grid()->_slice_block[dimension];
int n1=rhs.Grid()->_slice_stride[dimension];
if ( cbmask ==0x3){
#ifdef ACCELERATOR_CSHIFT
autoView(rhs_v , rhs, AcceleratorRead);
accelerator_for(nn,e1*e2,1,{
int n = nn%e1;
int b = nn/e1;
int o = n*n1;
int offset = b+n*e2;
vobj temp =rhs_v[so+o+b];
extract<vobj>(temp,pointers,offset);
});
#else
autoView(rhs_v , rhs, CpuRead);
thread_for2d(n,e1,b,e2,{
int o = n*n1;
int offset = b+n*e2;
vobj temp =rhs_v[so+o+b];
extract<vobj>(temp,pointers,offset);
});
#endif
} else {
Coordinate rdim=rhs.Grid()->_rdimensions;
Coordinate cdm =rhs.Grid()->_checker_dim_mask;
std::cout << " Dense packed buffer WARNING " <<std::endl; // Does this get called twice once for each cb?
#ifdef ACCELERATOR_CSHIFT
autoView(rhs_v , rhs, AcceleratorRead);
accelerator_for(nn,e1*e2,1,{
int n = nn%e1;
int b = nn/e1;
Coordinate coor;
int o=n*n1;
int oindex = o+b;
int cb = RedBlackCheckerBoardFromOindex(oindex, rdim, cdm);
int ocb=1<<cb;
int offset = b+n*e2;
if ( ocb & cbmask ) {
vobj temp =rhs_v[so+o+b];
extract<vobj>(temp,pointers,offset);
}
});
#else
autoView(rhs_v , rhs, CpuRead);
thread_for2d(n,e1,b,e2,{
Coordinate coor;
int o=n*n1;
int oindex = o+b;
int cb = RedBlackCheckerBoardFromOindex(oindex, rdim, cdm);
int ocb=1<<cb;
int offset = b+n*e2;
if ( ocb & cbmask ) {
vobj temp =rhs_v[so+o+b];
extract<vobj>(temp,pointers,offset);
}
});
#endif
}
}
//////////////////////////////////////////////////////
// Scatter for when there is no need to SIMD split
//////////////////////////////////////////////////////
template<class vobj> void Scatter_plane_simple (Lattice<vobj> &rhs,cshiftVector<vobj> &buffer, int dimension,int plane,int cbmask)
{
int rd = rhs.Grid()->_rdimensions[dimension];
if ( !rhs.Grid()->CheckerBoarded(dimension) ) {
cbmask=0x3;
}
int so = plane*rhs.Grid()->_ostride[dimension]; // base offset for start of plane
int e1=rhs.Grid()->_slice_nblock[dimension];
int e2=rhs.Grid()->_slice_block[dimension];
int stride=rhs.Grid()->_slice_stride[dimension];
if(Cshift_table.size()<e1*e2) Cshift_table.resize(e1*e2); // Let it grow to biggest
int ent =0;
if ( cbmask ==0x3 ) {
for(int n=0;n<e1;n++){
for(int b=0;b<e2;b++){
int o =n*rhs.Grid()->_slice_stride[dimension];
int bo =n*rhs.Grid()->_slice_block[dimension];
Cshift_table[ent++] = std::pair<int,int>(so+o+b,bo+b);
}
}
} else {
int bo=0;
for(int n=0;n<e1;n++){
for(int b=0;b<e2;b++){
int o =n*rhs.Grid()->_slice_stride[dimension];
int ocb=1<<rhs.Grid()->CheckerBoardFromOindex(o+b);// Could easily be a table lookup
if ( ocb & cbmask ) {
Cshift_table[ent++]=std::pair<int,int> (so+o+b,bo++);
}
}
}
}
{
auto buffer_p = & buffer[0];
auto table = &Cshift_table[0];
#ifdef ACCELERATOR_CSHIFT
autoView( rhs_v, rhs, AcceleratorWrite);
accelerator_for(i,ent,vobj::Nsimd(),{
coalescedWrite(rhs_v[table[i].first],coalescedRead(buffer_p[table[i].second]));
});
#else
autoView( rhs_v, rhs, CpuWrite);
thread_for(i,ent,{
rhs_v[table[i].first]=buffer_p[table[i].second];
});
#endif
}
}
//////////////////////////////////////////////////////
// Scatter for when there *is* need to SIMD split
//////////////////////////////////////////////////////
template<class vobj> void Scatter_plane_merge(Lattice<vobj> &rhs,ExtractPointerArray<typename vobj::scalar_object> pointers,int dimension,int plane,int cbmask)
{
int rd = rhs.Grid()->_rdimensions[dimension];
if ( !rhs.Grid()->CheckerBoarded(dimension) ) {
cbmask=0x3;
}
int so = plane*rhs.Grid()->_ostride[dimension]; // base offset for start of plane
int e1=rhs.Grid()->_slice_nblock[dimension];
int e2=rhs.Grid()->_slice_block[dimension];
if(cbmask ==0x3 ) {
int _slice_stride = rhs.Grid()->_slice_stride[dimension];
int _slice_block = rhs.Grid()->_slice_block[dimension];
#ifdef ACCELERATOR_CSHIFT
autoView( rhs_v , rhs, AcceleratorWrite);
accelerator_for(nn,e1*e2,1,{
int n = nn%e1;
int b = nn/e1;
int o = n*_slice_stride;
int offset = b+n*_slice_block;
merge(rhs_v[so+o+b],pointers,offset);
});
#else
autoView( rhs_v , rhs, CpuWrite);
thread_for2d(n,e1,b,e2,{
int o = n*_slice_stride;
int offset = b+n*_slice_block;
merge(rhs_v[so+o+b],pointers,offset);
});
#endif
} else {
// Case of SIMD split AND checker dim cannot currently be hit, except in
// Test_cshift_red_black code.
std::cout << "Scatter_plane merge assert(0); think this is buggy FIXME "<< std::endl;// think this is buggy FIXME
std::cout<<" Unthreaded warning -- buffer is not densely packed ??"<<std::endl;
assert(0); // This will fail if hit on GPU
autoView( rhs_v, rhs, CpuWrite);
for(int n=0;n<e1;n++){
for(int b=0;b<e2;b++){
int o = n*rhs.Grid()->_slice_stride[dimension];
int offset = b+n*rhs.Grid()->_slice_block[dimension];
int ocb=1<<rhs.Grid()->CheckerBoardFromOindex(o+b);
if ( ocb&cbmask ) {
merge(rhs_v[so+o+b],pointers,offset);
}
}
}
}
}
//////////////////////////////////////////////////////
// local to node block strided copies
//////////////////////////////////////////////////////
template<class vobj> void Copy_plane(Lattice<vobj>& lhs,const Lattice<vobj> &rhs, int dimension,int lplane,int rplane,int cbmask)
{
int rd = rhs.Grid()->_rdimensions[dimension];
if ( !rhs.Grid()->CheckerBoarded(dimension) ) {
cbmask=0x3;
}
int ro = rplane*rhs.Grid()->_ostride[dimension]; // base offset for start of plane
int lo = lplane*lhs.Grid()->_ostride[dimension]; // base offset for start of plane
int e1=rhs.Grid()->_slice_nblock[dimension]; // clearly loop invariant for icpc
int e2=rhs.Grid()->_slice_block[dimension];
int stride = rhs.Grid()->_slice_stride[dimension];
if(Cshift_table.size()<e1*e2) Cshift_table.resize(e1*e2); // Let it grow to biggest
int ent=0;
if(cbmask == 0x3 ){
for(int n=0;n<e1;n++){
for(int b=0;b<e2;b++){
int o =n*stride+b;
Cshift_table[ent++] = std::pair<int,int>(lo+o,ro+o);
}
}
} else {
for(int n=0;n<e1;n++){
for(int b=0;b<e2;b++){
int o =n*stride+b;
int ocb=1<<lhs.Grid()->CheckerBoardFromOindex(o);
if ( ocb&cbmask ) {
Cshift_table[ent++] = std::pair<int,int>(lo+o,ro+o);
}
}
}
}
{
auto table = &Cshift_table[0];
#ifdef ACCELERATOR_CSHIFT
autoView(rhs_v , rhs, AcceleratorRead);
autoView(lhs_v , lhs, AcceleratorWrite);
accelerator_for(i,ent,vobj::Nsimd(),{
coalescedWrite(lhs_v[table[i].first],coalescedRead(rhs_v[table[i].second]));
});
#else
autoView(rhs_v , rhs, CpuRead);
autoView(lhs_v , lhs, CpuWrite);
thread_for(i,ent,{
lhs_v[table[i].first]=rhs_v[table[i].second];
});
#endif
}
}
template<class vobj> void Copy_plane_permute(Lattice<vobj>& lhs,const Lattice<vobj> &rhs, int dimension,int lplane,int rplane,int cbmask,int permute_type)
{
int rd = rhs.Grid()->_rdimensions[dimension];
if ( !rhs.Grid()->CheckerBoarded(dimension) ) {
cbmask=0x3;
}
int ro = rplane*rhs.Grid()->_ostride[dimension]; // base offset for start of plane
int lo = lplane*lhs.Grid()->_ostride[dimension]; // base offset for start of plane
int e1=rhs.Grid()->_slice_nblock[dimension];
int e2=rhs.Grid()->_slice_block [dimension];
int stride = rhs.Grid()->_slice_stride[dimension];
if(Cshift_table.size()<e1*e2) Cshift_table.resize(e1*e2); // Let it grow to biggest
int ent=0;
if ( cbmask == 0x3 ) {
for(int n=0;n<e1;n++){
for(int b=0;b<e2;b++){
int o =n*stride;
Cshift_table[ent++] = std::pair<int,int>(lo+o+b,ro+o+b);
}}
} else {
for(int n=0;n<e1;n++){
for(int b=0;b<e2;b++){
int o =n*stride;
int ocb=1<<lhs.Grid()->CheckerBoardFromOindex(o+b);
if ( ocb&cbmask ) Cshift_table[ent++] = std::pair<int,int>(lo+o+b,ro+o+b);
}}
}
{
auto table = &Cshift_table[0];
#ifdef ACCELERATOR_CSHIFT
autoView( rhs_v, rhs, AcceleratorRead);
autoView( lhs_v, lhs, AcceleratorWrite);
accelerator_for(i,ent,1,{
permute(lhs_v[table[i].first],rhs_v[table[i].second],permute_type);
});
#else
autoView( rhs_v, rhs, CpuRead);
autoView( lhs_v, lhs, CpuWrite);
thread_for(i,ent,{
permute(lhs_v[table[i].first],rhs_v[table[i].second],permute_type);
});
#endif
}
}
//////////////////////////////////////////////////////
// Local to node Cshift
//////////////////////////////////////////////////////
template<class vobj> void Cshift_local(Lattice<vobj>& ret,const Lattice<vobj> &rhs,int dimension,int shift)
{
int sshift[2];
sshift[0] = rhs.Grid()->CheckerBoardShiftForCB(rhs.Checkerboard(),dimension,shift,Even);
sshift[1] = rhs.Grid()->CheckerBoardShiftForCB(rhs.Checkerboard(),dimension,shift,Odd);
if ( sshift[0] == sshift[1] ) {
Cshift_local(ret,rhs,dimension,shift,0x3);
} else {
Cshift_local(ret,rhs,dimension,shift,0x1);// if checkerboard is unfavourable take two passes
Cshift_local(ret,rhs,dimension,shift,0x2);// both with block stride loop iteration
}
}
template<class vobj> void Cshift_local(Lattice<vobj> &ret,const Lattice<vobj> &rhs,int dimension,int shift,int cbmask)
{
GridBase *grid = rhs.Grid();
int fd = grid->_fdimensions[dimension];
int rd = grid->_rdimensions[dimension];
int ld = grid->_ldimensions[dimension];
int gd = grid->_gdimensions[dimension];
int ly = grid->_simd_layout[dimension];
// Map to always positive shift modulo global full dimension.
shift = (shift+fd)%fd;
// the permute type
ret.Checkerboard() = grid->CheckerBoardDestination(rhs.Checkerboard(),shift,dimension);
int permute_dim =grid->PermuteDim(dimension);
int permute_type=grid->PermuteType(dimension);
int permute_type_dist;
for(int x=0;x<rd;x++){
// int o = 0;
int bo = x * grid->_ostride[dimension];
int cb= (cbmask==0x2)? Odd : Even;
int sshift = grid->CheckerBoardShiftForCB(rhs.Checkerboard(),dimension,shift,cb);
int sx = (x+sshift)%rd;
// wrap is whether sshift > rd.
// num is sshift mod rd.
//
// shift 7
//
// XoXo YcYc
// oXoX cYcY
// XoXo YcYc
// oXoX cYcY
//
// sshift --
//
// XX YY ; 3
// XX YY ; 0
// XX YY ; 3
// XX YY ; 0
//
int permute_slice=0;
if(permute_dim){
int wrap = sshift/rd; wrap=wrap % ly;
int num = sshift%rd;
if ( x< rd-num ) permute_slice=wrap;
else permute_slice = (wrap+1)%ly;
if ( (ly>2) && (permute_slice) ) {
assert(permute_type & RotateBit);
permute_type_dist = permute_type|permute_slice;
} else {
permute_type_dist = permute_type;
}
}
if ( permute_slice ) Copy_plane_permute(ret,rhs,dimension,x,sx,cbmask,permute_type_dist);
else Copy_plane(ret,rhs,dimension,x,sx,cbmask);
}
}
NAMESPACE_END(Grid);
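
The simple gather/scatter and plane-copy routines above share a two-step pattern: first fill Cshift_table with (destination, source) index pairs for the selected plane, honouring the checkerboard mask, then perform one flat copy over the table with accelerator_for or thread_for. A minimal scalar sketch of that table-driven gather, outside Grid (illustrative names only):

// Table-driven plane gather: build index pairs, then do a flat (parallelisable) copy.
#include <cstddef>
#include <utility>
#include <vector>

template<class T>
void gather_plane_sketch(const std::vector<T> &field, std::vector<T> &buffer,
                         size_t so, size_t e1, size_t e2, size_t stride)
{
  std::vector<std::pair<size_t,size_t> > table;            // (buffer index, field index)
  table.reserve(e1*e2);
  for (size_t n = 0; n < e1; n++)
    for (size_t b = 0; b < e2; b++)
      table.emplace_back(n*e2 + b, so + n*stride + b);     // analogue of filling Cshift_table
  buffer.resize(e1*e2);
  for (auto &p : table) buffer[p.first] = field[p.second]; // flat copy; trivially parallel
}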

Grid/cshift/Cshift_mpi.h (new file, 467 lines)

@@ -0,0 +1,467 @@
/*************************************************************************************
Grid physics library, www.github.com/paboyle/Grid
Source file: ./lib/cshift/Cshift_mpi.h
Copyright (C) 2015
Author: Peter Boyle <paboyle@ph.ed.ac.uk>
Author: paboyle <paboyle@ph.ed.ac.uk>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */
#ifndef _GRID_CSHIFT_MPI_H_
#define _GRID_CSHIFT_MPI_H_
NAMESPACE_BEGIN(Grid);
template<class vobj> Lattice<vobj> Cshift(const Lattice<vobj> &rhs,int dimension,int shift)
{
typedef typename vobj::vector_type vector_type;
typedef typename vobj::scalar_type scalar_type;
Lattice<vobj> ret(rhs.Grid());
int fd = rhs.Grid()->_fdimensions[dimension];
int rd = rhs.Grid()->_rdimensions[dimension];
// Map to always positive shift modulo global full dimension.
shift = (shift+fd)%fd;
ret.Checkerboard() = rhs.Grid()->CheckerBoardDestination(rhs.Checkerboard(),shift,dimension);
// the permute type
int simd_layout = rhs.Grid()->_simd_layout[dimension];
int comm_dim = rhs.Grid()->_processors[dimension] >1 ;
int splice_dim = rhs.Grid()->_simd_layout[dimension]>1 && (comm_dim);
if ( !comm_dim ) {
//std::cout << "CSHIFT: Cshift_local" <<std::endl;
Cshift_local(ret,rhs,dimension,shift); // Handles checkerboarding
} else if ( splice_dim ) {
//std::cout << "CSHIFT: Cshift_comms_simd call - splice_dim = " << splice_dim << " shift " << shift << " dimension = " << dimension << std::endl;
Cshift_comms_simd(ret,rhs,dimension,shift);
} else {
//std::cout << "CSHIFT: Cshift_comms" <<std::endl;
Cshift_comms(ret,rhs,dimension,shift);
}
return ret;
}
template<class vobj> void Cshift_comms(Lattice<vobj>& ret,const Lattice<vobj> &rhs,int dimension,int shift)
{
int sshift[2];
sshift[0] = rhs.Grid()->CheckerBoardShiftForCB(rhs.Checkerboard(),dimension,shift,Even);
sshift[1] = rhs.Grid()->CheckerBoardShiftForCB(rhs.Checkerboard(),dimension,shift,Odd);
// std::cout << "Cshift_comms dim "<<dimension<<"cb "<<rhs.Checkerboard()<<"shift "<<shift<<" sshift " << sshift[0]<<" "<<sshift[1]<<std::endl;
if ( sshift[0] == sshift[1] ) {
// std::cout << "Single pass Cshift_comms" <<std::endl;
Cshift_comms(ret,rhs,dimension,shift,0x3);
} else {
// std::cout << "Two pass Cshift_comms" <<std::endl;
Cshift_comms(ret,rhs,dimension,shift,0x1);// if checkerboard is unfavourable take two passes
Cshift_comms(ret,rhs,dimension,shift,0x2);// both with block stride loop iteration
}
}
template<class vobj> void Cshift_comms_simd(Lattice<vobj>& ret,const Lattice<vobj> &rhs,int dimension,int shift)
{
int sshift[2];
sshift[0] = rhs.Grid()->CheckerBoardShiftForCB(rhs.Checkerboard(),dimension,shift,Even);
sshift[1] = rhs.Grid()->CheckerBoardShiftForCB(rhs.Checkerboard(),dimension,shift,Odd);
//std::cout << "Cshift_comms_simd dim "<<dimension<<"cb "<<rhs.checkerboard<<"shift "<<shift<<" sshift " << sshift[0]<<" "<<sshift[1]<<std::endl;
if ( sshift[0] == sshift[1] ) {
//std::cout << "Single pass Cshift_comms" <<std::endl;
Cshift_comms_simd(ret,rhs,dimension,shift,0x3);
} else {
//std::cout << "Two pass Cshift_comms" <<std::endl;
Cshift_comms_simd(ret,rhs,dimension,shift,0x1);// if checkerboard is unfavourable take two passes
Cshift_comms_simd(ret,rhs,dimension,shift,0x2);// both with block stride loop iteration
}
}
#define ACCELERATOR_CSHIFT_NO_COPY
#ifdef ACCELERATOR_CSHIFT_NO_COPY
template<class vobj> void Cshift_comms(Lattice<vobj> &ret,const Lattice<vobj> &rhs,int dimension,int shift,int cbmask)
{
typedef typename vobj::vector_type vector_type;
typedef typename vobj::scalar_type scalar_type;
GridBase *grid=rhs.Grid();
Lattice<vobj> temp(rhs.Grid());
int fd = rhs.Grid()->_fdimensions[dimension];
int rd = rhs.Grid()->_rdimensions[dimension];
int pd = rhs.Grid()->_processors[dimension];
int simd_layout = rhs.Grid()->_simd_layout[dimension];
int comm_dim = rhs.Grid()->_processors[dimension] >1 ;
assert(simd_layout==1);
assert(comm_dim==1);
assert(shift>=0);
assert(shift<fd);
int buffer_size = rhs.Grid()->_slice_nblock[dimension]*rhs.Grid()->_slice_block[dimension];
static cshiftVector<vobj> send_buf; send_buf.resize(buffer_size);
static cshiftVector<vobj> recv_buf; recv_buf.resize(buffer_size);
int cb= (cbmask==0x2)? Odd : Even;
int sshift= rhs.Grid()->CheckerBoardShiftForCB(rhs.Checkerboard(),dimension,shift,cb);
for(int x=0;x<rd;x++){
int sx = (x+sshift)%rd;
int comm_proc = ((x+sshift)/rd)%pd;
if (comm_proc==0) {
Copy_plane(ret,rhs,dimension,x,sx,cbmask);
} else {
int words = buffer_size;
if (cbmask != 0x3) words=words>>1;
int bytes = words * sizeof(vobj);
Gather_plane_simple (rhs,send_buf,dimension,sx,cbmask);
// int rank = grid->_processor;
int recv_from_rank;
int xmit_to_rank;
grid->ShiftedRanks(dimension,comm_proc,xmit_to_rank,recv_from_rank);
grid->Barrier();
grid->SendToRecvFrom((void *)&send_buf[0],
xmit_to_rank,
(void *)&recv_buf[0],
recv_from_rank,
bytes);
grid->Barrier();
Scatter_plane_simple (ret,recv_buf,dimension,x,cbmask);
}
}
}
template<class vobj> void Cshift_comms_simd(Lattice<vobj> &ret,const Lattice<vobj> &rhs,int dimension,int shift,int cbmask)
{
GridBase *grid=rhs.Grid();
const int Nsimd = grid->Nsimd();
typedef typename vobj::vector_type vector_type;
typedef typename vobj::scalar_object scalar_object;
typedef typename vobj::scalar_type scalar_type;
int fd = grid->_fdimensions[dimension];
int rd = grid->_rdimensions[dimension];
int ld = grid->_ldimensions[dimension];
int pd = grid->_processors[dimension];
int simd_layout = grid->_simd_layout[dimension];
int comm_dim = grid->_processors[dimension] >1 ;
//std::cout << "Cshift_comms_simd dim "<< dimension << " fd "<<fd<<" rd "<<rd
// << " ld "<<ld<<" pd " << pd<<" simd_layout "<<simd_layout
// << " comm_dim " << comm_dim << " cbmask " << cbmask <<std::endl;
assert(comm_dim==1);
assert(simd_layout==2);
assert(shift>=0);
assert(shift<fd);
int permute_type=grid->PermuteType(dimension);
///////////////////////////////////////////////
// Simd direction uses an extract/merge pair
///////////////////////////////////////////////
int buffer_size = grid->_slice_nblock[dimension]*grid->_slice_block[dimension];
// int words = sizeof(vobj)/sizeof(vector_type);
static std::vector<cshiftVector<scalar_object> > send_buf_extract; send_buf_extract.resize(Nsimd);
static std::vector<cshiftVector<scalar_object> > recv_buf_extract; recv_buf_extract.resize(Nsimd);
scalar_object * recv_buf_extract_mpi;
scalar_object * send_buf_extract_mpi;
for(int s=0;s<Nsimd;s++){
send_buf_extract[s].resize(buffer_size);
recv_buf_extract[s].resize(buffer_size);
}
int bytes = buffer_size*sizeof(scalar_object);
ExtractPointerArray<scalar_object> pointers(Nsimd); //
ExtractPointerArray<scalar_object> rpointers(Nsimd); // received pointers
///////////////////////////////////////////
// Work out what to send where
///////////////////////////////////////////
int cb = (cbmask==0x2)? Odd : Even;
int sshift= grid->CheckerBoardShiftForCB(rhs.Checkerboard(),dimension,shift,cb);
// loop over outer coord planes orthog to dim
for(int x=0;x<rd;x++){
// FIXME call local permute copy if none are offnode.
for(int i=0;i<Nsimd;i++){
pointers[i] = &send_buf_extract[i][0];
}
int sx = (x+sshift)%rd;
Gather_plane_extract(rhs,pointers,dimension,sx,cbmask);
for(int i=0;i<Nsimd;i++){
int inner_bit = (Nsimd>>(permute_type+1));
int ic= (i&inner_bit)? 1:0;
int my_coor = rd*ic + x;
int nbr_coor = my_coor+sshift;
int nbr_proc = ((nbr_coor)/ld) % pd;// relative shift in processors
int nbr_ic = (nbr_coor%ld)/rd; // inner coord of peer
int nbr_ox = (nbr_coor%rd); // outer coord of peer
int nbr_lane = (i&(~inner_bit));
int recv_from_rank;
int xmit_to_rank;
if (nbr_ic) nbr_lane|=inner_bit;
assert (sx == nbr_ox);
if(nbr_proc){
grid->ShiftedRanks(dimension,nbr_proc,xmit_to_rank,recv_from_rank);
grid->Barrier();
send_buf_extract_mpi = &send_buf_extract[nbr_lane][0];
recv_buf_extract_mpi = &recv_buf_extract[i][0];
grid->SendToRecvFrom((void *)send_buf_extract_mpi,
xmit_to_rank,
(void *)recv_buf_extract_mpi,
recv_from_rank,
bytes);
grid->Barrier();
rpointers[i] = &recv_buf_extract[i][0];
} else {
rpointers[i] = &send_buf_extract[nbr_lane][0];
}
}
Scatter_plane_merge(ret,rpointers,dimension,x,cbmask);
}
}
#else
template<class vobj> void Cshift_comms(Lattice<vobj> &ret,const Lattice<vobj> &rhs,int dimension,int shift,int cbmask)
{
typedef typename vobj::vector_type vector_type;
typedef typename vobj::scalar_type scalar_type;
GridBase *grid=rhs.Grid();
Lattice<vobj> temp(rhs.Grid());
int fd = rhs.Grid()->_fdimensions[dimension];
int rd = rhs.Grid()->_rdimensions[dimension];
int pd = rhs.Grid()->_processors[dimension];
int simd_layout = rhs.Grid()->_simd_layout[dimension];
int comm_dim = rhs.Grid()->_processors[dimension] >1 ;
assert(simd_layout==1);
assert(comm_dim==1);
assert(shift>=0);
assert(shift<fd);
int buffer_size = rhs.Grid()->_slice_nblock[dimension]*rhs.Grid()->_slice_block[dimension];
static cshiftVector<vobj> send_buf_v; send_buf_v.resize(buffer_size);
static cshiftVector<vobj> recv_buf_v; recv_buf_v.resize(buffer_size);
vobj *send_buf;
vobj *recv_buf;
{
grid->ShmBufferFreeAll();
size_t bytes = buffer_size*sizeof(vobj);
send_buf=(vobj *)grid->ShmBufferMalloc(bytes);
recv_buf=(vobj *)grid->ShmBufferMalloc(bytes);
}
int cb= (cbmask==0x2)? Odd : Even;
int sshift= rhs.Grid()->CheckerBoardShiftForCB(rhs.Checkerboard(),dimension,shift,cb);
for(int x=0;x<rd;x++){
int sx = (x+sshift)%rd;
int comm_proc = ((x+sshift)/rd)%pd;
if (comm_proc==0) {
Copy_plane(ret,rhs,dimension,x,sx,cbmask);
} else {
int words = buffer_size;
if (cbmask != 0x3) words=words>>1;
int bytes = words * sizeof(vobj);
Gather_plane_simple (rhs,send_buf_v,dimension,sx,cbmask);
// int rank = grid->_processor;
int recv_from_rank;
int xmit_to_rank;
grid->ShiftedRanks(dimension,comm_proc,xmit_to_rank,recv_from_rank);
grid->Barrier();
acceleratorCopyDeviceToDevice((void *)&send_buf_v[0],(void *)&send_buf[0],bytes);
grid->SendToRecvFrom((void *)&send_buf[0],
xmit_to_rank,
(void *)&recv_buf[0],
recv_from_rank,
bytes);
acceleratorCopyDeviceToDevice((void *)&recv_buf[0],(void *)&recv_buf_v[0],bytes);
grid->Barrier();
Scatter_plane_simple (ret,recv_buf_v,dimension,x,cbmask);
}
}
}
template<class vobj> void Cshift_comms_simd(Lattice<vobj> &ret,const Lattice<vobj> &rhs,int dimension,int shift,int cbmask)
{
GridBase *grid=rhs.Grid();
const int Nsimd = grid->Nsimd();
typedef typename vobj::vector_type vector_type;
typedef typename vobj::scalar_object scalar_object;
typedef typename vobj::scalar_type scalar_type;
int fd = grid->_fdimensions[dimension];
int rd = grid->_rdimensions[dimension];
int ld = grid->_ldimensions[dimension];
int pd = grid->_processors[dimension];
int simd_layout = grid->_simd_layout[dimension];
int comm_dim = grid->_processors[dimension] >1 ;
//std::cout << "Cshift_comms_simd dim "<< dimension << " fd "<<fd<<" rd "<<rd
// << " ld "<<ld<<" pd " << pd<<" simd_layout "<<simd_layout
// << " comm_dim " << comm_dim << " cbmask " << cbmask <<std::endl;
assert(comm_dim==1);
assert(simd_layout==2);
assert(shift>=0);
assert(shift<fd);
int permute_type=grid->PermuteType(dimension);
///////////////////////////////////////////////
// Simd direction uses an extract/merge pair
///////////////////////////////////////////////
int buffer_size = grid->_slice_nblock[dimension]*grid->_slice_block[dimension];
// int words = sizeof(vobj)/sizeof(vector_type);
static std::vector<cshiftVector<scalar_object> > send_buf_extract; send_buf_extract.resize(Nsimd);
static std::vector<cshiftVector<scalar_object> > recv_buf_extract; recv_buf_extract.resize(Nsimd);
scalar_object * recv_buf_extract_mpi;
scalar_object * send_buf_extract_mpi;
{
size_t bytes = sizeof(scalar_object)*buffer_size;
grid->ShmBufferFreeAll();
send_buf_extract_mpi = (scalar_object *)grid->ShmBufferMalloc(bytes);
recv_buf_extract_mpi = (scalar_object *)grid->ShmBufferMalloc(bytes);
}
for(int s=0;s<Nsimd;s++){
send_buf_extract[s].resize(buffer_size);
recv_buf_extract[s].resize(buffer_size);
}
int bytes = buffer_size*sizeof(scalar_object);
ExtractPointerArray<scalar_object> pointers(Nsimd); //
ExtractPointerArray<scalar_object> rpointers(Nsimd); // received pointers
///////////////////////////////////////////
// Work out what to send where
///////////////////////////////////////////
int cb = (cbmask==0x2)? Odd : Even;
int sshift= grid->CheckerBoardShiftForCB(rhs.Checkerboard(),dimension,shift,cb);
// loop over outer coord planes orthog to dim
for(int x=0;x<rd;x++){
// FIXME call local permute copy if none are offnode.
for(int i=0;i<Nsimd;i++){
pointers[i] = &send_buf_extract[i][0];
}
int sx = (x+sshift)%rd;
Gather_plane_extract(rhs,pointers,dimension,sx,cbmask);
for(int i=0;i<Nsimd;i++){
int inner_bit = (Nsimd>>(permute_type+1));
int ic= (i&inner_bit)? 1:0;
int my_coor = rd*ic + x;
int nbr_coor = my_coor+sshift;
int nbr_proc = ((nbr_coor)/ld) % pd;// relative shift in processors
int nbr_ic = (nbr_coor%ld)/rd; // inner coord of peer
int nbr_ox = (nbr_coor%rd); // outer coord of peer
int nbr_lane = (i&(~inner_bit));
int recv_from_rank;
int xmit_to_rank;
if (nbr_ic) nbr_lane|=inner_bit;
assert (sx == nbr_ox);
if(nbr_proc){
grid->ShiftedRanks(dimension,nbr_proc,xmit_to_rank,recv_from_rank);
grid->Barrier();
acceleratorCopyDeviceToDevice((void *)&send_buf_extract[nbr_lane][0],(void *)send_buf_extract_mpi,bytes);
grid->SendToRecvFrom((void *)send_buf_extract_mpi,
xmit_to_rank,
(void *)recv_buf_extract_mpi,
recv_from_rank,
bytes);
acceleratorCopyDeviceToDevice((void *)recv_buf_extract_mpi,(void *)&recv_buf_extract[i][0],bytes);
grid->Barrier();
rpointers[i] = &recv_buf_extract[i][0];
} else {
rpointers[i] = &send_buf_extract[nbr_lane][0];
}
}
Scatter_plane_merge(ret,rpointers,dimension,x,cbmask);
}
}
#endif
NAMESPACE_END(Grid);
#endif
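
The Cshift_comms routines above share one skeleton per outer plane: if the source plane is on-node, Copy_plane; otherwise gather the plane into a send buffer, exchange it with the neighbour rank obtained from ShiftedRanks, and scatter the received plane into the result. A minimal MPI analogue of that exchange step on a 1-D ring, outside Grid (illustrative, not Grid API):

// Ring exchange sketch: each rank sends its block "up" and receives from "below".
#include <mpi.h>
#include <vector>

void ring_shift_sketch(std::vector<double> &block, MPI_Comm comm)
{
  int rank, size;
  MPI_Comm_rank(comm, &rank);
  MPI_Comm_size(comm, &size);
  int dest = (rank + 1) % size;                 // analogue of xmit_to_rank
  int src  = (rank - 1 + size) % size;          // analogue of recv_from_rank
  std::vector<double> recv(block.size());
  MPI_Sendrecv(block.data(), (int)block.size(), MPI_DOUBLE, dest, 0,
               recv.data(),  (int)recv.size(),  MPI_DOUBLE, src,  0,
               comm, MPI_STATUS_IGNORE);
  block.swap(recv);                             // analogue of Scatter_plane_simple into ret
}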

@@ -1,4 +1,4 @@
-/*************************************************************************************
+/*************************************************************************************
 Grid physics library, www.github.com/paboyle/Grid
@@ -23,17 +23,18 @@ Author: Peter Boyle <paboyle@ph.ed.ac.uk>
 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 See the full license in the file "LICENSE" in the top level distribution directory
-*************************************************************************************/
-/* END LEGAL */
+*************************************************************************************/
+/* END LEGAL */
 #ifndef _GRID_CSHIFT_NONE_H_
 #define _GRID_CSHIFT_NONE_H_
-namespace Grid {
+NAMESPACE_BEGIN(Grid);
 template<class vobj> Lattice<vobj> Cshift(const Lattice<vobj> &rhs,int dimension,int shift)
 {
-  Lattice<vobj> ret(rhs._grid);
-  ret.checkerboard = rhs._grid->CheckerBoardDestination(rhs.checkerboard,shift,dimension);
+  Lattice<vobj> ret(rhs.Grid());
+  ret.Checkerboard() = rhs.Grid()->CheckerBoardDestination(rhs.Checkerboard(),shift,dimension);
 Cshift_local(ret,rhs,dimension,shift);
 return ret;
 }
-}
+NAMESPACE_END(Grid);
 #endif

@@ -0,0 +1,4 @@
#include <Grid/GridCore.h>
NAMESPACE_BEGIN(Grid);
Vector<std::pair<int,int> > Cshift_table;
NAMESPACE_END(Grid);

Grid/json/json.hpp (new file, 18922 lines)

File diff suppressed because it is too large.

@@ -2,13 +2,11 @@
 Grid physics library, www.github.com/paboyle/Grid
-Source file: ./lib/Tensors.h
+Source file: ./lib/Lattice.h
 Copyright (C) 2015
 Author: Azusa Yamaguchi <ayamaguc@staffmail.ed.ac.uk>
 Author: Peter Boyle <paboyle@ph.ed.ac.uk>
 Author: neo <cossu@post.kek.jp>
 This program is free software; you can redistribute it and/or modify
 it under the terms of the GNU General Public License as published by
@@ -27,25 +25,24 @@ Author: neo <cossu@post.kek.jp>
 See the full license in the file "LICENSE" in the top level distribution directory
 *************************************************************************************/
 /* END LEGAL */
-#ifndef GRID_MATH_H
-#define GRID_MATH_H
-#include <tensors/Tensor_traits.h>
-#include <tensors/Tensor_class.h>
-#include <tensors/Tensor_arith.h>
-#include <tensors/Tensor_inner.h>
-#include <tensors/Tensor_outer.h>
-#include <tensors/Tensor_transpose.h>
-#include <tensors/Tensor_trace.h>
-#include <tensors/Tensor_index.h>
-#include <tensors/Tensor_Ta.h>
-#include <tensors/Tensor_determinant.h>
-#include <tensors/Tensor_exp.h>
-//#include <tensors/Tensor_peek.h>
-//#include <tensors/Tensor_poke.h>
-#include <tensors/Tensor_reality.h>
-#include <tensors/Tensor_unary.h>
-#include <tensors/Tensor_extract_merge.h>
-#include <tensors/Tensor_logical.h>
-#endif
+#pragma once
+#include <Grid/lattice/Lattice_view.h>
+#include <Grid/lattice/Lattice_base.h>
+#include <Grid/lattice/Lattice_conformable.h>
+#include <Grid/lattice/Lattice_ET.h>
+#include <Grid/lattice/Lattice_arith.h>
+#include <Grid/lattice/Lattice_trace.h>
+#include <Grid/lattice/Lattice_transpose.h>
+#include <Grid/lattice/Lattice_local.h>
+#include <Grid/lattice/Lattice_reduction.h>
+#include <Grid/lattice/Lattice_peekpoke.h>
+#include <Grid/lattice/Lattice_reality.h>
+#include <Grid/lattice/Lattice_real_imag.h>
+#include <Grid/lattice/Lattice_comparison_utils.h>
+#include <Grid/lattice/Lattice_comparison.h>
+#include <Grid/lattice/Lattice_coordinate.h>
+//#include <Grid/lattice/Lattice_where.h>
+#include <Grid/lattice/Lattice_rng.h>
+#include <Grid/lattice/Lattice_unary.h>
+#include <Grid/lattice/Lattice_transfer.h>
+#include <Grid/lattice/Lattice_basis.h>

Grid/lattice/Lattice_ET.h (new file, 530 lines)

@@ -0,0 +1,530 @@
/*************************************************************************************
Grid physics library, www.github.com/paboyle/Grid
Source file: ./lib/lattice/Lattice_ET.h
Copyright (C) 2015
Author: Azusa Yamaguchi <ayamaguc@staffmail.ed.ac.uk>
Author: Peter Boyle <paboyle@ph.ed.ac.uk>
Author: neo <cossu@post.kek.jp>
Author: Christoph Lehner <christoph@lhnr.de>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
See the full license in the file "LICENSE" in the top level distribution
directory
*************************************************************************************/
/* END LEGAL */
#ifndef GRID_LATTICE_ET_H
#define GRID_LATTICE_ET_H
#include <iostream>
#include <tuple>
#include <typeinfo>
#include <vector>
NAMESPACE_BEGIN(Grid);
////////////////////////////////////////////////////
// Predicated where support
////////////////////////////////////////////////////
#ifdef GRID_SIMT
// drop to scalar in SIMT; cleaner in fact
template <class iobj, class vobj, class robj>
accelerator_inline vobj predicatedWhere(const iobj &predicate,
const vobj &iftrue,
const robj &iffalse)
{
Integer mask = TensorRemove(predicate);
typename std::remove_const<vobj>::type ret= iffalse;
if (mask) ret=iftrue;
return ret;
}
#else
template <class iobj, class vobj, class robj>
accelerator_inline vobj predicatedWhere(const iobj &predicate,
const vobj &iftrue,
const robj &iffalse)
{
typename std::remove_const<vobj>::type ret;
typedef typename vobj::scalar_object scalar_object;
typedef typename vobj::scalar_type scalar_type;
typedef typename vobj::vector_type vector_type;
const int Nsimd = vobj::vector_type::Nsimd();
ExtractBuffer<Integer> mask(Nsimd);
ExtractBuffer<scalar_object> truevals(Nsimd);
ExtractBuffer<scalar_object> falsevals(Nsimd);
extract(iftrue, truevals);
extract(iffalse, falsevals);
extract<vInteger, Integer>(TensorRemove(predicate), mask);
for (int s = 0; s < Nsimd; s++) {
if (mask[s]) falsevals[s] = truevals[s];
}
merge(ret, falsevals);
return ret;
}
#endif
/////////////////////////////////////////////////////
//Specialization of getVectorType for lattices
/////////////////////////////////////////////////////
template<typename T>
struct getVectorType<Lattice<T> >{
typedef typename Lattice<T>::vector_object type;
};
////////////////////////////////////////////
//-- recursive evaluation of expressions; --
// handle leaves of syntax tree
///////////////////////////////////////////////////
template<class sobj,
typename std::enable_if<!is_lattice<sobj>::value&&!is_lattice_expr<sobj>::value,sobj>::type * = nullptr>
accelerator_inline
sobj eval(const uint64_t ss, const sobj &arg)
{
return arg;
}
template <class lobj> accelerator_inline
auto eval(const uint64_t ss, const LatticeView<lobj> &arg) -> decltype(arg(ss))
{
return arg(ss);
}
////////////////////////////////////////////
//-- recursive evaluation of expressions; --
// whole vector return, used only for expression return type inference
///////////////////////////////////////////////////
template<class sobj> accelerator_inline
sobj vecEval(const uint64_t ss, const sobj &arg)
{
return arg;
}
template <class lobj> accelerator_inline
const lobj & vecEval(const uint64_t ss, const LatticeView<lobj> &arg)
{
return arg[ss];
}
///////////////////////////////////////////////////
// handle nodes in syntax tree- eval one operand
// vecEval needed (but never called as all expressions offloaded) to infer the return type
// in SIMT contexts of closure.
///////////////////////////////////////////////////
template <typename Op, typename T1> accelerator_inline
auto vecEval(const uint64_t ss, const LatticeUnaryExpression<Op, T1> &expr)
-> decltype(expr.op.func( vecEval(ss, expr.arg1)))
{
return expr.op.func( vecEval(ss, expr.arg1) );
}
// vecEval two operands
template <typename Op, typename T1, typename T2> accelerator_inline
auto vecEval(const uint64_t ss, const LatticeBinaryExpression<Op, T1, T2> &expr)
-> decltype(expr.op.func( vecEval(ss,expr.arg1),vecEval(ss,expr.arg2)))
{
return expr.op.func( vecEval(ss,expr.arg1), vecEval(ss,expr.arg2) );
}
// vecEval three operands
template <typename Op, typename T1, typename T2, typename T3> accelerator_inline
auto vecEval(const uint64_t ss, const LatticeTrinaryExpression<Op, T1, T2, T3> &expr)
-> decltype(expr.op.func(vecEval(ss, expr.arg1), vecEval(ss, expr.arg2), vecEval(ss, expr.arg3)))
{
return expr.op.func(vecEval(ss, expr.arg1), vecEval(ss, expr.arg2), vecEval(ss, expr.arg3));
}
///////////////////////////////////////////////////
// handle nodes in syntax tree- eval one operand coalesced
///////////////////////////////////////////////////
template <typename Op, typename T1> accelerator_inline
auto eval(const uint64_t ss, const LatticeUnaryExpression<Op, T1> &expr)
-> decltype(expr.op.func( eval(ss, expr.arg1)))
{
return expr.op.func( eval(ss, expr.arg1) );
}
// eval two operands
template <typename Op, typename T1, typename T2> accelerator_inline
auto eval(const uint64_t ss, const LatticeBinaryExpression<Op, T1, T2> &expr)
-> decltype(expr.op.func( eval(ss,expr.arg1),eval(ss,expr.arg2)))
{
return expr.op.func( eval(ss,expr.arg1), eval(ss,expr.arg2) );
}
// eval three operands
template <typename Op, typename T1, typename T2, typename T3> accelerator_inline
auto eval(const uint64_t ss, const LatticeTrinaryExpression<Op, T1, T2, T3> &expr)
-> decltype(expr.op.func(eval(ss, expr.arg1),
eval(ss, expr.arg2),
eval(ss, expr.arg3)))
{
#ifdef GRID_SIMT
// Handles Nsimd (vInteger) != Nsimd(ComplexD)
typedef decltype(vecEval(ss, expr.arg2)) rvobj;
typedef typename std::remove_reference<rvobj>::type vobj;
const int Nsimd = vobj::vector_type::Nsimd();
auto vpred = vecEval(ss,expr.arg1);
ExtractBuffer<Integer> mask(Nsimd);
extract<vInteger, Integer>(TensorRemove(vpred), mask);
int s = acceleratorSIMTlane(Nsimd);
return expr.op.func(mask[s],
eval(ss, expr.arg2),
eval(ss, expr.arg3));
#else
return expr.op.func(eval(ss, expr.arg1),
eval(ss, expr.arg2),
eval(ss, expr.arg3));
#endif
}
//////////////////////////////////////////////////////////////////////////
// Obtain the grid from an expression, ensuring conformable. This must follow a
// tree recursion; must retain grid pointer in the LatticeView class which sucks
// Use a different method, and make it void *.
// Perhaps a conformable method.
//////////////////////////////////////////////////////////////////////////
template <class T1,typename std::enable_if<is_lattice<T1>::value, T1>::type * = nullptr>
accelerator_inline void GridFromExpression(GridBase *&grid, const T1 &lat) // Lattice leaf
{
lat.Conformable(grid);
}
template <class T1,typename std::enable_if<!is_lattice<T1>::value, T1>::type * = nullptr>
accelerator_inline
void GridFromExpression(GridBase *&grid,const T1 &notlat) // non-lattice leaf
{}
template <typename Op, typename T1>
accelerator_inline
void GridFromExpression(GridBase *&grid,const LatticeUnaryExpression<Op, T1> &expr)
{
GridFromExpression(grid, expr.arg1); // recurse
}
template <typename Op, typename T1, typename T2>
accelerator_inline
void GridFromExpression(GridBase *&grid, const LatticeBinaryExpression<Op, T1, T2> &expr)
{
GridFromExpression(grid, expr.arg1); // recurse
GridFromExpression(grid, expr.arg2);
}
template <typename Op, typename T1, typename T2, typename T3>
accelerator_inline
void GridFromExpression(GridBase *&grid, const LatticeTrinaryExpression<Op, T1, T2, T3> &expr)
{
GridFromExpression(grid, expr.arg1); // recurse
GridFromExpression(grid, expr.arg2); // recurse
GridFromExpression(grid, expr.arg3); // recurse
}
//////////////////////////////////////////////////////////////////////////
// Obtain the CB from an expression, ensuring conformable. This must follow a
// tree recursion
//////////////////////////////////////////////////////////////////////////
template <class T1,typename std::enable_if<is_lattice<T1>::value, T1>::type * = nullptr>
inline void CBFromExpression(int &cb, const T1 &lat) // Lattice leaf
{
if ((cb == Odd) || (cb == Even)) {
assert(cb == lat.Checkerboard());
}
cb = lat.Checkerboard();
}
template <class T1,typename std::enable_if<!is_lattice<T1>::value, T1>::type * = nullptr>
inline void CBFromExpression(int &cb, const T1 &notlat) {} // non-lattice leaf
template <typename Op, typename T1> inline
void CBFromExpression(int &cb,const LatticeUnaryExpression<Op, T1> &expr)
{
CBFromExpression(cb, expr.arg1); // recurse AST
}
template <typename Op, typename T1, typename T2> inline
void CBFromExpression(int &cb,const LatticeBinaryExpression<Op, T1, T2> &expr)
{
CBFromExpression(cb, expr.arg1); // recurse AST
CBFromExpression(cb, expr.arg2); // recurse AST
}
template <typename Op, typename T1, typename T2, typename T3>
inline void CBFromExpression(int &cb, const LatticeTrinaryExpression<Op, T1, T2, T3> &expr)
{
CBFromExpression(cb, expr.arg1); // recurse AST
CBFromExpression(cb, expr.arg2); // recurse AST
CBFromExpression(cb, expr.arg3); // recurse AST
}
//////////////////////////////////////////////////////////////////////////
// ViewOpen
//////////////////////////////////////////////////////////////////////////
template <class T1,typename std::enable_if<is_lattice<T1>::value, T1>::type * = nullptr>
inline void ExpressionViewOpen(T1 &lat) // Lattice leaf
{
lat.ViewOpen(AcceleratorRead);
}
template <class T1,typename std::enable_if<!is_lattice<T1>::value, T1>::type * = nullptr>
inline void ExpressionViewOpen(T1 &notlat) {}
template <typename Op, typename T1> inline
void ExpressionViewOpen(LatticeUnaryExpression<Op, T1> &expr)
{
ExpressionViewOpen(expr.arg1); // recurse AST
}
template <typename Op, typename T1, typename T2> inline
void ExpressionViewOpen(LatticeBinaryExpression<Op, T1, T2> &expr)
{
ExpressionViewOpen(expr.arg1); // recurse AST
ExpressionViewOpen(expr.arg2); // recurse AST
}
template <typename Op, typename T1, typename T2, typename T3>
inline void ExpressionViewOpen(LatticeTrinaryExpression<Op, T1, T2, T3> &expr)
{
ExpressionViewOpen(expr.arg1); // recurse AST
ExpressionViewOpen(expr.arg2); // recurse AST
ExpressionViewOpen(expr.arg3); // recurse AST
}
//////////////////////////////////////////////////////////////////////////
// ViewClose
//////////////////////////////////////////////////////////////////////////
template <class T1,typename std::enable_if<is_lattice<T1>::value, T1>::type * = nullptr>
inline void ExpressionViewClose( T1 &lat) // Lattice leaf
{
lat.ViewClose();
}
template <class T1,typename std::enable_if<!is_lattice<T1>::value, T1>::type * = nullptr>
inline void ExpressionViewClose(T1 &notlat) {}
template <typename Op, typename T1> inline
void ExpressionViewClose(LatticeUnaryExpression<Op, T1> &expr)
{
ExpressionViewClose(expr.arg1); // recurse AST
}
template <typename Op, typename T1, typename T2> inline
void ExpressionViewClose(LatticeBinaryExpression<Op, T1, T2> &expr)
{
ExpressionViewClose(expr.arg1); // recurse AST
ExpressionViewClose(expr.arg2); // recurse AST
}
template <typename Op, typename T1, typename T2, typename T3>
inline void ExpressionViewClose(LatticeTrinaryExpression<Op, T1, T2, T3> &expr)
{
ExpressionViewClose(expr.arg1); // recurse AST
ExpressionViewClose(expr.arg2); // recurse AST
ExpressionViewClose(expr.arg3); // recurse AST
}
////////////////////////////////////////////
// Unary operators and funcs
////////////////////////////////////////////
#define GridUnopClass(name, ret) \
struct name { \
template<class _arg> static auto accelerator_inline func(const _arg a) -> decltype(ret) { return ret; } \
};
GridUnopClass(UnarySub, -a);
GridUnopClass(UnaryNot, Not(a));
GridUnopClass(UnaryTrace, trace(a));
GridUnopClass(UnaryTranspose, transpose(a));
GridUnopClass(UnaryTa, Ta(a));
GridUnopClass(UnaryProjectOnGroup, ProjectOnGroup(a));
GridUnopClass(UnaryTimesI, timesI(a));
GridUnopClass(UnaryTimesMinusI, timesMinusI(a));
GridUnopClass(UnaryAbs, abs(a));
GridUnopClass(UnarySqrt, sqrt(a));
GridUnopClass(UnarySin, sin(a));
GridUnopClass(UnaryCos, cos(a));
GridUnopClass(UnaryAsin, asin(a));
GridUnopClass(UnaryAcos, acos(a));
GridUnopClass(UnaryLog, log(a));
GridUnopClass(UnaryExp, exp(a));
////////////////////////////////////////////
// Binary operators
////////////////////////////////////////////
#define GridBinOpClass(name, combination) \
struct name { \
template <class _left, class _right> \
static auto accelerator_inline \
func(const _left &lhs, const _right &rhs) \
-> decltype(combination) const \
{ \
return combination; \
} \
};
GridBinOpClass(BinaryAdd, lhs + rhs);
GridBinOpClass(BinarySub, lhs - rhs);
GridBinOpClass(BinaryMul, lhs *rhs);
GridBinOpClass(BinaryDiv, lhs /rhs);
GridBinOpClass(BinaryAnd, lhs &rhs);
GridBinOpClass(BinaryOr, lhs | rhs);
GridBinOpClass(BinaryAndAnd, lhs &&rhs);
GridBinOpClass(BinaryOrOr, lhs || rhs);
////////////////////////////////////////////////////
// Trinary conditional op
////////////////////////////////////////////////////
#define GridTrinOpClass(name, combination) \
struct name { \
template <class _predicate,class _left, class _right> \
static auto accelerator_inline \
func(const _predicate &pred, const _left &lhs, const _right &rhs) \
-> decltype(combination) const \
{ \
return combination; \
} \
};
GridTrinOpClass(TrinaryWhere,
(predicatedWhere<
typename std::remove_reference<_predicate>::type,
typename std::remove_reference<_left>::type,
typename std::remove_reference<_right>::type>(pred, lhs,rhs)));
////////////////////////////////////////////
// Operator syntactical glue
////////////////////////////////////////////
#define GRID_UNOP(name) name
#define GRID_BINOP(name) name
#define GRID_TRINOP(name) name
#define GRID_DEF_UNOP(op, name) \
template <typename T1, typename std::enable_if<is_lattice<T1>::value||is_lattice_expr<T1>::value,T1>::type * = nullptr> \
inline auto op(const T1 &arg) ->decltype(LatticeUnaryExpression<GRID_UNOP(name),T1>(GRID_UNOP(name)(), arg)) \
{ \
return LatticeUnaryExpression<GRID_UNOP(name),T1>(GRID_UNOP(name)(), arg); \
}
#define GRID_BINOP_LEFT(op, name) \
template <typename T1, typename T2, \
typename std::enable_if<is_lattice<T1>::value||is_lattice_expr<T1>::value,T1>::type * = nullptr> \
inline auto op(const T1 &lhs, const T2 &rhs) \
->decltype(LatticeBinaryExpression<GRID_BINOP(name),T1,T2>(GRID_BINOP(name)(),lhs,rhs)) \
{ \
return LatticeBinaryExpression<GRID_BINOP(name),T1,T2>(GRID_BINOP(name)(),lhs,rhs);\
}
#define GRID_BINOP_RIGHT(op, name) \
template <typename T1, typename T2, \
typename std::enable_if<!is_lattice<T1>::value&&!is_lattice_expr<T1>::value,T1>::type * = nullptr, \
typename std::enable_if< is_lattice<T2>::value|| is_lattice_expr<T2>::value,T2>::type * = nullptr> \
inline auto op(const T1 &lhs, const T2 &rhs) \
->decltype(LatticeBinaryExpression<GRID_BINOP(name),T1,T2>(GRID_BINOP(name)(),lhs, rhs)) \
{ \
return LatticeBinaryExpression<GRID_BINOP(name),T1,T2>(GRID_BINOP(name)(),lhs, rhs); \
}
#define GRID_DEF_BINOP(op, name) \
GRID_BINOP_LEFT(op, name); \
GRID_BINOP_RIGHT(op, name);
#define GRID_DEF_TRINOP(op, name) \
template <typename T1, typename T2, typename T3> \
inline auto op(const T1 &pred, const T2 &lhs, const T3 &rhs) \
->decltype(LatticeTrinaryExpression<GRID_TRINOP(name),T1,T2,T3>(GRID_TRINOP(name)(),pred, lhs, rhs)) \
{ \
return LatticeTrinaryExpression<GRID_TRINOP(name),T1,T2,T3>(GRID_TRINOP(name)(),pred, lhs, rhs); \
}
////////////////////////
// Operator definitions
////////////////////////
GRID_DEF_UNOP(operator-, UnarySub);
GRID_DEF_UNOP(Not, UnaryNot);
GRID_DEF_UNOP(operator!, UnaryNot);
//GRID_DEF_UNOP(adj, UnaryAdj);
//GRID_DEF_UNOP(conjugate, UnaryConj);
GRID_DEF_UNOP(trace, UnaryTrace);
GRID_DEF_UNOP(transpose, UnaryTranspose);
GRID_DEF_UNOP(Ta, UnaryTa);
GRID_DEF_UNOP(ProjectOnGroup, UnaryProjectOnGroup);
GRID_DEF_UNOP(timesI, UnaryTimesI);
GRID_DEF_UNOP(timesMinusI, UnaryTimesMinusI);
GRID_DEF_UNOP(abs, UnaryAbs); // abs overloaded in cmath C++98; DON'T do the
// abs-fabs-dabs-labs thing
GRID_DEF_UNOP(sqrt, UnarySqrt);
GRID_DEF_UNOP(sin, UnarySin);
GRID_DEF_UNOP(cos, UnaryCos);
GRID_DEF_UNOP(asin, UnaryAsin);
GRID_DEF_UNOP(acos, UnaryAcos);
GRID_DEF_UNOP(log, UnaryLog);
GRID_DEF_UNOP(exp, UnaryExp);
GRID_DEF_BINOP(operator+, BinaryAdd);
GRID_DEF_BINOP(operator-, BinarySub);
GRID_DEF_BINOP(operator*, BinaryMul);
GRID_DEF_BINOP(operator/, BinaryDiv);
GRID_DEF_BINOP(operator&, BinaryAnd);
GRID_DEF_BINOP(operator|, BinaryOr);
GRID_DEF_BINOP(operator&&, BinaryAndAnd);
GRID_DEF_BINOP(operator||, BinaryOrOr);
GRID_DEF_TRINOP(where, TrinaryWhere);
/////////////////////////////////////////////////////////////
// Closure convenience to force expression to evaluate
/////////////////////////////////////////////////////////////
template <class Op, class T1>
auto closure(const LatticeUnaryExpression<Op, T1> &expr)
-> Lattice<typename std::remove_const<decltype(expr.op.func(vecEval(0, expr.arg1)))>::type >
{
Lattice<typename std::remove_const<decltype(expr.op.func(vecEval(0, expr.arg1)))>::type > ret(expr);
return ret;
}
template <class Op, class T1, class T2>
auto closure(const LatticeBinaryExpression<Op, T1, T2> &expr)
-> Lattice<typename std::remove_const<decltype(expr.op.func(vecEval(0, expr.arg1),vecEval(0, expr.arg2)))>::type >
{
Lattice<typename std::remove_const<decltype(expr.op.func(vecEval(0, expr.arg1),vecEval(0, expr.arg2)))>::type > ret(expr);
return ret;
}
template <class Op, class T1, class T2, class T3>
auto closure(const LatticeTrinaryExpression<Op, T1, T2, T3> &expr)
-> Lattice<typename std::remove_const<decltype(expr.op.func(vecEval(0, expr.arg1),
vecEval(0, expr.arg2),
vecEval(0, expr.arg3)))>::type >
{
Lattice<typename std::remove_const<decltype(expr.op.func(vecEval(0, expr.arg1),
vecEval(0, expr.arg2),
vecEval(0, expr.arg3)))>::type > ret(expr);
return ret;
}
#define EXPRESSION_CLOSURE(function) \
template<class Expression,typename std::enable_if<is_lattice_expr<Expression>::value,void>::type * = nullptr> \
auto function(Expression &expr) -> decltype(function(closure(expr))) \
{ \
return function(closure(expr)); \
}
#undef GRID_UNOP
#undef GRID_BINOP
#undef GRID_TRINOP
#undef GRID_DEF_UNOP
#undef GRID_DEF_BINOP
#undef GRID_DEF_TRINOP
NAMESPACE_END(Grid);
#endif
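
The operators defined above build lazy expression trees; evaluation happens only when an expression is assigned to a Lattice or forced with closure(), so a compound right-hand side runs as a single fused site loop with no intermediate lattices. A brief usage sketch, assuming an already-constructed grid (illustrative, not part of the diff):

// Usage sketch only; 'grid' is an assumed, already-constructed GridCartesian.
LatticeComplex a(&grid), b(&grid), c(&grid), d(&grid);
d = a*b + timesI(c);               // one fused kernel over sites, no temporaries
auto expr = a + b;                 // unevaluated LatticeBinaryExpression node
LatticeComplex e = closure(expr);  // force evaluation into a new lattice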

@@ -0,0 +1,258 @@
/*************************************************************************************
Grid physics library, www.github.com/paboyle/Grid
Source file: ./lib/lattice/Lattice_arith.h
Copyright (C) 2015
Author: Peter Boyle <paboyle@ph.ed.ac.uk>
Author: Christoph Lehner <christoph@lhnr.de>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */
#ifndef GRID_LATTICE_ARITH_H
#define GRID_LATTICE_ARITH_H
NAMESPACE_BEGIN(Grid);
//////////////////////////////////////////////////////////////////////////////////////////////////////
// avoid copy back routines for mult, mac, sub, add
//////////////////////////////////////////////////////////////////////////////////////////////////////
template<class obj1,class obj2,class obj3> inline
void mult(Lattice<obj1> &ret,const Lattice<obj2> &lhs,const Lattice<obj3> &rhs){
ret.Checkerboard() = lhs.Checkerboard();
autoView( ret_v , ret, AcceleratorWrite);
autoView( lhs_v , lhs, AcceleratorRead);
autoView( rhs_v , rhs, AcceleratorRead);
conformable(ret,rhs);
conformable(lhs,rhs);
accelerator_for(ss,lhs_v.size(),obj1::Nsimd(),{
decltype(coalescedRead(obj1())) tmp;
auto lhs_t = lhs_v(ss);
auto rhs_t = rhs_v(ss);
mult(&tmp,&lhs_t,&rhs_t);
coalescedWrite(ret_v[ss],tmp);
});
}
template<class obj1,class obj2,class obj3> inline
void mac(Lattice<obj1> &ret,const Lattice<obj2> &lhs,const Lattice<obj3> &rhs){
ret.Checkerboard() = lhs.Checkerboard();
conformable(ret,rhs);
conformable(lhs,rhs);
autoView( ret_v , ret, AcceleratorWrite);
autoView( lhs_v , lhs, AcceleratorRead);
autoView( rhs_v , rhs, AcceleratorRead);
accelerator_for(ss,lhs_v.size(),obj1::Nsimd(),{
auto lhs_t=lhs_v(ss);
auto rhs_t=rhs_v(ss);
auto tmp =ret_v(ss);
mac(&tmp,&lhs_t,&rhs_t);
coalescedWrite(ret_v[ss],tmp);
});
}
template<class obj1,class obj2,class obj3> inline
void sub(Lattice<obj1> &ret,const Lattice<obj2> &lhs,const Lattice<obj3> &rhs){
ret.Checkerboard() = lhs.Checkerboard();
conformable(ret,rhs);
conformable(lhs,rhs);
autoView( ret_v , ret, AcceleratorWrite);
autoView( lhs_v , lhs, AcceleratorRead);
autoView( rhs_v , rhs, AcceleratorRead);
accelerator_for(ss,lhs_v.size(),obj1::Nsimd(),{
decltype(coalescedRead(obj1())) tmp;
auto lhs_t=lhs_v(ss);
auto rhs_t=rhs_v(ss);
sub(&tmp,&lhs_t,&rhs_t);
coalescedWrite(ret_v[ss],tmp);
});
}
template<class obj1,class obj2,class obj3> inline
void add(Lattice<obj1> &ret,const Lattice<obj2> &lhs,const Lattice<obj3> &rhs){
ret.Checkerboard() = lhs.Checkerboard();
conformable(ret,rhs);
conformable(lhs,rhs);
autoView( ret_v , ret, AcceleratorWrite);
autoView( lhs_v , lhs, AcceleratorRead);
autoView( rhs_v , rhs, AcceleratorRead);
accelerator_for(ss,lhs_v.size(),obj1::Nsimd(),{
decltype(coalescedRead(obj1())) tmp;
auto lhs_t=lhs_v(ss);
auto rhs_t=rhs_v(ss);
add(&tmp,&lhs_t,&rhs_t);
coalescedWrite(ret_v[ss],tmp);
});
}
//////////////////////////////////////////////////////////////////////////////////////////////////////
// avoid copy back routines for mult, mac, sub, add
//////////////////////////////////////////////////////////////////////////////////////////////////////
template<class obj1,class obj2,class obj3> inline
void mult(Lattice<obj1> &ret,const Lattice<obj2> &lhs,const obj3 &rhs){
ret.Checkerboard() = lhs.Checkerboard();
conformable(lhs,ret);
autoView( ret_v , ret, AcceleratorWrite);
autoView( lhs_v , lhs, AcceleratorRead);
accelerator_for(ss,lhs_v.size(),obj1::Nsimd(),{
decltype(coalescedRead(obj1())) tmp;
mult(&tmp,&lhs_v(ss),&rhs);
coalescedWrite(ret_v[ss],tmp);
});
}
template<class obj1,class obj2,class obj3> inline
void mac(Lattice<obj1> &ret,const Lattice<obj2> &lhs,const obj3 &rhs){
ret.Checkerboard() = lhs.Checkerboard();
conformable(ret,lhs);
autoView( ret_v , ret, AcceleratorWrite);
autoView( lhs_v , lhs, AcceleratorRead);
accelerator_for(ss,lhs_v.size(),obj1::Nsimd(),{
auto tmp =ret_v(ss);
auto lhs_t=lhs_v(ss);
mac(&tmp,&lhs_t,&rhs);
coalescedWrite(ret_v[ss],tmp);
});
}
template<class obj1,class obj2,class obj3> inline
void sub(Lattice<obj1> &ret,const Lattice<obj2> &lhs,const obj3 &rhs){
ret.Checkerboard() = lhs.Checkerboard();
conformable(ret,lhs);
autoView( ret_v , ret, AcceleratorWrite);
autoView( lhs_v , lhs, AcceleratorRead);
accelerator_for(ss,lhs_v.size(),obj1::Nsimd(),{
decltype(coalescedRead(obj1())) tmp;
auto lhs_t=lhs_v(ss);
sub(&tmp,&lhs_t,&rhs);
coalescedWrite(ret_v[ss],tmp);
});
}
template<class obj1,class obj2,class obj3> inline
void add(Lattice<obj1> &ret,const Lattice<obj2> &lhs,const obj3 &rhs){
ret.Checkerboard() = lhs.Checkerboard();
conformable(lhs,ret);
autoView( ret_v , ret, AcceleratorWrite);
autoView( lhs_v , lhs, AcceleratorRead);
accelerator_for(ss,lhs_v.size(),obj1::Nsimd(),{
decltype(coalescedRead(obj1())) tmp;
auto lhs_t=lhs_v(ss);
add(&tmp,&lhs_t,&rhs);
coalescedWrite(ret_v[ss],tmp);
});
}
//////////////////////////////////////////////////////////////////////////////////////////////////////
// avoid copy back routines for mult, mac, sub, add
//////////////////////////////////////////////////////////////////////////////////////////////////////
template<class obj1,class obj2,class obj3> inline
void mult(Lattice<obj1> &ret,const obj2 &lhs,const Lattice<obj3> &rhs){
ret.Checkerboard() = rhs.Checkerboard();
conformable(ret,rhs);
autoView( ret_v , ret, AcceleratorWrite);
autoView( rhs_v , rhs, AcceleratorRead);
accelerator_for(ss,rhs_v.size(),obj1::Nsimd(),{
decltype(coalescedRead(obj1())) tmp;
auto rhs_t=rhs_v(ss);
mult(&tmp,&lhs,&rhs_t);
coalescedWrite(ret_v[ss],tmp);
});
}
template<class obj1,class obj2,class obj3> inline
void mac(Lattice<obj1> &ret,const obj2 &lhs,const Lattice<obj3> &rhs){
ret.Checkerboard() = rhs.Checkerboard();
conformable(ret,rhs);
autoView( ret_v , ret, AcceleratorWrite);
autoView( rhs_v , rhs, AcceleratorRead);
accelerator_for(ss,rhs_v.size(),obj1::Nsimd(),{
auto tmp =ret_v(ss);
auto rhs_t=rhs_v(ss);
mac(&tmp,&lhs,&rhs_t);
coalescedWrite(ret_v[ss],tmp);
});
}
template<class obj1,class obj2,class obj3> inline
void sub(Lattice<obj1> &ret,const obj2 &lhs,const Lattice<obj3> &rhs){
ret.Checkerboard() = rhs.Checkerboard();
conformable(ret,rhs);
autoView( ret_v , ret, AcceleratorWrite);
autoView( rhs_v , rhs, AcceleratorRead);
accelerator_for(ss,rhs_v.size(),obj1::Nsimd(),{
decltype(coalescedRead(obj1())) tmp;
auto rhs_t=rhs_v(ss);
sub(&tmp,&lhs,&rhs_t);
coalescedWrite(ret_v[ss],tmp);
});
}
template<class obj1,class obj2,class obj3> inline
void add(Lattice<obj1> &ret,const obj2 &lhs,const Lattice<obj3> &rhs){
ret.Checkerboard() = rhs.Checkerboard();
conformable(ret,rhs);
autoView( ret_v , ret, AcceleratorWrite);
autoView( rhs_v , rhs, AcceleratorRead);
accelerator_for(ss,rhs_v.size(),obj1::Nsimd(),{
decltype(coalescedRead(obj1())) tmp;
auto rhs_t=rhs_v(ss);
add(&tmp,&lhs,&rhs_t);
coalescedWrite(ret_v[ss],tmp);
});
}
template<class sobj,class vobj> inline
void axpy(Lattice<vobj> &ret,sobj a,const Lattice<vobj> &x,const Lattice<vobj> &y){
ret.Checkerboard() = x.Checkerboard();
conformable(ret,x);
conformable(x,y);
autoView( ret_v , ret, AcceleratorWrite);
autoView( x_v , x, AcceleratorRead);
autoView( y_v , y, AcceleratorRead);
accelerator_for(ss,x_v.size(),vobj::Nsimd(),{
auto tmp = a*x_v(ss)+y_v(ss);
coalescedWrite(ret_v[ss],tmp);
});
}
template<class sobj,class vobj> inline
void axpby(Lattice<vobj> &ret,sobj a,sobj b,const Lattice<vobj> &x,const Lattice<vobj> &y){
ret.Checkerboard() = x.Checkerboard();
conformable(ret,x);
conformable(x,y);
autoView( ret_v , ret, AcceleratorWrite);
autoView( x_v , x, AcceleratorRead);
autoView( y_v , y, AcceleratorRead);
accelerator_for(ss,x_v.size(),vobj::Nsimd(),{
auto tmp = a*x_v(ss)+b*y_v(ss);
coalescedWrite(ret_v[ss],tmp);
});
}
template<class sobj,class vobj> inline
RealD axpy_norm(Lattice<vobj> &ret,sobj a,const Lattice<vobj> &x,const Lattice<vobj> &y)
{
return axpy_norm_fast(ret,a,x,y);
}
template<class sobj,class vobj> inline
RealD axpby_norm(Lattice<vobj> &ret,sobj a,sobj b,const Lattice<vobj> &x,const Lattice<vobj> &y)
{
return axpby_norm_fast(ret,a,b,x,y);
}
NAMESPACE_END(Grid);
#endif
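For reference, a minimal usage sketch of the kernels in this file (the copy-back-free add/sub and the axpy/axpby family). The driver function and field names are illustrative; a GridCartesian and a seeded GridParallelRNG are assumed to be set up as in the Grid test programs.
#include <Grid/Grid.h>
using namespace Grid;
// Illustrative only: exercise the arithmetic kernels defined above.
void arith_sketch(GridCartesian *grid, GridParallelRNG &rng)
{
  LatticeComplexD x(grid), y(grid), z(grid);
  random(rng, x);
  random(rng, y);
  add(z, x, y);                          // z = x + y, no expression-template temporary
  sub(z, x, y);                          // z = x - y
  RealD a = 2.0, b = -0.5;
  axpy(z, a, x, y);                      // z = a*x + y
  axpby(z, a, b, x, y);                  // z = a*x + b*y
  RealD nrm = axpby_norm(z, a, b, x, y); // same, but also returns norm2(z)
  std::cout << "axpby_norm = " << nrm << std::endl;
}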

Grid/lattice/Lattice_base.h
@@ -0,0 +1,372 @@
/*************************************************************************************
Grid physics library, www.github.com/paboyle/Grid
Source file: ./lib/lattice/Lattice_base.h
Copyright (C) 2015
Author: Azusa Yamaguchi <ayamaguc@staffmail.ed.ac.uk>
Author: Peter Boyle <paboyle@ph.ed.ac.uk>
Author: paboyle <paboyle@ph.ed.ac.uk>
Author: Christoph Lehner <christoph@lhnr.de>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
See the full license in the file "LICENSE" in the top level distribution
directory
*************************************************************************************/
/* END LEGAL */
#pragma once
#define STREAMING_STORES
NAMESPACE_BEGIN(Grid);
extern int GridCshiftPermuteMap[4][16];
/////////////////////////////////////////////////////////////////////////////////////////
// The real lattice class, with normal copy and assignment semantics.
// This contains extra (host resident) grid pointer data that may be accessed by host code
/////////////////////////////////////////////////////////////////////////////////////////
template<class vobj>
class Lattice : public LatticeAccelerator<vobj>
{
public:
GridBase *Grid(void) const { return this->_grid; }
///////////////////////////////////////////////////
// Member types
///////////////////////////////////////////////////
typedef typename vobj::scalar_type scalar_type;
typedef typename vobj::vector_type vector_type;
typedef typename vobj::scalar_object scalar_object;
typedef vobj vector_object;
private:
void dealloc(void)
{
if( this->_odata_size ) {
alignedAllocator<vobj> alloc;
alloc.deallocate(this->_odata,this->_odata_size);
this->_odata=nullptr;
this->_odata_size=0;
}
}
void resize(uint64_t size)
{
if ( this->_odata_size != size ) {
alignedAllocator<vobj> alloc;
dealloc();
this->_odata_size = size;
if ( size )
this->_odata = alloc.allocate(this->_odata_size);
else
this->_odata = nullptr;
}
}
public:
/////////////////////////////////////////////////////////////////////////////////
// Can be used to mark the accelerator copy dirty without a copy from host; useful for temporaries whose previous contents we don't care about
/////////////////////////////////////////////////////////////////////////////////
void SetViewMode(ViewMode mode) {
LatticeView<vobj> accessor(*( (LatticeAccelerator<vobj> *) this),mode);
accessor.ViewClose();
}
/////////////////////////////////////////////////////////////////////////////////
// Return a view object that may be dereferenced in site loops.
// The view is trivially copy constructible and may be copied to an accelerator device
// in device lambdas
/////////////////////////////////////////////////////////////////////////////////
LatticeView<vobj> View (ViewMode mode) const
{
LatticeView<vobj> accessor(*( (LatticeAccelerator<vobj> *) this),mode);
return accessor;
}
~Lattice() {
if ( this->_odata_size ) {
dealloc();
}
}
////////////////////////////////////////////////////////////////////////////////
// Expression Template closure support
////////////////////////////////////////////////////////////////////////////////
template <typename Op, typename T1> inline Lattice<vobj> & operator=(const LatticeUnaryExpression<Op,T1> &expr)
{
GridBase *egrid(nullptr);
GridFromExpression(egrid,expr);
assert(egrid!=nullptr);
conformable(this->_grid,egrid);
int cb=-1;
CBFromExpression(cb,expr);
assert( (cb==Odd) || (cb==Even));
this->checkerboard=cb;
auto exprCopy = expr;
ExpressionViewOpen(exprCopy);
auto me = View(AcceleratorWriteDiscard);
accelerator_for(ss,me.size(),vobj::Nsimd(),{
auto tmp = eval(ss,exprCopy);
coalescedWrite(me[ss],tmp);
});
me.ViewClose();
ExpressionViewClose(exprCopy);
return *this;
}
template <typename Op, typename T1,typename T2> inline Lattice<vobj> & operator=(const LatticeBinaryExpression<Op,T1,T2> &expr)
{
GridBase *egrid(nullptr);
GridFromExpression(egrid,expr);
assert(egrid!=nullptr);
conformable(this->_grid,egrid);
int cb=-1;
CBFromExpression(cb,expr);
assert( (cb==Odd) || (cb==Even));
this->checkerboard=cb;
auto exprCopy = expr;
ExpressionViewOpen(exprCopy);
auto me = View(AcceleratorWriteDiscard);
accelerator_for(ss,me.size(),vobj::Nsimd(),{
auto tmp = eval(ss,exprCopy);
coalescedWrite(me[ss],tmp);
});
me.ViewClose();
ExpressionViewClose(exprCopy);
return *this;
}
template <typename Op, typename T1,typename T2,typename T3> inline Lattice<vobj> & operator=(const LatticeTrinaryExpression<Op,T1,T2,T3> &expr)
{
GridBase *egrid(nullptr);
GridFromExpression(egrid,expr);
assert(egrid!=nullptr);
conformable(this->_grid,egrid);
int cb=-1;
CBFromExpression(cb,expr);
assert( (cb==Odd) || (cb==Even));
this->checkerboard=cb;
auto exprCopy = expr;
ExpressionViewOpen(exprCopy);
auto me = View(AcceleratorWriteDiscard);
accelerator_for(ss,me.size(),vobj::Nsimd(),{
auto tmp = eval(ss,exprCopy);
coalescedWrite(me[ss],tmp);
});
me.ViewClose();
ExpressionViewClose(exprCopy);
return *this;
}
//GridFromExpression is tricky to do
template<class Op,class T1>
Lattice(const LatticeUnaryExpression<Op,T1> & expr) {
this->_grid = nullptr;
GridFromExpression(this->_grid,expr);
assert(this->_grid!=nullptr);
int cb=-1;
CBFromExpression(cb,expr);
assert( (cb==Odd) || (cb==Even));
this->checkerboard=cb;
resize(this->_grid->oSites());
*this = expr;
}
template<class Op,class T1, class T2>
Lattice(const LatticeBinaryExpression<Op,T1,T2> & expr) {
this->_grid = nullptr;
GridFromExpression(this->_grid,expr);
assert(this->_grid!=nullptr);
int cb=-1;
CBFromExpression(cb,expr);
assert( (cb==Odd) || (cb==Even));
this->checkerboard=cb;
resize(this->_grid->oSites());
*this = expr;
}
template<class Op,class T1, class T2, class T3>
Lattice(const LatticeTrinaryExpression<Op,T1,T2,T3> & expr) {
this->_grid = nullptr;
GridFromExpression(this->_grid,expr);
assert(this->_grid!=nullptr);
int cb=-1;
CBFromExpression(cb,expr);
assert( (cb==Odd) || (cb==Even));
this->checkerboard=cb;
resize(this->_grid->oSites());
*this = expr;
}
template<class sobj> inline Lattice<vobj> & operator = (const sobj & r){
auto me = View(CpuWrite);
thread_for(ss,me.size(),{
me[ss]= r;
});
me.ViewClose();
return *this;
}
//////////////////////////////////////////////////////////////////
// Follow the rule of five; construction requires a "grid" to be passed
// to the user-defined constructor
///////////////////////////////////////////
// user defined constructor
///////////////////////////////////////////
Lattice(GridBase *grid,ViewMode mode=AcceleratorWriteDiscard) {
this->_grid = grid;
resize(this->_grid->oSites());
assert((((uint64_t)&this->_odata[0])&0xF) ==0);
this->checkerboard=0;
SetViewMode(mode);
}
// virtual ~Lattice(void) = default;
void reset(GridBase* grid) {
if (this->_grid != grid) {
this->_grid = grid;
this->resize(grid->oSites());
this->checkerboard = 0;
}
}
///////////////////////////////////////////
// copy constructor
///////////////////////////////////////////
Lattice(const Lattice& r){
this->_grid = r.Grid();
resize(this->_grid->oSites());
*this = r;
}
///////////////////////////////////////////
// move constructor
///////////////////////////////////////////
Lattice(Lattice && r){
this->_grid = r.Grid();
this->_odata = r._odata;
this->_odata_size = r._odata_size;
this->checkerboard= r.Checkerboard();
r._odata = nullptr;
r._odata_size = 0;
}
///////////////////////////////////////////
// assignment template
///////////////////////////////////////////
template<class robj> inline Lattice<vobj> & operator = (const Lattice<robj> & r){
typename std::enable_if<!std::is_same<robj,vobj>::value,int>::type i=0;
conformable(*this,r);
this->checkerboard = r.Checkerboard();
auto me = View(AcceleratorWriteDiscard);
auto him= r.View(AcceleratorRead);
accelerator_for(ss,me.size(),vobj::Nsimd(),{
coalescedWrite(me[ss],him(ss));
});
me.ViewClose(); him.ViewClose();
return *this;
}
///////////////////////////////////////////
// Copy assignment
///////////////////////////////////////////
inline Lattice<vobj> & operator = (const Lattice<vobj> & r){
this->checkerboard = r.Checkerboard();
conformable(*this,r);
auto me = View(AcceleratorWriteDiscard);
auto him= r.View(AcceleratorRead);
accelerator_for(ss,me.size(),vobj::Nsimd(),{
coalescedWrite(me[ss],him(ss));
});
me.ViewClose(); him.ViewClose();
return *this;
}
///////////////////////////////////////////
// Move assignment possible if same type
///////////////////////////////////////////
inline Lattice<vobj> & operator = (Lattice<vobj> && r){
resize(0); // deletes if appropriate
this->_grid = r.Grid();
this->_odata = r._odata;
this->_odata_size = r._odata_size;
this->checkerboard= r.Checkerboard();
r._odata = nullptr;
r._odata_size = 0;
return *this;
}
/////////////////////////////////////////////////////////////////////////////
// *=,+=,-= operators inherit behaviour from the corresponding */+/- operation
/////////////////////////////////////////////////////////////////////////////
template<class T> inline Lattice<vobj> &operator *=(const T &r) {
*this = (*this)*r;
return *this;
}
template<class T> inline Lattice<vobj> &operator -=(const T &r) {
*this = (*this)-r;
return *this;
}
template<class T> inline Lattice<vobj> &operator +=(const T &r) {
*this = (*this)+r;
return *this;
}
friend inline void swap(Lattice &l, Lattice &r) {
conformable(l,r);
LatticeAccelerator<vobj> tmp;
LatticeAccelerator<vobj> *lp = (LatticeAccelerator<vobj> *)&l;
LatticeAccelerator<vobj> *rp = (LatticeAccelerator<vobj> *)&r;
tmp = *lp; *lp=*rp; *rp=tmp;
}
}; // class Lattice
template<class vobj> std::ostream& operator<< (std::ostream& stream, const Lattice<vobj> &o){
typedef typename vobj::scalar_object sobj;
for(int g=0;g<o.Grid()->_gsites;g++){
Coordinate gcoor;
o.Grid()->GlobalIndexToGlobalCoor(g,gcoor);
sobj ss;
peekSite(ss,o,gcoor);
stream<<"[";
for(int d=0;d<gcoor.size();d++){
stream<<gcoor[d];
if(d!=gcoor.size()-1) stream<<",";
}
stream<<"]\t";
stream<<ss<<std::endl;
}
return stream;
}
NAMESPACE_END(Grid);
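A minimal sketch of how this class is typically used: construction on a grid, expression-template assignment, and explicit view access for a hand-written kernel. Names are illustrative; the grid/RNG scaffolding is assumed to be set up as in the Grid tests.
#include <Grid/Grid.h>
using namespace Grid;
void lattice_base_sketch(GridCartesian *grid, GridParallelRNG &rng)
{
  LatticeComplexD a(grid), b(grid), c(grid);
  random(rng, a);
  random(rng, b);
  c = Zero();
  // Expression-template assignment: the whole RHS is evaluated in one
  // fused accelerator_for with no intermediate lattice temporaries.
  c = a*b + ComplexD(2.0,0.0)*a;
  // Explicit views for a custom kernel (same pattern as the operator= above).
  typedef LatticeComplexD::vector_object vobj;
  autoView(c_v, c, AcceleratorWrite);
  autoView(a_v, a, AcceleratorRead);
  accelerator_for(ss, a_v.size(), vobj::Nsimd(), {
    coalescedWrite(c_v[ss], a_v(ss) + a_v(ss));
  });
}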

@@ -0,0 +1,248 @@
/*************************************************************************************
Grid physics library, www.github.com/paboyle/Grid
Source file: ./lib/lattice/Lattice_basis.h
Copyright (C) 2015
Author: Peter Boyle <paboyle@ph.ed.ac.uk>
Author: paboyle <paboyle@ph.ed.ac.uk>
Author: Christoph Lehner <christoph@lhnr.de>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
See the full license in the file "LICENSE" in the top level distribution
directory
*************************************************************************************/
/* END LEGAL */
#pragma once
NAMESPACE_BEGIN(Grid);
template<class Field>
void basisOrthogonalize(std::vector<Field> &basis,Field &w,int k)
{
// If we assume basis[j] are already orthonormal,
// can take all inner products in parallel saving 2x bandwidth
// Save 3x bandwidth on the second line of loop.
// perhaps 2.5x speed up.
// 2x overall in Multigrid Lanczos
for(int j=0; j<k; ++j){
auto ip = innerProduct(basis[j],w);
w = w - ip*basis[j];
}
}
template<class VField, class Matrix>
void basisRotate(VField &basis,Matrix& Qt,int j0, int j1, int k0,int k1,int Nm)
{
typedef decltype(basis[0]) Field;
typedef decltype(basis[0].View(AcceleratorRead)) View;
Vector<View> basis_v; basis_v.reserve(basis.size());
typedef typename std::remove_reference<decltype(basis_v[0][0])>::type vobj;
typedef typename std::remove_reference<decltype(Qt(0,0))>::type Coeff_t;
GridBase* grid = basis[0].Grid();
for(int k=0;k<basis.size();k++){
basis_v.push_back(basis[k].View(AcceleratorWrite));
}
#if ( (!defined(GRID_CUDA)) )
int max_threads = thread_max();
Vector < vobj > Bt(Nm * max_threads);
thread_region
{
vobj* B = &Bt[Nm * thread_num()];
thread_for_in_region(ss, grid->oSites(),{
for(int j=j0; j<j1; ++j) B[j]=0.;
for(int j=j0; j<j1; ++j){
for(int k=k0; k<k1; ++k){
B[j] +=Qt(j,k) * basis_v[k][ss];
}
}
for(int j=j0; j<j1; ++j){
basis_v[j][ss] = B[j];
}
});
}
#else
View *basis_vp = &basis_v[0];
int nrot = j1-j0;
if (!nrot) // edge case not handled gracefully by Cuda
return;
uint64_t oSites =grid->oSites();
uint64_t siteBlock=(grid->oSites()+nrot-1)/nrot; // Maximum 1 additional vector overhead
Vector <vobj> Bt(siteBlock * nrot);
auto Bp=&Bt[0];
// GPU readable copy of matrix
Vector<Coeff_t> Qt_jv(Nm*Nm);
Coeff_t *Qt_p = & Qt_jv[0];
thread_for(i,Nm*Nm,{
int j = i/Nm;
int k = i%Nm;
Qt_p[i]=Qt(j,k);
});
// Block the loop to keep storage footprint down
for(uint64_t s=0;s<oSites;s+=siteBlock){
// remaining work in this block
int ssites=MIN(siteBlock,oSites-s);
// zero out the accumulators
accelerator_for(ss,siteBlock*nrot,vobj::Nsimd(),{
decltype(coalescedRead(Bp[ss])) z;
z=Zero();
coalescedWrite(Bp[ss],z);
});
accelerator_for(sj,ssites*nrot,vobj::Nsimd(),{
int j =sj%nrot;
int jj =j0+j;
int ss =sj/nrot;
int sss=ss+s;
for(int k=k0; k<k1; ++k){
auto tmp = coalescedRead(Bp[ss*nrot+j]);
coalescedWrite(Bp[ss*nrot+j],tmp+ Qt_p[jj*Nm+k] * coalescedRead(basis_v[k][sss]));
}
});
accelerator_for(sj,ssites*nrot,vobj::Nsimd(),{
int j =sj%nrot;
int jj =j0+j;
int ss =sj/nrot;
int sss=ss+s;
coalescedWrite(basis_v[jj][sss],coalescedRead(Bp[ss*nrot+j]));
});
}
#endif
for(int k=0;k<basis.size();k++) basis_v[k].ViewClose();
}
// Extract a single rotated vector
template<class Field>
void basisRotateJ(Field &result,std::vector<Field> &basis,Eigen::MatrixXd& Qt,int j, int k0,int k1,int Nm)
{
typedef decltype(basis[0].View(AcceleratorRead)) View;
typedef typename Field::vector_object vobj;
GridBase* grid = basis[0].Grid();
result.Checkerboard() = basis[0].Checkerboard();
Vector<View> basis_v; basis_v.reserve(basis.size());
for(int k=0;k<basis.size();k++){
basis_v.push_back(basis[k].View(AcceleratorRead));
}
vobj zz=Zero();
Vector<double> Qt_jv(Nm);
double * Qt_j = & Qt_jv[0];
for(int k=0;k<Nm;++k) Qt_j[k]=Qt(j,k);
auto basis_vp=& basis_v[0];
autoView(result_v,result,AcceleratorWrite);
accelerator_for(ss, grid->oSites(),vobj::Nsimd(),{
vobj zzz=Zero();
auto B=coalescedRead(zzz);
for(int k=k0; k<k1; ++k){
B +=Qt_j[k] * coalescedRead(basis_vp[k][ss]);
}
coalescedWrite(result_v[ss], B);
});
for(int k=0;k<basis.size();k++) basis_v[k].ViewClose();
}
template<class Field>
void basisReorderInPlace(std::vector<Field> &_v,std::vector<RealD>& sort_vals, std::vector<int>& idx)
{
int vlen = idx.size();
assert(vlen>=1);
assert(vlen<=sort_vals.size());
assert(vlen<=_v.size());
for (size_t i=0;i<vlen;i++) {
if (idx[i] != i) {
//////////////////////////////////////
// idx[i] is a table of desired sources giving a permutation.
// Swap v[i] with v[idx[i]].
// Find j>i for which _vnew[j] = _vold[i],
// track the move idx[j] => idx[i]
// track the move idx[i] => i
//////////////////////////////////////
size_t j;
for (j=i;j<idx.size();j++)
if (idx[j]==i)
break;
assert(idx[i] > i); assert(j!=idx.size()); assert(idx[j]==i);
swap(_v[i],_v[idx[i]]); // should use vector move constructor, no data copy
std::swap(sort_vals[i],sort_vals[idx[i]]);
idx[j] = idx[i];
idx[i] = i;
}
}
}
inline std::vector<int> basisSortGetIndex(std::vector<RealD>& sort_vals)
{
std::vector<int> idx(sort_vals.size());
std::iota(idx.begin(), idx.end(), 0);
// sort indexes based on comparing values in v
std::sort(idx.begin(), idx.end(), [&sort_vals](int i1, int i2) {
return ::fabs(sort_vals[i1]) < ::fabs(sort_vals[i2]);
});
return idx;
}
template<class Field>
void basisSortInPlace(std::vector<Field> & _v,std::vector<RealD>& sort_vals, bool reverse)
{
std::vector<int> idx = basisSortGetIndex(sort_vals);
if (reverse)
std::reverse(idx.begin(), idx.end());
basisReorderInPlace(_v,sort_vals,idx);
}
// PAB: faster to compute the inner products first, then fuse the loops.
// If performance critical, this can be improved.
template<class Field>
void basisDeflate(const std::vector<Field> &_v,const std::vector<RealD>& eval,const Field& src_orig,Field& result) {
result = Zero();
assert(_v.size()==eval.size());
int N = (int)_v.size();
for (int i=0;i<N;i++) {
const Field& tmp = _v[i];
axpy(result,TensorRemove(innerProduct(tmp,src_orig)) / eval[i],tmp,result);
}
}
NAMESPACE_END(Grid);
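A sketch of how the basis helpers above are used in a Lanczos-style restart: sort the eigenpairs, then form a single rotated vector. The vectors evec, values eval and the Eigen matrix Qt (assumed at least Nm x Nm) stand in for quantities produced by an eigensolver.
#include <Grid/Grid.h>
using namespace Grid;
void basis_sketch(std::vector<LatticeFermion> &evec,
                  std::vector<RealD>          &eval,
                  Eigen::MatrixXd             &Qt)
{
  // Reorder eigenpairs in place by |eval|, smallest first.
  basisSortInPlace(evec, eval, /*reverse=*/false);
  // result = sum_{k in [k0,k1)} Qt(j,k) * evec[k]  for a single j.
  int Nm = (int)evec.size();
  LatticeFermion rotated(evec[0].Grid());
  basisRotateJ(rotated, evec, Qt, /*j=*/0, /*k0=*/0, /*k1=*/Nm, Nm);
}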

@@ -0,0 +1,179 @@
/*************************************************************************************
Grid physics library, www.github.com/paboyle/Grid
Source file: ./lib/lattice/Lattice_comparison.h
Copyright (C) 2015
Author: Azusa Yamaguchi <ayamaguc@staffmail.ed.ac.uk>
Author: Peter Boyle <paboyle@ph.ed.ac.uk>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */
#ifndef GRID_LATTICE_COMPARISON_H
#define GRID_LATTICE_COMPARISON_H
NAMESPACE_BEGIN(Grid);
//////////////////////////////////////////////////////////////////////////
// relational operators
//
// Support <,>,<=,>=,==,!=
//
//Query supporting bitwise &, |, ^, !
//Query supporting logical &&, ||,
//////////////////////////////////////////////////////////////////////////
typedef iScalar<vInteger> vPredicate ;
//////////////////////////////////////////////////////////////////////////
// compare lattice to lattice
//////////////////////////////////////////////////////////////////////////
template<class vfunctor,class lobj,class robj>
inline Lattice<vPredicate> LLComparison(vfunctor op,const Lattice<lobj> &lhs,const Lattice<robj> &rhs)
{
Lattice<vPredicate> ret(rhs.Grid());
autoView( lhs_v, lhs, CpuRead);
autoView( rhs_v, rhs, CpuRead);
autoView( ret_v, ret, CpuWrite);
thread_for( ss, rhs_v.size(), {
ret_v[ss]=op(lhs_v[ss],rhs_v[ss]);
});
return ret;
}
//////////////////////////////////////////////////////////////////////////
// compare lattice to scalar
//////////////////////////////////////////////////////////////////////////
template<class vfunctor,class lobj,class robj>
inline Lattice<vPredicate> LSComparison(vfunctor op,const Lattice<lobj> &lhs,const robj &rhs)
{
Lattice<vPredicate> ret(lhs.Grid());
autoView( lhs_v, lhs, CpuRead);
autoView( ret_v, ret, CpuWrite);
thread_for( ss, lhs_v.size(), {
ret_v[ss]=op(lhs_v[ss],rhs);
});
return ret;
}
//////////////////////////////////////////////////////////////////////////
// compare scalar to lattice
//////////////////////////////////////////////////////////////////////////
template<class vfunctor,class lobj,class robj>
inline Lattice<vPredicate> SLComparison(vfunctor op,const lobj &lhs,const Lattice<robj> &rhs)
{
Lattice<vPredicate> ret(rhs.Grid());
autoView( rhs_v, rhs, CpuRead);
autoView( ret_v, ret, CpuWrite);
thread_for( ss, rhs_v.size(), {
ret_v[ss]=op(lhs,rhs_v[ss]);
});
return ret;
}
//////////////////////////////////////////////////////////////////////////
// Map to functors
//////////////////////////////////////////////////////////////////////////
// Less than
template<class lobj,class robj>
inline Lattice<vPredicate> operator < (const Lattice<lobj> & lhs, const Lattice<robj> & rhs) {
return LLComparison(vlt<lobj,robj>(),lhs,rhs);
}
template<class lobj,class robj>
inline Lattice<vPredicate> operator < (const Lattice<lobj> & lhs, const robj & rhs) {
return LSComparison(vlt<lobj,robj>(),lhs,rhs);
}
template<class lobj,class robj>
inline Lattice<vPredicate> operator < (const lobj & lhs, const Lattice<robj> & rhs) {
return SLComparison(vlt<lobj,robj>(),lhs,rhs);
}
// Less than equal
template<class lobj,class robj>
inline Lattice<vPredicate> operator <= (const Lattice<lobj> & lhs, const Lattice<robj> & rhs) {
return LLComparison(vle<lobj,robj>(),lhs,rhs);
}
template<class lobj,class robj>
inline Lattice<vPredicate> operator <= (const Lattice<lobj> & lhs, const robj & rhs) {
return LSComparison(vle<lobj,robj>(),lhs,rhs);
}
template<class lobj,class robj>
inline Lattice<vPredicate> operator <= (const lobj & lhs, const Lattice<robj> & rhs) {
return SLComparison(vle<lobj,robj>(),lhs,rhs);
}
// Greater than
template<class lobj,class robj>
inline Lattice<vPredicate> operator > (const Lattice<lobj> & lhs, const Lattice<robj> & rhs) {
return LLComparison(vgt<lobj,robj>(),lhs,rhs);
}
template<class lobj,class robj>
inline Lattice<vPredicate> operator > (const Lattice<lobj> & lhs, const robj & rhs) {
return LSComparison(vgt<lobj,robj>(),lhs,rhs);
}
template<class lobj,class robj>
inline Lattice<vPredicate> operator > (const lobj & lhs, const Lattice<robj> & rhs) {
return SLComparison(vgt<lobj,robj>(),lhs,rhs);
}
// Greater than equal
template<class lobj,class robj>
inline Lattice<vPredicate> operator >= (const Lattice<lobj> & lhs, const Lattice<robj> & rhs) {
return LLComparison(vge<lobj,robj>(),lhs,rhs);
}
template<class lobj,class robj>
inline Lattice<vPredicate> operator >= (const Lattice<lobj> & lhs, const robj & rhs) {
return LSComparison(vge<lobj,robj>(),lhs,rhs);
}
template<class lobj,class robj>
inline Lattice<vPredicate> operator >= (const lobj & lhs, const Lattice<robj> & rhs) {
return SLComparison(vge<lobj,robj>(),lhs,rhs);
}
// equal
template<class lobj,class robj>
inline Lattice<vPredicate> operator == (const Lattice<lobj> & lhs, const Lattice<robj> & rhs) {
return LLComparison(veq<lobj,robj>(),lhs,rhs);
}
template<class lobj,class robj>
inline Lattice<vPredicate> operator == (const Lattice<lobj> & lhs, const robj & rhs) {
return LSComparison(veq<lobj,robj>(),lhs,rhs);
}
template<class lobj,class robj>
inline Lattice<vPredicate> operator == (const lobj & lhs, const Lattice<robj> & rhs) {
return SLComparison(veq<lobj,robj>(),lhs,rhs);
}
// not equal
template<class lobj,class robj>
inline Lattice<vPredicate> operator != (const Lattice<lobj> & lhs, const Lattice<robj> & rhs) {
return LLComparison(vne<lobj,robj>(),lhs,rhs);
}
template<class lobj,class robj>
inline Lattice<vPredicate> operator != (const Lattice<lobj> & lhs, const robj & rhs) {
return LSComparison(vne<lobj,robj>(),lhs,rhs);
}
template<class lobj,class robj>
inline Lattice<vPredicate> operator != (const lobj & lhs, const Lattice<robj> & rhs) {
return SLComparison(vne<lobj,robj>(),lhs,rhs);
}
NAMESPACE_END(Grid);
#endif
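A sketch of the predicate lattices these operators produce, here built from a coordinate field and fed to where() (assumed from Lattice_where.h elsewhere in the library); names are illustrative.
#include <Grid/Grid.h>
using namespace Grid;
void comparison_sketch(GridCartesian *grid, GridParallelRNG &rng)
{
  LatticeInteger t(grid);
  LatticeCoordinate(t, Nd-1);                    // global time coordinate at each site
  Lattice<vPredicate> mask = (t < Integer(4));   // site-wise 1 where t<4, else 0
  LatticeComplexD num(grid), den(grid), sel(grid);
  random(rng, num);
  random(rng, den);
  sel = where(mask, num, den);                   // num on t<4, den elsewhere
}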

@@ -26,10 +26,10 @@ Author: Peter Boyle <paboyle@ph.ed.ac.uk>
See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */
#ifndef GRID_COMPARISON_H
#define GRID_COMPARISON_H
namespace Grid {
#pragma once
NAMESPACE_BEGIN(Grid);
/////////////////////////////////////////
// This implementation is a bit poor.
@@ -44,42 +44,42 @@ namespace Grid {
//
template<class lobj,class robj> class veq {
public:
vInteger operator()(const lobj &lhs, const robj &rhs)
accelerator vInteger operator()(const lobj &lhs, const robj &rhs)
{
return (lhs) == (rhs);
}
};
template<class lobj,class robj> class vne {
public:
vInteger operator()(const lobj &lhs, const robj &rhs)
accelerator vInteger operator()(const lobj &lhs, const robj &rhs)
{
return (lhs) != (rhs);
}
};
template<class lobj,class robj> class vlt {
public:
vInteger operator()(const lobj &lhs, const robj &rhs)
accelerator vInteger operator()(const lobj &lhs, const robj &rhs)
{
return (lhs) < (rhs);
}
};
template<class lobj,class robj> class vle {
public:
vInteger operator()(const lobj &lhs, const robj &rhs)
accelerator vInteger operator()(const lobj &lhs, const robj &rhs)
{
return (lhs) <= (rhs);
}
};
template<class lobj,class robj> class vgt {
public:
vInteger operator()(const lobj &lhs, const robj &rhs)
accelerator vInteger operator()(const lobj &lhs, const robj &rhs)
{
return (lhs) > (rhs);
}
};
template<class lobj,class robj> class vge {
public:
vInteger operator()(const lobj &lhs, const robj &rhs)
accelerator vInteger operator()(const lobj &lhs, const robj &rhs)
{
return (lhs) >= (rhs);
}
@@ -88,42 +88,42 @@ namespace Grid {
// Generic list of functors
template<class lobj,class robj> class seq {
public:
Integer operator()(const lobj &lhs, const robj &rhs)
accelerator Integer operator()(const lobj &lhs, const robj &rhs)
{
return (lhs) == (rhs);
}
};
template<class lobj,class robj> class sne {
public:
Integer operator()(const lobj &lhs, const robj &rhs)
accelerator Integer operator()(const lobj &lhs, const robj &rhs)
{
return (lhs) != (rhs);
}
};
template<class lobj,class robj> class slt {
public:
Integer operator()(const lobj &lhs, const robj &rhs)
accelerator Integer operator()(const lobj &lhs, const robj &rhs)
{
return (lhs) < (rhs);
}
};
template<class lobj,class robj> class sle {
public:
Integer operator()(const lobj &lhs, const robj &rhs)
accelerator Integer operator()(const lobj &lhs, const robj &rhs)
{
return (lhs) <= (rhs);
}
};
template<class lobj,class robj> class sgt {
public:
Integer operator()(const lobj &lhs, const robj &rhs)
accelerator Integer operator()(const lobj &lhs, const robj &rhs)
{
return (lhs) > (rhs);
}
};
template<class lobj,class robj> class sge {
public:
Integer operator()(const lobj &lhs, const robj &rhs)
accelerator Integer operator()(const lobj &lhs, const robj &rhs)
{
return (lhs) >= (rhs);
}
@@ -133,12 +133,12 @@ namespace Grid {
// Integer and real get extra relational functions.
//////////////////////////////////////////////////////////////////////////////////////////////////////
template<class sfunctor, class vsimd,IfNotComplex<vsimd> = 0>
inline vInteger Comparison(sfunctor sop,const vsimd & lhs, const vsimd & rhs)
accelerator_inline vInteger Comparison(sfunctor sop,const vsimd & lhs, const vsimd & rhs)
{
typedef typename vsimd::scalar_type scalar;
std::vector<scalar> vlhs(vsimd::Nsimd()); // Use functors to reduce this to single implementation
std::vector<scalar> vrhs(vsimd::Nsimd());
std::vector<Integer> vpred(vsimd::Nsimd());
ExtractBuffer<scalar> vlhs(vsimd::Nsimd()); // Use functors to reduce this to single implementation
ExtractBuffer<scalar> vrhs(vsimd::Nsimd());
ExtractBuffer<Integer> vpred(vsimd::Nsimd());
vInteger ret;
extract<vsimd,scalar>(lhs,vlhs);
extract<vsimd,scalar>(rhs,vrhs);
@@ -150,11 +150,11 @@ namespace Grid {
}
template<class sfunctor, class vsimd,IfNotComplex<vsimd> = 0>
inline vInteger Comparison(sfunctor sop,const vsimd & lhs, const typename vsimd::scalar_type & rhs)
accelerator_inline vInteger Comparison(sfunctor sop,const vsimd & lhs, const typename vsimd::scalar_type & rhs)
{
typedef typename vsimd::scalar_type scalar;
std::vector<scalar> vlhs(vsimd::Nsimd()); // Use functors to reduce this to single implementation
std::vector<Integer> vpred(vsimd::Nsimd());
ExtractBuffer<scalar> vlhs(vsimd::Nsimd()); // Use functors to reduce this to single implementation
ExtractBuffer<Integer> vpred(vsimd::Nsimd());
vInteger ret;
extract<vsimd,scalar>(lhs,vlhs);
for(int s=0;s<vsimd::Nsimd();s++){
@@ -165,11 +165,11 @@ namespace Grid {
}
template<class sfunctor, class vsimd,IfNotComplex<vsimd> = 0>
inline vInteger Comparison(sfunctor sop,const typename vsimd::scalar_type & lhs, const vsimd & rhs)
accelerator_inline vInteger Comparison(sfunctor sop,const typename vsimd::scalar_type & lhs, const vsimd & rhs)
{
typedef typename vsimd::scalar_type scalar;
std::vector<scalar> vrhs(vsimd::Nsimd()); // Use functors to reduce this to single implementation
std::vector<Integer> vpred(vsimd::Nsimd());
ExtractBuffer<scalar> vrhs(vsimd::Nsimd()); // Use functors to reduce this to single implementation
ExtractBuffer<Integer> vpred(vsimd::Nsimd());
vInteger ret;
extract<vsimd,scalar>(rhs,vrhs);
for(int s=0;s<vsimd::Nsimd();s++){
@@ -179,52 +179,54 @@ namespace Grid {
return ret;
}
#define DECLARE_RELATIONAL(op,functor) \
#define DECLARE_RELATIONAL_EQ(op,functor) \
template<class vsimd,IfSimd<vsimd> = 0>\
inline vInteger operator op (const vsimd & lhs, const vsimd & rhs)\
accelerator_inline vInteger operator op (const vsimd & lhs, const vsimd & rhs)\
{\
typedef typename vsimd::scalar_type scalar;\
return Comparison(functor<scalar,scalar>(),lhs,rhs);\
}\
template<class vsimd,IfSimd<vsimd> = 0>\
inline vInteger operator op (const vsimd & lhs, const typename vsimd::scalar_type & rhs) \
accelerator_inline vInteger operator op (const vsimd & lhs, const typename vsimd::scalar_type & rhs) \
{\
typedef typename vsimd::scalar_type scalar;\
return Comparison(functor<scalar,scalar>(),lhs,rhs);\
}\
template<class vsimd,IfSimd<vsimd> = 0>\
inline vInteger operator op (const typename vsimd::scalar_type & lhs, const vsimd & rhs) \
accelerator_inline vInteger operator op (const typename vsimd::scalar_type & lhs, const vsimd & rhs) \
{\
typedef typename vsimd::scalar_type scalar;\
return Comparison(functor<scalar,scalar>(),lhs,rhs);\
}\
template<class vsimd>\
inline vInteger operator op(const iScalar<vsimd> &lhs,const iScalar<vsimd> &rhs)\
{ \
return lhs._internal op rhs._internal; \
} \
template<class vsimd>\
inline vInteger operator op(const iScalar<vsimd> &lhs,const typename vsimd::scalar_type &rhs) \
accelerator_inline vInteger operator op(const iScalar<vsimd> &lhs,const typename vsimd::scalar_type &rhs) \
{ \
return lhs._internal op rhs; \
} \
template<class vsimd>\
inline vInteger operator op(const typename vsimd::scalar_type &lhs,const iScalar<vsimd> &rhs) \
accelerator_inline vInteger operator op(const typename vsimd::scalar_type &lhs,const iScalar<vsimd> &rhs) \
{ \
return lhs op rhs._internal; \
}
} \
#define DECLARE_RELATIONAL(op,functor) \
DECLARE_RELATIONAL_EQ(op,functor) \
template<class vsimd>\
accelerator_inline vInteger operator op(const iScalar<vsimd> &lhs,const iScalar<vsimd> &rhs)\
{ \
return lhs._internal op rhs._internal; \
}
DECLARE_RELATIONAL(<,slt);
DECLARE_RELATIONAL(<=,sle);
DECLARE_RELATIONAL(>,sgt);
DECLARE_RELATIONAL(>=,sge);
DECLARE_RELATIONAL(==,seq);
DECLARE_RELATIONAL_EQ(==,seq);
DECLARE_RELATIONAL(!=,sne);
#undef DECLARE_RELATIONAL
}
NAMESPACE_END(Grid);
#endif
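The SIMD-level relational operators generated by these macros can also be used directly; a small sketch with illustrative names:
#include <Grid/Grid.h>
using namespace Grid;
void simd_compare_sketch(void)
{
  vRealD a, b;
  vsplat(a, 1.0);                 // every lane = 1.0
  vsplat(b, 2.0);                 // every lane = 2.0
  // Goes through Comparison(slt<double,double>(),a,b): extract the lanes,
  // compare scalar-wise, merge back into a per-lane Integer mask.
  vInteger lt = (a < b);
  vInteger eq = (a == b);
}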

@@ -1,4 +1,4 @@
/*************************************************************************************
/*************************************************************************************
Grid physics library, www.github.com/paboyle/Grid
@@ -23,18 +23,18 @@ Author: Peter Boyle <paboyle@ph.ed.ac.uk>
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */
*************************************************************************************/
/* END LEGAL */
#ifndef GRID_LATTICE_CONFORMABLE_H
#define GRID_LATTICE_CONFORMABLE_H
namespace Grid {
template<class obj1,class obj2> void conformable(const Lattice<obj1> &lhs,const Lattice<obj2> &rhs)
{
assert(lhs._grid == rhs._grid);
assert(lhs.checkerboard == rhs.checkerboard);
}
NAMESPACE_BEGIN(Grid);
template<class obj1,class obj2> void conformable(const Lattice<obj1> &lhs,const Lattice<obj2> &rhs)
{
assert(lhs.Grid() == rhs.Grid());
assert(lhs.Checkerboard() == rhs.Checkerboard());
}
NAMESPACE_END(Grid);
#endif

@@ -0,0 +1,55 @@
/*************************************************************************************
Grid physics library, www.github.com/paboyle/Grid
Source file: ./lib/lattice/Lattice_coordinate.h
Copyright (C) 2015
Author: Peter Boyle <paboyle@ph.ed.ac.uk>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */
#pragma once
NAMESPACE_BEGIN(Grid);
template<class iobj> inline void LatticeCoordinate(Lattice<iobj> &l,int mu)
{
typedef typename iobj::scalar_type scalar_type;
typedef typename iobj::vector_type vector_type;
GridBase *grid = l.Grid();
int Nsimd = grid->iSites();
autoView(l_v, l, CpuWrite);
thread_for( o, grid->oSites(), {
vector_type vI;
Coordinate gcoor;
ExtractBuffer<scalar_type> mergebuf(Nsimd);
for(int i=0;i<grid->iSites();i++){
grid->RankIndexToGlobalCoor(grid->ThisRank(),o,i,gcoor);
mergebuf[i]=(Integer)gcoor[mu];
}
merge<vector_type,scalar_type>(vI,mergebuf);
l_v[o]=vI;
});
};
NAMESPACE_END(Grid);
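LatticeCoordinate is typically used to build coordinate-dependent masks and phases; a small sketch, with illustrative names:
#include <Grid/Grid.h>
using namespace Grid;
void coordinate_sketch(GridCartesian *grid)
{
  // One Integer lattice per direction holding the global coordinate.
  std::vector<LatticeInteger> coor(Nd, grid);
  for (int mu = 0; mu < Nd; mu++) LatticeCoordinate(coor[mu], mu);
  // Site-wise sum of coordinates, e.g. as an ingredient of a parity mask.
  LatticeInteger csum(grid);
  csum = Zero();
  for (int mu = 0; mu < Nd; mu++) csum = csum + coor[mu];
  // Predicate selecting the t=0 time slice.
  Lattice<vPredicate> tzero = (coor[Nd-1] == Integer(0));
}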

@@ -0,0 +1,87 @@
/*************************************************************************************
Grid physics library, www.github.com/paboyle/Grid
Source file: ./lib/lattice/Lattice_local.h
Copyright (C) 2015
Author: Peter Boyle <paboyle@ph.ed.ac.uk>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */
#ifndef GRID_LATTICE_LOCALREDUCTION_H
#define GRID_LATTICE_LOCALREDUCTION_H
///////////////////////////////////////////////
// localInner, localNorm, outerProduct
///////////////////////////////////////////////
NAMESPACE_BEGIN(Grid);
/////////////////////////////////////////////////////
// Per-site (locally reduced) routines; no reduction over sites
/////////////////////////////////////////////////////
// localNorm2,
template<class vobj>
inline auto localNorm2 (const Lattice<vobj> &rhs)-> Lattice<typename vobj::tensor_reduced>
{
Lattice<typename vobj::tensor_reduced> ret(rhs.Grid());
autoView( rhs_v , rhs, AcceleratorRead);
autoView( ret_v , ret, AcceleratorWrite);
accelerator_for(ss,rhs_v.size(),vobj::Nsimd(),{
coalescedWrite(ret_v[ss],innerProduct(rhs_v(ss),rhs_v(ss)));
});
return ret;
}
// localInnerProduct
template<class vobj>
inline auto localInnerProduct (const Lattice<vobj> &lhs,const Lattice<vobj> &rhs) -> Lattice<typename vobj::tensor_reduced>
{
Lattice<typename vobj::tensor_reduced> ret(rhs.Grid());
autoView( lhs_v , lhs, AcceleratorRead);
autoView( rhs_v , rhs, AcceleratorRead);
autoView( ret_v , ret, AcceleratorWrite);
accelerator_for(ss,rhs_v.size(),vobj::Nsimd(),{
coalescedWrite(ret_v[ss],innerProduct(lhs_v(ss),rhs_v(ss)));
});
return ret;
}
// outerProduct Scalar x Scalar -> Scalar
// Vector x Vector -> Matrix
template<class ll,class rr>
inline auto outerProduct (const Lattice<ll> &lhs,const Lattice<rr> &rhs) -> Lattice<decltype(outerProduct(ll(),rr()))>
{
typedef decltype(coalescedRead(ll())) sll;
typedef decltype(coalescedRead(rr())) srr;
Lattice<decltype(outerProduct(ll(),rr()))> ret(rhs.Grid());
autoView( lhs_v , lhs, AcceleratorRead);
autoView( rhs_v , rhs, AcceleratorRead);
autoView( ret_v , ret, AcceleratorWrite);
accelerator_for(ss,rhs_v.size(),1,{
// FIXME had issues with scalar version of outer
// Use vector [] operator and don't read coalesce this loop
ret_v[ss]=outerProduct(lhs_v[ss],rhs_v[ss]);
});
return ret;
}
NAMESPACE_END(Grid);
#endif
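A sketch of the local (per-site, no communication) reductions above, using illustrative field names:
#include <Grid/Grid.h>
using namespace Grid;
void local_reduction_sketch(GridCartesian *grid, GridParallelRNG &rng)
{
  LatticeFermion psi(grid), chi(grid);
  gaussian(rng, psi);
  gaussian(rng, chi);
  auto dens = localNorm2(psi);             // |psi(x)|^2 at every site
  auto olap = localInnerProduct(psi, chi); // <psi(x),chi(x)> at every site
  // Vector x Vector -> Matrix, site by site.
  LatticeColourVector v(grid);
  gaussian(rng, v);
  auto vvdag = outerProduct(v, v);
}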

@@ -0,0 +1,202 @@
/*************************************************************************************
Grid physics library, www.github.com/paboyle/Grid
Source file: ./lib/lattice/Lattice_reduction.h
Copyright (C) 2015
Author: Azusa Yamaguchi <ayamaguc@staffmail.ed.ac.uk>
Author: Peter Boyle <paboyle@ph.ed.ac.uk>
Author: paboyle <paboyle@ph.ed.ac.uk>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */
#pragma once
#include <Grid/Grid_Eigen_Dense.h>
#ifdef GRID_WARN_SUBOPTIMAL
#warning "Optimisation alert all these reduction loops are NOT threaded "
#endif
NAMESPACE_BEGIN(Grid);
template<class vobj>
static void sliceMaddMatrix (Lattice<vobj> &R,Eigen::MatrixXcd &aa,const Lattice<vobj> &X,const Lattice<vobj> &Y,int Orthog,RealD scale=1.0)
{
typedef typename vobj::scalar_object sobj;
typedef typename vobj::scalar_type scalar_type;
typedef typename vobj::vector_type vector_type;
int Nblock = X.Grid()->GlobalDimensions()[Orthog];
GridBase *FullGrid = X.Grid();
// GridBase *SliceGrid = makeSubSliceGrid(FullGrid,Orthog);
// Lattice<vobj> Xslice(SliceGrid);
// Lattice<vobj> Rslice(SliceGrid);
assert( FullGrid->_simd_layout[Orthog]==1);
//FIXME package in a convenient iterator
//Should loop over a plane orthogonal to direction "Orthog"
int stride=FullGrid->_slice_stride[Orthog];
int block =FullGrid->_slice_block [Orthog];
int nblock=FullGrid->_slice_nblock[Orthog];
int ostride=FullGrid->_ostride[Orthog];
autoView( X_v , X, CpuRead);
autoView( Y_v , Y, CpuRead);
autoView( R_v , R, CpuWrite);
thread_region
{
std::vector<vobj> s_x(Nblock);
thread_loop_collapse2( (int n=0;n<nblock;n++),{
for(int b=0;b<block;b++){
int o = n*stride + b;
for(int i=0;i<Nblock;i++){
s_x[i] = X_v[o+i*ostride];
}
vobj dot;
for(int i=0;i<Nblock;i++){
dot = Y_v[o+i*ostride];
for(int j=0;j<Nblock;j++){
dot = dot + s_x[j]*(scale*aa(j,i));
}
R_v[o+i*ostride]=dot;
}
}});
}
};
template<class vobj>
static void sliceMulMatrix (Lattice<vobj> &R,Eigen::MatrixXcd &aa,const Lattice<vobj> &X,int Orthog,RealD scale=1.0)
{
typedef typename vobj::scalar_object sobj;
typedef typename vobj::scalar_type scalar_type;
typedef typename vobj::vector_type vector_type;
int Nblock = X.Grid()->GlobalDimensions()[Orthog];
GridBase *FullGrid = X.Grid();
assert( FullGrid->_simd_layout[Orthog]==1);
//FIXME package in a convenient iterator
//Should loop over a plane orthogonal to direction "Orthog"
int stride=FullGrid->_slice_stride[Orthog];
int block =FullGrid->_slice_block [Orthog];
int nblock=FullGrid->_slice_nblock[Orthog];
int ostride=FullGrid->_ostride[Orthog];
autoView( X_v , X, CpuRead);
autoView( R_v , R, CpuWrite);
thread_region
{
std::vector<vobj> s_x(Nblock);
thread_loop_collapse2( (int n=0;n<nblock;n++),{
for(int b=0;b<block;b++){
int o = n*stride + b;
for(int i=0;i<Nblock;i++){
s_x[i] = X_v[o+i*ostride];
}
vobj dot;
for(int i=0;i<Nblock;i++){
dot = s_x[0]*(scale*aa(0,i));
for(int j=1;j<Nblock;j++){
dot = dot + s_x[j]*(scale*aa(j,i));
}
R_v[o+i*ostride]=dot;
}
}});
}
};
template<class vobj>
static void sliceInnerProductMatrix( Eigen::MatrixXcd &mat, const Lattice<vobj> &lhs,const Lattice<vobj> &rhs,int Orthog)
{
typedef typename vobj::scalar_object sobj;
typedef typename vobj::scalar_type scalar_type;
typedef typename vobj::vector_type vector_type;
GridBase *FullGrid = lhs.Grid();
// GridBase *SliceGrid = makeSubSliceGrid(FullGrid,Orthog);
int Nblock = FullGrid->GlobalDimensions()[Orthog];
// Lattice<vobj> Lslice(SliceGrid);
// Lattice<vobj> Rslice(SliceGrid);
mat = Eigen::MatrixXcd::Zero(Nblock,Nblock);
assert( FullGrid->_simd_layout[Orthog]==1);
// int nh = FullGrid->_ndimension;
// int nl = SliceGrid->_ndimension;
// int nl = nh-1;
//FIXME package in a convenient iterator
//Should loop over a plane orthogonal to direction "Orthog"
int stride=FullGrid->_slice_stride[Orthog];
int block =FullGrid->_slice_block [Orthog];
int nblock=FullGrid->_slice_nblock[Orthog];
int ostride=FullGrid->_ostride[Orthog];
typedef typename vobj::vector_typeD vector_typeD;
autoView( lhs_v , lhs, CpuRead);
autoView( rhs_v , rhs, CpuRead);
thread_region {
std::vector<vobj> Left(Nblock);
std::vector<vobj> Right(Nblock);
Eigen::MatrixXcd mat_thread = Eigen::MatrixXcd::Zero(Nblock,Nblock);
thread_loop_collapse2((int n=0;n<nblock;n++),{
for(int b=0;b<block;b++){
int o = n*stride + b;
for(int i=0;i<Nblock;i++){
Left [i] = lhs_v[o+i*ostride];
Right[i] = rhs_v[o+i*ostride];
}
for(int i=0;i<Nblock;i++){
for(int j=0;j<Nblock;j++){
auto tmp = innerProduct(Left[i],Right[j]);
auto rtmp = TensorRemove(tmp);
ComplexD z = Reduce(rtmp);
mat_thread(i,j) += std::complex<double>(real(z),imag(z));
}}
}});
thread_critical {
mat += mat_thread;
}
}
for(int i=0;i<Nblock;i++){
for(int j=0;j<Nblock;j++){
ComplexD sum = mat(i,j);
FullGrid->GlobalSum(sum);
mat(i,j)=sum;
}}
return;
}
NAMESPACE_END(Grid);
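A sketch of the slice (block) routines above, as used in block solvers. Orthog indexes the blocking direction; per the assert in the kernels, the grid must not be SIMD-decomposed in that direction. Names are illustrative.
#include <Grid/Grid.h>
using namespace Grid;
void slice_sketch(GridCartesian *grid, GridParallelRNG &rng, int Orthog)
{
  LatticeFermion X(grid), Y(grid), R(grid);
  gaussian(rng, X);
  gaussian(rng, Y);
  int Nblock = grid->GlobalDimensions()[Orthog];
  // ip(i,j) = <X_i, Y_j>, where i,j index slices along Orthog.
  Eigen::MatrixXcd ip = Eigen::MatrixXcd::Zero(Nblock, Nblock);
  sliceInnerProductMatrix(ip, X, Y, Orthog);
  // R_i = Y_i + sum_j X_j * aa(j,i)
  Eigen::MatrixXcd aa = Eigen::MatrixXcd::Identity(Nblock, Nblock);
  sliceMaddMatrix(R, aa, X, Y, Orthog);
}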

@@ -0,0 +1,231 @@
/*************************************************************************************
Grid physics library, www.github.com/paboyle/Grid
Source file: ./lib/lattice/Lattice_peekpoke.h
Copyright (C) 2015
Author: Azusa Yamaguchi <ayamaguc@staffmail.ed.ac.uk>
Author: Peter Boyle <paboyle@ph.ed.ac.uk>
Author: Peter Boyle <peterboyle@Peters-MacBook-Pro-2.local>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */
#ifndef GRID_LATTICE_PEEK_H
#define GRID_LATTICE_PEEK_H
///////////////////////////////////////////////
// Peeking and poking around
///////////////////////////////////////////////
NAMESPACE_BEGIN(Grid);
// FIXME accelerator_loop and accelerator_inline these
////////////////////////////////////////////////////////////////////////////////////////////////////
// Peek internal indices of a Lattice object
////////////////////////////////////////////////////////////////////////////////////////////////////
template<int Index,class vobj>
auto PeekIndex(const Lattice<vobj> &lhs,int i) -> Lattice<decltype(peekIndex<Index>(vobj(),i))>
{
Lattice<decltype(peekIndex<Index>(vobj(),i))> ret(lhs.Grid());
ret.Checkerboard()=lhs.Checkerboard();
autoView( ret_v, ret, AcceleratorWrite);
autoView( lhs_v, lhs, AcceleratorRead);
accelerator_for( ss, lhs_v.size(), 1, {
ret_v[ss] = peekIndex<Index>(lhs_v[ss],i);
});
return ret;
};
template<int Index,class vobj>
auto PeekIndex(const Lattice<vobj> &lhs,int i,int j) -> Lattice<decltype(peekIndex<Index>(vobj(),i,j))>
{
Lattice<decltype(peekIndex<Index>(vobj(),i,j))> ret(lhs.Grid());
ret.Checkerboard()=lhs.Checkerboard();
autoView( ret_v, ret, AcceleratorWrite);
autoView( lhs_v, lhs, AcceleratorRead);
accelerator_for( ss, lhs_v.size(), 1, {
ret_v[ss] = peekIndex<Index>(lhs_v[ss],i,j);
});
return ret;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// Poke internal indices of a Lattice object
////////////////////////////////////////////////////////////////////////////////////////////////////
template<int Index,class vobj>
void PokeIndex(Lattice<vobj> &lhs,const Lattice<decltype(peekIndex<Index>(vobj(),0))> & rhs,int i)
{
autoView( rhs_v, rhs, AcceleratorRead);
autoView( lhs_v, lhs, AcceleratorWrite);
accelerator_for( ss, lhs_v.size(), 1, {
pokeIndex<Index>(lhs_v[ss],rhs_v[ss],i);
});
}
template<int Index,class vobj>
void PokeIndex(Lattice<vobj> &lhs,const Lattice<decltype(peekIndex<Index>(vobj(),0,0))> & rhs,int i,int j)
{
autoView( rhs_v, rhs, AcceleratorRead);
autoView( lhs_v, lhs, AcceleratorWrite);
accelerator_for( ss, lhs_v.size(), 1, {
pokeIndex<Index>(lhs_v[ss],rhs_v[ss],i,j);
});
}
//////////////////////////////////////////////////////
// Poke a scalar object into the SIMD array
//////////////////////////////////////////////////////
template<class vobj,class sobj>
void pokeSite(const sobj &s,Lattice<vobj> &l,const Coordinate &site){
GridBase *grid=l.Grid();
typedef typename vobj::scalar_type scalar_type;
typedef typename vobj::vector_type vector_type;
int Nsimd = grid->Nsimd();
assert( l.Checkerboard()== l.Grid()->CheckerBoard(site));
assert( sizeof(sobj)*Nsimd == sizeof(vobj));
int rank,odx,idx;
// Optional to broadcast from node 0.
grid->GlobalCoorToRankIndex(rank,odx,idx,site);
grid->Broadcast(grid->BossRank(),s);
// extract-modify-merge cycle is easiest way and this is not perf critical
ExtractBuffer<sobj> buf(Nsimd);
autoView( l_v , l, CpuWrite);
if ( rank == grid->ThisRank() ) {
extract(l_v[odx],buf);
buf[idx] = s;
merge(l_v[odx],buf);
}
return;
};
//////////////////////////////////////////////////////////
// Peek a scalar object from the SIMD array
//////////////////////////////////////////////////////////
template<class vobj,class sobj>
void peekSite(sobj &s,const Lattice<vobj> &l,const Coordinate &site){
GridBase *grid=l.Grid();
typedef typename vobj::scalar_type scalar_type;
typedef typename vobj::vector_type vector_type;
int Nsimd = grid->Nsimd();
assert( l.Checkerboard() == l.Grid()->CheckerBoard(site));
int rank,odx,idx;
grid->GlobalCoorToRankIndex(rank,odx,idx,site);
ExtractBuffer<sobj> buf(Nsimd);
autoView( l_v , l, CpuWrite);
extract(l_v[odx],buf);
s = buf[idx];
grid->Broadcast(rank,s);
return;
};
//////////////////////////////////////////////////////////
// Peek a scalar object from the SIMD array
//////////////////////////////////////////////////////////
// Must be CPU read view
template<class vobj,class sobj>
inline void peekLocalSite(sobj &s,const LatticeView<vobj> &l,Coordinate &site)
{
GridBase *grid = l.getGrid();
assert(l.mode==CpuRead);
typedef typename vobj::scalar_type scalar_type;
typedef typename vobj::vector_type vector_type;
int Nsimd = grid->Nsimd();
assert( l.Checkerboard()== grid->CheckerBoard(site));
assert( sizeof(sobj)*Nsimd == sizeof(vobj));
static const int words=sizeof(vobj)/sizeof(vector_type);
int odx,idx;
idx= grid->iIndex(site);
odx= grid->oIndex(site);
scalar_type * vp = (scalar_type *)&l[odx];
scalar_type * pt = (scalar_type *)&s;
for(int w=0;w<words;w++){
pt[w] = vp[idx+w*Nsimd];
}
return;
};
template<class vobj,class sobj>
inline void peekLocalSite(sobj &s,const Lattice<vobj> &l,Coordinate &site)
{
autoView(lv,l,CpuRead);
peekLocalSite(s,lv,site);
return;
};
// Must be CPU write view
template<class vobj,class sobj>
inline void pokeLocalSite(const sobj &s,LatticeView<vobj> &l,Coordinate &site)
{
GridBase *grid=l.getGrid();
assert(l.mode==CpuWrite);
typedef typename vobj::scalar_type scalar_type;
typedef typename vobj::vector_type vector_type;
int Nsimd = grid->Nsimd();
assert( l.Checkerboard()== grid->CheckerBoard(site));
assert( sizeof(sobj)*Nsimd == sizeof(vobj));
static const int words=sizeof(vobj)/sizeof(vector_type);
int odx,idx;
idx= grid->iIndex(site);
odx= grid->oIndex(site);
scalar_type * vp = (scalar_type *)&l[odx];
scalar_type * pt = (scalar_type *)&s;
for(int w=0;w<words;w++){
vp[idx+w*Nsimd] = pt[w];
}
return;
};
template<class vobj,class sobj>
inline void pokeLocalSite(const sobj &s, Lattice<vobj> &l,Coordinate &site)
{
autoView(lv,l,CpuWrite);
pokeLocalSite(s,lv,site);
return;
};
NAMESPACE_END(Grid);
#endif
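A sketch of the global peek/poke interface above (rank-safe, with the broadcast handled internally); the site chosen and the field names are illustrative:
#include <Grid/Grid.h>
using namespace Grid;
void peekpoke_sketch(GridCartesian *grid, GridParallelRNG &rng)
{
  LatticeComplex l(grid);
  random(rng, l);
  Coordinate site;
  grid->GlobalIndexToGlobalCoor(0, site);  // coordinates of global site 0
  TComplex s;
  peekSite(s, l, site);     // every rank receives the value at that site
  s = s + s;                // modify the scalar object...
  pokeSite(s, l, site);     // ...and write it back on the owning rank
}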

@@ -0,0 +1,79 @@
/*************************************************************************************
Grid physics library, www.github.com/paboyle/Grid
Source file: ./lib/lattice/Lattice_reality.h
Copyright (C) 2015
Author: Azusa Yamaguchi <ayamaguc@staffmail.ed.ac.uk>
Author: Peter Boyle <paboyle@ph.ed.ac.uk>
Author: neo <cossu@post.kek.jp>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */
#ifndef GRID_LATTICE_REAL_IMAG_H
#define GRID_LATTICE_REAL_IMAG_H
// FIXME .. this is the sector of the code
// I am most worried about the directions
// The choice of burying complex in the SIMD
// is making the use of "real" and "imag" very cumbersome
NAMESPACE_BEGIN(Grid);
template<class vobj> inline Lattice<vobj> real(const Lattice<vobj> &lhs){
Lattice<vobj> ret(lhs.Grid());
autoView( lhs_v, lhs, AcceleratorRead);
autoView( ret_v, ret, AcceleratorWrite);
ret.Checkerboard()=lhs.Checkerboard();
accelerator_for( ss, lhs_v.size(), 1, {
ret_v[ss] =real(lhs_v[ss]);
});
return ret;
};
template<class vobj> inline Lattice<vobj> imag(const Lattice<vobj> &lhs){
Lattice<vobj> ret(lhs.Grid());
autoView( lhs_v, lhs, AcceleratorRead);
autoView( ret_v, ret, AcceleratorWrite);
ret.Checkerboard()=lhs.Checkerboard();
accelerator_for( ss, lhs_v.size(), 1, {
ret_v[ss] =imag(lhs_v[ss]);
});
return ret;
};
template<class Expression,typename std::enable_if<is_lattice_expr<Expression>::value,void>::type * = nullptr>
auto real(const Expression &expr) -> decltype(real(closure(expr)))
{
return real(closure(expr));
}
template<class Expression,typename std::enable_if<is_lattice_expr<Expression>::value,void>::type * = nullptr>
auto imag(const Expression &expr) -> decltype(imag(closure(expr)))
{
return imag(closure(expr));
}
NAMESPACE_END(Grid);
#endif
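A sketch of real/imag on lattices and on expressions (via the closure overloads); names are illustrative:
#include <Grid/Grid.h>
using namespace Grid;
void reality_sketch(GridCartesian *grid, GridParallelRNG &rng)
{
  LatticeComplex z(grid);
  random(rng, z);
  LatticeComplex re = real(z);        // site-wise real part (same lattice type)
  LatticeComplex im = imag(z);        // site-wise imaginary part
  // The enable_if overloads let these act on expressions directly.
  LatticeComplex re2 = real(z + z);
}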

Some files were not shown because too many files have changed in this diff.