mirror of https://github.com/paboyle/Grid.git synced 2025-06-14 22:07:05 +01:00

Compare commits


197 Commits

Author SHA1 Message Date
c04de86d07 Merge branch 'feature/ddhmc' of https://github.com/paboyle/Grid into feature/ddhmc 2022-02-14 17:33:17 +01:00
53e1b00cde Several updates 2022-02-14 17:29:41 +01:00
1257c9f2f0 Correct mass 2021-11-17 21:40:04 +00:00
44b0988f9b Update HMC runs 2021-10-07 20:06:55 +01:00
98766851c3 Config improvements 2021-10-07 20:06:17 +01:00
f36e984278 Update 2021-10-07 20:05:56 +01:00
c234a7f913 Accidental verbose 2021-10-07 20:04:57 +01:00
0bf0c65b7d Don't know why off 2021-10-07 20:04:28 +01:00
3355ceea9f Speed zones 2021-10-06 20:52:15 +02:00
e4cbfe3d4b Blocking change 2021-10-06 20:51:42 +02:00
4867e02dfb Merge branch 'develop' into feature/ddhmc 2021-10-05 23:12:23 +01:00
ed68cf6268 Updates 2021-10-05 22:11:23 +01:00
7760417312 Production jobs 2021-08-12 00:18:34 +02:00
8cc0defb69 Reorg headers 2021-06-10 14:28:49 -04:00
e0c9d01123 Build tests works 2021-06-10 14:28:26 -04:00
91bf1df018 Rename changes 2021-06-09 22:58:01 -04:00
386a89c668 Updated mixed prec 2021-06-09 17:14:24 -04:00
670f4985fd DDHMC test update 2021-06-09 16:35:53 -04:00
dcd48a0435 Domain decomposed benchmark 2021-06-09 16:35:24 -04:00
87ec14c353 Update for mixed precision solve 2021-06-09 16:34:44 -04:00
4f5ad73501 Mixed prec update 2021-06-09 16:33:02 -04:00
096bb98f78 Remove 2021-06-09 16:32:35 -04:00
76837ffc65 Working (I think) version 2021-06-09 16:31:37 -04:00
81bd0d7906 Default to mixed precision now 2021-06-09 16:31:19 -04:00
7d8d250389 Complete ? 2021-06-09 16:30:39 -04:00
ad406b2c3e Provide a callback for subdomain solve / local domain pseudofermion restriction 2021-06-09 16:29:15 -04:00
e6366b98a5 Mixed precision & domain changes 2021-06-09 16:27:59 -04:00
302356189c Reduce verbose 2021-06-09 14:56:49 -04:00
9394450c1a Verbose changes 2021-06-09 13:30:42 -04:00
6cf3edef00 More logging 2021-06-09 12:59:27 -04:00
31cc227dd2 Domain definition class 2021-06-09 12:58:57 -04:00
c690e66325 Mobius two flavour 2021-06-09 12:58:24 -04:00
5fdbb924f0 Mixed prec tolerance changes 2021-06-09 12:57:03 -04:00
6dcaed621c General detection for arbitrary domains.
Can simplify and make specific if performance matters
2021-06-09 12:54:43 -04:00
f9cda24781 Cleaner 2021-06-08 21:50:43 -04:00
cd5e3fbd82 barrier for debug code 2021-06-08 21:18:22 -04:00
f68036c79f Inner mixed tolerance 2021-06-08 21:17:06 -04:00
216c178c16 Clean up 2021-06-08 20:59:00 -04:00
990d976241 More precision to convince Chris Kelly it's right 2021-06-08 20:29:27 -04:00
f2fe2573a7 Constructor control of inner tolerance 2021-06-08 20:27:52 -04:00
11c55a0476 More verbose 2021-06-08 20:26:52 -04:00
1cc706b2f4 Better precision 2021-06-03 17:27:49 -04:00
9e51fa5681 Mixed precision changes 2021-06-01 13:41:02 -04:00
9164cfbfc6 Mixed precision changes 2021-06-01 13:39:33 -04:00
2f3a96e5de Mixed precision changes 2021-06-01 13:38:00 -04:00
26aa89cb0c HMC logging 2021-06-01 13:36:17 -04:00
426d2365d1 Schur factored matrix 2021-06-01 13:35:38 -04:00
81bbd4e4ce Force logging improvements 2021-06-01 13:34:43 -04:00
b83bbe2dd1 Mixed precision 2021-06-01 13:34:05 -04:00
3f2d8eb727 Mixed precision change 2021-06-01 13:33:18 -04:00
60f9bf69cd These are not accessible from device 2021-06-01 13:32:34 -04:00
38d8cd228e Reusable mixed precision wrapper 2021-06-01 13:31:18 -04:00
4e1e242025 Short term hack while testing force sizes 2021-06-01 13:30:42 -04:00
20c6f19bb2 Virtual fix 2021-06-01 13:29:44 -04:00
036270a0c5 Bug fix must preserve "virtual" 2021-06-01 13:25:44 -04:00
6c506601c5 Logging the forces each trajectory useful 2021-06-01 13:25:03 -04:00
e4ff4c902a 2f Mixed precision DDHMC running and conserving hamiltonian 2021-05-27 17:10:04 -04:00
2e4d4625b6 correct multinode 2021-05-25 21:08:50 -04:00
10f2c2530b Temporary debug / display force structure 2021-05-25 21:08:09 -04:00
375e0698dc 2 deep cleaner coding 2021-05-25 19:51:52 -04:00
64b3b37476 Force test - try degenerate ratio and check for zero force 2021-05-25 18:27:46 -04:00
59e1a9be4e Locally periodic option 2021-05-25 18:25:51 -04:00
aac1736617 Double or single 2021-05-25 18:21:48 -04:00
60f814733d Could maybe simplify and require the params always contain locally_periodic 2021-05-25 18:18:15 -04:00
612e9a178a Should maybe require the params always contain locally_periodic 2021-05-25 18:17:31 -04:00
21af9cf83c GetDoubledGaugeField 2021-05-25 18:16:37 -04:00
3b8cb929d6 Doubled GaugeField option 2021-05-25 18:15:47 -04:00
5d3046eae8 Locally periodic override option 2021-05-25 18:15:17 -04:00
d73063682e GetDoubledGaugeField 2021-05-25 18:14:55 -04:00
59584b6605 GetDoubledGaugeField 2021-05-25 18:14:34 -04:00
f6d7188615 GetDoubledGaugeField useful for mixed precision or generally retrieving the double stored gauge field 2021-05-25 18:13:37 -04:00
b810b6f6bd Dirichlet working multinode now 2021-05-25 18:13:01 -04:00
e0a92dff32 update action params 2021-05-25 18:12:11 -04:00
e125f0f738 Running with small dH and Luscher's gauge link freezing scheme 2021-05-18 16:38:07 -04:00
5f081d87b0 MacOS file system capitals got me. Will rename filter to Filter later 2021-05-18 16:36:58 -04:00
84e246a963 Allow dimensions to be periodic 2021-05-18 16:36:29 -04:00
c18025c0b8 Luscher's filter - will tune this later 2021-05-18 16:35:39 -04:00
a918955020 Typo in assert 2021-05-18 16:35:01 -04:00
e3c18ce872 Adding boundary det 2021-05-15 09:06:58 -04:00
07a61e8826 renamed 2021-05-15 09:06:22 -04:00
58cb7c0732 Passes force test and conserves in HMC; plaquette looks odd 2021-05-15 09:05:34 -04:00
10339fd775 Rename 2021-05-15 08:53:39 -04:00
9400c207f7 Force test passing for boundary det 2021-05-15 08:38:26 -04:00
1fa89a2e7d Numerator pseudofermion 2021-05-15 08:37:47 -04:00
09b233b82e RdagR inverse 2021-05-14 22:21:23 -04:00
34ca4dd53a Nexp=20 default. Need a test for this. 2021-05-14 22:20:41 -04:00
c19cf46169 Cayley causes problems if the argument is exactly ZERO 2021-05-14 22:19:22 -04:00
c8db9ddb33 Massively improved test 2021-05-14 21:56:24 -04:00
104986b53d Prep for storing refresh action 2021-05-14 12:02:15 -04:00
91fd44419b Prep for storing action from the pseudofermion refresh 2021-05-14 11:52:11 -04:00
d1daa0e3f7 Allow DD only in subset of dimensions 2021-05-14 11:47:27 -04:00
05e1aed326 commenting 2021-05-14 11:45:06 -04:00
d3fd23117a Better block dimension checking and if Block >= Global dims implement non-Dirichlet 2021-05-14 11:28:24 -04:00
def51267e9 Starting the DDHMC filter. This will be seriously subject to tuning as force in
Omega and force in Omega bar appears to have different structure
2021-05-14 11:26:47 -04:00
5b52f29b2f Prepare to store the action in refresh 2021-05-14 11:18:49 -04:00
25bd03f201 Add LorentzVector 2021-05-14 10:48:45 -04:00
d5edd100a5 Useful debug fingerprints 2021-05-14 10:48:14 -04:00
e39e326b79 sRNG compile fix 2021-05-12 11:00:43 -04:00
8458e13a23 Improved - untested 2021-05-12 11:00:04 -04:00
3575278b57 Small change 2021-05-12 10:57:04 -04:00
69a2c8769a Testing out the DDHMC constituent parts 2021-05-11 13:56:52 -04:00
d4eaea00cf 2flavour local determinant ratio 2021-05-11 13:56:21 -04:00
347ccdc468 Delete file, replaced by SchurFactoredFermionOperator 2021-05-11 13:55:52 -04:00
bf034ce239 The Dirichlet BC filter 2021-05-11 13:55:27 -04:00
791d0ab0b5 Provides the DDHMC lexicon for a pair of non-Dirichlet and Dirichlet operators 2021-05-11 13:54:49 -04:00
94a2a645bd Dirichlet BC wrapper for any Fermion Operator 2021-05-11 13:54:15 -04:00
281b55df04 srng changes 2021-05-11 13:52:24 -04:00
a36e797bfc Use Integer 2021-05-11 13:52:05 -04:00
0bade717bf Added the momentum filter to resources 2021-05-11 13:50:38 -04:00
84fe791519 put momentum filter into the HMC resources 2021-05-11 13:49:49 -04:00
e009a37f6e Moving the code to SchurFactorizedFermionOperator 2021-05-11 13:46:40 -04:00
d7a887baf1 Better include guard 2021-05-11 13:45:53 -04:00
060bb59535 GCC warnings 2021-05-11 13:38:27 -04:00
bac36399c1 Compiles 2021-05-07 00:36:28 +02:00
b5b930d5bb Merge branch 'feature/ddhmc' of https://github.com/paboyle/Grid into feature/ddhmc 2021-05-06 23:51:25 +02:00
4fca66a7c6 Implement Dirichlet Comms option 2021-05-06 23:44:45 +02:00
bd181b9481 Merge branch 'feature/ddhmc' of https://github.com/paboyle/Grid into feature/ddhmc 2021-05-06 23:42:39 +02:00
919ced1c31 Move the momentum filter to a better location for DDHMC 2021-05-06 23:29:03 +02:00
b32fd473f8 Start at the Domain decomposed support 2021-05-06 23:28:28 +02:00
ffcab64890 DDHMC filter 2021-05-06 23:27:57 +02:00
374fb325f3 Remove LIME dependency 2021-05-06 23:26:42 +02:00
bab88bc4f7 Fix compile 2021-05-06 23:25:12 +02:00
7533f66b54 Fix compile 2021-05-06 23:24:39 +02:00
805cde5899 Fix compile 2021-05-06 23:24:19 +02:00
a0534e03f9 Augmented test 2021-05-06 23:23:57 +02:00
ebba195e0d Code prettier 2021-05-06 23:23:30 +02:00
3b433fe6fb Better force logging 2021-05-06 23:22:09 +02:00
07d1030660 Schur solver for Mdag 2021-05-06 23:21:15 +02:00
f8d7d23893 4D pseudofermion options 2021-05-06 23:19:53 +02:00
cdeb718229 4D pseudo fermion, with Schur red black solvers 2021-05-06 23:15:16 +02:00
cb28568198 Tuning integrator 2021-05-06 23:12:57 +02:00
45440da79d Tuning integrator 2021-05-06 23:12:34 +02:00
6fe8533414 Mdagger solve support 2021-05-06 23:10:36 +02:00
f776a7fe4a Merge branch 'feature/gparity_HMC' into feature/ddhmc 2021-05-06 20:55:03 +02:00
cff884929c Merge branch 'develop' into feature/ddhmc 2021-05-05 23:40:23 -04:00
9c991c7e29 First half quarter cut 2021-05-05 23:37:21 -04:00
1c70d8c4d9 Warning remove 2021-05-05 19:56:04 -04:00
f0e9a5299f Happy on GCC I hope 2021-05-05 19:55:34 -04:00
f1b8ba45e7 Warning on GCC suppress unrelated to my code so why doesn't it shut up about its ABI fix 2021-05-05 19:54:21 -04:00
fe998ab578 Merge branch 'feature/gparity_HMC' of https://github.com/paboyle/Grid into feature/gparity_HMC 2021-05-05 17:36:51 -04:00
c2ee2b5fd1 Random chhanges 2021-05-05 17:36:38 -04:00
3b734ee397 two point function example 2021-05-05 17:36:19 -04:00
8637a9512a Freeze Gaussian implementation 2021-05-05 17:34:54 -04:00
7f6e2ee03e Drop normal_distribution, standardise 2021-05-05 17:34:17 -04:00
7b02acb2bd Merge branch 'feature/gparity_HMC' of https://github.com/paboyle/Grid into feature/gparity_HMC 2021-05-04 13:45:11 -04:00
86948c6ea0 CRC for finger print fields - aids debug / version diff 2021-05-04 13:44:38 -04:00
53d226924a CRC added 2021-05-04 13:44:07 -04:00
80176b1b39 RHMC now outputs some initial norms to the logs
Fixed DWF+I Gparity binaries not correctly assigning twist directions (thanks Peter!)
2021-05-04 13:12:23 -04:00
29ddafd0fc Added variant of G-parity DWF+I ensemble gen code using double prec RHMC 2021-04-30 13:12:24 -04:00
0f08364e4f Mom filter refresh sRNG 2021-04-26 23:18:11 +02:00
a198d59381 Merge branch 'feature/gparity_HMC' of https://github.com/paboyle/Grid into feature/gparity_HMC 2021-04-26 21:05:52 +02:00
3a4f5f2324 Merge develop, strengthen force tests 2021-04-22 18:54:00 -04:00
824d84473f Merge branch 'develop' into feature/gparity_HMC 2021-04-22 16:32:41 -04:00
38964a4076 Switch twist direction 2021-04-22 15:57:37 -04:00
0d9aa87228 Reduce momentum to the GP plane 2021-04-22 15:56:59 -04:00
0e959d9b94 Update plaquette analysis 2021-04-22 15:55:47 -04:00
752f70cd48 Merge branch 'develop' into feature/gparity_HMC 2021-04-22 01:58:11 +02:00
e0e42873c1 Const correctness for Lattice::Replicate
Adapted GeneralEvenOddRationalRatio and Test_rhmc_EOWilsonRatio_doubleVsMixedPrec to recent changes that require passing in serial RNG

For GeneralEvenOddRationalRatio and TwoFlavourEvenOddRatio, broke refresh into two stages, the first of which generates the random field and the second that computes the pseudofermion field.
This allows derived classes to override the generation of the random field, for example in testing.

Test_dwf_gpforce now uses Gparity in x-direction and APBC in time as opposed to G-parity in time

Added Test_action_dwf_gparity2fvs1f that compares the DWF fermion action with the 2f and the 1f (doubled-lattice) implementations of Gparity
2021-04-14 16:41:27 -04:00
21165ed489 Better logging and moving momentumfilter 2021-04-10 01:08:23 +02:00
09288d633c 4D pseudofermion 2021-04-10 01:06:52 +02:00
fe00c96435 4D Pseudofermion support 2021-04-10 01:06:11 +02:00
0765f30308 4D pseudo fermion support 2021-04-10 01:05:42 +02:00
a6326b664e Move momentum filter to earlier in the include sequence so it can be used by DDHMC 2021-04-10 01:04:16 +02:00
ccd30e1485 4D pseudofermion in Cayley action 2021-04-10 01:03:01 +02:00
3060887a37 Merge branch 'develop' into feature/ddhmc 2021-03-31 19:48:31 +02:00
b53059344e Better test on non-unit gauge 2021-03-31 13:45:06 -04:00
aaf5ebf345 Small hack 2021-03-31 19:32:57 +02:00
48edb8f72e HMC prep for DDHMC 2021-03-31 19:31:46 +02:00
0ff3bf6dc5 Merge branch 'develop' into feature/gparity_HMC 2021-03-22 15:33:13 -04:00
351eab02ae Comment fix 2021-03-22 14:39:17 -04:00
feee5ccde2 Added Gparity flavour Pauli matrix algebra and associated tensor types mirroring strategy used for Gamma matrices
Added test program for the above
2021-03-03 15:39:41 -05:00
e0f6a146d8 To DWF+I G-parity evolution code, added ability to specify number of MD steps in params and an optional usage mode that reads the config and checks the plaq/checksum agree then exits 2021-02-16 10:41:52 -05:00
daa095c519 Fixed an obscure but reproducible hang in the RHMC caused by the bounds check being activated by a random number that wasn't synchronized over the nodes
HMC now also reports the "L-infinity norm" of the impulse, aka the largest site norm
2021-02-09 12:55:46 -05:00
c2676853ca Merge branch 'bugfix/maxnorm2' into feature/gparity_HMC 2021-02-08 12:17:33 -05:00
6a824033f8 Merge branch 'develop' into feature/gparity_HMC 2021-02-08 09:31:49 -05:00
cee6a37639 Added a logging tag for HMC
As the integrator logger is active by default the cmdline option to activate had no effect. Changed option to *de*activate on request ("NoIntegrator")
Cleaned up generating rational approxs in the general RHMC code
As the tolerance of the rational approx is not related to the CG tolerance, regenerating approxs for MD and MC if they differ only by the CG tolerance is not necessary; this has been fixed
In DWF+I Gparity evolution code, added cmdline options to check the rational approximations and compute the lowest/highest eigenvalues of M^dagM for RHMC tuning
In the above, changed the integrator layout to a much simpler one that completes much faster; may need additional tuning
2021-02-08 09:30:35 -05:00
6cc3ad110c Improved logging output for RHMC bounds checks
In GenericHMCRunner, exposed functionality for initializing gauge fields and RNG for external use
2021-01-29 12:35:00 -05:00
e6c6f82c52 Gparity DWF+I HMC main program now has option to specify parameter file 2021-01-27 11:18:41 -05:00
d10d0c4e7f Merge branch 'develop' into feature/gparity_HMC 2021-01-25 15:13:29 -05:00
9c106d625a Added HMC main program designed to reproduce the 16^3x32x16 DWF+I ensembles with beta=2.13 and Gparity BCs 2021-01-25 15:07:44 -05:00
6795bbca31 Generalized GeneralEvenOddRatioRationalPseudoFermionAction such that the multi-shift CG algorithm can be overridden by derived classes
Added a mixed-precision variant of GeneralEvenOddRatioRationalPseudoFermionAction and a verification test against double prec class
Fixed non-const reference used in passing RHMC approx to multishift classes
2021-01-25 14:22:31 -05:00
d161c2dc35 Improved formatting of timing output in mixed-prec multishift
In test of mixed-prec multishift, added comparison against full double precision multishift both for timing and to cross-check the results
2021-01-20 15:42:06 -05:00
7a06826cf1 Added option to NerscIO to disable exit on failing plaquette check allowing for circumvention of factor of 2 error in CPS-generated G-parity config headers
Adapted mixed-prec multi-shift test to new way to pass gauge BC directions and added cmdline option to perform the G-parity plaquette comparison with the corrected plaquette when loading config
2021-01-20 13:31:50 -05:00
c3712b8e06 Merge branch 'develop' into feature/gparity_HMC 2021-01-20 11:48:52 -05:00
901ee77b84 Mixed precision multishift test can now be performed with/without G-parity using cmdline check and can load a pregenerated configuration 2021-01-20 11:45:44 -05:00
1b84f59273 Added a mixed precision multishift algorithm for which the matrix multiplies are performed in single precision but the search directions are accumulated in double precision.
A reliable update step is performed at a tunable frequency to correct the residual. A final mixed-prec single-shift solve is performed on each pole to perform cleanup if necessary.
A test is provided to demonstrate the algorithm.
2021-01-06 12:24:44 -05:00
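In outline (generic notation, not the class's exact variable names; the full implementation is in the new ConjugateGradientMultiShiftMixedPrec file further down this page): the multi-shift recurrence only carries the residual r of the smallest shift \sigma_0 in single precision, and the residuals of the other shifts are tied to it by scalar factors, |r_s|^2 = \zeta_s^2 |r|^2, so periodically correcting that one residual in double precision is sufficient:

    r  <-  b - (A + \sigma_0) x_0    (recomputed in double every ReliableUpdateFreq iterations, then demoted back to single)

Any shift whose true residual still misses its target once the iteration has converged gets a final mixed-precision single-shift CG on (A + \sigma_s).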
1fb41a4300 Added copyLane function to Tensor_extract_merge.h which copies one lane of data from an input tensor object to a different lane of an output tensor object of potentially different precision
precisionChange lattice function now uses copyLane to remove need for temporary scalar objects, reducing register footprint and significantly improving performance
2021-01-06 11:50:56 -05:00
287bac946f ConjugateGradientMixedPrec now stores final true residual and uses the precisionChange workspaces for improved efficiency 2021-01-06 09:50:41 -05:00
80c14be65e Added core test to check precision change 2021-01-06 09:34:44 -05:00
d7a2a4852d Reimplemented precisionChange to run on GPUs. A workspace containing the mapping table can be optionally precomputed and reused for improved performance. 2021-01-06 09:30:49 -05:00
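A minimal usage sketch of the reworked interface (the grid and field names are placeholders; the calls mirror those that appear in the ConjugateGradient diffs below):

#include <Grid/Grid.h>
using namespace Grid;

// Sketch only: FGrid_d / FGrid_f are assumed to be matching double- and
// single-precision fermion grids set up elsewhere.
void convert_with_workspace(GridBase* FGrid_d, GridBase* FGrid_f,
                            LatticeFermionD &psi_d, LatticeFermionF &psi_f)
{
  // Precompute the mapping tables once; the workspace takes (to-grid, from-grid).
  precisionChangeWorkspace wk_f_from_d(FGrid_f, FGrid_d);
  precisionChangeWorkspace wk_d_from_f(FGrid_d, FGrid_f);

  // Reuse the workspaces for every conversion to avoid rebuilding the tables.
  precisionChange(psi_f, psi_d, wk_f_from_d);   // double -> single
  precisionChange(psi_d, psi_f, wk_d_from_f);   // single -> double
}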
d185f2eaa7 OneFlavourEvenOddRatioRationalPseudoFermionAction now derives from GeneralEvenOddRatioRationalPseudoFermionAction, simply performs transcription of parameters 2020-12-23 16:26:10 -05:00
813d4cd900 Added test program that ensures the generic checkerboarded RHMC (with parameters set appropriately) gives the same answer as the existing 1f code 2020-12-23 16:01:42 -05:00
75c6c6b173 General RHMC pseudofermion action now allows for different rational approximations to be used in the MD and action evaluation 2020-12-23 11:19:26 -05:00
220ad5e3ee Added more verbose log output to GeneralEvenOddRatioRationalPseudoFermionAction
In GeneralEvenOddRatioRationalPseudoFermionAction, setting the bounds check frequency to 0 now disables the check
2020-12-22 11:08:22 -05:00
ba5dc670a5 Reimplemented GparityWilsonImpl::InsertForce5D to run efficiently on GPUs
Swapped order of templated tensor code and c-number specializations in Tensor_outer.h to fix compile issue with type deduction on Summit
2020-12-22 10:10:07 -05:00
a0ca362690 Added an RHMC pseudofermion action, GeneralEvenOddRatioRationalPseudoFermionAction, that works for an arbitrary fractional power, not just a square root
Added a test evolution for the above, Test_rhmc_EOWilsonRatioPowQuarter, demonstrating conservation of Hamiltonian
Fixed HMC ignoring the MetropolisTest parameter of HMCparameters
2020-12-17 16:21:58 -05:00
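For context, the construction behind the generalised action in standard RHMC notation, stated here for a single kernel (the class's own conventions, and the ratio of numerator and denominator kernels it actually uses, may differ in detail):

    S_pf = \phi^\dagger (M^\dagger M)^{-a/b} \phi ,    x^{-a/b} \approx r(x) = \alpha_0 + \sum_{k=1}^{N} \alpha_k / (x + \beta_k)

so applying r(M^\dagger M) to \phi costs one multi-shift CG over the poles \beta_k, exactly as in the square-root case, while the heat-bath refresh uses a second approximation to the opposite half power x^{+a/(2b)}; this is why the MD and action/heat-bath approximations can be chosen independently.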
249b6e61ec For G-parity BCs the Nd-1 direction is now assumed to be the time direction and setting a twist in this direction will apply antiperiodic BCs
Added option to run Test_gparity with antiperiodic time BCs
2020-12-17 14:09:00 -05:00
187 changed files with 10257 additions and 2959 deletions

View File

@ -34,6 +34,9 @@ directory
#if defined __GNUC__ && __GNUC__>=6
#pragma GCC diagnostic ignored "-Wignored-attributes"
#endif
#if defined __GNUC__ && __GNUC__>=6
#pragma GCC diagnostic ignored "-Wpsabi"
#endif
//disables and intel compiler specific warning (in json.hpp)

View File

@ -36,6 +36,7 @@ Author: paboyle <paboyle@ph.ed.ac.uk>
#include <Grid/GridCore.h>
#include <Grid/qcd/QCD.h>
#include <Grid/qcd/spin/Spin.h>
#include <Grid/qcd/gparity/Gparity.h>
#include <Grid/qcd/utils/Utils.h>
#include <Grid/qcd/representations/Representations.h>
NAMESPACE_CHECK(GridQCDCore);

View File

@ -54,6 +54,7 @@ NAMESPACE_CHECK(BiCGSTAB);
#include <Grid/algorithms/iterative/SchurRedBlack.h>
#include <Grid/algorithms/iterative/ConjugateGradientMultiShift.h>
#include <Grid/algorithms/iterative/ConjugateGradientMixedPrec.h>
#include <Grid/algorithms/iterative/ConjugateGradientMultiShiftMixedPrec.h>
#include <Grid/algorithms/iterative/BiCGSTABMixedPrec.h>
#include <Grid/algorithms/iterative/BlockConjugateGradient.h>
#include <Grid/algorithms/iterative/ConjugateGradientReliableUpdate.h>

View File

@ -358,7 +358,7 @@ public:
autoView( in_v , in, AcceleratorRead);
autoView( out_v , out, AcceleratorWrite);
autoView( Stencil_v , Stencil, AcceleratorRead);
int npoint = geom.npoint;
auto& geom_v = geom;
typedef LatticeView<Cobj> Aview;
Vector<Aview> AcceleratorViewContainer;
@ -380,7 +380,7 @@ public:
int ptype;
StencilEntry *SE;
for(int point=0;point<npoint;point++){
for(int point=0;point<geom_v.npoint;point++){
SE=Stencil_v.GetEntry(ptype,point,ss);
@ -424,7 +424,7 @@ public:
autoView( in_v , in, AcceleratorRead);
autoView( out_v , out, AcceleratorWrite);
autoView( Stencil_v , Stencil, AcceleratorRead);
int npoint = geom.npoint;
auto& geom_v = geom;
typedef LatticeView<Cobj> Aview;
Vector<Aview> AcceleratorViewContainer;
@ -454,7 +454,7 @@ public:
int ptype;
StencilEntry *SE;
for(int p=0;p<npoint;p++){
for(int p=0;p<geom_v.npoint;p++){
int point = points_p[p];
SE=Stencil_v.GetEntry(ptype,point,ss);

View File

@ -52,7 +52,6 @@ public:
virtual void AdjOp (const Field &in, Field &out) = 0; // Abstract base
virtual void HermOpAndNorm(const Field &in, Field &out,RealD &n1,RealD &n2)=0;
virtual void HermOp(const Field &in, Field &out)=0;
virtual ~LinearOperatorBase(){};
};
@ -224,9 +223,14 @@ class SchurOperatorBase : public LinearOperatorBase<Field> {
Mpc(in,tmp);
MpcDag(tmp,out);
}
virtual void MpcMpcDag(const Field &in, Field &out) {
Field tmp(in.Grid());
tmp.Checkerboard() = in.Checkerboard();
MpcDag(in,tmp);
Mpc(tmp,out);
}
virtual void HermOpAndNorm(const Field &in, Field &out,RealD &n1,RealD &n2){
out.Checkerboard() = in.Checkerboard();
MpcDagMpc(in,out);
HermOp(in,out);
ComplexD dot= innerProduct(in,out);
n1=real(dot);
n2=norm2(out);
@ -277,6 +281,16 @@ template<class Matrix,class Field>
axpy(out,-1.0,tmp,out);
}
};
// Mpc MpcDag system presented as the HermOp
template<class Matrix,class Field>
class SchurDiagMooeeDagOperator : public SchurDiagMooeeOperator<Matrix,Field> {
public:
virtual void HermOp(const Field &in, Field &out){
out.Checkerboard() = in.Checkerboard();
this->MpcMpcDag(in,out);
}
SchurDiagMooeeDagOperator (Matrix &Mat): SchurDiagMooeeOperator<Matrix,Field>(Mat){};
};
template<class Matrix,class Field>
class SchurDiagOneOperator : public SchurOperatorBase<Field> {
protected:
@ -508,7 +522,7 @@ class SchurStaggeredOperator : public SchurOperatorBase<Field> {
virtual void MpcDag (const Field &in, Field &out){
Mpc(in,out);
}
virtual void MpcDagMpc(const Field &in, Field &out) {
virtual void MpcDagMpc(const Field &in, Field &out,RealD &ni,RealD &no) {
assert(0);// Never need with staggered
}
};
@ -586,7 +600,6 @@ class HermOpOperatorFunction : public OperatorFunction<Field> {
template<typename Field>
class PlainHermOp : public LinearFunction<Field> {
public:
using LinearFunction<Field>::operator();
LinearOperatorBase<Field> &_Linop;
PlainHermOp(LinearOperatorBase<Field>& linop) : _Linop(linop)
@ -600,7 +613,6 @@ public:
template<typename Field>
class FunctionHermOp : public LinearFunction<Field> {
public:
using LinearFunction<Field>::operator();
OperatorFunction<Field> & _poly;
LinearOperatorBase<Field> &_Linop;

View File

@ -30,19 +30,13 @@ Author: Azusa Yamaguchi <ayamaguc@staffmail.ed.ac.uk>
NAMESPACE_BEGIN(Grid);
template<class Field> using Preconditioner = LinearFunction<Field> ;
/*
template<class Field> class Preconditioner : public LinearFunction<Field> {
using LinearFunction<Field>::operator();
template<class Field> class Preconditioner : public LinearFunction<Field> {
virtual void operator()(const Field &src, Field & psi)=0;
};
*/
template<class Field> class TrivialPrecon : public Preconditioner<Field> {
public:
using Preconditioner<Field>::operator();
virtual void operator()(const Field &src, Field & psi){
void operator()(const Field &src, Field & psi){
psi = src;
}
TrivialPrecon(void){};

View File

@ -48,7 +48,6 @@ public:
virtual void Mdiag (const Field &in, Field &out)=0;
virtual void Mdir (const Field &in, Field &out,int dir, int disp)=0;
virtual void MdirAll (const Field &in, std::vector<Field> &out)=0;
virtual ~SparseMatrixBase() {};
};
/////////////////////////////////////////////////////////////////////////////////////////////
@ -73,7 +72,7 @@ public:
virtual void MeooeDag (const Field &in, Field &out)=0;
virtual void MooeeDag (const Field &in, Field &out)=0;
virtual void MooeeInvDag (const Field &in, Field &out)=0;
virtual ~CheckerBoardedSparseMatrixBase() {};
};
NAMESPACE_END(Grid);

View File

@ -292,6 +292,7 @@ public:
template<class Field>
class ChebyshevLanczos : public Chebyshev<Field> {
private:
std::vector<RealD> Coeffs;
int order;
RealD alpha;

View File

@ -36,8 +36,7 @@ NAMESPACE_BEGIN(Grid);
template<class FieldD, class FieldF, typename std::enable_if< getPrecision<FieldD>::value == 2, int>::type = 0, typename std::enable_if< getPrecision<FieldF>::value == 1, int>::type = 0>
class MixedPrecisionBiCGSTAB : public LinearFunction<FieldD>
{
public:
using LinearFunction<FieldD>::operator();
public:
RealD Tolerance;
RealD InnerTolerance; // Initial tolerance for inner CG. Defaults to Tolerance but can be changed
Integer MaxInnerIterations;

View File

@ -102,7 +102,7 @@ public:
// Check if guess is really REALLY good :)
if (cp <= rsq) {
TrueResidual = std::sqrt(a/ssq);
std::cout << GridLogMessage << "ConjugateGradient guess is converged already " << std::endl;
std::cout << GridLogMessage << "ConjugateGradient guess is converged already "<<TrueResidual<< " tol "<< Tolerance<< std::endl;
IterationsToComplete = 0;
return;
}

View File

@ -35,8 +35,7 @@ NAMESPACE_BEGIN(Grid);
typename std::enable_if< getPrecision<FieldD>::value == 2, int>::type = 0,
typename std::enable_if< getPrecision<FieldF>::value == 1, int>::type = 0>
class MixedPrecisionConjugateGradient : public LinearFunction<FieldD> {
public:
using LinearFunction<FieldD>::operator();
public:
RealD Tolerance;
RealD InnerTolerance; //Initial tolerance for inner CG. Defaults to Tolerance but can be changed
Integer MaxInnerIterations;
@ -49,19 +48,29 @@ NAMESPACE_BEGIN(Grid);
Integer TotalInnerIterations; //Number of inner CG iterations
Integer TotalOuterIterations; //Number of restarts
Integer TotalFinalStepIterations; //Number of CG iterations in final patch-up step
RealD TrueResidual;
//Option to speed up *inner single precision* solves using a LinearFunction that produces a guess
LinearFunction<FieldF> *guesser;
MixedPrecisionConjugateGradient(RealD tol,
MixedPrecisionConjugateGradient(RealD Tol,
Integer maxinnerit,
Integer maxouterit,
GridBase* _sp_grid,
LinearOperatorBase<FieldF> &_Linop_f,
LinearOperatorBase<FieldD> &_Linop_d) :
MixedPrecisionConjugateGradient(Tol, Tol, maxinnerit, maxouterit, _sp_grid, _Linop_f, _Linop_d) {};
MixedPrecisionConjugateGradient(RealD Tol,
RealD InnerTol,
Integer maxinnerit,
Integer maxouterit,
GridBase* _sp_grid,
LinearOperatorBase<FieldF> &_Linop_f,
LinearOperatorBase<FieldD> &_Linop_d) :
Linop_f(_Linop_f), Linop_d(_Linop_d),
Tolerance(tol), InnerTolerance(tol), MaxInnerIterations(maxinnerit), MaxOuterIterations(maxouterit), SinglePrecGrid(_sp_grid),
OuterLoopNormMult(100.), guesser(NULL){ };
Tolerance(Tol), InnerTolerance(InnerTol), MaxInnerIterations(maxinnerit), MaxOuterIterations(maxouterit), SinglePrecGrid(_sp_grid),
OuterLoopNormMult(100.), guesser(NULL){ assert(InnerTol < 1.0e-1);};
void useGuesser(LinearFunction<FieldF> &g){
guesser = &g;
@ -80,6 +89,11 @@ NAMESPACE_BEGIN(Grid);
RealD stop = src_norm * Tolerance*Tolerance;
GridBase* DoublePrecGrid = src_d_in.Grid();
//Generate precision change workspaces
precisionChangeWorkspace wk_dp_from_sp(DoublePrecGrid, SinglePrecGrid);
precisionChangeWorkspace wk_sp_from_dp(SinglePrecGrid, DoublePrecGrid);
FieldD tmp_d(DoublePrecGrid);
tmp_d.Checkerboard() = cb;
@ -120,7 +134,7 @@ NAMESPACE_BEGIN(Grid);
while(norm * inner_tol * inner_tol < stop) inner_tol *= 2; // inner_tol = sqrt(stop/norm) ??
PrecChangeTimer.Start();
precisionChange(src_f, src_d);
precisionChange(src_f, src_d, wk_sp_from_dp);
PrecChangeTimer.Stop();
sol_f = Zero();
@ -138,7 +152,7 @@ NAMESPACE_BEGIN(Grid);
//Convert sol back to double and add to double prec solution
PrecChangeTimer.Start();
precisionChange(tmp_d, sol_f);
precisionChange(tmp_d, sol_f, wk_dp_from_sp);
PrecChangeTimer.Stop();
axpy(sol_d, 1.0, tmp_d, sol_d);
@ -150,6 +164,7 @@ NAMESPACE_BEGIN(Grid);
ConjugateGradient<FieldD> CG_d(Tolerance, MaxInnerIterations);
CG_d(Linop_d, src_d_in, sol_d);
TotalFinalStepIterations = CG_d.IterationsToComplete;
TrueResidual = CG_d.TrueResidual;
TotalTimer.Stop();
std::cout<<GridLogMessage<<"MixedPrecisionConjugateGradient: Inner CG iterations " << TotalInnerIterations << " Restarts " << TotalOuterIterations << " Final CG iterations " << TotalFinalStepIterations << std::endl;
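A minimal sketch of driving the updated solver (the operator and field types are placeholders; only the constructor and member signatures visible in this diff are assumed):

#include <Grid/Grid.h>
using namespace Grid;

// Sketch: HermOp_d / HermOp_f are any matching double/single precision Hermitian
// operators (e.g. SchurDiagMooeeOperator wrappers around a double and a single
// precision copy of the same fermion action).
void solve_mixed(LinearOperatorBase<LatticeFermionD> &HermOp_d,
                 LinearOperatorBase<LatticeFermionF> &HermOp_f,
                 GridBase *SinglePrecGrid,
                 const LatticeFermionD &src, LatticeFermionD &sol)
{
  RealD   OuterTol   = 1.0e-10; // overall (double precision) target residual
  RealD   InnerTol   = 1.0e-5;  // starting tolerance of the inner single precision solves (must be < 1e-1)
  Integer MaxInnerIt = 10000;
  Integer MaxOuterIt = 50;

  // New constructor exposes the inner tolerance; the old single-tolerance form still exists.
  MixedPrecisionConjugateGradient<LatticeFermionD,LatticeFermionF>
    mCG(OuterTol, InnerTol, MaxInnerIt, MaxOuterIt, SinglePrecGrid, HermOp_f, HermOp_d);

  mCG(src, sol);  // restarted defect-correction solve with a final double precision clean-up
  std::cout << "Final true residual " << mCG.TrueResidual << std::endl;
}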

View File

@ -52,7 +52,7 @@ public:
MultiShiftFunction shifts;
std::vector<RealD> TrueResidualShift;
ConjugateGradientMultiShift(Integer maxit,MultiShiftFunction &_shifts) :
ConjugateGradientMultiShift(Integer maxit, const MultiShiftFunction &_shifts) :
MaxIterations(maxit),
shifts(_shifts)
{
@ -182,6 +182,9 @@ public:
for(int s=0;s<nshift;s++) {
axpby(psi[s],0.,-bs[s]*alpha[s],src,src);
}
std::cout << GridLogIterative << "ConjugateGradientMultiShift: initial rn (|src|^2) =" << rn << " qq (|MdagM src|^2) =" << qq << " d ( dot(src, [MdagM + m_0]src) ) =" << d << " c=" << c << std::endl;
///////////////////////////////////////
// Timers

View File

@ -0,0 +1,411 @@
/*************************************************************************************
Grid physics library, www.github.com/paboyle/Grid
Source file: ./lib/algorithms/iterative/ConjugateGradientMultiShift.h
Copyright (C) 2015
Author: Azusa Yamaguchi <ayamaguc@staffmail.ed.ac.uk>
Author: Peter Boyle <paboyle@ph.ed.ac.uk>
Author: Christopher Kelly <ckelly@bnl.gov>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */
#ifndef GRID_CONJUGATE_GRADIENT_MULTI_SHIFT_MIXEDPREC_H
#define GRID_CONJUGATE_GRADIENT_MULTI_SHIFT_MIXEDPREC_H
NAMESPACE_BEGIN(Grid);
//CK 2020: A variant of the multi-shift conjugate gradient with the matrix multiplication in single precision.
//The residual is stored in single precision, but the search directions and solution are stored in double precision.
//Every update_freq iterations the residual is corrected in double precision.
//For safety the a final regular CG is applied to clean up if necessary
//Linop to add shift to input linop, used in cleanup CG
namespace ConjugateGradientMultiShiftMixedPrecSupport{
template<typename Field>
class ShiftedLinop: public LinearOperatorBase<Field>{
public:
LinearOperatorBase<Field> &linop_base;
RealD shift;
ShiftedLinop(LinearOperatorBase<Field> &_linop_base, RealD _shift): linop_base(_linop_base), shift(_shift){}
void OpDiag (const Field &in, Field &out){ assert(0); }
void OpDir (const Field &in, Field &out,int dir,int disp){ assert(0); }
void OpDirAll (const Field &in, std::vector<Field> &out){ assert(0); }
void Op (const Field &in, Field &out){ assert(0); }
void AdjOp (const Field &in, Field &out){ assert(0); }
void HermOp(const Field &in, Field &out){
linop_base.HermOp(in, out);
axpy(out, shift, in, out);
}
void HermOpAndNorm(const Field &in, Field &out,RealD &n1,RealD &n2){
HermOp(in,out);
ComplexD dot = innerProduct(in,out);
n1=real(dot);
n2=norm2(out);
}
};
};
template<class FieldD, class FieldF,
typename std::enable_if< getPrecision<FieldD>::value == 2, int>::type = 0,
typename std::enable_if< getPrecision<FieldF>::value == 1, int>::type = 0>
class ConjugateGradientMultiShiftMixedPrec : public OperatorMultiFunction<FieldD>,
public OperatorFunction<FieldD>
{
public:
using OperatorFunction<FieldD>::operator();
RealD Tolerance;
Integer MaxIterations;
Integer IterationsToComplete; //Number of iterations the CG took to finish. Filled in upon completion
std::vector<int> IterationsToCompleteShift; // Iterations for this shift
int verbose;
MultiShiftFunction shifts;
std::vector<RealD> TrueResidualShift;
int ReliableUpdateFreq; //number of iterations between reliable updates
GridBase* SinglePrecGrid; //Grid for single-precision fields
LinearOperatorBase<FieldF> &Linop_f; //single precision
ConjugateGradientMultiShiftMixedPrec(Integer maxit, const MultiShiftFunction &_shifts,
GridBase* _SinglePrecGrid, LinearOperatorBase<FieldF> &_Linop_f,
int _ReliableUpdateFreq
) :
MaxIterations(maxit), shifts(_shifts), SinglePrecGrid(_SinglePrecGrid), Linop_f(_Linop_f), ReliableUpdateFreq(_ReliableUpdateFreq)
{
verbose=1;
IterationsToCompleteShift.resize(_shifts.order);
TrueResidualShift.resize(_shifts.order);
}
void operator() (LinearOperatorBase<FieldD> &Linop, const FieldD &src, FieldD &psi)
{
GridBase *grid = src.Grid();
int nshift = shifts.order;
std::vector<FieldD> results(nshift,grid);
(*this)(Linop,src,results,psi);
}
void operator() (LinearOperatorBase<FieldD> &Linop, const FieldD &src, std::vector<FieldD> &results, FieldD &psi)
{
int nshift = shifts.order;
(*this)(Linop,src,results);
psi = shifts.norm*src;
for(int i=0;i<nshift;i++){
psi = psi + shifts.residues[i]*results[i];
}
return;
}
void operator() (LinearOperatorBase<FieldD> &Linop_d, const FieldD &src_d, std::vector<FieldD> &psi_d)
{
GridBase *DoublePrecGrid = src_d.Grid();
precisionChangeWorkspace wk_f_from_d(SinglePrecGrid, DoublePrecGrid);
precisionChangeWorkspace wk_d_from_f(DoublePrecGrid, SinglePrecGrid);
////////////////////////////////////////////////////////////////////////
// Convenience references to the info stored in "MultiShiftFunction"
////////////////////////////////////////////////////////////////////////
int nshift = shifts.order;
std::vector<RealD> &mass(shifts.poles); // Make references to array in "shifts"
std::vector<RealD> &mresidual(shifts.tolerances);
std::vector<RealD> alpha(nshift,1.0);
//Double precision search directions
FieldD p_d(DoublePrecGrid);
std::vector<FieldD> ps_d(nshift, DoublePrecGrid);// Search directions (double precision)
FieldD tmp_d(DoublePrecGrid);
FieldD r_d(DoublePrecGrid);
FieldD mmp_d(DoublePrecGrid);
assert(psi_d.size()==nshift);
assert(mass.size()==nshift);
assert(mresidual.size()==nshift);
// dynamic sized arrays on stack; 2d is a pain with vector
RealD bs[nshift];
RealD rsq[nshift];
RealD z[nshift][2];
int converged[nshift];
const int primary =0;
//Primary shift fields CG iteration
RealD a,b,c,d;
RealD cp,bp,qq; //prev
// Matrix mult fields
FieldF r_f(SinglePrecGrid);
FieldF p_f(SinglePrecGrid);
FieldF tmp_f(SinglePrecGrid);
FieldF mmp_f(SinglePrecGrid);
FieldF src_f(SinglePrecGrid);
precisionChange(src_f, src_d, wk_f_from_d);
// Check lightest mass
for(int s=0;s<nshift;s++){
assert( mass[s]>= mass[primary] );
converged[s]=0;
}
// Wire guess to zero
// Residuals "r" are src
// First search direction "p" is also src
cp = norm2(src_d);
// Handle trivial case of zero src.
if( cp == 0. ){
for(int s=0;s<nshift;s++){
psi_d[s] = Zero();
IterationsToCompleteShift[s] = 1;
TrueResidualShift[s] = 0.;
}
return;
}
for(int s=0;s<nshift;s++){
rsq[s] = cp * mresidual[s] * mresidual[s];
std::cout<<GridLogMessage<<"ConjugateGradientMultiShiftMixedPrec: shift "<< s <<" target resid "<<rsq[s]<<std::endl;
ps_d[s] = src_d;
}
// r and p for primary
r_f=src_f; //residual maintained in single
p_f=src_f;
p_d = src_d; //primary copy --- make this a reference to ps_d to save axpys
//MdagM+m[0]
Linop_f.HermOpAndNorm(p_f,mmp_f,d,qq); // mmp = MdagM p d=real(dot(p, mmp)), qq=norm2(mmp)
axpy(mmp_f,mass[0],p_f,mmp_f);
RealD rn = norm2(p_f);
d += rn*mass[0];
b = -cp /d;
// Set up the various shift variables
int iz=0;
z[0][1-iz] = 1.0;
z[0][iz] = 1.0;
bs[0] = b;
for(int s=1;s<nshift;s++){
z[s][1-iz] = 1.0;
z[s][iz] = 1.0/( 1.0 - b*(mass[s]-mass[0]));
bs[s] = b*z[s][iz];
}
// r += b[0] A.p[0]
// c= norm(r)
c=axpy_norm(r_f,b,mmp_f,r_f);
for(int s=0;s<nshift;s++) {
axpby(psi_d[s],0.,-bs[s]*alpha[s],src_d,src_d);
}
///////////////////////////////////////
// Timers
///////////////////////////////////////
GridStopWatch AXPYTimer, ShiftTimer, QRTimer, MatrixTimer, SolverTimer, PrecChangeTimer, CleanupTimer;
SolverTimer.Start();
// Iteration loop
int k;
for (k=1;k<=MaxIterations;k++){
a = c /cp;
//Update double precision search direction by residual
PrecChangeTimer.Start();
precisionChange(r_d, r_f, wk_d_from_f);
PrecChangeTimer.Stop();
AXPYTimer.Start();
axpy(p_d,a,p_d,r_d);
for(int s=0;s<nshift;s++){
if ( ! converged[s] ) {
if (s==0){
axpy(ps_d[s],a,ps_d[s],r_d);
} else{
RealD as =a *z[s][iz]*bs[s] /(z[s][1-iz]*b);
axpby(ps_d[s],z[s][iz],as,r_d,ps_d[s]);
}
}
}
AXPYTimer.Stop();
PrecChangeTimer.Start();
precisionChange(p_f, p_d, wk_f_from_d); //get back single prec search direction for linop
PrecChangeTimer.Stop();
cp=c;
MatrixTimer.Start();
Linop_f.HermOp(p_f,mmp_f);
d=real(innerProduct(p_f,mmp_f));
MatrixTimer.Stop();
AXPYTimer.Start();
axpy(mmp_f,mass[0],p_f,mmp_f);
AXPYTimer.Stop();
RealD rn = norm2(p_f);
d += rn*mass[0];
bp=b;
b=-cp/d;
// Toggle the recurrence history
bs[0] = b;
iz = 1-iz;
ShiftTimer.Start();
for(int s=1;s<nshift;s++){
if((!converged[s])){
RealD z0 = z[s][1-iz];
RealD z1 = z[s][iz];
z[s][iz] = z0*z1*bp
/ (b*a*(z1-z0) + z1*bp*(1- (mass[s]-mass[0])*b));
bs[s] = b*z[s][iz]/z0; // NB sign rel to Mike
}
}
ShiftTimer.Stop();
//Update double precision solutions
AXPYTimer.Start();
for(int s=0;s<nshift;s++){
int ss = s;
if( (!converged[s]) ) {
axpy(psi_d[ss],-bs[s]*alpha[s],ps_d[s],psi_d[ss]);
}
}
//Perform reliable update if necessary; otherwise update residual from single-prec mmp
RealD c_f = axpy_norm(r_f,b,mmp_f,r_f);
AXPYTimer.Stop();
c = c_f;
if(k % ReliableUpdateFreq == 0){
//Replace r with true residual
MatrixTimer.Start();
Linop_d.HermOp(psi_d[0],mmp_d);
MatrixTimer.Stop();
AXPYTimer.Start();
axpy(mmp_d,mass[0],psi_d[0],mmp_d);
RealD c_d = axpy_norm(r_d, -1.0, mmp_d, src_d);
AXPYTimer.Stop();
std::cout<<GridLogMessage<<"ConjugateGradientMultiShiftMixedPrec k="<<k<< ", replaced |r|^2 = "<<c_f <<" with |r|^2 = "<<c_d<<std::endl;
PrecChangeTimer.Start();
precisionChange(r_f, r_d, wk_f_from_d);
PrecChangeTimer.Stop();
c = c_d;
}
// Convergence checks
int all_converged = 1;
for(int s=0;s<nshift;s++){
if ( (!converged[s]) ){
IterationsToCompleteShift[s] = k;
RealD css = c * z[s][iz]* z[s][iz];
if(css<rsq[s]){
if ( ! converged[s] )
std::cout<<GridLogMessage<<"ConjugateGradientMultiShiftMixedPrec k="<<k<<" Shift "<<s<<" has converged"<<std::endl;
converged[s]=1;
} else {
all_converged=0;
}
}
}
if ( all_converged ){
SolverTimer.Stop();
std::cout<<GridLogMessage<< "ConjugateGradientMultiShiftMixedPrec: All shifts have converged iteration "<<k<<std::endl;
std::cout<<GridLogMessage<< "ConjugateGradientMultiShiftMixedPrec: Checking solutions"<<std::endl;
// Check answers
for(int s=0; s < nshift; s++) {
Linop_d.HermOpAndNorm(psi_d[s],mmp_d,d,qq);
axpy(tmp_d,mass[s],psi_d[s],mmp_d);
axpy(r_d,-alpha[s],src_d,tmp_d);
RealD rn = norm2(r_d);
RealD cn = norm2(src_d);
TrueResidualShift[s] = std::sqrt(rn/cn);
std::cout<<GridLogMessage<<"ConjugateGradientMultiShiftMixedPrec: shift["<<s<<"] true residual "<< TrueResidualShift[s] << " target " << mresidual[s] << std::endl;
//If we have not reached the desired tolerance, do a (mixed precision) CG cleanup
if(rn >= rsq[s]){
CleanupTimer.Start();
std::cout<<GridLogMessage<<"ConjugateGradientMultiShiftMixedPrec: performing cleanup step for shift " << s << std::endl;
//Setup linear operators for final cleanup
ConjugateGradientMultiShiftMixedPrecSupport::ShiftedLinop<FieldD> Linop_shift_d(Linop_d, mass[s]);
ConjugateGradientMultiShiftMixedPrecSupport::ShiftedLinop<FieldF> Linop_shift_f(Linop_f, mass[s]);
MixedPrecisionConjugateGradient<FieldD,FieldF> cg(mresidual[s], MaxIterations, MaxIterations, SinglePrecGrid, Linop_shift_f, Linop_shift_d);
cg(src_d, psi_d[s]);
TrueResidualShift[s] = cg.TrueResidual;
CleanupTimer.Stop();
}
}
std::cout << GridLogMessage << "ConjugateGradientMultiShiftMixedPrec: Time Breakdown for body"<<std::endl;
std::cout << GridLogMessage << "\tSolver " << SolverTimer.Elapsed() <<std::endl;
std::cout << GridLogMessage << "\t\tAXPY " << AXPYTimer.Elapsed() <<std::endl;
std::cout << GridLogMessage << "\t\tMatrix " << MatrixTimer.Elapsed() <<std::endl;
std::cout << GridLogMessage << "\t\tShift " << ShiftTimer.Elapsed() <<std::endl;
std::cout << GridLogMessage << "\t\tPrecision Change " << PrecChangeTimer.Elapsed() <<std::endl;
std::cout << GridLogMessage << "\tFinal Cleanup " << CleanupTimer.Elapsed() <<std::endl;
std::cout << GridLogMessage << "\tSolver+Cleanup " << SolverTimer.Elapsed() + CleanupTimer.Elapsed() << std::endl;
IterationsToComplete = k;
return;
}
}
// ugly hack
std::cout<<GridLogMessage<<"CG multi shift did not converge"<<std::endl;
// assert(0);
}
};
NAMESPACE_END(Grid);
#endif
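A minimal usage sketch of the class above (the field types and the provenance of the MultiShiftFunction are placeholders; only the constructor and call operator defined in this file are assumed):

#include <Grid/Grid.h>
using namespace Grid;

// Sketch: 'shifts' holds the poles/residues/tolerances of a rational approximation
// prepared elsewhere; HermOp_d / HermOp_f are the double and single precision
// versions of the same MdagM-type operator.
void multishift_mixed(LinearOperatorBase<LatticeFermionD> &HermOp_d,
                      LinearOperatorBase<LatticeFermionF> &HermOp_f,
                      GridBase *SinglePrecGrid,
                      const MultiShiftFunction &shifts,
                      const LatticeFermionD &src)
{
  int MaxIt              = 10000;
  int ReliableUpdateFreq = 50;   // recompute the true residual in double every 50 iterations

  ConjugateGradientMultiShiftMixedPrec<LatticeFermionD,LatticeFermionF>
    msCG(MaxIt, shifts, SinglePrecGrid, HermOp_f, ReliableUpdateFreq);

  // One double precision solution per pole of the rational approximation
  GridBase *grid = src.Grid();
  std::vector<LatticeFermionD> psi(shifts.order, grid);
  msCG(HermOp_d, src, psi);
}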

View File

@ -33,19 +33,16 @@ namespace Grid {
template<class Field>
class ZeroGuesser: public LinearFunction<Field> {
public:
using LinearFunction<Field>::operator();
virtual void operator()(const Field &src, Field &guess) { guess = Zero(); };
};
template<class Field>
class DoNothingGuesser: public LinearFunction<Field> {
public:
using LinearFunction<Field>::operator();
virtual void operator()(const Field &src, Field &guess) { };
};
template<class Field>
class SourceGuesser: public LinearFunction<Field> {
public:
using LinearFunction<Field>::operator();
virtual void operator()(const Field &src, Field &guess) { guess = src; };
};
@ -60,7 +57,6 @@ private:
const unsigned int N;
public:
using LinearFunction<Field>::operator();
DeflatedGuesser(const std::vector<Field> & _evec,const std::vector<RealD> & _eval)
: DeflatedGuesser(_evec, _eval, _evec.size())
@ -91,7 +87,6 @@ private:
const std::vector<RealD> &eval_coarse;
public:
using LinearFunction<FineField>::operator();
LocalCoherenceDeflatedGuesser(const std::vector<FineField> &_subspace,
const std::vector<CoarseField> &_evec_coarse,
const std::vector<RealD> &_eval_coarse)

View File

@ -67,7 +67,6 @@ public:
template<class Fobj,class CComplex,int nbasis>
class ProjectedHermOp : public LinearFunction<Lattice<iVector<CComplex,nbasis > > > {
public:
using LinearFunction<Lattice<iVector<CComplex,nbasis > > >::operator();
typedef iVector<CComplex,nbasis > CoarseSiteVector;
typedef Lattice<CoarseSiteVector> CoarseField;
typedef Lattice<CComplex> CoarseScalar; // used for inner products on fine field
@ -98,7 +97,6 @@ public:
template<class Fobj,class CComplex,int nbasis>
class ProjectedFunctionHermOp : public LinearFunction<Lattice<iVector<CComplex,nbasis > > > {
public:
using LinearFunction<Lattice<iVector<CComplex,nbasis > > >::operator();
typedef iVector<CComplex,nbasis > CoarseSiteVector;
typedef Lattice<CoarseSiteVector> CoarseField;
typedef Lattice<CComplex> CoarseScalar; // used for inner products on fine field

View File

@ -43,7 +43,7 @@ NAMESPACE_BEGIN(Grid);
template<class Field>
class PrecGeneralisedConjugateResidual : public LinearFunction<Field> {
public:
using LinearFunction<Field>::operator();
RealD Tolerance;
Integer MaxIterations;
int verbose;

View File

@ -43,7 +43,7 @@ NAMESPACE_BEGIN(Grid);
template<class Field>
class PrecGeneralisedConjugateResidualNonHermitian : public LinearFunction<Field> {
public:
using LinearFunction<Field>::operator();
RealD Tolerance;
Integer MaxIterations;
int verbose;
@ -119,8 +119,7 @@ public:
RealD GCRnStep(const Field &src, Field &psi,RealD rsq){
RealD cp;
ComplexD a, b;
// ComplexD zAz;
ComplexD a, b, zAz;
RealD zAAz;
ComplexD rq;
@ -147,7 +146,7 @@ public:
//////////////////////////////////
MatTimer.Start();
Linop.Op(psi,Az);
// zAz = innerProduct(Az,psi);
zAz = innerProduct(Az,psi);
zAAz= norm2(Az);
MatTimer.Stop();
@ -171,7 +170,7 @@ public:
LinalgTimer.Start();
// zAz = innerProduct(Az,psi);
zAz = innerProduct(Az,psi);
zAAz= norm2(Az);
//p[0],q[0],qq[0]
@ -213,7 +212,7 @@ public:
MatTimer.Start();
Linop.Op(z,Az);
MatTimer.Stop();
// zAz = innerProduct(Az,psi);
zAz = innerProduct(Az,psi);
zAAz= norm2(Az);
LinalgTimer.Start();

View File

@ -40,7 +40,7 @@ Author: Peter Boyle <paboyle@ph.ed.ac.uk>
* (-MoeMee^{-1} 1 )
* L^{dag} = ( 1 Mee^{-dag} Moe^{dag} )
* ( 0 1 )
* L^{-d} = ( 1 -Mee^{-dag} Moe^{dag} )
* L^{-dag}= ( 1 -Mee^{-dag} Moe^{dag} )
* ( 0 1 )
*
* U^-1 = (1 -Mee^{-1} Meo)
@ -82,7 +82,8 @@ Author: Peter Boyle <paboyle@ph.ed.ac.uk>
* c) M_oo^-dag Doo^{dag} Doo Moo^-1 phi_0 = M_oo^-dag (D_oo)^dag L^{-1} eta_o
* eta_o' = M_oo^-dag (D_oo)^dag (eta_o - Moe Mee^{-1} eta_e)
* psi_o = M_oo^-1 phi_o
* TODO: Deflation
*
*
*/
namespace Grid {
@ -97,6 +98,7 @@ namespace Grid {
protected:
typedef CheckerBoardedSparseMatrixBase<Field> Matrix;
OperatorFunction<Field> & _HermitianRBSolver;
int CBfactorise;
bool subGuess;
bool useSolnAsInitGuess; // if true user-supplied solution vector is used as initial guess for solver
@ -219,13 +221,20 @@ namespace Grid {
/////////////////////////////////////////////////
// Check unprec residual if possible
/////////////////////////////////////////////////
if ( ! subGuess ) {
_Matrix.M(out[b],resid);
if ( ! subGuess ) {
if ( this->adjoint() ) _Matrix.Mdag(out[b],resid);
else _Matrix.M(out[b],resid);
resid = resid-in[b];
RealD ns = norm2(in[b]);
RealD nr = norm2(resid);
std::cout<<GridLogMessage<< "SchurRedBlackBase solver true unprec resid["<<b<<"] "<<std::sqrt(nr/ns) << std::endl;
std::cout<<GridLogMessage<< "SchurRedBlackBase adjoint "<< this->adjoint() << std::endl;
if ( this->adjoint() )
std::cout<<GridLogMessage<< "SchurRedBlackBase adjoint solver true unprec resid["<<b<<"] "<<std::sqrt(nr/ns) << std::endl;
else
std::cout<<GridLogMessage<< "SchurRedBlackBase solver true unprec resid["<<b<<"] "<<std::sqrt(nr/ns) << std::endl;
} else {
std::cout<<GridLogMessage<< "SchurRedBlackBase Guess subtracted after solve["<<b<<"] " << std::endl;
}
@ -279,12 +288,21 @@ namespace Grid {
// Verify the unprec residual
if ( ! subGuess ) {
_Matrix.M(out,resid);
std::cout<<GridLogMessage<< "SchurRedBlackBase adjoint "<< this->adjoint() << std::endl;
if ( this->adjoint() ) _Matrix.Mdag(out,resid);
else _Matrix.M(out,resid);
resid = resid-in;
RealD ns = norm2(in);
RealD nr = norm2(resid);
std::cout<<GridLogMessage << "SchurRedBlackBase solver true unprec resid "<< std::sqrt(nr/ns) << std::endl;
if ( this->adjoint() )
std::cout<<GridLogMessage<< "SchurRedBlackBase adjoint solver true unprec resid "<<std::sqrt(nr/ns) << std::endl;
else
std::cout<<GridLogMessage<< "SchurRedBlackBase solver true unprec resid "<<std::sqrt(nr/ns) << std::endl;
} else {
std::cout << GridLogMessage << "SchurRedBlackBase Guess subtracted after solve." << std::endl;
}
@ -293,6 +311,7 @@ namespace Grid {
/////////////////////////////////////////////////////////////
// Override in derived.
/////////////////////////////////////////////////////////////
virtual bool adjoint(void) { return false; }
virtual void RedBlackSource (Matrix & _Matrix,const Field &src, Field &src_e,Field &src_o) =0;
virtual void RedBlackSolution(Matrix & _Matrix,const Field &sol_o, const Field &src_e,Field &sol) =0;
virtual void RedBlackSolve (Matrix & _Matrix,const Field &src_o, Field &sol_o) =0;
@ -646,6 +665,127 @@ namespace Grid {
this->_HermitianRBSolver(_OpEO, src_o, sol_o);
}
};
/*
* Red black Schur decomposition
*
* M = (Mee Meo) = (1 0 ) (Mee 0 ) (1 Mee^{-1} Meo)
* (Moe Moo) (Moe Mee^-1 1 ) (0 Moo-Moe Mee^-1 Meo) (0 1 )
* = L D U
*
* L^-1 = (1 0 )
* (-MoeMee^{-1} 1 )
* L^{dag} = ( 1 Mee^{-dag} Moe^{dag} )
* ( 0 1 )
*
* U^-1 = (1 -Mee^{-1} Meo)
* (0 1 )
* U^{dag} = ( 1 0)
* (Meo^dag Mee^{-dag} 1)
* U^{-dag} = ( 1 0)
* (-Meo^dag Mee^{-dag} 1)
*
*
***********************
* M^dag psi = eta
***********************
*
* Really for Mobius: (Wilson - easier to just use gamma 5 hermiticity)
*
* Mdag psi = Udag Ddag Ldag psi = eta
*
* U^{-dag} = ( 1 0)
* (-Meo^dag Mee^{-dag} 1)
*
*
* i) D^dag phi = (U^{-dag} eta)
* eta'_e = eta_e
* eta'_o = (eta_o - Meo^dag Mee^{-dag} eta_e)
*
* phi_o = D_oo^-dag eta'_o = D_oo^-dag (eta_o - Meo^dag Mee^{-dag} eta_e)
*
* phi_e = D_ee^-dag eta'_e = D_ee^-dag eta_e
*
* Solve:
*
* D_oo D_oo^dag phi_o = D_oo (eta_o - Meo^dag Mee^{-dag} eta_e)
*
* ii)
* phi = L^dag psi => psi = L^-dag phi.
*
* L^{-dag} = ( 1 -Mee^{-dag} Moe^{dag} )
* ( 0 1 )
*
* => sol_e = M_ee^-dag * ( src_e - Moe^dag phi_o )...
* => sol_o = phi_o
*/
///////////////////////////////////////////////////////////////////////////////////////////////////////
// Site diagonal has Mooee on it, but solve the Adjoint system
///////////////////////////////////////////////////////////////////////////////////////////////////////
template<class Field> class SchurRedBlackDiagMooeeDagSolve : public SchurRedBlackBase<Field> {
public:
typedef CheckerBoardedSparseMatrixBase<Field> Matrix;
virtual bool adjoint(void) { return true; }
SchurRedBlackDiagMooeeDagSolve(OperatorFunction<Field> &HermitianRBSolver,
const bool initSubGuess = false,
const bool _solnAsInitGuess = false)
: SchurRedBlackBase<Field> (HermitianRBSolver,initSubGuess,_solnAsInitGuess) {};
//////////////////////////////////////////////////////
// Override RedBlack specialisation
//////////////////////////////////////////////////////
virtual void RedBlackSource(Matrix & _Matrix,const Field &src, Field &src_e,Field &src_o)
{
GridBase *grid = _Matrix.RedBlackGrid();
GridBase *fgrid= _Matrix.Grid();
Field tmp(grid);
Field Mtmp(grid);
pickCheckerboard(Even,src_e,src);
pickCheckerboard(Odd ,src_o,src);
/////////////////////////////////////////////////////
// src_o = (source_o - Moe^dag MeeInvDag source_e)
/////////////////////////////////////////////////////
_Matrix.MooeeInvDag(src_e,tmp); assert( tmp.Checkerboard() ==Even);
_Matrix.MeooeDag (tmp,Mtmp); assert( Mtmp.Checkerboard() ==Odd);
tmp=src_o-Mtmp; assert( tmp.Checkerboard() ==Odd);
// get the right Mpc
SchurDiagMooeeOperator<Matrix,Field> _HermOpEO(_Matrix);
_HermOpEO.Mpc(tmp,src_o); assert(src_o.Checkerboard() ==Odd);
}
virtual void RedBlackSolve (Matrix & _Matrix,const Field &src_o, Field &sol_o)
{
SchurDiagMooeeDagOperator<Matrix,Field> _HermOpEO(_Matrix);
this->_HermitianRBSolver(_HermOpEO,src_o,sol_o);
};
virtual void RedBlackSolve (Matrix & _Matrix,const std::vector<Field> &src_o, std::vector<Field> &sol_o)
{
SchurDiagMooeeDagOperator<Matrix,Field> _HermOpEO(_Matrix);
this->_HermitianRBSolver(_HermOpEO,src_o,sol_o);
}
virtual void RedBlackSolution(Matrix & _Matrix,const Field &sol_o, const Field &src_e,Field &sol)
{
GridBase *grid = _Matrix.RedBlackGrid();
GridBase *fgrid= _Matrix.Grid();
Field sol_e(grid);
Field tmp(grid);
///////////////////////////////////////////////////
// sol_e = M_ee^-dag * ( src_e - Moe^dag phi_o )...
// sol_o = phi_o
///////////////////////////////////////////////////
_Matrix.MeooeDag(sol_o,tmp); assert(tmp.Checkerboard()==Even);
tmp = src_e-tmp; assert(tmp.Checkerboard()==Even);
_Matrix.MooeeInvDag(tmp,sol_e); assert(sol_e.Checkerboard()==Even);
setCheckerboard(sol,sol_e); assert( sol_e.Checkerboard() ==Even);
setCheckerboard(sol,sol_o); assert( sol_o.Checkerboard() ==Odd );
}
};
}
#endif
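A minimal usage sketch for the new adjoint solver (the fermion action type is just a representative choice; the calling convention follows the other SchurRedBlack wrappers above):

#include <Grid/Grid.h>
using namespace Grid;

// Sketch: solve M^dag psi = eta through the red-black decomposition above.
void solve_Mdag(DomainWallFermionD &Ddwf,
                const LatticeFermionD &eta, LatticeFermionD &psi)
{
  ConjugateGradient<LatticeFermionD> CG(1.0e-10, 10000);

  // Internally solves the Mpc Mpc^dag system on the odd checkerboard and
  // back-substitutes with the daggered even-odd operators.
  SchurRedBlackDiagMooeeDagSolve<LatticeFermionD> SchurSolver(CG);
  SchurSolver(Ddwf, eta, psi);
}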

View File

@ -159,6 +159,7 @@ void MemoryManager::Init(void)
char * str;
int Nc;
int NcS;
str= getenv("GRID_ALLOC_NCACHE_LARGE");
if ( str ) {

View File

@ -113,11 +113,6 @@ private:
static uint64_t DeviceToHostBytes;
static uint64_t HostToDeviceXfer;
static uint64_t DeviceToHostXfer;
static uint64_t DeviceAccesses;
static uint64_t HostAccesses;
static uint64_t DeviceAccessBytes;
static uint64_t HostAccessBytes;
private:
#ifndef GRID_UVM
@ -157,7 +152,6 @@ private:
// static void LRUupdate(AcceleratorViewEntry &AccCache);
static void LRUinsert(AcceleratorViewEntry &AccCache);
static void LRUinsertback(AcceleratorViewEntry &AccCache);
static void LRUremove(AcceleratorViewEntry &AccCache);
// manage entries in the table
@ -176,7 +170,6 @@ private:
public:
static void Print(void);
static void PrintState( void* CpuPtr);
static int isOpen (void* CpuPtr);
static void ViewClose(void* CpuPtr,ViewMode mode);
static void *ViewOpen (void* CpuPtr,size_t bytes,ViewMode mode,ViewAdvise hint);

View File

@ -23,11 +23,6 @@ uint64_t MemoryManager::HostToDeviceBytes;
uint64_t MemoryManager::DeviceToHostBytes;
uint64_t MemoryManager::HostToDeviceXfer;
uint64_t MemoryManager::DeviceToHostXfer;
uint64_t MemoryManager::DeviceAccesses;
uint64_t MemoryManager::HostAccesses;
uint64_t MemoryManager::DeviceAccessBytes;
uint64_t MemoryManager::HostAccessBytes;
////////////////////////////////////
// Priority ordering for unlocked entries
@ -91,14 +86,6 @@ void MemoryManager::LRUinsert(AcceleratorViewEntry &AccCache)
AccCache.LRU_valid = 1;
DeviceLRUBytes+=AccCache.bytes;
}
void MemoryManager::LRUinsertback(AcceleratorViewEntry &AccCache)
{
assert(AccCache.LRU_valid==0);
LRU.push_back(AccCache.CpuPtr);
AccCache.LRU_entry = --LRU.end();
AccCache.LRU_valid = 1;
DeviceLRUBytes+=AccCache.bytes;
}
void MemoryManager::LRUremove(AcceleratorViewEntry &AccCache)
{
assert(AccCache.LRU_valid==1);
@ -142,7 +129,6 @@ void MemoryManager::Evict(AcceleratorViewEntry &AccCache)
dprintf("MemoryManager: Evict(%llx) %llx\n",(uint64_t)AccCache.CpuPtr,(uint64_t)AccCache.AccPtr);
assert(AccCache.accLock==0);
assert(AccCache.cpuLock==0);
if(AccCache.state==AccDirty) {
Flush(AccCache);
}
@ -245,9 +231,6 @@ uint64_t MemoryManager::AcceleratorViewOpen(uint64_t CpuPtr,size_t bytes,ViewMod
EntryCreate(CpuPtr,bytes,mode,hint);
}
DeviceAccesses++;
DeviceAccessBytes+=bytes;
auto AccCacheIterator = EntryLookup(CpuPtr);
auto & AccCache = AccCacheIterator->second;
if (!AccCache.AccPtr) {
@ -366,10 +349,6 @@ void MemoryManager::CpuViewClose(uint64_t CpuPtr)
assert(AccCache.accLock==0);
AccCache.cpuLock--;
if(AccCache.cpuLock==0) {
LRUinsertback(AccCache);
}
}
/*
* Action State StateNext Flush Clone
@ -392,9 +371,6 @@ uint64_t MemoryManager::CpuViewOpen(uint64_t CpuPtr,size_t bytes,ViewMode mode,V
EntryCreate(CpuPtr,bytes,mode,transient);
}
HostAccesses++;
HostAccessBytes+=bytes;
auto AccCacheIterator = EntryLookup(CpuPtr);
auto & AccCache = AccCacheIterator->second;
@ -440,12 +416,6 @@ uint64_t MemoryManager::CpuViewOpen(uint64_t CpuPtr,size_t bytes,ViewMode mode,V
AccCache.transient= transient? EvictNext : 0;
// If view is opened on host remove from LRU
// Host close says evict next from device
if(AccCache.LRU_valid==1){
LRUremove(AccCache);
}
return AccCache.CpuPtr;
}
void MemoryManager::NotifyDeletion(void *_ptr)
@ -504,32 +474,6 @@ int MemoryManager::isOpen (void* _CpuPtr)
}
}
void MemoryManager::PrintState(void* _CpuPtr)
{
uint64_t CpuPtr = (uint64_t)_CpuPtr;
if ( EntryPresent(CpuPtr) ){
auto AccCacheIterator = EntryLookup(CpuPtr);
auto & AccCache = AccCacheIterator->second;
std::string str;
if ( AccCache.state==Empty ) str = std::string("Empty");
if ( AccCache.state==CpuDirty ) str = std::string("CpuDirty");
if ( AccCache.state==AccDirty ) str = std::string("AccDirty");
if ( AccCache.state==Consistent)str = std::string("Consistent");
if ( AccCache.state==EvictNext) str = std::string("EvictNext");
std::cout << GridLogMessage << "CpuAddr\t\tAccAddr\t\tState\t\tcpuLock\taccLock\tLRU_valid "<<std::endl;
std::cout << GridLogMessage << "0x"<<std::hex<<AccCache.CpuPtr<<std::dec
<< "\t0x"<<std::hex<<AccCache.AccPtr<<std::dec<<"\t" <<str
<< "\t" << AccCache.cpuLock
<< "\t" << AccCache.accLock
<< "\t" << AccCache.LRU_valid<<std::endl;
} else {
std::cout << GridLogMessage << "No Entry in AccCache table." << std::endl;
}
}
NAMESPACE_END(Grid);
#endif

View File

@ -12,18 +12,10 @@ uint64_t MemoryManager::HostToDeviceBytes;
uint64_t MemoryManager::DeviceToHostBytes;
uint64_t MemoryManager::HostToDeviceXfer;
uint64_t MemoryManager::DeviceToHostXfer;
uint64_t MemoryManager::DeviceAccesses;
uint64_t MemoryManager::HostAccesses;
uint64_t MemoryManager::DeviceAccessBytes;
uint64_t MemoryManager::HostAccessBytes;
void MemoryManager::ViewClose(void* AccPtr,ViewMode mode){};
void *MemoryManager::ViewOpen(void* CpuPtr,size_t bytes,ViewMode mode,ViewAdvise hint){ return CpuPtr; };
int MemoryManager::isOpen (void* CpuPtr) { return 0;}
void MemoryManager::PrintState(void* CpuPtr)
{
std::cout << GridLogMessage << "Host<->Device memory movement not currently managed by Grid." << std::endl;
};
void MemoryManager::Print(void){};
void MemoryManager::NotifyDeletion(void *ptr){};

View File

@ -388,8 +388,8 @@ double CartesianCommunicator::StencilSendToRecvFromBegin(std::vector<CommsReques
// TODO : make a OMP loop on CPU, call threaded bcopy
void *shm = (void *) this->ShmBufferTranslate(dest,recv);
assert(shm!=NULL);
// std::cout <<"acceleratorCopyDeviceToDeviceAsynch"<< std::endl;
acceleratorCopyDeviceToDeviceAsynch(xmit,shm,bytes);
acceleratorCopySynchronise(); // MPI prob slower
}
if ( CommunicatorPolicy == CommunicatorPolicySequential ) {
@ -400,9 +400,6 @@ double CartesianCommunicator::StencilSendToRecvFromBegin(std::vector<CommsReques
}
void CartesianCommunicator::StencilSendToRecvFromComplete(std::vector<CommsRequest_t> &list,int dir)
{
// std::cout << "Copy Synchronised\n"<<std::endl;
acceleratorCopySynchronise();
int nreq=list.size();
if (nreq==0) return;

View File

@ -46,3 +46,4 @@ Author: Peter Boyle <paboyle@ph.ed.ac.uk>
#include <Grid/lattice/Lattice_unary.h>
#include <Grid/lattice/Lattice_transfer.h>
#include <Grid/lattice/Lattice_basis.h>
#include <Grid/lattice/Lattice_crc.h>

View File

@ -88,13 +88,6 @@ public:
LatticeView<vobj> accessor(*( (LatticeAccelerator<vobj> *) this),mode);
accessor.ViewClose();
}
// Helper function to print the state of this object in the AccCache
void PrintCacheState(void)
{
MemoryManager::PrintState(this->_odata);
}
/////////////////////////////////////////////////////////////////////////////////
// Return a view object that may be dereferenced in site loops.
// The view is trivially copy constructible and may be copied to an accelerator device

View File

@ -0,0 +1,55 @@
/*************************************************************************************
Grid physics library, www.github.com/paboyle/Grid
Source file: ./lib/lattice/Lattice_crc.h
Copyright (C) 2021
Author: Peter Boyle <paboyle@ph.ed.ac.uk>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */
#pragma once
NAMESPACE_BEGIN(Grid);
template<class vobj> void DumpSliceNorm(std::string s,Lattice<vobj> &f,int mu=-1)
{
auto ff = localNorm2(f);
if ( mu==-1 ) mu = f.Grid()->Nd()-1;
typedef typename vobj::tensor_reduced normtype;
typedef typename normtype::scalar_object scalar;
std::vector<scalar> sff;
sliceSum(ff,sff,mu);
for(int t=0;t<sff.size();t++){
std::cout << s<<" "<<t<<" "<<sff[t]<<std::endl;
}
}
template<class vobj> uint32_t crc(Lattice<vobj> & buf)
{
autoView( buf_v , buf, CpuRead);
return ::crc32(0L,(unsigned char *)&buf_v[0],(size_t)sizeof(vobj)*buf.oSites());
}
#define CRC(U) std::cout << "FingerPrint "<<__FILE__ <<" "<< __LINE__ <<" "<< #U <<" "<<crc(U)<<std::endl;
NAMESPACE_END(Grid);
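A minimal usage sketch (not part of the patch; 'grid', 'pRNG' and 'U' are assumed to have been set up elsewhere): CRC(U) prints a fingerprint line tagged with file and line number, and DumpSliceNorm prints the norm of each slice along the last dimension.
LatticeGaugeFieldD U(grid);               // 'grid' is a GridCartesian* created elsewhere
SU<Nc>::HotConfiguration(pRNG, U);        // 'pRNG' is an initialised GridParallelRNG
CRC(U);                                   // "FingerPrint <file> <line> U <crc32>"
DumpSliceNorm("U slice norms", U);        // one line per slice; mu defaults to Nd-1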

View File

@ -42,6 +42,7 @@ void getNumBlocksAndThreads(const Iterator n, const size_t sizeofsobj, Iterator
std::cout << GridLogDebug << "\twarpSize = " << warpSize << std::endl;
std::cout << GridLogDebug << "\tsharedMemPerBlock = " << sharedMemPerBlock << std::endl;
std::cout << GridLogDebug << "\tmaxThreadsPerBlock = " << maxThreadsPerBlock << std::endl;
std::cout << GridLogDebug << "\tmaxThreadsPerBlock = " << warpSize << std::endl;
std::cout << GridLogDebug << "\tmultiProcessorCount = " << multiProcessorCount << std::endl;
if (warpSize != WARP_SIZE) {
@ -51,10 +52,6 @@ void getNumBlocksAndThreads(const Iterator n, const size_t sizeofsobj, Iterator
// let the number of threads in a block be a multiple of 2, starting from warpSize
threads = warpSize;
if ( threads*sizeofsobj > sharedMemPerBlock ) {
std::cout << GridLogError << "The object is too large for the shared memory." << std::endl;
exit(EXIT_FAILURE);
}
while( 2*threads*sizeofsobj < sharedMemPerBlock && 2*threads <= maxThreadsPerBlock ) threads *= 2;
// keep all the streaming multiprocessors busy
blocks = nextPow2(multiProcessorCount);

View File

@ -32,8 +32,9 @@
#include <random>
#ifdef RNG_SITMO
#include <Grid/sitmo_rng/sitmo_prng_engine.hpp>
#include <Grid/random/sitmo_prng_engine.hpp>
#endif
#include <Grid/random/gaussian.h>
#if defined(RNG_SITMO)
#define RNG_FAST_DISCARD
@ -142,7 +143,7 @@ public:
std::vector<RngEngine> _generators;
std::vector<std::uniform_real_distribution<RealD> > _uniform;
std::vector<std::normal_distribution<RealD> > _gaussian;
std::vector<Grid::gaussian_distribution<RealD> > _gaussian;
std::vector<std::discrete_distribution<int32_t> > _bernoulli;
std::vector<std::uniform_int_distribution<uint32_t> > _uid;
@ -243,7 +244,7 @@ public:
GridSerialRNG() : GridRNGbase() {
_generators.resize(1);
_uniform.resize(1,std::uniform_real_distribution<RealD>{0,1});
_gaussian.resize(1,std::normal_distribution<RealD>(0.0,1.0) );
_gaussian.resize(1,gaussian_distribution<RealD>(0.0,1.0) );
_bernoulli.resize(1,std::discrete_distribution<int32_t>{1,1});
_uid.resize(1,std::uniform_int_distribution<uint32_t>() );
}
@ -357,7 +358,7 @@ public:
_generators.resize(_vol);
_uniform.resize(_vol,std::uniform_real_distribution<RealD>{0,1});
_gaussian.resize(_vol,std::normal_distribution<RealD>(0.0,1.0) );
_gaussian.resize(_vol,gaussian_distribution<RealD>(0.0,1.0) );
_bernoulli.resize(_vol,std::discrete_distribution<int32_t>{1,1});
_uid.resize(_vol,std::uniform_int_distribution<uint32_t>() );
}

View File

@ -85,76 +85,6 @@ template<class vobj> inline void setCheckerboard(Lattice<vobj> &full,const Latti
});
}
template<class vobj> inline void acceleratorPickCheckerboard(int cb,Lattice<vobj> &half,const Lattice<vobj> &full, int checker_dim_half=0)
{
half.Checkerboard() = cb;
autoView(half_v, half, AcceleratorWrite);
autoView(full_v, full, AcceleratorRead);
Coordinate rdim_full = full.Grid()->_rdimensions;
Coordinate rdim_half = half.Grid()->_rdimensions;
unsigned long ndim_half = half.Grid()->_ndimension;
Coordinate checker_dim_mask_half = half.Grid()->_checker_dim_mask;
Coordinate ostride_half = half.Grid()->_ostride;
accelerator_for(ss, full.Grid()->oSites(),full.Grid()->Nsimd(),{
Coordinate coor;
int cbos;
int linear=0;
Lexicographic::CoorFromIndex(coor,ss,rdim_full);
assert(coor.size()==ndim_half);
for(int d=0;d<ndim_half;d++){
if(checker_dim_mask_half[d]) linear += coor[d];
}
cbos = (linear&0x1);
if (cbos==cb) {
int ssh=0;
for(int d=0;d<ndim_half;d++) {
if (d == checker_dim_half) ssh += ostride_half[d] * ((coor[d] / 2) % rdim_half[d]);
else ssh += ostride_half[d] * (coor[d] % rdim_half[d]);
}
coalescedWrite(half_v[ssh],full_v(ss));
}
});
}
template<class vobj> inline void acceleratorSetCheckerboard(Lattice<vobj> &full,const Lattice<vobj> &half, int checker_dim_half=0)
{
int cb = half.Checkerboard();
autoView(half_v , half, AcceleratorRead);
autoView(full_v , full, AcceleratorWrite);
Coordinate rdim_full = full.Grid()->_rdimensions;
Coordinate rdim_half = half.Grid()->_rdimensions;
unsigned long ndim_half = half.Grid()->_ndimension;
Coordinate checker_dim_mask_half = half.Grid()->_checker_dim_mask;
Coordinate ostride_half = half.Grid()->_ostride;
accelerator_for(ss,full.Grid()->oSites(),full.Grid()->Nsimd(),{
Coordinate coor;
int cbos;
int linear=0;
Lexicographic::CoorFromIndex(coor,ss,rdim_full);
assert(coor.size()==ndim_half);
for(int d=0;d<ndim_half;d++){
if(checker_dim_mask_half[d]) linear += coor[d];
}
cbos = (linear&0x1);
if (cbos==cb) {
int ssh=0;
for(int d=0;d<ndim_half;d++){
if (d == checker_dim_half) ssh += ostride_half[d] * ((coor[d] / 2) % rdim_half[d]);
else ssh += ostride_half[d] * (coor[d] % rdim_half[d]);
}
coalescedWrite(full_v[ss],half_v(ssh));
}
});
}
////////////////////////////////////////////////////////////////////////////////////////////
// Flexible Type Conversion for internal promotion to double as well as graceful
// treatment of scalar-compatible types
@ -855,7 +785,7 @@ void ExtractSliceLocal(Lattice<vobj> &lowDim,const Lattice<vobj> & higherDim,int
template<class vobj>
void Replicate(Lattice<vobj> &coarse,Lattice<vobj> & fine)
void Replicate(const Lattice<vobj> &coarse,Lattice<vobj> & fine)
{
typedef typename vobj::scalar_object sobj;
@ -1080,54 +1010,96 @@ vectorizeFromRevLexOrdArray( std::vector<sobj> &in, Lattice<vobj> &out)
});
}
//Convert a Lattice from one precision to another
template<class VobjOut, class VobjIn>
void precisionChange(Lattice<VobjOut> &out, const Lattice<VobjIn> &in)
{
assert(out.Grid()->Nd() == in.Grid()->Nd());
for(int d=0;d<out.Grid()->Nd();d++){
assert(out.Grid()->FullDimensions()[d] == in.Grid()->FullDimensions()[d]);
}
out.Checkerboard() = in.Checkerboard();
GridBase *in_grid=in.Grid();
GridBase *out_grid = out.Grid();
typedef typename VobjOut::scalar_object SobjOut;
typedef typename VobjIn::scalar_object SobjIn;
int ndim = out.Grid()->Nd();
int out_nsimd = out_grid->Nsimd();
std::vector<Coordinate > out_icoor(out_nsimd);
for(int lane=0; lane < out_nsimd; lane++){
out_icoor[lane].resize(ndim);
out_grid->iCoorFromIindex(out_icoor[lane], lane);
}
std::vector<SobjOut> in_slex_conv(in_grid->lSites());
unvectorizeToLexOrdArray(in_slex_conv, in);
autoView( out_v , out, CpuWrite);
thread_for(out_oidx,out_grid->oSites(),{
Coordinate out_ocoor(ndim);
out_grid->oCoorFromOindex(out_ocoor, out_oidx);
ExtractPointerArray<SobjOut> ptrs(out_nsimd);
Coordinate lcoor(out_grid->Nd());
for(int lane=0; lane < out_nsimd; lane++){
for(int mu=0;mu<ndim;mu++)
lcoor[mu] = out_ocoor[mu] + out_grid->_rdimensions[mu]*out_icoor[lane][mu];
int llex; Lexicographic::IndexFromCoor(lcoor, llex, out_grid->_ldimensions);
ptrs[lane] = &in_slex_conv[llex];
//The workspace for a precision change operation allowing for the reuse of the mapping to save time on subsequent calls
class precisionChangeWorkspace{
std::pair<Integer,Integer>* fmap_device; //device pointer
public:
precisionChangeWorkspace(GridBase *out_grid, GridBase *in_grid){
//Build a map between the sites and lanes of the output field and the input field as we cannot use the Grids on the device
assert(out_grid->Nd() == in_grid->Nd());
for(int d=0;d<out_grid->Nd();d++){
assert(out_grid->FullDimensions()[d] == in_grid->FullDimensions()[d]);
}
merge(out_v[out_oidx], ptrs, 0);
});
int Nsimd_out = out_grid->Nsimd();
std::vector<Coordinate> out_icorrs(out_grid->Nsimd()); //reuse these
for(int lane=0; lane < out_grid->Nsimd(); lane++)
out_grid->iCoorFromIindex(out_icorrs[lane], lane);
std::vector<std::pair<Integer,Integer> > fmap_host(out_grid->lSites()); //lsites = osites*Nsimd
thread_for(out_oidx,out_grid->oSites(),{
Coordinate out_ocorr;
out_grid->oCoorFromOindex(out_ocorr, out_oidx);
Coordinate lcorr; //the local coordinate (common to both in and out as full coordinate)
for(int out_lane=0; out_lane < Nsimd_out; out_lane++){
out_grid->InOutCoorToLocalCoor(out_ocorr, out_icorrs[out_lane], lcorr);
//int in_oidx = in_grid->oIndex(lcorr), in_lane = in_grid->iIndex(lcorr);
//Note oIndex and oCoorFromOindex (and the same for iIndex) are not inverses for a checkerboarded lattice: the former's coordinates are defined on the full lattice and the latter's on the reduced lattice
//Until this is fixed we need to circumvent the problem locally. Here I will use the coordinates defined on the reduced lattice for simplicity
int in_oidx = 0, in_lane = 0;
for(int d=0;d<in_grid->_ndimension;d++){
in_oidx += in_grid->_ostride[d] * ( lcorr[d] % in_grid->_rdimensions[d] );
in_lane += in_grid->_istride[d] * ( lcorr[d] / in_grid->_rdimensions[d] );
}
fmap_host[out_lane + Nsimd_out*out_oidx] = std::pair<Integer,Integer>( in_oidx, in_lane );
}
});
//Copy the map to the device (if we had a way to tell if an accelerator is in use we could avoid this copy for CPU-only machines)
size_t fmap_bytes = out_grid->lSites() * sizeof(std::pair<Integer,Integer>);
fmap_device = (std::pair<Integer,Integer>*)acceleratorAllocDevice(fmap_bytes);
acceleratorCopyToDevice(fmap_host.data(), fmap_device, fmap_bytes);
}
//Prevent moving or copying
precisionChangeWorkspace(const precisionChangeWorkspace &r) = delete;
precisionChangeWorkspace(precisionChangeWorkspace &&r) = delete;
precisionChangeWorkspace &operator=(const precisionChangeWorkspace &r) = delete;
precisionChangeWorkspace &operator=(precisionChangeWorkspace &&r) = delete;
std::pair<Integer,Integer> const* getMap() const{ return fmap_device; }
~precisionChangeWorkspace(){
acceleratorFreeDevice(fmap_device);
}
};
//Convert a lattice of one precision to another. The input workspace contains the mapping data.
template<class VobjOut, class VobjIn>
void precisionChange(Lattice<VobjOut> &out, const Lattice<VobjIn> &in, const precisionChangeWorkspace &workspace){
static_assert( std::is_same<typename VobjOut::DoublePrecision, typename VobjIn::DoublePrecision>::value == 1, "copyLane: tensor types must be the same" ); //if tensor types are same the DoublePrecision type must be the same
out.Checkerboard() = in.Checkerboard();
constexpr int Nsimd_out = VobjOut::Nsimd();
std::pair<Integer,Integer> const* fmap_device = workspace.getMap();
//Do the copy/precision change
autoView( out_v , out, AcceleratorWrite);
autoView( in_v , in, AcceleratorRead);
accelerator_for(out_oidx, out.Grid()->oSites(), 1,{
std::pair<Integer,Integer> const* fmap_osite = fmap_device + out_oidx*Nsimd_out;
for(int out_lane=0; out_lane < Nsimd_out; out_lane++){
int in_oidx = fmap_osite[out_lane].first;
int in_lane = fmap_osite[out_lane].second;
copyLane(out_v[out_oidx], out_lane, in_v[in_oidx], in_lane);
}
});
}
//Convert a Lattice from one precision to another
//Generate the workspace in place; if multiple calls with the same mapping are performed, consider pregenerating the workspace and reusing
template<class VobjOut, class VobjIn>
void precisionChange(Lattice<VobjOut> &out, const Lattice<VobjIn> &in){
precisionChangeWorkspace workspace(out.Grid(), in.Grid());
precisionChange(out, in, workspace);
}
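A hedged usage sketch (grid and field names are illustrative, not from the patch): when the same double/single grid pair is converted repeatedly, e.g. inside a mixed-precision solver, the workspace can be built once and reused so the site/lane map is not recomputed on every call.
LatticeFermionD psi_d(FGrid_d);                    // 'FGrid_d'/'FGrid_f' assumed: same global volume,
LatticeFermionF psi_f(FGrid_f);                    // double and single precision grids
precisionChangeWorkspace wk_d2f(FGrid_f, FGrid_d); // (out_grid, in_grid)
precisionChangeWorkspace wk_f2d(FGrid_d, FGrid_f);
for(int iter=0; iter<Niter; iter++){               // 'Niter' illustrative
  precisionChange(psi_f, psi_d, wk_d2f);           // reuse cached map: no host-side rebuild
  // ... single precision work on psi_f ...
  precisionChange(psi_d, psi_f, wk_f2d);
}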
////////////////////////////////////////////////////////////////////////////////
// Communicate between grids
////////////////////////////////////////////////////////////////////////////////

View File

@ -69,6 +69,7 @@ GridLogger GridLogDebug (1, "Debug", GridLogColours, "PURPLE");
GridLogger GridLogPerformance(1, "Performance", GridLogColours, "GREEN");
GridLogger GridLogIterative (1, "Iterative", GridLogColours, "BLUE");
GridLogger GridLogIntegrator (1, "Integrator", GridLogColours, "BLUE");
GridLogger GridLogHMC (1, "HMC", GridLogColours, "BLUE");
void GridLogConfigure(std::vector<std::string> &logstreams) {
GridLogError.Active(0);
@ -79,6 +80,7 @@ void GridLogConfigure(std::vector<std::string> &logstreams) {
GridLogPerformance.Active(0);
GridLogIntegrator.Active(1);
GridLogColours.Active(0);
GridLogHMC.Active(1);
for (int i = 0; i < logstreams.size(); i++) {
if (logstreams[i] == std::string("Error")) GridLogError.Active(1);
@ -87,7 +89,8 @@ void GridLogConfigure(std::vector<std::string> &logstreams) {
if (logstreams[i] == std::string("Iterative")) GridLogIterative.Active(1);
if (logstreams[i] == std::string("Debug")) GridLogDebug.Active(1);
if (logstreams[i] == std::string("Performance")) GridLogPerformance.Active(1);
if (logstreams[i] == std::string("Integrator")) GridLogIntegrator.Active(1);
if (logstreams[i] == std::string("NoIntegrator")) GridLogIntegrator.Active(0);
if (logstreams[i] == std::string("NoHMC")) GridLogHMC.Active(0);
if (logstreams[i] == std::string("Colours")) GridLogColours.Active(1);
}
}
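For illustration only, the new stream follows the existing GridLogConfigure conventions, so it can be silenced with the NoHMC token:
std::vector<std::string> logstreams = {"Error","Warning","Message","NoHMC"};
GridLogConfigure(logstreams);                              // HMC channel off, other defaults kept
std::cout << GridLogHMC     << "suppressed"     << std::endl;
std::cout << GridLogMessage << "still printed"  << std::endl;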

View File

@ -182,6 +182,7 @@ extern GridLogger GridLogDebug ;
extern GridLogger GridLogPerformance;
extern GridLogger GridLogIterative ;
extern GridLogger GridLogIntegrator ;
extern GridLogger GridLogHMC;
extern Colours GridLogColours;
std::string demangle(const char* name) ;

View File

@ -576,8 +576,6 @@ class ScidacReader : public GridLimeReader {
std::string rec_name(ILDG_BINARY_DATA);
while ( limeReaderNextRecord(LimeR) == LIME_SUCCESS ) {
if ( !strncmp(limeReaderType(LimeR), rec_name.c_str(),strlen(rec_name.c_str()) ) ) {
// in principle should do the line below, but that breaks backward compatibility with old data
// skipPastObjectRecord(std::string(GRID_FIELD_NORM));
skipPastObjectRecord(std::string(SCIDAC_CHECKSUM));
return;
}

View File

@ -39,9 +39,11 @@ using namespace Grid;
////////////////////////////////////////////////////////////////////////////////
class NerscIO : public BinaryIO {
public:
typedef Lattice<vLorentzColourMatrixD> GaugeField;
// Enable/disable exiting if the plaquette in the header does not match the value computed (default true)
static bool & exitOnReadPlaquetteMismatch(){ static bool v=true; return v; }
static inline void truncate(std::string file){
std::ofstream fout(file,std::ios::out);
}
@ -198,7 +200,7 @@ public:
std::cerr << " nersc_csum " <<std::hex<< nersc_csum << " " << header.checksum<< std::dec<< std::endl;
exit(0);
}
assert(fabs(clone.plaquette -header.plaquette ) < 1.0e-5 );
if(exitOnReadPlaquetteMismatch()) assert(fabs(clone.plaquette -header.plaquette ) < 1.0e-5 );
assert(fabs(clone.link_trace-header.link_trace) < 1.0e-6 );
assert(nersc_csum == header.checksum );
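A hedged sketch of how the new toggle might be used when deliberately reading a configuration whose header plaquette is known to be stale ('grid' and the file name are placeholders):
FieldMetaData header;
LatticeGaugeFieldD U(grid);
NerscIO::exitOnReadPlaquetteMismatch() = false;              // skip the plaquette assert on read
NerscIO::readConfiguration(U, header, std::string("ckpoint_lat.1000"));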

View File

@ -63,6 +63,7 @@ static constexpr int Ngp=2; // gparity index range
#define ColourIndex (2)
#define SpinIndex (1)
#define LorentzIndex (0)
#define GparityFlavourIndex (0)
// Also should make these a named enum type
static constexpr int DaggerNo=0;
@ -87,6 +88,8 @@ template<typename T> struct isCoarsened {
template <typename T> using IfCoarsened = Invoke<std::enable_if< isCoarsened<T>::value,int> > ;
template <typename T> using IfNotCoarsened = Invoke<std::enable_if<!isCoarsened<T>::value,int> > ;
const int GparityFlavourTensorIndex = 3; //TensorLevel counts from the bottom!
// ChrisK very keen to add extra space for Gparity doubling.
//
// Also add domain wall index, in a way where Wilson operator
@ -101,6 +104,7 @@ template<typename vtype> using iSpinMatrix = iScalar<iMatrix<iSca
template<typename vtype> using iColourMatrix = iScalar<iScalar<iMatrix<vtype, Nc> > > ;
template<typename vtype> using iSpinColourMatrix = iScalar<iMatrix<iMatrix<vtype, Nc>, Ns> >;
template<typename vtype> using iLorentzColourMatrix = iVector<iScalar<iMatrix<vtype, Nc> >, Nd > ;
template<typename vtype> using iLorentzVector = iVector<iScalar<iScalar<vtype> >, Nd > ;
template<typename vtype> using iDoubleStoredColourMatrix = iVector<iScalar<iMatrix<vtype, Nc> >, Nds > ;
template<typename vtype> using iSpinVector = iScalar<iVector<iScalar<vtype>, Ns> >;
template<typename vtype> using iColourVector = iScalar<iScalar<iVector<vtype, Nc> > >;
@ -110,8 +114,10 @@ template<typename vtype> using iHalfSpinColourVector = iScalar<iVector<iVec
template<typename vtype> using iSpinColourSpinColourMatrix = iScalar<iMatrix<iMatrix<iMatrix<iMatrix<vtype, Nc>, Ns>, Nc>, Ns> >;
template<typename vtype> using iGparityFlavourVector = iVector<iScalar<iScalar<vtype> >, Ngp>;
template<typename vtype> using iGparitySpinColourVector = iVector<iVector<iVector<vtype, Nc>, Ns>, Ngp >;
template<typename vtype> using iGparityHalfSpinColourVector = iVector<iVector<iVector<vtype, Nc>, Nhs>, Ngp >;
template<typename vtype> using iGparityFlavourMatrix = iMatrix<iScalar<iScalar<vtype> >, Ngp>;
// Spin matrix
typedef iSpinMatrix<Complex > SpinMatrix;
@ -158,7 +164,16 @@ typedef iSpinColourSpinColourMatrix<vComplex > vSpinColourSpinColourMatrix;
typedef iSpinColourSpinColourMatrix<vComplexF> vSpinColourSpinColourMatrixF;
typedef iSpinColourSpinColourMatrix<vComplexD> vSpinColourSpinColourMatrixD;
// LorentzColour
// LorentzVector
typedef iLorentzVector<Complex > LorentzVector;
typedef iLorentzVector<ComplexF > LorentzVectorF;
typedef iLorentzVector<ComplexD > LorentzVectorD;
typedef iLorentzVector<vComplex > vLorentzVector;
typedef iLorentzVector<vComplexF> vLorentzVectorF;
typedef iLorentzVector<vComplexD> vLorentzVectorD;
// LorentzColourMatrix
typedef iLorentzColourMatrix<Complex > LorentzColourMatrix;
typedef iLorentzColourMatrix<ComplexF > LorentzColourMatrixF;
typedef iLorentzColourMatrix<ComplexD > LorentzColourMatrixD;
@ -176,6 +191,16 @@ typedef iDoubleStoredColourMatrix<vComplex > vDoubleStoredColourMatrix;
typedef iDoubleStoredColourMatrix<vComplexF> vDoubleStoredColourMatrixF;
typedef iDoubleStoredColourMatrix<vComplexD> vDoubleStoredColourMatrixD;
//G-parity flavour matrix
typedef iGparityFlavourMatrix<Complex> GparityFlavourMatrix;
typedef iGparityFlavourMatrix<ComplexF> GparityFlavourMatrixF;
typedef iGparityFlavourMatrix<ComplexD> GparityFlavourMatrixD;
typedef iGparityFlavourMatrix<vComplex> vGparityFlavourMatrix;
typedef iGparityFlavourMatrix<vComplexF> vGparityFlavourMatrixF;
typedef iGparityFlavourMatrix<vComplexD> vGparityFlavourMatrixD;
// Spin vector
typedef iSpinVector<Complex > SpinVector;
typedef iSpinVector<ComplexF> SpinVectorF;
@ -220,6 +245,16 @@ typedef iHalfSpinColourVector<ComplexD> HalfSpinColourVectorD;
typedef iHalfSpinColourVector<vComplex > vHalfSpinColourVector;
typedef iHalfSpinColourVector<vComplexF> vHalfSpinColourVectorF;
typedef iHalfSpinColourVector<vComplexD> vHalfSpinColourVectorD;
//G-parity flavour vector
typedef iGparityFlavourVector<Complex > GparityFlavourVector;
typedef iGparityFlavourVector<ComplexF> GparityFlavourVectorF;
typedef iGparityFlavourVector<ComplexD> GparityFlavourVectorD;
typedef iGparityFlavourVector<vComplex > vGparityFlavourVector;
typedef iGparityFlavourVector<vComplexF> vGparityFlavourVectorF;
typedef iGparityFlavourVector<vComplexD> vGparityFlavourVectorD;
// singlets
typedef iSinglet<Complex > TComplex; // FIXME This is painful. Tensor singlet complex type.
@ -263,6 +298,10 @@ typedef Lattice<vLorentzColourMatrix> LatticeLorentzColourMatrix;
typedef Lattice<vLorentzColourMatrixF> LatticeLorentzColourMatrixF;
typedef Lattice<vLorentzColourMatrixD> LatticeLorentzColourMatrixD;
typedef Lattice<vLorentzVector> LatticeLorentzVector;
typedef Lattice<vLorentzVectorF> LatticeLorentzVectorF;
typedef Lattice<vLorentzVectorD> LatticeLorentzVectorD;
// DoubleStored gauge field
typedef Lattice<vDoubleStoredColourMatrix> LatticeDoubleStoredColourMatrix;
typedef Lattice<vDoubleStoredColourMatrixF> LatticeDoubleStoredColourMatrixF;

View File

@ -30,8 +30,7 @@ Author: paboyle <paboyle@ph.ed.ac.uk>
See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */
#ifndef GRID_QCD_ACTION_H
#define GRID_QCD_ACTION_H
#pragma once
////////////////////////////////////////////
// Abstract base interface
@ -51,4 +50,4 @@ NAMESPACE_CHECK(Fermion);
#include <Grid/qcd/action/pseudofermion/PseudoFermion.h>
NAMESPACE_CHECK(PseudoFermion);
#endif

View File

@ -40,6 +40,29 @@ class Action
public:
bool is_smeared = false;
RealD deriv_norm_sum;
RealD deriv_max_sum;
int deriv_num;
RealD deriv_us;
RealD S_us;
RealD refresh_us;
void reset_timer(void) {
deriv_us = S_us = refresh_us = 0.0;
deriv_num=0;
deriv_norm_sum = deriv_max_sum=0.0;
}
void deriv_log(RealD nrm, RealD max) { deriv_max_sum+=max; deriv_norm_sum+=nrm; deriv_num++;}
RealD deriv_max_average(void) { return deriv_max_sum/deriv_num; };
RealD deriv_norm_average(void) { return deriv_norm_sum/deriv_num; };
RealD deriv_timer(void) { return deriv_us; };
RealD S_timer(void) { return S_us; };
RealD refresh_timer(void) { return refresh_us; };
void deriv_timer_start(void) { deriv_us-=usecond(); }
void deriv_timer_stop(void) { deriv_us+=usecond(); }
void refresh_timer_start(void) { refresh_us-=usecond(); }
void refresh_timer_stop(void) { refresh_us+=usecond(); }
void S_timer_start(void) { S_us-=usecond(); }
void S_timer_stop(void) { S_us+=usecond(); }
// Heatbath?
virtual void refresh(const GaugeField& U, GridSerialRNG &sRNG, GridParallelRNG& pRNG) = 0; // refresh pseudofermions
virtual RealD S(const GaugeField& U) = 0; // evaluate the action
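A sketch, not taken from the patch, of how integrator-side code could drive the new timing and force-magnitude hooks; 'action', 'U', 'force' and the max-norm helper are assumptions:
action.deriv_timer_start();
action.deriv(U, force);                        // standard Action force evaluation
action.deriv_timer_stop();
RealD nrm = norm2(force);
RealD max = maxLocalNorm2(force);              // stand-in for whatever per-site max estimate is used
action.deriv_log(nrm, max);
std::cout << GridLogHMC << " <|dSdU|^2> "      << action.deriv_norm_average()
                        << " max "             << action.deriv_max_average()
                        << " deriv time(us) "  << action.deriv_timer() << std::endl;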

View File

@ -58,6 +58,8 @@ NAMESPACE_CHECK(Scalar);
////////////////////////////////////////////
// Utility functions
////////////////////////////////////////////
#include <Grid/qcd/action/domains/Domains.h>
#include <Grid/qcd/utils/Metric.h>
NAMESPACE_CHECK(Metric);
#include <Grid/qcd/utils/CovariantLaplacian.h>

View File

@ -36,28 +36,34 @@ NAMESPACE_BEGIN(Grid);
// These can move into a params header and be given MacroMagic serialisation
struct GparityWilsonImplParams {
Coordinate twists;
GparityWilsonImplParams() : twists(Nd, 0) {};
Coordinate twists; //Here the first Nd-1 directions are treated as "spatial", and a twist value of 1 indicates G-parity BCs in that direction.
//mu=Nd-1 is assumed to be the time direction and a twist value of 1 indicates antiperiodic BCs
bool locally_periodic;
GparityWilsonImplParams() : twists(Nd, 0), locally_periodic(false) {};
};
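For illustration (not part of the patch), the twist convention described in the comment would be set along these lines:
GparityWilsonImplParams gp_params;
gp_params.twists[0]    = 1;     // G-parity BCs in the x direction
gp_params.twists[Nd-1] = 1;     // antiperiodic BCs in time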
struct WilsonImplParams {
bool overlapCommsCompute;
bool locally_periodic;
AcceleratorVector<Real,Nd> twist_n_2pi_L;
AcceleratorVector<Complex,Nd> boundary_phases;
WilsonImplParams() {
boundary_phases.resize(Nd, 1.0);
twist_n_2pi_L.resize(Nd, 0.0);
locally_periodic = false;
};
WilsonImplParams(const AcceleratorVector<Complex,Nd> phi) : boundary_phases(phi), overlapCommsCompute(false) {
twist_n_2pi_L.resize(Nd, 0.0);
locally_periodic = false;
}
};
struct StaggeredImplParams {
StaggeredImplParams() {};
bool locally_periodic;
StaggeredImplParams() : locally_periodic(false) {};
};
struct OneFlavourRationalParams : Serializable {
struct OneFlavourRationalParams : Serializable {
GRID_SERIALIZABLE_CLASS_MEMBERS(OneFlavourRationalParams,
RealD, lo,
RealD, hi,
@ -85,6 +91,50 @@ struct StaggeredImplParams {
precision(_precision),
BoundsCheckFreq(_BoundsCheckFreq){};
};
/*Action parameters for the generalized rational action
The approximation is for (M^dag M)^{1/inv_pow}
where inv_pow is the denominator of the fractional power.
Default inv_pow=2 for square root, making this equivalent to
the OneFlavourRational action
*/
struct RationalActionParams : Serializable {
GRID_SERIALIZABLE_CLASS_MEMBERS(RationalActionParams,
int, inv_pow,
RealD, lo, //low eigenvalue bound of rational approx
RealD, hi, //high eigenvalue bound of rational approx
int, MaxIter, //maximum iterations in msCG
RealD, action_tolerance, //msCG tolerance in action evaluation
int, action_degree, //rational approx tolerance in action evaluation
RealD, md_tolerance, //msCG tolerance in MD integration
int, md_degree, //rational approx tolerance in MD integration
int, precision, //precision of floating point arithmetic
int, BoundsCheckFreq); //frequency the approximation is tested (with Metropolis degree/tolerance); 0 disables the check
// constructor
RationalActionParams(int _inv_pow = 2,
RealD _lo = 0.0,
RealD _hi = 1.0,
int _maxit = 1000,
RealD _action_tolerance = 1.0e-8,
int _action_degree = 10,
RealD _md_tolerance = 1.0e-8,
int _md_degree = 10,
int _precision = 64,
int _BoundsCheckFreq=20)
: inv_pow(_inv_pow),
lo(_lo),
hi(_hi),
MaxIter(_maxit),
action_tolerance(_action_tolerance),
action_degree(_action_degree),
md_tolerance(_md_tolerance),
md_degree(_md_degree),
precision(_precision),
BoundsCheckFreq(_BoundsCheckFreq){};
};
NAMESPACE_END(Grid);
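A hedged example of filling these parameters for a fourth-root approximation with a looser MD tolerance than the action evaluation (all numbers illustrative):
RationalActionParams rat;
rat.inv_pow          = 4;        // approximate (M^dag M)^{1/4}
rat.lo               = 1.0e-4;
rat.hi               = 64.0;
rat.MaxIter          = 10000;
rat.action_tolerance = 1.0e-10;  rat.action_degree = 16;
rat.md_tolerance     = 1.0e-8;   rat.md_degree     = 12;
rat.precision        = 64;
rat.BoundsCheckFreq  = 20;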

View File

@ -0,0 +1,52 @@
/*************************************************************************************
Grid physics library, www.github.com/paboyle/Grid
Source file: ./lib/qcd/hmc/DDHMC.h
Copyright (C) 2021
Author: Peter Boyle <paboyle@ph.ed.ac.uk>
Author: Christopher Kelly
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
See the full license in the file "LICENSE" in the top level distribution
directory
*************************************************************************************/
/* END LEGAL */
NAMESPACE_BEGIN(Grid);
////////////////////////////////////////////////////
// DDHMC filter with sub-block size B[mu]
////////////////////////////////////////////////////
template<typename MomentaField>
struct DDHMCFilter: public MomentumFilterBase<MomentaField>
{
Coordinate Block;
int Width;
DDHMCFilter(const Coordinate &_Block): Block(_Block) {}
void applyFilter(MomentaField &P) const override
{
DomainDecomposition Domains(Block);
Domains.ProjectDDHMC(P);
}
};
NAMESPACE_END(Grid);
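A usage sketch (block size, grid and field type are illustrative): the filter restricts the conjugate momenta to the DDHMC active region during the molecular dynamics.
Coordinate Block({16,16,16,16});                 // sub-block size B[mu]
DDHMCFilter<LatticeGaugeFieldD> DDHMC(Block);
LatticeGaugeFieldD P(grid);                      // conjugate momenta; 'grid' assumed
DDHMC.applyFilter(P);                            // P projected via DomainDecomposition::ProjectDDHMC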

View File

@ -0,0 +1,98 @@
/*************************************************************************************
Grid physics library, www.github.com/paboyle/Grid
Source file: ./lib/qcd/action/momentum/DirichletFilter.h
Copyright (C) 2021
Author: Peter Boyle <paboyle@ph.ed.ac.uk>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
See the full license in the file "LICENSE" in the top level distribution
directory
*************************************************************************************/
/* END LEGAL */
////////////////////////////////////////////////////
// Dirichlet filter with sub-block size B[mu]
////////////////////////////////////////////////////
#pragma once
#include <Grid/qcd/action/domains/DomainDecomposition.h>
NAMESPACE_BEGIN(Grid);
template<typename MomentaField>
struct DirichletFilter: public MomentumFilterBase<MomentaField>
{
Coordinate Block;
DirichletFilter(const Coordinate &_Block): Block(_Block) {}
// Edge detect using domain projectors
void applyFilter (MomentaField &U) const override
{
DomainDecomposition Domains(Block);
GridBase *grid = U.Grid();
LatticeInteger coor(grid);
LatticeInteger face(grid);
LatticeInteger one(grid); one = 1;
LatticeInteger zero(grid); zero = 0;
LatticeInteger omega(grid);
LatticeInteger omegabar(grid);
LatticeInteger tmp(grid);
omega=one; Domains.ProjectDomain(omega,0);
omegabar=one; Domains.ProjectDomain(omegabar,1);
LatticeInteger nface(grid); nface=Zero();
MomentaField projected(grid); projected=Zero();
typedef decltype(PeekIndex<LorentzIndex>(U,0)) MomentaLinkField;
MomentaLinkField Umu(grid);
MomentaLinkField zz(grid); zz=Zero();
int dims = grid->Nd();
Coordinate Global=grid->GlobalDimensions();
assert(dims==Nd);
for(int mu=0;mu<Nd;mu++){
if ( Block[mu]!=0 ) {
Umu = PeekIndex<LorentzIndex>(U,mu);
// Upper face
tmp = Cshift(omegabar,mu,1);
tmp = tmp + omega;
face = where(tmp == Integer(2),one,zero );
tmp = Cshift(omega,mu,1);
tmp = tmp + omegabar;
face = where(tmp == Integer(2),one,face );
Umu = where(face,zz,Umu);
PokeIndex<LorentzIndex>(U, Umu, mu);
}
}
}
};
NAMESPACE_END(Grid);
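A similar hedged sketch for the Dirichlet variant (names illustrative): links or momenta on the Omega/OmegaBar faces are set to zero.
Coordinate Block({8,8,8,8});
DirichletFilter<LatticeGaugeFieldD> Dirichlet(Block);
LatticeGaugeFieldD P(grid);                      // 'grid' assumed to exist
Dirichlet.applyFilter(P);                        // face links zeroed, interior left untouched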

View File

@ -0,0 +1,187 @@
/*************************************************************************************
Grid physics library, www.github.com/paboyle/Grid
Source file: ./lib/qcd/action/domains/DomainDecomposition.h
Copyright (C) 2021
Author: Peter Boyle <paboyle@ph.ed.ac.uk>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
See the full license in the file "LICENSE" in the top level distribution
directory
*************************************************************************************/
/* END LEGAL */
////////////////////////////////////////////////////
// Dirichlet filter with sub-block size B[mu]
////////////////////////////////////////////////////
#pragma once
NAMESPACE_BEGIN(Grid);
struct DomainDecomposition
{
Coordinate Block;
static constexpr RealD factor = 0.6;
DomainDecomposition(const Coordinate &_Block): Block(_Block){ assert(Block.size()==Nd);};
template<class Field>
void ProjectDomain(Field &f,Integer domain)
{
GridBase *grid = f.Grid();
int dims = grid->Nd();
int isDWF= (dims==Nd+1);
assert((dims==Nd)||(dims==Nd+1));
Field zz(grid); zz = Zero();
LatticeInteger coor(grid);
LatticeInteger domaincoor(grid);
LatticeInteger mask(grid); mask = Integer(1);
LatticeInteger zi(grid); zi = Integer(0);
for(int d=0;d<Nd;d++){
Integer B= Block[d];
if ( B ) {
LatticeCoordinate(coor,d+isDWF);
domaincoor = mod(coor,B);
mask = where(domaincoor==Integer(0),zi,mask);
mask = where(domaincoor==Integer(B-1),zi,mask);
}
}
if ( !domain )
f = where(mask==Integer(1),f,zz);
else
f = where(mask==Integer(0),f,zz);
};
template<class GaugeField>
void ProjectDDHMC(GaugeField &U)
{
GridBase *grid = U.Grid();
Coordinate Global=grid->GlobalDimensions();
GaugeField zzz(grid); zzz = Zero();
LatticeInteger coor(grid);
GaugeField Uorg(grid); Uorg = U;
auto zzz_mu = PeekIndex<LorentzIndex>(zzz,0);
////////////////////////////////////////////////////
// Zero BDY layers
////////////////////////////////////////////////////
for(int mu=0;mu<Nd;mu++) {
Integer B1 = Block[mu];
if ( B1 && (B1 <= Global[mu]) ) {
LatticeCoordinate(coor,mu);
////////////////////////////////
// OmegaBar - zero all links contained in slice B-1,0 and
// mu links connecting to Omega
////////////////////////////////
U = where(mod(coor,B1)==Integer(B1-1),zzz,U);
U = where(mod(coor,B1)==Integer(0) ,zzz,U);
auto U_mu = PeekIndex<LorentzIndex>(U,mu);
U_mu = where(mod(coor,B1)==Integer(B1-2),zzz_mu,U_mu);
PokeIndex<LorentzIndex>(U, U_mu, mu);
}
}
////////////////////////////////////////////
// Omega interior: slow the evolution
// Tricky as we need to take the smallest of the values imposed by each cut
// Do them in order of largest to smallest so the smallest writes last
////////////////////////////////////////////
RealD f= factor;
#if 0
for(int mu=0;mu<Nd;mu++) {
Integer B1 = Block[mu];
if ( B1 && (B1 <= Global[mu]) ) {
auto U_mu = PeekIndex<LorentzIndex>(U,mu);
auto Uorg_mu= PeekIndex<LorentzIndex>(Uorg,mu);
// In the plane
U = where(mod(coor,B1)==Integer(B1-5),Uorg*f,U);
U = where(mod(coor,B1)==Integer(4) ,Uorg*f,U);
// Perp links
U_mu = where(mod(coor,B1)==Integer(B1-6),Uorg_mu*f,U_mu);
U_mu = where(mod(coor,B1)==Integer(4) ,Uorg_mu*f,U_mu);
PokeIndex<LorentzIndex>(U, U_mu, mu);
}
}
#endif
for(int mu=0;mu<Nd;mu++) {
Integer B1 = Block[mu];
if ( B1 && (B1 <= Global[mu]) ) {
auto U_mu = PeekIndex<LorentzIndex>(U,mu);
auto Uorg_mu= PeekIndex<LorentzIndex>(Uorg,mu);
// In the plane
U = where(mod(coor,B1)==Integer(B1-4),Uorg*f*f,U);
U = where(mod(coor,B1)==Integer(3) ,Uorg*f*f,U);
// Perp links
U_mu = where(mod(coor,B1)==Integer(B1-5),Uorg_mu*f*f,U_mu);
U_mu = where(mod(coor,B1)==Integer(3) ,Uorg_mu*f*f,U_mu);
PokeIndex<LorentzIndex>(U, U_mu, mu);
}
}
for(int mu=0;mu<Nd;mu++) {
Integer B1 = Block[mu];
if ( B1 && (B1 <= Global[mu]) ) {
auto U_mu = PeekIndex<LorentzIndex>(U,mu);
auto Uorg_mu= PeekIndex<LorentzIndex>(Uorg,mu);
// In the plane
U = where(mod(coor,B1)==Integer(B1-3),Uorg*f*f*f,U);
U = where(mod(coor,B1)==Integer(2) ,Uorg*f*f*f,U);
// Perp links
U_mu = where(mod(coor,B1)==Integer(B1-4),Uorg_mu*f*f*f,U_mu);
U_mu = where(mod(coor,B1)==Integer(2) ,Uorg_mu*f*f*f,U_mu);
PokeIndex<LorentzIndex>(U, U_mu, mu);
}
}
for(int mu=0;mu<Nd;mu++) {
Integer B1 = Block[mu];
if ( B1 && (B1 <= Global[mu]) ) {
auto U_mu = PeekIndex<LorentzIndex>(U,mu);
auto Uorg_mu= PeekIndex<LorentzIndex>(Uorg,mu);
// In the plane
U = where(mod(coor,B1)==Integer(B1-2),zzz,U);
U = where(mod(coor,B1)==Integer(1) ,zzz,U);
// Perp links
U_mu = where(mod(coor,B1)==Integer(B1-3),Uorg_mu*f*f*f*f,U_mu);
U_mu = where(mod(coor,B1)==Integer(1) ,Uorg_mu*f*f*f*f,U_mu);
PokeIndex<LorentzIndex>(U, U_mu, mu);
}
}
}
};
NAMESPACE_END(Grid);
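A hedged sketch of the projector in use ('FGrid', the block size and the field are illustrative): domain 0 keeps the Omega interior, domain 1 keeps the complementary OmegaBar boundary layers, so a field can be split into the two restrictions.
Coordinate Block({16,16,16,16});
DomainDecomposition Domains(Block);
LatticeFermionD psi(FGrid), psi_omega(FGrid), psi_omegabar(FGrid);   // psi filled elsewhere
psi_omega    = psi;  Domains.ProjectDomain(psi_omega,    0);         // Omega interior only
psi_omegabar = psi;  Domains.ProjectDomain(psi_omegabar, 1);         // OmegaBar layers only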

View File

@ -0,0 +1,39 @@
/*************************************************************************************
Grid physics library, www.github.com/paboyle/Grid
Source file: ./lib/qcd/action/momentum/Domains.h
Copyright (C) 2021
Author: Peter Boyle <paboyle@ph.ed.ac.uk>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
See the full license in the file "LICENSE" in the top level distribution
directory
*************************************************************************************/
/* END LEGAL */
////////////////////////////////////////////////////
// Dirichlet filter with sub-block size B[mu]
////////////////////////////////////////////////////
#pragma once
#include <Grid/qcd/action/domains/DomainDecomposition.h>
#include <Grid/qcd/action/domains/MomentumFilter.h>
#include <Grid/qcd/action/domains/DirichletFilter.h>
#include <Grid/qcd/action/domains/DDHMCFilter.h>

View File

@ -28,8 +28,7 @@ directory
*************************************************************************************/
/* END LEGAL */
//--------------------------------------------------------------------
#ifndef MOMENTUM_FILTER
#define MOMENTUM_FILTER
#pragma once
NAMESPACE_BEGIN(Grid);
@ -37,7 +36,7 @@ NAMESPACE_BEGIN(Grid);
template<typename MomentaField>
struct MomentumFilterBase{
virtual void applyFilter(MomentaField &P) const;
virtual void applyFilter(MomentaField &P) const = 0;
};
//Do nothing
@ -90,5 +89,3 @@ struct MomentumFilterApplyPhase: public MomentumFilterBase<MomentaField>{
NAMESPACE_END(Grid);
#endif

View File

@ -60,6 +60,8 @@ public:
///////////////////////////////////////////////////////////////
virtual void Dminus(const FermionField &psi, FermionField &chi);
virtual void DminusDag(const FermionField &psi, FermionField &chi);
virtual void ImportFourDimPseudoFermion(const FermionField &input,FermionField &imported);
virtual void ExportFourDimPseudoFermion(const FermionField &solution,FermionField &exported);
virtual void ExportPhysicalFermionSolution(const FermionField &solution5d,FermionField &exported4d);
virtual void ExportPhysicalFermionSource(const FermionField &solution5d, FermionField &exported4d);
virtual void ImportPhysicalFermionSource(const FermionField &input4d,FermionField &imported5d);

View File

@ -0,0 +1,185 @@
/*************************************************************************************
Grid physics library, www.github.com/paboyle/Grid
Source file: ./lib/qcd/action/fermion/DirichletFermionOperator.h
Copyright (C) 2021
Author: Peter Boyle <paboyle@ph.ed.ac.uk>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */
#pragma once
NAMESPACE_BEGIN(Grid);
////////////////////////////////////////////////////////////////
// Wrap a fermion operator in Dirichlet BC's at node boundary
////////////////////////////////////////////////////////////////
template<class Impl>
class DirichletFermionOperator : public FermionOperator<Impl>
{
public:
INHERIT_IMPL_TYPES(Impl);
// Data members
int CommsMode;
Coordinate Block;
DirichletFilter<GaugeField> Filter;
FermionOperator<Impl> & FermOp;
// Constructor / bespoke
DirichletFermionOperator(FermionOperator<Impl> & _FermOp, Coordinate &_Block)
: FermOp(_FermOp), Block(_Block), Filter(Block)
{
// Save what the comms mode should be under normal BCs
CommsMode = WilsonKernelsStatic::Comms;
assert((CommsMode == WilsonKernelsStatic::CommsAndCompute)
||(CommsMode == WilsonKernelsStatic::CommsThenCompute));
// Check the block size divides local lattice
GridBase *grid = FermOp.GaugeGrid();
int blocks_per_rank = 1;
Coordinate LocalDims = grid->LocalDimensions();
Coordinate GlobalDims= grid->GlobalDimensions();
assert(Block.size()==LocalDims.size());
for(int d=0;d<LocalDims.size();d++){
if (Block[d]&&(Block[d]<=GlobalDims[d])){
int r = LocalDims[d] % Block[d];
assert(r == 0);
blocks_per_rank *= (LocalDims[d] / Block[d]);
}
}
// Even blocks per node required // could be relaxed but inefficient use of hardware as idle nodes in boundary operator R
assert( blocks_per_rank != 0);
// Possible checks that SIMD lanes are used with full occupancy???
};
virtual ~DirichletFermionOperator(void) = default;
void DirichletOn(void) {
assert(WilsonKernelsStatic::Comms!= WilsonKernelsStatic::CommsDirichlet);
// WilsonKernelsStatic::Comms = WilsonKernelsStatic::CommsDirichlet;
}
void DirichletOff(void) {
// assert(WilsonKernelsStatic::Comms== WilsonKernelsStatic::CommsDirichlet);
// WilsonKernelsStatic::Comms = CommsMode;
}
// Implement the full interface
virtual FermionField &tmp(void) { return FermOp.tmp(); };
virtual GridBase *FermionGrid(void) { return FermOp.FermionGrid(); }
virtual GridBase *FermionRedBlackGrid(void) { return FermOp.FermionRedBlackGrid(); }
virtual GridBase *GaugeGrid(void) { return FermOp.GaugeGrid(); }
virtual GridBase *GaugeRedBlackGrid(void) { return FermOp.GaugeRedBlackGrid(); }
// override multiply
virtual void M (const FermionField &in, FermionField &out) { DirichletOn(); FermOp.M(in,out); DirichletOff(); };
virtual void Mdag (const FermionField &in, FermionField &out) { DirichletOn(); FermOp.Mdag(in,out); DirichletOff(); };
// half checkerboard operations
virtual void Meooe (const FermionField &in, FermionField &out) { DirichletOn(); FermOp.Meooe(in,out); DirichletOff(); };
virtual void MeooeDag (const FermionField &in, FermionField &out) { DirichletOn(); FermOp.MeooeDag(in,out); DirichletOff(); };
virtual void Mooee (const FermionField &in, FermionField &out) { DirichletOn(); FermOp.Mooee(in,out); DirichletOff(); };
virtual void MooeeDag (const FermionField &in, FermionField &out) { DirichletOn(); FermOp.MooeeDag(in,out); DirichletOff(); };
virtual void MooeeInv (const FermionField &in, FermionField &out) { DirichletOn(); FermOp.MooeeInv(in,out); DirichletOff(); };
virtual void MooeeInvDag (const FermionField &in, FermionField &out) { DirichletOn(); FermOp.MooeeInvDag(in,out); DirichletOff(); };
// non-hermitian hopping term; half cb or both
virtual void Dhop (const FermionField &in, FermionField &out,int dag) { DirichletOn(); FermOp.Dhop(in,out,dag); DirichletOff(); };
virtual void DhopOE(const FermionField &in, FermionField &out,int dag) { DirichletOn(); FermOp.DhopOE(in,out,dag); DirichletOff(); };
virtual void DhopEO(const FermionField &in, FermionField &out,int dag) { DirichletOn(); FermOp.DhopEO(in,out,dag); DirichletOff(); };
virtual void DhopDir(const FermionField &in, FermionField &out,int dir,int disp) { DirichletOn(); FermOp.DhopDir(in,out,dir,disp); DirichletOff(); };
// force terms; five routines; default to Dhop on diagonal
virtual void MDeriv (GaugeField &mat,const FermionField &U,const FermionField &V,int dag){FermOp.MDeriv(mat,U,V,dag);};
virtual void MoeDeriv(GaugeField &mat,const FermionField &U,const FermionField &V,int dag){FermOp.MoeDeriv(mat,U,V,dag);};
virtual void MeoDeriv(GaugeField &mat,const FermionField &U,const FermionField &V,int dag){FermOp.MeoDeriv(mat,U,V,dag);};
virtual void MooDeriv(GaugeField &mat,const FermionField &U,const FermionField &V,int dag){FermOp.MooDeriv(mat,U,V,dag);};
virtual void MeeDeriv(GaugeField &mat,const FermionField &U,const FermionField &V,int dag){FermOp.MeeDeriv(mat,U,V,dag);};
virtual void DhopDeriv (GaugeField &mat,const FermionField &U,const FermionField &V,int dag){FermOp.DhopDeriv(mat,U,V,dag);};
virtual void DhopDerivEO(GaugeField &mat,const FermionField &U,const FermionField &V,int dag){FermOp.DhopDerivEO(mat,U,V,dag);};
virtual void DhopDerivOE(GaugeField &mat,const FermionField &U,const FermionField &V,int dag){FermOp.DhopDerivOE(mat,U,V,dag);};
virtual void Mdiag (const FermionField &in, FermionField &out) { Mooee(in,out);};
virtual void Mdir (const FermionField &in, FermionField &out,int dir,int disp){FermOp.Mdir(in,out,dir,disp);};
virtual void MdirAll(const FermionField &in, std::vector<FermionField> &out) {FermOp.MdirAll(in,out);};
///////////////////////////////////////////////
// Updates gauge field during HMC
///////////////////////////////////////////////
DoubledGaugeField &GetDoubledGaugeField(void){ return FermOp.GetDoubledGaugeField(); };
DoubledGaugeField &GetDoubledGaugeFieldE(void){ return FermOp.GetDoubledGaugeFieldE(); };
DoubledGaugeField &GetDoubledGaugeFieldO(void){ return FermOp.GetDoubledGaugeFieldO(); };
virtual void ImportGauge(const GaugeField & _U)
{
GaugeField U = _U;
// Filter gauge field to apply Dirichlet
Filter.applyFilter(U);
FermOp.ImportGauge(U);
}
///////////////////////////////////////////////
// Physical field import/export
///////////////////////////////////////////////
virtual void Dminus(const FermionField &psi, FermionField &chi) { FermOp.Dminus(psi,chi); }
virtual void DminusDag(const FermionField &psi, FermionField &chi) { FermOp.DminusDag(psi,chi); }
virtual void ImportFourDimPseudoFermion(const FermionField &input,FermionField &imported) { FermOp.ImportFourDimPseudoFermion(input,imported);}
virtual void ExportFourDimPseudoFermion(const FermionField &solution,FermionField &exported){ FermOp.ExportFourDimPseudoFermion(solution,exported);}
virtual void ImportPhysicalFermionSource(const FermionField &input,FermionField &imported) { FermOp.ImportPhysicalFermionSource(input,imported);}
virtual void ImportUnphysicalFermion(const FermionField &input,FermionField &imported) { FermOp.ImportUnphysicalFermion(input,imported);}
virtual void ExportPhysicalFermionSolution(const FermionField &solution,FermionField &exported) {FermOp.ExportPhysicalFermionSolution(solution,exported);}
virtual void ExportPhysicalFermionSource(const FermionField &solution,FermionField &exported) {FermOp.ExportPhysicalFermionSource(solution,exported);}
//////////////////////////////////////////////////////////////////////
// Should never be used
//////////////////////////////////////////////////////////////////////
virtual void MomentumSpacePropagator(FermionField &out,const FermionField &in,RealD _m,std::vector<double> twist) { assert(0);};
virtual void FreePropagator(const FermionField &in,FermionField &out,RealD mass,std::vector<Complex> boundary,std::vector<double> twist) {assert(0);}
virtual void FreePropagator(const FermionField &in,FermionField &out,RealD mass) { assert(0);}
virtual void ContractConservedCurrent(PropagatorField &q_in_1,
PropagatorField &q_in_2,
PropagatorField &q_out,
PropagatorField &phys_src,
Current curr_type,
unsigned int mu)
{assert(0);};
virtual void SeqConservedCurrent(PropagatorField &q_in,
PropagatorField &q_out,
PropagatorField &phys_src,
Current curr_type,
unsigned int mu,
unsigned int tmin,
unsigned int tmax,
ComplexField &lattice_cmplx)
{assert(0);};
// Only reimplemented in Wilson5D
// Default to just a zero correlation function
virtual void ContractJ5q(FermionField &q_in ,ComplexField &J5q) { J5q=Zero(); };
virtual void ContractJ5q(PropagatorField &q_in,ComplexField &J5q) { J5q=Zero(); };
};
NAMESPACE_END(Grid);
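Roughly how the wrapper might be attached to an existing operator (the wrapped operator, gauge field and source are placeholders; not taken from the patch):
Coordinate Block({16,16,16,16});
// Ddwf: an already-constructed DomainWallFermionD (Impl = WilsonImplD)
DirichletFermionOperator<WilsonImplD> DirichletDdwf(Ddwf, Block);
DirichletDdwf.ImportGauge(Umu);      // links crossing the block faces are zeroed on import
DirichletDdwf.M(src, result);        // Dirac operator now acts with the Dirichlet cuts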

View File

@ -101,6 +101,12 @@ NAMESPACE_CHECK(WilsonTM5);
#include <Grid/qcd/action/fermion/PauliVillarsInverters.h>
#include <Grid/qcd/action/fermion/Reconstruct5Dprop.h>
#include <Grid/qcd/action/fermion/MADWF.h>
////////////////////////////////////////////////////////////////////
// DDHMC related
////////////////////////////////////////////////////////////////////
#include <Grid/qcd/action/fermion/DirichletFermionOperator.h>
#include <Grid/qcd/action/fermion/SchurFactoredFermionOperator.h>
NAMESPACE_CHECK(DWFutils);
////////////////////////////////////////////////////////////////////////////////////////////////////

View File

@ -25,8 +25,7 @@ Author: Peter Boyle <pabobyle@ph.ed.ac.uk>
See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */
#ifndef GRID_QCD_FERMION_CORE_H
#define GRID_QCD_FERMION_CORE_H
#pragma once
#include <Grid/GridCore.h>
#include <Grid/GridQCDcore.h>
@ -45,4 +44,3 @@ NAMESPACE_CHECK(FermionOperator);
#include <Grid/qcd/action/fermion/StaggeredKernels.h> //used by all wilson type fermions
NAMESPACE_CHECK(Kernels);
#endif

View File

@ -140,6 +140,9 @@ public:
// Updates gauge field during HMC
///////////////////////////////////////////////
virtual void ImportGauge(const GaugeField & _U)=0;
virtual DoubledGaugeField &GetDoubledGaugeField(void) =0;
virtual DoubledGaugeField &GetDoubledGaugeFieldE(void) =0;
virtual DoubledGaugeField &GetDoubledGaugeFieldO(void) =0;
//////////////////////////////////////////////////////////////////////
// Conserved currents, either contract at sink or insert sequentially.
@ -171,6 +174,16 @@ public:
///////////////////////////////////////////////
virtual void Dminus(const FermionField &psi, FermionField &chi) { chi=psi; }
virtual void DminusDag(const FermionField &psi, FermionField &chi) { chi=psi; }
virtual void ImportFourDimPseudoFermion(const FermionField &input,FermionField &imported)
{
imported = input;
};
virtual void ExportFourDimPseudoFermion(const FermionField &solution,FermionField &exported)
{
exported=solution;
};
virtual void ImportPhysicalFermionSource(const FermionField &input,FermionField &imported)
{
imported = input;

View File

@ -30,6 +30,18 @@ directory
NAMESPACE_BEGIN(Grid);
/*
Policy implementation for G-parity boundary conditions
Rather than treating the gauge field as a flavored field, the Grid implementation of G-parity treats the gauge field as a regular
field with complex conjugate boundary conditions. In order to ensure the second flavor interacts with the conjugate links and the first
with the regular links we overload the functionality of doubleStore, whose purpose is to store the gauge field and the barrel-shifted gauge field
to avoid communicating links when applying the Dirac operator, such that the double-stored field contains also a flavor index which maps to
either the link or the conjugate link. This flavored field is then used by multLink to apply the correct link to a spinor.
Here the first Nd-1 directions are treated as "spatial", and a twist value of 1 indicates G-parity BCs in that direction.
mu=Nd-1 is assumed to be the time direction and a twist value of 1 indicates antiperiodic BCs
*/
template <class S, class Representation = FundamentalRepresentation, class Options=CoeffReal>
class GparityWilsonImpl : public ConjugateGaugeImpl<GaugeImplTypes<S, Representation::Dimension> > {
public:
@ -113,7 +125,7 @@ public:
|| ((distance== 1)&&(icoor[direction]==1))
|| ((distance==-1)&&(icoor[direction]==0));
permute_lane = permute_lane && SE->_around_the_world && St.parameters.twists[mmu]; //only if we are going around the world
permute_lane = permute_lane && SE->_around_the_world && St.parameters.twists[mmu] && mmu < Nd-1; //only if we are going around the world in a spatial direction
//Apply the links
int f_upper = permute_lane ? 1 : 0;
@ -139,10 +151,10 @@ public:
assert((distance == 1) || (distance == -1)); // nearest neighbour stencil hard code
assert((sl == 1) || (sl == 2));
if ( SE->_around_the_world && St.parameters.twists[mmu] ) {
//If this site is a global boundary site, perform the G-parity flavor twist
if ( mmu < Nd-1 && SE->_around_the_world && St.parameters.twists[mmu] ) {
if ( sl == 2 ) {
//Only do the twist for lanes on the edge of the physical node
ExtractBuffer<sobj> vals(Nsimd);
extract(chi,vals);
@ -197,6 +209,19 @@ public:
reg = memory;
}
//Poke 'poke_f0' onto flavor 0 and 'poke_f1' onto flavor 1 in direction mu of the doubled gauge field Uds
inline void pokeGparityDoubledGaugeField(DoubledGaugeField &Uds, const GaugeLinkField &poke_f0, const GaugeLinkField &poke_f1, const int mu){
autoView(poke_f0_v, poke_f0, CpuRead);
autoView(poke_f1_v, poke_f1, CpuRead);
autoView(Uds_v, Uds, CpuWrite);
thread_foreach(ss,poke_f0_v,{
Uds_v[ss](0)(mu) = poke_f0_v[ss]();
Uds_v[ss](1)(mu) = poke_f1_v[ss]();
});
}
inline void DoubleStore(GridBase *GaugeGrid,DoubledGaugeField &Uds,const GaugeField &Umu)
{
conformable(Uds.Grid(),GaugeGrid);
@ -207,14 +232,19 @@ public:
GaugeLinkField Uconj(GaugeGrid);
Lattice<iScalar<vInteger> > coor(GaugeGrid);
for(int mu=0;mu<Nd;mu++){
LatticeCoordinate(coor,mu);
//Here the first Nd-1 directions are treated as "spatial", and a twist value of 1 indicates G-parity BCs in that direction.
//mu=Nd-1 is assumed to be the time direction and a twist value of 1 indicates antiperiodic BCs
for(int mu=0;mu<Nd-1;mu++){
if( Params.twists[mu] ){
LatticeCoordinate(coor,mu);
}
U = PeekIndex<LorentzIndex>(Umu,mu);
Uconj = conjugate(U);
// Implement the isospin rotation sign on the boundary between f=1 and f=0
// This phase could come from a simple bc 1,1,-1,1 ..
int neglink = GaugeGrid->GlobalDimensions()[mu]-1;
if ( Params.twists[mu] ) {
@ -229,7 +259,7 @@ public:
thread_foreach(ss,U_v,{
Uds_v[ss](0)(mu) = U_v[ss]();
Uds_v[ss](1)(mu) = Uconj_v[ss]();
});
});
}
U = adj(Cshift(U ,mu,-1)); // correct except for spanning the boundary
@ -260,6 +290,38 @@ public:
});
}
}
{ //periodic / antiperiodic temporal BCs
int mu = Nd-1;
int L = GaugeGrid->GlobalDimensions()[mu];
int Lmu = L - 1;
LatticeCoordinate(coor, mu);
U = PeekIndex<LorentzIndex>(Umu, mu); //Get t-directed links
GaugeLinkField *Upoke = &U;
if(Params.twists[mu]){ //antiperiodic
Utmp = where(coor == Lmu, -U, U);
Upoke = &Utmp;
}
Uconj = conjugate(*Upoke); //second flavor interacts with conjugate links
pokeGparityDoubledGaugeField(Uds, *Upoke, Uconj, mu);
//Get the barrel-shifted field
Utmp = adj(Cshift(U, mu, -1)); //is a forward shift!
Upoke = &Utmp;
if(Params.twists[mu]){
U = where(coor == 0, -Utmp, Utmp); //boundary phase
Upoke = &U;
}
Uconj = conjugate(*Upoke);
pokeGparityDoubledGaugeField(Uds, *Upoke, Uconj, mu + 4);
}
}
inline void InsertForce4D(GaugeField &mat, FermionField &Btilde, FermionField &A, int mu) {
@ -298,28 +360,48 @@ public:
inline void extractLinkField(std::vector<GaugeLinkField> &mat, DoubledGaugeField &Uds){
assert(0);
}
inline void InsertForce5D(GaugeField &mat, FermionField &Btilde, FermionField &Atilde, int mu) {
int Ls = Btilde.Grid()->_fdimensions[0];
GaugeLinkField tmp(mat.Grid());
tmp = Zero();
int Ls=Btilde.Grid()->_fdimensions[0];
{
autoView( tmp_v , tmp, CpuWrite);
autoView( Atilde_v , Atilde, CpuRead);
autoView( Btilde_v , Btilde, CpuRead);
thread_for(ss,tmp.Grid()->oSites(),{
for (int s = 0; s < Ls; s++) {
int sF = s + Ls * ss;
auto ttmp = traceIndex<SpinIndex>(outerProduct(Btilde_v[sF], Atilde_v[sF]));
tmp_v[ss]() = tmp_v[ss]() + ttmp(0, 0) + conjugate(ttmp(1, 1));
}
});
GridBase *GaugeGrid = mat.Grid();
Lattice<iScalar<vInteger> > coor(GaugeGrid);
if( Params.twists[mu] ){
LatticeCoordinate(coor,mu);
}
autoView( mat_v , mat, AcceleratorWrite);
autoView( Btilde_v , Btilde, AcceleratorRead);
autoView( Atilde_v , Atilde, AcceleratorRead);
accelerator_for(sss,mat.Grid()->oSites(), FermionField::vector_type::Nsimd(),{
int sU=sss;
typedef decltype(coalescedRead(mat_v[sU](mu)() )) ColorMatrixType;
ColorMatrixType sum;
zeroit(sum);
for(int s=0;s<Ls;s++){
int sF = s+Ls*sU;
for(int spn=0;spn<Ns;spn++){ //sum over spin
//Flavor 0
auto bb = coalescedRead(Btilde_v[sF](0)(spn) ); //color vector
auto aa = coalescedRead(Atilde_v[sF](0)(spn) );
sum = sum + outerProduct(bb,aa);
//Flavor 1
bb = coalescedRead(Btilde_v[sF](1)(spn) );
aa = coalescedRead(Atilde_v[sF](1)(spn) );
sum = sum + conjugate(outerProduct(bb,aa));
}
}
coalescedWrite(mat_v[sU](mu)(), sum);
});
}
PokeIndex<LorentzIndex>(mat, tmp, mu);
return;
}
};

View File

@ -141,8 +141,11 @@ public:
void ImportGauge(const GaugeField &_Uthin, const GaugeField &_Ufat);
void ImportGaugeSimple(const GaugeField &_UUU ,const GaugeField &_U);
void ImportGaugeSimple(const DoubledGaugeField &_UUU,const DoubledGaugeField &_U);
DoubledGaugeField &GetU(void) { return Umu ; } ;
DoubledGaugeField &GetUUU(void) { return UUUmu; };
virtual DoubledGaugeField &GetDoubledGaugeField(void) override { return Umu; };
virtual DoubledGaugeField &GetDoubledGaugeFieldE(void) override { return UmuEven; };
virtual DoubledGaugeField &GetDoubledGaugeFieldO(void) override { return UmuOdd; };
virtual DoubledGaugeField &GetU(void) { return Umu ; } ;
virtual DoubledGaugeField &GetUUU(void) { return UUUmu; };
void CopyGaugeCheckerboards(void);
///////////////////////////////////////////////////////////////

View File

@ -160,17 +160,20 @@ public:
RealD _c1=1.0, RealD _c2=1.0,RealD _u0=1.0,
const ImplParams &p= ImplParams());
// DoubleStore gauge field in operator
void ImportGauge (const GaugeField &_Uthin ) { assert(0); }
// DoubleStore gauge field in operator
void ImportGauge (const GaugeField &_Uthin ) { assert(0); }
void ImportGauge(const GaugeField &_Uthin,const GaugeField &_Ufat);
void ImportGaugeSimple(const GaugeField &_UUU,const GaugeField &_U);
void ImportGaugeSimple(const DoubledGaugeField &_UUU,const DoubledGaugeField &_U);
// Give a reference; can be used to do an assignment or copy back out after import
// if Carleton wants to cache them and not use the ImportSimple
DoubledGaugeField &GetU(void) { return Umu ; } ;
DoubledGaugeField &GetUUU(void) { return UUUmu; };
void CopyGaugeCheckerboards(void);
void ImportGaugeSimple(const GaugeField &_UUU,const GaugeField &_U);
void ImportGaugeSimple(const DoubledGaugeField &_UUU,const DoubledGaugeField &_U);
// Give a reference; can be used to do an assignment or copy back out after import
// if Carleton wants to cache them and not use the ImportSimple
virtual DoubledGaugeField &GetDoubledGaugeField(void) override { return Umu; };
virtual DoubledGaugeField &GetDoubledGaugeFieldE(void) override { return UmuEven; };
virtual DoubledGaugeField &GetDoubledGaugeFieldO(void) override { return UmuOdd; };
DoubledGaugeField &GetU(void) { return Umu ; } ;
DoubledGaugeField &GetUUU(void) { return UUUmu; };
void CopyGaugeCheckerboards(void);
///////////////////////////////////////////////////////////////
// Data members require to support the functionality
///////////////////////////////////////////////////////////////

View File

@ -135,6 +135,9 @@ public:
// DoubleStore impl dependent
void ImportGauge (const GaugeField &_U );
DoubledGaugeField &GetDoubledGaugeField(void){ return Umu; };
DoubledGaugeField &GetDoubledGaugeFieldE(void){ return UmuEven; };
DoubledGaugeField &GetDoubledGaugeFieldO(void){ return UmuOdd; };
DoubledGaugeField &GetU(void) { return Umu ; } ;
void CopyGaugeCheckerboards(void);

View File

@ -0,0 +1,534 @@
/*************************************************************************************
Grid physics library, www.github.com/paboyle/Grid
Source file: ./lib/qcd/action/fermion/SchurFactoredFermionOperator.h
Copyright (C) 2021
Author: Peter Boyle <paboyle@ph.ed.ac.uk>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */
#pragma once
#include <Grid/qcd/utils/MixedPrecisionOperatorFunction.h>
#include <Grid/qcd/action/domains/Domains.h>
NAMESPACE_BEGIN(Grid);
////////////////////////////////////////////////////////
// Some explanation of class structure for domain decomposition:
//
// Need a dirichlet operator for two flavour determinant - acts on both Omega and OmegaBar.
//
// Possible gain if the global sums and CG are run independently?? Could measure this.
//
// Types of operations
//
// 1) assemble local det dOmega det dOmegaBar pseudofermion
//
// - DirichletFermionOperator - can either do a global solve, or independent/per cell coefficients.
//
// 2) assemble dOmegaInverse and dOmegaBarInverse in R
//
// - DirichletFermionOperator - can also be used to
// - need two or more cells per node. Options
// - a) solve one cell at a time, no new code, CopyRegion and reduced /split Grids
// - b) solve multiple cells in parallel. predicated dslash implementation
//
// - b) has more parallelism; experience with the block solver suggests it need not be algorithmically inefficient.
//   a) is more cache friendly and gives simpler code.
// b) is easy to implement in a "trial" or inefficient code with projection.
//
// 3) Additional functionality for domain operations
//
// - SchurFactoredFermionOperator - Need a DDHMC utility - whether used in two flavour or one flavour
//
// - dBoundary - needs non-dirichlet operator
// - Contains one Dirichlet Op, and one non-Dirichlet op. Implements dBoundary etc...
// - The Dirichlet ops can be passed to dOmega(Bar) solvers etc...
//
////////////////////////////////////////////////////////
template<class ImplD,class ImplF>
class SchurFactoredFermionOperator : public ImplD
{
INHERIT_IMPL_TYPES(ImplD);
typedef typename ImplF::FermionField FermionFieldF;
typedef typename ImplD::FermionField FermionFieldD;
typedef SchurDiagMooeeOperator<FermionOperator<ImplD>,FermionFieldD> LinearOperatorD;
typedef SchurDiagMooeeOperator<FermionOperator<ImplF>,FermionFieldF> LinearOperatorF;
typedef SchurDiagMooeeDagOperator<FermionOperator<ImplD>,FermionFieldD> LinearOperatorDagD;
typedef SchurDiagMooeeDagOperator<FermionOperator<ImplF>,FermionFieldF> LinearOperatorDagF;
typedef MixedPrecisionConjugateGradientOperatorFunction<FermionOperator<ImplD>,
FermionOperator<ImplF>,
LinearOperatorD,
LinearOperatorF> MxPCG;
typedef MixedPrecisionConjugateGradientOperatorFunction<FermionOperator<ImplD>,
FermionOperator<ImplF>,
LinearOperatorDagD,
LinearOperatorDagF> MxDagPCG;
public:
GridBase *FermionGrid(void) { return PeriodicFermOpD.FermionGrid(); };
GridBase *GaugeGrid(void) { return PeriodicFermOpD.GaugeGrid(); };
FermionOperator<ImplD> & DirichletFermOpD;
FermionOperator<ImplF> & DirichletFermOpF;
FermionOperator<ImplD> & PeriodicFermOpD;
FermionOperator<ImplF> & PeriodicFermOpF;
LinearOperatorD DirichletLinOpD;
LinearOperatorF DirichletLinOpF;
LinearOperatorD PeriodicLinOpD;
LinearOperatorF PeriodicLinOpF;
LinearOperatorDagD DirichletLinOpDagD;
LinearOperatorDagF DirichletLinOpDagF;
LinearOperatorDagD PeriodicLinOpDagD;
LinearOperatorDagF PeriodicLinOpDagF;
// Can tinker with these in the pseudofermion for force vs. action solves
Integer maxinnerit;
Integer maxouterit;
RealD tol;
RealD tolinner;
Coordinate Block;
DomainDecomposition Domains;
SchurFactoredFermionOperator(FermionOperator<ImplD> & _PeriodicFermOpD,
FermionOperator<ImplF> & _PeriodicFermOpF,
FermionOperator<ImplD> & _DirichletFermOpD,
FermionOperator<ImplF> & _DirichletFermOpF,
Coordinate &_Block)
: Block(_Block), Domains(Block),
PeriodicFermOpD(_PeriodicFermOpD),
PeriodicFermOpF(_PeriodicFermOpF),
DirichletFermOpD(_DirichletFermOpD),
DirichletFermOpF(_DirichletFermOpF),
DirichletLinOpD(DirichletFermOpD),
DirichletLinOpF(DirichletFermOpF),
PeriodicLinOpD(PeriodicFermOpD),
PeriodicLinOpF(PeriodicFermOpF),
DirichletLinOpDagD(DirichletFermOpD),
DirichletLinOpDagF(DirichletFermOpF),
PeriodicLinOpDagD(PeriodicFermOpD),
PeriodicLinOpDagF(PeriodicFermOpF)
{
tol=1.0e-10;
tolinner=1.0e-6;
maxinnerit=1000;
maxouterit=10;
assert(PeriodicFermOpD.FermionGrid() == DirichletFermOpD.FermionGrid());
assert(PeriodicFermOpF.FermionGrid() == DirichletFermOpF.FermionGrid());
};
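// A hedged construction sketch (hypothetical operator names; assumes matching periodic and
// Dirichlet operators in double and single precision have already been built on the same grids):
//
//   Coordinate Block({4,4,4,4});
//   SchurFactoredFermionOperator<ImplD,ImplF> Boundary(PeriOpD,PeriOpF,DirOpD,DirOpF,Block);
//   Boundary.tol      = 1.0e-8;  // outer tolerance for action-level solves
//   Boundary.tolinner = 1.0e-6;  // inner single-precision tolerance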
enum Domain { Omega=0, OmegaBar=1 };
void ImportGauge(const GaugeField &Umu)
{
// Single precision will update in the mixed prec CG
PeriodicFermOpD.ImportGauge(Umu);
GaugeField dUmu(Umu.Grid());
dUmu=Umu;
// DirchletBCs(dUmu);
DirichletFilter<GaugeField> Filter(Block);
Filter.applyFilter(dUmu);
DirichletFermOpD.ImportGauge(dUmu);
}
/*
void ProjectBoundaryBothDomains (FermionField &f,int sgn)
{
assert((sgn==1)||(sgn==-1));
Real rsgn = sgn;
Gamma::Algebra Gmu [] = {
Gamma::Algebra::GammaX,
Gamma::Algebra::GammaY,
Gamma::Algebra::GammaZ,
Gamma::Algebra::GammaT
};
GridBase *grid = f.Grid();
LatticeInteger coor(grid);
LatticeInteger face(grid);
LatticeInteger one(grid); one = 1;
LatticeInteger zero(grid); zero = 0;
LatticeInteger nface(grid); nface=Zero();
FermionField projected(grid); projected=Zero();
FermionField sp_proj (grid);
int dims = grid->Nd();
int isDWF= (dims==Nd+1);
assert((dims==Nd)||(dims==Nd+1));
Coordinate Global=grid->GlobalDimensions();
for(int mu=0;mu<Nd;mu++){
if ( Block[mu] <= Global[mu+isDWF] ) {
// need to worry about DWF 5th dim first
LatticeCoordinate(coor,mu+isDWF);
face = where(mod(coor,Block[mu]) == Integer(0),one,zero );
nface = nface + face;
Gamma G(Gmu[mu]);
// Lower face receives (1-gamma)/2 in normal forward hopping term
sp_proj = 0.5*(f-G*f*rsgn);
projected= where(face,sp_proj,projected);
//projected= where(face,f,projected);
face = where(mod(coor,Block[mu]) == Integer(Block[mu]-1) ,one,zero );
nface = nface + face;
// Upper face receives (1+gamma)/2 in normal backward hopping term
sp_proj = 0.5*(f+G*f*rsgn);
projected= where(face,sp_proj,projected);
//projected= where(face,f,projected);
}
}
// Initial Zero() where nface==0.
// Keep the spin projected faces where nface==1
// Full spinor where nface>=2
projected = where(nface>Integer(1),f,projected);
f=projected;
}
*/
void ProjectBoundaryBothDomains (FermionField &f,int sgn)
{
assert((sgn==1)||(sgn==-1));
Real rsgn = sgn;
Gamma::Algebra Gmu [] = {
Gamma::Algebra::GammaX,
Gamma::Algebra::GammaY,
Gamma::Algebra::GammaZ,
Gamma::Algebra::GammaT
};
GridBase *grid = f.Grid();
LatticeInteger coor(grid);
LatticeInteger face(grid);
LatticeInteger one(grid); one = 1;
LatticeInteger zero(grid); zero = 0;
LatticeInteger omega(grid);
LatticeInteger omegabar(grid);
LatticeInteger tmp(grid);
omega=one; Domains.ProjectDomain(omega,0);
omegabar=one; Domains.ProjectDomain(omegabar,1);
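// omega/omegabar are 0/1 indicator fields for the two domains; below, a boundary face site is one
// whose neighbour in direction mu lies in the opposite domain (indicator sum == 2 after the Cshift).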
LatticeInteger nface(grid); nface=Zero();
FermionField projected(grid); projected=Zero();
FermionField sp_proj (grid);
int dims = grid->Nd();
int isDWF= (dims==Nd+1);
assert((dims==Nd)||(dims==Nd+1));
Coordinate Global=grid->GlobalDimensions();
for(int mmu=0;mmu<Nd;mmu++){
Gamma G(Gmu[mmu]);
// need to worry about DWF 5th dim first
int mu = mmu+isDWF;
if ( Block[mmu] && (Block[mmu] <= Global[mu]) ) {
// Lower face receives (1-gamma)/2 in normal forward hopping term
tmp = Cshift(omegabar,mu,-1);
tmp = tmp + omega;
face = where(tmp == Integer(2),one,zero );
tmp = Cshift(omega,mu,-1);
tmp = tmp + omegabar;
face = where(tmp == Integer(2),one,face );
nface = nface + face;
sp_proj = 0.5*(f-G*f*rsgn);
projected= where(face,sp_proj,projected);
// Upper face receives (1+gamma)/2 in normal backward hopping term
tmp = Cshift(omegabar,mu,1);
tmp = tmp + omega;
face = where(tmp == Integer(2),one,zero );
tmp = Cshift(omega,mu,1);
tmp = tmp + omegabar;
face = where(tmp == Integer(2),one,face );
nface = nface + face;
sp_proj = 0.5*(f+G*f*rsgn);
projected= where(face,sp_proj,projected);
}
}
// Initial Zero() where nface==0.
// Keep the spin projected faces where nface==1
// Full spinor where nface>=2
projected = where(nface>Integer(1),f,projected);
f=projected;
}
void ProjectDomain(FermionField &f,int domain)
{
/*
GridBase *grid = f.Grid();
int dims = grid->Nd();
int isDWF= (dims==Nd+1);
assert((dims==Nd)||(dims==Nd+1));
FermionField zz(grid); zz=Zero();
LatticeInteger coor(grid);
LatticeInteger domaincb(grid); domaincb=Zero();
for(int d=0;d<Nd;d++){
LatticeCoordinate(coor,d+isDWF);
domaincb = domaincb + div(coor,Block[d]);
}
f = where(mod(domaincb,2)==Integer(domain),f,zz);
*/
Domains.ProjectDomain(f,domain);
};
void ProjectOmegaBar (FermionField &f) {ProjectDomain(f,OmegaBar);}
void ProjectOmega (FermionField &f) {ProjectDomain(f,Omega);}
// See my notes(!).
// Notation: Following Luscher, we introduce projectors $\hPdb$ with both spinor and space structure
// projecting all spinor elements in $\Omega$ connected by $\Ddb$ to $\bar{\Omega}$,
void ProjectBoundaryBar(FermionField &f)
{
ProjectBoundaryBothDomains(f,1);
ProjectOmega(f);
}
// and $\hPd$ projecting all spinor elements in $\bar{\Omega}$ connected by $\Dd$ to $\Omega$.
void ProjectBoundary (FermionField &f)
{
ProjectBoundaryBothDomains(f,1);
ProjectOmegaBar(f);
// DumpSliceNorm("ProjectBoundary",f,f.Grid()->Nd()-1);
};
void dBoundary (FermionField &in,FermionField &out)
{
FermionField tmp(in);
ProjectOmegaBar(tmp);
PeriodicFermOpD.M(tmp,out);
ProjectOmega(out);
};
void dBoundaryDag (FermionField &in,FermionField &out)
{
FermionField tmp(in);
ProjectOmega(tmp);
PeriodicFermOpD.Mdag(tmp,out);
ProjectOmegaBar(out);
};
void dBoundaryBar (FermionField &in,FermionField &out)
{
FermionField tmp(in);
ProjectOmega(tmp);
PeriodicFermOpD.M(tmp,out);
ProjectOmegaBar(out);
};
void dBoundaryBarDag (FermionField &in,FermionField &out)
{
FermionField tmp(in);
ProjectOmegaBar(tmp);
PeriodicFermOpD.Mdag(tmp,out);
ProjectOmega(out);
};
void dOmega (FermionField &in,FermionField &out)
{
FermionField tmp(in);
ProjectOmega(tmp);
DirichletFermOpD.M(tmp,out);
ProjectOmega(out);
};
void dOmegaBar (FermionField &in,FermionField &out)
{
FermionField tmp(in);
ProjectOmegaBar(tmp);
DirichletFermOpD.M(tmp,out);
ProjectOmegaBar(out);
};
void dOmegaDag (FermionField &in,FermionField &out)
{
FermionField tmp(in);
ProjectOmega(tmp);
DirichletFermOpD.Mdag(tmp,out);
ProjectOmega(out);
};
void dOmegaBarDag (FermionField &in,FermionField &out)
{
FermionField tmp(in);
ProjectOmegaBar(tmp);
DirichletFermOpD.Mdag(tmp,out);
ProjectOmegaBar(out);
};
void dOmegaInv (FermionField &in,FermionField &out)
{
FermionField tmp(in);
ProjectOmega(tmp);
dOmegaInvAndOmegaBarInv(tmp,out); // Inefficient warning
ProjectOmega(out);
};
void dOmegaBarInv(FermionField &in,FermionField &out)
{
FermionField tmp(in);
ProjectOmegaBar(tmp);
dOmegaInvAndOmegaBarInv(tmp,out);
ProjectOmegaBar(out);
};
void dOmegaDagInv (FermionField &in,FermionField &out)
{
FermionField tmp(in);
ProjectOmega(tmp);
dOmegaDagInvAndOmegaBarDagInv(tmp,out);
ProjectOmega(out);
};
void dOmegaBarDagInv(FermionField &in,FermionField &out)
{
FermionField tmp(in);
ProjectOmegaBar(tmp);
dOmegaDagInvAndOmegaBarDagInv(tmp,out);
ProjectOmegaBar(out);
};
void dOmegaInvAndOmegaBarInv(FermionField &in,FermionField &out)
{
MxPCG OmegaSolver(tol,
tolinner,
maxinnerit,
maxouterit,
DirichletFermOpF.FermionRedBlackGrid(),
DirichletFermOpF,
DirichletFermOpD,
DirichletLinOpF,
DirichletLinOpD);
SchurRedBlackDiagMooeeSolve<FermionField> PrecSolve(OmegaSolver);
PrecSolve(DirichletFermOpD,in,out);
};
void dOmegaDagInvAndOmegaBarDagInv(FermionField &in,FermionField &out)
{
MxDagPCG OmegaDagSolver(tol,
tolinner,
maxinnerit,
maxouterit,
DirichletFermOpF.FermionRedBlackGrid(),
DirichletFermOpF,
DirichletFermOpD,
DirichletLinOpDagF,
DirichletLinOpDagD);
SchurRedBlackDiagMooeeDagSolve<FermionField> PrecSolve(OmegaDagSolver);
PrecSolve(DirichletFermOpD,in,out);
};
// Rdag = Pdbar - DdbarDag DomegabarDagInv DdDag DomegaDagInv Pdbar
void RDag(FermionField &in,FermionField &out)
{
FermionField tmp1(PeriodicFermOpD.FermionGrid());
FermionField tmp2(PeriodicFermOpD.FermionGrid());
out = in;
ProjectBoundaryBar(out);
dOmegaDagInv(out,tmp1);
dBoundaryDag(tmp1,tmp2);
dOmegaBarDagInv(tmp2,tmp1);
dBoundaryBarDag(tmp1,tmp2);
out = out - tmp2;
};
// R = Pdbar - Pdbar DomegaInv Dd DomegabarInv Ddbar
void R(FermionField &in,FermionField &out)
{
FermionField tmp1(PeriodicFermOpD.FermionGrid());
FermionField tmp2(PeriodicFermOpD.FermionGrid());
out = in;
ProjectBoundaryBar(out);
dBoundaryBar(out,tmp1);
dOmegaBarInv(tmp1,tmp2);
dBoundary(tmp2,tmp1);
dOmegaInv(tmp1,tmp2);
out = in - tmp2 ;
ProjectBoundaryBar(out);
// DumpSliceNorm("R",out,out.Grid()->Nd()-1);
};
// Rinv = Pdbar - Pdbar Dinv Ddbar
void RInv(FermionField &in,FermionField &out)
{
FermionField tmp1(PeriodicFermOpD.FermionGrid());
dBoundaryBar(in,out);
Dinverse(out,tmp1);
out =in -tmp1;
ProjectBoundaryBar(out);
};
// RdagInv = Pdbar - DdbarDag DinvDag Pdbar
void RDagInv(FermionField &in,FermionField &out)
{
FermionField tmp(PeriodicFermOpD.FermionGrid());
FermionField Pin(PeriodicFermOpD.FermionGrid());
Pin = in; ProjectBoundaryBar(Pin);
DinverseDag(Pin,out);
dBoundaryBarDag(out,tmp);
out =Pin -tmp;
};
// Non-dirichlet inverter using red-black preconditioning
void Dinverse(FermionField &in,FermionField &out)
{
MxPCG DSolver(tol,
tolinner,
maxinnerit,
maxouterit,
PeriodicFermOpF.FermionRedBlackGrid(),
PeriodicFermOpF,
PeriodicFermOpD,
PeriodicLinOpF,
PeriodicLinOpD);
SchurRedBlackDiagMooeeSolve<FermionField> Solve(DSolver);
Solve(PeriodicFermOpD,in,out);
}
void DinverseDag(FermionField &in,FermionField &out)
{
MxDagPCG DdagSolver(tol,
tolinner,
maxinnerit,
maxouterit,
PeriodicFermOpF.FermionRedBlackGrid(),
PeriodicFermOpF,
PeriodicFermOpD,
PeriodicLinOpDagF,
PeriodicLinOpDagD);
SchurRedBlackDiagMooeeDagSolve<FermionField> Solve(DdagSolver);
Solve(PeriodicFermOpD,in,out);
}
};
NAMESPACE_END(Grid);
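A quick consistency sketch (not part of the source; "Boundary", the RNG and the field names are hypothetical): R and RInv above should act as mutual inverses on boundary-projected vectors, mirroring the commented-out "diff" diagnostics in the ratio pseudofermion further below.

  GridBase *FGrid = Boundary.FermionGrid();
  FermionField eta(FGrid), chi(FGrid), diff(FGrid);
  gaussian(pRNG, eta);
  Boundary.ProjectBoundaryBar(eta);   // restrict to the boundary subspace
  Boundary.R(eta, chi);
  Boundary.RInv(chi, diff);
  diff = diff - eta;
  std::cout << "|RInv R eta - eta|^2 = " << norm2(diff) << std::endl;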

View File

@ -303,9 +303,11 @@ public:
int npoints,
int checkerboard,
const std::vector<int> &directions,
const std::vector<int> &distances,Parameters p)
: CartesianStencil<vobj,cobj,Parameters> (grid,npoints,checkerboard,directions,distances,p)
{
const std::vector<int> &distances,
bool locally_periodic,
Parameters p)
: CartesianStencil<vobj,cobj,Parameters> (grid,npoints,checkerboard,directions,distances,locally_periodic,p)
{
ZeroCountersi();
surface_list.resize(0);
this->same_node.resize(npoints);

View File

@ -146,8 +146,11 @@ public:
void DhopInternalSerial(StencilImpl &st, LebesgueOrder &lo, DoubledGaugeField &U,
const FermionField &in, FermionField &out, int dag);
void DhopInternalDirichletComms(StencilImpl &st, LebesgueOrder &lo, DoubledGaugeField &U,
const FermionField &in, FermionField &out, int dag);
void DhopInternalOverlappedComms(StencilImpl &st, LebesgueOrder &lo, DoubledGaugeField &U,
const FermionField &in, FermionField &out, int dag);
const FermionField &in, FermionField &out, int dag);
// Constructor
WilsonFermion(GaugeField &_Umu, GridCartesian &Fgrid,
@ -157,7 +160,10 @@ public:
// DoubleStore impl dependent
void ImportGauge(const GaugeField &_Umu);
DoubledGaugeField &GetDoubledGaugeField(void){ return Umu; };
DoubledGaugeField &GetDoubledGaugeFieldE(void){ return UmuEven; };
DoubledGaugeField &GetDoubledGaugeFieldO(void){ return UmuOdd; };
///////////////////////////////////////////////////////////////
// Data members require to support the functionality
///////////////////////////////////////////////////////////////

View File

@ -165,7 +165,14 @@ public:
const FermionField &in,
FermionField &out,
int dag);
void DhopInternalDirichletComms(StencilImpl & st,
LebesgueOrder &lo,
DoubledGaugeField &U,
const FermionField &in,
FermionField &out,
int dag);
// Constructors
WilsonFermion5D(GaugeField &_Umu,
GridCartesian &FiveDimGrid,
@ -174,19 +181,11 @@ public:
GridRedBlackCartesian &FourDimRedBlackGrid,
double _M5,const ImplParams &p= ImplParams());
// Constructors
/*
WilsonFermion5D(int simd,
GaugeField &_Umu,
GridCartesian &FiveDimGrid,
GridRedBlackCartesian &FiveDimRedBlackGrid,
GridCartesian &FourDimGrid,
double _M5,const ImplParams &p= ImplParams());
*/
// DoubleStore
void ImportGauge(const GaugeField &_Umu);
DoubledGaugeField &GetDoubledGaugeField(void){ return Umu; };
DoubledGaugeField &GetDoubledGaugeFieldE(void){ return UmuEven; };
DoubledGaugeField &GetDoubledGaugeFieldO(void){ return UmuOdd; };
///////////////////////////////////////////////////////////////
// Data members require to support the functionality
///////////////////////////////////////////////////////////////

View File

@ -39,7 +39,7 @@ NAMESPACE_BEGIN(Grid);
class WilsonKernelsStatic {
public:
enum { OptGeneric, OptHandUnroll, OptInlineAsm };
enum { CommsAndCompute, CommsThenCompute };
enum { CommsAndCompute, CommsThenCompute, CommsDirichlet };
static int Opt;
static int Comms;
};
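// For orientation (a hedged usage note; the driver-side call is hypothetical): Comms is a global
// static, so the new Dirichlet mode is selected exactly like the existing overlapped/serial modes:
//   WilsonKernelsStatic::Comms = WilsonKernelsStatic::CommsDirichlet; // interior-only Dhop, no halo exchange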

View File

@ -112,7 +112,6 @@ void CayleyFermion5D<Impl>::ImportUnphysicalFermion(const FermionField &input4d,
axpby_ssp_pminus(tmp, 0., tmp, 1., tmp, Ls-1, Ls-1);
imported5d=tmp;
}
template<class Impl>
void CayleyFermion5D<Impl>::ImportPhysicalFermionSource(const FermionField &input4d,FermionField &imported5d)
{
@ -127,6 +126,37 @@ void CayleyFermion5D<Impl>::ImportPhysicalFermionSource(const FermionField &inpu
axpby_ssp_pminus(tmp, 0., tmp, 1., tmp, Ls-1, Ls-1);
Dminus(tmp,imported5d);
}
////////////////////////////////////////////////////
// Added for fourD pseudofermion det estimation
////////////////////////////////////////////////////
template<class Impl>
void CayleyFermion5D<Impl>::ImportFourDimPseudoFermion(const FermionField &input4d,FermionField &imported5d)
{
int Ls = this->Ls;
FermionField tmp(this->FermionGrid());
conformable(imported5d.Grid(),this->FermionGrid());
conformable(input4d.Grid() ,this->GaugeGrid());
tmp = Zero();
InsertSlice(input4d, tmp, 0 , 0);
InsertSlice(input4d, tmp, Ls-1, 0);
axpby_ssp_pminus(tmp, 0., tmp, 1., tmp, 0, 0);
axpby_ssp_pplus (tmp, 0., tmp, 1., tmp, Ls-1, Ls-1);
imported5d=tmp;
}
template<class Impl>
void CayleyFermion5D<Impl>::ExportFourDimPseudoFermion(const FermionField &solution5d,FermionField &exported4d)
{
int Ls = this->Ls;
FermionField tmp(this->FermionGrid());
tmp = solution5d;
conformable(solution5d.Grid(),this->FermionGrid());
conformable(exported4d.Grid(),this->GaugeGrid());
axpby_ssp_pminus(tmp, 0., solution5d, 1., solution5d, 0, 0);
axpby_ssp_pplus (tmp, 1., tmp , 1., solution5d, 0, Ls-1);
ExtractSlice(exported4d, tmp, 0, 0);
}
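// A hedged usage sketch (hypothetical names): the pair maps a 4d pseudofermion onto the s=0 and
// s=Ls-1 chiral walls of the 5d field and back, so a round trip should reproduce the 4d input:
//   FermionField eta4(UGrid), eta5(FGrid), check4(UGrid);
//   gaussian(RNG4, eta4);
//   Dop.ImportFourDimPseudoFermion(eta4, eta5);
//   Dop.ExportFourDimPseudoFermion(eta5, check4);
//   check4 = check4 - eta4;  // expect |check4|^2 ~ 0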
// Dminus
template<class Impl>
void CayleyFermion5D<Impl>::Dminus(const FermionField &psi, FermionField &chi)
{
@ -828,7 +858,6 @@ void CayleyFermion5D<Impl>::SeqConservedCurrent(PropagatorField &q_in,
#if (!defined(GRID_HIP))
int tshift = (mu == Nd-1) ? 1 : 0;
unsigned int LLt = GridDefaultLatt()[Tp];
////////////////////////////////////////////////
// GENERAL CAYLEY CASE
////////////////////////////////////////////////
@ -881,7 +910,7 @@ void CayleyFermion5D<Impl>::SeqConservedCurrent(PropagatorField &q_in,
}
std::vector<RealD> G_s(Ls,1.0);
RealD sign = 1.0; // sign flip for vector/tadpole
RealD sign = 1; // sign flip for vector/tadpole
if ( curr_type == Current::Axial ) {
for(int s=0;s<Ls/2;s++){
G_s[s] = -1.0;
@ -891,7 +920,7 @@ void CayleyFermion5D<Impl>::SeqConservedCurrent(PropagatorField &q_in,
auto b=this->_b;
auto c=this->_c;
if ( b == 1 && c == 0 ) {
sign = -1.0;
sign = -1;
}
else {
std::cerr << "Error: Tadpole implementation currently unavailable for non-Shamir actions." << std::endl;
@ -935,13 +964,7 @@ void CayleyFermion5D<Impl>::SeqConservedCurrent(PropagatorField &q_in,
tmp = Cshift(tmp,mu,-1);
Impl::multLinkField(Utmp,this->Umu,tmp,mu+Nd); // Adjoint link
tmp = -G_s[s]*( Utmp + gmu*Utmp );
// Mask the time
if (tmax == LLt - 1 && tshift == 1){ // quick fix to include timeslice 0 if tmax + tshift is over the last timeslice
unsigned int t0 = 0;
tmp = where(((lcoor==t0) || (lcoor>=tmin+tshift)),tmp,zz);
} else {
tmp = where((lcoor>=tmin+tshift),tmp,zz);
}
tmp = where((lcoor>=tmin+tshift),tmp,zz); // Mask the time
L_Q += where((lcoor<=tmax+tshift),tmp,zz); // Position of current complicated
InsertSlice(L_Q, q_out, s , 0);

View File

@ -51,9 +51,9 @@ WilsonFermion5D<Impl>::WilsonFermion5D(GaugeField &_Umu,
_FiveDimRedBlackGrid(&FiveDimRedBlackGrid),
_FourDimGrid (&FourDimGrid),
_FourDimRedBlackGrid(&FourDimRedBlackGrid),
Stencil (_FiveDimGrid,npoint,Even,directions,displacements,p),
StencilEven(_FiveDimRedBlackGrid,npoint,Even,directions,displacements,p), // source is Even
StencilOdd (_FiveDimRedBlackGrid,npoint,Odd ,directions,displacements,p), // source is Odd
Stencil (_FiveDimGrid,npoint,Even,directions,displacements,p.locally_periodic,p),
StencilEven(_FiveDimRedBlackGrid,npoint,Even,directions,displacements,p.locally_periodic,p), // source is Even
StencilOdd (_FiveDimRedBlackGrid,npoint,Odd ,directions,displacements,p.locally_periodic,p), // source is Odd
M5(_M5),
Umu(_FourDimGrid),
UmuEven(_FourDimRedBlackGrid),
@ -361,10 +361,21 @@ void WilsonFermion5D<Impl>::DhopInternal(StencilImpl & st, LebesgueOrder &lo,
const FermionField &in, FermionField &out,int dag)
{
DhopTotalTime-=usecond();
if ( WilsonKernelsStatic::Comms == WilsonKernelsStatic::CommsAndCompute )
assert( (WilsonKernelsStatic::Comms == WilsonKernelsStatic::CommsAndCompute)
||(WilsonKernelsStatic::Comms == WilsonKernelsStatic::CommsThenCompute)
||(WilsonKernelsStatic::Comms == WilsonKernelsStatic::CommsDirichlet) );
if ( WilsonKernelsStatic::Comms == WilsonKernelsStatic::CommsAndCompute ) {
DhopInternalOverlappedComms(st,lo,U,in,out,dag);
else
}
if ( WilsonKernelsStatic::Comms == WilsonKernelsStatic::CommsThenCompute ) {
DhopInternalSerialComms(st,lo,U,in,out,dag);
}
if ( WilsonKernelsStatic::Comms == WilsonKernelsStatic::CommsDirichlet ) {
DhopInternalDirichletComms(st,lo,U,in,out,dag);
}
DhopTotalTime+=usecond();
}
@ -431,6 +442,30 @@ void WilsonFermion5D<Impl>::DhopInternalOverlappedComms(StencilImpl & st, Lebesg
DhopComputeTime2+=usecond();
}
template<class Impl>
void WilsonFermion5D<Impl>::DhopInternalDirichletComms(StencilImpl & st, LebesgueOrder &lo,
DoubledGaugeField & U,
const FermionField &in, FermionField &out,int dag)
{
Compressor compressor(dag);
int LLs = in.Grid()->_rdimensions[0];
int len = U.Grid()->oSites();
/////////////////////////////
// do the compute interior
/////////////////////////////
int Opt = WilsonKernelsStatic::Opt; // Why pass this. Kernels should know
DhopComputeTime-=usecond();
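// The trailing (1,0) kernel arguments request interior sites only (interior=1, exterior=0):
// since no halo exchange is performed here, the cross-boundary hopping terms are simply dropped.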
if (dag == DaggerYes) {
Kernels::DhopDagKernel(Opt,st,U,st.CommBuf(),LLs,U.oSites(),in,out,1,0);
} else {
Kernels::DhopKernel (Opt,st,U,st.CommBuf(),LLs,U.oSites(),in,out,1,0);
}
accelerator_barrier();
DhopComputeTime+=usecond();
}
template<class Impl>
void WilsonFermion5D<Impl>::DhopInternalSerialComms(StencilImpl & st, LebesgueOrder &lo,

View File

@ -47,9 +47,9 @@ WilsonFermion<Impl>::WilsonFermion(GaugeField &_Umu, GridCartesian &Fgrid,
Kernels(p),
_grid(&Fgrid),
_cbgrid(&Hgrid),
Stencil(&Fgrid, npoint, Even, directions, displacements,p),
StencilEven(&Hgrid, npoint, Even, directions,displacements,p), // source is Even
StencilOdd(&Hgrid, npoint, Odd, directions,displacements,p), // source is Odd
Stencil(&Fgrid, npoint, Even, directions, displacements,p.locally_periodic,p),
StencilEven(&Hgrid, npoint, Even, directions,displacements,p.locally_periodic,p), // source is Even
StencilOdd(&Hgrid, npoint, Odd, directions,displacements,p.locally_periodic,p), // source is Odd
mass(_mass),
Lebesgue(_grid),
LebesgueEvenOdd(_cbgrid),
@ -488,12 +488,21 @@ void WilsonFermion<Impl>::DhopInternal(StencilImpl &st, LebesgueOrder &lo,
FermionField &out, int dag)
{
DhopTotalTime-=usecond();
#ifdef GRID_OMP
if ( WilsonKernelsStatic::Comms == WilsonKernelsStatic::CommsAndCompute )
assert( (WilsonKernelsStatic::Comms == WilsonKernelsStatic::CommsAndCompute)
||(WilsonKernelsStatic::Comms == WilsonKernelsStatic::CommsThenCompute)
||(WilsonKernelsStatic::Comms == WilsonKernelsStatic::CommsDirichlet) );
if ( WilsonKernelsStatic::Comms == WilsonKernelsStatic::CommsAndCompute ) {
DhopInternalOverlappedComms(st,lo,U,in,out,dag);
else
#endif
}
if ( WilsonKernelsStatic::Comms == WilsonKernelsStatic::CommsThenCompute ) {
DhopInternalSerial(st,lo,U,in,out,dag);
}
if ( WilsonKernelsStatic::Comms == WilsonKernelsStatic::CommsDirichlet ) {
DhopInternalDirichletComms(st,lo,U,in,out,dag);
}
DhopTotalTime+=usecond();
}
@ -562,6 +571,29 @@ void WilsonFermion<Impl>::DhopInternalOverlappedComms(StencilImpl &st, LebesgueO
DhopComputeTime2+=usecond();
};
template <class Impl>
void WilsonFermion<Impl>::DhopInternalDirichletComms(StencilImpl &st, LebesgueOrder &lo,
DoubledGaugeField &U,
const FermionField &in,
FermionField &out, int dag)
{
assert((dag == DaggerNo) || (dag == DaggerYes));
Compressor compressor(dag);
int len = U.Grid()->oSites();
/////////////////////////////
// do the compute interior
/////////////////////////////
int Opt = WilsonKernelsStatic::Opt;
DhopComputeTime-=usecond();
if (dag == DaggerYes) {
Kernels::DhopDagKernel(Opt,st,U,st.CommBuf(),1,U.oSites(),in,out,1,0);
} else {
Kernels::DhopKernel(Opt,st,U,st.CommBuf(),1,U.oSites(),in,out,1,0);
}
DhopComputeTime+=usecond();
};
template <class Impl>
void WilsonFermion<Impl>::DhopInternalSerial(StencilImpl &st, LebesgueOrder &lo,

View File

@ -77,23 +77,23 @@ Author: paboyle <paboyle@ph.ed.ac.uk>
#define REGISTER
#ifdef GRID_SIMT
#define LOAD_CHIMU(Ptype) \
#define LOAD_CHIMU(ptype) \
{const SiteSpinor & ref (in[offset]); \
Chimu_00=coalescedReadPermute<Ptype>(ref()(0)(0),perm,lane); \
Chimu_01=coalescedReadPermute<Ptype>(ref()(0)(1),perm,lane); \
Chimu_02=coalescedReadPermute<Ptype>(ref()(0)(2),perm,lane); \
Chimu_10=coalescedReadPermute<Ptype>(ref()(1)(0),perm,lane); \
Chimu_11=coalescedReadPermute<Ptype>(ref()(1)(1),perm,lane); \
Chimu_12=coalescedReadPermute<Ptype>(ref()(1)(2),perm,lane); \
Chimu_20=coalescedReadPermute<Ptype>(ref()(2)(0),perm,lane); \
Chimu_21=coalescedReadPermute<Ptype>(ref()(2)(1),perm,lane); \
Chimu_22=coalescedReadPermute<Ptype>(ref()(2)(2),perm,lane); \
Chimu_30=coalescedReadPermute<Ptype>(ref()(3)(0),perm,lane); \
Chimu_31=coalescedReadPermute<Ptype>(ref()(3)(1),perm,lane); \
Chimu_32=coalescedReadPermute<Ptype>(ref()(3)(2),perm,lane); }
Chimu_00=coalescedReadPermute<ptype>(ref()(0)(0),perm,lane); \
Chimu_01=coalescedReadPermute<ptype>(ref()(0)(1),perm,lane); \
Chimu_02=coalescedReadPermute<ptype>(ref()(0)(2),perm,lane); \
Chimu_10=coalescedReadPermute<ptype>(ref()(1)(0),perm,lane); \
Chimu_11=coalescedReadPermute<ptype>(ref()(1)(1),perm,lane); \
Chimu_12=coalescedReadPermute<ptype>(ref()(1)(2),perm,lane); \
Chimu_20=coalescedReadPermute<ptype>(ref()(2)(0),perm,lane); \
Chimu_21=coalescedReadPermute<ptype>(ref()(2)(1),perm,lane); \
Chimu_22=coalescedReadPermute<ptype>(ref()(2)(2),perm,lane); \
Chimu_30=coalescedReadPermute<ptype>(ref()(3)(0),perm,lane); \
Chimu_31=coalescedReadPermute<ptype>(ref()(3)(1),perm,lane); \
Chimu_32=coalescedReadPermute<ptype>(ref()(3)(2),perm,lane); }
#define PERMUTE_DIR(dir) ;
#else
#define LOAD_CHIMU(Ptype) \
#define LOAD_CHIMU(ptype) \
{const SiteSpinor & ref (in[offset]); \
Chimu_00=ref()(0)(0);\
Chimu_01=ref()(0)(1);\
@ -109,12 +109,12 @@ Author: paboyle <paboyle@ph.ed.ac.uk>
Chimu_32=ref()(3)(2);}
#define PERMUTE_DIR(dir) \
permute##dir(Chi_00,Chi_00); \
permute##dir(Chi_01,Chi_01); \
permute##dir(Chi_02,Chi_02); \
permute##dir(Chi_10,Chi_10); \
permute##dir(Chi_11,Chi_11); \
permute##dir(Chi_12,Chi_12);
permute##dir(Chi_00,Chi_00); \
permute##dir(Chi_01,Chi_01);\
permute##dir(Chi_02,Chi_02);\
permute##dir(Chi_10,Chi_10); \
permute##dir(Chi_11,Chi_11);\
permute##dir(Chi_12,Chi_12);
#endif
@ -371,91 +371,88 @@ Author: paboyle <paboyle@ph.ed.ac.uk>
result_32-= UChi_12;
#define HAND_STENCIL_LEGB(PROJ,PERM,DIR,RECON) \
{int ptype; \
SE=st.GetEntry(ptype,DIR,ss); \
auto offset = SE->_offset; \
auto local = SE->_is_local; \
auto perm = SE->_permute; \
if ( local ) { \
LOAD_CHIMU(PERM); \
PROJ; \
if ( perm) { \
PERMUTE_DIR(PERM); \
} \
} else { \
LOAD_CHI; \
} \
acceleratorSynchronise(); \
MULT_2SPIN(DIR); \
RECON; }
SE=st.GetEntry(ptype,DIR,ss); \
offset = SE->_offset; \
local = SE->_is_local; \
perm = SE->_permute; \
if ( local ) { \
LOAD_CHIMU(PERM); \
PROJ; \
if ( perm) { \
PERMUTE_DIR(PERM); \
} \
} else { \
LOAD_CHI; \
} \
acceleratorSynchronise(); \
MULT_2SPIN(DIR); \
RECON;
#define HAND_STENCIL_LEG(PROJ,PERM,DIR,RECON) \
{ SE=&st_p[DIR+8*ss]; \
auto ptype=st_perm[DIR]; \
auto offset = SE->_offset; \
auto local = SE->_is_local; \
auto perm = SE->_permute; \
if ( local ) { \
LOAD_CHIMU(PERM); \
PROJ; \
if ( perm) { \
PERMUTE_DIR(PERM); \
} \
} else { \
LOAD_CHI; \
} \
acceleratorSynchronise(); \
MULT_2SPIN(DIR); \
RECON; }
#define HAND_STENCIL_LEG(PROJ,PERM,DIR,RECON) \
SE=&st_p[DIR+8*ss]; \
ptype=st_perm[DIR]; \
offset = SE->_offset; \
local = SE->_is_local; \
perm = SE->_permute; \
if ( local ) { \
LOAD_CHIMU(PERM); \
PROJ; \
if ( perm) { \
PERMUTE_DIR(PERM); \
} \
} else { \
LOAD_CHI; \
} \
acceleratorSynchronise(); \
MULT_2SPIN(DIR); \
RECON;
#define HAND_STENCIL_LEGA(PROJ,PERM,DIR,RECON) \
{ SE=&st_p[DIR+8*ss]; \
auto ptype=st_perm[DIR]; \
/*SE=st.GetEntry(ptype,DIR,ss);*/ \
auto offset = SE->_offset; \
auto perm = SE->_permute; \
LOAD_CHIMU(PERM); \
PROJ; \
MULT_2SPIN(DIR); \
RECON; }
SE=&st_p[DIR+8*ss]; \
ptype=st_perm[DIR]; \
/*SE=st.GetEntry(ptype,DIR,ss);*/ \
offset = SE->_offset; \
perm = SE->_permute; \
LOAD_CHIMU(PERM); \
PROJ; \
MULT_2SPIN(DIR); \
RECON;
#define HAND_STENCIL_LEG_INT(PROJ,PERM,DIR,RECON) \
{ int ptype; \
SE=st.GetEntry(ptype,DIR,ss); \
auto offset = SE->_offset; \
auto local = SE->_is_local; \
auto perm = SE->_permute; \
if ( local ) { \
LOAD_CHIMU(PERM); \
PROJ; \
if ( perm) { \
PERMUTE_DIR(PERM); \
} \
} else if ( st.same_node[DIR] ) { \
LOAD_CHI; \
} \
acceleratorSynchronise(); \
if (local || st.same_node[DIR] ) { \
MULT_2SPIN(DIR); \
RECON; \
} \
acceleratorSynchronise(); }
SE=st.GetEntry(ptype,DIR,ss); \
offset = SE->_offset; \
local = SE->_is_local; \
perm = SE->_permute; \
if ( local ) { \
LOAD_CHIMU(PERM); \
PROJ; \
if ( perm) { \
PERMUTE_DIR(PERM); \
} \
} else if ( st.same_node[DIR] ) { \
LOAD_CHI; \
} \
acceleratorSynchronise(); \
if (local || st.same_node[DIR] ) { \
MULT_2SPIN(DIR); \
RECON; \
} \
acceleratorSynchronise();
#define HAND_STENCIL_LEG_EXT(PROJ,PERM,DIR,RECON) \
{ int ptype; \
SE=st.GetEntry(ptype,DIR,ss); \
auto offset = SE->_offset; \
if((!SE->_is_local)&&(!st.same_node[DIR]) ) { \
LOAD_CHI; \
MULT_2SPIN(DIR); \
RECON; \
nmu++; \
} \
acceleratorSynchronise(); }
SE=st.GetEntry(ptype,DIR,ss); \
offset = SE->_offset; \
if((!SE->_is_local)&&(!st.same_node[DIR]) ) { \
LOAD_CHI; \
MULT_2SPIN(DIR); \
RECON; \
nmu++; \
} \
acceleratorSynchronise();
#define HAND_RESULT(ss) \
{ \
SiteSpinor & ref (out[ss]); \
#define HAND_RESULT(ss) \
{ \
SiteSpinor & ref (out[ss]); \
coalescedWrite(ref()(0)(0),result_00,lane); \
coalescedWrite(ref()(0)(1),result_01,lane); \
coalescedWrite(ref()(0)(2),result_02,lane); \
@ -566,6 +563,7 @@ WilsonKernels<Impl>::HandDhopSiteSycl(StencilVector st_perm,StencilEntry *st_p,
HAND_DECLARATIONS(Simt);
int offset,local,perm, ptype;
StencilEntry *SE;
HAND_STENCIL_LEG(XM_PROJ,3,Xp,XM_RECON);
HAND_STENCIL_LEG(YM_PROJ,2,Yp,YM_RECON_ACCUM);
@ -595,7 +593,9 @@ WilsonKernels<Impl>::HandDhopSite(StencilView &st, DoubledGaugeFieldView &U,Site
HAND_DECLARATIONS(Simt);
int offset,local,perm, ptype;
StencilEntry *SE;
HAND_STENCIL_LEG(XM_PROJ,3,Xp,XM_RECON);
HAND_STENCIL_LEG(YM_PROJ,2,Yp,YM_RECON_ACCUM);
HAND_STENCIL_LEG(ZM_PROJ,1,Zp,ZM_RECON_ACCUM);
@ -623,6 +623,8 @@ void WilsonKernels<Impl>::HandDhopSiteDag(StencilView &st,DoubledGaugeFieldView
HAND_DECLARATIONS(Simt);
StencilEntry *SE;
int offset,local,perm, ptype;
HAND_STENCIL_LEG(XP_PROJ,3,Xp,XP_RECON);
HAND_STENCIL_LEG(YP_PROJ,2,Yp,YP_RECON_ACCUM);
HAND_STENCIL_LEG(ZP_PROJ,1,Zp,ZP_RECON_ACCUM);
@ -638,8 +640,8 @@ template<class Impl> accelerator_inline void
WilsonKernels<Impl>::HandDhopSiteInt(StencilView &st,DoubledGaugeFieldView &U,SiteHalfSpinor *buf,
int ss,int sU,const FermionFieldView &in, FermionFieldView &out)
{
// auto st_p = st._entries_p;
// auto st_perm = st._permute_type;
auto st_p = st._entries_p;
auto st_perm = st._permute_type;
// T==0, Z==1, Y==2, Z==3 expect 1,2,2,2 simd layout etc...
typedef typename Simd::scalar_type S;
typedef typename Simd::vector_type V;
@ -650,6 +652,7 @@ WilsonKernels<Impl>::HandDhopSiteInt(StencilView &st,DoubledGaugeFieldView &U,Si
HAND_DECLARATIONS(Simt);
int offset,local,perm, ptype;
StencilEntry *SE;
ZERO_RESULT;
HAND_STENCIL_LEG_INT(XM_PROJ,3,Xp,XM_RECON_ACCUM);
@ -667,8 +670,8 @@ template<class Impl> accelerator_inline
void WilsonKernels<Impl>::HandDhopSiteDagInt(StencilView &st,DoubledGaugeFieldView &U,SiteHalfSpinor *buf,
int ss,int sU,const FermionFieldView &in, FermionFieldView &out)
{
// auto st_p = st._entries_p;
// auto st_perm = st._permute_type;
auto st_p = st._entries_p;
auto st_perm = st._permute_type;
typedef typename Simd::scalar_type S;
typedef typename Simd::vector_type V;
typedef decltype( coalescedRead( in[0]()(0)(0) )) Simt;
@ -679,6 +682,7 @@ void WilsonKernels<Impl>::HandDhopSiteDagInt(StencilView &st,DoubledGaugeFieldVi
HAND_DECLARATIONS(Simt);
StencilEntry *SE;
int offset,local,perm, ptype;
ZERO_RESULT;
HAND_STENCIL_LEG_INT(XP_PROJ,3,Xp,XP_RECON_ACCUM);
HAND_STENCIL_LEG_INT(YP_PROJ,2,Yp,YP_RECON_ACCUM);
@ -695,8 +699,8 @@ template<class Impl> accelerator_inline void
WilsonKernels<Impl>::HandDhopSiteExt(StencilView &st,DoubledGaugeFieldView &U,SiteHalfSpinor *buf,
int ss,int sU,const FermionFieldView &in, FermionFieldView &out)
{
// auto st_p = st._entries_p;
// auto st_perm = st._permute_type;
auto st_p = st._entries_p;
auto st_perm = st._permute_type;
// T==0, Z==1, Y==2, Z==3 expect 1,2,2,2 simd layout etc...
typedef typename Simd::scalar_type S;
typedef typename Simd::vector_type V;
@ -707,7 +711,7 @@ WilsonKernels<Impl>::HandDhopSiteExt(StencilView &st,DoubledGaugeFieldView &U,Si
HAND_DECLARATIONS(Simt);
// int offset, ptype;
int offset, ptype;
StencilEntry *SE;
int nmu=0;
ZERO_RESULT;
@ -726,8 +730,8 @@ template<class Impl> accelerator_inline
void WilsonKernels<Impl>::HandDhopSiteDagExt(StencilView &st,DoubledGaugeFieldView &U,SiteHalfSpinor *buf,
int ss,int sU,const FermionFieldView &in, FermionFieldView &out)
{
// auto st_p = st._entries_p;
// auto st_perm = st._permute_type;
auto st_p = st._entries_p;
auto st_perm = st._permute_type;
typedef typename Simd::scalar_type S;
typedef typename Simd::vector_type V;
typedef decltype( coalescedRead( in[0]()(0)(0) )) Simt;
@ -738,7 +742,7 @@ void WilsonKernels<Impl>::HandDhopSiteDagExt(StencilView &st,DoubledGaugeFieldVi
HAND_DECLARATIONS(Simt);
StencilEntry *SE;
// int offset, ptype;
int offset, ptype;
int nmu=0;
ZERO_RESULT;
HAND_STENCIL_LEG_EXT(XP_PROJ,3,Xp,XP_RECON_ACCUM);

View File

@ -61,7 +61,7 @@ NAMESPACE_BEGIN(Grid);
typedef typename Impl::Field Field;
// hardcodes the exponential approximation in the template
template <class S, int Nrepresentation = Nc, int Nexp = 12 > class GaugeImplTypes {
template <class S, int Nrepresentation = Nc, int Nexp = 20 > class GaugeImplTypes {
public:
typedef S Simd;
typedef typename Simd::scalar_type scalar_type;
@ -78,8 +78,6 @@ public:
typedef Lattice<SiteLink> LinkField;
typedef Lattice<SiteField> Field;
typedef SU<Nrepresentation> Group;
// Guido: we can probably separate the types from the HMC functions
// this will create 2 kind of implementations
// probably confusing the users
@ -120,7 +118,7 @@ public:
LinkField Pmu(P.Grid());
Pmu = Zero();
for (int mu = 0; mu < Nd; mu++) {
Group::GaussianFundamentalLieAlgebraMatrix(pRNG, Pmu);
SU<Nrepresentation>::GaussianFundamentalLieAlgebraMatrix(pRNG, Pmu);
RealD scale = ::sqrt(HMC_MOMENTUM_DENOMINATOR) ;
Pmu = Pmu*scale;
PokeIndex<LorentzIndex>(P, Pmu, mu);
@ -161,15 +159,15 @@ public:
}
static inline void HotConfiguration(GridParallelRNG &pRNG, Field &U) {
Group::HotConfiguration(pRNG, U);
SU<Nc>::HotConfiguration(pRNG, U);
}
static inline void TepidConfiguration(GridParallelRNG &pRNG, Field &U) {
Group::TepidConfiguration(pRNG, U);
SU<Nc>::TepidConfiguration(pRNG, U);
}
static inline void ColdConfiguration(GridParallelRNG &pRNG, Field &U) {
Group::ColdConfiguration(pRNG, U);
SU<Nc>::ColdConfiguration(pRNG, U);
}
};

View File

@ -40,13 +40,66 @@ NAMESPACE_BEGIN(Grid);
X=X-Y;
RealD Nd = norm2(X);
std::cout << "************************* "<<std::endl;
std::cout << " noise = "<<Nx<<std::endl;
std::cout << " (MdagM^-1/2)^2 noise = "<<Nz<<std::endl;
std::cout << " MdagM (MdagM^-1/2)^2 noise = "<<Ny<<std::endl;
std::cout << " noise - MdagM (MdagM^-1/2)^2 noise = "<<Nd<<std::endl;
std::cout << " | noise |^2 = "<<Nx<<std::endl;
std::cout << " | (MdagM^-1/2)^2 noise |^2 = "<<Nz<<std::endl;
std::cout << " | MdagM (MdagM^-1/2)^2 noise |^2 = "<<Ny<<std::endl;
std::cout << " | noise - MdagM (MdagM^-1/2)^2 noise |^2 = "<<Nd<<std::endl;
std::cout << " | noise - MdagM (MdagM^-1/2)^2 noise|/|noise| = " << std::sqrt(Nd/Nx) << std::endl;
std::cout << "************************* "<<std::endl;
assert( (std::sqrt(Nd/Nx)<tol) && " InverseSqrtBoundsCheck ");
}
/* For a HermOp = M^dag M, check the approximation of HermOp^{-1/inv_pow}
   by computing |X - HermOp * [ HermOp^{-1/inv_pow} ]^{inv_pow} X| / |X| < tol
   for Gaussian noise X (aka GaussNoise).
   ApproxNegPow should be the rational approximation for x^{-1/inv_pow}
*/
template<class Field> void InversePowerBoundsCheck(int inv_pow,
int MaxIter,double tol,
LinearOperatorBase<Field> &HermOp,
Field &GaussNoise,
MultiShiftFunction &ApproxNegPow)
{
GridBase *FermionGrid = GaussNoise.Grid();
Field X(FermionGrid);
Field Y(FermionGrid);
Field Z(FermionGrid);
Field tmp1(FermionGrid), tmp2(FermionGrid);
X=GaussNoise;
RealD Nx = norm2(X);
ConjugateGradientMultiShift<Field> msCG(MaxIter,ApproxNegPow);
tmp1 = X;
Field* in = &tmp1;
Field* out = &tmp2;
for(int i=0;i<inv_pow;i++){ //apply [ Hermop^{-1/inv_pow} ]^{inv_pow} X = HermOp^{-1} X
msCG(HermOp, *in, *out); //backwards conventions!
if(i!=inv_pow-1) std::swap(in, out);
}
Z = *out;
RealD Nz = norm2(Z);
HermOp.HermOp(Z,Y);
RealD Ny = norm2(Y);
X=X-Y;
RealD Nd = norm2(X);
std::cout << "************************* "<<std::endl;
std::cout << " | noise |^2 = "<<Nx<<std::endl;
std::cout << " | (MdagM^-1/" << inv_pow << ")^" << inv_pow << " noise |^2 = "<<Nz<<std::endl;
std::cout << " | MdagM (MdagM^-1/" << inv_pow << ")^" << inv_pow << " noise |^2 = "<<Ny<<std::endl;
std::cout << " | noise - MdagM (MdagM^-1/" << inv_pow << ")^" << inv_pow << " noise |^2 = "<<Nd<<std::endl;
std::cout << " | noise - MdagM (MdagM^-1/" << inv_pow << ")^" << inv_pow << " noise |/| noise | = "<<std::sqrt(Nd/Nx)<<std::endl;
std::cout << "************************* "<<std::endl;
assert( (std::sqrt(Nd/Nx)<tol) && " InversePowerBoundsCheck ");
}
NAMESPACE_END(Grid);
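A hedged usage sketch (names are hypothetical; assumes an even-odd preconditioned operator HermOpEO and a MultiShiftFunction ApproxNegPow built for x^{-1/inv_pow}, e.g. from the usual Remez setup):

  LatticeFermion noise(FrbGrid);
  gaussian(RNG5rb, noise);
  InversePowerBoundsCheck(2, 10000, 1.0e-8, HermOpEO, noise, ApproxNegPow);  // check the x^{-1/2} approximation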

View File

@ -0,0 +1,163 @@
/*************************************************************************************
Grid physics library, www.github.com/paboyle/Grid
Source file: ./lib/qcd/action/pseudofermion/DomainDecomposedTwoFlavourBoundaryBoson.h
Copyright (C) 2021
Author: Peter Boyle <paboyle@ph.ed.ac.uk>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */
#pragma once
NAMESPACE_BEGIN(Grid);
///////////////////////////////////////
// Two flavour boundary boson
///////////////////////////////////////
template<class ImplD,class ImplF>
class DomainDecomposedBoundaryTwoFlavourBosonPseudoFermion : public Action<typename ImplD::GaugeField> {
public:
INHERIT_IMPL_TYPES(ImplD);
private:
SchurFactoredFermionOperator<ImplD,ImplF> & NumOp;// the basic operator
RealD InnerStoppingCondition;
RealD ActionStoppingCondition;
RealD DerivativeStoppingCondition;
FermionField Phi; // the pseudo fermion field for this trajectory
public:
DomainDecomposedBoundaryTwoFlavourBosonPseudoFermion(SchurFactoredFermionOperator<ImplD,ImplF> &_NumOp,RealD _DerivativeTol, RealD _ActionTol, RealD _InnerTol=1.0e-6)
: NumOp(_NumOp),
DerivativeStoppingCondition(_DerivativeTol),
ActionStoppingCondition(_ActionTol),
InnerStoppingCondition(_InnerTol),
Phi(_NumOp.FermionGrid()) {};
virtual std::string action_name(){return "DomainDecomposedBoundaryTwoFlavourBosonPseudoFermion";}
virtual std::string LogParameters(){
std::stringstream sstream;
return sstream.str();
}
virtual void refresh(const GaugeField &U, GridSerialRNG& sRNG, GridParallelRNG& pRNG)
{
// P(phi) = e^{- phi^dag P^dag P phi}
//
// NumOp == P
//
// Take phi = P^{-1} eta ; eta = P Phi
//
// P(eta) = e^{- eta^dag eta}
//
// e^{-x^2/(2 sig^2)} => sig^2 = 0.5.
//
// So eta should be of width sig = 1/sqrt(2), i.e. the unit-variance Gaussian must be scaled by 0.707...
//
RealD scale = std::sqrt(0.5);
NumOp.tolinner=InnerStoppingCondition;
NumOp.tol=ActionStoppingCondition;
NumOp.ImportGauge(U);
FermionField eta(NumOp.FermionGrid());
gaussian(pRNG,eta); eta=eta*scale;
NumOp.ProjectBoundaryBar(eta);
//DumpSliceNorm("eta",eta);
NumOp.RInv(eta,Phi);
//DumpSliceNorm("Phi",Phi);
};
//////////////////////////////////////////////////////
// S = phi^dag Pdag P phi
//////////////////////////////////////////////////////
virtual RealD S(const GaugeField &U) {
NumOp.tolinner=InnerStoppingCondition;
NumOp.tol=ActionStoppingCondition;
NumOp.ImportGauge(U);
FermionField Y(NumOp.FermionGrid());
NumOp.R(Phi,Y);
RealD action = norm2(Y);
return action;
};
virtual void deriv(const GaugeField &U,GaugeField & dSdU)
{
NumOp.tolinner=InnerStoppingCondition;
NumOp.tol=DerivativeStoppingCondition;
NumOp.ImportGauge(U);
GridBase *fgrid = NumOp.FermionGrid();
GridBase *ugrid = NumOp.GaugeGrid();
FermionField X(fgrid);
FermionField Y(fgrid);
FermionField tmp(fgrid);
GaugeField force(ugrid);
FermionField DobiDdbPhi(fgrid); // Vector A in my notes
FermionField DoiDdDobiDdbPhi(fgrid); // Vector B in my notes
FermionField DoidP_Phi(fgrid); // Vector E in my notes
FermionField DobidDddDoidP_Phi(fgrid); // Vector F in my notes
FermionField P_Phi(fgrid);
// P term
NumOp.dBoundaryBar(Phi,tmp);
NumOp.dOmegaBarInv(tmp,DobiDdbPhi); // Vector A
NumOp.dBoundary(DobiDdbPhi,tmp);
NumOp.dOmegaInv(tmp,DoiDdDobiDdbPhi); // Vector B
P_Phi = Phi - DoiDdDobiDdbPhi;
NumOp.ProjectBoundaryBar(P_Phi);
// P^dag P term
NumOp.dOmegaDagInv(P_Phi,DoidP_Phi); // Vector E
NumOp.dBoundaryDag(DoidP_Phi,tmp);
NumOp.dOmegaBarDagInv(tmp,DobidDddDoidP_Phi); // Vector F
NumOp.dBoundaryBarDag(DobidDddDoidP_Phi,tmp);
X = DobiDdbPhi;
Y = DobidDddDoidP_Phi;
NumOp.DirichletFermOpD.MDeriv(force,Y,X,DaggerNo); dSdU=force;
NumOp.DirichletFermOpD.MDeriv(force,X,Y,DaggerYes); dSdU=dSdU+force;
X = DoiDdDobiDdbPhi;
Y = DoidP_Phi;
NumOp.DirichletFermOpD.MDeriv(force,Y,X,DaggerNo); dSdU=dSdU+force;
NumOp.DirichletFermOpD.MDeriv(force,X,Y,DaggerYes); dSdU=dSdU+force;
dSdU *= -1.0;
};
};
NAMESPACE_END(Grid);
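A hedged construction sketch (hypothetical names; note the argument order is derivative tolerance, then action tolerance, with an optional inner tolerance defaulting to 1.0e-6):

  DomainDecomposedBoundaryTwoFlavourBosonPseudoFermion<ImplD,ImplF>
      BosonAction(Boundary, 1.0e-6, 1.0e-8);   // Boundary is a SchurFactoredFermionOperator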

View File

@ -0,0 +1,158 @@
/*************************************************************************************
Grid physics library, www.github.com/paboyle/Grid
Source file: ./lib/qcd/action/pseudofermion/DomainDecomposedTwoFlavourBoundary.h
Copyright (C) 2021
Author: Peter Boyle <paboyle@ph.ed.ac.uk>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */
#pragma once
NAMESPACE_BEGIN(Grid);
///////////////////////////////////////
// Two flavour boundary
///////////////////////////////////////
template<class ImplD,class ImplF>
class DomainDecomposedBoundaryTwoFlavourPseudoFermion : public Action<typename ImplD::GaugeField> {
public:
INHERIT_IMPL_TYPES(ImplD);
private:
SchurFactoredFermionOperator<ImplD,ImplF> & DenOp;// the basic operator
RealD ActionStoppingCondition;
RealD DerivativeStoppingCondition;
RealD InnerStoppingCondition;
FermionField Phi; // the pseudo fermion field for this trajectory
RealD refresh_action;
public:
DomainDecomposedBoundaryTwoFlavourPseudoFermion(SchurFactoredFermionOperator<ImplD,ImplF> &_DenOp,RealD _DerivativeTol, RealD _ActionTol, RealD _InnerTol = 1.0e-6 )
: DenOp(_DenOp),
DerivativeStoppingCondition(_DerivativeTol),
ActionStoppingCondition(_ActionTol),
InnerStoppingCondition(_InnerTol),
Phi(_DenOp.FermionGrid()) {};
virtual std::string action_name(){return "DomainDecomposedBoundaryTwoFlavourPseudoFermion";}
virtual std::string LogParameters(){
std::stringstream sstream;
return sstream.str();
}
virtual void refresh(const GaugeField &U, GridSerialRNG& sRNG, GridParallelRNG& pRNG)
{
// P(phi) = e^{- phi^dag Rdag^-1 R^-1 phi}
//
// DenOp == R
//
// Take phi = R eta ; eta = R^-1 Phi
//
// P(eta) = e^{- eta^dag eta}
//
// e^{-x^2/(2 sig^2)} => sig^2 = 0.5.
//
// So eta should be of width sig = 1/sqrt(2), i.e. the unit-variance Gaussian must be scaled by 0.707...
//
RealD scale = std::sqrt(0.5);
DenOp.tolinner=InnerStoppingCondition;
DenOp.tol =ActionStoppingCondition;
DenOp.ImportGauge(U);
FermionField eta(DenOp.FermionGrid());
gaussian(pRNG,eta); eta=eta*scale;
DenOp.ProjectBoundaryBar(eta);
DenOp.R(eta,Phi);
//DumpSliceNorm("Phi",Phi);
refresh_action = norm2(eta);
};
//////////////////////////////////////////////////////
// S = phi^dag Rdag^-1 R^-1 phi
//////////////////////////////////////////////////////
virtual RealD S(const GaugeField &U) {
DenOp.tolinner=InnerStoppingCondition;
DenOp.tol=ActionStoppingCondition;
DenOp.ImportGauge(U);
FermionField X(DenOp.FermionGrid());
DenOp.RInv(Phi,X);
RealD action = norm2(X);
return action;
};
virtual void deriv(const GaugeField &U,GaugeField & dSdU)
{
DenOp.tolinner=InnerStoppingCondition;
DenOp.tol=DerivativeStoppingCondition;
DenOp.ImportGauge(U);
GridBase *fgrid = DenOp.FermionGrid();
GridBase *ugrid = DenOp.GaugeGrid();
FermionField X(fgrid);
FermionField Y(fgrid);
FermionField tmp(fgrid);
GaugeField force(ugrid);
FermionField DiDdb_Phi(fgrid); // Vector C in my notes
FermionField DidRinv_Phi(fgrid); // Vector D in my notes
FermionField Rinv_Phi(fgrid);
// FermionField RinvDagRinv_Phi(fgrid);
// FermionField DdbdDidRinv_Phi(fgrid);
// R^-1 term
DenOp.dBoundaryBar(Phi,tmp);
DenOp.Dinverse(tmp,DiDdb_Phi); // Vector C
Rinv_Phi = Phi - DiDdb_Phi;
DenOp.ProjectBoundaryBar(Rinv_Phi);
// R^-dagger R^-1 term
DenOp.DinverseDag(Rinv_Phi,DidRinv_Phi); // Vector D
/*
DenOp.dBoundaryBarDag(DidRinv_Phi,DdbdDidRinv_Phi);
RinvDagRinv_Phi = Rinv_Phi - DdbdDidRinv_Phi;
DenOp.ProjectBoundaryBar(RinvDagRinv_Phi);
*/
X = DiDdb_Phi;
Y = DidRinv_Phi;
DenOp.PeriodicFermOpD.MDeriv(force,Y,X,DaggerNo); dSdU=force;
DenOp.PeriodicFermOpD.MDeriv(force,X,Y,DaggerYes); dSdU=dSdU+force;
DumpSliceNorm("force",dSdU);
dSdU *= -1.0;
};
};
NAMESPACE_END(Grid);

View File

@ -0,0 +1,237 @@
/*************************************************************************************
Grid physics library, www.github.com/paboyle/Grid
Source file: ./lib/qcd/action/pseudofermion/DomainDecomposedTwoFlavourBoundary.h
Copyright (C) 2021
Author: Peter Boyle <paboyle@ph.ed.ac.uk>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */
#pragma once
NAMESPACE_BEGIN(Grid);
///////////////////////////////////////
// Two flavour ratio
///////////////////////////////////////
template<class ImplD,class ImplF>
class DomainDecomposedBoundaryTwoFlavourRatioPseudoFermion : public Action<typename ImplD::GaugeField> {
public:
INHERIT_IMPL_TYPES(ImplD);
private:
SchurFactoredFermionOperator<ImplD,ImplF> & NumOp;// the basic operator
SchurFactoredFermionOperator<ImplD,ImplF> & DenOp;// the basic operator
RealD InnerStoppingCondition;
RealD ActionStoppingCondition;
RealD DerivativeStoppingCondition;
FermionField Phi; // the pseudo fermion field for this trajectory
public:
DomainDecomposedBoundaryTwoFlavourRatioPseudoFermion(SchurFactoredFermionOperator<ImplD,ImplF> &_NumOp,
SchurFactoredFermionOperator<ImplD,ImplF> &_DenOp,
RealD _DerivativeTol, RealD _ActionTol, RealD _InnerTol=1.0e-6)
: NumOp(_NumOp), DenOp(_DenOp),
Phi(_NumOp.PeriodicFermOpD.FermionGrid()),
InnerStoppingCondition(_InnerTol),
DerivativeStoppingCondition(_DerivativeTol),
ActionStoppingCondition(_ActionTol)
{};
virtual std::string action_name(){return "DomainDecomposedBoundaryTwoFlavourRatioPseudoFermion";}
virtual std::string LogParameters(){
std::stringstream sstream;
return sstream.str();
}
virtual void refresh(const GaugeField &U, GridSerialRNG& sRNG, GridParallelRNG& pRNG)
{
NumOp.ImportGauge(U);
DenOp.ImportGauge(U);
FermionField eta(NumOp.PeriodicFermOpD.FermionGrid());
FermionField tmp(NumOp.PeriodicFermOpD.FermionGrid());
// P(phi) = e^{- phi^dag P^dag Rdag^-1 R^-1 P phi}
//
// NumOp == P
// DenOp == R
//
// Take phi = P^{-1} R eta ; eta = R^-1 P Phi
//
// P(eta) = e^{- eta^dag eta}
//
// e^{-x^2/(2 sig^2)} => sig^2 = 0.5.
//
// So eta should be of width sig = 1/sqrt(2), i.e. the unit-variance Gaussian must be scaled by 0.707...
//
RealD scale = std::sqrt(0.5);
gaussian(pRNG,eta); eta=eta*scale;
NumOp.ProjectBoundaryBar(eta);
NumOp.tolinner=InnerStoppingCondition;
DenOp.tolinner=InnerStoppingCondition;
DenOp.tol = ActionStoppingCondition;
NumOp.tol = ActionStoppingCondition;
DenOp.R(eta,tmp);
NumOp.RInv(tmp,Phi);
DumpSliceNorm("Phi",Phi);
};
//////////////////////////////////////////////////////
// S = phi^dag Pdag Rdag^-1 R^-1 P phi
//////////////////////////////////////////////////////
virtual RealD S(const GaugeField &U) {
NumOp.ImportGauge(U);
DenOp.ImportGauge(U);
FermionField X(NumOp.PeriodicFermOpD.FermionGrid());
FermionField Y(NumOp.PeriodicFermOpD.FermionGrid());
NumOp.tolinner=InnerStoppingCondition;
DenOp.tolinner=InnerStoppingCondition;
DenOp.tol = ActionStoppingCondition;
NumOp.tol = ActionStoppingCondition;
NumOp.R(Phi,Y);
DenOp.RInv(Y,X);
RealD action = norm2(X);
// std::cout << " DD boundary action is " <<action<<std::endl;
return action;
};
virtual void deriv(const GaugeField &U,GaugeField & dSdU)
{
NumOp.ImportGauge(U);
DenOp.ImportGauge(U);
GridBase *fgrid = NumOp.PeriodicFermOpD.FermionGrid();
GridBase *ugrid = NumOp.PeriodicFermOpD.GaugeGrid();
FermionField X(fgrid);
FermionField Y(fgrid);
FermionField tmp(fgrid);
GaugeField force(ugrid);
FermionField DobiDdbPhi(fgrid); // Vector A in my notes
FermionField DoiDdDobiDdbPhi(fgrid); // Vector B in my notes
FermionField DiDdbP_Phi(fgrid); // Vector C in my notes
FermionField DidRinvP_Phi(fgrid); // Vector D in my notes
FermionField DdbdDidRinvP_Phi(fgrid);
FermionField DoidRinvDagRinvP_Phi(fgrid); // Vector E in my notes
FermionField DobidDddDoidRinvDagRinvP_Phi(fgrid); // Vector F in my notes
FermionField P_Phi(fgrid);
FermionField RinvP_Phi(fgrid);
FermionField RinvDagRinvP_Phi(fgrid);
FermionField PdagRinvDagRinvP_Phi(fgrid);
// RealD action = S(U);
NumOp.tolinner=InnerStoppingCondition;
DenOp.tolinner=InnerStoppingCondition;
DenOp.tol = DerivativeStoppingCondition;
NumOp.tol = DerivativeStoppingCondition;
// P term
NumOp.dBoundaryBar(Phi,tmp);
NumOp.dOmegaBarInv(tmp,DobiDdbPhi); // Vector A
NumOp.dBoundary(DobiDdbPhi,tmp);
NumOp.dOmegaInv(tmp,DoiDdDobiDdbPhi); // Vector B
P_Phi = Phi - DoiDdDobiDdbPhi;
NumOp.ProjectBoundaryBar(P_Phi);
// R^-1 P term
DenOp.dBoundaryBar(P_Phi,tmp);
DenOp.Dinverse(tmp,DiDdbP_Phi); // Vector C
RinvP_Phi = P_Phi - DiDdbP_Phi;
DenOp.ProjectBoundaryBar(RinvP_Phi); // Correct to here
// R^-dagger R^-1 P term
DenOp.DinverseDag(RinvP_Phi,DidRinvP_Phi); // Vector D
DenOp.dBoundaryBarDag(DidRinvP_Phi,DdbdDidRinvP_Phi);
RinvDagRinvP_Phi = RinvP_Phi - DdbdDidRinvP_Phi;
DenOp.ProjectBoundaryBar(RinvDagRinvP_Phi);
// P^dag R^-dagger R^-1 P term
NumOp.dOmegaDagInv(RinvDagRinvP_Phi,DoidRinvDagRinvP_Phi); // Vector E
NumOp.dBoundaryDag(DoidRinvDagRinvP_Phi,tmp);
NumOp.dOmegaBarDagInv(tmp,DobidDddDoidRinvDagRinvP_Phi); // Vector F
NumOp.dBoundaryBarDag(DobidDddDoidRinvDagRinvP_Phi,tmp);
PdagRinvDagRinvP_Phi = RinvDagRinvP_Phi- tmp;
NumOp.ProjectBoundaryBar(PdagRinvDagRinvP_Phi);
/*
std::cout << "S eval "<< action << std::endl;
std::cout << "S - IP1 "<< innerProduct(Phi,PdagRinvDagRinvP_Phi) << std::endl;
std::cout << "S - IP2 "<< norm2(RinvP_Phi) << std::endl;
NumOp.R(Phi,tmp);
tmp = tmp - P_Phi;
std::cout << "diff1 "<<norm2(tmp) <<std::endl;
DenOp.RInv(P_Phi,tmp);
tmp = tmp - RinvP_Phi;
std::cout << "diff2 "<<norm2(tmp) <<std::endl;
DenOp.RDagInv(RinvP_Phi,tmp);
tmp = tmp - RinvDagRinvP_Phi;
std::cout << "diff3 "<<norm2(tmp) <<std::endl;
DenOp.RDag(RinvDagRinvP_Phi,tmp);
tmp = tmp - PdagRinvDagRinvP_Phi;
std::cout << "diff4 "<<norm2(tmp) <<std::endl;
*/
dSdU=Zero();
X = DobiDdbPhi;
Y = DobidDddDoidRinvDagRinvP_Phi;
NumOp.DirichletFermOpD.MDeriv(force,Y,X,DaggerNo); dSdU=dSdU+force;
NumOp.DirichletFermOpD.MDeriv(force,X,Y,DaggerYes); dSdU=dSdU+force;
X = DoiDdDobiDdbPhi;
Y = DoidRinvDagRinvP_Phi;
NumOp.DirichletFermOpD.MDeriv(force,Y,X,DaggerNo); dSdU=dSdU+force;
NumOp.DirichletFermOpD.MDeriv(force,X,Y,DaggerYes); dSdU=dSdU+force;
X = DiDdbP_Phi;
Y = DidRinvP_Phi;
DenOp.PeriodicFermOpD.MDeriv(force,Y,X,DaggerNo); dSdU=dSdU+force;
DenOp.PeriodicFermOpD.MDeriv(force,X,Y,DaggerYes); dSdU=dSdU+force;
dSdU *= -1.0;
};
};
NAMESPACE_END(Grid);
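
For reference, the change of variables performed in refresh() and the quantity returned by S() above can be written out explicitly; this is only a restatement of the in-code comments, with P the numerator map (NumOp) and R the denominator map (DenOp):

\[
\phi = P^{-1} R\,\eta, \qquad \eta = R^{-1} P\,\phi, \qquad
\mathrm{Prob}(\eta) \propto e^{-\eta^\dagger \eta}
\;\Rightarrow\;
\mathrm{Prob}(\phi) \propto e^{-\phi^\dagger P^\dagger R^{-\dagger} R^{-1} P\,\phi},
\qquad
S[\phi] = \big\| R^{-1} P\,\phi \big\|^2 ,
\]

matching S(U), which forms Y = P Phi via NumOp.R, X = R^{-1} Y via DenOp.RInv, and returns norm2(X).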

View File

@ -0,0 +1,372 @@
/*************************************************************************************
Grid physics library, www.github.com/paboyle/Grid
Source file: ./lib/qcd/action/pseudofermion/GeneralEvenOddRationalRatio.h
Copyright (C) 2015
Author: Christopher Kelly <ckelly@bnl.gov>
Author: Peter Boyle <paboyle@ph.ed.ac.uk>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */
#ifndef QCD_PSEUDOFERMION_GENERAL_EVEN_ODD_RATIONAL_RATIO_H
#define QCD_PSEUDOFERMION_GENERAL_EVEN_ODD_RATIONAL_RATIO_H
NAMESPACE_BEGIN(Grid);
/////////////////////////////////////////////////////////
// Generic rational approximation for ratios of operators
/////////////////////////////////////////////////////////
/* S_f = -log( det( [M^dag M]/[V^dag V] )^{1/inv_pow} )
= chi^dag ( [M^dag M]/[V^dag V] )^{-1/inv_pow} chi
= chi^dag ( [V^dag V]^{-1/2} [M^dag M] [V^dag V]^{-1/2} )^{-1/inv_pow} chi
= chi^dag [V^dag V]^{1/(2*inv_pow)} [M^dag M]^{-1/inv_pow} [V^dag V]^{1/(2*inv_pow)} chi
S_f = chi^dag* P(V^dag*V)/Q(V^dag*V)* N(M^dag*M)/D(M^dag*M)* P(V^dag*V)/Q(V^dag*V)* chi
BIG WARNING:
Here V^dag V is referred to in this code as the "numerator" operator and M^dag M is the *denominator* operator.
This refers to their position in the pseudofermion action, which is the *inverse* of what appears in the determinant.
Thus for DWF the numerator operator is the Pauli-Villars operator
Here P/Q \sim R_{1/(2*inv_pow)} ~ (V^dagV)^{1/(2*inv_pow)}
Here N/D \sim R_{-1/inv_pow} ~ (M^dagM)^{-1/inv_pow}
*/
template<class Impl>
class GeneralEvenOddRatioRationalPseudoFermionAction : public Action<typename Impl::GaugeField> {
public:
INHERIT_IMPL_TYPES(Impl);
typedef RationalActionParams Params;
Params param;
//For action evaluation
MultiShiftFunction ApproxPowerAction ; //rational approx for X^{1/inv_pow}
MultiShiftFunction ApproxNegPowerAction; //rational approx for X^{-1/inv_pow}
MultiShiftFunction ApproxHalfPowerAction; //rational approx for X^{1/(2*inv_pow)}
MultiShiftFunction ApproxNegHalfPowerAction; //rational approx for X^{-1/(2*inv_pow)}
//For the MD integration
MultiShiftFunction ApproxPowerMD ; //rational approx for X^{1/inv_pow}
MultiShiftFunction ApproxNegPowerMD; //rational approx for X^{-1/inv_pow}
MultiShiftFunction ApproxHalfPowerMD; //rational approx for X^{1/(2*inv_pow)}
MultiShiftFunction ApproxNegHalfPowerMD; //rational approx for X^{-1/(2*inv_pow)}
private:
FermionOperator<Impl> & NumOp;// the basic operator
FermionOperator<Impl> & DenOp;// the basic operator
FermionField PhiEven; // the pseudo fermion field for this trajectory
FermionField PhiOdd; // the pseudo fermion field for this trajectory
//Generate the approximation to x^{1/inv_pow} (-> approx) and x^{-1/inv_pow} (-> approx_inv) by a rational approximation of degree approx_degree
//CG_tolerance is used to issue a warning if the approximation error is larger than the tolerance of the CG; it is otherwise just stored in the MultiShiftFunction for use by the multi-shift
static void generateApprox(MultiShiftFunction &approx, MultiShiftFunction &approx_inv, int inv_pow, int approx_degree, double CG_tolerance, AlgRemez &remez){
std::cout<<GridLogMessage << "Generating degree "<< approx_degree<<" approximation for x^(1/" << inv_pow << ")"<<std::endl;
double error = remez.generateApprox(approx_degree,1,inv_pow);
if(error > CG_tolerance)
std::cout<<GridLogMessage << "WARNING: Remez approximation has a larger error " << error << " than the CG tolerance " << CG_tolerance << "! Try increasing the number of poles" << std::endl;
approx.Init(remez, CG_tolerance,false);
approx_inv.Init(remez, CG_tolerance,true);
}
protected:
static constexpr bool Numerator = true;
static constexpr bool Denominator = false;
//Allow derived classes to override the multishift CG
virtual void multiShiftInverse(bool numerator, const MultiShiftFunction &approx, const Integer MaxIter, const FermionField &in, FermionField &out){
SchurDifferentiableOperator<Impl> schurOp(numerator ? NumOp : DenOp);
ConjugateGradientMultiShift<FermionField> msCG(MaxIter, approx);
msCG(schurOp,in, out);
}
virtual void multiShiftInverse(bool numerator, const MultiShiftFunction &approx, const Integer MaxIter, const FermionField &in, std::vector<FermionField> &out_elems, FermionField &out){
SchurDifferentiableOperator<Impl> schurOp(numerator ? NumOp : DenOp);
ConjugateGradientMultiShift<FermionField> msCG(MaxIter, approx);
msCG(schurOp,in, out_elems, out);
}
//Allow derived classes to override the gauge import
virtual void ImportGauge(const GaugeField &U){
NumOp.ImportGauge(U);
DenOp.ImportGauge(U);
}
public:
GeneralEvenOddRatioRationalPseudoFermionAction(FermionOperator<Impl> &_NumOp,
FermionOperator<Impl> &_DenOp,
const Params & p
) :
NumOp(_NumOp),
DenOp(_DenOp),
PhiOdd (_NumOp.FermionRedBlackGrid()),
PhiEven(_NumOp.FermionRedBlackGrid()),
param(p)
{
std::cout<<GridLogMessage << action_name() << " initialize: starting" << std::endl;
AlgRemez remez(param.lo,param.hi,param.precision);
//Generate approximations for action eval
generateApprox(ApproxPowerAction, ApproxNegPowerAction, param.inv_pow, param.action_degree, param.action_tolerance, remez);
generateApprox(ApproxHalfPowerAction, ApproxNegHalfPowerAction, 2*param.inv_pow, param.action_degree, param.action_tolerance, remez);
//Generate approximations for MD
if(param.md_degree != param.action_degree){ //note the CG tolerance is unrelated to the stopping condition of the Remez algorithm
generateApprox(ApproxPowerMD, ApproxNegPowerMD, param.inv_pow, param.md_degree, param.md_tolerance, remez);
generateApprox(ApproxHalfPowerMD, ApproxNegHalfPowerMD, 2*param.inv_pow, param.md_degree, param.md_tolerance, remez);
}else{
std::cout<<GridLogMessage << "Using same rational approximations for MD as for action evaluation" << std::endl;
ApproxPowerMD = ApproxPowerAction;
ApproxNegPowerMD = ApproxNegPowerAction;
for(int i=0;i<ApproxPowerMD.tolerances.size();i++)
ApproxNegPowerMD.tolerances[i] = ApproxPowerMD.tolerances[i] = param.md_tolerance; //used for multishift
ApproxHalfPowerMD = ApproxHalfPowerAction;
ApproxNegHalfPowerMD = ApproxNegHalfPowerAction;
for(int i=0;i<ApproxPowerMD.tolerances.size();i++)
ApproxNegHalfPowerMD.tolerances[i] = ApproxHalfPowerMD.tolerances[i] = param.md_tolerance;
}
std::cout<<GridLogMessage << action_name() << " initialize: complete" << std::endl;
};
virtual std::string action_name(){return "GeneralEvenOddRatioRationalPseudoFermionAction";}
virtual std::string LogParameters(){
std::stringstream sstream;
sstream << GridLogMessage << "["<<action_name()<<"] Power : 1/" << param.inv_pow << std::endl;
sstream << GridLogMessage << "["<<action_name()<<"] Low :" << param.lo << std::endl;
sstream << GridLogMessage << "["<<action_name()<<"] High :" << param.hi << std::endl;
sstream << GridLogMessage << "["<<action_name()<<"] Max iterations :" << param.MaxIter << std::endl;
sstream << GridLogMessage << "["<<action_name()<<"] Tolerance (Action) :" << param.action_tolerance << std::endl;
sstream << GridLogMessage << "["<<action_name()<<"] Degree (Action) :" << param.action_degree << std::endl;
sstream << GridLogMessage << "["<<action_name()<<"] Tolerance (MD) :" << param.md_tolerance << std::endl;
sstream << GridLogMessage << "["<<action_name()<<"] Degree (MD) :" << param.md_degree << std::endl;
sstream << GridLogMessage << "["<<action_name()<<"] Precision :" << param.precision << std::endl;
return sstream.str();
}
//Access the fermion field
const FermionField &getPhiOdd() const{ return PhiOdd; }
virtual void refresh(const GaugeField &U, GridSerialRNG &sRNG, GridParallelRNG& pRNG) {
std::cout<<GridLogMessage << action_name() << " refresh: starting" << std::endl;
FermionField eta(NumOp.FermionGrid());
// P(eta) \propto e^{- eta^dag eta}
//
// The gaussian function draws from P(x) \propto e^{- x^2 / 2 } [i.e. sigma=1]
// Thus eta = x/sqrt{2} = x * sqrt(1/2)
RealD scale = std::sqrt(0.5);
gaussian(pRNG,eta); eta=eta*scale;
refresh(U,eta);
}
//Allow for manual specification of random field for testing
void refresh(const GaugeField &U, const FermionField &eta) {
// S_f = chi^dag* P(V^dag*V)/Q(V^dag*V)* N(M^dag*M)/D(M^dag*M)* P(V^dag*V)/Q(V^dag*V)* chi
//
// P(phi) = e^{- phi^dag (VdagV)^1/(2*inv_pow) (MdagM)^-1/inv_pow (VdagV)^1/(2*inv_pow) phi}
// = e^{- phi^dag (VdagV)^1/(2*inv_pow) (MdagM)^-1/(2*inv_pow) (MdagM)^-1/(2*inv_pow) (VdagV)^1/(2*inv_pow) phi}
//
// Phi = (VdagV)^{-1/(2*inv_pow)} (MdagM)^{1/(2*inv_pow)} eta
std::cout<<GridLogMessage << action_name() << " refresh: starting" << std::endl;
FermionField etaOdd (NumOp.FermionRedBlackGrid());
FermionField etaEven(NumOp.FermionRedBlackGrid());
FermionField tmp(NumOp.FermionRedBlackGrid());
pickCheckerboard(Even,etaEven,eta);
pickCheckerboard(Odd,etaOdd,eta);
ImportGauge(U);
// MdagM^1/(2*inv_pow) eta
std::cout<<GridLogMessage << action_name() << " refresh: doing (M^dag M)^{1/" << 2*param.inv_pow << "} eta" << std::endl;
multiShiftInverse(Denominator, ApproxHalfPowerAction, param.MaxIter, etaOdd, tmp);
// VdagV^-1/(2*inv_pow) MdagM^1/(2*inv_pow) eta
std::cout<<GridLogMessage << action_name() << " refresh: doing (V^dag V)^{-1/" << 2*param.inv_pow << "} ( (M^dag M)^{1/" << 2*param.inv_pow << "} eta)" << std::endl;
multiShiftInverse(Numerator, ApproxNegHalfPowerAction, param.MaxIter, tmp, PhiOdd);
assert(NumOp.ConstEE() == 1);
assert(DenOp.ConstEE() == 1);
PhiEven = Zero();
std::cout<<GridLogMessage << action_name() << " refresh: starting" << std::endl;
};
//////////////////////////////////////////////////////
// S_f = chi^dag* P(V^dag*V)/Q(V^dag*V)* N(M^dag*M)/D(M^dag*M)* P(V^dag*V)/Q(V^dag*V)* chi
//////////////////////////////////////////////////////
virtual RealD S(const GaugeField &U) {
std::cout<<GridLogMessage << action_name() << " compute action: starting" << std::endl;
ImportGauge(U);
FermionField X(NumOp.FermionRedBlackGrid());
FermionField Y(NumOp.FermionRedBlackGrid());
// VdagV^1/(2*inv_pow) Phi
std::cout<<GridLogMessage << action_name() << " compute action: doing (V^dag V)^{1/" << 2*param.inv_pow << "} Phi" << std::endl;
multiShiftInverse(Numerator, ApproxHalfPowerAction, param.MaxIter, PhiOdd,X);
// MdagM^-1/(2*inv_pow) VdagV^1/(2*inv_pow) Phi
std::cout<<GridLogMessage << action_name() << " compute action: doing (M^dag M)^{-1/" << 2*param.inv_pow << "} ( (V^dag V)^{1/" << 2*param.inv_pow << "} Phi)" << std::endl;
multiShiftInverse(Denominator, ApproxNegHalfPowerAction, param.MaxIter, X,Y);
// Randomly apply rational bounds checks.
int rcheck = rand();
auto grid = NumOp.FermionGrid();
auto r=rand();
grid->Broadcast(0,r);
if ( param.BoundsCheckFreq != 0 && (r % param.BoundsCheckFreq)==0 ) {
std::cout<<GridLogMessage << action_name() << " compute action: doing bounds check" << std::endl;
FermionField gauss(NumOp.FermionRedBlackGrid());
gauss = PhiOdd;
SchurDifferentiableOperator<Impl> MdagM(DenOp);
std::cout<<GridLogMessage << action_name() << " compute action: checking high bounds" << std::endl;
HighBoundCheck(MdagM,gauss,param.hi);
std::cout<<GridLogMessage << action_name() << " compute action: full approximation" << std::endl;
InversePowerBoundsCheck(param.inv_pow,param.MaxIter,param.action_tolerance*100,MdagM,gauss,ApproxNegPowerAction);
std::cout<<GridLogMessage << action_name() << " compute action: bounds check complete" << std::endl;
}
// Phidag VdagV^1/(2*inv_pow) MdagM^-1/(2*inv_pow) MdagM^-1/(2*inv_pow) VdagV^1/(2*inv_pow) Phi
RealD action = norm2(Y);
std::cout<<GridLogMessage << action_name() << " compute action: complete" << std::endl;
return action;
};
// S_f = chi^dag* P(V^dag*V)/Q(V^dag*V)* N(M^dag*M)/D(M^dag*M)* P(V^dag*V)/Q(V^dag*V)* chi
//
// Here, M is some 5D operator and V is the Pauli-Villars field
// N and D make up the rational polynomial of the M term, and P and Q make up the rational polynomial of the denominator term
//
// Need
// dS_f/dU = chi^dag d[P/Q] N/D P/Q chi
// + chi^dag P/Q d[N/D] P/Q chi
// + chi^dag P/Q N/D d[P/Q] chi
//
// P/Q is expressed as partial fraction expansion:
//
// a0 + \sum_k ak/(V^dagV + bk)
//
// d[P/Q] is then
//
// \sum_k -ak [V^dagV+bk]^{-1} [ dV^dag V + V^dag dV ] [V^dag V + bk]^{-1}
//
// and similar for N/D.
//
// Need
// MpvPhi_k = [Vdag V + bk]^{-1} chi
// MpvPhi = {a0 + \sum_k ak [Vdag V + bk]^{-1} }chi
//
// MfMpvPhi_k = [MdagM+bk]^{-1} MpvPhi
// MfMpvPhi = {a0 + \sum_k ak [Mdag M + bk]^{-1} } MpvPhi
//
// MpvMfMpvPhi_k = [Vdag V + bk]^{-1} MfMpvchi
//
virtual void deriv(const GaugeField &U,GaugeField & dSdU) {
std::cout<<GridLogMessage << action_name() << " deriv: starting" << std::endl;
const int n_f = ApproxNegPowerMD.poles.size();
const int n_pv = ApproxHalfPowerMD.poles.size();
std::vector<FermionField> MpvPhi_k (n_pv,NumOp.FermionRedBlackGrid());
std::vector<FermionField> MpvMfMpvPhi_k(n_pv,NumOp.FermionRedBlackGrid());
std::vector<FermionField> MfMpvPhi_k (n_f ,NumOp.FermionRedBlackGrid());
FermionField MpvPhi(NumOp.FermionRedBlackGrid());
FermionField MfMpvPhi(NumOp.FermionRedBlackGrid());
FermionField MpvMfMpvPhi(NumOp.FermionRedBlackGrid());
FermionField Y(NumOp.FermionRedBlackGrid());
GaugeField tmp(NumOp.GaugeGrid());
ImportGauge(U);
std::cout<<GridLogMessage << action_name() << " deriv: doing (V^dag V)^{1/" << 2*param.inv_pow << "} Phi" << std::endl;
multiShiftInverse(Numerator, ApproxHalfPowerMD, param.MaxIter, PhiOdd,MpvPhi_k,MpvPhi);
std::cout<<GridLogMessage << action_name() << " deriv: doing (M^dag M)^{-1/" << param.inv_pow << "} ( (V^dag V)^{1/" << 2*param.inv_pow << "} Phi)" << std::endl;
multiShiftInverse(Denominator, ApproxNegPowerMD, param.MaxIter, MpvPhi,MfMpvPhi_k,MfMpvPhi);
std::cout<<GridLogMessage << action_name() << " deriv: doing (V^dag V)^{1/" << 2*param.inv_pow << "} ( (M^dag M)^{-1/" << param.inv_pow << "} (V^dag V)^{1/" << 2*param.inv_pow << "} Phi)" << std::endl;
multiShiftInverse(Numerator, ApproxHalfPowerMD, param.MaxIter, MfMpvPhi,MpvMfMpvPhi_k,MpvMfMpvPhi);
SchurDifferentiableOperator<Impl> MdagM(DenOp);
SchurDifferentiableOperator<Impl> VdagV(NumOp);
RealD ak;
dSdU = Zero();
// With these building blocks
//
// dS/dU =
// \sum_k -ak MfMpvPhi_k^dag [ dM^dag M + M^dag dM ] MfMpvPhi_k (1)
// + \sum_k -ak MpvMfMpvPhi_k^\dag [ dV^dag V + V^dag dV ] MpvPhi_k (2)
// -ak MpvPhi_k^dag [ dV^dag V + V^dag dV ] MpvMfMpvPhi_k (3)
//(1)
std::cout<<GridLogMessage << action_name() << " deriv: doing dS/dU part (1)" << std::endl;
for(int k=0;k<n_f;k++){
ak = ApproxNegPowerMD.residues[k];
MdagM.Mpc(MfMpvPhi_k[k],Y);
MdagM.MpcDagDeriv(tmp , MfMpvPhi_k[k], Y ); dSdU=dSdU+ak*tmp;
MdagM.MpcDeriv(tmp , Y, MfMpvPhi_k[k] ); dSdU=dSdU+ak*tmp;
}
//(2)
//(3)
std::cout<<GridLogMessage << action_name() << " deriv: doing dS/dU part (2)+(3)" << std::endl;
for(int k=0;k<n_pv;k++){
ak = ApproxHalfPowerMD.residues[k];
VdagV.Mpc(MpvPhi_k[k],Y);
VdagV.MpcDagDeriv(tmp,MpvMfMpvPhi_k[k],Y); dSdU=dSdU+ak*tmp;
VdagV.MpcDeriv (tmp,Y,MpvMfMpvPhi_k[k]); dSdU=dSdU+ak*tmp;
VdagV.Mpc(MpvMfMpvPhi_k[k],Y); // V as we take Ydag
VdagV.MpcDeriv (tmp,Y, MpvPhi_k[k]); dSdU=dSdU+ak*tmp;
VdagV.MpcDagDeriv(tmp,MpvPhi_k[k], Y); dSdU=dSdU+ak*tmp;
}
//dSdU = Ta(dSdU);
std::cout<<GridLogMessage << action_name() << " deriv: complete" << std::endl;
};
};
NAMESPACE_END(Grid);
#endif
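
A minimal construction sketch for this action follows; it is illustrative only. NumOp and DenOp are assumed to be already-built FermionOperator<WilsonImplR> instances (for domain wall fermions the numerator is the Pauli-Villars operator, as the BIG WARNING above explains), and the numeric parameter values are placeholders rather than recommendations. The field names are those read by the constructor and LogParameters().

  // Hedged usage sketch (values illustrative); NumOp/DenOp construction omitted.
  RationalActionParams rat;
  rat.inv_pow         = 4;        // approximate det(ratio)^{1/4}
  rat.lo              = 1.0e-4;   // spectral bounds handed to AlgRemez
  rat.hi              = 64.0;
  rat.precision       = 50;       // Remez working precision
  rat.MaxIter         = 10000;    // multishift CG iteration cap
  rat.action_degree   = 16;   rat.action_tolerance = 1.0e-10;
  rat.md_degree       = 12;   rat.md_tolerance     = 1.0e-8;
  rat.BoundsCheckFreq = 20;       // 0 disables the stochastic bounds check in S(U)
  GeneralEvenOddRatioRationalPseudoFermionAction<WilsonImplR> Rational(NumOp, DenOp, rat);
  // Rational can then be pushed onto an HMC action level like any other pseudofermion action.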

View File

@ -0,0 +1,93 @@
/*************************************************************************************
Grid physics library, www.github.com/paboyle/Grid
Source file: ./lib/qcd/action/pseudofermion/GeneralEvenOddRationalRatioMixedPrec.h
Copyright (C) 2015
Author: Christopher Kelly <ckelly@bnl.gov>
Author: Peter Boyle <paboyle@ph.ed.ac.uk>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */
#ifndef QCD_PSEUDOFERMION_GENERAL_EVEN_ODD_RATIONAL_RATIO_MIXED_PREC_H
#define QCD_PSEUDOFERMION_GENERAL_EVEN_ODD_RATIONAL_RATIO_MIXED_PREC_H
NAMESPACE_BEGIN(Grid);
/////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Generic rational approximation for ratios of operators utilizing the mixed precision multishift algorithm
// cf. GeneralEvenOddRational.h for details
/////////////////////////////////////////////////////////////////////////////////////////////////////////////
template<class ImplD, class ImplF>
class GeneralEvenOddRatioRationalMixedPrecPseudoFermionAction : public GeneralEvenOddRatioRationalPseudoFermionAction<ImplD> {
private:
typedef typename ImplD::FermionField FermionFieldD;
typedef typename ImplF::FermionField FermionFieldF;
FermionOperator<ImplD> & NumOpD;
FermionOperator<ImplD> & DenOpD;
FermionOperator<ImplF> & NumOpF;
FermionOperator<ImplF> & DenOpF;
Integer ReliableUpdateFreq;
protected:
//Allow derived classes to override the multishift CG
virtual void multiShiftInverse(bool numerator, const MultiShiftFunction &approx, const Integer MaxIter, const FermionFieldD &in, FermionFieldD &out){
SchurDifferentiableOperator<ImplD> schurOpD(numerator ? NumOpD : DenOpD);
SchurDifferentiableOperator<ImplF> schurOpF(numerator ? NumOpF : DenOpF);
ConjugateGradientMultiShiftMixedPrec<FermionFieldD, FermionFieldF> msCG(MaxIter, approx, NumOpF.FermionRedBlackGrid(), schurOpF, ReliableUpdateFreq);
msCG(schurOpD, in, out);
}
virtual void multiShiftInverse(bool numerator, const MultiShiftFunction &approx, const Integer MaxIter, const FermionFieldD &in, std::vector<FermionFieldD> &out_elems, FermionFieldD &out){
SchurDifferentiableOperator<ImplD> schurOpD(numerator ? NumOpD : DenOpD);
SchurDifferentiableOperator<ImplF> schurOpF(numerator ? NumOpF : DenOpF);
ConjugateGradientMultiShiftMixedPrec<FermionFieldD, FermionFieldF> msCG(MaxIter, approx, NumOpF.FermionRedBlackGrid(), schurOpF, ReliableUpdateFreq);
msCG(schurOpD, in, out_elems, out);
}
//Allow derived classes to override the gauge import
virtual void ImportGauge(const typename ImplD::GaugeField &Ud){
typename ImplF::GaugeField Uf(NumOpF.GaugeGrid());
precisionChange(Uf, Ud);
NumOpD.ImportGauge(Ud);
DenOpD.ImportGauge(Ud);
NumOpF.ImportGauge(Uf);
DenOpF.ImportGauge(Uf);
}
public:
GeneralEvenOddRatioRationalMixedPrecPseudoFermionAction(FermionOperator<ImplD> &_NumOpD, FermionOperator<ImplD> &_DenOpD,
FermionOperator<ImplF> &_NumOpF, FermionOperator<ImplF> &_DenOpF,
const RationalActionParams & p, Integer _ReliableUpdateFreq
) : GeneralEvenOddRatioRationalPseudoFermionAction<ImplD>(_NumOpD, _DenOpD, p),
ReliableUpdateFreq(_ReliableUpdateFreq), NumOpD(_NumOpD), DenOpD(_DenOpD), NumOpF(_NumOpF), DenOpF(_DenOpF){}
virtual std::string action_name(){return "GeneralEvenOddRatioRationalMixedPrecPseudoFermionAction";}
};
NAMESPACE_END(Grid);
#endif
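
The mixed-precision variant only changes how the multishift solves are performed: it takes matching double/single precision operator pairs plus a reliable-update frequency and dispatches to ConjugateGradientMultiShiftMixedPrec, while the refresh, action and force logic are inherited unchanged. A hedged sketch, assuming NumOpD/DenOpD and NumOpF/DenOpF are the same operators instantiated on double- and single-precision grids and rat is a RationalActionParams as in the previous sketch:

  // Hedged usage sketch; operator construction omitted.
  Integer ReliableUpdateFreq = 50; // how often the true double-precision residual is re-evaluated
  GeneralEvenOddRatioRationalMixedPrecPseudoFermionAction<WilsonImplD,WilsonImplF>
      RationalMP(NumOpD, DenOpD, NumOpF, DenOpF, rat, ReliableUpdateFreq);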

View File

@ -40,249 +40,31 @@ NAMESPACE_BEGIN(Grid);
// Here N/D \sim R_{-1/2} ~ (M^dagM)^{-1/2}
template<class Impl>
class OneFlavourEvenOddRatioRationalPseudoFermionAction : public Action<typename Impl::GaugeField> {
class OneFlavourEvenOddRatioRationalPseudoFermionAction : public GeneralEvenOddRatioRationalPseudoFermionAction<Impl> {
public:
INHERIT_IMPL_TYPES(Impl);
typedef OneFlavourRationalParams Params;
Params param;
MultiShiftFunction PowerHalf ;
MultiShiftFunction PowerNegHalf;
MultiShiftFunction PowerQuarter;
MultiShiftFunction PowerNegQuarter;
private:
FermionOperator<Impl> & NumOp;// the basic operator
FermionOperator<Impl> & DenOp;// the basic operator
FermionField PhiEven; // the pseudo fermion field for this trajectory
FermionField PhiOdd; // the pseudo fermion field for this trajectory
static RationalActionParams transcribe(const Params &in){
RationalActionParams out;
out.inv_pow = 2;
out.lo = in.lo;
out.hi = in.hi;
out.MaxIter = in.MaxIter;
out.action_tolerance = out.md_tolerance = in.tolerance;
out.action_degree = out.md_degree = in.degree;
out.precision = in.precision;
out.BoundsCheckFreq = in.BoundsCheckFreq;
return out;
}
public:
OneFlavourEvenOddRatioRationalPseudoFermionAction(FermionOperator<Impl> &_NumOp,
FermionOperator<Impl> &_DenOp,
Params & p
) :
NumOp(_NumOp),
DenOp(_DenOp),
PhiOdd (_NumOp.FermionRedBlackGrid()),
PhiEven(_NumOp.FermionRedBlackGrid()),
param(p)
{
AlgRemez remez(param.lo,param.hi,param.precision);
FermionOperator<Impl> &_DenOp,
const Params & p
) :
GeneralEvenOddRatioRationalPseudoFermionAction<Impl>(_NumOp, _DenOp, transcribe(p)){}
// MdagM^(+- 1/2)
std::cout<<GridLogMessage << "Generating degree "<<param.degree<<" for x^(1/2)"<<std::endl;
remez.generateApprox(param.degree,1,2);
PowerHalf.Init(remez,param.tolerance,false);
PowerNegHalf.Init(remez,param.tolerance,true);
// MdagM^(+- 1/4)
std::cout<<GridLogMessage << "Generating degree "<<param.degree<<" for x^(1/4)"<<std::endl;
remez.generateApprox(param.degree,1,4);
PowerQuarter.Init(remez,param.tolerance,false);
PowerNegQuarter.Init(remez,param.tolerance,true);
};
virtual std::string action_name(){return "OneFlavourEvenOddRatioRationalPseudoFermionAction";}
virtual std::string LogParameters(){
std::stringstream sstream;
sstream << GridLogMessage << "["<<action_name()<<"] Low :" << param.lo << std::endl;
sstream << GridLogMessage << "["<<action_name()<<"] High :" << param.hi << std::endl;
sstream << GridLogMessage << "["<<action_name()<<"] Max iterations :" << param.MaxIter << std::endl;
sstream << GridLogMessage << "["<<action_name()<<"] Tolerance :" << param.tolerance << std::endl;
sstream << GridLogMessage << "["<<action_name()<<"] Degree :" << param.degree << std::endl;
sstream << GridLogMessage << "["<<action_name()<<"] Precision :" << param.precision << std::endl;
return sstream.str();
}
virtual void refresh(const GaugeField &U, GridSerialRNG &sRNG, GridParallelRNG& pRNG) {
// S_f = chi^dag* P(V^dag*V)/Q(V^dag*V)* N(M^dag*M)/D(M^dag*M)* P(V^dag*V)/Q(V^dag*V)* chi
//
// P(phi) = e^{- phi^dag (VdagV)^1/4 (MdagM)^-1/2 (VdagV)^1/4 phi}
// = e^{- phi^dag (VdagV)^1/4 (MdagM)^-1/4 (MdagM)^-1/4 (VdagV)^1/4 phi}
//
// Phi = (VdagV)^-1/4 (MdagM)^{1/4} eta
//
// P(eta) = e^{- eta^dag eta}
//
// Matching P(eta) = e^{- eta^dag eta} against e^{- x^2/(2 sig^2)} => sig^2 = 0.5.
//
// So eta should be of width sig = 1/sqrt(2).
RealD scale = std::sqrt(0.5);
FermionField eta(NumOp.FermionGrid());
FermionField etaOdd (NumOp.FermionRedBlackGrid());
FermionField etaEven(NumOp.FermionRedBlackGrid());
FermionField tmp(NumOp.FermionRedBlackGrid());
gaussian(pRNG,eta); eta=eta*scale;
pickCheckerboard(Even,etaEven,eta);
pickCheckerboard(Odd,etaOdd,eta);
NumOp.ImportGauge(U);
DenOp.ImportGauge(U);
// MdagM^1/4 eta
SchurDifferentiableOperator<Impl> MdagM(DenOp);
ConjugateGradientMultiShift<FermionField> msCG_M(param.MaxIter,PowerQuarter);
msCG_M(MdagM,etaOdd,tmp);
// VdagV^-1/4 MdagM^1/4 eta
SchurDifferentiableOperator<Impl> VdagV(NumOp);
ConjugateGradientMultiShift<FermionField> msCG_V(param.MaxIter,PowerNegQuarter);
msCG_V(VdagV,tmp,PhiOdd);
assert(NumOp.ConstEE() == 1);
assert(DenOp.ConstEE() == 1);
PhiEven = Zero();
};
//////////////////////////////////////////////////////
// S_f = chi^dag* P(V^dag*V)/Q(V^dag*V)* N(M^dag*M)/D(M^dag*M)* P(V^dag*V)/Q(V^dag*V)* chi
//////////////////////////////////////////////////////
virtual RealD S(const GaugeField &U) {
NumOp.ImportGauge(U);
DenOp.ImportGauge(U);
FermionField X(NumOp.FermionRedBlackGrid());
FermionField Y(NumOp.FermionRedBlackGrid());
// VdagV^1/4 Phi
SchurDifferentiableOperator<Impl> VdagV(NumOp);
ConjugateGradientMultiShift<FermionField> msCG_V(param.MaxIter,PowerQuarter);
msCG_V(VdagV,PhiOdd,X);
// MdagM^-1/4 VdagV^1/4 Phi
SchurDifferentiableOperator<Impl> MdagM(DenOp);
ConjugateGradientMultiShift<FermionField> msCG_M(param.MaxIter,PowerNegQuarter);
msCG_M(MdagM,X,Y);
// Randomly apply rational bounds checks.
auto grid = NumOp.FermionGrid();
auto r=rand();
grid->Broadcast(0,r);
if ( (r%param.BoundsCheckFreq)==0 ) {
FermionField gauss(NumOp.FermionRedBlackGrid());
gauss = PhiOdd;
HighBoundCheck(MdagM,gauss,param.hi);
InverseSqrtBoundsCheck(param.MaxIter,param.tolerance*100,MdagM,gauss,PowerNegHalf);
}
// Phidag VdagV^1/4 MdagM^-1/4 MdagM^-1/4 VdagV^1/4 Phi
RealD action = norm2(Y);
return action;
};
// S_f = chi^dag* P(V^dag*V)/Q(V^dag*V)* N(M^dag*M)/D(M^dag*M)* P(V^dag*V)/Q(V^dag*V)* chi
//
// Here, M is some 5D operator and V is the Pauli-Villars field
// N and D make up the rational polynomial of the M term, and P and Q make up the rational polynomial of the denominator term
//
// Need
// dS_f/dU = chi^dag d[P/Q] N/D P/Q chi
// + chi^dag P/Q d[N/D] P/Q chi
// + chi^dag P/Q N/D d[P/Q] chi
//
// P/Q is expressed as partial fraction expansion:
//
// a0 + \sum_k ak/(V^dagV + bk)
//
// d[P/Q] is then
//
// \sum_k -ak [V^dagV+bk]^{-1} [ dV^dag V + V^dag dV ] [V^dag V + bk]^{-1}
//
// and similar for N/D.
//
// Need
// MpvPhi_k = [Vdag V + bk]^{-1} chi
// MpvPhi = {a0 + \sum_k ak [Vdag V + bk]^{-1} }chi
//
// MfMpvPhi_k = [MdagM+bk]^{-1} MpvPhi
// MfMpvPhi = {a0 + \sum_k ak [Mdag M + bk]^{-1} } MpvPhi
//
// MpvMfMpvPhi_k = [Vdag V + bk]^{-1} MfMpvchi
//
virtual void deriv(const GaugeField &U,GaugeField & dSdU) {
const int n_f = PowerNegHalf.poles.size();
const int n_pv = PowerQuarter.poles.size();
std::vector<FermionField> MpvPhi_k (n_pv,NumOp.FermionRedBlackGrid());
std::vector<FermionField> MpvMfMpvPhi_k(n_pv,NumOp.FermionRedBlackGrid());
std::vector<FermionField> MfMpvPhi_k (n_f ,NumOp.FermionRedBlackGrid());
FermionField MpvPhi(NumOp.FermionRedBlackGrid());
FermionField MfMpvPhi(NumOp.FermionRedBlackGrid());
FermionField MpvMfMpvPhi(NumOp.FermionRedBlackGrid());
FermionField Y(NumOp.FermionRedBlackGrid());
GaugeField tmp(NumOp.GaugeGrid());
NumOp.ImportGauge(U);
DenOp.ImportGauge(U);
SchurDifferentiableOperator<Impl> VdagV(NumOp);
SchurDifferentiableOperator<Impl> MdagM(DenOp);
ConjugateGradientMultiShift<FermionField> msCG_V(param.MaxIter,PowerQuarter);
ConjugateGradientMultiShift<FermionField> msCG_M(param.MaxIter,PowerNegHalf);
msCG_V(VdagV,PhiOdd,MpvPhi_k,MpvPhi);
msCG_M(MdagM,MpvPhi,MfMpvPhi_k,MfMpvPhi);
msCG_V(VdagV,MfMpvPhi,MpvMfMpvPhi_k,MpvMfMpvPhi);
RealD ak;
dSdU = Zero();
// With these building blocks
//
// dS/dU =
// \sum_k -ak MfMpvPhi_k^dag [ dM^dag M + M^dag dM ] MfMpvPhi_k (1)
// + \sum_k -ak MpvMfMpvPhi_k^\dag [ dV^dag V + V^dag dV ] MpvPhi_k (2)
// -ak MpvPhi_k^dag [ dV^dag V + V^dag dV ] MpvMfMpvPhi_k (3)
//(1)
for(int k=0;k<n_f;k++){
ak = PowerNegHalf.residues[k];
MdagM.Mpc(MfMpvPhi_k[k],Y);
MdagM.MpcDagDeriv(tmp , MfMpvPhi_k[k], Y ); dSdU=dSdU+ak*tmp;
MdagM.MpcDeriv(tmp , Y, MfMpvPhi_k[k] ); dSdU=dSdU+ak*tmp;
}
//(2)
//(3)
for(int k=0;k<n_pv;k++){
ak = PowerQuarter.residues[k];
VdagV.Mpc(MpvPhi_k[k],Y);
VdagV.MpcDagDeriv(tmp,MpvMfMpvPhi_k[k],Y); dSdU=dSdU+ak*tmp;
VdagV.MpcDeriv (tmp,Y,MpvMfMpvPhi_k[k]); dSdU=dSdU+ak*tmp;
VdagV.Mpc(MpvMfMpvPhi_k[k],Y); // V as we take Ydag
VdagV.MpcDeriv (tmp,Y, MpvPhi_k[k]); dSdU=dSdU+ak*tmp;
VdagV.MpcDagDeriv(tmp,MpvPhi_k[k], Y); dSdU=dSdU+ak*tmp;
}
//dSdU = Ta(dSdU);
};
virtual std::string action_name(){return "OneFlavourEvenOddRatioRationalPseudoFermionAction";}
};
NAMESPACE_END(Grid);
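
The net effect of this diff is that OneFlavourEvenOddRatioRationalPseudoFermionAction becomes a thin wrapper over the General class: transcribe() maps the old OneFlavourRationalParams onto RationalActionParams with inv_pow = 2 and a single tolerance/degree reused for both the action and MD approximations, and all solver machinery is inherited. In other words (a sketch, with p an OneFlavourRationalParams object and NumOp/DenOp as usual), the following two constructions are equivalent:

  // Sketch of the parameter transcription performed internally.
  OneFlavourEvenOddRatioRationalPseudoFermionAction<WilsonImplR> A(NumOp, DenOp, p);

  RationalActionParams q;
  q.inv_pow = 2;
  q.lo = p.lo;   q.hi = p.hi;   q.MaxIter = p.MaxIter;   q.precision = p.precision;
  q.action_tolerance = q.md_tolerance = p.tolerance;
  q.action_degree    = q.md_degree    = p.degree;
  q.BoundsCheckFreq  = p.BoundsCheckFreq;
  GeneralEvenOddRatioRationalPseudoFermionAction<WilsonImplR> B(NumOp, DenOp, q);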

View File

@ -26,8 +26,7 @@ See the full license in the file "LICENSE" in the top level distribution
directory
*************************************************************************************/
/* END LEGAL */
#ifndef QCD_PSEUDOFERMION_AGGREGATE_H
#define QCD_PSEUDOFERMION_AGGREGATE_H
#pragma once
// Rational functions
#include <Grid/qcd/action/pseudofermion/Bounds.h>
@ -40,7 +39,14 @@ directory
#include <Grid/qcd/action/pseudofermion/OneFlavourRational.h>
#include <Grid/qcd/action/pseudofermion/OneFlavourRationalRatio.h>
#include <Grid/qcd/action/pseudofermion/OneFlavourEvenOddRational.h>
#include <Grid/qcd/action/pseudofermion/GeneralEvenOddRationalRatio.h>
#include <Grid/qcd/action/pseudofermion/GeneralEvenOddRationalRatioMixedPrec.h>
#include <Grid/qcd/action/pseudofermion/OneFlavourEvenOddRationalRatio.h>
#include <Grid/qcd/action/pseudofermion/ExactOneFlavourRatio.h>
#include <Grid/qcd/action/pseudofermion/DomainDecomposedBoundaryTwoFlavourPseudoFermion.h>
#include <Grid/qcd/action/pseudofermion/DomainDecomposedBoundaryTwoFlavourBosonPseudoFermion.h>
#include <Grid/qcd/action/pseudofermion/DomainDecomposedBoundaryTwoFlavourRatioPseudoFermion.h>
#endif

View File

@ -98,6 +98,7 @@ public:
FermOp.ImportGauge(U);
FermOp.Mdag(eta, Phi);
std::cout << GridLogMessage << "Pseudofermion action refresh " << norm2(eta) << std::endl;
};
//////////////////////////////////////////////////////

View File

@ -50,6 +50,8 @@ NAMESPACE_BEGIN(Grid);
FermionField PhiOdd; // the pseudo fermion field for this trajectory
FermionField PhiEven; // the pseudo fermion field for this trajectory
virtual void refreshRestrict(FermionField &eta) {};
public:
TwoFlavourEvenOddRatioPseudoFermionAction(FermionOperator<Impl> &_NumOp,
FermionOperator<Impl> &_DenOp,
@ -60,7 +62,8 @@ NAMESPACE_BEGIN(Grid);
TwoFlavourEvenOddRatioPseudoFermionAction(FermionOperator<Impl> &_NumOp,
FermionOperator<Impl> &_DenOp,
OperatorFunction<FermionField> & DS,
OperatorFunction<FermionField> & AS, OperatorFunction<FermionField> & HS) :
OperatorFunction<FermionField> & AS,
OperatorFunction<FermionField> & HS) :
NumOp(_NumOp),
DenOp(_DenOp),
DerivativeSolver(DS),
@ -83,16 +86,7 @@ NAMESPACE_BEGIN(Grid);
return sstream.str();
}
virtual void refresh(const GaugeField &U, GridSerialRNG &sRNG, GridParallelRNG& pRNG) {
// P(phi) = e^{- phi^dag Vpc (MpcdagMpc)^-1 Vpcdag phi}
//
// NumOp == V
// DenOp == M
//
// Take phi_o = Vpcdag^{-1} Mpcdag eta_o ; eta_o = Mpcdag^{-1} Vpcdag Phi
//
// P(eta_o) = e^{- eta_o^dag eta_o}
//
// Matching P(eta_o) = e^{- eta_o^dag eta_o} against e^{- x^2/(2 sig^2)} => sig^2 = 0.5.
@ -100,12 +94,23 @@ NAMESPACE_BEGIN(Grid);
RealD scale = std::sqrt(0.5);
FermionField eta (NumOp.FermionGrid());
gaussian(pRNG,eta); eta = eta * scale;
refreshRestrict(eta); // Used by DDHMC
refresh(U,eta);
}
void refresh(const GaugeField &U, const FermionField &eta) {
// P(phi) = e^{- phi^dag Vpc (MpcdagMpc)^-1 Vpcdag phi}
//
// NumOp == V
// DenOp == M
//
// Take phi_o = Vpcdag^{-1} Mpcdag eta_o ; eta_o = Mpcdag^{-1} Vpcdag Phi
FermionField etaOdd (NumOp.FermionRedBlackGrid());
FermionField etaEven(NumOp.FermionRedBlackGrid());
FermionField tmp (NumOp.FermionRedBlackGrid());
gaussian(pRNG,eta);
pickCheckerboard(Even,etaEven,eta);
pickCheckerboard(Odd,etaOdd,eta);
@ -125,8 +130,9 @@ NAMESPACE_BEGIN(Grid);
DenOp.MooeeDag(etaEven,tmp);
NumOp.MooeeInvDag(tmp,PhiEven);
PhiOdd =PhiOdd*scale;
PhiEven=PhiEven*scale;
//PhiOdd =PhiOdd*scale;
//PhiEven=PhiEven*scale;
std::cout << GridLogMessage<<" TwoFlavourEvenOddRatio Expect action to be "<<norm2(etaOdd) + norm2(etaEven)<<std::endl;
};
@ -161,6 +167,8 @@ NAMESPACE_BEGIN(Grid);
DenOp.MooeeInvDag(X,Y);
action = action + norm2(Y);
std::cout << GridLogMessage<<" TwoFlavourEvenOddRatio action is "<<action<<std::endl;
return action;
};
@ -173,7 +181,7 @@ NAMESPACE_BEGIN(Grid);
NumOp.ImportGauge(U);
DenOp.ImportGauge(U);
SchurDifferentiableOperator<Impl> Mpc(DenOp);
SchurDifferentiableOperator<Impl> Vpc(NumOp);
@ -208,7 +216,7 @@ NAMESPACE_BEGIN(Grid);
assert(DenOp.ConstEE() == 1);
dSdU = -dSdU;
};
};
NAMESPACE_END(Grid);

View File

@ -99,7 +99,7 @@ public:
NumOp.M(tmp,Phi); // Vdag^-1 Mdag eta
Phi=Phi*scale;
std::cout << GridLogMessage<<" TwoFlavourRatio Expect action to be "<<norm2(eta)*scale*scale<<std::endl;
};
//////////////////////////////////////////////////////
@ -121,6 +121,7 @@ public:
DenOp.M(X,Y); // Y= Mdag^-1 Vdag phi
RealD action = norm2(Y);
std::cout << GridLogMessage<<" TwoFlavourRatio action is "<<action<<std::endl;
return action;
};

View File

@ -0,0 +1,197 @@
/*************************************************************************************
Grid physics library, www.github.com/paboyle/Grid
Source file: ./lib/qcd/action/pseudofermion/TwoFlavourRatio.h
Copyright (C) 2015
Author: Peter Boyle <paboyle@ph.ed.ac.uk>
Author: Peter Boyle <peterboyle@Peters-MacBook-Pro-2.local>
Author: paboyle <paboyle@ph.ed.ac.uk>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */
#pragma once
NAMESPACE_BEGIN(Grid);
///////////////////////////////////////
// Two flavour ratio
///////////////////////////////////////
template<class Impl>
class TwoFlavourRatio4DPseudoFermionAction : public Action<typename Impl::GaugeField> {
public:
INHERIT_IMPL_TYPES(Impl);
private:
FermionOperator<Impl> & NumOp;// the basic operator
FermionOperator<Impl> & DenOp;// the basic operator
OperatorFunction<FermionField> &DerivativeSolver;
OperatorFunction<FermionField> &ActionSolver;
FermionField phi4; // the pseudo fermion field for this trajectory
public:
TwoFlavourRatio4DPseudoFermionAction(FermionOperator<Impl> &_NumOp,
FermionOperator<Impl> &_DenOp,
OperatorFunction<FermionField> & DS,
OperatorFunction<FermionField> & AS
) : NumOp(_NumOp),
DenOp(_DenOp),
DerivativeSolver(DS),
ActionSolver(AS),
phi4(_NumOp.GaugeGrid())
{};
virtual std::string action_name(){return "TwoFlavourRatio4DPseudoFermionAction";}
virtual std::string LogParameters(){
std::stringstream sstream;
sstream << GridLogMessage << "["<<action_name()<<"] has no parameters" << std::endl;
return sstream.str();
}
virtual void refresh(const GaugeField &U, GridSerialRNG &sRNG, GridParallelRNG& pRNG) {
// P(phi) = e^{- phi^dag (V^dag M^-dag)_11 (M^-1 V)_11 phi}
//
// NumOp == V
// DenOp == M
//
// Take phi = (V^{-1} M)_11 eta ; eta = (M^{-1} V)_11 Phi
//
// P(eta) = e^{- eta^dag eta}
//
// Matching P(eta) = e^{- eta^dag eta} against e^{- x^2/(2 sig^2)} => sig^2 = 0.5.
//
// So eta should be of width sig = 1/sqrt(2), i.e. the unit-variance gaussian draw must be multiplied by 0.707...
//
RealD scale = std::sqrt(0.5);
FermionField eta4(NumOp.GaugeGrid());
FermionField eta5(NumOp.FermionGrid());
FermionField tmp(NumOp.FermionGrid());
FermionField phi5(NumOp.FermionGrid());
gaussian(pRNG,eta4);
NumOp.ImportFourDimPseudoFermion(eta4,eta5);
NumOp.ImportGauge(U);
DenOp.ImportGauge(U);
MdagMLinearOperator<FermionOperator<Impl> ,FermionField> MdagMOp(NumOp);
DenOp.M(eta5,phi5); // M eta
NumOp.Mdag(phi5,tmp); // Vdag M eta
phi5 = Zero();
ActionSolver(MdagMOp,tmp,phi5); // (VdagV)^-1 Vdag M eta = V^-1 Vdag^-1 Vdag M eta = V^-1 M eta
phi5=phi5*scale;
// Project to 4d
NumOp.ExportFourDimPseudoFermion(phi5,phi4);
};
//////////////////////////////////////////////////////
// S = phi^dag (V^dag M^-dag)_11 (M^-1 V)_11 phi
//////////////////////////////////////////////////////
virtual RealD S(const GaugeField &U) {
NumOp.ImportGauge(U);
DenOp.ImportGauge(U);
FermionField Y4(NumOp.GaugeGrid());
FermionField X(NumOp.FermionGrid());
FermionField Y(NumOp.FermionGrid());
FermionField phi5(NumOp.FermionGrid());
MdagMLinearOperator<FermionOperator<Impl> ,FermionField> MdagMOp(DenOp);
NumOp.ImportFourDimPseudoFermion(phi4,phi5);
NumOp.M(phi5,Y); // Y= V phi
DenOp.Mdag(Y,X); // X= Mdag V phi
Y=Zero();
ActionSolver(MdagMOp,X,Y); // Y= (MdagM)^-1 Mdag V phi = M^-1 V phi
NumOp.ExportFourDimPseudoFermion(Y,Y4);
RealD action = norm2(Y4);
return action;
};
//////////////////////////////////////////////////////
// dS/du = 2 Re phi^dag (V^dag M^-dag)_11 (M^-1 d V)_11 phi
// - 2 Re phi^dag (V^dag M^-dag)_11 (M^-1 dM M^-1 V)_11 phi
//////////////////////////////////////////////////////
virtual void deriv(const GaugeField &U,GaugeField & dSdU) {
NumOp.ImportGauge(U);
DenOp.ImportGauge(U);
MdagMLinearOperator<FermionOperator<Impl> ,FermionField> MdagMOp(DenOp);
FermionField X(NumOp.FermionGrid());
FermionField Y(NumOp.FermionGrid());
FermionField phi(NumOp.FermionGrid());
FermionField Vphi(NumOp.FermionGrid());
FermionField MinvVphi(NumOp.FermionGrid());
FermionField tmp4(NumOp.GaugeGrid());
FermionField MdagInvMinvVphi(NumOp.FermionGrid());
GaugeField force(NumOp.GaugeGrid());
//Y=V phi
//X = Mdag V phi
//Y = (Mdag M)^-1 Mdag V phi = M^-1 V Phi
NumOp.ImportFourDimPseudoFermion(phi4,phi);
NumOp.M(phi,Vphi); // V phi
DenOp.Mdag(Vphi,X); // X= Mdag V phi
Y=Zero();
DerivativeSolver(MdagMOp,X,MinvVphi);// M^-1 V phi
// Projects onto the physical space and back
NumOp.ExportFourDimPseudoFermion(MinvVphi,tmp4);
NumOp.ImportFourDimPseudoFermion(tmp4,Y);
X=Zero();
DerivativeSolver(MdagMOp,Y,X);// X = (MdagM)^-1 proj M^-1 V phi
DenOp.M(X,MdagInvMinvVphi);
// phi^dag (Vdag Mdag^-1) (M^-1 dV) phi
NumOp.MDeriv(force ,MdagInvMinvVphi , phi, DaggerNo ); dSdU=force;
// phi^dag (dVdag Mdag^-1) (M^-1 V) phi
NumOp.MDeriv(force , phi, MdagInvMinvVphi ,DaggerYes ); dSdU=dSdU+force;
// - 2 Re phi^dag (V^dag M^-dag)_11 (M^-1 dM M^-1 V)_11 phi
DenOp.MDeriv(force,MdagInvMinvVphi,MinvVphi,DaggerNo); dSdU=dSdU-force;
DenOp.MDeriv(force,MinvVphi,MdagInvMinvVphi,DaggerYes); dSdU=dSdU-force;
dSdU *= -1.0;
//dSdU = - Ta(dSdU);
};
};
NAMESPACE_END(Grid);

View File

@ -0,0 +1,203 @@
/*************************************************************************************
Grid physics library, www.github.com/paboyle/Grid
Source file: ./lib/qcd/action/pseudofermion/TwoFlavourRatio.h
Copyright (C) 2015
Author: Peter Boyle <paboyle@ph.ed.ac.uk>
Author: Peter Boyle <peterboyle@Peters-MacBook-Pro-2.local>
Author: paboyle <paboyle@ph.ed.ac.uk>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */
#pragma once
NAMESPACE_BEGIN(Grid);
///////////////////////////////////////
// Two flavour ratio
///////////////////////////////////////
template<class Impl>
class TwoFlavourRatioEO4DPseudoFermionAction : public Action<typename Impl::GaugeField> {
public:
INHERIT_IMPL_TYPES(Impl);
private:
typedef FermionOperator<Impl> FermOp;
FermionOperator<Impl> & NumOp;// the basic operator
FermionOperator<Impl> & DenOp;// the basic operator
OperatorFunction<FermionField> &DerivativeSolver;
OperatorFunction<FermionField> &DerivativeDagSolver;
OperatorFunction<FermionField> &ActionSolver;
OperatorFunction<FermionField> &HeatbathSolver;
FermionField phi4; // the pseudo fermion field for this trajectory
public:
TwoFlavourRatioEO4DPseudoFermionAction(FermionOperator<Impl> &_NumOp,
FermionOperator<Impl> &_DenOp,
OperatorFunction<FermionField> & DS,
OperatorFunction<FermionField> & AS ) :
TwoFlavourRatioEO4DPseudoFermionAction(_NumOp,_DenOp, DS,DS,AS,AS) {};
TwoFlavourRatioEO4DPseudoFermionAction(FermionOperator<Impl> &_NumOp,
FermionOperator<Impl> &_DenOp,
OperatorFunction<FermionField> & DS,
OperatorFunction<FermionField> & DDS,
OperatorFunction<FermionField> & AS,
OperatorFunction<FermionField> & HS
) : NumOp(_NumOp),
DenOp(_DenOp),
DerivativeSolver(DS),
DerivativeDagSolver(DDS),
ActionSolver(AS),
HeatbathSolver(HS),
phi4(_NumOp.GaugeGrid())
{};
virtual std::string action_name(){return "TwoFlavourRatioEO4DPseudoFermionAction";}
virtual std::string LogParameters(){
std::stringstream sstream;
sstream << GridLogMessage << "["<<action_name()<<"] has no parameters" << std::endl;
return sstream.str();
}
virtual void refresh(const GaugeField &U, GridSerialRNG &sRNG, GridParallelRNG& pRNG) {
// P(phi) = e^{- phi^dag (V^dag M^-dag)_11 (M^-1 V)_11 phi}
//
// NumOp == V
// DenOp == M
//
// Take phi = (V^{-1} M)_11 eta ; eta = (M^{-1} V)_11 Phi
//
// P(eta) = e^{- eta^dag eta}
//
// Matching P(eta) = e^{- eta^dag eta} against e^{- x^2/(2 sig^2)} => sig^2 = 0.5.
//
// So eta should be of width sig = 1/sqrt(2), i.e. the unit-variance gaussian draw must be multiplied by 0.707...
//
RealD scale = std::sqrt(0.5);
FermionField eta4(NumOp.GaugeGrid());
FermionField eta5(NumOp.FermionGrid());
FermionField tmp(NumOp.FermionGrid());
FermionField phi5(NumOp.FermionGrid());
gaussian(pRNG,eta4);
NumOp.ImportFourDimPseudoFermion(eta4,eta5);
NumOp.ImportGauge(U);
DenOp.ImportGauge(U);
SchurRedBlackDiagMooeeSolve<FermionField> PrecSolve(HeatbathSolver);
DenOp.M(eta5,tmp); // M eta
PrecSolve(NumOp,tmp,phi5); // phi = V^-1 M eta
phi5=phi5*scale;
std::cout << GridLogMessage << "4d pf refresh "<< norm2(phi5)<<"\n";
// Project to 4d
NumOp.ExportFourDimPseudoFermion(phi5,phi4);
};
//////////////////////////////////////////////////////
// S = phi^dag (V^dag M^-dag)_11 (M^-1 V)_11 phi
//////////////////////////////////////////////////////
virtual RealD S(const GaugeField &U) {
NumOp.ImportGauge(U);
DenOp.ImportGauge(U);
FermionField Y4(NumOp.GaugeGrid());
FermionField X(NumOp.FermionGrid());
FermionField Y(NumOp.FermionGrid());
FermionField phi5(NumOp.FermionGrid());
MdagMLinearOperator<FermionOperator<Impl> ,FermionField> MdagMOp(DenOp);
SchurRedBlackDiagMooeeSolve<FermionField> PrecSolve(ActionSolver);
NumOp.ImportFourDimPseudoFermion(phi4,phi5);
NumOp.M(phi5,X); // X= V phi
PrecSolve(DenOp,X,Y); // Y = M^-1 V phi (red-black Schur solve of DenOp)
NumOp.ExportFourDimPseudoFermion(Y,Y4);
RealD action = norm2(Y4);
return action;
};
//////////////////////////////////////////////////////
// dS/du = 2 Re phi^dag (V^dag M^-dag)_11 (M^-1 d V)_11 phi
// - 2 Re phi^dag (V^dag M^-dag)_11 (M^-1 dM M^-1 V)_11 phi
//////////////////////////////////////////////////////
virtual void deriv(const GaugeField &U,GaugeField & dSdU) {
NumOp.ImportGauge(U);
DenOp.ImportGauge(U);
FermionField X(NumOp.FermionGrid());
FermionField Y(NumOp.FermionGrid());
FermionField phi(NumOp.FermionGrid());
FermionField Vphi(NumOp.FermionGrid());
FermionField MinvVphi(NumOp.FermionGrid());
FermionField tmp4(NumOp.GaugeGrid());
FermionField MdagInvMinvVphi(NumOp.FermionGrid());
GaugeField force(NumOp.GaugeGrid());
//Y=V phi
//X = Mdag V phi
//Y = (Mdag M)^-1 Mdag V phi = M^-1 V Phi
NumOp.ImportFourDimPseudoFermion(phi4,phi);
NumOp.M(phi,Vphi); // V phi
SchurRedBlackDiagMooeeSolve<FermionField> PrecSolve(DerivativeSolver);
PrecSolve(DenOp,Vphi,MinvVphi);// M^-1 V phi
std::cout << GridLogMessage << "4d deriv solve "<< norm2(MinvVphi)<<"\n";
// Projects onto the physical space and back
NumOp.ExportFourDimPseudoFermion(MinvVphi,tmp4);
NumOp.ImportFourDimPseudoFermion(tmp4,Y);
SchurRedBlackDiagMooeeDagSolve<FermionField> PrecDagSolve(DerivativeDagSolver);
// MdagInvMinvVphi = M^-dag proj M^-1 V phi
// Need an adjoint solve
PrecDagSolve(DenOp,Y,MdagInvMinvVphi);
std::cout << GridLogMessage << "4d deriv solve dag "<< norm2(MdagInvMinvVphi)<<"\n";
// phi^dag (Vdag Mdag^-1) (M^-1 dV) phi
NumOp.MDeriv(force ,MdagInvMinvVphi , phi, DaggerNo ); dSdU=force;
// phi^dag (dVdag Mdag^-1) (M^-1 V) phi
NumOp.MDeriv(force , phi, MdagInvMinvVphi ,DaggerYes ); dSdU=dSdU+force;
// - 2 Re phi^dag (V^dag M^-dag)_11 (M^-1 dM M^-1 V)_11 phi
DenOp.MDeriv(force,MdagInvMinvVphi,MinvVphi,DaggerNo); dSdU=dSdU-force;
DenOp.MDeriv(force,MinvVphi,MdagInvMinvVphi,DaggerYes); dSdU=dSdU-force;
dSdU *= -1.0;
//dSdU = - Ta(dSdU);
};
};
NAMESPACE_END(Grid);
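
A note on why this even-odd variant needs the extra SchurRedBlackDiagMooeeDagSolve: as the comments in deriv() indicate, the force requires both the forward and the adjoint solve,

\[
\chi = M^{-1} V \phi, \qquad \xi = M^{-\dagger}\,\mathrm{proj}\,\chi ,
\]

and a red-black preconditioned solver set up for M does not by itself provide the application of M^{-dag}, so the adjoint system is solved separately through PrecDagSolve before the MDeriv calls.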

View File

@ -0,0 +1,6 @@
#ifndef GRID_GPARITY_H_
#define GRID_GPARITY_H_
#include<Grid/qcd/gparity/GparityFlavour.h>
#endif

View File

@ -0,0 +1,34 @@
#include <Grid/Grid.h>
NAMESPACE_BEGIN(Grid);
const std::array<const GparityFlavour, 3> GparityFlavour::sigma_mu = {{
GparityFlavour(GparityFlavour::Algebra::SigmaX),
GparityFlavour(GparityFlavour::Algebra::SigmaY),
GparityFlavour(GparityFlavour::Algebra::SigmaZ)
}};
const std::array<const GparityFlavour, 6> GparityFlavour::sigma_all = {{
GparityFlavour(GparityFlavour::Algebra::Identity),
GparityFlavour(GparityFlavour::Algebra::SigmaX),
GparityFlavour(GparityFlavour::Algebra::SigmaY),
GparityFlavour(GparityFlavour::Algebra::SigmaZ),
GparityFlavour(GparityFlavour::Algebra::ProjPlus),
GparityFlavour(GparityFlavour::Algebra::ProjMinus)
}};
const std::array<const char *, GparityFlavour::nSigma> GparityFlavour::name = {{
"SigmaX",
"MinusSigmaX",
"SigmaY",
"MinusSigmaY",
"SigmaZ",
"MinusSigmaZ",
"Identity",
"MinusIdentity",
"ProjPlus",
"MinusProjPlus",
"ProjMinus",
"MinusProjMinus"}};
NAMESPACE_END(Grid);
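
A small usage sketch for the tables defined above (hypothetical snippet; field and grid setup omitted): the static arrays provide pre-constructed flavour matrices, so

  const GparityFlavour &sigma_y = GparityFlavour::sigma_mu[1];
  GparityFlavour       proj_plus(GparityFlavour::Algebra::ProjPlus);

are equivalent ways of obtaining elements of the algebra; either can then be applied to objects carrying the two-component G-parity flavour index through the operator* overloads defined in GparityFlavour.h.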

View File

@ -0,0 +1,475 @@
#ifndef GRID_QCD_GPARITY_FLAVOUR_H
#define GRID_QCD_GPARITY_FLAVOUR_H
//Support for flavour-matrix operations acting on the G-parity flavour index
#include <array>
NAMESPACE_BEGIN(Grid);
class GparityFlavour {
public:
GRID_SERIALIZABLE_ENUM(Algebra, undef,
SigmaX, 0,
MinusSigmaX, 1,
SigmaY, 2,
MinusSigmaY, 3,
SigmaZ, 4,
MinusSigmaZ, 5,
Identity, 6,
MinusIdentity, 7,
ProjPlus, 8,
MinusProjPlus, 9,
ProjMinus, 10,
MinusProjMinus, 11
);
static constexpr unsigned int nSigma = 12;
static const std::array<const char *, nSigma> name;
static const std::array<const GparityFlavour, 3> sigma_mu;
static const std::array<const GparityFlavour, 6> sigma_all;
Algebra g;
public:
accelerator GparityFlavour(Algebra initg): g(initg) {}
};
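// For reference, the multiplication routines below implement the following 2x2
// matrices acting on the two-component G-parity flavour index:
//   SigmaX = ( 0  1 ; 1  0 )   SigmaY = ( 0 -i ; i  0 )   SigmaZ = ( 1  0 ; 0 -1 )
//   ProjPlus = (1 + SigmaY)/2   ProjMinus = (1 - SigmaY)/2   (SigmaY == \sigma_2 below)
// together with +/- Identity and the negatives of each of the above.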
// 0 1 x vector
// 1 0
template<class vtype>
accelerator_inline void multFlavourSigmaX(iVector<vtype, Ngp> &ret, const iVector<vtype, Ngp> &rhs)
{
ret(0) = rhs(1);
ret(1) = rhs(0);
};
template<class vtype>
accelerator_inline void lmultFlavourSigmaX(iMatrix<vtype, Ngp> &ret, const iMatrix<vtype, Ngp> &rhs)
{
ret(0,0) = rhs(1,0);
ret(0,1) = rhs(1,1);
ret(1,0) = rhs(0,0);
ret(1,1) = rhs(0,1);
};
template<class vtype>
accelerator_inline void rmultFlavourSigmaX(iMatrix<vtype, Ngp> &ret, const iMatrix<vtype, Ngp> &rhs)
{
ret(0,0) = rhs(0,1);
ret(0,1) = rhs(0,0);
ret(1,0) = rhs(1,1);
ret(1,1) = rhs(1,0);
};
template<class vtype>
accelerator_inline void multFlavourMinusSigmaX(iVector<vtype, Ngp> &ret, const iVector<vtype, Ngp> &rhs)
{
ret(0) = -rhs(1);
ret(1) = -rhs(0);
};
template<class vtype>
accelerator_inline void lmultFlavourMinusSigmaX(iMatrix<vtype, Ngp> &ret, const iMatrix<vtype, Ngp> &rhs)
{
ret(0,0) = -rhs(1,0);
ret(0,1) = -rhs(1,1);
ret(1,0) = -rhs(0,0);
ret(1,1) = -rhs(0,1);
};
template<class vtype>
accelerator_inline void rmultFlavourMinusSigmaX(iMatrix<vtype, Ngp> &ret, const iMatrix<vtype, Ngp> &rhs)
{
ret(0,0) = -rhs(0,1);
ret(0,1) = -rhs(0,0);
ret(1,0) = -rhs(1,1);
ret(1,1) = -rhs(1,0);
};
// 0 -i x vector
// i 0
template<class vtype>
accelerator_inline void multFlavourSigmaY(iVector<vtype, Ngp> &ret, const iVector<vtype, Ngp> &rhs)
{
ret(0) = timesMinusI(rhs(1));
ret(1) = timesI(rhs(0));
};
template<class vtype>
accelerator_inline void lmultFlavourSigmaY(iMatrix<vtype, Ngp> &ret, const iMatrix<vtype, Ngp> &rhs)
{
ret(0,0) = timesMinusI(rhs(1,0));
ret(0,1) = timesMinusI(rhs(1,1));
ret(1,0) = timesI(rhs(0,0));
ret(1,1) = timesI(rhs(0,1));
};
template<class vtype>
accelerator_inline void rmultFlavourSigmaY(iMatrix<vtype, Ngp> &ret, const iMatrix<vtype, Ngp> &rhs)
{
ret(0,0) = timesI(rhs(0,1));
ret(0,1) = timesMinusI(rhs(0,0));
ret(1,0) = timesI(rhs(1,1));
ret(1,1) = timesMinusI(rhs(1,0));
};
template<class vtype>
accelerator_inline void multFlavourMinusSigmaY(iVector<vtype, Ngp> &ret, const iVector<vtype, Ngp> &rhs)
{
ret(0) = timesI(rhs(1));
ret(1) = timesMinusI(rhs(0));
};
template<class vtype>
accelerator_inline void lmultFlavourMinusSigmaY(iMatrix<vtype, Ngp> &ret, const iMatrix<vtype, Ngp> &rhs)
{
ret(0,0) = timesI(rhs(1,0));
ret(0,1) = timesI(rhs(1,1));
ret(1,0) = timesMinusI(rhs(0,0));
ret(1,1) = timesMinusI(rhs(0,1));
};
template<class vtype>
accelerator_inline void rmultFlavourMinusSigmaY(iMatrix<vtype, Ngp> &ret, const iMatrix<vtype, Ngp> &rhs)
{
ret(0,0) = timesMinusI(rhs(0,1));
ret(0,1) = timesI(rhs(0,0));
ret(1,0) = timesMinusI(rhs(1,1));
ret(1,1) = timesI(rhs(1,0));
};
// 1 0 x vector
// 0 -1
template<class vtype>
accelerator_inline void multFlavourSigmaZ(iVector<vtype, Ngp> &ret, const iVector<vtype, Ngp> &rhs)
{
ret(0) = rhs(0);
ret(1) = -rhs(1);
};
template<class vtype>
accelerator_inline void lmultFlavourSigmaZ(iMatrix<vtype, Ngp> &ret, const iMatrix<vtype, Ngp> &rhs)
{
ret(0,0) = rhs(0,0);
ret(0,1) = rhs(0,1);
ret(1,0) = -rhs(1,0);
ret(1,1) = -rhs(1,1);
};
template<class vtype>
accelerator_inline void rmultFlavourSigmaZ(iMatrix<vtype, Ngp> &ret, const iMatrix<vtype, Ngp> &rhs)
{
ret(0,0) = rhs(0,0);
ret(0,1) = -rhs(0,1);
ret(1,0) = rhs(1,0);
ret(1,1) = -rhs(1,1);
};
template<class vtype>
accelerator_inline void multFlavourMinusSigmaZ(iVector<vtype, Ngp> &ret, const iVector<vtype, Ngp> &rhs)
{
ret(0) = -rhs(0);
ret(1) = rhs(1);
};
template<class vtype>
accelerator_inline void lmultFlavourMinusSigmaZ(iMatrix<vtype, Ngp> &ret, const iMatrix<vtype, Ngp> &rhs)
{
ret(0,0) = -rhs(0,0);
ret(0,1) = -rhs(0,1);
ret(1,0) = rhs(1,0);
ret(1,1) = rhs(1,1);
};
template<class vtype>
accelerator_inline void rmultFlavourMinusSigmaZ(iMatrix<vtype, Ngp> &ret, const iMatrix<vtype, Ngp> &rhs)
{
ret(0,0) = -rhs(0,0);
ret(0,1) = rhs(0,1);
ret(1,0) = -rhs(1,0);
ret(1,1) = rhs(1,1);
};
template<class vtype>
accelerator_inline void multFlavourIdentity(iVector<vtype, Ngp> &ret, const iVector<vtype, Ngp> &rhs)
{
ret(0) = rhs(0);
ret(1) = rhs(1);
};
template<class vtype>
accelerator_inline void lmultFlavourIdentity(iMatrix<vtype, Ngp> &ret, const iMatrix<vtype, Ngp> &rhs)
{
ret(0,0) = rhs(0,0);
ret(0,1) = rhs(0,1);
ret(1,0) = rhs(1,0);
ret(1,1) = rhs(1,1);
};
template<class vtype>
accelerator_inline void rmultFlavourIdentity(iMatrix<vtype, Ngp> &ret, const iMatrix<vtype, Ngp> &rhs)
{
ret(0,0) = rhs(0,0);
ret(0,1) = rhs(0,1);
ret(1,0) = rhs(1,0);
ret(1,1) = rhs(1,1);
};
template<class vtype>
accelerator_inline void multFlavourMinusIdentity(iVector<vtype, Ngp> &ret, const iVector<vtype, Ngp> &rhs)
{
ret(0) = -rhs(0);
ret(1) = -rhs(1);
};
template<class vtype>
accelerator_inline void lmultFlavourMinusIdentity(iMatrix<vtype, Ngp> &ret, const iMatrix<vtype, Ngp> &rhs)
{
ret(0,0) = -rhs(0,0);
ret(0,1) = -rhs(0,1);
ret(1,0) = -rhs(1,0);
ret(1,1) = -rhs(1,1);
};
template<class vtype>
accelerator_inline void rmultFlavourMinusIdentity(iMatrix<vtype, Ngp> &ret, const iMatrix<vtype, Ngp> &rhs)
{
ret(0,0) = -rhs(0,0);
ret(0,1) = -rhs(0,1);
ret(1,0) = -rhs(1,0);
ret(1,1) = -rhs(1,1);
};
//G-parity flavour projection 1/2(1+\sigma_2)
//1 -i
//i 1
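// Note: ProjPlus and ProjMinus are complementary projectors:
// ProjPlus^2 = ProjPlus, ProjMinus^2 = ProjMinus, ProjPlus*ProjMinus = 0, ProjPlus + ProjMinus = 1.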
template<class vtype>
accelerator_inline void multFlavourProjPlus(iVector<vtype, Ngp> &ret, const iVector<vtype, Ngp> &rhs)
{
ret(0) = 0.5*rhs(0) + 0.5*timesMinusI(rhs(1));
ret(1) = 0.5*timesI(rhs(0)) + 0.5*rhs(1);
};
template<class vtype>
accelerator_inline void lmultFlavourProjPlus(iMatrix<vtype, Ngp> &ret, const iMatrix<vtype, Ngp> &rhs)
{
ret(0,0) = 0.5*rhs(0,0) + 0.5*timesMinusI(rhs(1,0));
ret(0,1) = 0.5*rhs(0,1) + 0.5*timesMinusI(rhs(1,1));
ret(1,0) = 0.5*timesI(rhs(0,0)) + 0.5*rhs(1,0);
ret(1,1) = 0.5*timesI(rhs(0,1)) + 0.5*rhs(1,1);
};
template<class vtype>
accelerator_inline void rmultFlavourProjPlus(iMatrix<vtype, Ngp> &ret, const iMatrix<vtype, Ngp> &rhs)
{
ret(0,0) = 0.5*rhs(0,0) + 0.5*timesI(rhs(0,1));
ret(0,1) = 0.5*timesMinusI(rhs(0,0)) + 0.5*rhs(0,1);
ret(1,0) = 0.5*rhs(1,0) + 0.5*timesI(rhs(1,1));
ret(1,1) = 0.5*timesMinusI(rhs(1,0)) + 0.5*rhs(1,1);
};
template<class vtype>
accelerator_inline void multFlavourMinusProjPlus(iVector<vtype, Ngp> &ret, const iVector<vtype, Ngp> &rhs)
{
ret(0) = -0.5*rhs(0) + 0.5*timesI(rhs(1));
ret(1) = 0.5*timesMinusI(rhs(0)) - 0.5*rhs(1);
};
template<class vtype>
accelerator_inline void lmultFlavourMinusProjPlus(iMatrix<vtype, Ngp> &ret, const iMatrix<vtype, Ngp> &rhs)
{
ret(0,0) = -0.5*rhs(0,0) + 0.5*timesI(rhs(1,0));
ret(0,1) = -0.5*rhs(0,1) + 0.5*timesI(rhs(1,1));
ret(1,0) = 0.5*timesMinusI(rhs(0,0)) - 0.5*rhs(1,0);
ret(1,1) = 0.5*timesMinusI(rhs(0,1)) - 0.5*rhs(1,1);
};
template<class vtype>
accelerator_inline void rmultFlavourMinusProjPlus(iMatrix<vtype, Ngp> &ret, const iMatrix<vtype, Ngp> &rhs)
{
ret(0,0) = -0.5*rhs(0,0) + 0.5*timesMinusI(rhs(0,1));
ret(0,1) = 0.5*timesI(rhs(0,0)) - 0.5*rhs(0,1);
ret(1,0) = -0.5*rhs(1,0) + 0.5*timesMinusI(rhs(1,1));
ret(1,1) = 0.5*timesI(rhs(1,0)) - 0.5*rhs(1,1);
};
//G-parity flavour projection 1/2(1-\sigma_2)
//1 i
//-i 1
template<class vtype>
accelerator_inline void multFlavourProjMinus(iVector<vtype, Ngp> &ret, const iVector<vtype, Ngp> &rhs)
{
ret(0) = 0.5*rhs(0) + 0.5*timesI(rhs(1));
ret(1) = 0.5*timesMinusI(rhs(0)) + 0.5*rhs(1);
};
template<class vtype>
accelerator_inline void lmultFlavourProjMinus(iMatrix<vtype, Ngp> &ret, const iMatrix<vtype, Ngp> &rhs)
{
ret(0,0) = 0.5*rhs(0,0) + 0.5*timesI(rhs(1,0));
ret(0,1) = 0.5*rhs(0,1) + 0.5*timesI(rhs(1,1));
ret(1,0) = 0.5*timesMinusI(rhs(0,0)) + 0.5*rhs(1,0);
ret(1,1) = 0.5*timesMinusI(rhs(0,1)) + 0.5*rhs(1,1);
};
template<class vtype>
accelerator_inline void rmultFlavourProjMinus(iMatrix<vtype, Ngp> &ret, const iMatrix<vtype, Ngp> &rhs)
{
ret(0,0) = 0.5*rhs(0,0) + 0.5*timesMinusI(rhs(0,1));
ret(0,1) = 0.5*timesI(rhs(0,0)) + 0.5*rhs(0,1);
ret(1,0) = 0.5*rhs(1,0) + 0.5*timesMinusI(rhs(1,1));
ret(1,1) = 0.5*timesI(rhs(1,0)) + 0.5*rhs(1,1);
};
template<class vtype>
accelerator_inline void multFlavourMinusProjMinus(iVector<vtype, Ngp> &ret, const iVector<vtype, Ngp> &rhs)
{
ret(0) = -0.5*rhs(0) + 0.5*timesMinusI(rhs(1));
ret(1) = 0.5*timesI(rhs(0)) - 0.5*rhs(1);
};
template<class vtype>
accelerator_inline void lmultFlavourMinusProjMinus(iMatrix<vtype, Ngp> &ret, const iMatrix<vtype, Ngp> &rhs)
{
ret(0,0) = -0.5*rhs(0,0) + 0.5*timesMinusI(rhs(1,0));
ret(0,1) = -0.5*rhs(0,1) + 0.5*timesMinusI(rhs(1,1));
ret(1,0) = 0.5*timesI(rhs(0,0)) - 0.5*rhs(1,0);
ret(1,1) = 0.5*timesI(rhs(0,1)) - 0.5*rhs(1,1);
};
template<class vtype>
accelerator_inline void rmultFlavourMinusProjMinus(iMatrix<vtype, Ngp> &ret, const iMatrix<vtype, Ngp> &rhs)
{
ret(0,0) = -0.5*rhs(0,0) + 0.5*timesI(rhs(0,1));
ret(0,1) = 0.5*timesMinusI(rhs(0,0)) - 0.5*rhs(0,1);
ret(1,0) = -0.5*rhs(1,0) + 0.5*timesI(rhs(1,1));
ret(1,1) = 0.5*timesMinusI(rhs(1,0)) - 0.5*rhs(1,1);
};
template<class vtype>
accelerator_inline auto operator*(const GparityFlavour &G, const iVector<vtype, Ngp> &arg)
->typename std::enable_if<matchGridTensorIndex<iVector<vtype, Ngp>, GparityFlavourTensorIndex>::value, iVector<vtype, Ngp>>::type
{
iVector<vtype, Ngp> ret;
switch (G.g)
{
case GparityFlavour::Algebra::SigmaX:
multFlavourSigmaX(ret, arg); break;
case GparityFlavour::Algebra::MinusSigmaX:
multFlavourMinusSigmaX(ret, arg); break;
case GparityFlavour::Algebra::SigmaY:
multFlavourSigmaY(ret, arg); break;
case GparityFlavour::Algebra::MinusSigmaY:
multFlavourMinusSigmaY(ret, arg); break;
case GparityFlavour::Algebra::SigmaZ:
multFlavourSigmaZ(ret, arg); break;
case GparityFlavour::Algebra::MinusSigmaZ:
multFlavourMinusSigmaZ(ret, arg); break;
case GparityFlavour::Algebra::Identity:
multFlavourIdentity(ret, arg); break;
case GparityFlavour::Algebra::MinusIdentity:
multFlavourMinusIdentity(ret, arg); break;
case GparityFlavour::Algebra::ProjPlus:
multFlavourProjPlus(ret, arg); break;
case GparityFlavour::Algebra::MinusProjPlus:
multFlavourMinusProjPlus(ret, arg); break;
case GparityFlavour::Algebra::ProjMinus:
multFlavourProjMinus(ret, arg); break;
case GparityFlavour::Algebra::MinusProjMinus:
multFlavourMinusProjMinus(ret, arg); break;
default: assert(0);
}
return ret;
}
template<class vtype>
accelerator_inline auto operator*(const GparityFlavour &G, const iMatrix<vtype, Ngp> &arg)
->typename std::enable_if<matchGridTensorIndex<iMatrix<vtype, Ngp>, GparityFlavourTensorIndex>::value, iMatrix<vtype, Ngp>>::type
{
iMatrix<vtype, Ngp> ret;
switch (G.g)
{
case GparityFlavour::Algebra::SigmaX:
lmultFlavourSigmaX(ret, arg); break;
case GparityFlavour::Algebra::MinusSigmaX:
lmultFlavourMinusSigmaX(ret, arg); break;
case GparityFlavour::Algebra::SigmaY:
lmultFlavourSigmaY(ret, arg); break;
case GparityFlavour::Algebra::MinusSigmaY:
lmultFlavourMinusSigmaY(ret, arg); break;
case GparityFlavour::Algebra::SigmaZ:
lmultFlavourSigmaZ(ret, arg); break;
case GparityFlavour::Algebra::MinusSigmaZ:
lmultFlavourMinusSigmaZ(ret, arg); break;
case GparityFlavour::Algebra::Identity:
lmultFlavourIdentity(ret, arg); break;
case GparityFlavour::Algebra::MinusIdentity:
lmultFlavourMinusIdentity(ret, arg); break;
case GparityFlavour::Algebra::ProjPlus:
lmultFlavourProjPlus(ret, arg); break;
case GparityFlavour::Algebra::MinusProjPlus:
lmultFlavourMinusProjPlus(ret, arg); break;
case GparityFlavour::Algebra::ProjMinus:
lmultFlavourProjMinus(ret, arg); break;
case GparityFlavour::Algebra::MinusProjMinus:
lmultFlavourMinusProjMinus(ret, arg); break;
default: assert(0);
}
return ret;
}
template<class vtype>
accelerator_inline auto operator*(const iMatrix<vtype, Ngp> &arg, const GparityFlavour &G)
->typename std::enable_if<matchGridTensorIndex<iMatrix<vtype, Ngp>, GparityFlavourTensorIndex>::value, iMatrix<vtype, Ngp>>::type
{
iMatrix<vtype, Ngp> ret;
switch (G.g)
{
case GparityFlavour::Algebra::SigmaX:
rmultFlavourSigmaX(ret, arg); break;
case GparityFlavour::Algebra::MinusSigmaX:
rmultFlavourMinusSigmaX(ret, arg); break;
case GparityFlavour::Algebra::SigmaY:
rmultFlavourSigmaY(ret, arg); break;
case GparityFlavour::Algebra::MinusSigmaY:
rmultFlavourMinusSigmaY(ret, arg); break;
case GparityFlavour::Algebra::SigmaZ:
rmultFlavourSigmaZ(ret, arg); break;
case GparityFlavour::Algebra::MinusSigmaZ:
rmultFlavourMinusSigmaZ(ret, arg); break;
case GparityFlavour::Algebra::Identity:
rmultFlavourIdentity(ret, arg); break;
case GparityFlavour::Algebra::MinusIdentity:
rmultFlavourMinusIdentity(ret, arg); break;
case GparityFlavour::Algebra::ProjPlus:
rmultFlavourProjPlus(ret, arg); break;
case GparityFlavour::Algebra::MinusProjPlus:
rmultFlavourMinusProjPlus(ret, arg); break;
case GparityFlavour::Algebra::ProjMinus:
rmultFlavourProjMinus(ret, arg); break;
case GparityFlavour::Algebra::MinusProjMinus:
rmultFlavourMinusProjMinus(ret, arg); break;
default: assert(0);
}
return ret;
}
NAMESPACE_END(Grid);
#endif // include guard

View File

@ -129,18 +129,10 @@ public:
Runner(S);
}
//////////////////////////////////////////////////////////////////
private:
template <class SmearingPolicy>
void Runner(SmearingPolicy &Smearing) {
auto UGrid = Resources.GetCartesian();
Resources.AddRNGs();
Field U(UGrid);
// Can move this outside?
typedef IntegratorType<SmearingPolicy> TheIntegrator;
TheIntegrator MDynamics(UGrid, Parameters.MD, TheAction, Smearing);
//Use the checkpointer to initialize the RNGs and the gauge field, writing the resulting gauge field into U.
//This is called automatically by Run but may be useful elsewhere, e.g. for integrator tuning experiments
void initializeGaugeFieldAndRNGs(Field &U){
if(!Resources.haveRNGs()) Resources.AddRNGs();
if (Parameters.StartingType == "HotStart") {
// Hot start
@ -167,6 +159,25 @@ private:
<< "Valid [HotStart, ColdStart, TepidStart, CheckpointStart]\n";
exit(1);
}
}
//////////////////////////////////////////////////////////////////
private:
template <class SmearingPolicy>
void Runner(SmearingPolicy &Smearing) {
auto UGrid = Resources.GetCartesian();
Field U(UGrid);
initializeGaugeFieldAndRNGs(U);
typedef IntegratorType<SmearingPolicy> TheIntegrator;
TheIntegrator MDynamics(UGrid, Parameters.MD, TheAction, Smearing);
// Sets the momentum filter
MDynamics.setMomentumFilter(*(Resources.GetMomentumFilter()));
Smearing.set_Field(U);

View File

@ -34,6 +34,7 @@ directory
* @brief Classes for Hybrid Monte Carlo update
*
* @author Guido Cossu
* @author Peter Boyle
*/
//--------------------------------------------------------------------
#pragma once
@ -115,22 +116,17 @@ private:
random(sRNG, rn_test);
std::cout << GridLogMessage
<< "--------------------------------------------------\n";
std::cout << GridLogMessage << "exp(-dH) = " << prob
<< " Random = " << rn_test << "\n";
std::cout << GridLogMessage
<< "Acc. Probability = " << ((prob < 1.0) ? prob : 1.0) << "\n";
std::cout << GridLogHMC << "--------------------------------------------------\n";
std::cout << GridLogHMC << "exp(-dH) = " << prob << " Random = " << rn_test << "\n";
std::cout << GridLogHMC << "Acc. Probability = " << ((prob < 1.0) ? prob : 1.0) << "\n";
if ((prob > 1.0) || (rn_test <= prob)) { // accepted
std::cout << GridLogMessage << "Metropolis_test -- ACCEPTED\n";
std::cout << GridLogMessage
<< "--------------------------------------------------\n";
std::cout << GridLogHMC << "Metropolis_test -- ACCEPTED\n";
std::cout << GridLogHMC << "--------------------------------------------------\n";
return true;
} else { // rejected
std::cout << GridLogMessage << "Metropolis_test -- REJECTED\n";
std::cout << GridLogMessage
<< "--------------------------------------------------\n";
std::cout << GridLogHMC << "Metropolis_test -- REJECTED\n";
std::cout << GridLogHMC << "--------------------------------------------------\n";
return false;
}
}
@ -139,19 +135,68 @@ private:
// Evolution
/////////////////////////////////////////////////////////
RealD evolve_hmc_step(Field &U) {
TheIntegrator.refresh(U, sRNG, pRNG); // set U and initialize P and phi's
RealD H0 = TheIntegrator.S(U); // initial state action
GridBase *Grid = U.Grid();
//////////////////////////////////////////////////////////////////////////////////////////////////////
// Mainly for DDHMC: perform a random translation of U, modulo the lattice volume
//////////////////////////////////////////////////////////////////////////////////////////////////////
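// Shifting by a fresh random offset each trajectory avoids pinning any fixed (e.g. domain-decomposition) boundaries to the same physical sites.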
std::cout << GridLogMessage << "--------------------------------------------------\n";
std::cout << GridLogMessage << "Random shifting gauge field by [";
for(int d=0;d<Grid->Nd();d++) {
int L = Grid->GlobalDimensions()[d];
RealD rn_uniform; random(sRNG, rn_uniform);
int shift = (int) (rn_uniform*L);
std::cout << shift;
if(d<Grid->Nd()-1) std::cout <<",";
else std::cout <<"]\n";
U = Cshift(U,d,shift);
}
std::cout << GridLogMessage << "--------------------------------------------------\n";
TheIntegrator.reset_timer();
//////////////////////////////////////////////////////////////////////////////////////////////////////
// set U and initialize P and phi's
//////////////////////////////////////////////////////////////////////////////////////////////////////
std::cout << GridLogMessage << "--------------------------------------------------\n";
std::cout << GridLogMessage << "Refresh momenta and pseudofermions";
TheIntegrator.refresh(U, sRNG, pRNG);
std::cout << GridLogMessage << "--------------------------------------------------\n";
//////////////////////////////////////////////////////////////////////////////////////////////////////
// initial state action
//////////////////////////////////////////////////////////////////////////////////////////////////////
std::cout << GridLogMessage << "--------------------------------------------------\n";
std::cout << GridLogMessage << "Compute initial action";
RealD H0 = TheIntegrator.S(U);
std::cout << GridLogMessage << "--------------------------------------------------\n";
std::streamsize current_precision = std::cout.precision();
std::cout.precision(15);
std::cout << GridLogMessage << "Total H before trajectory = " << H0 << "\n";
std::cout << GridLogHMC << "Total H before trajectory = " << H0 << "\n";
std::cout.precision(current_precision);
std::cout << GridLogMessage << "--------------------------------------------------\n";
std::cout << GridLogMessage << " Molecular Dynamics evolution ";
TheIntegrator.integrate(U);
std::cout << GridLogMessage << "--------------------------------------------------\n";
RealD H1 = TheIntegrator.S(U); // updated state action
//////////////////////////////////////////////////////////////////////////////////////////////////////
// updated state action
//////////////////////////////////////////////////////////////////////////////////////////////////////
std::cout << GridLogMessage << "--------------------------------------------------\n";
std::cout << GridLogMessage << "Compute final action";
RealD H1 = TheIntegrator.S(U);
std::cout << GridLogMessage << "--------------------------------------------------\n";
///////////////////////////////////////////////////////////
if(0){
std::cout << "------------------------- Reversibility test" << std::endl;
@ -163,17 +208,16 @@ private:
}
///////////////////////////////////////////////////////////
std::cout.precision(15);
std::cout << GridLogMessage << "Total H after trajectory = " << H1
<< " dH = " << H1 - H0 << "\n";
std::cout << GridLogHMC << "--------------------------------------------------\n";
std::cout << GridLogHMC << "Total H after trajectory = " << H1 << " dH = " << H1 - H0 << "\n";
std::cout << GridLogHMC << "--------------------------------------------------\n";
std::cout.precision(current_precision);
return (H1 - H0);
}
public:
/////////////////////////////////////////
@ -195,10 +239,13 @@ public:
// Actual updates (evolve a copy Ucopy then copy back eventually)
unsigned int FinalTrajectory = Params.Trajectories + Params.NoMetropolisUntil + Params.StartTrajectory;
for (int traj = Params.StartTrajectory; traj < FinalTrajectory; ++traj) {
std::cout << GridLogMessage << "-- # Trajectory = " << traj << "\n";
std::cout << GridLogHMC << "-- # Trajectory = " << traj << "\n";
if (traj < Params.StartTrajectory + Params.NoMetropolisUntil) {
std::cout << GridLogMessage << "-- Thermalization" << std::endl;
std::cout << GridLogHMC << "-- Thermalization" << std::endl;
}
double t0=usecond();
@ -207,20 +254,19 @@ public:
DeltaH = evolve_hmc_step(Ucopy);
// Metropolis-Hastings test
bool accept = true;
if (traj >= Params.StartTrajectory + Params.NoMetropolisUntil) {
if (Params.MetropolisTest && traj >= Params.StartTrajectory + Params.NoMetropolisUntil) {
accept = metropolis_test(DeltaH);
} else {
std::cout << GridLogMessage << "Skipping Metropolis test" << std::endl;
std::cout << GridLogHMC << "Skipping Metropolis test" << std::endl;
}
if (accept)
Ucur = Ucopy;
double t1=usecond();
std::cout << GridLogMessage << "Total time for trajectory (s): " << (t1-t0)/1e6 << std::endl;
std::cout << GridLogHMC << "Total time for trajectory (s): " << (t1-t0)/1e6 << std::endl;
TheIntegrator.print_timer();
for (int obs = 0; obs < Observables.size(); obs++) {
std::cout << GridLogDebug << "Observables # " << obs << std::endl;
@ -228,7 +274,7 @@ public:
std::cout << GridLogDebug << "Observables pointer " << Observables[obs] << std::endl;
Observables[obs]->TrajectoryComplete(traj + 1, Ucur, sRNG, pRNG);
}
std::cout << GridLogMessage << ":::::::::::::::::::::::::::::::::::::::::::" << std::endl;
std::cout << GridLogHMC << ":::::::::::::::::::::::::::::::::::::::::::" << std::endl;
}
}

View File

@ -72,6 +72,8 @@ class HMCResourceManager {
typedef HMCModuleBase< BaseHmcCheckpointer<ImplementationPolicy> > CheckpointerBaseModule;
typedef HMCModuleBase< HmcObservable<typename ImplementationPolicy::Field> > ObservableBaseModule;
typedef ActionModuleBase< Action<typename ImplementationPolicy::Field>, GridModule > ActionBaseModule;
typedef typename ImplementationPolicy::Field MomentaField;
typedef typename ImplementationPolicy::Field Field;
// Named storage for grid pairs (std + red-black)
std::unordered_map<std::string, GridModule> Grids;
@ -80,6 +82,9 @@ class HMCResourceManager {
// SmearingModule<ImplementationPolicy> Smearing;
std::unique_ptr<CheckpointerBaseModule> CP;
// Momentum filter
std::unique_ptr<MomentumFilterBase<typename ImplementationPolicy::Field> > Filter;
// A vector of HmcObservable modules
std::vector<std::unique_ptr<ObservableBaseModule> > ObservablesList;
@ -90,6 +95,7 @@ class HMCResourceManager {
bool have_RNG;
bool have_CheckPointer;
bool have_Filter;
// NOTE: operator << is not overloaded for std::vector<string>
// so this function is necessary
@ -101,7 +107,7 @@ class HMCResourceManager {
public:
HMCResourceManager() : have_RNG(false), have_CheckPointer(false) {}
HMCResourceManager() : have_RNG(false), have_CheckPointer(false), have_Filter(false) {}
template <class ReaderClass, class vector_type = vComplex >
void initialize(ReaderClass &Read){
@ -129,6 +135,7 @@ public:
RNGModuleParameters RNGpar(Read);
SetRNGSeeds(RNGpar);
// Observables
auto &ObsFactory = HMC_ObservablesModuleFactory<observable_string, typename ImplementationPolicy::Field, ReaderClass>::getInstance();
Read.push(observable_string);// here must check if existing...
@ -208,6 +215,16 @@ public:
AddGrid(s, Mod);
}
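// Register a momentum filter (at most one); the resource manager takes ownership of the pointer.
// If none has been registered, GetMomentumFilter() lazily installs a trivial MomentumFilterNone.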
void SetMomentumFilter( MomentumFilterBase<typename ImplementationPolicy::Field> * MomFilter) {
assert(have_Filter==false);
Filter = std::unique_ptr<MomentumFilterBase<typename ImplementationPolicy::Field> >(MomFilter);
have_Filter = true;
}
MomentumFilterBase<typename ImplementationPolicy::Field> *GetMomentumFilter(void) {
if ( !have_Filter)
SetMomentumFilter(new MomentumFilterNone<typename ImplementationPolicy::Field>());
return Filter.get();
}
GridCartesian* GetCartesian(std::string s = "") {
if (s.empty()) s = Grids.begin()->first;
@ -226,6 +243,9 @@ public:
//////////////////////////////////////////////////////
// Random number generators
//////////////////////////////////////////////////////
//Return true if the RNG objects have been instantiated
bool haveRNGs() const{ return have_RNG; }
void AddRNGs(std::string s = "") {
// Couple the RNGs to the GridModule tagged by s

View File

@ -1,63 +1,61 @@
# Using HMC in Grid
Using HMC in Grid version 0.5.1
These are the instructions to use the Generalised HMC on Grid as of commit `749b802`.
Disclaimer: Grid is still under active development so any information here can be changed in future releases.
These are the instructions to use the Generalised HMC on Grid version 0.5.1.
Disclaimer: GRID is still under active development so any information here can be changed in future releases.
## Command line options
(relevant file `GenericHMCrunner.h`)
Command line options
===================
(relevant file GenericHMCrunner.h)
The initial configuration can be changed at the command line using
`--StartingType STARTING_TYPE`, where `STARTING_TYPE` is one of
`HotStart`, `ColdStart`, `TepidStart`, and `CheckpointStart`.
Default: `--StartingType HotStart`
--StartType <your choice>
valid choices, one among these
HotStart, ColdStart, TepidStart, CheckpointStart
default: HotStart
Example:
```
./My_hmc_exec --StartingType HotStart
```
example
./My_hmc_exec --StartType HotStart
The `CheckpointStart` option uses the prefix for the configuration and RNG seed files defined in your executable, and the initial configuration is specified by
`--StartingTrajectory STARTING_TRAJECTORY`, where `STARTING_TRAJECTORY` is an integer.
Default: `--StartingTrajectory 0`
The CheckpointStart option uses the prefix for the configurations and rng seed files defined in your executable and the initial configuration is specified by
--StartTrajectory <integer>
default: 0
The number of trajectories for a specific run is specified at the command line by
`--Trajectories TRAJECTORIES`, where `TRAJECTORIES` is an integer.
Default: `--Trajectories 1`
--Trajectories <integer>
default: 1
The number of thermalization steps (i.e. steps when the Metropolis acceptance check is turned off) is specified by
`--Thermalizations THERMALIZATIONS`, where `THERMALIZATIONS` is an integer.
Default: `--Thermalizations 10`
--Thermalizations <integer>
default: 10
Any other parameter is defined in the source for the executable.
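For example, restarting from a checkpoint at trajectory 100 and running ten further trajectories could look like this (an illustrative invocation; the flag spellings follow the descriptions above):
```
./My_hmc_exec --StartingType CheckpointStart --StartingTrajectory 100 --Trajectories 10
```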
## HMC controls
HMC controls
===========
The lines
```
std::vector<int> SerSeed({1, 2, 3, 4, 5});
std::vector<int> ParSeed({6, 7, 8, 9, 10});
```
define the seeds for the serial and the parallel RNG.
The line
```
TheHMC.MDparameters.set(20, 1.0);// MDsteps, traj length
```
declares the number of molecular dynamics steps and the total trajectory length.
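With these values the top-level integrator step size is the trajectory length divided by the number of MD steps, i.e. 1.0/20 = 0.05 (nested integrator levels subdivide this further).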
## Actions
Actions
======
Action names are defined in the directory `Grid/qcd/action`.
Action names are defined in the file
lib/qcd/Actions.h
Gauge actions list (from `Grid/qcd/action/gauge/Gauge.h`):
Gauge actions list:
```
WilsonGaugeActionR;
WilsonGaugeActionF;
WilsonGaugeActionD;
@ -70,9 +68,8 @@ IwasakiGaugeActionD;
SymanzikGaugeActionR;
SymanzikGaugeActionF;
SymanzikGaugeActionD;
```
```
ConjugateWilsonGaugeActionR;
ConjugateWilsonGaugeActionF;
ConjugateWilsonGaugeActionD;
@ -85,23 +82,26 @@ ConjugateIwasakiGaugeActionD;
ConjugateSymanzikGaugeActionR;
ConjugateSymanzikGaugeActionF;
ConjugateSymanzikGaugeActionD;
```
Each of these actions accepts a single parameter at creation time (beta).
Example for creating a Symanzik action with beta=4.0
```
SymanzikGaugeActionR(4.0)
```
Scalar actions list (from `Grid/qcd/action/scalar/Scalar.h`):
```
ScalarActionR;
ScalarActionF;
ScalarActionD;
```
The suffixes `R`, `F`, `D` in the action names refer to the `Real` type
(whose precision is defined at compile time by the `--enable-precision` flag passed to configure),
`Float`, and `Double`; the latter two force the precision of the action to 32 and 64 bit respectively.
Each of these actions accepts a single parameter at creation time (beta).
Example for creating a Symanzik action with beta=4.0
SymanzikGaugeActionR(4.0)
The suffixes R, F, D in the action names refer to the Real type
(whose precision is defined at compile time by the --enable-precision flag passed to configure),
Float, and Double; the latter two force the precision of the action to 32 and 64 bit respectively.

View File

@ -33,7 +33,6 @@ directory
#define INTEGRATOR_INCLUDED
#include <memory>
#include "MomentumFilter.h"
NAMESPACE_BEGIN(Grid);
@ -67,6 +66,7 @@ public:
template <class FieldImplementation, class SmearingPolicy, class RepresentationPolicy>
class Integrator {
protected:
typedef typename FieldImplementation::Field MomentaField; //for readability
typedef typename FieldImplementation::Field Field;
@ -119,6 +119,7 @@ protected:
}
} update_P_hireps{};
void update_P(MomentaField& Mom, Field& U, int level, double ep) {
// input U actually not used in the fundamental case
// Fundamental updates, include smearing
@ -130,25 +131,45 @@ protected:
Field& Us = Smearer.get_U(as[level].actions.at(a)->is_smeared);
double start_force = usecond();
as[level].actions.at(a)->deriv_timer_start();
as[level].actions.at(a)->deriv(Us, force); // deriv should NOT include Ta
as[level].actions.at(a)->deriv_timer_stop();
std::cout << GridLogIntegrator << "Smearing (on/off): " << as[level].actions.at(a)->is_smeared << std::endl;
auto name = as[level].actions.at(a)->action_name();
if (as[level].actions.at(a)->is_smeared) Smearer.smeared_force(force);
DumpSliceNorm("force before Ta",force,Nd-1);
force = FieldImplementation::projectForce(force); // Ta for gauge fields
double end_force = usecond();
Real force_abs = std::sqrt(norm2(force)/U.Grid()->gSites());
std::cout << GridLogIntegrator << "["<<level<<"]["<<a<<"] Force average: " << force_abs << std::endl;
DumpSliceNorm("force before filter",force,Nd-1);
MomFilter->applyFilter(force);
Real force_abs = std::sqrt(norm2(force)/U.Grid()->gSites()); //average per-site norm. nb. norm2(latt) = \sum_x norm2(latt[x])
Real impulse_abs = force_abs * ep * HMC_MOMENTUM_DENOMINATOR;
Real force_max = std::sqrt(maxLocalNorm2(force));
Real impulse_max = force_max * ep * HMC_MOMENTUM_DENOMINATOR;
as[level].actions.at(a)->deriv_log(force_abs,force_max);
std::cout << GridLogIntegrator<< "["<<level<<"]["<<a<<"] Force average: " << force_abs <<" "<<name<<std::endl;
std::cout << GridLogIntegrator<< "["<<level<<"]["<<a<<"] Force max : " << force_max <<" "<<name<<std::endl;
std::cout << GridLogIntegrator<< "["<<level<<"]["<<a<<"] Fdt average : " << impulse_abs <<" "<<name<<std::endl;
std::cout << GridLogIntegrator<< "["<<level<<"]["<<a<<"] Fdt max : " << impulse_max <<" "<<name<<std::endl;
Mom -= force * ep * HMC_MOMENTUM_DENOMINATOR;
double end_full = usecond();
double time_full = (end_full - start_full) / 1e3;
double time_force = (end_force - start_force) / 1e3;
std::cout << GridLogMessage << "["<<level<<"]["<<a<<"] P update elapsed time: " << time_full << " ms (force: " << time_force << " ms)" << std::endl;
DumpSliceNorm("force after filter",force,Nd-1);
}
// Force from the other representations
as[level].apply(update_P_hireps, Representations, Mom, U, ep);
MomFilter->applyFilter(Mom);
}
void update_U(Field& U, double ep)
@ -162,8 +183,12 @@ protected:
void update_U(MomentaField& Mom, Field& U, double ep)
{
MomentaField MomFiltered(Mom.Grid());
MomFiltered = Mom;
MomFilter->applyFilter(MomFiltered);
// exponential of Mom*U in the gauge fields case
FieldImplementation::update_field(Mom, U, ep);
FieldImplementation::update_field(MomFiltered, U, ep);
// Update the smeared fields, can be implemented as observer
Smearer.set_Field(U);
@ -206,6 +231,66 @@ public:
const MomentaField & getMomentum() const{ return P; }
void reset_timer(void)
{
for (int level = 0; level < as.size(); ++level) {
for (int actionID = 0; actionID < as[level].actions.size(); ++actionID) {
as[level].actions.at(actionID)->reset_timer();
}
}
}
void print_timer(void)
{
std::cout << GridLogMessage << ":::::::::::::::::::::::::::::::::::::::::" << std::endl;
std::cout << GridLogMessage << " Refresh cumulative timings "<<std::endl;
std::cout << GridLogMessage << "--------------------------- "<<std::endl;
for (int level = 0; level < as.size(); ++level) {
for (int actionID = 0; actionID < as[level].actions.size(); ++actionID) {
std::cout << GridLogMessage
<< as[level].actions.at(actionID)->action_name()
<<"["<<level<<"]["<< actionID<<"] "
<< as[level].actions.at(actionID)->refresh_us*1.0e-6<<" s"<< std::endl;
}
}
std::cout << GridLogMessage << "--------------------------- "<<std::endl;
std::cout << GridLogMessage << " Action cumulative timings "<<std::endl;
std::cout << GridLogMessage << "--------------------------- "<<std::endl;
for (int level = 0; level < as.size(); ++level) {
for (int actionID = 0; actionID < as[level].actions.size(); ++actionID) {
std::cout << GridLogMessage
<< as[level].actions.at(actionID)->action_name()
<<"["<<level<<"]["<< actionID<<"] "
<< as[level].actions.at(actionID)->S_us*1.0e-6<<" s"<< std::endl;
}
}
std::cout << GridLogMessage << "--------------------------- "<<std::endl;
std::cout << GridLogMessage << " Force cumulative timings "<<std::endl;
std::cout << GridLogMessage << "------------------------- "<<std::endl;
for (int level = 0; level < as.size(); ++level) {
for (int actionID = 0; actionID < as[level].actions.size(); ++actionID) {
std::cout << GridLogMessage
<< as[level].actions.at(actionID)->action_name()
<<"["<<level<<"]["<< actionID<<"] "
<< as[level].actions.at(actionID)->deriv_us*1.0e-6<<" s"<< std::endl;
}
}
std::cout << GridLogMessage << "--------------------------- "<<std::endl;
std::cout << GridLogMessage << " Force average size "<<std::endl;
std::cout << GridLogMessage << "------------------------- "<<std::endl;
for (int level = 0; level < as.size(); ++level) {
for (int actionID = 0; actionID < as[level].actions.size(); ++actionID) {
std::cout << GridLogMessage
<< as[level].actions.at(actionID)->action_name()
<<"["<<level<<"]["<< actionID<<"] : "
<<" force max " << as[level].actions.at(actionID)->deriv_max_average()
<<" norm " << as[level].actions.at(actionID)->deriv_norm_average()
<<" calls " << as[level].actions.at(actionID)->deriv_num
<< std::endl;
}
}
std::cout << GridLogMessage << ":::::::::::::::::::::::::::::::::::::::::"<< std::endl;
}
void print_parameters()
{
std::cout << GridLogMessage << "[Integrator] Name : "<< integrator_name() << std::endl;
@ -224,7 +309,6 @@ public:
}
}
std::cout << GridLogMessage << ":::::::::::::::::::::::::::::::::::::::::"<< std::endl;
}
void reverse_momenta()
@ -267,15 +351,19 @@ public:
for (int actionID = 0; actionID < as[level].actions.size(); ++actionID) {
// get gauge field from the SmearingPolicy and
// based on the boolean is_smeared in actionID
auto name = as[level].actions.at(actionID)->action_name();
std::cout << GridLogMessage << "refresh [" << level << "][" << actionID << "] "<<name << std::endl;
Field& Us = Smearer.get_U(as[level].actions.at(actionID)->is_smeared);
as[level].actions.at(actionID)->refresh_timer_start();
as[level].actions.at(actionID)->refresh(Us, sRNG, pRNG);
as[level].actions.at(actionID)->refresh_timer_stop();
}
// Refresh the higher representation actions
as[level].apply(refresh_hireps, Representations, sRNG, pRNG);
}
MomFilter->applyFilter(P);
}
// to be used by the actionlevel class to iterate
@ -310,7 +398,9 @@ public:
// based on the boolean is_smeared in actionID
Field& Us = Smearer.get_U(as[level].actions.at(actionID)->is_smeared);
std::cout << GridLogMessage << "S [" << level << "][" << actionID << "] action eval " << std::endl;
as[level].actions.at(actionID)->S_timer_start();
Hterm = as[level].actions.at(actionID)->S(Us);
as[level].actions.at(actionID)->S_timer_stop();
std::cout << GridLogMessage << "S [" << level << "][" << actionID << "] H = " << Hterm << std::endl;
H += Hterm;
}

View File

@ -182,7 +182,7 @@ namespace ConjugateBC {
GridBase *grid = Link.Grid();
int Lmu = grid->GlobalDimensions()[mu] - 1;
Lattice<iScalar<vInteger>> coor(grid);
Lattice<iScalar<vInteger> > coor(grid);
LatticeCoordinate(coor, mu);
Lattice<gauge> tmp(grid);

View File

@ -0,0 +1,111 @@
/*************************************************************************************
Grid physics library, www.github.com/paboyle/Grid
Source file:
Copyright (C) 2015-2016
Author: Peter Boyle <paboyle@ph.ed.ac.uk>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
See the full license in the file "LICENSE" in the top level distribution
directory
*************************************************************************************/
/* END LEGAL */
#pragma once
NAMESPACE_BEGIN(Grid);
template<class FermionOperatorD, class FermionOperatorF, class SchurOperatorD, class SchurOperatorF>
class MixedPrecisionConjugateGradientOperatorFunction : public OperatorFunction<typename FermionOperatorD::FermionField> {
public:
typedef typename FermionOperatorD::FermionField FieldD;
typedef typename FermionOperatorF::FermionField FieldF;
using OperatorFunction<FieldD>::operator();
RealD Tolerance;
RealD InnerTolerance; //Initial tolerance for inner CG. Defaults to Tolerance but can be changed
Integer MaxInnerIterations;
Integer MaxOuterIterations;
GridBase* SinglePrecGrid;
RealD OuterLoopNormMult; //Stop the outer loop and move to a final double prec solve when the residual is OuterLoopNormMult * Tolerance
FermionOperatorF &FermOpF;
FermionOperatorD &FermOpD;
SchurOperatorF &LinOpF;
SchurOperatorD &LinOpD;
Integer TotalInnerIterations; //Number of inner CG iterations
Integer TotalOuterIterations; //Number of restarts
Integer TotalFinalStepIterations; //Number of CG iterations in final patch-up step
MixedPrecisionConjugateGradientOperatorFunction(RealD tol, RealD tolInner,
Integer maxinnerit,
Integer maxouterit,
GridBase *_SinglePrecGrid,
FermionOperatorF &_FermOpF,
FermionOperatorD &_FermOpD,
SchurOperatorF &_LinOpF,
SchurOperatorD &_LinOpD) :
LinOpF(_LinOpF),
LinOpD(_LinOpD),
FermOpF(_FermOpF),
FermOpD(_FermOpD),
Tolerance(tol),
InnerTolerance(tolInner),
MaxInnerIterations(maxinnerit),
MaxOuterIterations(maxouterit),
SinglePrecGrid(_SinglePrecGrid),
OuterLoopNormMult(100.)
{ assert(tolInner<0.01); };
void operator()(LinearOperatorBase<FieldD> &LinOpU, const FieldD &src, FieldD &psi)
{
SchurOperatorD * SchurOpU = static_cast<SchurOperatorD *>(&LinOpU);
// Assumption made in code to extract gauge field
// We could avoid storing the LinOpD reference altogether?
assert(&(SchurOpU->_Mat)==&(LinOpD._Mat));
////////////////////////////////////////////////////////////////////////////////////
// Moving this to a Clone method of fermion operator would allow to duplicate the
// physics parameters and decrease gauge field copies
////////////////////////////////////////////////////////////////////////////////////
auto &Umu_d = FermOpD.GetDoubledGaugeField();
auto &Umu_f = FermOpF.GetDoubledGaugeField();
auto &Umu_fe= FermOpF.GetDoubledGaugeFieldE();
auto &Umu_fo= FermOpF.GetDoubledGaugeFieldO();
precisionChange(Umu_f,Umu_d);
pickCheckerboard(Even,Umu_fe,Umu_f);
pickCheckerboard(Odd ,Umu_fo,Umu_f);
//////////////////////////////////////////////////////////////////////////////////////////
// Make a mixed precision conjugate gradient
//////////////////////////////////////////////////////////////////////////////////////////
// Could assume red black solver here and remove the SinglePrecGrid parameter???
MixedPrecisionConjugateGradient<FieldD,FieldF> MPCG(Tolerance, InnerTolerance,MaxInnerIterations,MaxOuterIterations,SinglePrecGrid,LinOpF,LinOpD);
std::cout << GridLogMessage << "Calling mixed precision Conjugate Gradient src "<<norm2(src) <<std::endl;
psi=Zero();
MPCG(src,psi);
}
};
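// A minimal construction sketch (the Schur typedefs and variable names here are illustrative, not taken from this diff):
//   typedef SchurDiagMooeeOperator<FermionOperatorD,FieldD> SchurD;
//   typedef SchurDiagMooeeOperator<FermionOperatorF,FieldF> SchurF;
//   SchurD LinOpD(FermOpD);  SchurF LinOpF(FermOpF);
//   MixedPrecisionConjugateGradientOperatorFunction<FermionOperatorD,FermionOperatorF,SchurD,SchurF>
//     mpcg(1.0e-8, 1.0e-5, 10000, 50, SinglePrecRbGrid, FermOpF, FermOpD, LinOpF, LinOpD);
//   mpcg(LinOpD, src, psi); // restarts a single-precision inner CG inside a double-precision outer loop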
NAMESPACE_END(Grid);

200
Grid/random/gaussian.h Normal file
View File

@ -0,0 +1,200 @@
// -*- C++ -*-
//===--------------------------- random -----------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
// Peter Boyle: Taken from libc++ in Clang/LLVM.
// Reason is that libstdc++ and clang differ in their return order in the normal_distribution / Box-Muller step.
// Standardise on one and call it "gaussian_distribution".
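//
// A minimal usage sketch (a drop-in for std::normal_distribution; the engine and seed below are illustrative):
//   std::mt19937 engine(2024);
//   Grid::gaussian_distribution<double> dist(0.0, 1.0); // mean, stddev
//   double x = dist(engine);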
#pragma once
#include <cstddef>
#include <cstdint>
#include <cmath>
#include <type_traits>
#include <initializer_list>
#include <limits>
#include <algorithm>
#include <numeric>
#include <vector>
#include <string>
#include <istream>
#include <ostream>
#include <random>
// normal_distribution -> gaussian distribution
namespace Grid {
template<class _RealType = double>
class gaussian_distribution
{
public:
// types
typedef _RealType result_type;
class param_type
{
result_type __mean_;
result_type __stddev_;
public:
typedef gaussian_distribution distribution_type;
strong_inline
explicit param_type(result_type __mean = 0, result_type __stddev = 1)
: __mean_(__mean), __stddev_(__stddev) {}
strong_inline
result_type mean() const {return __mean_;}
strong_inline
result_type stddev() const {return __stddev_;}
friend strong_inline
bool operator==(const param_type& __x, const param_type& __y)
{return __x.__mean_ == __y.__mean_ && __x.__stddev_ == __y.__stddev_;}
friend strong_inline
bool operator!=(const param_type& __x, const param_type& __y)
{return !(__x == __y);}
};
private:
param_type __p_;
result_type _V_;
bool _V_hot_;
public:
// constructors and reset functions
strong_inline
explicit gaussian_distribution(result_type __mean = 0, result_type __stddev = 1)
: __p_(param_type(__mean, __stddev)), _V_hot_(false) {}
strong_inline
explicit gaussian_distribution(const param_type& __p)
: __p_(__p), _V_hot_(false) {}
strong_inline
void reset() {_V_hot_ = false;}
// generating functions
template<class _URNG>
strong_inline
result_type operator()(_URNG& __g)
{return (*this)(__g, __p_);}
template<class _URNG> result_type operator()(_URNG& __g, const param_type& __p);
// property functions
strong_inline
result_type mean() const {return __p_.mean();}
strong_inline
result_type stddev() const {return __p_.stddev();}
strong_inline
param_type param() const {return __p_;}
strong_inline
void param(const param_type& __p) {__p_ = __p;}
strong_inline
result_type min() const {return -std::numeric_limits<result_type>::infinity();}
strong_inline
result_type max() const {return std::numeric_limits<result_type>::infinity();}
friend strong_inline
bool operator==(const gaussian_distribution& __x,
const gaussian_distribution& __y)
{return __x.__p_ == __y.__p_ && __x._V_hot_ == __y._V_hot_ &&
(!__x._V_hot_ || __x._V_ == __y._V_);}
friend strong_inline
bool operator!=(const gaussian_distribution& __x,
const gaussian_distribution& __y)
{return !(__x == __y);}
template <class _CharT, class _Traits, class _RT>
friend
std::basic_ostream<_CharT, _Traits>&
operator<<(std::basic_ostream<_CharT, _Traits>& __os,
const gaussian_distribution<_RT>& __x);
template <class _CharT, class _Traits, class _RT>
friend
std::basic_istream<_CharT, _Traits>&
operator>>(std::basic_istream<_CharT, _Traits>& __is,
gaussian_distribution<_RT>& __x);
};
template <class _RealType>
template<class _URNG>
_RealType
gaussian_distribution<_RealType>::operator()(_URNG& __g, const param_type& __p)
{
result_type _Up;
if (_V_hot_)
{
_V_hot_ = false;
_Up = _V_;
}
else
{
std::uniform_real_distribution<result_type> _Uni(-1, 1);
result_type __u;
result_type __v;
result_type __s;
do
{
__u = _Uni(__g);
__v = _Uni(__g);
__s = __u * __u + __v * __v;
} while (__s > 1 || __s == 0);
result_type _Fp = std::sqrt(-2 * std::log(__s) / __s);
_V_ = __v * _Fp;
_V_hot_ = true;
_Up = __u * _Fp;
}
return _Up * __p.stddev() + __p.mean();
}
template <class _CharT, class _Traits, class _RT>
std::basic_ostream<_CharT, _Traits>&
operator<<(std::basic_ostream<_CharT, _Traits>& __os,
const gaussian_distribution<_RT>& __x)
{
auto __save_flags = __os.flags();
__os.flags(std::ios_base::dec | std::ios_base::left | std::ios_base::fixed |
std::ios_base::scientific);
_CharT __sp = __os.widen(' ');
__os.fill(__sp);
__os << __x.mean() << __sp << __x.stddev() << __sp << __x._V_hot_;
if (__x._V_hot_)
__os << __sp << __x._V_;
__os.flags(__save_flags);
return __os;
}
template <class _CharT, class _Traits, class _RT>
std::basic_istream<_CharT, _Traits>&
operator>>(std::basic_istream<_CharT, _Traits>& __is,
gaussian_distribution<_RT>& __x)
{
typedef gaussian_distribution<_RT> _Eng;
typedef typename _Eng::result_type result_type;
typedef typename _Eng::param_type param_type;
auto __save_flags = __is.flags();
__is.flags(std::ios_base::dec | std::ios_base::skipws);
result_type __mean;
result_type __stddev;
result_type _Vp = 0;
bool _V_hot = false;
__is >> __mean >> __stddev >> _V_hot;
if (_V_hot)
__is >> _Vp;
if (!__is.fail())
{
__x.param(param_type(__mean, __stddev));
__x._V_hot_ = _V_hot;
__x._V_ = _Vp;
}
__is.flags(__save_flags);
return __is;
}
}

View File

@ -263,7 +263,8 @@ public:
int face_table_computed;
std::vector<commVector<std::pair<int,int> > > face_table ;
Vector<int> surface_list;
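// When set, a dimension that is split across ranks is treated as if it were not communicated:
// the stencil wraps within the local sub-volume (locally periodic) instead of exchanging halos.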
bool locally_periodic;
stencilVector<StencilEntry> _entries; // Resident in managed memory
commVector<StencilEntry> _entries_device; // Resident in managed memory
std::vector<Packet> Packets;
@ -320,14 +321,15 @@ public:
int ld = _grid->_ldimensions[dimension];
int rd = _grid->_rdimensions[dimension];
int simd_layout = _grid->_simd_layout[dimension];
int comm_dim = _grid->_processors[dimension] >1 ;
int comm_dim = _grid->_processors[dimension] >1 && (!locally_periodic);
// int recv_from_rank;
// int xmit_to_rank;
int recv_from_rank;
int xmit_to_rank;
if ( ! comm_dim ) return 1;
if ( displacement == 0 ) return 1;
return 0;
}
//////////////////////////////////////////
@ -473,7 +475,7 @@ public:
// the permute type
int simd_layout = _grid->_simd_layout[dimension];
int comm_dim = _grid->_processors[dimension] >1 ;
int comm_dim = _grid->_processors[dimension] >1 && (!locally_periodic);
int splice_dim = _grid->_simd_layout[dimension]>1 && (comm_dim);
int is_same_node = 1;
@ -657,6 +659,20 @@ public:
const std::vector<int> &directions,
const std::vector<int> &distances,
Parameters p)
: CartesianStencil(grid,
npoints,
checkerboard,
directions,
distances,
false,
p){};
CartesianStencil(GridBase *grid,
int npoints,
int checkerboard,
const std::vector<int> &directions,
const std::vector<int> &distances,
bool _locally_periodic,
Parameters p)
: shm_bytes_thr(npoints),
comm_bytes_thr(npoints),
comm_enter_thr(npoints),
@ -665,6 +681,7 @@ public:
{
face_table_computed=0;
_grid = grid;
this->locally_periodic=_locally_periodic;
this->parameters=p;
/////////////////////////////////////
// Initialise the base
@ -690,6 +707,8 @@ public:
int point = i;
int dimension = directions[i];
assert(dimension>=0 && dimension<_grid->Nd());
int displacement = distances[i];
int shift = displacement;
@ -703,7 +722,7 @@ public:
// the permute type
//////////////////////////
int simd_layout = _grid->_simd_layout[dimension];
int comm_dim = _grid->_processors[dimension] >1 ;
int comm_dim = _grid->_processors[dimension] >1 && (!locally_periodic);
int splice_dim = _grid->_simd_layout[dimension]>1 && (comm_dim);
int rotate_dim = _grid->_simd_layout[dimension]>2;
@ -817,7 +836,7 @@ public:
int pd = _grid->_processors[dimension];
int simd_layout = _grid->_simd_layout[dimension];
int comm_dim = _grid->_processors[dimension] >1 ;
assert(locally_periodic==false);
assert(comm_dim==1);
int shift = (shiftpm + fd) %fd;
assert(shift>=0);
@ -997,6 +1016,7 @@ public:
int pd = _grid->_processors[dimension];
int simd_layout = _grid->_simd_layout[dimension];
int comm_dim = _grid->_processors[dimension] >1 ;
assert(locally_periodic==false);
assert(simd_layout==1);
assert(comm_dim==1);
assert(shift>=0);
@ -1089,6 +1109,7 @@ public:
int pd = _grid->_processors[dimension];
int simd_layout = _grid->_simd_layout[dimension];
int comm_dim = _grid->_processors[dimension] >1 ;
assert(locally_periodic==false);
assert(comm_dim==1);
// This will not work with a rotate dim
assert(simd_layout==maxl);

View File

@ -52,12 +52,17 @@ template<class vtype, int N> accelerator_inline iVector<vtype, N> Exponentiate(c
return ret;
}
// Specialisation: Cayley-Hamilton exponential for SU(3)
#ifndef GRID_CUDA
#if 0
template<class vtype, typename std::enable_if< GridTypeMapper<vtype>::TensorLevel == 0>::type * =nullptr>
accelerator_inline iMatrix<vtype,3> Exponentiate(const iMatrix<vtype,3> &arg, RealD alpha , Integer Nexp = DEFAULT_MAT_EXP )
accelerator_inline iMatrix<vtype,3> Exponentiated(const iMatrix<vtype,3> &arg, RealD alpha , Integer Nexp = DEFAULT_MAT_EXP )
{
return ExponentiateCayleyHamilton(arg,alpha);
}
#endif
template<class vtype, typename std::enable_if< GridTypeMapper<vtype>::TensorLevel == 0>::type * =nullptr>
accelerator_inline iMatrix<vtype,3> ExponentiateCayleyHamilton(const iMatrix<vtype,3> &arg, RealD alpha )
{
// for SU(3) 2x faster than the std implementation using Nexp=12
// notice that it actually computes
@ -115,8 +120,6 @@ accelerator_inline iMatrix<vtype,3> Exponentiate(const iMatrix<vtype,3> &arg, Re
return (f0 * unit + timesMinusI(f1) * arg*alpha - f2 * iQ2);
}
#endif
// General exponential
template<class vtype,int N, typename std::enable_if< GridTypeMapper<vtype>::TensorLevel == 0 >::type * =nullptr>
@ -129,8 +132,8 @@ accelerator_inline iMatrix<vtype,N> Exponentiate(const iMatrix<vtype,N> &arg, Re
typedef iMatrix<vtype,N> mat;
mat unit(1.0);
mat temp(unit);
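// Horner (nested) evaluation of the truncated Taylor series:
// exp(alpha*arg) ~ 1 + (alpha*arg)(1 + (alpha*arg)/2 (1 + ... (1 + (alpha*arg)/Nexp) ... ))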
for(int i=Nexp; i>=1;--i){
temp *= alpha/RealD(i);
for(int n=Nexp; n>=1;--n){
temp *= alpha/RealD(n);
temp = unit + temp*arg;
}
return temp;

View File

@ -208,5 +208,46 @@ void merge(vobj &vec,const ExtractPointerArray<sobj> &extracted, int offset)
}
//////////////////////////////////////////////////////////////////////////////////
//Copy a single lane of a SIMD tensor type from one object to another
//Output object must be of the same tensor type but may be of a different precision (i.e. it can have a different root data type)
///////////////////////////////////////////////////////////////////////////////////
template<class vobjOut, class vobjIn>
accelerator_inline
void copyLane(vobjOut & __restrict__ vecOut, int lane_out, const vobjIn & __restrict__ vecIn, int lane_in)
{
static_assert( std::is_same<typename vobjOut::DoublePrecision, typename vobjIn::DoublePrecision>::value == 1, "copyLane: tensor types must be the same" ); //if tensor types are same the DoublePrecision type must be the same
typedef typename vobjOut::vector_type ovector_type;
typedef typename vobjIn::vector_type ivector_type;
constexpr int owords=sizeof(vobjOut)/sizeof(ovector_type);
constexpr int iwords=sizeof(vobjIn)/sizeof(ivector_type);
static_assert( owords == iwords, "copyLane: Expected number of vector words in input and output objects to be equal" );
typedef typename vobjOut::scalar_type oscalar_type;
typedef typename vobjIn::scalar_type iscalar_type;
typedef typename ExtractTypeMap<oscalar_type>::extract_type oextract_type;
typedef typename ExtractTypeMap<iscalar_type>::extract_type iextract_type;
typedef oextract_type * opointer;
typedef iextract_type * ipointer;
constexpr int oNsimd=ovector_type::Nsimd();
constexpr int iNsimd=ivector_type::Nsimd();
iscalar_type itmp;
oscalar_type otmp;
opointer __restrict__ op = (opointer)&vecOut;
ipointer __restrict__ ip = (ipointer)&vecIn;
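// For each SIMD word of the tensor: read lane 'lane_in' of the input vector, convert the scalar
// (this is where any precision change happens), then write it into lane 'lane_out' of the output vector.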
for(int w=0;w<owords;w++){
memcpy( (char*)&itmp, (char*)(ip + lane_in + iNsimd*w), sizeof(iscalar_type) );
otmp = itmp; //potential precision change
memcpy( (char*)(op + lane_out + oNsimd*w), (char*)&otmp, sizeof(oscalar_type) );
}
}
NAMESPACE_END(Grid);

View File

@ -47,20 +47,20 @@ NAMESPACE_BEGIN(Grid);
class TypePair {
public:
T _internal[2];
accelerator TypePair<T>& operator=(const Grid::Zero& o) {
TypePair<T>& operator=(const Grid::Zero& o) {
_internal[0] = Zero();
_internal[1] = Zero();
return *this;
}
accelerator TypePair<T> operator+(const TypePair<T>& o) const {
TypePair<T> operator+(const TypePair<T>& o) const {
TypePair<T> r;
r._internal[0] = _internal[0] + o._internal[0];
r._internal[1] = _internal[1] + o._internal[1];
return r;
}
accelerator TypePair<T>& operator+=(const TypePair<T>& o) {
TypePair<T>& operator+=(const TypePair<T>& o) {
_internal[0] += o._internal[0];
_internal[1] += o._internal[1];
return *this;

View File

@ -84,8 +84,7 @@ void acceleratorInit(void)
// IBM jsrun makes CUDA device numbering screwy, so it need not match the rank
if ( world_rank == 0 ) {
printf("AcceleratorCudaInit: using default device \n");
printf("AcceleratorCudaInit: assume user either uses\n");
printf("AcceleratorCudaInit: a) IBM jsrun, or \n");
printf("AcceleratorCudaInit: assume user either uses a) IBM jsrun, or \n");
printf("AcceleratorCudaInit: b) invokes through a wrapping script to set CUDA_VISIBLE_DEVICES, UCX_NET_DEVICES, and numa binding \n");
printf("AcceleratorCudaInit: Configure options --enable-setdevice=no \n");
}
@ -96,7 +95,7 @@ void acceleratorInit(void)
#endif
cudaSetDevice(device);
cudaStreamCreate(&copyStream);
const int len=64;
char busid[len];
if( rank == world_rank ) {
@ -110,7 +109,6 @@ void acceleratorInit(void)
#ifdef GRID_HIP
hipDeviceProp_t *gpu_props;
hipStream_t copyStream;
void acceleratorInit(void)
{
int nDevices = 1;
@ -168,25 +166,16 @@ void acceleratorInit(void)
#ifdef GRID_DEFAULT_GPU
if ( world_rank == 0 ) {
printf("AcceleratorHipInit: using default device \n");
printf("AcceleratorHipInit: assume user or srun sets ROCR_VISIBLE_DEVICES and numa binding \n");
printf("AcceleratorHipInit: Configure options --enable-setdevice=no \n");
printf("AcceleratorHipInit: assume user either uses a wrapping script to set CUDA_VISIBLE_DEVICES, UCX_NET_DEVICES, and numa binding \n");
printf("AcceleratorHipInit: Configure options --enable-summit, --enable-select-gpu=no \n");
}
int device = 0;
#else
if ( world_rank == 0 ) {
printf("AcceleratorHipInit: rank %d setting device to node rank %d\n",world_rank,rank);
printf("AcceleratorHipInit: Configure options --enable-setdevice=yes \n");
printf("AcceleratorHipInit: Configure options --enable-select-gpu=yes \n");
}
int device = rank;
hipSetDevice(rank);
#endif
hipSetDevice(device);
hipStreamCreate(&copyStream);
const int len=64;
char busid[len];
if( rank == world_rank ) {
hipDeviceGetPCIBusId(busid, len, device);
printf("local rank %d device %d bus id: %s\n", rank, device, busid);
}
if ( world_rank == 0 ) printf("AcceleratorHipInit: ================================================\n");
}
#endif

View File

@ -95,7 +95,6 @@ void acceleratorInit(void);
//////////////////////////////////////////////
#ifdef GRID_CUDA
#include <cuda.h>
#ifdef __CUDA_ARCH__
@ -206,7 +205,7 @@ inline void *acceleratorAllocShared(size_t bytes)
auto err = cudaMallocManaged((void **)&ptr,bytes);
if( err != cudaSuccess ) {
ptr = (void *) NULL;
printf(" cudaMallocManaged failed for %d %s \n",bytes,cudaGetErrorString(err));
printf(" cudaMallocManaged failed for %lu %s \n",bytes,cudaGetErrorString(err));
}
return ptr;
};
@ -216,7 +215,7 @@ inline void *acceleratorAllocDevice(size_t bytes)
auto err = cudaMalloc((void **)&ptr,bytes);
if( err != cudaSuccess ) {
ptr = (void *) NULL;
printf(" cudaMalloc failed for %d %s \n",bytes,cudaGetErrorString(err));
printf(" cudaMalloc failed for %lu %s \n",bytes,cudaGetErrorString(err));
}
return ptr;
};
@ -230,7 +229,6 @@ inline void acceleratorCopyDeviceToDeviceAsynch(void *from,void *to,size_t bytes
cudaMemcpyAsync(to,from,bytes, cudaMemcpyDeviceToDevice,copyStream);
}
inline void acceleratorCopySynchronise(void) { cudaStreamSynchronize(copyStream); };
inline int acceleratorIsCommunicable(void *ptr)
{
// int uvm=0;
@ -307,7 +305,7 @@ inline void acceleratorFreeDevice(void *ptr){free(ptr,*theGridAccelerator);};
inline void acceleratorCopyDeviceToDeviceAsynch(void *from,void *to,size_t bytes) {
theGridAccelerator->memcpy(to,from,bytes);
}
inline void acceleratorCopySynchronise(void) { theGridAccelerator->wait(); std::cout<<"acceleratorCopySynchronise() wait "<<std::endl; }
inline void acceleratorCopySynchronise(void) { theGridAccelerator->wait(); }
inline void acceleratorCopyToDevice(void *from,void *to,size_t bytes) { theGridAccelerator->memcpy(to,from,bytes); theGridAccelerator->wait();}
inline void acceleratorCopyFromDevice(void *from,void *to,size_t bytes){ theGridAccelerator->memcpy(to,from,bytes); theGridAccelerator->wait();}
inline void acceleratorMemSet(void *base,int value,size_t bytes) { theGridAccelerator->memset(base,value,bytes); theGridAccelerator->wait();}
@ -338,7 +336,6 @@ NAMESPACE_BEGIN(Grid);
#define accelerator __host__ __device__
#define accelerator_inline __host__ __device__ inline
extern hipStream_t copyStream;
/*These routines define mapping from thread grid to loop & vector lane indexing */
accelerator_inline int acceleratorSIMTlane(int Nsimd) {
#ifdef GRID_SIMT
@ -413,16 +410,10 @@ inline void acceleratorFreeShared(void *ptr){ hipFree(ptr);};
inline void acceleratorFreeDevice(void *ptr){ hipFree(ptr);};
inline void acceleratorCopyToDevice(void *from,void *to,size_t bytes) { hipMemcpy(to,from,bytes, hipMemcpyHostToDevice);}
inline void acceleratorCopyFromDevice(void *from,void *to,size_t bytes){ hipMemcpy(to,from,bytes, hipMemcpyDeviceToHost);}
//inline void acceleratorCopyDeviceToDeviceAsynch(void *from,void *to,size_t bytes) { hipMemcpy(to,from,bytes, hipMemcpyDeviceToDevice);}
//inline void acceleratorCopySynchronise(void) { }
inline void acceleratorCopyDeviceToDeviceAsynch(void *from,void *to,size_t bytes) { hipMemcpy(to,from,bytes, hipMemcpyDeviceToDevice);}
inline void acceleratorCopySynchronise(void) { }
inline void acceleratorMemSet(void *base,int value,size_t bytes) { hipMemset(base,value,bytes);}
inline void acceleratorCopyDeviceToDeviceAsynch(void *from,void *to,size_t bytes) // Asynch
{
hipMemcpyAsync(to,from,bytes, hipMemcpyDeviceToDevice,copyStream);
}
inline void acceleratorCopySynchronise(void) { hipStreamSynchronize(copyStream); };
#endif
//////////////////////////////////////////////
@ -493,12 +484,18 @@ inline void acceleratorFreeCpu (void *ptr){free(ptr);};
///////////////////////////////////////////////////
// Synchronise across local threads for divergence resynch
///////////////////////////////////////////////////
accelerator_inline void acceleratorSynchronise(void) // Only Nvidia needs
accelerator_inline void acceleratorSynchronise(void)
{
#ifdef GRID_SIMT
#ifdef GRID_CUDA
__syncwarp();
#endif
#ifdef GRID_SYCL
//cl::sycl::detail::workGroupBarrier();
#endif
#ifdef GRID_HIP
__syncthreads();
#endif
#endif
return;
}

View File

@ -88,7 +88,7 @@ public:
// Coordinate class, maxdims = 8 for now.
////////////////////////////////////////////////////////////////
#define GRID_MAX_LATTICE_DIMENSION (8)
#define GRID_MAX_SIMD (32)
#define GRID_MAX_SIMD (16)
static constexpr int MaxDims = GRID_MAX_LATTICE_DIMENSION;

View File

@ -0,0 +1,473 @@
/*************************************************************************************
Grid physics library, www.github.com/paboyle/Grid
Source file: ./HMC/DWF2p1fIwasakiGparity.cc
Copyright (C) 2015-2016
Author: Christopher Kelly <ckelly@bnl.gov>
Author: Peter Boyle <pabobyle@ph.ed.ac.uk>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
See the full license in the file "LICENSE" in the top level distribution
directory
*************************************************************************************/
/* END LEGAL */
#include <Grid/Grid.h>
using namespace Grid;
//2+1f DWF+I ensemble with G-parity BCs
//designed to reproduce ensembles in https://arxiv.org/pdf/1908.08640.pdf
struct RatQuoParameters: Serializable {
GRID_SERIALIZABLE_CLASS_MEMBERS(RatQuoParameters,
double, bnd_lo,
double, bnd_hi,
Integer, action_degree,
double, action_tolerance,
Integer, md_degree,
double, md_tolerance,
Integer, reliable_update_freq,
Integer, bnd_check_freq);
RatQuoParameters() {
bnd_lo = 1e-2;
bnd_hi = 30;
action_degree = 10;
action_tolerance = 1e-10;
md_degree = 10;
md_tolerance = 1e-8;
bnd_check_freq = 20;
reliable_update_freq = 50;
}
void Export(RationalActionParams &into) const{
into.lo = bnd_lo;
into.hi = bnd_hi;
into.action_degree = action_degree;
into.action_tolerance = action_tolerance;
into.md_degree = md_degree;
into.md_tolerance = md_tolerance;
into.BoundsCheckFreq = bnd_check_freq;
}
};
struct EvolParameters: Serializable {
GRID_SERIALIZABLE_CLASS_MEMBERS(EvolParameters,
Integer, StartTrajectory,
Integer, Trajectories,
Integer, SaveInterval,
Integer, Steps,
bool, MetropolisTest,
std::string, StartingType,
std::vector<Integer>, GparityDirs,
RatQuoParameters, rat_quo_l,
RatQuoParameters, rat_quo_s);
EvolParameters() {
//For initial thermalization; afterwards user should switch Metropolis on and use StartingType=CheckpointStart
MetropolisTest = false;
StartTrajectory = 0;
Trajectories = 50;
SaveInterval = 5;
StartingType = "ColdStart";
GparityDirs.resize(3, 1); //1 for G-parity, 0 for periodic
Steps = 5;
}
};
bool fileExists(const std::string &fn){
std::ifstream f(fn);
return f.good();
}
struct LanczosParameters: Serializable {
GRID_SERIALIZABLE_CLASS_MEMBERS(LanczosParameters,
double, alpha,
double, beta,
double, mu,
int, ord,
int, n_stop,
int, n_want,
int, n_use,
double, tolerance);
LanczosParameters() {
alpha = 35;
beta = 5;
mu = 0;
ord = 100;
n_stop = 10;
n_want = 10;
n_use = 15;
tolerance = 1e-6;
}
};
template<typename FermionActionD, typename FermionFieldD>
void computeEigenvalues(std::string param_file,
GridCartesian* Grid, GridRedBlackCartesian* rbGrid, const LatticeGaugeFieldD &latt, //expect lattice to have been initialized to something
FermionActionD &action, GridParallelRNG &rng){
LanczosParameters params;
if(fileExists(param_file)){
std::cout << GridLogMessage << " Reading " << param_file << std::endl;
Grid::XmlReader rd(param_file);
read(rd, "LanczosParameters", params);
}else if(!GlobalSharedMemory::WorldRank){
std::cout << GridLogMessage << " File " << param_file << " does not exist" << std::endl;
std::cout << GridLogMessage << " Writing xml template to " << param_file << ".templ" << std::endl;
Grid::XmlWriter wr(param_file + ".templ");
write(wr, "LanczosParameters", params);
}
FermionFieldD gauss_o(rbGrid);
FermionFieldD gauss(Grid);
gaussian(rng, gauss);
pickCheckerboard(Odd, gauss_o, gauss);
action.ImportGauge(latt);
SchurDiagMooeeOperator<FermionActionD, FermionFieldD> hermop(action);
PlainHermOp<FermionFieldD> hermop_wrap(hermop);
//ChebyshevLanczos<FermionFieldD> Cheb(params.alpha, params.beta, params.mu, params.ord);
assert(params.mu == 0.0);
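//Chebyshev filter on [beta^2, alpha^2]: damps the bulk of the MdagM spectrum so the IRL resolves the low-lying modes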
Chebyshev<FermionFieldD> Cheb(params.beta*params.beta, params.alpha*params.alpha, params.ord+1);
FunctionHermOp<FermionFieldD> Cheb_wrap(Cheb, hermop);
std::cout << "IRL: alpha=" << params.alpha << " beta=" << params.beta << " mu=" << params.mu << " ord=" << params.ord << std::endl;
ImplicitlyRestartedLanczos<FermionFieldD> IRL(Cheb_wrap, hermop_wrap, params.n_stop, params.n_want, params.n_use, params.tolerance, 10000);
std::vector<RealD> eval(params.n_use);
std::vector<FermionFieldD> evec(params.n_use, rbGrid);
int Nconv;
IRL.calc(eval, evec, gauss_o, Nconv);
std::cout << "Eigenvalues:" << std::endl;
for(int i=0;i<params.n_want;i++){
std::cout << i << " " << eval[i] << std::endl;
}
}
//Check the quality of the RHMC approx
template<typename FermionActionD, typename FermionFieldD, typename RHMCtype>
void checkRHMC(GridCartesian* Grid, GridRedBlackCartesian* rbGrid, const LatticeGaugeFieldD &latt, //expect lattice to have been initialized to something
FermionActionD &numOp, FermionActionD &denOp, RHMCtype &rhmc, GridParallelRNG &rng,
int inv_pow, const std::string &quark_descr){
FermionFieldD gauss_o(rbGrid);
FermionFieldD gauss(Grid);
gaussian(rng, gauss);
pickCheckerboard(Odd, gauss_o, gauss);
numOp.ImportGauge(latt);
denOp.ImportGauge(latt);
typedef typename FermionActionD::Impl_t FermionImplPolicyD;
SchurDifferentiableOperator<FermionImplPolicyD> MdagM(numOp);
SchurDifferentiableOperator<FermionImplPolicyD> VdagV(denOp);
std::cout << "Starting: Checking quality of RHMC action approx for " << quark_descr << " quark numerator and power -1/" << inv_pow << std::endl;
InversePowerBoundsCheck(inv_pow, 10000, 1e16, MdagM,gauss_o, rhmc.ApproxNegPowerAction); //use large tolerance to prevent exit on fail; we are trying to tune here!
std::cout << "Finished: Checking quality of RHMC action approx for " << quark_descr << " quark numerator and power -1/" << inv_pow << std::endl;
std::cout << "Starting: Checking quality of RHMC action approx for " << quark_descr << " quark numerator and power -1/" << 2*inv_pow << std::endl;
InversePowerBoundsCheck(2*inv_pow, 10000, 1e16, MdagM,gauss_o, rhmc.ApproxNegHalfPowerAction);
std::cout << "Finished: Checking quality of RHMC action approx for " << quark_descr << " quark numerator and power -1/" << 2*inv_pow << std::endl;
std::cout << "Starting: Checking quality of RHMC action approx for " << quark_descr << " quark denominator and power -1/" << inv_pow << std::endl;
InversePowerBoundsCheck(inv_pow, 10000, 1e16, VdagV,gauss_o, rhmc.ApproxNegPowerAction);
std::cout << "Finished: Checking quality of RHMC action approx for " << quark_descr << " quark denominator and power -1/" << inv_pow << std::endl;
std::cout << "Starting: Checking quality of RHMC action approx for " << quark_descr << " quark denominator and power -1/" << 2*inv_pow << std::endl;
InversePowerBoundsCheck(2*inv_pow, 10000, 1e16, VdagV,gauss_o, rhmc.ApproxNegHalfPowerAction);
std::cout << "Finished: Checking quality of RHMC action approx for " << quark_descr << " quark denominator and power -1/" << 2*inv_pow << std::endl;
std::cout << "-------------------------------------------------------------------------------" << std::endl;
std::cout << "Starting: Checking quality of RHMC MD approx for " << quark_descr << " quark numerator and power -1/" << inv_pow << std::endl;
InversePowerBoundsCheck(inv_pow, 10000, 1e16, MdagM,gauss_o, rhmc.ApproxNegPowerMD);
std::cout << "Finished: Checking quality of RHMC MD approx for " << quark_descr << " quark numerator and power -1/" << inv_pow << std::endl;
std::cout << "Starting: Checking quality of RHMC MD approx for " << quark_descr << " quark numerator and power -1/" << 2*inv_pow << std::endl;
InversePowerBoundsCheck(2*inv_pow, 10000, 1e16, MdagM,gauss_o, rhmc.ApproxNegHalfPowerMD);
std::cout << "Finished: Checking quality of RHMC MD approx for " << quark_descr << " quark numerator and power -1/" << 2*inv_pow << std::endl;
std::cout << "Starting: Checking quality of RHMC MD approx for " << quark_descr << " quark denominator and power -1/" << inv_pow << std::endl;
InversePowerBoundsCheck(inv_pow, 10000, 1e16, VdagV,gauss_o, rhmc.ApproxNegPowerMD);
std::cout << "Finished: Checking quality of RHMC MD approx for " << quark_descr << " quark denominator and power -1/" << inv_pow << std::endl;
std::cout << "Starting: Checking quality of RHMC MD approx for " << quark_descr << " quark denominator and power -1/" << 2*inv_pow << std::endl;
InversePowerBoundsCheck(2*inv_pow, 10000, 1e16, VdagV,gauss_o, rhmc.ApproxNegHalfPowerMD);
std::cout << "Finished: Checking quality of RHMC MD approx for " << quark_descr << " quark denominator and power -1/" << 2*inv_pow << std::endl;
}
int main(int argc, char **argv) {
Grid_init(&argc, &argv);
int threads = GridThread::GetThreads();
// here make a routine to print all the relevant information on the run
std::cout << GridLogMessage << "Grid is setup to use " << threads << " threads" << std::endl;
std::string param_file = "params.xml";
bool file_load_check = false;
for(int i=1;i<argc;i++){
std::string sarg(argv[i]);
if(sarg == "--param_file"){
assert(i!=argc-1);
param_file = argv[i+1];
}else if(sarg == "--read_check"){ //check the fields load correctly and pass checksum/plaquette repro
file_load_check = true;
}
}
//Read the user parameters
EvolParameters user_params;
if(fileExists(param_file)){
std::cout << GridLogMessage << " Reading " << param_file << std::endl;
Grid::XmlReader rd(param_file);
read(rd, "Params", user_params);
}else if(!GlobalSharedMemory::WorldRank){
std::cout << GridLogMessage << " File " << param_file << " does not exist" << std::endl;
std::cout << GridLogMessage << " Writing xml template to " << param_file << ".templ" << std::endl;
Grid::XmlWriter wr(param_file + ".templ");
write(wr, "Params", user_params);
std::cout << GridLogMessage << " Done" << std::endl;
Grid_finalize();
return 0;
}
//Check the parameters
if(user_params.GparityDirs.size() != Nd-1){
std::cerr << "Error in input parameters: expect GparityDirs to have size = " << Nd-1 << std::endl;
exit(1);
}
for(int i=0;i<Nd-1;i++)
if(user_params.GparityDirs[i] != 0 && user_params.GparityDirs[i] != 1){
std::cerr << "Error in input parameters: expect GparityDirs values to be 0 (periodic) or 1 (G-parity)" << std::endl;
exit(1);
}
// Typedefs to simplify notation
typedef GparityDomainWallFermionD FermionActionD;
typedef typename FermionActionD::Impl_t FermionImplPolicyD;
typedef typename FermionActionD::FermionField FermionFieldD;
typedef GparityDomainWallFermionF FermionActionF;
typedef typename FermionActionF::Impl_t FermionImplPolicyF;
typedef typename FermionActionF::FermionField FermionFieldF;
typedef GeneralEvenOddRatioRationalMixedPrecPseudoFermionAction<FermionImplPolicyD,FermionImplPolicyF> MixedPrecRHMC;
typedef GeneralEvenOddRatioRationalPseudoFermionAction<FermionImplPolicyD> DoublePrecRHMC;
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
IntegratorParameters MD;
typedef ConjugateHMCRunnerD<MinimumNorm2> HMCWrapper; //NB: This is the "Omelyan integrator"
typedef HMCWrapper::ImplPolicy GaugeImplPolicy;
MD.name = std::string("MinimumNorm2");
MD.MDsteps = user_params.Steps;
MD.trajL = 1.0;
HMCparameters HMCparams;
HMCparams.StartTrajectory = user_params.StartTrajectory;
HMCparams.Trajectories = user_params.Trajectories;
HMCparams.NoMetropolisUntil= 0;
HMCparams.StartingType = user_params.StartingType;
HMCparams.MetropolisTest = user_params.MetropolisTest;
HMCparams.MD = MD;
HMCWrapper TheHMC(HMCparams);
// Grid from the command line arguments --grid and --mpi
TheHMC.Resources.AddFourDimGrid("gauge"); // use default simd lanes decomposition
CheckpointerParameters CPparams;
CPparams.config_prefix = "ckpoint_lat";
CPparams.rng_prefix = "ckpoint_rng";
CPparams.saveInterval = user_params.SaveInterval;
CPparams.format = "IEEE64BIG";
TheHMC.Resources.LoadNerscCheckpointer(CPparams);
//Note that checkpointing saves the RNG state so that this initialization is required only for the very first configuration
RNGModuleParameters RNGpar;
RNGpar.serial_seeds = "1 2 3 4 5";
RNGpar.parallel_seeds = "6 7 8 9 10";
TheHMC.Resources.SetRNGSeeds(RNGpar);
typedef PlaquetteMod<GaugeImplPolicy> PlaqObs;
TheHMC.Resources.AddObservable<PlaqObs>();
//////////////////////////////////////////////
const int Ls = 16;
Real beta = 2.13;
Real light_mass = 0.01;
Real strange_mass = 0.032;
Real pv_mass = 1.0;
RealD M5 = 1.8;
//Setup the Grids
auto GridPtrD = TheHMC.Resources.GetCartesian();
auto GridRBPtrD = TheHMC.Resources.GetRBCartesian();
auto FGridD = SpaceTimeGrid::makeFiveDimGrid(Ls,GridPtrD);
auto FrbGridD = SpaceTimeGrid::makeFiveDimRedBlackGrid(Ls,GridPtrD);
GridCartesian* GridPtrF = SpaceTimeGrid::makeFourDimGrid(GridDefaultLatt(), GridDefaultSimd(Nd, vComplexF::Nsimd()), GridDefaultMpi());
GridRedBlackCartesian* GridRBPtrF = SpaceTimeGrid::makeFourDimRedBlackGrid(GridPtrF);
auto FGridF = SpaceTimeGrid::makeFiveDimGrid(Ls,GridPtrF);
auto FrbGridF = SpaceTimeGrid::makeFiveDimRedBlackGrid(Ls,GridPtrF);
ConjugateIwasakiGaugeActionD GaugeAction(beta);
// temporarily need a gauge field
LatticeGaugeFieldD Ud(GridPtrD);
LatticeGaugeFieldF Uf(GridPtrF);
//Setup the BCs
FermionActionD::ImplParams Params;
for(int i=0;i<Nd-1;i++) Params.twists[i] = user_params.GparityDirs[i]; //G-parity directions
Params.twists[Nd-1] = 1; //APBC in time direction
std::vector<int> dirs4(Nd);
for(int i=0;i<Nd-1;i++) dirs4[i] = user_params.GparityDirs[i];
dirs4[Nd-1] = 0; //periodic gauge BC in time
GaugeImplPolicy::setDirections(dirs4); //gauge BC
//Run optional gauge field checksum checker and exit
if(file_load_check){
TheHMC.initializeGaugeFieldAndRNGs(Ud);
std::cout << GridLogMessage << " Done" << std::endl;
Grid_finalize();
return 0;
}
////////////////////////////////////
// Collect actions
////////////////////////////////////
ActionLevel<HMCWrapper::Field> Level1(1); //light quark + strange quark
ActionLevel<HMCWrapper::Field> Level2(8); //gauge (8 increments per step)
/////////////////////////////////////////////////////////////
// Light action
/////////////////////////////////////////////////////////////
FermionActionD Numerator_lD(Ud,*FGridD,*FrbGridD,*GridPtrD,*GridRBPtrD, light_mass,M5,Params);
FermionActionD Denominator_lD(Ud,*FGridD,*FrbGridD,*GridPtrD,*GridRBPtrD, pv_mass,M5,Params);
FermionActionF Numerator_lF(Uf,*FGridF,*FrbGridF,*GridPtrF,*GridRBPtrF, light_mass,M5,Params);
FermionActionF Denominator_lF(Uf,*FGridF,*FrbGridF,*GridPtrF,*GridRBPtrF, pv_mass,M5,Params);
RationalActionParams rat_act_params_l;
rat_act_params_l.inv_pow = 2; // (M^dag M)^{1/2}
rat_act_params_l.precision= 60;
rat_act_params_l.MaxIter = 10000;
user_params.rat_quo_l.Export(rat_act_params_l);
std::cout << GridLogMessage << " Light quark bounds check every " << rat_act_params_l.BoundsCheckFreq << " trajectories (avg)" << std::endl;
MixedPrecRHMC Quotient_l(Denominator_lD, Numerator_lD, Denominator_lF, Numerator_lF, rat_act_params_l, user_params.rat_quo_l.reliable_update_freq);
//DoublePrecRHMC Quotient_l(Denominator_lD, Numerator_lD, rat_act_params_l);
Level1.push_back(&Quotient_l);
////////////////////////////////////
// Strange action
////////////////////////////////////
FermionActionD Numerator_sD(Ud,*FGridD,*FrbGridD,*GridPtrD,*GridRBPtrD,strange_mass,M5,Params);
FermionActionD Denominator_sD(Ud,*FGridD,*FrbGridD,*GridPtrD,*GridRBPtrD, pv_mass,M5,Params);
FermionActionF Numerator_sF(Uf,*FGridF,*FrbGridF,*GridPtrF,*GridRBPtrF,strange_mass,M5,Params);
FermionActionF Denominator_sF(Uf,*FGridF,*FrbGridF,*GridPtrF,*GridRBPtrF, pv_mass,M5,Params);
RationalActionParams rat_act_params_s;
rat_act_params_s.inv_pow = 4; // (M^dag M)^{1/4}
rat_act_params_s.precision= 60;
rat_act_params_s.MaxIter = 10000;
user_params.rat_quo_s.Export(rat_act_params_s);
std::cout << GridLogMessage << " Heavy quark bounds check every " << rat_act_params_l.BoundsCheckFreq << " trajectories (avg)" << std::endl;
MixedPrecRHMC Quotient_s(Denominator_sD, Numerator_sD, Denominator_sF, Numerator_sF, rat_act_params_s, user_params.rat_quo_s.reliable_update_freq);
//DoublePrecRHMC Quotient_s(Denominator_sD, Numerator_sD, rat_act_params_s);
Level1.push_back(&Quotient_s);
/////////////////////////////////////////////////////////////
// Gauge action
/////////////////////////////////////////////////////////////
Level2.push_back(&GaugeAction);
TheHMC.TheAction.push_back(Level1);
TheHMC.TheAction.push_back(Level2);
std::cout << GridLogMessage << " Action complete "<< std::endl;
//Action tuning
bool tune_rhmc_l=false, tune_rhmc_s=false, eigenrange_l=false, eigenrange_s=false;
std::string lanc_params_l, lanc_params_s;
for(int i=1;i<argc;i++){
std::string sarg(argv[i]);
if(sarg == "--tune_rhmc_l") tune_rhmc_l=true;
else if(sarg == "--tune_rhmc_s") tune_rhmc_s=true;
else if(sarg == "--eigenrange_l"){
assert(i < argc-1);
eigenrange_l=true;
lanc_params_l = argv[i+1];
}
else if(sarg == "--eigenrange_s"){
assert(i < argc-1);
eigenrange_s=true;
lanc_params_s = argv[i+1];
}
}
if(tune_rhmc_l || tune_rhmc_s || eigenrange_l || eigenrange_s){
TheHMC.initializeGaugeFieldAndRNGs(Ud);
if(eigenrange_l) computeEigenvalues<FermionActionD, FermionFieldD>(lanc_params_l, FGridD, FrbGridD, Ud, Numerator_lD, TheHMC.Resources.GetParallelRNG());
if(eigenrange_s) computeEigenvalues<FermionActionD, FermionFieldD>(lanc_params_s, FGridD, FrbGridD, Ud, Numerator_sD, TheHMC.Resources.GetParallelRNG());
if(tune_rhmc_l) checkRHMC<FermionActionD, FermionFieldD, decltype(Quotient_l)>(FGridD, FrbGridD, Ud, Numerator_lD, Denominator_lD, Quotient_l, TheHMC.Resources.GetParallelRNG(), 2, "light");
if(tune_rhmc_s) checkRHMC<FermionActionD, FermionFieldD, decltype(Quotient_s)>(FGridD, FrbGridD, Ud, Numerator_sD, Denominator_sD, Quotient_s, TheHMC.Resources.GetParallelRNG(), 4, "strange");
std::cout << GridLogMessage << " Done" << std::endl;
Grid_finalize();
return 0;
}
//Run the HMC
std::cout << GridLogMessage << " Running the HMC "<< std::endl;
TheHMC.Run();
std::cout << GridLogMessage << " Done" << std::endl;
Grid_finalize();
return 0;
} // main
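The MixedPrecRHMC quotients above are constructed with RatQuoParameters::reliable_update_freq, i.e. the single-precision arithmetic is periodically corrected against a true residual recomputed in double precision rather than being fully restarted. The following is a minimal, self-contained sketch of that reliable-update idea in plain C++ on a toy diagonal operator; it is not Grid code, and the operator, sizes and tolerances are invented for illustration.

// Minimal standalone sketch (plain C++, not Grid API) of the "reliable update" idea:
// run the CG arithmetic in single precision, but every `freq` iterations recompute the
// true residual b - A x in double precision and inject it, so single-precision rounding
// drift cannot accumulate. The operator is a toy SPD diagonal matrix.
#include <cmath>
#include <cstdio>
#include <vector>

template<typename F>
void applyA(const std::vector<double>& diag, const std::vector<F>& in, std::vector<F>& out){
  for(size_t i=0;i<in.size();i++) out[i] = static_cast<F>(diag[i]) * in[i];
}
template<typename F>
double dot(const std::vector<F>& a, const std::vector<F>& b){
  double s=0; for(size_t i=0;i<a.size();i++) s += double(a[i])*double(b[i]); return s;
}

int main(){
  const int N=1000, freq=50, maxit=20000;          // freq plays the role of reliable_update_freq
  const double tol=1e-10;
  std::vector<double> diag(N); for(int i=0;i<N;i++) diag[i]=1.0+i;
  std::vector<double> b(N,1.0), x(N,0.0), Ax(N);   // solution accumulated in double
  std::vector<float>  r(N), p(N), Ap(N);           // working vectors in single precision
  for(int i=0;i<N;i++){ r[i]=float(b[i]); p[i]=r[i]; }
  double rr=dot(r,r), bb=dot(b,b);

  for(int k=1;k<=maxit;k++){
    applyA(diag,p,Ap);
    double alpha = rr/dot(p,Ap);
    for(int i=0;i<N;i++){ x[i]+=alpha*double(p[i]); r[i]-=float(alpha)*Ap[i]; }
    double rr_new = dot(r,r);
    if(k%freq==0){                                 // reliable update: true residual in double
      applyA(diag,x,Ax);
      double true_rr=0;
      for(int i=0;i<N;i++){ double ri=b[i]-Ax[i]; true_rr+=ri*ri; r[i]=float(ri); }
      rr_new = true_rr;
      if(std::sqrt(true_rr/bb)<tol){ std::printf("converged at iteration %d\n",k); return 0; }
    }
    for(int i=0;i<N;i++) p[i] = r[i] + float(rr_new/rr)*p[i];
    rr = rr_new;
  }
  std::printf("not converged after %d iterations\n",maxit);
  return 1;
}

The freq = 50 default in RatQuoParameters is presumably chosen with the same trade-off in mind: often enough to bound the single-precision drift, rarely enough that the double-precision corrections stay cheap.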


@@ -0,0 +1,473 @@
/*************************************************************************************
Grid physics library, www.github.com/paboyle/Grid
Source file: ./HMC/DWF2p1fIwasakiGparity.cc
Copyright (C) 2015-2016
Author: Christopher Kelly <ckelly@bnl.gov>
Author: Peter Boyle <pabobyle@ph.ed.ac.uk>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
See the full license in the file "LICENSE" in the top level distribution
directory
*************************************************************************************/
/* END LEGAL */
#include <Grid/Grid.h>
using namespace Grid;
//2+1f DWF+I ensemble with G-parity BCs
//designed to reproduce ensembles in https://arxiv.org/pdf/1908.08640.pdf
struct RatQuoParameters: Serializable {
GRID_SERIALIZABLE_CLASS_MEMBERS(RatQuoParameters,
double, bnd_lo,
double, bnd_hi,
Integer, action_degree,
double, action_tolerance,
Integer, md_degree,
double, md_tolerance,
Integer, reliable_update_freq,
Integer, bnd_check_freq);
RatQuoParameters() {
bnd_lo = 1e-2;
bnd_hi = 30;
action_degree = 10;
action_tolerance = 1e-10;
md_degree = 10;
md_tolerance = 1e-8;
bnd_check_freq = 20;
reliable_update_freq = 50;
}
void Export(RationalActionParams &into) const{
into.lo = bnd_lo;
into.hi = bnd_hi;
into.action_degree = action_degree;
into.action_tolerance = action_tolerance;
into.md_degree = md_degree;
into.md_tolerance = md_tolerance;
into.BoundsCheckFreq = bnd_check_freq;
}
};
struct EvolParameters: Serializable {
GRID_SERIALIZABLE_CLASS_MEMBERS(EvolParameters,
Integer, StartTrajectory,
Integer, Trajectories,
Integer, SaveInterval,
Integer, Steps,
bool, MetropolisTest,
std::string, StartingType,
std::vector<Integer>, GparityDirs,
RatQuoParameters, rat_quo_l,
RatQuoParameters, rat_quo_s);
EvolParameters() {
//For initial thermalization; afterwards user should switch Metropolis on and use StartingType=CheckpointStart
MetropolisTest = false;
StartTrajectory = 0;
Trajectories = 50;
SaveInterval = 5;
StartingType = "ColdStart";
GparityDirs.resize(3, 1); //1 for G-parity, 0 for periodic
Steps = 5;
}
};
bool fileExists(const std::string &fn){
std::ifstream f(fn);
return f.good();
}
struct LanczosParameters: Serializable {
GRID_SERIALIZABLE_CLASS_MEMBERS(LanczosParameters,
double, alpha,
double, beta,
double, mu,
int, ord,
int, n_stop,
int, n_want,
int, n_use,
double, tolerance);
LanczosParameters() {
alpha = 35;
beta = 5;
mu = 0;
ord = 100;
n_stop = 10;
n_want = 10;
n_use = 15;
tolerance = 1e-6;
}
};
template<typename FermionActionD, typename FermionFieldD>
void computeEigenvalues(std::string param_file,
GridCartesian* Grid, GridRedBlackCartesian* rbGrid, const LatticeGaugeFieldD &latt, //expect lattice to have been initialized to something
FermionActionD &action, GridParallelRNG &rng){
LanczosParameters params;
if(fileExists(param_file)){
std::cout << GridLogMessage << " Reading " << param_file << std::endl;
Grid::XmlReader rd(param_file);
read(rd, "LanczosParameters", params);
}else if(!GlobalSharedMemory::WorldRank){
std::cout << GridLogMessage << " File " << param_file << " does not exist" << std::endl;
std::cout << GridLogMessage << " Writing xml template to " << param_file << ".templ" << std::endl;
Grid::XmlWriter wr(param_file + ".templ");
write(wr, "LanczosParameters", params);
}
FermionFieldD gauss_o(rbGrid);
FermionFieldD gauss(Grid);
gaussian(rng, gauss);
pickCheckerboard(Odd, gauss_o, gauss);
action.ImportGauge(latt);
SchurDiagMooeeOperator<FermionActionD, FermionFieldD> hermop(action);
PlainHermOp<FermionFieldD> hermop_wrap(hermop);
//ChebyshevLanczos<FermionFieldD> Cheb(params.alpha, params.beta, params.mu, params.ord);
assert(params.mu == 0.0);
Chebyshev<FermionFieldD> Cheb(params.beta*params.beta, params.alpha*params.alpha, params.ord+1);
FunctionHermOp<FermionFieldD> Cheb_wrap(Cheb, hermop);
std::cout << "IRL: alpha=" << params.alpha << " beta=" << params.beta << " mu=" << params.mu << " ord=" << params.ord << std::endl;
ImplicitlyRestartedLanczos<FermionFieldD> IRL(Cheb_wrap, hermop_wrap, params.n_stop, params.n_want, params.n_use, params.tolerance, 10000);
std::vector<RealD> eval(params.n_use);
std::vector<FermionFieldD> evec(params.n_use, rbGrid);
int Nconv;
IRL.calc(eval, evec, gauss_o, Nconv);
std::cout << "Eigenvalues:" << std::endl;
for(int i=0;i<params.n_want;i++){
std::cout << i << " " << eval[i] << std::endl;
}
}
//Check the quality of the RHMC approx
template<typename FermionActionD, typename FermionFieldD, typename RHMCtype>
void checkRHMC(GridCartesian* Grid, GridRedBlackCartesian* rbGrid, const LatticeGaugeFieldD &latt, //expect lattice to have been initialized to something
FermionActionD &numOp, FermionActionD &denOp, RHMCtype &rhmc, GridParallelRNG &rng,
int inv_pow, const std::string &quark_descr){
FermionFieldD gauss_o(rbGrid);
FermionFieldD gauss(Grid);
gaussian(rng, gauss);
pickCheckerboard(Odd, gauss_o, gauss);
numOp.ImportGauge(latt);
denOp.ImportGauge(latt);
typedef typename FermionActionD::Impl_t FermionImplPolicyD;
SchurDifferentiableOperator<FermionImplPolicyD> MdagM(numOp);
SchurDifferentiableOperator<FermionImplPolicyD> VdagV(denOp);
std::cout << "Starting: Checking quality of RHMC action approx for " << quark_descr << " quark numerator and power -1/" << inv_pow << std::endl;
InversePowerBoundsCheck(inv_pow, 10000, 1e16, MdagM,gauss_o, rhmc.ApproxNegPowerAction); //use large tolerance to prevent exit on fail; we are trying to tune here!
std::cout << "Finished: Checking quality of RHMC action approx for " << quark_descr << " quark numerator and power -1/" << inv_pow << std::endl;
std::cout << "Starting: Checking quality of RHMC action approx for " << quark_descr << " quark numerator and power -1/" << 2*inv_pow << std::endl;
InversePowerBoundsCheck(2*inv_pow, 10000, 1e16, MdagM,gauss_o, rhmc.ApproxNegHalfPowerAction);
std::cout << "Finished: Checking quality of RHMC action approx for " << quark_descr << " quark numerator and power -1/" << 2*inv_pow << std::endl;
std::cout << "Starting: Checking quality of RHMC action approx for " << quark_descr << " quark denominator and power -1/" << inv_pow << std::endl;
InversePowerBoundsCheck(inv_pow, 10000, 1e16, VdagV,gauss_o, rhmc.ApproxNegPowerAction);
std::cout << "Finished: Checking quality of RHMC action approx for " << quark_descr << " quark denominator and power -1/" << inv_pow << std::endl;
std::cout << "Starting: Checking quality of RHMC action approx for " << quark_descr << " quark denominator and power -1/" << 2*inv_pow << std::endl;
InversePowerBoundsCheck(2*inv_pow, 10000, 1e16, VdagV,gauss_o, rhmc.ApproxNegHalfPowerAction);
std::cout << "Finished: Checking quality of RHMC action approx for " << quark_descr << " quark denominator and power -1/" << 2*inv_pow << std::endl;
std::cout << "-------------------------------------------------------------------------------" << std::endl;
std::cout << "Starting: Checking quality of RHMC MD approx for " << quark_descr << " quark numerator and power -1/" << inv_pow << std::endl;
InversePowerBoundsCheck(inv_pow, 10000, 1e16, MdagM,gauss_o, rhmc.ApproxNegPowerMD);
std::cout << "Finished: Checking quality of RHMC MD approx for " << quark_descr << " quark numerator and power -1/" << inv_pow << std::endl;
std::cout << "Starting: Checking quality of RHMC MD approx for " << quark_descr << " quark numerator and power -1/" << 2*inv_pow << std::endl;
InversePowerBoundsCheck(2*inv_pow, 10000, 1e16, MdagM,gauss_o, rhmc.ApproxNegHalfPowerMD);
std::cout << "Finished: Checking quality of RHMC MD approx for " << quark_descr << " quark numerator and power -1/" << 2*inv_pow << std::endl;
std::cout << "Starting: Checking quality of RHMC MD approx for " << quark_descr << " quark denominator and power -1/" << inv_pow << std::endl;
InversePowerBoundsCheck(inv_pow, 10000, 1e16, VdagV,gauss_o, rhmc.ApproxNegPowerMD);
std::cout << "Finished: Checking quality of RHMC MD approx for " << quark_descr << " quark denominator and power -1/" << inv_pow << std::endl;
std::cout << "Starting: Checking quality of RHMC MD approx for " << quark_descr << " quark denominator and power -1/" << 2*inv_pow << std::endl;
InversePowerBoundsCheck(2*inv_pow, 10000, 1e16, VdagV,gauss_o, rhmc.ApproxNegHalfPowerMD);
std::cout << "Finished: Checking quality of RHMC MD approx for " << quark_descr << " quark denominator and power -1/" << 2*inv_pow << std::endl;
}
int main(int argc, char **argv) {
Grid_init(&argc, &argv);
int threads = GridThread::GetThreads();
// here make a routine to print all the relevant information on the run
std::cout << GridLogMessage << "Grid is setup to use " << threads << " threads" << std::endl;
std::string param_file = "params.xml";
bool file_load_check = false;
for(int i=1;i<argc;i++){
std::string sarg(argv[i]);
if(sarg == "--param_file"){
assert(i!=argc-1);
param_file = argv[i+1];
}else if(sarg == "--read_check"){ //check the fields load correctly and pass checksum/plaquette repro
file_load_check = true;
}
}
//Read the user parameters
EvolParameters user_params;
if(fileExists(param_file)){
std::cout << GridLogMessage << " Reading " << param_file << std::endl;
Grid::XmlReader rd(param_file);
read(rd, "Params", user_params);
}else if(!GlobalSharedMemory::WorldRank){
std::cout << GridLogMessage << " File " << param_file << " does not exist" << std::endl;
std::cout << GridLogMessage << " Writing xml template to " << param_file << ".templ" << std::endl;
Grid::XmlWriter wr(param_file + ".templ");
write(wr, "Params", user_params);
std::cout << GridLogMessage << " Done" << std::endl;
Grid_finalize();
return 0;
}
//Check the parameters
if(user_params.GparityDirs.size() != Nd-1){
std::cerr << "Error in input parameters: expect GparityDirs to have size = " << Nd-1 << std::endl;
exit(1);
}
for(int i=0;i<Nd-1;i++)
if(user_params.GparityDirs[i] != 0 && user_params.GparityDirs[i] != 1){
std::cerr << "Error in input parameters: expect GparityDirs values to be 0 (periodic) or 1 (G-parity)" << std::endl;
exit(1);
}
// Typedefs to simplify notation
typedef GparityDomainWallFermionD FermionActionD;
typedef typename FermionActionD::Impl_t FermionImplPolicyD;
typedef typename FermionActionD::FermionField FermionFieldD;
typedef GparityDomainWallFermionF FermionActionF;
typedef typename FermionActionF::Impl_t FermionImplPolicyF;
typedef typename FermionActionF::FermionField FermionFieldF;
typedef GeneralEvenOddRatioRationalMixedPrecPseudoFermionAction<FermionImplPolicyD,FermionImplPolicyF> MixedPrecRHMC;
typedef GeneralEvenOddRatioRationalPseudoFermionAction<FermionImplPolicyD> DoublePrecRHMC;
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
IntegratorParameters MD;
typedef ConjugateHMCRunnerD<MinimumNorm2> HMCWrapper; //NB: This is the "Omelyan integrator"
typedef HMCWrapper::ImplPolicy GaugeImplPolicy;
MD.name = std::string("MinimumNorm2");
MD.MDsteps = user_params.Steps;
MD.trajL = 1.0;
HMCparameters HMCparams;
HMCparams.StartTrajectory = user_params.StartTrajectory;
HMCparams.Trajectories = user_params.Trajectories;
HMCparams.NoMetropolisUntil= 0;
HMCparams.StartingType = user_params.StartingType;
HMCparams.MetropolisTest = user_params.MetropolisTest;
HMCparams.MD = MD;
HMCWrapper TheHMC(HMCparams);
// Grid from the command line arguments --grid and --mpi
TheHMC.Resources.AddFourDimGrid("gauge"); // use default simd lanes decomposition
CheckpointerParameters CPparams;
CPparams.config_prefix = "ckpoint_lat";
CPparams.rng_prefix = "ckpoint_rng";
CPparams.saveInterval = user_params.SaveInterval;
CPparams.format = "IEEE64BIG";
TheHMC.Resources.LoadNerscCheckpointer(CPparams);
//Note that checkpointing saves the RNG state so that this initialization is required only for the very first configuration
RNGModuleParameters RNGpar;
RNGpar.serial_seeds = "1 2 3 4 5";
RNGpar.parallel_seeds = "6 7 8 9 10";
TheHMC.Resources.SetRNGSeeds(RNGpar);
typedef PlaquetteMod<GaugeImplPolicy> PlaqObs;
TheHMC.Resources.AddObservable<PlaqObs>();
//////////////////////////////////////////////
const int Ls = 16;
Real beta = 2.13;
Real light_mass = 0.01;
Real strange_mass = 0.032;
Real pv_mass = 1.0;
RealD M5 = 1.8;
//Setup the Grids
auto GridPtrD = TheHMC.Resources.GetCartesian();
auto GridRBPtrD = TheHMC.Resources.GetRBCartesian();
auto FGridD = SpaceTimeGrid::makeFiveDimGrid(Ls,GridPtrD);
auto FrbGridD = SpaceTimeGrid::makeFiveDimRedBlackGrid(Ls,GridPtrD);
GridCartesian* GridPtrF = SpaceTimeGrid::makeFourDimGrid(GridDefaultLatt(), GridDefaultSimd(Nd, vComplexF::Nsimd()), GridDefaultMpi());
GridRedBlackCartesian* GridRBPtrF = SpaceTimeGrid::makeFourDimRedBlackGrid(GridPtrF);
auto FGridF = SpaceTimeGrid::makeFiveDimGrid(Ls,GridPtrF);
auto FrbGridF = SpaceTimeGrid::makeFiveDimRedBlackGrid(Ls,GridPtrF);
ConjugateIwasakiGaugeActionD GaugeAction(beta);
// temporarily need a gauge field
LatticeGaugeFieldD Ud(GridPtrD);
LatticeGaugeFieldF Uf(GridPtrF);
//Setup the BCs
FermionActionD::ImplParams Params;
for(int i=0;i<Nd-1;i++) Params.twists[i] = user_params.GparityDirs[i]; //G-parity directions
Params.twists[Nd-1] = 1; //APBC in time direction
std::vector<int> dirs4(Nd);
for(int i=0;i<Nd-1;i++) dirs4[i] = user_params.GparityDirs[i];
dirs4[Nd-1] = 0; //periodic gauge BC in time
GaugeImplPolicy::setDirections(dirs4); //gauge BC
//Run optional gauge field checksum checker and exit
if(file_load_check){
TheHMC.initializeGaugeFieldAndRNGs(Ud);
std::cout << GridLogMessage << " Done" << std::endl;
Grid_finalize();
return 0;
}
////////////////////////////////////
// Collect actions
////////////////////////////////////
ActionLevel<HMCWrapper::Field> Level1(1); //light quark + strange quark
ActionLevel<HMCWrapper::Field> Level2(8); //gauge (8 increments per step)
/////////////////////////////////////////////////////////////
// Light action
/////////////////////////////////////////////////////////////
FermionActionD Numerator_lD(Ud,*FGridD,*FrbGridD,*GridPtrD,*GridRBPtrD, light_mass,M5,Params);
FermionActionD Denominator_lD(Ud,*FGridD,*FrbGridD,*GridPtrD,*GridRBPtrD, pv_mass,M5,Params);
FermionActionF Numerator_lF(Uf,*FGridF,*FrbGridF,*GridPtrF,*GridRBPtrF, light_mass,M5,Params);
FermionActionF Denominator_lF(Uf,*FGridF,*FrbGridF,*GridPtrF,*GridRBPtrF, pv_mass,M5,Params);
RationalActionParams rat_act_params_l;
rat_act_params_l.inv_pow = 2; // (M^dag M)^{1/2}
rat_act_params_l.precision= 60;
rat_act_params_l.MaxIter = 10000;
user_params.rat_quo_l.Export(rat_act_params_l);
std::cout << GridLogMessage << " Light quark bounds check every " << rat_act_params_l.BoundsCheckFreq << " trajectories (avg)" << std::endl;
//MixedPrecRHMC Quotient_l(Denominator_lD, Numerator_lD, Denominator_lF, Numerator_lF, rat_act_params_l, user_params.rat_quo_l.reliable_update_freq);
DoublePrecRHMC Quotient_l(Denominator_lD, Numerator_lD, rat_act_params_l);
Level1.push_back(&Quotient_l);
////////////////////////////////////
// Strange action
////////////////////////////////////
FermionActionD Numerator_sD(Ud,*FGridD,*FrbGridD,*GridPtrD,*GridRBPtrD,strange_mass,M5,Params);
FermionActionD Denominator_sD(Ud,*FGridD,*FrbGridD,*GridPtrD,*GridRBPtrD, pv_mass,M5,Params);
FermionActionF Numerator_sF(Uf,*FGridF,*FrbGridF,*GridPtrF,*GridRBPtrF,strange_mass,M5,Params);
FermionActionF Denominator_sF(Uf,*FGridF,*FrbGridF,*GridPtrF,*GridRBPtrF, pv_mass,M5,Params);
RationalActionParams rat_act_params_s;
rat_act_params_s.inv_pow = 4; // (M^dag M)^{1/4}
rat_act_params_s.precision= 60;
rat_act_params_s.MaxIter = 10000;
user_params.rat_quo_s.Export(rat_act_params_s);
std::cout << GridLogMessage << " Heavy quark bounds check every " << rat_act_params_l.BoundsCheckFreq << " trajectories (avg)" << std::endl;
//MixedPrecRHMC Quotient_s(Denominator_sD, Numerator_sD, Denominator_sF, Numerator_sF, rat_act_params_s, user_params.rat_quo_s.reliable_update_freq);
DoublePrecRHMC Quotient_s(Denominator_sD, Numerator_sD, rat_act_params_s);
Level1.push_back(&Quotient_s);
/////////////////////////////////////////////////////////////
// Gauge action
/////////////////////////////////////////////////////////////
Level2.push_back(&GaugeAction);
TheHMC.TheAction.push_back(Level1);
TheHMC.TheAction.push_back(Level2);
std::cout << GridLogMessage << " Action complete "<< std::endl;
//Action tuning
bool tune_rhmc_l=false, tune_rhmc_s=false, eigenrange_l=false, eigenrange_s=false;
std::string lanc_params_l, lanc_params_s;
for(int i=1;i<argc;i++){
std::string sarg(argv[i]);
if(sarg == "--tune_rhmc_l") tune_rhmc_l=true;
else if(sarg == "--tune_rhmc_s") tune_rhmc_s=true;
else if(sarg == "--eigenrange_l"){
assert(i < argc-1);
eigenrange_l=true;
lanc_params_l = argv[i+1];
}
else if(sarg == "--eigenrange_s"){
assert(i < argc-1);
eigenrange_s=true;
lanc_params_s = argv[i+1];
}
}
if(tune_rhmc_l || tune_rhmc_s || eigenrange_l || eigenrange_s){
TheHMC.initializeGaugeFieldAndRNGs(Ud);
if(eigenrange_l) computeEigenvalues<FermionActionD, FermionFieldD>(lanc_params_l, FGridD, FrbGridD, Ud, Numerator_lD, TheHMC.Resources.GetParallelRNG());
if(eigenrange_s) computeEigenvalues<FermionActionD, FermionFieldD>(lanc_params_s, FGridD, FrbGridD, Ud, Numerator_sD, TheHMC.Resources.GetParallelRNG());
if(tune_rhmc_l) checkRHMC<FermionActionD, FermionFieldD, decltype(Quotient_l)>(FGridD, FrbGridD, Ud, Numerator_lD, Denominator_lD, Quotient_l, TheHMC.Resources.GetParallelRNG(), 2, "light");
if(tune_rhmc_s) checkRHMC<FermionActionD, FermionFieldD, decltype(Quotient_s)>(FGridD, FrbGridD, Ud, Numerator_sD, Denominator_sD, Quotient_s, TheHMC.Resources.GetParallelRNG(), 4, "strange");
std::cout << GridLogMessage << " Done" << std::endl;
Grid_finalize();
return 0;
}
//Run the HMC
std::cout << GridLogMessage << " Running the HMC "<< std::endl;
TheHMC.Run();
std::cout << GridLogMessage << " Done" << std::endl;
Grid_finalize();
return 0;
} // main
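The command line flags parsed above (--eigenrange_l / --eigenrange_s and --tune_rhmc_l / --tune_rhmc_s) are the tuning entry points: computeEigenvalues prints the extreme eigenvalues probed by the Lanczos run, and checkRHMC exercises the bounds checks with a deliberately huge tolerance so that a poor approximation is reported rather than fatal. A hand-written post-processing step would then compare the measured spectral range of M^dag M against the [bnd_lo, bnd_hi] window in RatQuoParameters; the sketch below illustrates only that comparison. Every number in it is invented for illustration and none of the names are Grid API.

// Hypothetical post-processing of the "--eigenrange_l" output; all values below are
// illustrative. The check is simply that the measured spectral range of M^dag M sits
// inside the rational-approximation bounds [bnd_lo, bnd_hi] with some headroom.
#include <algorithm>
#include <cstdio>
#include <vector>

int main(){
  // e.g. the largest modes printed by the Lanczos run (illustrative values only)
  std::vector<double> high_modes = {28.4, 27.9, 26.1, 25.0, 24.2};
  double lambda_max = *std::max_element(high_modes.begin(), high_modes.end());
  double lambda_min = 3.2e-4;            // e.g. from a separate low-mode Lanczos run
  double bnd_lo = 1e-2, bnd_hi = 30;     // the RatQuoParameters defaults
  double margin = 1.2;                   // keep ~20% headroom at each end
  bool inside = (lambda_max*margin < bnd_hi) && (lambda_min/margin > bnd_lo);
  if(inside)
    std::printf("spectral range [%g, %g] inside bounds [%g, %g]\n",
                lambda_min, lambda_max, bnd_lo, bnd_hi);
  else
    std::printf("widen the bounds: suggest bnd_lo <= %g and bnd_hi >= %g\n",
                lambda_min/margin, lambda_max*margin);
  return 0;
}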

HMC/Mobius2f.cc Normal file

@@ -0,0 +1,170 @@
/*************************************************************************************
Grid physics library, www.github.com/paboyle/Grid
Source file: ./tests/Test_hmc_EODWFRatio.cc
Copyright (C) 2015-2016
Author: Peter Boyle <pabobyle@ph.ed.ac.uk>
Author: Guido Cossu <guido.cossu@ed.ac.uk>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
See the full license in the file "LICENSE" in the top level distribution
directory
*************************************************************************************/
/* END LEGAL */
#include <Grid/Grid.h>
int main(int argc, char **argv) {
using namespace Grid;
Grid_init(&argc, &argv);
int threads = GridThread::GetThreads();
// here make a routine to print all the relevant information on the run
std::cout << GridLogMessage << "Grid is setup to use " << threads << " threads" << std::endl;
// Typedefs to simplify notation
typedef WilsonImplR FermionImplPolicy;
typedef MobiusFermionR FermionAction;
typedef typename FermionAction::FermionField FermionField;
typedef Grid::XmlReader Serialiser;
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
IntegratorParameters MD;
// typedef GenericHMCRunner<LeapFrog> HMCWrapper;
// MD.name = std::string("Leap Frog");
// typedef GenericHMCRunner<ForceGradient> HMCWrapper;
// MD.name = std::string("Force Gradient");
typedef GenericHMCRunner<MinimumNorm2> HMCWrapper;
MD.name = std::string("MinimumNorm2");
MD.MDsteps = 12;
MD.trajL = 1.0;
HMCparameters HMCparams;
HMCparams.StartTrajectory = 17;
HMCparams.Trajectories = 200;
HMCparams.NoMetropolisUntil= 0;
// "[HotStart, ColdStart, TepidStart, CheckpointStart]\n";
// HMCparams.StartingType =std::string("ColdStart");
HMCparams.StartingType =std::string("CheckpointStart");
HMCparams.MD = MD;
HMCWrapper TheHMC(HMCparams);
// Grid from the command line arguments --grid and --mpi
TheHMC.Resources.AddFourDimGrid("gauge"); // use default simd lanes decomposition
CheckpointerParameters CPparams;
CPparams.config_prefix = "ckpoint_2fDWF_lat";
CPparams.rng_prefix = "ckpoint_2fDWF_rng";
CPparams.saveInterval = 1;
CPparams.format = "IEEE64BIG";
TheHMC.Resources.LoadNerscCheckpointer(CPparams);
RNGModuleParameters RNGpar;
RNGpar.serial_seeds = "1 2 3 4 5";
RNGpar.parallel_seeds = "6 7 8 9 10";
TheHMC.Resources.SetRNGSeeds(RNGpar);
// Construct observables
// here there is too much indirection
typedef PlaquetteMod<HMCWrapper::ImplPolicy> PlaqObs;
TheHMC.Resources.AddObservable<PlaqObs>();
//////////////////////////////////////////////
const int Ls = 16;
Real beta = 2.13;
Real light_mass = 0.01;
Real pv_mass = 1.0;
RealD M5 = 1.8;
RealD b = 1.0;
RealD c = 0.0;
std::vector<Real> hasenbusch({ 0.1, 0.4, 0.7 });
auto GridPtr = TheHMC.Resources.GetCartesian();
auto GridRBPtr = TheHMC.Resources.GetRBCartesian();
auto FGrid = SpaceTimeGrid::makeFiveDimGrid(Ls,GridPtr);
auto FrbGrid = SpaceTimeGrid::makeFiveDimRedBlackGrid(Ls,GridPtr);
IwasakiGaugeActionR GaugeAction(beta);
// temporarily need a gauge field
LatticeGaugeField U(GridPtr);
// These lines are unnecessary if the BCs are all periodic
std::vector<Complex> boundary = {1,1,1,-1};
FermionAction::ImplParams Params(boundary);
double StoppingCondition = 1e-10;
double MaxCGIterations = 30000;
ConjugateGradient<FermionField> CG(StoppingCondition,MaxCGIterations);
////////////////////////////////////
// Collect actions
////////////////////////////////////
ActionLevel<HMCWrapper::Field> Level1(1);
ActionLevel<HMCWrapper::Field> Level2(8);
////////////////////////////////////
// up down action
////////////////////////////////////
std::vector<Real> light_den;
std::vector<Real> light_num;
int n_hasenbusch = hasenbusch.size();
light_den.push_back(light_mass);
for(int h=0;h<n_hasenbusch;h++){
light_den.push_back(hasenbusch[h]);
light_num.push_back(hasenbusch[h]);
}
light_num.push_back(pv_mass);
std::vector<FermionAction *> Numerators;
std::vector<FermionAction *> Denominators;
std::vector<TwoFlavourEvenOddRatioPseudoFermionAction<FermionImplPolicy> *> Quotients;
for(int h=0;h<n_hasenbusch+1;h++){
std::cout << GridLogMessage << " 2f quotient Action "<< light_num[h] << " / " << light_den[h]<< std::endl;
Numerators.push_back (new FermionAction(U,*FGrid,*FrbGrid,*GridPtr,*GridRBPtr,light_num[h],M5,b,c, Params));
Denominators.push_back(new FermionAction(U,*FGrid,*FrbGrid,*GridPtr,*GridRBPtr,light_den[h],M5,b,c, Params));
Quotients.push_back (new TwoFlavourEvenOddRatioPseudoFermionAction<FermionImplPolicy>(*Numerators[h],*Denominators[h],CG,CG));
}
for(int h=0;h<n_hasenbusch+1;h++){
Level1.push_back(Quotients[h]);
}
/////////////////////////////////////////////////////////////
// Gauge action
/////////////////////////////////////////////////////////////
Level2.push_back(&GaugeAction);
TheHMC.TheAction.push_back(Level1);
TheHMC.TheAction.push_back(Level2);
std::cout << GridLogMessage << " Action complete "<< std::endl;
/////////////////////////////////////////////////////////////
// HMC parameters are serialisable
std::cout << GridLogMessage << " Running the HMC "<< std::endl;
TheHMC.Run(); // no smearing
Grid_finalize();
} // main
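The loop above is the standard Hasenbusch splitting: with light_den = {0.01, 0.1, 0.4, 0.7} and light_num = {0.1, 0.4, 0.7, 1.0}, each two-flavour ratio carries only a modest mass ratio, and (up to the orientation convention of the pseudofermion action) the product of the factors telescopes so that only the target light mass and the Pauli-Villars mass survive:

\frac{\det\left[M^\dagger(m_\ell)\,M(m_\ell)\right]}{\det\left[M^\dagger(m_{\rm PV})\,M(m_{\rm PV})\right]}
  = \prod_{h=0}^{n_H}
    \frac{\det\left[M^\dagger(m^{\rm den}_h)\,M(m^{\rm den}_h)\right]}{\det\left[M^\dagger(m^{\rm num}_h)\,M(m^{\rm num}_h)\right]},
  \qquad m^{\rm den}_0=m_\ell,\quad m^{\rm num}_h=m^{\rm den}_{h+1},\quad m^{\rm num}_{n_H}=m_{\rm PV}.

Concretely the four quotients built here are 0.1/0.01, 0.4/0.1, 0.7/0.4 and 1.0/0.7, so every intermediate mass cancels in the product.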

HMC/Mobius2f_DDHMC_mixed.cc Normal file

@@ -0,0 +1,386 @@
/*************************************************************************************
Grid physics library, www.github.com/paboyle/Grid
Source file:
Copyright (C) 2015-2016
Author: Peter Boyle <pabobyle@ph.ed.ac.uk>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
See the full license in the file "LICENSE" in the top level distribution
directory
*************************************************************************************/
/* END LEGAL */
#include <Grid/Grid.h>
NAMESPACE_BEGIN(Grid);
template<class Impl>
class DomainLocalTwoFlavourEvenOddRatioPseudoFermionAction
: public TwoFlavourEvenOddRatioPseudoFermionAction<Impl>
{
public:
INHERIT_IMPL_TYPES(Impl);
Coordinate Block;
DomainDecomposition Domains;
DomainLocalTwoFlavourEvenOddRatioPseudoFermionAction(FermionOperator<Impl> &_NumOp,
FermionOperator<Impl> &_DenOp,
OperatorFunction<FermionField> & DS,
OperatorFunction<FermionField> & AS,
OperatorFunction<FermionField> & HS,
Coordinate &_Block ) :
Block(_Block),
Domains(_Block),
TwoFlavourEvenOddRatioPseudoFermionAction<Impl>(_NumOp,_DenOp,DS,AS,HS)
{};
virtual void refreshRestrict(FermionField &eta)
{
Domains.ProjectDomain(eta,0);
DumpSliceNorm("refresh Restrict eta",eta);
};
};
#define MIXED_PRECISION
NAMESPACE_END(Grid);
int main(int argc, char **argv)
{
using namespace Grid;
Grid_init(&argc, &argv);
int threads = GridThread::GetThreads();
// here make a routine to print all the relevant information on the run
std::cout << GridLogMessage << "Grid is setup to use " << threads << " threads" << std::endl;
// Typedefs to simplify notation
typedef WilsonImplR FimplD;
typedef WilsonImplF FimplF;
typedef FermionOperator<FimplF> FermionOperatorF;
typedef FermionOperator<FimplD> FermionOperatorD;
typedef MobiusFermionR FermionActionD;
typedef MobiusFermionF FermionActionF;
typedef DirichletFermionOperator<WilsonImplR> DirichletFermionD;
typedef DirichletFermionOperator<WilsonImplF> DirichletFermionF;
typedef MobiusEOFAFermionR FermionEOFAAction;
typedef typename FermionActionD::FermionField FermionFieldD;
typedef typename FermionActionF::FermionField FermionFieldF;
typedef SchurDiagMooeeOperator<FermionOperator<FimplF>,FermionFieldF> LinearOperatorF;
typedef SchurDiagMooeeOperator<FermionOperator<FimplD>,FermionFieldD> LinearOperatorD;
typedef SchurDiagMooeeDagOperator<FermionOperator<FimplF>,FermionFieldF> LinearOperatorDagF;
typedef SchurDiagMooeeDagOperator<FermionOperator<FimplD>,FermionFieldD> LinearOperatorDagD;
typedef Grid::XmlReader Serialiser;
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
IntegratorParameters MD;
// typedef GenericHMCRunner<LeapFrog> HMCWrapper;
// MD.name = std::string("Leap Frog");
// typedef GenericHMCRunner<ForceGradient> HMCWrapper;
// MD.name = std::string("Force Gradient");
typedef GenericHMCRunner<MinimumNorm2> HMCWrapper;
/*
MD.name = std::string("MinimumNorm2");
MD.MDsteps = 4; // dH = 0.08
// MD.MDsteps = 3; // dH = 0.8
MD.trajL = 1.0;
*/
HMCparameters HMCparams;
{
XmlReader HMCrd("HMCparameters.xml");
read(HMCrd,"HMCparameters",HMCparams);
std::cout << GridLogMessage<< HMCparams <<std::endl;
}
HMCWrapper TheHMC(HMCparams);
/*
HMCparams.StartTrajectory = 66;
HMCparams.Trajectories = 200;
HMCparams.NoMetropolisUntil= 0;
// "[HotStart, ColdStart, TepidStart, CheckpointStart]\n";
// HMCparams.StartingType =std::string("ColdStart");
HMCparams.StartingType =std::string("CheckpointStart");
HMCparams.MD = MD;
*/
// Grid from the command line arguments --grid and --mpi
TheHMC.Resources.AddFourDimGrid("gauge"); // use default simd lanes decomposition
CheckpointerParameters CPparams;
CPparams.config_prefix = "ckpoint_DDHMC_lat";
CPparams.rng_prefix = "ckpoint_DDHMC_rng";
CPparams.saveInterval = 1;
CPparams.format = "IEEE64BIG";
TheHMC.Resources.LoadNerscCheckpointer(CPparams);
RNGModuleParameters RNGpar;
RNGpar.serial_seeds = "1 2 3 4 5";
RNGpar.parallel_seeds = "6 7 8 9 10";
TheHMC.Resources.SetRNGSeeds(RNGpar);
// Momentum Dirichlet
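// Block = {0,0,0,24}: decompose only in the time direction, with domain boundaries every 24 slices.
// A zero entry is taken here to mean "no decomposition in that direction" (assumed convention; the
// authoritative definition lives in DomainDecomposition / DDHMCFilter).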
Coordinate Block({0,0,0,24});
TheHMC.Resources.SetMomentumFilter(new DDHMCFilter<WilsonImplR::Field>(Block));
// Construct observables
// here there is too much indirection
typedef PlaquetteMod<HMCWrapper::ImplPolicy> PlaqObs;
TheHMC.Resources.AddObservable<PlaqObs>();
//////////////////////////////////////////////
const int Ls = 16;
Real beta = 2.13;
// Real light_mass = 0.04;
Real light_mass = 0.01;
Real pv_mass = 1.0;
RealD M5 = 1.8;
RealD b = 1.0;
RealD c = 0.0;
std::vector<Real> hasenbusch({ 0.1, 0.4, 0.7 });
auto GridPtr = TheHMC.Resources.GetCartesian();
auto GridRBPtr = TheHMC.Resources.GetRBCartesian();
auto FGrid = SpaceTimeGrid::makeFiveDimGrid(Ls,GridPtr);
auto FrbGrid = SpaceTimeGrid::makeFiveDimRedBlackGrid(Ls,GridPtr);
Coordinate latt = GridDefaultLatt();
Coordinate mpi = GridDefaultMpi();
Coordinate simdF = GridDefaultSimd(Nd,vComplexF::Nsimd());
Coordinate simdD = GridDefaultSimd(Nd,vComplexD::Nsimd());
auto GridPtrF = SpaceTimeGrid::makeFourDimGrid(latt,simdF,mpi);
auto GridRBPtrF = SpaceTimeGrid::makeFourDimRedBlackGrid(GridPtrF);
auto FGridF = SpaceTimeGrid::makeFiveDimGrid(Ls,GridPtrF);
auto FrbGridF = SpaceTimeGrid::makeFiveDimRedBlackGrid(Ls,GridPtrF);
IwasakiGaugeActionR GaugeAction(beta);
// temporarily need a gauge field
LatticeGaugeField U(GridPtr);
LatticeGaugeFieldF UF(GridPtrF);
// These lines are unnecessary if the BCs are all periodic
std::vector<Complex> boundary = {1,1,1,-1};
FermionActionD::ImplParams Params(boundary);
FermionActionD::ImplParams DirichletParams(boundary);
DirichletParams.locally_periodic=true;
double ActionStoppingCondition = 1e-10;
double DerivativeStoppingCondition = 1e-10;
// double BoundaryDerivativeStoppingCondition = 1e-10; decent acceptance
double BoundaryDerivativeStoppingCondition = 1e-7; // decent acceptance
// double BoundaryDerivativeStoppingCondition = 1e-6; // bit bigger not huge
// double BoundaryDerivativeStoppingCondition = 1e-5; // Large dH poor acceptance
double MaxCGIterations = 30000;
////////////////////////////////////
// Collect actions
////////////////////////////////////
ActionLevel<HMCWrapper::Field> Level1(1);
ActionLevel<HMCWrapper::Field> Level2(3);
ActionLevel<HMCWrapper::Field> Level3(8);
ConjugateGradient<FermionFieldD> ActionCG(ActionStoppingCondition,MaxCGIterations);
ConjugateGradient<FermionFieldD> DerivativeCG(DerivativeStoppingCondition,MaxCGIterations);
////////////////////////////////////
// up down action
////////////////////////////////////
std::vector<Real> light_den;
std::vector<Real> light_num;
int n_hasenbusch = hasenbusch.size();
light_den.push_back(light_mass);
for(int h=0;h<n_hasenbusch;h++){
light_den.push_back(hasenbusch[h]);
light_num.push_back(hasenbusch[h]);
}
light_num.push_back(pv_mass);
//////////////////////////////////////////////////////////////
// Forced to replicate the MxPCG and DenominatorsF etc. because
// there is no convenient way to "Clone" physics params from double op
// into single op for any operator pair.
// Same issue prevents using MxPCG in the Heatbath step
//////////////////////////////////////////////////////////////
/////////////////////////////////////////////////
// These are consumed/owned by the Dirichlet wrappers
/////////////////////////////////////////////////
std::vector<FermionActionD *> DNumeratorsD;
std::vector<FermionActionF *> DNumeratorsF;
std::vector<FermionActionD *> DDenominatorsD;
std::vector<FermionActionF *> DDenominatorsF;
/////////////////////////////////////////////////
// Dirichlet wrappers
/////////////////////////////////////////////////
std::vector<DirichletFermionD *> DirichletNumeratorsD;
std::vector<DirichletFermionF *> DirichletNumeratorsF;
std::vector<DirichletFermionD *> DirichletDenominatorsD;
std::vector<DirichletFermionF *> DirichletDenominatorsF;
std::vector<DomainLocalTwoFlavourEvenOddRatioPseudoFermionAction<FimplD> *> Quotients;
typedef MixedPrecisionConjugateGradientOperatorFunction<FermionOperatorD,
FermionOperatorF,
LinearOperatorD,
LinearOperatorF> MxPCG;
std::vector<MxPCG *> ActionMPCG;
std::vector<MxPCG *> MPCG;
std::vector<LinearOperatorD *> LinOpD;
std::vector<LinearOperatorF *> LinOpF;
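// Controls for the single-precision inner solves of the mixed-precision CG wrappers built below:
// MX_inner caps the inner iteration count and MX_tol is the (loose) inner tolerance.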
int MX_inner = 1000;
RealD MX_tol = 1.0e-5;
for(int h=0;h<n_hasenbusch+1;h++){
std::cout << GridLogMessage << " 2f quotient Action "<< light_num[h] << " / " << light_den[h]<< std::endl;
DNumeratorsD.push_back (new FermionActionD(U,*FGrid,*FrbGrid,*GridPtr,*GridRBPtr,light_num[h],M5,b,c, DirichletParams));
DNumeratorsF.push_back (new FermionActionF(UF,*FGridF,*FrbGridF,*GridPtrF,*GridRBPtrF,light_num[h],M5,b,c, DirichletParams));
DDenominatorsD.push_back(new FermionActionD(U,*FGrid,*FrbGrid,*GridPtr,*GridRBPtr,light_den[h],M5,b,c, DirichletParams));
DDenominatorsF.push_back(new FermionActionF(UF,*FGridF,*FrbGridF,*GridPtrF,*GridRBPtrF,light_den[h],M5,b,c, DirichletParams));
DirichletNumeratorsD.push_back (new DirichletFermionD(*DNumeratorsD[h],Block));
DirichletNumeratorsF.push_back (new DirichletFermionF(*DNumeratorsF[h],Block));
DirichletDenominatorsD.push_back(new DirichletFermionD(*DDenominatorsD[h],Block));
DirichletDenominatorsF.push_back(new DirichletFermionF(*DDenominatorsF[h],Block));
// Dirichlet Schur even odd MpsDagMpc operators on local domains
LinOpD.push_back(new LinearOperatorD(*DirichletDenominatorsD[h]));
LinOpF.push_back(new LinearOperatorF(*DirichletDenominatorsF[h]));
// Derivative
MPCG.push_back(new MxPCG(DerivativeStoppingCondition,MX_tol,
MX_inner,
MaxCGIterations,
FrbGridF,
*DirichletDenominatorsF[h],*DirichletDenominatorsD[h],
*LinOpF[h], *LinOpD[h]) );
// Action
ActionMPCG.push_back(new MxPCG(ActionStoppingCondition,MX_tol,
MX_inner,
MaxCGIterations,
FrbGridF,
*DirichletDenominatorsF[h],*DirichletDenominatorsD[h],
*LinOpF[h], *LinOpD[h]) );
////////////////////////////////////////////////////////////////////////////
// Standard CG for 2f force
////////////////////////////////////////////////////////////////////////////
Quotients.push_back (new
DomainLocalTwoFlavourEvenOddRatioPseudoFermionAction<FimplD>
(*DirichletNumeratorsD[h],
*DirichletDenominatorsD[h],
*MPCG[h],
*ActionMPCG[h],
ActionCG,Block));
Level2.push_back(Quotients[h]);
}
/////////////////////////////////////////////////////////////
// Boundary action
/////////////////////////////////////////////////////////////
int l_idx = 0;
int pv_idx = n_hasenbusch;
RealD h_mass = 0.012;
std::cout << GridLogMessage<<" Boundary action masses " <<light_num[l_idx]<<" / "<<light_den[pv_idx]<<std::endl;
// OmegaBar crosses the domain boundary and is used in the Boundary operator, so no locally_periodic hack in the boundary det
// Dirichlet is applied in gauge link only. OmegaBar solve is too expensive. Monitor cost.
FermionActionD PeriNumeratorD (U,*FGrid,*FrbGrid,*GridPtr,*GridRBPtr,light_num[pv_idx],M5,b,c, Params);
FermionActionF PeriNumeratorF (UF,*FGridF,*FrbGridF,*GridPtrF,*GridRBPtrF,light_num[pv_idx],M5,b,c, Params);
FermionActionD DirichletNumeratorDD(U,*FGrid,*FrbGrid,*GridPtr,*GridRBPtr,light_num[pv_idx],M5,b,c, Params);
FermionActionF DirichletNumeratorFF(UF,*FGridF,*FrbGridF,*GridPtrF,*GridRBPtrF,light_num[pv_idx],M5,b,c, Params);
DirichletFermionD DirichletNumeratorD (DirichletNumeratorDD,Block);
DirichletFermionF DirichletNumeratorF (DirichletNumeratorFF,Block);
FermionActionD PeriDenominatorD(U,*FGrid,*FrbGrid,*GridPtr,*GridRBPtr,light_den[l_idx] ,M5,b,c, Params);
FermionActionF PeriDenominatorF(UF,*FGridF,*FrbGridF,*GridPtrF,*GridRBPtrF,light_den[l_idx] ,M5,b,c, Params);
FermionActionD DirichletDenominatorDD(U,*FGrid,*FrbGrid,*GridPtr,*GridRBPtr,light_den[l_idx] ,M5,b,c, Params);
FermionActionF DirichletDenominatorFF(UF,*FGridF,*FrbGridF,*GridPtrF,*GridRBPtrF,light_den[l_idx] ,M5,b,c, Params);
DirichletFermionD DirichletDenominatorD(DirichletDenominatorDD,Block);
DirichletFermionF DirichletDenominatorF(DirichletDenominatorFF,Block);
FermionActionD PeriHasenD (U,*FGrid,*FrbGrid,*GridPtr,*GridRBPtr,h_mass ,M5,b,c, Params);
FermionActionF PeriHasenF (UF,*FGridF,*FrbGridF,*GridPtrF,*GridRBPtrF,h_mass,M5,b,c, Params);
FermionActionD DHasenD(U,*FGrid,*FrbGrid,*GridPtr,*GridRBPtr,h_mass,M5,b,c, Params);
FermionActionF DHasenF(UF,*FGridF,*FrbGridF,*GridPtrF,*GridRBPtrF,h_mass,M5,b,c, Params);
DirichletFermionD DirichletHasenD(DHasenD,Block);
DirichletFermionF DirichletHasenF(DHasenF,Block);
SchurFactoredFermionOperator<FimplD,FimplF> BoundaryNumerator(PeriNumeratorD,PeriNumeratorF,
DirichletNumeratorD,DirichletNumeratorF,
Block);
SchurFactoredFermionOperator<FimplD,FimplF> BoundaryDenominator(PeriDenominatorD,PeriDenominatorF,
DirichletDenominatorD,DirichletDenominatorF,
Block);
SchurFactoredFermionOperator<FimplD,FimplF> BoundaryHasen(PeriHasenD,PeriHasenF,
DirichletHasenD,DirichletHasenF,
Block);
#if 1
std::cout << GridLogMessage << " Boundary NO ratio "<< std::endl;
MX_tol = 1.0e-5;
Level1.push_back(new
DomainDecomposedBoundaryTwoFlavourPseudoFermion<FimplD,FimplF>
(BoundaryDenominator,
BoundaryDerivativeStoppingCondition,ActionStoppingCondition,MX_tol));
Level1.push_back(new
DomainDecomposedBoundaryTwoFlavourBosonPseudoFermion<FimplD,FimplF>
(BoundaryNumerator,
BoundaryDerivativeStoppingCondition,ActionStoppingCondition,MX_tol));
#else
Level1.push_back(new
DomainDecomposedBoundaryTwoFlavourRatioPseudoFermion<FimplD,FimplF>
(BoundaryNumerator,
BoundaryDenominator,
BoundaryDerivativeStoppingCondition,ActionStoppingCondition));
#endif
/////////////////////////////////////////////////////////////
// Gauge action
/////////////////////////////////////////////////////////////
Level3.push_back(&GaugeAction);
TheHMC.TheAction.push_back(Level1);
TheHMC.TheAction.push_back(Level2);
TheHMC.TheAction.push_back(Level3);
std::cout << GridLogMessage << " Action complete "<< std::endl;
/////////////////////////////////////////////////////////////
// HMC parameters are serialisable
std::cout << GridLogMessage << " Running the HMC "<< std::endl;
TheHMC.Run(); // no smearing
Grid_finalize();
} // main
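The MxPCG solvers used above wrap MixedPrecisionConjugateGradientOperatorFunction, the class visible in the diff that follows (where it is apparently being hoisted out of the file in favour of the include of Grid/qcd/utils/MixedPrecisionOperatorFunction.h). The pattern it drives is restarted defect correction: compute the true residual in double precision, solve for a correction in single precision to a loose inner tolerance, accumulate the correction in double, and repeat until the outer tolerance is met. Below is a self-contained plain-C++ sketch of that pattern on a toy operator; none of the names are Grid API, and inner_tol and max_inner merely stand in for MX_tol and MX_inner above.

// Standalone sketch (plain C++, not Grid API) of the restarted mixed-precision pattern:
// outer defect correction in double precision, inner CG solves in single precision.
#include <algorithm>
#include <cmath>
#include <cstdio>
#include <vector>

static const int N = 500;
static std::vector<double> diag_op;           // toy SPD operator: A = diag(1..N)

template<typename F>
void applyA(const std::vector<F>& in, std::vector<F>& out){
  for(int i=0;i<N;i++) out[i] = static_cast<F>(diag_op[i])*in[i];
}

// plain single-precision CG on A e = rhs, to a relative tolerance inner_tol
void innerCG(const std::vector<float>& rhs, std::vector<float>& e, double inner_tol, int maxit){
  std::vector<float> r(rhs), p(rhs), Ap(N);
  std::fill(e.begin(), e.end(), 0.f);
  double rr=0; for(int i=0;i<N;i++) rr += double(r[i])*r[i];
  const double bb = rr;
  for(int k=0; k<maxit && std::sqrt(rr/bb)>inner_tol; k++){
    applyA(p,Ap);
    double pAp=0; for(int i=0;i<N;i++) pAp += double(p[i])*Ap[i];
    double alpha = rr/pAp;
    for(int i=0;i<N;i++){ e[i]+=float(alpha)*p[i]; r[i]-=float(alpha)*Ap[i]; }
    double rr_new=0; for(int i=0;i<N;i++) rr_new += double(r[i])*r[i];
    for(int i=0;i<N;i++) p[i] = r[i] + float(rr_new/rr)*p[i];
    rr = rr_new;
  }
}

int main(){
  diag_op.resize(N); for(int i=0;i<N;i++) diag_op[i]=1.0+i;
  std::vector<double> b(N,1.0), x(N,0.0), r(N), Ax(N);
  std::vector<float>  rf(N), ef(N,0.f);
  const double tol=1e-10, inner_tol=1e-5;
  const int max_outer=100, max_inner=1000;
  double bb=0; for(int i=0;i<N;i++) bb += b[i]*b[i];
  for(int restart=0; restart<max_outer; restart++){
    applyA(x,Ax);                                        // true residual in double precision
    double rr=0; for(int i=0;i<N;i++){ r[i]=b[i]-Ax[i]; rr+=r[i]*r[i]; }
    std::printf("outer %d: relative residual %.3e\n", restart, std::sqrt(rr/bb));
    if(std::sqrt(rr/bb) < tol) return 0;
    for(int i=0;i<N;i++) rf[i]=float(r[i]);
    innerCG(rf, ef, inner_tol, max_inner);               // cheap single-precision correction
    for(int i=0;i<N;i++) x[i] += double(ef[i]);          // accumulate correction in double
  }
  std::printf("not converged after %d restarts\n", max_outer);
  return 1;
}

The outer loop is what ultimately enforces the 1e-10 stopping conditions used in the file above; the single-precision work only has to be good enough to produce a useful correction each restart.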


@@ -33,137 +33,8 @@ directory
#ifdef GRID_DEFAULT_PRECISION_DOUBLE
#define MIXED_PRECISION
#endif
#include <Grid/qcd/utils/MixedPrecisionOperatorFunction.h>
NAMESPACE_BEGIN(Grid);
/*
* Need a plan for gauge field update for mixed precision in HMC (2x speed up)
* -- Store the single prec action operator.
* -- Clone the gauge field from the operator function argument.
* -- Build the mixed precision operator dynamically from the passed operator and single prec clone.
*/
template<class FermionOperatorD, class FermionOperatorF, class SchurOperatorD, class SchurOperatorF>
class MixedPrecisionConjugateGradientOperatorFunction : public OperatorFunction<typename FermionOperatorD::FermionField> {
public:
typedef typename FermionOperatorD::FermionField FieldD;
typedef typename FermionOperatorF::FermionField FieldF;
using OperatorFunction<FieldD>::operator();
RealD Tolerance;
RealD InnerTolerance; //Initial tolerance for inner CG. Defaults to Tolerance but can be changed
Integer MaxInnerIterations;
Integer MaxOuterIterations;
GridBase* SinglePrecGrid4; //Grid for single-precision fields
GridBase* SinglePrecGrid5; //Grid for single-precision fields
RealD OuterLoopNormMult; //Stop the outer loop and move to a final double prec solve when the residual is OuterLoopNormMult * Tolerance
FermionOperatorF &FermOpF;
FermionOperatorD &FermOpD;
SchurOperatorF &LinOpF;
SchurOperatorD &LinOpD;
Integer TotalInnerIterations; //Number of inner CG iterations
Integer TotalOuterIterations; //Number of restarts
Integer TotalFinalStepIterations; //Number of CG iterations in final patch-up step
MixedPrecisionConjugateGradientOperatorFunction(RealD tol,
Integer maxinnerit,
Integer maxouterit,
GridBase* _sp_grid4,
GridBase* _sp_grid5,
FermionOperatorF &_FermOpF,
FermionOperatorD &_FermOpD,
SchurOperatorF &_LinOpF,
SchurOperatorD &_LinOpD):
LinOpF(_LinOpF),
LinOpD(_LinOpD),
FermOpF(_FermOpF),
FermOpD(_FermOpD),
Tolerance(tol),
InnerTolerance(tol),
MaxInnerIterations(maxinnerit),
MaxOuterIterations(maxouterit),
SinglePrecGrid4(_sp_grid4),
SinglePrecGrid5(_sp_grid5),
OuterLoopNormMult(100.)
{
/* Debugging instances of objects; references are stored
std::cout << GridLogMessage << " Mixed precision CG wrapper LinOpF " <<std::hex<< &LinOpF<<std::dec <<std::endl;
std::cout << GridLogMessage << " Mixed precision CG wrapper LinOpD " <<std::hex<< &LinOpD<<std::dec <<std::endl;
std::cout << GridLogMessage << " Mixed precision CG wrapper FermOpF " <<std::hex<< &FermOpF<<std::dec <<std::endl;
std::cout << GridLogMessage << " Mixed precision CG wrapper FermOpD " <<std::hex<< &FermOpD<<std::dec <<std::endl;
*/
};
void operator()(LinearOperatorBase<FieldD> &LinOpU, const FieldD &src, FieldD &psi) {
std::cout << GridLogMessage << " Mixed precision CG wrapper operator() "<<std::endl;
SchurOperatorD * SchurOpU = static_cast<SchurOperatorD *>(&LinOpU);
// std::cout << GridLogMessage << " Mixed precision CG wrapper operator() FermOpU " <<std::hex<< &(SchurOpU->_Mat)<<std::dec <<std::endl;
// std::cout << GridLogMessage << " Mixed precision CG wrapper operator() FermOpD " <<std::hex<< &(LinOpD._Mat) <<std::dec <<std::endl;
// Assumption made in code to extract gauge field
// We could avoid storing the LinOpD reference altogether?
assert(&(SchurOpU->_Mat)==&(LinOpD._Mat));
////////////////////////////////////////////////////////////////////////////////////
// Must snarf a single precision copy of the gauge field in Linop_d argument
////////////////////////////////////////////////////////////////////////////////////
typedef typename FermionOperatorF::GaugeField GaugeFieldF;
typedef typename FermionOperatorF::GaugeLinkField GaugeLinkFieldF;
typedef typename FermionOperatorD::GaugeField GaugeFieldD;
typedef typename FermionOperatorD::GaugeLinkField GaugeLinkFieldD;
GridBase * GridPtrF = SinglePrecGrid4;
GridBase * GridPtrD = FermOpD.Umu.Grid();
GaugeFieldF U_f (GridPtrF);
GaugeLinkFieldF Umu_f(GridPtrF);
// std::cout << " Dim gauge field "<<GridPtrF->Nd()<<std::endl; // 4d
// std::cout << " Dim gauge field "<<GridPtrD->Nd()<<std::endl; // 4d
////////////////////////////////////////////////////////////////////////////////////
// Moving this to a Clone method of the fermion operator would allow duplicating the
// physics parameters and would reduce gauge field copies
////////////////////////////////////////////////////////////////////////////////////
GaugeLinkFieldD Umu_d(GridPtrD);
for(int mu=0;mu<Nd*2;mu++){
Umu_d = PeekIndex<LorentzIndex>(FermOpD.Umu, mu);
precisionChange(Umu_f,Umu_d);
PokeIndex<LorentzIndex>(FermOpF.Umu, Umu_f, mu);
}
pickCheckerboard(Even,FermOpF.UmuEven,FermOpF.Umu);
pickCheckerboard(Odd ,FermOpF.UmuOdd ,FermOpF.Umu);
////////////////////////////////////////////////////////////////////////////////////
// Could test to make sure that LinOpF and LinOpD agree to single prec?
////////////////////////////////////////////////////////////////////////////////////
/*
GridBase *Fgrid = psi._grid;
FieldD tmp2(Fgrid);
FieldD tmp1(Fgrid);
LinOpU.Op(src,tmp1);
LinOpD.Op(src,tmp2);
std::cout << " Double gauge field "<< norm2(FermOpD.Umu)<<std::endl;
std::cout << " Single gauge field "<< norm2(FermOpF.Umu)<<std::endl;
std::cout << " Test of operators "<<norm2(tmp1)<<std::endl;
std::cout << " Test of operators "<<norm2(tmp2)<<std::endl;
tmp1=tmp1-tmp2;
std::cout << " Test of operators diff "<<norm2(tmp1)<<std::endl;
*/
////////////////////////////////////////////////////////////////////////////////////
// Make a mixed precision conjugate gradient
////////////////////////////////////////////////////////////////////////////////////
MixedPrecisionConjugateGradient<FieldD,FieldF> MPCG(Tolerance,MaxInnerIterations,MaxOuterIterations,SinglePrecGrid5,LinOpF,LinOpD);
std::cout << GridLogMessage << "Calling mixed precision Conjugate Gradient" <<std::endl;
MPCG(src,psi);
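// Rough picture of the mixed precision solve (hedged; based on the usual defect-correction
// structure of MixedPrecisionConjugateGradient): the outer loop keeps the solution and true
// residual in double precision, each inner CG solves the residual equation in single precision
// to the inner tolerance, and the corrections are accumulated in double until the outer
// Tolerance is met or MaxOuterIterations is exhausted.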
}
};
NAMESPACE_END(Grid);
int main(int argc, char **argv) {
using namespace Grid;
@@ -190,18 +61,18 @@ int main(int argc, char **argv) {
// MD.name = std::string("Leap Frog");
typedef GenericHMCRunner<ForceGradient> HMCWrapper;
MD.name = std::string("Force Gradient");
// typedef GenericHMCRunner<MinimumNorm2> HMCWrapper;
// MD.name = std::string("MinimumNorm2");
MD.MDsteps = 6;
//typedef GenericHMCRunner<MinimumNorm2> HMCWrapper;
//MD.name = std::string("MinimumNorm2");
MD.MDsteps = 15;
MD.trajL = 1.0;
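// On the updated side of this diff (MDsteps = 15, trajL = 1.0) each outermost force-gradient
// step covers trajL/MDsteps = 1/15 ≈ 0.067 units of molecular dynamics time.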
HMCparameters HMCparams;
HMCparams.StartTrajectory = 590;
HMCparams.StartTrajectory = 0;
HMCparams.Trajectories = 1000;
HMCparams.NoMetropolisUntil= 0;
HMCparams.NoMetropolisUntil= 10;
// "[HotStart, ColdStart, TepidStart, CheckpointStart]\n";
// HMCparams.StartingType =std::string("ColdStart");
HMCparams.StartingType =std::string("CheckpointStart");
HMCparams.StartingType =std::string("ColdStart");
//HMCparams.StartingType =std::string("CheckpointStart");
HMCparams.MD = MD;
HMCWrapper TheHMC(HMCparams);
@@ -209,9 +80,9 @@ int main(int argc, char **argv) {
TheHMC.Resources.AddFourDimGrid("gauge"); // use default simd lanes decomposition
CheckpointerParameters CPparams;
CPparams.config_prefix = "ckpoint_EODWF_lat";
CPparams.rng_prefix = "ckpoint_EODWF_rng";
CPparams.saveInterval = 10;
CPparams.config_prefix = "ckpoint_EOFA_lat";
CPparams.rng_prefix = "ckpoint_EOFA_rng";
CPparams.saveInterval = 1;
CPparams.format = "IEEE64BIG";
TheHMC.Resources.LoadNerscCheckpointer(CPparams);
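// The NERSC checkpointer writes the gauge configuration and RNG state every saveInterval
// trajectories under the prefixes above, in IEEE64BIG format, so a run can later be resumed
// with StartingType = "CheckpointStart".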
@@ -226,16 +97,16 @@ int main(int argc, char **argv) {
TheHMC.Resources.AddObservable<PlaqObs>();
//////////////////////////////////////////////
const int Ls = 16;
const int Ls = 24;
Real beta = 2.13;
Real light_mass = 0.01;
Real strange_mass = 0.04;
Real light_mass = 0.005;
Real strange_mass = 0.0362;
Real pv_mass = 1.0;
RealD M5 = 1.8;
RealD b = 1.0;
RealD c = 0.0;
RealD b = 1.5;
RealD c = 0.5;
std::vector<Real> hasenbusch({ 0.1, 0.3, 0.6 });
std::vector<Real> hasenbusch({ 0.02, 0.2, 0.6 });
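// Hasenbusch mass preconditioning: with the updated masses the light-quark ratio determinant
// is typically split into a chain of better conditioned ratios, e.g.
//   det[M(0.005)/M(1.0)] = det[M(0.005)/M(0.02)] * det[M(0.02)/M(0.2)]
//                          * det[M(0.2)/M(0.6)]  * det[M(0.6)/M(1.0)]
// so that the lightest (most expensive) solves carry only a small part of the force.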
auto GridPtr = TheHMC.Resources.GetCartesian();
auto GridRBPtr = TheHMC.Resources.GetRBCartesian();
@@ -263,7 +134,7 @@ int main(int argc, char **argv) {
FermionActionF::ImplParams ParamsF(boundary);
double ActionStoppingCondition = 1e-10;
double DerivativeStoppingCondition = 1e-6;
double DerivativeStoppingCondition = 1e-8;
double MaxCGIterations = 30000;
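// The looser stopping condition (1e-8 on the updated side) is used for the MD force solves and
// the tighter 1e-10 for the action (accept/reject) solves; the usual rationale is that force
// inexactness is absorbed into the acceptance rate, while the action value enters the
// Metropolis test directly.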
////////////////////////////////////
@@ -302,40 +173,37 @@ int main(int argc, char **argv) {
ConjugateGradient<FermionField> DerivativeCG(DerivativeStoppingCondition,MaxCGIterations);
#ifdef MIXED_PRECISION
const int MX_inner = 1000;
const RealD MX_tol = 1.0e-6;
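// MX_inner caps the iteration count of each single precision inner solve; MX_tol is the inner
// (single precision) tolerance handed to the mixed precision wrappers constructed below.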
// Mixed precision EOFA
LinearOperatorEOFAD Strange_LinOp_L (Strange_Op_L);
LinearOperatorEOFAD Strange_LinOp_R (Strange_Op_R);
LinearOperatorEOFAF Strange_LinOp_LF(Strange_Op_LF);
LinearOperatorEOFAF Strange_LinOp_RF(Strange_Op_RF);
MxPCG_EOFA ActionCGL(ActionStoppingCondition,
MxPCG_EOFA ActionCGL(ActionStoppingCondition,MX_tol,
MX_inner,
MaxCGIterations,
GridPtrF,
FrbGridF,
Strange_Op_LF,Strange_Op_L,
Strange_LinOp_LF,Strange_LinOp_L);
MxPCG_EOFA DerivativeCGL(DerivativeStoppingCondition,
MxPCG_EOFA DerivativeCGL(DerivativeStoppingCondition,MX_tol,
MX_inner,
MaxCGIterations,
GridPtrF,
FrbGridF,
Strange_Op_LF,Strange_Op_L,
Strange_LinOp_LF,Strange_LinOp_L);
MxPCG_EOFA ActionCGR(ActionStoppingCondition,
MxPCG_EOFA ActionCGR(ActionStoppingCondition,MX_tol,
MX_inner,
MaxCGIterations,
GridPtrF,
FrbGridF,
Strange_Op_RF,Strange_Op_R,
Strange_LinOp_RF,Strange_LinOp_R);
MxPCG_EOFA DerivativeCGR(DerivativeStoppingCondition,
MxPCG_EOFA DerivativeCGR(DerivativeStoppingCondition,MX_tol,
MX_inner,
MaxCGIterations,
GridPtrF,
FrbGridF,
Strange_Op_RF,Strange_Op_R,
Strange_LinOp_RF,Strange_LinOp_R);
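// Four mixed precision solvers serve the strange-quark EOFA term: an action solver and a
// derivative solver for each of the two EOFA operators (_L and _R), each pairing a single
// precision (F) operator with its double precision (D) partner.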
@@ -401,18 +269,16 @@ int main(int argc, char **argv) {
LinOpD.push_back(new LinearOperatorD(*Denominators[h]));
LinOpF.push_back(new LinearOperatorF(*DenominatorsF[h]));
MPCG.push_back(new MxPCG(DerivativeStoppingCondition,
MPCG.push_back(new MxPCG(DerivativeStoppingCondition,MX_tol,
MX_inner,
MaxCGIterations,
GridPtrF,
FrbGridF,
*DenominatorsF[h],*Denominators[h],
*LinOpF[h], *LinOpD[h]) );
ActionMPCG.push_back(new MxPCG(ActionStoppingCondition,
ActionMPCG.push_back(new MxPCG(ActionStoppingCondition,MX_tol,
MX_inner,
MaxCGIterations,
GridPtrF,
FrbGridF,
*DenominatorsF[h],*Denominators[h],
*LinOpF[h], *LinOpD[h]) );
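// For each Hasenbusch mass (index h) two mixed precision solvers are kept: MPCG[h] with the
// derivative tolerance for the MD force and ActionMPCG[h] with the tighter action tolerance,
// both wrapping the same single/double precision operator pair.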
Some files were not shown because too many files have changed in this diff