Mirror of https://github.com/paboyle/Grid.git
Synced 2025-06-17 15:27:06 +01:00

Compare commits: 381 commits, feature/rm ... feature/sy
SHA1
229709a980
9295eeadfe
36f471e333
ca4eadd4ab
d954595922
1ac13ec3a7
55de69a569
eda9ab487b
cd99edcc5f
69f1f04f74
11a5fd09d6
ff1fa98808
b0339bc5a4
3c23a947cc
56111bb823
99445673f6
97a59643f7
579595f547
281ac5fc12
d8fa903b02
eaff0f3aeb
e8e20c01b2
a4afc3ea2a
3fe75bc7cb
45d49d8648
6013183361
4b882e8056
3f9ae6e7e7
909acd55cd
4dd9e39e0d
7adb253e25
873519e960
9aec4a3c26
70510d151b
9e7bacb5a4
2ef1fa66a8
cf76741ec6
497e7c1c40
888eacd3b8
321f0f51b5
30ad9578a2
9dce101586
97e264d0ff
683a5e5bf5
d4861a362c
5ff3eae027
147dc15d26
c61ea72949
86e8b9fe38
612e468889
4ea8d128c2
e49b7f2f88
aace3d47b9
d5049949a4
f1c7480e3c
5adae5d6ff
a8412ace05
9fd1c2ad4b
4cf3575353
804a810d68
8fcb392e24
dd8d70eeff
aa8aba6543
13df14f96e
3aab983760
9c4dcc5ea3
a1063ddbb9
18ef8056ec
1c673977fa
e9bc748828
f48156529b
d05ce01809
cf23eff60e
6e313575be
b13d1f7238
b5e7945dd9
7535566f54
50b808ab33
f16c2665f5
41e28015ae
a0ccbb3bd6
5eeabaa2bb
00d0d6d008
537a9f7030
cc9c993f74
d10422ded8
f313565a3c
b3881d2636
61d5860b46
52d17987dc
19d8bba97d
463d72d322
d060341168
c772bcd514
3362f8dfa0
bf3c9857e0
a88b3ceca5
aa135412f5
9945399e60
5eeffa49e8
3f06209720
12e239dd9f
af2301afbb
f98856a26f
d55cc5b380
c2b688abc9
b0d61b9687
5f893bf9af
0e17bd6597
22caa158cc
b24a504d7c
992ef6e9fc
f32a320bc3
5f0fe029d2
6b1486e89b
3f9c427a3a
d201277652
fdda7cf9cf
e22d30f715
1ba25a0d8c
9ba3647bdf
5ee832f738
467deee46f
35a69a5133
e9c5a271a8
acac2d6938
97db2b8d20
80fd6ab407
5534921bee
ace9cd64bb
a3e2aeb603
049dd25785
d43d372294
b71a081cba
c48909590b
446ef40570
81441e98f4
ecd3f890f5
1c881ce23c
dacbbdd051
2859955a03
cc220abd1d
d1c0c0197e
fd9424ef27
a5c35c4024
e03b64dc06
4677c40195
288c615782
48e81cf6f8
5cffa05c7e
d50a2164d7
32ff766dbd
01652d8cfe
4d2dc7ba03
51d1beb1f3
65b724bb5f
6dbd117aa5
198b29f618
a8309638d4
f98a4e880e
8244caff25
bcd7895362
85b1c5df39
b4255140d6
0c3095e173
d3ce60713d
eac1f08b7b
1654c4f3c0
8807d998bc
5791021dcd
c273fb051c
c545530170
d982a5b6d5
15ca8637f3
cbc995b74c
8b74174d74
e21fef17df
3d27708f07
b918744184
7d14a3c086
e14a84317d
6c31b99f1f
9522dcd611
ed469898dc
1eee94a809
54523369a3
a98c91c2a5
a9b92867a8
65920faeba
249e2db87d
cf3535d16e
d61ee817f4
3448b7387c
47b89d2739
2a75516330
b2087f14c4
dd1ba266b2
1292d59563
9877ed9bf8
f0dc0f3621
1efe30d6cc
0b787e9fe0
37ec4b241c
63b0a19f37
90ea7dfa99
f866d7c33e
542bdef198
06007db3d9
12e6059a70
dbaa24ebf6
3276aa67dc
3b30b9f0c0
69db4816f7
3abe09025a
e33878e0de
27b4fbf3f0
968a90633a
6365a89ba3
ddbb008694
7997e0a449
197612bc7a
0e88bf4bff
3e64d78469
2004611def
a2868c96a4
ea7f8fda5e
906b78811b
97703b181b
d9474c6cb6
bbd145382b
1b08cb7300
337d9dc043
8726e94ea7
67db4993c2
fd3c8b0e85
1635c263ee
5b117865b2
05bbc49a99
81a8209749
a87e45ba25
465856331a
cc958aa9ed
a25e4b3d0c
d1210ca12a
36ea0e222a
92281ec22d
87266ce099
2a23f133e8
8dbf790f62
2402b4940e
2111052fbe
433766ac62
93a37c8f68
9872c76825
5ee3ea2144
5050833b42
7bee4ebb54
71cf9851e7
b4735c9904
9b2699226c
5f52804907
936071773e
1732f9319e
91c81cab30
38164f8480
f013979791
e947b563ea
5cb3530c34
250008372f
4fedd8d29f
6ddcef1bca
8c5a5fdfce
046b1cbbc0
a65ce237c1
cd27f1005d
f8c0a59221
832485699f
81484a4760
9a86059761
b780b7b7a0
9e085bd04e
6b6bf537d3
323a651c71
9f212679f1
032f7dde1a
50b1db1e8b
015d8bb38a
10a34312dc
db8c0e7584
d15ccad8a7
0009b5cee8
20d1941a45
b7c76ede29
05edf803bd
78b8e40f83
fc2e9850d3
ffaaed679e
b2fd8b993a
291ee8c3d0
e1a5b3ea49
55a55660cb
ceb8b374da
4bc2ad2894
798af3e68f
b0ef2367f3
71a7350a85
6f79369955
f9cb6b979f
ed4d9d17f8
fbed02690d
39f3ae5b1d
e64bec8c8e
0893b4e552
92f0f29670
48a340a9d1
f45621109b
32d1a0bbea
267cce66a1
3417147b11
b338719bc8
2b81cbe2c2
acff9d6ed2
a306a49788
7ef03c5368
5abec5b8a9
499edc0636
d990e61be3
3edb2dc2da
345721220e
6db68d6ecb
09f0963d1f
6f44e3c192
5893888f87
39b448affb
e54a8f05a9
64b72fc17f
6fdce60492
852db4626a
6504a098cc
79a385faca
c12a67030a
581392f2f2
113f277b6a
974586bedc
160f78c1e4
7e4e1bbbc2
e699b7e9f9
a28bc0de90
14d0fe4d6c
0ad2e0815c
1c8ca05e16
dc9c8340bb
19eef97503
635246ce50
5cdbb7e71e
8123590a1b
86c9c4da8b
cd1efee866
bd310932f7
304762e7ac
d79ab03a6c
d5708e0eb2
123f6b7a61
2b6457dd9a
b367cbd422
e252c1aca3
b140c6a4f9
326de36467
9f224a1647
bb46ba9b5f
dd5a22b36b
1ea85b9972
8fb63f1c25
77fa586f6c
15238e8d5e
b27e31957a
46927771e3
d8cea77707
5f8a76d490
28d49a3b60
b4c624ece6
@@ -9,11 +9,6 @@ matrix:
  - os: osx
    osx_image: xcode8.3
    compiler: clang
    env: PREC=single
  - os: osx
    osx_image: xcode8.3
    compiler: clang
    env: PREC=double

before_install:
  - export GRIDDIR=`pwd`

@@ -55,7 +50,7 @@ script:
  - make -j4
  - make install
  - cd $CWD/build
  - ../configure --enable-precision=$PREC --enable-simd=SSE4 --enable-comms=none --with-lime=$CWD/build/lime/install ${EXTRACONF}
  - ../configure --enable-simd=SSE4 --enable-comms=none --with-lime=$CWD/build/lime/install ${EXTRACONF}
  - make -j4
  - ./benchmarks/Benchmark_dwf --threads 1 --debug-signals
  - make check

@@ -37,7 +37,9 @@ directory
#endif

//disables an Intel compiler specific warning (in json.hpp)
#ifdef __ICC
#pragma warning disable 488
#endif

#ifdef __NVCC__
//disables nvcc specific warning in json.hpp

@@ -28,4 +28,7 @@
///////////////////
#include "Config.h"

#ifdef TOFU
#undef GRID_COMMS_THREADS
#endif
#endif /* GRID_STD_H */

@@ -34,6 +34,12 @@
#define __SYCL__REDEFINE__
#endif

/* HIP save and restore compile environment*/
#ifdef GRID_HIP
#pragma push
#pragma push_macro("__HIP_DEVICE_COMPILE__")
#endif
#define EIGEN_NO_HIP

#include <Grid/Eigen/Dense>
#include <Grid/Eigen/unsupported/CXX11/Tensor>

@@ -42,7 +48,7 @@
#ifdef __NVCC__REDEFINE__
#pragma pop_macro("__CUDACC__")
#pragma pop_macro("__NVCC__")
#pragma pop_macro("GRID_SIMT")
#pragma pop_macro("__CUDA_ARCH__")
#pragma pop
#endif

@@ -52,6 +58,12 @@
#pragma pop
#endif

/*HIP restore*/
#ifdef __HIP__REDEFINE__
#pragma pop_macro("__HIP_DEVICE_COMPILE__")
#pragma pop
#endif

#if defined __GNUC__
#pragma GCC diagnostic pop
#endif
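
Note: the hunks above bracket the Eigen headers with saved and restored compiler-identification macros. For readers unfamiliar with the idiom, here is a minimal, self-contained sketch of push_macro/pop_macro (supported by GCC, Clang, MSVC and nvcc); FOO is a placeholder, not a Grid macro:

#include <cstdio>

#define FOO 1

#pragma push_macro("FOO")          // save the current definition of FOO
#undef FOO
#define FOO 2                      // temporary redefinition, e.g. while a header is parsed

int foo_inside() { return FOO; }   // expands with FOO == 2

#pragma pop_macro("FOO")           // restore the saved definition

int foo_outside() { return FOO; }  // expands with FOO == 1 again

int main() { printf("%d %d\n", foo_inside(), foo_outside()); } // prints "2 1"
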
@@ -21,6 +21,7 @@ if BUILD_HDF5
extra_headers+=serialisation/Hdf5Type.h
endif


all: version-cache Version.h

version-cache:

@@ -53,6 +54,17 @@ Version.h: version-cache
include Make.inc
include Eigen.inc

#extra_sources+=$(ZWILS_FERMION_FILES)
extra_sources+=$(WILS_FERMION_FILES)
extra_sources+=$(STAG_FERMION_FILES)
if BUILD_GPARITY
extra_sources+=$(GP_FERMION_FILES)
endif
if BUILD_FERMION_REPS
extra_sources+=$(ADJ_FERMION_FILES)
extra_sources+=$(TWOIND_FERMION_FILES)
endif

lib_LIBRARIES = libGrid.a

CCFILES += $(extra_sources)

@@ -31,6 +31,7 @@ Author: paboyle <paboyle@ph.ed.ac.uk>
#ifndef GRID_ALGORITHM_COARSENED_MATRIX_H
#define GRID_ALGORITHM_COARSENED_MATRIX_H

#include <Grid/qcd/QCD.h> // needed for Dagger(Yes|No), Inverse(Yes|No)

NAMESPACE_BEGIN(Grid);

@@ -59,12 +60,14 @@ inline void blockMaskedInnerProduct(Lattice<CComplex> &CoarseInner,
class Geometry {
public:
  int npoint;
  int base;
  std::vector<int> directions;
  std::vector<int> displacements;
  std::vector<int> points_dagger;

  Geometry(int _d) {

    int base = (_d==5) ? 1:0;
    base = (_d==5) ? 1:0;

    // make coarse grid stencil for 4d, not 5d
    if ( _d==5 ) _d=4;

@@ -72,16 +75,51 @@ public:
    npoint = 2*_d+1;
    directions.resize(npoint);
    displacements.resize(npoint);
    points_dagger.resize(npoint);
    for(int d=0;d<_d;d++){
      directions[d   ]   = d+base;
      directions[d+_d]   = d+base;
      displacements[d  ]  = +1;
      displacements[d+_d] = -1;
      points_dagger[d   ] = d+_d;
      points_dagger[d+_d] = d;
    }
    directions   [2*_d]=0;
    displacements[2*_d]=0;
    points_dagger[2*_d]=2*_d;
  }

  int point(int dir, int disp) {
    assert(disp == -1 || disp == 0 || disp == 1);
    assert(base+0 <= dir && dir < base+4);

    // directions faster index = new indexing
    // 4d (base = 0):
    // point 0  1  2  3  4  5  6  7  8
    // dir   0  1  2  3  0  1  2  3  0
    // disp +1 +1 +1 +1 -1 -1 -1 -1  0
    // 5d (base = 1):
    // point 0  1  2  3  4  5  6  7  8
    // dir   1  2  3  4  1  2  3  4  0
    // disp +1 +1 +1 +1 -1 -1 -1 -1  0

    // displacements faster index = old indexing
    // 4d (base = 0):
    // point 0  1  2  3  4  5  6  7  8
    // dir   0  0  1  1  2  2  3  3  0
    // disp +1 -1 +1 -1 +1 -1 +1 -1  0
    // 5d (base = 1):
    // point 0  1  2  3  4  5  6  7  8
    // dir   1  1  2  2  3  3  4  4  0
    // disp +1 -1 +1 -1 +1 -1 +1 -1  0

    if(dir == 0 and disp == 0)
      return 8;
    else // New indexing
      return (1 - disp) / 2 * 4 + dir - base;
    // else // Old indexing
    //   return (4 * (dir - base) + 1 - disp) / 2;
  }
};

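Note: the new indexing can be checked in isolation. The following is a minimal, self-contained sketch (not part of Grid; point_index is a hypothetical standalone re-implementation of Geometry::point) that reproduces the point tables in the comments above for base = 0 and base = 1:

#include <cassert>
#include <cstdio>

// Forward hops land on points 0..3, backward hops on 4..7, the self link on 8.
static int point_index(int dir, int disp, int base) {
  if (dir == 0 && disp == 0) return 8;      // self (diagonal) point
  return (1 - disp) / 2 * 4 + dir - base;   // disp=+1 -> 0..3, disp=-1 -> 4..7
}

int main() {
  for (int base = 0; base <= 1; base++) {
    for (int d = 0; d < 4; d++) {
      assert(point_index(d + base, +1, base) == d);      // forward hops first
      assert(point_index(d + base, -1, base) == d + 4);  // then backward hops
    }
    assert(point_index(0, 0, base) == 8);                // diagonal last
  }
  printf("point index tables verified\n");
  return 0;
}
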
template<class Fobj,class CComplex,int nbasis>

@@ -258,7 +296,7 @@ public:
// Fine Object == (per site) type of fine field
// nbasis      == number of deflation vectors
template<class Fobj,class CComplex,int nbasis>
class CoarsenedMatrix : public SparseMatrixBase<Lattice<iVector<CComplex,nbasis > > > {
class CoarsenedMatrix : public CheckerBoardedSparseMatrixBase<Lattice<iVector<CComplex,nbasis > > > {
public:

  typedef iVector<CComplex,nbasis > siteVector;

@@ -268,33 +306,59 @@ public:
  typedef iMatrix<CComplex,nbasis > Cobj;
  typedef Lattice< CComplex > CoarseScalar; // used for inner products on fine field
  typedef Lattice<Fobj > FineField;
  typedef CoarseVector FermionField;

  // enrich interface, use default implementation as in FermionOperator ///////
  void Dminus(CoarseVector const& in, CoarseVector& out) { out = in; }
  void DminusDag(CoarseVector const& in, CoarseVector& out) { out = in; }
  void ImportPhysicalFermionSource(CoarseVector const& input, CoarseVector& imported) { imported = input; }
  void ImportUnphysicalFermion(CoarseVector const& input, CoarseVector& imported) { imported = input; }
  void ExportPhysicalFermionSolution(CoarseVector const& solution, CoarseVector& exported) { exported = solution; };
  void ExportPhysicalFermionSource(CoarseVector const& solution, CoarseVector& exported) { exported = solution; };

  ////////////////////
  // Data members
  ////////////////////
  Geometry geom;
  GridBase * _grid;
  GridBase* _cbgrid;
  int hermitian;

  CartesianStencil<siteVector,siteVector,int> Stencil;
  CartesianStencil<siteVector,siteVector,int> StencilEven;
  CartesianStencil<siteVector,siteVector,int> StencilOdd;

  std::vector<CoarseMatrix> A;

  std::vector<CoarseMatrix> Aeven;
  std::vector<CoarseMatrix> Aodd;

  CoarseMatrix AselfInv;
  CoarseMatrix AselfInvEven;
  CoarseMatrix AselfInvOdd;

  Vector<RealD> dag_factor;

  ///////////////////////
  // Interface
  ///////////////////////
  GridBase * Grid(void)     { return _grid; };   // this is all the linalg routines need to know
  GridBase * RedBlackGrid() { return _cbgrid; };

  int ConstEE() { return 0; }

  void M (const CoarseVector &in, CoarseVector &out)
  {
    conformable(_grid,in.Grid());
    conformable(in.Grid(),out.Grid());
    out.Checkerboard() = in.Checkerboard();

    SimpleCompressor<siteVector> compressor;

    Stencil.HaloExchange(in,compressor);
    autoView( in_v ,      in, AcceleratorRead);
    autoView( out_v ,     out, AcceleratorWrite);
    autoView( Stencil_v , Stencil, AcceleratorRead);
    auto& geom_v = geom;
    typedef LatticeView<Cobj> Aview;

    Vector<Aview> AcceleratorViewContainer;

@@ -316,14 +380,14 @@ public:
      int ptype;
      StencilEntry *SE;

      for(int point=0;point<geom.npoint;point++){
      for(int point=0;point<geom_v.npoint;point++){

        SE=Stencil.GetEntry(ptype,point,ss);
        SE=Stencil_v.GetEntry(ptype,point,ss);

        if(SE->_is_local) {
          nbr = coalescedReadPermute(in_v[SE->_offset],ptype,SE->_permute);
        } else {
          nbr = coalescedRead(Stencil.CommBuf()[SE->_offset]);
          nbr = coalescedRead(Stencil_v.CommBuf()[SE->_offset]);
        }
        acceleratorSynchronise();

@@ -344,12 +408,72 @@ public:
      return M(in,out);
    } else {
      // corresponds to Galerkin coarsening
      CoarseVector tmp(Grid());
      G5C(tmp, in);
      M(tmp, out);
      G5C(out, out);
      return MdagNonHermitian(in, out);
    }
  };

  void MdagNonHermitian(const CoarseVector &in, CoarseVector &out)
  {
    conformable(_grid,in.Grid());
    conformable(in.Grid(),out.Grid());
    out.Checkerboard() = in.Checkerboard();

    SimpleCompressor<siteVector> compressor;

    Stencil.HaloExchange(in,compressor);
    autoView( in_v ,      in, AcceleratorRead);
    autoView( out_v ,     out, AcceleratorWrite);
    autoView( Stencil_v , Stencil, AcceleratorRead);
    auto& geom_v = geom;
    typedef LatticeView<Cobj> Aview;

    Vector<Aview> AcceleratorViewContainer;

    for(int p=0;p<geom.npoint;p++) AcceleratorViewContainer.push_back(A[p].View(AcceleratorRead));
    Aview *Aview_p = & AcceleratorViewContainer[0];

    const int Nsimd = CComplex::Nsimd();
    typedef decltype(coalescedRead(in_v[0])) calcVector;
    typedef decltype(coalescedRead(in_v[0](0))) calcComplex;

    int osites=Grid()->oSites();

    Vector<int> points(geom.npoint, 0);
    for(int p=0; p<geom.npoint; p++)
      points[p] = geom.points_dagger[p];

    RealD* dag_factor_p = &dag_factor[0];

    accelerator_for(sss, Grid()->oSites()*nbasis, Nsimd, {
      int ss = sss/nbasis;
      int b  = sss%nbasis;
      calcComplex res = Zero();
      calcVector nbr;
      int ptype;
      StencilEntry *SE;

      for(int p=0;p<geom_v.npoint;p++){
        int point = points[p];

        SE=Stencil_v.GetEntry(ptype,point,ss);

        if(SE->_is_local) {
          nbr = coalescedReadPermute(in_v[SE->_offset],ptype,SE->_permute);
        } else {
          nbr = coalescedRead(Stencil_v.CommBuf()[SE->_offset]);
        }
        acceleratorSynchronise();

        for(int bb=0;bb<nbasis;bb++) {
          res = res + dag_factor_p[b*nbasis+bb]*coalescedRead(Aview_p[point][ss](b,bb))*nbr(bb);
        }
      }
      coalescedWrite(out_v[ss](b),res);
    });

    for(int p=0;p<geom.npoint;p++) AcceleratorViewContainer[p].ViewClose();
  }

  void MdirComms(const CoarseVector &in)
  {
    SimpleCompressor<siteVector> compressor;

@@ -359,6 +483,7 @@ public:
  {
    conformable(_grid,in.Grid());
    conformable(_grid,out.Grid());
    out.Checkerboard() = in.Checkerboard();

    typedef LatticeView<Cobj> Aview;
    Vector<Aview> AcceleratorViewContainer;

@@ -367,6 +492,7 @@ public:

    autoView( out_v , out, AcceleratorWrite);
    autoView( in_v  , in, AcceleratorRead);
    autoView( Stencil_v , Stencil, AcceleratorRead);

    const int Nsimd = CComplex::Nsimd();
    typedef decltype(coalescedRead(in_v[0])) calcVector;

@@ -380,12 +506,12 @@ public:
      int ptype;
      StencilEntry *SE;

      SE=Stencil.GetEntry(ptype,point,ss);
      SE=Stencil_v.GetEntry(ptype,point,ss);

      if(SE->_is_local) {
        nbr = coalescedReadPermute(in_v[SE->_offset],ptype,SE->_permute);
      } else {
        nbr = coalescedRead(Stencil.CommBuf()[SE->_offset]);
        nbr = coalescedRead(Stencil_v.CommBuf()[SE->_offset]);
      }
      acceleratorSynchronise();

@@ -413,34 +539,7 @@ public:

    this->MdirComms(in);

    int ndim = in.Grid()->Nd();

    //////////////
    // 4D action like wilson
    // 0+ => 0
    // 0- => 1
    // 1+ => 2
    // 1- => 3
    // etc..
    //////////////
    // 5D action like DWF
    // 1+ => 0
    // 1- => 1
    // 2+ => 2
    // 2- => 3
    // etc..
    auto point = [dir, disp, ndim](){
      if(dir == 0 and disp == 0)
        return 8;
      else if ( ndim==4 ) {
        return (4 * dir + 1 - disp) / 2;
      } else {
        return (4 * (dir-1) + 1 - disp) / 2;
      }
    }();

    MdirCalc(in,out,point);

    MdirCalc(in,out,geom.point(dir,disp));
  };

  void Mdiag(const CoarseVector &in, CoarseVector &out)

@@ -449,23 +548,296 @@ public:
    MdirCalc(in, out, point); // No comms
  };


  CoarsenedMatrix(GridCartesian &CoarseGrid, int hermitian_=0) :
  void Mooee(const CoarseVector &in, CoarseVector &out) {
    MooeeInternal(in, out, DaggerNo, InverseNo);
  }

  void MooeeInv(const CoarseVector &in, CoarseVector &out) {
    MooeeInternal(in, out, DaggerNo, InverseYes);
  }

  void MooeeDag(const CoarseVector &in, CoarseVector &out) {
    MooeeInternal(in, out, DaggerYes, InverseNo);
  }

  void MooeeInvDag(const CoarseVector &in, CoarseVector &out) {
    MooeeInternal(in, out, DaggerYes, InverseYes);
  }

  void Meooe(const CoarseVector &in, CoarseVector &out) {
    if(in.Checkerboard() == Odd) {
      DhopEO(in, out, DaggerNo);
    } else {
      DhopOE(in, out, DaggerNo);
    }
  }

  void MeooeDag(const CoarseVector &in, CoarseVector &out) {
    if(in.Checkerboard() == Odd) {
      DhopEO(in, out, DaggerYes);
    } else {
      DhopOE(in, out, DaggerYes);
    }
  }

  void Dhop(const CoarseVector &in, CoarseVector &out, int dag) {
    conformable(in.Grid(), _grid); // verifies full grid
    conformable(in.Grid(), out.Grid());

    out.Checkerboard() = in.Checkerboard();

    DhopInternal(Stencil, A, in, out, dag);
  }

  void DhopOE(const CoarseVector &in, CoarseVector &out, int dag) {
    conformable(in.Grid(), _cbgrid);    // verifies half grid
    conformable(in.Grid(), out.Grid()); // drops the cb check

    assert(in.Checkerboard() == Even);
    out.Checkerboard() = Odd;

    DhopInternal(StencilEven, Aodd, in, out, dag);
  }

  void DhopEO(const CoarseVector &in, CoarseVector &out, int dag) {
    conformable(in.Grid(), _cbgrid);    // verifies half grid
    conformable(in.Grid(), out.Grid()); // drops the cb check

    assert(in.Checkerboard() == Odd);
    out.Checkerboard() = Even;

    DhopInternal(StencilOdd, Aeven, in, out, dag);
  }

  void MooeeInternal(const CoarseVector &in, CoarseVector &out, int dag, int inv) {
    out.Checkerboard() = in.Checkerboard();
    assert(in.Checkerboard() == Odd || in.Checkerboard() == Even);

    CoarseMatrix *Aself = nullptr;
    if(in.Grid()->_isCheckerBoarded) {
      if(in.Checkerboard() == Odd) {
        Aself = (inv) ? &AselfInvOdd : &Aodd[geom.npoint-1];
        DselfInternal(StencilOdd, *Aself, in, out, dag);
      } else {
        Aself = (inv) ? &AselfInvEven : &Aeven[geom.npoint-1];
        DselfInternal(StencilEven, *Aself, in, out, dag);
      }
    } else {
      Aself = (inv) ? &AselfInv : &A[geom.npoint-1];
      DselfInternal(Stencil, *Aself, in, out, dag);
    }
    assert(Aself != nullptr);
  }

  void DselfInternal(CartesianStencil<siteVector,siteVector,int> &st, CoarseMatrix &a,
                     const CoarseVector &in, CoarseVector &out, int dag) {
    int point = geom.npoint-1;
    autoView( out_v, out, AcceleratorWrite);
    autoView( in_v,  in,  AcceleratorRead);
    autoView( st_v,  st,  AcceleratorRead);
    autoView( a_v,   a,   AcceleratorRead);

    const int Nsimd = CComplex::Nsimd();
    typedef decltype(coalescedRead(in_v[0])) calcVector;
    typedef decltype(coalescedRead(in_v[0](0))) calcComplex;

    RealD* dag_factor_p = &dag_factor[0];

    if(dag) {
      accelerator_for(sss, in.Grid()->oSites()*nbasis, Nsimd, {
        int ss = sss/nbasis;
        int b  = sss%nbasis;
        calcComplex res = Zero();
        calcVector nbr;
        int ptype;
        StencilEntry *SE;

        SE=st_v.GetEntry(ptype,point,ss);

        if(SE->_is_local) {
          nbr = coalescedReadPermute(in_v[SE->_offset],ptype,SE->_permute);
        } else {
          nbr = coalescedRead(st_v.CommBuf()[SE->_offset]);
        }
        acceleratorSynchronise();

        for(int bb=0;bb<nbasis;bb++) {
          res = res + dag_factor_p[b*nbasis+bb]*coalescedRead(a_v[ss](b,bb))*nbr(bb);
        }
        coalescedWrite(out_v[ss](b),res);
      });
    } else {
      accelerator_for(sss, in.Grid()->oSites()*nbasis, Nsimd, {
        int ss = sss/nbasis;
        int b  = sss%nbasis;
        calcComplex res = Zero();
        calcVector nbr;
        int ptype;
        StencilEntry *SE;

        SE=st_v.GetEntry(ptype,point,ss);

        if(SE->_is_local) {
          nbr = coalescedReadPermute(in_v[SE->_offset],ptype,SE->_permute);
        } else {
          nbr = coalescedRead(st_v.CommBuf()[SE->_offset]);
        }
        acceleratorSynchronise();

        for(int bb=0;bb<nbasis;bb++) {
          res = res + coalescedRead(a_v[ss](b,bb))*nbr(bb);
        }
        coalescedWrite(out_v[ss](b),res);
      });
    }
  }

  void DhopInternal(CartesianStencil<siteVector,siteVector,int> &st, std::vector<CoarseMatrix> &a,
                    const CoarseVector &in, CoarseVector &out, int dag) {
    SimpleCompressor<siteVector> compressor;

    st.HaloExchange(in,compressor);
    autoView( in_v,  in, AcceleratorRead);
    autoView( out_v, out, AcceleratorWrite);
    autoView( st_v , st, AcceleratorRead);
    typedef LatticeView<Cobj> Aview;

    // determine in what order we need the points
    int npoint = geom.npoint-1;
    Vector<int> points(npoint, 0);
    for(int p=0; p<npoint; p++)
      points[p] = (dag && !hermitian) ? geom.points_dagger[p] : p;

    Vector<Aview> AcceleratorViewContainer;
    for(int p=0;p<npoint;p++) AcceleratorViewContainer.push_back(a[p].View(AcceleratorRead));
    Aview *Aview_p = & AcceleratorViewContainer[0];

    const int Nsimd = CComplex::Nsimd();
    typedef decltype(coalescedRead(in_v[0])) calcVector;
    typedef decltype(coalescedRead(in_v[0](0))) calcComplex;

    RealD* dag_factor_p = &dag_factor[0];

    if(dag) {
      accelerator_for(sss, in.Grid()->oSites()*nbasis, Nsimd, {
        int ss = sss/nbasis;
        int b  = sss%nbasis;
        calcComplex res = Zero();
        calcVector nbr;
        int ptype;
        StencilEntry *SE;

        for(int p=0;p<npoint;p++){
          int point = points[p];
          SE=st_v.GetEntry(ptype,point,ss);

          if(SE->_is_local) {
            nbr = coalescedReadPermute(in_v[SE->_offset],ptype,SE->_permute);
          } else {
            nbr = coalescedRead(st_v.CommBuf()[SE->_offset]);
          }
          acceleratorSynchronise();

          for(int bb=0;bb<nbasis;bb++) {
            res = res + dag_factor_p[b*nbasis+bb]*coalescedRead(Aview_p[point][ss](b,bb))*nbr(bb);
          }
        }
        coalescedWrite(out_v[ss](b),res);
      });
    } else {
      accelerator_for(sss, in.Grid()->oSites()*nbasis, Nsimd, {
        int ss = sss/nbasis;
        int b  = sss%nbasis;
        calcComplex res = Zero();
        calcVector nbr;
        int ptype;
        StencilEntry *SE;

        for(int p=0;p<npoint;p++){
          int point = points[p];
          SE=st_v.GetEntry(ptype,point,ss);

          if(SE->_is_local) {
            nbr = coalescedReadPermute(in_v[SE->_offset],ptype,SE->_permute);
          } else {
            nbr = coalescedRead(st_v.CommBuf()[SE->_offset]);
          }
          acceleratorSynchronise();

          for(int bb=0;bb<nbasis;bb++) {
            res = res + coalescedRead(Aview_p[point][ss](b,bb))*nbr(bb);
          }
        }
        coalescedWrite(out_v[ss](b),res);
      });
    }

    for(int p=0;p<npoint;p++) AcceleratorViewContainer[p].ViewClose();
  }

  CoarsenedMatrix(GridCartesian &CoarseGrid, int hermitian_=0) :
    _grid(&CoarseGrid),
    _cbgrid(new GridRedBlackCartesian(&CoarseGrid)),
    geom(CoarseGrid._ndimension),
    hermitian(hermitian_),
    Stencil(&CoarseGrid,geom.npoint,Even,geom.directions,geom.displacements,0),
    A(geom.npoint,&CoarseGrid)
    StencilEven(_cbgrid,geom.npoint,Even,geom.directions,geom.displacements,0),
    StencilOdd(_cbgrid,geom.npoint,Odd,geom.directions,geom.displacements,0),
    A(geom.npoint,&CoarseGrid),
    Aeven(geom.npoint,_cbgrid),
    Aodd(geom.npoint,_cbgrid),
    AselfInv(&CoarseGrid),
    AselfInvEven(_cbgrid),
    AselfInvOdd(_cbgrid),
    dag_factor(nbasis*nbasis)
  {
    fillFactor();
  };

  CoarsenedMatrix(GridCartesian &CoarseGrid, GridRedBlackCartesian &CoarseRBGrid, int hermitian_=0) :

    _grid(&CoarseGrid),
    _cbgrid(&CoarseRBGrid),
    geom(CoarseGrid._ndimension),
    hermitian(hermitian_),
    Stencil(&CoarseGrid,geom.npoint,Even,geom.directions,geom.displacements,0),
    StencilEven(&CoarseRBGrid,geom.npoint,Even,geom.directions,geom.displacements,0),
    StencilOdd(&CoarseRBGrid,geom.npoint,Odd,geom.directions,geom.displacements,0),
    A(geom.npoint,&CoarseGrid),
    Aeven(geom.npoint,&CoarseRBGrid),
    Aodd(geom.npoint,&CoarseRBGrid),
    AselfInv(&CoarseGrid),
    AselfInvEven(&CoarseRBGrid),
    AselfInvOdd(&CoarseRBGrid),
    dag_factor(nbasis*nbasis)
  {
    fillFactor();
  };

  void fillFactor() {
    Eigen::MatrixXd dag_factor_eigen = Eigen::MatrixXd::Ones(nbasis, nbasis);
    if(!hermitian) {
      const int nb = nbasis/2;
      dag_factor_eigen.block(0,nb,nb,nb) *= -1.0;
      dag_factor_eigen.block(nb,0,nb,nb) *= -1.0;
    }

    // GPU readable prefactor
    thread_for(i, nbasis*nbasis, {
      int j = i/nbasis;
      int k = i%nbasis;
      dag_factor[i] = dag_factor_eigen(j, k);
    });
  }

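Note: fillFactor builds an nbasis x nbasis table of +/-1 that the dagger kernels multiply in. A minimal sketch of the same table without Eigen, assuming the standard chirality-blocked basis (first nbasis/2 vectors of one chirality, second half the other); make_dag_factor is a hypothetical name, not Grid code:

#include <vector>
#include <cstdio>

// Off-diagonal chirality blocks pick up a minus sign in the non-hermitian case.
std::vector<double> make_dag_factor(int nbasis, bool hermitian) {
  std::vector<double> f(nbasis * nbasis, 1.0);
  if (!hermitian) {
    int nb = nbasis / 2;
    for (int j = 0; j < nbasis; j++)
      for (int k = 0; k < nbasis; k++)
        if ((j < nb) != (k < nb))      // (j,k) lies in an off-diagonal block
          f[j * nbasis + k] = -1.0;
  }
  return f;
}

int main() {
  auto f = make_dag_factor(4, false);  // prints the 2x2 block sign pattern
  for (int j = 0; j < 4; j++, printf("\n"))
    for (int k = 0; k < 4; k++) printf("%+.0f ", f[j * 4 + k]);
}
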
  void CoarsenOperator(GridBase *FineGrid,LinearOperatorBase<Lattice<Fobj> > &linop,
                       Aggregation<Fobj,CComplex,nbasis> & Subspace)
  {
    typedef Lattice<typename Fobj::tensor_reduced> FineComplexField;
    typedef typename Fobj::scalar_type scalar_type;

    std::cout << GridLogMessage<< "CoarsenMatrix "<< std::endl;

    FineComplexField one(FineGrid); one=scalar_type(1.0,0.0);
    FineComplexField zero(FineGrid); zero=scalar_type(0.0,0.0);

@@ -496,11 +868,13 @@ public:

    CoarseScalar InnerProd(Grid());

    std::cout << GridLogMessage<< "CoarsenMatrix Orthog "<< std::endl;
    // Orthogonalise the subblocks over the basis
    blockOrthogonalise(InnerProd,Subspace.subspace);

    // Compute the matrix elements of linop between this orthonormal
    // set of vectors.
    std::cout << GridLogMessage<< "CoarsenMatrix masks "<< std::endl;
    int self_stencil=-1;
    for(int p=0;p<geom.npoint;p++)
    {

@@ -539,7 +913,7 @@

      phi=Subspace.subspace[i];

      // std::cout << GridLogMessage<< "CoarsenMatrix vector "<<i << std::endl;
      std::cout << GridLogMessage<< "CoarsenMatrix vector "<<i << std::endl;
      linop.OpDirAll(phi,Mphi_p);
      linop.OpDiag (phi,Mphi_p[geom.npoint-1]);

@@ -568,6 +942,18 @@
      autoView( A_self  , A[self_stencil], AcceleratorWrite);

      accelerator_for(ss, Grid()->oSites(), Fobj::Nsimd(),{ coalescedWrite(A_p[ss](j,i),oZProj_v(ss)); });
      if ( hermitian && (disp==-1) ) {
        for(int pp=0;pp<geom.npoint;pp++){// Find the opposite link and set <j|A|i> = <i|A|j>*
          int dirp  = geom.directions[pp];
          int dispp = geom.displacements[pp];
          if ( (dirp==dir) && (dispp==1) ){
            auto sft = conjugate(Cshift(oZProj,dir,1));
            autoView( sft_v , sft , AcceleratorWrite);
            autoView( A_pp  , A[pp], AcceleratorWrite);
            accelerator_for(ss, Grid()->oSites(), Fobj::Nsimd(),{ coalescedWrite(A_pp[ss](i,j),sft_v(ss)); });
          }
        }
      }

    }
  }
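Note: the block above enforces hermiticity of the coarse links at construction time, a gloss of the comment "<j|A|i> = <i|A|j>*". Assuming Grid's convention that Cshift(f,dir,+1) evaluates f at x+mu, the stored links satisfy

  A^{(-\mu)}_{ij}(x) = \left( A^{(+\mu)}_{ji}(x+\hat\mu) \right)^{*}

so the assembled coarse operator obeys M = M^\dagger.
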
@@ -606,28 +992,54 @@ public:
    }
    if(hermitian) {
      std::cout << GridLogMessage << " ForceHermitian, new code "<<std::endl;
      ForceHermitian();
    }

    InvertSelfStencilLink(); std::cout << GridLogMessage << "Coarse self link inverted" << std::endl;
    FillHalfCbs(); std::cout << GridLogMessage << "Coarse half checkerboards filled" << std::endl;
  }

  void ForceHermitian(void) {
    CoarseMatrix Diff (Grid());
    for(int p=0;p<geom.npoint;p++){
      int dir  = geom.directions[p];
      int disp = geom.displacements[p];
      if(disp==-1) {
        // Find the opposite link
        for(int pp=0;pp<geom.npoint;pp++){
          int dirp  = geom.directions[pp];
          int dispp = geom.displacements[pp];
          if ( (dirp==dir) && (dispp==1) ){
            // Diff = adj(Cshift(A[p],dir,1)) - A[pp];
            // std::cout << GridLogMessage<<" Replacing stencil leg "<<pp<<" with leg "<<p<< " diff "<<norm2(Diff) <<std::endl;
            A[pp] = adj(Cshift(A[p],dir,1));
          }
        }
      }
  void InvertSelfStencilLink() {
    std::cout << GridLogDebug << "CoarsenedMatrix::InvertSelfStencilLink" << std::endl;
    int localVolume = Grid()->lSites();

    typedef typename Cobj::scalar_object scalar_object;

    autoView(Aself_v,    A[geom.npoint-1], CpuRead);
    autoView(AselfInv_v, AselfInv,         CpuWrite);
    thread_for(site, localVolume, { // NOTE: Not able to bring this to GPU because of Eigen + peek/poke
      Eigen::MatrixXcd selfLinkEigen    = Eigen::MatrixXcd::Zero(nbasis, nbasis);
      Eigen::MatrixXcd selfLinkInvEigen = Eigen::MatrixXcd::Zero(nbasis, nbasis);

      scalar_object selfLink    = Zero();
      scalar_object selfLinkInv = Zero();

      Coordinate lcoor;

      Grid()->LocalIndexToLocalCoor(site, lcoor);
      peekLocalSite(selfLink, Aself_v, lcoor);

      for (int i = 0; i < nbasis; ++i)
        for (int j = 0; j < nbasis; ++j)
          selfLinkEigen(i, j) = static_cast<ComplexD>(TensorRemove(selfLink(i, j)));

      selfLinkInvEigen = selfLinkEigen.inverse();

      for(int i = 0; i < nbasis; ++i)
        for(int j = 0; j < nbasis; ++j)
          selfLinkInv(i, j) = selfLinkInvEigen(i, j);

      pokeLocalSite(selfLinkInv, AselfInv_v, lcoor);
    });
  }

  void FillHalfCbs() {
    std::cout << GridLogDebug << "CoarsenedMatrix::FillHalfCbs" << std::endl;
    for(int p = 0; p < geom.npoint; ++p) {
      pickCheckerboard(Even, Aeven[p], A[p]);
      pickCheckerboard(Odd,  Aodd[p],  A[p]);
    }
    pickCheckerboard(Even, AselfInvEven, AselfInv);
    pickCheckerboard(Odd,  AselfInvOdd,  AselfInv);
  }
};

@@ -1,67 +0,0 @@
#include <Grid/GridCore.h>
#include <fcntl.h>

NAMESPACE_BEGIN(Grid);

MemoryStats *MemoryProfiler::stats = nullptr;
bool         MemoryProfiler::debug = false;

void check_huge_pages(void *Buf,uint64_t BYTES)
{
#ifdef __linux__
  int fd = open("/proc/self/pagemap", O_RDONLY);
  assert(fd >= 0);
  const int page_size = 4096;
  uint64_t virt_pfn = (uint64_t)Buf / page_size;
  off_t offset = sizeof(uint64_t) * virt_pfn;
  uint64_t npages = (BYTES + page_size-1) / page_size;
  uint64_t pagedata[npages];
  uint64_t ret = lseek(fd, offset, SEEK_SET);
  assert(ret == offset);
  ret = ::read(fd, pagedata, sizeof(uint64_t)*npages);
  assert(ret == sizeof(uint64_t) * npages);
  int nhugepages = npages / 512;
  int n4ktotal, nnothuge;
  n4ktotal = 0;
  nnothuge = 0;
  for (int i = 0; i < nhugepages; ++i) {
    uint64_t baseaddr = (pagedata[i*512] & 0x7fffffffffffffULL) * page_size;
    for (int j = 0; j < 512; ++j) {
      uint64_t pageaddr = (pagedata[i*512+j] & 0x7fffffffffffffULL) * page_size;
      ++n4ktotal;
      if (pageaddr != baseaddr + j * page_size)
        ++nnothuge;
    }
  }
  int rank = CartesianCommunicator::RankWorld();
  printf("rank %d Allocated %d 4k pages, %d not in huge pages\n", rank, n4ktotal, nnothuge);
#endif
}

std::string sizeString(const size_t bytes)
{
  constexpr unsigned int bufSize = 256;
  const char *suffixes[7] = {"", "K", "M", "G", "T", "P", "E"};
  char buf[256];
  size_t s = 0;
  double count = bytes;

  while (count >= 1024 && s < 7)
  {
    s++;
    count /= 1024;
  }
  if (count - floor(count) == 0.0)
  {
    snprintf(buf, bufSize, "%d %sB", (int)count, suffixes[s]);
  }
  else
  {
    snprintf(buf, bufSize, "%.1f %sB", count, suffixes[s]);
  }

  return std::string(buf);
}

NAMESPACE_END(Grid);

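Aside on the two deleted helpers, for reference. check_huge_pages scans /proc/self/pagemap in blocks of 512 entries because one 2 MiB huge page spans 2 MiB / 4 KiB = 512 small pages; a block counts as "not in huge pages" when its 4k frames are not physically contiguous. sizeString formats byte counts with 1024-based prefixes; worked from the code above:

  sizeString(512)         -> "512 B"   (loop body never runs, s = 0)
  sizeString(1536)        -> "1.5 KB"  (1536/1024 = 1.5, s = 1)
  sizeString(3*1024*1024) -> "3 MB"    (two divisions by 1024, s = 2)
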
@@ -65,8 +65,7 @@ public:
    MemoryManager::CpuFree((void *)__p,bytes);
  }

  // FIXME: hack for the copy constructor, eventually it must be avoided
  //void construct(pointer __p, const _Tp& __val) { new((void *)__p) _Tp(__val); };
  // FIXME: hack for the copy constructor: it must be avoided to avoid single thread loop
  void construct(pointer __p, const _Tp& __val) { assert(0);};
  void construct(pointer __p) { };
  void destroy(pointer __p) { };

@@ -74,6 +73,9 @@ public:
template<typename _Tp>  inline bool operator==(const alignedAllocator<_Tp>&, const alignedAllocator<_Tp>&){ return true; }
template<typename _Tp>  inline bool operator!=(const alignedAllocator<_Tp>&, const alignedAllocator<_Tp>&){ return false; }

//////////////////////////////////////////////////////////////////////////////////////
// Unified virtual memory
//////////////////////////////////////////////////////////////////////////////////////
template<typename _Tp>
class uvmAllocator {
public:

@@ -109,22 +111,72 @@ public:
    MemoryManager::SharedFree((void *)__p,bytes);
  }

  // FIXME: hack for the copy constructor, eventually it must be avoided
  void construct(pointer __p, const _Tp& __val) { new((void *)__p) _Tp(__val); };
  //void construct(pointer __p, const _Tp& __val) { };
  void construct(pointer __p) { };
  void destroy(pointer __p) { };
};
template<typename _Tp>  inline bool operator==(const uvmAllocator<_Tp>&, const uvmAllocator<_Tp>&){ return true; }
template<typename _Tp>  inline bool operator!=(const uvmAllocator<_Tp>&, const uvmAllocator<_Tp>&){ return false; }

////////////////////////////////////////////////////////////////////////////////
// Device memory
////////////////////////////////////////////////////////////////////////////////
template<typename _Tp>
class devAllocator {
public:
  typedef std::size_t    size_type;
  typedef std::ptrdiff_t difference_type;
  typedef _Tp*       pointer;
  typedef const _Tp* const_pointer;
  typedef _Tp&       reference;
  typedef const _Tp& const_reference;
  typedef _Tp        value_type;

  template<typename _Tp1> struct rebind { typedef devAllocator<_Tp1> other; };
  devAllocator() throw() { }
  devAllocator(const devAllocator&) throw() { }
  template<typename _Tp1> devAllocator(const devAllocator<_Tp1>&) throw() { }
  ~devAllocator() throw() { }
  pointer address(reference __x) const { return &__x; }
  size_type max_size() const throw() { return size_t(-1) / sizeof(_Tp); }

  pointer allocate(size_type __n, const void* _p= 0)
  {
    size_type bytes = __n*sizeof(_Tp);
    profilerAllocate(bytes);
    _Tp *ptr = (_Tp*) MemoryManager::AcceleratorAllocate(bytes);
    assert( ( (_Tp*)ptr != (_Tp *)NULL ) );
    return ptr;
  }

  void deallocate(pointer __p, size_type __n)
  {
    size_type bytes = __n * sizeof(_Tp);
    profilerFree(bytes);
    MemoryManager::AcceleratorFree((void *)__p,bytes);
  }
  void construct(pointer __p, const _Tp& __val) { };
  void construct(pointer __p) { };
  void destroy(pointer __p) { };
};
template<typename _Tp>  inline bool operator==(const devAllocator<_Tp>&, const devAllocator<_Tp>&){ return true; }
template<typename _Tp>  inline bool operator!=(const devAllocator<_Tp>&, const devAllocator<_Tp>&){ return false; }

////////////////////////////////////////////////////////////////////////////////
// Template typedefs
////////////////////////////////////////////////////////////////////////////////
template<class T> using commAllocator = uvmAllocator<T>;
template<class T> using Vector     = std::vector<T,uvmAllocator<T> >;
template<class T> using commVector = std::vector<T,uvmAllocator<T> >;
//template<class T> using Matrix   = std::vector<std::vector<T,alignedAllocator<T> > >;
#ifdef ACCELERATOR_CSHIFT
// Cshift on device
template<class T> using cshiftAllocator = devAllocator<T>;
#else
// Cshift on host
template<class T> using cshiftAllocator = std::allocator<T>;
#endif

template<class T> using Vector        = std::vector<T,uvmAllocator<T> >;
template<class T> using stencilVector = std::vector<T,alignedAllocator<T> >;
template<class T> using commVector    = std::vector<T,devAllocator<T> >;
template<class T> using cshiftVector  = std::vector<T,cshiftAllocator<T> >;

NAMESPACE_END(Grid);
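
Note: the typedefs above pick an allocator per use case (unified memory for general Vectors, aligned host memory for stencil buffers, device-resident memory for comms and Cshift buffers). A minimal, self-contained sketch of the std::vector plumbing they rely on; demoAllocator/demoVector are placeholders, not Grid names (Grid's allocators swap malloc/free for MemoryManager calls but keep this interface):

#include <cstdlib>
#include <vector>

template<typename T>
struct demoAllocator {
  typedef T value_type;
  T* allocate(std::size_t n) { return static_cast<T*>(std::malloc(n * sizeof(T))); }
  void deallocate(T* p, std::size_t) { std::free(p); }
};
template<typename T, typename U>
bool operator==(const demoAllocator<T>&, const demoAllocator<U>&) { return true; }
template<typename T, typename U>
bool operator!=(const demoAllocator<T>&, const demoAllocator<U>&) { return false; }

template<class T> using demoVector = std::vector<T, demoAllocator<T> >;

int main() {
  demoVector<double> v(16, 1.0);  // storage obtained through demoAllocator
  return v.size() == 16 ? 0 : 1;
}
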
@@ -136,11 +136,20 @@ void MemoryManager::Init(void)
      Ncache[SharedSmall]=Nc;
    }
  }

}

void MemoryManager::InitMessage(void) {

#ifndef GRID_UVM
  std::cout << GridLogMessage << "MemoryManager Cache "<< MemoryManager::DeviceMaxBytes <<" bytes "<<std::endl;
#endif

  std::cout << GridLogMessage<< "MemoryManager::Init() setting up"<<std::endl;
#ifdef ALLOCATION_CACHE
  std::cout << GridLogMessage<< "MemoryManager::Init() cache pool for recent allocations: SMALL "<<Ncache[CpuSmall]<<" LARGE "<<Ncache[Cpu]<<std::endl;
#endif


#ifdef GRID_UVM
  std::cout << GridLogMessage<< "MemoryManager::Init() Unified memory space"<<std::endl;
#ifdef GRID_CUDA

@@ -164,6 +173,7 @@ void MemoryManager::Init(void)
  std::cout << GridLogMessage<< "MemoryManager::Init() Using SYCL malloc_device"<<std::endl;
#endif
#endif

}

void *MemoryManager::Insert(void *ptr,size_t bytes,int type)

@@ -34,8 +34,6 @@ NAMESPACE_BEGIN(Grid);

// Move control to configure.ac and Config.h?

#define ALLOCATION_CACHE
#define GRID_ALLOC_ALIGN (2*1024*1024)
#define GRID_ALLOC_SMALL_LIMIT (4096)

/*Pinning pages is costly*/

@@ -93,11 +91,12 @@ private:
  static void *Insert(void *ptr,size_t bytes,AllocationCacheEntry *entries,int ncache,int &victim) ;
  static void *Lookup(size_t bytes,AllocationCacheEntry *entries,int ncache) ;

  static void *AcceleratorAllocate(size_t bytes);
  static void  AcceleratorFree    (void *ptr,size_t bytes);
  static void PrintBytes(void);
public:
  static void Init(void);
  static void InitMessage(void);
  static void *AcceleratorAllocate(size_t bytes);
  static void  AcceleratorFree    (void *ptr,size_t bytes);
  static void *SharedAllocate(size_t bytes);
  static void  SharedFree    (void *ptr,size_t bytes);
  static void *CpuAllocate(size_t bytes);

@@ -1,11 +1,12 @@
#include <Grid/GridCore.h>

#ifndef GRID_UVM

#warning "Using explicit device memory copies"
NAMESPACE_BEGIN(Grid);
//#define dprintf(...) printf ( __VA_ARGS__ ); fflush(stdout);
#define dprintf(...)


////////////////////////////////////////////////////////////
// For caching copies of data on device
////////////////////////////////////////////////////////////

@@ -103,7 +104,7 @@ void MemoryManager::AccDiscard(AcceleratorViewEntry &AccCache)
  ///////////////////////////////////////////////////////////
  assert(AccCache.state!=Empty);

  //  dprintf("MemoryManager: Discard(%llx) %llx\n",(uint64_t)AccCache.CpuPtr,(uint64_t)AccCache.AccPtr);
  dprintf("MemoryManager: Discard(%llx) %llx\n",(uint64_t)AccCache.CpuPtr,(uint64_t)AccCache.AccPtr);
  assert(AccCache.accLock==0);
  assert(AccCache.cpuLock==0);
  assert(AccCache.CpuPtr!=(uint64_t)NULL);

@@ -111,7 +112,7 @@ void MemoryManager::AccDiscard(AcceleratorViewEntry &AccCache)
    AcceleratorFree((void *)AccCache.AccPtr,AccCache.bytes);
    DeviceBytes   -=AccCache.bytes;
    LRUremove(AccCache);
    //  dprintf("MemoryManager: Free(%llx) LRU %lld Total %lld\n",(uint64_t)AccCache.AccPtr,DeviceLRUBytes,DeviceBytes);
    dprintf("MemoryManager: Free(%llx) LRU %lld Total %lld\n",(uint64_t)AccCache.AccPtr,DeviceLRUBytes,DeviceBytes);
  }
  uint64_t CpuPtr = AccCache.CpuPtr;
  EntryErase(CpuPtr);

@@ -125,7 +126,7 @@ void MemoryManager::Evict(AcceleratorViewEntry &AccCache)
  ///////////////////////////////////////////////////////////////////////////
  assert(AccCache.state!=Empty);

  //  dprintf("MemoryManager: Evict(%llx) %llx\n",(uint64_t)AccCache.CpuPtr,(uint64_t)AccCache.AccPtr);
  dprintf("MemoryManager: Evict(%llx) %llx\n",(uint64_t)AccCache.CpuPtr,(uint64_t)AccCache.AccPtr);
  assert(AccCache.accLock==0);
  assert(AccCache.cpuLock==0);
  if(AccCache.state==AccDirty) {

@@ -136,7 +137,7 @@ void MemoryManager::Evict(AcceleratorViewEntry &AccCache)
    AcceleratorFree((void *)AccCache.AccPtr,AccCache.bytes);
    DeviceBytes   -=AccCache.bytes;
    LRUremove(AccCache);
    //  dprintf("MemoryManager: Free(%llx) footprint now %lld \n",(uint64_t)AccCache.AccPtr,DeviceBytes);
    dprintf("MemoryManager: Free(%llx) footprint now %lld \n",(uint64_t)AccCache.AccPtr,DeviceBytes);
  }
  uint64_t CpuPtr = AccCache.CpuPtr;
  EntryErase(CpuPtr);

@@ -149,7 +150,7 @@ void MemoryManager::Flush(AcceleratorViewEntry &AccCache)
  assert(AccCache.AccPtr!=(uint64_t)NULL);
  assert(AccCache.CpuPtr!=(uint64_t)NULL);
  acceleratorCopyFromDevice((void *)AccCache.AccPtr,(void *)AccCache.CpuPtr,AccCache.bytes);
  //  dprintf("MemoryManager: Flush  %llx -> %llx\n",(uint64_t)AccCache.AccPtr,(uint64_t)AccCache.CpuPtr); fflush(stdout);
  dprintf("MemoryManager: Flush  %llx -> %llx\n",(uint64_t)AccCache.AccPtr,(uint64_t)AccCache.CpuPtr); fflush(stdout);
  DeviceToHostBytes+=AccCache.bytes;
  DeviceToHostXfer++;
  AccCache.state=Consistent;

@@ -164,7 +165,7 @@ void MemoryManager::Clone(AcceleratorViewEntry &AccCache)
    AccCache.AccPtr=(uint64_t)AcceleratorAllocate(AccCache.bytes);
    DeviceBytes+=AccCache.bytes;
  }
  //  dprintf("MemoryManager: Clone %llx <- %llx\n",(uint64_t)AccCache.AccPtr,(uint64_t)AccCache.CpuPtr); fflush(stdout);
  dprintf("MemoryManager: Clone %llx <- %llx\n",(uint64_t)AccCache.AccPtr,(uint64_t)AccCache.CpuPtr); fflush(stdout);
  acceleratorCopyToDevice((void *)AccCache.CpuPtr,(void *)AccCache.AccPtr,AccCache.bytes);
  HostToDeviceBytes+=AccCache.bytes;
  HostToDeviceXfer++;

@@ -227,18 +228,24 @@ uint64_t MemoryManager::AcceleratorViewOpen(uint64_t CpuPtr,size_t bytes,ViewMod
  // Find if present, otherwise get or force an empty
  ////////////////////////////////////////////////////////////////////////////
  if ( EntryPresent(CpuPtr)==0 ){
    EvictVictims(bytes);
    EntryCreate(CpuPtr,bytes,mode,hint);
  }

  auto AccCacheIterator = EntryLookup(CpuPtr);
  auto & AccCache = AccCacheIterator->second;

  if (!AccCache.AccPtr) {
    EvictVictims(bytes);
  }
  assert((mode==AcceleratorRead)||(mode==AcceleratorWrite)||(mode==AcceleratorWriteDiscard));

  assert(AccCache.cpuLock==0);  // Programming error

  if(AccCache.state!=Empty) {
    dprintf("ViewOpen found entry %llx %llx : %lld %lld\n",
            (uint64_t)AccCache.CpuPtr,
            (uint64_t)CpuPtr,
            (uint64_t)AccCache.bytes,
            (uint64_t)bytes);
    assert(AccCache.CpuPtr == CpuPtr);
    assert(AccCache.bytes  ==bytes);
  }

@@ -285,21 +292,21 @@ uint64_t MemoryManager::AcceleratorViewOpen(uint64_t CpuPtr,size_t bytes,ViewMod
      AccCache.state  = Consistent; // CpuDirty + AccRead => Consistent
    }
    AccCache.accLock++;
    //	printf("Copied CpuDirty entry into device accLock %d\n",AccCache.accLock);
    dprintf("Copied CpuDirty entry into device accLock %d\n",AccCache.accLock);
  } else if(AccCache.state==Consistent) {
    if((mode==AcceleratorWrite)||(mode==AcceleratorWriteDiscard))
      AccCache.state  = AccDirty;   // Consistent + AcceleratorWrite=> AccDirty
    else
      AccCache.state  = Consistent; // Consistent + AccRead => Consistent
    AccCache.accLock++;
    //	printf("Consistent entry into device accLock %d\n",AccCache.accLock);
    dprintf("Consistent entry into device accLock %d\n",AccCache.accLock);
  } else if(AccCache.state==AccDirty) {
    if((mode==AcceleratorWrite)||(mode==AcceleratorWriteDiscard))
      AccCache.state  = AccDirty; // AccDirty + AcceleratorWrite=> AccDirty
    else
      AccCache.state  = AccDirty; // AccDirty + AccRead => AccDirty
    AccCache.accLock++;
    //	printf("AccDirty entry into device accLock %d\n",AccCache.accLock);
    dprintf("AccDirty entry into device accLock %d\n",AccCache.accLock);
  } else {
    assert(0);
  }
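
Note: the branches above implement a small per-entry cache-coherence state machine (Empty, CpuDirty, Consistent, AccDirty). A compact, hypothetical distillation of just the accelerator-side transition function follows; the rows for Empty and for CpuDirty-plus-write are my assumption from the surrounding code, since the hunk shows only the other transitions:

enum class CacheState { Empty, CpuDirty, Consistent, AccDirty };
enum class Mode { AcceleratorRead, AcceleratorWrite, AcceleratorWriteDiscard };

// Reads leave the entry consistent (after any needed host->device Clone);
// writes mark the device copy as the only up-to-date one.
CacheState accViewTransition(CacheState s, Mode m) {
  bool write = (m == Mode::AcceleratorWrite) || (m == Mode::AcceleratorWriteDiscard);
  switch (s) {
    case CacheState::Empty:      return write ? CacheState::AccDirty : CacheState::Consistent;
    case CacheState::CpuDirty:   return write ? CacheState::AccDirty : CacheState::Consistent;
    case CacheState::Consistent: return write ? CacheState::AccDirty : CacheState::Consistent;
    case CacheState::AccDirty:   return CacheState::AccDirty;
  }
  return s;
}
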
@@ -361,13 +368,16 @@ uint64_t MemoryManager::CpuViewOpen(uint64_t CpuPtr,size_t bytes,ViewMode mode,V
  // Find if present, otherwise get or force an empty
  ////////////////////////////////////////////////////////////////////////////
  if ( EntryPresent(CpuPtr)==0 ){
    EvictVictims(bytes);
    EntryCreate(CpuPtr,bytes,mode,transient);
  }

  auto AccCacheIterator = EntryLookup(CpuPtr);
  auto & AccCache = AccCacheIterator->second;


  if (!AccCache.AccPtr) {
    EvictVictims(bytes);
  }

  assert((mode==CpuRead)||(mode==CpuWrite));
  assert(AccCache.accLock==0);  // Programming error

@@ -1,7 +1,6 @@
#include <Grid/GridCore.h>
#ifdef GRID_UVM

#warning "Grid is assuming unified virtual memory address space"
NAMESPACE_BEGIN(Grid);
/////////////////////////////////////////////////////////////////////////////////
// View management is 1:1 address space mapping

@@ -36,7 +36,7 @@ static const int CbBlack=1;
static const int Even   =CbRed;
static const int Odd    =CbBlack;

accelerator_inline int RedBlackCheckerBoardFromOindex (int oindex, Coordinate &rdim, Coordinate &chk_dim_msk)
accelerator_inline int RedBlackCheckerBoardFromOindex (int oindex,const Coordinate &rdim,const Coordinate &chk_dim_msk)
{
  int nd=rdim.size();
  Coordinate coor(nd);

@@ -1,4 +1,3 @@

/*************************************************************************************

Grid physics library, www.github.com/paboyle/Grid

@@ -108,6 +107,8 @@ public:
  ////////////////////////////////////////////////////////////
  // Reduction
  ////////////////////////////////////////////////////////////
  void GlobalMax(RealD &);
  void GlobalMax(RealF &);
  void GlobalSum(RealF &);
  void GlobalSumVector(RealF *,int N);
  void GlobalSum(RealD &);

@@ -138,21 +139,6 @@ public:
                      int recv_from_rank,
                      int bytes);

  void SendRecvPacket(void *xmit,
                      void *recv,
                      int xmit_to_rank,
                      int recv_from_rank,
                      int bytes);

  void SendToRecvFromBegin(std::vector<CommsRequest_t> &list,
                           void *xmit,
                           int xmit_to_rank,
                           void *recv,
                           int recv_from_rank,
                           int bytes);

  void SendToRecvFromComplete(std::vector<CommsRequest_t> &waitall);

  double StencilSendToRecvFrom(void *xmit,
                               int xmit_to_rank,
                               void *recv,

@@ -1,6 +1,6 @@
/*************************************************************************************

Grid physics library, www.github.com/paboyle/Grid
Grid physics library, www.github.com/paboyle/Grid

Source file: ./lib/communicator/Communicator_mpi.cc

@@ -35,7 +35,7 @@ Grid_MPI_Comm       CartesianCommunicator::communicator_world;
////////////////////////////////////////////
// First initialise of comms system
////////////////////////////////////////////
void CartesianCommunicator::Init(int *argc, char ***argv)
void CartesianCommunicator::Init(int *argc, char ***argv)
{

  int flag;

@@ -43,8 +43,16 @@ void CartesianCommunicator::Init(int *argc, char ***argv)

  MPI_Initialized(&flag); // needed to coexist with other libs apparently
  if ( !flag ) {
    MPI_Init_thread(argc,argv,MPI_THREAD_MULTIPLE,&provided);

#ifndef GRID_COMMS_THREADS
    nCommThreads=1;
    // wrong results here too
    // For now: comms-overlap leads to wrong results in Benchmark_wilson even on single node MPI runs
    // other comms schemes are ok
    MPI_Init_thread(argc,argv,MPI_THREAD_SERIALIZED,&provided);
#else
    MPI_Init_thread(argc,argv,MPI_THREAD_MULTIPLE,&provided);
#endif
    //If only 1 comms thread we require any threading mode other than SINGLE, but for multiple comms threads we need MULTIPLE
    if( (nCommThreads == 1) && (provided == MPI_THREAD_SINGLE) ) {
      assert(0);
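
Note: a minimal sketch of the thread-level negotiation above, using only standard MPI calls: request a level, then verify what the library actually granted.

#include <mpi.h>
#include <cassert>

int main(int argc, char **argv) {
  int provided;
  MPI_Init_thread(&argc, &argv, MPI_THREAD_SERIALIZED, &provided);
  // One comms thread needs at least FUNNELED/SERIALIZED; several need MULTIPLE.
  assert(provided != MPI_THREAD_SINGLE);
  MPI_Finalize();
  return 0;
}
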
@@ -91,7 +99,7 @@ void CartesianCommunicator::ProcessorCoorFromRank(int rank, Coordinate &coor)
////////////////////////////////////////////////////////////////////////////////////////////////////////
// Initialises from communicator_world
////////////////////////////////////////////////////////////////////////////////////////////////////////
CartesianCommunicator::CartesianCommunicator(const Coordinate &processors)
CartesianCommunicator::CartesianCommunicator(const Coordinate &processors)
{
  MPI_Comm optimal_comm;
  ////////////////////////////////////////////////////

@@ -110,7 +118,7 @@ CartesianCommunicator::CartesianCommunicator(const Coordinate &processors)
//////////////////////////////////
// Try to subdivide communicator
//////////////////////////////////
CartesianCommunicator::CartesianCommunicator(const Coordinate &processors,const CartesianCommunicator &parent,int &srank)
CartesianCommunicator::CartesianCommunicator(const Coordinate &processors,const CartesianCommunicator &parent,int &srank)
{
  _ndimension = processors.size();  assert(_ndimension>=1);
  int parent_ndimension = parent._ndimension; assert(_ndimension >= parent._ndimension);

@@ -127,7 +135,7 @@ CartesianCommunicator::CartesianCommunicator(const Coordinate &processors,const
  //////////////////////////////////////////////////////////////////////////////////////////////////////
  // split the communicator
  //////////////////////////////////////////////////////////////////////////////////////////////////////
  //  int Nparent = parent._processors ;
  //  int Nparent = parent._processors ;
  int Nparent;
  MPI_Comm_size(parent.communicator,&Nparent);

@@ -149,13 +157,13 @@ CartesianCommunicator::CartesianCommunicator(const Coordinate &processors,const
  }

  // rank within subcomm ; srank is rank of subcomm within blocks of subcomms
  int crank;
  int crank;
  // Mpi uses the reverse Lexico convention to us; so reversed routines called
  Lexicographic::IndexFromCoorReversed(ccoor,crank,processors); // processors is the split grid dimensions
  Lexicographic::IndexFromCoorReversed(scoor,srank,ssize);      // ssize is the number of split grids

  MPI_Comm comm_split;
  if ( Nchild > 1 ) {
  if ( Nchild > 1 ) {

    ////////////////////////////////////////////////////////////////
    // Split the communicator

@@ -180,11 +188,11 @@ CartesianCommunicator::CartesianCommunicator(const Coordinate &processors,const
  SetCommunicator(comm_split);

  ///////////////////////////////////////////////
  // Free the temp communicator
  // Free the temp communicator
  ///////////////////////////////////////////////
  MPI_Comm_free(&comm_split);

  if(0){
  if(0){
    std::cout << " ndim " <<_ndimension<<" " << parent._ndimension << std::endl;
    for(int d=0;d<processors.size();d++){
      std::cout << d<< " " << _processor_coor[d] <<" " <<  ccoor[d]<<std::endl;

@@ -245,7 +253,7 @@ CartesianCommunicator::~CartesianCommunicator()
    for(int i=0;i<communicator_halo.size();i++){
      MPI_Comm_free(&communicator_halo[i]);
    }
  }
}
}
void CartesianCommunicator::GlobalSum(uint32_t &u){
  int ierr=MPI_Allreduce(MPI_IN_PLACE,&u,1,MPI_UINT32_T,MPI_SUM,communicator);

@@ -267,6 +275,16 @@ void CartesianCommunicator::GlobalXOR(uint64_t &u){
  int ierr=MPI_Allreduce(MPI_IN_PLACE,&u,1,MPI_UINT64_T,MPI_BXOR,communicator);
  assert(ierr==0);
}
void CartesianCommunicator::GlobalMax(float &f)
{
  int ierr=MPI_Allreduce(MPI_IN_PLACE,&f,1,MPI_FLOAT,MPI_MAX,communicator);
  assert(ierr==0);
}
void CartesianCommunicator::GlobalMax(double &d)
{
  int ierr = MPI_Allreduce(MPI_IN_PLACE,&d,1,MPI_DOUBLE,MPI_MAX,communicator);
  assert(ierr==0);
}
void CartesianCommunicator::GlobalSum(float &f){
  int ierr=MPI_Allreduce(MPI_IN_PLACE,&f,1,MPI_FLOAT,MPI_SUM,communicator);
  assert(ierr==0);
@ -294,60 +312,28 @@ void CartesianCommunicator::SendToRecvFrom(void *xmit,
|
||||
int bytes)
|
||||
{
|
||||
std::vector<CommsRequest_t> reqs(0);
|
||||
// unsigned long xcrc = crc32(0L, Z_NULL, 0);
|
||||
// unsigned long rcrc = crc32(0L, Z_NULL, 0);
|
||||
// xcrc = crc32(xcrc,(unsigned char *)xmit,bytes);
|
||||
SendToRecvFromBegin(reqs,xmit,dest,recv,from,bytes);
|
||||
SendToRecvFromComplete(reqs);
|
||||
// rcrc = crc32(rcrc,(unsigned char *)recv,bytes);
|
||||
// printf("proc %d SendToRecvFrom %d bytes %lx %lx\n",_processor,bytes,xcrc,rcrc);
|
||||
}
|
||||
void CartesianCommunicator::SendRecvPacket(void *xmit,
|
||||
void *recv,
|
||||
int sender,
|
||||
int receiver,
|
||||
int bytes)
|
||||
{
|
||||
MPI_Status stat;
|
||||
assert(sender != receiver);
|
||||
int tag = sender;
|
||||
if ( _processor == sender ) {
|
||||
MPI_Send(xmit, bytes, MPI_CHAR,receiver,tag,communicator);
|
||||
}
|
||||
if ( _processor == receiver ) {
|
||||
MPI_Recv(recv, bytes, MPI_CHAR,sender,tag,communicator,&stat);
|
||||
}
|
||||
}
|
||||
// Basic Halo comms primitive
|
||||
void CartesianCommunicator::SendToRecvFromBegin(std::vector<CommsRequest_t> &list,
|
||||
void *xmit,
|
||||
int dest,
|
||||
void *recv,
|
||||
int from,
|
||||
int bytes)
|
||||
{
|
||||
unsigned long xcrc = crc32(0L, Z_NULL, 0);
|
||||
unsigned long rcrc = crc32(0L, Z_NULL, 0);
|
||||
|
||||
int myrank = _processor;
|
||||
int ierr;
|
||||
|
||||
if ( CommunicatorPolicy == CommunicatorPolicyConcurrent ) {
|
||||
MPI_Request xrq;
|
||||
MPI_Request rrq;
|
||||
// Enforce no UVM in comms, device or host OK
|
||||
assert(acceleratorIsCommunicable(xmit));
|
||||
assert(acceleratorIsCommunicable(recv));
|
||||
|
||||
ierr =MPI_Irecv(recv, bytes, MPI_CHAR,from,from,communicator,&rrq);
|
||||
ierr|=MPI_Isend(xmit, bytes, MPI_CHAR,dest,_processor,communicator,&xrq);
|
||||
|
||||
assert(ierr==0);
|
||||
list.push_back(xrq);
|
||||
list.push_back(rrq);
|
||||
} else {
|
||||
// Give the CPU to MPI immediately; can use threads to overlap optionally
|
||||
ierr=MPI_Sendrecv(xmit,bytes,MPI_CHAR,dest,myrank,
|
||||
recv,bytes,MPI_CHAR,from, from,
|
||||
communicator,MPI_STATUS_IGNORE);
|
||||
assert(ierr==0);
|
||||
}
|
||||
// Give the CPU to MPI immediately; can use threads to overlap optionally
|
||||
// printf("proc %d SendToRecvFrom %d bytes Sendrecv \n",_processor,bytes);
|
||||
ierr=MPI_Sendrecv(xmit,bytes,MPI_CHAR,dest,myrank,
|
||||
recv,bytes,MPI_CHAR,from, from,
|
||||
communicator,MPI_STATUS_IGNORE);
|
||||
assert(ierr==0);
|
||||
|
||||
// xcrc = crc32(xcrc,(unsigned char *)xmit,bytes);
|
||||
// rcrc = crc32(rcrc,(unsigned char *)recv,bytes);
|
||||
// printf("proc %d SendToRecvFrom %d bytes xcrc %lx rcrc %lx\n",_processor,bytes,xcrc,rcrc); fflush
|
||||
}
|
||||
|
||||
// Basic Halo comms primitive
|
||||
double CartesianCommunicator::StencilSendToRecvFrom( void *xmit,
|
||||
int dest,
|
||||
void *recv,
|
||||
@ -367,7 +353,7 @@ double CartesianCommunicator::StencilSendToRecvFromBegin(std::vector<CommsReques
|
||||
int from,
|
||||
int bytes,int dir)
|
||||
{
|
||||
int ncomm =communicator_halo.size();
|
||||
int ncomm =communicator_halo.size();
|
||||
int commdir=dir%ncomm;
|
||||
|
||||
MPI_Request xrq;
|
||||
@ -382,36 +368,31 @@ double CartesianCommunicator::StencilSendToRecvFromBegin(std::vector<CommsReques
|
||||
assert(from != _processor);
|
||||
assert(gme == ShmRank);
|
||||
double off_node_bytes=0.0;
|
||||
int tag;
|
||||
|
||||
if ( gfrom ==MPI_UNDEFINED) {
|
||||
ierr=MPI_Irecv(recv, bytes, MPI_CHAR,from,from,communicator_halo[commdir],&rrq);
|
||||
tag= dir+from*32;
|
||||
ierr=MPI_Irecv(recv, bytes, MPI_CHAR,from,tag,communicator_halo[commdir],&rrq);
|
||||
assert(ierr==0);
|
||||
list.push_back(rrq);
|
||||
off_node_bytes+=bytes;
|
||||
}
|
||||
|
||||
if ( gdest == MPI_UNDEFINED ) {
|
||||
ierr =MPI_Isend(xmit, bytes, MPI_CHAR,dest,_processor,communicator_halo[commdir],&xrq);
|
||||
tag= dir+_processor*32;
|
||||
ierr =MPI_Isend(xmit, bytes, MPI_CHAR,dest,tag,communicator_halo[commdir],&xrq);
|
||||
assert(ierr==0);
|
||||
list.push_back(xrq);
|
||||
off_node_bytes+=bytes;
|
||||
}
|
||||
|
||||
if ( CommunicatorPolicy == CommunicatorPolicySequential ) {
|
||||
if ( CommunicatorPolicy == CommunicatorPolicySequential ) {
|
||||
this->StencilSendToRecvFromComplete(list,dir);
|
||||
}
|
||||
|
||||
return off_node_bytes;
|
||||
}
|
||||
void CartesianCommunicator::StencilSendToRecvFromComplete(std::vector<CommsRequest_t> &waitall,int dir)
|
||||
{
|
||||
SendToRecvFromComplete(waitall);
|
||||
}
|
||||
void CartesianCommunicator::StencilBarrier(void)
|
||||
{
|
||||
MPI_Barrier (ShmComm);
|
||||
}
|
||||
void CartesianCommunicator::SendToRecvFromComplete(std::vector<CommsRequest_t> &list)
|
||||
void CartesianCommunicator::StencilSendToRecvFromComplete(std::vector<CommsRequest_t> &list,int dir)
|
||||
{
|
||||
int nreq=list.size();
|
||||
|
||||
@ -422,6 +403,13 @@ void CartesianCommunicator::SendToRecvFromComplete(std::vector<CommsRequest_t> &
|
||||
assert(ierr==0);
|
||||
list.resize(0);
|
||||
}
|
||||
void CartesianCommunicator::StencilBarrier(void)
|
||||
{
|
||||
MPI_Barrier (ShmComm);
|
||||
}
|
||||
//void CartesianCommunicator::SendToRecvFromComplete(std::vector<CommsRequest_t> &list)
|
||||
//{
|
||||
//}
|
||||
void CartesianCommunicator::Barrier(void)
|
||||
{
|
||||
int ierr = MPI_Barrier(communicator);
|
||||
@ -436,8 +424,8 @@ void CartesianCommunicator::Broadcast(int root,void* data, int bytes)
|
||||
communicator);
|
||||
assert(ierr==0);
|
||||
}
|
||||
int CartesianCommunicator::RankWorld(void){
|
||||
int r;
|
||||
int CartesianCommunicator::RankWorld(void){
|
||||
int r;
|
||||
MPI_Comm_rank(communicator_world,&r);
|
||||
return r;
|
||||
}
|
||||
@ -470,7 +458,7 @@ void CartesianCommunicator::AllToAll(void *in,void *out,uint64_t words,uint64_t
|
||||
// When 24*4 bytes multiples get 50x 10^9 >>> 2x10^9 Y2K bug.
|
||||
// (Turns up on 32^3 x 64 Gparity too)
|
||||
MPI_Datatype object;
|
||||
int iwords;
|
||||
int iwords;
|
||||
int ibytes;
|
||||
iwords = words;
|
||||
ibytes = bytes;
|
||||
@ -483,5 +471,3 @@ void CartesianCommunicator::AllToAll(void *in,void *out,uint64_t words,uint64_t
|
||||
}
|
||||
|
||||
NAMESPACE_END(Grid);
|
||||
|
||||
|
||||
|
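Editor's note: the tag change in the stencil hunks above (tag = dir + rank*32 rather than the bare source rank) is what keeps concurrent exchanges in different stencil directions between the same pair of ranks from matching each other's messages. A minimal standalone sketch of that pattern follows; it is not Grid's actual halo path (which uses per-direction communicators), and the buffer arrays and function name are hypothetical. It assumes at most 32 directions, so the tag encoding is collision-free.

#include <mpi.h>
#include <vector>
#include <cassert>

// Post one non-blocking exchange per stencil direction; encoding the
// direction in the tag stops unrelated directions from cross-matching.
void halo_exchange(std::vector<char*> &send, std::vector<char*> &recv,
                   int dest, int from, int bytes, int ndir)
{
  int rank; MPI_Comm_rank(MPI_COMM_WORLD,&rank);
  std::vector<MPI_Request> reqs;
  for(int dir=0;dir<ndir;dir++){
    MPI_Request xrq, rrq;
    int rtag = dir + from*32;   // must equal the tag the sender computes
    int stag = dir + rank*32;   // assumes ndir <= 32
    int ierr;
    ierr = MPI_Irecv(recv[dir], bytes, MPI_CHAR, from, rtag, MPI_COMM_WORLD, &rrq);
    ierr|= MPI_Isend(send[dir], bytes, MPI_CHAR, dest, stag, MPI_COMM_WORLD, &xrq);
    assert(ierr==0);
    reqs.push_back(rrq);
    reqs.push_back(xrq);
  }
  MPI_Waitall((int)reqs.size(), &reqs[0], MPI_STATUSES_IGNORE);
}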
@@ -67,6 +67,8 @@ CartesianCommunicator::CartesianCommunicator(const Coordinate &processors)

CartesianCommunicator::~CartesianCommunicator(){}

void CartesianCommunicator::GlobalMax(float &){}
void CartesianCommunicator::GlobalMax(double &){}
void CartesianCommunicator::GlobalSum(float &){}
void CartesianCommunicator::GlobalSumVector(float *,int N){}
void CartesianCommunicator::GlobalSum(double &){}
@@ -77,15 +79,6 @@ void CartesianCommunicator::GlobalSumVector(uint64_t *,int N){}
void CartesianCommunicator::GlobalXOR(uint32_t &){}
void CartesianCommunicator::GlobalXOR(uint64_t &){}

void CartesianCommunicator::SendRecvPacket(void *xmit,
void *recv,
int xmit_to_rank,
int recv_from_rank,
int bytes)
{
assert(0);
}


// Basic Halo comms primitive -- should never call in single node
void CartesianCommunicator::SendToRecvFrom(void *xmit,
@@ -96,20 +89,6 @@ void CartesianCommunicator::SendToRecvFrom(void *xmit,
{
assert(0);
}
void CartesianCommunicator::SendToRecvFromBegin(std::vector<CommsRequest_t> &list,
void *xmit,
int dest,
void *recv,
int from,
int bytes)
{
assert(0);
}

void CartesianCommunicator::SendToRecvFromComplete(std::vector<CommsRequest_t> &list)
{
assert(0);
}
void CartesianCommunicator::AllToAll(int dim,void *in,void *out,uint64_t words,uint64_t bytes)
{
bcopy(in,out,bytes*words);
@@ -137,10 +116,6 @@ double CartesianCommunicator::StencilSendToRecvFrom( void *xmit,
int recv_from_rank,
int bytes, int dir)
{
std::vector<CommsRequest_t> list;
// Discard the "dir"
SendToRecvFromBegin   (list,xmit,xmit_to_rank,recv,recv_from_rank,bytes);
SendToRecvFromComplete(list);
return 2.0*bytes;
}
double CartesianCommunicator::StencilSendToRecvFromBegin(std::vector<CommsRequest_t> &list,
@@ -150,13 +125,10 @@ double CartesianCommunicator::StencilSendToRecvFromBegin(std::vector<CommsReques
int recv_from_rank,
int bytes, int dir)
{
// Discard the "dir"
SendToRecvFromBegin(list,xmit,xmit_to_rank,recv,recv_from_rank,bytes);
return 2.0*bytes;
}
void CartesianCommunicator::StencilSendToRecvFromComplete(std::vector<CommsRequest_t> &waitall,int dir)
{
SendToRecvFromComplete(waitall);
}

void CartesianCommunicator::StencilBarrier(void){};
@@ -102,7 +102,7 @@ public:
///////////////////////////////////////////////////
static void SharedMemoryAllocate(uint64_t bytes, int flags);
static void SharedMemoryFree(void);
static void SharedMemoryCopy(void *dest,const void *src,size_t bytes);
static void SharedMemoryCopy(void *dest,void *src,size_t bytes);
static void SharedMemoryZero(void *dest,size_t bytes);

};
@@ -32,6 +32,9 @@ Author: Peter Boyle <paboyle@ph.ed.ac.uk>
#ifdef GRID_CUDA
#include <cuda_runtime_api.h>
#endif
#ifdef GRID_HIP
#include <hip/hip_runtime_api.h>
#endif

NAMESPACE_BEGIN(Grid);
#define header "SharedMemoryMpi: "
@@ -47,7 +50,12 @@ void GlobalSharedMemory::Init(Grid_MPI_Comm comm)
/////////////////////////////////////////////////////////////////////
// Split into groups that can share memory
/////////////////////////////////////////////////////////////////////
#ifndef GRID_MPI3_SHM_NONE
MPI_Comm_split_type(comm, MPI_COMM_TYPE_SHARED, 0, MPI_INFO_NULL,&WorldShmComm);
#else
MPI_Comm_split(comm, WorldRank, 0, &WorldShmComm);
#endif

MPI_Comm_rank(WorldShmComm ,&WorldShmRank);
MPI_Comm_size(WorldShmComm ,&WorldShmSize);

@@ -420,7 +428,7 @@ void GlobalSharedMemory::SharedMemoryAllocate(uint64_t bytes, int flags)
////////////////////////////////////////////////////////////////////////////////////////////
// Hugetlbfs mapping intended
////////////////////////////////////////////////////////////////////////////////////////////
#ifdef GRID_CUDA
#if defined(GRID_CUDA) ||defined(GRID_HIP)
void GlobalSharedMemory::SharedMemoryAllocate(uint64_t bytes, int flags)
{
void * ShmCommBuf ;
@@ -443,17 +451,16 @@ void GlobalSharedMemory::SharedMemoryAllocate(uint64_t bytes, int flags)
///////////////////////////////////////////////////////////////////////////////////////////////////////////
// Each MPI rank should allocate our own buffer
///////////////////////////////////////////////////////////////////////////////////////////////////////////
auto err = cudaMalloc(&ShmCommBuf, bytes);
if ( err != cudaSuccess) {
std::cerr << " SharedMemoryMPI.cc cudaMallocManaged failed for " << bytes<<" bytes " <<cudaGetErrorString(err)<< std::endl;
exit(EXIT_FAILURE);
}
ShmCommBuf = acceleratorAllocDevice(bytes);

if (ShmCommBuf == (void *)NULL ) {
std::cerr << " SharedMemoryMPI.cc cudaMallocManaged failed NULL pointer for " << bytes<<" bytes " << std::endl;
std::cerr << " SharedMemoryMPI.cc acceleratorAllocDevice failed NULL pointer for " << bytes<<" bytes " << std::endl;
exit(EXIT_FAILURE);
}
if ( WorldRank == 0 ){
std::cout << header " SharedMemoryMPI.cc cudaMalloc "<< bytes << "bytes at "<< std::hex<< ShmCommBuf <<std::dec<<" for comms buffers " <<std::endl;
//  if ( WorldRank == 0 ){
if ( 1 ){
std::cout << WorldRank << header " SharedMemoryMPI.cc acceleratorAllocDevice "<< bytes
<< "bytes at "<< std::hex<< ShmCommBuf <<std::dec<<" for comms buffers " <<std::endl;
}
SharedMemoryZero(ShmCommBuf,bytes);

@@ -461,19 +468,31 @@ void GlobalSharedMemory::SharedMemoryAllocate(uint64_t bytes, int flags)
// Loop over ranks/gpu's on our node
///////////////////////////////////////////////////////////////////////////////////////////////////////////
for(int r=0;r<WorldShmSize;r++){


#ifndef GRID_MPI3_SHM_NONE
//////////////////////////////////////////////////
// If it is me, pass around the IPC access key
//////////////////////////////////////////////////
#ifdef GRID_CUDA
cudaIpcMemHandle_t handle;

if ( r==WorldShmRank ) {
err = cudaIpcGetMemHandle(&handle,ShmCommBuf);
auto err = cudaIpcGetMemHandle(&handle,ShmCommBuf);
if ( err != cudaSuccess) {
std::cerr << " SharedMemoryMPI.cc cudaIpcGetMemHandle failed for rank" << r <<" "<<cudaGetErrorString(err)<< std::endl;
exit(EXIT_FAILURE);
}
}
#endif
#ifdef GRID_HIP
hipIpcMemHandle_t handle;
if ( r==WorldShmRank ) {
auto err = hipIpcGetMemHandle(&handle,ShmCommBuf);
if ( err != hipSuccess) {
std::cerr << " SharedMemoryMPI.cc hipIpcGetMemHandle failed for rank" << r <<" "<<hipGetErrorString(err)<< std::endl;
exit(EXIT_FAILURE);
}
}
#endif
//////////////////////////////////////////////////
// Share this IPC handle across the Shm Comm
//////////////////////////////////////////////////
@@ -490,17 +509,31 @@ void GlobalSharedMemory::SharedMemoryAllocate(uint64_t bytes, int flags)
// If I am not the source, overwrite thisBuf with remote buffer
///////////////////////////////////////////////////////////////
void * thisBuf = ShmCommBuf;
#ifdef GRID_CUDA
if ( r!=WorldShmRank ) {
err = cudaIpcOpenMemHandle(&thisBuf,handle,cudaIpcMemLazyEnablePeerAccess);
auto err = cudaIpcOpenMemHandle(&thisBuf,handle,cudaIpcMemLazyEnablePeerAccess);
if ( err != cudaSuccess) {
std::cerr << " SharedMemoryMPI.cc cudaIpcOpenMemHandle failed for rank" << r <<" "<<cudaGetErrorString(err)<< std::endl;
exit(EXIT_FAILURE);
}
}
#endif
#ifdef GRID_HIP
if ( r!=WorldShmRank ) {
auto err = hipIpcOpenMemHandle(&thisBuf,handle,hipIpcMemLazyEnablePeerAccess);
if ( err != hipSuccess) {
std::cerr << " SharedMemoryMPI.cc hipIpcOpenMemHandle failed for rank" << r <<" "<<hipGetErrorString(err)<< std::endl;
exit(EXIT_FAILURE);
}
}
#endif
///////////////////////////////////////////////////////////////
// Save a copy of the device buffers
///////////////////////////////////////////////////////////////
WorldShmCommBufs[r] = thisBuf;
#else
WorldShmCommBufs[r] = ShmCommBuf;
#endif
}

_ShmAllocBytes=bytes;
@@ -633,7 +666,6 @@ void GlobalSharedMemory::SharedMemoryAllocate(uint64_t bytes, int flags)
#endif
void * ptr =  mmap(NULL,size, PROT_READ | PROT_WRITE, mmap_flag, fd, 0);

// std::cout << "Set WorldShmCommBufs["<<r<<"]="<<ptr<< "("<< size<< "bytes)"<<std::endl;
if ( ptr == (void * )MAP_FAILED ) {
perror("failed mmap");
assert(0);
@@ -683,7 +715,7 @@ void GlobalSharedMemory::SharedMemoryZero(void *dest,size_t bytes)
bzero(dest,bytes);
#endif
}
void GlobalSharedMemory::SharedMemoryCopy(void *dest,const void *src,size_t bytes)
void GlobalSharedMemory::SharedMemoryCopy(void *dest,void *src,size_t bytes)
{
#ifdef GRID_CUDA
cudaMemcpy(dest,src,bytes,cudaMemcpyDefault);
@@ -705,7 +737,11 @@ void SharedMemory::SetCommunicator(Grid_MPI_Comm comm)
/////////////////////////////////////////////////////////////////////
// Split into groups that can share memory
/////////////////////////////////////////////////////////////////////
#ifndef GRID_MPI3_SHM_NONE
MPI_Comm_split_type(comm, MPI_COMM_TYPE_SHARED, 0, MPI_INFO_NULL,&ShmComm);
#else
MPI_Comm_split(comm, rank, 0, &ShmComm);
#endif
MPI_Comm_rank(ShmComm ,&ShmRank);
MPI_Comm_size(ShmComm ,&ShmSize);
ShmCommBufs.resize(ShmSize);
@@ -735,19 +771,12 @@ void SharedMemory::SetCommunicator(Grid_MPI_Comm comm)
std::vector<int> ranks(size);   for(int r=0;r<size;r++) ranks[r]=r;
MPI_Group_translate_ranks (FullGroup,size,&ranks[0],ShmGroup, &ShmRanks[0]);

#ifdef GRID_IBM_SUMMIT
// Hide the shared memory path between sockets
// if even number of nodes
if ( (ShmSize & 0x1)==0 ) {
int SocketSize = ShmSize/2;
int mySocket = ShmRank/SocketSize;
#ifdef GRID_SHM_FORCE_MPI
// Hide the shared memory path between ranks
{
for(int r=0;r<size;r++){
int hisRank=ShmRanks[r];
if ( hisRank!= MPI_UNDEFINED ) {
int hisSocket=hisRank/SocketSize;
if ( hisSocket != mySocket ) {
ShmRanks[r] = MPI_UNDEFINED;
}
if ( r!=rank ) {
ShmRanks[r] = MPI_UNDEFINED;
}
}
}
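Editor's note: the IPC loop above follows a standard recipe -- every rank on a node allocates its own device buffer, publishes an IPC handle, and the others open it for peer access. A cut-down sketch of that recipe, assuming CUDA and a communicator already split with MPI_COMM_TYPE_SHARED; the function name is hypothetical and cleanup/error handling are elided:

#include <mpi.h>
#include <cuda_runtime_api.h>
#include <vector>
#include <cstdlib>

// Each rank in turn broadcasts its IPC handle; peers map the buffer.
std::vector<void*> map_node_buffers(MPI_Comm node_comm, size_t bytes)
{
  int rank,size;
  MPI_Comm_rank(node_comm,&rank);
  MPI_Comm_size(node_comm,&size);

  void *mine = nullptr;
  if ( cudaMalloc(&mine,bytes) != cudaSuccess ) exit(EXIT_FAILURE);

  std::vector<void*> bufs(size);
  for(int r=0;r<size;r++){
    cudaIpcMemHandle_t handle;
    if ( r==rank ) cudaIpcGetMemHandle(&handle,mine);
    MPI_Bcast(&handle,sizeof(handle),MPI_BYTE,r,node_comm);
    if ( r==rank ) {
      bufs[r] = mine;   // my own buffer: keep the raw pointer
    } else {
      cudaIpcOpenMemHandle(&bufs[r],handle,cudaIpcMemLazyEnablePeerAccess);
    }
  }
  return bufs;
}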
@@ -29,6 +29,7 @@ Author: Peter Boyle <paboyle@ph.ed.ac.uk>
#include <Grid/GridCore.h>

NAMESPACE_BEGIN(Grid);
#define header "SharedMemoryNone: "

/*Construct from an MPI communicator*/
void GlobalSharedMemory::Init(Grid_MPI_Comm comm)
@@ -55,6 +56,38 @@ void GlobalSharedMemory::OptimalCommunicator(const Coordinate &processors,Grid_M
////////////////////////////////////////////////////////////////////////////////////////////
// Hugetlbfs mapping intended, use anonymous mmap
////////////////////////////////////////////////////////////////////////////////////////////
#if 1
void GlobalSharedMemory::SharedMemoryAllocate(uint64_t bytes, int flags)
{
std::cout << header "SharedMemoryAllocate "<< bytes<< " GPU implementation "<<std::endl;
void * ShmCommBuf ;
assert(_ShmSetup==1);
assert(_ShmAlloc==0);

///////////////////////////////////////////////////////////////////////////////////////////////////////////
// Each MPI rank should allocate our own buffer
///////////////////////////////////////////////////////////////////////////////////////////////////////////
ShmCommBuf = acceleratorAllocDevice(bytes);

if (ShmCommBuf == (void *)NULL ) {
std::cerr << " SharedMemoryNone.cc acceleratorAllocDevice failed NULL pointer for " << bytes<<" bytes " << std::endl;
exit(EXIT_FAILURE);
}
if ( WorldRank == 0 ){
std::cout << WorldRank << header " SharedMemoryNone.cc acceleratorAllocDevice "<< bytes
<< "bytes at "<< std::hex<< ShmCommBuf <<std::dec<<" for comms buffers " <<std::endl;
}
SharedMemoryZero(ShmCommBuf,bytes);

///////////////////////////////////////////////////////////////////////////////////////////////////////////
// Loop over ranks/gpu's on our node
///////////////////////////////////////////////////////////////////////////////////////////////////////////
WorldShmCommBufs[0] = ShmCommBuf;

_ShmAllocBytes=bytes;
_ShmAlloc=1;
}
#else
void GlobalSharedMemory::SharedMemoryAllocate(uint64_t bytes, int flags)
{
void * ShmCommBuf ;
@@ -83,7 +116,15 @@ void GlobalSharedMemory::SharedMemoryAllocate(uint64_t bytes, int flags)
_ShmAllocBytes=bytes;
_ShmAlloc=1;
};

#endif
void GlobalSharedMemory::SharedMemoryZero(void *dest,size_t bytes)
{
acceleratorMemSet(dest,0,bytes);
}
void GlobalSharedMemory::SharedMemoryCopy(void *dest,void *src,size_t bytes)
{
acceleratorCopyToDevice(src,dest,bytes);
}
////////////////////////////////////////////////////////
// Global shared functionality finished
// Now move to per communicator functionality
@@ -52,23 +52,8 @@ Author: Peter Boyle <paboyle@ph.ed.ac.uk>

NAMESPACE_BEGIN(Grid);

template<typename Op, typename T1>
auto Cshift(const LatticeUnaryExpression<Op,T1> &expr,int dim,int shift)
-> Lattice<decltype(expr.op.func(eval(0, expr.arg1)))>
{
return Cshift(closure(expr),dim,shift);
}
template <class Op, class T1, class T2>
auto Cshift(const LatticeBinaryExpression<Op,T1,T2> &expr,int dim,int shift)
-> Lattice<decltype(expr.op.func(eval(0, expr.arg1),eval(0, expr.arg2)))>
{
return Cshift(closure(expr),dim,shift);
}
template <class Op, class T1, class T2, class T3>
auto Cshift(const LatticeTrinaryExpression<Op,T1,T2,T3> &expr,int dim,int shift)
-> Lattice<decltype(expr.op.func(eval(0, expr.arg1),
eval(0, expr.arg2),
eval(0, expr.arg3)))>
template<class Expression,typename std::enable_if<is_lattice_expr<Expression>::value,void>::type * = nullptr>
auto Cshift(const Expression &expr,int dim,int shift)  -> decltype(closure(expr))
{
return Cshift(closure(expr),dim,shift);
}
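Editor's note: the hunk above collapses three arity-specific overloads into one enable_if-constrained template: any type the expression-trait accepts is forced through closure() and then shifted. A stripped-down sketch of the same dispatch idiom, with hypothetical trait, type, and function names:

#include <type_traits>

// Hypothetical trait: true for our expression-template node types.
template<class T> struct is_expr : std::false_type {};

template<class L, class R> struct AddExpr { L l; R r; };
template<class L, class R> struct is_expr<AddExpr<L,R> > : std::true_type {};

struct Field { /* concrete container */ };
Field closure(const Field &f) { return f; }               // already concrete
template<class E> Field closure(const E &e) { return Field(); } // placeholder evaluator

Field shift(const Field &f, int dim, int s) { return f; } // real work would go here

// One template handles every expression arity: evaluate, then shift.
template<class E, typename std::enable_if<is_expr<E>::value,void>::type * = nullptr>
Field shift(const E &expr, int dim, int s)
{
  return shift(closure(expr), dim, s);
}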
@@ -35,7 +35,7 @@ extern Vector<std::pair<int,int> > Cshift_table;
// Gather for when there is no need to SIMD split
///////////////////////////////////////////////////////////////////
template<class vobj> void
Gather_plane_simple (const Lattice<vobj> &rhs,commVector<vobj> &buffer,int dimension,int plane,int cbmask, int off=0)
Gather_plane_simple (const Lattice<vobj> &rhs,cshiftVector<vobj> &buffer,int dimension,int plane,int cbmask, int off=0)
{
int rd = rhs.Grid()->_rdimensions[dimension];

@@ -73,12 +73,19 @@ Gather_plane_simple (const Lattice<vobj> &rhs,commVector<vobj> &buffer,int dimen
}
}
{
autoView(rhs_v , rhs, AcceleratorRead);
auto buffer_p = & buffer[0];
auto table = &Cshift_table[0];
accelerator_for(i,ent,1,{
#ifdef ACCELERATOR_CSHIFT
autoView(rhs_v , rhs, AcceleratorRead);
accelerator_for(i,ent,vobj::Nsimd(),{
coalescedWrite(buffer_p[table[i].first],coalescedRead(rhs_v[table[i].second]));
});
#else
autoView(rhs_v , rhs, CpuRead);
thread_for(i,ent,{
buffer_p[table[i].first]=rhs_v[table[i].second];
});
#endif
}
}

@@ -103,6 +110,7 @@ Gather_plane_extract(const Lattice<vobj> &rhs,
int n1=rhs.Grid()->_slice_stride[dimension];

if ( cbmask ==0x3){
#ifdef ACCELERATOR_CSHIFT
autoView(rhs_v , rhs, AcceleratorRead);
accelerator_for2d(n,e1,b,e2,1,{
int o      =   n*n1;
@@ -111,12 +119,22 @@ Gather_plane_extract(const Lattice<vobj> &rhs,
vobj temp =rhs_v[so+o+b];
extract<vobj>(temp,pointers,offset);
});
#else
autoView(rhs_v , rhs, CpuRead);
thread_for2d(n,e1,b,e2,{
int o      =   n*n1;
int offset = b+n*e2;

vobj temp =rhs_v[so+o+b];
extract<vobj>(temp,pointers,offset);
});
#endif
} else {
autoView(rhs_v , rhs, AcceleratorRead);

Coordinate rdim=rhs.Grid()->_rdimensions;
Coordinate cdm =rhs.Grid()->_checker_dim_mask;
std::cout << " Dense packed buffer WARNING " <<std::endl; // Does this get called twice once for each cb?
#ifdef ACCELERATOR_CSHIFT
autoView(rhs_v , rhs, AcceleratorRead);
accelerator_for2d(n,e1,b,e2,1,{

Coordinate coor;
@@ -134,13 +152,33 @@ Gather_plane_extract(const Lattice<vobj> &rhs,
extract<vobj>(temp,pointers,offset);
}
});
#else
autoView(rhs_v , rhs, CpuRead);
thread_for2d(n,e1,b,e2,{

Coordinate coor;

int o=n*n1;
int oindex = o+b;

int cb = RedBlackCheckerBoardFromOindex(oindex, rdim, cdm);

int ocb=1<<cb;
int offset = b+n*e2;

if ( ocb & cbmask ) {
vobj temp =rhs_v[so+o+b];
extract<vobj>(temp,pointers,offset);
}
});
#endif
}
}

//////////////////////////////////////////////////////
// Scatter for when there is no need to SIMD split
//////////////////////////////////////////////////////
template<class vobj> void Scatter_plane_simple (Lattice<vobj> &rhs,commVector<vobj> &buffer, int dimension,int plane,int cbmask)
template<class vobj> void Scatter_plane_simple (Lattice<vobj> &rhs,cshiftVector<vobj> &buffer, int dimension,int plane,int cbmask)
{
int rd = rhs.Grid()->_rdimensions[dimension];

@@ -182,12 +220,19 @@ template<class vobj> void Scatter_plane_simple (Lattice<vobj> &rhs,commVector<vo
}

{
autoView( rhs_v, rhs, AcceleratorWrite);
auto buffer_p = & buffer[0];
auto table = &Cshift_table[0];
accelerator_for(i,ent,1,{
rhs_v[table[i].first]=buffer_p[table[i].second];
#ifdef ACCELERATOR_CSHIFT
autoView( rhs_v, rhs, AcceleratorWrite);
accelerator_for(i,ent,vobj::Nsimd(),{
coalescedWrite(rhs_v[table[i].first],coalescedRead(buffer_p[table[i].second]));
});
#else
autoView( rhs_v, rhs, CpuWrite);
thread_for(i,ent,{
rhs_v[table[i].first]=buffer_p[table[i].second];
});
#endif
}
}

@@ -208,18 +253,30 @@ template<class vobj> void Scatter_plane_merge(Lattice<vobj> &rhs,ExtractPointerA
int e2=rhs.Grid()->_slice_block[dimension];

if(cbmask ==0x3 ) {
int _slice_stride = rhs.Grid()->_slice_stride[dimension];
int _slice_block = rhs.Grid()->_slice_block[dimension];
#ifdef ACCELERATOR_CSHIFT
autoView( rhs_v , rhs, AcceleratorWrite);
accelerator_for2d(n,e1,b,e2,1,{
int o      = n*rhs.Grid()->_slice_stride[dimension];
int offset = b+n*rhs.Grid()->_slice_block[dimension];
int o      = n*_slice_stride;
int offset = b+n*_slice_block;
merge(rhs_v[so+o+b],pointers,offset);
});
#else
autoView( rhs_v , rhs, CpuWrite);
thread_for2d(n,e1,b,e2,{
int o      = n*_slice_stride;
int offset = b+n*_slice_block;
merge(rhs_v[so+o+b],pointers,offset);
});
#endif
} else {

// Case of SIMD split AND checker dim cannot currently be hit, except in
// Test_cshift_red_black code.
// std::cout << "Scatter_plane merge assert(0); think this is buggy FIXME "<< std::endl;// think this is buggy FIXME
std::cout<<" Unthreaded warning -- buffer is not densely packed ??"<<std::endl;
assert(0); // This will fail if hit on GPU
autoView( rhs_v, rhs, CpuWrite);
for(int n=0;n<e1;n++){
for(int b=0;b<e2;b++){
@@ -277,12 +334,20 @@ template<class vobj> void Copy_plane(Lattice<vobj>& lhs,const Lattice<vobj> &rhs
}

{
auto table = &Cshift_table[0];
#ifdef ACCELERATOR_CSHIFT
autoView(rhs_v , rhs, AcceleratorRead);
autoView(lhs_v , lhs, AcceleratorWrite);
auto table = &Cshift_table[0];
accelerator_for(i,ent,1,{
accelerator_for(i,ent,vobj::Nsimd(),{
coalescedWrite(lhs_v[table[i].first],coalescedRead(rhs_v[table[i].second]));
});
#else
autoView(rhs_v , rhs, CpuRead);
autoView(lhs_v , lhs, CpuWrite);
thread_for(i,ent,{
lhs_v[table[i].first]=rhs_v[table[i].second];
});
#endif
}
}

@@ -321,12 +386,20 @@ template<class vobj> void Copy_plane_permute(Lattice<vobj>& lhs,const Lattice<vo
}

{
auto table = &Cshift_table[0];
#ifdef ACCELERATOR_CSHIFT
autoView( rhs_v, rhs, AcceleratorRead);
autoView( lhs_v, lhs, AcceleratorWrite);
auto table = &Cshift_table[0];
accelerator_for(i,ent,1,{
permute(lhs_v[table[i].first],rhs_v[table[i].second],permute_type);
});
#else
autoView( rhs_v, rhs, CpuRead);
autoView( lhs_v, lhs, CpuWrite);
thread_for(i,ent,{
permute(lhs_v[table[i].first],rhs_v[table[i].second],permute_type);
});
#endif
}
}

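Editor's note: every kernel in this file has the same shape -- the gather/scatter decisions are frozen once into a table of (destination, source) index pairs, and the hot loop is a branch-free replay that either back-end can run. A minimal standalone sketch of that idea, using a plain OpenMP loop where the diff uses Grid's accelerator_for/thread_for macros:

#include <vector>
#include <utility>

// Replay a precomputed (destination, source) index table. All control flow
// was resolved when the table was built, so the loop body is a bare copy
// that parallelises trivially over table entries.
template<class T>
void replay_copy(std::vector<T> &dst, const std::vector<T> &src,
                 const std::vector<std::pair<int,int> > &table)
{
  #pragma omp parallel for
  for (long i = 0; i < (long)table.size(); i++)
    dst[table[i].first] = src[table[i].second];
}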
@@ -101,7 +101,8 @@ template<class vobj> void Cshift_comms_simd(Lattice<vobj>& ret,const Lattice<vob
Cshift_comms_simd(ret,rhs,dimension,shift,0x2);// both with block stride loop iteration
}
}

#define ACCELERATOR_CSHIFT_NO_COPY
#ifdef ACCELERATOR_CSHIFT_NO_COPY
template<class vobj> void Cshift_comms(Lattice<vobj> &ret,const Lattice<vobj> &rhs,int dimension,int shift,int cbmask)
{
typedef typename vobj::vector_type vector_type;
@@ -121,9 +122,9 @@ template<class vobj> void Cshift_comms(Lattice<vobj> &ret,const Lattice<vobj> &r
assert(shift<fd);

int buffer_size = rhs.Grid()->_slice_nblock[dimension]*rhs.Grid()->_slice_block[dimension];
commVector<vobj> send_buf(buffer_size);
commVector<vobj> recv_buf(buffer_size);

cshiftVector<vobj> send_buf(buffer_size);
cshiftVector<vobj> recv_buf(buffer_size);

int cb= (cbmask==0x2)? Odd : Even;
int sshift= rhs.Grid()->CheckerBoardShiftForCB(rhs.Checkerboard(),dimension,shift,cb);

@@ -138,7 +139,7 @@ template<class vobj> void Cshift_comms(Lattice<vobj> &ret,const Lattice<vobj> &r

} else {

int words = send_buf.size();
int words = buffer_size;
if (cbmask != 0x3) words=words>>1;

int bytes = words * sizeof(vobj);
@@ -150,12 +151,14 @@ template<class vobj> void Cshift_comms(Lattice<vobj> &ret,const Lattice<vobj> &r
int xmit_to_rank;
grid->ShiftedRanks(dimension,comm_proc,xmit_to_rank,recv_from_rank);

grid->Barrier();

grid->SendToRecvFrom((void *)&send_buf[0],
xmit_to_rank,
(void *)&recv_buf[0],
recv_from_rank,
bytes);

grid->Barrier();

Scatter_plane_simple (ret,recv_buf,dimension,x,cbmask);
@@ -195,8 +198,15 @@ template<class vobj> void Cshift_comms_simd(Lattice<vobj> &ret,const Lattice<vo
int buffer_size = grid->_slice_nblock[dimension]*grid->_slice_block[dimension];
// int words = sizeof(vobj)/sizeof(vector_type);

std::vector<commVector<scalar_object> >   send_buf_extract(Nsimd,commVector<scalar_object>(buffer_size) );
std::vector<commVector<scalar_object> >   recv_buf_extract(Nsimd,commVector<scalar_object>(buffer_size) );
std::vector<cshiftVector<scalar_object> >   send_buf_extract(Nsimd);
std::vector<cshiftVector<scalar_object> >   recv_buf_extract(Nsimd);
scalar_object *  recv_buf_extract_mpi;
scalar_object *  send_buf_extract_mpi;

for(int s=0;s<Nsimd;s++){
send_buf_extract[s].resize(buffer_size);
recv_buf_extract[s].resize(buffer_size);
}

int bytes = buffer_size*sizeof(scalar_object);

@@ -242,11 +252,204 @@ template<class vobj> void Cshift_comms_simd(Lattice<vobj> &ret,const Lattice<vo
if(nbr_proc){
grid->ShiftedRanks(dimension,nbr_proc,xmit_to_rank,recv_from_rank);

grid->SendToRecvFrom((void *)&send_buf_extract[nbr_lane][0],
grid->Barrier();

send_buf_extract_mpi = &send_buf_extract[nbr_lane][0];
recv_buf_extract_mpi = &recv_buf_extract[i][0];
grid->SendToRecvFrom((void *)send_buf_extract_mpi,
xmit_to_rank,
(void *)&recv_buf_extract[i][0],
(void *)recv_buf_extract_mpi,
recv_from_rank,
bytes);

grid->Barrier();

rpointers[i] = &recv_buf_extract[i][0];
} else {
rpointers[i] = &send_buf_extract[nbr_lane][0];
}

}
Scatter_plane_merge(ret,rpointers,dimension,x,cbmask);
}

}
#else
template<class vobj> void Cshift_comms(Lattice<vobj> &ret,const Lattice<vobj> &rhs,int dimension,int shift,int cbmask)
{
typedef typename vobj::vector_type vector_type;
typedef typename vobj::scalar_type scalar_type;

GridBase *grid=rhs.Grid();
Lattice<vobj> temp(rhs.Grid());

int fd              = rhs.Grid()->_fdimensions[dimension];
int rd              = rhs.Grid()->_rdimensions[dimension];
int pd              = rhs.Grid()->_processors[dimension];
int simd_layout     = rhs.Grid()->_simd_layout[dimension];
int comm_dim        = rhs.Grid()->_processors[dimension] >1 ;
assert(simd_layout==1);
assert(comm_dim==1);
assert(shift>=0);
assert(shift<fd);

int buffer_size = rhs.Grid()->_slice_nblock[dimension]*rhs.Grid()->_slice_block[dimension];
cshiftVector<vobj> send_buf_v(buffer_size);
cshiftVector<vobj> recv_buf_v(buffer_size);
vobj *send_buf;
vobj *recv_buf;
{
grid->ShmBufferFreeAll();
size_t bytes = buffer_size*sizeof(vobj);
send_buf=(vobj *)grid->ShmBufferMalloc(bytes);
recv_buf=(vobj *)grid->ShmBufferMalloc(bytes);
}

int cb= (cbmask==0x2)? Odd : Even;
int sshift= rhs.Grid()->CheckerBoardShiftForCB(rhs.Checkerboard(),dimension,shift,cb);

for(int x=0;x<rd;x++){

int sx        =  (x+sshift)%rd;
int comm_proc = ((x+sshift)/rd)%pd;

if (comm_proc==0) {

Copy_plane(ret,rhs,dimension,x,sx,cbmask);

} else {

int words = buffer_size;
if (cbmask != 0x3) words=words>>1;

int bytes = words * sizeof(vobj);

Gather_plane_simple (rhs,send_buf_v,dimension,sx,cbmask);

// int rank           = grid->_processor;
int recv_from_rank;
int xmit_to_rank;
grid->ShiftedRanks(dimension,comm_proc,xmit_to_rank,recv_from_rank);


grid->Barrier();

acceleratorCopyDeviceToDevice((void *)&send_buf_v[0],(void *)&send_buf[0],bytes);
grid->SendToRecvFrom((void *)&send_buf[0],
xmit_to_rank,
(void *)&recv_buf[0],
recv_from_rank,
bytes);
acceleratorCopyDeviceToDevice((void *)&recv_buf[0],(void *)&recv_buf_v[0],bytes);

grid->Barrier();

Scatter_plane_simple (ret,recv_buf_v,dimension,x,cbmask);
}
}
}

template<class vobj> void  Cshift_comms_simd(Lattice<vobj> &ret,const Lattice<vobj> &rhs,int dimension,int shift,int cbmask)
{
GridBase *grid=rhs.Grid();
const int Nsimd = grid->Nsimd();
typedef typename vobj::vector_type vector_type;
typedef typename vobj::scalar_object scalar_object;
typedef typename vobj::scalar_type scalar_type;

int fd = grid->_fdimensions[dimension];
int rd = grid->_rdimensions[dimension];
int ld = grid->_ldimensions[dimension];
int pd = grid->_processors[dimension];
int simd_layout     = grid->_simd_layout[dimension];
int comm_dim        = grid->_processors[dimension] >1 ;

//std::cout << "Cshift_comms_simd dim "<< dimension << " fd "<<fd<<" rd "<<rd
//    << " ld "<<ld<<" pd " << pd<<" simd_layout "<<simd_layout
//    << " comm_dim " << comm_dim << " cbmask " << cbmask <<std::endl;

assert(comm_dim==1);
assert(simd_layout==2);
assert(shift>=0);
assert(shift<fd);

int permute_type=grid->PermuteType(dimension);

///////////////////////////////////////////////
// Simd direction uses an extract/merge pair
///////////////////////////////////////////////
int buffer_size = grid->_slice_nblock[dimension]*grid->_slice_block[dimension];
// int words = sizeof(vobj)/sizeof(vector_type);

std::vector<cshiftVector<scalar_object> >   send_buf_extract(Nsimd);
std::vector<cshiftVector<scalar_object> >   recv_buf_extract(Nsimd);
scalar_object *  recv_buf_extract_mpi;
scalar_object *  send_buf_extract_mpi;
{
size_t bytes = sizeof(scalar_object)*buffer_size;
grid->ShmBufferFreeAll();
send_buf_extract_mpi = (scalar_object *)grid->ShmBufferMalloc(bytes);
recv_buf_extract_mpi = (scalar_object *)grid->ShmBufferMalloc(bytes);
}
for(int s=0;s<Nsimd;s++){
send_buf_extract[s].resize(buffer_size);
recv_buf_extract[s].resize(buffer_size);
}

int bytes = buffer_size*sizeof(scalar_object);

ExtractPointerArray<scalar_object>  pointers(Nsimd); //
ExtractPointerArray<scalar_object> rpointers(Nsimd); // received pointers

///////////////////////////////////////////
// Work out what to send where
///////////////////////////////////////////
int cb    = (cbmask==0x2)? Odd : Even;
int sshift= grid->CheckerBoardShiftForCB(rhs.Checkerboard(),dimension,shift,cb);

// loop over outer coord planes orthog to dim
for(int x=0;x<rd;x++){

// FIXME call local permute copy if none are offnode.
for(int i=0;i<Nsimd;i++){
pointers[i] = &send_buf_extract[i][0];
}
int sx = (x+sshift)%rd;
Gather_plane_extract(rhs,pointers,dimension,sx,cbmask);

for(int i=0;i<Nsimd;i++){

int inner_bit = (Nsimd>>(permute_type+1));
int ic= (i&inner_bit)? 1:0;

int my_coor          = rd*ic + x;
int nbr_coor         = my_coor+sshift;
int nbr_proc = ((nbr_coor)/ld) % pd;// relative shift in processors

int nbr_ic   = (nbr_coor%ld)/rd;    // inner coord of peer
int nbr_ox   = (nbr_coor%rd);       // outer coord of peer
int nbr_lane = (i&(~inner_bit));

int recv_from_rank;
int xmit_to_rank;

if (nbr_ic) nbr_lane|=inner_bit;

assert (sx == nbr_ox);

if(nbr_proc){
grid->ShiftedRanks(dimension,nbr_proc,xmit_to_rank,recv_from_rank);

grid->Barrier();

acceleratorCopyDeviceToDevice((void *)&send_buf_extract[nbr_lane][0],(void *)send_buf_extract_mpi,bytes);
grid->SendToRecvFrom((void *)send_buf_extract_mpi,
xmit_to_rank,
(void *)recv_buf_extract_mpi,
recv_from_rank,
bytes);
acceleratorCopyDeviceToDevice((void *)recv_buf_extract_mpi,(void *)&recv_buf_extract[i][0],bytes);

grid->Barrier();
rpointers[i] = &recv_buf_extract[i][0];
} else {
@@ -258,7 +461,7 @@ template<class vobj> void  Cshift_comms_simd(Lattice<vobj> &ret,const Lattice<vo
}

}

#endif
NAMESPACE_END(Grid);

#endif
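Editor's note: the fallback branch above stages every transfer through a pair of persistent, comms-visible scratch buffers -- gather into a device vector, copy into the MPI buffer, exchange, copy back out. A bare-bones sketch of that staging discipline, with hypothetical helpers standing in for Grid's ShmBufferMalloc, acceleratorCopyDeviceToDevice, and SendToRecvFrom:

#include <cstring>
#include <cstdlib>

// Stand-ins for the device allocator, device copy, and MPI exchange.
void *scratch_alloc(size_t bytes) { return malloc(bytes); }
void  device_copy(void *dst, const void *src, size_t bytes) { memcpy(dst,src,bytes); }
void  exchange(void *send, void *recv, size_t bytes) { memcpy(recv,send,bytes); }

// Stage a gathered plane through comms-visible scratch, then scatter back.
void staged_shift(void *gathered, void *result, size_t bytes)
{
  void *send_buf = scratch_alloc(bytes);   // registered once, reused per plane
  void *recv_buf = scratch_alloc(bytes);

  device_copy(send_buf, gathered, bytes);  // device buffer -> comms buffer
  exchange(send_buf, recv_buf, bytes);     // off-node transfer
  device_copy(result, recv_buf, bytes);    // comms buffer -> device buffer

  free(send_buf);
  free(recv_buf);
}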
@@ -36,7 +36,8 @@ Author: Peter Boyle <paboyle@ph.ed.ac.uk>
#include <Grid/lattice/Lattice_local.h>
#include <Grid/lattice/Lattice_reduction.h>
#include <Grid/lattice/Lattice_peekpoke.h>
//#include <Grid/lattice/Lattice_reality.h>
#include <Grid/lattice/Lattice_reality.h>
#include <Grid/lattice/Lattice_real_imag.h>
#include <Grid/lattice/Lattice_comparison_utils.h>
#include <Grid/lattice/Lattice_comparison.h>
#include <Grid/lattice/Lattice_coordinate.h>
@@ -42,9 +42,24 @@ NAMESPACE_BEGIN(Grid);
////////////////////////////////////////////////////
// Predicated where support
////////////////////////////////////////////////////
#ifdef GRID_SIMT
// drop to scalar in SIMT; cleaner in fact
template <class iobj, class vobj, class robj>
accelerator_inline vobj predicatedWhere(const iobj &predicate, const vobj &iftrue,
const robj &iffalse) {
accelerator_inline vobj predicatedWhere(const iobj &predicate,
const vobj &iftrue,
const robj &iffalse)
{
Integer mask = TensorRemove(predicate);
typename std::remove_const<vobj>::type ret= iffalse;
if (mask) ret=iftrue;
return ret;
}
#else
template <class iobj, class vobj, class robj>
accelerator_inline vobj predicatedWhere(const iobj &predicate,
const vobj &iftrue,
const robj &iffalse)
{
typename std::remove_const<vobj>::type ret;

typedef typename vobj::scalar_object scalar_object;
@@ -68,6 +83,7 @@ accelerator_inline vobj predicatedWhere(const iobj &predicate, const vobj &iftru
merge(ret, falsevals);
return ret;
}
#endif

/////////////////////////////////////////////////////
//Specialization of getVectorType for lattices
@@ -81,32 +97,62 @@ struct getVectorType<Lattice<T> >{
//-- recursive evaluation of expressions; --
// handle leaves of syntax tree
///////////////////////////////////////////////////
template<class sobj> accelerator_inline
template<class sobj,
typename std::enable_if<!is_lattice<sobj>::value&&!is_lattice_expr<sobj>::value,sobj>::type * = nullptr>
accelerator_inline
sobj eval(const uint64_t ss, const sobj &arg)
{
return arg;
}

template <class lobj> accelerator_inline
const lobj & eval(const uint64_t ss, const LatticeView<lobj> &arg)
auto eval(const uint64_t ss, const LatticeView<lobj> &arg) -> decltype(arg(ss))
{
return arg(ss);
}

////////////////////////////////////////////
//-- recursive evaluation of expressions; --
// whole vector return, used only for expression return type inference
///////////////////////////////////////////////////
template<class sobj> accelerator_inline
sobj vecEval(const uint64_t ss, const sobj &arg)
{
return arg;
}
template <class lobj> accelerator_inline
const lobj & vecEval(const uint64_t ss, const LatticeView<lobj> &arg)
{
return arg[ss];
}

// What needs this?
// Cannot be legal on accelerator
// Comparison must convert
#if 1
template <class lobj> accelerator_inline
const lobj & eval(const uint64_t ss, const Lattice<lobj> &arg)
{
auto view = arg.View(AcceleratorRead);
return view[ss];
}
#endif

///////////////////////////////////////////////////
// handle nodes in syntax tree- eval one operand
// vecEval needed (but never called as all expressions offloaded) to infer the return type
// in SIMT contexts of closure.
///////////////////////////////////////////////////
template <typename Op, typename T1> accelerator_inline
auto vecEval(const uint64_t ss, const LatticeUnaryExpression<Op, T1> &expr)
-> decltype(expr.op.func( vecEval(ss, expr.arg1)))
{
return expr.op.func( vecEval(ss, expr.arg1) );
}
// vecEval two operands
template <typename Op, typename T1, typename T2> accelerator_inline
auto vecEval(const uint64_t ss, const LatticeBinaryExpression<Op, T1, T2> &expr)
-> decltype(expr.op.func( vecEval(ss,expr.arg1),vecEval(ss,expr.arg2)))
{
return expr.op.func( vecEval(ss,expr.arg1), vecEval(ss,expr.arg2) );
}
// vecEval three operands
template <typename Op, typename T1, typename T2, typename T3> accelerator_inline
auto vecEval(const uint64_t ss, const LatticeTrinaryExpression<Op, T1, T2, T3> &expr)
-> decltype(expr.op.func(vecEval(ss, expr.arg1), vecEval(ss, expr.arg2), vecEval(ss, expr.arg3)))
{
return expr.op.func(vecEval(ss, expr.arg1), vecEval(ss, expr.arg2), vecEval(ss, expr.arg3));
}

///////////////////////////////////////////////////
// handle nodes in syntax tree- eval one operand coalesced
///////////////////////////////////////////////////
template <typename Op, typename T1> accelerator_inline
auto eval(const uint64_t ss, const LatticeUnaryExpression<Op, T1> &expr)
@@ -114,23 +160,41 @@ auto eval(const uint64_t ss, const LatticeUnaryExpression<Op, T1> &expr)
{
return expr.op.func( eval(ss, expr.arg1) );
}
///////////////////////
// eval two operands
///////////////////////
template <typename Op, typename T1, typename T2> accelerator_inline
auto eval(const uint64_t ss, const LatticeBinaryExpression<Op, T1, T2> &expr)
-> decltype(expr.op.func( eval(ss,expr.arg1),eval(ss,expr.arg2)))
{
return expr.op.func( eval(ss,expr.arg1), eval(ss,expr.arg2) );
}
///////////////////////
// eval three operands
///////////////////////
template <typename Op, typename T1, typename T2, typename T3> accelerator_inline
auto eval(const uint64_t ss, const LatticeTrinaryExpression<Op, T1, T2, T3> &expr)
-> decltype(expr.op.func(eval(ss, expr.arg1), eval(ss, expr.arg2), eval(ss, expr.arg3)))
-> decltype(expr.op.func(eval(ss, expr.arg1),
eval(ss, expr.arg2),
eval(ss, expr.arg3)))
{
return expr.op.func(eval(ss, expr.arg1), eval(ss, expr.arg2), eval(ss, expr.arg3));
#ifdef GRID_SIMT
// Handles Nsimd (vInteger) != Nsimd(ComplexD)
typedef decltype(vecEval(ss, expr.arg2)) rvobj;
typedef typename std::remove_reference<rvobj>::type vobj;

const int Nsimd = vobj::vector_type::Nsimd();

auto vpred = vecEval(ss,expr.arg1);

ExtractBuffer<Integer> mask(Nsimd);
extract<vInteger, Integer>(TensorRemove(vpred), mask);

int s = acceleratorSIMTlane(Nsimd);
return expr.op.func(mask[s],
eval(ss, expr.arg2),
eval(ss, expr.arg3));
#else
return expr.op.func(eval(ss, expr.arg1),
eval(ss, expr.arg2),
eval(ss, expr.arg3));
#endif
}

//////////////////////////////////////////////////////////////////////////
@@ -228,7 +292,7 @@ template <typename Op, typename T1, typename T2> inline
void ExpressionViewOpen(LatticeBinaryExpression<Op, T1, T2> &expr)
{
ExpressionViewOpen(expr.arg1); // recurse AST
ExpressionViewOpen(expr.arg2); // recurse AST
}
template <typename Op, typename T1, typename T2, typename T3>
inline void ExpressionViewOpen(LatticeTrinaryExpression<Op, T1, T2, T3> &expr)
@@ -272,28 +336,20 @@ inline void ExpressionViewClose(LatticeTrinaryExpression<Op, T1, T2, T3> &expr)
// Unary operators and funcs
////////////////////////////////////////////
#define GridUnopClass(name, ret) \
template <class arg> \
struct name { \
static auto accelerator_inline func(const arg a) -> decltype(ret) { return ret; } \
template<class _arg> static auto accelerator_inline func(const _arg a) -> decltype(ret) { return ret; } \
};

GridUnopClass(UnarySub, -a);
GridUnopClass(UnaryNot, Not(a));
GridUnopClass(UnaryAdj, adj(a));
GridUnopClass(UnaryConj, conjugate(a));
GridUnopClass(UnaryTrace, trace(a));
GridUnopClass(UnaryTranspose, transpose(a));
GridUnopClass(UnaryTa, Ta(a));
GridUnopClass(UnaryProjectOnGroup, ProjectOnGroup(a));
GridUnopClass(UnaryReal, real(a));
GridUnopClass(UnaryImag, imag(a));
GridUnopClass(UnaryToReal, toReal(a));
GridUnopClass(UnaryToComplex, toComplex(a));
GridUnopClass(UnaryTimesI, timesI(a));
GridUnopClass(UnaryTimesMinusI, timesMinusI(a));
GridUnopClass(UnaryAbs, abs(a));
GridUnopClass(UnarySqrt, sqrt(a));
GridUnopClass(UnaryRsqrt, rsqrt(a));
GridUnopClass(UnarySin, sin(a));
GridUnopClass(UnaryCos, cos(a));
GridUnopClass(UnaryAsin, asin(a));
@@ -305,10 +361,10 @@ GridUnopClass(UnaryExp, exp(a));
// Binary operators
////////////////////////////////////////////
#define GridBinOpClass(name, combination) \
template <class left, class right> \
struct name { \
template <class _left, class _right> \
static auto accelerator_inline \
func(const left &lhs, const right &rhs) \
func(const _left &lhs, const _right &rhs) \
-> decltype(combination) const \
{ \
return combination; \
@@ -328,10 +384,10 @@ GridBinOpClass(BinaryOrOr, lhs || rhs);
// Trinary conditional op
////////////////////////////////////////////////////
#define GridTrinOpClass(name, combination) \
template <class predicate, class left, class right> \
struct name { \
template <class _predicate,class _left, class _right> \
static auto accelerator_inline \
func(const predicate &pred, const left &lhs, const right &rhs) \
func(const _predicate &pred, const _left &lhs, const _right &rhs) \
-> decltype(combination) const \
{ \
return combination; \
@@ -339,17 +395,17 @@ GridBinOpClass(BinaryOrOr, lhs || rhs);
};

GridTrinOpClass(TrinaryWhere,
(predicatedWhere<predicate,
typename std::remove_reference<left>::type,
typename std::remove_reference<right>::type>(pred, lhs,rhs)));
(predicatedWhere<
typename std::remove_reference<_predicate>::type,
typename std::remove_reference<_left>::type,
typename std::remove_reference<_right>::type>(pred, lhs,rhs)));

////////////////////////////////////////////
// Operator syntactical glue
////////////////////////////////////////////

#define GRID_UNOP(name)   name<decltype(eval(0, arg))>
#define GRID_BINOP(name)  name<decltype(eval(0, lhs)), decltype(eval(0, rhs))>
#define GRID_TRINOP(name) name<decltype(eval(0, pred)), decltype(eval(0, lhs)), decltype(eval(0, rhs))>
#define GRID_UNOP(name)   name
#define GRID_BINOP(name)  name
#define GRID_TRINOP(name) name

#define GRID_DEF_UNOP(op, name) \
template <typename T1, typename std::enable_if<is_lattice<T1>::value||is_lattice_expr<T1>::value,T1>::type * = nullptr> \
@@ -395,22 +451,17 @@ GridTrinOpClass(TrinaryWhere,
GRID_DEF_UNOP(operator-, UnarySub);
GRID_DEF_UNOP(Not, UnaryNot);
GRID_DEF_UNOP(operator!, UnaryNot);
GRID_DEF_UNOP(adj, UnaryAdj);
GRID_DEF_UNOP(conjugate, UnaryConj);
//GRID_DEF_UNOP(adj, UnaryAdj);
//GRID_DEF_UNOP(conjugate, UnaryConj);
GRID_DEF_UNOP(trace, UnaryTrace);
GRID_DEF_UNOP(transpose, UnaryTranspose);
GRID_DEF_UNOP(Ta, UnaryTa);
GRID_DEF_UNOP(ProjectOnGroup, UnaryProjectOnGroup);
GRID_DEF_UNOP(real, UnaryReal);
GRID_DEF_UNOP(imag, UnaryImag);
GRID_DEF_UNOP(toReal, UnaryToReal);
GRID_DEF_UNOP(toComplex, UnaryToComplex);
GRID_DEF_UNOP(timesI, UnaryTimesI);
GRID_DEF_UNOP(timesMinusI, UnaryTimesMinusI);
GRID_DEF_UNOP(abs, UnaryAbs);  // abs overloaded in cmath C++98; DON'T do the
// abs-fabs-dabs-labs thing
GRID_DEF_UNOP(sqrt, UnarySqrt);
GRID_DEF_UNOP(rsqrt, UnaryRsqrt);
GRID_DEF_UNOP(sin, UnarySin);
GRID_DEF_UNOP(cos, UnaryCos);
GRID_DEF_UNOP(asin, UnaryAsin);
@@ -435,29 +486,36 @@ GRID_DEF_TRINOP(where, TrinaryWhere);
/////////////////////////////////////////////////////////////
template <class Op, class T1>
auto closure(const LatticeUnaryExpression<Op, T1> &expr)
-> Lattice<decltype(expr.op.func(eval(0, expr.arg1)))>
-> Lattice<typename std::remove_const<decltype(expr.op.func(vecEval(0, expr.arg1)))>::type >
{
Lattice<decltype(expr.op.func(eval(0, expr.arg1)))> ret(expr);
Lattice<typename std::remove_const<decltype(expr.op.func(vecEval(0, expr.arg1)))>::type > ret(expr);
return ret;
}
template <class Op, class T1, class T2>
auto closure(const LatticeBinaryExpression<Op, T1, T2> &expr)
-> Lattice<decltype(expr.op.func(eval(0, expr.arg1),eval(0, expr.arg2)))>
-> Lattice<typename std::remove_const<decltype(expr.op.func(vecEval(0, expr.arg1),vecEval(0, expr.arg2)))>::type >
{
Lattice<decltype(expr.op.func(eval(0, expr.arg1),eval(0, expr.arg2)))> ret(expr);
Lattice<typename std::remove_const<decltype(expr.op.func(vecEval(0, expr.arg1),vecEval(0, expr.arg2)))>::type > ret(expr);
return ret;
}
template <class Op, class T1, class T2, class T3>
auto closure(const LatticeTrinaryExpression<Op, T1, T2, T3> &expr)
-> Lattice<decltype(expr.op.func(eval(0, expr.arg1),
eval(0, expr.arg2),
eval(0, expr.arg3)))>
-> Lattice<typename std::remove_const<decltype(expr.op.func(vecEval(0, expr.arg1),
vecEval(0, expr.arg2),
vecEval(0, expr.arg3)))>::type >
{
Lattice<decltype(expr.op.func(eval(0, expr.arg1),
eval(0, expr.arg2),
eval(0, expr.arg3)))> ret(expr);
Lattice<typename std::remove_const<decltype(expr.op.func(vecEval(0, expr.arg1),
vecEval(0, expr.arg2),
vecEval(0, expr.arg3)))>::type > ret(expr);
return ret;
}
#define EXPRESSION_CLOSURE(function) \
template<class Expression,typename std::enable_if<is_lattice_expr<Expression>::value,void>::type * = nullptr> \
auto function(Expression &expr) -> decltype(function(closure(expr))) \
{ \
return function(closure(expr)); \
}


#undef GRID_UNOP
#undef GRID_BINOP
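Editor's note: the eval/vecEval/closure machinery above is a textbook expression template -- nodes hold references, eval(ss, node) recurses one site at a time, and closure() forces the whole tree in a single fused loop. A toy standalone version of the idea (all names here are illustrative, not Grid's):

#include <vector>
#include <cstddef>

struct Field { std::vector<double> v; };

inline double eval(std::size_t ss, const Field &f) { return f.v[ss]; } // leaf

template<class L, class R> struct AddNode { const L &l; const R &r; };  // holds references

template<class L, class R>
inline double eval(std::size_t ss, const AddNode<L,R> &n)
{
  return eval(ss, n.l) + eval(ss, n.r);                                // interior node
}

template<class L, class R>
inline AddNode<L,R> add(const L &l, const R &r) { return AddNode<L,R>{l,r}; }

template<class E>
Field closure(const E &expr, std::size_t n)
{
  Field ret; ret.v.resize(n);
  for(std::size_t ss=0; ss<n; ss++) ret.v[ss] = eval(ss, expr);        // one fused loop
  return ret;
}
// Usage: Field c = closure(add(a, add(a,b)), a.v.size()); // no intermediate temporaries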
@ -60,9 +60,9 @@ void mac(Lattice<obj1> &ret,const Lattice<obj2> &lhs,const Lattice<obj3> &rhs){
|
||||
autoView( lhs_v , lhs, AcceleratorRead);
|
||||
autoView( rhs_v , rhs, AcceleratorRead);
|
||||
accelerator_for(ss,lhs_v.size(),obj1::Nsimd(),{
|
||||
decltype(coalescedRead(obj1())) tmp;
|
||||
auto lhs_t=lhs_v(ss);
|
||||
auto rhs_t=rhs_v(ss);
|
||||
auto tmp =ret_v(ss);
|
||||
mac(&tmp,&lhs_t,&rhs_t);
|
||||
coalescedWrite(ret_v[ss],tmp);
|
||||
});
|
||||
@ -124,7 +124,7 @@ void mac(Lattice<obj1> &ret,const Lattice<obj2> &lhs,const obj3 &rhs){
|
||||
autoView( ret_v , ret, AcceleratorWrite);
|
||||
autoView( lhs_v , lhs, AcceleratorRead);
|
||||
accelerator_for(ss,lhs_v.size(),obj1::Nsimd(),{
|
||||
decltype(coalescedRead(obj1())) tmp;
|
||||
auto tmp =ret_v(ss);
|
||||
auto lhs_t=lhs_v(ss);
|
||||
mac(&tmp,&lhs_t,&rhs);
|
||||
coalescedWrite(ret_v[ss],tmp);
|
||||
@ -182,7 +182,7 @@ void mac(Lattice<obj1> &ret,const obj2 &lhs,const Lattice<obj3> &rhs){
|
||||
autoView( ret_v , ret, AcceleratorWrite);
|
||||
autoView( rhs_v , lhs, AcceleratorRead);
|
||||
accelerator_for(ss,rhs_v.size(),obj1::Nsimd(),{
|
||||
decltype(coalescedRead(obj1())) tmp;
|
||||
auto tmp =ret_v(ss);
|
||||
auto rhs_t=rhs_v(ss);
|
||||
mac(&tmp,&lhs,&rhs_t);
|
||||
coalescedWrite(ret_v[ss],tmp);
|
||||
|
@@ -123,9 +123,9 @@ public:
     auto exprCopy = expr;
     ExpressionViewOpen(exprCopy);
     auto me  = View(AcceleratorWriteDiscard);
-    accelerator_for(ss,me.size(),1,{
+    accelerator_for(ss,me.size(),vobj::Nsimd(),{
       auto tmp = eval(ss,exprCopy);
-      vstream(me[ss],tmp);
+      coalescedWrite(me[ss],tmp);
     });
     me.ViewClose();
     ExpressionViewClose(exprCopy);
@@ -146,9 +146,9 @@ public:
     auto exprCopy = expr;
     ExpressionViewOpen(exprCopy);
     auto me  = View(AcceleratorWriteDiscard);
-    accelerator_for(ss,me.size(),1,{
+    accelerator_for(ss,me.size(),vobj::Nsimd(),{
       auto tmp = eval(ss,exprCopy);
-      vstream(me[ss],tmp);
+      coalescedWrite(me[ss],tmp);
     });
     me.ViewClose();
     ExpressionViewClose(exprCopy);
@@ -168,9 +168,9 @@ public:
     auto exprCopy = expr;
     ExpressionViewOpen(exprCopy);
     auto me  = View(AcceleratorWriteDiscard);
-    accelerator_for(ss,me.size(),1,{
+    accelerator_for(ss,me.size(),vobj::Nsimd(),{
       auto tmp = eval(ss,exprCopy);
-      vstream(me[ss],tmp);
+      coalescedWrite(me[ss],tmp);
     });
     me.ViewClose();
     ExpressionViewClose(exprCopy);
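All three assignment hunks make the same switch: from one thread per site streaming a whole SIMD vector (vstream) to Nsimd threads per site each writing one lane (coalescedWrite). A plain-C++ stand-in for the per-lane write pattern; std::array plays the role of a Grid SIMD vector and a serial loop plays the role of the parallel GPU lanes:

#include <array>
#include <cassert>

constexpr int Nsimd = 4;
using vReal = std::array<double, Nsimd>;  // stand-in for a Grid SIMD vector

// one "lane" of work: write only lane l of the site's vector
static void coalescedWriteLane(vReal &dst, int l, double value) { dst[l] = value; }

int main() {
  vReal site{};
  for (int l = 0; l < Nsimd; l++)          // on a GPU these run as parallel threads
    coalescedWriteLane(site, l, 2.0 * l);  // each lane writes its own element
  assert(site[3] == 6.0);
  return 0;
}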
@@ -54,13 +54,34 @@ void basisRotate(VField &basis,Matrix& Qt,int j0, int j1, int k0,int k1,int Nm)
   typedef decltype(basis[0].View(AcceleratorRead)) View;

   Vector<View> basis_v; basis_v.reserve(basis.size());
+  typedef typename std::remove_reference<decltype(basis_v[0][0])>::type vobj;
+  typedef typename std::remove_reference<decltype(Qt(0,0))>::type Coeff_t;
   GridBase* grid = basis[0].Grid();

   for(int k=0;k<basis.size();k++){
     basis_v.push_back(basis[k].View(AcceleratorWrite));
   }

+#if ( (!defined(GRID_CUDA)) )
+  int max_threads = thread_max();
+  Vector < vobj > Bt(Nm * max_threads);
+  thread_region
+  {
+    vobj* B = &Bt[Nm * thread_num()];
+    thread_for_in_region(ss, grid->oSites(),{
+      for(int j=j0; j<j1; ++j) B[j]=0.;
+
+      for(int j=j0; j<j1; ++j){
+        for(int k=k0; k<k1; ++k){
+          B[j] +=Qt(j,k) * basis_v[k][ss];
+        }
+      }
+      for(int j=j0; j<j1; ++j){
+        basis_v[j][ss] = B[j];
+      }
+    });
+  }
+#else
   View *basis_vp = &basis_v[0];

   int nrot = j1-j0;
@@ -70,14 +91,12 @@ void basisRotate(VField &basis,Matrix& Qt,int j0, int j1, int k0,int k1,int Nm)
   uint64_t oSites   =grid->oSites();
   uint64_t siteBlock=(grid->oSites()+nrot-1)/nrot; // Maximum 1 additional vector overhead

-  typedef typename std::remove_reference<decltype(basis_v[0][0])>::type vobj;
-
   Vector <vobj> Bt(siteBlock * nrot);
   auto Bp=&Bt[0];

   // GPU readable copy of matrix
-  Vector<double> Qt_jv(Nm*Nm);
-  double *Qt_p = & Qt_jv[0];
+  Vector<Coeff_t> Qt_jv(Nm*Nm);
+  Coeff_t *Qt_p = & Qt_jv[0];
   thread_for(i,Nm*Nm,{
     int j = i/Nm;
     int k = i%Nm;
@@ -118,6 +137,7 @@ void basisRotate(VField &basis,Matrix& Qt,int j0, int j1, int k0,int k1,int Nm)
     coalescedWrite(basis_v[jj][sss],coalescedRead(Bp[ss*nrot+j]));
   });
   }
+#endif

   for(int k=0;k<basis.size();k++) basis_v[k].ViewClose();
 }
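Both code paths added above implement the same linear algebra: at every site, the basis vectors j0..j1-1 are replaced by B_j = sum_k Qt(j,k) * basis_k, accumulated into scratch storage so the rotation is effectively in place. Stripped of lattice, threading, and SIMD detail:

#include <cassert>
#include <vector>

int main() {
  const int Nm = 2;                       // basis size (illustrative)
  std::vector<double> basis = {1.0, 2.0}; // one "site", Nm basis values
  std::vector<std::vector<double>> Qt = {{0.0, 1.0}, {1.0, 0.0}}; // a swap
  std::vector<double> B(Nm, 0.0);         // scratch, cf. Bt in the diff
  for (int j = 0; j < Nm; j++)
    for (int k = 0; k < Nm; k++)
      B[j] += Qt[j][k] * basis[k];        // B[j] += Qt(j,k) * basis_v[k][ss]
  for (int j = 0; j < Nm; j++) basis[j] = B[j];  // write back in place
  assert(basis[0] == 2.0 && basis[1] == 1.0);
  return 0;
}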
@@ -141,11 +161,13 @@ void basisRotateJ(Field &result,std::vector<Field> &basis,Eigen::MatrixXd& Qt,in
   double * Qt_j = & Qt_jv[0];
   for(int k=0;k<Nm;++k) Qt_j[k]=Qt(j,k);

+  auto basis_vp=& basis_v[0];
   autoView(result_v,result,AcceleratorWrite);
   accelerator_for(ss, grid->oSites(),vobj::Nsimd(),{
-    auto B=coalescedRead(zz);
+    vobj zzz=Zero();
+    auto B=coalescedRead(zzz);
     for(int k=k0; k<k1; ++k){
-      B +=Qt_j[k] * coalescedRead(basis_v[k][ss]);
+      B +=Qt_j[k] * coalescedRead(basis_vp[k][ss]);
     }
     coalescedWrite(result_v[ss], B);
   });
@@ -42,34 +42,6 @@ NAMESPACE_BEGIN(Grid);

 typedef iScalar<vInteger> vPredicate ;

-/*
-template <class iobj, class vobj, class robj> accelerator_inline
-vobj predicatedWhere(const iobj &predicate, const vobj &iftrue, const robj &iffalse)
-{
-  typename std::remove_const<vobj>::type ret;
-
-  typedef typename vobj::scalar_object scalar_object;
-  typedef typename vobj::scalar_type scalar_type;
-  typedef typename vobj::vector_type vector_type;
-
-  const int Nsimd = vobj::vector_type::Nsimd();
-
-  ExtractBuffer<Integer> mask(Nsimd);
-  ExtractBuffer<scalar_object> truevals(Nsimd);
-  ExtractBuffer<scalar_object> falsevals(Nsimd);
-
-  extract(iftrue, truevals);
-  extract(iffalse, falsevals);
-  extract<vInteger, Integer>(TensorRemove(predicate), mask);
-
-  for (int s = 0; s < Nsimd; s++) {
-    if (mask[s]) falsevals[s] = truevals[s];
-  }
-
-  merge(ret, falsevals);
-  return ret;
-}
-*/
 //////////////////////////////////////////////////////////////////////////
 // compare lattice to lattice
 //////////////////////////////////////////////////////////////////////////
@@ -182,6 +182,14 @@ inline void peekLocalSite(sobj &s,const LatticeView<vobj> &l,Coordinate &site)

   return;
 };
+template<class vobj,class sobj>
+inline void peekLocalSite(sobj &s,const Lattice<vobj> &l,Coordinate &site)
+{
+  autoView(lv,l,CpuRead);
+  peekLocalSite(s,lv,site);
+  return;
+};

 // Must be CPU write view
 template<class vobj,class sobj>
 inline void pokeLocalSite(const sobj &s,LatticeView<vobj> &l,Coordinate &site)
@@ -210,6 +218,14 @@ inline void pokeLocalSite(const sobj &s,LatticeView<vobj> &l,Coordinate &site)
   return;
 };

+template<class vobj,class sobj>
+inline void pokeLocalSite(const sobj &s, Lattice<vobj> &l,Coordinate &site)
+{
+  autoView(lv,l,CpuWrite);
+  pokeLocalSite(s,lv,site);
+  return;
+};

 NAMESPACE_END(Grid);
 #endif
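The new overloads are thin convenience wrappers: open a CPU view of the Lattice, delegate to the existing view-based peek/poke, and let the view close on scope exit. The same wrapper shape in a self-contained sketch; all types below are stand-ins, not Grid's:

#include <cassert>
#include <vector>

struct View { std::vector<int> &data; int &operator[](int i) { return data[i]; } };
struct Container {
  std::vector<int> data;
  View view() { return View{data}; }   // cf. autoView(lv, l, CpuRead/CpuWrite)
};

static void pokeSite(int s, View v, int site) { v[site] = s; }  // view-based worker
static void pokeSite(int s, Container &c, int site) {           // the new wrapper
  View v = c.view();
  pokeSite(s, v, site);
}

int main() {
  Container c{{0, 0, 0}};
  pokeSite(7, c, 1);        // caller never touches a view directly
  assert(c.data[1] == 7);
  return 0;
}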
Grid/lattice/Lattice_real_imag.h (new file, 79 lines)
@@ -0,0 +1,79 @@
/*************************************************************************************

    Grid physics library, www.github.com/paboyle/Grid

    Source file: ./lib/lattice/Lattice_reality.h

    Copyright (C) 2015

Author: Azusa Yamaguchi <ayamaguc@staffmail.ed.ac.uk>
Author: Peter Boyle <paboyle@ph.ed.ac.uk>
Author: neo <cossu@post.kek.jp>

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License along
    with this program; if not, write to the Free Software Foundation, Inc.,
    51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.

    See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/*  END LEGAL */
#ifndef GRID_LATTICE_REAL_IMAG_H
#define GRID_LATTICE_REAL_IMAG_H


// FIXME .. this is the sector of the code
// I am most worried about the directions
// The choice of burying complex in the SIMD
// is making the use of "real" and "imag" very cumbersome

NAMESPACE_BEGIN(Grid);

template<class vobj> inline Lattice<vobj> real(const Lattice<vobj> &lhs){
  Lattice<vobj> ret(lhs.Grid());

  autoView( lhs_v, lhs, AcceleratorRead);
  autoView( ret_v, ret, AcceleratorWrite);

  ret.Checkerboard()=lhs.Checkerboard();
  accelerator_for( ss, lhs_v.size(), 1, {
    ret_v[ss] =real(lhs_v[ss]);
  });
  return ret;
};
template<class vobj> inline Lattice<vobj> imag(const Lattice<vobj> &lhs){
  Lattice<vobj> ret(lhs.Grid());

  autoView( lhs_v, lhs, AcceleratorRead);
  autoView( ret_v, ret, AcceleratorWrite);

  ret.Checkerboard()=lhs.Checkerboard();
  accelerator_for( ss, lhs_v.size(), 1, {
    ret_v[ss] =imag(lhs_v[ss]);
  });
  return ret;
};

template<class Expression,typename std::enable_if<is_lattice_expr<Expression>::value,void>::type * = nullptr>
auto real(const Expression &expr) -> decltype(real(closure(expr)))
{
  return real(closure(expr));
}
template<class Expression,typename std::enable_if<is_lattice_expr<Expression>::value,void>::type * = nullptr>
auto imag(const Expression &expr) -> decltype(imag(closure(expr)))
{
  return imag(closure(expr));
}

NAMESPACE_END(Grid);

#endif
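In miniature, the new header applies real()/imag() site by site across the lattice. A stand-alone sketch with std::complex standing in for Grid's SIMD tensor types:

#include <cassert>
#include <complex>
#include <cstddef>
#include <vector>

template <class T>
std::vector<T> latticeReal(const std::vector<std::complex<T>> &lhs) {
  std::vector<T> ret(lhs.size());
  for (std::size_t ss = 0; ss < lhs.size(); ss++)
    ret[ss] = std::real(lhs[ss]);   // cf. ret_v[ss] = real(lhs_v[ss])
  return ret;
}

int main() {
  std::vector<std::complex<double>> phi = {{1, 2}, {3, 4}};
  auto re = latticeReal(phi);
  assert(re[0] == 1.0 && re[1] == 3.0);
  return 0;
}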
@@ -45,8 +45,8 @@ template<class vobj> inline Lattice<vobj> adj(const Lattice<vobj> &lhs){
   autoView( ret_v, ret, AcceleratorWrite);

   ret.Checkerboard()=lhs.Checkerboard();
-  accelerator_for( ss, lhs_v.size(), vobj::Nsimd(), {
-    coalescedWrite(ret_v[ss], adj(lhs_v(ss)));
+  accelerator_for( ss, lhs_v.size(), 1, {
+    ret_v[ss] = adj(lhs_v[ss]);
   });
   return ret;
 };
@@ -64,6 +64,53 @@ template<class vobj> inline Lattice<vobj> conjugate(const Lattice<vobj> &lhs){
   return ret;
 };

+template<class vobj> inline Lattice<typename vobj::Complexified> toComplex(const Lattice<vobj> &lhs){
+  Lattice<typename vobj::Complexified> ret(lhs.Grid());
+
+  autoView( lhs_v, lhs, AcceleratorRead);
+  autoView( ret_v, ret, AcceleratorWrite);
+
+  ret.Checkerboard() = lhs.Checkerboard();
+  accelerator_for( ss, lhs_v.size(), 1, {
+    ret_v[ss] = toComplex(lhs_v[ss]);
+  });
+  return ret;
+};
+template<class vobj> inline Lattice<typename vobj::Realified> toReal(const Lattice<vobj> &lhs){
+  Lattice<typename vobj::Realified> ret(lhs.Grid());
+
+  autoView( lhs_v, lhs, AcceleratorRead);
+  autoView( ret_v, ret, AcceleratorWrite);
+
+  ret.Checkerboard() = lhs.Checkerboard();
+  accelerator_for( ss, lhs_v.size(), 1, {
+    ret_v[ss] = toReal(lhs_v[ss]);
+  });
+  return ret;
+};
+
+
+template<class Expression,typename std::enable_if<is_lattice_expr<Expression>::value,void>::type * = nullptr>
+auto toComplex(const Expression &expr) -> decltype(closure(expr))
+{
+  return toComplex(closure(expr));
+}
+template<class Expression,typename std::enable_if<is_lattice_expr<Expression>::value,void>::type * = nullptr>
+auto toReal(const Expression &expr) -> decltype(closure(expr))
+{
+  return toReal(closure(expr));
+}
+template<class Expression,typename std::enable_if<is_lattice_expr<Expression>::value,void>::type * = nullptr>
+auto adj(const Expression &expr) -> decltype(closure(expr))
+{
+  return adj(closure(expr));
+}
+template<class Expression,typename std::enable_if<is_lattice_expr<Expression>::value,void>::type * = nullptr>
+auto conjugate(const Expression &expr) -> decltype(closure(expr))
+{
+  return conjugate(closure(expr));
+}

 NAMESPACE_END(Grid);

 #endif
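toComplex/toReal flip between a complex-valued type (vobj::Complexified) and its real counterpart (vobj::Realified), which carries twice the components. A minimal stand-in for the round trip, using plain std::complex in place of Grid's SIMD tensors:

#include <cassert>
#include <complex>
#include <cstddef>
#include <vector>

static std::vector<double> toReal(const std::vector<std::complex<double>> &c) {
  std::vector<double> r(2 * c.size());               // twice the components
  for (std::size_t i = 0; i < c.size(); i++) { r[2*i] = c[i].real(); r[2*i+1] = c[i].imag(); }
  return r;
}
static std::vector<std::complex<double>> toComplex(const std::vector<double> &r) {
  std::vector<std::complex<double>> c(r.size() / 2);
  for (std::size_t i = 0; i < c.size(); i++) c[i] = {r[2*i], r[2*i+1]};
  return c;
}

int main() {
  std::vector<std::complex<double>> v = {{1, 2}, {3, 4}};
  assert(toComplex(toReal(v)) == v);  // the two views are mutually inverse
  return 0;
}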
@@ -96,8 +96,34 @@ inline typename vobj::scalar_objectD sumD_cpu(const vobj *arg, Integer osites)
   ssobj ret = ssum;
   return ret;
 }
+/*
+Threaded max, don't use for now
+template<class Double>
+inline Double max(const Double *arg, Integer osites)
+{
+  //  const int Nsimd = vobj::Nsimd();
+  const int nthread = GridThread::GetThreads();
+
+  std::vector<Double> maxarray(nthread);
+
+  thread_for(thr,nthread, {
+    int nwork, mywork, myoff;
+    nwork = osites;
+    GridThread::GetWork(nwork,thr,mywork,myoff);
+    Double max=arg[0];
+    for(int ss=myoff;ss<mywork+myoff; ss++){
+      if( arg[ss] > max ) max = arg[ss];
+    }
+    maxarray[thr]=max;
+  });
+
+  Double tmax=maxarray[0];
+  for(int i=0;i<nthread;i++){
+    if (maxarray[i]>tmax) tmax = maxarray[i];
+  }
+  return tmax;
+}
+*/
 template<class vobj>
 inline typename vobj::scalar_object sum(const vobj *arg, Integer osites)
 {
@@ -141,6 +167,32 @@ template<class vobj> inline RealD norm2(const Lattice<vobj> &arg){
   return real(nrm);
 }

+//The global maximum of the site norm2
+template<class vobj> inline RealD maxLocalNorm2(const Lattice<vobj> &arg)
+{
+  typedef typename vobj::tensor_reduced vscalar;  //iScalar<iScalar<.... <vPODtype> > >
+  typedef typename vscalar::scalar_object scalar; //iScalar<iScalar<.... <PODtype> > >
+
+  Lattice<vscalar> inner = localNorm2(arg);
+
+  auto grid = arg.Grid();
+
+  RealD max;
+  for(int l=0;l<grid->lSites();l++){
+    Coordinate coor;
+    scalar val;
+    RealD r;
+    grid->LocalIndexToLocalCoor(l,coor);
+    peekLocalSite(val,inner,coor);
+    r=real(TensorRemove(val));
+    if( (l==0) || (r>max)){
+      max=r;
+    }
+  }
+  grid->GlobalMax(max);
+  return max;
+}
+
 // Double inner product
 template<class vobj>
 inline ComplexD rankInnerProduct(const Lattice<vobj> &left,const Lattice<vobj> &right)
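maxLocalNorm2 above scans the per-site norms on the local volume and then reduces across ranks with grid->GlobalMax. The local half in plain C++ (the MPI reduction is not reproduced here):

#include <cassert>
#include <complex>
#include <cstddef>
#include <vector>

static double maxLocalNorm2(const std::vector<std::complex<double>> &field) {
  double max = 0;
  for (std::size_t l = 0; l < field.size(); l++) {
    double r = std::norm(field[l]);       // |phi(x)|^2, cf. localNorm2
    if (l == 0 || r > max) max = r;       // same first-site seeding as the diff
  }
  return max;                             // Grid then calls grid->GlobalMax(max)
}

int main() {
  assert(maxLocalNorm2({{0, 1}, {3, 4}}) == 25.0);  // |3+4i|^2
  return 0;
}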
@@ -2,12 +2,13 @@ NAMESPACE_BEGIN(Grid);

 #ifdef GRID_HIP
 extern hipDeviceProp_t *gpu_props;
+#define WARP_SIZE 64
 #endif
 #ifdef GRID_CUDA
 extern cudaDeviceProp *gpu_props;
+#define WARP_SIZE 32
 #endif

-#define WARP_SIZE 32
 __device__ unsigned int retirementCount = 0;

 template <class Iterator>
@@ -64,7 +65,7 @@ __device__ void reduceBlock(volatile sobj *sdata, sobj mySum, const Iterator tid

   // cannot use overloaded operators for sobj as they are not volatile-qualified
   memcpy((void *)&sdata[tid], (void *)&mySum, sizeof(sobj));
-  __syncwarp();
+  acceleratorSynchronise();

   const Iterator VEC = WARP_SIZE;
   const Iterator vid = tid & (VEC-1);
@@ -78,9 +79,9 @@ __device__ void reduceBlock(volatile sobj *sdata, sobj mySum, const Iterator tid
       beta += temp;
       memcpy((void *)&sdata[tid], (void *)&beta, sizeof(sobj));
     }
-    __syncwarp();
+    acceleratorSynchronise();
   }
-  __syncthreads();
+  acceleratorSynchroniseAll();

   if (threadIdx.x == 0) {
     beta = Zero();
@@ -90,7 +91,7 @@ __device__ void reduceBlock(volatile sobj *sdata, sobj mySum, const Iterator tid
   }
   memcpy((void *)&sdata[0], (void *)&beta, sizeof(sobj));
   }
-  __syncthreads();
+  acceleratorSynchroniseAll();
 }
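The substitutions above replace CUDA-specific barriers with portable wrappers so the same reduction body compiles for both CUDA and HIP (note the per-platform WARP_SIZE: 32 on NVIDIA, 64 on AMD wavefronts). The definitions below are an illustrative guess at the shape of such wrappers, not Grid's actual ones:

// Hide vendor-specific barriers behind common names (assumed definitions).
#if defined(__CUDACC__)
  #define acceleratorSynchronise()    __syncwarp()
  #define acceleratorSynchroniseAll() __syncthreads()
#elif defined(__HIPCC__)
  #define acceleratorSynchronise()    __syncthreads()  // no __syncwarp on HIP
  #define acceleratorSynchroniseAll() __syncthreads()
#else
  #define acceleratorSynchronise()    // single-threaded host build: no-op
  #define acceleratorSynchroniseAll()
#endif

int main() {
  acceleratorSynchronise();     // compiles everywhere; a barrier on device
  acceleratorSynchroniseAll();
  return 0;
}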
@@ -127,6 +127,11 @@ accelerator_inline void convertType(T1 & out, const iScalar<T2> & in) {
   convertType(out,in._internal);
 }

+template<typename T1, typename std::enable_if<!isGridScalar<T1>::value, T1>::type* = nullptr>
+accelerator_inline void convertType(T1 & out, const iScalar<T1> & in) {
+  convertType(out,in._internal);
+}
+
 template<typename T1,typename T2>
 accelerator_inline void convertType(iScalar<T1> & out, const T2 & in) {
   convertType(out._internal,in);
@@ -240,6 +245,8 @@ template<class vobj,class vobj2,class CComplex>
   autoView( fineX_  , fineX, AcceleratorRead);
   autoView( fineY_  , fineY, AcceleratorRead);
   autoView( coarseA_, coarseA, AcceleratorRead);
+  Coordinate fine_rdimensions   = fine->_rdimensions;
+  Coordinate coarse_rdimensions = coarse->_rdimensions;

   accelerator_for(sf, fine->oSites(), CComplex::Nsimd(), {

@@ -247,9 +254,9 @@ template<class vobj,class vobj2,class CComplex>
     Coordinate coor_c(_ndimension);
     Coordinate coor_f(_ndimension);

-    Lexicographic::CoorFromIndex(coor_f,sf,fine->_rdimensions);
+    Lexicographic::CoorFromIndex(coor_f,sf,fine_rdimensions);
     for(int d=0;d<_ndimension;d++) coor_c[d]=coor_f[d]/block_r[d];
-    Lexicographic::IndexFromCoor(coor_c,sc,coarse->_rdimensions);
+    Lexicographic::IndexFromCoor(coor_c,sc,coarse_rdimensions);

     // z = A x + y
 #ifdef GRID_SIMT
@@ -353,11 +360,14 @@ inline void blockSum(Lattice<vobj> &coarseData,const Lattice<vobj> &fineData)
   autoView( coarseData_ , coarseData, AcceleratorWrite);
   autoView( fineData_   , fineData, AcceleratorRead);

+  Coordinate fine_rdimensions   = fine->_rdimensions;
+  Coordinate coarse_rdimensions = coarse->_rdimensions;
+
   accelerator_for(sc,coarse->oSites(),1,{

     // One thread per sub block
     Coordinate coor_c(_ndimension);
-    Lexicographic::CoorFromIndex(coor_c,sc,coarse->_rdimensions);  // Block coordinate
+    Lexicographic::CoorFromIndex(coor_c,sc,coarse_rdimensions);  // Block coordinate
     coarseData_[sc]=Zero();

     for(int sb=0;sb<blockVol;sb++){
@@ -367,7 +377,7 @@ inline void blockSum(Lattice<vobj> &coarseData,const Lattice<vobj> &fineData)
       Coordinate coor_f(_ndimension);
       Lexicographic::CoorFromIndex(coor_b,sb,block_r); // Block sub coordinate
       for(int d=0;d<_ndimension;d++) coor_f[d]=coor_c[d]*block_r[d] + coor_b[d];
-      Lexicographic::IndexFromCoor(coor_f,sf,fine->_rdimensions);
+      Lexicographic::IndexFromCoor(coor_f,sf,fine_rdimensions);

       coarseData_[sc]=coarseData_[sc]+fineData_[sf];
     }
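The hoisting of fine->_rdimensions and coarse->_rdimensions into local Coordinate variables matters because the accelerator_for lambda captures by value: device code must not chase a host-side grid pointer. A host-side analogue of the safe pattern:

#include <array>
#include <cassert>

struct Grid { std::array<int, 4> rdimensions{8, 8, 8, 16}; };

int main() {
  Grid *fine = new Grid;                                    // lives on the "host"
  std::array<int, 4> fine_rdimensions = fine->rdimensions;  // snapshot by value
  delete fine;                                              // kernel never sees 'fine'
  auto kernel = [fine_rdimensions]() { return fine_rdimensions[3]; };
  assert(kernel() == 16);
  return 0;
}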
@@ -67,8 +67,13 @@ public:
   accelerator_inline const vobj & operator()(size_t i) const { return this->_odata[i]; }
 #endif

-  accelerator_inline const vobj & operator[](size_t i) const { return this->_odata[i]; };
-  accelerator_inline vobj & operator[](size_t i) { return this->_odata[i]; };
+#if 1
+  //  accelerator_inline const vobj & operator[](size_t i) const { return this->_odata[i]; };
+  accelerator_inline vobj & operator[](size_t i) const { return this->_odata[i]; };
+#else
+  //  accelerator_inline const vobj & operator[](size_t i) const { return this->_odata[i]; };
+  //  accelerator_inline vobj & operator[](size_t i) { return this->_odata[i]; };
+#endif

   accelerator_inline uint64_t begin(void) const { return 0;};
   accelerator_inline uint64_t end(void)   const { return this->_odata_size; };
@@ -130,6 +130,8 @@ public:
   friend std::ostream& operator<< (std::ostream& stream, Logger& log){

     if ( log.active ) {
+      std::ios_base::fmtflags f(stream.flags());

       stream << log.background()<< std::left;
       if (log.topWidth > 0)
       {
@@ -152,6 +154,8 @@ public:
              << now << log.background() << " : " ;
       }
       stream << log.colour();
+      stream.flags(f);
+
       return stream;
     } else {
       return devnull;
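The two added Logger lines are the classic save/restore of stream format state, so manipulators such as std::left set while printing the log prefix do not leak into the caller's subsequent output. The pattern in isolation:

#include <iostream>

int main() {
  std::ios_base::fmtflags f(std::cout.flags());  // save, cf. the first added line
  std::cout << std::hex << 255 << "\n";          // prints ff
  std::cout.flags(f);                            // restore, cf. stream.flags(f)
  std::cout << 255 << "\n";                      // prints 255 again
  return 0;
}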
@@ -1,3 +1,4 @@
 #include <Grid/GridCore.h>

 int Grid::BinaryIO::latticeWriteMaxRetry = -1;
+Grid::BinaryIO::IoPerf Grid::BinaryIO::lastPerf;

@@ -79,6 +79,13 @@ inline void removeWhitespace(std::string &key)
 ///////////////////////////////////////////////////////////////////////////////////////////////////
 class BinaryIO {
  public:
+  struct IoPerf
+  {
+    uint64_t size{0},time{0};
+    double   mbytesPerSecond{0.};
+  };
+
+  static IoPerf lastPerf;
   static int latticeWriteMaxRetry;

   /////////////////////////////////////////////////////////////////////////////
@@ -502,12 +509,15 @@ class BinaryIO {
       timer.Stop();
     }

+    lastPerf.size            = sizeof(fobj)*iodata.size()*nrank;
+    lastPerf.time            = timer.useconds();
+    lastPerf.mbytesPerSecond = lastPerf.size/1024./1024./(lastPerf.time/1.0e6);
     std::cout<<GridLogMessage<<"IOobject: ";
     if ( control & BINARYIO_READ) std::cout << " read  ";
     else                          std::cout << " write ";
-    uint64_t bytes = sizeof(fobj)*iodata.size()*nrank;
-    std::cout<< bytes <<" bytes in "<<timer.Elapsed() <<" "
-             << (double)bytes/ (double)timer.useconds() <<" MB/s "<<std::endl;
+    std::cout<< lastPerf.size <<" bytes in "<< timer.Elapsed() <<" "
+             << lastPerf.mbytesPerSecond <<" MB/s "<<std::endl;

    std::cout<<GridLogMessage<<"IOobject: endian and checksum overhead "<<bstimer.Elapsed()  <<std::endl;
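The recorded throughput is exactly bytes scaled to MiB divided by seconds derived from microseconds, as the lastPerf lines compute. Worked through once:

#include <cassert>
#include <cstdint>

int main() {
  uint64_t size = 512ull * 1024 * 1024;  // 512 MiB moved
  uint64_t time = 2000000;               // 2 s, in microseconds
  double mbytesPerSecond = size / 1024. / 1024. / (time / 1.0e6);  // the diff's formula
  assert(mbytesPerSecond == 256.0);      // 512 MiB over 2 s
  return 0;
}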
@@ -663,10 +673,15 @@ class BinaryIO {
                              nersc_csum,scidac_csuma,scidac_csumb);

     timer.Start();
-    thread_for(lidx,lsites,{
+    thread_for(lidx,lsites,{  // FIX ME, suboptimal implementation
       std::vector<RngStateType> tmp(RngStateCount);
       std::copy(iodata[lidx].begin(),iodata[lidx].end(),tmp.begin());
-      parallel_rng.SetState(tmp,lidx);
+      Coordinate lcoor;
+      grid->LocalIndexToLocalCoor(lidx, lcoor);
+      int o_idx=grid->oIndex(lcoor);
+      int i_idx=grid->iIndex(lcoor);
+      int gidx=parallel_rng.generator_idx(o_idx,i_idx);
+      parallel_rng.SetState(tmp,gidx);
     });
     timer.Stop();

@@ -723,7 +738,12 @@ class BinaryIO {
     std::vector<RNGstate> iodata(lsites);
     thread_for(lidx,lsites,{
       std::vector<RngStateType> tmp(RngStateCount);
-      parallel_rng.GetState(tmp,lidx);
+      Coordinate lcoor;
+      grid->LocalIndexToLocalCoor(lidx, lcoor);
+      int o_idx=grid->oIndex(lcoor);
+      int i_idx=grid->iIndex(lcoor);
+      int gidx=parallel_rng.generator_idx(o_idx,i_idx);
+      parallel_rng.GetState(tmp,gidx);
       std::copy(tmp.begin(),tmp.end(),iodata[lidx].begin());
     });
     timer.Stop();
@@ -123,7 +123,7 @@ assert(GRID_FIELD_NORM_CALC(FieldNormMetaData_, n2ck) < 1.0e-5);
 ////////////////////////////////////////////////////////////
 // Helper to fill out metadata
 ////////////////////////////////////////////////////////////
 template<class vobj> void ScidacMetaData(Lattice<vobj> & field,
                                          FieldMetaData &header,
                                          scidacRecord & _scidacRecord,
                                          scidacFile   & _scidacFile)
@@ -619,12 +619,12 @@ class IldgWriter : public ScidacWriter {
   // Don't require scidac records EXCEPT checksum
   // Use Grid MetaData object if present.
   ////////////////////////////////////////////////////////////////
-  template <class vsimd>
-  void writeConfiguration(Lattice<iLorentzColourMatrix<vsimd> > &Umu,int sequence,std::string LFN,std::string description)
+  template <class stats = PeriodicGaugeStatistics>
+  void writeConfiguration(Lattice<vLorentzColourMatrixD > &Umu,int sequence,std::string LFN,std::string description)
   {
     GridBase * grid = Umu.Grid();
-    typedef Lattice<iLorentzColourMatrix<vsimd> > GaugeField;
-    typedef iLorentzColourMatrix<vsimd> vobj;
+    typedef Lattice<vLorentzColourMatrixD> GaugeField;
+    typedef vLorentzColourMatrixD vobj;
     typedef typename vobj::scalar_object sobj;

     ////////////////////////////////////////
@@ -636,6 +636,9 @@ class IldgWriter : public ScidacWriter {

     ScidacMetaData(Umu,header,_scidacRecord,_scidacFile);

+    stats Stats;
+    Stats(Umu,header);
+
     std::string format = header.floating_point;
     header.ensemble_id = description;
     header.ensemble_label = description;
@@ -705,10 +708,10 @@ class IldgReader : public GridLimeReader {
   // Else use ILDG MetaData object if present.
   // Else use SciDAC MetaData object if present.
   ////////////////////////////////////////////////////////////////
-  template <class vsimd>
-  void readConfiguration(Lattice<iLorentzColourMatrix<vsimd> > &Umu, FieldMetaData &FieldMetaData_) {
+  template <class stats = PeriodicGaugeStatistics>
+  void readConfiguration(Lattice<vLorentzColourMatrixD> &Umu, FieldMetaData &FieldMetaData_) {

-    typedef Lattice<iLorentzColourMatrix<vsimd> > GaugeField;
+    typedef Lattice<vLorentzColourMatrixD > GaugeField;
     typedef typename GaugeField::vector_object  vobj;
     typedef typename vobj::scalar_object sobj;

@@ -921,7 +924,8 @@ class IldgReader : public GridLimeReader {

     if ( found_FieldMetaData || found_usqcdInfo ) {
       FieldMetaData checker;
-      GaugeStatistics(Umu,checker);
+      stats Stats;
+      Stats(Umu,checker);
       assert(fabs(checker.plaquette  - FieldMetaData_.plaquette )<1.0e-5);
       assert(fabs(checker.link_trace - FieldMetaData_.link_trace)<1.0e-5);
       std::cout << GridLogMessage<<"Plaquette and link trace match " << std::endl;
@@ -176,29 +176,18 @@ template<class vobj> inline void PrepareMetaData(Lattice<vobj> & field, FieldMet
   GridMetaData(grid,header);
   MachineCharacteristics(header);
 }
-inline void GaugeStatistics(Lattice<vLorentzColourMatrixF> & data,FieldMetaData &header)
-{
-  // How to convert data precision etc...
-  header.link_trace=WilsonLoops<PeriodicGimplF>::linkTrace(data);
-  header.plaquette =WilsonLoops<PeriodicGimplF>::avgPlaquette(data);
-}
-inline void GaugeStatistics(Lattice<vLorentzColourMatrixD> & data,FieldMetaData &header)
-{
-  // How to convert data precision etc...
-  header.link_trace=WilsonLoops<PeriodicGimplD>::linkTrace(data);
-  header.plaquette =WilsonLoops<PeriodicGimplD>::avgPlaquette(data);
-}
-template<> inline void PrepareMetaData<vLorentzColourMatrixF>(Lattice<vLorentzColourMatrixF> & field, FieldMetaData &header)
-{
-  GridBase *grid = field.Grid();
-  std::string format = getFormatString<vLorentzColourMatrixF>();
-  header.floating_point = format;
-  header.checksum = 0x0; // Nersc checksum unused in ILDG, Scidac
-  GridMetaData(grid,header);
-  GaugeStatistics(field,header);
-  MachineCharacteristics(header);
-}
+template<class Impl>
+class GaugeStatistics
+{
+public:
+  void operator()(Lattice<vLorentzColourMatrixD> & data,FieldMetaData &header)
+  {
+    header.link_trace=WilsonLoops<Impl>::linkTrace(data);
+    header.plaquette =WilsonLoops<Impl>::avgPlaquette(data);
+  }
+};
+typedef GaugeStatistics<PeriodicGimplD> PeriodicGaugeStatistics;
+typedef GaugeStatistics<ConjugateGimplD> ConjugateGaugeStatistics;
 template<> inline void PrepareMetaData<vLorentzColourMatrixD>(Lattice<vLorentzColourMatrixD> & field, FieldMetaData &header)
 {
   GridBase *grid = field.Grid();
@@ -206,7 +195,6 @@ template<> inline void PrepareMetaData<vLorentzColourMatrixD>(Lattice<vLorentzCo
   header.floating_point = format;
   header.checksum = 0x0; // Nersc checksum unused in ILDG, Scidac
   GridMetaData(grid,header);
-  GaugeStatistics(field,header);
   MachineCharacteristics(header);
 }
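The refactor replaces the free GaugeStatistics() functions with a functor templated on the gauge implementation, so periodic and conjugate (G-parity) boundary conditions share one code path and the reader/writer templates can default to PeriodicGaugeStatistics. A skeleton of the intended use; WilsonLoops and the Impl types are stubbed out here, only the shape mirrors the diff:

#include <cassert>

struct FieldMetaData { double link_trace = 0, plaquette = 0; };
struct Field { double value = 0.5; };

template <class Impl> struct WilsonLoops {
  static double linkTrace(const Field &U)    { return Impl::sign * U.value; }
  static double avgPlaquette(const Field &U) { return Impl::sign * U.value; }
};
struct PeriodicGimplD  { static constexpr double sign = +1; };
struct ConjugateGimplD { static constexpr double sign = -1; };

template <class Impl> class GaugeStatistics {
public:
  void operator()(Field &data, FieldMetaData &header) {
    header.link_trace = WilsonLoops<Impl>::linkTrace(data);
    header.plaquette  = WilsonLoops<Impl>::avgPlaquette(data);
  }
};
typedef GaugeStatistics<PeriodicGimplD>  PeriodicGaugeStatistics;
typedef GaugeStatistics<ConjugateGimplD> ConjugateGaugeStatistics;

int main() {
  Field Umu; FieldMetaData header;
  PeriodicGaugeStatistics Stats; Stats(Umu, header);  // cf. the new call sites
  assert(header.plaquette == 0.5);
  return 0;
}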
@@ -40,6 +40,8 @@ using namespace Grid;
 class NerscIO : public BinaryIO {
 public:

+  typedef Lattice<vLorentzColourMatrixD> GaugeField;
+
   static inline void truncate(std::string file){
     std::ofstream fout(file,std::ios::out);
   }
@@ -129,12 +131,12 @@ public:
   // Now the meat: the object readers
   /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////

-  template<class vsimd>
-  static inline void readConfiguration(Lattice<iLorentzColourMatrix<vsimd> > &Umu,
+  template<class GaugeStats=PeriodicGaugeStatistics>
+  static inline void readConfiguration(GaugeField &Umu,
                                        FieldMetaData& header,
-                                       std::string file)
+                                       std::string file,
+                                       GaugeStats GaugeStatisticsCalculator=GaugeStats())
   {
-    typedef Lattice<iLorentzColourMatrix<vsimd> > GaugeField;

     GridBase *grid = Umu.Grid();
     uint64_t offset = readHeader(file,Umu.Grid(),header);
@@ -153,23 +155,23 @@ public:
     // munger is a function of <floating point, Real, data_type>
     if ( header.data_type == std::string("4D_SU3_GAUGE") ) {
       if ( ieee32 || ieee32big ) {
-        BinaryIO::readLatticeObject<iLorentzColourMatrix<vsimd>, LorentzColour2x3F>
+        BinaryIO::readLatticeObject<vLorentzColourMatrixD, LorentzColour2x3F>
          (Umu,file,Gauge3x2munger<LorentzColour2x3F,LorentzColourMatrix>(), offset,format,
           nersc_csum,scidac_csuma,scidac_csumb);
       }
       if ( ieee64 || ieee64big ) {
-        BinaryIO::readLatticeObject<iLorentzColourMatrix<vsimd>, LorentzColour2x3D>
+        BinaryIO::readLatticeObject<vLorentzColourMatrixD, LorentzColour2x3D>
          (Umu,file,Gauge3x2munger<LorentzColour2x3D,LorentzColourMatrix>(),offset,format,
           nersc_csum,scidac_csuma,scidac_csumb);
       }
     } else if ( header.data_type == std::string("4D_SU3_GAUGE_3x3") ) {
       if ( ieee32 || ieee32big ) {
-        BinaryIO::readLatticeObject<iLorentzColourMatrix<vsimd>,LorentzColourMatrixF>
+        BinaryIO::readLatticeObject<vLorentzColourMatrixD,LorentzColourMatrixF>
          (Umu,file,GaugeSimpleMunger<LorentzColourMatrixF,LorentzColourMatrix>(),offset,format,
           nersc_csum,scidac_csuma,scidac_csumb);
       }
       if ( ieee64 || ieee64big ) {
-        BinaryIO::readLatticeObject<iLorentzColourMatrix<vsimd>,LorentzColourMatrixD>
+        BinaryIO::readLatticeObject<vLorentzColourMatrixD,LorentzColourMatrixD>
          (Umu,file,GaugeSimpleMunger<LorentzColourMatrixD,LorentzColourMatrix>(),offset,format,
           nersc_csum,scidac_csuma,scidac_csumb);
       }
@@ -177,7 +179,7 @@ public:
       assert(0);
     }

-    GaugeStatistics(Umu,clone);
+    GaugeStats Stats; Stats(Umu,clone);

     std::cout<<GridLogMessage <<"NERSC Configuration "<<file<<" checksum "<<std::hex<<nersc_csum<< std::dec
              <<" header   "<<std::hex<<header.checksum<<std::dec <<std::endl;
@@ -203,15 +205,13 @@ public:
     std::cout<<GridLogMessage <<"NERSC Configuration "<<file<< " and plaquette, link trace, and checksum agree"<<std::endl;
   }

-  template<class vsimd>
-  static inline void writeConfiguration(Lattice<iLorentzColourMatrix<vsimd> > &Umu,
+  template<class GaugeStats=PeriodicGaugeStatistics>
+  static inline void writeConfiguration(Lattice<vLorentzColourMatrixD > &Umu,
                                         std::string file,
                                         int two_row,
                                         int bits32)
   {
-    typedef Lattice<iLorentzColourMatrix<vsimd> > GaugeField;
-
-    typedef iLorentzColourMatrix<vsimd> vobj;
+    typedef vLorentzColourMatrixD vobj;
     typedef typename vobj::scalar_object sobj;

     FieldMetaData header;
@@ -229,7 +229,7 @@ public:

     GridMetaData(grid,header);
     assert(header.nd==4);
-    GaugeStatistics(Umu,header);
+    GaugeStats Stats; Stats(Umu,header);
     MachineCharacteristics(header);

     uint64_t offset;
@@ -238,19 +238,19 @@ public:
     header.floating_point = std::string("IEEE64BIG");
     header.data_type      = std::string("4D_SU3_GAUGE_3x3");
     GaugeSimpleUnmunger<fobj3D,sobj> munge;
     if ( grid->IsBoss() ) {
       truncate(file);
       offset = writeHeader(header,file);
     }
     grid->Broadcast(0,(void *)&offset,sizeof(offset));

     uint32_t nersc_csum,scidac_csuma,scidac_csumb;
     BinaryIO::writeLatticeObject<vobj,fobj3D>(Umu,file,munge,offset,header.floating_point,
                                               nersc_csum,scidac_csuma,scidac_csumb);
     header.checksum = nersc_csum;
     if ( grid->IsBoss() ) {
       writeHeader(header,file);
     }

     std::cout<<GridLogMessage <<"Written NERSC Configuration on "<< file << " checksum "
              <<std::hex<<header.checksum
@@ -154,7 +154,7 @@ public:
     grid->Barrier(); timer.Stop();
     std::cout << Grid::GridLogMessage << "OpenQcdIO::readConfiguration: redistribute overhead " << timer.Elapsed() << std::endl;

-    GaugeStatistics(Umu, clone);
+    PeriodicGaugeStatistics Stats; Stats(Umu, clone);

     RealD plaq_diff = fabs(clone.plaquette - header.plaquette);

@@ -208,7 +208,7 @@ public:

     FieldMetaData clone(header);

-    GaugeStatistics(Umu, clone);
+    PeriodicGaugeStatistics Stats; Stats(Umu, clone);

     RealD plaq_diff = fabs(clone.plaquette - header.plaquette);
@@ -47,7 +47,7 @@ static constexpr int Ym = 5;
 static constexpr int Zm = 6;
 static constexpr int Tm = 7;

-static constexpr int Nc=3;
+static constexpr int Nc=Config_Nc;
 static constexpr int Ns=4;
 static constexpr int Nd=4;
 static constexpr int Nhs=2; // half spinor
@@ -80,6 +80,13 @@ template<typename T> struct isSpinor {
 template <typename T> using IfSpinor    = Invoke<std::enable_if< isSpinor<T>::value,int> > ;
 template <typename T> using IfNotSpinor = Invoke<std::enable_if<!isSpinor<T>::value,int> > ;

+const int CoarseIndex = 4;
+template<typename T> struct isCoarsened {
+  static constexpr bool value = (CoarseIndex<=T::TensorLevel);
+};
+template <typename T> using IfCoarsened    = Invoke<std::enable_if< isCoarsened<T>::value,int> > ;
+template <typename T> using IfNotCoarsened = Invoke<std::enable_if<!isCoarsened<T>::value,int> > ;
+
 // ChrisK very keen to add extra space for Gparity doubling.
 //
 // Also add domain wall index, in a way where Wilson operator
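The isCoarsened trait added above selects overloads by comparing a type's nesting depth (TensorLevel) against CoarseIndex. The same dispatch pattern reduced to essentials; std::enable_if_t replaces Grid's Invoke<> helper in this sketch:

#include <cassert>
#include <type_traits>

constexpr int CoarseIndex = 4;
struct FineObj   { static constexpr int TensorLevel = 2; };
struct CoarseObj { static constexpr int TensorLevel = 5; };

template <typename T> struct isCoarsened {
  static constexpr bool value = (CoarseIndex <= T::TensorLevel);
};
template <typename T> using IfCoarsened    = std::enable_if_t< isCoarsened<T>::value, int>;
template <typename T> using IfNotCoarsened = std::enable_if_t<!isCoarsened<T>::value, int>;

// Two kernels; SFINAE picks one based on the trait, with no runtime branch.
template <class T, IfCoarsened<T> = 0>    int kernel(const T &) { return 1; }
template <class T, IfNotCoarsened<T> = 0> int kernel(const T &) { return 0; }

int main() {
  assert(kernel(CoarseObj{}) == 1 && kernel(FineObj{}) == 0);
  return 0;
}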
@@ -88,7 +88,7 @@ public:
                                     const _Spinor &chi,
                                     int mu,
                                     StencilEntry *SE,
-                                    StencilView &St)
+                                    const StencilView &St)
   {
     int direction = St._directions[mu];
     int distance  = St._distances[mu];
@@ -97,42 +97,30 @@ public:
     Coordinate icoor;

 #ifdef GRID_SIMT
-    _Spinor tmp;
-
     const int Nsimd =SiteDoubledGaugeField::Nsimd();
     int s = acceleratorSIMTlane(Nsimd);
     St.iCoorFromIindex(icoor,s);

     int mmu = mu % Nd;
-    if ( SE->_around_the_world && St.parameters.twists[mmu] ) {
-
-      int permute_lane = (sl==1)
-        || ((distance== 1)&&(icoor[direction]==1))
-        || ((distance==-1)&&(icoor[direction]==0));
-
-      if ( permute_lane ) {
-        tmp(0) = chi(1);
-        tmp(1) = chi(0);
-      } else {
-        tmp(0) = chi(0);
-        tmp(1) = chi(1);
-      }
-
-      auto UU0=coalescedRead(U(0)(mu));
-      auto UU1=coalescedRead(U(1)(mu));
-
-      mult(&phi(0),&UU0,&tmp(0));
-      mult(&phi(1),&UU1,&tmp(1));
-
-    } else {
-
-      auto UU0=coalescedRead(U(0)(mu));
-      auto UU1=coalescedRead(U(1)(mu));
-
-      mult(&phi(0),&UU0,&chi(0));
-      mult(&phi(1),&UU1,&chi(1));
-
-    }
+
+    //Decide whether we do a G-parity flavor twist
+    //Note: this assumes (but does not check) that sl==1 || sl==2 i.e. max 2 SIMD lanes in G-parity dir
+    //It also assumes (but does not check) that abs(distance) == 1
+    int permute_lane = (sl==1)
+      || ((distance== 1)&&(icoor[direction]==1))
+      || ((distance==-1)&&(icoor[direction]==0));
+
+    auto UU0=coalescedRead(U(0)(mu));
+    auto UU1=coalescedRead(U(1)(mu));
+
+    permute_lane = permute_lane && SE->_around_the_world && St.parameters.twists[mmu]; //only if we are going around the world
+
+    //Apply the links
+    int f_upper = permute_lane ? 1 : 0;
+    int f_lower = !f_upper;
+
+    mult(&phi(0),&UU0,&chi(f_upper));
+    mult(&phi(1),&UU1,&chi(f_lower));

 #else
     typedef _Spinor vobj;
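The rewritten G-parity SIMT branch trades an if/else that copied flavour components for an index computation, so all SIMD lanes execute the same instruction stream. The trick in isolation, with scalars standing in for spinors and links:

#include <array>
#include <cassert>

int main() {
  std::array<double, 2> chi = {10.0, 20.0};   // two flavor components
  bool around_world = true, twist = true;
  int  permute_lane = 1;                      // lane-dependent in the real kernel
  permute_lane = permute_lane && around_world && twist;  // cf. the diff
  int f_upper = permute_lane ? 1 : 0;         // which flavor feeds phi(0)
  int f_lower = !f_upper;
  double phi0 = 1.0 * chi[f_upper];           // mult(&phi(0),&UU0,&chi(f_upper))
  double phi1 = 1.0 * chi[f_lower];           // mult(&phi(1),&UU1,&chi(f_lower))
  assert(phi0 == 20.0 && phi1 == 10.0);       // flavors swapped by the twist
  return 0;
}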
@@ -208,7 +208,7 @@ public:
   LebesgueOrder LebesgueEvenOdd;

   // Comms buffer
-  std::vector<SiteHalfSpinor,alignedAllocator<SiteHalfSpinor> >  comm_buf;
+  //  std::vector<SiteHalfSpinor,alignedAllocator<SiteHalfSpinor> >  comm_buf;

   ///////////////////////////////////////////////////////////////
   // Conserved current utilities
@@ -85,7 +85,7 @@ class MADWF
     maxiter  =_maxiter;
   };

-  void operator() (const FermionFieldo &src4,FermionFieldo &sol5)
+  void operator() (const FermionFieldo &src,FermionFieldo &sol5)
   {
     std::cout << GridLogMessage<< " ************************************************" << std::endl;
     std::cout << GridLogMessage<< "  MADWF-like algorithm                           " << std::endl;
@@ -114,8 +114,16 @@ class MADWF
     ///////////////////////////////////////
     //Import source, include Dminus factors
     ///////////////////////////////////////
-    Mato.ImportPhysicalFermionSource(src4,b);
-    std::cout << GridLogMessage << " src4 " <<norm2(src4)<<std::endl;
+    GridBase *src_grid = src.Grid();
+
+    assert( (src_grid == Mato.GaugeGrid()) || (src_grid == Mato.FermionGrid()));
+
+    if ( src_grid == Mato.GaugeGrid() ) {
+      Mato.ImportPhysicalFermionSource(src,b);
+    } else {
+      b=src;
+    }
+    std::cout << GridLogMessage << " src " <<norm2(src)<<std::endl;
     std::cout << GridLogMessage << " b    " <<norm2(b)<<std::endl;

     defect = b;
@@ -56,55 +56,80 @@ template<class Impl> class StaggeredKernels : public FermionOperator<Impl> , pub
                DoubledGaugeField &U,
                const FermionField &in, FermionField &out, int dag, int interior,int exterior);

-  void DhopDirKernel(StencilImpl &st, DoubledGaugeFieldView &U, DoubledGaugeFieldView &UUU, SiteSpinor * buf,
-                     int sF, int sU, const FermionFieldView &in, FermionFieldView &out, int dir,int disp);
+  void DhopDirKernel(StencilImpl &st,
+                     const DoubledGaugeFieldView &U,
+                     const DoubledGaugeFieldView &UUU, SiteSpinor * buf,
+                     int sF, int sU,
+                     const FermionFieldView &in,
+                     const FermionFieldView &out, int dir,int disp);
 protected:

   ///////////////////////////////////////////////////////////////////////////////////////
   // Generic Nc kernels
   ///////////////////////////////////////////////////////////////////////////////////////
-  template<int Naik> accelerator_inline
-  void DhopSiteGeneric(StencilView &st,
-                       DoubledGaugeFieldView &U, DoubledGaugeFieldView &UUU,
-                       SiteSpinor * buf, int LLs, int sU,
-                       const FermionFieldView &in, FermionFieldView &out,int dag);
+  template<int Naik>
+  static accelerator_inline
+  void DhopSiteGeneric(const StencilView &st,
+                       const DoubledGaugeFieldView &U,
+                       const DoubledGaugeFieldView &UUU,
+                       SiteSpinor * buf, int LLs, int sU,
+                       const FermionFieldView &in,
+                       const FermionFieldView &out,int dag);

-  template<int Naik> accelerator_inline
-  void DhopSiteGenericInt(StencilView &st,
-                          DoubledGaugeFieldView &U, DoubledGaugeFieldView &UUU,
-                          SiteSpinor * buf, int LLs, int sU,
-                          const FermionFieldView &in, FermionFieldView &out,int dag);
+  template<int Naik> static accelerator_inline
+  void DhopSiteGenericInt(const StencilView &st,
+                          const DoubledGaugeFieldView &U,
+                          const DoubledGaugeFieldView &UUU,
+                          SiteSpinor * buf, int LLs, int sU,
+                          const FermionFieldView &in,
+                          const FermionFieldView &out,int dag);

-  template<int Naik> accelerator_inline
-  void DhopSiteGenericExt(StencilView &st,
-                          DoubledGaugeFieldView &U, DoubledGaugeFieldView &UUU,
-                          SiteSpinor * buf, int LLs, int sU,
-                          const FermionFieldView &in, FermionFieldView &out,int dag);
+  template<int Naik> static accelerator_inline
+  void DhopSiteGenericExt(const StencilView &st,
+                          const DoubledGaugeFieldView &U,
+                          const DoubledGaugeFieldView &UUU,
+                          SiteSpinor * buf, int LLs, int sU,
+                          const FermionFieldView &in,
+                          const FermionFieldView &out,int dag);

   ///////////////////////////////////////////////////////////////////////////////////////
   // Nc=3 specific kernels
   ///////////////////////////////////////////////////////////////////////////////////////
-  template<int Naik> accelerator_inline
-  void DhopSiteHand(StencilView &st,
-                    DoubledGaugeFieldView &U,DoubledGaugeFieldView &UUU,
-                    SiteSpinor * buf, int LLs, int sU,
-                    const FermionFieldView &in, FermionFieldView &out,int dag);
-  template<int Naik> accelerator_inline
-  void DhopSiteHandInt(StencilView &st,
-                       DoubledGaugeFieldView &U,DoubledGaugeFieldView &UUU,
-                       SiteSpinor * buf, int LLs, int sU,
-                       const FermionFieldView &in, FermionFieldView &out,int dag);
-  template<int Naik> accelerator_inline
-  void DhopSiteHandExt(StencilView &st,
-                       DoubledGaugeFieldView &U,DoubledGaugeFieldView &UUU,
-                       SiteSpinor * buf, int LLs, int sU,
-                       const FermionFieldView &in, FermionFieldView &out,int dag);
+  template<int Naik> static accelerator_inline
+  void DhopSiteHand(const StencilView &st,
+                    const DoubledGaugeFieldView &U,
+                    const DoubledGaugeFieldView &UUU,
+                    SiteSpinor * buf, int LLs, int sU,
+                    const FermionFieldView &in,
+                    const FermionFieldView &out,int dag);
+
+  template<int Naik> static accelerator_inline
+  void DhopSiteHandInt(const StencilView &st,
+                       const DoubledGaugeFieldView &U,
+                       const DoubledGaugeFieldView &UUU,
+                       SiteSpinor * buf, int LLs, int sU,
+                       const FermionFieldView &in,
+                       const FermionFieldView &out,int dag);
+
+  template<int Naik> static accelerator_inline
+  void DhopSiteHandExt(const StencilView &st,
+                       const DoubledGaugeFieldView &U,
+                       const DoubledGaugeFieldView &UUU,
+                       SiteSpinor * buf, int LLs, int sU,
+                       const FermionFieldView &in,
+                       const FermionFieldView &out,int dag);

   ///////////////////////////////////////////////////////////////////////////////////////
   // Asm Nc=3 specific kernels
   ///////////////////////////////////////////////////////////////////////////////////////
-  void DhopSiteAsm(StencilView &st,
-                   DoubledGaugeFieldView &U,DoubledGaugeFieldView &UUU,
-                   SiteSpinor * buf, int LLs, int sU,
-                   const FermionFieldView &in, FermionFieldView &out,int dag);
+  void DhopSiteAsm(const StencilView &st,
+                   const DoubledGaugeFieldView &U,
+                   const DoubledGaugeFieldView &UUU,
+                   SiteSpinor * buf, int LLs, int sU,
+                   const FermionFieldView &in,
+                   const FermionFieldView &out,int dag);

 public:
@@ -61,7 +61,7 @@ public:
   typedef typename SiteHalfSpinor::vector_type     vComplexHigh;
   constexpr static int Nw=sizeof(SiteHalfSpinor)/sizeof(vComplexHigh);

-  accelerator_inline int CommDatumSize(void) {
+  accelerator_inline int CommDatumSize(void) const {
     return sizeof(SiteHalfCommSpinor);
   }

@@ -69,7 +69,7 @@ public:
   /* Compress includes precision change if mpi data is not same */
   /*****************************************************/
   template<class _SiteHalfSpinor, class _SiteSpinor>
-  accelerator_inline void Compress(_SiteHalfSpinor *buf,Integer o,const _SiteSpinor &in) {
+  accelerator_inline void Compress(_SiteHalfSpinor *buf,Integer o,const _SiteSpinor &in) const {
    _SiteHalfSpinor tmp;
     projector::Proj(tmp,in,mu,dag);
     vstream(buf[o],tmp);
@@ -81,7 +81,7 @@ public:
   accelerator_inline void Exchange(SiteHalfSpinor *mp,
                                    const SiteHalfSpinor * __restrict__ vp0,
                                    const SiteHalfSpinor * __restrict__ vp1,
-                                   Integer type,Integer o){
+                                   Integer type,Integer o) const {
     SiteHalfSpinor tmp1;
     SiteHalfSpinor tmp2;
     exchange(tmp1,tmp2,vp0[o],vp1[o],type);
@@ -93,7 +93,7 @@ public:
   /* Have a decompression step if mpi data is not same */
   /*****************************************************/
   accelerator_inline void Decompress(SiteHalfSpinor * __restrict__ out,
-                                     SiteHalfSpinor * __restrict__ in, Integer o) {
+                                     SiteHalfSpinor * __restrict__ in, Integer o) const {
     assert(0);
   }

@@ -103,7 +103,7 @@ public:
   accelerator_inline void CompressExchange(SiteHalfSpinor * __restrict__ out0,
                                            SiteHalfSpinor * __restrict__ out1,
                                            const SiteSpinor * __restrict__ in,
-                                           Integer j,Integer k, Integer m,Integer type)
+                                           Integer j,Integer k, Integer m,Integer type) const
   {
     SiteHalfSpinor temp1, temp2;
     SiteHalfSpinor temp3, temp4;
@@ -117,7 +117,7 @@ public:
   /*****************************************************/
   /* Pass the info to the stencil */
   /*****************************************************/
-  accelerator_inline bool DecompressionStep(void) { return false; }
+  accelerator_inline bool DecompressionStep(void) const { return false; }

 };

@@ -142,7 +142,7 @@ public:
   typedef typename SiteHalfSpinor::vector_type     vComplexHigh;
   constexpr static int Nw=sizeof(SiteHalfSpinor)/sizeof(vComplexHigh);

-  accelerator_inline int CommDatumSize(void) {
+  accelerator_inline int CommDatumSize(void) const {
     return sizeof(SiteHalfCommSpinor);
   }

@@ -150,7 +150,7 @@ public:
   /* Compress includes precision change if mpi data is not same */
   /*****************************************************/
   template<class _SiteHalfSpinor, class _SiteSpinor>
-  accelerator_inline void Compress(_SiteHalfSpinor *buf,Integer o,const _SiteSpinor &in) {
+  accelerator_inline void Compress(_SiteHalfSpinor *buf,Integer o,const _SiteSpinor &in) const {
     _SiteHalfSpinor hsp;
     SiteHalfCommSpinor *hbuf = (SiteHalfCommSpinor *)buf;
     projector::Proj(hsp,in,mu,dag);
@@ -163,7 +163,7 @@ public:
   accelerator_inline void Exchange(SiteHalfSpinor *mp,
                                    SiteHalfSpinor *vp0,
                                    SiteHalfSpinor *vp1,
-                                   Integer type,Integer o){
+                                   Integer type,Integer o) const {
     SiteHalfSpinor vt0,vt1;
     SiteHalfCommSpinor *vpp0 = (SiteHalfCommSpinor *)vp0;
     SiteHalfCommSpinor *vpp1 = (SiteHalfCommSpinor *)vp1;
@@ -175,7 +175,7 @@ public:
   /*****************************************************/
   /* Have a decompression step if mpi data is not same */
   /*****************************************************/
-  accelerator_inline void Decompress(SiteHalfSpinor *out, SiteHalfSpinor *in, Integer o){
+  accelerator_inline void Decompress(SiteHalfSpinor *out, SiteHalfSpinor *in, Integer o) const {
     SiteHalfCommSpinor *hin=(SiteHalfCommSpinor *)in;
     precisionChange((vComplexHigh *)&out[o],(vComplexLow *)&hin[o],Nw);
   }
@@ -186,7 +186,7 @@ public:
   accelerator_inline void CompressExchange(SiteHalfSpinor *out0,
                                            SiteHalfSpinor *out1,
                                            const SiteSpinor *in,
-                                           Integer j,Integer k, Integer m,Integer type){
+                                           Integer j,Integer k, Integer m,Integer type) const {
     SiteHalfSpinor temp1, temp2,temp3,temp4;
     SiteHalfCommSpinor *hout0 = (SiteHalfCommSpinor *)out0;
     SiteHalfCommSpinor *hout1 = (SiteHalfCommSpinor *)out1;
@@ -200,7 +200,7 @@ public:
   /*****************************************************/
   /* Pass the info to the stencil */
   /*****************************************************/
-  accelerator_inline bool DecompressionStep(void) { return true; }
+  accelerator_inline bool DecompressionStep(void) const { return true; }

 };
|
@ -50,14 +50,14 @@ public:
|
||||
double, nu);
|
||||
|
||||
WilsonAnisotropyCoefficients():
|
||||
isAnisotropic(false),
|
||||
t_direction(Nd-1),
|
||||
xi_0(1.0),
|
||||
isAnisotropic(false),
|
||||
t_direction(Nd-1),
|
||||
xi_0(1.0),
|
||||
nu(1.0){}
|
||||
};
|
||||
|
||||
template <class Impl>
|
||||
class WilsonFermion : public WilsonKernels<Impl>, public WilsonFermionStatic
|
||||
class WilsonFermion : public WilsonKernels<Impl>, public WilsonFermionStatic
|
||||
{
|
||||
public:
|
||||
INHERIT_IMPL_TYPES(Impl);
|
||||
@ -74,6 +74,20 @@ public:
|
||||
FermionField _tmp;
|
||||
FermionField &tmp(void) { return _tmp; }
|
||||
|
||||
void Report(void);
|
||||
void ZeroCounters(void);
|
||||
double DhopCalls;
|
||||
double DhopCommTime;
|
||||
double DhopComputeTime;
|
||||
double DhopComputeTime2;
|
||||
double DhopFaceTime;
|
||||
double DhopTotalTime;
|
||||
|
||||
double DerivCalls;
|
||||
double DerivCommTime;
|
||||
double DerivComputeTime;
|
||||
double DerivDhopComputeTime;
|
||||
|
||||
//////////////////////////////////////////////////////////////////
|
||||
// override multiply; cut number routines if pass dagger argument
|
||||
// and also make interface more uniformly consistent
|
||||
@ -138,7 +152,7 @@ public:
|
||||
// Constructor
|
||||
WilsonFermion(GaugeField &_Umu, GridCartesian &Fgrid,
|
||||
GridRedBlackCartesian &Hgrid, RealD _mass,
|
||||
const ImplParams &p = ImplParams(),
|
||||
const ImplParams &p = ImplParams(),
|
||||
const WilsonAnisotropyCoefficients &anis = WilsonAnisotropyCoefficients() );
|
||||
|
||||
// DoubleStore impl dependent
|
||||
@ -170,9 +184,9 @@ public:
|
||||
|
||||
LebesgueOrder Lebesgue;
|
||||
LebesgueOrder LebesgueEvenOdd;
|
||||
|
||||
|
||||
WilsonAnisotropyCoefficients anisotropyCoeff;
|
||||
|
||||
|
||||
///////////////////////////////////////////////////////////////
|
||||
// Conserved current utilities
|
||||
///////////////////////////////////////////////////////////////
|
||||
@ -186,7 +200,7 @@ public:
|
||||
PropagatorField &q_out,
|
||||
PropagatorField &phys_src,
|
||||
Current curr_type,
|
||||
unsigned int mu,
|
||||
unsigned int mu,
|
||||
unsigned int tmin,
|
||||
unsigned int tmax,
|
||||
ComplexField &lattice_cmplx);
|
||||
@ -196,5 +210,3 @@ typedef WilsonFermion<WilsonImplF> WilsonFermionF;
|
||||
typedef WilsonFermion<WilsonImplD> WilsonFermionD;
|
||||
|
||||
NAMESPACE_END(Grid);
|
||||
|
||||
|
||||
|
@@ -215,7 +215,7 @@ public:
   LebesgueOrder LebesgueEvenOdd;

   // Comms buffer
-  std::vector<SiteHalfSpinor,alignedAllocator<SiteHalfSpinor> >  comm_buf;
+  //  std::vector<SiteHalfSpinor,alignedAllocator<SiteHalfSpinor> >  comm_buf;

 };

@@ -95,7 +95,7 @@ public:
                                     const _Spinor &chi,
                                     int mu,
                                     StencilEntry *SE,
-                                    StencilView &St)
+                                    const StencilView &St)
   {
     multLink(phi,U,chi,mu);
   }
@@ -106,11 +106,15 @@ public:
                                    const _SpinorField & phi,
                                    int mu)
   {
+    const int Nsimd = SiteHalfSpinor::Nsimd();
     autoView( out_v, out, AcceleratorWrite);
     autoView( phi_v, phi, AcceleratorRead);
     autoView( Umu_v, Umu, AcceleratorRead);
-    accelerator_for(sss,out.Grid()->oSites(),1,{
-      multLink(out_v[sss],Umu_v[sss],phi_v[sss],mu);
+    typedef decltype(coalescedRead(out_v[0]))   calcSpinor;
+    accelerator_for(sss,out.Grid()->oSites(),Nsimd,{
+      calcSpinor tmp;
+      multLink(tmp,Umu_v[sss],phi_v(sss),mu);
+      coalescedWrite(out_v[sss],tmp);
     });
   }
@@ -49,6 +49,7 @@ public:

   INHERIT_IMPL_TYPES(Impl);
   typedef FermionOperator<Impl> Base;
+  typedef AcceleratorVector<int,STENCIL_MAX> StencilVector;

 public:

@@ -68,73 +69,87 @@ public:

 private:

-  static accelerator_inline void DhopDirK(StencilView &st, DoubledGaugeFieldView &U,SiteHalfSpinor * buf,
-                                          int sF, int sU, const FermionFieldView &in, FermionFieldView &out, int dirdisp, int gamma);
+  static accelerator_inline void DhopDirK(const StencilView &st, const DoubledGaugeFieldView &U,
+                                          SiteHalfSpinor * buf, int sF, int sU,
+                                          const FermionFieldView &in,const FermionFieldView &out, int dirdisp, int gamma);

-  static accelerator_inline void DhopDirXp(StencilView &st,DoubledGaugeFieldView &U,SiteHalfSpinor *buf,int sF,int sU,const FermionFieldView &in,FermionFieldView &out,int dirdisp);
-  static accelerator_inline void DhopDirYp(StencilView &st,DoubledGaugeFieldView &U,SiteHalfSpinor *buf,int sF,int sU,const FermionFieldView &in,FermionFieldView &out,int dirdisp);
-  static accelerator_inline void DhopDirZp(StencilView &st,DoubledGaugeFieldView &U,SiteHalfSpinor *buf,int sF,int sU,const FermionFieldView &in,FermionFieldView &out,int dirdisp);
-  static accelerator_inline void DhopDirTp(StencilView &st,DoubledGaugeFieldView &U,SiteHalfSpinor *buf,int sF,int sU,const FermionFieldView &in,FermionFieldView &out,int dirdisp);
-  static accelerator_inline void DhopDirXm(StencilView &st,DoubledGaugeFieldView &U,SiteHalfSpinor *buf,int sF,int sU,const FermionFieldView &in,FermionFieldView &out,int dirdisp);
-  static accelerator_inline void DhopDirYm(StencilView &st,DoubledGaugeFieldView &U,SiteHalfSpinor *buf,int sF,int sU,const FermionFieldView &in,FermionFieldView &out,int dirdisp);
-  static accelerator_inline void DhopDirZm(StencilView &st,DoubledGaugeFieldView &U,SiteHalfSpinor *buf,int sF,int sU,const FermionFieldView &in,FermionFieldView &out,int dirdisp);
-  static accelerator_inline void DhopDirTm(StencilView &st,DoubledGaugeFieldView &U,SiteHalfSpinor *buf,int sF,int sU,const FermionFieldView &in,FermionFieldView &out,int dirdisp);
+  static accelerator_inline void DhopDirXp(const StencilView &st,const DoubledGaugeFieldView &U,SiteHalfSpinor *buf,int sF,int sU,
+                                           const FermionFieldView &in, const FermionFieldView &out,int dirdisp);
+  static accelerator_inline void DhopDirYp(const StencilView &st,const DoubledGaugeFieldView &U,SiteHalfSpinor *buf,int sF,int sU,
+                                           const FermionFieldView &in, const FermionFieldView &out,int dirdisp);
+  static accelerator_inline void DhopDirZp(const StencilView &st,const DoubledGaugeFieldView &U,SiteHalfSpinor *buf,int sF,int sU,
+                                           const FermionFieldView &in, const FermionFieldView &out,int dirdisp);
+  static accelerator_inline void DhopDirTp(const StencilView &st,const DoubledGaugeFieldView &U,SiteHalfSpinor *buf,int sF,int sU,
+                                           const FermionFieldView &in, const FermionFieldView &out,int dirdisp);
+  static accelerator_inline void DhopDirXm(const StencilView &st,const DoubledGaugeFieldView &U,SiteHalfSpinor *buf,int sF,int sU,
+                                           const FermionFieldView &in, const FermionFieldView &out,int dirdisp);
+  static accelerator_inline void DhopDirYm(const StencilView &st,const DoubledGaugeFieldView &U,SiteHalfSpinor *buf,int sF,int sU,
+                                           const FermionFieldView &in, const FermionFieldView &out,int dirdisp);
+  static accelerator_inline void DhopDirZm(const StencilView &st,const DoubledGaugeFieldView &U,SiteHalfSpinor *buf,int sF,int sU,
+                                           const FermionFieldView &in, const FermionFieldView &out,int dirdisp);
+  static accelerator_inline void DhopDirTm(const StencilView &st,const DoubledGaugeFieldView &U,SiteHalfSpinor *buf,int sF,int sU,
+                                           const FermionFieldView &in, const FermionFieldView &out,int dirdisp);

   // Specialised variants
-  static accelerator void GenericDhopSite(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor * buf,
-                                          int sF, int sU, const FermionFieldView &in, FermionFieldView &out);
-
-  static accelerator void GenericDhopSiteDag(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor * buf,
-                                             int sF, int sU, const FermionFieldView &in, FermionFieldView &out);
-
-  static accelerator void GenericDhopSiteInt(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor * buf,
-                                             int sF, int sU, const FermionFieldView &in, FermionFieldView &out);
-
-  static accelerator void GenericDhopSiteDagInt(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor * buf,
-                                                int sF, int sU, const FermionFieldView &in, FermionFieldView &out);
-
-  static accelerator void GenericDhopSiteExt(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor * buf,
-                                             int sF, int sU, const FermionFieldView &in, FermionFieldView &out);
-
-  static accelerator void GenericDhopSiteDagExt(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor * buf,
-                                                int sF, int sU, const FermionFieldView &in, FermionFieldView &out);
+  static accelerator void GenericDhopSite(const StencilView &st,
+                                          const DoubledGaugeFieldView &U, SiteHalfSpinor * buf,
+                                          int sF, int sU, const FermionFieldView &in, const FermionFieldView &out);
+
+  static accelerator void GenericDhopSiteDag(const StencilView &st, const DoubledGaugeFieldView &U, SiteHalfSpinor * buf,
+                                             int sF, int sU, const FermionFieldView &in, const FermionFieldView &out);
+
+  static accelerator void GenericDhopSiteInt(const StencilView &st, const DoubledGaugeFieldView &U, SiteHalfSpinor * buf,
+                                             int sF, int sU, const FermionFieldView &in, const FermionFieldView &out);
+
+  static accelerator void GenericDhopSiteDagInt(const StencilView &st, const DoubledGaugeFieldView &U, SiteHalfSpinor * buf,
+                                                int sF, int sU, const FermionFieldView &in, const FermionFieldView &out);
+
+  static accelerator void GenericDhopSiteExt(const StencilView &st, const DoubledGaugeFieldView &U, SiteHalfSpinor * buf,
+                                             int sF, int sU, const FermionFieldView &in, const FermionFieldView &out);
+
+  static accelerator void GenericDhopSiteDagExt(const StencilView &st, const DoubledGaugeFieldView &U, SiteHalfSpinor * buf,
+                                                int sF, int sU, const FermionFieldView &in, const FermionFieldView &out);

   static void AsmDhopSite(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor * buf,
                           int sF, int sU, int Ls, int Nsite, const FermionFieldView &in,FermionFieldView &out);

   static void AsmDhopSiteDag(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor * buf,
                              int sF, int sU, int Ls, int Nsite, const FermionFieldView &in, FermionFieldView &out);

   static void AsmDhopSiteInt(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor * buf,
                              int sF, int sU, int Ls, int Nsite, const FermionFieldView &in,FermionFieldView &out);

   static void AsmDhopSiteDagInt(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor * buf,
                                 int sF, int sU, int Ls, int Nsite, const FermionFieldView &in, FermionFieldView &out);

   static void AsmDhopSiteExt(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor * buf,
                              int sF, int sU, int Ls, int Nsite, const FermionFieldView &in,FermionFieldView &out);

   static void AsmDhopSiteDagExt(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor * buf,
                                 int sF, int sU, int Ls, int Nsite, const FermionFieldView &in, FermionFieldView &out);
+  // Keep Hand unrolled
+  static accelerator void HandDhopSiteSycl(StencilVector st_perm, StencilEntry *st_p, SiteDoubledGaugeField *U, SiteHalfSpinor * buf,
+                                           int sF, int sU, const SiteSpinor *in, SiteSpinor *out);
+
   // Keep Hand unrolled temporarily
-  static accelerator void HandDhopSite(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor * buf,
-                                       int sF, int sU, const FermionFieldView &in, FermionFieldView &out);
+  static accelerator void HandDhopSite(const StencilView &st, const DoubledGaugeFieldView &U, SiteHalfSpinor * buf,
+                                       int sF, int sU, const FermionFieldView &in,const FermionFieldView &out);

-  static accelerator void HandDhopSiteDag(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor * buf,
-                                          int sF, int sU, const FermionFieldView &in, FermionFieldView &out);
+  static accelerator void HandDhopSiteDag(const StencilView &st, const DoubledGaugeFieldView &U, SiteHalfSpinor * buf,
+                                          int sF, int sU, const FermionFieldView &in, const FermionFieldView &out);

-  static accelerator void HandDhopSiteInt(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor * buf,
-                                          int sF, int sU, const FermionFieldView &in, FermionFieldView &out);
+  static accelerator void HandDhopSiteInt(const StencilView &st, const DoubledGaugeFieldView &U, SiteHalfSpinor * buf,
+                                          int sF, int sU, const FermionFieldView &in, const FermionFieldView &out);
+
+  static accelerator void HandDhopSiteDagInt(const StencilView &st, const DoubledGaugeFieldView &U, SiteHalfSpinor * buf,
+                                             int sF, int sU, const FermionFieldView &in, const FermionFieldView &out);
|
||||
static accelerator void HandDhopSiteExt(const StencilView &st, const DoubledGaugeFieldView &U, SiteHalfSpinor * buf,
|
||||
int sF, int sU, const FermionFieldView &in, const FermionFieldView &out);
|
||||
|
||||
static accelerator void HandDhopSiteDagExt(const StencilView &st, const DoubledGaugeFieldView &U, SiteHalfSpinor * buf,
|
||||
int sF, int sU, const FermionFieldView &in, const FermionFieldView &out);
|
||||
//AVX 512 ASM
|
||||
static void AsmDhopSite(const StencilView &st, const DoubledGaugeFieldView &U, SiteHalfSpinor * buf,
|
||||
int sF, int sU, int Ls, int Nsite, const FermionFieldView &in,const FermionFieldView &out);
|
||||
|
||||
static accelerator void HandDhopSiteDagInt(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor * buf,
|
||||
int sF, int sU, const FermionFieldView &in, FermionFieldView &out);
|
||||
static void AsmDhopSiteDag(const StencilView &st, const DoubledGaugeFieldView &U, SiteHalfSpinor * buf,
|
||||
int sF, int sU, int Ls, int Nsite, const FermionFieldView &in, const FermionFieldView &out);
|
||||
|
||||
static accelerator void HandDhopSiteExt(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor * buf,
|
||||
int sF, int sU, const FermionFieldView &in, FermionFieldView &out);
|
||||
static void AsmDhopSiteInt(const StencilView &st, const DoubledGaugeFieldView &U, SiteHalfSpinor * buf,
|
||||
int sF, int sU, int Ls, int Nsite, const FermionFieldView &in,const FermionFieldView &out);
|
||||
|
||||
static accelerator void HandDhopSiteDagExt(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor * buf,
|
||||
int sF, int sU, const FermionFieldView &in, FermionFieldView &out);
|
||||
static void AsmDhopSiteDagInt(const StencilView &st, const DoubledGaugeFieldView &U, SiteHalfSpinor * buf,
|
||||
int sF, int sU, int Ls, int Nsite, const FermionFieldView &in, const FermionFieldView &out);
|
||||
|
||||
static void AsmDhopSiteExt(const StencilView &st, const DoubledGaugeFieldView &U, SiteHalfSpinor * buf,
|
||||
int sF, int sU, int Ls, int Nsite, const FermionFieldView &in,const FermionFieldView &out);
|
||||
|
||||
static void AsmDhopSiteDagExt(const StencilView &st, const DoubledGaugeFieldView &U, SiteHalfSpinor * buf,
|
||||
int sF, int sU, int Ls, int Nsite, const FermionFieldView &in, const FermionFieldView &out);
|
||||
|
||||
public:
|
||||
WilsonKernels(const ImplParams &p = ImplParams()) : Base(p){};
|
||||
};
|
||||
|
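The common thread in all of these signature changes is that stencil, gauge and fermion views are now taken by const reference. A plausible reading, sketched below with illustrative names rather than Grid's actual API: a device lambda captures views by value, and inside a non-mutable lambda that copy is const, so only const-qualified view access compiles.

// Minimal sketch, not Grid code: why const-qualified view access matters
// for accelerator lambdas. The [=] capture copies 'v'; inside a non-mutable
// lambda that copy is const, so v.read(i) must be a const member function.
struct View {
  const double *data;
  double read(int i) const { return data[i]; }   // const access: OK in lambda
};

template <class F> void accelerator_for_sketch(int n, F f) {
  for (int i = 0; i < n; i++) f(i);              // stand-in for a device loop
}

void example(View v, double *out, int n) {
  accelerator_for_sketch(n, [=](int i) {         // 'v' is a const copy here
    out[i] = 2.0 * v.read(i);
  });
}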
@@ -642,7 +642,7 @@ void CayleyFermion5D<Impl>::ContractConservedCurrent( PropagatorField &q_in_1,
                                                      Current curr_type,
                                                      unsigned int mu)
 {
-#if (!defined(GRID_CUDA)) && (!defined(GRID_HIP))
+#if (!defined(GRID_HIP))
   Gamma::Algebra Gmu [] = {
     Gamma::Algebra::GammaX,
     Gamma::Algebra::GammaY,
@@ -799,7 +799,7 @@ void CayleyFermion5D<Impl>::SeqConservedCurrent(PropagatorField &q_in,

   PropagatorField tmp(UGrid);
   PropagatorField Utmp(UGrid);
-  LatticeInteger zz (UGrid);   zz=0.0;
+  PropagatorField zz (UGrid);  zz=0.0;
   LatticeInteger lcoor(UGrid); LatticeCoordinate(lcoor,Nd-1);
   for (int s=0;s<Ls;s++) {

@@ -826,7 +826,7 @@ void CayleyFermion5D<Impl>::SeqConservedCurrent(PropagatorField &q_in,
   }
 #endif

-#if (!defined(GRID_CUDA)) && (!defined(GRID_HIP))
+#if (!defined(GRID_HIP))
   int tshift = (mu == Nd-1) ? 1 : 0;
   ////////////////////////////////////////////////
   // GENERAL CAYLEY CASE
@@ -850,7 +850,7 @@ void CayleyFermion5D<Impl>::SeqConservedCurrent(PropagatorField &q_in,
   PropagatorField tmp(UGrid);
   PropagatorField Utmp(UGrid);

-  LatticeInteger zz (UGrid);   zz=0.0;
+  PropagatorField zz (UGrid);  zz=0.0;
   LatticeInteger lcoor(UGrid); LatticeCoordinate(lcoor,Nd-1);

   for(int s=0;s<Ls;s++){
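The zz change above swaps a LatticeInteger for a typed PropagatorField zero. A sketch of the likely motivation, assuming Grid's where(pred, a, b) semantics in which both branches must have the lattice type of the result:

// Sketch only, under the stated assumption about where(): the zero branch
// must be a PropagatorField so its type matches the other branch.
PropagatorField zz(UGrid);   zz = 0.0;                 // typed zero field
LatticeInteger lcoor(UGrid); LatticeCoordinate(lcoor, Nd-1);
// Keep tmp only on time slices in [tmin, tmax]; elsewhere substitute zero.
tmp = where((lcoor >= (Integer)tmin) and (lcoor <= (Integer)tmax), tmp, zz);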
@@ -618,11 +618,13 @@ Author: paboyle <paboyle@ph.ed.ac.uk>
 NAMESPACE_BEGIN(Grid);

 template <class Impl>
-void StaggeredKernels<Impl>::DhopSiteAsm(StencilView &st,
-                                         DoubledGaugeFieldView &U,
-                                         DoubledGaugeFieldView &UUU,
-                                         SiteSpinor *buf, int sF,
-                                         int sU, const FermionFieldView &in, FermionFieldView &out,int dag)
+void StaggeredKernels<Impl>::DhopSiteAsm(const StencilView &st,
+                                         const DoubledGaugeFieldView &U,
+                                         const DoubledGaugeFieldView &UUU,
+                                         SiteSpinor *buf, int sF,
+                                         int sU,
+                                         const FermionFieldView &in,
+                                         const FermionFieldView &out,int dag)
 {
   assert(0);
 };
@@ -683,11 +685,13 @@ void StaggeredKernels<Impl>::DhopSiteAsm(StencilView &st,

 // This is the single precision 5th direction vectorised kernel
 #include <Grid/simd/Intel512single.h>
-template <> void StaggeredKernels<StaggeredVec5dImplF>::DhopSiteAsm(StencilView &st,
-                                                                    DoubledGaugeFieldView &U,
-                                                                    DoubledGaugeFieldView &UUU,
-                                                                    SiteSpinor *buf, int sF,
-                                                                    int sU, const FermionFieldView &in, FermionFieldView &out,int dag)
+template <> void StaggeredKernels<StaggeredVec5dImplF>::DhopSiteAsm(const StencilView &st,
+                                                                    const DoubledGaugeFieldView &U,
+                                                                    const DoubledGaugeFieldView &UUU,
+                                                                    SiteSpinor *buf, int sF,
+                                                                    int sU,
+                                                                    const FermionFieldView &in,
+                                                                    const FermionFieldView &out,int dag)
 {
 #ifdef AVX512
   uint64_t gauge0,gauge1,gauge2,gauge3;
@@ -738,11 +742,13 @@ template <> void StaggeredKernels<StaggeredVec5dImplF>::DhopSiteAsm(StencilView
 }

 #include <Grid/simd/Intel512double.h>
-template <> void StaggeredKernels<StaggeredVec5dImplD>::DhopSiteAsm(StencilView &st,
-                                                                    DoubledGaugeFieldView &U,
-                                                                    DoubledGaugeFieldView &UUU,
-                                                                    SiteSpinor *buf, int sF,
-                                                                    int sU, const FermionFieldView &in, FermionFieldView &out, int dag)
+template <> void StaggeredKernels<StaggeredVec5dImplD>::DhopSiteAsm(const StencilView &st,
+                                                                    const DoubledGaugeFieldView &U,
+                                                                    const DoubledGaugeFieldView &UUU,
+                                                                    SiteSpinor *buf, int sF,
+                                                                    int sU,
+                                                                    const FermionFieldView &in,
+                                                                    const FermionFieldView &out, int dag)
 {
 #ifdef AVX512
   uint64_t gauge0,gauge1,gauge2,gauge3;
@@ -824,11 +830,13 @@ template <> void StaggeredKernels<StaggeredVec5dImplD>::DhopSiteAsm(StencilView
 // This is the single precision 5th direction vectorised kernel

 #include <Grid/simd/Intel512single.h>
-template <> void StaggeredKernels<StaggeredImplF>::DhopSiteAsm(StencilView &st,
-                                                               DoubledGaugeFieldView &U,
-                                                               DoubledGaugeFieldView &UUU,
-                                                               SiteSpinor *buf, int sF,
-                                                               int sU, const FermionFieldView &in, FermionFieldView &out,int dag)
+template <> void StaggeredKernels<StaggeredImplF>::DhopSiteAsm(const StencilView &st,
+                                                               const DoubledGaugeFieldView &U,
+                                                               const DoubledGaugeFieldView &UUU,
+                                                               SiteSpinor *buf, int sF,
+                                                               int sU,
+                                                               const FermionFieldView &in,
+                                                               const FermionFieldView &out,int dag)
 {
 #ifdef AVX512
   uint64_t gauge0,gauge1,gauge2,gauge3;
@@ -893,11 +901,13 @@ template <> void StaggeredKernels<StaggeredImplF>::DhopSiteAsm(StencilView &st,
 }

 #include <Grid/simd/Intel512double.h>
-template <> void StaggeredKernels<StaggeredImplD>::DhopSiteAsm(StencilView &st,
-                                                               DoubledGaugeFieldView &U,
-                                                               DoubledGaugeFieldView &UUU,
-                                                               SiteSpinor *buf, int sF,
-                                                               int sU, const FermionFieldView &in, FermionFieldView &out,int dag)
+template <> void StaggeredKernels<StaggeredImplD>::DhopSiteAsm(const StencilView &st,
+                                                               const DoubledGaugeFieldView &U,
+                                                               const DoubledGaugeFieldView &UUU,
+                                                               SiteSpinor *buf, int sF,
+                                                               int sU,
+                                                               const FermionFieldView &in,
+                                                               const FermionFieldView &out,int dag)
 {
 #ifdef AVX512
   uint64_t gauge0,gauge1,gauge2,gauge3;
@@ -146,11 +146,13 @@ NAMESPACE_BEGIN(Grid);


 template <class Impl>
-template <int Naik>
-void StaggeredKernels<Impl>::DhopSiteHand(StencilView &st,
-                                          DoubledGaugeFieldView &U, DoubledGaugeFieldView &UUU,
-                                          SiteSpinor *buf, int sF, int sU,
-                                          const FermionFieldView &in, FermionFieldView &out,int dag)
+template <int Naik> accelerator_inline
+void StaggeredKernels<Impl>::DhopSiteHand(const StencilView &st,
+                                          const DoubledGaugeFieldView &U,
+                                          const DoubledGaugeFieldView &UUU,
+                                          SiteSpinor *buf, int sF, int sU,
+                                          const FermionFieldView &in,
+                                          const FermionFieldView &out,int dag)
 {
   typedef typename Simd::scalar_type S;
   typedef typename Simd::vector_type V;
@@ -221,11 +223,13 @@ void StaggeredKernels<Impl>::DhopSiteHand(StencilView &st,


 template <class Impl>
-template <int Naik>
-void StaggeredKernels<Impl>::DhopSiteHandInt(StencilView &st,
-                                             DoubledGaugeFieldView &U, DoubledGaugeFieldView &UUU,
-                                             SiteSpinor *buf, int sF, int sU,
-                                             const FermionFieldView &in, FermionFieldView &out,int dag)
+template <int Naik> accelerator_inline
+void StaggeredKernels<Impl>::DhopSiteHandInt(const StencilView &st,
+                                             const DoubledGaugeFieldView &U,
+                                             const DoubledGaugeFieldView &UUU,
+                                             SiteSpinor *buf, int sF, int sU,
+                                             const FermionFieldView &in,
+                                             const FermionFieldView &out,int dag)
 {
   typedef typename Simd::scalar_type S;
   typedef typename Simd::vector_type V;
@@ -300,11 +304,13 @@ void StaggeredKernels<Impl>::DhopSiteHandInt(StencilView &st,


 template <class Impl>
-template <int Naik>
-void StaggeredKernels<Impl>::DhopSiteHandExt(StencilView &st,
-                                             DoubledGaugeFieldView &U, DoubledGaugeFieldView &UUU,
-                                             SiteSpinor *buf, int sF, int sU,
-                                             const FermionFieldView &in, FermionFieldView &out,int dag)
+template <int Naik> accelerator_inline
+void StaggeredKernels<Impl>::DhopSiteHandExt(const StencilView &st,
+                                             const DoubledGaugeFieldView &U,
+                                             const DoubledGaugeFieldView &UUU,
+                                             SiteSpinor *buf, int sF, int sU,
+                                             const FermionFieldView &in,
+                                             const FermionFieldView &out,int dag)
 {
   typedef typename Simd::scalar_type S;
   typedef typename Simd::vector_type V;
@@ -78,11 +78,11 @@ StaggeredKernels<Impl>::StaggeredKernels(const ImplParams &p) : Base(p){};
 // Int, Ext, Int+Ext cases for comms overlap
 ////////////////////////////////////////////////////////////////////////////////////
 template <class Impl>
-template <int Naik>
-void StaggeredKernels<Impl>::DhopSiteGeneric(StencilView &st,
-                                             DoubledGaugeFieldView &U, DoubledGaugeFieldView &UUU,
-                                             SiteSpinor *buf, int sF, int sU,
-                                             const FermionFieldView &in, FermionFieldView &out, int dag)
+template <int Naik> accelerator_inline
+void StaggeredKernels<Impl>::DhopSiteGeneric(const StencilView &st,
+                                             const DoubledGaugeFieldView &U, const DoubledGaugeFieldView &UUU,
+                                             SiteSpinor *buf, int sF, int sU,
+                                             const FermionFieldView &in, const FermionFieldView &out, int dag)
 {
   const SiteSpinor *chi_p;
   SiteSpinor chi;
@@ -126,11 +126,12 @@ void StaggeredKernels<Impl>::DhopSiteGeneric(StencilView &st,
 // Only contributions from interior of our node
 ///////////////////////////////////////////////////
 template <class Impl>
-template <int Naik>
-void StaggeredKernels<Impl>::DhopSiteGenericInt(StencilView &st,
-                                                DoubledGaugeFieldView &U, DoubledGaugeFieldView &UUU,
-                                                SiteSpinor *buf, int sF, int sU,
-                                                const FermionFieldView &in, FermionFieldView &out,int dag) {
+template <int Naik> accelerator_inline
+void StaggeredKernels<Impl>::DhopSiteGenericInt(const StencilView &st,
+                                                const DoubledGaugeFieldView &U, const DoubledGaugeFieldView &UUU,
+                                                SiteSpinor *buf, int sF, int sU,
+                                                const FermionFieldView &in, const FermionFieldView &out,int dag)
+{
   const SiteSpinor *chi_p;
   SiteSpinor chi;
   SiteSpinor Uchi;
@@ -174,11 +175,14 @@ void StaggeredKernels<Impl>::DhopSiteGenericInt(StencilView &st,
 // Only contributions from exterior of our node
 ///////////////////////////////////////////////////
 template <class Impl>
-template <int Naik>
-void StaggeredKernels<Impl>::DhopSiteGenericExt(StencilView &st,
-                                                DoubledGaugeFieldView &U, DoubledGaugeFieldView &UUU,
-                                                SiteSpinor *buf, int sF, int sU,
-                                                const FermionFieldView &in, FermionFieldView &out,int dag) {
+template <int Naik> accelerator_inline
+void StaggeredKernels<Impl>::DhopSiteGenericExt(const StencilView &st,
+                                                const DoubledGaugeFieldView &U,
+                                                const DoubledGaugeFieldView &UUU,
+                                                SiteSpinor *buf, int sF, int sU,
+                                                const FermionFieldView &in,
+                                                const FermionFieldView &out,int dag)
+{
   const SiteSpinor *chi_p;
   //  SiteSpinor chi;
   SiteSpinor Uchi;
@@ -224,9 +228,14 @@ void StaggeredKernels<Impl>::DhopSiteGenericExt(StencilView &st,
 ////////////////////////////////////////////////////////////////////////////////////
 // Driving / wrapping routine to select right kernel
 ////////////////////////////////////////////////////////////////////////////////////
 template <class Impl>
-void StaggeredKernels<Impl>::DhopDirKernel(StencilImpl &st, DoubledGaugeFieldView &U, DoubledGaugeFieldView &UUU, SiteSpinor * buf,
-                                           int sF, int sU, const FermionFieldView &in, FermionFieldView &out, int dir,int disp)
+void StaggeredKernels<Impl>::DhopDirKernel(StencilImpl &st,
+                                           const DoubledGaugeFieldView &U,
+                                           const DoubledGaugeFieldView &UUU,
+                                           SiteSpinor * buf,
+                                           int sF, int sU,
+                                           const FermionFieldView &in,
+                                           const FermionFieldView &out, int dir,int disp)
 {
   // Disp should be either +1,-1,+3,-3
   // What about "dag" ?
@@ -253,8 +262,9 @@ void StaggeredKernels<Impl>::DhopDirKernel(StencilImpl &st, DoubledGaugeFieldVie
       ThisKernel::A(st_v,U_v,UUU_v,buf,sF,sU,in_v,out_v,dag); \
     });

 template <class Impl>
-void StaggeredKernels<Impl>::DhopImproved(StencilImpl &st, LebesgueOrder &lo,
+void StaggeredKernels<Impl>::DhopImproved(StencilImpl &st,
+                                          LebesgueOrder &lo,
                                           DoubledGaugeField &U, DoubledGaugeField &UUU,
                                           const FermionField &in, FermionField &out, int dag, int interior,int exterior)
 {
@@ -293,7 +303,7 @@ void StaggeredKernels<Impl>::DhopImproved(StencilImpl &st, LebesgueOrder &lo,
   }
   assert(0 && " Kernel optimisation case not covered ");
 }
 template <class Impl>
 void StaggeredKernels<Impl>::DhopNaive(StencilImpl &st, LebesgueOrder &lo,
                                        DoubledGaugeField &U,
                                        const FermionField &in, FermionField &out, int dag, int interior,int exterior)
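The ThisKernel::A line in the hunk above is where the driving routine stamps the selected site kernel into an accelerator loop. A stripped-down sketch of that dispatch shape, with illustrative names only (not Grid's actual macros):

// Illustrative dispatch sketch: bind one site-kernel variant as a type,
// then launch it once per site.
struct GenericKernel {
  static void A(int site, const double *in, double *out) { out[site] = 2.0*in[site]; }
};

template <class ThisKernel>
void launch_sites(int nsites, const double *in, double *out) {
  for (int s = 0; s < nsites; s++)  // stand-in for accelerator_for
    ThisKernel::A(s, in, out);      // one site of work per iteration
}
// usage: launch_sites<GenericKernel>(nsites, in, out);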
@@ -92,20 +92,16 @@ void WilsonCloverFermion<Impl>::ImportGauge(const GaugeField &_Umu)
   int lvol = _Umu.Grid()->lSites();
   int DimRep = Impl::Dimension;

-  Eigen::MatrixXcd EigenCloverOp = Eigen::MatrixXcd::Zero(Ns * DimRep, Ns * DimRep);
-  Eigen::MatrixXcd EigenInvCloverOp = Eigen::MatrixXcd::Zero(Ns * DimRep, Ns * DimRep);
-
-  Coordinate lcoor;
-  typename SiteCloverType::scalar_object Qx = Zero(), Qxinv = Zero();
-
   {
     autoView(CTv,CloverTerm,CpuRead);
     autoView(CTIv,CloverTermInv,CpuWrite);
-    for (int site = 0; site < lvol; site++) {
+    thread_for(site, lvol, {
+      Coordinate lcoor;
       grid->LocalIndexToLocalCoor(site, lcoor);
-      EigenCloverOp = Eigen::MatrixXcd::Zero(Ns * DimRep, Ns * DimRep);
+      Eigen::MatrixXcd EigenCloverOp = Eigen::MatrixXcd::Zero(Ns * DimRep, Ns * DimRep);
+      Eigen::MatrixXcd EigenInvCloverOp = Eigen::MatrixXcd::Zero(Ns * DimRep, Ns * DimRep);
+      typename SiteCloverType::scalar_object Qx = Zero(), Qxinv = Zero();
       peekLocalSite(Qx, CTv, lcoor);
       Qxinv = Zero();
       //if (csw!=0){
       for (int j = 0; j < Ns; j++)
         for (int k = 0; k < Ns; k++)
@@ -126,21 +122,21 @@ void WilsonCloverFermion<Impl>::ImportGauge(const GaugeField &_Umu)
       // if (site==0) std::cout << "site =" << site << "\n" << EigenInvCloverOp << std::endl;
       // }
       pokeLocalSite(Qxinv, CTIv, lcoor);
-    }
+    });
   }

   // Separate the even and odd parts
   pickCheckerboard(Even, CloverTermEven, CloverTerm);
   pickCheckerboard(Odd, CloverTermOdd, CloverTerm);

-  pickCheckerboard(Even, CloverTermDagEven, closure(adj(CloverTerm)));
-  pickCheckerboard(Odd, CloverTermDagOdd, closure(adj(CloverTerm)));
+  pickCheckerboard(Even, CloverTermDagEven, adj(CloverTerm));
+  pickCheckerboard(Odd, CloverTermDagOdd, adj(CloverTerm));

   pickCheckerboard(Even, CloverTermInvEven, CloverTermInv);
   pickCheckerboard(Odd, CloverTermInvOdd, CloverTermInv);

-  pickCheckerboard(Even, CloverTermInvDagEven, closure(adj(CloverTermInv)));
-  pickCheckerboard(Odd, CloverTermInvDagOdd, closure(adj(CloverTermInv)));
+  pickCheckerboard(Even, CloverTermInvDagEven, adj(CloverTermInv));
+  pickCheckerboard(Odd, CloverTermInvDagOdd, adj(CloverTermInv));
 }

 template <class Impl>
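Moving the Coordinate and Eigen temporaries inside the loop body is what makes the switch to thread_for safe: each iteration now owns its scratch state. A generic sketch of the same transformation, using plain C++ threads rather than Grid's thread_for:

#include <thread>
#include <vector>

// Sketch: per-iteration scratch makes a site loop trivially parallel;
// a single shared, reused scratch variable (the old pattern) would race.
void parallel_sites(int lvol, int nthreads, const double *in, double *out) {
  std::vector<std::thread> pool;
  for (int t = 0; t < nthreads; t++) {
    pool.emplace_back([=] {
      for (int site = t; site < lvol; site += nthreads) {
        double scratch = in[site] * in[site]; // thread-local, no sharing
        out[site] = scratch;
      }
    });
  }
  for (auto &th : pool) th.join();
}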
@@ -43,7 +43,7 @@ WilsonFermion<Impl>::WilsonFermion(GaugeField &_Umu, GridCartesian &Fgrid,
                                    GridRedBlackCartesian &Hgrid, RealD _mass,
                                    const ImplParams &p,
                                    const WilsonAnisotropyCoefficients &anis)
   :
     Kernels(p),
     _grid(&Fgrid),
     _cbgrid(&Hgrid),
@@ -75,8 +75,93 @@ WilsonFermion<Impl>::WilsonFermion(GaugeField &_Umu, GridCartesian &Fgrid,
   StencilOdd.BuildSurfaceList(1,vol4);
 }

+template<class Impl>
+void WilsonFermion<Impl>::Report(void)
+{
+  RealD NP = _grid->_Nprocessors;
+  RealD NN = _grid->NodeCount();
+  RealD volume = 1;
+  Coordinate latt = _grid->GlobalDimensions();
+  for(int mu=0;mu<Nd;mu++) volume=volume*latt[mu];
+
+  if ( DhopCalls > 0 ) {
+    std::cout << GridLogMessage << "#### Dhop calls report " << std::endl;
+    std::cout << GridLogMessage << "WilsonFermion Number of DhopEO Calls : " << DhopCalls << std::endl;
+    std::cout << GridLogMessage << "WilsonFermion TotalTime   /Calls     : " << DhopTotalTime / DhopCalls << " us" << std::endl;
+    std::cout << GridLogMessage << "WilsonFermion CommTime    /Calls     : " << DhopCommTime / DhopCalls << " us" << std::endl;
+    std::cout << GridLogMessage << "WilsonFermion FaceTime    /Calls     : " << DhopFaceTime / DhopCalls << " us" << std::endl;
+    std::cout << GridLogMessage << "WilsonFermion ComputeTime1/Calls     : " << DhopComputeTime / DhopCalls << " us" << std::endl;
+    std::cout << GridLogMessage << "WilsonFermion ComputeTime2/Calls     : " << DhopComputeTime2/ DhopCalls << " us" << std::endl;
+
+    // Average the compute time
+    _grid->GlobalSum(DhopComputeTime);
+    DhopComputeTime/=NP;
+    RealD mflops = 1320*volume*DhopCalls/DhopComputeTime/2; // 2 for red black counting
+    std::cout << GridLogMessage << "Average mflops/s per call                : " << mflops << std::endl;
+    std::cout << GridLogMessage << "Average mflops/s per call per rank       : " << mflops/NP << std::endl;
+    std::cout << GridLogMessage << "Average mflops/s per call per node       : " << mflops/NN << std::endl;
+
+    RealD Fullmflops = 1320*volume*DhopCalls/(DhopTotalTime)/2; // 2 for red black counting
+    std::cout << GridLogMessage << "Average mflops/s per call (full)         : " << Fullmflops << std::endl;
+    std::cout << GridLogMessage << "Average mflops/s per call per rank (full): " << Fullmflops/NP << std::endl;
+    std::cout << GridLogMessage << "Average mflops/s per call per node (full): " << Fullmflops/NN << std::endl;
+  }
+
+  if ( DerivCalls > 0 ) {
+    std::cout << GridLogMessage << "#### Deriv calls report "<< std::endl;
+    std::cout << GridLogMessage << "WilsonFermion Number of Deriv Calls  : " << DerivCalls << std::endl;
+    std::cout << GridLogMessage << "WilsonFermion CommTime/Calls         : " << DerivCommTime/DerivCalls << " us" << std::endl;
+    std::cout << GridLogMessage << "WilsonFermion ComputeTime/Calls      : " << DerivComputeTime/DerivCalls << " us" << std::endl;
+    std::cout << GridLogMessage << "WilsonFermion Dhop ComputeTime/Calls : " << DerivDhopComputeTime/DerivCalls << " us" << std::endl;
+
+    // how to count flops here?
+    RealD mflops = 144*volume*DerivCalls/DerivDhopComputeTime;
+    std::cout << GridLogMessage << "Average mflops/s per call ?          : " << mflops << std::endl;
+    std::cout << GridLogMessage << "Average mflops/s per call per node ? : " << mflops/NP << std::endl;
+
+    // how to count flops here?
+    RealD Fullmflops = 144*volume*DerivCalls/(DerivDhopComputeTime+DerivCommTime)/2; // 2 for red black counting
+    std::cout << GridLogMessage << "Average mflops/s per call (full) ?          : " << Fullmflops << std::endl;
+    std::cout << GridLogMessage << "Average mflops/s per call per node (full) ? : " << Fullmflops/NP << std::endl;
+  }
+
+  if (DerivCalls > 0 || DhopCalls > 0){
+    std::cout << GridLogMessage << "WilsonFermion Stencil"     << std::endl; Stencil.Report();
+    std::cout << GridLogMessage << "WilsonFermion StencilEven" << std::endl; StencilEven.Report();
+    std::cout << GridLogMessage << "WilsonFermion StencilOdd"  << std::endl; StencilOdd.Report();
+  }
+  if ( DhopCalls > 0){
+    std::cout << GridLogMessage << "WilsonFermion Stencil     Reporti()" << std::endl; Stencil.Reporti(DhopCalls);
+    std::cout << GridLogMessage << "WilsonFermion StencilEven Reporti()" << std::endl; StencilEven.Reporti(DhopCalls);
+    std::cout << GridLogMessage << "WilsonFermion StencilOdd  Reporti()" << std::endl; StencilOdd.Reporti(DhopCalls);
+  }
+}
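The mflops figures printed above follow directly from the timers: DhopComputeTime is accumulated in microseconds, so flops per microsecond are already Mflop/s. Under the conventional counting assumed here, a Wilson dslash costs 1320 flops per lattice site, and the factor of 2 reflects that each red-black call visits only half the sites:

  Mflop/s        = 1320 * V * DhopCalls / (DhopComputeTime[us] * 2)
  Mflop/s (full) = 1320 * V * DhopCalls / (DhopTotalTime[us]   * 2)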
+template<class Impl>
+void WilsonFermion<Impl>::ZeroCounters(void) {
+  DhopCalls       = 0; // ok
+  DhopCommTime    = 0;
+  DhopComputeTime = 0;
+  DhopComputeTime2= 0;
+  DhopFaceTime    = 0;
+  DhopTotalTime   = 0;
+
+  DerivCalls       = 0; // ok
+  DerivCommTime    = 0;
+  DerivComputeTime = 0;
+  DerivDhopComputeTime = 0;
+
+  Stencil.ZeroCounters();
+  StencilEven.ZeroCounters();
+  StencilOdd.ZeroCounters();
+  Stencil.ZeroCountersi();
+  StencilEven.ZeroCountersi();
+  StencilOdd.ZeroCountersi();
+}

 template <class Impl>
 void WilsonFermion<Impl>::ImportGauge(const GaugeField &_Umu)
 {
   GaugeField HUmu(_Umu.Grid());

@@ -107,7 +192,7 @@ void WilsonFermion<Impl>::ImportGauge(const GaugeField &_Umu)
 /////////////////////////////

 template <class Impl>
 void WilsonFermion<Impl>::M(const FermionField &in, FermionField &out)
 {
   out.Checkerboard() = in.Checkerboard();
   Dhop(in, out, DaggerNo);
@@ -115,7 +200,7 @@ void WilsonFermion<Impl>::M(const FermionField &in, FermionField &out)
 }

 template <class Impl>
 void WilsonFermion<Impl>::Mdag(const FermionField &in, FermionField &out)
 {
   out.Checkerboard() = in.Checkerboard();
   Dhop(in, out, DaggerYes);
@@ -123,7 +208,7 @@ void WilsonFermion<Impl>::Mdag(const FermionField &in, FermionField &out)
 }

 template <class Impl>
 void WilsonFermion<Impl>::Meooe(const FermionField &in, FermionField &out)
 {
   if (in.Checkerboard() == Odd) {
     DhopEO(in, out, DaggerNo);
@@ -133,7 +218,7 @@ void WilsonFermion<Impl>::Meooe(const FermionField &in, FermionField &out)
 }

 template <class Impl>
 void WilsonFermion<Impl>::MeooeDag(const FermionField &in, FermionField &out)
 {
   if (in.Checkerboard() == Odd) {
     DhopEO(in, out, DaggerYes);
@@ -141,9 +226,9 @@ void WilsonFermion<Impl>::MeooeDag(const FermionField &in, FermionField &out)
     DhopOE(in, out, DaggerYes);
   }
 }

 template <class Impl>
 void WilsonFermion<Impl>::Mooee(const FermionField &in, FermionField &out)
 {
   out.Checkerboard() = in.Checkerboard();
   typename FermionField::scalar_type scal(diag_mass);
@@ -151,80 +236,80 @@ void WilsonFermion<Impl>::Mooee(const FermionField &in, FermionField &out)
 }

 template <class Impl>
 void WilsonFermion<Impl>::MooeeDag(const FermionField &in, FermionField &out)
 {
   out.Checkerboard() = in.Checkerboard();
   Mooee(in, out);
 }

 template<class Impl>
 void WilsonFermion<Impl>::MooeeInv(const FermionField &in, FermionField &out)
 {
   out.Checkerboard() = in.Checkerboard();
   out = (1.0/(diag_mass))*in;
 }

 template<class Impl>
 void WilsonFermion<Impl>::MooeeInvDag(const FermionField &in, FermionField &out)
 {
   out.Checkerboard() = in.Checkerboard();
   MooeeInv(in,out);
 }
 template<class Impl>
 void WilsonFermion<Impl>::MomentumSpacePropagator(FermionField &out, const FermionField &in,RealD _m,std::vector<double> twist)
 {
   typedef typename FermionField::vector_type vector_type;
   typedef typename FermionField::scalar_type ScalComplex;
   typedef Lattice<iSinglet<vector_type> > LatComplex;

   // what type LatticeComplex
   conformable(_grid,out.Grid());

   Gamma::Algebra Gmu [] = {
     Gamma::Algebra::GammaX,
     Gamma::Algebra::GammaY,
     Gamma::Algebra::GammaZ,
     Gamma::Algebra::GammaT
   };

   Coordinate latt_size = _grid->_fdimensions;

   FermionField num  (_grid); num    = Zero();
   LatComplex   wilson(_grid); wilson = Zero();
   LatComplex   one  (_grid); one    = ScalComplex(1.0,0.0);

   LatComplex denom(_grid); denom = Zero();
   LatComplex kmu(_grid);
   ScalComplex ci(0.0,1.0);
   // momphase = n * 2pi / L
   for(int mu=0;mu<Nd;mu++) {

     LatticeCoordinate(kmu,mu);

     RealD TwoPiL = M_PI * 2.0/ latt_size[mu];

     kmu = TwoPiL * kmu;
     kmu = kmu + TwoPiL * one * twist[mu];   // momentum for twisted boundary conditions

     wilson = wilson + 2.0*sin(kmu*0.5)*sin(kmu*0.5); // Wilson term

     num = num - sin(kmu)*ci*(Gamma(Gmu[mu])*in);     // derivative term

     denom=denom + sin(kmu)*sin(kmu);
   }

   wilson = wilson + _m;        // 2 sin^2 k/2 + m

   num   = num + wilson*in;     // -i gmu sin k + 2 sin^2 k/2 + m

   denom = denom+wilson*wilson; // sin^2 k + (2 sin^2 k/2 + m)^2

   denom = one/denom;

   out = num*denom;             // [ -i gmu sin k + 2 sin^2 k/2 + m] / [ sin^2 k + (2 sin^2 k/2 + m)^2 ]

 }
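Collecting the inline comments, MomentumSpacePropagator applies the free lattice Wilson propagator in momentum space. In LaTeX, with the twist folded into the momenta:

  \mathrm{out}(k) \;=\;
  \frac{-i\sum_\mu \gamma_\mu \sin k_\mu \;+\; m + 2\sum_\mu \sin^2(k_\mu/2)}
       {\sum_\mu \sin^2 k_\mu \;+\; \big(m + 2\sum_\mu \sin^2(k_\mu/2)\big)^2}\,
  \mathrm{in}(k),
  \qquad
  k_\mu = \frac{2\pi}{L_\mu}\,(n_\mu + \theta_\mu),

where \theta_\mu is the twist angle supplied in the twist vector.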
 ///////////////////////////////////
 // Internal
@@ -234,6 +319,7 @@ template <class Impl>
 void WilsonFermion<Impl>::DerivInternal(StencilImpl &st, DoubledGaugeField &U,
                                         GaugeField &mat, const FermionField &A,
                                         const FermionField &B, int dag) {
+  DerivCalls++;
   assert((dag == DaggerNo) || (dag == DaggerYes));

   Compressor compressor(dag);
@@ -242,8 +328,11 @@ void WilsonFermion<Impl>::DerivInternal(StencilImpl &st, DoubledGaugeField &U,
   FermionField Atilde(B.Grid());
   Atilde = A;

+  DerivCommTime-=usecond();
   st.HaloExchange(B, compressor);
+  DerivCommTime+=usecond();

+  DerivComputeTime-=usecond();
   for (int mu = 0; mu < Nd; mu++) {
     ////////////////////////////////////////////////////////////////////////
     // Flip gamma (1+g)<->(1-g) if dag
@@ -251,6 +340,7 @@ void WilsonFermion<Impl>::DerivInternal(StencilImpl &st, DoubledGaugeField &U,
     int gamma = mu;
     if (!dag) gamma += Nd;

+    DerivDhopComputeTime -= usecond();
     int Ls=1;
     Kernels::DhopDirKernel(st, U, st.CommBuf(), Ls, B.Grid()->oSites(), B, Btilde, mu, gamma);

@@ -258,11 +348,13 @@ void WilsonFermion<Impl>::DerivInternal(StencilImpl &st, DoubledGaugeField &U,
     // spin trace outer product
     //////////////////////////////////////////////////
     Impl::InsertForce4D(mat, Btilde, Atilde, mu);
+    DerivDhopComputeTime += usecond();
   }
+  DerivComputeTime += usecond();
 }
 template <class Impl>
 void WilsonFermion<Impl>::DhopDeriv(GaugeField &mat, const FermionField &U, const FermionField &V, int dag)
 {
   conformable(U.Grid(), _grid);
   conformable(U.Grid(), V.Grid());
@@ -274,13 +366,13 @@ void WilsonFermion<Impl>::DhopDeriv(GaugeField &mat, const FermionField &U, cons
 }

 template <class Impl>
 void WilsonFermion<Impl>::DhopDerivOE(GaugeField &mat, const FermionField &U, const FermionField &V, int dag)
 {
   conformable(U.Grid(), _cbgrid);
   conformable(U.Grid(), V.Grid());
   //conformable(U.Grid(), mat.Grid()); not general, leaving as a comment (Guido)
   // Motivation: look at the SchurDiff operator

   assert(V.Checkerboard() == Even);
   assert(U.Checkerboard() == Odd);
   mat.Checkerboard() = Odd;
@@ -289,7 +381,7 @@ void WilsonFermion<Impl>::DhopDerivOE(GaugeField &mat, const FermionField &U, co
 }

 template <class Impl>
 void WilsonFermion<Impl>::DhopDerivEO(GaugeField &mat, const FermionField &U, const FermionField &V, int dag)
 {
   conformable(U.Grid(), _cbgrid);
   conformable(U.Grid(), V.Grid());
@@ -303,7 +395,7 @@ void WilsonFermion<Impl>::DhopDerivEO(GaugeField &mat, const FermionField &U, co
 }

 template <class Impl>
 void WilsonFermion<Impl>::Dhop(const FermionField &in, FermionField &out, int dag)
 {
   conformable(in.Grid(), _grid); // verifies full grid
   conformable(in.Grid(), out.Grid());
@@ -314,7 +406,7 @@ void WilsonFermion<Impl>::Dhop(const FermionField &in, FermionField &out, int da
 }

 template <class Impl>
 void WilsonFermion<Impl>::DhopOE(const FermionField &in, FermionField &out, int dag)
 {
   conformable(in.Grid(), _cbgrid);    // verifies half grid
   conformable(in.Grid(), out.Grid()); // drops the cb check
@@ -326,7 +418,7 @@ void WilsonFermion<Impl>::DhopOE(const FermionField &in, FermionField &out, int
 }

 template <class Impl>
 void WilsonFermion<Impl>::DhopEO(const FermionField &in, FermionField &out,int dag)
 {
   conformable(in.Grid(), _cbgrid);    // verifies half grid
   conformable(in.Grid(), out.Grid()); // drops the cb check
@@ -338,18 +430,18 @@ void WilsonFermion<Impl>::DhopEO(const FermionField &in, FermionField &out,int d
 }

 template <class Impl>
 void WilsonFermion<Impl>::Mdir(const FermionField &in, FermionField &out, int dir, int disp)
 {
   DhopDir(in, out, dir, disp);
 }
 template <class Impl>
 void WilsonFermion<Impl>::MdirAll(const FermionField &in, std::vector<FermionField> &out)
 {
   DhopDirAll(in, out);
 }

 template <class Impl>
 void WilsonFermion<Impl>::DhopDir(const FermionField &in, FermionField &out, int dir, int disp)
 {
   Compressor compressor(DaggerNo);
   Stencil.HaloExchange(in, compressor);
@@ -361,12 +453,12 @@ void WilsonFermion<Impl>::DhopDir(const FermionField &in, FermionField &out, int
   DhopDirCalc(in, out, dirdisp, gamma, DaggerNo);
 };
 template <class Impl>
 void WilsonFermion<Impl>::DhopDirAll(const FermionField &in, std::vector<FermionField> &out)
 {
   Compressor compressor(DaggerNo);
   Stencil.HaloExchange(in, compressor);

   assert((out.size()==8)||(out.size()==9));
   for(int dir=0;dir<Nd;dir++){
     for(int disp=-1;disp<=1;disp+=2){

@@ -379,7 +471,7 @@ void WilsonFermion<Impl>::DhopDirAll(const FermionField &in, std::vector<Fermion
     }
   }
 template <class Impl>
 void WilsonFermion<Impl>::DhopDirCalc(const FermionField &in, FermionField &out,int dirdisp, int gamma, int dag)
 {
   int Ls=1;
   uint64_t Nsite=in.oSites();
@@ -390,22 +482,23 @@ template <class Impl>
 void WilsonFermion<Impl>::DhopInternal(StencilImpl &st, LebesgueOrder &lo,
                                        DoubledGaugeField &U,
                                        const FermionField &in,
                                        FermionField &out, int dag)
 {
+  DhopTotalTime-=usecond();
 #ifdef GRID_OMP
   if ( WilsonKernelsStatic::Comms == WilsonKernelsStatic::CommsAndCompute )
     DhopInternalOverlappedComms(st,lo,U,in,out,dag);
   else
 #endif
     DhopInternalSerial(st,lo,U,in,out,dag);

+  DhopTotalTime+=usecond();
 }

 template <class Impl>
 void WilsonFermion<Impl>::DhopInternalOverlappedComms(StencilImpl &st, LebesgueOrder &lo,
                                                       DoubledGaugeField &U,
                                                       const FermionField &in,
                                                       FermionField &out, int dag)
 {
   assert((dag == DaggerNo) || (dag == DaggerYes));

@@ -417,38 +510,53 @@ void WilsonFermion<Impl>::DhopInternalOverlappedComms(StencilImpl &st, LebesgueO
   /////////////////////////////
   std::vector<std::vector<CommsRequest_t> > requests;
   st.Prepare();
+  DhopFaceTime-=usecond();
   st.HaloGather(in,compressor);
+  DhopFaceTime+=usecond();

+  DhopCommTime -=usecond();
   st.CommunicateBegin(requests);

   /////////////////////////////
   // Overlap with comms
   /////////////////////////////
+  DhopFaceTime-=usecond();
   st.CommsMergeSHM(compressor);
+  DhopFaceTime+=usecond();

   /////////////////////////////
   // do the compute interior
   /////////////////////////////
   int Opt = WilsonKernelsStatic::Opt;
+  DhopComputeTime-=usecond();
   if (dag == DaggerYes) {
     Kernels::DhopDagKernel(Opt,st,U,st.CommBuf(),1,U.oSites(),in,out,1,0);
   } else {
     Kernels::DhopKernel(Opt,st,U,st.CommBuf(),1,U.oSites(),in,out,1,0);
   }
+  DhopComputeTime+=usecond();

   /////////////////////////////
   // Complete comms
   /////////////////////////////
   st.CommunicateComplete(requests);
+  DhopCommTime +=usecond();

+  DhopFaceTime-=usecond();
   st.CommsMerge(compressor);
+  DhopFaceTime+=usecond();

   /////////////////////////////
   // do the compute exterior
   /////////////////////////////
+  DhopComputeTime2-=usecond();
   if (dag == DaggerYes) {
     Kernels::DhopDagKernel(Opt,st,U,st.CommBuf(),1,U.oSites(),in,out,0,1);
   } else {
     Kernels::DhopKernel(Opt,st,U,st.CommBuf(),1,U.oSites(),in,out,0,1);
   }
+  DhopComputeTime2+=usecond();
 };
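The structure of DhopInternalOverlappedComms is the classic halo-overlap schedule: post communications, compute the interior while messages are in flight, then finish the face sites. A generic sketch of the same schedule, with illustrative names rather than Grid's stencil API:

// Generic comms/compute overlap, sketched with illustrative names.
struct Requests;  // stand-in for a handle to in-flight messages

template <class Stencil, class FInt, class FExt>
void overlapped_halo_update(Stencil &st, FInt interior, FExt exterior) {
  Requests *reqs = st.begin_halo_exchange(); // post sends/recvs, return at once
  interior();                                // bulk work needing no halo data
  st.complete_halo_exchange(reqs);           // wait for the faces to arrive
  exterior();                                // finish sites touching the halo
}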
@@ -456,24 +564,28 @@ template <class Impl>
 void WilsonFermion<Impl>::DhopInternalSerial(StencilImpl &st, LebesgueOrder &lo,
                                              DoubledGaugeField &U,
                                              const FermionField &in,
                                              FermionField &out, int dag)
 {
   assert((dag == DaggerNo) || (dag == DaggerYes));
   Compressor compressor(dag);
+  DhopCommTime-=usecond();
   st.HaloExchange(in, compressor);
+  DhopCommTime+=usecond();

+  DhopComputeTime-=usecond();
   int Opt = WilsonKernelsStatic::Opt;
   if (dag == DaggerYes) {
     Kernels::DhopDagKernel(Opt,st,U,st.CommBuf(),1,U.oSites(),in,out);
   } else {
     Kernels::DhopKernel(Opt,st,U,st.CommBuf(),1,U.oSites(),in,out);
   }
+  DhopComputeTime+=usecond();
 };
 /*Change ends */

 /*******************************************************************************
  * Conserved current utilities for Wilson fermions, for contracting propagators
  * to make a conserved current sink or inserting the conserved current
  * sequentially.
  ******************************************************************************/
 template <class Impl>
@@ -493,12 +605,12 @@ void WilsonFermion<Impl>::ContractConservedCurrent(PropagatorField &q_in_1,


 template <class Impl>
 void WilsonFermion<Impl>::SeqConservedCurrent(PropagatorField &q_in,
                                               PropagatorField &q_out,
                                               PropagatorField &src,
                                               Current curr_type,
                                               unsigned int mu,
                                               unsigned int tmin,
                                               unsigned int tmax,
                                               ComplexField &lattice_cmplx)
 {
450  Grid/qcd/action/fermion/implementation/WilsonKernelsAsmA64FX.h  (new file)
@@ -0,0 +1,450 @@
|
||||
/*************************************************************************************
|
||||
|
||||
Grid physics library, www.github.com/paboyle/Grid
|
||||
|
||||
|
||||
|
||||
Source file: ./lib/qcd/action/fermion/WilsonKernelsAsmA64FX.h
|
||||
|
||||
Copyright (C) 2020
|
||||
|
||||
Author: Nils Meyer <nils.meyer@ur.de> Regensburg University
|
||||
|
||||
This program is free software; you can redistribute it and/or modify
|
||||
it under the terms of the GNU General Public License as published by
|
||||
the Free Software Foundation; either version 2 of the License, or
|
||||
(at your option) any later version.
|
||||
|
||||
This program is distributed in the hope that it will be useful,
|
||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
GNU General Public License for more details.
|
||||
|
||||
You should have received a copy of the GNU General Public License along
|
||||
with this program; if not, write to the Free Software Foundation, Inc.,
|
||||
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
|
||||
See the full license in the file "LICENSE" in the top level distribution directory
|
||||
*************************************************************************************/
|
||||
/* END LEGAL */
|
||||
#pragma once
|
||||
|
||||
//#if defined(A64FXASM)
|
||||
#if defined(A64FX)
|
||||
|
||||
// safety include
|
||||
#include <arm_sve.h>
|
||||
|
||||
// undefine everything related to kernels
|
||||
#include <simd/Fujitsu_A64FX_undef.h>
|
||||
|
||||
|
||||
///////////////////////////////////////////////////////////
|
||||
// If we are A64FX specialise the single precision routine
|
||||
///////////////////////////////////////////////////////////
|
||||
#if defined(DSLASHINTRIN)
|
||||
//#pragma message ("A64FX Dslash: intrin")
|
||||
#include <simd/Fujitsu_A64FX_intrin_single.h>
|
||||
#else
|
||||
#pragma message ("A64FX Dslash: asm")
|
||||
#include <simd/Fujitsu_A64FX_asm_single.h>
|
||||
#endif
|
||||
|
||||
/// Switch off the 5d vectorised code optimisations
|
||||
#undef DWFVEC5D
|
||||
|
||||
/////////////////////////////////////////////////////////////////
// XYZT vectorised, undag Kernel, single
/////////////////////////////////////////////////////////////////
#undef KERNEL_DAG
#define INTERIOR_AND_EXTERIOR
#undef INTERIOR
#undef EXTERIOR

#pragma GCC optimize ("-O3", "-fno-schedule-insns", "-fno-schedule-insns2")
template<> void
WilsonKernels<WilsonImplF>::AsmDhopSite(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf,
                                        int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out)
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h>

#pragma GCC optimize ("-O3", "-fno-schedule-insns", "-fno-schedule-insns2")
template<> void
WilsonKernels<ZWilsonImplF>::AsmDhopSite(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf,
                                         int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out)
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h>

#pragma GCC optimize ("-O3", "-fno-schedule-insns", "-fno-schedule-insns2")
template<> void
WilsonKernels<WilsonImplFH>::AsmDhopSite(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf,
                                         int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out)
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h>

#pragma GCC optimize ("-O3", "-fno-schedule-insns", "-fno-schedule-insns2")
template<> void
WilsonKernels<ZWilsonImplFH>::AsmDhopSite(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf,
                                          int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out)
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h>


#undef INTERIOR_AND_EXTERIOR
#define INTERIOR
#undef EXTERIOR

#pragma GCC optimize ("-O3", "-fno-schedule-insns", "-fno-schedule-insns2")
template<> void
WilsonKernels<WilsonImplF>::AsmDhopSiteInt(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf,
                                           int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out)
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h>

#pragma GCC optimize ("-O3", "-fno-schedule-insns", "-fno-schedule-insns2")
template<> void
WilsonKernels<ZWilsonImplF>::AsmDhopSiteInt(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf,
                                            int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out)
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h>

#pragma GCC optimize ("-O3", "-fno-schedule-insns", "-fno-schedule-insns2")
template<> void
WilsonKernels<WilsonImplFH>::AsmDhopSiteInt(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf,
                                            int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out)
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h>

#pragma GCC optimize ("-O3", "-fno-schedule-insns", "-fno-schedule-insns2")
template<> void
WilsonKernels<ZWilsonImplFH>::AsmDhopSiteInt(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf,
                                             int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out)
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h>


#undef INTERIOR_AND_EXTERIOR
#undef INTERIOR
#define EXTERIOR

#pragma GCC optimize ("-O3", "-fno-schedule-insns", "-fno-schedule-insns2")
template<> void
WilsonKernels<WilsonImplF>::AsmDhopSiteExt(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf,
                                           int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out)
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h>

#pragma GCC optimize ("-O3", "-fno-schedule-insns", "-fno-schedule-insns2")
template<> void
WilsonKernels<ZWilsonImplF>::AsmDhopSiteExt(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf,
                                            int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out)
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h>

#pragma GCC optimize ("-O3", "-fno-schedule-insns", "-fno-schedule-insns2")
template<> void
WilsonKernels<WilsonImplFH>::AsmDhopSiteExt(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf,
                                            int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out)
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h>

#pragma GCC optimize ("-O3", "-fno-schedule-insns", "-fno-schedule-insns2")
template<> void
WilsonKernels<ZWilsonImplFH>::AsmDhopSiteExt(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf,
                                             int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out)
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h>
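Each specialisation above is a bare function signature whose braces and body are pulled in by #include from WilsonKernelsAsmBodyA64FX.h, with the preprocessor flags (KERNEL_DAG, INTERIOR_AND_EXTERIOR, INTERIOR, EXTERIOR) selecting which variant the shared body expands to. A stripped-down sketch of the same body-by-#include pattern, with hypothetical file and function names:

// --- kernel_body.h (hypothetical shared body) --------------------------
// { int acc = 0;
// #ifdef KERNEL_DAG
//   acc -= 1;              // dagger variant of the shared body
// #else
//   acc += 1;              // undag variant of the shared body
// #endif
//   return acc; }
// ------------------------------------------------------------------------

// --- user file: stamp the body out twice --------------------------------
#undef KERNEL_DAG           // select the undag body
int kernel_undag()
#include "kernel_body.h"    // supplies the { ... } function body

#define KERNEL_DAG          // re-stamp the same body as the dagger variant
int kernel_dag()
#include "kernel_body.h"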
/////////////////////////////////////////////////////////////////
// XYZT vectorised, dag Kernel, single
/////////////////////////////////////////////////////////////////
#define KERNEL_DAG
#define INTERIOR_AND_EXTERIOR
#undef INTERIOR
#undef EXTERIOR

#pragma GCC optimize ("-O3", "-fno-schedule-insns", "-fno-schedule-insns2")
template<> void
WilsonKernels<WilsonImplF>::AsmDhopSiteDag(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf,
                                           int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out)
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h>

#pragma GCC optimize ("-O3", "-fno-schedule-insns", "-fno-schedule-insns2")
template<> void
WilsonKernels<ZWilsonImplF>::AsmDhopSiteDag(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf,
                                            int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out)
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h>

#pragma GCC optimize ("-O3", "-fno-schedule-insns", "-fno-schedule-insns2")
template<> void
WilsonKernels<WilsonImplFH>::AsmDhopSiteDag(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf,
                                            int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out)
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h>

#pragma GCC optimize ("-O3", "-fno-schedule-insns", "-fno-schedule-insns2")
template<> void
WilsonKernels<ZWilsonImplFH>::AsmDhopSiteDag(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf,
                                             int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out)
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h>


#undef INTERIOR_AND_EXTERIOR
#define INTERIOR
#undef EXTERIOR

#pragma GCC optimize ("-O3", "-fno-schedule-insns", "-fno-schedule-insns2")
template<> void
WilsonKernels<WilsonImplF>::AsmDhopSiteDagInt(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf,
                                              int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out)
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h>

#pragma GCC optimize ("-O3", "-fno-schedule-insns", "-fno-schedule-insns2")
template<> void
WilsonKernels<ZWilsonImplF>::AsmDhopSiteDagInt(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf,
                                               int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out)
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h>

#pragma GCC optimize ("-O3", "-fno-schedule-insns", "-fno-schedule-insns2")
template<> void
WilsonKernels<WilsonImplFH>::AsmDhopSiteDagInt(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf,
                                               int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out)
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h>

#pragma GCC optimize ("-O3", "-fno-schedule-insns", "-fno-schedule-insns2")
template<> void
WilsonKernels<ZWilsonImplFH>::AsmDhopSiteDagInt(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf,
                                                int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out)
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h>


#undef INTERIOR_AND_EXTERIOR
#undef INTERIOR
#define EXTERIOR

#pragma GCC optimize ("-O3", "-fno-schedule-insns", "-fno-schedule-insns2")
template<> void
WilsonKernels<WilsonImplF>::AsmDhopSiteDagExt(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf,
                                              int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out)
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h>

#pragma GCC optimize ("-O3", "-fno-schedule-insns", "-fno-schedule-insns2")
template<> void
WilsonKernels<ZWilsonImplF>::AsmDhopSiteDagExt(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf,
                                               int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out)
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h>

#pragma GCC optimize ("-O3", "-fno-schedule-insns", "-fno-schedule-insns2")
template<> void
WilsonKernels<WilsonImplFH>::AsmDhopSiteDagExt(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf,
                                               int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out)
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h>

#pragma GCC optimize ("-O3", "-fno-schedule-insns", "-fno-schedule-insns2")
template<> void
WilsonKernels<ZWilsonImplFH>::AsmDhopSiteDagExt(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf,
                                                int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out)
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h>



// undefine
#include <simd/Fujitsu_A64FX_undef.h>
///////////////////////////////////////////////////////////
// If we are A64FX specialise the double precision routine
///////////////////////////////////////////////////////////

#if defined(DSLASHINTRIN)
#include <simd/Fujitsu_A64FX_intrin_double.h>
#else
#include <simd/Fujitsu_A64FX_asm_double.h>
#endif

// former KNL
//#define MAYBEPERM(A,perm) if (perm) { A ; }
//#define MULT_2SPIN(ptr,pf) MULT_ADDSUB_2SPIN(ptr,pf)
//#define COMPLEX_SIGNS(isigns) vComplexD *isigns = &signsD[0];


#define INTERIOR_AND_EXTERIOR
#undef INTERIOR
#undef EXTERIOR

/////////////////////////////////////////////////////////////////
// XYZT vectorised, undag Kernel, double
/////////////////////////////////////////////////////////////////
#undef KERNEL_DAG
#define INTERIOR_AND_EXTERIOR
#undef INTERIOR
#undef EXTERIOR

#pragma GCC optimize ("-O3", "-fno-schedule-insns", "-fno-schedule-insns2")
template<> void
WilsonKernels<WilsonImplD>::AsmDhopSite(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf,
                                        int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out)
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h>

#pragma GCC optimize ("-O3", "-fno-schedule-insns", "-fno-schedule-insns2")
template<> void
WilsonKernels<ZWilsonImplD>::AsmDhopSite(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf,
                                         int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out)
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h>

#pragma GCC optimize ("-O3", "-fno-schedule-insns", "-fno-schedule-insns2")
template<> void
WilsonKernels<WilsonImplDF>::AsmDhopSite(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf,
                                         int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out)
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h>

#pragma GCC optimize ("-O3", "-fno-schedule-insns", "-fno-schedule-insns2")
template<> void
WilsonKernels<ZWilsonImplDF>::AsmDhopSite(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf,
                                          int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out)
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h>


#undef INTERIOR_AND_EXTERIOR
#define INTERIOR
#undef EXTERIOR

#pragma GCC optimize ("-O3", "-fno-schedule-insns", "-fno-schedule-insns2")
template<> void
WilsonKernels<WilsonImplD>::AsmDhopSiteInt(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf,
                                           int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out)
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h>

#pragma GCC optimize ("-O3", "-fno-schedule-insns", "-fno-schedule-insns2")
template<> void
WilsonKernels<ZWilsonImplD>::AsmDhopSiteInt(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf,
                                            int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out)
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h>

#pragma GCC optimize ("-O3", "-fno-schedule-insns", "-fno-schedule-insns2")
template<> void
WilsonKernels<WilsonImplDF>::AsmDhopSiteInt(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf,
                                            int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out)
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h>

#pragma GCC optimize ("-O3", "-fno-schedule-insns", "-fno-schedule-insns2")
template<> void
WilsonKernels<ZWilsonImplDF>::AsmDhopSiteInt(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf,
                                             int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out)
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h>


#undef INTERIOR_AND_EXTERIOR
#undef INTERIOR
#define EXTERIOR

#pragma GCC optimize ("-O3", "-fno-schedule-insns", "-fno-schedule-insns2")
template<> void
WilsonKernels<WilsonImplD>::AsmDhopSiteExt(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf,
                                           int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out)
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h>

#pragma GCC optimize ("-O3", "-fno-schedule-insns", "-fno-schedule-insns2")
|
||||
template<> void
|
||||
WilsonKernels<ZWilsonImplD>::AsmDhopSiteExt(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf,
|
||||
int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out)
|
||||
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h>
|
||||
|
||||
#pragma GCC optimize ("-O3", "-fno-schedule-insns", "-fno-schedule-insns2")
|
||||
template<> void
|
||||
WilsonKernels<WilsonImplDF>::AsmDhopSiteExt(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf,
|
||||
int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out)
|
||||
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h>
|
||||
|
||||
#pragma GCC optimize ("-O3", "-fno-schedule-insns", "-fno-schedule-insns2")
|
||||
template<> void
|
||||
WilsonKernels<ZWilsonImplDF>::AsmDhopSiteExt(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf,
|
||||
int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out)
|
||||
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h>
|
||||
|
||||
|
||||
/////////////////////////////////////////////////////////////////
|
||||
// XYZT vectorised, dag Kernel, double
|
||||
/////////////////////////////////////////////////////////////////
|
||||
#define KERNEL_DAG
|
||||
#define INTERIOR_AND_EXTERIOR
|
||||
#undef INTERIOR
|
||||
#undef EXTERIOR
|
||||
|
||||
#pragma GCC optimize ("-O3", "-fno-schedule-insns", "-fno-schedule-insns2")
|
||||
template<> void
|
||||
WilsonKernels<WilsonImplD>::AsmDhopSiteDag(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf,
|
||||
int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out)
|
||||
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h>
|
||||
|
||||
#pragma GCC optimize ("-O3", "-fno-schedule-insns", "-fno-schedule-insns2")
|
||||
template<> void
|
||||
WilsonKernels<ZWilsonImplD>::AsmDhopSiteDag(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf,
|
||||
int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out)
|
||||
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h>
|
||||
|
||||
#pragma GCC optimize ("-O3", "-fno-schedule-insns", "-fno-schedule-insns2")
|
||||
template<> void
|
||||
WilsonKernels<WilsonImplDF>::AsmDhopSiteDag(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf,
|
||||
int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out)
|
||||
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h>
|
||||
|
||||
#pragma GCC optimize ("-O3", "-fno-schedule-insns", "-fno-schedule-insns2")
|
||||
template<> void
|
||||
WilsonKernels<ZWilsonImplDF>::AsmDhopSiteDag(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf,
|
||||
int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out)
|
||||
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h>
|
||||
|
||||
|
||||
#undef INTERIOR_AND_EXTERIOR
|
||||
#define INTERIOR
|
||||
#undef EXTERIOR
|
||||
|
||||
#pragma GCC optimize ("-O3", "-fno-schedule-insns", "-fno-schedule-insns2")
|
||||
template<> void
|
||||
WilsonKernels<WilsonImplD>::AsmDhopSiteDagInt(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf,
|
||||
int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out)
|
||||
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h>
|
||||
|
||||
#pragma GCC optimize ("-O3", "-fno-schedule-insns", "-fno-schedule-insns2")
|
||||
template<> void
|
||||
WilsonKernels<ZWilsonImplD>::AsmDhopSiteDagInt(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf,
|
||||
int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out)
|
||||
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h>
|
||||
|
||||
#pragma GCC optimize ("-O3", "-fno-schedule-insns", "-fno-schedule-insns2")
|
||||
template<> void
|
||||
WilsonKernels<WilsonImplDF>::AsmDhopSiteDagInt(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf,
|
||||
int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out)
|
||||
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h>
|
||||
|
||||
#pragma GCC optimize ("-O3", "-fno-schedule-insns", "-fno-schedule-insns2")
|
||||
template<> void
|
||||
WilsonKernels<ZWilsonImplDF>::AsmDhopSiteDagInt(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf,
|
||||
int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out)
|
||||
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h>
|
||||
|
||||
|
||||
#undef INTERIOR_AND_EXTERIOR
|
||||
#undef INTERIOR
|
||||
#define EXTERIOR
|
||||
|
||||
#pragma GCC optimize ("-O3", "-fno-schedule-insns", "-fno-schedule-insns2")
|
||||
template<> void
|
||||
WilsonKernels<WilsonImplD>::AsmDhopSiteDagExt(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf,
|
||||
int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out)
|
||||
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h>
|
||||
|
||||
#pragma GCC optimize ("-O3", "-fno-schedule-insns", "-fno-schedule-insns2")
|
||||
template<> void
|
||||
WilsonKernels<ZWilsonImplD>::AsmDhopSiteDagExt(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf,
|
||||
int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out)
|
||||
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h>
|
||||
|
||||
#pragma GCC optimize ("-O3", "-fno-schedule-insns", "-fno-schedule-insns2")
|
||||
template<> void
|
||||
WilsonKernels<WilsonImplDF>::AsmDhopSiteDagExt(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf,
|
||||
int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out)
|
||||
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h>
|
||||
|
||||
#pragma GCC optimize ("-O3", "-fno-schedule-insns", "-fno-schedule-insns2")
|
||||
template<> void
|
||||
WilsonKernels<ZWilsonImplDF>::AsmDhopSiteDagExt(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf,
|
||||
int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out)
|
||||
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h>
|
||||
|
||||
|
||||
|
||||
|
||||
// undefs
|
||||
#include <simd/Fujitsu_A64FX_undef.h>
|
||||
|
||||
#endif //A64FXASM
|
@ -0,0 +1,395 @@
/*************************************************************************************

Grid physics library, www.github.com/paboyle/Grid

Source file: WilsonKernelsAsmBodyA64FX.h

Copyright (C) 2020

Author: Nils Meyer <nils.meyer@ur.de> Regensburg University

This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.

This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.

See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */

// GCC 10 messes up SVE instruction scheduling using -O3, but
// -O3 -fno-schedule-insns -fno-schedule-insns2 does wonders;
// performance is now better than armclang 20.2
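
A minimal sketch of the same workaround scoped with push/pop so the rest of a translation unit keeps its original options (sve_hot_loop is an illustrative stand-in, not Grid code; assumes GCC):

#pragma GCC push_options
#pragma GCC optimize ("-O3", "-fno-schedule-insns", "-fno-schedule-insns2")
static inline void sve_hot_loop(double *out, const double *in, int n)
{
  for (int i = 0; i < n; i++) out[i] += in[i]; // stand-in for the dslash body
}
#pragma GCC pop_options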

#ifdef KERNEL_DAG
#define DIR0_PROJ XP_PROJ
#define DIR1_PROJ YP_PROJ
#define DIR2_PROJ ZP_PROJ
#define DIR3_PROJ TP_PROJ
#define DIR4_PROJ XM_PROJ
#define DIR5_PROJ YM_PROJ
#define DIR6_PROJ ZM_PROJ
#define DIR7_PROJ TM_PROJ
#define DIR0_RECON XP_RECON
#define DIR1_RECON YP_RECON_ACCUM
#define DIR2_RECON ZP_RECON_ACCUM
#define DIR3_RECON TP_RECON_ACCUM
#define DIR4_RECON XM_RECON_ACCUM
#define DIR5_RECON YM_RECON_ACCUM
#define DIR6_RECON ZM_RECON_ACCUM
#define DIR7_RECON TM_RECON_ACCUM
#else
#define DIR0_PROJ XM_PROJ
#define DIR1_PROJ YM_PROJ
#define DIR2_PROJ ZM_PROJ
#define DIR3_PROJ TM_PROJ
#define DIR4_PROJ XP_PROJ
#define DIR5_PROJ YP_PROJ
#define DIR6_PROJ ZP_PROJ
#define DIR7_PROJ TP_PROJ
#define DIR0_RECON XM_RECON
#define DIR1_RECON YM_RECON_ACCUM
#define DIR2_RECON ZM_RECON_ACCUM
#define DIR3_RECON TM_RECON_ACCUM
#define DIR4_RECON XP_RECON_ACCUM
#define DIR5_RECON YP_RECON_ACCUM
#define DIR6_RECON ZP_RECON_ACCUM
#define DIR7_RECON TP_RECON_ACCUM
#endif

//using namespace std;

#undef SHOW
//#define SHOW

#undef WHERE

#ifdef INTERIOR_AND_EXTERIOR
#define WHERE "INT_AND_EXT"
#endif

#ifdef INTERIOR
#define WHERE "INT"
#endif

#ifdef EXTERIOR
#define WHERE "EXT"
#endif

//#pragma message("here")


////////////////////////////////////////////////////////////////////////////////
// Comms then compute kernel
////////////////////////////////////////////////////////////////////////////////
#ifdef INTERIOR_AND_EXTERIOR

#define ASM_LEG(Dir,NxtDir,PERMUTE_DIR,PROJ,RECON) \
basep = st.GetPFInfo(nent,plocal); nent++; \
if ( local ) { \
LOAD_CHIMU(base); \
LOAD_TABLE(PERMUTE_DIR); \
PROJ; \
MAYBEPERM(PERMUTE_DIR,perm); \
} else { \
LOAD_CHI(base); \
} \
base = st.GetInfo(ptype,local,perm,NxtDir,ent,plocal); ent++; \
MULT_2SPIN_1(Dir); \
PREFETCH_CHIMU(base); \
PREFETCH_CHIMU_L2(basep); \
/* PREFETCH_GAUGE_L1(NxtDir); */ \
MULT_2SPIN_2; \
if (s == 0) { \
if ((Dir == 0) || (Dir == 4)) { PREFETCH_GAUGE_L2(Dir); } \
} \
RECON; \

/*
NB: picking PREFETCH_GAUGE_L2(Dir+4); here results in a performance penalty,
though I expected it to improve performance
*/

#define ASM_LEG_XP(Dir,NxtDir,PERMUTE_DIR,PROJ,RECON) \
base = st.GetInfo(ptype,local,perm,Dir,ent,plocal); ent++; \
PREFETCH1_CHIMU(base); \
ASM_LEG(Dir,NxtDir,PERMUTE_DIR,PROJ,RECON)

#define RESULT(base,basep) SAVE_RESULT(base,basep);

#endif
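
A compilable sketch of the branch ASM_LEG encodes, with toy types standing in for Grid's spinors (project() abbreviates the LOAD_CHIMU/PROJ/MAYBEPERM group, the buffer path abbreviates LOAD_CHI; none of these names are Grid API):

#include <cstddef>
#include <vector>
struct FullSpinor { double v[24]; };
struct HalfSpinor { double v[12]; };

static HalfSpinor project(const FullSpinor &psi)
{
  HalfSpinor chi{};
  for (int i = 0; i < 12; i++) chi.v[i] = psi.v[i] + psi.v[12 + i]; // schematic
  return chi;
}

static HalfSpinor leg_input(bool local, std::size_t base,
                            const std::vector<FullSpinor> &in,
                            const std::vector<HalfSpinor> &buf)
{
  if (local) return project(in[base]); // neighbour on-node: project in place
  return buf[base];                    // off-node: comms buffer is pre-projected
}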

////////////////////////////////////////////////////////////////////////////////
// Pre comms kernel -- prefetch like normal because it is mostly right
////////////////////////////////////////////////////////////////////////////////
#ifdef INTERIOR

#define ASM_LEG(Dir,NxtDir,PERMUTE_DIR,PROJ,RECON) \
basep = st.GetPFInfo(nent,plocal); nent++; \
if ( local ) { \
LOAD_CHIMU(base); \
LOAD_TABLE(PERMUTE_DIR); \
PROJ; \
MAYBEPERM(PERMUTE_DIR,perm); \
}else if ( st.same_node[Dir] ) {LOAD_CHI(base);} \
if ( local || st.same_node[Dir] ) { \
MULT_2SPIN_1(Dir); \
MULT_2SPIN_2; \
RECON; \
} \
base = st.GetInfo(ptype,local,perm,NxtDir,ent,plocal); ent++; \
PREFETCH_CHIMU(base); \
PREFETCH_CHIMU_L2(basep); \

#define ASM_LEG_XP(Dir,NxtDir,PERMUTE_DIR,PROJ,RECON) \
base = st.GetInfo(ptype,local,perm,Dir,ent,plocal); ent++; \
PREFETCH1_CHIMU(base); \
{ ZERO_PSI; } \
ASM_LEG(Dir,NxtDir,PERMUTE_DIR,PROJ,RECON)

#define RESULT(base,basep) SAVE_RESULT(base,basep);

#endif
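
A stand-alone restatement (not Grid API) of the interior-pass guard above: a leg is multiplied and reconstructed now only when its neighbour data is already resident on this node; purely off-node legs wait for the exterior pass.

static inline bool interior_contributes(bool local, bool same_node)
{
  return local || same_node; // mirrors: if ( local || st.same_node[Dir] )
}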

////////////////////////////////////////////////////////////////////////////////
// Post comms kernel
////////////////////////////////////////////////////////////////////////////////
#ifdef EXTERIOR

#define ASM_LEG(Dir,NxtDir,PERMUTE_DIR,PROJ,RECON) \
base = st.GetInfo(ptype,local,perm,Dir,ent,plocal); ent++; \
if((!local)&&(!st.same_node[Dir]) ) { \
LOAD_CHI(base); \
MULT_2SPIN_1(Dir); \
MULT_2SPIN_2; \
RECON; \
nmu++; \
}

#define ASM_LEG_XP(Dir,NxtDir,PERMUTE_DIR,PROJ,RECON) \
nmu=0; \
{ ZERO_PSI;} \
base = st.GetInfo(ptype,local,perm,Dir,ent,plocal); ent++; \
if((!local)&&(!st.same_node[Dir]) ) { \
LOAD_CHI(base); \
MULT_2SPIN_1(Dir); \
MULT_2SPIN_2; \
RECON; \
nmu++; \
}

#define RESULT(base,basep) if (nmu){ ADD_RESULT(base,base);}

#endif
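
A compilable sketch (toy arrays, not Grid API) of the exterior bookkeeping: nmu counts off-node legs that actually contributed; when it stays zero, out[ss] already holds the final interior result and the RESULT macro above skips the read-modify-write entirely.

static int exterior_legs(const bool local[8], const bool same_node[8])
{
  int nmu = 0;
  for (int dir = 0; dir < 8; dir++)
    if (!local[dir] && !same_node[dir]) nmu++; // LOAD_CHI/MULT/RECON happen here
  return nmu;                                  // caller: if (nmu) ADD_RESULT
}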


{
int nmu;
int local,perm, ptype;
uint64_t base;
uint64_t basep;
const uint64_t plocal =(uint64_t) & in[0];

MASK_REGS;
int nmax=U.oSites();
for(int site=0;site<Ns;site++) {
#ifndef EXTERIOR
// int sU =lo.Reorder(ssU);
int sU =ssU;
int ssn=ssU+1; if(ssn>=nmax) ssn=0;
// int sUn=lo.Reorder(ssn);
int sUn=ssn;
#else
int sU =ssU;
int ssn=ssU+1; if(ssn>=nmax) ssn=0;
int sUn=ssn;
#endif
for(int s=0;s<Ls;s++) {
ss =sU*Ls+s;
ssn=sUn*Ls+s;
int ent=ss*8;// 2*Ndim
int nent=ssn*8;

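A worked restatement of the indexing above (illustrative helper, not Grid API): with 2*Ndim = 8 stencil legs per 4-d site stored contiguously, the entries for site ss start at ss*8 and the prefetch entries for the next site at ssn*8.

static inline int stencil_entry(int site, int dir) { return site * 8 + dir; }
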
uint64_t delta_base, delta_base_p;

ASM_LEG_XP(Xp,Yp,PERMUTE_DIR3,DIR0_PROJ,DIR0_RECON);

#ifdef SHOW
float rescale = 64. * 12.;
std::cout << "=================================================================" << std::endl;
std::cout << "ss = " << ss << " ssn = " << ssn << std::endl;
std::cout << "sU = " << sU << " ssU = " << ssU << std::endl;
std::cout << " " << std::endl;


std::cout << "Dir = " << Xp << " " << WHERE<< std::endl;

std::cout << "ent nent local perm = " << ent << " " << nent << " " << local << " " << perm << std::endl;
std::cout << "st.same_node[Dir] = " << st.same_node[Xp] << std::endl;
std::cout << "base = " << (base - plocal)/rescale << std::endl;
std::cout << "Basep = " << (basep - plocal)/rescale << std::endl;
//printf("U = %llu\n", (uint64_t)&[sU](Dir));
std::cout << "----------------------------------------------------" << std::endl;
#endif

ASM_LEG(Yp,Zp,PERMUTE_DIR2,DIR1_PROJ,DIR1_RECON);

#ifdef SHOW
std::cout << "Dir = " << Yp << " " << WHERE<< std::endl;

std::cout << "ent nent local perm = " << ent << " " << nent << " " << local << " " << perm << std::endl;
std::cout << "st.same_node[Dir] = " << st.same_node[Yp] << std::endl;
std::cout << "base = " << (base - plocal)/rescale << std::endl;
std::cout << "Basep = " << (basep - plocal)/rescale << std::endl;
//printf("U = %llu\n", (uint64_t)&[sU](Dir));
std::cout << "----------------------------------------------------" << std::endl;
#endif

ASM_LEG(Zp,Tp,PERMUTE_DIR1,DIR2_PROJ,DIR2_RECON);

#ifdef SHOW
std::cout << "Dir = " << Zp << " " << WHERE<< std::endl;

std::cout << "ent nent local perm = " << ent << " " << nent << " " << local << " " << perm << std::endl;
std::cout << "st.same_node[Dir] = " << st.same_node[Zp] << std::endl;
std::cout << "base = " << (base - plocal)/rescale << std::endl;
std::cout << "Basep = " << (basep - plocal)/rescale << std::endl;
//printf("U = %llu\n", (uint64_t)&[sU](Dir));
std::cout << "----------------------------------------------------" << std::endl;
#endif

ASM_LEG(Tp,Xm,PERMUTE_DIR0,DIR3_PROJ,DIR3_RECON);

#ifdef SHOW
std::cout << "Dir = " << Tp << " " << WHERE<< std::endl;

std::cout << "ent nent local perm = " << ent << " " << nent << " " << local << " " << perm << std::endl;
std::cout << "st.same_node[Dir] = " << st.same_node[Tp] << std::endl;
std::cout << "base = " << (base - plocal)/rescale << std::endl;
std::cout << "Basep = " << (basep - plocal)/rescale << std::endl;
//printf("U = %llu\n", (uint64_t)&[sU](Dir));
std::cout << "----------------------------------------------------" << std::endl;
#endif

ASM_LEG(Xm,Ym,PERMUTE_DIR3,DIR4_PROJ,DIR4_RECON);

#ifdef SHOW
std::cout << "Dir = " << Xm << " " << WHERE<< std::endl;

std::cout << "ent nent local perm = " << ent << " " << nent << " " << local << " " << perm << std::endl;
std::cout << "st.same_node[Dir] = " << st.same_node[Xm] << std::endl;
std::cout << "base = " << (base - plocal)/rescale << std::endl;
std::cout << "Basep = " << (basep - plocal)/rescale << std::endl;
//printf("U = %llu\n", (uint64_t)&[sU](Dir));
std::cout << "----------------------------------------------------" << std::endl;
#endif

// DC ZVA test
// { uint64_t basestore = (uint64_t)&out[ss];
// PREFETCH_RESULT_L2_STORE(basestore); }


ASM_LEG(Ym,Zm,PERMUTE_DIR2,DIR5_PROJ,DIR5_RECON);

#ifdef SHOW
std::cout << "Dir = " << Ym << " " << WHERE<< std::endl;

std::cout << "ent nent local perm = " << ent << " " << nent << " " << local << " " << perm << std::endl;
std::cout << "st.same_node[Dir] = " << st.same_node[Ym] << std::endl;
std::cout << "base = " << (base - plocal)/rescale << std::endl;
std::cout << "Basep = " << (basep - plocal)/rescale << std::endl;
//printf("U = %llu\n", (uint64_t)&[sU](Dir));
std::cout << "----------------------------------------------------" << std::endl;
#endif

// DC ZVA test
//{ uint64_t basestore = (uint64_t)&out[ss];
// PREFETCH_RESULT_L2_STORE(basestore); }


ASM_LEG(Zm,Tm,PERMUTE_DIR1,DIR6_PROJ,DIR6_RECON);

#ifdef SHOW
std::cout << "Dir = " << Zm << " " << WHERE<< std::endl;

std::cout << "ent nent local perm = " << ent << " " << nent << " " << local << " " << perm << std::endl;
std::cout << "st.same_node[Dir] = " << st.same_node[Zm] << std::endl;
std::cout << "base = " << (base - plocal)/rescale << std::endl;
std::cout << "Basep = " << (basep - plocal)/rescale << std::endl;
//printf("U = %llu\n", (uint64_t)&[sU](Dir));
std::cout << "----------------------------------------------------" << std::endl;
#endif

// DC ZVA test
//{ uint64_t basestore = (uint64_t)&out[ss];
// PREFETCH_RESULT_L2_STORE(basestore); }


ASM_LEG(Tm,Xp,PERMUTE_DIR0,DIR7_PROJ,DIR7_RECON);

#ifdef SHOW
std::cout << "Dir = " << Tm << " " << WHERE<< std::endl;

std::cout << "ent nent local perm = " << ent << " " << nent << " " << local << " " << perm << std::endl;
std::cout << "st.same_node[Dir] = " << st.same_node[Tm] << std::endl;
std::cout << "base = " << (base - plocal)/rescale << std::endl;
std::cout << "Basep = " << (basep - plocal)/rescale << std::endl;
//printf("U = %llu\n", (uint64_t)&[sU](Dir));
std::cout << "----------------------------------------------------" << std::endl;
#endif

#ifdef EXTERIOR
if (nmu==0) break;
// if (nmu!=0) std::cout << "EXT "<<sU<<std::endl;
#endif
base = (uint64_t) &out[ss];
basep= st.GetPFInfo(nent,plocal); ent++;
basep = (uint64_t) &out[ssn];
//PREFETCH_RESULT_L1_STORE(base);
RESULT(base,basep);

#ifdef SHOW
std::cout << "Dir = FINAL " << WHERE << std::endl;

base_ss = base;
std::cout << "base = " << (base - (uint64_t) &out[0])/rescale << std::endl;
std::cout << "Basep = " << (basep - plocal)/rescale << std::endl;
//printf("U = %llu\n", (uint64_t)&[sU](Dir));
std::cout << "----------------------------------------------------" << std::endl;
#endif

}
ssU++;
UNLOCK_GAUGE(0);
}
}

#undef DIR0_PROJ
#undef DIR1_PROJ
#undef DIR2_PROJ
#undef DIR3_PROJ
#undef DIR4_PROJ
#undef DIR5_PROJ
#undef DIR6_PROJ
#undef DIR7_PROJ
#undef DIR0_RECON
#undef DIR1_RECON
#undef DIR2_RECON
#undef DIR3_RECON
#undef DIR4_RECON
#undef DIR5_RECON
#undef DIR6_RECON
#undef DIR7_RECON
#undef ASM_LEG
#undef ASM_LEG_XP
#undef RESULT
@ -38,46 +38,46 @@ NAMESPACE_BEGIN(Grid);

///////////////////////////////////////////////////////////
// Default to no assembler implementation
// Will specialise to
// Will specialise to AVX512 if available
///////////////////////////////////////////////////////////
template<class Impl> void
WilsonKernels<Impl >::AsmDhopSite(StencilView &st, DoubledGaugeFieldView &U,SiteHalfSpinor *buf,
int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out)
WilsonKernels<Impl >::AsmDhopSite(const StencilView &st, const DoubledGaugeFieldView &U,SiteHalfSpinor *buf,
int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, const FermionFieldView &out)
{
assert(0);
}

template<class Impl> void
WilsonKernels<Impl >::AsmDhopSiteDag(StencilView &st, DoubledGaugeFieldView &U,SiteHalfSpinor *buf,
int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out)
WilsonKernels<Impl >::AsmDhopSiteDag(const StencilView &st, const DoubledGaugeFieldView &U,SiteHalfSpinor *buf,
int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, const FermionFieldView &out)
{
assert(0);
}

template<class Impl> void
WilsonKernels<Impl >::AsmDhopSiteInt(StencilView &st, DoubledGaugeFieldView &U,SiteHalfSpinor *buf,
int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out)
WilsonKernels<Impl >::AsmDhopSiteInt(const StencilView &st, const DoubledGaugeFieldView &U,SiteHalfSpinor *buf,
int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, const FermionFieldView &out)
{
assert(0);
}

template<class Impl> void
WilsonKernels<Impl >::AsmDhopSiteDagInt(StencilView &st, DoubledGaugeFieldView &U,SiteHalfSpinor *buf,
int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out)
WilsonKernels<Impl >::AsmDhopSiteDagInt(const StencilView &st, const DoubledGaugeFieldView &U,SiteHalfSpinor *buf,
int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, const FermionFieldView &out)
{
assert(0);
}

template<class Impl> void
WilsonKernels<Impl >::AsmDhopSiteExt(StencilView &st, DoubledGaugeFieldView &U,SiteHalfSpinor *buf,
int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out)
WilsonKernels<Impl >::AsmDhopSiteExt(const StencilView &st, const DoubledGaugeFieldView &U,SiteHalfSpinor *buf,
int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, const FermionFieldView &out)
{
assert(0);
}

template<class Impl> void
WilsonKernels<Impl >::AsmDhopSiteDagExt(StencilView &st, DoubledGaugeFieldView &U,SiteHalfSpinor *buf,
int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out)
WilsonKernels<Impl >::AsmDhopSiteDagExt(const StencilView &st, const DoubledGaugeFieldView &U,SiteHalfSpinor *buf,
int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, const FermionFieldView &out)
{
assert(0);
}

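A sketch of the dispatch pattern this hunk relies on (toy types, not Grid code): the generic template is a trap, and explicit specialisations such as the A64FX ones earlier in this diff supply the real kernels.

#include <cassert>
template<class Impl> struct DhopKernel {
  static void run() { assert(0); } // generic fallback: must never be called
};
struct ToyImpl {};
template<> struct DhopKernel<ToyImpl> {
  static void run() { /* optimised, implementation-specific body */ }
};
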
@ -646,9 +646,14 @@ NAMESPACE_BEGIN(Grid);
HAND_RESULT_EXT(ss,F)

#define HAND_SPECIALISE_GPARITY(IMPL) \
template<> void \
WilsonKernels<IMPL>::HandDhopSite(StencilView &st, DoubledGaugeFieldView &U,SiteHalfSpinor *buf, \
int ss,int sU,const FermionFieldView &in, FermionFieldView &out) \
template<> accelerator_inline void \
WilsonKernels<IMPL>::HandDhopSiteSycl(StencilVector st_perm, StencilEntry *st_p, \
SiteDoubledGaugeField *U, SiteHalfSpinor * buf, \
int sF, int sU, const SiteSpinor *in, SiteSpinor *out) {} \
\
template<> accelerator_inline void \
WilsonKernels<IMPL>::HandDhopSite(const StencilView &st, const DoubledGaugeFieldView &U,SiteHalfSpinor *buf, \
int ss,int sU,const FermionFieldView &in, const FermionFieldView &out) \
{ \
typedef IMPL Impl; \
typedef typename Simd::scalar_type S; \
@ -662,9 +667,9 @@ NAMESPACE_BEGIN(Grid);
HAND_DOP_SITE(1, LOAD_CHI_GPARITY,LOAD_CHIMU_GPARITY,MULT_2SPIN_GPARITY); \
} \
\
template<> void \
WilsonKernels<IMPL>::HandDhopSiteDag(StencilView &st, DoubledGaugeFieldView &U,SiteHalfSpinor *buf, \
int ss,int sU,const FermionFieldView &in, FermionFieldView &out) \
template<> accelerator_inline void \
WilsonKernels<IMPL>::HandDhopSiteDag(const StencilView &st, const DoubledGaugeFieldView &U,SiteHalfSpinor *buf, \
int ss,int sU,const FermionFieldView &in, const FermionFieldView &out) \
{ \
typedef IMPL Impl; \
typedef typename Simd::scalar_type S; \
@ -678,9 +683,9 @@ NAMESPACE_BEGIN(Grid);
HAND_DOP_SITE_DAG(1, LOAD_CHI_GPARITY,LOAD_CHIMU_GPARITY,MULT_2SPIN_GPARITY); \
} \
\
template<> void \
WilsonKernels<IMPL>::HandDhopSiteInt(StencilView &st, DoubledGaugeFieldView &U,SiteHalfSpinor *buf, \
int ss,int sU,const FermionFieldView &in, FermionFieldView &out) \
template<> accelerator_inline void \
WilsonKernels<IMPL>::HandDhopSiteInt(const StencilView &st, const DoubledGaugeFieldView &U,SiteHalfSpinor *buf, \
int ss,int sU,const FermionFieldView &in, const FermionFieldView &out) \
{ \
typedef IMPL Impl; \
typedef typename Simd::scalar_type S; \
@ -694,9 +699,9 @@ NAMESPACE_BEGIN(Grid);
HAND_DOP_SITE_INT(1, LOAD_CHI_GPARITY,LOAD_CHIMU_GPARITY,MULT_2SPIN_GPARITY); \
} \
\
template<> void \
WilsonKernels<IMPL>::HandDhopSiteDagInt(StencilView &st, DoubledGaugeFieldView &U,SiteHalfSpinor *buf, \
int ss,int sU,const FermionFieldView &in, FermionFieldView &out) \
template<> accelerator_inline void \
WilsonKernels<IMPL>::HandDhopSiteDagInt(const StencilView &st, const DoubledGaugeFieldView &U,SiteHalfSpinor *buf, \
int ss,int sU,const FermionFieldView &in, const FermionFieldView &out) \
{ \
typedef IMPL Impl; \
typedef typename Simd::scalar_type S; \
@ -710,9 +715,9 @@ NAMESPACE_BEGIN(Grid);
HAND_DOP_SITE_DAG_INT(1, LOAD_CHI_GPARITY,LOAD_CHIMU_GPARITY,MULT_2SPIN_GPARITY); \
} \
\
template<> void \
WilsonKernels<IMPL>::HandDhopSiteExt(StencilView &st, DoubledGaugeFieldView &U,SiteHalfSpinor *buf, \
int ss,int sU,const FermionFieldView &in, FermionFieldView &out) \
template<> accelerator_inline void \
WilsonKernels<IMPL>::HandDhopSiteExt(const StencilView &st, const DoubledGaugeFieldView &U,SiteHalfSpinor *buf, \
int ss,int sU,const FermionFieldView &in, const FermionFieldView &out) \
{ \
typedef IMPL Impl; \
typedef typename Simd::scalar_type S; \
@ -727,9 +732,9 @@ NAMESPACE_BEGIN(Grid);
nmu = 0; \
HAND_DOP_SITE_EXT(1, LOAD_CHI_GPARITY,LOAD_CHIMU_GPARITY,MULT_2SPIN_GPARITY); \
} \
template<> void \
WilsonKernels<IMPL>::HandDhopSiteDagExt(StencilView &st, DoubledGaugeFieldView &U,SiteHalfSpinor *buf, \
int ss,int sU,const FermionFieldView &in, FermionFieldView &out) \
template<> accelerator_inline void \
WilsonKernels<IMPL>::HandDhopSiteDagExt(const StencilView &st, const DoubledGaugeFieldView &U,SiteHalfSpinor *buf, \
int ss,int sU,const FermionFieldView &in, const FermionFieldView &out) \
{ \
typedef IMPL Impl; \
typedef typename Simd::scalar_type S; \

@ -495,9 +495,9 @@ Author: paboyle <paboyle@ph.ed.ac.uk>

NAMESPACE_BEGIN(Grid);

template<class Impl> void
WilsonKernels<Impl>::HandDhopSite(StencilView &st, DoubledGaugeFieldView &U,SiteHalfSpinor *buf,
int ss,int sU,const FermionFieldView &in, FermionFieldView &out)
template<class Impl> accelerator_inline void
WilsonKernels<Impl>::HandDhopSite(const StencilView &st, const DoubledGaugeFieldView &U,SiteHalfSpinor *buf,
int ss,int sU,const FermionFieldView &in, const FermionFieldView &out)
{
// T==0, Z==1, Y==2, Z==3 expect 1,2,2,2 simd layout etc...
typedef typename Simd::scalar_type S;
@ -519,9 +519,9 @@ WilsonKernels<Impl>::HandDhopSite(StencilView &st, DoubledGaugeFieldView &U,Site
HAND_RESULT(ss);
}

template<class Impl>
void WilsonKernels<Impl>::HandDhopSiteDag(StencilView &st,DoubledGaugeFieldView &U,SiteHalfSpinor *buf,
int ss,int sU,const FermionFieldView &in, FermionFieldView &out)
template<class Impl> accelerator_inline
void WilsonKernels<Impl>::HandDhopSiteDag(const StencilView &st,const DoubledGaugeFieldView &U,SiteHalfSpinor *buf,
int ss,int sU,const FermionFieldView &in, const FermionFieldView &out)
{
typedef typename Simd::scalar_type S;
typedef typename Simd::vector_type V;
@ -542,9 +542,9 @@ void WilsonKernels<Impl>::HandDhopSiteDag(StencilView &st,DoubledGaugeFieldView
HAND_RESULT(ss);
}

template<class Impl> void
WilsonKernels<Impl>::HandDhopSiteInt(StencilView &st,DoubledGaugeFieldView &U,SiteHalfSpinor *buf,
int ss,int sU,const FermionFieldView &in, FermionFieldView &out)
template<class Impl> accelerator_inline void
WilsonKernels<Impl>::HandDhopSiteInt(const StencilView &st,const DoubledGaugeFieldView &U,SiteHalfSpinor *buf,
int ss,int sU,const FermionFieldView &in, const FermionFieldView &out)
{
// T==0, Z==1, Y==2, Z==3 expect 1,2,2,2 simd layout etc...
typedef typename Simd::scalar_type S;
@ -566,9 +566,9 @@ WilsonKernels<Impl>::HandDhopSiteInt(StencilView &st,DoubledGaugeFieldView &U,Si
HAND_RESULT(ss);
}

template<class Impl>
void WilsonKernels<Impl>::HandDhopSiteDagInt(StencilView &st,DoubledGaugeFieldView &U,SiteHalfSpinor *buf,
int ss,int sU,const FermionFieldView &in, FermionFieldView &out)
template<class Impl> accelerator_inline
void WilsonKernels<Impl>::HandDhopSiteDagInt(const StencilView &st,const DoubledGaugeFieldView &U,SiteHalfSpinor *buf,
int ss,int sU,const FermionFieldView &in, const FermionFieldView &out)
{
typedef typename Simd::scalar_type S;
typedef typename Simd::vector_type V;
@ -589,9 +589,9 @@ void WilsonKernels<Impl>::HandDhopSiteDagInt(StencilView &st,DoubledGaugeFieldVi
HAND_RESULT(ss);
}

template<class Impl> void
WilsonKernels<Impl>::HandDhopSiteExt(StencilView &st,DoubledGaugeFieldView &U,SiteHalfSpinor *buf,
int ss,int sU,const FermionFieldView &in, FermionFieldView &out)
template<class Impl> accelerator_inline void
WilsonKernels<Impl>::HandDhopSiteExt(const StencilView &st,const DoubledGaugeFieldView &U,SiteHalfSpinor *buf,
int ss,int sU,const FermionFieldView &in, const FermionFieldView &out)
{
// T==0, Z==1, Y==2, Z==3 expect 1,2,2,2 simd layout etc...
typedef typename Simd::scalar_type S;
@ -614,9 +614,9 @@ WilsonKernels<Impl>::HandDhopSiteExt(StencilView &st,DoubledGaugeFieldView &U,Si
HAND_RESULT_EXT(ss);
}

template<class Impl>
void WilsonKernels<Impl>::HandDhopSiteDagExt(StencilView &st,DoubledGaugeFieldView &U,SiteHalfSpinor *buf,
int ss,int sU,const FermionFieldView &in, FermionFieldView &out)
template<class Impl> accelerator_inline
void WilsonKernels<Impl>::HandDhopSiteDagExt(const StencilView &st,const DoubledGaugeFieldView &U,SiteHalfSpinor *buf,
int ss,int sU,const FermionFieldView &in, const FermionFieldView &out)
{
typedef typename Simd::scalar_type S;
typedef typename Simd::vector_type V;
@ -682,3 +682,4 @@ NAMESPACE_END(Grid);
#undef HAND_RESULT
#undef HAND_RESULT_INT
#undef HAND_RESULT_EXT
#undef HAND_DECLARATIONS

@ -0,0 +1,943 @@
/*************************************************************************************

Grid physics library, www.github.com/paboyle/Grid

Source file: ./lib/qcd/action/fermion/WilsonKernelsHand.cc

Copyright (C) 2015

Author: Peter Boyle <paboyle@ph.ed.ac.uk>
Author: paboyle <paboyle@ph.ed.ac.uk>

This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.

This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.

See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */

#pragma once

#include <Grid/qcd/action/fermion/FermionCore.h>


#undef LOAD_CHIMU
#undef LOAD_CHI
#undef MULT_2SPIN
#undef PERMUTE_DIR
#undef XP_PROJ
#undef YP_PROJ
#undef ZP_PROJ
#undef TP_PROJ
#undef XM_PROJ
#undef YM_PROJ
#undef ZM_PROJ
#undef TM_PROJ
#undef XP_RECON
#undef XP_RECON_ACCUM
#undef XM_RECON
#undef XM_RECON_ACCUM
#undef YP_RECON_ACCUM
#undef YM_RECON_ACCUM
#undef ZP_RECON_ACCUM
#undef ZM_RECON_ACCUM
#undef TP_RECON_ACCUM
#undef TM_RECON_ACCUM
#undef ZERO_RESULT
#undef Chimu_00
#undef Chimu_01
#undef Chimu_02
#undef Chimu_10
#undef Chimu_11
#undef Chimu_12
#undef Chimu_20
#undef Chimu_21
#undef Chimu_22
#undef Chimu_30
#undef Chimu_31
#undef Chimu_32
#undef HAND_STENCIL_LEG
#undef HAND_STENCIL_LEG_INT
#undef HAND_STENCIL_LEG_EXT
#undef HAND_RESULT
#undef HAND_RESULT_INT
#undef HAND_RESULT_EXT

#define REGISTER

#define LOAD_CHIMU \
{const SiteSpinor & ref (in[offset]); \
Chimu_00=ref()(0)(0);\
Chimu_01=ref()(0)(1);\
Chimu_02=ref()(0)(2);\
Chimu_10=ref()(1)(0);\
Chimu_11=ref()(1)(1);\
Chimu_12=ref()(1)(2);\
Chimu_20=ref()(2)(0);\
Chimu_21=ref()(2)(1);\
Chimu_22=ref()(2)(2);\
Chimu_30=ref()(3)(0);\
Chimu_31=ref()(3)(1);\
Chimu_32=ref()(3)(2);\
std::cout << std::endl << "DEBUG -- LOAD_CHIMU" << std::endl; \
std::cout << "Chimu_00 -- " << Chimu_00 << std::endl; \
std::cout << "Chimu_01 -- " << Chimu_01 << std::endl; \
std::cout << "Chimu_02 -- " << Chimu_02 << std::endl; \
std::cout << "Chimu_10 -- " << Chimu_10 << std::endl; \
std::cout << "Chimu_11 -- " << Chimu_11 << std::endl; \
std::cout << "Chimu_12 -- " << Chimu_12 << std::endl; \
std::cout << "Chimu_20 -- " << Chimu_20 << std::endl; \
std::cout << "Chimu_21 -- " << Chimu_21 << std::endl; \
std::cout << "Chimu_22 -- " << Chimu_22 << std::endl; \
std::cout << "Chimu_30 -- " << Chimu_30 << std::endl; \
std::cout << "Chimu_31 -- " << Chimu_31 << std::endl; \
std::cout << "Chimu_32 -- " << Chimu_32 << std::endl; \
}

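The macros in this debug header interleave each stage with std::cout tracing; the same idiom as a reusable helper is sketched below (illustrative only -- the header keeps the prints inline so each macro stays a drop-in replacement for the production one):

#include <iostream>
template<class T>
static void dump(const char *label, const T &value)
{
  std::cout << label << " -- " << value << std::endl;
}
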
#define LOAD_CHI\
{const SiteHalfSpinor &ref(buf[offset]); \
Chi_00 = ref()(0)(0);\
Chi_01 = ref()(0)(1);\
Chi_02 = ref()(0)(2);\
Chi_10 = ref()(1)(0);\
Chi_11 = ref()(1)(1);\
Chi_12 = ref()(1)(2);\
std::cout << std::endl << "DEBUG -- LOAD_CHI" << std::endl; \
std::cout << "Chi_00 -- " << Chi_00 << std::endl; \
std::cout << "Chi_01 -- " << Chi_01 << std::endl; \
std::cout << "Chi_02 -- " << Chi_02 << std::endl; \
std::cout << "Chi_10 -- " << Chi_10 << std::endl; \
std::cout << "Chi_11 -- " << Chi_11 << std::endl; \
std::cout << "Chi_12 -- " << Chi_12 << std::endl; \
}

// To splat or not to splat depends on the implementation
#define MULT_2SPIN(A)\
{auto & ref(U[sU](A)); \
Impl::loadLinkElement(U_00,ref()(0,0)); \
Impl::loadLinkElement(U_10,ref()(1,0)); \
Impl::loadLinkElement(U_20,ref()(2,0)); \
Impl::loadLinkElement(U_01,ref()(0,1)); \
Impl::loadLinkElement(U_11,ref()(1,1)); \
Impl::loadLinkElement(U_21,ref()(2,1)); \
UChi_00 = U_00*Chi_00;\
UChi_10 = U_00*Chi_10;\
UChi_01 = U_10*Chi_00;\
UChi_11 = U_10*Chi_10;\
UChi_02 = U_20*Chi_00;\
UChi_12 = U_20*Chi_10;\
UChi_00+= U_01*Chi_01;\
UChi_10+= U_01*Chi_11;\
UChi_01+= U_11*Chi_01;\
UChi_11+= U_11*Chi_11;\
UChi_02+= U_21*Chi_01;\
UChi_12+= U_21*Chi_11;\
Impl::loadLinkElement(U_00,ref()(0,2)); \
Impl::loadLinkElement(U_10,ref()(1,2)); \
Impl::loadLinkElement(U_20,ref()(2,2)); \
UChi_00+= U_00*Chi_02;\
UChi_10+= U_00*Chi_12;\
UChi_01+= U_10*Chi_02;\
UChi_11+= U_10*Chi_12;\
UChi_02+= U_20*Chi_02;\
UChi_12+= U_20*Chi_12;\
std::cout << std::endl << "DEBUG -- MULT_2SPIN" << std::endl; \
std::cout << "UChi_00 -- " << UChi_00 << std::endl; \
std::cout << "UChi_01 -- " << UChi_01 << std::endl; \
std::cout << "UChi_02 -- " << UChi_02 << std::endl; \
std::cout << "UChi_10 -- " << UChi_10 << std::endl; \
std::cout << "UChi_11 -- " << UChi_11 << std::endl; \
std::cout << "UChi_12 -- " << UChi_12 << std::endl; \
}
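
A sketch of the "splat" alternative the comment above alludes to (toy SIMD vector, not Grid's Impl::loadLinkElement): broadcasting one scalar link element into every lane, as opposed to loading distinct per-lane values from a vectorised gauge field.

#include <array>
#include <cstddef>
template<std::size_t N>
static std::array<double, N> splat(double x)
{
  std::array<double, N> lanes{};
  lanes.fill(x); // every SIMD lane sees the same link element
  return lanes;
}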


#define PERMUTE_DIR(dir) \
std::cout << std::endl << "DEBUG -- PERM PRE" << std::endl; \
std::cout << "Chi_00 -- " << Chi_00 << std::endl; \
std::cout << "Chi_01 -- " << Chi_01 << std::endl; \
std::cout << "Chi_02 -- " << Chi_02 << std::endl; \
std::cout << "Chi_10 -- " << Chi_10 << std::endl; \
std::cout << "Chi_11 -- " << Chi_11 << std::endl; \
std::cout << "Chi_12 -- " << Chi_12 << std::endl; \
permute##dir(Chi_00,Chi_00);\
permute##dir(Chi_01,Chi_01);\
permute##dir(Chi_02,Chi_02);\
permute##dir(Chi_10,Chi_10);\
permute##dir(Chi_11,Chi_11);\
permute##dir(Chi_12,Chi_12);\
std::cout << std::endl << "DEBUG -- PERM POST" << std::endl; \
std::cout << "Chi_00 -- " << Chi_00 << std::endl; \
std::cout << "Chi_01 -- " << Chi_01 << std::endl; \
std::cout << "Chi_02 -- " << Chi_02 << std::endl; \
std::cout << "Chi_10 -- " << Chi_10 << std::endl; \
std::cout << "Chi_11 -- " << Chi_11 << std::endl; \
std::cout << "Chi_12 -- " << Chi_12 << std::endl;

// hspin(0)=fspin(0)+timesI(fspin(3));
// hspin(1)=fspin(1)+timesI(fspin(2));
#define XP_PROJ \
Chi_00 = Chimu_00+timesI(Chimu_30);\
Chi_01 = Chimu_01+timesI(Chimu_31);\
Chi_02 = Chimu_02+timesI(Chimu_32);\
Chi_10 = Chimu_10+timesI(Chimu_20);\
Chi_11 = Chimu_11+timesI(Chimu_21);\
Chi_12 = Chimu_12+timesI(Chimu_22);\
std::cout << std::endl << "DEBUG -- XP_PROJ" << std::endl; \
std::cout << "Chi_00 -- " << Chi_00 << std::endl; \
std::cout << "Chi_01 -- " << Chi_01 << std::endl; \
std::cout << "Chi_02 -- " << Chi_02 << std::endl; \
std::cout << "Chi_10 -- " << Chi_10 << std::endl; \
std::cout << "Chi_11 -- " << Chi_11 << std::endl; \
std::cout << "Chi_12 -- " << Chi_12 << std::endl;
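
In index form (colour index c), the six assignments of XP_PROJ restate the hspin comments above:

\[
\chi_{0c} = \psi_{0c} + i\,\psi_{3c}, \qquad
\chi_{1c} = \psi_{1c} + i\,\psi_{2c}, \qquad c = 0,1,2,
\]

i.e. the two independent spin rows of the half spinor; identifying this with the upper half of a (1 + \gamma_x) projection assumes Grid's gamma-basis convention.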

#define YP_PROJ \
Chi_00 = Chimu_00-Chimu_30;\
Chi_01 = Chimu_01-Chimu_31;\
Chi_02 = Chimu_02-Chimu_32;\
Chi_10 = Chimu_10+Chimu_20;\
Chi_11 = Chimu_11+Chimu_21;\
Chi_12 = Chimu_12+Chimu_22;\
std::cout << std::endl << "DEBUG -- YP_PROJ" << std::endl; \
std::cout << "Chi_00 -- " << Chi_00 << std::endl; \
std::cout << "Chi_01 -- " << Chi_01 << std::endl; \
std::cout << "Chi_02 -- " << Chi_02 << std::endl; \
std::cout << "Chi_10 -- " << Chi_10 << std::endl; \
std::cout << "Chi_11 -- " << Chi_11 << std::endl; \
std::cout << "Chi_12 -- " << Chi_12 << std::endl;

#define ZP_PROJ \
Chi_00 = Chimu_00+timesI(Chimu_20); \
Chi_01 = Chimu_01+timesI(Chimu_21); \
Chi_02 = Chimu_02+timesI(Chimu_22); \
Chi_10 = Chimu_10-timesI(Chimu_30); \
Chi_11 = Chimu_11-timesI(Chimu_31); \
Chi_12 = Chimu_12-timesI(Chimu_32);\
std::cout << std::endl << "DEBUG -- ZP_PROJ" << std::endl; \
std::cout << "Chi_00 -- " << Chi_00 << std::endl; \
std::cout << "Chi_01 -- " << Chi_01 << std::endl; \
std::cout << "Chi_02 -- " << Chi_02 << std::endl; \
std::cout << "Chi_10 -- " << Chi_10 << std::endl; \
std::cout << "Chi_11 -- " << Chi_11 << std::endl; \
std::cout << "Chi_12 -- " << Chi_12 << std::endl;

#define TP_PROJ \
Chi_00 = Chimu_00+Chimu_20; \
Chi_01 = Chimu_01+Chimu_21; \
Chi_02 = Chimu_02+Chimu_22; \
Chi_10 = Chimu_10+Chimu_30; \
Chi_11 = Chimu_11+Chimu_31; \
Chi_12 = Chimu_12+Chimu_32;\
std::cout << std::endl << "DEBUG -- TP_PROJ" << std::endl; \
std::cout << "Chi_00 -- " << Chi_00 << std::endl; \
std::cout << "Chi_01 -- " << Chi_01 << std::endl; \
std::cout << "Chi_02 -- " << Chi_02 << std::endl; \
std::cout << "Chi_10 -- " << Chi_10 << std::endl; \
std::cout << "Chi_11 -- " << Chi_11 << std::endl; \
std::cout << "Chi_12 -- " << Chi_12 << std::endl;


// hspin(0)=fspin(0)-timesI(fspin(3));
// hspin(1)=fspin(1)-timesI(fspin(2));
#define XM_PROJ \
Chi_00 = Chimu_00-timesI(Chimu_30);\
Chi_01 = Chimu_01-timesI(Chimu_31);\
Chi_02 = Chimu_02-timesI(Chimu_32);\
Chi_10 = Chimu_10-timesI(Chimu_20);\
Chi_11 = Chimu_11-timesI(Chimu_21);\
Chi_12 = Chimu_12-timesI(Chimu_22);\
std::cout << std::endl << "DEBUG -- XM_PROJ" << std::endl; \
std::cout << "Chi_00 -- " << Chi_00 << std::endl; \
std::cout << "Chi_01 -- " << Chi_01 << std::endl; \
std::cout << "Chi_02 -- " << Chi_02 << std::endl; \
std::cout << "Chi_10 -- " << Chi_10 << std::endl; \
std::cout << "Chi_11 -- " << Chi_11 << std::endl; \
std::cout << "Chi_12 -- " << Chi_12 << std::endl;

#define YM_PROJ \
Chi_00 = Chimu_00+Chimu_30;\
Chi_01 = Chimu_01+Chimu_31;\
Chi_02 = Chimu_02+Chimu_32;\
Chi_10 = Chimu_10-Chimu_20;\
Chi_11 = Chimu_11-Chimu_21;\
Chi_12 = Chimu_12-Chimu_22;\
std::cout << std::endl << "DEBUG -- YM_PROJ" << std::endl; \
std::cout << "Chi_00 -- " << Chi_00 << std::endl; \
std::cout << "Chi_01 -- " << Chi_01 << std::endl; \
std::cout << "Chi_02 -- " << Chi_02 << std::endl; \
std::cout << "Chi_10 -- " << Chi_10 << std::endl; \
std::cout << "Chi_11 -- " << Chi_11 << std::endl; \
std::cout << "Chi_12 -- " << Chi_12 << std::endl;

#define ZM_PROJ \
Chi_00 = Chimu_00-timesI(Chimu_20); \
Chi_01 = Chimu_01-timesI(Chimu_21); \
Chi_02 = Chimu_02-timesI(Chimu_22); \
Chi_10 = Chimu_10+timesI(Chimu_30); \
Chi_11 = Chimu_11+timesI(Chimu_31); \
Chi_12 = Chimu_12+timesI(Chimu_32);\
std::cout << std::endl << "DEBUG -- ZM_PROJ" << std::endl; \
std::cout << "Chi_00 -- " << Chi_00 << std::endl; \
std::cout << "Chi_01 -- " << Chi_01 << std::endl; \
std::cout << "Chi_02 -- " << Chi_02 << std::endl; \
std::cout << "Chi_10 -- " << Chi_10 << std::endl; \
std::cout << "Chi_11 -- " << Chi_11 << std::endl; \
std::cout << "Chi_12 -- " << Chi_12 << std::endl;

#define TM_PROJ \
Chi_00 = Chimu_00-Chimu_20; \
Chi_01 = Chimu_01-Chimu_21; \
Chi_02 = Chimu_02-Chimu_22; \
Chi_10 = Chimu_10-Chimu_30; \
Chi_11 = Chimu_11-Chimu_31; \
Chi_12 = Chimu_12-Chimu_32;\
std::cout << std::endl << "DEBUG -- TM_PROJ" << std::endl; \
std::cout << "Chi_00 -- " << Chi_00 << std::endl; \
std::cout << "Chi_01 -- " << Chi_01 << std::endl; \
std::cout << "Chi_02 -- " << Chi_02 << std::endl; \
std::cout << "Chi_10 -- " << Chi_10 << std::endl; \
std::cout << "Chi_11 -- " << Chi_11 << std::endl; \
std::cout << "Chi_12 -- " << Chi_12 << std::endl;

// fspin(0)=hspin(0);
// fspin(1)=hspin(1);
// fspin(2)=timesMinusI(hspin(1));
// fspin(3)=timesMinusI(hspin(0));
#define XP_RECON\
result_00 = UChi_00;\
result_01 = UChi_01;\
result_02 = UChi_02;\
result_10 = UChi_10;\
result_11 = UChi_11;\
result_12 = UChi_12;\
result_20 = timesMinusI(UChi_10);\
result_21 = timesMinusI(UChi_11);\
result_22 = timesMinusI(UChi_12);\
result_30 = timesMinusI(UChi_00);\
result_31 = timesMinusI(UChi_01);\
result_32 = timesMinusI(UChi_02);\
std::cout << std::endl << "DEBUG -- XP_RECON" << std::endl; \
std::cout << "result_00 -- " << result_00 << std::endl; \
std::cout << "result_01 -- " << result_01 << std::endl; \
std::cout << "result_02 -- " << result_02 << std::endl; \
std::cout << "result_10 -- " << result_10 << std::endl; \
std::cout << "result_11 -- " << result_11 << std::endl; \
std::cout << "result_12 -- " << result_12 << std::endl; \
std::cout << "result_20 -- " << result_20 << std::endl; \
std::cout << "result_21 -- " << result_21 << std::endl; \
std::cout << "result_22 -- " << result_22 << std::endl; \
std::cout << "result_30 -- " << result_30 << std::endl; \
std::cout << "result_31 -- " << result_31 << std::endl; \
std::cout << "result_32 -- " << result_32 << std::endl;

#define XP_RECON_ACCUM\
result_00+=UChi_00;\
result_01+=UChi_01;\
result_02+=UChi_02;\
result_10+=UChi_10;\
result_11+=UChi_11;\
result_12+=UChi_12;\
result_20-=timesI(UChi_10);\
result_21-=timesI(UChi_11);\
result_22-=timesI(UChi_12);\
result_30-=timesI(UChi_00);\
result_31-=timesI(UChi_01);\
result_32-=timesI(UChi_02);\
std::cout << std::endl << "DEBUG -- XP_RECON_ACCUM" << std::endl; \
std::cout << "result_00 -- " << result_00 << std::endl; \
std::cout << "result_01 -- " << result_01 << std::endl; \
std::cout << "result_02 -- " << result_02 << std::endl; \
std::cout << "result_10 -- " << result_10 << std::endl; \
std::cout << "result_11 -- " << result_11 << std::endl; \
std::cout << "result_12 -- " << result_12 << std::endl; \
std::cout << "result_20 -- " << result_20 << std::endl; \
std::cout << "result_21 -- " << result_21 << std::endl; \
std::cout << "result_22 -- " << result_22 << std::endl; \
std::cout << "result_30 -- " << result_30 << std::endl; \
std::cout << "result_31 -- " << result_31 << std::endl; \
std::cout << "result_32 -- " << result_32 << std::endl;

#define XM_RECON\
result_00 = UChi_00;\
result_01 = UChi_01;\
result_02 = UChi_02;\
result_10 = UChi_10;\
result_11 = UChi_11;\
result_12 = UChi_12;\
result_20 = timesI(UChi_10);\
result_21 = timesI(UChi_11);\
result_22 = timesI(UChi_12);\
result_30 = timesI(UChi_00);\
result_31 = timesI(UChi_01);\
result_32 = timesI(UChi_02);\
std::cout << std::endl << "DEBUG -- XM_RECON" << std::endl; \
std::cout << "result_00 -- " << result_00 << std::endl; \
std::cout << "result_01 -- " << result_01 << std::endl; \
std::cout << "result_02 -- " << result_02 << std::endl; \
std::cout << "result_10 -- " << result_10 << std::endl; \
std::cout << "result_11 -- " << result_11 << std::endl; \
std::cout << "result_12 -- " << result_12 << std::endl; \
std::cout << "result_20 -- " << result_20 << std::endl; \
std::cout << "result_21 -- " << result_21 << std::endl; \
std::cout << "result_22 -- " << result_22 << std::endl; \
std::cout << "result_30 -- " << result_30 << std::endl; \
std::cout << "result_31 -- " << result_31 << std::endl; \
std::cout << "result_32 -- " << result_32 << std::endl;

#define XM_RECON_ACCUM\
result_00+= UChi_00;\
result_01+= UChi_01;\
result_02+= UChi_02;\
result_10+= UChi_10;\
result_11+= UChi_11;\
result_12+= UChi_12;\
result_20+= timesI(UChi_10);\
result_21+= timesI(UChi_11);\
result_22+= timesI(UChi_12);\
result_30+= timesI(UChi_00);\
result_31+= timesI(UChi_01);\
result_32+= timesI(UChi_02);\
std::cout << std::endl << "DEBUG -- XM_RECON_ACCUM" << std::endl; \
std::cout << "result_00 -- " << result_00 << std::endl; \
std::cout << "result_01 -- " << result_01 << std::endl; \
std::cout << "result_02 -- " << result_02 << std::endl; \
std::cout << "result_10 -- " << result_10 << std::endl; \
std::cout << "result_11 -- " << result_11 << std::endl; \
std::cout << "result_12 -- " << result_12 << std::endl; \
std::cout << "result_20 -- " << result_20 << std::endl; \
std::cout << "result_21 -- " << result_21 << std::endl; \
std::cout << "result_22 -- " << result_22 << std::endl; \
std::cout << "result_30 -- " << result_30 << std::endl; \
std::cout << "result_31 -- " << result_31 << std::endl; \
std::cout << "result_32 -- " << result_32 << std::endl;

#define YP_RECON_ACCUM\
result_00+= UChi_00;\
result_01+= UChi_01;\
result_02+= UChi_02;\
result_10+= UChi_10;\
result_11+= UChi_11;\
result_12+= UChi_12;\
result_20+= UChi_10;\
result_21+= UChi_11;\
result_22+= UChi_12;\
result_30-= UChi_00;\
result_31-= UChi_01;\
result_32-= UChi_02;\
std::cout << std::endl << "DEBUG -- YP_RECON_ACCUM" << std::endl; \
std::cout << "result_00 -- " << result_00 << std::endl; \
std::cout << "result_01 -- " << result_01 << std::endl; \
std::cout << "result_02 -- " << result_02 << std::endl; \
std::cout << "result_10 -- " << result_10 << std::endl; \
std::cout << "result_11 -- " << result_11 << std::endl; \
std::cout << "result_12 -- " << result_12 << std::endl; \
std::cout << "result_20 -- " << result_20 << std::endl; \
std::cout << "result_21 -- " << result_21 << std::endl; \
std::cout << "result_22 -- " << result_22 << std::endl; \
std::cout << "result_30 -- " << result_30 << std::endl; \
std::cout << "result_31 -- " << result_31 << std::endl; \
std::cout << "result_32 -- " << result_32 << std::endl;

#define YM_RECON_ACCUM\
result_00+= UChi_00;\
result_01+= UChi_01;\
result_02+= UChi_02;\
result_10+= UChi_10;\
result_11+= UChi_11;\
result_12+= UChi_12;\
result_20-= UChi_10;\
result_21-= UChi_11;\
result_22-= UChi_12;\
result_30+= UChi_00;\
result_31+= UChi_01;\
result_32+= UChi_02;\
std::cout << std::endl << "DEBUG -- YM_RECON_ACCUM" << std::endl; \
std::cout << "result_00 -- " << result_00 << std::endl; \
std::cout << "result_01 -- " << result_01 << std::endl; \
std::cout << "result_02 -- " << result_02 << std::endl; \
std::cout << "result_10 -- " << result_10 << std::endl; \
std::cout << "result_11 -- " << result_11 << std::endl; \
std::cout << "result_12 -- " << result_12 << std::endl; \
std::cout << "result_20 -- " << result_20 << std::endl; \
std::cout << "result_21 -- " << result_21 << std::endl; \
std::cout << "result_22 -- " << result_22 << std::endl; \
std::cout << "result_30 -- " << result_30 << std::endl; \
std::cout << "result_31 -- " << result_31 << std::endl; \
std::cout << "result_32 -- " << result_32 << std::endl;

#define ZP_RECON_ACCUM\
result_00+= UChi_00;\
result_01+= UChi_01;\
result_02+= UChi_02;\
result_10+= UChi_10;\
result_11+= UChi_11;\
result_12+= UChi_12;\
result_20-= timesI(UChi_00); \
result_21-= timesI(UChi_01); \
result_22-= timesI(UChi_02); \
result_30+= timesI(UChi_10); \
result_31+= timesI(UChi_11); \
result_32+= timesI(UChi_12);\
std::cout << std::endl << "DEBUG -- ZP_RECON_ACCUM" << std::endl; \
std::cout << "result_00 -- " << result_00 << std::endl; \
std::cout << "result_01 -- " << result_01 << std::endl; \
std::cout << "result_02 -- " << result_02 << std::endl; \
std::cout << "result_10 -- " << result_10 << std::endl; \
std::cout << "result_11 -- " << result_11 << std::endl; \
std::cout << "result_12 -- " << result_12 << std::endl; \
std::cout << "result_20 -- " << result_20 << std::endl; \
std::cout << "result_21 -- " << result_21 << std::endl; \
std::cout << "result_22 -- " << result_22 << std::endl; \
std::cout << "result_30 -- " << result_30 << std::endl; \
std::cout << "result_31 -- " << result_31 << std::endl; \
std::cout << "result_32 -- " << result_32 << std::endl;

#define ZM_RECON_ACCUM\
result_00+= UChi_00;\
result_01+= UChi_01;\
result_02+= UChi_02;\
result_10+= UChi_10;\
result_11+= UChi_11;\
result_12+= UChi_12;\
result_20+= timesI(UChi_00); \
result_21+= timesI(UChi_01); \
result_22+= timesI(UChi_02); \
result_30-= timesI(UChi_10); \
result_31-= timesI(UChi_11); \
result_32-= timesI(UChi_12);\
std::cout << std::endl << "DEBUG -- ZM_RECON_ACCUM" << std::endl; \
std::cout << "result_00 -- " << result_00 << std::endl; \
std::cout << "result_01 -- " << result_01 << std::endl; \
std::cout << "result_02 -- " << result_02 << std::endl; \
std::cout << "result_10 -- " << result_10 << std::endl; \
std::cout << "result_11 -- " << result_11 << std::endl; \
std::cout << "result_12 -- " << result_12 << std::endl; \
std::cout << "result_20 -- " << result_20 << std::endl; \
std::cout << "result_21 -- " << result_21 << std::endl; \
std::cout << "result_22 -- " << result_22 << std::endl; \
std::cout << "result_30 -- " << result_30 << std::endl; \
std::cout << "result_31 -- " << result_31 << std::endl; \
std::cout << "result_32 -- " << result_32 << std::endl;

#define TP_RECON_ACCUM\
result_00+= UChi_00;\
result_01+= UChi_01;\
result_02+= UChi_02;\
result_10+= UChi_10;\
result_11+= UChi_11;\
result_12+= UChi_12;\
result_20+= UChi_00; \
result_21+= UChi_01; \
result_22+= UChi_02; \
result_30+= UChi_10; \
|
||||
result_31+= UChi_11; \
|
||||
result_32+= UChi_12;\
|
||||
std::cout << std::endl << "DEBUG -- TP_RECON_ACCUM" << std::endl; \
|
||||
std::cout << "result_00 -- " << result_00 << std::endl; \
|
||||
std::cout << "result_01 -- " << result_01 << std::endl; \
|
||||
std::cout << "result_02 -- " << result_02 << std::endl; \
|
||||
std::cout << "result_10 -- " << result_10 << std::endl; \
|
||||
std::cout << "result_11 -- " << result_11 << std::endl; \
|
||||
std::cout << "result_12 -- " << result_12 << std::endl; \
|
||||
std::cout << "result_20 -- " << result_20 << std::endl; \
|
||||
std::cout << "result_21 -- " << result_21 << std::endl; \
|
||||
std::cout << "result_22 -- " << result_22 << std::endl; \
|
||||
std::cout << "result_30 -- " << result_30 << std::endl; \
|
||||
std::cout << "result_31 -- " << result_31 << std::endl; \
|
||||
std::cout << "result_32 -- " << result_32 << std::endl;
|
||||
|
||||
#define TM_RECON_ACCUM\
|
||||
result_00+= UChi_00;\
|
||||
result_01+= UChi_01;\
|
||||
result_02+= UChi_02;\
|
||||
result_10+= UChi_10;\
|
||||
result_11+= UChi_11;\
|
||||
result_12+= UChi_12;\
|
||||
result_20-= UChi_00; \
|
||||
result_21-= UChi_01; \
|
||||
result_22-= UChi_02; \
|
||||
result_30-= UChi_10; \
|
||||
result_31-= UChi_11; \
|
||||
result_32-= UChi_12;\
|
||||
std::cout << std::endl << "DEBUG -- TM_RECON_ACCUM" << std::endl; \
|
||||
std::cout << "result_00 -- " << result_00 << std::endl; \
|
||||
std::cout << "result_01 -- " << result_01 << std::endl; \
|
||||
std::cout << "result_02 -- " << result_02 << std::endl; \
|
||||
std::cout << "result_10 -- " << result_10 << std::endl; \
|
||||
std::cout << "result_11 -- " << result_11 << std::endl; \
|
||||
std::cout << "result_12 -- " << result_12 << std::endl; \
|
||||
std::cout << "result_20 -- " << result_20 << std::endl; \
|
||||
std::cout << "result_21 -- " << result_21 << std::endl; \
|
||||
std::cout << "result_22 -- " << result_22 << std::endl; \
|
||||
std::cout << "result_30 -- " << result_30 << std::endl; \
|
||||
std::cout << "result_31 -- " << result_31 << std::endl; \
|
||||
std::cout << "result_32 -- " << result_32 << std::endl;
|
||||
|
||||
#define HAND_STENCIL_LEG(PROJ,PERM,DIR,RECON) \
|
||||
SE=st.GetEntry(ptype,DIR,ss); \
|
||||
offset = SE->_offset; \
|
||||
local = SE->_is_local; \
|
||||
perm = SE->_permute; \
|
||||
if ( local ) { \
|
||||
LOAD_CHIMU; \
|
||||
PROJ; \
|
||||
if ( perm) { \
|
||||
PERMUTE_DIR(PERM); \
|
||||
} \
|
||||
} else { \
|
||||
LOAD_CHI; \
|
||||
} \
|
||||
MULT_2SPIN(DIR); \
|
||||
RECON;
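// One "leg" of the hand-unrolled dslash: for a local neighbour the full
// spinor is loaded and spin projected (with a SIMD permute when the
// neighbour wraps the local volume); for an off-node neighbour the
// pre-projected half spinor comes from the comms buffer. The leg ends
// with the SU(3) multiply and the RECON accumulation into the result
// registers.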

#define HAND_STENCIL_LEG_INT(PROJ,PERM,DIR,RECON) \
SE=st.GetEntry(ptype,DIR,ss); \
offset = SE->_offset; \
local = SE->_is_local; \
perm = SE->_permute; \
if ( local ) { \
LOAD_CHIMU; \
PROJ; \
if ( perm) { \
PERMUTE_DIR(PERM); \
} \
} else if ( st.same_node[DIR] ) { \
LOAD_CHI; \
} \
if (local || st.same_node[DIR] ) { \
MULT_2SPIN(DIR); \
RECON; \
}

#define HAND_STENCIL_LEG_EXT(PROJ,PERM,DIR,RECON) \
SE=st.GetEntry(ptype,DIR,ss); \
offset = SE->_offset; \
if((!SE->_is_local)&&(!st.same_node[DIR]) ) { \
LOAD_CHI; \
MULT_2SPIN(DIR); \
RECON; \
nmu++; \
}
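// The INT/EXT pair splits the stencil so communication can overlap
// compute: _INT handles only legs whose data is already on this rank
// (local or same-node), while _EXT later accumulates only the legs that
// arrived over the network, counting them in nmu so HAND_RESULT_EXT can
// skip sites with no off-node contribution.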

#define HAND_RESULT(ss) \
{ \
SiteSpinor & ref (out[ss]); \
vstream(ref()(0)(0),result_00); \
vstream(ref()(0)(1),result_01); \
vstream(ref()(0)(2),result_02); \
vstream(ref()(1)(0),result_10); \
vstream(ref()(1)(1),result_11); \
vstream(ref()(1)(2),result_12); \
vstream(ref()(2)(0),result_20); \
vstream(ref()(2)(1),result_21); \
vstream(ref()(2)(2),result_22); \
vstream(ref()(3)(0),result_30); \
vstream(ref()(3)(1),result_31); \
vstream(ref()(3)(2),result_32); \
std::cout << std::endl << "DEBUG -- RESULT" << std::endl; \
std::cout << "result_00 -- " << result_00 << std::endl; \
std::cout << "result_01 -- " << result_01 << std::endl; \
std::cout << "result_02 -- " << result_02 << std::endl; \
std::cout << "result_10 -- " << result_10 << std::endl; \
std::cout << "result_11 -- " << result_11 << std::endl; \
std::cout << "result_12 -- " << result_12 << std::endl; \
std::cout << "result_20 -- " << result_20 << std::endl; \
std::cout << "result_21 -- " << result_21 << std::endl; \
std::cout << "result_22 -- " << result_22 << std::endl; \
std::cout << "result_30 -- " << result_30 << std::endl; \
std::cout << "result_31 -- " << result_31 << std::endl; \
std::cout << "result_32 -- " << result_32 << std::endl;\
}

#define HAND_RESULT_EXT(ss) \
if (nmu){ \
SiteSpinor & ref (out[ss]); \
ref()(0)(0)+=result_00; \
ref()(0)(1)+=result_01; \
ref()(0)(2)+=result_02; \
ref()(1)(0)+=result_10; \
ref()(1)(1)+=result_11; \
ref()(1)(2)+=result_12; \
ref()(2)(0)+=result_20; \
ref()(2)(1)+=result_21; \
ref()(2)(2)+=result_22; \
ref()(3)(0)+=result_30; \
ref()(3)(1)+=result_31; \
ref()(3)(2)+=result_32; \
std::cout << std::endl << "DEBUG -- RESULT EXT" << std::endl; \
std::cout << "result_00 -- " << result_00 << std::endl; \
std::cout << "result_01 -- " << result_01 << std::endl; \
std::cout << "result_02 -- " << result_02 << std::endl; \
std::cout << "result_10 -- " << result_10 << std::endl; \
std::cout << "result_11 -- " << result_11 << std::endl; \
std::cout << "result_12 -- " << result_12 << std::endl; \
std::cout << "result_20 -- " << result_20 << std::endl; \
std::cout << "result_21 -- " << result_21 << std::endl; \
std::cout << "result_22 -- " << result_22 << std::endl; \
std::cout << "result_30 -- " << result_30 << std::endl; \
std::cout << "result_31 -- " << result_31 << std::endl; \
std::cout << "result_32 -- " << result_32 << std::endl;\
}

#define HAND_DECLARATIONS(a) \
Simd result_00; \
Simd result_01; \
Simd result_02; \
Simd result_10; \
Simd result_11; \
Simd result_12; \
Simd result_20; \
Simd result_21; \
Simd result_22; \
Simd result_30; \
Simd result_31; \
Simd result_32; \
Simd Chi_00; \
Simd Chi_01; \
Simd Chi_02; \
Simd Chi_10; \
Simd Chi_11; \
Simd Chi_12; \
Simd UChi_00; \
Simd UChi_01; \
Simd UChi_02; \
Simd UChi_10; \
Simd UChi_11; \
Simd UChi_12; \
Simd U_00; \
Simd U_10; \
Simd U_20; \
Simd U_01; \
Simd U_11; \
Simd U_21;\
Simd debugreg;\
svbool_t pg1; \
pg1 = svptrue_b64(); \
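// debugreg and the SVE predicate pg1 (svptrue_b64) appear to be
// A64FX/SVE debug helpers; they tie this debug variant of the file to
// an SVE-capable compiler.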

#define ZERO_RESULT \
result_00=Zero(); \
result_01=Zero(); \
result_02=Zero(); \
result_10=Zero(); \
result_11=Zero(); \
result_12=Zero(); \
result_20=Zero(); \
result_21=Zero(); \
result_22=Zero(); \
result_30=Zero(); \
result_31=Zero(); \
result_32=Zero();

#define Chimu_00 Chi_00
#define Chimu_01 Chi_01
#define Chimu_02 Chi_02
#define Chimu_10 Chi_10
#define Chimu_11 Chi_11
#define Chimu_12 Chi_12
#define Chimu_20 UChi_00
#define Chimu_21 UChi_01
#define Chimu_22 UChi_02
#define Chimu_30 UChi_10
#define Chimu_31 UChi_11
#define Chimu_32 UChi_12
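// The Chimu_2x/Chimu_3x names alias the UChi registers: the lower two
// spin components of the loaded spinor are fully consumed by the
// projection before UChi is written, so the same registers can be
// reused to keep register pressure down.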

NAMESPACE_BEGIN(Grid);

template<class Impl> void
WilsonKernels<Impl>::HandDhopSite(StencilView &st, DoubledGaugeFieldView &U,SiteHalfSpinor *buf,
int ss,int sU,const FermionFieldView &in, FermionFieldView &out)
{
// T==0, Z==1, Y==2, X==3 expect 1,2,2,2 simd layout etc...
typedef typename Simd::scalar_type S;
typedef typename Simd::vector_type V;

HAND_DECLARATIONS(ignore);

int offset,local,perm, ptype;
StencilEntry *SE;

HAND_STENCIL_LEG(XM_PROJ,3,Xp,XM_RECON);
HAND_STENCIL_LEG(YM_PROJ,2,Yp,YM_RECON_ACCUM);
HAND_STENCIL_LEG(ZM_PROJ,1,Zp,ZM_RECON_ACCUM);
HAND_STENCIL_LEG(TM_PROJ,0,Tp,TM_RECON_ACCUM);
HAND_STENCIL_LEG(XP_PROJ,3,Xm,XP_RECON_ACCUM);
HAND_STENCIL_LEG(YP_PROJ,2,Ym,YP_RECON_ACCUM);
HAND_STENCIL_LEG(ZP_PROJ,1,Zm,ZP_RECON_ACCUM);
HAND_STENCIL_LEG(TP_PROJ,0,Tm,TP_RECON_ACCUM);
HAND_RESULT(ss);
}

template<class Impl>
void WilsonKernels<Impl>::HandDhopSiteDag(StencilView &st,DoubledGaugeFieldView &U,SiteHalfSpinor *buf,
int ss,int sU,const FermionFieldView &in, FermionFieldView &out)
{
typedef typename Simd::scalar_type S;
typedef typename Simd::vector_type V;

HAND_DECLARATIONS(ignore);

StencilEntry *SE;
int offset,local,perm, ptype;

HAND_STENCIL_LEG(XP_PROJ,3,Xp,XP_RECON);
HAND_STENCIL_LEG(YP_PROJ,2,Yp,YP_RECON_ACCUM);
HAND_STENCIL_LEG(ZP_PROJ,1,Zp,ZP_RECON_ACCUM);
HAND_STENCIL_LEG(TP_PROJ,0,Tp,TP_RECON_ACCUM);
HAND_STENCIL_LEG(XM_PROJ,3,Xm,XM_RECON_ACCUM);
HAND_STENCIL_LEG(YM_PROJ,2,Ym,YM_RECON_ACCUM);
HAND_STENCIL_LEG(ZM_PROJ,1,Zm,ZM_RECON_ACCUM);
HAND_STENCIL_LEG(TM_PROJ,0,Tm,TM_RECON_ACCUM);
HAND_RESULT(ss);
}

template<class Impl> void
WilsonKernels<Impl>::HandDhopSiteInt(StencilView &st,DoubledGaugeFieldView &U,SiteHalfSpinor *buf,
int ss,int sU,const FermionFieldView &in, FermionFieldView &out)
{
// T==0, Z==1, Y==2, X==3 expect 1,2,2,2 simd layout etc...
typedef typename Simd::scalar_type S;
typedef typename Simd::vector_type V;

HAND_DECLARATIONS(ignore);

int offset,local,perm, ptype;
StencilEntry *SE;
ZERO_RESULT;
HAND_STENCIL_LEG_INT(XM_PROJ,3,Xp,XM_RECON_ACCUM);
HAND_STENCIL_LEG_INT(YM_PROJ,2,Yp,YM_RECON_ACCUM);
HAND_STENCIL_LEG_INT(ZM_PROJ,1,Zp,ZM_RECON_ACCUM);
HAND_STENCIL_LEG_INT(TM_PROJ,0,Tp,TM_RECON_ACCUM);
HAND_STENCIL_LEG_INT(XP_PROJ,3,Xm,XP_RECON_ACCUM);
HAND_STENCIL_LEG_INT(YP_PROJ,2,Ym,YP_RECON_ACCUM);
HAND_STENCIL_LEG_INT(ZP_PROJ,1,Zm,ZP_RECON_ACCUM);
HAND_STENCIL_LEG_INT(TP_PROJ,0,Tm,TP_RECON_ACCUM);
HAND_RESULT(ss);
}

template<class Impl>
void WilsonKernels<Impl>::HandDhopSiteDagInt(StencilView &st,DoubledGaugeFieldView &U,SiteHalfSpinor *buf,
int ss,int sU,const FermionFieldView &in, FermionFieldView &out)
{
typedef typename Simd::scalar_type S;
typedef typename Simd::vector_type V;

HAND_DECLARATIONS(ignore);

StencilEntry *SE;
int offset,local,perm, ptype;
ZERO_RESULT;
HAND_STENCIL_LEG_INT(XP_PROJ,3,Xp,XP_RECON_ACCUM);
HAND_STENCIL_LEG_INT(YP_PROJ,2,Yp,YP_RECON_ACCUM);
HAND_STENCIL_LEG_INT(ZP_PROJ,1,Zp,ZP_RECON_ACCUM);
HAND_STENCIL_LEG_INT(TP_PROJ,0,Tp,TP_RECON_ACCUM);
HAND_STENCIL_LEG_INT(XM_PROJ,3,Xm,XM_RECON_ACCUM);
HAND_STENCIL_LEG_INT(YM_PROJ,2,Ym,YM_RECON_ACCUM);
HAND_STENCIL_LEG_INT(ZM_PROJ,1,Zm,ZM_RECON_ACCUM);
HAND_STENCIL_LEG_INT(TM_PROJ,0,Tm,TM_RECON_ACCUM);
HAND_RESULT(ss);
}

template<class Impl> void
WilsonKernels<Impl>::HandDhopSiteExt(StencilView &st,DoubledGaugeFieldView &U,SiteHalfSpinor *buf,
int ss,int sU,const FermionFieldView &in, FermionFieldView &out)
{
// T==0, Z==1, Y==2, X==3 expect 1,2,2,2 simd layout etc...
typedef typename Simd::scalar_type S;
typedef typename Simd::vector_type V;

HAND_DECLARATIONS(ignore);

int offset, ptype;
StencilEntry *SE;
int nmu=0;
ZERO_RESULT;
HAND_STENCIL_LEG_EXT(XM_PROJ,3,Xp,XM_RECON_ACCUM);
HAND_STENCIL_LEG_EXT(YM_PROJ,2,Yp,YM_RECON_ACCUM);
HAND_STENCIL_LEG_EXT(ZM_PROJ,1,Zp,ZM_RECON_ACCUM);
HAND_STENCIL_LEG_EXT(TM_PROJ,0,Tp,TM_RECON_ACCUM);
HAND_STENCIL_LEG_EXT(XP_PROJ,3,Xm,XP_RECON_ACCUM);
HAND_STENCIL_LEG_EXT(YP_PROJ,2,Ym,YP_RECON_ACCUM);
HAND_STENCIL_LEG_EXT(ZP_PROJ,1,Zm,ZP_RECON_ACCUM);
HAND_STENCIL_LEG_EXT(TP_PROJ,0,Tm,TP_RECON_ACCUM);
HAND_RESULT_EXT(ss);
}

template<class Impl>
void WilsonKernels<Impl>::HandDhopSiteDagExt(StencilView &st,DoubledGaugeFieldView &U,SiteHalfSpinor *buf,
int ss,int sU,const FermionFieldView &in, FermionFieldView &out)
{
typedef typename Simd::scalar_type S;
typedef typename Simd::vector_type V;

HAND_DECLARATIONS(ignore);

StencilEntry *SE;
int offset, ptype;
int nmu=0;
ZERO_RESULT;
HAND_STENCIL_LEG_EXT(XP_PROJ,3,Xp,XP_RECON_ACCUM);
HAND_STENCIL_LEG_EXT(YP_PROJ,2,Yp,YP_RECON_ACCUM);
HAND_STENCIL_LEG_EXT(ZP_PROJ,1,Zp,ZP_RECON_ACCUM);
HAND_STENCIL_LEG_EXT(TP_PROJ,0,Tp,TP_RECON_ACCUM);
HAND_STENCIL_LEG_EXT(XM_PROJ,3,Xm,XM_RECON_ACCUM);
HAND_STENCIL_LEG_EXT(YM_PROJ,2,Ym,YM_RECON_ACCUM);
HAND_STENCIL_LEG_EXT(ZM_PROJ,1,Zm,ZM_RECON_ACCUM);
HAND_STENCIL_LEG_EXT(TM_PROJ,0,Tm,TM_RECON_ACCUM);
HAND_RESULT_EXT(ss);
}

////////////// Wilson ; uses this implementation /////////////////////

NAMESPACE_END(Grid);
#undef LOAD_CHIMU
#undef LOAD_CHI
#undef MULT_2SPIN
#undef PERMUTE_DIR
#undef XP_PROJ
#undef YP_PROJ
#undef ZP_PROJ
#undef TP_PROJ
#undef XM_PROJ
#undef YM_PROJ
#undef ZM_PROJ
#undef TM_PROJ
#undef XP_RECON
#undef XP_RECON_ACCUM
#undef XM_RECON
#undef XM_RECON_ACCUM
#undef YP_RECON_ACCUM
#undef YM_RECON_ACCUM
#undef ZP_RECON_ACCUM
#undef ZM_RECON_ACCUM
#undef TP_RECON_ACCUM
#undef TM_RECON_ACCUM
#undef ZERO_RESULT
#undef Chimu_00
#undef Chimu_01
#undef Chimu_02
#undef Chimu_10
#undef Chimu_11
#undef Chimu_12
#undef Chimu_20
#undef Chimu_21
#undef Chimu_22
#undef Chimu_30
#undef Chimu_31
#undef Chimu_32
#undef HAND_STENCIL_LEG
#undef HAND_STENCIL_LEG_INT
#undef HAND_STENCIL_LEG_EXT
#undef HAND_RESULT
#undef HAND_RESULT_INT
#undef HAND_RESULT_EXT
@@ -0,0 +1,598 @@

/*************************************************************************************

Grid physics library, www.github.com/paboyle/Grid

Source file: ./lib/qcd/action/fermion/WilsonKernelsHand.cc

Copyright (C) 2015

Author: Peter Boyle <paboyle@ph.ed.ac.uk>
Author: paboyle <paboyle@ph.ed.ac.uk>

This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.

This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.

See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */

#pragma once

#include <Grid/qcd/action/fermion/FermionCore.h>

#undef LOAD_CHIMU
#undef LOAD_CHI
#undef MULT_2SPIN
#undef PERMUTE_DIR
#undef XP_PROJ
#undef YP_PROJ
#undef ZP_PROJ
#undef TP_PROJ
#undef XM_PROJ
#undef YM_PROJ
#undef ZM_PROJ
#undef TM_PROJ
#undef XP_RECON
#undef XP_RECON_ACCUM
#undef XM_RECON
#undef XM_RECON_ACCUM
#undef YP_RECON_ACCUM
#undef YM_RECON_ACCUM
#undef ZP_RECON_ACCUM
#undef ZM_RECON_ACCUM
#undef TP_RECON_ACCUM
#undef TM_RECON_ACCUM
#undef ZERO_RESULT
#undef Chimu_00
#undef Chimu_01
#undef Chimu_02
#undef Chimu_10
#undef Chimu_11
#undef Chimu_12
#undef Chimu_20
#undef Chimu_21
#undef Chimu_22
#undef Chimu_30
#undef Chimu_31
#undef Chimu_32
#undef HAND_STENCIL_LEG
#undef HAND_STENCIL_LEG_INT
#undef HAND_STENCIL_LEG_EXT
#undef HAND_RESULT
#undef HAND_RESULT_INT
#undef HAND_RESULT_EXT

#define REGISTER

#ifdef GRID_SIMT
#define LOAD_CHIMU(ptype) \
{const SiteSpinor & ref (in[offset]); \
Chimu_00=coalescedReadPermute<ptype>(ref()(0)(0),perm); \
Chimu_01=coalescedReadPermute<ptype>(ref()(0)(1),perm); \
Chimu_02=coalescedReadPermute<ptype>(ref()(0)(2),perm); \
Chimu_10=coalescedReadPermute<ptype>(ref()(1)(0),perm); \
Chimu_11=coalescedReadPermute<ptype>(ref()(1)(1),perm); \
Chimu_12=coalescedReadPermute<ptype>(ref()(1)(2),perm); \
Chimu_20=coalescedReadPermute<ptype>(ref()(2)(0),perm); \
Chimu_21=coalescedReadPermute<ptype>(ref()(2)(1),perm); \
Chimu_22=coalescedReadPermute<ptype>(ref()(2)(2),perm); \
Chimu_30=coalescedReadPermute<ptype>(ref()(3)(0),perm); \
Chimu_31=coalescedReadPermute<ptype>(ref()(3)(1),perm); \
Chimu_32=coalescedReadPermute<ptype>(ref()(3)(2),perm); }

#define PERMUTE_DIR(dir) ;
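// Under GRID_SIMT the lane permute is folded into coalescedReadPermute
// above, so PERMUTE_DIR degenerates to a no-op here.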
#else
#define LOAD_CHIMU(ptype) \
{const SiteSpinor & ref (in[offset]); \
Chimu_00=coalescedRead(ref()(0)(0)); \
Chimu_01=coalescedRead(ref()(0)(1)); \
Chimu_02=coalescedRead(ref()(0)(2)); \
Chimu_10=coalescedRead(ref()(1)(0)); \
Chimu_11=coalescedRead(ref()(1)(1)); \
Chimu_12=coalescedRead(ref()(1)(2)); \
Chimu_20=coalescedRead(ref()(2)(0)); \
Chimu_21=coalescedRead(ref()(2)(1)); \
Chimu_22=coalescedRead(ref()(2)(2)); \
Chimu_30=coalescedRead(ref()(3)(0)); \
Chimu_31=coalescedRead(ref()(3)(1)); \
Chimu_32=coalescedRead(ref()(3)(2)); }

#define PERMUTE_DIR(dir) \
permute##dir(Chi_00,Chi_00); \
permute##dir(Chi_01,Chi_01);\
permute##dir(Chi_02,Chi_02);\
permute##dir(Chi_10,Chi_10); \
permute##dir(Chi_11,Chi_11);\
permute##dir(Chi_12,Chi_12);
#endif

#define MULT_2SPIN(A)\
{auto & ref(U[sU](A)); \
U_00=coalescedRead(ref()(0,0)); \
U_10=coalescedRead(ref()(1,0)); \
U_20=coalescedRead(ref()(2,0)); \
U_01=coalescedRead(ref()(0,1)); \
U_11=coalescedRead(ref()(1,1)); \
U_21=coalescedRead(ref()(2,1)); \
UChi_00 = U_00*Chi_00; \
UChi_10 = U_00*Chi_10; \
UChi_01 = U_10*Chi_00; \
UChi_11 = U_10*Chi_10; \
UChi_02 = U_20*Chi_00; \
UChi_12 = U_20*Chi_10; \
UChi_00+= U_01*Chi_01; \
UChi_10+= U_01*Chi_11; \
UChi_01+= U_11*Chi_01; \
UChi_11+= U_11*Chi_11; \
UChi_02+= U_21*Chi_01; \
UChi_12+= U_21*Chi_11; \
U_00=coalescedRead(ref()(0,2)); \
U_10=coalescedRead(ref()(1,2)); \
U_20=coalescedRead(ref()(2,2)); \
UChi_00+= U_00*Chi_02; \
UChi_10+= U_00*Chi_12; \
UChi_01+= U_10*Chi_02; \
UChi_11+= U_10*Chi_12; \
UChi_02+= U_20*Chi_02; \
UChi_12+= U_20*Chi_12;}
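// A sketch of the algebra MULT_2SPIN implements (index notation, not
// Grid API): the SU(3) link acts identically on both spin rows of the
// half spinor,
//
//   $(U\chi)^{a}_{s} = \sum_{b=0}^{2} U^{ab}\,\chi^{b}_{s}, \qquad s \in \{0,1\}$
//
// unrolled with the third colour column folded in last so the U_x0
// registers can be recycled.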

#define LOAD_CHI \
{const SiteHalfSpinor &ref(buf[offset]); \
Chi_00 = coalescedRead(ref()(0)(0)); \
Chi_01 = coalescedRead(ref()(0)(1)); \
Chi_02 = coalescedRead(ref()(0)(2)); \
Chi_10 = coalescedRead(ref()(1)(0)); \
Chi_11 = coalescedRead(ref()(1)(1)); \
Chi_12 = coalescedRead(ref()(1)(2));}

// hspin(0)=fspin(0)+timesI(fspin(3));
// hspin(1)=fspin(1)+timesI(fspin(2));
#define XP_PROJ \
Chi_00 = Chimu_00+timesI(Chimu_30);\
Chi_01 = Chimu_01+timesI(Chimu_31);\
Chi_02 = Chimu_02+timesI(Chimu_32);\
Chi_10 = Chimu_10+timesI(Chimu_20);\
Chi_11 = Chimu_11+timesI(Chimu_21);\
Chi_12 = Chimu_12+timesI(Chimu_22);
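// In equation form (a sketch; the 1/2 in $P_{\pm\mu}=\tfrac{1}{2}(1\pm\gamma_\mu)$
// is conventionally absorbed so only the sums appear): for XP, per
// colour component,
//   $\chi_0 = \psi_0 + i\,\psi_3, \qquad \chi_1 = \psi_1 + i\,\psi_2$,
// matching the hspin/fspin comment above.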

#define YP_PROJ \
Chi_00 = Chimu_00-Chimu_30;\
Chi_01 = Chimu_01-Chimu_31;\
Chi_02 = Chimu_02-Chimu_32;\
Chi_10 = Chimu_10+Chimu_20;\
Chi_11 = Chimu_11+Chimu_21;\
Chi_12 = Chimu_12+Chimu_22;

#define ZP_PROJ \
Chi_00 = Chimu_00+timesI(Chimu_20); \
Chi_01 = Chimu_01+timesI(Chimu_21); \
Chi_02 = Chimu_02+timesI(Chimu_22); \
Chi_10 = Chimu_10-timesI(Chimu_30); \
Chi_11 = Chimu_11-timesI(Chimu_31); \
Chi_12 = Chimu_12-timesI(Chimu_32);

#define TP_PROJ \
Chi_00 = Chimu_00+Chimu_20; \
Chi_01 = Chimu_01+Chimu_21; \
Chi_02 = Chimu_02+Chimu_22; \
Chi_10 = Chimu_10+Chimu_30; \
Chi_11 = Chimu_11+Chimu_31; \
Chi_12 = Chimu_12+Chimu_32;

// hspin(0)=fspin(0)-timesI(fspin(3));
// hspin(1)=fspin(1)-timesI(fspin(2));
#define XM_PROJ \
Chi_00 = Chimu_00-timesI(Chimu_30);\
Chi_01 = Chimu_01-timesI(Chimu_31);\
Chi_02 = Chimu_02-timesI(Chimu_32);\
Chi_10 = Chimu_10-timesI(Chimu_20);\
Chi_11 = Chimu_11-timesI(Chimu_21);\
Chi_12 = Chimu_12-timesI(Chimu_22);

#define YM_PROJ \
Chi_00 = Chimu_00+Chimu_30;\
Chi_01 = Chimu_01+Chimu_31;\
Chi_02 = Chimu_02+Chimu_32;\
Chi_10 = Chimu_10-Chimu_20;\
Chi_11 = Chimu_11-Chimu_21;\
Chi_12 = Chimu_12-Chimu_22;

#define ZM_PROJ \
Chi_00 = Chimu_00-timesI(Chimu_20); \
Chi_01 = Chimu_01-timesI(Chimu_21); \
Chi_02 = Chimu_02-timesI(Chimu_22); \
Chi_10 = Chimu_10+timesI(Chimu_30); \
Chi_11 = Chimu_11+timesI(Chimu_31); \
Chi_12 = Chimu_12+timesI(Chimu_32);

#define TM_PROJ \
Chi_00 = Chimu_00-Chimu_20; \
Chi_01 = Chimu_01-Chimu_21; \
Chi_02 = Chimu_02-Chimu_22; \
Chi_10 = Chimu_10-Chimu_30; \
Chi_11 = Chimu_11-Chimu_31; \
Chi_12 = Chimu_12-Chimu_32;

// fspin(0)=hspin(0);
// fspin(1)=hspin(1);
// fspin(2)=timesMinusI(hspin(1));
// fspin(3)=timesMinusI(hspin(0));
#define XP_RECON\
result_00 = UChi_00;\
result_01 = UChi_01;\
result_02 = UChi_02;\
result_10 = UChi_10;\
result_11 = UChi_11;\
result_12 = UChi_12;\
result_20 = timesMinusI(UChi_10);\
result_21 = timesMinusI(UChi_11);\
result_22 = timesMinusI(UChi_12);\
result_30 = timesMinusI(UChi_00);\
result_31 = timesMinusI(UChi_01);\
result_32 = timesMinusI(UChi_02);
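// Reconstruction inverts the compression: per colour, the lower spin
// rows follow from the upper ones, here
//   $\psi_2 = -i\,(U\chi)_1, \qquad \psi_3 = -i\,(U\chi)_0$,
// as the fspin comments above state.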

#define XP_RECON_ACCUM\
result_00+=UChi_00;\
result_01+=UChi_01;\
result_02+=UChi_02;\
result_10+=UChi_10;\
result_11+=UChi_11;\
result_12+=UChi_12;\
result_20-=timesI(UChi_10);\
result_21-=timesI(UChi_11);\
result_22-=timesI(UChi_12);\
result_30-=timesI(UChi_00);\
result_31-=timesI(UChi_01);\
result_32-=timesI(UChi_02);

#define XM_RECON\
result_00 = UChi_00;\
result_01 = UChi_01;\
result_02 = UChi_02;\
result_10 = UChi_10;\
result_11 = UChi_11;\
result_12 = UChi_12;\
result_20 = timesI(UChi_10);\
result_21 = timesI(UChi_11);\
result_22 = timesI(UChi_12);\
result_30 = timesI(UChi_00);\
result_31 = timesI(UChi_01);\
result_32 = timesI(UChi_02);

#define XM_RECON_ACCUM\
result_00+= UChi_00;\
result_01+= UChi_01;\
result_02+= UChi_02;\
result_10+= UChi_10;\
result_11+= UChi_11;\
result_12+= UChi_12;\
result_20+= timesI(UChi_10);\
result_21+= timesI(UChi_11);\
result_22+= timesI(UChi_12);\
result_30+= timesI(UChi_00);\
result_31+= timesI(UChi_01);\
result_32+= timesI(UChi_02);

#define YP_RECON_ACCUM\
result_00+= UChi_00;\
result_01+= UChi_01;\
result_02+= UChi_02;\
result_10+= UChi_10;\
result_11+= UChi_11;\
result_12+= UChi_12;\
result_20+= UChi_10;\
result_21+= UChi_11;\
result_22+= UChi_12;\
result_30-= UChi_00;\
result_31-= UChi_01;\
result_32-= UChi_02;

#define YM_RECON_ACCUM\
result_00+= UChi_00;\
result_01+= UChi_01;\
result_02+= UChi_02;\
result_10+= UChi_10;\
result_11+= UChi_11;\
result_12+= UChi_12;\
result_20-= UChi_10;\
result_21-= UChi_11;\
result_22-= UChi_12;\
result_30+= UChi_00;\
result_31+= UChi_01;\
result_32+= UChi_02;

#define ZP_RECON_ACCUM\
result_00+= UChi_00;\
result_01+= UChi_01;\
result_02+= UChi_02;\
result_10+= UChi_10;\
result_11+= UChi_11;\
result_12+= UChi_12;\
result_20-= timesI(UChi_00); \
result_21-= timesI(UChi_01); \
result_22-= timesI(UChi_02); \
result_30+= timesI(UChi_10); \
result_31+= timesI(UChi_11); \
result_32+= timesI(UChi_12);

#define ZM_RECON_ACCUM\
result_00+= UChi_00;\
result_01+= UChi_01;\
result_02+= UChi_02;\
result_10+= UChi_10;\
result_11+= UChi_11;\
result_12+= UChi_12;\
result_20+= timesI(UChi_00); \
result_21+= timesI(UChi_01); \
result_22+= timesI(UChi_02); \
result_30-= timesI(UChi_10); \
result_31-= timesI(UChi_11); \
result_32-= timesI(UChi_12);

#define TP_RECON_ACCUM\
result_00+= UChi_00;\
result_01+= UChi_01;\
result_02+= UChi_02;\
result_10+= UChi_10;\
result_11+= UChi_11;\
result_12+= UChi_12;\
result_20+= UChi_00; \
result_21+= UChi_01; \
result_22+= UChi_02; \
result_30+= UChi_10; \
result_31+= UChi_11; \
result_32+= UChi_12;

#define TM_RECON_ACCUM\
result_00+= UChi_00;\
result_01+= UChi_01;\
result_02+= UChi_02;\
result_10+= UChi_10;\
result_11+= UChi_11;\
result_12+= UChi_12;\
result_20-= UChi_00; \
result_21-= UChi_01; \
result_22-= UChi_02; \
result_30-= UChi_10; \
result_31-= UChi_11; \
result_32-= UChi_12;

#define HAND_STENCIL_LEGA(PROJ,PERM,DIR,RECON) \
SE=&st_p[DIR+8*ss]; \
ptype=st_perm[DIR]; \
offset = SE->_offset; \
local = SE->_is_local; \
perm = SE->_permute; \
if ( local ) { \
LOAD_CHIMU(PERM); \
PROJ; \
if ( perm) { \
PERMUTE_DIR(PERM); \
} \
} else { \
LOAD_CHI; \
} \
MULT_2SPIN(DIR); \
RECON;

#define HAND_STENCIL_LEG(PROJ,PERM,DIR,RECON) \
SE=&st_p[DIR+8*ss]; \
ptype=st_perm[DIR]; \
offset = SE->_offset; \
local = SE->_is_local; \
perm = SE->_permute; \
LOAD_CHIMU(PERM); \
PROJ; \
MULT_2SPIN(DIR); \
RECON;
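// This second HAND_STENCIL_LEG drops the local/halo branch entirely:
// every leg is loaded with coalescedReadPermute, which appears to
// assume all neighbours are local (the single-rank SYCL path this
// file targets).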

#define HAND_STENCIL_LEG_INT(PROJ,PERM,DIR,RECON) \
SE=&st_p[DIR+8*ss]; \
ptype=st_perm[DIR]; \
offset = SE->_offset; \
local = SE->_is_local; \
perm = SE->_permute; \
if ( local ) { \
LOAD_CHIMU(PERM); \
PROJ; \
if ( perm) { \
PERMUTE_DIR(PERM); \
} \
} else if ( st.same_node[DIR] ) { \
LOAD_CHI; \
} \
if (local || st.same_node[DIR] ) { \
MULT_2SPIN(DIR); \
RECON; \
}

#define HAND_STENCIL_LEG_EXT(PROJ,PERM,DIR,RECON) \
SE=st.GetEntry(ptype,DIR,ss); \
offset = SE->_offset; \
if((!SE->_is_local)&&(!st.same_node[DIR]) ) { \
LOAD_CHI; \
MULT_2SPIN(DIR); \
RECON; \
nmu++; \
}

#define HAND_RESULT(ss) \
{ \
SiteSpinor & ref (out[ss]); \
coalescedWrite(ref()(0)(0),result_00); \
coalescedWrite(ref()(0)(1),result_01); \
coalescedWrite(ref()(0)(2),result_02); \
coalescedWrite(ref()(1)(0),result_10); \
coalescedWrite(ref()(1)(1),result_11); \
coalescedWrite(ref()(1)(2),result_12); \
coalescedWrite(ref()(2)(0),result_20); \
coalescedWrite(ref()(2)(1),result_21); \
coalescedWrite(ref()(2)(2),result_22); \
coalescedWrite(ref()(3)(0),result_30); \
coalescedWrite(ref()(3)(1),result_31); \
coalescedWrite(ref()(3)(2),result_32); \
}

#define HAND_RESULT_EXT(ss) \
if (nmu){ \
SiteSpinor & ref (out[ss]); \
ref()(0)(0)+=result_00; \
ref()(0)(1)+=result_01; \
ref()(0)(2)+=result_02; \
ref()(1)(0)+=result_10; \
ref()(1)(1)+=result_11; \
ref()(1)(2)+=result_12; \
ref()(2)(0)+=result_20; \
ref()(2)(1)+=result_21; \
ref()(2)(2)+=result_22; \
ref()(3)(0)+=result_30; \
ref()(3)(1)+=result_31; \
ref()(3)(2)+=result_32; \
}

#define HAND_DECLARATIONS(Simd) \
Simd result_00; \
Simd result_01; \
Simd result_02; \
Simd result_10; \
Simd result_11; \
Simd result_12; \
Simd result_20; \
Simd result_21; \
Simd result_22; \
Simd result_30; \
Simd result_31; \
Simd result_32; \
Simd Chi_00; \
Simd Chi_01; \
Simd Chi_02; \
Simd Chi_10; \
Simd Chi_11; \
Simd Chi_12; \
Simd UChi_00; \
Simd UChi_01; \
Simd UChi_02; \
Simd UChi_10; \
Simd UChi_11; \
Simd UChi_12; \
Simd U_00; \
Simd U_10; \
Simd U_20; \
Simd U_01; \
Simd U_11; \
Simd U_21;

#define ZERO_RESULT \
result_00=Zero(); \
result_01=Zero(); \
result_02=Zero(); \
result_10=Zero(); \
result_11=Zero(); \
result_12=Zero(); \
result_20=Zero(); \
result_21=Zero(); \
result_22=Zero(); \
result_30=Zero(); \
result_31=Zero(); \
result_32=Zero();

#define Chimu_00 Chi_00
#define Chimu_01 Chi_01
#define Chimu_02 Chi_02
#define Chimu_10 Chi_10
#define Chimu_11 Chi_11
#define Chimu_12 Chi_12
#define Chimu_20 UChi_00
#define Chimu_21 UChi_01
#define Chimu_22 UChi_02
#define Chimu_30 UChi_10
#define Chimu_31 UChi_11
#define Chimu_32 UChi_12

NAMESPACE_BEGIN(Grid);

template<class Impl> accelerator_inline void
WilsonKernels<Impl>::HandDhopSiteSycl(StencilVector st_perm,StencilEntry *st_p, SiteDoubledGaugeField *U,SiteHalfSpinor *buf,
int ss,int sU,const SiteSpinor *in, SiteSpinor *out)
{
// T==0, Z==1, Y==2, X==3 expect 1,2,2,2 simd layout etc...
typedef typename Simd::scalar_type S;
typedef typename Simd::vector_type V;
typedef iSinglet<Simd> vCplx;
// typedef decltype( coalescedRead( vCplx()()() )) Simt;
typedef decltype( coalescedRead( in[0]()(0)(0) )) Simt;

HAND_DECLARATIONS(Simt);

int offset,local,perm, ptype;
StencilEntry *SE;
HAND_STENCIL_LEG(XM_PROJ,3,Xp,XM_RECON);
HAND_STENCIL_LEG(YM_PROJ,2,Yp,YM_RECON_ACCUM);
HAND_STENCIL_LEG(ZM_PROJ,1,Zp,ZM_RECON_ACCUM);
HAND_STENCIL_LEG(TM_PROJ,0,Tp,TM_RECON_ACCUM);
HAND_STENCIL_LEG(XP_PROJ,3,Xm,XP_RECON_ACCUM);
HAND_STENCIL_LEG(YP_PROJ,2,Ym,YP_RECON_ACCUM);
HAND_STENCIL_LEG(ZP_PROJ,1,Zm,ZP_RECON_ACCUM);
HAND_STENCIL_LEG(TP_PROJ,0,Tm,TP_RECON_ACCUM);
HAND_RESULT(ss);
}

////////////// Wilson ; uses this implementation /////////////////////

NAMESPACE_END(Grid);
#undef LOAD_CHIMU
#undef LOAD_CHI
#undef MULT_2SPIN
#undef PERMUTE_DIR
#undef XP_PROJ
#undef YP_PROJ
#undef ZP_PROJ
#undef TP_PROJ
#undef XM_PROJ
#undef YM_PROJ
#undef ZM_PROJ
#undef TM_PROJ
#undef XP_RECON
#undef XP_RECON_ACCUM
#undef XM_RECON
#undef XM_RECON_ACCUM
#undef YP_RECON_ACCUM
#undef YM_RECON_ACCUM
#undef ZP_RECON_ACCUM
#undef ZM_RECON_ACCUM
#undef TP_RECON_ACCUM
#undef TM_RECON_ACCUM
#undef ZERO_RESULT
#undef Chimu_00
#undef Chimu_01
#undef Chimu_02
#undef Chimu_10
#undef Chimu_11
#undef Chimu_12
#undef Chimu_20
#undef Chimu_21
#undef Chimu_22
#undef Chimu_30
#undef Chimu_31
#undef Chimu_32
#undef HAND_STENCIL_LEG
#undef HAND_STENCIL_LEG_INT
#undef HAND_STENCIL_LEG_EXT
#undef HAND_RESULT
#undef HAND_RESULT_INT
#undef HAND_RESULT_EXT
#undef HAND_DECLARATIONS
@@ -43,11 +43,11 @@ NAMESPACE_BEGIN(Grid);
accelerator_inline void get_stencil(StencilEntry * mem, StencilEntry &chip)
{
#ifdef GRID_SIMT
static_assert(sizeof(StencilEntry)==sizeof(uint4),"Unexpected Stencil Entry Size");
uint4 * mem_pun = (uint4 *)mem; // force 128 bit loads
uint4 * chip_pun = (uint4 *)&chip;
* chip_pun = * mem_pun;
#else
chip = *mem;
#endif
return;
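// The uint4 pun coaxes the compiler into a single 128-bit load per
// StencilEntry instead of several scalar loads; the static_assert
// guards the size assumption behind the cast.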
@@ -66,7 +66,7 @@ accelerator_inline void get_stencil(StencilEntry * mem, StencilEntry &chip)
acceleratorSynchronise(); \
Impl::multLink(Uchi, U[sU], chi, Dir, SE, st); \
Recon(result, Uchi);

#define GENERIC_STENCIL_LEG_INT(Dir,spProj,Recon) \
SE = st.GetEntry(ptype, Dir, sF); \
if (SE->_is_local) { \
@@ -81,7 +81,7 @@ accelerator_inline void get_stencil(StencilEntry * mem, StencilEntry &chip)
Impl::multLink(Uchi, U[sU], chi, Dir, SE, st); \
Recon(result, Uchi); \
} \
acceleratorSynchronise();

#define GENERIC_STENCIL_LEG_EXT(Dir,spProj,Recon) \
SE = st.GetEntry(ptype, Dir, sF); \
@@ -91,7 +91,7 @@ accelerator_inline void get_stencil(StencilEntry * mem, StencilEntry &chip)
Recon(result, Uchi); \
nmu++; \
} \
acceleratorSynchronise();

#define GENERIC_DHOPDIR_LEG_BODY(Dir,spProj,Recon) \
if (SE->_is_local ) { \
@@ -103,7 +103,7 @@ accelerator_inline void get_stencil(StencilEntry * mem, StencilEntry &chip)
} \
acceleratorSynchronise(); \
Impl::multLink(Uchi, U[sU], chi, dir, SE, st); \
Recon(result, Uchi);

#define GENERIC_DHOPDIR_LEG(Dir,spProj,Recon) \
if (gamma == Dir) { \
@@ -114,10 +114,10 @@ accelerator_inline void get_stencil(StencilEntry * mem, StencilEntry &chip)
////////////////////////////////////////////////////////////////////
// All legs kernels ; comms then compute
////////////////////////////////////////////////////////////////////
template <class Impl>
void WilsonKernels<Impl>::GenericDhopSiteDag(StencilView &st, DoubledGaugeFieldView &U,
SiteHalfSpinor *buf, int sF,
int sU, const FermionFieldView &in, FermionFieldView &out)
template <class Impl> accelerator_inline
void WilsonKernels<Impl>::GenericDhopSiteDag(const StencilView &st, const DoubledGaugeFieldView &U,
SiteHalfSpinor *buf, int sF,
int sU, const FermionFieldView &in, const FermionFieldView &out)
{
typedef decltype(coalescedRead(buf[0])) calcHalfSpinor;
typedef decltype(coalescedRead(in[0])) calcSpinor;
@@ -140,10 +140,10 @@ void WilsonKernels<Impl>::GenericDhopSiteDag(StencilView &st, DoubledGaugeFieldV
coalescedWrite(out[sF],result,lane);
};

template <class Impl>
void WilsonKernels<Impl>::GenericDhopSite(StencilView &st, DoubledGaugeFieldView &U,
SiteHalfSpinor *buf, int sF,
int sU, const FermionFieldView &in, FermionFieldView &out)
template <class Impl> accelerator_inline
void WilsonKernels<Impl>::GenericDhopSite(const StencilView &st, const DoubledGaugeFieldView &U,
SiteHalfSpinor *buf, int sF,
int sU, const FermionFieldView &in, const FermionFieldView &out)
{
typedef decltype(coalescedRead(buf[0])) calcHalfSpinor;
typedef decltype(coalescedRead(in[0])) calcSpinor;
@@ -169,10 +169,10 @@ void WilsonKernels<Impl>::GenericDhopSite(StencilView &st, DoubledGaugeFieldView
////////////////////////////////////////////////////////////////////
// Interior kernels
////////////////////////////////////////////////////////////////////
template <class Impl>
void WilsonKernels<Impl>::GenericDhopSiteDagInt(StencilView &st, DoubledGaugeFieldView &U,
SiteHalfSpinor *buf, int sF,
int sU, const FermionFieldView &in, FermionFieldView &out)
template <class Impl> accelerator_inline
void WilsonKernels<Impl>::GenericDhopSiteDagInt(const StencilView &st, const DoubledGaugeFieldView &U,
SiteHalfSpinor *buf, int sF,
int sU, const FermionFieldView &in, const FermionFieldView &out)
{
typedef decltype(coalescedRead(buf[0])) calcHalfSpinor;
typedef decltype(coalescedRead(in[0])) calcSpinor;
@@ -197,10 +197,10 @@ void WilsonKernels<Impl>::GenericDhopSiteDagInt(StencilView &st, DoubledGaugeFi
coalescedWrite(out[sF], result,lane);
};

template <class Impl>
void WilsonKernels<Impl>::GenericDhopSiteInt(StencilView &st, DoubledGaugeFieldView &U,
SiteHalfSpinor *buf, int sF,
int sU, const FermionFieldView &in, FermionFieldView &out)
template <class Impl> accelerator_inline
void WilsonKernels<Impl>::GenericDhopSiteInt(const StencilView &st, const DoubledGaugeFieldView &U,
SiteHalfSpinor *buf, int sF,
int sU, const FermionFieldView &in, const FermionFieldView &out)
{
typedef decltype(coalescedRead(buf[0])) calcHalfSpinor;
typedef decltype(coalescedRead(in[0])) calcSpinor;
@@ -227,10 +227,10 @@ void WilsonKernels<Impl>::GenericDhopSiteInt(StencilView &st, DoubledGaugeField
////////////////////////////////////////////////////////////////////
// Exterior kernels
////////////////////////////////////////////////////////////////////
template <class Impl>
void WilsonKernels<Impl>::GenericDhopSiteDagExt(StencilView &st, DoubledGaugeFieldView &U,
SiteHalfSpinor *buf, int sF,
int sU, const FermionFieldView &in, FermionFieldView &out)
template <class Impl> accelerator_inline
void WilsonKernels<Impl>::GenericDhopSiteDagExt(const StencilView &st, const DoubledGaugeFieldView &U,
SiteHalfSpinor *buf, int sF,
int sU, const FermionFieldView &in, const FermionFieldView &out)
{
typedef decltype(coalescedRead(buf[0])) calcHalfSpinor;
typedef decltype(coalescedRead(in[0])) calcSpinor;
@@ -251,17 +251,17 @@ void WilsonKernels<Impl>::GenericDhopSiteDagExt(StencilView &st, DoubledGaugeFi
GENERIC_STENCIL_LEG_EXT(Ym,spProjYm,accumReconYm);
GENERIC_STENCIL_LEG_EXT(Zm,spProjZm,accumReconZm);
GENERIC_STENCIL_LEG_EXT(Tm,spProjTm,accumReconTm);
if ( nmu ) {
auto out_t = coalescedRead(out[sF],lane);
out_t = out_t + result;
coalescedWrite(out[sF],out_t,lane);
}
};

template <class Impl>
void WilsonKernels<Impl>::GenericDhopSiteExt(StencilView &st, DoubledGaugeFieldView &U,
SiteHalfSpinor *buf, int sF,
int sU, const FermionFieldView &in, FermionFieldView &out)
template <class Impl> accelerator_inline
void WilsonKernels<Impl>::GenericDhopSiteExt(const StencilView &st, const DoubledGaugeFieldView &U,
SiteHalfSpinor *buf, int sF,
int sU, const FermionFieldView &in, const FermionFieldView &out)
{
typedef decltype(coalescedRead(buf[0])) calcHalfSpinor;
typedef decltype(coalescedRead(in[0])) calcSpinor;
@@ -282,7 +282,7 @@ void WilsonKernels<Impl>::GenericDhopSiteExt(StencilView &st, DoubledGaugeField
GENERIC_STENCIL_LEG_EXT(Yp,spProjYm,accumReconYm);
GENERIC_STENCIL_LEG_EXT(Zp,spProjZm,accumReconZm);
GENERIC_STENCIL_LEG_EXT(Tp,spProjTm,accumReconTm);
if ( nmu ) {
auto out_t = coalescedRead(out[sF],lane);
out_t = out_t + result;
coalescedWrite(out[sF],out_t,lane);
@@ -290,9 +290,9 @@ void WilsonKernels<Impl>::GenericDhopSiteExt(StencilView &st, DoubledGaugeField
};

#define DhopDirMacro(Dir,spProj,spRecon) \
template <class Impl> \
void WilsonKernels<Impl>::DhopDir##Dir(StencilView &st, DoubledGaugeFieldView &U,SiteHalfSpinor *buf, int sF, \
int sU, const FermionFieldView &in, FermionFieldView &out, int dir) \
template <class Impl> accelerator_inline \
void WilsonKernels<Impl>::DhopDir##Dir(const StencilView &st, const DoubledGaugeFieldView &U,SiteHalfSpinor *buf, int sF, \
int sU, const FermionFieldView &in, const FermionFieldView &out, int dir) \
{ \
typedef decltype(coalescedRead(buf[0])) calcHalfSpinor; \
typedef decltype(coalescedRead(in[0])) calcSpinor; \
@@ -302,12 +302,12 @@ void WilsonKernels<Impl>::GenericDhopSiteExt(StencilView &st, DoubledGaugeField
StencilEntry *SE; \
int ptype; \
const int Nsimd = SiteHalfSpinor::Nsimd(); \
const int lane=acceleratorSIMTlane(Nsimd); \
\
SE = st.GetEntry(ptype, dir, sF); \
GENERIC_DHOPDIR_LEG_BODY(Dir,spProj,spRecon); \
coalescedWrite(out[sF], result,lane); \
}

DhopDirMacro(Xp,spProjXp,spReconXp);
DhopDirMacro(Yp,spProjYp,spReconYp);
@@ -318,9 +318,9 @@ DhopDirMacro(Ym,spProjYm,spReconYm);
DhopDirMacro(Zm,spProjZm,spReconZm);
DhopDirMacro(Tm,spProjTm,spReconTm);

template <class Impl>
void WilsonKernels<Impl>::DhopDirK( StencilView &st, DoubledGaugeFieldView &U,SiteHalfSpinor *buf, int sF,
int sU, const FermionFieldView &in, FermionFieldView &out, int dir, int gamma)
template <class Impl> accelerator_inline
void WilsonKernels<Impl>::DhopDirK(const StencilView &st, const DoubledGaugeFieldView &U,SiteHalfSpinor *buf, int sF,
int sU, const FermionFieldView &in, const FermionFieldView &out, int dir, int gamma)
{
typedef decltype(coalescedRead(buf[0])) calcHalfSpinor;
typedef decltype(coalescedRead(in[0])) calcSpinor;
@@ -345,8 +345,8 @@ void WilsonKernels<Impl>::DhopDirK( StencilView &st, DoubledGaugeFieldView &U,Si
}

template <class Impl>
void WilsonKernels<Impl>::DhopDirAll(StencilImpl &st, DoubledGaugeField &U,SiteHalfSpinor *buf, int Ls,
int Nsite, const FermionField &in, std::vector<FermionField> &out)
{
autoView(U_v ,U,AcceleratorRead);
autoView(in_v ,in,AcceleratorRead);
@@ -362,8 +362,8 @@ void WilsonKernels<Impl>::DhopDirAll( StencilImpl &st, DoubledGaugeField &U,Site
autoView(out_Tp,out[7],AcceleratorWrite);
auto CBp=st.CommBuf();
accelerator_for(sss,Nsite*Ls,Simd::Nsimd(),{
int sU=sss/Ls;
int sF =sss;
DhopDirXm(st_v,U_v,CBp,sF,sU,in_v,out_Xm,0);
DhopDirYm(st_v,U_v,CBp,sF,sU,in_v,out_Ym,1);
DhopDirZm(st_v,U_v,CBp,sF,sU,in_v,out_Zm,2);
@@ -378,7 +378,7 @@ void WilsonKernels<Impl>::DhopDirAll( StencilImpl &st, DoubledGaugeField &U,Site

template <class Impl>
void WilsonKernels<Impl>::DhopDirKernel( StencilImpl &st, DoubledGaugeField &U,SiteHalfSpinor *buf, int Ls,
int Nsite, const FermionField &in, FermionField &out, int dirdisp, int gamma)
{
assert(dirdisp<=7);
assert(dirdisp>=0);
@@ -387,7 +387,7 @@ void WilsonKernels<Impl>::DhopDirKernel( StencilImpl &st, DoubledGaugeField &U,S
autoView(in_v ,in ,AcceleratorRead);
autoView(out_v,out,AcceleratorWrite);
autoView(st_v ,st ,AcceleratorRead);
auto CBp=st.CommBuf();
#define LoopBody(Dir) \
case Dir : \
accelerator_for(ss,Nsite,Simd::Nsimd(),{ \
@@ -414,7 +414,7 @@ void WilsonKernels<Impl>::DhopDirKernel( StencilImpl &st, DoubledGaugeField &U,S
break;
}
#undef LoopBody
}

#define KERNEL_CALLNB(A) \
const uint64_t NN = Nsite*Ls; \
@@ -424,7 +424,21 @@ void WilsonKernels<Impl>::DhopDirKernel( StencilImpl &st, DoubledGaugeField &U,S
WilsonKernels<Impl>::A(st_v,U_v,buf,sF,sU,in_v,out_v); \
});

#define KERNEL_CALL(A) KERNEL_CALLNB(A); accelerator_barrier();
#define KERNEL_CALL_TMP(A) \
const uint64_t NN = Nsite*Ls; \
auto U_p = & U_v[0]; \
auto in_p = & in_v[0]; \
auto out_p = & out_v[0]; \
auto st_p = st_v._entries_p; \
auto st_perm = st_v._permute_type; \
accelerator_forNB( ss, NN, Simd::Nsimd(), { \
int sF = ss; \
int sU = ss/Ls; \
WilsonKernels<Impl>::A(st_perm,st_p,U_p,buf,sF,sU,in_p,out_p); \
}); \
accelerator_barrier();
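// KERNEL_CALL_TMP unpacks the views into raw pointers and the raw
// stencil table/permute type before launch, matching the
// HandDhopSiteSycl signature that takes plain pointers rather than
// view objects.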

#define KERNEL_CALL(A) KERNEL_CALLNB(A); accelerator_barrier();

#define ASM_CALL(A) \
thread_for( ss, Nsite, { \
@@ -436,17 +450,18 @@ void WilsonKernels<Impl>::DhopDirKernel( StencilImpl &st, DoubledGaugeField &U,S
template <class Impl>
void WilsonKernels<Impl>::DhopKernel(int Opt,StencilImpl &st, DoubledGaugeField &U, SiteHalfSpinor * buf,
int Ls, int Nsite, const FermionField &in, FermionField &out,
int interior,int exterior)
{
autoView(U_v , U,AcceleratorRead);
autoView(in_v , in,AcceleratorRead);
autoView(out_v,out,AcceleratorWrite);
autoView(st_v , st,AcceleratorRead);

if( interior && exterior ) {
if (Opt == WilsonKernelsStatic::OptGeneric ) { KERNEL_CALL(GenericDhopSite); return;}
#ifndef GRID_CUDA
if (Opt == WilsonKernelsStatic::OptHandUnroll ) { KERNEL_CALL(HandDhopSite); return;}
if (Opt == WilsonKernelsStatic::OptHandUnroll ) { KERNEL_CALL_TMP(HandDhopSiteSycl); return; }
// if (Opt == WilsonKernelsStatic::OptHandUnroll ) { KERNEL_CALL(HandDhopSite); return;}
if (Opt == WilsonKernelsStatic::OptInlineAsm ) { ASM_CALL(AsmDhopSite); return;}
#endif
} else if( interior ) {
@@ -455,7 +470,7 @@ void WilsonKernels<Impl>::DhopKernel(int Opt,StencilImpl &st, DoubledGaugeField
if (Opt == WilsonKernelsStatic::OptHandUnroll ) { KERNEL_CALLNB(HandDhopSiteInt); return;}
if (Opt == WilsonKernelsStatic::OptInlineAsm ) { ASM_CALL(AsmDhopSiteInt); return;}
#endif
} else if( exterior ) {
if (Opt == WilsonKernelsStatic::OptGeneric ) { KERNEL_CALL(GenericDhopSiteExt); return;}
#ifndef GRID_CUDA
if (Opt == WilsonKernelsStatic::OptHandUnroll ) { KERNEL_CALL(HandDhopSiteExt); return;}
@@ -467,14 +482,14 @@ void WilsonKernels<Impl>::DhopKernel(int Opt,StencilImpl &st, DoubledGaugeField
template <class Impl>
void WilsonKernels<Impl>::DhopDagKernel(int Opt,StencilImpl &st, DoubledGaugeField &U, SiteHalfSpinor * buf,
int Ls, int Nsite, const FermionField &in, FermionField &out,
int interior,int exterior)
{
autoView(U_v ,U,AcceleratorRead);
autoView(in_v ,in,AcceleratorRead);
autoView(out_v,out,AcceleratorWrite);
autoView(st_v ,st,AcceleratorRead);

if( interior && exterior ) {
if (Opt == WilsonKernelsStatic::OptGeneric ) { KERNEL_CALL(GenericDhopSiteDag); return;}
#ifndef GRID_CUDA
if (Opt == WilsonKernelsStatic::OptHandUnroll ) { KERNEL_CALL(HandDhopSiteDag); return;}
@@ -486,7 +501,7 @@ void WilsonKernels<Impl>::DhopKernel(int Opt,StencilImpl &st, DoubledGaugeField
if (Opt == WilsonKernelsStatic::OptHandUnroll ) { KERNEL_CALL(HandDhopSiteDagInt); return;}
if (Opt == WilsonKernelsStatic::OptInlineAsm ) { ASM_CALL(AsmDhopSiteDagInt); return;}
#endif
} else if( exterior ) {
if (Opt == WilsonKernelsStatic::OptGeneric ) { KERNEL_CALL(GenericDhopSiteDagExt); return;}
#ifndef GRID_CUDA
if (Opt == WilsonKernelsStatic::OptHandUnroll ) { KERNEL_CALL(HandDhopSiteDagExt); return;}
@@ -501,4 +516,3 @@ void WilsonKernels<Impl>::DhopKernel(int Opt,StencilImpl &st, DoubledGaugeField
#undef ASM_CALL

NAMESPACE_END(Grid);

@@ -4,11 +4,12 @@ Grid physics library, www.github.com/paboyle/Grid

Source file: ./lib/qcd/action/fermion/WilsonKernels.cc

Copyright (C) 2015
Copyright (C) 2015, 2020

Author: Peter Boyle <paboyle@ph.ed.ac.uk>
Author: Peter Boyle <peterboyle@Peters-MacBook-Pro-2.local>
Author: paboyle <paboyle@ph.ed.ac.uk>
Author: Nils Meyer <nils.meyer@ur.de> Regensburg University

This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -31,17 +32,21 @@ directory
#include <Grid/qcd/action/fermion/FermionCore.h>
#include <Grid/qcd/action/fermion/implementation/WilsonKernelsImplementation.h>
#include <Grid/qcd/action/fermion/implementation/WilsonKernelsHandImplementation.h>
#include <Grid/qcd/action/fermion/implementation/WilsonKernelsHandImplementationSycl.h>

#ifndef AVX512
#ifndef QPX
#ifndef A64FX
#ifndef A64FXFIXEDSIZE
#include <Grid/qcd/action/fermion/implementation/WilsonKernelsAsmImplementation.h>
#endif
#endif
#endif
#endif

NAMESPACE_BEGIN(Grid);

#include "impl.h"
template class WilsonKernels<IMPLEMENTATION>;

NAMESPACE_END(Grid);

@@ -37,6 +37,7 @@ directory
////////////////////////////////////////////////////////////////////////
NAMESPACE_BEGIN(Grid);
#include <Grid/qcd/action/fermion/implementation/WilsonKernelsAsmAvx512.h>
#include <Grid/qcd/action/fermion/implementation/WilsonKernelsAsmA64FX.h>
#include <Grid/qcd/action/fermion/implementation/WilsonKernelsAsmQPX.h>
NAMESPACE_END(Grid);

Grid/qcd/action/gauge/Gauge.cc (new file, 38 lines)
@ -0,0 +1,38 @@
|
||||
/*************************************************************************************
|
||||
|
||||
Grid physics library, www.github.com/paboyle/Grid
|
||||
|
||||
Source file: ./lib/qcd/action/gauge/Gauge.cc
|
||||
|
||||
Copyright (C) 2020
|
||||
|
||||
Author: Peter Boyle <paboyle@ph.ed.ac.uk>
|
||||
Author: Peter Boyle <peterboyle@Peters-MacBook-Pro-2.local>
|
||||
Author: paboyle <paboyle@ph.ed.ac.uk>
|
||||
|
||||
This program is free software; you can redistribute it and/or modify
|
||||
it under the terms of the GNU General Public License as published by
|
||||
the Free Software Foundation; either version 2 of the License, or
|
||||
(at your option) any later version.
|
||||
|
||||
This program is distributed in the hope that it will be useful,
|
||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
GNU General Public License for more details.
|
||||
|
||||
You should have received a copy of the GNU General Public License along
|
||||
with this program; if not, write to the Free Software Foundation, Inc.,
|
||||
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
|
||||
See the full license in the file "LICENSE" in the top level distribution
|
||||
directory
|
||||
*************************************************************************************/
|
||||
/* END LEGAL */
|
||||
#include <Grid/qcd/action/fermion/FermionCore.h>
|
||||
|
||||
NAMESPACE_BEGIN(Grid);
|
||||
|
||||
std::vector<int> ConjugateGaugeImplBase::_conjDirs;
|
||||
|
||||
NAMESPACE_END(Grid);
|
||||
|
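The new Gauge.cc exists to give the static data member declared in ConjugateGaugeImplBase a single out-of-class definition, so every translation unit that touches _conjDirs links against the same object. A minimal sketch of the pattern (hypothetical names):

#include <vector>

struct BCFlagsBase {
  static std::vector<int> _dirs;    // declaration only; no storage yet
};

std::vector<int> BCFlagsBase::_dirs; // exactly one definition, in one .cc file

Since C++17 an inline static member would avoid the separate .cc file; the out-of-class definition keeps the header usable under older standards.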
@ -154,6 +154,10 @@ public:
return Hsum.real();
}

static inline void Project(Field &U) {
ProjectSUn(U);
}

static inline void HotConfiguration(GridParallelRNG &pRNG, Field &U) {
SU<Nc>::HotConfiguration(pRNG, U);
}
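The new Project hook gives every gauge implementation a uniform way to pull the field back onto the group manifold (repeated MD updates drift U off SU(Nc) through rounding). A hedged usage sketch, where Gimpl stands for any implementation exposing the hook:

template <class Gimpl>
void EndOfTrajectory(typename Gimpl::Field &U) {
  Gimpl::Project(U);  // ProjectSUn(U) here; a no-op for the scalar impls further down
}

The integrator calls exactly this hook, FieldImplementation::Project(U), at the end of each trajectory, as the Integrator.h hunk later in this diff shows.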
@ -59,14 +59,14 @@ public:
}
static inline GaugeLinkField
CovShiftIdentityBackward(const GaugeLinkField &Link, int mu) {
return Cshift(closure(adj(Link)), mu, -1);
return PeriodicBC::CovShiftIdentityBackward(Link, mu);
}
static inline GaugeLinkField
CovShiftIdentityForward(const GaugeLinkField &Link, int mu) {
return Link;
return PeriodicBC::CovShiftIdentityForward(Link,mu);
}
static inline GaugeLinkField ShiftStaple(const GaugeLinkField &Link, int mu) {
return Cshift(Link, mu, 1);
return PeriodicBC::ShiftStaple(Link,mu);
}

static inline bool isPeriodicGaugeField(void) { return true; }
@ -74,7 +74,13 @@ public:

// Composition with smeared link, bc's etc.. probably need multiple inheritance
// Variable precision "S" and variable Nc
template <class GimplTypes> class ConjugateGaugeImpl : public GimplTypes {
class ConjugateGaugeImplBase {
protected:
static std::vector<int> _conjDirs;
};

template <class GimplTypes> class ConjugateGaugeImpl : public GimplTypes, ConjugateGaugeImplBase {
private:
public:
INHERIT_GIMPL_TYPES(GimplTypes);

@ -84,47 +90,56 @@ public:
////////////////////////////////////////////////////////////////////////////////////////////////////////////
template <class covariant>
static Lattice<covariant> CovShiftForward(const GaugeLinkField &Link, int mu,
const Lattice<covariant> &field) {
return ConjugateBC::CovShiftForward(Link, mu, field);
const Lattice<covariant> &field)
{
assert(_conjDirs.size() == Nd);
if(_conjDirs[mu])
return ConjugateBC::CovShiftForward(Link, mu, field);
else
return PeriodicBC::CovShiftForward(Link, mu, field);
}

template <class covariant>
static Lattice<covariant> CovShiftBackward(const GaugeLinkField &Link, int mu,
const Lattice<covariant> &field) {
return ConjugateBC::CovShiftBackward(Link, mu, field);
const Lattice<covariant> &field)
{
assert(_conjDirs.size() == Nd);
if(_conjDirs[mu])
return ConjugateBC::CovShiftBackward(Link, mu, field);
else
return PeriodicBC::CovShiftBackward(Link, mu, field);
}

static inline GaugeLinkField
CovShiftIdentityBackward(const GaugeLinkField &Link, int mu) {
GridBase *grid = Link.Grid();
int Lmu = grid->GlobalDimensions()[mu] - 1;

Lattice<iScalar<vInteger>> coor(grid);
LatticeCoordinate(coor, mu);

GaugeLinkField tmp(grid);
tmp = adj(Link);
tmp = where(coor == Lmu, conjugate(tmp), tmp);
return Cshift(tmp, mu, -1); // moves towards positive mu
CovShiftIdentityBackward(const GaugeLinkField &Link, int mu)
{
assert(_conjDirs.size() == Nd);
if(_conjDirs[mu])
return ConjugateBC::CovShiftIdentityBackward(Link, mu);
else
return PeriodicBC::CovShiftIdentityBackward(Link, mu);
}
static inline GaugeLinkField
CovShiftIdentityForward(const GaugeLinkField &Link, int mu) {
return Link;
CovShiftIdentityForward(const GaugeLinkField &Link, int mu)
{
assert(_conjDirs.size() == Nd);
if(_conjDirs[mu])
return ConjugateBC::CovShiftIdentityForward(Link,mu);
else
return PeriodicBC::CovShiftIdentityForward(Link,mu);
}

static inline GaugeLinkField ShiftStaple(const GaugeLinkField &Link, int mu) {
GridBase *grid = Link.Grid();
int Lmu = grid->GlobalDimensions()[mu] - 1;

Lattice<iScalar<vInteger>> coor(grid);
LatticeCoordinate(coor, mu);

GaugeLinkField tmp(grid);
tmp = Cshift(Link, mu, 1);
tmp = where(coor == Lmu, conjugate(tmp), tmp);
return tmp;
static inline GaugeLinkField ShiftStaple(const GaugeLinkField &Link, int mu)
{
assert(_conjDirs.size() == Nd);
if(_conjDirs[mu])
return ConjugateBC::ShiftStaple(Link,mu);
else
return PeriodicBC::ShiftStaple(Link,mu);
}

static inline void setDirections(std::vector<int> &conjDirs) { _conjDirs=conjDirs; }
static inline std::vector<int> getDirections(void) { return _conjDirs; }
static inline bool isPeriodicGaugeField(void) { return false; }
};
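With _conjDirs in place, charge-conjugate boundaries can be enabled per direction at run time instead of applying to every direction at once. A usage sketch (the alias ConjugateGimplR is assumed to name some ConjugateGaugeImpl instantiation):

std::vector<int> conjDirs = {1, 1, 1, 0};  // C* boundaries in x,y,z; periodic in t
ConjugateGimplR::setDirections(conjDirs);  // must run before any CovShift* call

The assert(_conjDirs.size() == Nd) in every shift routine makes a forgotten setDirections call fail fast instead of silently defaulting.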
@ -54,6 +54,10 @@ public:
static inline void ColdConfiguration(GridParallelRNG &pRNG, Field &U) {
U = 1.0;
}

static inline void Project(Field &U) {
return;
}

static void MomentumSpacePropagator(Field &out, RealD m)
{
@ -234,6 +238,10 @@ public:
#endif //USE_FFT_ACCELERATION
}

static inline void Project(Field &U) {
return;
}

static inline void HotConfiguration(GridParallelRNG &pRNG, Field &U) {
Group::GaussianFundamentalLieAlgebraMatrix(pRNG, U);
}
@ -140,17 +140,7 @@ private:

// Can move this outside?
typedef IntegratorType<SmearingPolicy> TheIntegrator;
// Metric
//TrivialMetric<typename Implementation::Field> Mtr;
ConjugateGradient<LatticeGaugeField> CG(1.0e-8,10000);
LaplacianParams LapPar(0.0001, 1.0, 10000, 1e-8, 12, 64);
// RealD Kappa = 1.2;
RealD Kappa = Parameters.Kappa;
std::cout << GridLogMessage << "Kappa = " << Kappa << std::endl;

// Better to pass the generalised momenta to the integrator
LaplacianAdjointField<PeriodicGimplR> Laplacian(UGrid, CG, LapPar, Kappa);
TheIntegrator MDynamics(UGrid, Parameters.MD, TheAction, Smearing, Laplacian);
TheIntegrator MDynamics(UGrid, Parameters.MD, TheAction, Smearing);

if (Parameters.StartingType == "HotStart") {
// Hot start
@ -169,6 +159,13 @@ private:
Resources.GetCheckPointer()->CheckpointRestore(Parameters.StartTrajectory, U,
Resources.GetSerialRNG(),
Resources.GetParallelRNG());
} else {
// others
std::cout << GridLogError << "Unrecognized StartingType\n";
std::cout
<< GridLogError
<< "Valid [HotStart, ColdStart, TepidStart, CheckpointStart]\n";
exit(1);
}

Smearing.set_Field(U);
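The runner now rejects unknown StartingType strings instead of silently falling through. The same guard, written as a table-driven check (a sketch with hypothetical names, not the Grid API):

#include <set>
#include <string>

bool IsValidStartingType(const std::string &s) {
  static const std::set<std::string> valid =
      {"HotStart", "ColdStart", "TepidStart", "CheckpointStart"};
  return valid.count(s) != 0;   // caller exits(1) on false, as above
}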
@ -53,7 +53,6 @@ struct HMCparameters: Serializable {
bool, MetropolisTest,
Integer, NoMetropolisUntil,
std::string, StartingType,
RealD, Kappa,
IntegratorParameters, MD)

HMCparameters() {
@ -96,7 +95,7 @@ private:

typedef typename IntegratorType::Field Field;
typedef std::vector< HmcObservable<Field> * > ObsListType;

//pass these from the resource manager
GridSerialRNG &sRNG;
GridParallelRNG &pRNG;
@ -74,7 +74,7 @@ public:
conf_file = os.str();
}
}

virtual ~BaseHmcCheckpointer(){};
void check_filename(const std::string &filename){
std::ifstream f(filename.c_str());
if(!f.good()){
@ -82,7 +82,6 @@ public:
abort();
};
}

virtual void initialize(const CheckpointerParameters &Params) = 0;

virtual void CheckpointRestore(int traj, typename Impl::Field &U,
@ -45,6 +45,7 @@ private:

public:
INHERIT_GIMPL_TYPES(Implementation);
typedef GaugeStatistics<Implementation> GaugeStats;

ILDGHmcCheckpointer(const CheckpointerParameters &Params_) { initialize(Params_); }

@ -78,7 +79,7 @@ public:
BinaryIO::writeRNG(sRNG, pRNG, rng, 0,nersc_csum,scidac_csuma,scidac_csumb);
IldgWriter _IldgWriter(grid->IsBoss());
_IldgWriter.open(config);
_IldgWriter.writeConfiguration(U, traj, config, config);
_IldgWriter.writeConfiguration<GaugeStats>(U, traj, config, config);
_IldgWriter.close();

std::cout << GridLogMessage << "Written ILDG Configuration on " << config
@ -105,7 +106,7 @@ public:
FieldMetaData header;
IldgReader _IldgReader;
_IldgReader.open(config);
_IldgReader.readConfiguration(U,header); // format from the header
_IldgReader.readConfiguration<GaugeStats>(U,header); // format from the header
_IldgReader.close();

std::cout << GridLogMessage << "Read ILDG Configuration from " << config
@ -43,7 +43,8 @@ private:

public:
INHERIT_GIMPL_TYPES(Gimpl); // only for gauge configurations

typedef GaugeStatistics<Gimpl> GaugeStats;

NerscHmcCheckpointer(const CheckpointerParameters &Params_) { initialize(Params_); }

void initialize(const CheckpointerParameters &Params_) {
@ -60,7 +61,7 @@ public:
int precision32 = 1;
int tworow = 0;
NerscIO::writeRNGState(sRNG, pRNG, rng);
NerscIO::writeConfiguration(U, config, tworow, precision32);
NerscIO::writeConfiguration<GaugeStats>(U, config, tworow, precision32);
}
};

@ -74,7 +75,7 @@ public:

FieldMetaData header;
NerscIO::readRNGState(sRNG, pRNG, header, rng);
NerscIO::readConfiguration(U, header, config);
NerscIO::readConfiguration<GaugeStats>(U, header, config);
};
};
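Both checkpointers now thread a GaugeStats policy type into the I/O calls, so the header statistics (plaquette, link trace) can be specialized per gauge implementation, for example for C* boundaries, instead of being hard-wired in NerscIO/IldgWriter. A hedged sketch of the shape the template parameter is assumed to have (illustrative, not the exact Grid definition):

template <class Impl>
struct ExampleGaugeStats {
  template <class Field, class Meta>
  static void Statistics(const Field &U, Meta &header) {
    // fill header.plaquette / header.link_trace appropriately for Impl's
    // boundary conditions; the templated read/writeConfiguration<GaugeStats>
    // calls above pick this up at the call site
  }
};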
@ -73,8 +73,7 @@ protected:
double t_U; // Track time passing on each level and for U and for P
std::vector<double> t_P;

// MomentaField P;
GeneralisedMomenta<FieldImplementation > P;
MomentaField P;
SmearingPolicy& Smearer;
RepresentationPolicy Representations;
IntegratorParameters Params;
@ -84,7 +83,7 @@ protected:
void update_P(Field& U, int level, double ep)
{
t_P[level] += ep;
update_P(P.Mom, U, level, ep);
update_P(P, U, level, ep);

std::cout << GridLogIntegrator << "[" << level << "] P " << " dt " << ep << " : t_P " << t_P[level] << std::endl;
}
@ -112,21 +111,6 @@ protected:
// input U actually not used in the fundamental case
// Fundamental updates, include smearing

// Generalised momenta
// Derivative of the kinetic term must be computed before
// Mom is the momenta and gets updated by the
// actions derivatives
MomentaField MomDer(P.Mom.Grid());
P.M.ImportGauge(U);
P.DerivativeU(P.Mom, MomDer);
Mom -= MomDer * ep;

// Auxiliary fields
P.update_auxiliary_momenta(ep*0.5);
P.AuxiliaryFieldsDerivative(MomDer);
Mom -= MomDer * ep;
P.update_auxiliary_momenta(ep*0.5);

for (int a = 0; a < as[level].actions.size(); ++a) {
double start_full = usecond();
Field force(U.Grid());
@ -153,83 +137,9 @@ protected:
as[level].apply(update_P_hireps, Representations, Mom, U, ep);
}

void implicit_update_P(Field& U, int level, double ep, bool intermediate = false) {
t_P[level] += ep;

std::cout << GridLogIntegrator << "[" << level << "] P "
<< " dt " << ep << " : t_P " << t_P[level] << std::endl;
// Fundamental updates, include smearing
MomentaField Msum(P.Mom.Grid());
Msum = Zero();
for (int a = 0; a < as[level].actions.size(); ++a) {
// Compute the force terms for the lagrangian part
// We need to compute the derivative of the actions
// only once
Field force(U.Grid());
conformable(U.Grid(), P.Mom.Grid());
Field& Us = Smearer.get_U(as[level].actions.at(a)->is_smeared);
as[level].actions.at(a)->deriv(Us, force); // deriv should NOT include Ta

std::cout << GridLogIntegrator << "Smearing (on/off): " << as[level].actions.at(a)->is_smeared << std::endl;
if (as[level].actions.at(a)->is_smeared) Smearer.smeared_force(force);
force = FieldImplementation::projectForce(force); // Ta for gauge fields
Real force_abs = std::sqrt(norm2(force) / U.Grid()->gSites());
std::cout << GridLogIntegrator << "|Force| site average: " << force_abs
<< std::endl;
Msum += force;
}

MomentaField NewMom = P.Mom;
MomentaField OldMom = P.Mom;
double threshold = 1e-8;
P.M.ImportGauge(U);
MomentaField MomDer(P.Mom.Grid());
MomentaField MomDer1(P.Mom.Grid());
MomentaField AuxDer(P.Mom.Grid());
MomDer1 = Zero();
MomentaField diff(P.Mom.Grid());
double factor = 2.0;
if (intermediate){
P.DerivativeU(P.Mom, MomDer1);
factor = 1.0;
}

// Auxiliary fields
P.update_auxiliary_momenta(ep*0.5);
P.AuxiliaryFieldsDerivative(AuxDer);
Msum += AuxDer;

// Here run recursively
int counter = 1;
RealD RelativeError;
do {
std::cout << GridLogIntegrator << "UpdateP implicit step "<< counter << std::endl;

// Compute the derivative of the kinetic term
// with respect to the gauge field
P.DerivativeU(NewMom, MomDer);
Real force_abs = std::sqrt(norm2(MomDer) / U.Grid()->gSites());
std::cout << GridLogIntegrator << "|Force| laplacian site average: " << force_abs
<< std::endl;

NewMom = P.Mom - ep* 0.5 * (2.0*Msum + factor*MomDer + MomDer1);// simplify
diff = NewMom - OldMom;
counter++;
RelativeError = std::sqrt(norm2(diff))/std::sqrt(norm2(NewMom));
std::cout << GridLogIntegrator << "UpdateP RelativeError: " << RelativeError << std::endl;
OldMom = NewMom;
} while (RelativeError > threshold);

P.Mom = NewMom;

// update the auxiliary fields momenta
P.update_auxiliary_momenta(ep*0.5);
}

void update_U(Field& U, double ep)
{
update_U(P.Mom, U, ep);
update_U(P, U, ep);

t_U += ep;
int fl = levels - 1;
@ -248,64 +158,15 @@ protected:
Representations.update(U); // void functions if fundamental representation
}

void implicit_update_U(Field&U, double ep){
t_U += ep;
int fl = levels - 1;
std::cout << GridLogIntegrator << " " << "[" << fl << "] U " << " dt " << ep << " : t_U " << t_U << std::endl;

MomentaField Mom1(P.Mom.Grid());
MomentaField Mom2(P.Mom.Grid());
RealD RelativeError;
Field diff(U.Grid());
Real threshold = 1e-8;
int counter = 1;
int MaxCounter = 100;

Field OldU = U;
Field NewU = U;

P.M.ImportGauge(U);
P.DerivativeP(Mom1); // first term in the derivative

P.update_auxiliary_fields(ep*0.5);

MomentaField sum=Mom1;
do {
std::cout << GridLogIntegrator << "UpdateU implicit step "<< counter << std::endl;

P.DerivativeP(Mom2); // second term in the derivative, on the updated U
sum = (Mom1 + Mom2);

for (int mu = 0; mu < Nd; mu++) {
auto Umu = PeekIndex<LorentzIndex>(U, mu);
auto Pmu = PeekIndex<LorentzIndex>(sum, mu);
Umu = expMat(Pmu, ep * 0.5, 12) * Umu;
PokeIndex<LorentzIndex>(NewU, ProjectOnGroup(Umu), mu);
}

diff = NewU - OldU;
RelativeError = std::sqrt(norm2(diff))/std::sqrt(norm2(NewU));
std::cout << GridLogIntegrator << "UpdateU RelativeError: " << RelativeError << std::endl;

P.M.ImportGauge(NewU);
OldU = NewU; // some redundancy to be eliminated
counter++;
} while (RelativeError > threshold && counter < MaxCounter);

U = NewU;
P.update_auxiliary_fields(ep*0.5);
}
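The removed implicit_update_P/implicit_update_U solve an implicit equation: the updated momentum (or link) appears on both sides, so each call iterates a fixed-point map until the relative change drops below a threshold. The same convergence loop, reduced to a self-contained scalar sketch:

#include <cmath>
#include <functional>

double FixedPoint(const std::function<double(double)> &G, double x0,
                  double threshold = 1e-8, int maxIter = 100) {
  double xOld = x0, xNew = x0;
  for (int it = 0; it < maxIter; ++it) {
    xNew = G(xOld);                                    // one implicit sweep
    double rel = std::fabs(xNew - xOld) / std::fabs(xNew);
    if (rel < threshold) break;                        // the RelativeError test above
    xOld = xNew;
  }
  return xNew;
}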
virtual void step(Field& U, int level, int first, int last) = 0;

public:
Integrator(GridBase* grid, IntegratorParameters Par,
ActionSet<Field, RepresentationPolicy>& Aset,
SmearingPolicy& Sm, Metric<MomentaField>& M)
SmearingPolicy& Sm)
: Params(Par),
as(Aset),
P(grid, M),
P(grid),
levels(Aset.size()),
Smearer(Sm),
Representations(grid)
@ -342,9 +203,7 @@ public:

void reverse_momenta()
{
// P *= -1.0;
P.Mom *= -1.0;
P.AuxMom *= -1.0;
P *= -1.0;
}

// to be used by the actionlevel class to iterate
@ -364,13 +223,10 @@ public:
// Initialization of momenta and actions
void refresh(Field& U, GridParallelRNG& pRNG)
{
assert(P.Mom.Grid() == U.Grid());
assert(P.Grid() == U.Grid());
std::cout << GridLogIntegrator << "Integrator refresh\n";

// FieldImplementation::generate_momenta(P.Mom, pRNG);
P.M.ImportGauge(U);
P.MomentaDistribution(pRNG);

FieldImplementation::generate_momenta(P, pRNG);

// Update the smeared fields, can be implemented as observer
// necessary to keep the fields updated even after a reject
@ -416,11 +272,9 @@ public:

std::cout << GridLogIntegrator << "Integrator action\n";

// RealD H = - FieldImplementation::FieldSquareNorm(P)/HMC_MOMENTUM_DENOMINATOR; // - trace (P*P)/denom
P.M.ImportGauge(U);
RealD H = - P.MomentaAction();
RealD H = - FieldImplementation::FieldSquareNorm(P)/HMC_MOMENTUM_DENOMINATOR; // - trace (P*P)/denom

RealD Hterm;
std::cout << GridLogMessage << "Momentum action H_p = " << H << "\n";

// Actions
for (int level = 0; level < as.size(); ++level) {
@ -447,9 +301,9 @@ public:
t_P[level] = 0;
}

for (int step = 0; step < Params.MDsteps; ++step) { // MD step
int first_step = (step == 0);
int last_step = (step == Params.MDsteps - 1);
for (int stp = 0; stp < Params.MDsteps; ++stp) { // MD step
int first_step = (stp == 0);
int last_step = (stp == Params.MDsteps - 1);
this->step(U, 0, first_step, last_step);
}

@ -459,6 +313,8 @@ public:
std::cout << GridLogIntegrator << " times[" << level << "]= " << t_P[level] << " " << t_U << std::endl;
}

FieldImplementation::Project(U);

// and that we indeed got to the end of the trajectory
assert(fabs(t_U - Params.trajL) < 1.0e-6);

@ -101,8 +101,8 @@ public:

std::string integrator_name(){return "LeapFrog";}

LeapFrog(GridBase* grid, IntegratorParameters Par, ActionSet<Field, RepresentationPolicy>& Aset, SmearingPolicy& Sm, Metric<Field>& M)
: Integrator<FieldImplementation, SmearingPolicy, RepresentationPolicy>(grid, Par, Aset, Sm,M){};
LeapFrog(GridBase* grid, IntegratorParameters Par, ActionSet<Field, RepresentationPolicy>& Aset, SmearingPolicy& Sm)
: Integrator<FieldImplementation, SmearingPolicy, RepresentationPolicy>(grid, Par, Aset, Sm){};

void step(Field& U, int level, int _first, int _last) {
int fl = this->as.size() - 1;
@ -144,8 +144,8 @@ private:
public:
INHERIT_FIELD_TYPES(FieldImplementation);

MinimumNorm2(GridBase* grid, IntegratorParameters Par, ActionSet<Field, RepresentationPolicy>& Aset, SmearingPolicy& Sm, Metric<Field>& M)
: Integrator<FieldImplementation, SmearingPolicy, RepresentationPolicy>(grid, Par, Aset, Sm,M){};
MinimumNorm2(GridBase* grid, IntegratorParameters Par, ActionSet<Field, RepresentationPolicy>& Aset, SmearingPolicy& Sm)
: Integrator<FieldImplementation, SmearingPolicy, RepresentationPolicy>(grid, Par, Aset, Sm){};

std::string integrator_name(){return "MininumNorm2";}

@ -207,9 +207,9 @@ public:
// Looks like dH scales as dt^4. tested wilson/wilson 2 level.
ForceGradient(GridBase* grid, IntegratorParameters Par,
ActionSet<Field, RepresentationPolicy>& Aset,
SmearingPolicy& Sm, Metric<Field>& M)
SmearingPolicy& Sm)
: Integrator<FieldImplementation, SmearingPolicy, RepresentationPolicy>(
grid, Par, Aset, Sm,M){};
grid, Par, Aset, Sm){};

std::string integrator_name(){return "ForceGradient";}

@ -271,139 +271,6 @@ public:
}
};

////////////////////////////////
// Riemannian Manifold HMC
// Girolami et al
////////////////////////////////

// correct
template <class FieldImplementation, class SmearingPolicy,
class RepresentationPolicy =
Representations<FundamentalRepresentation> >
class ImplicitLeapFrog : public Integrator<FieldImplementation, SmearingPolicy,
RepresentationPolicy> {
public:
typedef ImplicitLeapFrog<FieldImplementation, SmearingPolicy, RepresentationPolicy>
Algorithm;
INHERIT_FIELD_TYPES(FieldImplementation);

// Riemannian manifold metric operator
// Hermitian operator Fisher

std::string integrator_name(){return "ImplicitLeapFrog";}

ImplicitLeapFrog(GridBase* grid, IntegratorParameters Par,
ActionSet<Field, RepresentationPolicy>& Aset, SmearingPolicy& Sm, Metric<Field>& M)
: Integrator<FieldImplementation, SmearingPolicy, RepresentationPolicy>(
grid, Par, Aset, Sm, M){};

void step(Field& U, int level, int _first, int _last) {
int fl = this->as.size() - 1;
// level  : current level
// fl     : final level
// eps    : current step size

// Get current level step size
RealD eps = this->Params.trajL/this->Params.MDsteps;
for (int l = 0; l <= level; ++l) eps /= this->as[l].multiplier;

int multiplier = this->as[level].multiplier;
for (int e = 0; e < multiplier; ++e) {
int first_step = _first && (e == 0);
int last_step = _last && (e == multiplier - 1);

if (first_step) { // initial half step
this->implicit_update_P(U, level, eps / 2.0);
}

if (level == fl) { // lowest level
this->implicit_update_U(U, eps);
} else { // recursive function call
this->step(U, level + 1, first_step, last_step);
}

//int mm = last_step ? 1 : 2;
if (last_step){
this->update_P(U, level, eps / 2.0);
} else {
this->implicit_update_P(U, level, eps, true);// works intermediate step
// this->update_P(U, level, eps); // looks not reversible
}
}
}
};

// This is not completely tested
template <class FieldImplementation, class SmearingPolicy,
class RepresentationPolicy =
Representations<FundamentalRepresentation> >
class ImplicitMinimumNorm2 : public Integrator<FieldImplementation, SmearingPolicy,
RepresentationPolicy> {
private:
const RealD lambda = 0.1931833275037836;

public:
INHERIT_FIELD_TYPES(FieldImplementation);

ImplicitMinimumNorm2(GridBase* grid, IntegratorParameters Par,
ActionSet<Field, RepresentationPolicy>& Aset, SmearingPolicy& Sm, Metric<Field>& M)
: Integrator<FieldImplementation, SmearingPolicy, RepresentationPolicy>(
grid, Par, Aset, Sm, M){};

std::string integrator_name(){return "ImplicitMininumNorm2";}

void step(Field& U, int level, int _first, int _last) {
// level  : current level
// fl     : final level
// eps    : current step size

int fl = this->as.size() - 1;

RealD eps = this->Params.trajL/this->Params.MDsteps * 2.0;
for (int l = 0; l <= level; ++l) eps /= 2.0 * this->as[l].multiplier;

// Nesting: 2xupdate_U of size eps/2
// Next level is eps/2/multiplier

int multiplier = this->as[level].multiplier;
for (int e = 0; e < multiplier; ++e) { // steps per step

int first_step = _first && (e == 0);
int last_step = _last && (e == multiplier - 1);

if (first_step) { // initial half step
this->implicit_update_P(U, level, lambda * eps);
}

if (level == fl) { // lowest level
this->implicit_update_U(U, 0.5 * eps);
} else { // recursive function call
this->step(U, level + 1, first_step, 0);
}

this->implicit_update_P(U, level, (1.0 - 2.0 * lambda) * eps, true);

if (level == fl) { // lowest level
this->implicit_update_U(U, 0.5 * eps);
} else { // recursive function call
this->step(U, level + 1, 0, last_step);
}

//int mm = (last_step) ? 1 : 2;
//this->update_P(U, level, lambda * eps * mm);

if (last_step) {
this->update_P(U, level, eps * lambda);
} else {
this->implicit_update_P(U, level, lambda * eps*2.0, true);
}
}
}
};

NAMESPACE_END(Grid);

#endif // INTEGRATOR_INCLUDED
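Across the integrators above, the step size seen at a given nesting level is the trajectory length divided by MDsteps and by every level multiplier down to that level (the MinimumNorm2 variants carry extra factors of two, as the removed code shows). The arithmetic, extracted into a standalone helper with illustrative numbers in the comment:

#include <vector>

// trajL = 1.0, MDsteps = 10, multipliers = {1, 4}:
//   eps(level 0) = 1.0 / 10 / 1     = 0.1
//   eps(level 1) = 1.0 / 10 / 1 / 4 = 0.025
double StepSize(double trajL, int MDsteps, const std::vector<int> &mult, int level) {
  double eps = trajL / MDsteps;
  for (int l = 0; l <= level; ++l) eps /= mult[l];
  return eps;
}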
@ -99,7 +99,7 @@ public:
virtual Prod* getPtr() = 0;

// add a getReference?

virtual ~HMCModuleBase(){};
virtual void print_parameters(){}; // default to nothing
};
@ -128,7 +128,6 @@ template<class vtype,IfSpinor<iVector<vtype,Ns> > = 0> accelerator_inline void s
}
template<class vtype,IfSpinor<iVector<vtype,Ns> > = 0> accelerator_inline void spProjTm (iVector<vtype,Nhs> &hspin,const iVector<vtype,Ns> &fspin)
{
//typename std::enable_if<matchGridTensorIndex<iVector<vtype,Ns>,SpinorIndex>::value,iVector<vtype,Ns> >::type *SFINAE;
hspin(0)=fspin(0)-fspin(2);
hspin(1)=fspin(1)-fspin(3);
}
@ -138,40 +137,50 @@ template<class vtype,IfSpinor<iVector<vtype,Ns> > = 0> accelerator_inline void s
 * 0 0 -1 0
 * 0 0 0 -1
 */

template<class vtype,IfSpinor<iVector<vtype,Ns> > = 0> accelerator_inline void spProj5p (iVector<vtype,Nhs> &hspin,const iVector<vtype,Ns> &fspin)
{
//typename std::enable_if<matchGridTensorIndex<iVector<vtype,Ns>,SpinorIndex>::value,iVector<vtype,Ns> >::type *SFINAE;
hspin(0)=fspin(0);
hspin(1)=fspin(1);
}

template<class vtype,IfSpinor<iVector<vtype,Ns> > = 0> accelerator_inline void spProj5m (iVector<vtype,Nhs> &hspin,const iVector<vtype,Ns> &fspin)
{
//typename std::enable_if<matchGridTensorIndex<iVector<vtype,Ns>,SpinorIndex>::value,iVector<vtype,Ns> >::type *SFINAE;
hspin(0)=fspin(2);
hspin(1)=fspin(3);
}

// template<class vtype> accelerator_inline void fspProj5p (iVector<vtype,Ns> &rfspin,const iVector<vtype,Ns> &fspin)
template<class vtype,IfSpinor<iVector<vtype,Ns> > = 0> accelerator_inline void spProj5p (iVector<vtype,Ns> &rfspin,const iVector<vtype,Ns> &fspin)
{
//typename std::enable_if<matchGridTensorIndex<iVector<vtype,Ns>,SpinorIndex>::value,iVector<vtype,Ns> >::type *SFINAE;
rfspin(0)=fspin(0);
rfspin(1)=fspin(1);
rfspin(2)=Zero();
rfspin(3)=Zero();
}
// template<class vtype> accelerator_inline void fspProj5m (iVector<vtype,Ns> &rfspin,const iVector<vtype,Ns> &fspin)
template<class vtype,IfSpinor<iVector<vtype,Ns> > = 0> accelerator_inline void spProj5m (iVector<vtype,Ns> &rfspin,const iVector<vtype,Ns> &fspin)
{
//typename std::enable_if<matchGridTensorIndex<iVector<vtype,Ns>,SpinorIndex>::value,iVector<vtype,Ns> >::type *SFINAE;
rfspin(0)=Zero();
rfspin(1)=Zero();
rfspin(2)=fspin(2);
rfspin(3)=fspin(3);
}

template<class vtype,int N,IfCoarsened<iVector<vtype,N> > = 0> accelerator_inline void spProj5p (iVector<vtype,N> &rfspin,const iVector<vtype,N> &fspin)
{
const int hN = N>>1;
for(int s=0;s<hN;s++){
rfspin(s)=fspin(s);
rfspin(s+hN)=Zero();
}
}
template<class vtype,int N,IfCoarsened<iVector<vtype,N> > = 0> accelerator_inline void spProj5m (iVector<vtype,N> &rfspin,const iVector<vtype,N> &fspin)
{
const int hN = N>>1;
for(int s=0;s<hN;s++){
rfspin(s)=Zero();
rfspin(s+hN)=fspin(s+hN);
}
}
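The new IfCoarsened overloads generalize the gamma_5 projectors to coarse-space vectors of any even length N: components [0, N/2) carry the "+" chirality and [N/2, N) the "-" chirality, so P+ keeps the upper half and zeroes the lower, and P- does the reverse. The same index logic as a self-contained sketch:

#include <vector>

template <class T>
std::vector<T> Proj5p(const std::vector<T> &f) {        // keep upper half
  std::vector<T> r(f.size(), T());
  for (size_t s = 0; s < f.size() / 2; ++s) r[s] = f[s];
  return r;
}

template <class T>
std::vector<T> Proj5m(const std::vector<T> &f) {        // keep lower half
  std::vector<T> r(f.size(), T());
  for (size_t s = f.size() / 2; s < f.size(); ++s) r[s] = f[s];
  return r;
}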
||||
////////////////////////////////////////////////////////////////////////////////////////////////////////////////
|
||||
// Reconstruction routines to move back again to four spin
|
||||
////////////////////////////////////////////////////////////////////////////////////////////////////////////////
|
||||
@ -183,7 +192,6 @@ template<class vtype,IfSpinor<iVector<vtype,Ns> > = 0> accelerator_inline void s
|
||||
*/
|
||||
template<class vtype,IfSpinor<iVector<vtype,Ns> > = 0> accelerator_inline void spReconXp (iVector<vtype,Ns> &fspin,const iVector<vtype,Nhs> &hspin)
|
||||
{
|
||||
//typename std::enable_if<matchGridTensorIndex<iVector<vtype,Ns>,SpinorIndex>::value,iVector<vtype,Ns> >::type *SFINAE;
|
||||
fspin(0)=hspin(0);
|
||||
fspin(1)=hspin(1);
|
||||
fspin(2)=timesMinusI(hspin(1));
|
||||
@ -191,7 +199,6 @@ template<class vtype,IfSpinor<iVector<vtype,Ns> > = 0> accelerator_inline void s
|
||||
}
|
||||
template<class vtype,IfSpinor<iVector<vtype,Ns> > = 0> accelerator_inline void spReconXm (iVector<vtype,Ns> &fspin,const iVector<vtype,Nhs> &hspin)
|
||||
{
|
||||
//typename std::enable_if<matchGridTensorIndex<iVector<vtype,Ns>,SpinorIndex>::value,iVector<vtype,Ns> >::type *SFINAE;
|
||||
fspin(0)=hspin(0);
|
||||
fspin(1)=hspin(1);
|
||||
fspin(2)=timesI(hspin(1));
|
||||
@ -199,7 +206,6 @@ template<class vtype,IfSpinor<iVector<vtype,Ns> > = 0> accelerator_inline void s
|
||||
}
|
||||
template<class vtype,IfSpinor<iVector<vtype,Ns> > = 0> accelerator_inline void accumReconXp (iVector<vtype,Ns> &fspin,const iVector<vtype,Nhs> &hspin)
|
||||
{
|
||||
//typename std::enable_if<matchGridTensorIndex<iVector<vtype,Ns>,SpinorIndex>::value,iVector<vtype,Ns> >::type *SFINAE;
|
||||
fspin(0)+=hspin(0);
|
||||
fspin(1)+=hspin(1);
|
||||
fspin(2)-=timesI(hspin(1));
|
||||
@ -207,7 +213,6 @@ template<class vtype,IfSpinor<iVector<vtype,Ns> > = 0> accelerator_inline void a
|
||||
}
|
||||
template<class vtype,IfSpinor<iVector<vtype,Ns> > = 0> accelerator_inline void accumReconXm (iVector<vtype,Ns> &fspin,const iVector<vtype,Nhs> &hspin)
|
||||
{
|
||||
//typename std::enable_if<matchGridTensorIndex<iVector<vtype,Ns>,SpinorIndex>::value,iVector<vtype,Ns> >::type *SFINAE;
|
||||
fspin(0)+=hspin(0);
|
||||
fspin(1)+=hspin(1);
|
||||
fspin(2)+=timesI(hspin(1));
|
||||
@ -221,7 +226,6 @@ template<class vtype,IfSpinor<iVector<vtype,Ns> > = 0> accelerator_inline void a
|
||||
|
||||
template<class vtype,IfSpinor<iVector<vtype,Ns> > = 0> accelerator_inline void spReconYp (iVector<vtype,Ns> &fspin,const iVector<vtype,Nhs> &hspin)
|
||||
{
|
||||
//typename std::enable_if<matchGridTensorIndex<iVector<vtype,Ns>,SpinorIndex>::value,iVector<vtype,Ns> >::type *SFINAE;
|
||||
fspin(0)=hspin(0);
|
||||
fspin(1)=hspin(1);
|
||||
fspin(2)= hspin(1);
|
||||
@ -229,7 +233,6 @@ template<class vtype,IfSpinor<iVector<vtype,Ns> > = 0> accelerator_inline void s
|
||||
}
|
||||
template<class vtype,IfSpinor<iVector<vtype,Ns> > = 0> accelerator_inline void spReconYm (iVector<vtype,Ns> &fspin,const iVector<vtype,Nhs> &hspin)
|
||||
{
|
||||
//typename std::enable_if<matchGridTensorIndex<iVector<vtype,Ns>,SpinorIndex>::value,iVector<vtype,Ns> >::type *SFINAE;
|
||||
fspin(0)=hspin(0);
|
||||
fspin(1)=hspin(1);
|
||||
fspin(2)=-hspin(1);
|
||||
@ -237,7 +240,6 @@ template<class vtype,IfSpinor<iVector<vtype,Ns> > = 0> accelerator_inline void s
|
||||
}
|
||||
template<class vtype,IfSpinor<iVector<vtype,Ns> > = 0> accelerator_inline void accumReconYp (iVector<vtype,Ns> &fspin,const iVector<vtype,Nhs> &hspin)
|
||||
{
|
||||
//typename std::enable_if<matchGridTensorIndex<iVector<vtype,Ns>,SpinorIndex>::value,iVector<vtype,Ns> >::type *SFINAE;
|
||||
fspin(0)+=hspin(0);
|
||||
fspin(1)+=hspin(1);
|
||||
fspin(2)+=hspin(1);
|
||||
@ -245,7 +247,6 @@ template<class vtype,IfSpinor<iVector<vtype,Ns> > = 0> accelerator_inline void a
|
||||
}
|
||||
template<class vtype,IfSpinor<iVector<vtype,Ns> > = 0> accelerator_inline void accumReconYm (iVector<vtype,Ns> &fspin,const iVector<vtype,Nhs> &hspin)
|
||||
{
|
||||
//typename std::enable_if<matchGridTensorIndex<iVector<vtype,Ns>,SpinorIndex>::value,iVector<vtype,Ns> >::type *SFINAE;
|
||||
fspin(0)+=hspin(0);
|
||||
fspin(1)+=hspin(1);
|
||||
fspin(2)-=hspin(1);
|
||||
@ -260,7 +261,6 @@ template<class vtype,IfSpinor<iVector<vtype,Ns> > = 0> accelerator_inline void a
|
||||
*/
|
||||
template<class vtype,IfSpinor<iVector<vtype,Ns> > = 0> accelerator_inline void spReconZp (iVector<vtype,Ns> &fspin,const iVector<vtype,Nhs> &hspin)
|
||||
{
|
||||
//typename std::enable_if<matchGridTensorIndex<iVector<vtype,Ns>,SpinorIndex>::value,iVector<vtype,Ns> >::type *SFINAE;
|
||||
fspin(0)=hspin(0);
|
||||
fspin(1)=hspin(1);
|
||||
fspin(2)=timesMinusI(hspin(0));
|
||||
@ -268,7 +268,6 @@ template<class vtype,IfSpinor<iVector<vtype,Ns> > = 0> accelerator_inline void s
|
||||
}
|
||||
template<class vtype,IfSpinor<iVector<vtype,Ns> > = 0> accelerator_inline void spReconZm (iVector<vtype,Ns> &fspin,const iVector<vtype,Nhs> &hspin)
|
||||
{
|
||||
//typename std::enable_if<matchGridTensorIndex<iVector<vtype,Ns>,SpinorIndex>::value,iVector<vtype,Ns> >::type *SFINAE;
|
||||
fspin(0)=hspin(0);
|
||||
fspin(1)=hspin(1);
|
||||
fspin(2)= timesI(hspin(0));
|
||||
@ -276,7 +275,6 @@ template<class vtype,IfSpinor<iVector<vtype,Ns> > = 0> accelerator_inline void s
|
||||
}
|
||||
template<class vtype,IfSpinor<iVector<vtype,Ns> > = 0> accelerator_inline void accumReconZp (iVector<vtype,Ns> &fspin,const iVector<vtype,Nhs> &hspin)
|
||||
{
|
||||
//typename std::enable_if<matchGridTensorIndex<iVector<vtype,Ns>,SpinorIndex>::value,iVector<vtype,Ns> >::type *SFINAE;
|
||||
fspin(0)+=hspin(0);
|
||||
fspin(1)+=hspin(1);
|
||||
fspin(2)-=timesI(hspin(0));
|
||||
@ -284,7 +282,6 @@ template<class vtype,IfSpinor<iVector<vtype,Ns> > = 0> accelerator_inline void a
|
||||
}
|
||||
template<class vtype,IfSpinor<iVector<vtype,Ns> > = 0> accelerator_inline void accumReconZm (iVector<vtype,Ns> &fspin,const iVector<vtype,Nhs> &hspin)
|
||||
{
|
||||
//typename std::enable_if<matchGridTensorIndex<iVector<vtype,Ns>,SpinorIndex>::value,iVector<vtype,Ns> >::type *SFINAE;
|
||||
fspin(0)+=hspin(0);
|
||||
fspin(1)+=hspin(1);
|
||||
fspin(2)+=timesI(hspin(0));
|
||||
@ -298,7 +295,6 @@ template<class vtype,IfSpinor<iVector<vtype,Ns> > = 0> accelerator_inline void a
|
||||
*/
|
||||
template<class vtype,IfSpinor<iVector<vtype,Ns> > = 0> accelerator_inline void spReconTp (iVector<vtype,Ns> &fspin,const iVector<vtype,Nhs> &hspin)
|
||||
{
|
||||
//typename std::enable_if<matchGridTensorIndex<iVector<vtype,Ns>,SpinorIndex>::value,iVector<vtype,Ns> >::type *SFINAE;
|
||||
fspin(0)=hspin(0);
|
||||
fspin(1)=hspin(1);
|
||||
fspin(2)=hspin(0);
|
||||
@ -306,7 +302,6 @@ template<class vtype,IfSpinor<iVector<vtype,Ns> > = 0> accelerator_inline void s
|
||||
}
|
||||
template<class vtype,IfSpinor<iVector<vtype,Ns> > = 0> accelerator_inline void spReconTm (iVector<vtype,Ns> &fspin,const iVector<vtype,Nhs> &hspin)
|
||||
{
|
||||
//typename std::enable_if<matchGridTensorIndex<iVector<vtype,Ns>,SpinorIndex>::value,iVector<vtype,Ns> >::type *SFINAE;
|
||||
fspin(0)=hspin(0);
|
||||
fspin(1)=hspin(1);
|
||||
fspin(2)=-hspin(0);
|
||||
@ -314,7 +309,6 @@ template<class vtype,IfSpinor<iVector<vtype,Ns> > = 0> accelerator_inline void s
|
||||
}
|
||||
template<class vtype,IfSpinor<iVector<vtype,Ns> > = 0> accelerator_inline void accumReconTp (iVector<vtype,Ns> &fspin,const iVector<vtype,Nhs> &hspin)
|
||||
{
|
||||
//typename std::enable_if<matchGridTensorIndex<iVector<vtype,Ns>,SpinorIndex>::value,iVector<vtype,Ns> >::type *SFINAE;
|
||||
fspin(0)+=hspin(0);
|
||||
fspin(1)+=hspin(1);
|
||||
fspin(2)+=hspin(0);
|
||||
@ -322,7 +316,6 @@ template<class vtype,IfSpinor<iVector<vtype,Ns> > = 0> accelerator_inline void a
|
||||
}
|
||||
template<class vtype,IfSpinor<iVector<vtype,Ns> > = 0> accelerator_inline void accumReconTm (iVector<vtype,Ns> &fspin,const iVector<vtype,Nhs> &hspin)
|
||||
{
|
||||
//typename std::enable_if<matchGridTensorIndex<iVector<vtype,Ns>,SpinorIndex>::value,iVector<vtype,Ns> >::type *SFINAE;
|
||||
fspin(0)+=hspin(0);
|
||||
fspin(1)+=hspin(1);
|
||||
fspin(2)-=hspin(0);
|
||||
@ -336,7 +329,6 @@ template<class vtype,IfSpinor<iVector<vtype,Ns> > = 0> accelerator_inline void a
|
||||
*/
|
||||
template<class vtype,IfSpinor<iVector<vtype,Ns> > = 0> accelerator_inline void spRecon5p (iVector<vtype,Ns> &fspin,const iVector<vtype,Nhs> &hspin)
|
||||
{
|
||||
//typename std::enable_if<matchGridTensorIndex<iVector<vtype,Ns>,SpinorIndex>::value,iVector<vtype,Ns> >::type *SFINAE;
|
||||
fspin(0)=hspin(0)+hspin(0); // add is lower latency than mul
|
||||
fspin(1)=hspin(1)+hspin(1); // probably no measurable diffence though
|
||||
fspin(2)=Zero();
|
||||
@ -344,7 +336,6 @@ template<class vtype,IfSpinor<iVector<vtype,Ns> > = 0> accelerator_inline void s
|
||||
}
|
||||
template<class vtype,IfSpinor<iVector<vtype,Ns> > = 0> accelerator_inline void spRecon5m (iVector<vtype,Ns> &fspin,const iVector<vtype,Nhs> &hspin)
|
||||
{
|
||||
//typename std::enable_if<matchGridTensorIndex<iVector<vtype,Ns>,SpinorIndex>::value,iVector<vtype,Ns> >::type *SFINAE;
|
||||
fspin(0)=Zero();
|
||||
fspin(1)=Zero();
|
||||
fspin(2)=hspin(0)+hspin(0);
|
||||
@ -352,7 +343,6 @@ template<class vtype,IfSpinor<iVector<vtype,Ns> > = 0> accelerator_inline void s
|
||||
}
|
||||
template<class vtype,IfSpinor<iVector<vtype,Ns> > = 0> accelerator_inline void accumRecon5p (iVector<vtype,Ns> &fspin,const iVector<vtype,Nhs> &hspin)
|
||||
{
|
||||
//typename std::enable_if<matchGridTensorIndex<iVector<vtype,Ns>,SpinorIndex>::value,iVector<vtype,Ns> >::type *SFINAE;
|
||||
fspin(0)+=hspin(0)+hspin(0);
|
||||
fspin(1)+=hspin(1)+hspin(1);
|
||||
}
|
||||
@ -372,7 +362,6 @@ template<class vtype,IfSpinor<iVector<vtype,Ns> > = 0> accelerator_inline void a
|
||||
//////////
|
||||
template<class rtype,class vtype,int N,IfNotSpinor<iVector<vtype,N> > = 0> accelerator_inline void spProjXp (iVector<rtype,N> &hspin,const iVector<vtype,N> &fspin)
|
||||
{
|
||||
//typename std::enable_if<matchGridTensorIndex<iVector<vtype,N>,SpinorIndex>::notvalue,iVector<vtype,N> >::type *temp;
|
||||
for(int i=0;i<N;i++) {
|
||||
spProjXp(hspin._internal[i],fspin._internal[i]);
|
||||
}
|
||||
@ -426,26 +415,21 @@ template<class rtype,class vtype,int N> accelerator_inline void accumReconXp (iM
|
||||
}}
|
||||
}
|
||||
|
||||
|
||||
|
||||
////////
|
||||
// Xm
|
||||
////////
|
||||
template<class rtype,class vtype> accelerator_inline void spProjXm (iScalar<rtype> &hspin,const iScalar<vtype> &fspin)
|
||||
{
|
||||
//typename std::enable_if<matchGridTensorIndex<iScalar<vtype>,SpinorIndex>::notvalue,iScalar<vtype> >::type *temp;
|
||||
spProjXm(hspin._internal,fspin._internal);
|
||||
}
|
||||
template<class rtype,class vtype,int N,IfNotSpinor<iVector<vtype,N> > = 0> accelerator_inline void spProjXm (iVector<rtype,N> &hspin,const iVector<vtype,N> &fspin)
|
||||
{
|
||||
//typename std::enable_if<matchGridTensorIndex<iVector<vtype,N>,SpinorIndex>::notvalue,iVector<vtype,N> >::type *temp;
|
||||
for(int i=0;i<N;i++) {
|
||||
spProjXm(hspin._internal[i],fspin._internal[i]);
|
||||
}
|
||||
}
|
||||
template<class rtype,class vtype,int N> accelerator_inline void spProjXm (iMatrix<rtype,N> &hspin,const iMatrix<vtype,N> &fspin)
|
||||
{
|
||||
//typename std::enable_if<matchGridTensorIndex<iMatrix<vtype,N>,SpinorIndex>::notvalue,iMatrix<vtype,N> >::type *temp;
|
||||
for(int i=0;i<N;i++){
|
||||
for(int j=0;j<N;j++){
|
||||
spProjXm(hspin._internal[i][j],fspin._internal[i][j]);
|
||||
@ -455,19 +439,16 @@ template<class rtype,class vtype,int N> accelerator_inline void spProjXm (iMatri
|
||||
|
||||
template<class rtype,class vtype> accelerator_inline void spReconXm (iScalar<rtype> &hspin,const iScalar<vtype> &fspin)
|
||||
{
|
||||
//typename std::enable_if<matchGridTensorIndex<iScalar<vtype>,SpinorIndex>::notvalue,iScalar<vtype> >::type *temp;
|
||||
spReconXm(hspin._internal,fspin._internal);
|
||||
}
|
||||
template<class rtype,class vtype,int N,IfNotSpinor<iVector<vtype,N> > = 0> accelerator_inline void spReconXm (iVector<rtype,N> &hspin,const iVector<vtype,N> &fspin)
|
||||
{
|
||||
//typename std::enable_if<matchGridTensorIndex<iVector<vtype,N>,SpinorIndex>::notvalue,iVector<vtype,N> >::type *temp;
|
||||
for(int i=0;i<N;i++) {
|
||||
spReconXm(hspin._internal[i],fspin._internal[i]);
|
||||
}
|
||||
}
|
||||
template<class rtype,class vtype,int N> accelerator_inline void spReconXm (iMatrix<rtype,N> &hspin,const iMatrix<vtype,N> &fspin)
|
||||
{
|
||||
//typename std::enable_if<matchGridTensorIndex<iMatrix<vtype,N>,SpinorIndex>::notvalue,iMatrix<vtype,N> >::type *temp;
|
||||
for(int i=0;i<N;i++){
|
||||
for(int j=0;j<N;j++){
|
||||
spReconXm(hspin._internal[i][j],fspin._internal[i][j]);
|
||||
@ -476,45 +457,37 @@ template<class rtype,class vtype,int N> accelerator_inline void spReconXm (iMatr
|
||||
|
||||
template<class rtype,class vtype> accelerator_inline void accumReconXm (iScalar<rtype> &hspin,const iScalar<vtype> &fspin)
|
||||
{
|
||||
//typename std::enable_if<matchGridTensorIndex<iScalar<vtype>,SpinorIndex>::notvalue,iScalar<vtype> >::type *temp;
|
||||
accumReconXm(hspin._internal,fspin._internal);
|
||||
}
|
||||
template<class rtype,class vtype,int N,IfNotSpinor<iVector<vtype,N> > = 0> accelerator_inline void accumReconXm (iVector<rtype,N> &hspin,const iVector<vtype,N> &fspin)
|
||||
{
|
||||
//typename std::enable_if<matchGridTensorIndex<iVector<vtype,N>,SpinorIndex>::notvalue,iVector<vtype,N> >::type *temp;
|
||||
for(int i=0;i<N;i++) {
|
||||
accumReconXm(hspin._internal[i],fspin._internal[i]);
|
||||
}
|
||||
}
|
||||
template<class rtype,class vtype,int N> accelerator_inline void accumReconXm (iMatrix<rtype,N> &hspin,const iMatrix<vtype,N> &fspin)
|
||||
{
|
||||
//typename std::enable_if<matchGridTensorIndex<iMatrix<vtype,N>,SpinorIndex>::notvalue,iMatrix<vtype,N> >::type *temp;
|
||||
for(int i=0;i<N;i++){
|
||||
for(int j=0;j<N;j++){
|
||||
accumReconXm(hspin._internal[i][j],fspin._internal[i][j]);
|
||||
}}
|
||||
}
|
||||
|
||||
|
||||
|
||||
////////
|
||||
// Yp
|
||||
////////
|
||||
template<class rtype,class vtype> accelerator_inline void spProjYp (iScalar<rtype> &hspin,const iScalar<vtype> &fspin)
|
||||
{
|
||||
//typename std::enable_if<matchGridTensorIndex<iScalar<vtype>,SpinorIndex>::notvalue,iScalar<vtype> >::type *temp;
|
||||
spProjYp(hspin._internal,fspin._internal);
|
||||
}
|
||||
template<class rtype,class vtype,int N,IfNotSpinor<iVector<vtype,N> > = 0> accelerator_inline void spProjYp (iVector<rtype,N> &hspin,const iVector<vtype,N> &fspin)
|
||||
{
|
||||
//typename std::enable_if<matchGridTensorIndex<iVector<vtype,N>,SpinorIndex>::notvalue,iVector<vtype,N> >::type *temp;
|
||||
for(int i=0;i<N;i++) {
|
||||
spProjYp(hspin._internal[i],fspin._internal[i]);
|
||||
}
|
||||
}
|
||||
template<class rtype,class vtype,int N> accelerator_inline void spProjYp (iMatrix<rtype,N> &hspin,const iMatrix<vtype,N> &fspin)
|
||||
{
|
||||
//typename std::enable_if<matchGridTensorIndex<iMatrix<vtype,N>,SpinorIndex>::notvalue,iMatrix<vtype,N> >::type *temp;
|
||||
for(int i=0;i<N;i++){
|
||||
for(int j=0;j<N;j++){
|
||||
spProjYp(hspin._internal[i][j],fspin._internal[i][j]);
|
||||
@ -524,19 +497,16 @@ template<class rtype,class vtype,int N> accelerator_inline void spProjYp (iMatri
|
||||
|
||||
template<class rtype,class vtype> accelerator_inline void spReconYp (iScalar<rtype> &hspin,const iScalar<vtype> &fspin)
|
||||
{
|
||||
//typename std::enable_if<matchGridTensorIndex<iScalar<vtype>,SpinorIndex>::notvalue,iScalar<vtype> >::type *temp;
|
||||
spReconYp(hspin._internal,fspin._internal);
|
||||
}
|
||||
template<class rtype,class vtype,int N,IfNotSpinor<iVector<vtype,N> > = 0> accelerator_inline void spReconYp (iVector<rtype,N> &hspin,const iVector<vtype,N> &fspin)
|
||||
{
|
||||
//typename std::enable_if<matchGridTensorIndex<iVector<vtype,N>,SpinorIndex>::notvalue,iVector<vtype,N> >::type *temp;
|
||||
for(int i=0;i<N;i++) {
|
||||
spReconYp(hspin._internal[i],fspin._internal[i]);
|
||||
}
|
||||
}
|
||||
template<class rtype,class vtype,int N> accelerator_inline void spReconYp (iMatrix<rtype,N> &hspin,const iMatrix<vtype,N> &fspin)
|
||||
{
|
||||
//typename std::enable_if<matchGridTensorIndex<iMatrix<vtype,N>,SpinorIndex>::notvalue,iMatrix<vtype,N> >::type *temp;
|
||||
for(int i=0;i<N;i++){
|
||||
for(int j=0;j<N;j++){
|
||||
spReconYp(hspin._internal[i][j],fspin._internal[i][j]);
|
||||
@ -545,66 +515,55 @@ template<class rtype,class vtype,int N> accelerator_inline void spReconYp (iMatr
|
||||
|
||||
template<class rtype,class vtype> accelerator_inline void accumReconYp (iScalar<rtype> &hspin,const iScalar<vtype> &fspin)
|
||||
{
|
||||
//typename std::enable_if<matchGridTensorIndex<iScalar<vtype>,SpinorIndex>::notvalue,iScalar<vtype> >::type *temp;
|
||||
accumReconYp(hspin._internal,fspin._internal);
|
||||
}
|
||||
template<class rtype,class vtype,int N,IfNotSpinor<iVector<vtype,N> > = 0> accelerator_inline void accumReconYp (iVector<rtype,N> &hspin,const iVector<vtype,N> &fspin)
|
||||
{
|
||||
//typename std::enable_if<matchGridTensorIndex<iVector<vtype,N>,SpinorIndex>::notvalue,iVector<vtype,N> >::type *temp;
|
||||
for(int i=0;i<N;i++) {
|
||||
accumReconYp(hspin._internal[i],fspin._internal[i]);
|
||||
}
|
||||
}
|
||||
template<class rtype,class vtype,int N> accelerator_inline void accumReconYp (iMatrix<rtype,N> &hspin,const iMatrix<vtype,N> &fspin)
|
||||
{
|
||||
//typename std::enable_if<matchGridTensorIndex<iMatrix<vtype,N>,SpinorIndex>::notvalue,iMatrix<vtype,N> >::type *temp;
|
||||
for(int i=0;i<N;i++){
|
||||
for(int j=0;j<N;j++){
|
||||
accumReconYp(hspin._internal[i][j],fspin._internal[i][j]);
|
||||
}}
|
||||
}
|
||||
|
||||
|
||||
////////
|
||||
// Ym
|
||||
////////
|
||||
template<class rtype,class vtype> accelerator_inline void spProjYm (iScalar<rtype> &hspin,const iScalar<vtype> &fspin)
|
||||
{
|
||||
//typename std::enable_if<matchGridTensorIndex<iScalar<vtype>,SpinorIndex>::notvalue,iScalar<vtype> >::type *temp;
|
||||
spProjYm(hspin._internal,fspin._internal);
|
||||
}
|
||||
template<class rtype,class vtype,int N,IfNotSpinor<iVector<vtype,N> > = 0> accelerator_inline void spProjYm (iVector<rtype,N> &hspin,const iVector<vtype,N> &fspin)
|
||||
{
|
||||
//typename std::enable_if<matchGridTensorIndex<iVector<vtype,N>,SpinorIndex>::notvalue,iVector<vtype,N> >::type *temp;
|
||||
for(int i=0;i<N;i++) {
|
||||
spProjYm(hspin._internal[i],fspin._internal[i]);
|
||||
}
|
||||
}
|
||||
template<class rtype,class vtype,int N> accelerator_inline void spProjYm (iMatrix<rtype,N> &hspin,const iMatrix<vtype,N> &fspin)
|
||||
{
|
||||
//typename std::enable_if<matchGridTensorIndex<iMatrix<vtype,N>,SpinorIndex>::notvalue,iMatrix<vtype,N> >::type *temp;
|
||||
for(int i=0;i<N;i++){
|
||||
for(int j=0;j<N;j++){
|
||||
spProjYm(hspin._internal[i][j],fspin._internal[i][j]);
|
||||
}}
|
||||
}
|
||||
|
||||
|
||||
template<class rtype,class vtype> accelerator_inline void spReconYm (iScalar<rtype> &hspin,const iScalar<vtype> &fspin)
|
||||
{
|
||||
//typename std::enable_if<matchGridTensorIndex<iScalar<vtype>,SpinorIndex>::notvalue,iScalar<vtype> >::type *temp;
|
||||
spReconYm(hspin._internal,fspin._internal);
|
||||
}
|
||||
template<class rtype,class vtype,int N,IfNotSpinor<iVector<vtype,N> > = 0> accelerator_inline void spReconYm (iVector<rtype,N> &hspin,const iVector<vtype,N> &fspin)
|
||||
{
|
||||
//typename std::enable_if<matchGridTensorIndex<iVector<vtype,N>,SpinorIndex>::notvalue,const iVector<vtype,N> >::type *temp;
|
||||
for(int i=0;i<N;i++) {
|
||||
spReconYm(hspin._internal[i],fspin._internal[i]);
|
||||
}
|
||||
}
|
||||
template<class rtype,class vtype,int N> accelerator_inline void spReconYm (iMatrix<rtype,N> &hspin,const iMatrix<vtype,N> &fspin)
|
||||
{
|
||||
//typename std::enable_if<matchGridTensorIndex<iMatrix<vtype,N>,SpinorIndex>::notvalue,iMatrix<vtype,N> >::type *temp;
|
||||
for(int i=0;i<N;i++){
|
||||
for(int j=0;j<N;j++){
|
||||
spReconYm(hspin._internal[i][j],fspin._internal[i][j]);
|
||||
@ -613,19 +572,16 @@ template<class rtype,class vtype,int N> accelerator_inline void spReconYm (iMatr
|
||||
|
||||
template<class rtype,class vtype> accelerator_inline void accumReconYm (iScalar<rtype> &hspin,const iScalar<vtype> &fspin)
|
||||
{
|
||||
//typename std::enable_if<matchGridTensorIndex<iScalar<vtype>,SpinorIndex>::notvalue,iScalar<vtype> >::type *temp;
|
||||
accumReconYm(hspin._internal,fspin._internal);
|
||||
}
|
||||
template<class rtype,class vtype,int N,IfNotSpinor<iVector<vtype,N> > = 0> accelerator_inline void accumReconYm (iVector<rtype,N> &hspin,const iVector<vtype,N> &fspin)
|
||||
{
|
||||
//typename std::enable_if<matchGridTensorIndex<iVector<vtype,N>,SpinorIndex>::notvalue,iVector<vtype,N> >::type *temp;
|
||||
for(int i=0;i<N;i++) {
|
||||
accumReconYm(hspin._internal[i],fspin._internal[i]);
|
||||
}
|
||||
}
|
||||
template<class rtype,class vtype,int N> accelerator_inline void accumReconYm (iMatrix<rtype,N> &hspin,const iMatrix<vtype,N> &fspin)
|
||||
{
|
||||
//typename std::enable_if<matchGridTensorIndex<iMatrix<vtype,N>,SpinorIndex>::notvalue,iMatrix<vtype,N> >::type *temp;
|
||||
for(int i=0;i<N;i++){
|
||||
for(int j=0;j<N;j++){
|
||||
accumReconYm(hspin._internal[i][j],fspin._internal[i][j]);
|
||||
@ -638,66 +594,57 @@ template<class rtype,class vtype,int N> accelerator_inline void accumReconYm (iM
|
||||
////////
|
||||
template<class rtype,class vtype> accelerator_inline void spProjZp (iScalar<rtype> &hspin,const iScalar<vtype> &fspin)
|
||||
{
|
||||
//typename std::enable_if<matchGridTensorIndex<iScalar<vtype>,SpinorIndex>::notvalue,iScalar<vtype> >::type *temp;
|
||||
spProjZp(hspin._internal,fspin._internal);
|
||||
}
|
||||
template<class rtype,class vtype,int N,IfNotSpinor<iVector<vtype,N> > = 0> accelerator_inline void spProjZp (iVector<rtype,N> &hspin,const iVector<vtype,N> &fspin)
|
||||
{
|
||||
//typename std::enable_if<matchGridTensorIndex<iVector<vtype,N>,SpinorIndex>::notvalue,iVector<vtype,N> >::type *temp;
|
||||
for(int i=0;i<N;i++) {
|
||||
spProjZp(hspin._internal[i],fspin._internal[i]);
|
||||
}
|
||||
}
|
||||
template<class rtype,class vtype,int N> accelerator_inline void spProjZp (iMatrix<rtype,N> &hspin,const iMatrix<vtype,N> &fspin)
|
||||
{
|
||||
//typename std::enable_if<matchGridTensorIndex<iMatrix<vtype,N>,SpinorIndex>::notvalue,iMatrix<vtype,N> >::type *temp;
|
||||
for(int i=0;i<N;i++){
|
||||
for(int j=0;j<N;j++){
|
||||
spProjZp(hspin._internal[i][j],fspin._internal[i][j]);
|
||||
}}
|
||||
}}
|
||||
}
|
||||
|
||||
|
||||
template<class rtype,class vtype> accelerator_inline void spReconZp (iScalar<rtype> &hspin,const iScalar<vtype> &fspin)
|
||||
{
|
||||
//typename std::enable_if<matchGridTensorIndex<iScalar<vtype>,SpinorIndex>::notvalue,iScalar<vtype> >::type *temp;
|
||||
spReconZp(hspin._internal,fspin._internal);
|
||||
}
|
||||
template<class rtype,class vtype,int N,IfNotSpinor<iVector<vtype,N> > = 0> accelerator_inline void spReconZp (iVector<rtype,N> &hspin,const iVector<vtype,N> &fspin)
|
||||
{
|
||||
//typename std::enable_if<matchGridTensorIndex<iVector<vtype,N>,SpinorIndex>::notvalue,iVector<vtype,N> >::type *temp;
|
||||
for(int i=0;i<N;i++) {
|
||||
spReconZp(hspin._internal[i],fspin._internal[i]);
|
||||
}
|
||||
}
|
||||
template<class rtype,class vtype,int N> accelerator_inline void spReconZp (iMatrix<rtype,N> &hspin,const iMatrix<vtype,N> &fspin)
|
||||
{
|
||||
//typename std::enable_if<matchGridTensorIndex<iMatrix<vtype,N>,SpinorIndex>::notvalue,iMatrix<vtype,N> >::type *temp;
|
||||
for(int i=0;i<N;i++){
|
||||
for(int j=0;j<N;j++){
|
||||
spReconZp(hspin._internal[i][j],fspin._internal[i][j]);
|
||||
}}
|
||||
}}
|
||||
}
|
||||
|
||||
template<class rtype,class vtype> accelerator_inline void accumReconZp (iScalar<rtype> &hspin,const iScalar<vtype> &fspin)
|
||||
{
|
||||
//typename std::enable_if<matchGridTensorIndex<iScalar<vtype>,SpinorIndex>::notvalue,iScalar<vtype> >::type *temp;
|
||||
accumReconZp(hspin._internal,fspin._internal);
|
||||
}
|
||||
template<class rtype,class vtype,int N,IfNotSpinor<iVector<vtype,N> > = 0> accelerator_inline void accumReconZp (iVector<rtype,N> &hspin,const iVector<vtype,N> &fspin)
|
||||
{
|
||||
//typename std::enable_if<matchGridTensorIndex<iVector<vtype,N>,SpinorIndex>::notvalue,iVector<vtype,N> >::type *temp;
|
||||
for(int i=0;i<N;i++) {
|
||||
accumReconZp(hspin._internal[i],fspin._internal[i]);
|
||||
}
|
||||
}
|
||||
template<class rtype,class vtype,int N> accelerator_inline void accumReconZp (iMatrix<rtype,N> &hspin,const iMatrix<vtype,N> &fspin)
|
||||
{
|
||||
//typename std::enable_if<matchGridTensorIndex<iMatrix<vtype,N>,SpinorIndex>::notvalue,iMatrix<vtype,N> >::type *temp;
|
||||
for(int i=0;i<N;i++){
|
||||
for(int j=0;j<N;j++){
|
||||
accumReconZp(hspin._internal[i][j],fspin._internal[i][j]);
|
||||
}}
|
||||
}}
|
||||
}
|
||||
|
||||
|
||||
@ -706,62 +653,53 @@ template<class rtype,class vtype,int N> accelerator_inline void accumReconZp (iM
|
||||
////////
|
||||
template<class rtype,class vtype> accelerator_inline void spProjZm (iScalar<rtype> &hspin,const iScalar<vtype> &fspin)
|
||||
{
|
||||
//typename std::enable_if<matchGridTensorIndex<iScalar<vtype>,SpinorIndex>::notvalue,iScalar<vtype> >::type *temp;
|
||||
spProjZm(hspin._internal,fspin._internal);
|
||||
}
|
||||
template<class rtype,class vtype,int N,IfNotSpinor<iVector<vtype,N> > = 0> accelerator_inline void spProjZm (iVector<rtype,N> &hspin,const iVector<vtype,N> &fspin)
|
||||
{
|
||||
//typename std::enable_if<matchGridTensorIndex<iVector<vtype,N>,SpinorIndex>::notvalue,iVector<vtype,N> >::type *temp;
|
||||
for(int i=0;i<N;i++) {
|
||||
spProjZm(hspin._internal[i],fspin._internal[i]);
|
||||
}
|
||||
}
|
||||
template<class rtype,class vtype,int N> accelerator_inline void spProjZm (iMatrix<rtype,N> &hspin,const iMatrix<vtype,N> &fspin)
|
||||
{
|
||||
//typename std::enable_if<matchGridTensorIndex<iMatrix<vtype,N>,SpinorIndex>::notvalue,iMatrix<vtype,N> >::type *temp;
|
||||
for(int i=0;i<N;i++){
|
||||
for(int j=0;j<N;j++){
|
||||
spProjZm(hspin._internal[i][j],fspin._internal[i][j]);
|
||||
}}
|
||||
}}
|
||||
}
|
||||
|
||||
|
||||
template<class rtype,class vtype> accelerator_inline void spReconZm (iScalar<rtype> &hspin,const iScalar<vtype> &fspin)
|
||||
{
|
||||
//typename std::enable_if<matchGridTensorIndex<iScalar<vtype>,SpinorIndex>::notvalue,iScalar<vtype> >::type *temp;
|
||||
spReconZm(hspin._internal,fspin._internal);
|
||||
}
|
||||
template<class rtype,class vtype,int N,IfNotSpinor<iVector<vtype,N> > = 0> accelerator_inline void spReconZm (iVector<rtype,N> &hspin,const iVector<vtype,N> &fspin)
|
||||
{
|
||||
//typename std::enable_if<matchGridTensorIndex<iVector<vtype,N>,SpinorIndex>::notvalue,iVector<vtype,N> >::type *temp;
|
||||
for(int i=0;i<N;i++) {
|
||||
spReconZm(hspin._internal[i],fspin._internal[i]);
|
||||
}
|
||||
}
|
||||
template<class rtype,class vtype,int N> accelerator_inline void spReconZm (iMatrix<rtype,N> &hspin,const iMatrix<vtype,N> &fspin)
|
||||
{
|
||||
//typename std::enable_if<matchGridTensorIndex<iMatrix<vtype,N>,SpinorIndex>::notvalue,iMatrix<vtype,N> >::type *temp;
|
||||
for(int i=0;i<N;i++){
|
||||
for(int j=0;j<N;j++){
|
||||
spReconZm(hspin._internal[i][j],fspin._internal[i][j]);
|
||||
}}
|
||||
}}
|
||||
}
|
||||
|
||||
template<class rtype,class vtype> accelerator_inline void accumReconZm (iScalar<rtype> &hspin,const iScalar<vtype> &fspin)
{
//typename std::enable_if<matchGridTensorIndex<iScalar<vtype>,SpinorIndex>::notvalue,iScalar<vtype> >::type *temp;
accumReconZm(hspin._internal,fspin._internal);
}
template<class rtype,class vtype,int N,IfNotSpinor<iVector<vtype,N> > = 0> accelerator_inline void accumReconZm (iVector<rtype,N> &hspin,const iVector<vtype,N> &fspin)
{
//typename std::enable_if<matchGridTensorIndex<iVector<vtype,N>,SpinorIndex>::notvalue,iVector<vtype,N> >::type *temp;
for(int i=0;i<N;i++) {
accumReconZm(hspin._internal[i],fspin._internal[i]);
}
}
template<class rtype,class vtype,int N> accelerator_inline void accumReconZm (iMatrix<rtype,N> &hspin,const iMatrix<vtype,N> &fspin)
{
//typename std::enable_if<matchGridTensorIndex<iMatrix<vtype,N>,SpinorIndex>::notvalue,iMatrix<vtype,N> >::type *temp;
for(int i=0;i<N;i++){
for(int j=0;j<N;j++){
accumReconZm(hspin._internal[i][j],fspin._internal[i][j]);
@@ -774,41 +712,35 @@ template<class rtype,class vtype,int N> accelerator_inline void accumReconZm (iM
////////
template<class rtype,class vtype> accelerator_inline void spProjTp (iScalar<rtype> &hspin,const iScalar<vtype> &fspin)
{
//typename std::enable_if<matchGridTensorIndex<iScalar<vtype>,SpinorIndex>::notvalue,iScalar<vtype> >::type *temp;
spProjTp(hspin._internal,fspin._internal);
}
template<class rtype,class vtype,int N,IfNotSpinor<iVector<vtype,N> > = 0> accelerator_inline void spProjTp (iVector<rtype,N> &hspin,const iVector<vtype,N> &fspin)
{
//typename std::enable_if<matchGridTensorIndex<iVector<vtype,N>,SpinorIndex>::notvalue,iVector<vtype,N> >::type *temp;
for(int i=0;i<N;i++) {
spProjTp(hspin._internal[i],fspin._internal[i]);
}
}
template<class rtype,class vtype,int N> accelerator_inline void spProjTp (iMatrix<rtype,N> &hspin,const iMatrix<vtype,N> &fspin)
{
//typename std::enable_if<matchGridTensorIndex<iMatrix<vtype,N>,SpinorIndex>::notvalue,iMatrix<vtype,N> >::type *temp;
for(int i=0;i<N;i++){
for(int j=0;j<N;j++){
spProjTp(hspin._internal[i][j],fspin._internal[i][j]);
}}
}

template<class rtype,class vtype> accelerator_inline void spReconTp (iScalar<rtype> &hspin,const iScalar<vtype> &fspin)
{
//typename std::enable_if<matchGridTensorIndex<iScalar<vtype>,SpinorIndex>::notvalue,iScalar<vtype> >::type *temp;
spReconTp(hspin._internal,fspin._internal);
}
template<class rtype,class vtype,int N,IfNotSpinor<iVector<vtype,N> > = 0> accelerator_inline void spReconTp (iVector<rtype,N> &hspin,const iVector<vtype,N> &fspin)
{
//typename std::enable_if<matchGridTensorIndex<iVector<vtype,N>,SpinorIndex>::notvalue,iVector<vtype,N> >::type *temp;
for(int i=0;i<N;i++) {
spReconTp(hspin._internal[i],fspin._internal[i]);
}
}
template<class rtype,class vtype,int N> accelerator_inline void spReconTp (iMatrix<rtype,N> &hspin,const iMatrix<vtype,N> &fspin)
{
//typename std::enable_if<matchGridTensorIndex<iMatrix<vtype,N>,SpinorIndex>::notvalue,iMatrix<vtype,N> >::type *temp;
for(int i=0;i<N;i++){
for(int j=0;j<N;j++){
spReconTp(hspin._internal[i][j],fspin._internal[i][j]);
@@ -817,44 +749,37 @@ template<class rtype,class vtype,int N> accelerator_inline void spReconTp (iMatr

template<class rtype,class vtype> accelerator_inline void accumReconTp (iScalar<rtype> &hspin, iScalar<vtype> &fspin)
{
//typename std::enable_if<matchGridTensorIndex<iScalar<vtype>,SpinorIndex>::notvalue,iScalar<vtype> >::type *temp;
accumReconTp(hspin._internal,fspin._internal);
}
template<class rtype,class vtype,int N,IfNotSpinor<iVector<vtype,N> > = 0> accelerator_inline void accumReconTp (iVector<rtype,N> &hspin, const iVector<vtype,N> &fspin)
{
//typename std::enable_if<matchGridTensorIndex<iVector<vtype,N>,SpinorIndex>::notvalue,iVector<vtype,N> >::type *temp;
for(int i=0;i<N;i++) {
accumReconTp(hspin._internal[i],fspin._internal[i]);
}
}
template<class rtype,class vtype,int N> accelerator_inline void accumReconTp (iMatrix<rtype,N> &hspin, const iMatrix<vtype,N> &fspin)
{
//typename std::enable_if<matchGridTensorIndex<iMatrix<vtype,N>,SpinorIndex>::notvalue,iMatrix<vtype,N> >::type *temp;
for(int i=0;i<N;i++){
for(int j=0;j<N;j++){
accumReconTp(hspin._internal[i][j],fspin._internal[i][j]);
}}
}

////////
// Tm
////////
template<class rtype,class vtype> accelerator_inline void spProjTm (iScalar<rtype> &hspin,const iScalar<vtype> &fspin)
{
//typename std::enable_if<matchGridTensorIndex<iScalar<vtype>,SpinorIndex>::notvalue,iScalar<vtype> >::type *temp;
spProjTm(hspin._internal,fspin._internal);
}
template<class rtype,class vtype,int N,IfNotSpinor<iVector<vtype,N> > = 0> accelerator_inline void spProjTm (iVector<rtype,N> &hspin,const iVector<vtype,N> &fspin)
{
//typename std::enable_if<matchGridTensorIndex<iVector<vtype,N>,SpinorIndex>::notvalue,iVector<vtype,N> >::type *temp;
for(int i=0;i<N;i++) {
spProjTm(hspin._internal[i],fspin._internal[i]);
}
}
template<class rtype,class vtype,int N> accelerator_inline void spProjTm (iMatrix<rtype,N> &hspin,const iMatrix<vtype,N> &fspin)
{
//typename std::enable_if<matchGridTensorIndex<iMatrix<vtype,N>,SpinorIndex>::notvalue,iMatrix<vtype,N> >::type *temp;
for(int i=0;i<N;i++){
for(int j=0;j<N;j++){
spProjTm(hspin._internal[i][j],fspin._internal[i][j]);
@@ -864,19 +789,16 @@ template<class rtype,class vtype,int N> accelerator_inline void spProjTm (iMatri

template<class rtype,class vtype> accelerator_inline void spReconTm (iScalar<rtype> &hspin, const iScalar<vtype> &fspin)
{
//typename std::enable_if<matchGridTensorIndex<iScalar<vtype>,SpinorIndex>::notvalue,iScalar<vtype> >::type *temp;
spReconTm(hspin._internal,fspin._internal);
}
template<class rtype,class vtype,int N,IfNotSpinor<iVector<vtype,N> > = 0> accelerator_inline void spReconTm (iVector<rtype,N> &hspin, const iVector<vtype,N> &fspin)
{
//typename std::enable_if<matchGridTensorIndex<iVector<vtype,N>,SpinorIndex>::notvalue,iVector<vtype,N> >::type *temp;
for(int i=0;i<N;i++) {
spReconTm(hspin._internal[i],fspin._internal[i]);
}
}
template<class rtype,class vtype,int N> accelerator_inline void spReconTm (iMatrix<rtype,N> &hspin, const iMatrix<vtype,N> &fspin)
{
//typename std::enable_if<matchGridTensorIndex<iMatrix<vtype,N>,SpinorIndex>::notvalue,iMatrix<vtype,N> >::type *temp;
for(int i=0;i<N;i++){
for(int j=0;j<N;j++){
spReconTm(hspin._internal[i][j],fspin._internal[i][j]);
@@ -885,44 +807,37 @@ template<class rtype,class vtype,int N> accelerator_inline void spReconTm (iMatr

template<class rtype,class vtype> accelerator_inline void accumReconTm (iScalar<rtype> &hspin, const iScalar<vtype> &fspin)
{
//typename std::enable_if<matchGridTensorIndex<iScalar<vtype>,SpinorIndex>::notvalue,iScalar<vtype> >::type *temp;
accumReconTm(hspin._internal,fspin._internal);
}
template<class rtype,class vtype,int N,IfNotSpinor<iVector<vtype,N> > = 0> accelerator_inline void accumReconTm (iVector<rtype,N> &hspin, const iVector<vtype,N> &fspin)
{
//typename std::enable_if<matchGridTensorIndex<iVector<vtype,N>,SpinorIndex>::notvalue,iVector<vtype,N> >::type *temp;
for(int i=0;i<N;i++) {
accumReconTm(hspin._internal[i],fspin._internal[i]);
}
}
template<class rtype,class vtype,int N> accelerator_inline void accumReconTm (iMatrix<rtype,N> &hspin, const iMatrix<vtype,N> &fspin)
{
//typename std::enable_if<matchGridTensorIndex<iMatrix<vtype,N>,SpinorIndex>::notvalue,iMatrix<vtype,N> >::type *temp;
for(int i=0;i<N;i++){
for(int j=0;j<N;j++){
accumReconTm(hspin._internal[i][j],fspin._internal[i][j]);
}}
}

////////
// 5p
////////
template<class rtype,class vtype> accelerator_inline void spProj5p (iScalar<rtype> &hspin,const iScalar<vtype> &fspin)
template<class rtype,class vtype,IfNotCoarsened<iScalar<vtype> > = 0> accelerator_inline void spProj5p (iScalar<rtype> &hspin,const iScalar<vtype> &fspin)
{
//typename std::enable_if<matchGridTensorIndex<iScalar<vtype>,SpinorIndex>::notvalue,iScalar<vtype> >::type *temp;
spProj5p(hspin._internal,fspin._internal);
}
template<class rtype,class vtype,int N,IfNotSpinor<iVector<vtype,N> > = 0> accelerator_inline void spProj5p (iVector<rtype,N> &hspin,const iVector<vtype,N> &fspin)
{
//typename std::enable_if<matchGridTensorIndex<iVector<vtype,N>,SpinorIndex>::notvalue,iVector<vtype,N> >::type *temp;
for(int i=0;i<N;i++) {
spProj5p(hspin._internal[i],fspin._internal[i]);
}
}
template<class rtype,class vtype,int N> accelerator_inline void spProj5p (iMatrix<rtype,N> &hspin,const iMatrix<vtype,N> &fspin)
template<class rtype,class vtype,int N,IfNotCoarsened<iScalar<vtype> > = 0> accelerator_inline void spProj5p (iMatrix<rtype,N> &hspin,const iMatrix<vtype,N> &fspin)
{
//typename std::enable_if<matchGridTensorIndex<iMatrix<vtype,N>,SpinorIndex>::notvalue,iMatrix<vtype,N> >::type *temp;
for(int i=0;i<N;i++){
for(int j=0;j<N;j++){
spProj5p(hspin._internal[i][j],fspin._internal[i][j]);
@@ -931,19 +846,16 @@ template<class rtype,class vtype,int N> accelerator_inline void spProj5p (iMatri

template<class rtype,class vtype> accelerator_inline void spRecon5p (iScalar<rtype> &hspin,const iScalar<vtype> &fspin)
{
//typename std::enable_if<matchGridTensorIndex<iScalar<vtype>,SpinorIndex>::notvalue,iScalar<vtype> >::type *temp;
spRecon5p(hspin._internal,fspin._internal);
}
template<class rtype,class vtype,int N,IfNotSpinor<iVector<vtype,N> > = 0> accelerator_inline void spRecon5p (iVector<rtype,N> &hspin,const iVector<vtype,N> &fspin)
{
//typename std::enable_if<matchGridTensorIndex<iVector<vtype,N>,SpinorIndex>::notvalue,iVector<vtype,N> >::type *temp;
for(int i=0;i<N;i++) {
spRecon5p(hspin._internal[i],fspin._internal[i]);
}
}
template<class rtype,class vtype,int N> accelerator_inline void spRecon5p (iMatrix<rtype,N> &hspin,const iMatrix<vtype,N> &fspin)
{
//typename std::enable_if<matchGridTensorIndex<iMatrix<vtype,N>,SpinorIndex>::notvalue,iMatrix<vtype,N> >::type *temp;
for(int i=0;i<N;i++){
for(int j=0;j<N;j++){
spRecon5p(hspin._internal[i][j],fspin._internal[i][j]);
@@ -952,19 +864,16 @@ template<class rtype,class vtype,int N> accelerator_inline void spRecon5p (iMatr

template<class rtype,class vtype> accelerator_inline void accumRecon5p (iScalar<rtype> &hspin,const iScalar<vtype> &fspin)
{
//typename std::enable_if<matchGridTensorIndex<iScalar<vtype>,SpinorIndex>::notvalue,iScalar<vtype> >::type *temp;
accumRecon5p(hspin._internal,fspin._internal);
}
template<class rtype,class vtype,int N,IfNotSpinor<iVector<vtype,N> > = 0> accelerator_inline void accumRecon5p (iVector<rtype,N> &hspin,const iVector<vtype,N> &fspin)
{
//typename std::enable_if<matchGridTensorIndex<iVector<vtype,N>,SpinorIndex>::notvalue,iVector<vtype,N> >::type *temp;
for(int i=0;i<N;i++) {
accumRecon5p(hspin._internal[i],fspin._internal[i]);
}
}
template<class rtype,class vtype,int N> accelerator_inline void accumRecon5p (iMatrix<rtype,N> &hspin,const iMatrix<vtype,N> &fspin)
{
//typename std::enable_if<matchGridTensorIndex<iMatrix<vtype,N>,SpinorIndex>::notvalue,iMatrix<vtype,N> >::type *temp;
for(int i=0;i<N;i++){
for(int j=0;j<N;j++){
accumRecon5p(hspin._internal[i][j],fspin._internal[i][j]);
@@ -972,24 +881,18 @@ template<class rtype,class vtype,int N> accelerator_inline void accumRecon5p (iM
}

// four spinor projectors for chiral proj
// template<class vtype> accelerator_inline void fspProj5p (iScalar<vtype> &hspin,const iScalar<vtype> &fspin)
template<class vtype> accelerator_inline void spProj5p (iScalar<vtype> &hspin,const iScalar<vtype> &fspin)
template<class vtype,IfNotCoarsened<iScalar<vtype> > = 0> accelerator_inline void spProj5p (iScalar<vtype> &hspin,const iScalar<vtype> &fspin)
{
//typename std::enable_if<matchGridTensorIndex<iScalar<vtype>,SpinorIndex>::notvalue,iScalar<vtype> >::type *temp;
spProj5p(hspin._internal,fspin._internal);
}
// template<class vtype,int N> accelerator_inline void fspProj5p (iVector<vtype,N> &hspin,iVector<vtype,N> &fspin)
template<class vtype,int N,IfNotSpinor<iVector<vtype,N> > = 0> accelerator_inline void spProj5p (iVector<vtype,N> &hspin,const iVector<vtype,N> &fspin)
template<class vtype,int N,IfNotSpinor<iVector<vtype,N> > = 0,IfNotCoarsened<iScalar<vtype> > = 0> accelerator_inline void spProj5p (iVector<vtype,N> &hspin,const iVector<vtype,N> &fspin)
{
//typename std::enable_if<matchGridTensorIndex<iVector<vtype,N>,SpinorIndex>::notvalue,iVector<vtype,N> >::type *temp;
for(int i=0;i<N;i++) {
spProj5p(hspin._internal[i],fspin._internal[i]);
}
}
// template<class vtype,int N> accelerator_inline void fspProj5p (iMatrix<vtype,N> &hspin,iMatrix<vtype,N> &fspin)
template<class vtype,int N> accelerator_inline void spProj5p (iMatrix<vtype,N> &hspin,const iMatrix<vtype,N> &fspin)
template<class vtype,int N,IfNotCoarsened<iScalar<vtype> > = 0> accelerator_inline void spProj5p (iMatrix<vtype,N> &hspin,const iMatrix<vtype,N> &fspin)
{
//typename std::enable_if<matchGridTensorIndex<iMatrix<vtype,N>,SpinorIndex>::notvalue,iMatrix<vtype,N> >::type *temp;
for(int i=0;i<N;i++){
for(int j=0;j<N;j++){
spProj5p(hspin._internal[i][j],fspin._internal[i][j]);
@@ -1001,17 +904,17 @@ template<class vtype,int N> accelerator_inline void spProj5p (iMatrix<vtype,N> &
// 5m
////////

template<class rtype,class vtype> accelerator_inline void spProj5m (iScalar<rtype> &hspin,const iScalar<vtype> &fspin)
template<class rtype,class vtype,IfNotCoarsened<iScalar<vtype> > = 0> accelerator_inline void spProj5m (iScalar<rtype> &hspin,const iScalar<vtype> &fspin)
{
spProj5m(hspin._internal,fspin._internal);
}
template<class rtype,class vtype,int N,IfNotSpinor<iVector<rtype,N> > = 0> accelerator_inline void spProj5m (iVector<rtype,N> &hspin,const iVector<vtype,N> &fspin)
template<class rtype,class vtype,int N,IfNotSpinor<iVector<rtype,N> > = 0,IfNotCoarsened<iScalar<vtype> > = 0> accelerator_inline void spProj5m (iVector<rtype,N> &hspin,const iVector<vtype,N> &fspin)
{
for(int i=0;i<N;i++) {
spProj5m(hspin._internal[i],fspin._internal[i]);
}
}
template<class rtype,class vtype,int N> accelerator_inline void spProj5m (iMatrix<rtype,N> &hspin,const iMatrix<vtype,N> &fspin)
template<class rtype,class vtype,int N,IfNotCoarsened<iScalar<vtype> > = 0> accelerator_inline void spProj5m (iMatrix<rtype,N> &hspin,const iMatrix<vtype,N> &fspin)
{
for(int i=0;i<N;i++){
for(int j=0;j<N;j++){
@@ -1021,40 +924,34 @@ template<class rtype,class vtype,int N> accelerator_inline void spProj5m (iMatri

template<class rtype,class vtype> accelerator_inline void spRecon5m (iScalar<rtype> &hspin,const iScalar<vtype> &fspin)
{
//typename std::enable_if<matchGridTensorIndex<iScalar<vtype>,SpinorIndex>::notvalue,iScalar<vtype> >::type *temp;
spRecon5m(hspin._internal,fspin._internal);
}
template<class rtype,class vtype,int N,IfNotSpinor<iVector<vtype,N> > = 0> accelerator_inline void spRecon5m (iVector<rtype,N> &hspin,const iVector<vtype,N> &fspin)
{
//typename std::enable_if<matchGridTensorIndex<iVector<vtype,N>,SpinorIndex>::notvalue,iVector<vtype,N> >::type *temp;
for(int i=0;i<N;i++) {
spRecon5m(hspin._internal[i],fspin._internal[i]);
}
}
template<class rtype,class vtype,int N> accelerator_inline void spRecon5m (iMatrix<rtype,N> &hspin,const iMatrix<vtype,N> &fspin)
{
//typename std::enable_if<matchGridTensorIndex<iMatrix<vtype,N>,SpinorIndex>::notvalue,iMatrix<vtype,N> >::type *temp;
for(int i=0;i<N;i++){
for(int j=0;j<N;j++){
spRecon5m(hspin._internal[i][j],fspin._internal[i][j]);
}}
}

template<class rtype,class vtype> accelerator_inline void accumRecon5m (iScalar<rtype> &hspin,const iScalar<vtype> &fspin)
{
//typename std::enable_if<matchGridTensorIndex<iScalar<vtype>,SpinorIndex>::notvalue,iScalar<vtype> >::type *temp;
accumRecon5m(hspin._internal,fspin._internal);
}
template<class rtype,class vtype,int N,IfNotSpinor<iVector<vtype,N> > = 0> accelerator_inline void accumRecon5m (iVector<rtype,N> &hspin,const iVector<vtype,N> &fspin)
{
//typename std::enable_if<matchGridTensorIndex<iVector<vtype,N>,SpinorIndex>::notvalue,iVector<vtype,N> >::type *temp;
for(int i=0;i<N;i++) {
accumRecon5m(hspin._internal[i],fspin._internal[i]);
}
}
template<class rtype,class vtype,int N> accelerator_inline void accumRecon5m (iMatrix<rtype,N> &hspin,const iMatrix<vtype,N> &fspin)
{
//typename std::enable_if<matchGridTensorIndex<iMatrix<vtype,N>,SpinorIndex>::notvalue,iMatrix<vtype,N> >::type *temp;
for(int i=0;i<N;i++){
for(int j=0;j<N;j++){
accumRecon5m(hspin._internal[i][j],fspin._internal[i][j]);
@@ -1063,24 +960,18 @@ template<class rtype,class vtype,int N> accelerator_inline void accumRecon5m (iM

// four spinor projectors for chiral proj
// template<class vtype> accelerator_inline void fspProj5m (iScalar<vtype> &hspin,const iScalar<vtype> &fspin)
template<class vtype> accelerator_inline void spProj5m (iScalar<vtype> &hspin,const iScalar<vtype> &fspin)
template<class vtype,IfNotCoarsened<iScalar<vtype> > = 0> accelerator_inline void spProj5m (iScalar<vtype> &hspin,const iScalar<vtype> &fspin)
{
//typename std::enable_if<matchGridTensorIndex<iScalar<vtype>,SpinorIndex>::notvalue,iScalar<vtype> >::type *temp;
spProj5m(hspin._internal,fspin._internal);
}
// template<class vtype,int N> accelerator_inline void fspProj5m (iVector<vtype,N> &hspin,iVector<vtype,N> &fspin)
template<class vtype,int N,IfNotSpinor<iVector<vtype,N> > = 0> accelerator_inline void spProj5m (iVector<vtype,N> &hspin,const iVector<vtype,N> &fspin)
template<class vtype,int N,IfNotSpinor<iVector<vtype,N> > = 0,IfNotCoarsened<iScalar<vtype> > = 0> accelerator_inline void spProj5m (iVector<vtype,N> &hspin,const iVector<vtype,N> &fspin)
{
//typename std::enable_if<matchGridTensorIndex<iVector<vtype,N>,SpinorIndex>::notvalue,iVector<vtype,N> >::type *temp;
for(int i=0;i<N;i++) {
spProj5m(hspin._internal[i],fspin._internal[i]);
}
}
// template<class vtype,int N> accelerator_inline void fspProj5m (iMatrix<vtype,N> &hspin,iMatrix<vtype,N> &fspin)
template<class vtype,int N> accelerator_inline void spProj5m (iMatrix<vtype,N> &hspin,const iMatrix<vtype,N> &fspin)
template<class vtype,int N,IfNotCoarsened<iScalar<vtype> > = 0> accelerator_inline void spProj5m (iMatrix<vtype,N> &hspin,const iMatrix<vtype,N> &fspin)
{
//typename std::enable_if<matchGridTensorIndex<iMatrix<vtype,N>,SpinorIndex>::notvalue,iMatrix<vtype,N> >::type *temp;
for(int i=0;i<N;i++){
for(int j=0;j<N;j++){
spProj5m(hspin._internal[i][j],fspin._internal[i][j]);
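The extra defaulted template parameters (the IfNotSpinor<...> = 0 above, and the IfNotCoarsened<...> = 0 guards added to the 5p/5m overloads) are SFINAE switches: the recursive overload only participates in overload resolution while the current tensor level is not the spinor (or coarsened) index, so the recursion terminates at the right level. A hedged sketch of the mechanism, with a hypothetical trait standing in for Grid's matchGridTensorIndex:

#include <type_traits>
#include <cstdio>

struct SpinorTag {};  // hypothetical marker for a "spinor-level" type
template<class T> struct is_spinor : std::false_type {};
template<>        struct is_spinor<SpinorTag> : std::true_type {};

// Guard in the style of Grid's IfNotSpinor<T>: an int non-type parameter
// that only exists when T is not spinor-like.
template<class T>
using IfNotSpinor = std::enable_if_t<!is_spinor<T>::value, int>;

template<class T, IfNotSpinor<T> = 0>
const char *dispatch(const T&) { return "recursive overload"; }
const char *dispatch(const SpinorTag&) { return "leaf overload"; }

int main() {
  printf("%s\n", dispatch(42));           // recursive overload
  printf("%s\n", dispatch(SpinorTag{}));  // leaf overload
}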
@@ -51,7 +51,7 @@ public:

private:
template <class mobj, class robj>
static void baryon_site(const mobj &D1,
static void BaryonSite(const mobj &D1,
const mobj &D2,
const mobj &D3,
const Gamma GammaA_left,
@@ -61,8 +61,18 @@ public:
const int parity,
const bool * wick_contractions,
robj &result);
template <class mobj, class robj>
static void BaryonSiteMatrix(const mobj &D1,
const mobj &D2,
const mobj &D3,
const Gamma GammaA_left,
const Gamma GammaB_left,
const Gamma GammaA_right,
const Gamma GammaB_right,
const bool * wick_contractions,
robj &result);
public:
static void Wick_Contractions(std::string qi,
static void WickContractions(std::string qi,
std::string qf,
bool* wick_contractions);
static void ContractBaryons(const PropagatorField &q1_left,
@@ -75,8 +85,17 @@ public:
const bool* wick_contractions,
const int parity,
ComplexField &baryon_corr);
static void ContractBaryonsMatrix(const PropagatorField &q1_left,
const PropagatorField &q2_left,
const PropagatorField &q3_left,
const Gamma GammaA_left,
const Gamma GammaB_left,
const Gamma GammaA_right,
const Gamma GammaB_right,
const bool* wick_contractions,
SpinMatrixField &baryon_corr);
template <class mobj, class robj>
static void ContractBaryons_Sliced(const mobj &D1,
static void ContractBaryonsSliced(const mobj &D1,
const mobj &D2,
const mobj &D3,
const Gamma GammaA_left,
@@ -87,9 +106,20 @@ public:
const int parity,
const int nt,
robj &result);
template <class mobj, class robj>
static void ContractBaryonsSlicedMatrix(const mobj &D1,
const mobj &D2,
const mobj &D3,
const Gamma GammaA_left,
const Gamma GammaB_left,
const Gamma GammaA_right,
const Gamma GammaB_right,
const bool* wick_contractions,
const int nt,
robj &result);
private:
template <class mobj, class mobj2, class robj>
static void Baryon_Gamma_3pt_Group1_Site(
static void BaryonGamma3ptGroup1Site(
const mobj &Dq1_ti,
const mobj2 &Dq2_spec,
const mobj2 &Dq3_spec,
@@ -101,7 +131,7 @@ public:
robj &result);

template <class mobj, class mobj2, class robj>
static void Baryon_Gamma_3pt_Group2_Site(
static void BaryonGamma3ptGroup2Site(
const mobj2 &Dq1_spec,
const mobj &Dq2_ti,
const mobj2 &Dq3_spec,
@@ -113,7 +143,7 @@ public:
robj &result);

template <class mobj, class mobj2, class robj>
static void Baryon_Gamma_3pt_Group3_Site(
static void BaryonGamma3ptGroup3Site(
const mobj2 &Dq1_spec,
const mobj2 &Dq2_spec,
const mobj &Dq3_ti,
@@ -125,7 +155,7 @@ public:
robj &result);
public:
template <class mobj>
static void Baryon_Gamma_3pt(
static void BaryonGamma3pt(
const PropagatorField &q_ti,
const mobj &Dq_spec1,
const mobj &Dq_spec2,
@@ -138,7 +168,7 @@ public:
SpinMatrixField &stn_corr);
private:
template <class mobj, class mobj2, class robj>
static void Sigma_to_Nucleon_Q1_Eye_site(const mobj &Dq_loop,
static void SigmaToNucleonQ1EyeSite(const mobj &Dq_loop,
const mobj2 &Du_spec,
const mobj &Dd_tf,
const mobj &Ds_ti,
@@ -147,7 +177,7 @@ public:
const Gamma GammaB_nucl,
robj &result);
template <class mobj, class mobj2, class robj>
static void Sigma_to_Nucleon_Q1_NonEye_site(const mobj &Du_ti,
static void SigmaToNucleonQ1NonEyeSite(const mobj &Du_ti,
const mobj &Du_tf,
const mobj2 &Du_spec,
const mobj &Dd_tf,
@@ -159,7 +189,7 @@ public:


template <class mobj, class mobj2, class robj>
static void Sigma_to_Nucleon_Q2_Eye_site(const mobj &Dq_loop,
static void SigmaToNucleonQ2EyeSite(const mobj &Dq_loop,
const mobj2 &Du_spec,
const mobj &Dd_tf,
const mobj &Ds_ti,
@@ -168,7 +198,7 @@ public:
const Gamma GammaB_nucl,
robj &result);
template <class mobj, class mobj2, class robj>
static void Sigma_to_Nucleon_Q2_NonEye_site(const mobj &Du_ti,
static void SigmaToNucleonQ2NonEyeSite(const mobj &Du_ti,
const mobj &Du_tf,
const mobj2 &Du_spec,
const mobj &Dd_tf,
@@ -179,7 +209,7 @@ public:
robj &result);
public:
template <class mobj>
static void Sigma_to_Nucleon_Eye(const PropagatorField &qq_loop,
static void SigmaToNucleonEye(const PropagatorField &qq_loop,
const mobj &Du_spec,
const PropagatorField &qd_tf,
const PropagatorField &qs_ti,
@@ -189,7 +219,7 @@ public:
const std::string op,
SpinMatrixField &stn_corr);
template <class mobj>
static void Sigma_to_Nucleon_NonEye(const PropagatorField &qq_ti,
static void SigmaToNucleonNonEye(const PropagatorField &qq_ti,
const PropagatorField &qq_tf,
const mobj &Du_spec,
const PropagatorField &qd_tf,
@@ -217,7 +247,7 @@ const Real BaryonUtils<FImpl>::epsilon_sgn[6] = {1.,1.,1.,-1.,-1.,-1.};
//This is the old version
template <class FImpl>
template <class mobj, class robj>
void BaryonUtils<FImpl>::baryon_site(const mobj &D1,
void BaryonUtils<FImpl>::BaryonSite(const mobj &D1,
const mobj &D2,
const mobj &D3,
const Gamma GammaA_i,
@@ -329,12 +359,132 @@ void BaryonUtils<FImpl>::baryon_site(const mobj &D1,
}}
}

//New version without parity projection or trace
template <class FImpl>
template <class mobj, class robj>
void BaryonUtils<FImpl>::BaryonSiteMatrix(const mobj &D1,
const mobj &D2,
const mobj &D3,
const Gamma GammaA_i,
const Gamma GammaB_i,
const Gamma GammaA_f,
const Gamma GammaB_f,
const bool * wick_contraction,
robj &result)
{

auto D1_GAi = D1 * GammaA_i;
auto GAf_D1_GAi = GammaA_f * D1_GAi;
auto GBf_D1_GAi = GammaB_f * D1_GAi;

auto D2_GBi = D2 * GammaB_i;
auto GBf_D2_GBi = GammaB_f * D2_GBi;
auto GAf_D2_GBi = GammaA_f * D2_GBi;

auto GBf_D3 = GammaB_f * D3;
auto GAf_D3 = GammaA_f * D3;

for (int ie_f=0; ie_f < 6 ; ie_f++){
int a_f = epsilon[ie_f][0]; //a
int b_f = epsilon[ie_f][1]; //b
int c_f = epsilon[ie_f][2]; //c
for (int ie_i=0; ie_i < 6 ; ie_i++){
int a_i = epsilon[ie_i][0]; //a'
int b_i = epsilon[ie_i][1]; //b'
int c_i = epsilon[ie_i][2]; //c'

Real ee = epsilon_sgn[ie_f] * epsilon_sgn[ie_i];
//This is the \delta_{456}^{123} part
if (wick_contraction[0]){
for (int rho_i=0; rho_i<Ns; rho_i++){
for (int rho_f=0; rho_f<Ns; rho_f++){
auto GAf_D1_GAi_rr_cc = GAf_D1_GAi()(rho_f,rho_i)(c_f,c_i);
for (int alpha_f=0; alpha_f<Ns; alpha_f++){
for (int beta_i=0; beta_i<Ns; beta_i++){
result()(rho_f,rho_i)() += ee * GAf_D1_GAi_rr_cc
* D2_GBi ()(alpha_f,beta_i)(a_f,a_i)
* GBf_D3 ()(alpha_f,beta_i)(b_f,b_i);
}}
}}
}
//This is the \delta_{456}^{231} part
if (wick_contraction[1]){
for (int rho_i=0; rho_i<Ns; rho_i++){
for (int alpha_f=0; alpha_f<Ns; alpha_f++){
auto D1_GAi_ar_ac = D1_GAi()(alpha_f,rho_i)(a_f,c_i);
for (int beta_i=0; beta_i<Ns; beta_i++){
auto GBf_D2_GBi_ab_ba = GBf_D2_GBi ()(alpha_f,beta_i)(b_f,a_i);
for (int rho_f=0; rho_f<Ns; rho_f++){
result()(rho_f,rho_i)() += ee * D1_GAi_ar_ac
* GBf_D2_GBi_ab_ba
* GAf_D3 ()(rho_f,beta_i)(c_f,b_i);
}}
}}
}
//This is the \delta_{456}^{312} part
if (wick_contraction[2]){
for (int rho_i=0; rho_i<Ns; rho_i++){
for (int alpha_f=0; alpha_f<Ns; alpha_f++){
auto GBf_D1_GAi_ar_bc = GBf_D1_GAi()(alpha_f,rho_i)(b_f,c_i);
for (int beta_i=0; beta_i<Ns; beta_i++){
auto D3_ab_ab = D3 ()(alpha_f,beta_i)(a_f,b_i);
for (int rho_f=0; rho_f<Ns; rho_f++){
result()(rho_f,rho_i)() += ee * GBf_D1_GAi_ar_bc
* GAf_D2_GBi ()(rho_f,beta_i)(c_f,a_i)
* D3_ab_ab;
}}
}}
}
//This is the \delta_{456}^{132} part
if (wick_contraction[3]){
for (int rho_i=0; rho_i<Ns; rho_i++){
for (int rho_f=0; rho_f<Ns; rho_f++){
auto GAf_D1_GAi_rr_cc = GAf_D1_GAi()(rho_f,rho_i)(c_f,c_i);
for (int alpha_f=0; alpha_f<Ns; alpha_f++){
for (int beta_i=0; beta_i<Ns; beta_i++){
result()(rho_f,rho_i)() -= ee * GAf_D1_GAi_rr_cc
* GBf_D2_GBi ()(alpha_f,beta_i)(b_f,a_i)
* D3 ()(alpha_f,beta_i)(a_f,b_i);
}}
}}
}
//This is the \delta_{456}^{321} part
if (wick_contraction[4]){
for (int rho_i=0; rho_i<Ns; rho_i++){
for (int alpha_f=0; alpha_f<Ns; alpha_f++){
auto GBf_D1_GAi_ar_bc = GBf_D1_GAi()(alpha_f,rho_i)(b_f,c_i);
for (int beta_i=0; beta_i<Ns; beta_i++){
auto D2_GBi_ab_aa = D2_GBi()(alpha_f,beta_i)(a_f,a_i);
for (int rho_f=0; rho_f<Ns; rho_f++){
result()(rho_f,rho_i)() -= ee * GBf_D1_GAi_ar_bc
* D2_GBi_ab_aa
* GAf_D3 ()(rho_f,beta_i)(c_f,b_i);
}}
}}
}
//This is the \delta_{456}^{213} part
if (wick_contraction[5]){
for (int rho_i=0; rho_i<Ns; rho_i++){
for (int alpha_f=0; alpha_f<Ns; alpha_f++){
auto D1_GAi_ar_ac = D1_GAi()(alpha_f,rho_i)(a_f,c_i);
for (int beta_i=0; beta_i<Ns; beta_i++){
auto GBf_D3_ab_bb = GBf_D3()(alpha_f,beta_i)(b_f,b_i);
for (int rho_f=0; rho_f<Ns; rho_f++){
result()(rho_f,rho_i)() -= ee * D1_GAi_ar_ac
* GAf_D2_GBi ()(rho_f,beta_i)(c_f,a_i)
* GBf_D3_ab_bb;
}}
}}
}
}}
}

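The ie_f/ie_i loops run over the six colour permutations in the order fixed by the epsilon table quoted below ({0,1,2},{1,2,0},{2,0,1},{0,2,1},{2,1,0},{1,0,2}), and ee multiplies the two permutation signs from epsilon_sgn. A standalone check that the quoted sign table is indeed the permutation parity:

#include <cstdio>

int main() {
  const int    epsilon[6][3] = {{0,1,2},{1,2,0},{2,0,1},{0,2,1},{2,1,0},{1,0,2}};
  const double epsilon_sgn[6] = {1.,1.,1.,-1.,-1.,-1.};
  // Parity of a 3-permutation by counting inversions; must match epsilon_sgn.
  for(int ie=0; ie<6; ie++){
    int inv = 0;
    for(int i=0;i<3;i++) for(int j=i+1;j<3;j++)
      if(epsilon[ie][i] > epsilon[ie][j]) inv++;
    double sgn = (inv % 2 == 0) ? 1. : -1.;
    printf("perm %d: sign %+g (table %+g)\n", ie, sgn, epsilon_sgn[ie]);
  }
}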
/* Computes which wick contractions should be performed for a *
* baryon 2pt function given the initial and finals state quark *
* flavours. *
* The array wick_contractions must be of length 6 */
template<class FImpl>
void BaryonUtils<FImpl>::Wick_Contractions(std::string qi, std::string qf, bool* wick_contractions) {
void BaryonUtils<FImpl>::WickContractions(std::string qi, std::string qf, bool* wick_contractions) {
const int epsilon[6][3] = {{0,1,2},{1,2,0},{2,0,1},{0,2,1},{2,1,0},{1,0,2}};
for (int ie=0; ie < 6 ; ie++) {
wick_contractions[ie] = (qi.size() == 3 && qf.size() == 3
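In use, the six flags are computed once from the flavour strings and then handed to the contraction routines. A hedged usage sketch (the FImpl parameter and the "udu" flavour labels are illustrative, not taken from this diff):

template <class FImpl>
void exampleWickSetup()
{
  // Hypothetical call site: one flag per colour-index permutation.
  bool wick_contractions[6];
  BaryonUtils<FImpl>::WickContractions("udu", "udu", wick_contractions);
  // wick_contractions[ie] is true when permutation ie of the source quark
  // flavours matches the sink flavours; it then gates the ie-th term in
  // BaryonSite / BaryonSiteMatrix above.
}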
@@ -364,11 +514,6 @@ void BaryonUtils<FImpl>::ContractBaryons(const PropagatorField &q1_left,

assert(Ns==4 && "Baryon code only implemented for N_spin = 4");
assert(Nc==3 && "Baryon code only implemented for N_colour = 3");

std::cout << "GammaA (left) " << (GammaA_left.g) << std::endl;
std::cout << "GammaB (left) " << (GammaB_left.g) << std::endl;
std::cout << "GammaA (right) " << (GammaA_right.g) << std::endl;
std::cout << "GammaB (right) " << (GammaB_right.g) << std::endl;

assert(parity==1 || parity == -1 && "Parity must be +1 or -1");

@@ -397,13 +542,62 @@ void BaryonUtils<FImpl>::ContractBaryons(const PropagatorField &q1_left,
auto D2 = v2[ss];
auto D3 = v3[ss];
vobj result=Zero();
baryon_site(D1,D2,D3,GammaA_left,GammaB_left,GammaA_right,GammaB_right,parity,wick_contractions,result);
BaryonSite(D1,D2,D3,GammaA_left,GammaB_left,GammaA_right,GammaB_right,parity,wick_contractions,result);
vbaryon_corr[ss] = result;
} );//end loop over lattice sites

t += usecond();

std::cout << std::setw(10) << bytes/t*1.0e6/1024/1024/1024 << " GB/s " << std::endl;
std::cout << GridLogDebug << std::setw(10) << bytes/t*1.0e6/1024/1024/1024 << " GB/s " << std::endl;
}

template<class FImpl>
void BaryonUtils<FImpl>::ContractBaryonsMatrix(const PropagatorField &q1_left,
const PropagatorField &q2_left,
const PropagatorField &q3_left,
const Gamma GammaA_left,
const Gamma GammaB_left,
const Gamma GammaA_right,
const Gamma GammaB_right,
const bool* wick_contractions,
SpinMatrixField &baryon_corr)
{

assert(Ns==4 && "Baryon code only implemented for N_spin = 4");
assert(Nc==3 && "Baryon code only implemented for N_colour = 3");

GridBase *grid = q1_left.Grid();

autoView(vbaryon_corr, baryon_corr,CpuWrite);
autoView( v1 , q1_left, CpuRead);
autoView( v2 , q2_left, CpuRead);
autoView( v3 , q3_left, CpuRead);

// Real bytes =0.;
// bytes += grid->oSites() * (432.*sizeof(vComplex) + 126.*sizeof(int) + 36.*sizeof(Real));
// for (int ie=0; ie < 6 ; ie++){
// if(ie==0 or ie==3){
// bytes += grid->oSites() * (4.*sizeof(int) + 4752.*sizeof(vComplex)) * wick_contractions[ie];
// }
// else{
// bytes += grid->oSites() * (64.*sizeof(int) + 5184.*sizeof(vComplex)) * wick_contractions[ie];
// }
// }
// Real t=0.;
// t =-usecond();

accelerator_for(ss, grid->oSites(), grid->Nsimd(), {
auto D1 = v1[ss];
auto D2 = v2[ss];
auto D3 = v3[ss];
sobj result=Zero();
BaryonSiteMatrix(D1,D2,D3,GammaA_left,GammaB_left,GammaA_right,GammaB_right,wick_contractions,result);
vbaryon_corr[ss] = result;
} );//end loop over lattice sites

// t += usecond();

// std::cout << GridLogDebug << std::setw(10) << bytes/t*1.0e6/1024/1024/1024 << " GB/s " << std::endl;

}

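A hedged sketch of driving the new Matrix entry point; the Gamma choices and field names are placeholders, and the resulting SpinMatrixField keeps both baryon spin indices open so any parity projection or trace can be applied afterwards:

template <class FImpl>
void exampleContractBaryonsMatrix(const typename FImpl::PropagatorField &q1,
                                  const typename FImpl::PropagatorField &q2,
                                  const typename FImpl::PropagatorField &q3,
                                  typename BaryonUtils<FImpl>::SpinMatrixField &corr)
{
  // Placeholder interpolator gammas; a real nucleon would use Cgamma5-type choices.
  Gamma GAl(Gamma::Algebra::Identity), GBl(Gamma::Algebra::SigmaXZ);
  Gamma GAr(Gamma::Algebra::Identity), GBr(Gamma::Algebra::SigmaXZ);
  bool wick[6];
  BaryonUtils<FImpl>::WickContractions("udu", "udu", wick);
  BaryonUtils<FImpl>::ContractBaryonsMatrix(q1, q2, q3, GAl, GBl, GAr, GBr, wick, corr);
}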
@@ -414,7 +608,7 @@ void BaryonUtils<FImpl>::ContractBaryons(const PropagatorField &q1_left,
* Wick_Contractions function above */
template <class FImpl>
template <class mobj, class robj>
void BaryonUtils<FImpl>::ContractBaryons_Sliced(const mobj &D1,
void BaryonUtils<FImpl>::ContractBaryonsSliced(const mobj &D1,
const mobj &D2,
const mobj &D3,
const Gamma GammaA_left,
@@ -429,16 +623,33 @@ void BaryonUtils<FImpl>::ContractBaryons_Sliced(const mobj &D1,

assert(Ns==4 && "Baryon code only implemented for N_spin = 4");
assert(Nc==3 && "Baryon code only implemented for N_colour = 3");

std::cout << "GammaA (left) " << (GammaA_left.g) << std::endl;
std::cout << "GammaB (left) " << (GammaB_left.g) << std::endl;
std::cout << "GammaA (right) " << (GammaA_right.g) << std::endl;
std::cout << "GammaB (right) " << (GammaB_right.g) << std::endl;

assert(parity==1 || parity == -1 && "Parity must be +1 or -1");

for (int t=0; t<nt; t++) {
baryon_site(D1[t],D2[t],D3[t],GammaA_left,GammaB_left,GammaA_right,GammaB_right,parity,wick_contractions,result[t]);
BaryonSite(D1[t],D2[t],D3[t],GammaA_left,GammaB_left,GammaA_right,GammaB_right,parity,wick_contractions,result[t]);
}
}

template <class FImpl>
template <class mobj, class robj>
void BaryonUtils<FImpl>::ContractBaryonsSlicedMatrix(const mobj &D1,
const mobj &D2,
const mobj &D3,
const Gamma GammaA_left,
const Gamma GammaB_left,
const Gamma GammaA_right,
const Gamma GammaB_right,
const bool* wick_contractions,
const int nt,
robj &result)
{

assert(Ns==4 && "Baryon code only implemented for N_spin = 4");
assert(Nc==3 && "Baryon code only implemented for N_colour = 3");

for (int t=0; t<nt; t++) {
BaryonSiteMatrix(D1[t],D2[t],D3[t],GammaA_left,GammaB_left,GammaA_right,GammaB_right,wick_contractions,result[t]);
}
}

@@ -454,7 +665,7 @@ void BaryonUtils<FImpl>::ContractBaryons_Sliced(const mobj &D1,
* Dq4_tf is a quark line from t_f to t_J */
template<class FImpl>
template <class mobj, class mobj2, class robj>
void BaryonUtils<FImpl>::Baryon_Gamma_3pt_Group1_Site(
void BaryonUtils<FImpl>::BaryonGamma3ptGroup1Site(
const mobj &Dq1_ti,
const mobj2 &Dq2_spec,
const mobj2 &Dq3_spec,
@@ -546,7 +757,7 @@ void BaryonUtils<FImpl>::Baryon_Gamma_3pt_Group1_Site(
* Dq4_tf is a quark line from t_f to t_J */
template<class FImpl>
template <class mobj, class mobj2, class robj>
void BaryonUtils<FImpl>::Baryon_Gamma_3pt_Group2_Site(
void BaryonUtils<FImpl>::BaryonGamma3ptGroup2Site(
const mobj2 &Dq1_spec,
const mobj &Dq2_ti,
const mobj2 &Dq3_spec,
@@ -636,7 +847,7 @@ void BaryonUtils<FImpl>::Baryon_Gamma_3pt_Group2_Site(
* Dq4_tf is a quark line from t_f to t_J */
template<class FImpl>
template <class mobj, class mobj2, class robj>
void BaryonUtils<FImpl>::Baryon_Gamma_3pt_Group3_Site(
void BaryonUtils<FImpl>::BaryonGamma3ptGroup3Site(
const mobj2 &Dq1_spec,
const mobj2 &Dq2_spec,
const mobj &Dq3_ti,
@@ -728,7 +939,7 @@ void BaryonUtils<FImpl>::Baryon_Gamma_3pt_Group3_Site(
* https://aportelli.github.io/Hadrons-doc/#/mcontraction */
template<class FImpl>
template <class mobj>
void BaryonUtils<FImpl>::Baryon_Gamma_3pt(
void BaryonUtils<FImpl>::BaryonGamma3pt(
const PropagatorField &q_ti,
const mobj &Dq_spec1,
const mobj &Dq_spec2,
@@ -751,7 +962,7 @@ void BaryonUtils<FImpl>::Baryon_Gamma_3pt(
auto Dq_ti = vq_ti[ss];
auto Dq_tf = vq_tf[ss];
sobj result=Zero();
Baryon_Gamma_3pt_Group1_Site(Dq_ti,Dq_spec1,Dq_spec2,Dq_tf,GammaJ,GammaBi,GammaBf,wick_contraction,result);
BaryonGamma3ptGroup1Site(Dq_ti,Dq_spec1,Dq_spec2,Dq_tf,GammaJ,GammaBi,GammaBf,wick_contraction,result);
vcorr[ss] += result;
});//end loop over lattice sites
} else if (group == 2) {
@@ -759,7 +970,7 @@ void BaryonUtils<FImpl>::Baryon_Gamma_3pt(
auto Dq_ti = vq_ti[ss];
auto Dq_tf = vq_tf[ss];
sobj result=Zero();
Baryon_Gamma_3pt_Group2_Site(Dq_spec1,Dq_ti,Dq_spec2,Dq_tf,GammaJ,GammaBi,GammaBf,wick_contraction,result);
BaryonGamma3ptGroup2Site(Dq_spec1,Dq_ti,Dq_spec2,Dq_tf,GammaJ,GammaBi,GammaBf,wick_contraction,result);
vcorr[ss] += result;
});//end loop over lattice sites
} else if (group == 3) {
@@ -767,7 +978,7 @@ void BaryonUtils<FImpl>::Baryon_Gamma_3pt(
auto Dq_ti = vq_ti[ss];
auto Dq_tf = vq_tf[ss];
sobj result=Zero();
Baryon_Gamma_3pt_Group3_Site(Dq_spec1,Dq_spec2,Dq_ti,Dq_tf,GammaJ,GammaBi,GammaBf,wick_contraction,result);
BaryonGamma3ptGroup3Site(Dq_spec1,Dq_spec2,Dq_ti,Dq_tf,GammaJ,GammaBi,GammaBf,wick_contraction,result);

vcorr[ss] += result;
});//end loop over lattice sites
@@ -787,7 +998,7 @@ void BaryonUtils<FImpl>::Baryon_Gamma_3pt(
* Ds_ti is a quark line from t_i to t_H */
template <class FImpl>
template <class mobj, class mobj2, class robj>
void BaryonUtils<FImpl>::Sigma_to_Nucleon_Q1_Eye_site(const mobj &Dq_loop,
void BaryonUtils<FImpl>::SigmaToNucleonQ1EyeSite(const mobj &Dq_loop,
const mobj2 &Du_spec,
const mobj &Dd_tf,
const mobj &Ds_ti,
@@ -838,7 +1049,7 @@ void BaryonUtils<FImpl>::Sigma_to_Nucleon_Q1_Eye_site(const mobj &Dq_loop,
* Ds_ti is a quark line from t_i to t_H */
template <class FImpl>
template <class mobj, class mobj2, class robj>
void BaryonUtils<FImpl>::Sigma_to_Nucleon_Q1_NonEye_site(const mobj &Du_ti,
void BaryonUtils<FImpl>::SigmaToNucleonQ1NonEyeSite(const mobj &Du_ti,
const mobj &Du_tf,
const mobj2 &Du_spec,
const mobj &Dd_tf,
@@ -897,7 +1108,7 @@ void BaryonUtils<FImpl>::Sigma_to_Nucleon_Q1_NonEye_site(const mobj &Du_ti,
* Ds_ti is a quark line from t_i to t_H */
template <class FImpl>
template <class mobj, class mobj2, class robj>
void BaryonUtils<FImpl>::Sigma_to_Nucleon_Q2_Eye_site(const mobj &Dq_loop,
void BaryonUtils<FImpl>::SigmaToNucleonQ2EyeSite(const mobj &Dq_loop,
const mobj2 &Du_spec,
const mobj &Dd_tf,
const mobj &Ds_ti,
@@ -948,7 +1159,7 @@ void BaryonUtils<FImpl>::Sigma_to_Nucleon_Q2_Eye_site(const mobj &Dq_loop,
* Ds_ti is a quark line from t_i to t_H */
template <class FImpl>
template <class mobj, class mobj2, class robj>
void BaryonUtils<FImpl>::Sigma_to_Nucleon_Q2_NonEye_site(const mobj &Du_ti,
void BaryonUtils<FImpl>::SigmaToNucleonQ2NonEyeSite(const mobj &Du_ti,
const mobj &Du_tf,
const mobj2 &Du_spec,
const mobj &Dd_tf,
@@ -1002,7 +1213,7 @@ void BaryonUtils<FImpl>::Sigma_to_Nucleon_Q2_NonEye_site(const mobj &Du_ti,

template<class FImpl>
template <class mobj>
void BaryonUtils<FImpl>::Sigma_to_Nucleon_Eye(const PropagatorField &qq_loop,
void BaryonUtils<FImpl>::SigmaToNucleonEye(const PropagatorField &qq_loop,
const mobj &Du_spec,
const PropagatorField &qd_tf,
const PropagatorField &qs_ti,
@@ -1029,9 +1240,9 @@ void BaryonUtils<FImpl>::Sigma_to_Nucleon_Eye(const PropagatorField &qq_loop,
auto Ds_ti = vs_ti[ss];
sobj result=Zero();
if(op == "Q1"){
Sigma_to_Nucleon_Q1_Eye_site(Dq_loop,Du_spec,Dd_tf,Ds_ti,Gamma_H,GammaB_sigma,GammaB_nucl,result);
SigmaToNucleonQ1EyeSite(Dq_loop,Du_spec,Dd_tf,Ds_ti,Gamma_H,GammaB_sigma,GammaB_nucl,result);
} else if(op == "Q2"){
Sigma_to_Nucleon_Q2_Eye_site(Dq_loop,Du_spec,Dd_tf,Ds_ti,Gamma_H,GammaB_sigma,GammaB_nucl,result);
SigmaToNucleonQ2EyeSite(Dq_loop,Du_spec,Dd_tf,Ds_ti,Gamma_H,GammaB_sigma,GammaB_nucl,result);
} else {
assert(0 && "Weak Operator not correctly specified");
}
@@ -1041,7 +1252,7 @@ void BaryonUtils<FImpl>::Sigma_to_Nucleon_Eye(const PropagatorField &qq_loop,

template<class FImpl>
template <class mobj>
void BaryonUtils<FImpl>::Sigma_to_Nucleon_NonEye(const PropagatorField &qq_ti,
void BaryonUtils<FImpl>::SigmaToNucleonNonEye(const PropagatorField &qq_ti,
const PropagatorField &qq_tf,
const mobj &Du_spec,
const PropagatorField &qd_tf,
@@ -1071,9 +1282,9 @@ void BaryonUtils<FImpl>::Sigma_to_Nucleon_NonEye(const PropagatorField &qq_ti,
auto Ds_ti = vs_ti[ss];
sobj result=Zero();
if(op == "Q1"){
Sigma_to_Nucleon_Q1_NonEye_site(Dq_ti,Dq_tf,Du_spec,Dd_tf,Ds_ti,Gamma_H,GammaB_sigma,GammaB_nucl,result);
SigmaToNucleonQ1NonEyeSite(Dq_ti,Dq_tf,Du_spec,Dd_tf,Ds_ti,Gamma_H,GammaB_sigma,GammaB_nucl,result);
} else if(op == "Q2"){
Sigma_to_Nucleon_Q2_NonEye_site(Dq_ti,Dq_tf,Du_spec,Dd_tf,Ds_ti,Gamma_H,GammaB_sigma,GammaB_nucl,result);
SigmaToNucleonQ2NonEyeSite(Dq_ti,Dq_tf,Du_spec,Dd_tf,Ds_ti,Gamma_H,GammaB_sigma,GammaB_nucl,result);
} else {
assert(0 && "Weak Operator not correctly specified");
}

@@ -53,25 +53,42 @@ namespace PeriodicBC {
return Cshift(tmp,mu,-1);// moves towards positive mu
}

template<class gauge,typename Op, typename T1> auto
CovShiftForward(const Lattice<gauge> &Link,
int mu,
const LatticeUnaryExpression<Op,T1> &expr)
-> Lattice<decltype(expr.op.func(eval(0, expr.arg1)))>
template<class gauge> Lattice<gauge>
CovShiftIdentityBackward(const Lattice<gauge> &Link, int mu)
{
Lattice<decltype(expr.op.func(eval(0, expr.arg1)))> arg(expr);
return Cshift(adj(Link), mu, -1);
}

template<class gauge> Lattice<gauge>
CovShiftIdentityForward(const Lattice<gauge> &Link, int mu)
{
return Link;
}

template<class gauge> Lattice<gauge>
ShiftStaple(const Lattice<gauge> &Link, int mu)
{
return Cshift(Link, mu, 1);
}

template<class gauge,class Expr,typename std::enable_if<is_lattice_expr<Expr>::value,void>::type * = nullptr>
auto CovShiftForward(const Lattice<gauge> &Link,
int mu,
const Expr &expr) -> decltype(closure(expr))
{
auto arg = closure(expr);
return CovShiftForward(Link,mu,arg);
}
template<class gauge,typename Op, typename T1> auto
CovShiftBackward(const Lattice<gauge> &Link,
int mu,
const LatticeUnaryExpression<Op,T1> &expr)
-> Lattice<decltype(expr.op.func(eval(0, expr.arg1)))>
template<class gauge,class Expr,typename std::enable_if<is_lattice_expr<Expr>::value,void>::type * = nullptr>
auto CovShiftBackward(const Lattice<gauge> &Link,
int mu,
const Expr &expr) -> decltype(closure(expr))
{
Lattice<decltype(expr.op.func(eval(0, expr.arg1)))> arg(expr);
return CovShiftForward(Link,mu,arg);
auto arg = closure(expr);
return CovShiftBackward(Link,mu,arg);
}

}

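The rewritten expression overloads lean on closure(), which forces a lazy lattice expression into a concrete Lattice before the shift; the enable_if on is_lattice_expr keeps the overload away from plain Lattice arguments. (Note the old CovShiftBackward overload forwarded to CovShiftForward; the new version also fixes that.) A toy illustration of why the materialisation step is needed, with a simplified expression template rather than Grid's:

#include <vector>
#include <cstdio>

// Toy expression template, standing in for Grid's lattice expressions.
struct Vec { std::vector<double> d; };
template<class L, class R> struct AddExpr { const L &l; const R &r; };
template<class L, class R> AddExpr<L,R> operator+(const L &l, const R &r) { return {l,r}; }

double eval(size_t i, const Vec &v) { return v.d[i]; }
template<class L, class R> double eval(size_t i, const AddExpr<L,R> &e)
{ return eval(i,e.l) + eval(i,e.r); }

// closure(): force a lazy expression into a concrete container; a shift
// routine cannot iterate over an unevaluated expression node.
template<class E> Vec closure(const E &e, size_t n)
{ Vec out; out.d.resize(n); for(size_t i=0;i<n;i++) out.d[i]=eval(i,e); return out; }

int main() {
  Vec a{{1,2}}, b{{3,4}};
  Vec c = closure(a + b, 2);          // materialise before "shifting"
  printf("%g %g\n", c.d[0], c.d[1]);  // 4 6
}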
@@ -141,26 +158,55 @@ namespace ConjugateBC {
// std::cout<<"Gparity::CovCshiftBackward mu="<<mu<<std::endl;
return Cshift(tmp,mu,-1);// moves towards positive mu
}
template<class gauge> Lattice<gauge>
CovShiftIdentityBackward(const Lattice<gauge> &Link, int mu) {
GridBase *grid = Link.Grid();
int Lmu = grid->GlobalDimensions()[mu] - 1;

template<class gauge,typename Op, typename T1> auto
CovShiftForward(const Lattice<gauge> &Link,
int mu,
const LatticeUnaryExpression<Op,T1> &expr)
-> Lattice<decltype(expr.op.func(eval(0, expr.arg1)))>
{
Lattice<decltype(expr.op.func(eval(0, expr.arg1)))> arg(expr);
return CovShiftForward(Link,mu,arg);
Lattice<iScalar<vInteger>> coor(grid);
LatticeCoordinate(coor, mu);

Lattice<gauge> tmp(grid);
tmp = adj(Link);
tmp = where(coor == Lmu, conjugate(tmp), tmp);
return Cshift(tmp, mu, -1); // moves towards positive mu
}
template<class gauge,typename Op, typename T1> auto
CovShiftBackward(const Lattice<gauge> &Link,
int mu,
const LatticeUnaryExpression<Op,T1> &expr)
-> Lattice<decltype(expr.op.func(eval(0, expr.arg1)))>
{
Lattice<decltype(expr.op.func(eval(0, expr.arg1)))> arg(expr);
return CovShiftForward(Link,mu,arg);
template<class gauge> Lattice<gauge>
CovShiftIdentityForward(const Lattice<gauge> &Link, int mu) {
return Link;
}

template<class gauge> Lattice<gauge>
ShiftStaple(const Lattice<gauge> &Link, int mu)
{
GridBase *grid = Link.Grid();
int Lmu = grid->GlobalDimensions()[mu] - 1;

Lattice<iScalar<vInteger>> coor(grid);
LatticeCoordinate(coor, mu);

Lattice<gauge> tmp(grid);
tmp = Cshift(Link, mu, 1);
tmp = where(coor == Lmu, conjugate(tmp), tmp);
return tmp;
}

template<class gauge,class Expr,typename std::enable_if<is_lattice_expr<Expr>::value,void>::type * = nullptr>
auto CovShiftForward(const Lattice<gauge> &Link,
int mu,
const Expr &expr) -> decltype(closure(expr))
{
auto arg = closure(expr);
return CovShiftForward(Link,mu,arg);
}
template<class gauge,class Expr,typename std::enable_if<is_lattice_expr<Expr>::value,void>::type * = nullptr>
auto CovShiftBackward(const Lattice<gauge> &Link,
int mu,
const Expr &expr) -> decltype(closure(expr))
{
auto arg = closure(expr);
return CovShiftBackward(Link,mu,arg);
}

}

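The new ConjugateBC helpers conjugate only the slice about to wrap through the lattice boundary (coor == Lmu), which implements the charge-conjugate (G-parity style) boundary condition. A 1-D toy of the same where-then-Cshift pattern:

#include <complex>
#include <cstdio>

// Toy 1-D analogue: conjugate the slice at coor == L-1, then shift by -1
// so the conjugated value wraps around to site 0.
int main() {
  const int L = 4;
  std::complex<double> tmp[L] = {{1,1},{2,2},{3,3},{4,4}}, shifted[L];
  tmp[L-1] = std::conj(tmp[L-1]);                      // where(coor==Lmu, conjugate(tmp), tmp)
  for(int x=0; x<L; x++) shifted[x] = tmp[(x-1+L)%L];  // Cshift(tmp, mu, -1)
  for(int x=0; x<L; x++)
    printf("site %d: (%g,%g)\n", x, shifted[x].real(), shifted[x].imag());
}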
@@ -154,8 +154,8 @@ void axpby_ssp_pminus(Lattice<vobj> &z,Coeff a,const Lattice<vobj> &x,Coeff b,co
accelerator_for(sss,nloop,vobj::Nsimd(),{
uint64_t ss = sss*Ls;
decltype(coalescedRead(y_v[ss+sp])) tmp;
spProj5m(tmp,y_v(ss+sp));
tmp = a*x_v(ss+s)+b*tmp;
coalescedWrite(z_v[ss+s],tmp);
});
}
@@ -188,7 +188,6 @@ void G5R5(Lattice<vobj> &z,const Lattice<vobj> &x)
z.Checkerboard() = x.Checkerboard();
conformable(x,z);
int Ls = grid->_rdimensions[0];
Gamma G5(Gamma::Algebra::Gamma5);
autoView( x_v, x, AcceleratorRead);
autoView( z_v, z, AcceleratorWrite);
uint64_t nloop = grid->oSites()/Ls;
@@ -196,7 +195,13 @@ void G5R5(Lattice<vobj> &z,const Lattice<vobj> &x)
uint64_t ss = sss*Ls;
for(int s=0;s<Ls;s++){
int sp = Ls-1-s;
coalescedWrite(z_v[ss+sp],G5*x_v(ss+s));
auto tmp = x_v(ss+s);
decltype(tmp) tmp_p;
decltype(tmp) tmp_m;
spProj5p(tmp_p,tmp);
spProj5m(tmp_m,tmp);
// Use of spProj5m, 5p captures the coarse space too
coalescedWrite(z_v[ss+sp],tmp_p - tmp_m);
}
});
}
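Replacing the explicit G5 multiply by tmp_p - tmp_m uses gamma5 = P+ - P- with P± = (1 ± gamma5)/2; since spProj5p/spProj5m now also carry coarse-space overloads (the IfNotCoarsened guards earlier), the same kernel covers coarse vectors with no explicit gamma5 matrix. A quick numerical check of the identity, taking the diagonal chiral-basis gamma5 = diag(+1,+1,-1,-1) for illustration:

#include <cstdio>

// Check gamma5 * psi == P+ psi - P- psi componentwise for a 4-spinor.
int main() {
  double psi[4] = {0.5, -1.0, 2.0, 3.5};
  double g5[4]  = {+1, +1, -1, -1};   // illustrative chiral-basis gamma5
  for(int s=0; s<4; s++) {
    double plus  = 0.5*(psi[s] + g5[s]*psi[s]);  // P+ psi
    double minus = 0.5*(psi[s] - g5[s]*psi[s]);  // P- psi
    printf("s=%d: g5*psi=%g  P+psi-P-psi=%g\n", s, g5[s]*psi[s], plus - minus);
  }
}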
@@ -208,10 +213,20 @@ void G5C(Lattice<vobj> &z, const Lattice<vobj> &x)
z.Checkerboard() = x.Checkerboard();
conformable(x, z);

Gamma G5(Gamma::Algebra::Gamma5);
z = G5 * x;
autoView( x_v, x, AcceleratorRead);
autoView( z_v, z, AcceleratorWrite);
uint64_t nloop = grid->oSites();
accelerator_for(ss,nloop,vobj::Nsimd(),{
auto tmp = x_v(ss);
decltype(tmp) tmp_p;
decltype(tmp) tmp_m;
spProj5p(tmp_p,tmp);
spProj5m(tmp_m,tmp);
coalescedWrite(z_v[ss],tmp_p - tmp_m);
});
}

/*
template<class CComplex, int nbasis>
void G5C(Lattice<iVector<CComplex, nbasis>> &z, const Lattice<iVector<CComplex, nbasis>> &x)
{
@@ -234,6 +249,7 @@ void G5C(Lattice<iVector<CComplex, nbasis>> &z, const Lattice<iVector<CComplex,
}
});
}
*/

NAMESPACE_END(Grid);

@@ -449,7 +449,8 @@ public:
LatticeReal alpha(grid);

// std::cout<<GridLogMessage<<"xi "<<xi <<std::endl;
alpha = toReal(2.0 * xi);
xi = 2.0 *xi;
alpha = toReal(xi);

do {
// A. Generate two uniformly distributed pseudo-random numbers R and R',
@@ -734,7 +735,6 @@ public:
}
}

template <typename GaugeField>
static void HotConfiguration(GridParallelRNG &pRNG, GaugeField &out) {
typedef typename GaugeField::vector_type vector_type;
@@ -799,6 +799,88 @@ public:
}
};

||||
LatticeComplexD Determinant(const Lattice<iScalar<iScalar<iMatrix<vComplexD, N> > > > &Umu)
|
||||
{
|
||||
GridBase *grid=Umu.Grid();
|
||||
auto lvol = grid->lSites();
|
||||
LatticeComplexD ret(grid);
|
||||
|
||||
autoView(Umu_v,Umu,CpuRead);
|
||||
autoView(ret_v,ret,CpuWrite);
|
||||
thread_for(site,lvol,{
|
||||
Eigen::MatrixXcd EigenU = Eigen::MatrixXcd::Zero(N,N);
|
||||
Coordinate lcoor;
|
||||
grid->LocalIndexToLocalCoor(site, lcoor);
|
||||
iScalar<iScalar<iMatrix<ComplexD, N> > > Us;
|
||||
peekLocalSite(Us, Umu_v, lcoor);
|
||||
for(int i=0;i<N;i++){
|
||||
for(int j=0;j<N;j++){
|
||||
EigenU(i,j) = Us()()(i,j);
|
||||
}}
|
||||
ComplexD det = EigenU.determinant();
|
||||
pokeLocalSite(det,ret_v,lcoor);
|
||||
});
|
||||
return ret;
|
||||
}
|
||||
template<int N>
|
||||
static void ProjectSUn(Lattice<iScalar<iScalar<iMatrix<vComplexD, N> > > > &Umu)
|
||||
{
|
||||
Umu = ProjectOnGroup(Umu);
|
||||
auto det = Determinant(Umu);
|
||||
|
||||
det = conjugate(det);
|
||||
|
||||
for(int i=0;i<N;i++){
|
||||
auto element = PeekIndex<ColourIndex>(Umu,N-1,i);
|
||||
element = element * det;
|
||||
PokeIndex<ColourIndex>(Umu,element,Nc-1,i);
|
||||
}
|
||||
}
|
||||
template<int N>
|
||||
static void ProjectSUn(Lattice<iVector<iScalar<iMatrix<vComplexD, N> >,Nd> > &U)
|
||||
{
|
||||
GridBase *grid=U.Grid();
|
||||
// Reunitarise
|
||||
for(int mu=0;mu<Nd;mu++){
|
||||
auto Umu = PeekIndex<LorentzIndex>(U,mu);
|
||||
Umu = ProjectOnGroup(Umu);
|
||||
ProjectSUn(Umu);
|
||||
PokeIndex<LorentzIndex>(U,Umu,mu);
|
||||
}
|
||||
}
|
||||
// Explicit specialisation for SU(3).
|
||||
// Explicit specialisation for SU(3).
|
||||
static void
|
||||
ProjectSU3 (Lattice<iScalar<iScalar<iMatrix<vComplexD, 3> > > > &Umu)
|
||||
{
|
||||
GridBase *grid=Umu.Grid();
|
||||
const int x=0;
|
||||
const int y=1;
|
||||
const int z=2;
|
||||
// Reunitarise
|
||||
Umu = ProjectOnGroup(Umu);
|
||||
autoView(Umu_v,Umu,CpuWrite);
|
||||
thread_for(ss,grid->oSites(),{
|
||||
auto cm = Umu_v[ss];
|
||||
cm()()(2,x) = adj(cm()()(0,y)*cm()()(1,z)-cm()()(0,z)*cm()()(1,y)); //x= yz-zy
|
||||
cm()()(2,y) = adj(cm()()(0,z)*cm()()(1,x)-cm()()(0,x)*cm()()(1,z)); //y= zx-xz
|
||||
cm()()(2,z) = adj(cm()()(0,x)*cm()()(1,y)-cm()()(0,y)*cm()()(1,x)); //z= xy-yx
|
||||
Umu_v[ss]=cm;
|
||||
});
|
||||
}
|
||||
static void ProjectSU3(Lattice<iVector<iScalar<iMatrix<vComplexD, 3> >,Nd> > &U)
|
||||
{
|
||||
GridBase *grid=U.Grid();
|
||||
// Reunitarise
|
||||
for(int mu=0;mu<Nd;mu++){
|
||||
auto Umu = PeekIndex<LorentzIndex>(U,mu);
|
||||
Umu = ProjectOnGroup(Umu);
|
||||
ProjectSU3(Umu);
|
||||
PokeIndex<LorentzIndex>(U,Umu,mu);
|
||||
}
|
||||
}
|
||||
|
||||
typedef SU<2> SU2;
|
||||
typedef SU<3> SU3;
|
||||
typedef SU<4> SU4;
|
||||
|
@ -39,7 +39,7 @@ public:
|
||||
typedef iSUnAdjointMatrix<ComplexF> AMatrixF;
|
||||
typedef iSUnAdjointMatrix<ComplexD> AMatrixD;
|
||||
|
||||
typedef iSUnAdjointMatrix<vComplex> vAMatrix;
|
||||
typedef iSUnAdjointMatrix<vComplex> vAMatrix;
|
||||
typedef iSUnAdjointMatrix<vComplexF> vAMatrixF;
|
||||
typedef iSUnAdjointMatrix<vComplexD> vAMatrixD;
|
||||
|
||||
@ -47,14 +47,9 @@ public:
|
||||
typedef Lattice<vAMatrixF> LatticeAdjMatrixF;
|
||||
typedef Lattice<vAMatrixD> LatticeAdjMatrixD;
|
||||
|
||||
typedef Lattice<iVector<iScalar<iMatrix<vComplex, Dimension> >, Nd> >
|
||||
LatticeAdjField;
|
||||
typedef Lattice<iVector<iScalar<iMatrix<vComplexF, Dimension> >, Nd> >
|
||||
LatticeAdjFieldF;
|
||||
typedef Lattice<iVector<iScalar<iMatrix<vComplexD, Dimension> >, Nd> >
|
||||
LatticeAdjFieldD;
|
||||
|
||||
|
||||
typedef Lattice<iVector<iScalar<iMatrix<vComplex, Dimension> >, Nd> > LatticeAdjField;
|
||||
typedef Lattice<iVector<iScalar<iMatrix<vComplexF, Dimension> >, Nd> > LatticeAdjFieldF;
|
||||
typedef Lattice<iVector<iScalar<iMatrix<vComplexD, Dimension> >, Nd> > LatticeAdjFieldD;
|
||||
|
||||
|
||||
template <class cplx>
|
||||
@ -128,7 +123,9 @@ public:
|
||||
}
|
||||
|
||||
// Projects the algebra components a lattice matrix (of dimension ncol*ncol -1 )
|
||||
static void projectOnAlgebra(typename SU<ncolour>::LatticeAlgebraVector &h_out, const LatticeAdjMatrix &in, Real scale = 1.0) {
|
||||
static void projectOnAlgebra(typename SU<ncolour>::LatticeAlgebraVector &h_out, const LatticeAdjMatrix &in, Real scale = 1.0)
|
||||
{
|
||||
|
||||
conformable(h_out, in);
|
||||
h_out = Zero();
|
||||
AMatrix iTa;
|
||||
@ -136,7 +133,7 @@ public:
|
||||
|
||||
for (int a = 0; a < Dimension; a++) {
|
||||
generator(a, iTa);
|
||||
auto tmp = real(trace(iTa * in)) * coefficient;
|
||||
LatticeComplex tmp = real(trace(iTa * in)) * coefficient;
|
||||
pokeColour(h_out, tmp, a);
|
||||
}
|
||||
}
|
||||
|
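projectOnAlgebra uses the trace inner product: with generators normalised so that tr(T^a T^b) is proportional to delta^ab, each component is recovered as a constant times Re tr(iT^a M). A scalar su(2) toy of the same trace projection (Pauli-matrix generators and the normalisation are chosen for illustration, not Grid's adjoint conventions):

#include <complex>
#include <cstdio>

typedef std::complex<double> C;
const C I(0,1);

// Trace of a 2x2 product, tr(A*B).
C tr(const C A[2][2], const C B[2][2]) {
  C t = 0;
  for(int i=0;i<2;i++) for(int k=0;k<2;k++) t += A[i][k]*B[k][i];
  return t;
}

int main() {
  // su(2) generators T^a = sigma^a/2, so tr(T^a T^b) = delta^ab / 2.
  C s1[2][2]={{0,1},{1,0}}, s3[2][2]={{1,0},{0,-1}};
  C T1[2][2], T3[2][2];
  for(int i=0;i<2;i++) for(int j=0;j<2;j++){ T1[i][j]=0.5*s1[i][j]; T3[i][j]=0.5*s3[i][j]; }
  // Algebra element M = i*(h1*T1 + h3*T3) with h1=0.7, h3=-0.2.
  C M[2][2];
  for(int i=0;i<2;i++) for(int j=0;j<2;j++)
    M[i][j] = I*(0.7*T1[i][j] - 0.2*T3[i][j]);
  // Recover components: h^a = 2 * Re tr(-i T^a M), the 2 fixing the normalisation.
  printf("h1 = %g, h3 = %g\n",
         2.0*std::real(tr(T1,M)*(-I)), 2.0*std::real(tr(T3,M)*(-I)));  // 0.7, -0.2
}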
@@ -485,7 +485,7 @@ public:

// Up staple ___ ___
// | |
tmp = Cshift(closure(adj(U[nu])), nu, -1);
tmp = Cshift(adj(U[nu]), nu, -1);
tmp = adj(U2[mu]) * tmp;
tmp = Cshift(tmp, mu, -2);

@@ -519,7 +519,7 @@ public:
//
// | |

tmp = Cshift(closure(adj(U2[nu])), nu, -2);
tmp = Cshift(adj(U2[nu]), nu, -2);
tmp = Gimpl::CovShiftBackward(U[mu], mu, tmp);
tmp = U2[nu] * Cshift(tmp, nu, 2);
Stap += Cshift(tmp, mu, 1);

@ -26,7 +26,7 @@
|
||||
*************************************************************************************/
|
||||
/* END LEGAL */
|
||||
#include <Grid/Grid.h>
|
||||
#ifndef __NVCC__
|
||||
#if (!defined(GRID_CUDA)) && (!defined(GRID_HIP))
|
||||
|
||||
NAMESPACE_BEGIN(Grid);
|
||||
|
||||
|
603 Grid/simd/Fujitsu_A64FX_intrin_double.h Normal file
@@ -0,0 +1,603 @@
/*************************************************************************************

    Grid physics library, www.github.com/paboyle/Grid

    Source file: Fujitsu_A64FX_intrin_double.h

    Copyright (C) 2020

    Author: Nils Meyer <nils.meyer@ur.de>

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License along
    with this program; if not, write to the Free Software Foundation, Inc.,
    51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.

    See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */
#define LOAD_CHIMU(base)             LOAD_CHIMU_INTERLEAVED_A64FXd(base)
#define PREFETCH_CHIMU_L1(A)         PREFETCH_CHIMU_L1_INTERNAL_A64FXd(A)
#define PREFETCH_GAUGE_L1(A)         PREFETCH_GAUGE_L1_INTERNAL_A64FXd(A)
#define PREFETCH_CHIMU_L2(A)         PREFETCH_CHIMU_L2_INTERNAL_A64FXd(A)
#define PREFETCH_GAUGE_L2(A)         PREFETCH_GAUGE_L2_INTERNAL_A64FXd(A)
#define PF_GAUGE(A)
#define PREFETCH_RESULT_L2_STORE(A)  PREFETCH_RESULT_L2_STORE_INTERNAL_A64FXd(A)
#define PREFETCH_RESULT_L1_STORE(A)  PREFETCH_RESULT_L1_STORE_INTERNAL_A64FXd(A)
#define PREFETCH1_CHIMU(A)           PREFETCH_CHIMU_L1(A)
#define PREFETCH_CHIMU(A)            PREFETCH_CHIMU_L1(A)
#define LOCK_GAUGE(A)
#define UNLOCK_GAUGE(A)
#define MASK_REGS                    DECLARATIONS_A64FXd
#define SAVE_RESULT(A,B)             RESULT_A64FXd(A);
#define MULT_2SPIN_1(Dir)            MULT_2SPIN_1_A64FXd(Dir)
#define MULT_2SPIN_2                 MULT_2SPIN_2_A64FXd
#define LOAD_CHI(base)               LOAD_CHI_A64FXd(base)
#define ZERO_PSI                     ZERO_PSI_A64FXd
#define ADD_RESULT(base,basep)       LOAD_CHIMU(base); ADD_RESULT_INTERNAL_A64FXd; RESULT_A64FXd(base)
#define XP_PROJ                      XP_PROJ_A64FXd
#define YP_PROJ                      YP_PROJ_A64FXd
#define ZP_PROJ                      ZP_PROJ_A64FXd
#define TP_PROJ                      TP_PROJ_A64FXd
#define XM_PROJ                      XM_PROJ_A64FXd
#define YM_PROJ                      YM_PROJ_A64FXd
#define ZM_PROJ                      ZM_PROJ_A64FXd
#define TM_PROJ                      TM_PROJ_A64FXd
#define XP_RECON                     XP_RECON_A64FXd
#define XM_RECON                     XM_RECON_A64FXd
#define XM_RECON_ACCUM               XM_RECON_ACCUM_A64FXd
#define YM_RECON_ACCUM               YM_RECON_ACCUM_A64FXd
#define ZM_RECON_ACCUM               ZM_RECON_ACCUM_A64FXd
#define TM_RECON_ACCUM               TM_RECON_ACCUM_A64FXd
#define XP_RECON_ACCUM               XP_RECON_ACCUM_A64FXd
#define YP_RECON_ACCUM               YP_RECON_ACCUM_A64FXd
#define ZP_RECON_ACCUM               ZP_RECON_ACCUM_A64FXd
#define TP_RECON_ACCUM               TP_RECON_ACCUM_A64FXd
#define PERMUTE_DIR0                 0
#define PERMUTE_DIR1                 1
#define PERMUTE_DIR2                 2
#define PERMUTE_DIR3                 3
#define PERMUTE                      PERMUTE_A64FXd;
#define LOAD_TABLE(Dir)              if (Dir == 0) { LOAD_TABLE0; } else if (Dir == 1) { LOAD_TABLE1; } else if (Dir == 2) { LOAD_TABLE2; }
#define MAYBEPERM(Dir,perm)          if (Dir != 3) { if (perm) { PERMUTE; } }
// DECLARATIONS
#define DECLARATIONS_A64FXd \
    uint64_t baseU; \
    const uint64_t lut[4][8] = { \
        {4, 5, 6, 7, 0, 1, 2, 3}, \
        {2, 3, 0, 1, 6, 7, 4, 5}, \
        {1, 0, 3, 2, 5, 4, 7, 6}, \
        {0, 1, 2, 4, 5, 6, 7, 8} }; \
    svfloat64_t result_00; \
    svfloat64_t result_01; \
    svfloat64_t result_02; \
    svfloat64_t result_10; \
    svfloat64_t result_11; \
    svfloat64_t result_12; \
    svfloat64_t result_20; \
    svfloat64_t result_21; \
    svfloat64_t result_22; \
    svfloat64_t result_30; \
    svfloat64_t result_31; \
    svfloat64_t result_32; \
    svfloat64_t Chi_00; \
    svfloat64_t Chi_01; \
    svfloat64_t Chi_02; \
    svfloat64_t Chi_10; \
    svfloat64_t Chi_11; \
    svfloat64_t Chi_12; \
    svfloat64_t UChi_00; \
    svfloat64_t UChi_01; \
    svfloat64_t UChi_02; \
    svfloat64_t UChi_10; \
    svfloat64_t UChi_11; \
    svfloat64_t UChi_12; \
    svfloat64_t U_00; \
    svfloat64_t U_10; \
    svfloat64_t U_20; \
    svfloat64_t U_01; \
    svfloat64_t U_11; \
    svfloat64_t U_21; \
    svbool_t pg1; \
    pg1 = svptrue_b64(); \
    svuint64_t table0; \
    svfloat64_t zero0; \
    zero0 = svdup_f64(0.);

#define Chimu_00 Chi_00
#define Chimu_01 Chi_01
#define Chimu_02 Chi_02
#define Chimu_10 Chi_10
#define Chimu_11 Chi_11
#define Chimu_12 Chi_12
#define Chimu_20 UChi_00
#define Chimu_21 UChi_01
#define Chimu_22 UChi_02
#define Chimu_30 UChi_10
#define Chimu_31 UChi_11
#define Chimu_32 UChi_12
// RESULT
#define RESULT_A64FXd(base) \
{ \
    svst1_vnum(pg1, (float64_t*)(base + 2 * 3 * 64),(int64_t)(-6), result_00); \
    svst1_vnum(pg1, (float64_t*)(base + 2 * 3 * 64),(int64_t)(-5), result_01); \
    svst1_vnum(pg1, (float64_t*)(base + 2 * 3 * 64),(int64_t)(-4), result_02); \
    svst1_vnum(pg1, (float64_t*)(base + 2 * 3 * 64),(int64_t)(-3), result_10); \
    svst1_vnum(pg1, (float64_t*)(base + 2 * 3 * 64),(int64_t)(-2), result_11); \
    svst1_vnum(pg1, (float64_t*)(base + 2 * 3 * 64),(int64_t)(-1), result_12); \
    svst1_vnum(pg1, (float64_t*)(base + 2 * 3 * 64),(int64_t)(0), result_20); \
    svst1_vnum(pg1, (float64_t*)(base + 2 * 3 * 64),(int64_t)(1), result_21); \
    svst1_vnum(pg1, (float64_t*)(base + 2 * 3 * 64),(int64_t)(2), result_22); \
    svst1_vnum(pg1, (float64_t*)(base + 2 * 3 * 64),(int64_t)(3), result_30); \
    svst1_vnum(pg1, (float64_t*)(base + 2 * 3 * 64),(int64_t)(4), result_31); \
    svst1_vnum(pg1, (float64_t*)(base + 2 * 3 * 64),(int64_t)(5), result_32); \
}
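Aside: svst1_vnum scales its vnum argument by the vector length, so with the 64 B vectors assumed throughout this file the offsets -6..5 around base + 2*3*64 address the twelve consecutive 64 B blocks starting at base. A tiny self-check of that arithmetic (assumed vector length only, not the intrinsic itself):

// Address arithmetic behind the vnum offsets above, assuming 512-bit (64 B)
// SVE vectors: base + 2*3*64 + vnum*64 walks blocks 0..11 from base.
#include <cassert>
#include <cstdint>

int main() {
  const std::int64_t base = 0x1000, VL = 64;
  for (std::int64_t vnum = -6; vnum <= 5; vnum++) {
    std::int64_t addr = base + 2 * 3 * VL + vnum * VL;
    assert(addr == base + (vnum + 6) * VL);  // twelve consecutive blocks
  }
}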
// PREFETCH_CHIMU_L2 (prefetch to L2)
#define PREFETCH_CHIMU_L2_INTERNAL_A64FXd(base) \
{ \
    svprfd_vnum(pg1, (void*)(base), (int64_t)(0), SV_PLDL2STRM); \
    svprfd_vnum(pg1, (void*)(base), (int64_t)(4), SV_PLDL2STRM); \
    svprfd_vnum(pg1, (void*)(base), (int64_t)(8), SV_PLDL2STRM); \
}
// PREFETCH_CHIMU_L1 (prefetch to L1)
#define PREFETCH_CHIMU_L1_INTERNAL_A64FXd(base) \
{ \
    svprfd_vnum(pg1, (void*)(base), (int64_t)(0), SV_PLDL1STRM); \
    svprfd_vnum(pg1, (void*)(base), (int64_t)(4), SV_PLDL1STRM); \
    svprfd_vnum(pg1, (void*)(base), (int64_t)(8), SV_PLDL1STRM); \
}
// PREFETCH_GAUGE_L2 (prefetch to L2)
#define PREFETCH_GAUGE_L2_INTERNAL_A64FXd(A) \
{ \
    const auto & ref(U[sUn](A)); baseU = (uint64_t)&ref + 3 * 3 * 64; \
    svprfd_vnum(pg1, (void*)(baseU), (int64_t)(-4), SV_PLDL2STRM); \
    svprfd_vnum(pg1, (void*)(baseU), (int64_t)(0), SV_PLDL2STRM); \
    svprfd_vnum(pg1, (void*)(baseU), (int64_t)(4), SV_PLDL2STRM); \
    svprfd_vnum(pg1, (void*)(baseU), (int64_t)(8), SV_PLDL2STRM); \
    svprfd_vnum(pg1, (void*)(baseU), (int64_t)(12), SV_PLDL2STRM); \
    svprfd_vnum(pg1, (void*)(baseU), (int64_t)(16), SV_PLDL2STRM); \
    svprfd_vnum(pg1, (void*)(baseU), (int64_t)(20), SV_PLDL2STRM); \
    svprfd_vnum(pg1, (void*)(baseU), (int64_t)(24), SV_PLDL2STRM); \
    svprfd_vnum(pg1, (void*)(baseU), (int64_t)(28), SV_PLDL2STRM); \
}
// PREFETCH_GAUGE_L1 (prefetch to L1)
#define PREFETCH_GAUGE_L1_INTERNAL_A64FXd(A) \
{ \
    const auto & ref(U[sU](A)); baseU = (uint64_t)&ref; \
    svprfd_vnum(pg1, (void*)(baseU), (int64_t)(0), SV_PLDL1STRM); \
    svprfd_vnum(pg1, (void*)(baseU), (int64_t)(4), SV_PLDL1STRM); \
    svprfd_vnum(pg1, (void*)(baseU), (int64_t)(8), SV_PLDL1STRM); \
}
// LOAD_CHI
#define LOAD_CHI_A64FXd(base) \
{ \
    Chi_00 = svld1_vnum(pg1, (float64_t*)(base), (int64_t)(0)); \
    Chi_01 = svld1_vnum(pg1, (float64_t*)(base), (int64_t)(1)); \
    Chi_02 = svld1_vnum(pg1, (float64_t*)(base), (int64_t)(2)); \
    Chi_10 = svld1_vnum(pg1, (float64_t*)(base), (int64_t)(3)); \
    Chi_11 = svld1_vnum(pg1, (float64_t*)(base), (int64_t)(4)); \
    Chi_12 = svld1_vnum(pg1, (float64_t*)(base), (int64_t)(5)); \
}
// LOAD_CHIMU
#define LOAD_CHIMU_INTERLEAVED_A64FXd(base) \
{ \
    Chimu_00 = svld1_vnum(pg1, (float64_t*)(base + 2 * 3 * 64), (int64_t)(-6)); \
    Chimu_30 = svld1_vnum(pg1, (float64_t*)(base + 2 * 3 * 64), (int64_t)(3)); \
    Chimu_10 = svld1_vnum(pg1, (float64_t*)(base + 2 * 3 * 64), (int64_t)(-3)); \
    Chimu_20 = svld1_vnum(pg1, (float64_t*)(base + 2 * 3 * 64), (int64_t)(0)); \
    Chimu_01 = svld1_vnum(pg1, (float64_t*)(base + 2 * 3 * 64), (int64_t)(-5)); \
    Chimu_31 = svld1_vnum(pg1, (float64_t*)(base + 2 * 3 * 64), (int64_t)(4)); \
    Chimu_11 = svld1_vnum(pg1, (float64_t*)(base + 2 * 3 * 64), (int64_t)(-2)); \
    Chimu_21 = svld1_vnum(pg1, (float64_t*)(base + 2 * 3 * 64), (int64_t)(1)); \
    Chimu_02 = svld1_vnum(pg1, (float64_t*)(base + 2 * 3 * 64), (int64_t)(-4)); \
    Chimu_32 = svld1_vnum(pg1, (float64_t*)(base + 2 * 3 * 64), (int64_t)(5)); \
    Chimu_12 = svld1_vnum(pg1, (float64_t*)(base + 2 * 3 * 64), (int64_t)(-1)); \
    Chimu_22 = svld1_vnum(pg1, (float64_t*)(base + 2 * 3 * 64), (int64_t)(2)); \
}
// LOAD_CHIMU_0213
#define LOAD_CHIMU_0213_A64FXd \
{ \
    const SiteSpinor & ref(in[offset]); \
    Chimu_00 = svld1_vnum(pg1, (float64_t*)(base + 2 * 3 * 64), (int64_t)(-6)); \
    Chimu_20 = svld1_vnum(pg1, (float64_t*)(base + 2 * 3 * 64), (int64_t)(0)); \
    Chimu_01 = svld1_vnum(pg1, (float64_t*)(base + 2 * 3 * 64), (int64_t)(-5)); \
    Chimu_21 = svld1_vnum(pg1, (float64_t*)(base + 2 * 3 * 64), (int64_t)(1)); \
    Chimu_02 = svld1_vnum(pg1, (float64_t*)(base + 2 * 3 * 64), (int64_t)(-4)); \
    Chimu_22 = svld1_vnum(pg1, (float64_t*)(base + 2 * 3 * 64), (int64_t)(2)); \
    Chimu_10 = svld1_vnum(pg1, (float64_t*)(base + 2 * 3 * 64), (int64_t)(-3)); \
    Chimu_30 = svld1_vnum(pg1, (float64_t*)(base + 2 * 3 * 64), (int64_t)(3)); \
    Chimu_11 = svld1_vnum(pg1, (float64_t*)(base + 2 * 3 * 64), (int64_t)(-2)); \
    Chimu_31 = svld1_vnum(pg1, (float64_t*)(base + 2 * 3 * 64), (int64_t)(4)); \
    Chimu_12 = svld1_vnum(pg1, (float64_t*)(base + 2 * 3 * 64), (int64_t)(-1)); \
    Chimu_32 = svld1_vnum(pg1, (float64_t*)(base + 2 * 3 * 64), (int64_t)(5)); \
}
// LOAD_CHIMU_0312
#define LOAD_CHIMU_0312_A64FXd \
{ \
    const SiteSpinor & ref(in[offset]); \
    Chimu_00 = svld1_vnum(pg1, (float64_t*)(base + 2 * 3 * 64), (int64_t)(-6)); \
    Chimu_30 = svld1_vnum(pg1, (float64_t*)(base + 2 * 3 * 64), (int64_t)(3)); \
    Chimu_01 = svld1_vnum(pg1, (float64_t*)(base + 2 * 3 * 64), (int64_t)(-5)); \
    Chimu_31 = svld1_vnum(pg1, (float64_t*)(base + 2 * 3 * 64), (int64_t)(4)); \
    Chimu_02 = svld1_vnum(pg1, (float64_t*)(base + 2 * 3 * 64), (int64_t)(-4)); \
    Chimu_32 = svld1_vnum(pg1, (float64_t*)(base + 2 * 3 * 64), (int64_t)(5)); \
    Chimu_10 = svld1_vnum(pg1, (float64_t*)(base + 2 * 3 * 64), (int64_t)(-3)); \
    Chimu_20 = svld1_vnum(pg1, (float64_t*)(base + 2 * 3 * 64), (int64_t)(0)); \
    Chimu_11 = svld1_vnum(pg1, (float64_t*)(base + 2 * 3 * 64), (int64_t)(-2)); \
    Chimu_21 = svld1_vnum(pg1, (float64_t*)(base + 2 * 3 * 64), (int64_t)(1)); \
    Chimu_12 = svld1_vnum(pg1, (float64_t*)(base + 2 * 3 * 64), (int64_t)(-1)); \
    Chimu_22 = svld1_vnum(pg1, (float64_t*)(base + 2 * 3 * 64), (int64_t)(2)); \
}
// LOAD_TABLE0
#define LOAD_TABLE0 \
    table0 = svld1(pg1, (uint64_t*)&lut[0]);

// LOAD_TABLE1
#define LOAD_TABLE1 \
    table0 = svld1(pg1, (uint64_t*)&lut[1]);

// LOAD_TABLE2
#define LOAD_TABLE2 \
    table0 = svld1(pg1, (uint64_t*)&lut[2]);

// LOAD_TABLE3
#define LOAD_TABLE3 \
    table0 = svld1(pg1, (uint64_t*)&lut[3]);

// PERMUTE
#define PERMUTE_A64FXd \
    Chi_00 = svtbl(Chi_00, table0); \
    Chi_01 = svtbl(Chi_01, table0); \
    Chi_02 = svtbl(Chi_02, table0); \
    Chi_10 = svtbl(Chi_10, table0); \
    Chi_11 = svtbl(Chi_11, table0); \
    Chi_12 = svtbl(Chi_12, table0);

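Aside: LOAD_TABLEn fills table0 with lane indices and PERMUTE applies them via svtbl, i.e. out[i] = in[lut[i]]; the four tables above encode the half-, quarter- and pair-swaps needed by the different permute directions. A scalar model, assuming 8 double lanes per vector:

// Scalar model of the svtbl-based PERMUTE: gather lanes through a lookup table.
#include <array>
#include <cstdint>

std::array<double, 8> tbl(const std::array<double, 8> &in,
                          const std::array<std::uint64_t, 8> &lut) {
  std::array<double, 8> out{};
  for (int i = 0; i < 8; i++) out[i] = in[lut[i]];  // out[i] = in[lut[i]]
  return out;
}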
// LOAD_GAUGE
#define LOAD_GAUGE(A) \
{ \
    const auto & ref(U[sU](A)); baseU = (uint64_t)&ref; \
    U_00 = svld1_vnum(pg1, (float64_t*)(baseU + 2 * 3 * 64), (int64_t)(-6)); \
    U_10 = svld1_vnum(pg1, (float64_t*)(baseU + 2 * 3 * 64), (int64_t)(-3)); \
    U_20 = svld1_vnum(pg1, (float64_t*)(baseU + 2 * 3 * 64), (int64_t)(0)); \
    U_01 = svld1_vnum(pg1, (float64_t*)(baseU + 2 * 3 * 64), (int64_t)(-5)); \
    U_11 = svld1_vnum(pg1, (float64_t*)(baseU + 2 * 3 * 64), (int64_t)(-2)); \
    U_21 = svld1_vnum(pg1, (float64_t*)(baseU + 2 * 3 * 64), (int64_t)(1)); \
}
// MULT_2SPIN
#define MULT_2SPIN_1_A64FXd(A) \
{ \
    const auto & ref(U[sU](A)); baseU = (uint64_t)&ref; \
    U_00 = svld1_vnum(pg1, (float64_t*)(baseU + 2 * 3 * 64), (int64_t)(-6)); \
    U_10 = svld1_vnum(pg1, (float64_t*)(baseU + 2 * 3 * 64), (int64_t)(-3)); \
    U_20 = svld1_vnum(pg1, (float64_t*)(baseU + 2 * 3 * 64), (int64_t)(0)); \
    U_01 = svld1_vnum(pg1, (float64_t*)(baseU + 2 * 3 * 64), (int64_t)(-5)); \
    U_11 = svld1_vnum(pg1, (float64_t*)(baseU + 2 * 3 * 64), (int64_t)(-2)); \
    U_21 = svld1_vnum(pg1, (float64_t*)(baseU + 2 * 3 * 64), (int64_t)(1)); \
    UChi_00 = svcmla_x(pg1, zero0, U_00, Chi_00, 0); \
    UChi_10 = svcmla_x(pg1, zero0, U_00, Chi_10, 0); \
    UChi_01 = svcmla_x(pg1, zero0, U_10, Chi_00, 0); \
    UChi_11 = svcmla_x(pg1, zero0, U_10, Chi_10, 0); \
    UChi_02 = svcmla_x(pg1, zero0, U_20, Chi_00, 0); \
    UChi_12 = svcmla_x(pg1, zero0, U_20, Chi_10, 0); \
    UChi_00 = svcmla_x(pg1, UChi_00, U_00, Chi_00, 90); \
    UChi_10 = svcmla_x(pg1, UChi_10, U_00, Chi_10, 90); \
    UChi_01 = svcmla_x(pg1, UChi_01, U_10, Chi_00, 90); \
    UChi_11 = svcmla_x(pg1, UChi_11, U_10, Chi_10, 90); \
    UChi_02 = svcmla_x(pg1, UChi_02, U_20, Chi_00, 90); \
    UChi_12 = svcmla_x(pg1, UChi_12, U_20, Chi_10, 90); \
    U_00 = svld1_vnum(pg1, (float64_t*)(baseU + 2 * 3 * 64), (int64_t)(-4)); \
    U_10 = svld1_vnum(pg1, (float64_t*)(baseU + 2 * 3 * 64), (int64_t)(-1)); \
    U_20 = svld1_vnum(pg1, (float64_t*)(baseU + 2 * 3 * 64), (int64_t)(2)); \
}
// MULT_2SPIN_BACKEND
#define MULT_2SPIN_2_A64FXd \
{ \
    UChi_00 = svcmla_x(pg1, UChi_00, U_01, Chi_01, 0); \
    UChi_10 = svcmla_x(pg1, UChi_10, U_01, Chi_11, 0); \
    UChi_01 = svcmla_x(pg1, UChi_01, U_11, Chi_01, 0); \
    UChi_11 = svcmla_x(pg1, UChi_11, U_11, Chi_11, 0); \
    UChi_02 = svcmla_x(pg1, UChi_02, U_21, Chi_01, 0); \
    UChi_12 = svcmla_x(pg1, UChi_12, U_21, Chi_11, 0); \
    UChi_00 = svcmla_x(pg1, UChi_00, U_01, Chi_01, 90); \
    UChi_10 = svcmla_x(pg1, UChi_10, U_01, Chi_11, 90); \
    UChi_01 = svcmla_x(pg1, UChi_01, U_11, Chi_01, 90); \
    UChi_11 = svcmla_x(pg1, UChi_11, U_11, Chi_11, 90); \
    UChi_02 = svcmla_x(pg1, UChi_02, U_21, Chi_01, 90); \
    UChi_12 = svcmla_x(pg1, UChi_12, U_21, Chi_11, 90); \
    UChi_00 = svcmla_x(pg1, UChi_00, U_00, Chi_02, 0); \
    UChi_10 = svcmla_x(pg1, UChi_10, U_00, Chi_12, 0); \
    UChi_01 = svcmla_x(pg1, UChi_01, U_10, Chi_02, 0); \
    UChi_11 = svcmla_x(pg1, UChi_11, U_10, Chi_12, 0); \
    UChi_02 = svcmla_x(pg1, UChi_02, U_20, Chi_02, 0); \
    UChi_12 = svcmla_x(pg1, UChi_12, U_20, Chi_12, 0); \
    UChi_00 = svcmla_x(pg1, UChi_00, U_00, Chi_02, 90); \
    UChi_10 = svcmla_x(pg1, UChi_10, U_00, Chi_12, 90); \
    UChi_01 = svcmla_x(pg1, UChi_01, U_10, Chi_02, 90); \
    UChi_11 = svcmla_x(pg1, UChi_11, U_10, Chi_12, 90); \
    UChi_02 = svcmla_x(pg1, UChi_02, U_20, Chi_02, 90); \
    UChi_12 = svcmla_x(pg1, UChi_12, U_20, Chi_12, 90); \
}
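Aside: each svcmla pair with rotations 0 and 90 in MULT_2SPIN_1/2 composes one complex multiply-accumulate, UChi += U * Chi, on interleaved re/im lanes. A scalar model under one consistent reading of the FCMLA rotations (the exact lane semantics are in the Arm SVE specification):

// Scalar picture of the 0/90 svcmla pairs: two rotated multiply-adds
// compose a full complex multiply-accumulate, acc += u * chi.
#include <complex>

inline void cmla_pair(std::complex<double> &acc,
                      std::complex<double> u,
                      std::complex<double> chi) {
  acc += std::complex<double>(u.real(), 0.0) * chi;  // rotation 0 contribution
  acc += std::complex<double>(0.0, u.imag()) * chi;  // rotation 90 contribution
}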
// XP_PROJ
#define XP_PROJ_A64FXd \
{ \
    Chi_00 = svcadd_x(pg1, Chimu_00, Chimu_30, 90); \
    Chi_01 = svcadd_x(pg1, Chimu_01, Chimu_31, 90); \
    Chi_02 = svcadd_x(pg1, Chimu_02, Chimu_32, 90); \
    Chi_10 = svcadd_x(pg1, Chimu_10, Chimu_20, 90); \
    Chi_11 = svcadd_x(pg1, Chimu_11, Chimu_21, 90); \
    Chi_12 = svcadd_x(pg1, Chimu_12, Chimu_22, 90); \
}
// XP_RECON
#define XP_RECON_A64FXd \
    result_20 = svcadd_x(pg1, zero0, UChi_10, 270); \
    result_21 = svcadd_x(pg1, zero0, UChi_11, 270); \
    result_22 = svcadd_x(pg1, zero0, UChi_12, 270); \
    result_30 = svcadd_x(pg1, zero0, UChi_00, 270); \
    result_31 = svcadd_x(pg1, zero0, UChi_01, 270); \
    result_32 = svcadd_x(pg1, zero0, UChi_02, 270); \
    result_00 = UChi_00; \
    result_01 = UChi_01; \
    result_02 = UChi_02; \
    result_10 = UChi_10; \
    result_11 = UChi_11; \
    result_12 = UChi_12;

// XP_RECON_ACCUM
#define XP_RECON_ACCUM_A64FXd \
    result_30 = svcadd_x(pg1, result_30, UChi_00, 270); \
    result_00 = svadd_x(pg1, result_00, UChi_00); \
    result_31 = svcadd_x(pg1, result_31, UChi_01, 270); \
    result_01 = svadd_x(pg1, result_01, UChi_01); \
    result_32 = svcadd_x(pg1, result_32, UChi_02, 270); \
    result_02 = svadd_x(pg1, result_02, UChi_02); \
    result_20 = svcadd_x(pg1, result_20, UChi_10, 270); \
    result_10 = svadd_x(pg1, result_10, UChi_10); \
    result_21 = svcadd_x(pg1, result_21, UChi_11, 270); \
    result_11 = svadd_x(pg1, result_11, UChi_11); \
    result_22 = svcadd_x(pg1, result_22, UChi_12, 270); \
    result_12 = svadd_x(pg1, result_12, UChi_12);

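Aside: the PROJ/RECON pairs implement the usual Wilson two-spinor compression: (1 +/- gamma_mu) has rank two, so only two spin components are multiplied by the gauge link and the other two are rebuilt as (+/-1, +/-i) copies. A sketch of the x-plus direction for one colour component (std::complex stand-in, not the SVE code):

// Sketch of XP_PROJ / XP_RECON for one colour component; psi[0..3] are the
// spin components, svcadd rotation 90 = "+ i*", rotation 270 = "- i*".
#include <array>
#include <complex>

using C = std::complex<double>;
const C I{0.0, 1.0};

void xp_roundtrip(const std::array<C, 4> &psi, std::array<C, 4> &res) {
  C chi0 = psi[0] + I * psi[3];  // XP_PROJ
  C chi1 = psi[1] + I * psi[2];
  // ... chi would be multiplied by the gauge link here ...
  res[0] = chi0;                 // XP_RECON
  res[1] = chi1;
  res[2] = -I * chi1;
  res[3] = -I * chi0;
}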
// YP_PROJ
#define YP_PROJ_A64FXd \
{ \
    Chi_00 = svsub_x(pg1, Chimu_00, Chimu_30); \
    Chi_01 = svsub_x(pg1, Chimu_01, Chimu_31); \
    Chi_02 = svsub_x(pg1, Chimu_02, Chimu_32); \
    Chi_10 = svadd_x(pg1, Chimu_10, Chimu_20); \
    Chi_11 = svadd_x(pg1, Chimu_11, Chimu_21); \
    Chi_12 = svadd_x(pg1, Chimu_12, Chimu_22); \
}
// ZP_PROJ
#define ZP_PROJ_A64FXd \
{ \
    Chi_00 = svcadd_x(pg1, Chimu_00, Chimu_20, 90); \
    Chi_01 = svcadd_x(pg1, Chimu_01, Chimu_21, 90); \
    Chi_02 = svcadd_x(pg1, Chimu_02, Chimu_22, 90); \
    Chi_10 = svcadd_x(pg1, Chimu_10, Chimu_30, 270); \
    Chi_11 = svcadd_x(pg1, Chimu_11, Chimu_31, 270); \
    Chi_12 = svcadd_x(pg1, Chimu_12, Chimu_32, 270); \
}
// TP_PROJ
#define TP_PROJ_A64FXd \
{ \
    Chi_00 = svadd_x(pg1, Chimu_00, Chimu_20); \
    Chi_01 = svadd_x(pg1, Chimu_01, Chimu_21); \
    Chi_02 = svadd_x(pg1, Chimu_02, Chimu_22); \
    Chi_10 = svadd_x(pg1, Chimu_10, Chimu_30); \
    Chi_11 = svadd_x(pg1, Chimu_11, Chimu_31); \
    Chi_12 = svadd_x(pg1, Chimu_12, Chimu_32); \
}
// XM_PROJ
#define XM_PROJ_A64FXd \
{ \
    Chi_00 = svcadd_x(pg1, Chimu_00, Chimu_30, 270); \
    Chi_01 = svcadd_x(pg1, Chimu_01, Chimu_31, 270); \
    Chi_02 = svcadd_x(pg1, Chimu_02, Chimu_32, 270); \
    Chi_10 = svcadd_x(pg1, Chimu_10, Chimu_20, 270); \
    Chi_11 = svcadd_x(pg1, Chimu_11, Chimu_21, 270); \
    Chi_12 = svcadd_x(pg1, Chimu_12, Chimu_22, 270); \
}
// XM_RECON
#define XM_RECON_A64FXd \
    result_20 = svcadd_x(pg1, zero0, UChi_10, 90); \
    result_21 = svcadd_x(pg1, zero0, UChi_11, 90); \
    result_22 = svcadd_x(pg1, zero0, UChi_12, 90); \
    result_30 = svcadd_x(pg1, zero0, UChi_00, 90); \
    result_31 = svcadd_x(pg1, zero0, UChi_01, 90); \
    result_32 = svcadd_x(pg1, zero0, UChi_02, 90); \
    result_00 = UChi_00; \
    result_01 = UChi_01; \
    result_02 = UChi_02; \
    result_10 = UChi_10; \
    result_11 = UChi_11; \
    result_12 = UChi_12;

// YM_PROJ
#define YM_PROJ_A64FXd \
{ \
    Chi_00 = svadd_x(pg1, Chimu_00, Chimu_30); \
    Chi_01 = svadd_x(pg1, Chimu_01, Chimu_31); \
    Chi_02 = svadd_x(pg1, Chimu_02, Chimu_32); \
    Chi_10 = svsub_x(pg1, Chimu_10, Chimu_20); \
    Chi_11 = svsub_x(pg1, Chimu_11, Chimu_21); \
    Chi_12 = svsub_x(pg1, Chimu_12, Chimu_22); \
}
// ZM_PROJ
#define ZM_PROJ_A64FXd \
{ \
    Chi_00 = svcadd_x(pg1, Chimu_00, Chimu_20, 270); \
    Chi_01 = svcadd_x(pg1, Chimu_01, Chimu_21, 270); \
    Chi_02 = svcadd_x(pg1, Chimu_02, Chimu_22, 270); \
    Chi_10 = svcadd_x(pg1, Chimu_10, Chimu_30, 90); \
    Chi_11 = svcadd_x(pg1, Chimu_11, Chimu_31, 90); \
    Chi_12 = svcadd_x(pg1, Chimu_12, Chimu_32, 90); \
}
// TM_PROJ
#define TM_PROJ_A64FXd \
{ \
    Chi_00 = svsub_x(pg1, Chimu_00, Chimu_20); \
    Chi_01 = svsub_x(pg1, Chimu_01, Chimu_21); \
    Chi_02 = svsub_x(pg1, Chimu_02, Chimu_22); \
    Chi_10 = svsub_x(pg1, Chimu_10, Chimu_30); \
    Chi_11 = svsub_x(pg1, Chimu_11, Chimu_31); \
    Chi_12 = svsub_x(pg1, Chimu_12, Chimu_32); \
}
// XM_RECON_ACCUM
#define XM_RECON_ACCUM_A64FXd \
    result_30 = svcadd_x(pg1, result_30, UChi_00, 90); \
    result_31 = svcadd_x(pg1, result_31, UChi_01, 90); \
    result_32 = svcadd_x(pg1, result_32, UChi_02, 90); \
    result_20 = svcadd_x(pg1, result_20, UChi_10, 90); \
    result_21 = svcadd_x(pg1, result_21, UChi_11, 90); \
    result_22 = svcadd_x(pg1, result_22, UChi_12, 90); \
    result_00 = svadd_x(pg1, result_00, UChi_00); \
    result_01 = svadd_x(pg1, result_01, UChi_01); \
    result_02 = svadd_x(pg1, result_02, UChi_02); \
    result_10 = svadd_x(pg1, result_10, UChi_10); \
    result_11 = svadd_x(pg1, result_11, UChi_11); \
    result_12 = svadd_x(pg1, result_12, UChi_12);

// YP_RECON_ACCUM
#define YP_RECON_ACCUM_A64FXd \
    result_00 = svadd_x(pg1, result_00, UChi_00); \
    result_30 = svsub_x(pg1, result_30, UChi_00); \
    result_01 = svadd_x(pg1, result_01, UChi_01); \
    result_31 = svsub_x(pg1, result_31, UChi_01); \
    result_02 = svadd_x(pg1, result_02, UChi_02); \
    result_32 = svsub_x(pg1, result_32, UChi_02); \
    result_10 = svadd_x(pg1, result_10, UChi_10); \
    result_20 = svadd_x(pg1, result_20, UChi_10); \
    result_11 = svadd_x(pg1, result_11, UChi_11); \
    result_21 = svadd_x(pg1, result_21, UChi_11); \
    result_12 = svadd_x(pg1, result_12, UChi_12); \
    result_22 = svadd_x(pg1, result_22, UChi_12);

// YM_RECON_ACCUM
#define YM_RECON_ACCUM_A64FXd \
    result_00 = svadd_x(pg1, result_00, UChi_00); \
    result_30 = svadd_x(pg1, result_30, UChi_00); \
    result_01 = svadd_x(pg1, result_01, UChi_01); \
    result_31 = svadd_x(pg1, result_31, UChi_01); \
    result_02 = svadd_x(pg1, result_02, UChi_02); \
    result_32 = svadd_x(pg1, result_32, UChi_02); \
    result_10 = svadd_x(pg1, result_10, UChi_10); \
    result_20 = svsub_x(pg1, result_20, UChi_10); \
    result_11 = svadd_x(pg1, result_11, UChi_11); \
    result_21 = svsub_x(pg1, result_21, UChi_11); \
    result_12 = svadd_x(pg1, result_12, UChi_12); \
    result_22 = svsub_x(pg1, result_22, UChi_12);

// ZP_RECON_ACCUM
#define ZP_RECON_ACCUM_A64FXd \
    result_20 = svcadd_x(pg1, result_20, UChi_00, 270); \
    result_00 = svadd_x(pg1, result_00, UChi_00); \
    result_21 = svcadd_x(pg1, result_21, UChi_01, 270); \
    result_01 = svadd_x(pg1, result_01, UChi_01); \
    result_22 = svcadd_x(pg1, result_22, UChi_02, 270); \
    result_02 = svadd_x(pg1, result_02, UChi_02); \
    result_30 = svcadd_x(pg1, result_30, UChi_10, 90); \
    result_10 = svadd_x(pg1, result_10, UChi_10); \
    result_31 = svcadd_x(pg1, result_31, UChi_11, 90); \
    result_11 = svadd_x(pg1, result_11, UChi_11); \
    result_32 = svcadd_x(pg1, result_32, UChi_12, 90); \
    result_12 = svadd_x(pg1, result_12, UChi_12);

// ZM_RECON_ACCUM
#define ZM_RECON_ACCUM_A64FXd \
    result_20 = svcadd_x(pg1, result_20, UChi_00, 90); \
    result_00 = svadd_x(pg1, result_00, UChi_00); \
    result_21 = svcadd_x(pg1, result_21, UChi_01, 90); \
    result_01 = svadd_x(pg1, result_01, UChi_01); \
    result_22 = svcadd_x(pg1, result_22, UChi_02, 90); \
    result_02 = svadd_x(pg1, result_02, UChi_02); \
    result_30 = svcadd_x(pg1, result_30, UChi_10, 270); \
    result_10 = svadd_x(pg1, result_10, UChi_10); \
    result_31 = svcadd_x(pg1, result_31, UChi_11, 270); \
    result_11 = svadd_x(pg1, result_11, UChi_11); \
    result_32 = svcadd_x(pg1, result_32, UChi_12, 270); \
    result_12 = svadd_x(pg1, result_12, UChi_12);

// TP_RECON_ACCUM
#define TP_RECON_ACCUM_A64FXd \
    result_00 = svadd_x(pg1, result_00, UChi_00); \
    result_20 = svadd_x(pg1, result_20, UChi_00); \
    result_01 = svadd_x(pg1, result_01, UChi_01); \
    result_21 = svadd_x(pg1, result_21, UChi_01); \
    result_02 = svadd_x(pg1, result_02, UChi_02); \
    result_22 = svadd_x(pg1, result_22, UChi_02); \
    result_10 = svadd_x(pg1, result_10, UChi_10); \
    result_30 = svadd_x(pg1, result_30, UChi_10); \
    result_11 = svadd_x(pg1, result_11, UChi_11); \
    result_31 = svadd_x(pg1, result_31, UChi_11); \
    result_12 = svadd_x(pg1, result_12, UChi_12); \
    result_32 = svadd_x(pg1, result_32, UChi_12);

// TM_RECON_ACCUM
#define TM_RECON_ACCUM_A64FXd \
    result_00 = svadd_x(pg1, result_00, UChi_00); \
    result_20 = svsub_x(pg1, result_20, UChi_00); \
    result_01 = svadd_x(pg1, result_01, UChi_01); \
    result_21 = svsub_x(pg1, result_21, UChi_01); \
    result_02 = svadd_x(pg1, result_02, UChi_02); \
    result_22 = svsub_x(pg1, result_22, UChi_02); \
    result_10 = svadd_x(pg1, result_10, UChi_10); \
    result_30 = svsub_x(pg1, result_30, UChi_10); \
    result_11 = svadd_x(pg1, result_11, UChi_11); \
    result_31 = svsub_x(pg1, result_31, UChi_11); \
    result_12 = svadd_x(pg1, result_12, UChi_12); \
    result_32 = svsub_x(pg1, result_32, UChi_12);

// ZERO_PSI
#define ZERO_PSI_A64FXd \
    result_00 = svdup_f64(0.); \
    result_01 = svdup_f64(0.); \
    result_02 = svdup_f64(0.); \
    result_10 = svdup_f64(0.); \
    result_11 = svdup_f64(0.); \
    result_12 = svdup_f64(0.); \
    result_20 = svdup_f64(0.); \
    result_21 = svdup_f64(0.); \
    result_22 = svdup_f64(0.); \
    result_30 = svdup_f64(0.); \
    result_31 = svdup_f64(0.); \
    result_32 = svdup_f64(0.);

// PREFETCH_RESULT_L2_STORE (uses DC ZVA for cache line zeroing)
#define PREFETCH_RESULT_L2_STORE_INTERNAL_A64FXd(base) \
{ \
    asm( "dc zva, %[fetchptr] \n\t" : : [fetchptr] "r" (base + 256 * 0) : "memory" ); \
    asm( "dc zva, %[fetchptr] \n\t" : : [fetchptr] "r" (base + 256 * 1) : "memory" ); \
    asm( "dc zva, %[fetchptr] \n\t" : : [fetchptr] "r" (base + 256 * 2) : "memory" ); \
}
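Aside: assuming the 256 B DC ZVA block size of the A64FX, the three zva operations above pre-zero exactly the 12 x 64 B = 768 B result spinor, claiming the destination cache lines without first reading them from memory:

// Hedged self-check of the tile/ZVA-block match assumed above.
static_assert(12 * 64 == 3 * 256, "result tile is exactly three 256 B ZVA blocks");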
// PREFETCH_RESULT_L1_STORE (prefetch store to L1)
#define PREFETCH_RESULT_L1_STORE_INTERNAL_A64FXd(base) \
{ \
    svprfd(pg1, (int64_t*)(base + 0), SV_PSTL1STRM); \
    svprfd(pg1, (int64_t*)(base + 256), SV_PSTL1STRM); \
    svprfd(pg1, (int64_t*)(base + 512), SV_PSTL1STRM); \
}
// ADD_RESULT_INTERNAL
#define ADD_RESULT_INTERNAL_A64FXd \
    result_00 = svadd_x(pg1, result_00, Chimu_00); \
    result_01 = svadd_x(pg1, result_01, Chimu_01); \
    result_02 = svadd_x(pg1, result_02, Chimu_02); \
    result_10 = svadd_x(pg1, result_10, Chimu_10); \
    result_11 = svadd_x(pg1, result_11, Chimu_11); \
    result_12 = svadd_x(pg1, result_12, Chimu_12); \
    result_20 = svadd_x(pg1, result_20, Chimu_20); \
    result_21 = svadd_x(pg1, result_21, Chimu_21); \
    result_22 = svadd_x(pg1, result_22, Chimu_22); \
    result_30 = svadd_x(pg1, result_30, Chimu_30); \
    result_31 = svadd_x(pg1, result_31, Chimu_31); \
    result_32 = svadd_x(pg1, result_32, Chimu_32);

603 Grid/simd/Fujitsu_A64FX_intrin_single.h Normal file
@@ -0,0 +1,603 @@
/*************************************************************************************

    Grid physics library, www.github.com/paboyle/Grid

    Source file: Fujitsu_A64FX_intrin_single.h

    Copyright (C) 2020

    Author: Nils Meyer <nils.meyer@ur.de>

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License along
    with this program; if not, write to the Free Software Foundation, Inc.,
    51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.

    See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */
#define LOAD_CHIMU(base)             LOAD_CHIMU_INTERLEAVED_A64FXf(base)
#define PREFETCH_CHIMU_L1(A)         PREFETCH_CHIMU_L1_INTERNAL_A64FXf(A)
#define PREFETCH_GAUGE_L1(A)         PREFETCH_GAUGE_L1_INTERNAL_A64FXf(A)
#define PREFETCH_CHIMU_L2(A)         PREFETCH_CHIMU_L2_INTERNAL_A64FXf(A)
#define PREFETCH_GAUGE_L2(A)         PREFETCH_GAUGE_L2_INTERNAL_A64FXf(A)
#define PF_GAUGE(A)
#define PREFETCH_RESULT_L2_STORE(A)  PREFETCH_RESULT_L2_STORE_INTERNAL_A64FXf(A)
#define PREFETCH_RESULT_L1_STORE(A)  PREFETCH_RESULT_L1_STORE_INTERNAL_A64FXf(A)
#define PREFETCH1_CHIMU(A)           PREFETCH_CHIMU_L1(A)
#define PREFETCH_CHIMU(A)            PREFETCH_CHIMU_L1(A)
#define LOCK_GAUGE(A)
#define UNLOCK_GAUGE(A)
#define MASK_REGS                    DECLARATIONS_A64FXf
#define SAVE_RESULT(A,B)             RESULT_A64FXf(A);
#define MULT_2SPIN_1(Dir)            MULT_2SPIN_1_A64FXf(Dir)
#define MULT_2SPIN_2                 MULT_2SPIN_2_A64FXf
#define LOAD_CHI(base)               LOAD_CHI_A64FXf(base)
#define ZERO_PSI                     ZERO_PSI_A64FXf
#define ADD_RESULT(base,basep)       LOAD_CHIMU(base); ADD_RESULT_INTERNAL_A64FXf; RESULT_A64FXf(base)
#define XP_PROJ                      XP_PROJ_A64FXf
#define YP_PROJ                      YP_PROJ_A64FXf
#define ZP_PROJ                      ZP_PROJ_A64FXf
#define TP_PROJ                      TP_PROJ_A64FXf
#define XM_PROJ                      XM_PROJ_A64FXf
#define YM_PROJ                      YM_PROJ_A64FXf
#define ZM_PROJ                      ZM_PROJ_A64FXf
#define TM_PROJ                      TM_PROJ_A64FXf
#define XP_RECON                     XP_RECON_A64FXf
#define XM_RECON                     XM_RECON_A64FXf
#define XM_RECON_ACCUM               XM_RECON_ACCUM_A64FXf
#define YM_RECON_ACCUM               YM_RECON_ACCUM_A64FXf
#define ZM_RECON_ACCUM               ZM_RECON_ACCUM_A64FXf
#define TM_RECON_ACCUM               TM_RECON_ACCUM_A64FXf
#define XP_RECON_ACCUM               XP_RECON_ACCUM_A64FXf
#define YP_RECON_ACCUM               YP_RECON_ACCUM_A64FXf
#define ZP_RECON_ACCUM               ZP_RECON_ACCUM_A64FXf
#define TP_RECON_ACCUM               TP_RECON_ACCUM_A64FXf
#define PERMUTE_DIR0                 0
#define PERMUTE_DIR1                 1
#define PERMUTE_DIR2                 2
#define PERMUTE_DIR3                 3
#define PERMUTE                      PERMUTE_A64FXf;
#define LOAD_TABLE(Dir)              if (Dir == 0) { LOAD_TABLE0; } else if (Dir == 1) { LOAD_TABLE1 } else if (Dir == 2) { LOAD_TABLE2; } else if (Dir == 3) { LOAD_TABLE3; }
#define MAYBEPERM(A,perm)            if (perm) { PERMUTE; }
// DECLARATIONS
#define DECLARATIONS_A64FXf \
    uint64_t baseU; \
    const uint32_t lut[4][16] = { \
        {8, 9, 10, 11, 12, 13, 14, 15, 0, 1, 2, 3, 4, 5, 6, 7}, \
        {4, 5, 6, 7, 0, 1, 2, 3, 12, 13, 14, 15, 8, 9, 10, 11}, \
        {2, 3, 0, 1, 6, 7, 4, 5, 10, 11, 8, 9, 14, 15, 12, 13}, \
        {1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14} }; \
    svfloat32_t result_00; \
    svfloat32_t result_01; \
    svfloat32_t result_02; \
    svfloat32_t result_10; \
    svfloat32_t result_11; \
    svfloat32_t result_12; \
    svfloat32_t result_20; \
    svfloat32_t result_21; \
    svfloat32_t result_22; \
    svfloat32_t result_30; \
    svfloat32_t result_31; \
    svfloat32_t result_32; \
    svfloat32_t Chi_00; \
    svfloat32_t Chi_01; \
    svfloat32_t Chi_02; \
    svfloat32_t Chi_10; \
    svfloat32_t Chi_11; \
    svfloat32_t Chi_12; \
    svfloat32_t UChi_00; \
    svfloat32_t UChi_01; \
    svfloat32_t UChi_02; \
    svfloat32_t UChi_10; \
    svfloat32_t UChi_11; \
    svfloat32_t UChi_12; \
    svfloat32_t U_00; \
    svfloat32_t U_10; \
    svfloat32_t U_20; \
    svfloat32_t U_01; \
    svfloat32_t U_11; \
    svfloat32_t U_21; \
    svbool_t pg1; \
    pg1 = svptrue_b32(); \
    svuint32_t table0; \
    svfloat32_t zero0; \
    zero0 = svdup_f32(0.);

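Aside: the 8-entry uint64_t tables in the double kernel and the 16-entry uint32_t tables here reflect the lane counts of the assumed 512-bit SVE implementation:

// Lane counts behind the 8-entry double and 16-entry float permute tables.
static_assert(512 / 64 == 8,  "doubles per 512-bit vector (lut[4][8] in the double kernel)");
static_assert(512 / 32 == 16, "floats per 512-bit vector (lut[4][16] here)");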
#define Chimu_00 Chi_00
#define Chimu_01 Chi_01
#define Chimu_02 Chi_02
#define Chimu_10 Chi_10
#define Chimu_11 Chi_11
#define Chimu_12 Chi_12
#define Chimu_20 UChi_00
#define Chimu_21 UChi_01
#define Chimu_22 UChi_02
#define Chimu_30 UChi_10
#define Chimu_31 UChi_11
#define Chimu_32 UChi_12
// RESULT
#define RESULT_A64FXf(base) \
{ \
    svst1_vnum(pg1, (float32_t*)(base + 2 * 3 * 64),(int64_t)(-6), result_00); \
    svst1_vnum(pg1, (float32_t*)(base + 2 * 3 * 64),(int64_t)(-5), result_01); \
    svst1_vnum(pg1, (float32_t*)(base + 2 * 3 * 64),(int64_t)(-4), result_02); \
    svst1_vnum(pg1, (float32_t*)(base + 2 * 3 * 64),(int64_t)(-3), result_10); \
    svst1_vnum(pg1, (float32_t*)(base + 2 * 3 * 64),(int64_t)(-2), result_11); \
    svst1_vnum(pg1, (float32_t*)(base + 2 * 3 * 64),(int64_t)(-1), result_12); \
    svst1_vnum(pg1, (float32_t*)(base + 2 * 3 * 64),(int64_t)(0), result_20); \
    svst1_vnum(pg1, (float32_t*)(base + 2 * 3 * 64),(int64_t)(1), result_21); \
    svst1_vnum(pg1, (float32_t*)(base + 2 * 3 * 64),(int64_t)(2), result_22); \
    svst1_vnum(pg1, (float32_t*)(base + 2 * 3 * 64),(int64_t)(3), result_30); \
    svst1_vnum(pg1, (float32_t*)(base + 2 * 3 * 64),(int64_t)(4), result_31); \
    svst1_vnum(pg1, (float32_t*)(base + 2 * 3 * 64),(int64_t)(5), result_32); \
}
// PREFETCH_CHIMU_L2 (prefetch to L2)
#define PREFETCH_CHIMU_L2_INTERNAL_A64FXf(base) \
{ \
    svprfd_vnum(pg1, (void*)(base), (int64_t)(0), SV_PLDL2STRM); \
    svprfd_vnum(pg1, (void*)(base), (int64_t)(4), SV_PLDL2STRM); \
    svprfd_vnum(pg1, (void*)(base), (int64_t)(8), SV_PLDL2STRM); \
}
// PREFETCH_CHIMU_L1 (prefetch to L1)
#define PREFETCH_CHIMU_L1_INTERNAL_A64FXf(base) \
{ \
    svprfd_vnum(pg1, (void*)(base), (int64_t)(0), SV_PLDL1STRM); \
    svprfd_vnum(pg1, (void*)(base), (int64_t)(4), SV_PLDL1STRM); \
    svprfd_vnum(pg1, (void*)(base), (int64_t)(8), SV_PLDL1STRM); \
}
// PREFETCH_GAUGE_L2 (prefetch to L2)
#define PREFETCH_GAUGE_L2_INTERNAL_A64FXf(A) \
{ \
    const auto & ref(U[sUn](A)); baseU = (uint64_t)&ref + 3 * 3 * 64; \
    svprfd_vnum(pg1, (void*)(baseU), (int64_t)(-4), SV_PLDL2STRM); \
    svprfd_vnum(pg1, (void*)(baseU), (int64_t)(0), SV_PLDL2STRM); \
    svprfd_vnum(pg1, (void*)(baseU), (int64_t)(4), SV_PLDL2STRM); \
    svprfd_vnum(pg1, (void*)(baseU), (int64_t)(8), SV_PLDL2STRM); \
    svprfd_vnum(pg1, (void*)(baseU), (int64_t)(12), SV_PLDL2STRM); \
    svprfd_vnum(pg1, (void*)(baseU), (int64_t)(16), SV_PLDL2STRM); \
    svprfd_vnum(pg1, (void*)(baseU), (int64_t)(20), SV_PLDL2STRM); \
    svprfd_vnum(pg1, (void*)(baseU), (int64_t)(24), SV_PLDL2STRM); \
    svprfd_vnum(pg1, (void*)(baseU), (int64_t)(28), SV_PLDL2STRM); \
}
// PREFETCH_GAUGE_L1 (prefetch to L1)
#define PREFETCH_GAUGE_L1_INTERNAL_A64FXf(A) \
{ \
    const auto & ref(U[sU](A)); baseU = (uint64_t)&ref; \
    svprfd_vnum(pg1, (void*)(baseU), (int64_t)(0), SV_PLDL1STRM); \
    svprfd_vnum(pg1, (void*)(baseU), (int64_t)(4), SV_PLDL1STRM); \
    svprfd_vnum(pg1, (void*)(baseU), (int64_t)(8), SV_PLDL1STRM); \
}
// LOAD_CHI
#define LOAD_CHI_A64FXf(base) \
{ \
    Chi_00 = svld1_vnum(pg1, (float32_t*)(base), (int64_t)(0)); \
    Chi_01 = svld1_vnum(pg1, (float32_t*)(base), (int64_t)(1)); \
    Chi_02 = svld1_vnum(pg1, (float32_t*)(base), (int64_t)(2)); \
    Chi_10 = svld1_vnum(pg1, (float32_t*)(base), (int64_t)(3)); \
    Chi_11 = svld1_vnum(pg1, (float32_t*)(base), (int64_t)(4)); \
    Chi_12 = svld1_vnum(pg1, (float32_t*)(base), (int64_t)(5)); \
}
// LOAD_CHIMU
#define LOAD_CHIMU_INTERLEAVED_A64FXf(base) \
{ \
    Chimu_00 = svld1_vnum(pg1, (float32_t*)(base + 2 * 3 * 64), (int64_t)(-6)); \
    Chimu_30 = svld1_vnum(pg1, (float32_t*)(base + 2 * 3 * 64), (int64_t)(3)); \
    Chimu_10 = svld1_vnum(pg1, (float32_t*)(base + 2 * 3 * 64), (int64_t)(-3)); \
    Chimu_20 = svld1_vnum(pg1, (float32_t*)(base + 2 * 3 * 64), (int64_t)(0)); \
    Chimu_01 = svld1_vnum(pg1, (float32_t*)(base + 2 * 3 * 64), (int64_t)(-5)); \
    Chimu_31 = svld1_vnum(pg1, (float32_t*)(base + 2 * 3 * 64), (int64_t)(4)); \
    Chimu_11 = svld1_vnum(pg1, (float32_t*)(base + 2 * 3 * 64), (int64_t)(-2)); \
    Chimu_21 = svld1_vnum(pg1, (float32_t*)(base + 2 * 3 * 64), (int64_t)(1)); \
    Chimu_02 = svld1_vnum(pg1, (float32_t*)(base + 2 * 3 * 64), (int64_t)(-4)); \
    Chimu_32 = svld1_vnum(pg1, (float32_t*)(base + 2 * 3 * 64), (int64_t)(5)); \
    Chimu_12 = svld1_vnum(pg1, (float32_t*)(base + 2 * 3 * 64), (int64_t)(-1)); \
    Chimu_22 = svld1_vnum(pg1, (float32_t*)(base + 2 * 3 * 64), (int64_t)(2)); \
}
// LOAD_CHIMU_0213
#define LOAD_CHIMU_0213_A64FXf \
{ \
    const SiteSpinor & ref(in[offset]); \
    Chimu_00 = svld1_vnum(pg1, (float32_t*)(base + 2 * 3 * 64), (int64_t)(-6)); \
    Chimu_20 = svld1_vnum(pg1, (float32_t*)(base + 2 * 3 * 64), (int64_t)(0)); \
    Chimu_01 = svld1_vnum(pg1, (float32_t*)(base + 2 * 3 * 64), (int64_t)(-5)); \
    Chimu_21 = svld1_vnum(pg1, (float32_t*)(base + 2 * 3 * 64), (int64_t)(1)); \
    Chimu_02 = svld1_vnum(pg1, (float32_t*)(base + 2 * 3 * 64), (int64_t)(-4)); \
    Chimu_22 = svld1_vnum(pg1, (float32_t*)(base + 2 * 3 * 64), (int64_t)(2)); \
    Chimu_10 = svld1_vnum(pg1, (float32_t*)(base + 2 * 3 * 64), (int64_t)(-3)); \
    Chimu_30 = svld1_vnum(pg1, (float32_t*)(base + 2 * 3 * 64), (int64_t)(3)); \
    Chimu_11 = svld1_vnum(pg1, (float32_t*)(base + 2 * 3 * 64), (int64_t)(-2)); \
    Chimu_31 = svld1_vnum(pg1, (float32_t*)(base + 2 * 3 * 64), (int64_t)(4)); \
    Chimu_12 = svld1_vnum(pg1, (float32_t*)(base + 2 * 3 * 64), (int64_t)(-1)); \
    Chimu_32 = svld1_vnum(pg1, (float32_t*)(base + 2 * 3 * 64), (int64_t)(5)); \
}
// LOAD_CHIMU_0312
#define LOAD_CHIMU_0312_A64FXf \
{ \
    const SiteSpinor & ref(in[offset]); \
    Chimu_00 = svld1_vnum(pg1, (float32_t*)(base + 2 * 3 * 64), (int64_t)(-6)); \
    Chimu_30 = svld1_vnum(pg1, (float32_t*)(base + 2 * 3 * 64), (int64_t)(3)); \
    Chimu_01 = svld1_vnum(pg1, (float32_t*)(base + 2 * 3 * 64), (int64_t)(-5)); \
    Chimu_31 = svld1_vnum(pg1, (float32_t*)(base + 2 * 3 * 64), (int64_t)(4)); \
    Chimu_02 = svld1_vnum(pg1, (float32_t*)(base + 2 * 3 * 64), (int64_t)(-4)); \
    Chimu_32 = svld1_vnum(pg1, (float32_t*)(base + 2 * 3 * 64), (int64_t)(5)); \
    Chimu_10 = svld1_vnum(pg1, (float32_t*)(base + 2 * 3 * 64), (int64_t)(-3)); \
    Chimu_20 = svld1_vnum(pg1, (float32_t*)(base + 2 * 3 * 64), (int64_t)(0)); \
    Chimu_11 = svld1_vnum(pg1, (float32_t*)(base + 2 * 3 * 64), (int64_t)(-2)); \
    Chimu_21 = svld1_vnum(pg1, (float32_t*)(base + 2 * 3 * 64), (int64_t)(1)); \
    Chimu_12 = svld1_vnum(pg1, (float32_t*)(base + 2 * 3 * 64), (int64_t)(-1)); \
    Chimu_22 = svld1_vnum(pg1, (float32_t*)(base + 2 * 3 * 64), (int64_t)(2)); \
}
// LOAD_TABLE0
#define LOAD_TABLE0 \
    table0 = svld1(pg1, (uint32_t*)&lut[0]);

// LOAD_TABLE1
#define LOAD_TABLE1 \
    table0 = svld1(pg1, (uint32_t*)&lut[1]);

// LOAD_TABLE2
#define LOAD_TABLE2 \
    table0 = svld1(pg1, (uint32_t*)&lut[2]);

// LOAD_TABLE3
#define LOAD_TABLE3 \
    table0 = svld1(pg1, (uint32_t*)&lut[3]);

// PERMUTE
#define PERMUTE_A64FXf \
    Chi_00 = svtbl(Chi_00, table0); \
    Chi_01 = svtbl(Chi_01, table0); \
    Chi_02 = svtbl(Chi_02, table0); \
    Chi_10 = svtbl(Chi_10, table0); \
    Chi_11 = svtbl(Chi_11, table0); \
    Chi_12 = svtbl(Chi_12, table0);

// LOAD_GAUGE
#define LOAD_GAUGE(A) \
{ \
    const auto & ref(U[sU](A)); baseU = (uint64_t)&ref; \
    U_00 = svld1_vnum(pg1, (float32_t*)(baseU + 2 * 3 * 64), (int64_t)(-6)); \
    U_10 = svld1_vnum(pg1, (float32_t*)(baseU + 2 * 3 * 64), (int64_t)(-3)); \
    U_20 = svld1_vnum(pg1, (float32_t*)(baseU + 2 * 3 * 64), (int64_t)(0)); \
    U_01 = svld1_vnum(pg1, (float32_t*)(baseU + 2 * 3 * 64), (int64_t)(-5)); \
    U_11 = svld1_vnum(pg1, (float32_t*)(baseU + 2 * 3 * 64), (int64_t)(-2)); \
    U_21 = svld1_vnum(pg1, (float32_t*)(baseU + 2 * 3 * 64), (int64_t)(1)); \
}
// MULT_2SPIN
#define MULT_2SPIN_1_A64FXf(A) \
{ \
    const auto & ref(U[sU](A)); baseU = (uint64_t)&ref; \
    U_00 = svld1_vnum(pg1, (float32_t*)(baseU + 2 * 3 * 64), (int64_t)(-6)); \
    U_10 = svld1_vnum(pg1, (float32_t*)(baseU + 2 * 3 * 64), (int64_t)(-3)); \
    U_20 = svld1_vnum(pg1, (float32_t*)(baseU + 2 * 3 * 64), (int64_t)(0)); \
    U_01 = svld1_vnum(pg1, (float32_t*)(baseU + 2 * 3 * 64), (int64_t)(-5)); \
    U_11 = svld1_vnum(pg1, (float32_t*)(baseU + 2 * 3 * 64), (int64_t)(-2)); \
    U_21 = svld1_vnum(pg1, (float32_t*)(baseU + 2 * 3 * 64), (int64_t)(1)); \
    UChi_00 = svcmla_x(pg1, zero0, U_00, Chi_00, 0); \
    UChi_10 = svcmla_x(pg1, zero0, U_00, Chi_10, 0); \
    UChi_01 = svcmla_x(pg1, zero0, U_10, Chi_00, 0); \
    UChi_11 = svcmla_x(pg1, zero0, U_10, Chi_10, 0); \
    UChi_02 = svcmla_x(pg1, zero0, U_20, Chi_00, 0); \
    UChi_12 = svcmla_x(pg1, zero0, U_20, Chi_10, 0); \
    UChi_00 = svcmla_x(pg1, UChi_00, U_00, Chi_00, 90); \
    UChi_10 = svcmla_x(pg1, UChi_10, U_00, Chi_10, 90); \
    UChi_01 = svcmla_x(pg1, UChi_01, U_10, Chi_00, 90); \
    UChi_11 = svcmla_x(pg1, UChi_11, U_10, Chi_10, 90); \
    UChi_02 = svcmla_x(pg1, UChi_02, U_20, Chi_00, 90); \
    UChi_12 = svcmla_x(pg1, UChi_12, U_20, Chi_10, 90); \
    U_00 = svld1_vnum(pg1, (float32_t*)(baseU + 2 * 3 * 64), (int64_t)(-4)); \
    U_10 = svld1_vnum(pg1, (float32_t*)(baseU + 2 * 3 * 64), (int64_t)(-1)); \
    U_20 = svld1_vnum(pg1, (float32_t*)(baseU + 2 * 3 * 64), (int64_t)(2)); \
}
// MULT_2SPIN_BACKEND
#define MULT_2SPIN_2_A64FXf \
{ \
    UChi_00 = svcmla_x(pg1, UChi_00, U_01, Chi_01, 0); \
    UChi_10 = svcmla_x(pg1, UChi_10, U_01, Chi_11, 0); \
    UChi_01 = svcmla_x(pg1, UChi_01, U_11, Chi_01, 0); \
    UChi_11 = svcmla_x(pg1, UChi_11, U_11, Chi_11, 0); \
    UChi_02 = svcmla_x(pg1, UChi_02, U_21, Chi_01, 0); \
    UChi_12 = svcmla_x(pg1, UChi_12, U_21, Chi_11, 0); \
    UChi_00 = svcmla_x(pg1, UChi_00, U_01, Chi_01, 90); \
    UChi_10 = svcmla_x(pg1, UChi_10, U_01, Chi_11, 90); \
    UChi_01 = svcmla_x(pg1, UChi_01, U_11, Chi_01, 90); \
    UChi_11 = svcmla_x(pg1, UChi_11, U_11, Chi_11, 90); \
    UChi_02 = svcmla_x(pg1, UChi_02, U_21, Chi_01, 90); \
    UChi_12 = svcmla_x(pg1, UChi_12, U_21, Chi_11, 90); \
    UChi_00 = svcmla_x(pg1, UChi_00, U_00, Chi_02, 0); \
    UChi_10 = svcmla_x(pg1, UChi_10, U_00, Chi_12, 0); \
    UChi_01 = svcmla_x(pg1, UChi_01, U_10, Chi_02, 0); \
    UChi_11 = svcmla_x(pg1, UChi_11, U_10, Chi_12, 0); \
    UChi_02 = svcmla_x(pg1, UChi_02, U_20, Chi_02, 0); \
    UChi_12 = svcmla_x(pg1, UChi_12, U_20, Chi_12, 0); \
    UChi_00 = svcmla_x(pg1, UChi_00, U_00, Chi_02, 90); \
    UChi_10 = svcmla_x(pg1, UChi_10, U_00, Chi_12, 90); \
    UChi_01 = svcmla_x(pg1, UChi_01, U_10, Chi_02, 90); \
    UChi_11 = svcmla_x(pg1, UChi_11, U_10, Chi_12, 90); \
    UChi_02 = svcmla_x(pg1, UChi_02, U_20, Chi_02, 90); \
    UChi_12 = svcmla_x(pg1, UChi_12, U_20, Chi_12, 90); \
}
// XP_PROJ
#define XP_PROJ_A64FXf \
{ \
    Chi_00 = svcadd_x(pg1, Chimu_00, Chimu_30, 90); \
    Chi_01 = svcadd_x(pg1, Chimu_01, Chimu_31, 90); \
    Chi_02 = svcadd_x(pg1, Chimu_02, Chimu_32, 90); \
    Chi_10 = svcadd_x(pg1, Chimu_10, Chimu_20, 90); \
    Chi_11 = svcadd_x(pg1, Chimu_11, Chimu_21, 90); \
    Chi_12 = svcadd_x(pg1, Chimu_12, Chimu_22, 90); \
}
// XP_RECON
|
||||
#define XP_RECON_A64FXf \
|
||||
result_20 = svcadd_x(pg1, zero0, UChi_10, 270); \
|
||||
result_21 = svcadd_x(pg1, zero0, UChi_11, 270); \
|
||||
result_22 = svcadd_x(pg1, zero0, UChi_12, 270); \
|
||||
result_30 = svcadd_x(pg1, zero0, UChi_00, 270); \
|
||||
result_31 = svcadd_x(pg1, zero0, UChi_01, 270); \
|
||||
result_32 = svcadd_x(pg1, zero0, UChi_02, 270); \
|
||||
result_00 = UChi_00; \
|
||||
result_01 = UChi_01; \
|
||||
result_02 = UChi_02; \
|
||||
result_10 = UChi_10; \
|
||||
result_11 = UChi_11; \
|
||||
result_12 = UChi_12;
|
||||
|
||||
// XP_RECON_ACCUM
|
||||
#define XP_RECON_ACCUM_A64FXf \
|
||||
result_30 = svcadd_x(pg1, result_30, UChi_00, 270); \
|
||||
result_00 = svadd_x(pg1, result_00, UChi_00); \
|
||||
result_31 = svcadd_x(pg1, result_31, UChi_01, 270); \
|
||||
result_01 = svadd_x(pg1, result_01, UChi_01); \
|
||||
result_32 = svcadd_x(pg1, result_32, UChi_02, 270); \
|
||||
result_02 = svadd_x(pg1, result_02, UChi_02); \
|
||||
result_20 = svcadd_x(pg1, result_20, UChi_10, 270); \
|
||||
result_10 = svadd_x(pg1, result_10, UChi_10); \
|
||||
result_21 = svcadd_x(pg1, result_21, UChi_11, 270); \
|
||||
result_11 = svadd_x(pg1, result_11, UChi_11); \
|
||||
result_22 = svcadd_x(pg1, result_22, UChi_12, 270); \
|
||||
result_12 = svadd_x(pg1, result_12, UChi_12);
|
||||
|
||||
// YP_PROJ
|
||||
#define YP_PROJ_A64FXf \
|
||||
{ \
|
||||
Chi_00 = svsub_x(pg1, Chimu_00, Chimu_30); \
|
||||
Chi_01 = svsub_x(pg1, Chimu_01, Chimu_31); \
|
||||
Chi_02 = svsub_x(pg1, Chimu_02, Chimu_32); \
|
||||
Chi_10 = svadd_x(pg1, Chimu_10, Chimu_20); \
|
||||
Chi_11 = svadd_x(pg1, Chimu_11, Chimu_21); \
|
||||
Chi_12 = svadd_x(pg1, Chimu_12, Chimu_22); \
|
||||
}
|
||||
// ZP_PROJ
|
||||
#define ZP_PROJ_A64FXf \
|
||||
{ \
|
||||
Chi_00 = svcadd_x(pg1, Chimu_00, Chimu_20, 90); \
|
||||
Chi_01 = svcadd_x(pg1, Chimu_01, Chimu_21, 90); \
|
||||
Chi_02 = svcadd_x(pg1, Chimu_02, Chimu_22, 90); \
|
||||
Chi_10 = svcadd_x(pg1, Chimu_10, Chimu_30, 270); \
|
||||
Chi_11 = svcadd_x(pg1, Chimu_11, Chimu_31, 270); \
|
||||
Chi_12 = svcadd_x(pg1, Chimu_12, Chimu_32, 270); \
|
||||
}
|
||||
// TP_PROJ
|
||||
#define TP_PROJ_A64FXf \
|
||||
{ \
|
||||
Chi_00 = svadd_x(pg1, Chimu_00, Chimu_20); \
|
||||
Chi_01 = svadd_x(pg1, Chimu_01, Chimu_21); \
|
||||
Chi_02 = svadd_x(pg1, Chimu_02, Chimu_22); \
|
||||
Chi_10 = svadd_x(pg1, Chimu_10, Chimu_30); \
|
||||
Chi_11 = svadd_x(pg1, Chimu_11, Chimu_31); \
|
||||
Chi_12 = svadd_x(pg1, Chimu_12, Chimu_32); \
|
||||
}
|
||||
// XM_PROJ
|
||||
#define XM_PROJ_A64FXf \
|
||||
{ \
|
||||
Chi_00 = svcadd_x(pg1, Chimu_00, Chimu_30, 270); \
|
||||
Chi_01 = svcadd_x(pg1, Chimu_01, Chimu_31, 270); \
|
||||
Chi_02 = svcadd_x(pg1, Chimu_02, Chimu_32, 270); \
|
||||
Chi_10 = svcadd_x(pg1, Chimu_10, Chimu_20, 270); \
|
||||
Chi_11 = svcadd_x(pg1, Chimu_11, Chimu_21, 270); \
|
||||
Chi_12 = svcadd_x(pg1, Chimu_12, Chimu_22, 270); \
|
||||
}
|
||||
// XM_RECON
|
||||
#define XM_RECON_A64FXf \
|
||||
result_20 = svcadd_x(pg1, zero0, UChi_10, 90); \
|
||||
result_21 = svcadd_x(pg1, zero0, UChi_11, 90); \
|
||||
result_22 = svcadd_x(pg1, zero0, UChi_12, 90); \
|
||||
result_30 = svcadd_x(pg1, zero0, UChi_00, 90); \
|
||||
result_31 = svcadd_x(pg1, zero0, UChi_01, 90); \
|
||||
result_32 = svcadd_x(pg1, zero0, UChi_02, 90); \
|
||||
result_00 = UChi_00; \
|
||||
result_01 = UChi_01; \
|
||||
result_02 = UChi_02; \
|
||||
result_10 = UChi_10; \
|
||||
result_11 = UChi_11; \
|
||||
result_12 = UChi_12;
|
||||
|
||||
// YM_PROJ
|
||||
#define YM_PROJ_A64FXf \
|
||||
{ \
|
||||
Chi_00 = svadd_x(pg1, Chimu_00, Chimu_30); \
|
||||
Chi_01 = svadd_x(pg1, Chimu_01, Chimu_31); \
|
||||
Chi_02 = svadd_x(pg1, Chimu_02, Chimu_32); \
|
||||
Chi_10 = svsub_x(pg1, Chimu_10, Chimu_20); \
|
||||
Chi_11 = svsub_x(pg1, Chimu_11, Chimu_21); \
|
||||
Chi_12 = svsub_x(pg1, Chimu_12, Chimu_22); \
|
||||
}
|
||||
// ZM_PROJ
|
||||
#define ZM_PROJ_A64FXf \
|
||||
{ \
|
||||
Chi_00 = svcadd_x(pg1, Chimu_00, Chimu_20, 270); \
|
||||
Chi_01 = svcadd_x(pg1, Chimu_01, Chimu_21, 270); \
|
||||
Chi_02 = svcadd_x(pg1, Chimu_02, Chimu_22, 270); \
|
||||
Chi_10 = svcadd_x(pg1, Chimu_10, Chimu_30, 90); \
|
||||
Chi_11 = svcadd_x(pg1, Chimu_11, Chimu_31, 90); \
|
||||
Chi_12 = svcadd_x(pg1, Chimu_12, Chimu_32, 90); \
|
||||
}
|
||||
// TM_PROJ
|
||||
#define TM_PROJ_A64FXf \
|
||||
{ \
|
||||
Chi_00 = svsub_x(pg1, Chimu_00, Chimu_20); \
|
||||
Chi_01 = svsub_x(pg1, Chimu_01, Chimu_21); \
|
||||
Chi_02 = svsub_x(pg1, Chimu_02, Chimu_22); \
|
||||
Chi_10 = svsub_x(pg1, Chimu_10, Chimu_30); \
|
||||
Chi_11 = svsub_x(pg1, Chimu_11, Chimu_31); \
|
||||
Chi_12 = svsub_x(pg1, Chimu_12, Chimu_32); \
|
||||
}
|
||||
// XM_RECON_ACCUM
|
||||
#define XM_RECON_ACCUM_A64FXf \
|
||||
result_30 = svcadd_x(pg1, result_30, UChi_00, 90); \
|
||||
result_31 = svcadd_x(pg1, result_31, UChi_01, 90); \
|
||||
result_32 = svcadd_x(pg1, result_32, UChi_02, 90); \
|
||||
result_20 = svcadd_x(pg1, result_20, UChi_10, 90); \
|
||||
result_21 = svcadd_x(pg1, result_21, UChi_11, 90); \
|
||||
result_22 = svcadd_x(pg1, result_22, UChi_12, 90); \
|
||||
result_00 = svadd_x(pg1, result_00, UChi_00); \
|
||||
result_01 = svadd_x(pg1, result_01, UChi_01); \
|
||||
result_02 = svadd_x(pg1, result_02, UChi_02); \
|
||||
result_10 = svadd_x(pg1, result_10, UChi_10); \
|
||||
result_11 = svadd_x(pg1, result_11, UChi_11); \
|
||||
result_12 = svadd_x(pg1, result_12, UChi_12);
|
||||
|
||||
// YP_RECON_ACCUM
|
||||
#define YP_RECON_ACCUM_A64FXf \
|
||||
result_00 = svadd_x(pg1, result_00, UChi_00); \
|
||||
result_30 = svsub_x(pg1, result_30, UChi_00); \
|
||||
result_01 = svadd_x(pg1, result_01, UChi_01); \
|
||||
result_31 = svsub_x(pg1, result_31, UChi_01); \
|
||||
result_02 = svadd_x(pg1, result_02, UChi_02); \
|
||||
result_32 = svsub_x(pg1, result_32, UChi_02); \
|
||||
result_10 = svadd_x(pg1, result_10, UChi_10); \
|
||||
result_20 = svadd_x(pg1, result_20, UChi_10); \
|
||||
result_11 = svadd_x(pg1, result_11, UChi_11); \
|
||||
result_21 = svadd_x(pg1, result_21, UChi_11); \
|
||||
result_12 = svadd_x(pg1, result_12, UChi_12); \
|
||||
result_22 = svadd_x(pg1, result_22, UChi_12);
|
||||
|
||||
// YM_RECON_ACCUM
|
||||
#define YM_RECON_ACCUM_A64FXf \
|
||||
result_00 = svadd_x(pg1, result_00, UChi_00); \
|
||||
result_30 = svadd_x(pg1, result_30, UChi_00); \
|
||||
result_01 = svadd_x(pg1, result_01, UChi_01); \
|
||||
result_31 = svadd_x(pg1, result_31, UChi_01); \
|
||||
result_02 = svadd_x(pg1, result_02, UChi_02); \
|
||||
result_32 = svadd_x(pg1, result_32, UChi_02); \
|
||||
result_10 = svadd_x(pg1, result_10, UChi_10); \
|
||||
result_20 = svsub_x(pg1, result_20, UChi_10); \
|
||||
result_11 = svadd_x(pg1, result_11, UChi_11); \
|
||||
result_21 = svsub_x(pg1, result_21, UChi_11); \
|
||||
result_12 = svadd_x(pg1, result_12, UChi_12); \
|
||||
result_22 = svsub_x(pg1, result_22, UChi_12);
|
||||
|
||||
// ZP_RECON_ACCUM
|
||||
#define ZP_RECON_ACCUM_A64FXf \
|
||||
result_20 = svcadd_x(pg1, result_20, UChi_00, 270); \
|
||||
result_00 = svadd_x(pg1, result_00, UChi_00); \
|
||||
result_21 = svcadd_x(pg1, result_21, UChi_01, 270); \
|
||||
result_01 = svadd_x(pg1, result_01, UChi_01); \
|
||||
result_22 = svcadd_x(pg1, result_22, UChi_02, 270); \
|
||||
result_02 = svadd_x(pg1, result_02, UChi_02); \
|
||||
result_30 = svcadd_x(pg1, result_30, UChi_10, 90); \
|
||||
result_10 = svadd_x(pg1, result_10, UChi_10); \
|
||||
result_31 = svcadd_x(pg1, result_31, UChi_11, 90); \
|
||||
result_11 = svadd_x(pg1, result_11, UChi_11); \
|
||||
result_32 = svcadd_x(pg1, result_32, UChi_12, 90); \
|
||||
result_12 = svadd_x(pg1, result_12, UChi_12);
|
||||
|
||||
// ZM_RECON_ACCUM
|
||||
#define ZM_RECON_ACCUM_A64FXf \
|
||||
result_20 = svcadd_x(pg1, result_20, UChi_00, 90); \
|
||||
result_00 = svadd_x(pg1, result_00, UChi_00); \
|
||||
result_21 = svcadd_x(pg1, result_21, UChi_01, 90); \
|
||||
result_01 = svadd_x(pg1, result_01, UChi_01); \
|
||||
result_22 = svcadd_x(pg1, result_22, UChi_02, 90); \
|
||||
result_02 = svadd_x(pg1, result_02, UChi_02); \
|
||||
result_30 = svcadd_x(pg1, result_30, UChi_10, 270); \
|
||||
result_10 = svadd_x(pg1, result_10, UChi_10); \
|
||||
result_31 = svcadd_x(pg1, result_31, UChi_11, 270); \
|
||||
result_11 = svadd_x(pg1, result_11, UChi_11); \
|
||||
result_32 = svcadd_x(pg1, result_32, UChi_12, 270); \
|
||||
result_12 = svadd_x(pg1, result_12, UChi_12);
|
||||
|
||||
// TP_RECON_ACCUM
|
||||
#define TP_RECON_ACCUM_A64FXf \
|
||||
result_00 = svadd_x(pg1, result_00, UChi_00); \
|
||||
result_20 = svadd_x(pg1, result_20, UChi_00); \
|
||||
result_01 = svadd_x(pg1, result_01, UChi_01); \
|
||||
result_21 = svadd_x(pg1, result_21, UChi_01); \
|
||||
result_02 = svadd_x(pg1, result_02, UChi_02); \
|
||||
result_22 = svadd_x(pg1, result_22, UChi_02); \
|
||||
result_10 = svadd_x(pg1, result_10, UChi_10); \
|
||||
result_30 = svadd_x(pg1, result_30, UChi_10); \
|
||||
result_11 = svadd_x(pg1, result_11, UChi_11); \
|
||||
result_31 = svadd_x(pg1, result_31, UChi_11); \
|
||||
result_12 = svadd_x(pg1, result_12, UChi_12); \
|
||||
result_32 = svadd_x(pg1, result_32, UChi_12);
|
||||
|
||||
// TM_RECON_ACCUM
|
||||
#define TM_RECON_ACCUM_A64FXf \
|
||||
result_00 = svadd_x(pg1, result_00, UChi_00); \
|
||||
result_20 = svsub_x(pg1, result_20, UChi_00); \
|
||||
result_01 = svadd_x(pg1, result_01, UChi_01); \
|
||||
result_21 = svsub_x(pg1, result_21, UChi_01); \
|
||||
result_02 = svadd_x(pg1, result_02, UChi_02); \
|
||||
result_22 = svsub_x(pg1, result_22, UChi_02); \
|
||||
result_10 = svadd_x(pg1, result_10, UChi_10); \
|
||||
result_30 = svsub_x(pg1, result_30, UChi_10); \
|
||||
result_11 = svadd_x(pg1, result_11, UChi_11); \
|
||||
result_31 = svsub_x(pg1, result_31, UChi_11); \
|
||||
result_12 = svadd_x(pg1, result_12, UChi_12); \
|
||||
result_32 = svsub_x(pg1, result_32, UChi_12);
|
||||
|
||||
// ZERO_PSI
|
||||
#define ZERO_PSI_A64FXf \
|
||||
result_00 = svdup_f32(0.); \
|
||||
result_01 = svdup_f32(0.); \
|
||||
result_02 = svdup_f32(0.); \
|
||||
result_10 = svdup_f32(0.); \
|
||||
result_11 = svdup_f32(0.); \
|
||||
result_12 = svdup_f32(0.); \
|
||||
result_20 = svdup_f32(0.); \
|
||||
result_21 = svdup_f32(0.); \
|
||||
result_22 = svdup_f32(0.); \
|
||||
result_30 = svdup_f32(0.); \
|
||||
result_31 = svdup_f32(0.); \
|
||||
result_32 = svdup_f32(0.);
|
||||
|
||||
// PREFETCH_RESULT_L2_STORE (uses DC ZVA for cache line zeroing)
#define PREFETCH_RESULT_L2_STORE_INTERNAL_A64FXf(base) \
{ \
    asm( "dc zva, %[fetchptr] \n\t" : : [fetchptr] "r" (base + 256 * 0) : "memory" ); \
    asm( "dc zva, %[fetchptr] \n\t" : : [fetchptr] "r" (base + 256 * 1) : "memory" ); \
    asm( "dc zva, %[fetchptr] \n\t" : : [fetchptr] "r" (base + 256 * 2) : "memory" ); \
}
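// DC ZVA ("data cache zero by virtual address") zeroes an entire cache line
// without first reading it from memory; on the A64FX the line size is 256
// bytes, which is why the three addresses above step by 256. Zeroing the
// destination lines before the result store avoids the read-for-ownership
// traffic of a write-allocate miss. This assumes DCZID_EL0 reports a
// 256-byte zeroing granule and that DC ZVA is not trapped.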
// PREFETCH_RESULT_L1_STORE (prefetch store to L1)
#define PREFETCH_RESULT_L1_STORE_INTERNAL_A64FXf(base) \
{ \
    svprfd(pg1, (int64_t*)(base + 0), SV_PSTL1STRM); \
    svprfd(pg1, (int64_t*)(base + 256), SV_PSTL1STRM); \
    svprfd(pg1, (int64_t*)(base + 512), SV_PSTL1STRM); \
}
// ADD_RESULT_INTERNAL
#define ADD_RESULT_INTERNAL_A64FXf \
    result_00 = svadd_x(pg1, result_00, Chimu_00); \
    result_01 = svadd_x(pg1, result_01, Chimu_01); \
    result_02 = svadd_x(pg1, result_02, Chimu_02); \
    result_10 = svadd_x(pg1, result_10, Chimu_10); \
    result_11 = svadd_x(pg1, result_11, Chimu_11); \
    result_12 = svadd_x(pg1, result_12, Chimu_12); \
    result_20 = svadd_x(pg1, result_20, Chimu_20); \
    result_21 = svadd_x(pg1, result_21, Chimu_21); \
    result_22 = svadd_x(pg1, result_22, Chimu_22); \
    result_30 = svadd_x(pg1, result_30, Chimu_30); \
    result_31 = svadd_x(pg1, result_31, Chimu_31); \
    result_32 = svadd_x(pg1, result_32, Chimu_32);

Grid/simd/Fujitsu_A64FX_undef.h (new file, 77 lines)
@@ -0,0 +1,77 @@
/*************************************************************************************

    Grid physics library, www.github.com/paboyle/Grid

    Source file: Fujitsu_A64FX_undef.h

    Copyright (C) 2020

    Author: Nils Meyer <nils.meyer@ur.de>

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License along
    with this program; if not, write to the Free Software Foundation, Inc.,
    51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.

    See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */

#undef LOAD_CHIMU
#undef PREFETCH_CHIMU_L1
#undef PREFETCH_GAUGE_L1
#undef PREFETCH_CHIMU_L2
#undef PREFETCH_GAUGE_L2
#undef PREFETCH_GAUGE_L1_INTERNAL
#undef PREFETCH1_CHIMU
#undef PREFETCH_CHIMU
#undef PREFETCH_RESULT_L2_STORE
#undef PREFETCH_RESULT_L1_STORE
#undef LOAD_GAUGE
#undef LOCK_GAUGE
#undef UNLOCK_GAUGE
#undef MASK_REGS
#undef SAVE_RESULT
#undef ADD_RESULT
#undef MULT_2SPIN_1
#undef MULT_2SPIN_2
#undef MAYBEPERM
#undef LOAD_CHI
#undef ZERO_PSI
#undef XP_PROJ
#undef YP_PROJ
#undef ZP_PROJ
#undef TP_PROJ
#undef XM_PROJ
#undef YM_PROJ
#undef ZM_PROJ
#undef TM_PROJ
#undef XP_RECON
#undef XM_RECON
#undef XM_RECON_ACCUM
#undef YM_RECON_ACCUM
#undef ZM_RECON_ACCUM
#undef TM_RECON_ACCUM
#undef XP_RECON_ACCUM
#undef YP_RECON_ACCUM
#undef ZP_RECON_ACCUM
#undef TP_RECON_ACCUM
#undef PERMUTE
#undef PERMUTE_DIR0
#undef PERMUTE_DIR1
#undef PERMUTE_DIR2
#undef PERMUTE_DIR3
#undef LOAD_TABLE
#undef LOAD_TABLE0
#undef LOAD_TABLE1
#undef LOAD_TABLE2
#undef LOAD_TABLE3
Grid/simd/Grid_a64fx-2.h (new file, 942 lines)
@@ -0,0 +1,942 @@
/*************************************************************************************

    Grid physics library, www.github.com/paboyle/Grid

    Source file: Grid_a64fx-2.h

    Copyright (C) 2020

    Author: Nils Meyer <nils.meyer@ur.de>

    with support from Arm

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License along
    with this program; if not, write to the Free Software Foundation, Inc.,
    51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.

    See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */

/////////////////////////////////////////////////////
// Using SVE ACLE
/////////////////////////////////////////////////////

static_assert(GEN_SIMD_WIDTH % 64u == 0, "A64FX SIMD vector size is 64 bytes");

NAMESPACE_BEGIN(Grid);
NAMESPACE_BEGIN(Optimization);

// type traits giving the number of elements for each vector type
template <typename T> struct W;
template <> struct W<double> {
  constexpr static unsigned int c = GEN_SIMD_WIDTH/16u;
  constexpr static unsigned int r = GEN_SIMD_WIDTH/8u;
};
template <> struct W<float> {
  constexpr static unsigned int c = GEN_SIMD_WIDTH/8u;
  constexpr static unsigned int r = GEN_SIMD_WIDTH/4u;
};
template <> struct W<Integer> {
  constexpr static unsigned int r = GEN_SIMD_WIDTH/4u;
};
template <> struct W<uint16_t> {
  constexpr static unsigned int c = GEN_SIMD_WIDTH/4u;
  constexpr static unsigned int r = GEN_SIMD_WIDTH/2u;
};
template <> struct W<uint64_t> {
  constexpr static unsigned int c = GEN_SIMD_WIDTH/16u;
  constexpr static unsigned int r = GEN_SIMD_WIDTH/8u;
};

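// For each scalar type, W<T>::r is the number of real elements that fit in
// one GEN_SIMD_WIDTH-byte vector and W<T>::c the number of complex pairs
// (half of r). For the 64-byte A64FX vector this gives, e.g.,
// W<double>::r = 8, W<double>::c = 4 and W<float>::r = 16, W<float>::c = 8.
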
#ifdef ARMCLANGCOMPAT
// SIMD vector immediate types
template <typename T>
struct vec_imm {
  alignas(GEN_SIMD_WIDTH) T v[W<T>::r];
};

// SIMD vector types
template <typename T>
struct vec {
  alignas(GEN_SIMD_WIDTH) T v[W<T>::r];
  vec() = default;
  vec(const vec &rhs) { this->operator=(rhs); }
  vec(const vec_imm<T> &rhs) {
    // v = rhs.v
    svst1(svptrue_b8(), (T*)this, svld1(svptrue_b8(), (T*)rhs.v));
  }

  inline vec &operator=(const vec &rhs) {
    // v = rhs.v
    svst1(svptrue_b8(), (T*)this, svld1(svptrue_b8(), (T*)rhs.v));
    return *this;
  }
};

#else // no ARMCLANGCOMPAT
#define vec_imm vec
// SIMD vector types
template <typename T>
struct vec {
  alignas(GEN_SIMD_WIDTH) T v[W<T>::r];
};
#endif

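// The ARMCLANGCOMPAT path spells out copy construction/assignment as an
// explicit SVE load + store instead of the compiler-generated memberwise
// copy; presumably this works around an armclang code-generation problem
// with copies of these 64-byte aligned aggregates. The svptrue_b8()
// predicate makes the copy byte-granular, so one load/store pair moves the
// whole vector regardless of the element type T.
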
typedef vec<float>    vecf;
typedef vec<double>   vecd;
typedef vec<uint16_t> vech; // half precision comms
typedef vec<Integer>  veci;

NAMESPACE_END(Optimization)
NAMESPACE_END(Grid)

// low-level API
NAMESPACE_BEGIN(Grid);
NAMESPACE_BEGIN(Optimization);

template <typename T>
struct acle{};

template <>
struct acle<double>{
  typedef svfloat64_t vt;
  typedef svfloat64x2_t vt2;
  typedef svfloat64x4_t vt4;
  typedef float64_t pt;
  typedef uint64_t uint;
  typedef svuint64_t svuint;

  static inline svbool_t pg1(){return svptrue_b64();}
  static inline svbool_t pg2(){return svptrue_pat_b64(SV_VL4);}
  static inline svbool_t pg4(){return svptrue_pat_b64(SV_VL2);}
  static inline vec<uint64_t> tbl_swap(){
    //const vec<uint64_t> t = {1, 0, 3, 2, 5, 4, 7, 6};
    const vec_imm<uint64_t> t = {1, 0, 3, 2, 5, 4, 7, 6};
    return t;
  }
  static inline vec<uint64_t> tbl0(){
    //const vec<uint64_t> t = {4, 5, 6, 7, 0, 1, 2, 3};
    const vec_imm<uint64_t> t = {4, 5, 6, 7, 0, 1, 2, 3};
    return t;
  }
  static inline vec<uint64_t> tbl1(){
    //const vec<uint64_t> t = {2, 3, 0, 1, 6, 7, 4, 5};
    const vec_imm<uint64_t> t = {2, 3, 0, 1, 6, 7, 4, 5};
    return t;
  }
  static inline vec<uint64_t> tbl_exch1a(){ // Exchange1
    //const vec<uint64_t> t = {0, 1, 4, 5, 2, 3, 6, 7};
    const vec_imm<uint64_t> t = {0, 1, 4, 5, 2, 3, 6, 7};
    return t;
  }
  static inline vec<uint64_t> tbl_exch1b(){ // Exchange1
    //const vec<uint64_t> t = {2, 3, 6, 7, 0, 1, 4, 5};
    const vec_imm<uint64_t> t = {2, 3, 6, 7, 0, 1, 4, 5};
    return t;
  }
  static inline vec<uint64_t> tbl_exch1c(){ // Exchange1
    //const vec<uint64_t> t = {4, 5, 0, 1, 6, 7, 2, 3};
    const vec_imm<uint64_t> t = {4, 5, 0, 1, 6, 7, 2, 3};
    return t;
  }
  static inline svbool_t pg_even(){return svzip1_b64(svptrue_b64(), svpfalse_b());}
  static inline svbool_t pg_odd() {return svzip1_b64(svpfalse_b(), svptrue_b64());}
  static inline svfloat64_t zero(){return svdup_f64(0.);}
};

template <>
struct acle<float>{
  typedef svfloat32_t vt;
  typedef svfloat32x2_t vt2;
  typedef float32_t pt;
  typedef uint32_t uint;
  typedef svuint32_t svuint;

  static inline svbool_t pg1(){return svptrue_b32();}
  static inline svbool_t pg2(){return svptrue_pat_b32(SV_VL8);}
  // exchange neighboring elements
  static inline vec<uint32_t> tbl_swap(){
    //const vec<uint32_t> t = {1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14};
    const vec_imm<uint32_t> t = {1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14};
    return t;
  }
  static inline vec<uint32_t> tbl0(){
    //const vec<uint32_t> t = {8, 9, 10, 11, 12, 13, 14, 15, 0, 1, 2, 3, 4, 5, 6, 7};
    const vec_imm<uint32_t> t = {8, 9, 10, 11, 12, 13, 14, 15, 0, 1, 2, 3, 4, 5, 6, 7};
    return t;
  }
  static inline vec<uint32_t> tbl1(){
    //const vec<uint32_t> t = {4, 5, 6, 7, 0, 1, 2, 3, 12, 13, 14, 15, 8, 9, 10, 11};
    const vec_imm<uint32_t> t = {4, 5, 6, 7, 0, 1, 2, 3, 12, 13, 14, 15, 8, 9, 10, 11};
    return t;
  }
  static inline vec<uint32_t> tbl2(){
    //const vec<uint32_t> t = {2, 3, 0, 1, 6, 7, 4, 5, 10, 11, 8, 9, 14, 15, 12, 13};
    const vec_imm<uint32_t> t = {2, 3, 0, 1, 6, 7, 4, 5, 10, 11, 8, 9, 14, 15, 12, 13};
    return t;
  }
  static inline vec<uint32_t> tbl_exch1a(){ // Exchange1
    //const vec<uint32_t> t = {0, 1, 2, 3, 8, 9, 10, 11, 4, 5, 6, 7, 12, 13, 14, 15};
    const vec_imm<uint32_t> t = {0, 1, 2, 3, 8, 9, 10, 11, 4, 5, 6, 7, 12, 13, 14, 15};
    return t;
  }
  static inline vec<uint32_t> tbl_exch1b(){ // Exchange1
    //const vec<uint32_t> t = {4, 5, 6, 7, 12, 13, 14, 15, 0, 1, 2, 3, 8, 9, 10, 11};
    const vec_imm<uint32_t> t = {4, 5, 6, 7, 12, 13, 14, 15, 0, 1, 2, 3, 8, 9, 10, 11};
    return t;
  }
  static inline vec<uint32_t> tbl_exch1c(){ // Exchange1
    //const vec<uint32_t> t = {8, 9, 10, 11, 0, 1, 2, 3, 12, 13, 14, 15, 4, 5, 6, 7};
    const vec_imm<uint32_t> t = {8, 9, 10, 11, 0, 1, 2, 3, 12, 13, 14, 15, 4, 5, 6, 7};
    return t;
  }
  static inline svbool_t pg_even(){return svzip1_b32(svptrue_b32(), svpfalse_b());}
  static inline svbool_t pg_odd() {return svzip1_b32(svpfalse_b(), svptrue_b32());}
  static inline svfloat32_t zero(){return svdup_f32(0.);}
};

template <>
struct acle<uint16_t>{
  typedef svfloat16_t vt;
  typedef float16_t pt;
  typedef uint16_t uint;
  typedef svuint16_t svuint;

  static inline svbool_t pg1(){return svptrue_b16();}
  static inline svbool_t pg2(){return svptrue_pat_b16(SV_VL16);}
  static inline svbool_t pg_even(){return svzip1_b16(svptrue_b16(), svpfalse_b());}
  static inline svbool_t pg_odd() {return svzip1_b16(svpfalse_b(), svptrue_b16());}
  static inline svfloat16_t zero(){return svdup_f16(0.);}
};

template <>
struct acle<Integer>{
  typedef svuint32_t vt;
  typedef svuint32x2_t vt2;
  typedef Integer pt;
  typedef uint32_t uint;
  typedef svuint32_t svuint;

  //static inline svbool_t pg1(){return svptrue_b16();}
  static inline svbool_t pg1(){return svptrue_b32();}
  static inline svbool_t pg2(){return svptrue_pat_b32(SV_VL8);}
  static inline svbool_t pg_even(){return svzip1_b32(svptrue_b32(), svpfalse_b());}
  static inline svbool_t pg_odd() {return svzip1_b32(svpfalse_b(), svptrue_b32());}
};

// ---------------------------------------------------

struct Vsplat{
  // Complex float
  inline vecf operator()(float a, float b){
    vecf out;
    svbool_t pg1 = acle<float>::pg1();
    typename acle<float>::vt a_v = svdup_f32(a);
    typename acle<float>::vt b_v = svdup_f32(b);
    typename acle<float>::vt r_v = svzip1(a_v, b_v);
    svst1(pg1, out.v, r_v);
    return out;
  }

  // Real float
  inline vecf operator()(float a){
    vecf out;
    svbool_t pg1 = acle<float>::pg1();
    typename acle<float>::vt r_v = svdup_f32(a);
    svst1(pg1, out.v, r_v);
    return out;
  }

  // Complex double
  inline vecd operator()(double a, double b){
    vecd out;
    svbool_t pg1 = acle<double>::pg1();
    typename acle<double>::vt a_v = svdup_f64(a);
    typename acle<double>::vt b_v = svdup_f64(b);
    typename acle<double>::vt r_v = svzip1(a_v, b_v);
    svst1(pg1, out.v, r_v);
    return out;
  }

  // Real double
  inline vecd operator()(double a){
    vecd out;
    svbool_t pg1 = acle<double>::pg1();
    typename acle<double>::vt r_v = svdup_f64(a);
    svst1(pg1, out.v, r_v);
    return out;
  }

  // Integer
  inline vec<Integer> operator()(Integer a){
    vec<Integer> out;
    svbool_t pg1 = acle<Integer>::pg1();
    // Add check whether Integer is really a uint32_t???
    typename acle<Integer>::vt r_v = svdup_u32(a);
    svst1(pg1, out.v, r_v);
    return out;
  }
};

struct Vstore{
  // Real
  template <typename T>
  inline void operator()(vec<T> a, T *D){
    svbool_t pg1 = acle<T>::pg1();
    typename acle<T>::vt a_v = svld1(pg1, (typename acle<T>::pt*)&a.v);
    svst1(pg1, D, a_v);
  }
};

struct Vstream{
  // Real
  template <typename T>
  inline void operator()(T * a, vec<T> b){
    svbool_t pg1 = acle<T>::pg1();
    typename acle<T>::vt b_v = svld1(pg1, b.v);
    svstnt1(pg1, a, b_v);
    //svst1(pg1, a, b_v);
  }
};

struct Vset{
  // Complex
  template <typename T>
  inline vec<T> operator()(std::complex<T> *a){
    vec<T> out;
    svbool_t pg1 = acle<T>::pg1();
    typename acle<T>::vt a_v = svld1(pg1, (T*)a);
    svst1(pg1, out.v, a_v);
    return out;
  }

  // Real
  template <typename T>
  inline vec<T> operator()(T *a){
    vec<T> out;
    svbool_t pg1 = acle<T>::pg1();
    typename acle<T>::vt a_v = svld1(pg1, a);
    svst1(pg1, out.v, a_v);
    return out;
  }
};

/////////////////////////////////////////////////////
// Arithmetic operations
/////////////////////////////////////////////////////

struct Sum{
  template <typename T>
  inline vec<T> operator()(vec<T> a, vec<T> b){
    vec<T> out;
    svbool_t pg1 = acle<T>::pg1();
    typename acle<T>::vt a_v = svld1(pg1, a.v);
    typename acle<T>::vt b_v = svld1(pg1, b.v);
    typename acle<T>::vt r_v = svadd_x(pg1, a_v, b_v);
    svst1(pg1, out.v, r_v);
    return out;
  }
};

struct Sub{
  template <typename T>
  inline vec<T> operator()(vec<T> a, vec<T> b){
    vec<T> out;
    svbool_t pg1 = acle<T>::pg1();
    typename acle<T>::vt a_v = svld1(pg1, a.v);
    typename acle<T>::vt b_v = svld1(pg1, b.v);
    typename acle<T>::vt r_v = svsub_x(pg1, a_v, b_v);
    svst1(pg1, out.v, r_v);
    return out;
  }
};

struct Mult{
  template <typename T>
  inline vec<T> operator()(vec<T> a, vec<T> b, vec<T> c){
    vec<T> out;
    svbool_t pg1 = acle<T>::pg1();
    typename acle<T>::vt a_v = svld1(pg1, a.v);
    typename acle<T>::vt b_v = svld1(pg1, b.v);
    typename acle<T>::vt c_v = svld1(pg1, c.v);
    typename acle<T>::vt r_v = svmla_x(pg1, c_v, a_v, b_v);
    svst1(pg1, out.v, r_v);
    return out;
  }
  template <typename T>
  inline vec<T> operator()(vec<T> a, vec<T> b){
    vec<T> out;
    svbool_t pg1 = acle<T>::pg1();
    typename acle<T>::vt a_v = svld1(pg1, a.v);
    typename acle<T>::vt b_v = svld1(pg1, b.v);
    typename acle<T>::vt r_v = svmul_x(pg1, a_v, b_v);
    svst1(pg1, out.v, r_v);
    return out;
  }
};

struct MultRealPart{
  template <typename T>
  inline vec<T> operator()(vec<T> a, vec<T> b){
    vec<T> out;
    svbool_t pg1 = acle<T>::pg1();
    typename acle<T>::vt a_v = svld1(pg1, a.v);
    typename acle<T>::vt b_v = svld1(pg1, b.v);

    // using FCMLA
    typename acle<T>::vt z_v = acle<T>::zero();
    typename acle<T>::vt r_v = svcmla_x(pg1, z_v, a_v, b_v, 0);

    svst1(pg1, out.v, r_v);
    return out;
  }
};

struct MaddRealPart{
  template <typename T>
  inline vec<T> operator()(vec<T> a, vec<T> b, vec<T> c){
    vec<T> out;
    svbool_t pg1 = acle<T>::pg1();
    typename acle<T>::vt a_v = svld1(pg1, a.v);
    typename acle<T>::vt b_v = svld1(pg1, b.v);
    typename acle<T>::vt c_v = svld1(pg1, c.v);

    // using FCMLA
    typename acle<T>::vt r_v = svcmla_x(pg1, c_v, a_v, b_v, 0);

    svst1(pg1, out.v, r_v);
    return out;
  }
};

struct MultComplex{
  // Complex a*b
  template <typename T>
  inline vec<T> operator()(vec<T> a, vec<T> b){
    vec<T> out;
    svbool_t pg1 = acle<T>::pg1();
    typename acle<T>::vt a_v = svld1(pg1, a.v);
    typename acle<T>::vt b_v = svld1(pg1, b.v);
    typename acle<T>::vt z_v = acle<T>::zero();

    // using FCMLA
    typename acle<T>::vt r_v = svcmla_x(pg1, z_v, a_v, b_v, 0);
    r_v = svcmla_x(pg1, r_v, a_v, b_v, 90);

    svst1(pg1, out.v, r_v);
    return out;
  }
};

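// Two chained FCMLA instructions implement the full complex product: the
// rotation-0 pass accumulates Re(a)*b into the zero accumulator, and the
// rotation-90 pass then adds i*Im(a)*b, so r = Re(a)*b + i*Im(a)*b = a*b.
// Worked example per complex lane: a = 1+2i, b = 3+4i; pass 0 gives
// 1*(3+4i) = 3+4i, pass 90 adds i*2*(3+4i) = -8+6i, total -5+10i,
// which is (1+2i)*(3+4i).
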
struct MultAddComplex{
  // Complex a*b+c
  template <typename T>
  inline vec<T> operator()(vec<T> a, vec<T> b, vec<T> c){
    vec<T> out;
    svbool_t pg1 = acle<T>::pg1();
    typename acle<T>::vt a_v = svld1(pg1, a.v);
    typename acle<T>::vt b_v = svld1(pg1, b.v);
    typename acle<T>::vt c_v = svld1(pg1, c.v);

    // using FCMLA
    typename acle<T>::vt r_v = svcmla_x(pg1, c_v, a_v, b_v, 0);
    r_v = svcmla_x(pg1, r_v, a_v, b_v, 90);
    svst1(pg1, out.v, r_v);
    return out;
  }
};

struct Div{
  // Real
  template <typename T>
  inline vec<T> operator()(vec<T> a, vec<T> b){
    vec<T> out;
    svbool_t pg1 = acle<T>::pg1();
    typename acle<T>::vt a_v = svld1(pg1, a.v);
    typename acle<T>::vt b_v = svld1(pg1, b.v);
    typename acle<T>::vt r_v = svdiv_x(pg1, a_v, b_v);
    svst1(pg1, out.v, r_v);
    return out;
  }
};

struct Conj{
  // Complex
  template <typename T>
  inline vec<T> operator()(vec<T> a){
    vec<T> out;
    svbool_t pg1 = acle<T>::pg1();
    svbool_t pg_odd = acle<T>::pg_odd();
    typename acle<T>::vt a_v = svld1(pg1, a.v);
    //typename acle<T>::vt r_v = svneg_x(pg_odd, a_v);
    typename acle<T>::vt r_v = svneg_m(a_v, pg_odd, a_v);
    svst1(pg1, out.v, r_v);
    return out;
  }
};

struct TimesMinusI{
  // Complex
  template <typename T>
  inline vec<T> operator()(vec<T> a, vec<T> b){
    vec<T> out;
    const vec<typename acle<T>::uint> tbl_swap = acle<T>::tbl_swap();
    svbool_t pg1 = acle<T>::pg1();
    svbool_t pg_odd = acle<T>::pg_odd();

    typename acle<T>::svuint tbl_swap_v = svld1(pg1, tbl_swap.v);
    typename acle<T>::vt a_v = svld1(pg1, a.v);
    a_v = svtbl(a_v, tbl_swap_v);
    typename acle<T>::vt r_v = svneg_m(a_v, pg_odd, a_v);
    svst1(pg1, out.v, r_v);
    return out;
  }
};

struct TimesI{
  // Complex
  template <typename T>
  inline vec<T> operator()(vec<T> a, vec<T> b){
    vec<T> out;
    const vec<typename acle<T>::uint> tbl_swap = acle<T>::tbl_swap();
    svbool_t pg1 = acle<T>::pg1();
    svbool_t pg_even = acle<T>::pg_even();

    typename acle<T>::svuint tbl_swap_v = svld1(pg1, tbl_swap.v);
    typename acle<T>::vt a_v = svld1(pg1, a.v);
    a_v = svtbl(a_v, tbl_swap_v);
    //typename acle<T>::vt r_v = svneg_x(pg_even, a_v);
    typename acle<T>::vt r_v = svneg_m(a_v, pg_even, a_v);
    svst1(pg1, out.v, r_v);
    return out;
  }
};

struct PrecisionChange {
  static inline vech StoH (const vecf &sa,const vecf &sb) {
    vech ret;
    svbool_t pg1s = acle<float>::pg1();
    svbool_t pg1h = acle<uint16_t>::pg1();
    typename acle<float>::vt sa_v = svld1(pg1s, sa.v);
    typename acle<float>::vt sb_v = svld1(pg1s, sb.v);
    typename acle<uint16_t>::vt ha_v = svcvt_f16_x(pg1s, sa_v);
    typename acle<uint16_t>::vt hb_v = svcvt_f16_x(pg1s, sb_v);
    typename acle<uint16_t>::vt r_v = svuzp1(ha_v, hb_v);
    svst1(pg1h, (typename acle<uint16_t>::pt*)&ret.v, r_v);
    return ret;
  }
  static inline void HtoS(vech h,vecf &sa,vecf &sb) {
    svbool_t pg1h = acle<uint16_t>::pg1();
    svbool_t pg1s = acle<float>::pg1();
    typename acle<uint16_t>::vt h_v = svld1(pg1h, (typename acle<uint16_t>::pt*)&h.v);
    typename acle<uint16_t>::vt ha_v = svzip1(h_v, h_v);
    typename acle<uint16_t>::vt hb_v = svzip2(h_v, h_v);
    typename acle<float>::vt sa_v = svcvt_f32_x(pg1s, ha_v);
    typename acle<float>::vt sb_v = svcvt_f32_x(pg1s, hb_v);
    svst1(pg1s, sa.v, sa_v);
    svst1(pg1s, sb.v, sb_v);
  }
  static inline vecf DtoS (vecd a,vecd b) {
    vecf ret;
    svbool_t pg1d = acle<double>::pg1();
    svbool_t pg1s = acle<float>::pg1();
    typename acle<double>::vt a_v = svld1(pg1d, a.v);
    typename acle<double>::vt b_v = svld1(pg1d, b.v);
    typename acle<float>::vt sa_v = svcvt_f32_x(pg1d, a_v);
    typename acle<float>::vt sb_v = svcvt_f32_x(pg1d, b_v);
    typename acle<float>::vt r_v = svuzp1(sa_v, sb_v);
    svst1(pg1s, ret.v, r_v);
    return ret;
  }
  static inline void StoD (vecf s,vecd &a,vecd &b) {
    svbool_t pg1s = acle<float>::pg1();
    svbool_t pg1d = acle<double>::pg1();
    typename acle<float>::vt s_v = svld1(pg1s, s.v);
    typename acle<float>::vt sa_v = svzip1(s_v, s_v);
    typename acle<float>::vt sb_v = svzip2(s_v, s_v);
    typename acle<double>::vt a_v = svcvt_f64_x(pg1d, sa_v);
    typename acle<double>::vt b_v = svcvt_f64_x(pg1d, sb_v);
    svst1(pg1d, a.v, a_v);
    svst1(pg1d, b.v, b_v);
  }
  static inline vech DtoH (vecd a,vecd b,vecd c,vecd d) {
    vech ret;
    svbool_t pg1d = acle<double>::pg1();
    svbool_t pg1h = acle<uint16_t>::pg1();
    typename acle<double>::vt a_v = svld1(pg1d, a.v);
    typename acle<double>::vt b_v = svld1(pg1d, b.v);
    typename acle<double>::vt c_v = svld1(pg1d, c.v);
    typename acle<double>::vt d_v = svld1(pg1d, d.v);
    typename acle<uint16_t>::vt ha_v = svcvt_f16_x(pg1d, a_v);
    typename acle<uint16_t>::vt hb_v = svcvt_f16_x(pg1d, b_v);
    typename acle<uint16_t>::vt hc_v = svcvt_f16_x(pg1d, c_v);
    typename acle<uint16_t>::vt hd_v = svcvt_f16_x(pg1d, d_v);
    typename acle<uint16_t>::vt hab_v = svuzp1(ha_v, hb_v);
    typename acle<uint16_t>::vt hcd_v = svuzp1(hc_v, hd_v);
    typename acle<uint16_t>::vt r_v = svuzp1(hab_v, hcd_v);
    svst1(pg1h, (typename acle<uint16_t>::pt*)&ret.v, r_v);
    return ret;
    /*
    vecf sa,sb;
    sa = DtoS(a,b);
    sb = DtoS(c,d);
    return StoH(sa,sb);
    */
  }
  static inline void HtoD(vech h,vecd &a,vecd &b,vecd &c,vecd &d) {
    svbool_t pg1h = acle<uint16_t>::pg1();
    svbool_t pg1d = acle<double>::pg1();
    typename acle<uint16_t>::vt h_v = svld1(pg1h, (typename acle<uint16_t>::pt*)&h.v);
    typename acle<uint16_t>::vt sa_v = svzip1(h_v, h_v);
    typename acle<uint16_t>::vt sb_v = svzip2(h_v, h_v);
    typename acle<uint16_t>::vt da_v = svzip1(sa_v, sa_v);
    typename acle<uint16_t>::vt db_v = svzip2(sa_v, sa_v);
    typename acle<uint16_t>::vt dc_v = svzip1(sb_v, sb_v);
    typename acle<uint16_t>::vt dd_v = svzip2(sb_v, sb_v);
    typename acle<double>::vt a_v = svcvt_f64_x(pg1d, da_v);
    typename acle<double>::vt b_v = svcvt_f64_x(pg1d, db_v);
    typename acle<double>::vt c_v = svcvt_f64_x(pg1d, dc_v);
    typename acle<double>::vt d_v = svcvt_f64_x(pg1d, dd_v);
    svst1(pg1d, a.v, a_v);
    svst1(pg1d, b.v, b_v);
    svst1(pg1d, c.v, c_v);
    svst1(pg1d, d.v, d_v);
    /*
    vecf sa,sb;
    HtoS(h,sa,sb);
    StoD(sa,a,b);
    StoD(sb,c,d);
    */
  }
};

struct Exchange{

  // Exchange0 is valid for arbitrary SVE vector length
  template <typename T>
  static inline void Exchange0(vec<T> &out1, vec<T> &out2, const vec<T> &in1, const vec<T> &in2){
    svbool_t pg1 = acle<T>::pg1();
    typename acle<T>::vt a1_v = svld1(pg1, in1.v);
    typename acle<T>::vt a2_v = svld1(pg1, in2.v);
    typename acle<T>::vt r1_v = svext(a1_v, a1_v, (uint64_t)W<T>::c);
    r1_v = svext(r1_v, a2_v, (uint64_t)W<T>::c);
    typename acle<T>::vt r2_v = svext(a2_v, a2_v, (uint64_t)W<T>::c);
    r2_v = svext(a1_v, r2_v, (uint64_t)W<T>::c);
    svst1(pg1, out1.v, r1_v);
    svst1(pg1, out2.v, r2_v);
  }

  template <typename T>
  static inline void Exchange1(vec<T> &out1, vec<T> &out2, const vec<T> &in1, const vec<T> &in2){
    // this one is tricky; svtrn2q* from SVE2 fits best, but it is not available in SVE1
    // alternative: use 4-el structure; expect translation into ldp + stp -> SFI
    svbool_t pg1 = acle<T>::pg1();
    const vec<typename acle<T>::uint> tbl_exch1a = acle<T>::tbl_exch1a();
    const vec<typename acle<T>::uint> tbl_exch1b = acle<T>::tbl_exch1b();
    const vec<typename acle<T>::uint> tbl_exch1c = acle<T>::tbl_exch1c();

    typename acle<T>::svuint tbl_exch1a_v = svld1(pg1, tbl_exch1a.v);
    typename acle<T>::svuint tbl_exch1b_v = svld1(pg1, tbl_exch1b.v);
    typename acle<T>::svuint tbl_exch1c_v = svld1(pg1, tbl_exch1c.v);

    typename acle<T>::vt in1_v = svld1(pg1, in1.v);
    typename acle<T>::vt in2_v = svld1(pg1, in2.v);

    typename acle<T>::vt a1_v = svtbl(in1_v, tbl_exch1a_v);
    typename acle<T>::vt a2_v = svtbl(in2_v, tbl_exch1b_v);
    typename acle<T>::vt b1_v = svext(a2_v, a1_v, (uint64_t)(W<T>::r / 2u));
    typename acle<T>::vt b2_v = svext(a1_v, a2_v, (uint64_t)(W<T>::r / 2u));
    typename acle<T>::vt out1_v = svtbl(b1_v, tbl_exch1c_v);
    typename acle<T>::vt out2_v = svtbl(b2_v, tbl_exch1a_v);

    svst1(pg1, out1.v, out1_v);
    svst1(pg1, out2.v, out2_v);
  }

  template <typename T>
  static inline void Exchange2(vec<T> &out1, vec<T> &out2, const vec<T> &in1, const vec<T> &in2){
    svbool_t pg1 = acle<double>::pg1();
    typename acle<double>::vt a1_v = svld1(pg1, (typename acle<double>::pt*)in1.v);
    typename acle<double>::vt a2_v = svld1(pg1, (typename acle<double>::pt*)in2.v);
    typename acle<double>::vt r1_v = svtrn1(a1_v, a2_v);
    typename acle<double>::vt r2_v = svtrn2(a1_v, a2_v);
    svst1(pg1, (typename acle<double>::pt*)out1.v, r1_v);
    svst1(pg1, (typename acle<double>::pt*)out2.v, r2_v);
  }

  static inline void Exchange3(vecf &out1, vecf &out2, const vecf &in1, const vecf &in2){
    svbool_t pg1 = acle<float>::pg1();
    typename acle<float>::vt a1_v = svld1(pg1, in1.v);
    typename acle<float>::vt a2_v = svld1(pg1, in2.v);
    typename acle<float>::vt r1_v = svtrn1(a1_v, a2_v);
    typename acle<float>::vt r2_v = svtrn2(a1_v, a2_v);
    svst1(pg1, out1.v, r1_v);
    svst1(pg1, out2.v, r2_v);
  }

  static inline void Exchange3(vecd &out1, vecd &out2, const vecd &in1, const vecd &in2){
    assert(0);
    return;
  }
};

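// Exchange0 above swaps vector halves using only EXT: svext(a, b, n)
// concatenates a and b and extracts one full vector starting at element n.
// With n = W<T>::c (half the register, since c = r/2), svext(a1, a1, n)
// rotates in1 by half a vector and the second EXT splices in the matching
// half of in2, so out1 = {lo(in1), lo(in2)} and out2 = {hi(in1), hi(in2)}.
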
struct Permute{

  // Permute0 is valid for any SVE vector width
  template <typename T>
  static inline vec<T> Permute0(vec<T> in) {
    vec<T> out;
    svbool_t pg1 = acle<T>::pg1();
    typename acle<T>::vt a_v = svld1(pg1, in.v);
    typename acle<T>::vt r_v = svext(a_v, a_v, (uint64_t)(W<T>::r / 2u));
    svst1(pg1, out.v, r_v);
    return out;
  }

  static inline vecd Permute1(vecd in) {
    vecd out;
    const vec<typename acle<double>::uint> tbl_swap = acle<double>::tbl1();
    svbool_t pg1 = acle<double>::pg1();
    typename acle<double>::vt a_v = svld1(pg1, in.v);
    typename acle<double>::svuint tbl_swap_v = svld1(pg1, tbl_swap.v);
    typename acle<double>::vt r_v = svtbl(a_v, tbl_swap_v);
    svst1(pg1, out.v, r_v);
    return out;
  }

  static inline vecf Permute1(vecf in) {
    vecf out;
    const vec<typename acle<float>::uint> tbl_swap = acle<float>::tbl1();
    svbool_t pg1 = acle<float>::pg1();
    typename acle<float>::vt a_v = svld1(pg1, in.v);
    typename acle<float>::svuint tbl_swap_v = svld1(pg1, tbl_swap.v);
    typename acle<float>::vt r_v = svtbl(a_v, tbl_swap_v);
    svst1(pg1, out.v, r_v);
    return out;
  }

  static inline vecd Permute2(vecd in) {
    vecd out;
    const vec<typename acle<double>::uint> tbl_swap = acle<double>::tbl_swap();
    svbool_t pg1 = acle<double>::pg1();
    typename acle<double>::vt a_v = svld1(pg1, in.v);
    typename acle<double>::svuint tbl_swap_v = svld1(pg1, tbl_swap.v);
    typename acle<double>::vt r_v = svtbl(a_v, tbl_swap_v);
    svst1(pg1, out.v, r_v);
    return out;
  }

  static inline vecf Permute2(vecf in) {
    vecf out;
    const vec<typename acle<float>::uint> tbl_swap = acle<float>::tbl2();
    svbool_t pg1 = acle<float>::pg1();
    typename acle<float>::vt a_v = svld1(pg1, in.v);
    typename acle<float>::svuint tbl_swap_v = svld1(pg1, tbl_swap.v);
    typename acle<float>::vt r_v = svtbl(a_v, tbl_swap_v);
    svst1(pg1, out.v, r_v);
    return out;
  }

  static inline vecf Permute3(vecf in) {
    vecf out;
    const vec<typename acle<float>::uint> tbl_swap = acle<float>::tbl_swap();
    svbool_t pg1 = acle<float>::pg1();
    typename acle<float>::vt a_v = svld1(pg1, in.v);
    typename acle<float>::svuint tbl_swap_v = svld1(pg1, tbl_swap.v);
    typename acle<float>::vt r_v = svtbl(a_v, tbl_swap_v);
    svst1(pg1, out.v, r_v);
    return out;
  }

  static inline vecd Permute3(vecd in) {
    return in;
  }

};

struct Rotate{

  template <int n, typename T> static inline vec<T> tRotate(vec<T> in){
    vec<T> out;
    svbool_t pg1 = acle<T>::pg1();
    typename acle<T>::vt a_v = svld1(pg1, in.v);
    typename acle<T>::vt r_v = svext(a_v, a_v, (uint64_t)(n%W<T>::r));
    svst1(pg1, out.v, r_v);
    return out;
  }

  template <typename T>
  static inline vec<T> rotate(vec<T> in, int n){
    switch(n){
    case 0:  return tRotate<0, T>(in);  break;
    case 1:  return tRotate<1, T>(in);  break;
    case 2:  return tRotate<2, T>(in);  break;
    case 3:  return tRotate<3, T>(in);  break;
    case 4:  return tRotate<4, T>(in);  break;
    case 5:  return tRotate<5, T>(in);  break;
    case 6:  return tRotate<6, T>(in);  break;
    case 7:  return tRotate<7, T>(in);  break;

    case 8:  return tRotate<8, T>(in);  break;
    case 9:  return tRotate<9, T>(in);  break;
    case 10: return tRotate<10, T>(in); break;
    case 11: return tRotate<11, T>(in); break;
    case 12: return tRotate<12, T>(in); break;
    case 13: return tRotate<13, T>(in); break;
    case 14: return tRotate<14, T>(in); break;
    case 15: return tRotate<15, T>(in); break;
    default: assert(0);
    }
  }
};

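// rotate() dispatches through a switch because svext requires its shift
// amount to be a compile-time constant; the template parameter of
// tRotate<n> turns the runtime argument into one. Sixteen cases cover the
// largest element count of a 512-bit vector (16 floats), and the
// n % W<T>::r in tRotate keeps the shift legal for the shorter
// double/uint64_t vectors.
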
// tree-based reduction
#define svred(pg, v)\
svaddv(pg, v);

// left-to-right reduction
// #define svred(pg, v)\
// svadda(pg, 0, v)

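// svaddv reduces pairwise in a tree, which reorders the floating-point
// additions but keeps the reduction depth logarithmic; the commented-out
// svadda variant adds strictly left to right, reproducing the scalar
// summation order (useful for bitwise-reproducible results) at the cost
// of a serial dependency chain.
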
template <typename Out_type, typename In_type>
struct Reduce{
  //Need templated class to overload output type
  //General form must generate error if compiled
  inline Out_type operator()(In_type in){
    printf("Error, using wrong Reduce function\n");
    exit(1);
    return 0;
  }
};

//Complex float Reduce
template <>
inline Grid::ComplexF Reduce<Grid::ComplexF, vecf>::operator()(vecf in){
  svbool_t pg1 = acle<float>::pg1();
  svbool_t pg_even = acle<float>::pg_even();
  svbool_t pg_odd  = acle<float>::pg_odd();
  typename acle<float>::vt a_v = svld1(pg1, in.v);
  float a = svred(pg_even, a_v);
  float b = svred(pg_odd, a_v);
  return Grid::ComplexF(a, b);
}

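// The complex Reduce specializations exploit the interleaved
// {re, im, re, im, ...} lane layout: reducing under pg_even sums the real
// lanes, reducing under pg_odd sums the imaginary lanes, and the two
// partial sums form the complex result.
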
//Real float Reduce
template <>
inline Grid::RealF Reduce<Grid::RealF, vecf>::operator()(vecf in){
  svbool_t pg1 = acle<float>::pg1();
  typename acle<float>::vt a_v = svld1(pg1, in.v);
  float a = svred(pg1, a_v);
  return a;
}

//Complex double Reduce
template <>
inline Grid::ComplexD Reduce<Grid::ComplexD, vecd>::operator()(vecd in){
  svbool_t pg1 = acle<double>::pg1();
  svbool_t pg_even = acle<double>::pg_even();
  svbool_t pg_odd  = acle<double>::pg_odd();
  typename acle<double>::vt a_v = svld1(pg1, in.v);
  double a = svred(pg_even, a_v);
  double b = svred(pg_odd, a_v);
  return Grid::ComplexD(a, b);
}

//Real double Reduce
template <>
inline Grid::RealD Reduce<Grid::RealD, vecd>::operator()(vecd in){
  svbool_t pg1 = acle<double>::pg1();
  typename acle<double>::vt a_v = svld1(pg1, in.v);
  double a = svred(pg1, a_v);
  return a;
}

//Integer Reduce
template <>
inline Integer Reduce<Integer, veci>::operator()(veci in){
  svbool_t pg1 = acle<Integer>::pg1();
  typename acle<Integer>::vt a_v = svld1(pg1, in.v);
  Integer a = svred(pg1, a_v);
  return a;
}

#undef svred
#undef vec_imm

NAMESPACE_END(Optimization)

//////////////////////////////////////////////////////////////////////////////////////
// Here assign types

typedef Optimization::vech SIMD_Htype; // Reduced precision type
typedef Optimization::vecf SIMD_Ftype; // Single precision type
typedef Optimization::vecd SIMD_Dtype; // Double precision type
typedef Optimization::veci SIMD_Itype; // Integer type

// prefetch utilities
inline void v_prefetch0(int size, const char *ptr){};
inline void prefetch_HINT_T0(const char *ptr){};

// Function name aliases
typedef Optimization::Vsplat  VsplatSIMD;
typedef Optimization::Vstore  VstoreSIMD;
typedef Optimization::Vset    VsetSIMD;
typedef Optimization::Vstream VstreamSIMD;
template <typename S, typename T> using ReduceSIMD = Optimization::Reduce<S,T>;

// Arithmetic operations
typedef Optimization::Sum            SumSIMD;
typedef Optimization::Sub            SubSIMD;
typedef Optimization::Div            DivSIMD;
typedef Optimization::Mult           MultSIMD;
typedef Optimization::MultComplex    MultComplexSIMD;
typedef Optimization::MultAddComplex MultAddComplexSIMD;
typedef Optimization::MultRealPart   MultRealPartSIMD;
typedef Optimization::MaddRealPart   MaddRealPartSIMD;
typedef Optimization::Conj           ConjSIMD;
typedef Optimization::TimesMinusI    TimesMinusISIMD;
typedef Optimization::TimesI         TimesISIMD;

NAMESPACE_END(Grid)
Grid/simd/Grid_a64fx-fixedsize.h (new file, 769 lines)
@@ -0,0 +1,769 @@
/*************************************************************************************

    Grid physics library, www.github.com/paboyle/Grid

    Source file: Grid_a64fx-fixedsize.h

    Copyright (C) 2020

    Author: Nils Meyer <nils.meyer@ur.de>, Regensburg University

    with support from Arm
        Richard Sandiford <richard.sandiford@arm.com>

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License along
    with this program; if not, write to the Free Software Foundation, Inc.,
    51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.

    See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */

/////////////////////////////////////////////////////
// Using SVE ACLE with fixed-size data types
/////////////////////////////////////////////////////

// gcc 10 features
#if __ARM_FEATURE_SVE_BITS==512
/* gcc 10.0.1 and gcc 10.1 bug using ACLE data types CAS-159553-Y1K4C6
   workaround: use gcc's internal data types, bugfix expected for gcc 10.2

typedef svbool_t    pred __attribute__((arm_sve_vector_bits(512)));
typedef svfloat16_t vech __attribute__((arm_sve_vector_bits(512)));
typedef svfloat32_t vecf __attribute__((arm_sve_vector_bits(512)));
typedef svfloat64_t vecd __attribute__((arm_sve_vector_bits(512)));
typedef svuint32_t  veci __attribute__((arm_sve_vector_bits(512)));
typedef svuint32_t  lutf __attribute__((arm_sve_vector_bits(512))); // LUTs for float
typedef svuint64_t  lutd __attribute__((arm_sve_vector_bits(512))); // LUTs for double
*/
typedef __SVBool_t    pred __attribute__((arm_sve_vector_bits(512)));
typedef __SVFloat16_t vech __attribute__((arm_sve_vector_bits(512)));
typedef __SVFloat32_t vecf __attribute__((arm_sve_vector_bits(512)));
typedef __SVFloat64_t vecd __attribute__((arm_sve_vector_bits(512)));
typedef __SVUint32_t  veci __attribute__((arm_sve_vector_bits(512)));
typedef __SVUint32_t  lutf __attribute__((arm_sve_vector_bits(512))); // LUTs for float
typedef __SVUint64_t  lutd __attribute__((arm_sve_vector_bits(512))); // LUTs for double
#else
#error "Oops. Illegal SVE vector size!?"
#endif /* __ARM_FEATURE_SVE_BITS */

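// __ARM_FEATURE_SVE_BITS together with the arm_sve_vector_bits attribute
// freezes the normally sizeless SVE types to a fixed 512-bit layout, so
// they gain a known sizeof and may appear as members of structs and unions
// (the lookup-table unions below rely on this). The guard in the #else
// branch rejects builds where the compiler was not given a 512-bit SVE
// vector length, e.g. -msve-vector-bits=512.
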
// low-level API
NAMESPACE_BEGIN(Grid);
NAMESPACE_BEGIN(Optimization);

// convenience union types for tables eliminating loads
union ulutf {
  lutf v;
  uint32_t s[16];
};
union ulutd {
  lutd v;
  uint64_t s[8];
};

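// Because the fixed-size types can live in a union, each lookup table can
// be written once as a uint32_t/uint64_t array and read back through the
// other union member, so tbl0(), tbl1() etc. below return a register value
// directly instead of issuing an svld1 from memory, as the generic
// Grid_a64fx-2.h implementation must.
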
template <typename T>
struct acle{};

template <>
struct acle<double>{
  static inline lutd tbl_swap(){
    const ulutd t = { .s = {1, 0, 3, 2, 5, 4, 7, 6} };
    return t.v;
  }
  static inline lutd tbl0(){
    const ulutd t = { .s = {4, 5, 6, 7, 0, 1, 2, 3} };
    return t.v;
  }
  static inline lutd tbl1(){
    const ulutd t = { .s = {2, 3, 0, 1, 6, 7, 4, 5} };
    return t.v;
  }
  static inline lutd tbl_exch1a(){ // Exchange1
    const ulutd t = { .s = {0, 1, 4, 5, 2, 3, 6, 7} };
    return t.v;
  }
  static inline lutd tbl_exch1b(){ // Exchange1
    const ulutd t = { .s = {2, 3, 6, 7, 0, 1, 4, 5} };
    return t.v;
  }
  static inline lutd tbl_exch1c(){ // Exchange1
    const ulutd t = { .s = {4, 5, 0, 1, 6, 7, 2, 3} };
    return t.v;
  }
  static inline pred pg1(){return svptrue_b64();}
  static inline pred pg_even(){return svzip1_b64(svptrue_b64(), svpfalse_b());}
  static inline pred pg_odd() {return svzip1_b64(svpfalse_b(), svptrue_b64());}
  static inline vecd zero(){return svdup_f64(0.);}
};

template <>
struct acle<float>{
  // exchange neighboring elements
  static inline lutf tbl_swap(){
    const ulutf t = { .s = {1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14} };
    return t.v;
  }
  static inline lutf tbl0(){
    const ulutf t = { .s = {8, 9, 10, 11, 12, 13, 14, 15, 0, 1, 2, 3, 4, 5, 6, 7} };
    return t.v;
  }
  static inline lutf tbl1(){
    const ulutf t = { .s = {4, 5, 6, 7, 0, 1, 2, 3, 12, 13, 14, 15, 8, 9, 10, 11} };
    return t.v;
  }
  static inline lutf tbl2(){
    const ulutf t = { .s = {2, 3, 0, 1, 6, 7, 4, 5, 10, 11, 8, 9, 14, 15, 12, 13} };
    return t.v;
  }
  static inline lutf tbl_exch1a(){ // Exchange1
    const ulutf t = { .s = {0, 1, 2, 3, 8, 9, 10, 11, 4, 5, 6, 7, 12, 13, 14, 15} };
    return t.v;
  }
  static inline lutf tbl_exch1b(){ // Exchange1
    const ulutf t = { .s = {4, 5, 6, 7, 12, 13, 14, 15, 0, 1, 2, 3, 8, 9, 10, 11} };
    return t.v;
  }
  static inline lutf tbl_exch1c(){ // Exchange1
    const ulutf t = { .s = {8, 9, 10, 11, 0, 1, 2, 3, 12, 13, 14, 15, 4, 5, 6, 7} };
    return t.v;
  }
  static inline pred pg1(){return svptrue_b32();}
  static inline pred pg_even(){return svzip1_b32(svptrue_b32(), svpfalse_b());}
  static inline pred pg_odd() {return svzip1_b32(svpfalse_b(), svptrue_b32());}
  static inline vecf zero(){return svdup_f32(0.);}
};

template <>
struct acle<uint16_t>{
  static inline pred pg1(){return svptrue_b16();}
  static inline pred pg_even(){return svzip1_b16(svptrue_b16(), svpfalse_b());}
  static inline pred pg_odd() {return svzip1_b16(svpfalse_b(), svptrue_b16());}
  static inline vech zero(){return svdup_f16(0.);}
};

template <>
struct acle<Integer>{
  //static inline svbool_t pg1(){return svptrue_b16();}
  static inline pred pg1(){return svptrue_b32();}
  static inline pred pg_even(){return svzip1_b32(svptrue_b32(), svpfalse_b());}
  static inline pred pg_odd() {return svzip1_b32(svpfalse_b(), svptrue_b32());}
};

// ---------------------------------------------------

struct Vsplat{
  // Complex float
  inline vecf operator()(float a, float b){
    vecf a_v = svdup_f32(a);
    vecf b_v = svdup_f32(b);
    return svzip1(a_v, b_v);
  }
  // Real float
  inline vecf operator()(float a){
    return svdup_f32(a);
  }
  // Complex double
  inline vecd operator()(double a, double b){
    vecd a_v = svdup_f64(a);
    vecd b_v = svdup_f64(b);
    return svzip1(a_v, b_v);
  }
  // Real double
  inline vecd operator()(double a){
    return svdup_f64(a);
  }
  // Integer
  inline veci operator()(Integer a){
    return svdup_u32(a);
  }
};

struct Vstore{
  // Real float
  inline void operator()(vecf a, float *D){
    pred pg1 = acle<float>::pg1();
    svst1(pg1, D, a);
  }
  // Real double
  inline void operator()(vecd a, double *D){
    pred pg1 = acle<double>::pg1();
    svst1(pg1, D, a);
  }
  // Integer
  inline void operator()(veci a, Integer *D){
    pred pg1 = acle<Integer>::pg1();
    svst1(pg1, D, a);
  }
};

struct Vstream{
  // Real float
  inline void operator()(float * a, vecf b){
    pred pg1 = acle<float>::pg1();
    svstnt1(pg1, a, b);
    //svst1(pg1, a, b);
  }
  // Real double
  inline void operator()(double * a, vecd b){
    pred pg1 = acle<double>::pg1();
    svstnt1(pg1, a, b);
    //svst1(pg1, a, b);
  }
};

struct Vset{
  // Complex float
  inline vecf operator()(Grid::ComplexF *a){
    pred pg1 = acle<float>::pg1();
    return svld1(pg1, (float*)a);
  }
  // Complex double
  inline vecd operator()(Grid::ComplexD *a){
    pred pg1 = acle<double>::pg1();
    return svld1(pg1, (double*)a);
  }
  // Real float
  inline vecf operator()(float *a){
    pred pg1 = acle<float>::pg1();
    return svld1(pg1, a);
  }
  // Real double
  inline vecd operator()(double *a){
    pred pg1 = acle<double>::pg1();
    return svld1(pg1, a);
  }
  // Integer
  inline veci operator()(Integer *a){
    pred pg1 = acle<Integer>::pg1();
    return svld1(pg1, a);
  }
};

/////////////////////////////////////////////////////
// Arithmetic operations
/////////////////////////////////////////////////////

struct Sum{
  // Complex/real float
  inline vecf operator()(vecf a, vecf b){
    pred pg1 = acle<float>::pg1();
    return svadd_x(pg1, a, b);
  }
  // Complex/real double
  inline vecd operator()(vecd a, vecd b){
    pred pg1 = acle<double>::pg1();
    return svadd_x(pg1, a, b);
  }
  // Integer
  inline veci operator()(veci a, veci b){
    pred pg1 = acle<Integer>::pg1();
    return svadd_x(pg1, a, b);
  }
};

struct Sub{
  // Complex/real float
  inline vecf operator()(vecf a, vecf b){
    pred pg1 = acle<float>::pg1();
    return svsub_x(pg1, a, b);
  }
  // Complex/real double
  inline vecd operator()(vecd a, vecd b){
    pred pg1 = acle<double>::pg1();
    return svsub_x(pg1, a, b);
  }
  // Integer
  inline veci operator()(veci a, veci b){
    pred pg1 = acle<Integer>::pg1();
    return svsub_x(pg1, a, b);
  }
};

struct Mult{
  // Real float fma
  inline vecf operator()(vecf a, vecf b, vecf c){
    pred pg1 = acle<float>::pg1();
    return svmad_x(pg1, b, c, a);
  }
  // Real double fma
  inline vecd operator()(vecd a, vecd b, vecd c){
    pred pg1 = acle<double>::pg1();
    return svmad_x(pg1, b, c, a);
  }
  // Real float
  inline vecf operator()(vecf a, vecf b){
    pred pg1 = acle<float>::pg1();
    return svmul_x(pg1, a, b);
  }
  // Real double
  inline vecd operator()(vecd a, vecd b){
    pred pg1 = acle<double>::pg1();
    return svmul_x(pg1, a, b);
  }
  // Integer
  inline veci operator()(veci a, veci b){
    pred pg1 = acle<Integer>::pg1();
    return svmul_x(pg1, a, b);
  }
};

struct MultRealPart{
  // Complex float
  inline vecf operator()(vecf a, vecf b){
    pred pg1 = acle<float>::pg1();
    // using FCMLA
    vecf z_v = acle<float>::zero();
    return svcmla_x(pg1, z_v, a, b, 0);
  }
  // Complex double
  inline vecd operator()(vecd a, vecd b){
    pred pg1 = acle<double>::pg1();
    // using FCMLA
    vecd z_v = acle<double>::zero();
    return svcmla_x(pg1, z_v, a, b, 0);
  }
};

struct MaddRealPart{
  // Complex float
  inline vecf operator()(vecf a, vecf b, vecf c){
    pred pg1 = acle<float>::pg1();
    // using FCMLA
    return svcmla_x(pg1, c, a, b, 0);
  }
  // Complex double
  inline vecd operator()(vecd a, vecd b, vecd c){
    pred pg1 = acle<double>::pg1();
    // using FCMLA
    return svcmla_x(pg1, c, a, b, 0);
  }
};

struct MultComplex{
  // Complex a*b
  // Complex float
  inline vecf operator()(vecf a, vecf b){
    pred pg1 = acle<float>::pg1();
    vecf z = acle<float>::zero();
    // using FCMLA
    vecf r_v = svcmla_x(pg1, z, a, b, 0);
    return svcmla_x(pg1, r_v, a, b, 90);
  }
  // Complex double
  inline vecd operator()(vecd a, vecd b){
    pred pg1 = acle<double>::pg1();
    vecd z = acle<double>::zero();
    // using FCMLA
    vecd r_v = svcmla_x(pg1, z, a, b, 0);
    return svcmla_x(pg1, r_v, a, b, 90);
  }
};

struct MultAddComplex{
  // Complex a*b+c
  // Complex float
  inline vecf operator()(vecf a, vecf b, vecf c){
    pred pg1 = acle<float>::pg1();
    // using FCMLA
    vecf r_v = svcmla_x(pg1, c, a, b, 0);
    return svcmla_x(pg1, r_v, a, b, 90);
  }
  // Complex double
  inline vecd operator()(vecd a, vecd b, vecd c){
    pred pg1 = acle<double>::pg1();
    // using FCMLA
    vecd r_v = svcmla_x(pg1, c, a, b, 0);
    return svcmla_x(pg1, r_v, a, b, 90);
  }
};

struct Div{
  // Real float
  inline vecf operator()(vecf a, vecf b){
    pred pg1 = acle<float>::pg1();
    return svdiv_x(pg1, a, b);
  }
  // Real double
  inline vecd operator()(vecd a, vecd b){
    pred pg1 = acle<double>::pg1();
    return svdiv_x(pg1, a, b);
  }
};

struct Conj{
  // Complex float
  inline vecf operator()(vecf a){
    pred pg_odd = acle<float>::pg_odd();
    //return svneg_x(pg_odd, a);  this is unsafe
    return svneg_m(a, pg_odd, a);
  }
  // Complex double
  inline vecd operator()(vecd a){
    pred pg_odd = acle<double>::pg_odd();
    //return svneg_x(pg_odd, a);  this is unsafe
    return svneg_m(a, pg_odd, a);
  }
};

struct TimesMinusI{
  // Complex float
  inline vecf operator()(vecf a, vecf b){
    lutf tbl_swap = acle<float>::tbl_swap();
    pred pg1 = acle<float>::pg1();
    pred pg_odd = acle<float>::pg_odd();

    vecf a_v = svtbl(a, tbl_swap);
    //return svneg_x(pg_odd, a_v);  this is unsafe
    return svneg_m(a_v, pg_odd, a_v);
  }
  // Complex double
  inline vecd operator()(vecd a, vecd b){
    lutd tbl_swap = acle<double>::tbl_swap();
    pred pg1 = acle<double>::pg1();
    pred pg_odd = acle<double>::pg_odd();

    vecd a_v = svtbl(a, tbl_swap);
    //return svneg_x(pg_odd, a_v);  this is unsafe
    return svneg_m(a_v, pg_odd, a_v);
  }
};

struct TimesI{
  // Complex float
  inline vecf operator()(vecf a, vecf b){
    lutf tbl_swap = acle<float>::tbl_swap();
    pred pg1 = acle<float>::pg1();
    pred pg_even = acle<float>::pg_even();

    vecf a_v = svtbl(a, tbl_swap);
    //return svneg_x(pg_even, a_v);  this is unsafe
    return svneg_m(a_v, pg_even, a_v);
  }
  // Complex double
  inline vecd operator()(vecd a, vecd b){
    lutd tbl_swap = acle<double>::tbl_swap();
    pred pg1 = acle<double>::pg1();
    pred pg_even = acle<double>::pg_even();

    vecd a_v = svtbl(a, tbl_swap);
    //return svneg_x(pg_even, a_v);  this is unsafe
    return svneg_m(a_v, pg_even, a_v);
  }
};

struct PrecisionChange {
  static inline vech StoH (vecf sa, vecf sb) {
    pred pg1s = acle<float>::pg1();
    vech ha_v = svcvt_f16_x(pg1s, sa);
    vech hb_v = svcvt_f16_x(pg1s, sb);
    return svuzp1(ha_v, hb_v);
  }
  static inline void HtoS(vech h,vecf &sa,vecf &sb) {
    pred pg1s = acle<float>::pg1();
    vech ha_v = svzip1(h, h);
    vech hb_v = svzip2(h, h);
    sa = svcvt_f32_x(pg1s, ha_v);
    sb = svcvt_f32_x(pg1s, hb_v);
  }
  static inline vecf DtoS (vecd a,vecd b) {
    pred pg1d = acle<double>::pg1();
    vecf sa_v = svcvt_f32_x(pg1d, a);
    vecf sb_v = svcvt_f32_x(pg1d, b);
    return svuzp1(sa_v, sb_v);
  }
  static inline void StoD (vecf s,vecd &a,vecd &b) {
    pred pg1d = acle<double>::pg1();
    vecf sa_v = svzip1(s, s);
    vecf sb_v = svzip2(s, s);
    a = svcvt_f64_x(pg1d, sa_v);
    b = svcvt_f64_x(pg1d, sb_v);
  }
  static inline vech DtoH (vecd a,vecd b,vecd c,vecd d) {
    pred pg1d = acle<double>::pg1();
    pred pg1h = acle<uint16_t>::pg1();
    vech ha_v = svcvt_f16_x(pg1d, a);
    vech hb_v = svcvt_f16_x(pg1d, b);
    vech hc_v = svcvt_f16_x(pg1d, c);
    vech hd_v = svcvt_f16_x(pg1d, d);
    vech hab_v = svuzp1(ha_v, hb_v);
    vech hcd_v = svuzp1(hc_v, hd_v);
    return svuzp1(hab_v, hcd_v);

    /*
    vecf sa,sb;
    sa = DtoS(a,b);
    sb = DtoS(c,d);
    return StoH(sa,sb);
    */
  }
  static inline void HtoD(vech h,vecd &a,vecd &b,vecd &c,vecd &d) {
    pred pg1h = acle<uint16_t>::pg1();
    pred pg1d = acle<double>::pg1();
    vech sa_v = svzip1(h, h);
    vech sb_v = svzip2(h, h);
    vech da_v = svzip1(sa_v, sa_v);
    vech db_v = svzip2(sa_v, sa_v);
    vech dc_v = svzip1(sb_v, sb_v);
    vech dd_v = svzip2(sb_v, sb_v);
    a = svcvt_f64_x(pg1d, da_v);
    b = svcvt_f64_x(pg1d, db_v);
    c = svcvt_f64_x(pg1d, dc_v);
    d = svcvt_f64_x(pg1d, dd_v);

    /*
    vecf sa,sb;
    HtoS(h,sa,sb);
    StoD(sa,a,b);
    StoD(sb,c,d);
    */
  }
};

struct Exchange{
  // float
  static inline void Exchange0(vecf &out1, vecf &out2, vecf in1, vecf in2){
    vecf r1_v = svext(in1, in1, (uint64_t)8u);
    vecf r2_v = svext(in2, in2, (uint64_t)8u);
    out1 = svext(r1_v, in2, (uint64_t)8u);
    out2 = svext(in1, r2_v, (uint64_t)8u);
  }
  static inline void Exchange1(vecf &out1, vecf &out2, vecf in1, vecf in2){
    // this one is tricky; svtrn2q* from SVE2 fits best, but it is not available in SVE1
    // alternative: use 4-el structure; expect translation into 4x ldp + 4x stp -> SFI
    lutf tbl_exch1a = acle<float>::tbl_exch1a();
    lutf tbl_exch1b = acle<float>::tbl_exch1b();
    lutf tbl_exch1c = acle<float>::tbl_exch1c();

    vecf a1_v = svtbl(in1, tbl_exch1a);
    vecf a2_v = svtbl(in2, tbl_exch1b);
    vecf b1_v = svext(a2_v, a1_v, (uint64_t)8u);
    vecf b2_v = svext(a1_v, a2_v, (uint64_t)8u);
    out1 = svtbl(b1_v, tbl_exch1c);
    out2 = svtbl(b2_v, tbl_exch1a);
  }
  static inline void Exchange2(vecf &out1, vecf &out2, vecf in1, vecf in2){
    out1 = (vecf)svtrn1((vecd)in1, (vecd)in2);
    out2 = (vecf)svtrn2((vecd)in1, (vecd)in2);
  }
  static inline void Exchange3(vecf &out1, vecf &out2, vecf in1, vecf in2){
    out1 = svtrn1(in1, in2);
    out2 = svtrn2(in1, in2);
  }

  // double
  static inline void Exchange0(vecd &out1, vecd &out2, vecd in1, vecd in2){
    vecd r1_v = svext(in1, in1, (uint64_t)4u);
    vecd r2_v = svext(in2, in2, (uint64_t)4u);
    out1 = svext(r1_v, in2, (uint64_t)4u);
    out2 = svext(in1, r2_v, (uint64_t)4u);
  }
  static inline void Exchange1(vecd &out1, vecd &out2, vecd in1, vecd in2){
    // this one is tricky; svtrn2q* from SVE2 fits best, but it is not available in SVE1
    // alternative: use 4-el structure; expect translation into 4x ldp + 4x stp -> SFI
    lutd tbl_exch1a = acle<double>::tbl_exch1a();
    lutd tbl_exch1b = acle<double>::tbl_exch1b();
    lutd tbl_exch1c = acle<double>::tbl_exch1c();

    vecd a1_v = svtbl(in1, tbl_exch1a);
    vecd a2_v = svtbl(in2, tbl_exch1b);
    vecd b1_v = svext(a2_v, a1_v, (uint64_t)4u);
    vecd b2_v = svext(a1_v, a2_v, (uint64_t)4u);
    out1 = svtbl(b1_v, tbl_exch1c);
    out2 = svtbl(b2_v, tbl_exch1a);
  }
  static inline void Exchange2(vecd &out1, vecd &out2, vecd in1, vecd in2){
    out1 = svtrn1(in1, in2);
    out2 = svtrn2(in1, in2);
  }
  static inline void Exchange3(vecd &out1, vecd &out2, vecd in1, vecd in2){
    assert(0);
    return;
  }
};

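// --- editor's sketch (not part of the Grid source): scalar reference for
// --- Exchange0 above. Per the comment later in this file ("Al Ah , Bl Bh ->
// --- Al Bl Ah,Bh"), it swaps the upper half of in1 with the lower half of
// --- in2; W is the vector width in elements.
template <typename T, int W>
static inline void exchange0_ref(T *out1, T *out2, const T *in1, const T *in2) {
  for (int i = 0; i < W/2; i++) {
    out1[i]       = in1[i];       // low half of in1
    out1[i + W/2] = in2[i];       // low half of in2
    out2[i]       = in1[i + W/2]; // high half of in1
    out2[i + W/2] = in2[i + W/2]; // high half of in2
  }
}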
#undef VECTOR_FOR

struct Permute{
  // float
  static inline vecf Permute0(vecf in) {
    return svext(in, in, (uint64_t)8u);
  }
  static inline vecf Permute1(vecf in) {
    lutf tbl_swap = acle<float>::tbl1();
    return svtbl(in, tbl_swap);
  }
  static inline vecf Permute2(vecf in) {
    lutf tbl_swap = acle<float>::tbl2();
    return svtbl(in, tbl_swap);
  }
  static inline vecf Permute3(vecf in) {
    lutf tbl_swap = acle<float>::tbl_swap();
    return svtbl(in, tbl_swap);
  }

  // double
  static inline vecd Permute0(vecd in) {
    return svext(in, in, (uint64_t)(8u / 2u));
  }
  static inline vecd Permute1(vecd in) {
    lutd tbl_swap = acle<double>::tbl1();
    return svtbl(in, tbl_swap);
  }
  static inline vecd Permute2(vecd in) {
    lutd tbl_swap = acle<double>::tbl_swap();
    return svtbl(in, tbl_swap);
  }
  static inline vecd Permute3(vecd in) {
    return in;
  }
};

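// --- editor's sketch (not part of the Grid source): PermuteN swaps adjacent
// --- blocks of lanes, with the block size halving as N grows (Permute0 swaps
// --- vector halves, the finest level swaps neighbouring lanes). Scalar form:
template <typename T, int W>
static inline void permute_ref(T *out, const T *in, int block) {
  // block = W/2 for Permute0, W/4 for Permute1, ... down to 1
  for (int i = 0; i < W; i += 2*block)
    for (int j = 0; j < block; j++) {
      out[i+j]       = in[i+j+block];
      out[i+j+block] = in[i+j];
    }
}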
struct Rotate{

  static inline vecf rotate(vecf in, int n){
    switch(n){
    case 0:  return tRotate<0>(in);  break;
    case 1:  return tRotate<1>(in);  break;
    case 2:  return tRotate<2>(in);  break;
    case 3:  return tRotate<3>(in);  break;
    case 4:  return tRotate<4>(in);  break;
    case 5:  return tRotate<5>(in);  break;
    case 6:  return tRotate<6>(in);  break;
    case 7:  return tRotate<7>(in);  break;

    case 8:  return tRotate<8>(in);  break;
    case 9:  return tRotate<9>(in);  break;
    case 10: return tRotate<10>(in); break;
    case 11: return tRotate<11>(in); break;
    case 12: return tRotate<12>(in); break;
    case 13: return tRotate<13>(in); break;
    case 14: return tRotate<14>(in); break;
    case 15: return tRotate<15>(in); break;
    default: assert(0);
    }
  }
  static inline vecd rotate(vecd in, int n){
    switch(n){
    case 0:  return tRotate<0>(in);  break;
    case 1:  return tRotate<1>(in);  break;
    case 2:  return tRotate<2>(in);  break;
    case 3:  return tRotate<3>(in);  break;
    case 4:  return tRotate<4>(in);  break;
    case 5:  return tRotate<5>(in);  break;
    case 6:  return tRotate<6>(in);  break;
    case 7:  return tRotate<7>(in);  break;
    default: assert(0);
    }
  }

  template <int n> static inline vecf tRotate(vecf in){
    return svext(in, in, (uint64_t)n);
  }
  template <int n> static inline vecd tRotate(vecd in){
    return svext(in, in, (uint64_t)n);
  }
};

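// --- editor's note (not part of the Grid source): svext requires a
// --- compile-time element count, hence the switch above dispatching a runtime
// --- 'n' onto the tRotate<n> instantiations; the breaks after return are
// --- unreachable but harmless. A fold-style sketch of the same dispatch:
//   template <int... Ns>
//   vecf rotate_dispatch(vecf in, int n, std::integer_sequence<int, Ns...>) {
//     vecf out = in;
//     (void)std::initializer_list<int>{ (n == Ns ? (out = tRotate<Ns>(in), 0) : 0)... };
//     return out;
//   }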
// tree-based reduction
#define svred(pg, v)\
svaddv(pg, v);

// left-to-right reduction
// #define svred(pg, v)\
// svadda(pg, 0, v)

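// --- editor's note (not part of the Grid source): svaddv is an unordered
// --- tree reduction, while the commented-out svadda is a strictly ordered
// --- left-to-right sum; for floating point the two can differ in the last
// --- bits. Scalar sketch of the two orderings over W active lanes:
static inline float tree_sum(const float *v, int W) {
  if (W == 1) return v[0];
  return tree_sum(v, W/2) + tree_sum(v + W/2, W - W/2); // pairwise/tree order
}
static inline float ordered_sum(const float *v, int W) {
  float a = 0.f;
  for (int i = 0; i < W; i++) a += v[i]; // left-to-right, like svadda
  return a;
}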
template <typename Out_type, typename In_type>
struct Reduce{
  //Need templated class to overload output type
  //General form must generate error if compiled
  inline Out_type operator()(In_type in){
    printf("Error, using wrong Reduce function\n");
    //exit(1);
    return 0;
  }
};
//Complex float Reduce
template <>
inline Grid::ComplexF Reduce<Grid::ComplexF, vecf>::operator()(vecf in){
  pred pg_even = acle<float>::pg_even();
  pred pg_odd  = acle<float>::pg_odd();
  float a = svred(pg_even, in);
  float b = svred(pg_odd, in);
  return Grid::ComplexF(a, b);
}
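// --- editor's sketch (not part of the Grid source): the even/odd predicate
// --- trick above in scalar form; real parts live in even lanes, imaginary
// --- parts in odd lanes, so two masked reductions give one complex number.
static inline Grid::ComplexF reduce_complex_ref(const float *v, int W) {
  float re = 0.f, im = 0.f;
  for (int i = 0; i < W; i += 2) { re += v[i]; im += v[i+1]; }
  return Grid::ComplexF(re, im);
}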
//Real float Reduce
template <>
inline Grid::RealF Reduce<Grid::RealF, vecf>::operator()(vecf in){
  pred pg1 = acle<float>::pg1();
  return svred(pg1, in);
}
//Complex double Reduce
template <>
inline Grid::ComplexD Reduce<Grid::ComplexD, vecd>::operator()(vecd in){
  pred pg_even = acle<double>::pg_even();
  pred pg_odd  = acle<double>::pg_odd();
  double a = svred(pg_even, in);
  double b = svred(pg_odd, in);
  return Grid::ComplexD(a, b);
}
//Real double Reduce
template <>
inline Grid::RealD Reduce<Grid::RealD, vecd>::operator()(vecd in){
  pred pg1 = acle<double>::pg1();
  return svred(pg1, in);
}
//Integer Reduce
template <>
inline Integer Reduce<Integer, veci>::operator()(veci in){
  pred pg1 = acle<Integer>::pg1();
  return svred(pg1, in);
}

#undef svred

NAMESPACE_END(Optimization);

//////////////////////////////////////////////////////////////////////////////////////
// Here assign types

typedef vech SIMD_Htype; // Reduced precision type
typedef vecf SIMD_Ftype; // Single precision type
typedef vecd SIMD_Dtype; // Double precision type
typedef veci SIMD_Itype; // Integer type

// prefetch utilities
inline void v_prefetch0(int size, const char *ptr){};
inline void prefetch_HINT_T0(const char *ptr){};

// Function name aliases
typedef Optimization::Vsplat   VsplatSIMD;
typedef Optimization::Vstore   VstoreSIMD;
typedef Optimization::Vset     VsetSIMD;
typedef Optimization::Vstream  VstreamSIMD;
template <typename S, typename T> using ReduceSIMD = Optimization::Reduce<S,T>;

// Arithmetic operations
typedef Optimization::Sum            SumSIMD;
typedef Optimization::Sub            SubSIMD;
typedef Optimization::Div            DivSIMD;
typedef Optimization::Mult           MultSIMD;
typedef Optimization::MultComplex    MultComplexSIMD;
typedef Optimization::MultAddComplex MultAddComplexSIMD;
typedef Optimization::MultRealPart   MultRealPartSIMD;
typedef Optimization::MaddRealPart   MaddRealPartSIMD;
typedef Optimization::Conj           ConjSIMD;
typedef Optimization::TimesMinusI    TimesMinusISIMD;
typedef Optimization::TimesI         TimesISIMD;

NAMESPACE_END(Grid);
@@ -38,20 +38,47 @@ Author: Peter Boyle <paboyle@ph.ed.ac.uk>
#ifdef GRID_HIP
#include <hip/hip_fp16.h>
#endif
#ifdef GRID_SYCL
namespace Grid {
  typedef struct { uint16_t x;} half;
  typedef struct { half x; half y;} half2;
  typedef struct { float x; float y;} float2;
  typedef struct { double x; double y;} double2;
}
#endif

namespace Grid {

typedef struct Half2_t { half x; half y; } Half2;

#define COALESCE_GRANULARITY ( GEN_SIMD_WIDTH )

template<class pair>
class GpuComplex {
public:
  pair z;
-  typedef decltype(z.x) real;
+  typedef decltype(z.x) Real;
public:
  accelerator_inline GpuComplex() = default;
-  accelerator_inline GpuComplex(real re,real im) { z.x=re; z.y=im; };
+  accelerator_inline GpuComplex(Real re,Real im) { z.x=re; z.y=im; };
  accelerator_inline GpuComplex(const GpuComplex &zz) { z = zz.z;};
  accelerator_inline Real real(void) const { return z.x; };
  accelerator_inline Real imag(void) const { return z.y; };
  accelerator_inline GpuComplex &operator*=(const GpuComplex &r) {
    *this = (*this) * r;
    return *this;
  }
  accelerator_inline GpuComplex &operator+=(const GpuComplex &r) {
    *this = (*this) + r;
    return *this;
  }
  accelerator_inline GpuComplex &operator-=(const GpuComplex &r) {
    *this = (*this) - r;
    return *this;
  }
  friend accelerator_inline GpuComplex operator+(const GpuComplex &lhs,const GpuComplex &rhs) {
    GpuComplex r ;
    r.z.x = lhs.z.x + rhs.z.x;
@@ -125,14 +152,14 @@ inline accelerator GpuVector<N,datum> operator/(const GpuVector<N,datum> l,const
}

constexpr int NSIMD_RealH    = COALESCE_GRANULARITY / sizeof(half);
-constexpr int NSIMD_ComplexH = COALESCE_GRANULARITY / sizeof(half2);
+constexpr int NSIMD_ComplexH = COALESCE_GRANULARITY / sizeof(Half2);
constexpr int NSIMD_RealF    = COALESCE_GRANULARITY / sizeof(float);
constexpr int NSIMD_ComplexF = COALESCE_GRANULARITY / sizeof(float2);
constexpr int NSIMD_RealD    = COALESCE_GRANULARITY / sizeof(double);
constexpr int NSIMD_ComplexD = COALESCE_GRANULARITY / sizeof(double2);
constexpr int NSIMD_Integer  = COALESCE_GRANULARITY / sizeof(Integer);

-typedef GpuComplex<half2 > GpuComplexH;
+typedef GpuComplex<Half2 > GpuComplexH;
typedef GpuComplex<float2 > GpuComplexF;
typedef GpuComplex<double2> GpuComplexD;
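// --- editor's note (not part of the source): the lane counts are plain ratios
// --- of the coalescing unit to the scalar size; the 64-byte unit below is an
// --- assumption for illustration, not taken from the build configuration.
static_assert(64 / sizeof(double)     == 8, "editor's example: NSIMD_RealD with a 64-byte unit");
static_assert(64 / (2*sizeof(double)) == 4, "editor's example: NSIMD_ComplexD with a 64-byte unit");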
@@ -144,16 +171,19 @@ typedef GpuVector<NSIMD_RealD, double > GpuVectorRD;
typedef GpuVector<NSIMD_ComplexD, GpuComplexD > GpuVectorCD;
typedef GpuVector<NSIMD_Integer, Integer > GpuVectorI;

accelerator_inline GpuComplexF timesI(const GpuComplexF &r)     { return(GpuComplexF(-r.imag(),r.real()));}
accelerator_inline GpuComplexD timesI(const GpuComplexD &r)     { return(GpuComplexD(-r.imag(),r.real()));}
accelerator_inline GpuComplexF timesMinusI(const GpuComplexF &r){ return(GpuComplexF(r.imag(),-r.real()));}
accelerator_inline GpuComplexD timesMinusI(const GpuComplexD &r){ return(GpuComplexD(r.imag(),-r.real()));}

accelerator_inline float half2float(half h)
{
  float f;
#ifdef GRID_SIMT
+#if defined(GRID_CUDA) || defined(GRID_HIP)
  f = __half2float(h);
+#else
+  //f = __half2float(h);
-  __half_raw hr(h);
  Grid_half hh;
-  hh.x = hr.x;
+  hh.x = h.x;
  f = sfw_half_to_float(hh);
+#endif
  return f;
@@ -161,13 +191,11 @@ accelerator_inline float half2float(half h)
accelerator_inline half float2half(float f)
{
  half h;
#ifdef GRID_SIMT
+#if defined(GRID_CUDA) || defined(GRID_HIP)
  h = __float2half(f);
#else
  Grid_half hh = sfw_float_to_half(f);
-  __half_raw hr;
-  hr.x = hh.x;
-  h = __half(hr);
+  h.x = hh.x;
#endif
  return h;
}
@@ -523,7 +551,7 @@ namespace Optimization {
  ////////////////////////////////////////////////////////////////////////////////////
  // Single / Half
  ////////////////////////////////////////////////////////////////////////////////////
  static accelerator_inline GpuVectorCH StoH (GpuVectorCF a,GpuVectorCF b) {
    int N = GpuVectorCF::N;
    GpuVectorCH h;
    for(int i=0;i<N;i++) {
@@ -1,6 +1,6 @@
/*************************************************************************************

Grid physics library, www.github.com/paboyle/Grid

Source file: ./lib/simd/Grid_vector_types.h

@@ -73,7 +73,7 @@ accelerator_inline Grid_half sfw_float_to_half(float ff) {
  const FP32 denorm_magic = { ((127 - 15) + (23 - 10) + 1) << 23 };
  unsigned int sign_mask = 0x80000000u;
  Grid_half o;

  o.x = static_cast<unsigned short>(0x0u);
  unsigned int sign = f.u & sign_mask;
  f.u ^= sign;
@@ -93,7 +93,7 @@ accelerator_inline Grid_half sfw_float_to_half(float ff) {
      o.x = static_cast<unsigned short>(f.u - denorm_magic.u);
    } else {
      unsigned int mant_odd = (f.u >> 13) & 1; // resulting mantissa is odd

      // update exponent, rounding bias part 1
      f.u += ((unsigned int)(15 - 127) << 23) + 0xfff;
      // rounding bias part 2
@@ -101,7 +101,7 @@ accelerator_inline Grid_half sfw_float_to_half(float ff) {
      // take the bits!
      o.x = static_cast<unsigned short>(f.u >> 13);
    }
  }

  o.x |= static_cast<unsigned short>(sign >> 16);
  return o;
}
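// --- editor's sketch (not part of the source): the two-part bias above is
// --- round-to-nearest-even on the 13 truncated mantissa bits; 0xfff alone
// --- rounds ties down, and adding back the surviving LSB (mant_odd) breaks
// --- exact ties toward an even result.
static inline unsigned int round_shift13(unsigned int u) {
  unsigned int mant_odd = (u >> 13) & 1; // LSB that survives truncation
  return (u + 0xfffu + mant_odd) >> 13;  // same arithmetic as f.u above
}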
@@ -110,9 +110,63 @@ accelerator_inline Grid_half sfw_float_to_half(float ff) {
#ifdef GPU_VEC
#include "Grid_gpu_vec.h"
#endif
/*
#ifdef GEN
#include "Grid_generic.h"
#endif
*/

#ifdef GEN
#if defined(A64FX) || defined(A64FXFIXEDSIZE) // breakout A64FX SVE ACLE here
#include <arm_sve.h>
#if defined(A64FX) // VLA
#pragma message("building A64FX / SVE ACLE VLA")
#if defined(ARMCLANGCOMPAT)
#pragma message("applying data types patch")
#endif
#include "Grid_a64fx-2.h"
#endif
#if defined(A64FXFIXEDSIZE) // fixed size data types
#pragma message("building for A64FX / SVE ACLE fixed size")
#include "Grid_a64fx-fixedsize.h"
#endif
#else
//#pragma message("building GEN") // generic
#include "Grid_generic.h"
#endif
#endif

#ifdef A64FX
#include <arm_sve.h>
#ifdef __ARM_FEATURE_SVE_BITS
//#pragma message("building A64FX SVE VLS")
#include "Grid_a64fx-fixedsize.h"
#else
#pragma message("building A64FX SVE VLA")
#if defined(ARMCLANGCOMPAT)
#pragma message("applying data types patch")
#endif
#include "Grid_a64fx-2.h"
#endif
#endif

/*
#ifdef A64FXVLA
#pragma message("building A64FX VLA")
#if defined(ARMCLANGCOMPAT)
#pragma message("applying data types patch")
#endif
#include <arm_sve.h>
#include "Grid_a64fx-2.h"
#endif

#ifdef A64FXVLS
#pragma message("building A64FX VLS")
#include <arm_sve.h>
#include "Grid_a64fx-fixedsize.h"
#endif
*/

#ifdef SSE4
#include "Grid_sse4.h"
#endif
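// --- editor's note (not part of the source): the __ARM_FEATURE_SVE_BITS branch
// --- above is driven by the compiler pinning the SVE vector length; the flag
// --- below is an assumption for illustration, not taken from the build system:
// ---   g++ -march=armv8.2-a+sve -msve-vector-bits=512 ...
// --- With that define present, fixed-size ACLE types can be declared as:
#if defined(__ARM_FEATURE_SVE_BITS)
typedef svfloat64_t fixed_vecd __attribute__((arm_sve_vector_bits(__ARM_FEATURE_SVE_BITS)));
#endif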
@@ -163,6 +217,12 @@ template <typename T> struct is_complex : public std::false_type {};
template <> struct is_complex<ComplexD> : public std::true_type {};
template <> struct is_complex<ComplexF> : public std::true_type {};

+template <typename T> struct is_ComplexD : public std::false_type {};
+template <> struct is_ComplexD<ComplexD> : public std::true_type {};
+
+template <typename T> struct is_ComplexF : public std::false_type {};
+template <> struct is_ComplexF<ComplexF> : public std::true_type {};
+
template<typename T, typename V=void> struct is_real : public std::false_type {};
template<typename T> struct is_real<T, typename std::enable_if<std::is_floating_point<T>::value,
  void>::type> : public std::true_type {};
@@ -170,7 +230,7 @@ template<typename T> struct is_real<T, typename std::enable_if<std::is_floating_
template<typename T, typename V=void> struct is_integer : public std::false_type {};
template<typename T> struct is_integer<T, typename std::enable_if<std::is_integral<T>::value,
  void>::type> : public std::true_type {};

template <typename T> using IfReal    = Invoke<std::enable_if<is_real<T>::value, int> >;
template <typename T> using IfComplex = Invoke<std::enable_if<is_complex<T>::value, int> >;
template <typename T> using IfInteger = Invoke<std::enable_if<is_integer<T>::value, int> >;
@@ -223,6 +283,69 @@ public:
    return sizeof(Vector_type) / sizeof(Scalar_type);
  }

+#ifdef ARMCLANGCOMPAT
+  template <class S = Scalar_type>
+  accelerator_inline Grid_simd &operator=(const Grid_simd<typename std::enable_if<!is_complex<S>::value, S>::type, Vector_type> &&rhs) {
+    //v = rhs.v;
+    svst1(svptrue_b8(), (Scalar_type*)this, svld1(svptrue_b8(), (Scalar_type*)&(rhs.v)));
+    return *this;
+  };
+
+  template <class S = Scalar_type>
+  accelerator_inline Grid_simd &operator=(const Grid_simd<typename std::enable_if<!is_complex<S>::value, S>::type, Vector_type> &rhs) {
+    //v = rhs.v;
+    svst1(svptrue_b8(), (Scalar_type*)this, svld1(svptrue_b8(), (Scalar_type*)&(rhs.v)));
+    return *this;
+  };
+
+  /*
+  template <class S = Scalar_type>
+  accelerator_inline Grid_simd &operator=(const Grid_simd<typename std::enable_if<is_complex<S>::value, S>::type, Vector_type> &&rhs) {
+    //v = rhs.v;
+    svst1(svptrue_b8(), (int8_t*)this, svld1(svptrue_b8(), (int8_t*)&(rhs.v)));
+    return *this;
+  };
+
+  template <class S = Scalar_type>
+  accelerator_inline Grid_simd &operator=(const Grid_simd<typename std::enable_if<is_complex<S>::value, S>::type, Vector_type> &rhs) {
+    //v = rhs.v;
+    svst1(svptrue_b8(), (int8_t*)this, svld1(svptrue_b8(), (int8_t*)&(rhs.v)));
+    return *this;
+  };
+  */
+
+  // ComplexF
+  template <class S = Scalar_type>
+  accelerator_inline Grid_simd &operator=(const Grid_simd<typename std::enable_if<is_ComplexF<S>::value, S>::type, Vector_type> &&rhs) {
+    //v = rhs.v;
+    svst1(svptrue_b32(), (float*)this, svld1(svptrue_b32(), (float*)&(rhs.v)));
+    return *this;
+  };
+
+  template <class S = Scalar_type>
+  accelerator_inline Grid_simd &operator=(const Grid_simd<typename std::enable_if<is_ComplexF<S>::value, S>::type, Vector_type> &rhs) {
+    //v = rhs.v;
+    svst1(svptrue_b32(), (float*)this, svld1(svptrue_b32(), (float*)&(rhs.v)));
+    return *this;
+  };
+
+  // ComplexD
+  template <class S = Scalar_type>
+  accelerator_inline Grid_simd &operator=(const Grid_simd<typename std::enable_if<is_ComplexD<S>::value, S>::type, Vector_type> &&rhs) {
+    //v = rhs.v;
+    svst1(svptrue_b64(), (double*)this, svld1(svptrue_b64(), (double*)&(rhs.v)));
+    return *this;
+  };
+
+  template <class S = Scalar_type>
+  accelerator_inline Grid_simd &operator=(const Grid_simd<typename std::enable_if<is_ComplexD<S>::value, S>::type, Vector_type> &rhs) {
+    //v = rhs.v;
+    svst1(svptrue_b64(), (double*)this, svld1(svptrue_b64(), (double*)&(rhs.v)));
+    return *this;
+  };
+
+#else
+
  accelerator_inline Grid_simd &operator=(const Grid_simd &&rhs) {
    v = rhs.v;
    return *this;
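// --- editor's sketch (not part of the source): the ARMCLANGCOMPAT assignments
// --- above copy one vector register's worth of bytes through an explicit
// --- predicated load/store instead of `v = rhs.v`, working around compiler
// --- trouble with SVE types as class members. The bare pattern:
static inline void sve_copy_vl_bytes(void *dst, const void *src) {
  // copies exactly one SVE vector length (VL) of bytes, all lanes active
  svst1(svptrue_b8(), (int8_t *)dst, svld1(svptrue_b8(), (const int8_t *)src));
}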
@@ -232,10 +355,23 @@ public:
    return *this;
  }; // faster than not declaring it and leaving to the compiler

#endif

  accelerator Grid_simd() = default;
-  accelerator_inline Grid_simd(const Grid_simd &rhs) : v(rhs.v){}; // compiles in movaps
-  accelerator_inline Grid_simd(const Grid_simd &&rhs) : v(rhs.v){};
+
+#ifdef ARMCLANGCOMPAT
+  template <class S = Scalar_type>
+  accelerator_inline Grid_simd(const Grid_simd<typename std::enable_if<!is_complex<S>::value, S>::type, Vector_type> &rhs) { this->operator=(rhs); }
+  template <class S = Scalar_type>
+  accelerator_inline Grid_simd(const Grid_simd<typename std::enable_if<!is_complex<S>::value, S>::type, Vector_type> &&rhs) { this->operator=(rhs); }
+  template <class S = Scalar_type>
+  accelerator_inline Grid_simd(const Grid_simd<typename std::enable_if<is_complex<S>::value, S>::type, Vector_type> &rhs) { this->operator=(rhs); }
+  template <class S = Scalar_type>
+  accelerator_inline Grid_simd(const Grid_simd<typename std::enable_if<is_complex<S>::value, S>::type, Vector_type> &&rhs) { this->operator=(rhs); }
+#else
+  accelerator_inline Grid_simd(const Grid_simd &rhs) : v(rhs.v){}; // compiles in movaps
+  accelerator_inline Grid_simd(const Grid_simd &&rhs) : v(rhs.v){};
+#endif
  accelerator_inline Grid_simd(const Real a) { vsplat(*this, Scalar_type(a)); };
  // Enable if complex type
  template <typename S = Scalar_type> accelerator_inline
@@ -258,12 +394,21 @@ public:
  ///////////////////////////////////////////////

  // FIXME -- alias this to an accelerator_inline MAC struct.

+#if defined(A64FX) || defined(A64FXFIXEDSIZE)
+  friend accelerator_inline void mac(Grid_simd *__restrict__ y,
+                                     const Grid_simd *__restrict__ a,
+                                     const Grid_simd *__restrict__ x) {
+    *y = fxmac((*a), (*x), (*y));
+  };
+#else
  friend accelerator_inline void mac(Grid_simd *__restrict__ y,
                                     const Grid_simd *__restrict__ a,
                                     const Grid_simd *__restrict__ x) {
    *y = (*a) * (*x) + (*y);
  };
+#endif

  friend accelerator_inline void mult(Grid_simd *__restrict__ y,
                                      const Grid_simd *__restrict__ l,
                                      const Grid_simd *__restrict__ r) {
@@ -412,7 +557,7 @@ public:
    Grid_simd ret;
    Grid_simd::conv_t conv;
    Grid_simd::scalar_type s;

    conv.v = v.v;
    for (int i = 0; i < Nsimd(); i++) {
      s = conv.s[i];
@@ -441,7 +586,7 @@ public:
    return ret;
  }
  ///////////////////////
  // Exchange
  // Al Ah , Bl Bh -> Al Bl Ah,Bh
  ///////////////////////
  friend accelerator_inline void exchange(Grid_simd &out1,Grid_simd &out2,Grid_simd in1,Grid_simd in2,int n)
@@ -452,20 +597,20 @@ public:
      Optimization::Exchange::Exchange2(out1.v,out2.v,in1.v,in2.v);
    } else if(n==1) {
      Optimization::Exchange::Exchange1(out1.v,out2.v,in1.v,in2.v);
    } else if(n==0) {
      Optimization::Exchange::Exchange0(out1.v,out2.v,in1.v,in2.v);
    }
  }
  friend accelerator_inline void exchange0(Grid_simd &out1,Grid_simd &out2,Grid_simd in1,Grid_simd in2){
    Optimization::Exchange::Exchange0(out1.v,out2.v,in1.v,in2.v);
  }
  friend accelerator_inline void exchange1(Grid_simd &out1,Grid_simd &out2,Grid_simd in1,Grid_simd in2){
    Optimization::Exchange::Exchange1(out1.v,out2.v,in1.v,in2.v);
  }
  friend accelerator_inline void exchange2(Grid_simd &out1,Grid_simd &out2,Grid_simd in1,Grid_simd in2){
    Optimization::Exchange::Exchange2(out1.v,out2.v,in1.v,in2.v);
  }
  friend accelerator_inline void exchange3(Grid_simd &out1,Grid_simd &out2,Grid_simd in1,Grid_simd in2){
    Optimization::Exchange::Exchange3(out1.v,out2.v,in1.v,in2.v);
  }
  ////////////////////////////////////////////////////////////////////
@@ -490,7 +635,7 @@ public:
      int dist = perm & 0xF;
      y = rotate(b, dist);
      return;
    }
    else if(perm==3) permute3(y, b);
    else if(perm==2) permute2(y, b);
    else if(perm==1) permute1(y, b);
@@ -564,29 +709,29 @@ accelerator_inline Grid_simd<S, V> rotate(Grid_simd<S, V> b, int nrot) {
  ret.v = Optimization::Rotate::rotate(b.v, 2 * nrot);
  return ret;
}
template <class S, class V, IfNotComplex<S> =0>
accelerator_inline void rotate( Grid_simd<S,V> &ret,Grid_simd<S,V> b,int nrot)
{
  nrot = nrot % Grid_simd<S,V>::Nsimd();
  ret.v = Optimization::Rotate::rotate(b.v,nrot);
}
template <class S, class V, IfComplex<S> =0>
accelerator_inline void rotate(Grid_simd<S,V> &ret,Grid_simd<S,V> b,int nrot)
{
  nrot = nrot % Grid_simd<S,V>::Nsimd();
  ret.v = Optimization::Rotate::rotate(b.v,2*nrot);
}

template <class S, class V>
accelerator_inline void vbroadcast(Grid_simd<S,V> &ret,const Grid_simd<S,V> &src,int lane){
  S* typepun =(S*) &src;
  vsplat(ret,typepun[lane]);
}
template <class S, class V, IfComplex<S> =0>
accelerator_inline void rbroadcast(Grid_simd<S,V> &ret,const Grid_simd<S,V> &src,int lane){
  S* typepun =(S*) &src;
  ret.v = unary<V>(real(typepun[lane]), VsplatSIMD());
}

@@ -741,6 +886,27 @@ accelerator_inline Grid_simd<S, V> operator*(Grid_simd<S, V> a, Grid_simd<S, V>
  return ret;
};

+// ---------------- A64FX MAC -------------------
+// Distinguish between complex types and others
+#if defined(A64FX) || defined(A64FXFIXEDSIZE)
+template <class S, class V, IfComplex<S> = 0>
+accelerator_inline Grid_simd<S, V> fxmac(Grid_simd<S, V> a, Grid_simd<S, V> b, Grid_simd<S, V> c) {
+  Grid_simd<S, V> ret;
+  ret.v = trinary<V>(a.v, b.v, c.v, MultAddComplexSIMD());
+  return ret;
+};
+
+// Real/Integer types
+template <class S, class V, IfNotComplex<S> = 0>
+accelerator_inline Grid_simd<S, V> fxmac(Grid_simd<S, V> a, Grid_simd<S, V> b, Grid_simd<S, V> c) {
+  Grid_simd<S, V> ret;
+  ret.v = trinary<V>(a.v, b.v, c.v, MultSIMD());
+  return ret;
+};
+#endif
+// ----------------------------------------------

// Distinguish between complex types and others
template <class S, class V, IfComplex<S> = 0>
accelerator_inline Grid_simd<S, V> operator/(Grid_simd<S, V> a, Grid_simd<S, V> b) {
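// --- editor's sketch (not part of the source): for real/integer types the
// --- fxmac above amounts to a fused multiply-add; scalar reference:
template <typename T>
static inline T fxmac_ref(T a, T x, T y) { return a * x + y; } // y' = a*x + y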
@@ -877,7 +1043,7 @@ accelerator_inline typename toComplexMapper<Rsimd>::Complexified toComplex(const

  conv.v = in.v;
  for (int i = 0; i < Rsimd::Nsimd(); i += 2) {
    assert(conv.s[i + 1] == conv.s[i]);
    // trap any cases where real was not duplicated
    // indicating the SIMD grids of real and imag assignment did not correctly
    // match
@@ -919,6 +1085,14 @@ accelerator_inline void precisionChange(vRealD *out,vRealF *in,int nvec)
  for(int m=0;m*2<nvec;m++){
    int n=m*2;
    Optimization::PrecisionChange::StoD(in[m].v,out[n].v,out[n+1].v);
+    // Bug in gcc 10.0.1 and gcc 10.1 using fixed-size SVE ACLE data types CAS-159553-Y1K4C6
+    // function call results in compile-time error:
+    // In function ‘void Grid::precisionChange(Grid::vRealD*, Grid::vRealF*, int)’:
+    // .../Grid_vector_types.h:961:56: error:
+    // cannot bind non-const lvalue reference of type ‘vecd&’ {aka ‘svfloat64_t&’}
+    // to an rvalue of type ‘vecd’ {aka ‘svfloat64_t’}
+    // 961 | Optimization::PrecisionChange::StoD(in[m].v,out[n].v,out[n+1].v);
+    // |                                                        ~~~~~~~^
  }
}
accelerator_inline void precisionChange(vRealD *out,vRealH *in,int nvec)
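// --- editor's note (not part of the source): the gcc 10.0/10.1 failure quoted
// --- above is about binding a fixed-size SVE member through a non-const
// --- reference; a workaround of the usual shape (an assumption, not the
// --- project's fix) would copy through locals:
//   vecd t0 = out[n].v, t1 = out[n+1].v;
//   Optimization::PrecisionChange::StoD(in[m].v, t0, t1);
//   out[n].v = t0; out[n+1].v = t1;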
@@ -125,14 +125,6 @@ accelerator_inline Grid_simd<S, V> sqrt(const Grid_simd<S, V> &r) {
  return SimdApply(SqrtRealFunctor<S>(), r);
}
-template <class S, class V>
-accelerator_inline Grid_simd<S, V> rsqrt(const Grid_simd<S, V> &r) {
-  return SimdApply(RSqrtRealFunctor<S>(), r);
-}
-template <class Scalar>
-accelerator_inline Scalar rsqrt(const Scalar &r) {
-  return (RSqrtRealFunctor<Scalar>(), r);
-}
template <class S, class V>
accelerator_inline Grid_simd<S, V> cos(const Grid_simd<S, V> &r) {
  return SimdApply(CosRealFunctor<S>(), r);
}
@@ -93,6 +93,11 @@ accelerator_inline ComplexF pow(const ComplexF& r,RealF y){ return(std::pow(r,y)
using std::abs;
using std::pow;
using std::sqrt;
+using std::log;
+using std::exp;
+using std::sin;
+using std::cos;

accelerator_inline RealF conjugate(const RealF & r){ return r; }
accelerator_inline RealD conjugate(const RealD & r){ return r; }
@@ -143,10 +148,14 @@ accelerator_inline void sub (ComplexF * __restrict__ y,const ComplexF * __restri
accelerator_inline void add (ComplexF * __restrict__ y,const ComplexF * __restrict__ l,const ComplexF *__restrict__ r){ *y = (*l) + (*r); }

//conjugate already supported for complex
-accelerator_inline ComplexF timesI(const ComplexF &r)     { return(r*ComplexF(0.0,1.0));}
-accelerator_inline ComplexD timesI(const ComplexD &r)     { return(r*ComplexD(0.0,1.0));}
-accelerator_inline ComplexF timesMinusI(const ComplexF &r){ return(r*ComplexF(0.0,-1.0));}
-accelerator_inline ComplexD timesMinusI(const ComplexD &r){ return(r*ComplexD(0.0,-1.0));}
+accelerator_inline ComplexF timesI(const ComplexF &r)     { return(ComplexF(-r.imag(),r.real()));}
+accelerator_inline ComplexD timesI(const ComplexD &r)     { return(ComplexD(-r.imag(),r.real()));}
+accelerator_inline ComplexF timesMinusI(const ComplexF &r){ return(ComplexF(r.imag(),-r.real()));}
+accelerator_inline ComplexD timesMinusI(const ComplexD &r){ return(ComplexD(r.imag(),-r.real()));}
+//accelerator_inline ComplexF timesI(const ComplexF &r)     { return(r*ComplexF(0.0,1.0));}
+//accelerator_inline ComplexD timesI(const ComplexD &r)     { return(r*ComplexD(0.0,1.0));}
+//accelerator_inline ComplexF timesMinusI(const ComplexF &r){ return(r*ComplexF(0.0,-1.0));}
+//accelerator_inline ComplexD timesMinusI(const ComplexD &r){ return(r*ComplexD(0.0,-1.0));}

// define projections to real and imaginary parts
accelerator_inline ComplexF projReal(const ComplexF &r){return( ComplexF(r.real(), 0.0));}
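// --- editor's note (not part of the source): the rewrite above is just the
// --- expanded complex product r*(0+1i) = (re*0 - im*1) + (re*1 + im*0)i
// --- = (-im, re), so timesI needs only a swap and a negation instead of a
// --- full complex multiply; timesMinusI is the mirror image.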
@@ -7,20 +7,20 @@ template<class vobj>
class SimpleCompressor {
public:
  void Point(int) {};
-  accelerator_inline int CommDatumSize(void) { return sizeof(vobj); }
-  accelerator_inline bool DecompressionStep(void) { return false; }
-  template<class cobj> accelerator_inline void Compress(cobj *buf,int o,const cobj &in) { buf[o]=in; }
-  accelerator_inline void Exchange(vobj *mp,vobj *vp0,vobj *vp1,Integer type,Integer o){
+  accelerator_inline int CommDatumSize(void) const { return sizeof(vobj); }
+  accelerator_inline bool DecompressionStep(void) const { return false; }
+  template<class cobj> accelerator_inline void Compress(cobj *buf,int o,const cobj &in) const { buf[o]=in; }
+  accelerator_inline void Exchange(vobj *mp,vobj *vp0,vobj *vp1,Integer type,Integer o) const {
    exchange(mp[2*o],mp[2*o+1],vp0[o],vp1[o],type);
  }
-  accelerator_inline void Decompress(vobj *out,vobj *in, int o){ assert(0); }
+  accelerator_inline void Decompress(vobj *out,vobj *in, int o) const { assert(0); }
  accelerator_inline void CompressExchange(vobj *out0,vobj *out1,const vobj *in,
-                                           int j,int k, int m,int type){
+                                           int j,int k, int m,int type) const {
    exchange(out0[j],out1[j],in[k],in[m],type);
  }
  // For cshift. Cshift should drop compressor coupling altogether
  // because I had to decouple the code from the Stencil anyway
-  accelerator_inline vobj operator() (const vobj &arg) {
+  accelerator_inline vobj operator() (const vobj &arg) const {
    return arg;
  }
};
@@ -1,6 +1,6 @@
/*************************************************************************************

Grid physics library, www.github.com/paboyle/Grid

Source file: ./lib/Stencil.h

@@ -41,13 +41,13 @@
// Stencil based code will exchange haloes and use a table lookup for neighbours.
// This will be done with generality to allow easier efficient implementations.
// Overlap of comms and compute is enabled by tabulating off-node connected,
//
// Generic services
// 0) Prebuild neighbour tables
// 1) Compute sizes of all haloes/comms buffers; allocate them.
// 2) Gather all faces, and communicate.
// 3) Loop over result sites, giving nbr index/offnode info for each
//
//////////////////////////////////////////////////////////////////////////////////////////

NAMESPACE_BEGIN(Grid);
@@ -59,10 +59,10 @@ NAMESPACE_BEGIN(Grid);
void Gather_plane_table_compute (GridBase *grid,int dimension,int plane,int cbmask,
                                 int off,Vector<std::pair<int,int> > & table);

template<class vobj,class cobj,class compressor>
void Gather_plane_simple_table (Vector<std::pair<int,int> >& table,const Lattice<vobj> &rhs,cobj *buffer,compressor &compress, int off,int so) __attribute__((noinline));

template<class vobj,class cobj,class compressor>
void Gather_plane_simple_table (Vector<std::pair<int,int> >& table,const Lattice<vobj> &rhs,cobj *buffer,compressor &compress, int off,int so)
{
  int num=table.size();
@@ -94,13 +94,13 @@ void Gather_plane_exchange_table(Vector<std::pair<int,int> >& table,const Lattic
{
  assert( (table.size()&0x1)==0);
  int num=table.size()/2;
  int so = plane*rhs.Grid()->_ostride[dimension]; // base offset for start of plane

  auto rhs_v = rhs.View(AcceleratorRead);
  auto p0=&pointers[0][0];
  auto p1=&pointers[1][0];
  auto tp=&table[0];
  accelerator_forNB(j, num, 1, {
    compress.CompressExchange(p0,p1, &rhs_v[0], j,
                              so+tp[2*j ].second,
                              so+tp[2*j+1].second,
@@ -109,20 +109,20 @@ void Gather_plane_exchange_table(Vector<std::pair<int,int> >& table,const Lattic
  rhs_v.ViewClose();
}

struct StencilEntry {
#ifdef GRID_CUDA
  uint64_t _byte_offset;       // 8 bytes
  uint32_t _offset;            // 4 bytes
#else
  uint64_t _byte_offset;       // 8 bytes
  uint64_t _offset;            // 8 bytes (8 ever required?)
#endif
  uint8_t _is_local;           // 1 bytes
  uint8_t _permute;            // 1 bytes
  uint8_t _around_the_world;   // 1 bytes
  uint8_t _pad;                // 1 bytes
};
// Could pack to 8 + 4 + 4 = 128 bit and use
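// --- editor's note (not part of the source): on the GRID_CUDA branch the
// --- entry payload is 8 + 4 + 4*1 = 16 bytes, which is what the "pack to
// --- 128 bit" remark above is pointing at:
static_assert(sizeof(uint64_t) + sizeof(uint32_t) + 4*sizeof(uint8_t) == 16,
              "editor's example: StencilEntry payload on the GRID_CUDA branch");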
template<class vobj,class cobj,class Parameters>
class CartesianStencilAccelerator {
@@ -147,20 +147,20 @@ class CartesianStencilAccelerator {
  cobj* u_recv_buf_p;
  cobj* u_send_buf_p;

-  accelerator_inline cobj *CommBuf(void) { return u_recv_buf_p; }
+  accelerator_inline cobj *CommBuf(void) const { return u_recv_buf_p; }

-  accelerator_inline int GetNodeLocal(int osite,int point) {
+  accelerator_inline int GetNodeLocal(int osite,int point) const {
    return this->_entries_p[point+this->_npoints*osite]._is_local;
  }
-  accelerator_inline StencilEntry * GetEntry(int &ptype,int point,int osite) {
+  accelerator_inline StencilEntry * GetEntry(int &ptype,int point,int osite) const {
    ptype = this->_permute_type[point]; return & this->_entries_p[point+this->_npoints*osite];
  }

-  accelerator_inline uint64_t GetInfo(int &ptype,int &local,int &perm,int point,int ent,uint64_t base) {
+  accelerator_inline uint64_t GetInfo(int &ptype,int &local,int &perm,int point,int ent,uint64_t base) const {
    uint64_t cbase = (uint64_t)&u_recv_buf_p[0];
    local = this->_entries_p[ent]._is_local;
    perm  = this->_entries_p[ent]._permute;
    if (perm) ptype = this->_permute_type[point];
    if (local) {
      return base + this->_entries_p[ent]._byte_offset;
    } else {
@@ -168,21 +168,21 @@ class CartesianStencilAccelerator {
    }
  }

-  accelerator_inline uint64_t GetPFInfo(int ent,uint64_t base) {
+  accelerator_inline uint64_t GetPFInfo(int ent,uint64_t base) const {
    uint64_t cbase = (uint64_t)&u_recv_buf_p[0];
    int local = this->_entries_p[ent]._is_local;
    if (local) return base  + this->_entries_p[ent]._byte_offset;
    else       return cbase + this->_entries_p[ent]._byte_offset;
  }

-  accelerator_inline void iCoorFromIindex(Coordinate &coor,int lane)
+  accelerator_inline void iCoorFromIindex(Coordinate &coor,int lane) const
  {
    Lexicographic::CoorFromIndex(coor,lane,this->_simd_layout);
  }
};

template<class vobj,class cobj,class Parameters>
class CartesianStencilView : public CartesianStencilAccelerator<vobj,cobj,Parameters>
{
private:
  int *closed;
@@ -192,7 +192,7 @@ class CartesianStencilView : public CartesianStencilAccelerator<vobj,cobj,Parame
  // default copy constructor
  CartesianStencilView (const CartesianStencilView &refer_to_me) = default;

  CartesianStencilView (const CartesianStencilAccelerator<vobj,cobj,Parameters> &refer_to_me,ViewMode _mode)
    : CartesianStencilAccelerator<vobj,cobj,Parameters>(refer_to_me),
      cpu_ptr(this->_entries_p),
      mode(_mode)
@@ -201,14 +201,14 @@ class CartesianStencilView : public CartesianStencilAccelerator<vobj,cobj,Parame
    MemoryManager::ViewOpen(this->_entries_p,
                            this->_npoints*this->_osites*sizeof(StencilEntry),
                            mode,
                            AdviseDefault);
  }

  void ViewClose(void)
  {
    MemoryManager::ViewClose(this->cpu_ptr,this->mode);
  }

};

////////////////////////////////////////
@@ -245,12 +245,12 @@ public:
    cobj * mpi_p;
    Integer buffer_size;
  };

protected:
  GridBase * _grid;

public:
  GridBase *Grid(void) const { return _grid; }

  ////////////////////////////////////////////////////////////////////////
@@ -264,12 +264,12 @@ public:
    View_type accessor(*( (View_type *) this),mode);
    return accessor;
  }

  int face_table_computed;
  std::vector<Vector<std::pair<int,int> > > face_table ;
  Vector<int> surface_list;

-  Vector<StencilEntry> _entries; // Resident in managed memory
+  stencilVector<StencilEntry> _entries; // Resident in managed memory
  std::vector<Packet> Packets;
  std::vector<Merge> Mergers;
  std::vector<Merge> MergersSHM;
@@ -314,7 +314,7 @@ public:
  ////////////////////////
  // Stencil query
  ////////////////////////
  inline int SameNode(int point) {

    int dimension    = this->_directions[point];
    int displacement = this->_distances[point];
@@ -338,7 +338,7 @@ public:
    // FIXME this logic needs to be sorted for three link term
    // assert( (displacement==1) || (displacement==-1));
    // Present hack only works for >= 4^4 subvol per node
    _grid->ShiftedRanks(dimension,nbr_proc,xmit_to_rank,recv_from_rank);

    void *shm = (void *) _grid->ShmBufferTranslate(recv_from_rank,this->u_recv_buf_p);

@@ -378,7 +378,7 @@ public:
      comm_time_thr[mythread] += comm_leave_thr[mythread] - comm_enter_thr[mythread];
    }
  }

  void CollateThreads(void)
  {
    int nthreads = CartesianCommunicator::nCommThreads;
@@ -402,7 +402,7 @@ public:
      if ( (t0 > 0.0) && ( t0 < first ) ) first = t0; // min time seen

      if ( t1 > last ) last = t1; // max time seen

    }
    commtime+= last-first;
  }
@@ -464,30 +464,30 @@ public:
    this->CommunicateBegin(reqs);
    this->CommunicateComplete(reqs);
  }

  template<class compressor> void HaloExchange(const Lattice<vobj> &source,compressor &compress)
  {
    Prepare();
    HaloGather(source,compress);
    Communicate();
    CommsMergeSHM(compress);
    CommsMerge(compress);
  }

  template<class compressor> int HaloGatherDir(const Lattice<vobj> &source,compressor &compress,int point,int & face_idx)
  {
    int dimension    = this->_directions[point];
    int displacement = this->_distances[point];

    int fd = _grid->_fdimensions[dimension];
    int rd = _grid->_rdimensions[dimension];

    // Map to always positive shift modulo global full dimension.
    int shift = (displacement+fd)%fd;

    assert (source.Checkerboard()== this->_checkerboard);

    // the permute type
    int simd_layout = _grid->_simd_layout[dimension];
    int comm_dim    = _grid->_processors[dimension] >1 ;
@@ -505,7 +505,7 @@ public:
      auto tmp = GatherSimd(source,dimension,shift,0x3,compress,face_idx);
      is_same_node = is_same_node && tmp;
      splicetime+=usecond();
    } else {
      nosplicetime-=usecond();
      auto tmp = Gather(source,dimension,shift,0x3,compress,face_idx);
      is_same_node = is_same_node && tmp;
@@ -531,7 +531,7 @@ public:
    }
    return is_same_node;
  }

  template<class compressor>
  void HaloGather(const Lattice<vobj> &source,compressor &compress)
  {
@@ -542,9 +542,9 @@ public:
    // conformable(source.Grid(),_grid);
    assert(source.Grid()==_grid);
    halogtime-=usecond();

    u_comm_offset=0;

    // Gather all comms buffers
    int face_idx=0;
    for(int point = 0 ; point < this->_npoints; point++) {
@@ -557,16 +557,16 @@ public:
    accelerator_barrier();
    halogtime+=usecond();
  }

  /////////////////////////
  // Implementation
  /////////////////////////
  void Prepare(void)
  {
    Decompressions.resize(0);
    DecompressionsSHM.resize(0);
    Mergers.resize(0);
    MergersSHM.resize(0);
    Packets.resize(0);
    calls++;
  }
@@ -595,22 +595,22 @@ public:
    mv.push_back(m);
  }
  template<class decompressor> void CommsMerge(decompressor decompress) {
    CommsMerge(decompress,Mergers,Decompressions);
  }
  template<class decompressor> void CommsMergeSHM(decompressor decompress) {
    mpi3synctime-=usecond();
    _grid->StencilBarrier();// Synch shared memory on a single nodes
    mpi3synctime+=usecond();
    shmmergetime-=usecond();
    CommsMerge(decompress,MergersSHM,DecompressionsSHM);
    shmmergetime+=usecond();
  }

  template<class decompressor>
  void CommsMerge(decompressor decompress,std::vector<Merge> &mm,std::vector<Decompress> &dd) {

    mergetime-=usecond();
    for(int i=0;i<mm.size();i++){
      auto mp = &mm[i].mpointer[0];
      auto vp0= &mm[i].vpointers[0][0];
      auto vp1= &mm[i].vpointers[1][0];
@@ -622,7 +622,7 @@ public:
    mergetime+=usecond();

    decompresstime-=usecond();
    for(int i=0;i<dd.size();i++){
      auto kp = dd[i].kernel_p;
      auto mp = dd[i].mpi_p;
      accelerator_forNB(o,dd[i].buffer_size,1,{
@@ -638,7 +638,7 @@ public:
    for(int i=0;i<_entries.size();i++){
      if( _entries[i]._is_local ) {
        _entries[i]._byte_offset = _entries[i]._offset*sizeof(vobj);
      } else {
        _entries[i]._byte_offset = _entries[i]._offset*sizeof(cobj);
      }
    }
@@ -653,15 +653,15 @@ public:
    for(int point=0;point<this->_npoints;point++){
      this->same_node[point] = this->SameNode(point);
    }

    for(int site = 0 ;site< vol4;site++){
      int local = 1;
      for(int point=0;point<this->_npoints;point++){
        if( (!this->GetNodeLocal(site*Ls,point)) && (!this->same_node[point]) ){
          local = 0;
        }
      }
      if(local == 0) {
        surface_list.push_back(site);
      }
    }
@@ -672,11 +672,11 @@ public:
                    int checkerboard,
                    const std::vector<int> &directions,
                    const std::vector<int> &distances,
                    Parameters p)
    : shm_bytes_thr(npoints),
      comm_bytes_thr(npoints),
      comm_enter_thr(npoints),
      comm_leave_thr(npoints),
      comm_time_thr(npoints)
  {
    face_table_computed=0;
@@ -687,7 +687,7 @@ public:
    /////////////////////////////////////
    this->_npoints = npoints;
    this->_comm_buf_size.resize(npoints),
    this->_permute_type.resize(npoints),
    this->_simd_layout = _grid->_simd_layout; // copy simd_layout to give access to Accelerator Kernels
    this->_directions  = StencilVector(directions);
    this->_distances   = StencilVector(distances);
@@ -697,24 +697,24 @@ public:
    surface_list.resize(0);

    this->_osites = _grid->oSites();

    _entries.resize(this->_npoints* this->_osites);
    this->_entries_p = &_entries[0];
    for(int ii=0;ii<npoints;ii++){

      int i = ii; // reverse direction to get SIMD comms done first
      int point = i;

      int dimension    = directions[i];
      int displacement = distances[i];
      int shift = displacement;

      int fd = _grid->_fdimensions[dimension];
      int rd = _grid->_rdimensions[dimension];
      this->_permute_type[point]=_grid->PermuteType(dimension);

      this->_checkerboard = checkerboard;

      //////////////////////////
      // the permute type
      //////////////////////////
@@ -724,25 +724,25 @@ public:
      int rotate_dim = _grid->_simd_layout[dimension]>2;

      assert ( (rotate_dim && comm_dim) == false) ; // Do not think spread out is supported

      int sshift[2];

      //////////////////////////
      // Underlying approach. For each local site build
      // up a table containing the npoint "neighbours" and whether they
      // live in lattice or a comms buffer.
      //////////////////////////
      if ( !comm_dim ) {
        sshift[0] = _grid->CheckerBoardShiftForCB(this->_checkerboard,dimension,shift,Even);
        sshift[1] = _grid->CheckerBoardShiftForCB(this->_checkerboard,dimension,shift,Odd);

        if ( sshift[0] == sshift[1] ) {
          Local(point,dimension,shift,0x3);
        } else {
          Local(point,dimension,shift,0x1);// if checkerboard is unfavourable take two passes
          Local(point,dimension,shift,0x2);// both with block stride loop iteration
        }
      } else {
        // All permute extract done in comms phase prior to Stencil application
        // So tables are the same whether comm_dim or splice_dim
        sshift[0] = _grid->CheckerBoardShiftForCB(this->_checkerboard,dimension,shift,Even);
@@ -784,23 +784,23 @@ public:
    int ld = _grid->_ldimensions[dimension];
    int gd = _grid->_gdimensions[dimension];
    int ly = _grid->_simd_layout[dimension];

    // Map to always positive shift modulo global full dimension.
    int shift = (shiftpm+fd)%fd;

    // the permute type
    int permute_dim =_grid->PermuteDim(dimension);

    for(int x=0;x<rd;x++){

      // int o = 0;
      int bo = x * _grid->_ostride[dimension];

      int cb= (cbmask==0x2)? Odd : Even;

      int sshift = _grid->CheckerBoardShiftForCB(this->_checkerboard,dimension,shift,cb);
      int sx = (x+sshift)%rd;

      int wraparound=0;
      if ( (shiftpm==-1) && (sx>x) ) {
        wraparound = 1;
@@ -808,7 +808,7 @@ public:
      if ( (shiftpm== 1) && (sx<x) ) {
        wraparound = 1;
      }

      int permute_slice=0;
      if(permute_dim){
        int wrap = sshift/rd; wrap=wrap % ly; // but it is local anyway
@@ -816,66 +816,66 @@ public:
        if ( x< rd-num ) permute_slice=wrap;
        else permute_slice = (wrap+1)%ly;
      }

      CopyPlane(point,dimension,x,sx,cbmask,permute_slice,wraparound);

    }
  }

  void Comms (int point,int dimension,int shiftpm,int cbmask)
  {
    GridBase *grid=_grid;
    const int Nsimd = grid->Nsimd();

    int fd = _grid->_fdimensions[dimension];
    int ld = _grid->_ldimensions[dimension];
    int rd = _grid->_rdimensions[dimension];
    int pd = _grid->_processors[dimension];
    int simd_layout = _grid->_simd_layout[dimension];
    int comm_dim    = _grid->_processors[dimension] >1 ;

    assert(comm_dim==1);
    int shift = (shiftpm + fd) %fd;
    assert(shift>=0);
    assert(shift<fd);

    // done in reduced dims, so SIMD factored
    int buffer_size = _grid->_slice_nblock[dimension]*_grid->_slice_block[dimension];

    this->_comm_buf_size[point] = buffer_size; // Size of _one_ plane. Multiple planes may be gathered and
                                               // send to one or more remote nodes.

    int cb= (cbmask==0x2)? Odd : Even;
    int sshift= _grid->CheckerBoardShiftForCB(this->_checkerboard,dimension,shift,cb);

    for(int x=0;x<rd;x++){

      int permute_type=grid->PermuteType(dimension);

      int sx = (x+sshift)%rd;

      int offnode = 0;
      if ( simd_layout > 1 ) {

        for(int i=0;i<Nsimd;i++){

          int inner_bit = (Nsimd>>(permute_type+1));
          int ic= (i&inner_bit)? 1:0;
          int my_coor  = rd*ic + x;
          int nbr_coor = my_coor+sshift;
          int nbr_proc = ((nbr_coor)/ld) % pd;// relative shift in processors

          if ( nbr_proc ) {
            offnode =1;
          }
        }

      } else {
        int comm_proc = ((x+sshift)/rd)%pd;
        offnode = (comm_proc!= 0);
      }

      int wraparound=0;
      if ( (shiftpm==-1) && (sx>x) && (grid->_processor_coor[dimension]==0) ) {
        wraparound = 1;
@@ -884,24 +884,24 @@ public:
        wraparound = 1;
      }
      if (!offnode) {

        int permute_slice=0;
        CopyPlane(point,dimension,x,sx,cbmask,permute_slice,wraparound);

      } else {

        int words = buffer_size;
        if (cbmask != 0x3) words=words>>1;

        // int rank = grid->_processor;
        // int recv_from_rank;
        // int xmit_to_rank;

        int unified_buffer_offset = _unified_buffer_size;
        _unified_buffer_size += words;

        ScatterPlane(point,dimension,x,cbmask,unified_buffer_offset,wraparound); // permute/extract/merge is done in comms phase

      }
    }
  }
@@ -909,13 +909,13 @@ public:
  void CopyPlane(int point, int dimension,int lplane,int rplane,int cbmask,int permute,int wrap)
  {
    int rd = _grid->_rdimensions[dimension];

    if ( !_grid->CheckerBoarded(dimension) ) {

      int o  = 0;                                  // relative offset to base within plane
      int ro = rplane*_grid->_ostride[dimension];  // base offset for start of plane
      int lo = lplane*_grid->_ostride[dimension];  // offset in buffer

      // Simple block stride gather of SIMD objects
      for(int n=0;n<_grid->_slice_nblock[dimension];n++){
        for(int b=0;b<_grid->_slice_block[dimension];b++){
@@ -927,18 +927,18 @@ public:
        }
        o +=_grid->_slice_stride[dimension];
      }

    } else {

      int ro = rplane*_grid->_ostride[dimension];  // base offset for start of plane
      int lo = lplane*_grid->_ostride[dimension];  // base offset for start of plane
      int o  = 0;                                  // relative offset to base within plane

      for(int n=0;n<_grid->_slice_nblock[dimension];n++){
        for(int b=0;b<_grid->_slice_block[dimension];b++){

          int ocb=1<<_grid->CheckerBoardFromOindex(o+b);

          if ( ocb&cbmask ) {
            int idx = point+(lo+o+b)*this->_npoints;
            _entries[idx]._offset =ro+o+b;
@@ -946,24 +946,24 @@ public:
            _entries[idx]._permute=permute;
            _entries[idx]._around_the_world=wrap;
          }

        }
        o +=_grid->_slice_stride[dimension];
      }

    }
  }
  // Routine builds up integer table for each site in _offsets, _is_local, _permute
  void ScatterPlane (int point,int dimension,int plane,int cbmask,int offset, int wrap)
  {
    int rd = _grid->_rdimensions[dimension];

    if ( !_grid->CheckerBoarded(dimension) ) {

      int so = plane*_grid->_ostride[dimension];   // base offset for start of plane
      int o  = 0;                                  // relative offset to base within plane
      int bo = 0;                                  // offset in buffer

      // Simple block stride gather of SIMD objects
      for(int n=0;n<_grid->_slice_nblock[dimension];n++){
        for(int b=0;b<_grid->_slice_block[dimension];b++){
@ -975,16 +975,16 @@ public:
|
||||
}
|
||||
o +=_grid->_slice_stride[dimension];
|
||||
}
|
||||
|
||||
} else {
|
||||
|
||||
int so = plane*_grid->_ostride[dimension]; // base offset for start of plane
|
||||
|
||||
} else {
|
||||
|
||||
int so = plane*_grid->_ostride[dimension]; // base offset for start of plane
|
||||
int o = 0; // relative offset to base within plane
|
||||
int bo = 0; // offset in buffer
|
||||
|
||||
|
||||
for(int n=0;n<_grid->_slice_nblock[dimension];n++){
|
||||
for(int b=0;b<_grid->_slice_block[dimension];b++){
|
||||
|
||||
|
||||
int ocb=1<<_grid->CheckerBoardFromOindex(o+b);// Could easily be a table lookup
|
||||
if ( ocb & cbmask ) {
|
||||
int idx = point+(so+o+b)*this->_npoints;
|
||||
@ -998,16 +998,16 @@ public:
|
||||
}
|
||||
}
|
||||
}
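A small sketch (hypothetical helper) of the checkerboard masking used in both table builders above: a site's parity is encoded one-hot (Even -> 0x1, Odd -> 0x2) and tested against cbmask, so 0x3 keeps every site and 0x1 or 0x2 keep half a plane.

inline bool site_selected(int site_cb /* 0=Even, 1=Odd */, int cbmask) {
  int ocb = 1 << site_cb;      // one-hot parity, as CheckerBoardFromOindex feeds it
  return (ocb & cbmask) != 0;  // cbmask==0x3 selects both checkerboards
}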

  template<class compressor>
  int Gather(const Lattice<vobj> &rhs,int dimension,int shift,int cbmask,compressor & compress,int &face_idx)
  {
    typedef typename cobj::vector_type vector_type;
    typedef typename cobj::scalar_type scalar_type;

    assert(rhs.Grid()==_grid);
    // conformable(_grid,rhs.Grid());

    int fd = _grid->_fdimensions[dimension];
    int rd = _grid->_rdimensions[dimension];
    int pd = _grid->_processors[dimension];
@@ -1019,37 +1019,37 @@ public:
    assert(shift<fd);

    int buffer_size = _grid->_slice_nblock[dimension]*_grid->_slice_block[dimension];

    int cb    = (cbmask==0x2)? Odd : Even;
    int sshift= _grid->CheckerBoardShiftForCB(rhs.Checkerboard(),dimension,shift,cb);

    int shm_receive_only = 1;
    for(int x=0;x<rd;x++){

      int sx = (x+sshift)%rd;
      int comm_proc = ((x+sshift)/rd)%pd;

      if (comm_proc) {

        int words = buffer_size;
        if (cbmask != 0x3) words=words>>1;

        int bytes = words * compress.CommDatumSize();

        int so = sx*rhs.Grid()->_ostride[dimension]; // base offset for start of plane
        if ( !face_table_computed ) {
          face_table.resize(face_idx+1);
          Gather_plane_table_compute ((GridBase *)_grid,dimension,sx,cbmask,u_comm_offset,face_table[face_idx]);
        }

        // int rank = _grid->_processor;
        int recv_from_rank;
        int xmit_to_rank;
        _grid->ShiftedRanks(dimension,comm_proc,xmit_to_rank,recv_from_rank);

        assert (xmit_to_rank   != _grid->ThisRank());
        assert (recv_from_rank != _grid->ThisRank());

        /////////////////////////////////////////////////////////
        // try the direct copy if possible
        /////////////////////////////////////////////////////////
@@ -1062,13 +1062,13 @@ public:
        }

        send_buf = (cobj *)_grid->ShmBufferTranslate(xmit_to_rank,recv_buf);
        if ( send_buf==NULL ) {
          send_buf = this->u_send_buf_p;
        }

        // Find out if we get the direct copy.
        void *success = (void *) _grid->ShmBufferTranslate(recv_from_rank,this->u_send_buf_p);
        if (success==NULL) {
          // we found a packet that comes from MPI and contributes to this leg of stencil
          shm_receive_only = 0;
        }
@@ -1077,9 +1077,9 @@ public:
        assert(send_buf!=NULL);
        Gather_plane_simple_table(face_table[face_idx],rhs,send_buf,compress,u_comm_offset,so); face_idx++;
        gathertime+=usecond();

        if ( compress.DecompressionStep() ) {

          if ( shm_receive_only ) { // Early decompress before MPI is finished is possible
            AddDecompress(&this->u_recv_buf_p[u_comm_offset],
                          &recv_buf[u_comm_offset],
@@ -1108,7 +1108,7 @@ public:
    }
    return shm_receive_only;
  }
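A hedged toy model (stand-in names, not Grid's API) of the two ShmBufferTranslate probes in Gather above: translation is assumed to succeed only for peers on the same shared-memory node, and a NULL on either side forces the MPI path.

#include <cstddef>

// Stand-in for ShmBufferTranslate: yields a usable pointer only when the
// peer shares our node (purely illustrative model).
void *translate(int peer_node, int my_node, void *buf) {
  return (peer_node == my_node) ? buf : nullptr;
}

// Probe 1: can we gather straight into the receiver's window?
// Probe 2: will our inbound face arrive via SHM, or via an MPI packet?
void *plan_leg(int xmit_node, int recv_node, int my_node,
               void *recv_window, void *mpi_stage, int &shm_receive_only) {
  void *send_buf = translate(xmit_node, my_node, recv_window);
  if (send_buf == nullptr) send_buf = mpi_stage;   // stage for MPI instead
  if (translate(recv_node, my_node, mpi_stage) == nullptr)
    shm_receive_only = 0;                          // an MPI packet feeds this leg
  return send_buf;
}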

  template<class compressor>
  int GatherSimd(const Lattice<vobj> &rhs,int dimension,int shift,int cbmask,compressor &compress,int & face_idx)
  {
@@ -1136,7 +1136,7 @@ public:
    ///////////////////////////////////////////////
    int buffer_size = _grid->_slice_nblock[dimension]*_grid->_slice_block[dimension];
    // int words = sizeof(cobj)/sizeof(vector_type);

    assert(cbmask==0x3); // Fixme think there is a latent bug if not true
    // This assert will trap it if ever hit. Not hit normally so far
    int reduced_buffer_size = buffer_size;
@@ -1152,22 +1152,22 @@ public:
    ///////////////////////////////////////////
    // Work out what to send where
    ///////////////////////////////////////////

    int cb    = (cbmask==0x2)? Odd : Even;
    int sshift= _grid->CheckerBoardShiftForCB(rhs.Checkerboard(),dimension,shift,cb);

    // loop over outer coord planes orthog to dim
    int shm_receive_only = 1;
    for(int x=0;x<rd;x++){

      int any_offnode = ( ((x+sshift)%fd) >= rd );

      if ( any_offnode ) {

        for(int i=0;i<maxl;i++){
          spointers[i] = (cobj *) &u_simd_send_buf[i][u_comm_offset];
        }

        int sx = (x+sshift)%rd;

        if ( !face_table_computed ) {
@@ -1202,13 +1202,13 @@ public:

          int recv_from_rank;
          int xmit_to_rank;

          _grid->ShiftedRanks(dimension,nbr_proc,xmit_to_rank,recv_from_rank);

          // shm == receive pointer         if offnode
          // shm == Translate[send pointer] if on node -- my view of his send pointer
          cobj *shm = (cobj *) _grid->ShmBufferTranslate(recv_from_rank,sp);
          if (shm==NULL) {
            shm = rp;
            // we found a packet that comes from MPI and contributes to this shift.
            // is_same_node is only used in the WilsonStencil, and gets set for this point in the stencil.
@@ -1222,15 +1222,15 @@ public:

          AddPacket((void *)sp,(void *)rp,xmit_to_rank,recv_from_rank,bytes);

        } else {

          rpointers[i] = sp;

        }
      }

      if ( shm_receive_only ) {
        AddMerge(&this->u_recv_buf_p[u_comm_offset],rpointers,reduced_buffer_size,permute_type,MergersSHM);
      } else {
        AddMerge(&this->u_recv_buf_p[u_comm_offset],rpointers,reduced_buffer_size,permute_type,Mergers);
@@ -1265,9 +1265,9 @@ public:
    shm_bytes = 0.;
    calls     = 0.;
  };

  void Report(void) {
#define AVERAGE(A)
#define PRINTIT(A) AVERAGE(A); std::cout << GridLogMessage << " Stencil " << #A << " "<< A/calls<<std::endl;
    RealD NP = _grid->_Nprocessors;
    RealD NN = _grid->NodeCount();
@@ -1284,7 +1284,7 @@ public:
      }
    }
    if (threaded) commtime += t;

    _grid->GlobalSum(commtime); commtime/=NP;
    if ( calls > 0. ) {
      std::cout << GridLogMessage << " Stencil calls "<<calls<<std::endl;
@@ -1307,8 +1307,8 @@ public:
      std::cout << GridLogMessage << " Stencil SHM     " << (shm_bytes)/gatheralltime/1000.*NP/NN << " GB/s per node"<<std::endl;

      auto all_bytes = comms_bytes+shm_bytes;
      std::cout << GridLogMessage << " Stencil SHM all " << (all_bytes)/gatheralltime/1000.       << " GB/s per rank"<<std::endl;
      std::cout << GridLogMessage << " Stencil SHM all " << (all_bytes)/gatheralltime/1000.*NP/NN << " GB/s per node"<<std::endl;

      auto membytes = (shm_bytes + comms_bytes/2) // read/write
                    + (shm_bytes+comms_bytes)/2 * sizeof(vobj)/sizeof(cobj);
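      // Units made explicit (worked example, made-up numbers): the timers
      // accumulate microseconds, so bytes/us divided by 1000 is GB/s, and
      // NP/NN rescales a per-rank average to per-node.
      //   all_bytes = 4e9 moved in gatheralltime = 2e6 us:
      //     4e9 / 2e6 / 1000 = 2 GB/s per rank
      //   with NP = 8 ranks on NN = 2 nodes:
      //     2 * NP/NN = 2 * 4 = 8 GB/s per node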

@@ -1326,7 +1326,7 @@ public:
#undef PRINTIT
#undef AVERAGE
  };

};
NAMESPACE_END(Grid);