Mirror of https://github.com/paboyle/Grid.git (synced 2025-11-04 05:54:32 +00:00)

Compare commits: feature/a2 ... DIRAC-ITT- (260 commits)
Commits in this range (abbreviated SHA1):

12e239dd9f af2301afbb f98856a26f d55cc5b380 c2b688abc9 b0d61b9687 5f893bf9af 0e17bd6597 22caa158cc b24a504d7c
992ef6e9fc f32a320bc3 5f0fe029d2 6b1486e89b 3f9c427a3a d201277652 fdda7cf9cf e22d30f715 1ba25a0d8c 9ba3647bdf
5ee832f738 35a69a5133 e9c5a271a8 acac2d6938 97db2b8d20 ace9cd64bb a3e2aeb603 049dd25785 d43d372294 b71a081cba
c48909590b 446ef40570 81441e98f4 ecd3f890f5 1c881ce23c dacbbdd051 2859955a03 cc220abd1d d1c0c0197e fd9424ef27
a5c35c4024 e03b64dc06 4677c40195 288c615782 48e81cf6f8 65b724bb5f 6dbd117aa5 198b29f618 a8309638d4 f98a4e880e
8244caff25 bcd7895362 85b1c5df39 b4255140d6 0c3095e173 d3ce60713d eac1f08b7b 1654c4f3c0 8807d998bc 5791021dcd
c273fb051c c545530170 d982a5b6d5 15ca8637f3 cbc995b74c 8b74174d74 e21fef17df 3d27708f07 b918744184 7d14a3c086
e14a84317d 6c31b99f1f 9522dcd611 ed469898dc 1eee94a809 54523369a3 a98c91c2a5 a9b92867a8 65920faeba 3448b7387c
47b89d2739 1efe30d6cc 0b787e9fe0 37ec4b241c 90ea7dfa99 f866d7c33e 542bdef198 06007db3d9 12e6059a70 dbaa24ebf6
3b30b9f0c0 69db4816f7 3abe09025a e33878e0de 27b4fbf3f0 968a90633a 6365a89ba3 ddbb008694 7997e0a449 197612bc7a
0e88bf4bff 3e64d78469 2004611def a2868c96a4 ea7f8fda5e 906b78811b 97703b181b d9474c6cb6 bbd145382b 1b08cb7300
337d9dc043 8726e94ea7 67db4993c2 fd3c8b0e85 1635c263ee 5b117865b2 05bbc49a99 81a8209749 a87e45ba25 465856331a
cc958aa9ed a25e4b3d0c d1210ca12a 36ea0e222a 92281ec22d 87266ce099 2a23f133e8 8dbf790f62 2402b4940e 2111052fbe
433766ac62 93a37c8f68 9872c76825 5ee3ea2144 5050833b42 7bee4ebb54 71cf9851e7 b4735c9904 9b2699226c 5f52804907
936071773e 1732f9319e 91c81cab30 38164f8480 f013979791 e947b563ea 5cb3530c34 250008372f 4fedd8d29f 6ddcef1bca
8c5a5fdfce 046b1cbbc0 a65ce237c1 cd27f1005d f8c0a59221 832485699f 81484a4760 9a86059761 b780b7b7a0 9e085bd04e
6b6bf537d3 323a651c71 9f212679f1 032f7dde1a 50b1db1e8b 015d8bb38a 10a34312dc db8c0e7584 d15ccad8a7 0009b5cee8
20d1941a45 b7c76ede29 05edf803bd 78b8e40f83 fc2e9850d3 ffaaed679e b2fd8b993a 291ee8c3d0 e1a5b3ea49 55a55660cb
ceb8b374da 4bc2ad2894 798af3e68f b0ef2367f3 71a7350a85 6f79369955 f9cb6b979f ed4d9d17f8 fbed02690d 39f3ae5b1d
e64bec8c8e 0893b4e552 92f0f29670 48a340a9d1 f45621109b 32d1a0bbea 267cce66a1 3417147b11 b338719bc8 2b81cbe2c2
acff9d6ed2 a306a49788 7ef03c5368 5abec5b8a9 499edc0636 d990e61be3 3edb2dc2da 345721220e 6db68d6ecb 09f0963d1f
6f44e3c192 5893888f87 39b448affb e54a8f05a9 64b72fc17f 6fdce60492 852db4626a 6504a098cc 79a385faca c12a67030a
581392f2f2 113f277b6a 974586bedc 160f78c1e4 7e4e1bbbc2 e699b7e9f9 a28bc0de90 14d0fe4d6c 0ad2e0815c 1c8ca05e16
dc9c8340bb 19eef97503 635246ce50 5cdbb7e71e 8123590a1b 86c9c4da8b cd1efee866 bd310932f7 304762e7ac d79ab03a6c
d5708e0eb2 123f6b7a61 2b6457dd9a b367cbd422 e252c1aca3 b140c6a4f9 326de36467 9f224a1647 bb46ba9b5f dd5a22b36b
1ea85b9972 8fb63f1c25 77fa586f6c 15238e8d5e b27e31957a 46927771e3 d8cea77707 5f8a76d490 28d49a3b60 b4c624ece6
@@ -9,11 +9,6 @@ matrix:
    - os:        osx
      osx_image: xcode8.3
      compiler: clang
      env: PREC=single
    - os:        osx
      osx_image: xcode8.3
      compiler: clang
      env: PREC=double

before_install:
    - export GRIDDIR=`pwd`
@@ -55,7 +50,7 @@ script:
    - make -j4
    - make install
    - cd $CWD/build
    - ../configure --enable-precision=$PREC --enable-simd=SSE4 --enable-comms=none --with-lime=$CWD/build/lime/install ${EXTRACONF}
    - ../configure --enable-simd=SSE4 --enable-comms=none --with-lime=$CWD/build/lime/install ${EXTRACONF}
    - make -j4
    - ./benchmarks/Benchmark_dwf --threads 1 --debug-signals
    - make check

@@ -34,6 +34,12 @@
#define __SYCL__REDEFINE__
#endif

/* HIP save and restore compile environment*/
#ifdef GRID_HIP
#pragma push
#pragma push_macro("__HIP_DEVICE_COMPILE__")
#endif
#define EIGEN_NO_HIP

#include <Grid/Eigen/Dense>
#include <Grid/Eigen/unsupported/CXX11/Tensor>
@@ -42,7 +48,7 @@
#ifdef __NVCC__REDEFINE__
#pragma pop_macro("__CUDACC__")
#pragma pop_macro("__NVCC__")
#pragma pop_macro("GRID_SIMT")
#pragma pop_macro("__CUDA_ARCH__")
#pragma pop
#endif

@@ -52,6 +58,12 @@
#pragma pop
#endif

/*HIP restore*/
#ifdef __HIP__REDEFINE__
#pragma pop_macro("__HIP_DEVICE_COMPILE__")
#pragma pop
#endif

#if defined __GNUC__
#pragma GCC diagnostic pop
#endif

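The hunks above extend an existing save/undef/restore pattern to HIP: the `__HIP_DEVICE_COMPILE__` state is stashed with `#pragma push_macro` before the Eigen headers are parsed and restored with `#pragma pop_macro` afterwards. A minimal standalone sketch of that idiom, with a hypothetical `MY_DEVICE_MACRO` standing in for the real guard and the third-party include commented out:

```cpp
// Sketch only: save a macro, hide it while a third-party header is parsed,
// then restore it.  MY_DEVICE_MACRO is an invented stand-in.
#include <cstdio>

#define MY_DEVICE_MACRO 1

#pragma push_macro("MY_DEVICE_MACRO")   // save the current definition
#undef MY_DEVICE_MACRO                  // the third-party header sees it undefined
// #include <third_party/header.hpp>    // e.g. <Grid/Eigen/Dense> in the diff
#pragma pop_macro("MY_DEVICE_MACRO")    // restore the saved definition

int main() {
#ifdef MY_DEVICE_MACRO
  std::printf("MY_DEVICE_MACRO restored to %d\n", MY_DEVICE_MACRO);
#endif
  return 0;
}
```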
@@ -65,8 +65,7 @@ public:
    MemoryManager::CpuFree((void *)__p,bytes);
  }

  // FIXME: hack for the copy constructor, eventually it must be avoided
  //void construct(pointer __p, const _Tp& __val) { new((void *)__p) _Tp(__val); };
  // FIXME: hack for the copy constructor: it must be avoided to avoid single thread loop
  void construct(pointer __p, const _Tp& __val) { assert(0);};
  void construct(pointer __p) { };
  void destroy(pointer __p) { };
@@ -74,6 +73,9 @@ public:
template<typename _Tp>  inline bool operator==(const alignedAllocator<_Tp>&, const alignedAllocator<_Tp>&){ return true; }
template<typename _Tp>  inline bool operator!=(const alignedAllocator<_Tp>&, const alignedAllocator<_Tp>&){ return false; }

//////////////////////////////////////////////////////////////////////////////////////
// Unified virtual memory
//////////////////////////////////////////////////////////////////////////////////////
template<typename _Tp>
class uvmAllocator {
public:
@@ -109,22 +111,63 @@ public:
    MemoryManager::SharedFree((void *)__p,bytes);
  }

  // FIXME: hack for the copy constructor, eventually it must be avoided
  void construct(pointer __p, const _Tp& __val) { new((void *)__p) _Tp(__val); };
  //void construct(pointer __p, const _Tp& __val) { };
  void construct(pointer __p) { };
  void destroy(pointer __p) { };
};
template<typename _Tp>  inline bool operator==(const uvmAllocator<_Tp>&, const uvmAllocator<_Tp>&){ return true; }
template<typename _Tp>  inline bool operator!=(const uvmAllocator<_Tp>&, const uvmAllocator<_Tp>&){ return false; }

////////////////////////////////////////////////////////////////////////////////
// Device memory
////////////////////////////////////////////////////////////////////////////////
template<typename _Tp>
class devAllocator {
public:
  typedef std::size_t     size_type;
  typedef std::ptrdiff_t  difference_type;
  typedef _Tp*       pointer;
  typedef const _Tp* const_pointer;
  typedef _Tp&       reference;
  typedef const _Tp& const_reference;
  typedef _Tp        value_type;

  template<typename _Tp1>  struct rebind { typedef devAllocator<_Tp1> other; };
  devAllocator() throw() { }
  devAllocator(const devAllocator&) throw() { }
  template<typename _Tp1> devAllocator(const devAllocator<_Tp1>&) throw() { }
  ~devAllocator() throw() { }
  pointer       address(reference __x)       const { return &__x; }
  size_type  max_size() const throw() { return size_t(-1) / sizeof(_Tp); }

  pointer allocate(size_type __n, const void* _p= 0)
  {
    size_type bytes = __n*sizeof(_Tp);
    profilerAllocate(bytes);
    _Tp *ptr = (_Tp*) MemoryManager::AcceleratorAllocate(bytes);
    assert( ( (_Tp*)ptr != (_Tp *)NULL ) );
    return ptr;
  }

  void deallocate(pointer __p, size_type __n)
  {
    size_type bytes = __n * sizeof(_Tp);
    profilerFree(bytes);
    MemoryManager::AcceleratorFree((void *)__p,bytes);
  }
  void construct(pointer __p, const _Tp& __val) { };
  void construct(pointer __p) { };
  void destroy(pointer __p) { };
};
template<typename _Tp>  inline bool operator==(const devAllocator<_Tp>&, const devAllocator<_Tp>&){ return true; }
template<typename _Tp>  inline bool operator!=(const devAllocator<_Tp>&, const devAllocator<_Tp>&){ return false; }

////////////////////////////////////////////////////////////////////////////////
// Template typedefs
////////////////////////////////////////////////////////////////////////////////
template<class T> using commAllocator = uvmAllocator<T>;
//template<class T> using commAllocator = devAllocator<T>;
template<class T> using Vector     = std::vector<T,uvmAllocator<T> >;
template<class T> using commVector = std::vector<T,uvmAllocator<T> >;
//template<class T> using Matrix     = std::vector<std::vector<T,alignedAllocator<T> > >;
template<class T> using commVector = std::vector<T,devAllocator<T> >;

NAMESPACE_END(Grid);

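The hunk above adds a `devAllocator` whose `allocate`/`deallocate` go through `MemoryManager::AcceleratorAllocate` and `MemoryManager::AcceleratorFree`; the old and new `commVector` typedefs (uvmAllocator versus devAllocator) appear side by side because the diff markers were not preserved. A minimal sketch of the same allocator shape, backed by plain `malloc`/`free` instead of Grid's MemoryManager (that substitution is an assumption for illustration only), shows how `std::vector` picks such an allocator up:

```cpp
// Toy allocator with the same shape as devAllocator; malloc/free stand in
// for MemoryManager::AcceleratorAllocate / AcceleratorFree.
#include <cassert>
#include <cstdlib>
#include <vector>

template <typename T>
class toyDevAllocator {
public:
  typedef T value_type;
  toyDevAllocator() = default;
  template <typename U> toyDevAllocator(const toyDevAllocator<U>&) {}

  T* allocate(std::size_t n) {
    void* p = std::malloc(n * sizeof(T));   // stand-in for AcceleratorAllocate
    assert(p != nullptr);
    return static_cast<T*>(p);
  }
  void deallocate(T* p, std::size_t) {      // stand-in for AcceleratorFree
    std::free(p);
  }
};
template <typename T, typename U>
bool operator==(const toyDevAllocator<T>&, const toyDevAllocator<U>&) { return true; }
template <typename T, typename U>
bool operator!=(const toyDevAllocator<T>&, const toyDevAllocator<U>&) { return false; }

int main() {
  // Same pattern as `template<class T> using commVector = std::vector<T,devAllocator<T>>`.
  std::vector<double, toyDevAllocator<double>> buf(16, 0.0);
  buf[0] = 3.14;
  return 0;
}
```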
@@ -136,11 +136,20 @@ void MemoryManager::Init(void)
      Ncache[SharedSmall]=Nc;
    }
  }

}

void MemoryManager::InitMessage(void) {

#ifndef GRID_UVM
  std::cout << GridLogMessage << "MemoryManager Cache "<< MemoryManager::DeviceMaxBytes <<" bytes "<<std::endl;
#endif

  std::cout << GridLogMessage<< "MemoryManager::Init() setting up"<<std::endl;
#ifdef ALLOCATION_CACHE
  std::cout << GridLogMessage<< "MemoryManager::Init() cache pool for recent allocations: SMALL "<<Ncache[CpuSmall]<<" LARGE "<<Ncache[Cpu]<<std::endl;
#endif

#ifdef GRID_UVM
  std::cout << GridLogMessage<< "MemoryManager::Init() Unified memory space"<<std::endl;
#ifdef GRID_CUDA
@@ -164,6 +173,7 @@ void MemoryManager::Init(void)
  std::cout << GridLogMessage<< "MemoryManager::Init() Using SYCL malloc_device"<<std::endl;
#endif
#endif

}

void *MemoryManager::Insert(void *ptr,size_t bytes,int type)

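The hunk above introduces a separate `MemoryManager::InitMessage()` that reports the cache and memory-space configuration, leaving `Init()` to do the silent set-up. A minimal sketch of that two-phase pattern, with invented cache sizes used purely for illustration:

```cpp
// Toy version of the Init / InitMessage split: configure first, report later.
#include <cstdio>

struct ToyMemoryManager {
  static int NcacheSmall;
  static int NcacheLarge;
  static void Init(void) {          // silent configuration
    NcacheSmall = 32;
    NcacheLarge = 8;
  }
  static void InitMessage(void) {   // reporting, called separately
    std::printf("MemoryManager cache pool: SMALL %d LARGE %d\n",
                NcacheSmall, NcacheLarge);
  }
};
int ToyMemoryManager::NcacheSmall = 0;
int ToyMemoryManager::NcacheLarge = 0;

int main() {
  ToyMemoryManager::Init();
  ToyMemoryManager::InitMessage();
  return 0;
}
```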
@@ -93,11 +93,12 @@ private:
  static void *Insert(void *ptr,size_t bytes,AllocationCacheEntry *entries,int ncache,int &victim) ;
  static void *Lookup(size_t bytes,AllocationCacheEntry *entries,int ncache) ;

  static void *AcceleratorAllocate(size_t bytes);
  static void  AcceleratorFree    (void *ptr,size_t bytes);
  static void PrintBytes(void);
 public:
  static void Init(void);
  static void InitMessage(void);
  static void *AcceleratorAllocate(size_t bytes);
  static void  AcceleratorFree    (void *ptr,size_t bytes);
  static void *SharedAllocate(size_t bytes);
  static void  SharedFree    (void *ptr,size_t bytes);
  static void *CpuAllocate(size_t bytes);

@@ -138,21 +138,6 @@ public:
		      int recv_from_rank,
		      int bytes);

  void SendRecvPacket(void *xmit,
		      void *recv,
		      int xmit_to_rank,
		      int recv_from_rank,
		      int bytes);

  void SendToRecvFromBegin(std::vector<CommsRequest_t> &list,
			   void *xmit,
			   int xmit_to_rank,
			   void *recv,
			   int recv_from_rank,
			   int bytes);

  void SendToRecvFromComplete(std::vector<CommsRequest_t> &waitall);

  double StencilSendToRecvFrom(void *xmit,
			       int xmit_to_rank,
			       void *recv,

@@ -1,6 +1,6 @@
/*************************************************************************************

    Grid physics library, www.github.com/paboyle/Grid 
    Grid physics library, www.github.com/paboyle/Grid

    Source file: ./lib/communicator/Communicator_mpi.cc

@@ -35,7 +35,7 @@ Grid_MPI_Comm       CartesianCommunicator::communicator_world;
////////////////////////////////////////////
// First initialise of comms system
////////////////////////////////////////////
void CartesianCommunicator::Init(int *argc, char ***argv) 
void CartesianCommunicator::Init(int *argc, char ***argv)
{

  int flag;
@@ -43,8 +43,16 @@ void CartesianCommunicator::Init(int *argc, char ***argv)

  MPI_Initialized(&flag); // needed to coexist with other libs apparently
  if ( !flag ) {
    MPI_Init_thread(argc,argv,MPI_THREAD_MULTIPLE,&provided);

#if defined (TOFU) // FUGAKU, credits go to Issaku Kanamori
    nCommThreads=1;
    // wrong results here too
    // For now: comms-overlap leads to wrong results in Benchmark_wilson even on single node MPI runs
    // other comms schemes are ok
    MPI_Init_thread(argc,argv,MPI_THREAD_SERIALIZED,&provided);
#else
    MPI_Init_thread(argc,argv,MPI_THREAD_MULTIPLE,&provided);
#endif
    //If only 1 comms thread we require any threading mode other than SINGLE, but for multiple comms threads we need MULTIPLE
    if( (nCommThreads == 1) && (provided == MPI_THREAD_SINGLE) ) {
      assert(0);
@@ -91,7 +99,7 @@ void  CartesianCommunicator::ProcessorCoorFromRank(int rank, Coordinate &coor)
////////////////////////////////////////////////////////////////////////////////////////////////////////
// Initialises from communicator_world
////////////////////////////////////////////////////////////////////////////////////////////////////////
CartesianCommunicator::CartesianCommunicator(const Coordinate &processors) 
CartesianCommunicator::CartesianCommunicator(const Coordinate &processors)
{
  MPI_Comm optimal_comm;
  ////////////////////////////////////////////////////
@@ -110,7 +118,7 @@ CartesianCommunicator::CartesianCommunicator(const Coordinate &processors)
//////////////////////////////////
// Try to subdivide communicator
//////////////////////////////////
CartesianCommunicator::CartesianCommunicator(const Coordinate &processors,const CartesianCommunicator &parent,int &srank)    
CartesianCommunicator::CartesianCommunicator(const Coordinate &processors,const CartesianCommunicator &parent,int &srank)
{
  _ndimension = processors.size();  assert(_ndimension>=1);
  int parent_ndimension = parent._ndimension; assert(_ndimension >= parent._ndimension);
@@ -127,7 +135,7 @@ CartesianCommunicator::CartesianCommunicator(const Coordinate &processors,const
  //////////////////////////////////////////////////////////////////////////////////////////////////////
  // split the communicator
  //////////////////////////////////////////////////////////////////////////////////////////////////////
  //  int Nparent = parent._processors ; 
  //  int Nparent = parent._processors ;
  int Nparent;
  MPI_Comm_size(parent.communicator,&Nparent);

@@ -149,13 +157,13 @@ CartesianCommunicator::CartesianCommunicator(const Coordinate &processors,const
  }

  // rank within subcomm ; srank is rank of subcomm within blocks of subcomms
  int crank;  
  int crank;
  // Mpi uses the reverse Lexico convention to us; so reversed routines called
  Lexicographic::IndexFromCoorReversed(ccoor,crank,processors); // processors is the split grid dimensions
  Lexicographic::IndexFromCoorReversed(scoor,srank,ssize);      // ssize is the number of split grids

  MPI_Comm comm_split;
  if ( Nchild > 1 ) { 
  if ( Nchild > 1 ) {

    ////////////////////////////////////////////////////////////////
    // Split the communicator
@@ -180,11 +188,11 @@ CartesianCommunicator::CartesianCommunicator(const Coordinate &processors,const
  SetCommunicator(comm_split);

  ///////////////////////////////////////////////
  // Free the temp communicator 
  // Free the temp communicator
  ///////////////////////////////////////////////
  MPI_Comm_free(&comm_split);

  if(0){ 
  if(0){
    std::cout << " ndim " <<_ndimension<<" " << parent._ndimension << std::endl;
    for(int d=0;d<processors.size();d++){
      std::cout << d<< " " << _processor_coor[d] <<" " <<  ccoor[d]<<std::endl;
@@ -245,7 +253,7 @@ CartesianCommunicator::~CartesianCommunicator()
    for(int i=0;i<communicator_halo.size();i++){
      MPI_Comm_free(&communicator_halo[i]);
    }
  }  
  }
}
void CartesianCommunicator::GlobalSum(uint32_t &u){
  int ierr=MPI_Allreduce(MPI_IN_PLACE,&u,1,MPI_UINT32_T,MPI_SUM,communicator);
@@ -294,60 +302,28 @@ void CartesianCommunicator::SendToRecvFrom(void *xmit,
					   int bytes)
{
  std::vector<CommsRequest_t> reqs(0);
  //    unsigned long  xcrc = crc32(0L, Z_NULL, 0);
  //    unsigned long  rcrc = crc32(0L, Z_NULL, 0);
  //    xcrc = crc32(xcrc,(unsigned char *)xmit,bytes);
  SendToRecvFromBegin(reqs,xmit,dest,recv,from,bytes);
  SendToRecvFromComplete(reqs);
  //    rcrc = crc32(rcrc,(unsigned char *)recv,bytes);
  //    printf("proc %d SendToRecvFrom %d bytes %lx %lx\n",_processor,bytes,xcrc,rcrc);
}
void CartesianCommunicator::SendRecvPacket(void *xmit,
					   void *recv,
					   int sender,
					   int receiver,
					   int bytes)
{
  MPI_Status stat;
  assert(sender != receiver);
  int tag = sender;
  if ( _processor == sender ) {
    MPI_Send(xmit, bytes, MPI_CHAR,receiver,tag,communicator);
  }
  if ( _processor == receiver ) { 
    MPI_Recv(recv, bytes, MPI_CHAR,sender,tag,communicator,&stat);
  }
}
// Basic Halo comms primitive
void CartesianCommunicator::SendToRecvFromBegin(std::vector<CommsRequest_t> &list,
						void *xmit,
						int dest,
						void *recv,
						int from,
						int bytes)
{
  unsigned long  xcrc = crc32(0L, Z_NULL, 0);
  unsigned long  rcrc = crc32(0L, Z_NULL, 0);

  int myrank = _processor;
  int ierr;

  if ( CommunicatorPolicy == CommunicatorPolicyConcurrent ) { 
    MPI_Request xrq;
    MPI_Request rrq;
  // Enforce no UVM in comms, device or host OK
  assert(acceleratorIsCommunicable(xmit));
  assert(acceleratorIsCommunicable(recv));

    ierr =MPI_Irecv(recv, bytes, MPI_CHAR,from,from,communicator,&rrq);
    ierr|=MPI_Isend(xmit, bytes, MPI_CHAR,dest,_processor,communicator,&xrq);

    assert(ierr==0);
    list.push_back(xrq);
    list.push_back(rrq);
  } else { 
    // Give the CPU to MPI immediately; can use threads to overlap optionally
    ierr=MPI_Sendrecv(xmit,bytes,MPI_CHAR,dest,myrank,
		      recv,bytes,MPI_CHAR,from, from,
		      communicator,MPI_STATUS_IGNORE);
    assert(ierr==0);
  }
  // Give the CPU to MPI immediately; can use threads to overlap optionally
  //  printf("proc %d SendToRecvFrom %d bytes Sendrecv \n",_processor,bytes);
  ierr=MPI_Sendrecv(xmit,bytes,MPI_CHAR,dest,myrank,
		    recv,bytes,MPI_CHAR,from, from,
		    communicator,MPI_STATUS_IGNORE);
  assert(ierr==0);

  //  xcrc = crc32(xcrc,(unsigned char *)xmit,bytes);
  //  rcrc = crc32(rcrc,(unsigned char *)recv,bytes);
  //  printf("proc %d SendToRecvFrom %d bytes xcrc %lx rcrc %lx\n",_processor,bytes,xcrc,rcrc); fflush
}

// Basic Halo comms primitive
double CartesianCommunicator::StencilSendToRecvFrom( void *xmit,
						     int dest,
						     void *recv,
@@ -367,7 +343,7 @@ double CartesianCommunicator::StencilSendToRecvFromBegin(std::vector<CommsReques
							 int from,
							 int bytes,int dir)
{
  int ncomm  =communicator_halo.size(); 
  int ncomm  =communicator_halo.size();
  int commdir=dir%ncomm;

  MPI_Request xrq;
@@ -397,21 +373,13 @@ double CartesianCommunicator::StencilSendToRecvFromBegin(std::vector<CommsReques
    off_node_bytes+=bytes;
  }

  if ( CommunicatorPolicy == CommunicatorPolicySequential ) { 
  if ( CommunicatorPolicy == CommunicatorPolicySequential ) {
    this->StencilSendToRecvFromComplete(list,dir);
  }

  return off_node_bytes;
}
void CartesianCommunicator::StencilSendToRecvFromComplete(std::vector<CommsRequest_t> &waitall,int dir)
{
  SendToRecvFromComplete(waitall);
}
void CartesianCommunicator::StencilBarrier(void)
{
  MPI_Barrier  (ShmComm);
}
void CartesianCommunicator::SendToRecvFromComplete(std::vector<CommsRequest_t> &list)
void CartesianCommunicator::StencilSendToRecvFromComplete(std::vector<CommsRequest_t> &list,int dir)
{
  int nreq=list.size();

@@ -422,6 +390,13 @@ void CartesianCommunicator::SendToRecvFromComplete(std::vector<CommsRequest_t> &
  assert(ierr==0);
  list.resize(0);
}
void CartesianCommunicator::StencilBarrier(void)
{
  MPI_Barrier  (ShmComm);
}
//void CartesianCommunicator::SendToRecvFromComplete(std::vector<CommsRequest_t> &list)
//{
//}
void CartesianCommunicator::Barrier(void)
{
  int ierr = MPI_Barrier(communicator);
@@ -436,8 +411,8 @@ void CartesianCommunicator::Broadcast(int root,void* data, int bytes)
		     communicator);
  assert(ierr==0);
}
int CartesianCommunicator::RankWorld(void){ 
  int r; 
int CartesianCommunicator::RankWorld(void){
  int r;
  MPI_Comm_rank(communicator_world,&r);
  return r;
}
@@ -470,7 +445,7 @@ void CartesianCommunicator::AllToAll(void  *in,void *out,uint64_t words,uint64_t
  // When 24*4 bytes multiples get 50x 10^9 >>> 2x10^9 Y2K bug.
  // (Turns up on 32^3 x 64 Gparity too)
  MPI_Datatype object;
  int iwords; 
  int iwords;
  int ibytes;
  iwords = words;
  ibytes = bytes;
@@ -483,5 +458,3 @@ void CartesianCommunicator::AllToAll(void  *in,void *out,uint64_t words,uint64_t
}

NAMESPACE_END(Grid);

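The initialisation hunk above requests `MPI_THREAD_SERIALIZED` on the TOFU/Fugaku branch and `MPI_THREAD_MULTIPLE` otherwise, then checks the level the library actually provided. A minimal sketch of that pattern, compiled with an MPI wrapper such as mpicxx (the `TOFU` macro is taken from the diff; everything else is generic MPI):

```cpp
#include <mpi.h>
#include <cassert>
#include <cstdio>

int main(int argc, char** argv) {
  int flag = 0;
  MPI_Initialized(&flag);          // coexist with other libraries, as in the diff
  int provided = MPI_THREAD_SINGLE;
  if (!flag) {
#if defined(TOFU)
    MPI_Init_thread(&argc, &argv, MPI_THREAD_SERIALIZED, &provided);
#else
    MPI_Init_thread(&argc, &argv, MPI_THREAD_MULTIPLE, &provided);
#endif
  }
  // A single comms thread only needs something better than SINGLE.
  assert(provided != MPI_THREAD_SINGLE);
  std::printf("MPI provided threading level %d\n", provided);
  MPI_Finalize();
  return 0;
}
```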
@@ -77,15 +77,6 @@ void CartesianCommunicator::GlobalSumVector(uint64_t *,int N){}
void CartesianCommunicator::GlobalXOR(uint32_t &){}
void CartesianCommunicator::GlobalXOR(uint64_t &){}

void CartesianCommunicator::SendRecvPacket(void *xmit,
					   void *recv,
					   int xmit_to_rank,
					   int recv_from_rank,
					   int bytes)
{
  assert(0);
}


// Basic Halo comms primitive -- should never call in single node
void CartesianCommunicator::SendToRecvFrom(void *xmit,
@@ -96,20 +87,6 @@ void CartesianCommunicator::SendToRecvFrom(void *xmit,
{
  assert(0);
}
void CartesianCommunicator::SendToRecvFromBegin(std::vector<CommsRequest_t> &list,
						void *xmit,
						int dest,
						void *recv,
						int from,
						int bytes)
{
  assert(0);
}

void CartesianCommunicator::SendToRecvFromComplete(std::vector<CommsRequest_t> &list)
{
  assert(0);
}
void CartesianCommunicator::AllToAll(int dim,void  *in,void *out,uint64_t words,uint64_t bytes)
{
  bcopy(in,out,bytes*words);
@@ -137,10 +114,6 @@ double CartesianCommunicator::StencilSendToRecvFrom( void *xmit,
						     int recv_from_rank,
						     int bytes, int dir)
{
  std::vector<CommsRequest_t> list;
  // Discard the "dir"
  SendToRecvFromBegin   (list,xmit,xmit_to_rank,recv,recv_from_rank,bytes);
  SendToRecvFromComplete(list);
  return 2.0*bytes;
}
double CartesianCommunicator::StencilSendToRecvFromBegin(std::vector<CommsRequest_t> &list,
@@ -150,13 +123,10 @@ double CartesianCommunicator::StencilSendToRecvFromBegin(std::vector<CommsReques
							 int recv_from_rank,
							 int bytes, int dir)
{
  // Discard the "dir"
  SendToRecvFromBegin(list,xmit,xmit_to_rank,recv,recv_from_rank,bytes);
  return 2.0*bytes;
}
void CartesianCommunicator::StencilSendToRecvFromComplete(std::vector<CommsRequest_t> &waitall,int dir)
{
  SendToRecvFromComplete(waitall);
}

void CartesianCommunicator::StencilBarrier(void){};

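In the single-node communicator above, `AllToAll` reduces to a local copy of `bytes*words` from `in` to `out` via `bcopy`. A minimal sketch of what that amounts to in standard C++, noting that `bcopy(src,dst,len)` is equivalent to `memmove(dst,src,len)`:

```cpp
#include <cstring>
#include <vector>

int main() {
  const std::size_t words = 4, bytes = sizeof(double);
  std::vector<double> in{1.0, 2.0, 3.0, 4.0}, out(words);
  std::memmove(out.data(), in.data(), words * bytes);  // bcopy(in,out,bytes*words)
  return out[3] == 4.0 ? 0 : 1;
}
```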
@@ -32,6 +32,9 @@ Author: Peter Boyle <paboyle@ph.ed.ac.uk>
#ifdef GRID_CUDA
#include <cuda_runtime_api.h>
#endif
#ifdef GRID_HIP
#include <hip/hip_runtime_api.h>
#endif

NAMESPACE_BEGIN(Grid); 
#define header "SharedMemoryMpi: "
@@ -47,7 +50,12 @@ void GlobalSharedMemory::Init(Grid_MPI_Comm comm)
  /////////////////////////////////////////////////////////////////////
  // Split into groups that can share memory
  /////////////////////////////////////////////////////////////////////
#ifndef GRID_MPI3_SHM_NONE
  MPI_Comm_split_type(comm, MPI_COMM_TYPE_SHARED, 0, MPI_INFO_NULL,&WorldShmComm);
#else
  MPI_Comm_split(comm, WorldRank, 0, &WorldShmComm);
#endif

  MPI_Comm_rank(WorldShmComm     ,&WorldShmRank);
  MPI_Comm_size(WorldShmComm     ,&WorldShmSize);

@@ -420,7 +428,7 @@ void GlobalSharedMemory::SharedMemoryAllocate(uint64_t bytes, int flags)
////////////////////////////////////////////////////////////////////////////////////////////
// Hugetlbfs mapping intended
////////////////////////////////////////////////////////////////////////////////////////////
#ifdef GRID_CUDA
#if defined(GRID_CUDA) ||defined(GRID_HIP)
void GlobalSharedMemory::SharedMemoryAllocate(uint64_t bytes, int flags)
{
  void * ShmCommBuf ; 
@@ -443,17 +451,15 @@ void GlobalSharedMemory::SharedMemoryAllocate(uint64_t bytes, int flags)
  ///////////////////////////////////////////////////////////////////////////////////////////////////////////
  // Each MPI rank should allocate our own buffer
  ///////////////////////////////////////////////////////////////////////////////////////////////////////////
  auto err =  cudaMalloc(&ShmCommBuf, bytes);
  if ( err !=  cudaSuccess) {
    std::cerr << " SharedMemoryMPI.cc cudaMallocManaged failed for " << bytes<<" bytes " <<cudaGetErrorString(err)<< std::endl;
    exit(EXIT_FAILURE);
  }
  ShmCommBuf = acceleratorAllocDevice(bytes);

  if (ShmCommBuf == (void *)NULL ) {
    std::cerr << " SharedMemoryMPI.cc cudaMallocManaged failed NULL pointer for " << bytes<<" bytes " << std::endl;
    std::cerr << " SharedMemoryMPI.cc acceleratorAllocDevice failed NULL pointer for " << bytes<<" bytes " << std::endl;
    exit(EXIT_FAILURE);
  }
  if ( WorldRank == 0 ){
    std::cout << header " SharedMemoryMPI.cc cudaMalloc "<< bytes << "bytes at "<< std::hex<< ShmCommBuf <<std::dec<<" for comms buffers " <<std::endl;
    std::cout << header " SharedMemoryMPI.cc cudaMalloc "<< bytes 
	      << "bytes at "<< std::hex<< ShmCommBuf <<std::dec<<" for comms buffers " <<std::endl;
  }
  SharedMemoryZero(ShmCommBuf,bytes);

@@ -461,19 +467,31 @@ void GlobalSharedMemory::SharedMemoryAllocate(uint64_t bytes, int flags)
  // Loop over ranks/gpu's on our node
  ///////////////////////////////////////////////////////////////////////////////////////////////////////////
  for(int r=0;r<WorldShmSize;r++){

#ifndef GRID_MPI3_SHM_NONE
    //////////////////////////////////////////////////
    // If it is me, pass around the IPC access key
    //////////////////////////////////////////////////
#ifdef GRID_CUDA
    cudaIpcMemHandle_t handle;

    if ( r==WorldShmRank ) { 
      err = cudaIpcGetMemHandle(&handle,ShmCommBuf);
      auto err = cudaIpcGetMemHandle(&handle,ShmCommBuf);
      if ( err !=  cudaSuccess) {
	std::cerr << " SharedMemoryMPI.cc cudaIpcGetMemHandle failed for rank" << r <<" "<<cudaGetErrorString(err)<< std::endl;
	exit(EXIT_FAILURE);
      }
    }
#endif
#ifdef GRID_HIP
    hipIpcMemHandle_t handle;    
    if ( r==WorldShmRank ) { 
      auto err = hipIpcGetMemHandle(&handle,ShmCommBuf);
      if ( err !=  hipSuccess) {
	std::cerr << " SharedMemoryMPI.cc hipIpcGetMemHandle failed for rank" << r <<" "<<hipGetErrorString(err)<< std::endl;
	exit(EXIT_FAILURE);
      }
    }
#endif
    //////////////////////////////////////////////////
    // Share this IPC handle across the Shm Comm
    //////////////////////////////////////////////////
@@ -490,17 +508,31 @@ void GlobalSharedMemory::SharedMemoryAllocate(uint64_t bytes, int flags)
    // If I am not the source, overwrite thisBuf with remote buffer
    ///////////////////////////////////////////////////////////////
    void * thisBuf = ShmCommBuf;
#ifdef GRID_CUDA
    if ( r!=WorldShmRank ) { 
      err = cudaIpcOpenMemHandle(&thisBuf,handle,cudaIpcMemLazyEnablePeerAccess);
      auto err = cudaIpcOpenMemHandle(&thisBuf,handle,cudaIpcMemLazyEnablePeerAccess);
      if ( err !=  cudaSuccess) {
	std::cerr << " SharedMemoryMPI.cc cudaIpcOpenMemHandle failed for rank" << r <<" "<<cudaGetErrorString(err)<< std::endl;
	exit(EXIT_FAILURE);
      }
    }
#endif
#ifdef GRID_HIP
    if ( r!=WorldShmRank ) { 
      auto err = hipIpcOpenMemHandle(&thisBuf,handle,hipIpcMemLazyEnablePeerAccess);
      if ( err !=  hipSuccess) {
	std::cerr << " SharedMemoryMPI.cc hipIpcOpenMemHandle failed for rank" << r <<" "<<hipGetErrorString(err)<< std::endl;
	exit(EXIT_FAILURE);
      }
    }
#endif
    ///////////////////////////////////////////////////////////////
    // Save a copy of the device buffers
    ///////////////////////////////////////////////////////////////
    WorldShmCommBufs[r] = thisBuf;
#else
    WorldShmCommBufs[r] = ShmCommBuf;
#endif
  }

  _ShmAllocBytes=bytes;
@@ -705,7 +737,11 @@ void SharedMemory::SetCommunicator(Grid_MPI_Comm comm)
  /////////////////////////////////////////////////////////////////////
  // Split into groups that can share memory
  /////////////////////////////////////////////////////////////////////
#ifndef GRID_MPI3_SHM_NONE
  MPI_Comm_split_type(comm, MPI_COMM_TYPE_SHARED, 0, MPI_INFO_NULL,&ShmComm);
#else
  MPI_Comm_split(comm, rank, 0, &ShmComm);
#endif
  MPI_Comm_rank(ShmComm     ,&ShmRank);
  MPI_Comm_size(ShmComm     ,&ShmSize);
  ShmCommBufs.resize(ShmSize);

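The per-rank loop above exports an IPC handle for the owner's device buffer and opens it on every other rank of the node, with the handle shared across the intra-node communicator. A schematic CUDA-flavoured fragment of that exchange, with error handling elided; using `MPI_Bcast` to move the handle is one plausible way to do the sharing and is an assumption here, as are the function name and the `ShmComm` argument (this is a compile-only sketch meant to live inside a larger MPI+CUDA program):

```cpp
#include <cuda_runtime_api.h>
#include <mpi.h>

// Hypothetical helper: rank r owns ShmCommBuf; everyone else maps it.
void exchange_ipc(void*& thisBuf, void* ShmCommBuf, int r, int myShmRank,
                  MPI_Comm ShmComm) {
  cudaIpcMemHandle_t handle;
  if (r == myShmRank) {
    cudaIpcGetMemHandle(&handle, ShmCommBuf);        // export my buffer
  }
  // Share the opaque handle across the node, rooted at the owner.
  MPI_Bcast(&handle, sizeof(handle), MPI_BYTE, r, ShmComm);
  thisBuf = ShmCommBuf;
  if (r != myShmRank) {
    cudaIpcOpenMemHandle(&thisBuf, handle,           // map the remote buffer
                         cudaIpcMemLazyEnablePeerAccess);
  }
}
```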
@@ -52,23 +52,8 @@ Author: Peter Boyle <paboyle@ph.ed.ac.uk>

NAMESPACE_BEGIN(Grid);

template<typename Op, typename T1> 
auto Cshift(const LatticeUnaryExpression<Op,T1> &expr,int dim,int shift)
    -> Lattice<decltype(expr.op.func(eval(0, expr.arg1)))> 
{
  return Cshift(closure(expr),dim,shift);
}
template <class Op, class T1, class T2>
auto Cshift(const LatticeBinaryExpression<Op,T1,T2> &expr,int dim,int shift)
  -> Lattice<decltype(expr.op.func(eval(0, expr.arg1),eval(0, expr.arg2)))> 
{
  return Cshift(closure(expr),dim,shift);
}
template <class Op, class T1, class T2, class T3>
auto Cshift(const LatticeTrinaryExpression<Op,T1,T2,T3> &expr,int dim,int shift)
  -> Lattice<decltype(expr.op.func(eval(0, expr.arg1),
				   eval(0, expr.arg2),
				   eval(0, expr.arg3)))> 
template<class Expression,typename std::enable_if<is_lattice_expr<Expression>::value,void>::type * = nullptr> 
auto Cshift(const Expression &expr,int dim,int shift)  -> decltype(closure(expr)) 
{
  return Cshift(closure(expr),dim,shift);
}

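The hunk above collapses three per-arity `Cshift` overloads into a single template constrained on `is_lattice_expr`, which turns the expression into a concrete lattice via `closure()` and forwards. A minimal sketch of that dispatch pattern with invented stand-in types (`ToyExpr`, `ToyLattice`, `closure`):

```cpp
#include <type_traits>
#include <vector>

struct ToyLattice { std::vector<double> data; };
struct ToyExpr    { double value; };

template <class T> struct is_toy_expr : std::false_type {};
template <>        struct is_toy_expr<ToyExpr> : std::true_type {};

ToyLattice closure(const ToyExpr& e) { return ToyLattice{{e.value}}; }

// The "real" shift acting on a concrete lattice.
ToyLattice Cshift(const ToyLattice& l, int /*dim*/, int /*shift*/) { return l; }

// One forwarding overload for any expression type, as in the new code.
template <class Expression,
          typename std::enable_if<is_toy_expr<Expression>::value, void>::type* = nullptr>
auto Cshift(const Expression& expr, int dim, int shift) -> decltype(closure(expr)) {
  return Cshift(closure(expr), dim, shift);
}

int main() {
  ToyExpr e{2.0};
  ToyLattice shifted = Cshift(e, 0, 1);  // goes through closure() then Cshift
  return static_cast<int>(shifted.data.size()) - 1;
}
```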
@@ -76,8 +76,8 @@ Gather_plane_simple (const Lattice<vobj> &rhs,commVector<vobj> &buffer,int dimen
    autoView(rhs_v , rhs, AcceleratorRead);
    auto buffer_p = & buffer[0];
    auto table = &Cshift_table[0];
    accelerator_for(i,ent,1,{
      buffer_p[table[i].first]=rhs_v[table[i].second];
    accelerator_for(i,ent,vobj::Nsimd(),{
	coalescedWrite(buffer_p[table[i].first],coalescedRead(rhs_v[table[i].second]));
    });
  }
}
@@ -185,8 +185,8 @@ template<class vobj> void Scatter_plane_simple (Lattice<vobj> &rhs,commVector<vo
    autoView( rhs_v, rhs, AcceleratorWrite);
    auto buffer_p = & buffer[0];
    auto table = &Cshift_table[0];
    accelerator_for(i,ent,1,{
	rhs_v[table[i].first]=buffer_p[table[i].second];
    accelerator_for(i,ent,vobj::Nsimd(),{
	coalescedWrite(rhs_v[table[i].first],coalescedRead(buffer_p[table[i].second]));
    });
  }
}
@@ -209,9 +209,11 @@ template<class vobj> void Scatter_plane_merge(Lattice<vobj> &rhs,ExtractPointerA

  if(cbmask ==0x3 ) {
    autoView( rhs_v , rhs, AcceleratorWrite);
    int _slice_stride = rhs.Grid()->_slice_stride[dimension];
    int _slice_block = rhs.Grid()->_slice_block[dimension];
    accelerator_for2d(n,e1,b,e2,1,{
	int o      = n*rhs.Grid()->_slice_stride[dimension];
	int offset = b+n*rhs.Grid()->_slice_block[dimension];
	int o      = n*_slice_stride;
	int offset = b+n*_slice_block;
	merge(rhs_v[so+o+b],pointers,offset);
      });
  } else { 
@@ -220,6 +222,7 @@ template<class vobj> void Scatter_plane_merge(Lattice<vobj> &rhs,ExtractPointerA
    // Test_cshift_red_black code.
    //    std::cout << "Scatter_plane merge assert(0); think this is buggy FIXME "<< std::endl;// think this is buggy FIXME
    std::cout<<" Unthreaded warning -- buffer is not densely packed ??"<<std::endl;
    assert(0); // This will fail if hit on GPU
    autoView( rhs_v, rhs, CpuWrite);
    for(int n=0;n<e1;n++){
      for(int b=0;b<e2;b++){
@@ -280,8 +283,8 @@ template<class vobj> void Copy_plane(Lattice<vobj>& lhs,const Lattice<vobj> &rhs
    autoView(rhs_v , rhs, AcceleratorRead);
    autoView(lhs_v , lhs, AcceleratorWrite);
    auto table = &Cshift_table[0];
    accelerator_for(i,ent,1,{
      lhs_v[table[i].first]=rhs_v[table[i].second];
    accelerator_for(i,ent,vobj::Nsimd(),{
      coalescedWrite(lhs_v[table[i].first],coalescedRead(rhs_v[table[i].second]));
    });
  }
}

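The gather, scatter and copy loops above are table-driven: the Cshift table pairs a destination offset with a source offset, and the hunk changes the loop body to run over `vobj::Nsimd()` lanes with `coalescedRead`/`coalescedWrite`. A minimal host-side analogue of the table-driven copy itself (the lane handling and `accelerator_for` machinery are Grid-specific and omitted here):

```cpp
#include <cstddef>
#include <utility>
#include <vector>

int main() {
  std::vector<double> rhs{10, 11, 12, 13};
  std::vector<double> buffer(4, 0.0);
  // table[i] = {offset in buffer, offset in rhs}
  std::vector<std::pair<int, int>> table{{0, 2}, {1, 3}, {2, 0}, {3, 1}};

  for (std::size_t i = 0; i < table.size(); ++i) {
    buffer[table[i].first] = rhs[table[i].second];  // gather through the table
  }
  return buffer[0] == 12 ? 0 : 1;
}
```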
@@ -37,6 +37,7 @@ Author: Peter Boyle <paboyle@ph.ed.ac.uk>
#include <Grid/lattice/Lattice_reduction.h>
#include <Grid/lattice/Lattice_peekpoke.h>
//#include <Grid/lattice/Lattice_reality.h>
#include <Grid/lattice/Lattice_real_imag.h>
#include <Grid/lattice/Lattice_comparison_utils.h>
#include <Grid/lattice/Lattice_comparison.h>
#include <Grid/lattice/Lattice_coordinate.h>

@@ -42,9 +42,24 @@ NAMESPACE_BEGIN(Grid);
////////////////////////////////////////////////////
// Predicated where support
////////////////////////////////////////////////////
#ifdef GRID_SIMT
// drop to scalar in SIMT; cleaner in fact
template <class iobj, class vobj, class robj>
accelerator_inline vobj predicatedWhere(const iobj &predicate, const vobj &iftrue,
                            const robj &iffalse) {
accelerator_inline vobj predicatedWhere(const iobj &predicate, 
					const vobj &iftrue, 
					const robj &iffalse) 
{
  Integer mask = TensorRemove(predicate);
  typename std::remove_const<vobj>::type ret= iffalse;
  if (mask) ret=iftrue;
  return ret;
}
#else
template <class iobj, class vobj, class robj>
accelerator_inline vobj predicatedWhere(const iobj &predicate, 
					const vobj &iftrue, 
					const robj &iffalse) 
{
  typename std::remove_const<vobj>::type ret;

  typedef typename vobj::scalar_object scalar_object;
@@ -68,6 +83,7 @@ accelerator_inline vobj predicatedWhere(const iobj &predicate, const vobj &iftru
  merge(ret, falsevals);
  return ret;
}
#endif

/////////////////////////////////////////////////////
//Specialization of getVectorType for lattices
@@ -81,32 +97,62 @@ struct getVectorType<Lattice<T> >{
//--  recursive evaluation of expressions; --
// handle leaves of syntax tree
///////////////////////////////////////////////////
template<class sobj> accelerator_inline 
template<class sobj,
  typename std::enable_if<!is_lattice<sobj>::value&&!is_lattice_expr<sobj>::value,sobj>::type * = nullptr> 
accelerator_inline 
sobj eval(const uint64_t ss, const sobj &arg)
{
  return arg;
}

template <class lobj> accelerator_inline 
const lobj & eval(const uint64_t ss, const LatticeView<lobj> &arg) 
auto eval(const uint64_t ss, const LatticeView<lobj> &arg) -> decltype(arg(ss))
{
  return arg(ss);
}

////////////////////////////////////////////
//--  recursive evaluation of expressions; --
// whole vector return, used only for expression return type inference
///////////////////////////////////////////////////
template<class sobj> accelerator_inline 
sobj vecEval(const uint64_t ss, const sobj &arg)
{
  return arg;
}
template <class lobj> accelerator_inline 
const lobj & vecEval(const uint64_t ss, const LatticeView<lobj> &arg) 
{
  return arg[ss];
}

// What needs this?
// Cannot be legal on accelerator
// Comparison must convert
#if 1
template <class lobj> accelerator_inline 
const lobj & eval(const uint64_t ss, const Lattice<lobj> &arg) 
{
  auto view = arg.View(AcceleratorRead);
  return view[ss];
}
#endif

///////////////////////////////////////////////////
// handle nodes in syntax tree- eval one operand
// vecEval needed (but never called as all expressions offloaded) to infer the return type
// in SIMT contexts of closure.
///////////////////////////////////////////////////
template <typename Op, typename T1> accelerator_inline 
auto vecEval(const uint64_t ss, const LatticeUnaryExpression<Op, T1> &expr)  
  -> decltype(expr.op.func( vecEval(ss, expr.arg1)))
{
  return expr.op.func( vecEval(ss, expr.arg1) );
}
// vecEval two operands
template <typename Op, typename T1, typename T2> accelerator_inline
auto vecEval(const uint64_t ss, const LatticeBinaryExpression<Op, T1, T2> &expr)  
  -> decltype(expr.op.func( vecEval(ss,expr.arg1),vecEval(ss,expr.arg2)))
{
  return expr.op.func( vecEval(ss,expr.arg1), vecEval(ss,expr.arg2) );
}
// vecEval three operands
template <typename Op, typename T1, typename T2, typename T3> accelerator_inline
auto vecEval(const uint64_t ss, const LatticeTrinaryExpression<Op, T1, T2, T3> &expr)  
  -> decltype(expr.op.func(vecEval(ss, expr.arg1), vecEval(ss, expr.arg2), vecEval(ss, expr.arg3)))
{
  return expr.op.func(vecEval(ss, expr.arg1), vecEval(ss, expr.arg2), vecEval(ss, expr.arg3));
}

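The hunk above separates two evaluation paths: `vecEval` returns the whole short vector and is used only to infer return types, while `eval` returns the per-site or per-lane value used in kernels. A minimal sketch of the recursive `vecEval` pattern with invented stand-in types for Grid's expression nodes:

```cpp
#include <cstddef>
#include <cstdio>

struct AddOp {
  template <class A, class B> static auto func(const A& a, const B& b) { return a + b; }
};
template <class Op, class T1, class T2> struct ToyBinaryExpr { T1 arg1; T2 arg2; };

// Leaf: a plain value evaluates to itself.
template <class T> T vecEval(std::size_t, const T& arg) { return arg; }

// Node: evaluate both operands, then apply the op, exactly as in the diff.
template <class Op, class T1, class T2>
auto vecEval(std::size_t ss, const ToyBinaryExpr<Op, T1, T2>& e)
    -> decltype(Op::func(vecEval(ss, e.arg1), vecEval(ss, e.arg2))) {
  return Op::func(vecEval(ss, e.arg1), vecEval(ss, e.arg2));
}

int main() {
  ToyBinaryExpr<AddOp, double, double> expr{1.5, 2.5};
  std::printf("vecEval = %f\n", vecEval(0, expr));  // prints 4.000000
  return 0;
}
```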
///////////////////////////////////////////////////
// handle nodes in syntax tree- eval one operand coalesced
///////////////////////////////////////////////////
template <typename Op, typename T1> accelerator_inline 
auto eval(const uint64_t ss, const LatticeUnaryExpression<Op, T1> &expr)  
@@ -114,23 +160,41 @@ auto eval(const uint64_t ss, const LatticeUnaryExpression<Op, T1> &expr)
{
  return expr.op.func( eval(ss, expr.arg1) );
}
///////////////////////
// eval two operands
///////////////////////
template <typename Op, typename T1, typename T2> accelerator_inline
auto eval(const uint64_t ss, const LatticeBinaryExpression<Op, T1, T2> &expr)  
  -> decltype(expr.op.func( eval(ss,expr.arg1),eval(ss,expr.arg2)))
{
  return expr.op.func( eval(ss,expr.arg1), eval(ss,expr.arg2) );
}
///////////////////////
// eval three operands
///////////////////////
template <typename Op, typename T1, typename T2, typename T3> accelerator_inline
auto eval(const uint64_t ss, const LatticeTrinaryExpression<Op, T1, T2, T3> &expr)  
  -> decltype(expr.op.func(eval(ss, expr.arg1), eval(ss, expr.arg2), eval(ss, expr.arg3)))
  -> decltype(expr.op.func(eval(ss, expr.arg1), 
			   eval(ss, expr.arg2), 
			   eval(ss, expr.arg3)))
{
  return expr.op.func(eval(ss, expr.arg1), eval(ss, expr.arg2), eval(ss, expr.arg3));
#ifdef GRID_SIMT
  // Handles Nsimd (vInteger) != Nsimd(ComplexD)
  typedef decltype(vecEval(ss, expr.arg2)) rvobj;
  typedef typename std::remove_reference<rvobj>::type vobj;

  const int Nsimd = vobj::vector_type::Nsimd();

  auto vpred = vecEval(ss,expr.arg1);

  ExtractBuffer<Integer> mask(Nsimd);
  extract<vInteger, Integer>(TensorRemove(vpred), mask);

  int s = acceleratorSIMTlane(Nsimd);
  return expr.op.func(mask[s],
		      eval(ss, expr.arg2), 
		      eval(ss, expr.arg3));
#else
  return expr.op.func(eval(ss, expr.arg1),
		      eval(ss, expr.arg2), 
		      eval(ss, expr.arg3));
#endif
}

//////////////////////////////////////////////////////////////////////////
@@ -228,7 +292,7 @@ template <typename Op, typename T1, typename T2> inline
void ExpressionViewOpen(LatticeBinaryExpression<Op, T1, T2> &expr) 
{
  ExpressionViewOpen(expr.arg1);  // recurse AST
  ExpressionViewOpen(expr.arg2);  // recurse AST
  ExpressionViewOpen(expr.arg2);  // rrecurse AST
}
template <typename Op, typename T1, typename T2, typename T3>
inline void ExpressionViewOpen(LatticeTrinaryExpression<Op, T1, T2, T3> &expr) 
@@ -272,9 +336,8 @@ inline void ExpressionViewClose(LatticeTrinaryExpression<Op, T1, T2, T3> &expr)
// Unary operators and funcs
////////////////////////////////////////////
#define GridUnopClass(name, ret)					\
  template <class arg>							\
  struct name {								\
    static auto accelerator_inline func(const arg a) -> decltype(ret) { return ret; } \
    template<class _arg> static auto accelerator_inline func(const _arg a) -> decltype(ret) { return ret; } \
  };

GridUnopClass(UnarySub, -a);
@@ -285,8 +348,6 @@ GridUnopClass(UnaryTrace, trace(a));
GridUnopClass(UnaryTranspose, transpose(a));
GridUnopClass(UnaryTa, Ta(a));
GridUnopClass(UnaryProjectOnGroup, ProjectOnGroup(a));
GridUnopClass(UnaryReal, real(a));
GridUnopClass(UnaryImag, imag(a));
GridUnopClass(UnaryToReal, toReal(a));
GridUnopClass(UnaryToComplex, toComplex(a));
GridUnopClass(UnaryTimesI, timesI(a));
@@ -305,10 +366,10 @@ GridUnopClass(UnaryExp, exp(a));
// Binary operators
////////////////////////////////////////////
#define GridBinOpClass(name, combination)			\
  template <class left, class right>				\
  struct name {							\
    template <class _left, class _right>			\
    static auto accelerator_inline				\
    func(const left &lhs, const right &rhs)			\
    func(const _left &lhs, const _right &rhs)			\
      -> decltype(combination) const				\
    {								\
      return combination;					\
@@ -328,10 +389,10 @@ GridBinOpClass(BinaryOrOr, lhs || rhs);
// Trinary conditional op
////////////////////////////////////////////////////
#define GridTrinOpClass(name, combination)				\
  template <class predicate, class left, class right>			\
  struct name {								\
    template <class _predicate,class _left, class _right>		\
    static auto accelerator_inline					\
    func(const predicate &pred, const left &lhs, const right &rhs)	\
    func(const _predicate &pred, const _left &lhs, const _right &rhs)	\
      -> decltype(combination) const					\
    {									\
      return combination;						\
@@ -339,17 +400,17 @@ GridBinOpClass(BinaryOrOr, lhs || rhs);
  };

GridTrinOpClass(TrinaryWhere,
		(predicatedWhere<predicate, 
		 typename std::remove_reference<left>::type,
		 typename std::remove_reference<right>::type>(pred, lhs,rhs)));
		(predicatedWhere<
		 typename std::remove_reference<_predicate>::type, 
		 typename std::remove_reference<_left>::type,
		 typename std::remove_reference<_right>::type>(pred, lhs,rhs)));
 | 
			
		||||
 | 
			
		||||
////////////////////////////////////////////
 | 
			
		||||
// Operator syntactical glue
 | 
			
		||||
////////////////////////////////////////////
 | 
			
		||||
 | 
			
		||||
#define GRID_UNOP(name)   name<decltype(eval(0, arg))>
 | 
			
		||||
#define GRID_BINOP(name)  name<decltype(eval(0, lhs)), decltype(eval(0, rhs))>
 | 
			
		||||
#define GRID_TRINOP(name) name<decltype(eval(0, pred)), decltype(eval(0, lhs)), decltype(eval(0, rhs))>
 | 
			
		||||
#define GRID_UNOP(name)   name
 | 
			
		||||
#define GRID_BINOP(name)  name
 | 
			
		||||
#define GRID_TRINOP(name) name
 | 
			
		||||
 | 
			
		||||
#define GRID_DEF_UNOP(op, name)						\
 | 
			
		||||
  template <typename T1, typename std::enable_if<is_lattice<T1>::value||is_lattice_expr<T1>::value,T1>::type * = nullptr> \
 | 
			
		||||
@@ -401,8 +462,6 @@ GRID_DEF_UNOP(trace, UnaryTrace);
 | 
			
		||||
GRID_DEF_UNOP(transpose, UnaryTranspose);
 | 
			
		||||
GRID_DEF_UNOP(Ta, UnaryTa);
 | 
			
		||||
GRID_DEF_UNOP(ProjectOnGroup, UnaryProjectOnGroup);
 | 
			
		||||
GRID_DEF_UNOP(real, UnaryReal);
 | 
			
		||||
GRID_DEF_UNOP(imag, UnaryImag);
 | 
			
		||||
GRID_DEF_UNOP(toReal, UnaryToReal);
 | 
			
		||||
GRID_DEF_UNOP(toComplex, UnaryToComplex);
 | 
			
		||||
GRID_DEF_UNOP(timesI, UnaryTimesI);
 | 
			
		||||
@@ -435,29 +494,36 @@ GRID_DEF_TRINOP(where, TrinaryWhere);
 | 
			
		||||
/////////////////////////////////////////////////////////////
 | 
			
		||||
template <class Op, class T1>
 | 
			
		||||
auto closure(const LatticeUnaryExpression<Op, T1> &expr)
 | 
			
		||||
  -> Lattice<decltype(expr.op.func(eval(0, expr.arg1)))> 
 | 
			
		||||
  -> Lattice<decltype(expr.op.func(vecEval(0, expr.arg1)))> 
 | 
			
		||||
{
 | 
			
		||||
  Lattice<decltype(expr.op.func(eval(0, expr.arg1)))> ret(expr);
 | 
			
		||||
  Lattice<decltype(expr.op.func(vecEval(0, expr.arg1)))> ret(expr);
 | 
			
		||||
  return ret;
 | 
			
		||||
}
 | 
			
		||||
template <class Op, class T1, class T2>
 | 
			
		||||
auto closure(const LatticeBinaryExpression<Op, T1, T2> &expr)
 | 
			
		||||
  -> Lattice<decltype(expr.op.func(eval(0, expr.arg1),eval(0, expr.arg2)))> 
 | 
			
		||||
  -> Lattice<decltype(expr.op.func(vecEval(0, expr.arg1),vecEval(0, expr.arg2)))> 
 | 
			
		||||
{
 | 
			
		||||
  Lattice<decltype(expr.op.func(eval(0, expr.arg1),eval(0, expr.arg2)))> ret(expr);
 | 
			
		||||
  Lattice<decltype(expr.op.func(vecEval(0, expr.arg1),vecEval(0, expr.arg2)))> ret(expr);
 | 
			
		||||
  return ret;
 | 
			
		||||
}
 | 
			
		||||
template <class Op, class T1, class T2, class T3>
 | 
			
		||||
auto closure(const LatticeTrinaryExpression<Op, T1, T2, T3> &expr)
 | 
			
		||||
  -> Lattice<decltype(expr.op.func(eval(0, expr.arg1),
 | 
			
		||||
				   eval(0, expr.arg2),
 | 
			
		||||
				   eval(0, expr.arg3)))> 
 | 
			
		||||
  -> Lattice<decltype(expr.op.func(vecEval(0, expr.arg1),
 | 
			
		||||
				   vecEval(0, expr.arg2),
 | 
			
		||||
				   vecEval(0, expr.arg3)))> 
 | 
			
		||||
{
 | 
			
		||||
  Lattice<decltype(expr.op.func(eval(0, expr.arg1),
 | 
			
		||||
				eval(0, expr.arg2),
 | 
			
		||||
				eval(0, expr.arg3)))>  ret(expr);
 | 
			
		||||
  Lattice<decltype(expr.op.func(vecEval(0, expr.arg1),
 | 
			
		||||
				vecEval(0, expr.arg2),
 | 
			
		||||
			        vecEval(0, expr.arg3)))>  ret(expr);
 | 
			
		||||
  return ret;
 | 
			
		||||
}
 | 
			
		||||
#define EXPRESSION_CLOSURE(function)					\
 | 
			
		||||
  template<class Expression,typename std::enable_if<is_lattice_expr<Expression>::value,void>::type * = nullptr> \
 | 
			
		||||
    auto function(Expression &expr) -> decltype(function(closure(expr))) \
 | 
			
		||||
  {									\
 | 
			
		||||
    return function(closure(expr));					\
 | 
			
		||||
  }
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
#undef GRID_UNOP
 | 
			
		||||
#undef GRID_BINOP
 | 
			
		||||
 
 | 
			
		||||
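The eval/vecEval overloads above walk the expression syntax tree one node at a time, once per site index ss. A minimal, self-contained sketch of that recursion pattern follows; the types Leaf, BinExpr and AddOp are illustrative stand-ins, not Grid's own classes.

// Minimal sketch of the per-site eval() recursion used by the expression templates above.
// Leaf, BinExpr and AddOp are illustrative only; Grid's real nodes carry views and SIMD types.
#include <cstdint>
#include <iostream>
#include <vector>

struct AddOp { static double func(double a, double b) { return a + b; } };

template <class T> struct Leaf { std::vector<T> data; };

template <class Op, class L, class R> struct BinExpr { const L &l; const R &r; };

// A leaf evaluates to its value at site ss.
template <class T>
double eval(uint64_t ss, const Leaf<T> &leaf) { return leaf.data[ss]; }

// An interior node evaluates both children, then applies its op, exactly as in the
// LatticeBinaryExpression case above.
template <class Op, class L, class R>
double eval(uint64_t ss, const BinExpr<Op, L, R> &e) {
  return Op::func(eval(ss, e.l), eval(ss, e.r));
}

int main() {
  Leaf<double> a{{1, 2, 3}}, b{{10, 20, 30}};
  BinExpr<AddOp, Leaf<double>, Leaf<double>> expr{a, b};
  for (uint64_t ss = 0; ss < 3; ss++) std::cout << eval(ss, expr) << "\n"; // 11 22 33
  return 0;
}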
@@ -60,9 +60,9 @@ void mac(Lattice<obj1> &ret,const Lattice<obj2> &lhs,const Lattice<obj3> &rhs){
  autoView( lhs_v , lhs, AcceleratorRead);
  autoView( rhs_v , rhs, AcceleratorRead);
  accelerator_for(ss,lhs_v.size(),obj1::Nsimd(),{
    decltype(coalescedRead(obj1())) tmp;
    auto lhs_t=lhs_v(ss);
    auto rhs_t=rhs_v(ss);
    auto tmp  =ret_v(ss);
    mac(&tmp,&lhs_t,&rhs_t);
    coalescedWrite(ret_v[ss],tmp);
  });
@@ -124,7 +124,7 @@ void mac(Lattice<obj1> &ret,const Lattice<obj2> &lhs,const obj3 &rhs){
  autoView( ret_v , ret, AcceleratorWrite);
  autoView( lhs_v , lhs, AcceleratorRead);
  accelerator_for(ss,lhs_v.size(),obj1::Nsimd(),{
    decltype(coalescedRead(obj1())) tmp;
    auto tmp  =ret_v(ss);
    auto lhs_t=lhs_v(ss);
    mac(&tmp,&lhs_t,&rhs);
    coalescedWrite(ret_v[ss],tmp);
@@ -182,7 +182,7 @@ void mac(Lattice<obj1> &ret,const obj2 &lhs,const Lattice<obj3> &rhs){
  autoView( ret_v , ret, AcceleratorWrite);
  autoView( rhs_v , lhs, AcceleratorRead);
  accelerator_for(ss,rhs_v.size(),obj1::Nsimd(),{
    decltype(coalescedRead(obj1())) tmp;
    auto tmp  =ret_v(ss);
    auto rhs_t=rhs_v(ss);
    mac(&tmp,&lhs,&rhs_t);
    coalescedWrite(ret_v[ss],tmp);

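Each of the mac hunks above performs the same per-site operation, ret(ss) += lhs(ss)*rhs(ss), with a coalesced temporary; a scalar sketch of that pattern, with plain std::vector standing in for lattice views, is:

// Scalar stand-in for the multiply-accumulate site loop in the hunks above.
// Grid's version reads/writes SIMD vobj data through coalescedRead/coalescedWrite.
#include <cstddef>
#include <vector>

void mac_sketch(std::vector<double> &ret,
                const std::vector<double> &lhs,
                const std::vector<double> &rhs) {
  for (std::size_t ss = 0; ss < ret.size(); ss++) {
    double tmp = ret[ss];        // tmp = ret_v(ss)
    tmp += lhs[ss] * rhs[ss];    // mac(&tmp,&lhs_t,&rhs_t)
    ret[ss] = tmp;               // coalescedWrite(ret_v[ss],tmp)
  }
}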
@@ -123,9 +123,9 @@ public:
    auto exprCopy = expr;
    ExpressionViewOpen(exprCopy);
    auto me  = View(AcceleratorWriteDiscard);
    accelerator_for(ss,me.size(),1,{
    accelerator_for(ss,me.size(),vobj::Nsimd(),{
      auto tmp = eval(ss,exprCopy);
      vstream(me[ss],tmp);
      coalescedWrite(me[ss],tmp);
    });
    me.ViewClose();
    ExpressionViewClose(exprCopy);
@@ -146,9 +146,9 @@ public:
    auto exprCopy = expr;
    ExpressionViewOpen(exprCopy);
    auto me  = View(AcceleratorWriteDiscard);
    accelerator_for(ss,me.size(),1,{
    accelerator_for(ss,me.size(),vobj::Nsimd(),{
      auto tmp = eval(ss,exprCopy);
      vstream(me[ss],tmp);
      coalescedWrite(me[ss],tmp);
    });
    me.ViewClose();
    ExpressionViewClose(exprCopy);
@@ -168,9 +168,9 @@ public:
    auto exprCopy = expr;
    ExpressionViewOpen(exprCopy);
    auto me  = View(AcceleratorWriteDiscard);
    accelerator_for(ss,me.size(),1,{
    accelerator_for(ss,me.size(),vobj::Nsimd(),{
      auto tmp = eval(ss,exprCopy);
      vstream(me[ss],tmp);
      coalescedWrite(me[ss],tmp);
    });
    me.ViewClose();
    ExpressionViewClose(exprCopy);

@@ -54,13 +54,34 @@ void basisRotate(VField &basis,Matrix& Qt,int j0, int j1, int k0,int k1,int Nm)
  typedef decltype(basis[0].View(AcceleratorRead)) View;

  Vector<View> basis_v; basis_v.reserve(basis.size());
  typedef typename std::remove_reference<decltype(basis_v[0][0])>::type vobj;
  typedef typename std::remove_reference<decltype(Qt(0,0))>::type Coeff_t;
  GridBase* grid = basis[0].Grid();

  for(int k=0;k<basis.size();k++){
    basis_v.push_back(basis[k].View(AcceleratorWrite));
  }


#if ( (!defined(GRID_SYCL)) && (!defined(GRID_CUDA)) && (!defined(GRID_HIP)) )
  int max_threads = thread_max();
  Vector < vobj > Bt(Nm * max_threads);
  thread_region
    {
      vobj* B = &Bt[Nm * thread_num()];
      thread_for_in_region(ss, grid->oSites(),{
	  for(int j=j0; j<j1; ++j) B[j]=0.;

	  for(int j=j0; j<j1; ++j){
	    for(int k=k0; k<k1; ++k){
	      B[j] +=Qt(j,k) * basis_v[k][ss];
	    }
	  }
	  for(int j=j0; j<j1; ++j){
	    basis_v[j][ss] = B[j];
	  }
	});
    }
#else
  View *basis_vp = &basis_v[0];

  int nrot = j1-j0;
@@ -70,14 +91,12 @@ void basisRotate(VField &basis,Matrix& Qt,int j0, int j1, int k0,int k1,int Nm)
  uint64_t oSites   =grid->oSites();
  uint64_t siteBlock=(grid->oSites()+nrot-1)/nrot; // Maximum 1 additional vector overhead

  typedef typename std::remove_reference<decltype(basis_v[0][0])>::type vobj;

  Vector <vobj> Bt(siteBlock * nrot);
  auto Bp=&Bt[0];

  // GPU readable copy of matrix
  Vector<double> Qt_jv(Nm*Nm);
  double *Qt_p = & Qt_jv[0];
  Vector<Coeff_t> Qt_jv(Nm*Nm);
  Coeff_t *Qt_p = & Qt_jv[0];
  thread_for(i,Nm*Nm,{
      int j = i/Nm;
      int k = i%Nm;
@@ -118,6 +137,7 @@ void basisRotate(VField &basis,Matrix& Qt,int j0, int j1, int k0,int k1,int Nm)
	coalescedWrite(basis_v[jj][sss],coalescedRead(Bp[ss*nrot+j]));
      });
  }
#endif

  for(int k=0;k<basis.size();k++) basis_v[k].ViewClose();
}

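Both the CPU and GPU branches of basisRotate above implement the same site-local linear transformation of the basis vectors by the matrix Qt,

\[
  \mathrm{basis}_j(x) \;\longleftarrow\; \sum_{k=k_0}^{k_1-1} Q_{jk}\,\mathrm{basis}_k(x),
  \qquad j_0 \le j < j_1,
\]

which is what the inner loop B[j] += Qt(j,k) * basis_v[k][ss] accumulates before writing back.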
@@ -42,34 +42,6 @@ NAMESPACE_BEGIN(Grid);

typedef iScalar<vInteger> vPredicate ;

/*
template <class iobj, class vobj, class robj> accelerator_inline
vobj predicatedWhere(const iobj &predicate, const vobj &iftrue, const robj &iffalse)
{
  typename std::remove_const<vobj>::type ret;

  typedef typename vobj::scalar_object scalar_object;
  typedef typename vobj::scalar_type scalar_type;
  typedef typename vobj::vector_type vector_type;

  const int Nsimd = vobj::vector_type::Nsimd();

  ExtractBuffer<Integer> mask(Nsimd);
  ExtractBuffer<scalar_object> truevals(Nsimd);
  ExtractBuffer<scalar_object> falsevals(Nsimd);

  extract(iftrue, truevals);
  extract(iffalse, falsevals);
  extract<vInteger, Integer>(TensorRemove(predicate), mask);

  for (int s = 0; s < Nsimd; s++) {
    if (mask[s]) falsevals[s] = truevals[s];
  }

  merge(ret, falsevals);
  return ret;
}
*/
//////////////////////////////////////////////////////////////////////////
// compare lattice to lattice
//////////////////////////////////////////////////////////////////////////

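The commented-out predicatedWhere above selects, lane by lane, the true-branch value wherever the integer mask is set. A scalar sketch of the same semantics, with plain vectors standing in for the SIMD extract/merge buffers:

// Scalar version of the masked select that predicatedWhere performs per SIMD lane.
#include <cstddef>
#include <vector>

std::vector<double> predicated_where(const std::vector<int> &mask,
                                     const std::vector<double> &iftrue,
                                     const std::vector<double> &iffalse) {
  std::vector<double> ret(iffalse);           // start from the false values
  for (std::size_t s = 0; s < mask.size(); s++)
    if (mask[s]) ret[s] = iftrue[s];          // overwrite where the predicate is set
  return ret;                                 // analogue of merge(ret, falsevals)
}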
@@ -182,6 +182,14 @@ inline void peekLocalSite(sobj &s,const LatticeView<vobj> &l,Coordinate &site)

  return;
};
template<class vobj,class sobj>
inline void peekLocalSite(sobj &s,const Lattice<vobj> &l,Coordinate &site)
{
  autoView(lv,l,CpuRead);
  peekLocalSite(s,lv,site);
  return;
};

// Must be CPU write view
template<class vobj,class sobj>
inline void pokeLocalSite(const sobj &s,LatticeView<vobj> &l,Coordinate &site)
@@ -210,6 +218,14 @@ inline void pokeLocalSite(const sobj &s,LatticeView<vobj> &l,Coordinate &site)
  return;
};

template<class vobj,class sobj>
inline void pokeLocalSite(const sobj &s, Lattice<vobj> &l,Coordinate &site)
{
  autoView(lv,l,CpuWrite);
  pokeLocalSite(s,lv,site);
  return;
};

NAMESPACE_END(Grid);
#endif

Grid/lattice/Lattice_real_imag.h (new file, 79 lines)
@@ -0,0 +1,79 @@
/*************************************************************************************

    Grid physics library, www.github.com/paboyle/Grid

    Source file: ./lib/lattice/Lattice_reality.h

    Copyright (C) 2015

Author: Azusa Yamaguchi <ayamaguc@staffmail.ed.ac.uk>
Author: Peter Boyle <paboyle@ph.ed.ac.uk>
Author: neo <cossu@post.kek.jp>

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License along
    with this program; if not, write to the Free Software Foundation, Inc.,
    51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.

    See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/*  END LEGAL */
#ifndef GRID_LATTICE_REAL_IMAG_H
#define GRID_LATTICE_REAL_IMAG_H


// FIXME .. this is the sector of the code
// I am most worried about the directions
// The choice of burying complex in the SIMD
// is making the use of "real" and "imag" very cumbersome

NAMESPACE_BEGIN(Grid);

template<class vobj> inline Lattice<vobj> real(const Lattice<vobj> &lhs){
  Lattice<vobj> ret(lhs.Grid());

  autoView( lhs_v, lhs, AcceleratorRead);
  autoView( ret_v, ret, AcceleratorWrite);

  ret.Checkerboard()=lhs.Checkerboard();
  accelerator_for( ss, lhs_v.size(), 1, {
    ret_v[ss] =real(lhs_v[ss]);
  });
  return ret;
};
template<class vobj> inline Lattice<vobj> imag(const Lattice<vobj> &lhs){
  Lattice<vobj> ret(lhs.Grid());

  autoView( lhs_v, lhs, AcceleratorRead);
  autoView( ret_v, ret, AcceleratorWrite);

  ret.Checkerboard()=lhs.Checkerboard();
  accelerator_for( ss, lhs_v.size(), 1, {
    ret_v[ss] =imag(lhs_v[ss]);
  });
  return ret;
};

template<class Expression,typename std::enable_if<is_lattice_expr<Expression>::value,void>::type * = nullptr>
  auto real(const Expression &expr) -> decltype(real(closure(expr)))
{
  return real(closure(expr));
}
template<class Expression,typename std::enable_if<is_lattice_expr<Expression>::value,void>::type * = nullptr>
  auto imag(const Expression &expr) -> decltype(imag(closure(expr)))
{
  return imag(closure(expr));
}

NAMESPACE_END(Grid);

#endif
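The new real() and imag() overloads apply the corresponding tensor operation site by site, and expression arguments are first materialised through closure(). A scalar analog with std::complex, purely illustrative of the per-site behaviour:

// Scalar analog of the site-wise real() added in Lattice_real_imag.h above.
// This is only an analogy; the lattice version works on vobj tensors, not std::complex.
#include <complex>
#include <cstddef>
#include <vector>

std::vector<std::complex<double>>
real_part(const std::vector<std::complex<double>> &z) {
  std::vector<std::complex<double>> ret(z.size());
  for (std::size_t ss = 0; ss < z.size(); ss++)
    ret[ss] = std::complex<double>(z[ss].real(), 0.0);   // keep only the real part
  return ret;
}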
@@ -2,12 +2,13 @@ NAMESPACE_BEGIN(Grid);

#ifdef GRID_HIP
extern hipDeviceProp_t *gpu_props;
#define WARP_SIZE 64
#endif
#ifdef GRID_CUDA
extern cudaDeviceProp *gpu_props;
#define WARP_SIZE 32
#endif

#define WARP_SIZE 32
__device__ unsigned int retirementCount = 0;

template <class Iterator>
@@ -64,7 +65,7 @@ __device__ void reduceBlock(volatile sobj *sdata, sobj mySum, const Iterator tid

  // cannot use overloaded operators for sobj as they are not volatile-qualified
  memcpy((void *)&sdata[tid], (void *)&mySum, sizeof(sobj));
  __syncwarp();
  acceleratorSynchronise();

  const Iterator VEC = WARP_SIZE;
  const Iterator vid = tid & (VEC-1);
@@ -78,9 +79,9 @@ __device__ void reduceBlock(volatile sobj *sdata, sobj mySum, const Iterator tid
      beta += temp;
      memcpy((void *)&sdata[tid], (void *)&beta, sizeof(sobj));
    }
    __syncwarp();
    acceleratorSynchronise();
  }
  __syncthreads();
  acceleratorSynchroniseAll();

  if (threadIdx.x == 0) {
    beta  = Zero();
@@ -90,7 +91,7 @@ __device__ void reduceBlock(volatile sobj *sdata, sobj mySum, const Iterator tid
    }
    memcpy((void *)&sdata[0], (void *)&beta, sizeof(sobj));
  }
  __syncthreads();
  acceleratorSynchroniseAll();
}


@@ -240,6 +240,8 @@ template<class vobj,class vobj2,class CComplex>
  autoView( fineX_  , fineX, AcceleratorRead);
  autoView( fineY_  , fineY, AcceleratorRead);
  autoView( coarseA_, coarseA, AcceleratorRead);
  Coordinate fine_rdimensions = fine->_rdimensions;
  Coordinate coarse_rdimensions = coarse->_rdimensions;

  accelerator_for(sf, fine->oSites(), CComplex::Nsimd(), {

@@ -247,9 +249,9 @@ template<class vobj,class vobj2,class CComplex>
      Coordinate coor_c(_ndimension);
      Coordinate coor_f(_ndimension);

      Lexicographic::CoorFromIndex(coor_f,sf,fine->_rdimensions);
      Lexicographic::CoorFromIndex(coor_f,sf,fine_rdimensions);
      for(int d=0;d<_ndimension;d++) coor_c[d]=coor_f[d]/block_r[d];
      Lexicographic::IndexFromCoor(coor_c,sc,coarse->_rdimensions);
      Lexicographic::IndexFromCoor(coor_c,sc,coarse_rdimensions);

      // z = A x + y
#ifdef GRID_SIMT
@@ -353,11 +355,14 @@ inline void blockSum(Lattice<vobj> &coarseData,const Lattice<vobj> &fineData)
  autoView( coarseData_ , coarseData, AcceleratorWrite);
  autoView( fineData_   , fineData, AcceleratorRead);

  Coordinate fine_rdimensions = fine->_rdimensions;
  Coordinate coarse_rdimensions = coarse->_rdimensions;

  accelerator_for(sc,coarse->oSites(),1,{

      // One thread per sub block
      Coordinate coor_c(_ndimension);
      Lexicographic::CoorFromIndex(coor_c,sc,coarse->_rdimensions);  // Block coordinate
      Lexicographic::CoorFromIndex(coor_c,sc,coarse_rdimensions);  // Block coordinate
      coarseData_[sc]=Zero();

      for(int sb=0;sb<blockVol;sb++){
@@ -367,7 +372,7 @@ inline void blockSum(Lattice<vobj> &coarseData,const Lattice<vobj> &fineData)
	Coordinate coor_f(_ndimension);
	Lexicographic::CoorFromIndex(coor_b,sb,block_r);               // Block sub coordinate
	for(int d=0;d<_ndimension;d++) coor_f[d]=coor_c[d]*block_r[d] + coor_b[d];
	Lexicographic::IndexFromCoor(coor_f,sf,fine->_rdimensions);
	Lexicographic::IndexFromCoor(coor_f,sf,fine_rdimensions);

	coarseData_[sc]=coarseData_[sc]+fineData_[sf];
      }

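The blockSum hunk only hoists the fine and coarse dimension arrays into device-readable copies; the sum itself is unchanged: each coarse site accumulates every fine site of its block,

\[
  \mathrm{coarse}(X) \;=\; \sum_{x \in \mathrm{block}(X)} \mathrm{fine}(x),
  \qquad x_d = X_d\, b_d + s_d, \;\; 0 \le s_d < b_d,
\]

with b_d the blocking factor block_r[d] in each direction d.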
@@ -130,6 +130,8 @@ public:
  friend std::ostream& operator<< (std::ostream& stream, Logger& log){

    if ( log.active ) {
      std::ios_base::fmtflags f(stream.flags());

      stream << log.background()<<  std::left;
      if (log.topWidth > 0)
      {
@@ -152,6 +154,8 @@ public:
	       << now	       << log.background() << " : " ;
      }
      stream << log.colour();
      stream.flags(f);

      return stream;
    } else {
      return devnull;

@@ -1,3 +1,4 @@
#include <Grid/GridCore.h>

int Grid::BinaryIO::latticeWriteMaxRetry = -1;
int                    Grid::BinaryIO::latticeWriteMaxRetry = -1;
Grid::BinaryIO::IoPerf Grid::BinaryIO::lastPerf;

@@ -79,6 +79,13 @@ inline void removeWhitespace(std::string &key)
///////////////////////////////////////////////////////////////////////////////////////////////////
class BinaryIO {
 public:
  struct IoPerf
  {
    uint64_t size{0},time{0};
    double   mbytesPerSecond{0.};
  };

  static IoPerf lastPerf;
  static int latticeWriteMaxRetry;

  /////////////////////////////////////////////////////////////////////////////
@@ -502,12 +509,15 @@ class BinaryIO {
      timer.Stop();
    }

    lastPerf.size            = sizeof(fobj)*iodata.size()*nrank;
    lastPerf.time            = timer.useconds();
    lastPerf.mbytesPerSecond = lastPerf.size/1024./1024./(lastPerf.time/1.0e6);
    std::cout<<GridLogMessage<<"IOobject: ";
    if ( control & BINARYIO_READ) std::cout << " read  ";
    else                          std::cout << " write ";
    uint64_t bytes = sizeof(fobj)*iodata.size()*nrank;
    std::cout<< bytes <<" bytes in "<<timer.Elapsed() <<" "
	     << (double)bytes/ (double)timer.useconds() <<" MB/s "<<std::endl;
    std::cout<< lastPerf.size <<" bytes in "<< timer.Elapsed() <<" "
	     << lastPerf.mbytesPerSecond <<" MB/s "<<std::endl;

    std::cout<<GridLogMessage<<"IOobject: endian and checksum overhead "<<bstimer.Elapsed()  <<std::endl;

@@ -663,10 +673,15 @@ class BinaryIO {
	     nersc_csum,scidac_csuma,scidac_csumb);

    timer.Start();
    thread_for(lidx,lsites,{
    thread_for(lidx,lsites,{  // FIX ME, suboptimal implementation
      std::vector<RngStateType> tmp(RngStateCount);
      std::copy(iodata[lidx].begin(),iodata[lidx].end(),tmp.begin());
      parallel_rng.SetState(tmp,lidx);
      Coordinate lcoor;
      grid->LocalIndexToLocalCoor(lidx, lcoor);
      int o_idx=grid->oIndex(lcoor);
      int i_idx=grid->iIndex(lcoor);
      int gidx=parallel_rng.generator_idx(o_idx,i_idx);
      parallel_rng.SetState(tmp,gidx);
      });
    timer.Stop();

@@ -723,7 +738,12 @@ class BinaryIO {
    std::vector<RNGstate> iodata(lsites);
    thread_for(lidx,lsites,{
      std::vector<RngStateType> tmp(RngStateCount);
      parallel_rng.GetState(tmp,lidx);
      Coordinate lcoor;
      grid->LocalIndexToLocalCoor(lidx, lcoor);
      int o_idx=grid->oIndex(lcoor);
      int i_idx=grid->iIndex(lcoor);
      int gidx=parallel_rng.generator_idx(o_idx,i_idx);
      parallel_rng.GetState(tmp,gidx);
      std::copy(tmp.begin(),tmp.end(),iodata[lidx].begin());
    });
    timer.Stop();

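The IoPerf fields introduced above record the last transfer in bytes and microseconds, and the reported rate follows directly from them. A small worked example of the same arithmetic used in the hunk:

// Worked example of the IoPerf rate formula: size in bytes, time in microseconds.
#include <cstdint>
#include <iostream>

int main() {
  uint64_t size = 1ull << 30;   // 1 GiB moved
  uint64_t time = 2000000;      // 2 seconds, expressed in microseconds
  double mbytesPerSecond = size / 1024. / 1024. / (time / 1.0e6);
  std::cout << mbytesPerSecond << " MB/s\n";   // prints 512
  return 0;
}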
@@ -47,7 +47,7 @@ static constexpr int Ym = 5;
static constexpr int Zm = 6;
static constexpr int Tm = 7;

static constexpr int Nc=3;
static constexpr int Nc=Config_Nc;
static constexpr int Ns=4;
static constexpr int Nd=4;
static constexpr int Nhs=2; // half spinor

@@ -208,7 +208,7 @@ public:
  LebesgueOrder LebesgueEvenOdd;

  // Comms buffer
  std::vector<SiteHalfSpinor,alignedAllocator<SiteHalfSpinor> >  comm_buf;
  //  std::vector<SiteHalfSpinor,alignedAllocator<SiteHalfSpinor> >  comm_buf;

  ///////////////////////////////////////////////////////////////
  // Conserved current utilities

@@ -63,17 +63,20 @@ template<class Impl> class StaggeredKernels : public FermionOperator<Impl> , pub
   ///////////////////////////////////////////////////////////////////////////////////////
   // Generic Nc kernels
   ///////////////////////////////////////////////////////////////////////////////////////
   template<int Naik> accelerator_inline
   template<int Naik>
   static accelerator_inline
   void DhopSiteGeneric(StencilView &st,
			DoubledGaugeFieldView &U, DoubledGaugeFieldView &UUU,
			SiteSpinor * buf, int LLs, int sU,
			const FermionFieldView &in, FermionFieldView &out,int dag);
   template<int Naik> accelerator_inline

   template<int Naik> static accelerator_inline
   void DhopSiteGenericInt(StencilView &st,
			   DoubledGaugeFieldView &U, DoubledGaugeFieldView &UUU,
			   SiteSpinor * buf, int LLs, int sU,
			   const FermionFieldView &in, FermionFieldView &out,int dag);
   template<int Naik> accelerator_inline

   template<int Naik> static accelerator_inline
   void DhopSiteGenericExt(StencilView &st,
			   DoubledGaugeFieldView &U, DoubledGaugeFieldView &UUU,
			   SiteSpinor * buf, int LLs, int sU,
@@ -82,17 +85,20 @@ template<class Impl> class StaggeredKernels : public FermionOperator<Impl> , pub
   ///////////////////////////////////////////////////////////////////////////////////////
   // Nc=3 specific kernels
   ///////////////////////////////////////////////////////////////////////////////////////
   template<int Naik> accelerator_inline

   template<int Naik> static accelerator_inline
   void DhopSiteHand(StencilView &st,
		     DoubledGaugeFieldView &U,DoubledGaugeFieldView &UUU,
		     SiteSpinor * buf, int LLs, int sU,
		     const FermionFieldView &in, FermionFieldView &out,int dag);
   template<int Naik> accelerator_inline

   template<int Naik> static accelerator_inline
   void DhopSiteHandInt(StencilView &st,
			DoubledGaugeFieldView &U,DoubledGaugeFieldView &UUU,
			SiteSpinor * buf, int LLs, int sU,
			const FermionFieldView &in, FermionFieldView &out,int dag);
   template<int Naik> accelerator_inline

   template<int Naik> static accelerator_inline
   void DhopSiteHandExt(StencilView &st,
			DoubledGaugeFieldView &U,DoubledGaugeFieldView &UUU,
			SiteSpinor * buf, int LLs, int sU,
@@ -101,6 +107,7 @@ template<class Impl> class StaggeredKernels : public FermionOperator<Impl> , pub
   ///////////////////////////////////////////////////////////////////////////////////////
   // Asm Nc=3 specific kernels
   ///////////////////////////////////////////////////////////////////////////////////////

   void DhopSiteAsm(StencilView &st,
		    DoubledGaugeFieldView &U,DoubledGaugeFieldView &UUU,
		    SiteSpinor * buf, int LLs, int sU,

@@ -50,14 +50,14 @@ public:
  double, nu);

  WilsonAnisotropyCoefficients():
    isAnisotropic(false),
    t_direction(Nd-1),
    xi_0(1.0),
    isAnisotropic(false),
    t_direction(Nd-1),
    xi_0(1.0),
    nu(1.0){}
};

template <class Impl>
class WilsonFermion : public WilsonKernels<Impl>, public WilsonFermionStatic
class WilsonFermion : public WilsonKernels<Impl>, public WilsonFermionStatic
{
public:
  INHERIT_IMPL_TYPES(Impl);
@@ -74,6 +74,20 @@ public:
  FermionField _tmp;
  FermionField &tmp(void) { return _tmp; }

  void Report(void);
  void ZeroCounters(void);
  double DhopCalls;
  double DhopCommTime;
  double DhopComputeTime;
  double DhopComputeTime2;
  double DhopFaceTime;
  double DhopTotalTime;

  double DerivCalls;
  double DerivCommTime;
  double DerivComputeTime;
  double DerivDhopComputeTime;

  //////////////////////////////////////////////////////////////////
  // override multiply; cut number routines if pass dagger argument
  // and also make interface more uniformly consistent
@@ -138,7 +152,7 @@ public:
  // Constructor
  WilsonFermion(GaugeField &_Umu, GridCartesian &Fgrid,
                GridRedBlackCartesian &Hgrid, RealD _mass,
                const ImplParams &p = ImplParams(),
                const ImplParams &p = ImplParams(),
                const WilsonAnisotropyCoefficients &anis = WilsonAnisotropyCoefficients() );

  // DoubleStore impl dependent
@@ -170,9 +184,9 @@ public:

  LebesgueOrder Lebesgue;
  LebesgueOrder LebesgueEvenOdd;

  WilsonAnisotropyCoefficients anisotropyCoeff;

  ///////////////////////////////////////////////////////////////
  // Conserved current utilities
  ///////////////////////////////////////////////////////////////
@@ -186,7 +200,7 @@ public:
                           PropagatorField &q_out,
                           PropagatorField &phys_src,
                           Current curr_type,
                           unsigned int mu,
                           unsigned int mu,
                           unsigned int tmin,
			   unsigned int tmax,
			   ComplexField &lattice_cmplx);
@@ -196,5 +210,3 @@ typedef WilsonFermion<WilsonImplF> WilsonFermionF;
typedef WilsonFermion<WilsonImplD> WilsonFermionD;

NAMESPACE_END(Grid);


@@ -215,7 +215,7 @@ public:
  LebesgueOrder LebesgueEvenOdd;

  // Comms buffer
  std::vector<SiteHalfSpinor,alignedAllocator<SiteHalfSpinor> >  comm_buf;
  //  std::vector<SiteHalfSpinor,alignedAllocator<SiteHalfSpinor> >  comm_buf;


};

@@ -799,7 +799,7 @@ void CayleyFermion5D<Impl>::SeqConservedCurrent(PropagatorField &q_in,

  PropagatorField tmp(UGrid);
  PropagatorField Utmp(UGrid);
  LatticeInteger zz (UGrid);   zz=0.0;
  PropagatorField zz (UGrid);   zz=0.0;
  LatticeInteger lcoor(UGrid); LatticeCoordinate(lcoor,Nd-1);
  for (int s=0;s<Ls;s++) {

@@ -850,7 +850,7 @@ void CayleyFermion5D<Impl>::SeqConservedCurrent(PropagatorField &q_in,
  PropagatorField tmp(UGrid);
  PropagatorField Utmp(UGrid);

  LatticeInteger zz (UGrid);   zz=0.0;
  PropagatorField  zz (UGrid);   zz=0.0;
  LatticeInteger lcoor(UGrid); LatticeCoordinate(lcoor,Nd-1);

  for(int s=0;s<Ls;s++){

@@ -146,7 +146,7 @@ NAMESPACE_BEGIN(Grid);


template <class Impl>
template <int Naik>
template <int Naik> accelerator_inline
void StaggeredKernels<Impl>::DhopSiteHand(StencilView &st,
					  DoubledGaugeFieldView &U,DoubledGaugeFieldView &UUU,
					  SiteSpinor *buf, int sF, int sU,
@@ -221,7 +221,7 @@ void StaggeredKernels<Impl>::DhopSiteHand(StencilView &st,


template <class Impl>
template <int Naik>
template <int Naik> accelerator_inline
void StaggeredKernels<Impl>::DhopSiteHandInt(StencilView &st,
					     DoubledGaugeFieldView &U, DoubledGaugeFieldView &UUU,
					     SiteSpinor *buf, int sF, int sU,
@@ -300,7 +300,7 @@ void StaggeredKernels<Impl>::DhopSiteHandInt(StencilView &st,


template <class Impl>
template <int Naik>
template <int Naik> accelerator_inline
void StaggeredKernels<Impl>::DhopSiteHandExt(StencilView &st,
					     DoubledGaugeFieldView &U, DoubledGaugeFieldView &UUU,
					     SiteSpinor *buf, int sF, int sU,

@@ -78,7 +78,7 @@ StaggeredKernels<Impl>::StaggeredKernels(const ImplParams &p) : Base(p){};
// Int, Ext, Int+Ext cases for comms overlap
////////////////////////////////////////////////////////////////////////////////////
template <class Impl>
template <int Naik>
template <int Naik> accelerator_inline
void StaggeredKernels<Impl>::DhopSiteGeneric(StencilView &st,
					     DoubledGaugeFieldView &U, DoubledGaugeFieldView &UUU,
					     SiteSpinor *buf, int sF, int sU,
@@ -126,7 +126,7 @@ void StaggeredKernels<Impl>::DhopSiteGeneric(StencilView &st,
  // Only contributions from interior of our node
  ///////////////////////////////////////////////////
template <class Impl>
template <int Naik>
template <int Naik> accelerator_inline
void StaggeredKernels<Impl>::DhopSiteGenericInt(StencilView &st,
						DoubledGaugeFieldView &U, DoubledGaugeFieldView &UUU,
						SiteSpinor *buf, int sF, int sU,
@@ -174,7 +174,7 @@ void StaggeredKernels<Impl>::DhopSiteGenericInt(StencilView &st,
  // Only contributions from exterior of our node
  ///////////////////////////////////////////////////
template <class Impl>
template <int Naik>
template <int Naik> accelerator_inline
void StaggeredKernels<Impl>::DhopSiteGenericExt(StencilView &st,
						DoubledGaugeFieldView &U, DoubledGaugeFieldView &UUU,
						SiteSpinor *buf, int sF, int sU,
@@ -224,7 +224,7 @@ void StaggeredKernels<Impl>::DhopSiteGenericExt(StencilView &st,
////////////////////////////////////////////////////////////////////////////////////
// Driving / wrapping routine to select right kernel
////////////////////////////////////////////////////////////////////////////////////
template <class Impl>
template <class Impl>
void StaggeredKernels<Impl>::DhopDirKernel(StencilImpl &st, DoubledGaugeFieldView &U, DoubledGaugeFieldView &UUU, SiteSpinor * buf,
					   int sF, int sU, const FermionFieldView &in, FermionFieldView &out, int dir,int disp)
{
@@ -253,7 +253,7 @@ void StaggeredKernels<Impl>::DhopDirKernel(StencilImpl &st, DoubledGaugeFieldVie
      ThisKernel::A(st_v,U_v,UUU_v,buf,sF,sU,in_v,out_v,dag);		\
  });

template <class Impl>
template <class Impl>
void StaggeredKernels<Impl>::DhopImproved(StencilImpl &st, LebesgueOrder &lo,
					  DoubledGaugeField &U, DoubledGaugeField &UUU,
					  const FermionField &in, FermionField &out, int dag, int interior,int exterior)
@@ -293,7 +293,7 @@ void StaggeredKernels<Impl>::DhopImproved(StencilImpl &st, LebesgueOrder &lo,
  }
  assert(0 && " Kernel optimisation case not covered ");
}
template <class Impl>
template <class Impl>
void StaggeredKernels<Impl>::DhopNaive(StencilImpl &st, LebesgueOrder &lo,
				       DoubledGaugeField &U,
				       const FermionField &in, FermionField &out, int dag, int interior,int exterior)

@@ -43,7 +43,7 @@ WilsonFermion<Impl>::WilsonFermion(GaugeField &_Umu, GridCartesian &Fgrid,
                                   GridRedBlackCartesian &Hgrid, RealD _mass,
                                   const ImplParams &p,
                                   const WilsonAnisotropyCoefficients &anis)
  :
  :
    Kernels(p),
    _grid(&Fgrid),
    _cbgrid(&Hgrid),
@@ -75,8 +75,93 @@ WilsonFermion<Impl>::WilsonFermion(GaugeField &_Umu, GridCartesian &Fgrid,
  StencilOdd.BuildSurfaceList(1,vol4);
}

template<class Impl>
void WilsonFermion<Impl>::Report(void)
{
  RealD NP = _grid->_Nprocessors;
  RealD NN = _grid->NodeCount();
  RealD volume = 1;
  Coordinate latt = _grid->GlobalDimensions();
  for(int mu=0;mu<Nd;mu++) volume=volume*latt[mu];

  if ( DhopCalls > 0 ) {
    std::cout << GridLogMessage << "#### Dhop calls report " << std::endl;
    std::cout << GridLogMessage << "WilsonFermion Number of DhopEO Calls   : " << DhopCalls   << std::endl;
    std::cout << GridLogMessage << "WilsonFermion TotalTime   /Calls        : " << DhopTotalTime   / DhopCalls << " us" << std::endl;
    std::cout << GridLogMessage << "WilsonFermion CommTime    /Calls        : " << DhopCommTime    / DhopCalls << " us" << std::endl;
    std::cout << GridLogMessage << "WilsonFermion FaceTime    /Calls        : " << DhopFaceTime    / DhopCalls << " us" << std::endl;
    std::cout << GridLogMessage << "WilsonFermion ComputeTime1/Calls        : " << DhopComputeTime / DhopCalls << " us" << std::endl;
    std::cout << GridLogMessage << "WilsonFermion ComputeTime2/Calls        : " << DhopComputeTime2/ DhopCalls << " us" << std::endl;

    // Average the compute time
    _grid->GlobalSum(DhopComputeTime);
    DhopComputeTime/=NP;
    RealD mflops = 1320*volume*DhopCalls/DhopComputeTime/2; // 2 for red black counting
    std::cout << GridLogMessage << "Average mflops/s per call                : " << mflops << std::endl;
    std::cout << GridLogMessage << "Average mflops/s per call per rank       : " << mflops/NP << std::endl;
    std::cout << GridLogMessage << "Average mflops/s per call per node       : " << mflops/NN << std::endl;

    RealD Fullmflops = 1320*volume*DhopCalls/(DhopTotalTime)/2; // 2 for red black counting
    std::cout << GridLogMessage << "Average mflops/s per call (full)         : " << Fullmflops << std::endl;
    std::cout << GridLogMessage << "Average mflops/s per call per rank (full): " << Fullmflops/NP << std::endl;
    std::cout << GridLogMessage << "Average mflops/s per call per node (full): " << Fullmflops/NN << std::endl;

   }

  if ( DerivCalls > 0 ) {
    std::cout << GridLogMessage << "#### Deriv calls report "<< std::endl;
    std::cout << GridLogMessage << "WilsonFermion Number of Deriv Calls    : " <<DerivCalls <<std::endl;
    std::cout << GridLogMessage << "WilsonFermion CommTime/Calls           : " <<DerivCommTime/DerivCalls<<" us" <<std::endl;
    std::cout << GridLogMessage << "WilsonFermion ComputeTime/Calls        : " <<DerivComputeTime/DerivCalls<<" us" <<std::endl;
    std::cout << GridLogMessage << "WilsonFermion Dhop ComputeTime/Calls   : " <<DerivDhopComputeTime/DerivCalls<<" us" <<std::endl;

    // how to count flops here?
    RealD mflops = 144*volume*DerivCalls/DerivDhopComputeTime;
    std::cout << GridLogMessage << "Average mflops/s per call               ? : " << mflops << std::endl;
    std::cout << GridLogMessage << "Average mflops/s per call per node      ? : " << mflops/NP << std::endl;

    // how to count flops here?
    RealD Fullmflops = 144*volume*DerivCalls/(DerivDhopComputeTime+DerivCommTime)/2; // 2 for red black counting
    std::cout << GridLogMessage << "Average mflops/s per call (full)        ? : " << Fullmflops << std::endl;
    std::cout << GridLogMessage << "Average mflops/s per call per node (full) ? : " << Fullmflops/NP << std::endl;  }

  if (DerivCalls > 0 || DhopCalls > 0){
    std::cout << GridLogMessage << "WilsonFermion Stencil"    <<std::endl;  Stencil.Report();
    std::cout << GridLogMessage << "WilsonFermion StencilEven"<<std::endl;  StencilEven.Report();
    std::cout << GridLogMessage << "WilsonFermion StencilOdd" <<std::endl;  StencilOdd.Report();
  }
  if ( DhopCalls > 0){
    std::cout << GridLogMessage << "WilsonFermion Stencil     Reporti()"    <<std::endl;  Stencil.Reporti(DhopCalls);
    std::cout << GridLogMessage << "WilsonFermion StencilEven Reporti()"<<std::endl;  StencilEven.Reporti(DhopCalls);
    std::cout << GridLogMessage << "WilsonFermion StencilOdd  Reporti()" <<std::endl;  StencilOdd.Reporti(DhopCalls);
  }
}
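The Dhop flop-rate lines in Report() above follow the code exactly: with V the global four-volume, the 1320 flop-per-site count used in the code for the Wilson hopping term, and times kept in microseconds,

\[
  \mathrm{mflops} \;=\; \frac{1320 \times V \times N_{\mathrm{Dhop\ calls}}}{2 \times T_{\mathrm{compute}}\,[\mu\mathrm{s}]},
\]

where the factor of two accounts for the red-black (single-checkerboard) calls, as the in-line comment notes.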
template<class Impl>
 | 
			
		||||
void WilsonFermion<Impl>::ZeroCounters(void) {
 | 
			
		||||
  DhopCalls       = 0; // ok
 | 
			
		||||
  DhopCommTime    = 0;
 | 
			
		||||
  DhopComputeTime = 0;
 | 
			
		||||
  DhopComputeTime2= 0;
 | 
			
		||||
  DhopFaceTime    = 0;
 | 
			
		||||
  DhopTotalTime   = 0;
 | 
			
		||||
 | 
			
		||||
  DerivCalls       = 0; // ok
 | 
			
		||||
  DerivCommTime    = 0;
 | 
			
		||||
  DerivComputeTime = 0;
 | 
			
		||||
  DerivDhopComputeTime = 0;
 | 
			
		||||
 | 
			
		||||
  Stencil.ZeroCounters();
 | 
			
		||||
  StencilEven.ZeroCounters();
 | 
			
		||||
  StencilOdd.ZeroCounters();
 | 
			
		||||
  Stencil.ZeroCountersi();
 | 
			
		||||
  StencilEven.ZeroCountersi();
 | 
			
		||||
  StencilOdd.ZeroCountersi();
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
template <class Impl>
 | 
			
		||||
void WilsonFermion<Impl>::ImportGauge(const GaugeField &_Umu) 
 | 
			
		||||
void WilsonFermion<Impl>::ImportGauge(const GaugeField &_Umu)
 | 
			
		||||
{
 | 
			
		||||
  GaugeField HUmu(_Umu.Grid());
 | 
			
		||||
 | 
			
		||||
@@ -107,7 +192,7 @@ void WilsonFermion<Impl>::ImportGauge(const GaugeField &_Umu)
 | 
			
		||||
/////////////////////////////
 | 
			
		||||
 | 
			
		||||
template <class Impl>
 | 
			
		||||
void WilsonFermion<Impl>::M(const FermionField &in, FermionField &out) 
 | 
			
		||||
void WilsonFermion<Impl>::M(const FermionField &in, FermionField &out)
 | 
			
		||||
{
 | 
			
		||||
  out.Checkerboard() = in.Checkerboard();
 | 
			
		||||
  Dhop(in, out, DaggerNo);
 | 
			
		||||
@@ -115,7 +200,7 @@ void WilsonFermion<Impl>::M(const FermionField &in, FermionField &out)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
template <class Impl>
 | 
			
		||||
void WilsonFermion<Impl>::Mdag(const FermionField &in, FermionField &out) 
 | 
			
		||||
void WilsonFermion<Impl>::Mdag(const FermionField &in, FermionField &out)
 | 
			
		||||
{
 | 
			
		||||
  out.Checkerboard() = in.Checkerboard();
 | 
			
		||||
  Dhop(in, out, DaggerYes);
 | 
			
		||||
@@ -123,7 +208,7 @@ void WilsonFermion<Impl>::Mdag(const FermionField &in, FermionField &out)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
template <class Impl>
 | 
			
		||||
void WilsonFermion<Impl>::Meooe(const FermionField &in, FermionField &out) 
 | 
			
		||||
void WilsonFermion<Impl>::Meooe(const FermionField &in, FermionField &out)
 | 
			
		||||
{
 | 
			
		||||
  if (in.Checkerboard() == Odd) {
 | 
			
		||||
    DhopEO(in, out, DaggerNo);
 | 
			
		||||
@@ -133,7 +218,7 @@ void WilsonFermion<Impl>::Meooe(const FermionField &in, FermionField &out)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
template <class Impl>
 | 
			
		||||
void WilsonFermion<Impl>::MeooeDag(const FermionField &in, FermionField &out) 
 | 
			
		||||
void WilsonFermion<Impl>::MeooeDag(const FermionField &in, FermionField &out)
 | 
			
		||||
{
 | 
			
		||||
  if (in.Checkerboard() == Odd) {
 | 
			
		||||
    DhopEO(in, out, DaggerYes);
 | 
			
		||||
@@ -141,9 +226,9 @@ void WilsonFermion<Impl>::MeooeDag(const FermionField &in, FermionField &out)
 | 
			
		||||
    DhopOE(in, out, DaggerYes);
 | 
			
		||||
  }
 | 
			
		||||
}
 | 
			
		||||
  
 | 
			
		||||
 | 
			
		||||
template <class Impl>
 | 
			
		||||
void WilsonFermion<Impl>::Mooee(const FermionField &in, FermionField &out) 
 | 
			
		||||
void WilsonFermion<Impl>::Mooee(const FermionField &in, FermionField &out)
 | 
			
		||||
{
 | 
			
		||||
  out.Checkerboard() = in.Checkerboard();
 | 
			
		||||
  typename FermionField::scalar_type scal(diag_mass);
@@ -151,80 +236,80 @@ void WilsonFermion<Impl>::Mooee(const FermionField &in, FermionField &out)
}

template <class Impl>
void WilsonFermion<Impl>::MooeeDag(const FermionField &in, FermionField &out)
{
  out.Checkerboard() = in.Checkerboard();
  Mooee(in, out);
}

template<class Impl>
void WilsonFermion<Impl>::MooeeInv(const FermionField &in, FermionField &out)
{
  out.Checkerboard() = in.Checkerboard();
  out = (1.0/(diag_mass))*in;
}

template<class Impl>
void WilsonFermion<Impl>::MooeeInvDag(const FermionField &in, FermionField &out)
{
  out.Checkerboard() = in.Checkerboard();
  MooeeInv(in,out);
}
template<class Impl>
void WilsonFermion<Impl>::MomentumSpacePropagator(FermionField &out, const FermionField &in,RealD _m,std::vector<double> twist)
{
  typedef typename FermionField::vector_type vector_type;
  typedef typename FermionField::scalar_type ScalComplex;
  typedef Lattice<iSinglet<vector_type> > LatComplex;

  // what type LatticeComplex
  conformable(_grid,out.Grid());

  Gamma::Algebra Gmu [] = {
    Gamma::Algebra::GammaX,
    Gamma::Algebra::GammaY,
    Gamma::Algebra::GammaZ,
    Gamma::Algebra::GammaT
  };

  Coordinate latt_size   = _grid->_fdimensions;

  FermionField   num  (_grid); num  = Zero();
  LatComplex    wilson(_grid); wilson= Zero();
  LatComplex     one  (_grid); one = ScalComplex(1.0,0.0);

  LatComplex denom(_grid); denom= Zero();
  LatComplex kmu(_grid);
  ScalComplex ci(0.0,1.0);
  // momphase = n * 2pi / L
  for(int mu=0;mu<Nd;mu++) {

    LatticeCoordinate(kmu,mu);

    RealD TwoPiL =  M_PI * 2.0/ latt_size[mu];

    kmu = TwoPiL * kmu;
    kmu = kmu + TwoPiL * one * twist[mu];//momentum for twisted boundary conditions

    wilson = wilson + 2.0*sin(kmu*0.5)*sin(kmu*0.5); // Wilson term

    num = num - sin(kmu)*ci*(Gamma(Gmu[mu])*in);    // derivative term

    denom=denom + sin(kmu)*sin(kmu);
  }

  wilson = wilson + _m;     // 2 sin^2 k/2 + m

  num   = num + wilson*in;     // -i gmu sin k + 2 sin^2 k/2 + m

  denom= denom+wilson*wilson; // sin^2 k + (2 sin^2 k/2 + m)^2

  denom= one/denom;

  out = num*denom; // [ -i gmu sin k + 2 sin^2 k/2 + m] / [ sin^2 k + (2 sin^2 k/2 + m)^2 ]

}
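For reference, the free momentum-space propagator assembled above is, as the in-line comments spell out, with twisted-boundary momenta $k_\mu = \frac{2\pi}{L_\mu}(n_\mu + \mathrm{twist}_\mu)$,

\[
\tilde S(k) \;=\; \frac{-i\sum_\mu \gamma_\mu \sin k_\mu \;+\; 2\sum_\mu \sin^2(k_\mu/2) \;+\; m}
                       {\sum_\mu \sin^2 k_\mu \;+\; \bigl(2\sum_\mu \sin^2(k_\mu/2) + m\bigr)^2}\,,
\]

i.e. exactly the num/denom ratio built in the mu loop and applied to the source field in.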

///////////////////////////////////
// Internal
@@ -234,6 +319,7 @@ template <class Impl>
void WilsonFermion<Impl>::DerivInternal(StencilImpl &st, DoubledGaugeField &U,
                                        GaugeField &mat, const FermionField &A,
                                        const FermionField &B, int dag) {
  DerivCalls++;
  assert((dag == DaggerNo) || (dag == DaggerYes));

  Compressor compressor(dag);
@@ -242,8 +328,11 @@ void WilsonFermion<Impl>::DerivInternal(StencilImpl &st, DoubledGaugeField &U,
  FermionField Atilde(B.Grid());
  Atilde = A;

  DerivCommTime-=usecond();
  st.HaloExchange(B, compressor);
  DerivCommTime+=usecond();

  DerivComputeTime-=usecond();
  for (int mu = 0; mu < Nd; mu++) {
    ////////////////////////////////////////////////////////////////////////
    // Flip gamma (1+g)<->(1-g) if dag
@@ -251,6 +340,7 @@ void WilsonFermion<Impl>::DerivInternal(StencilImpl &st, DoubledGaugeField &U,
    int gamma = mu;
    if (!dag) gamma += Nd;

    DerivDhopComputeTime -= usecond();
    int Ls=1;
    Kernels::DhopDirKernel(st, U, st.CommBuf(), Ls, B.Grid()->oSites(), B, Btilde, mu, gamma);

@@ -258,11 +348,13 @@ void WilsonFermion<Impl>::DerivInternal(StencilImpl &st, DoubledGaugeField &U,
    // spin trace outer product
    //////////////////////////////////////////////////
    Impl::InsertForce4D(mat, Btilde, Atilde, mu);
    DerivDhopComputeTime += usecond();
  }
  DerivComputeTime += usecond();
}

template <class Impl>
void WilsonFermion<Impl>::DhopDeriv(GaugeField &mat, const FermionField &U, const FermionField &V, int dag)
{
  conformable(U.Grid(), _grid);
  conformable(U.Grid(), V.Grid());
@@ -274,13 +366,13 @@ void WilsonFermion<Impl>::DhopDeriv(GaugeField &mat, const FermionField &U, cons
}

template <class Impl>
void WilsonFermion<Impl>::DhopDerivOE(GaugeField &mat, const FermionField &U, const FermionField &V, int dag)
{
  conformable(U.Grid(), _cbgrid);
  conformable(U.Grid(), V.Grid());
  //conformable(U.Grid(), mat.Grid()); not general, leaving as a comment (Guido)
  // Motivation: look at the SchurDiff operator

  assert(V.Checkerboard() == Even);
  assert(U.Checkerboard() == Odd);
  mat.Checkerboard() = Odd;
@@ -289,7 +381,7 @@ void WilsonFermion<Impl>::DhopDerivOE(GaugeField &mat, const FermionField &U, co
}

template <class Impl>
void WilsonFermion<Impl>::DhopDerivEO(GaugeField &mat, const FermionField &U, const FermionField &V, int dag)
{
  conformable(U.Grid(), _cbgrid);
  conformable(U.Grid(), V.Grid());
@@ -303,7 +395,7 @@ void WilsonFermion<Impl>::DhopDerivEO(GaugeField &mat, const FermionField &U, co
}

template <class Impl>
void WilsonFermion<Impl>::Dhop(const FermionField &in, FermionField &out, int dag)
{
  conformable(in.Grid(), _grid);  // verifies full grid
  conformable(in.Grid(), out.Grid());
@@ -314,7 +406,7 @@ void WilsonFermion<Impl>::Dhop(const FermionField &in, FermionField &out, int da
}

template <class Impl>
void WilsonFermion<Impl>::DhopOE(const FermionField &in, FermionField &out, int dag)
{
  conformable(in.Grid(), _cbgrid);    // verifies half grid
  conformable(in.Grid(), out.Grid());  // drops the cb check
@@ -326,7 +418,7 @@ void WilsonFermion<Impl>::DhopOE(const FermionField &in, FermionField &out, int
}

template <class Impl>
void WilsonFermion<Impl>::DhopEO(const FermionField &in, FermionField &out,int dag)
{
  conformable(in.Grid(), _cbgrid);    // verifies half grid
  conformable(in.Grid(), out.Grid());  // drops the cb check
@@ -338,18 +430,18 @@ void WilsonFermion<Impl>::DhopEO(const FermionField &in, FermionField &out,int d
}

template <class Impl>
void WilsonFermion<Impl>::Mdir(const FermionField &in, FermionField &out, int dir, int disp)
{
  DhopDir(in, out, dir, disp);
}
template <class Impl>
void WilsonFermion<Impl>::MdirAll(const FermionField &in, std::vector<FermionField> &out)
{
  DhopDirAll(in, out);
}

template <class Impl>
void WilsonFermion<Impl>::DhopDir(const FermionField &in, FermionField &out, int dir, int disp)
{
  Compressor compressor(DaggerNo);
  Stencil.HaloExchange(in, compressor);
@@ -361,12 +453,12 @@ void WilsonFermion<Impl>::DhopDir(const FermionField &in, FermionField &out, int
  DhopDirCalc(in, out, dirdisp, gamma, DaggerNo);
};
template <class Impl>
void WilsonFermion<Impl>::DhopDirAll(const FermionField &in, std::vector<FermionField> &out)
{
  Compressor compressor(DaggerNo);
  Stencil.HaloExchange(in, compressor);

  assert((out.size()==8)||(out.size()==9));
  for(int dir=0;dir<Nd;dir++){
    for(int disp=-1;disp<=1;disp+=2){

@@ -379,7 +471,7 @@ void WilsonFermion<Impl>::DhopDirAll(const FermionField &in, std::vector<Fermion
  }
}
template <class Impl>
void WilsonFermion<Impl>::DhopDirCalc(const FermionField &in, FermionField &out,int dirdisp, int gamma, int dag)
{
  int Ls=1;
  uint64_t Nsite=in.oSites();
@@ -390,22 +482,23 @@ template <class Impl>
void WilsonFermion<Impl>::DhopInternal(StencilImpl &st, LebesgueOrder &lo,
                                       DoubledGaugeField &U,
                                       const FermionField &in,
                                       FermionField &out, int dag)
{
  DhopTotalTime-=usecond();
#ifdef GRID_OMP
  if ( WilsonKernelsStatic::Comms == WilsonKernelsStatic::CommsAndCompute )
    DhopInternalOverlappedComms(st,lo,U,in,out,dag);
  else
#endif
    DhopInternalSerial(st,lo,U,in,out,dag);

  DhopTotalTime+=usecond();
}

template <class Impl>
void WilsonFermion<Impl>::DhopInternalOverlappedComms(StencilImpl &st, LebesgueOrder &lo,
						      DoubledGaugeField &U,
						      const FermionField &in,
						      FermionField &out, int dag)
{
  assert((dag == DaggerNo) || (dag == DaggerYes));

@@ -417,38 +510,53 @@ void WilsonFermion<Impl>::DhopInternalOverlappedComms(StencilImpl &st, LebesgueO
  /////////////////////////////
  std::vector<std::vector<CommsRequest_t> > requests;
  st.Prepare();
  DhopFaceTime-=usecond();
  st.HaloGather(in,compressor);
  DhopFaceTime+=usecond();

  DhopCommTime -=usecond();
  st.CommunicateBegin(requests);

  /////////////////////////////
  // Overlap with comms
  /////////////////////////////
  DhopFaceTime-=usecond();
  st.CommsMergeSHM(compressor);
  DhopFaceTime+=usecond();

  /////////////////////////////
  // do the compute interior
  /////////////////////////////
  int Opt = WilsonKernelsStatic::Opt;
  DhopComputeTime-=usecond();
  if (dag == DaggerYes) {
    Kernels::DhopDagKernel(Opt,st,U,st.CommBuf(),1,U.oSites(),in,out,1,0);
  } else {
    Kernels::DhopKernel(Opt,st,U,st.CommBuf(),1,U.oSites(),in,out,1,0);
  }
  DhopComputeTime+=usecond();

  /////////////////////////////
  // Complete comms
  /////////////////////////////
  st.CommunicateComplete(requests);
  DhopCommTime   +=usecond();

  DhopFaceTime-=usecond();
  st.CommsMerge(compressor);
  DhopFaceTime+=usecond();

  /////////////////////////////
  // do the compute exterior
  /////////////////////////////

  DhopComputeTime2-=usecond();
  if (dag == DaggerYes) {
    Kernels::DhopDagKernel(Opt,st,U,st.CommBuf(),1,U.oSites(),in,out,0,1);
  } else {
    Kernels::DhopKernel(Opt,st,U,st.CommBuf(),1,U.oSites(),in,out,0,1);
  }
  DhopComputeTime2+=usecond();
};
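The schedule used by DhopInternalOverlappedComms is easier to see with the Grid types and timers stripped out. A minimal sketch, with placeholder callables standing in for the stencil and kernel calls (names here are illustrative, not Grid API):

#include <functional>

// Sketch of the comms/compute overlap above: gather faces, start non-blocking
// comms, compute the interior while data is in flight, then finish the halo
// and compute the exterior sites.
inline void overlapped_dhop(const std::function<void()> &halo_gather,     // st.HaloGather
                            const std::function<void()> &comms_begin,     // st.CommunicateBegin
                            const std::function<void()> &interior,        // Dhop kernel with flags (1,0)
                            const std::function<void()> &comms_complete,  // st.CommunicateComplete
                            const std::function<void()> &merge_halo,      // st.CommsMerge
                            const std::function<void()> &exterior)        // Dhop kernel with flags (0,1)
{
  halo_gather();     // pack the faces that must travel off node
  comms_begin();     // post the non-blocking exchange
  interior();        // every site that needs no remote data
  comms_complete();  // wait for the halo to arrive
  merge_halo();      // unpack the received faces
  exterior();        // finish the sites that touch the halo
}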

@@ -456,24 +564,28 @@ template <class Impl>
void WilsonFermion<Impl>::DhopInternalSerial(StencilImpl &st, LebesgueOrder &lo,
                                       DoubledGaugeField &U,
                                       const FermionField &in,
                                       FermionField &out, int dag)
{
  assert((dag == DaggerNo) || (dag == DaggerYes));
  Compressor compressor(dag);
  DhopCommTime-=usecond();
  st.HaloExchange(in, compressor);
  DhopCommTime+=usecond();

  DhopComputeTime-=usecond();
  int Opt = WilsonKernelsStatic::Opt;
  if (dag == DaggerYes) {
    Kernels::DhopDagKernel(Opt,st,U,st.CommBuf(),1,U.oSites(),in,out);
  } else {
    Kernels::DhopKernel(Opt,st,U,st.CommBuf(),1,U.oSites(),in,out);
  }
  DhopComputeTime+=usecond();
};
/*Change ends */

/*******************************************************************************
 * Conserved current utilities for Wilson fermions, for contracting propagators
 * to make a conserved current sink or inserting the conserved current
 * sequentially.
 ******************************************************************************/
template <class Impl>
@@ -493,12 +605,12 @@ void WilsonFermion<Impl>::ContractConservedCurrent(PropagatorField &q_in_1,


template <class Impl>
void WilsonFermion<Impl>::SeqConservedCurrent(PropagatorField &q_in,
                                              PropagatorField &q_out,
                                              PropagatorField &src,
                                              Current curr_type,
                                              unsigned int mu,
                                              unsigned int tmin,
                                              unsigned int tmax,
					      ComplexField &lattice_cmplx)
{

 574	Grid/qcd/action/fermion/implementation/WilsonKernelsAsmA64FX.h	Normal file
@@ -0,0 +1,574 @@
/*************************************************************************************

    Grid physics library, www.github.com/paboyle/Grid

    Source file: ./lib/qcd/action/fermion/WilsonKernelsAsmA64FX.h

    Copyright (C) 2020

Author: Nils Meyer  <nils.meyer@ur.de>  Regensburg University

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License along
    with this program; if not, write to the Free Software Foundation, Inc.,
    51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.

    See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/*  END LEGAL */
#pragma once

//#if defined(A64FXASM)
#if defined(A64FX)

// safety include
#include <arm_sve.h>

// undefine everything related to kernels
#include <simd/Fujitsu_A64FX_undef.h>

// enable A64FX body
#define WILSONKERNELSASMBODYA64FX
//#pragma message("A64FX Dslash: WilsonKernelsAsmBodyA64FX.h")

    ///////////////////////////////////////////////////////////
    // If we are A64FX specialise the single precision routine
    ///////////////////////////////////////////////////////////
#if defined(DSLASHINTRIN)
//#pragma message ("A64FX Dslash: intrin")
#include <simd/Fujitsu_A64FX_intrin_single.h>
#else
#pragma message ("A64FX Dslash: asm")
#include <simd/Fujitsu_A64FX_asm_single.h>
#endif

/// Switch off the 5d vectorised code optimisations
#undef DWFVEC5D

/////////////////////////////////////////////////////////////////
// XYZT vectorised, undag Kernel, single
/////////////////////////////////////////////////////////////////
#undef KERNEL_DAG
#define INTERIOR_AND_EXTERIOR
#undef INTERIOR
#undef EXTERIOR
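// Reading aid for the block of specialisations that follows: each declaration
// has no explicit body in this file; the body is supplied textually by the
// #include directly underneath it (WilsonKernelsAsmBodyA64FX.h, or the generic
// WilsonKernelsAsmBody.h as a fallback), and the KERNEL_DAG / INTERIOR /
// EXTERIOR / INTERIOR_AND_EXTERIOR macros set just above each group select
// which variant of that body gets generated.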
template<> void
WilsonKernels<WilsonImplF>::AsmDhopSite(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf,
						int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out)
#if defined (WILSONKERNELSASMBODYA64FX)
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h>
#else
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBody.h>
#endif

template<> void
WilsonKernels<ZWilsonImplF>::AsmDhopSite(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf,
						int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out)
#if defined (WILSONKERNELSASMBODYA64FX)
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h>
#else
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBody.h>
#endif

template<> void
WilsonKernels<WilsonImplFH>::AsmDhopSite(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf,
						int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out)
#if defined (WILSONKERNELSASMBODYA64FX)
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h>
#else
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBody.h>
#endif

template<> void
WilsonKernels<ZWilsonImplFH>::AsmDhopSite(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf,
						int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out)
#if defined (WILSONKERNELSASMBODYA64FX)
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h>
#else
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBody.h>
#endif

#undef INTERIOR_AND_EXTERIOR
#define INTERIOR
#undef EXTERIOR
template<> void
WilsonKernels<WilsonImplF>::AsmDhopSiteInt(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf,
						int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out)
#if defined (WILSONKERNELSASMBODYA64FX)
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h>
#else
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBody.h>
#endif

template<> void
WilsonKernels<ZWilsonImplF>::AsmDhopSiteInt(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf,
						int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out)
#if defined (WILSONKERNELSASMBODYA64FX)
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h>
#else
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBody.h>
#endif

template<> void
WilsonKernels<WilsonImplFH>::AsmDhopSiteInt(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf,
						int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out)
#if defined (WILSONKERNELSASMBODYA64FX)
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h>
#else
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBody.h>
#endif

template<> void
WilsonKernels<ZWilsonImplFH>::AsmDhopSiteInt(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf,
						int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out)
#if defined (WILSONKERNELSASMBODYA64FX)
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h>
#else
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBody.h>
#endif

#undef INTERIOR_AND_EXTERIOR
#undef INTERIOR
#define EXTERIOR
template<> void
WilsonKernels<WilsonImplF>::AsmDhopSiteExt(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf,
						int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out)
#if defined (WILSONKERNELSASMBODYA64FX)
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h>
#else
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBody.h>
#endif

template<> void
WilsonKernels<ZWilsonImplF>::AsmDhopSiteExt(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf,
						int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out)
#if defined (WILSONKERNELSASMBODYA64FX)
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h>
#else
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBody.h>
#endif

template<> void
WilsonKernels<WilsonImplFH>::AsmDhopSiteExt(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf,
						int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out)
#if defined (WILSONKERNELSASMBODYA64FX)
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h>
#else
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBody.h>
#endif

template<> void
WilsonKernels<ZWilsonImplFH>::AsmDhopSiteExt(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf,
						int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out)
#if defined (WILSONKERNELSASMBODYA64FX)
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h>
#else
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBody.h>
#endif


/////////////////////////////////////////////////////////////////
// XYZT vectorised, dag Kernel, single
/////////////////////////////////////////////////////////////////
#define KERNEL_DAG
#define INTERIOR_AND_EXTERIOR
#undef INTERIOR
#undef EXTERIOR
template<> void
WilsonKernels<WilsonImplF>::AsmDhopSiteDag(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf,
						int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out)
#if defined (WILSONKERNELSASMBODYA64FX)
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h>
#else
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBody.h>
#endif

template<> void
WilsonKernels<ZWilsonImplF>::AsmDhopSiteDag(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf,
						int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out)
#if defined (WILSONKERNELSASMBODYA64FX)
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h>
#else
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBody.h>
#endif

template<> void
WilsonKernels<WilsonImplFH>::AsmDhopSiteDag(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf,
						int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out)
#if defined (WILSONKERNELSASMBODYA64FX)
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h>
#else
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBody.h>
#endif

template<> void
WilsonKernels<ZWilsonImplFH>::AsmDhopSiteDag(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf,
						int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out)
#if defined (WILSONKERNELSASMBODYA64FX)
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h>
#else
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBody.h>
#endif

#undef INTERIOR_AND_EXTERIOR
#define INTERIOR
#undef EXTERIOR
template<> void
WilsonKernels<WilsonImplF>::AsmDhopSiteDagInt(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf,
						int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out)
#if defined (WILSONKERNELSASMBODYA64FX)
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h>
#else
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBody.h>
#endif

template<> void
WilsonKernels<ZWilsonImplF>::AsmDhopSiteDagInt(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf,
						int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out)
#if defined (WILSONKERNELSASMBODYA64FX)
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h>
#else
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBody.h>
#endif

template<> void
WilsonKernels<WilsonImplFH>::AsmDhopSiteDagInt(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf,
						int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out)
#if defined (WILSONKERNELSASMBODYA64FX)
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h>
#else
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBody.h>
#endif

template<> void
WilsonKernels<ZWilsonImplFH>::AsmDhopSiteDagInt(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf,
						int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out)
#if defined (WILSONKERNELSASMBODYA64FX)
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h>
#else
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBody.h>
#endif

#undef INTERIOR_AND_EXTERIOR
#undef INTERIOR
#define EXTERIOR
template<> void
WilsonKernels<WilsonImplF>::AsmDhopSiteDagExt(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf,
						int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out)
#if defined (WILSONKERNELSASMBODYA64FX)
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h>
#else
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBody.h>
#endif

template<> void
WilsonKernels<ZWilsonImplF>::AsmDhopSiteDagExt(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf,
						int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out)
#if defined (WILSONKERNELSASMBODYA64FX)
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h>
#else
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBody.h>
#endif

template<> void
WilsonKernels<WilsonImplFH>::AsmDhopSiteDagExt(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf,
						int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out)
#if defined (WILSONKERNELSASMBODYA64FX)
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h>
#else
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBody.h>
#endif

template<> void
WilsonKernels<ZWilsonImplFH>::AsmDhopSiteDagExt(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf,
						int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out)
#if defined (WILSONKERNELSASMBODYA64FX)
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h>
#else
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBody.h>
#endif


// undefine
#include <simd/Fujitsu_A64FX_undef.h>

///////////////////////////////////////////////////////////
// If we are A64FX specialise the double precision routine
///////////////////////////////////////////////////////////

#if defined(DSLASHINTRIN)
#include <simd/Fujitsu_A64FX_intrin_double.h>
#else
#include <simd/Fujitsu_A64FX_asm_double.h>
#endif

// former KNL
//#define MAYBEPERM(A,perm) if (perm) { A ; }
//#define MULT_2SPIN(ptr,pf) MULT_ADDSUB_2SPIN(ptr,pf)
//#define COMPLEX_SIGNS(isigns) vComplexD *isigns = &signsD[0];


#define INTERIOR_AND_EXTERIOR
#undef  INTERIOR
#undef  EXTERIOR

/////////////////////////////////////////////////////////////////
// XYZT vectorised, undag Kernel, double
/////////////////////////////////////////////////////////////////
#undef KERNEL_DAG
#define INTERIOR_AND_EXTERIOR
#undef INTERIOR
#undef EXTERIOR
template<> void
WilsonKernels<WilsonImplD>::AsmDhopSite(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf,
						int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out)
#if defined (WILSONKERNELSASMBODYA64FX)
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h>
#else
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBody.h>
#endif

template<> void
WilsonKernels<ZWilsonImplD>::AsmDhopSite(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf,
						int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out)
#if defined (WILSONKERNELSASMBODYA64FX)
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h>
#else
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBody.h>
#endif

template<> void
WilsonKernels<WilsonImplDF>::AsmDhopSite(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf,
						int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out)
#if defined (WILSONKERNELSASMBODYA64FX)
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h>
#else
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBody.h>
#endif

template<> void
WilsonKernels<ZWilsonImplDF>::AsmDhopSite(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf,
						int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out)
#if defined (WILSONKERNELSASMBODYA64FX)
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h>
#else
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBody.h>
#endif

#undef INTERIOR_AND_EXTERIOR
#define INTERIOR
#undef EXTERIOR
template<> void
WilsonKernels<WilsonImplD>::AsmDhopSiteInt(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf,
						int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out)
#if defined (WILSONKERNELSASMBODYA64FX)
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h>
#else
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBody.h>
#endif

template<> void
WilsonKernels<ZWilsonImplD>::AsmDhopSiteInt(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf,
						int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out)
#if defined (WILSONKERNELSASMBODYA64FX)
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h>
#else
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBody.h>
#endif

template<> void
WilsonKernels<WilsonImplDF>::AsmDhopSiteInt(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf,
						int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out)
#if defined (WILSONKERNELSASMBODYA64FX)
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h>
#else
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBody.h>
#endif

template<> void
WilsonKernels<ZWilsonImplDF>::AsmDhopSiteInt(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf,
						int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out)
#if defined (WILSONKERNELSASMBODYA64FX)
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h>
#else
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBody.h>
#endif

#undef INTERIOR_AND_EXTERIOR
#undef INTERIOR
#define EXTERIOR
template<> void
WilsonKernels<WilsonImplD>::AsmDhopSiteExt(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf,
						int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out)
#if defined (WILSONKERNELSASMBODYA64FX)
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h>
#else
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBody.h>
#endif

template<> void
WilsonKernels<ZWilsonImplD>::AsmDhopSiteExt(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf,
						int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out)
#if defined (WILSONKERNELSASMBODYA64FX)
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h>
#else
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBody.h>
#endif

template<> void
WilsonKernels<WilsonImplDF>::AsmDhopSiteExt(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf,
						int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out)
#if defined (WILSONKERNELSASMBODYA64FX)
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h>
#else
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBody.h>
#endif

template<> void
WilsonKernels<ZWilsonImplDF>::AsmDhopSiteExt(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf,
						int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out)
#if defined (WILSONKERNELSASMBODYA64FX)
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h>
#else
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBody.h>
#endif

/////////////////////////////////////////////////////////////////
// XYZT vectorised, dag Kernel, double
/////////////////////////////////////////////////////////////////
#define KERNEL_DAG
#define INTERIOR_AND_EXTERIOR
#undef INTERIOR
#undef EXTERIOR
template<> void
WilsonKernels<WilsonImplD>::AsmDhopSiteDag(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf,
						int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out)
#if defined (WILSONKERNELSASMBODYA64FX)
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h>
#else
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBody.h>
#endif

template<> void
WilsonKernels<ZWilsonImplD>::AsmDhopSiteDag(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf,
						int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out)
#if defined (WILSONKERNELSASMBODYA64FX)
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h>
#else
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBody.h>
#endif

template<> void
WilsonKernels<WilsonImplDF>::AsmDhopSiteDag(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf,
						int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out)
#if defined (WILSONKERNELSASMBODYA64FX)
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h>
#else
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBody.h>
#endif

template<> void
WilsonKernels<ZWilsonImplDF>::AsmDhopSiteDag(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf,
						int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out)
#if defined (WILSONKERNELSASMBODYA64FX)
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h>
#else
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBody.h>
#endif

#undef INTERIOR_AND_EXTERIOR
#define INTERIOR
#undef EXTERIOR
template<> void
WilsonKernels<WilsonImplD>::AsmDhopSiteDagInt(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf,
						int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out)
#if defined (WILSONKERNELSASMBODYA64FX)
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h>
#else
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBody.h>
#endif

template<> void
WilsonKernels<ZWilsonImplD>::AsmDhopSiteDagInt(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf,
						int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out)
#if defined (WILSONKERNELSASMBODYA64FX)
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h>
#else
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBody.h>
#endif

template<> void
WilsonKernels<WilsonImplDF>::AsmDhopSiteDagInt(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf,
						int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out)
#if defined (WILSONKERNELSASMBODYA64FX)
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h>
#else
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBody.h>
#endif

template<> void
WilsonKernels<ZWilsonImplDF>::AsmDhopSiteDagInt(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf,
						int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out)
#if defined (WILSONKERNELSASMBODYA64FX)
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h>
#else
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBody.h>
#endif

#undef INTERIOR_AND_EXTERIOR
#undef INTERIOR
#define EXTERIOR
template<> void
WilsonKernels<WilsonImplD>::AsmDhopSiteDagExt(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf,
						int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out)
#if defined (WILSONKERNELSASMBODYA64FX)
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h>
#else
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBody.h>
#endif

template<> void
WilsonKernels<ZWilsonImplD>::AsmDhopSiteDagExt(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf,
						int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out)
#if defined (WILSONKERNELSASMBODYA64FX)
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h>
#else
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBody.h>
#endif

template<> void
WilsonKernels<WilsonImplDF>::AsmDhopSiteDagExt(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf,
						int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out)
#if defined (WILSONKERNELSASMBODYA64FX)
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h>
#else
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBody.h>
#endif

template<> void
WilsonKernels<ZWilsonImplDF>::AsmDhopSiteDagExt(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf,
						int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out)
#if defined (WILSONKERNELSASMBODYA64FX)
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h>
#else
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBody.h>
#endif



// undefs
#undef WILSONKERNELSASMBODYA64FX
#include <simd/Fujitsu_A64FX_undef.h>

#endif //A64FXASM
@@ -0,0 +1,380 @@
/*************************************************************************************

    Grid physics library, www.github.com/paboyle/Grid

    Source file: WilsonKernelsAsmBodyA64FX.h

    Copyright (C) 2020

Author:  Nils Meyer  <nils.meyer@ur.de>  Regensburg University

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License along
    with this program; if not, write to the Free Software Foundation, Inc.,
    51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.

    See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/*  END LEGAL */
#ifdef KERNEL_DAG
#define DIR0_PROJ    XP_PROJ
#define DIR1_PROJ    YP_PROJ
#define DIR2_PROJ    ZP_PROJ
#define DIR3_PROJ    TP_PROJ
#define DIR4_PROJ    XM_PROJ
#define DIR5_PROJ    YM_PROJ
#define DIR6_PROJ    ZM_PROJ
#define DIR7_PROJ    TM_PROJ
#define DIR0_RECON   XP_RECON
#define DIR1_RECON   YP_RECON_ACCUM
#define DIR2_RECON   ZP_RECON_ACCUM
#define DIR3_RECON   TP_RECON_ACCUM
#define DIR4_RECON   XM_RECON_ACCUM
#define DIR5_RECON   YM_RECON_ACCUM
#define DIR6_RECON   ZM_RECON_ACCUM
#define DIR7_RECON   TM_RECON_ACCUM
#else
#define DIR0_PROJ    XM_PROJ
#define DIR1_PROJ    YM_PROJ
#define DIR2_PROJ    ZM_PROJ
#define DIR3_PROJ    TM_PROJ
#define DIR4_PROJ    XP_PROJ
#define DIR5_PROJ    YP_PROJ
#define DIR6_PROJ    ZP_PROJ
#define DIR7_PROJ    TP_PROJ
#define DIR0_RECON   XM_RECON
#define DIR1_RECON   YM_RECON_ACCUM
#define DIR2_RECON   ZM_RECON_ACCUM
#define DIR3_RECON   TM_RECON_ACCUM
#define DIR4_RECON   XP_RECON_ACCUM
#define DIR5_RECON   YP_RECON_ACCUM
#define DIR6_RECON   ZP_RECON_ACCUM
#define DIR7_RECON   TP_RECON_ACCUM
#endif
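These tables map the eight stencil legs onto spin projectors and reconstructors. In the usual Wilson convention (an assumption about normalisation here, but consistent with the "Flip gamma (1+g)<->(1-g) if dag" comment in DerivInternal above) the hopping term is

\[
D_{\rm hop}\,\psi(x) \;=\; -\tfrac{1}{2}\sum_{\mu}\Bigl[(1-\gamma_\mu)\,U_\mu(x)\,\psi(x+\hat\mu) \;+\; (1+\gamma_\mu)\,U_\mu^\dagger(x-\hat\mu)\,\psi(x-\hat\mu)\Bigr],
\]

so each leg only needs the two spin components kept by $(1\mp\gamma_\mu)$ (the *_PROJ macros), and the full spinor is rebuilt and accumulated by the matching *_RECON / *_RECON_ACCUM. The dagger kernel swaps the $(1-\gamma_\mu)$ and $(1+\gamma_\mu)$ legs, which is exactly what the KERNEL_DAG branch of the table does.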

//using namespace std;

#undef SHOW
//#define SHOW

#undef WHERE

#ifdef INTERIOR_AND_EXTERIOR
#define WHERE "INT_AND_EXT"
#endif

#ifdef INTERIOR
#define WHERE "INT"
#endif

#ifdef EXTERIOR
#define WHERE "EXT"
#endif

//#pragma message("here")



////////////////////////////////////////////////////////////////////////////////
// Comms then compute kernel
////////////////////////////////////////////////////////////////////////////////
#ifdef INTERIOR_AND_EXTERIOR

#define ASM_LEG(Dir,NxtDir,PERMUTE_DIR,PROJ,RECON)			\
      basep = st.GetPFInfo(nent,plocal); nent++;			\
      if ( local ) {							            \
    LOAD_CHIMU(base);                                       \
    LOAD_TABLE(PERMUTE_DIR);                                \
    PROJ;							                        \
    MAYBEPERM(PERMUTE_DIR,perm);					        \
      } else {								                \
	LOAD_CHI(base);							                \
      }									                    \
      base = st.GetInfo(ptype,local,perm,NxtDir,ent,plocal); ent++;	\
    MULT_2SPIN_1(Dir);					                    \
    PREFETCH_CHIMU(base);                                   \
    PREFETCH_CHIMU_L2(basep);                               \
    /* PREFETCH_GAUGE_L1(NxtDir); */                        \
    MULT_2SPIN_2;					                        \
    if (s == 0) {                                           \
      if ((Dir == 0) || (Dir == 4)) { PREFETCH_GAUGE_L2(Dir); } \
    }                                                       \
    RECON;								                    \

#define ASM_LEG_XP(Dir,NxtDir,PERMUTE_DIR,PROJ,RECON)	    \
  base = st.GetInfo(ptype,local,perm,Dir,ent,plocal); ent++; \
  PREFETCH1_CHIMU(base);						            \
  ASM_LEG(Dir,NxtDir,PERMUTE_DIR,PROJ,RECON)

#define RESULT(base,basep) SAVE_RESULT(base,basep);

#endif

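// The same ASM_LEG / ASM_LEG_XP / RESULT macros are defined three ways in this
// file: INTERIOR_AND_EXTERIOR (above) does every leg in one pass once the halo
// is already available; INTERIOR (next) only multiplies legs whose neighbour is
// local or on the same shared-memory node, and runs while off-node comms are in
// flight; EXTERIOR (after that) picks up the remaining legs from the received
// halo and accumulates them into the existing result. This matches the
// overlapped Dhop driver, which calls the kernels with (interior,exterior)
// flags (1,0) and then (0,1).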
////////////////////////////////////////////////////////////////////////////////
 | 
			
		||||
// Pre comms kernel -- prefetch like normal because it is mostly right
 | 
			
		||||
////////////////////////////////////////////////////////////////////////////////
 | 
			
		||||
#ifdef INTERIOR
 | 
			
		||||
 | 
			
		||||
#define ASM_LEG(Dir,NxtDir,PERMUTE_DIR,PROJ,RECON)			\
 | 
			
		||||
      basep = st.GetPFInfo(nent,plocal); nent++;			\
 | 
			
		||||
      if ( local ) {							            \
 | 
			
		||||
    LOAD_CHIMU(base);                                       \
 | 
			
		||||
    LOAD_TABLE(PERMUTE_DIR);                                \
 | 
			
		||||
    PROJ;							                        \
 | 
			
		||||
    MAYBEPERM(PERMUTE_DIR,perm);					        \
 | 
			
		||||
      }else if ( st.same_node[Dir] ) {LOAD_CHI(base);}	    \
 | 
			
		||||
      base = st.GetInfo(ptype,local,perm,NxtDir,ent,plocal); ent++;	\
 | 
			
		||||
      if ( local || st.same_node[Dir] ) {				    \
 | 
			
		||||
    MULT_2SPIN_1(Dir);					                    \
 | 
			
		||||
    PREFETCH_CHIMU(base);                                   \
 | 
			
		||||
    /* PREFETCH_GAUGE_L1(NxtDir); */                        \
 | 
			
		||||
    MULT_2SPIN_2;					                        \
 | 
			
		||||
    if (s == 0) {                                           \
 | 
			
		||||
       if ((Dir == 0) || (Dir == 4)) { PREFETCH_GAUGE_L2(Dir); } \
 | 
			
		||||
    }                                                       \
 | 
			
		||||
    RECON;								                    \
 | 
			
		||||
    PREFETCH_CHIMU_L2(basep);                               \
 | 
			
		||||
      } else { PREFETCH_CHIMU(base); }								                    \
 | 
			
		||||
 | 
			
		||||
#define ASM_LEG_XP(Dir,NxtDir,PERMUTE_DIR,PROJ,RECON)			\
 | 
			
		||||
  base = st.GetInfo(ptype,local,perm,Dir,ent,plocal); ent++;		\
 | 
			
		||||
  PREFETCH1_CHIMU(base);						\
 | 
			
		||||
  ASM_LEG(Dir,NxtDir,PERMUTE_DIR,PROJ,RECON)
 | 
			
		||||
 | 
			
		||||
#define RESULT(base,basep) SAVE_RESULT(base,basep);
 | 
			
		||||
 | 
			
		||||
#endif
 | 
			
		||||
////////////////////////////////////////////////////////////////////////////////
 | 
			
		||||
// Post comms kernel
 | 
			
		||||
////////////////////////////////////////////////////////////////////////////////
 | 
			
		||||
#ifdef EXTERIOR


#define ASM_LEG(Dir,NxtDir,PERMUTE_DIR,PROJ,RECON)			\
  base = st.GetInfo(ptype,local,perm,Dir,ent,plocal); ent++;		\
  if((!local)&&(!st.same_node[Dir]) ) {					\
    LOAD_CHI(base);							\
    MULT_2SPIN_1(Dir);							\
    PREFETCH_CHIMU(base);						\
    /* PREFETCH_GAUGE_L1(NxtDir); */					\
    MULT_2SPIN_2;							\
    if (s == 0) {							\
      if ((Dir == 0) || (Dir == 4)) { PREFETCH_GAUGE_L2(Dir); }		\
    }									\
    RECON;								\
    nmu++;								\
  }

#define ASM_LEG_XP(Dir,NxtDir,PERMUTE_DIR,PROJ,RECON)			\
  nmu=0;								\
  base = st.GetInfo(ptype,local,perm,Dir,ent,plocal); ent++;		\
  if((!local)&&(!st.same_node[Dir]) ) {					\
    LOAD_CHI(base);							\
    MULT_2SPIN_1(Dir);							\
    PREFETCH_CHIMU(base);						\
    /* PREFETCH_GAUGE_L1(NxtDir); */					\
    MULT_2SPIN_2;							\
    if (s == 0) {							\
      if ((Dir == 0) || (Dir == 4)) { PREFETCH_GAUGE_L2(Dir); }		\
    }									\
    RECON;								\
    nmu++;								\
  }

#define RESULT(base,basep) if (nmu){ ADD_RESULT(base,base);}
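
// Unlike the interior kernel, which overwrites out[ss] via SAVE_RESULT, the
// exterior kernel accumulates onto it, and only when at least one off-node
// leg contributed (nmu != 0).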

#endif
{
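  // Loop structure: the outer loop walks the gauge sites, the inner loop the
  // Ls fifth-dimension slices.  ent indexes the 8 stencil legs (2*Nd) of the
  // current site, while nent/basep point at the next site so its data can be
  // prefetched; plocal is the base address of the input field used to turn
  // stencil offsets into absolute addresses.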

  int nmu;
  int local,perm, ptype;
  uint64_t base;
  uint64_t basep;
  const uint64_t plocal =(uint64_t) & in[0];

  MASK_REGS;
  int nmax=U.oSites();
  for(int site=0;site<Ns;site++) {
#ifndef EXTERIOR
    //    int sU =lo.Reorder(ssU);
    int sU =ssU;
    int ssn=ssU+1;     if(ssn>=nmax) ssn=0;
    //    int sUn=lo.Reorder(ssn);
    int sUn=ssn;
    LOCK_GAUGE(0);
#else
    int sU =ssU;
    int ssn=ssU+1;     if(ssn>=nmax) ssn=0;
    int sUn=ssn;
#endif
    for(int s=0;s<Ls;s++) {
      ss =sU*Ls+s;
      ssn=sUn*Ls+s;
      int  ent=ss*8;// 2*Ndim
      int nent=ssn*8;

      uint64_t delta_base, delta_base_p;

      ASM_LEG_XP(Xp,Yp,PERMUTE_DIR3,DIR0_PROJ,DIR0_RECON);

#ifdef SHOW
      float rescale = 64. * 12.;
      std::cout << "=================================================================" << std::endl;
      std::cout << "ss = " << ss << "   ssn = " << ssn << std::endl;
      std::cout << "sU = " << sU << "   ssU = " << ssU << std::endl;
      std::cout << " " << std::endl;


      std::cout << "Dir = " << Xp << "        "  << WHERE<< std::endl;

      std::cout << "ent  nent  local  perm       = " << ent << "  " << nent << "  " << local << "  "  << perm << std::endl;
      std::cout << "st.same_node[Dir] = " << st.same_node[Xp] << std::endl;
      std::cout << "base              = " << (base - plocal)/rescale << std::endl;
      std::cout << "Basep             = " << (basep - plocal)/rescale << std::endl;
      //printf("U                 = %llu\n", (uint64_t)&[sU](Dir));
      std::cout << "----------------------------------------------------" << std::endl;
#endif

      ASM_LEG(Yp,Zp,PERMUTE_DIR2,DIR1_PROJ,DIR1_RECON);

#ifdef SHOW
      std::cout << "Dir = " << Yp << "        "  << WHERE<< std::endl;

      std::cout << "ent  nent  local  perm       = " << ent << "  " << nent << "  " << local << "  "  << perm << std::endl;
      std::cout << "st.same_node[Dir] = " << st.same_node[Yp] << std::endl;
      std::cout << "base              = " << (base - plocal)/rescale << std::endl;
      std::cout << "Basep             = " << (basep - plocal)/rescale << std::endl;
      //printf("U                 = %llu\n", (uint64_t)&[sU](Dir));
      std::cout << "----------------------------------------------------" << std::endl;
#endif

      ASM_LEG(Zp,Tp,PERMUTE_DIR1,DIR2_PROJ,DIR2_RECON);

#ifdef SHOW
      std::cout << "Dir = " << Zp << "        "  << WHERE<< std::endl;

      std::cout << "ent  nent  local  perm       = " << ent << "  " << nent << "  " << local << "  "  << perm << std::endl;
      std::cout << "st.same_node[Dir] = " << st.same_node[Zp] << std::endl;
      std::cout << "base              = " << (base - plocal)/rescale << std::endl;
      std::cout << "Basep             = " << (basep - plocal)/rescale << std::endl;
      //printf("U                 = %llu\n", (uint64_t)&[sU](Dir));
      std::cout << "----------------------------------------------------" << std::endl;
#endif

      ASM_LEG(Tp,Xm,PERMUTE_DIR0,DIR3_PROJ,DIR3_RECON);

#ifdef SHOW
      std::cout << "Dir = " << Tp << "        "  << WHERE<< std::endl;

      std::cout << "ent  nent  local  perm       = " << ent << "  " << nent << "  " << local << "  "  << perm << std::endl;
      std::cout << "st.same_node[Dir] = " << st.same_node[Tp] << std::endl;
      std::cout << "base              = " << (base - plocal)/rescale << std::endl;
      std::cout << "Basep             = " << (basep - plocal)/rescale << std::endl;
      //printf("U                 = %llu\n", (uint64_t)&[sU](Dir));
      std::cout << "----------------------------------------------------" << std::endl;
#endif

      ASM_LEG(Xm,Ym,PERMUTE_DIR3,DIR4_PROJ,DIR4_RECON);

#ifdef SHOW
      std::cout << "Dir = " << Xm << "        "  << WHERE<< std::endl;

      std::cout << "ent  nent  local  perm       = " << ent << "  " << nent << "  " << local << "  "  << perm << std::endl;
      std::cout << "st.same_node[Dir] = " << st.same_node[Xm] << std::endl;
      std::cout << "base              = " << (base - plocal)/rescale << std::endl;
      std::cout << "Basep             = " << (basep - plocal)/rescale << std::endl;
      //printf("U                 = %llu\n", (uint64_t)&[sU](Dir));
      std::cout << "----------------------------------------------------" << std::endl;
#endif

      ASM_LEG(Ym,Zm,PERMUTE_DIR2,DIR5_PROJ,DIR5_RECON);

#ifdef SHOW
      std::cout << "Dir = " << Ym << "        "  << WHERE<< std::endl;

      std::cout << "ent  nent  local  perm       = " << ent << "  " << nent << "  " << local << "  "  << perm << std::endl;
      std::cout << "st.same_node[Dir] = " << st.same_node[Ym] << std::endl;
      std::cout << "base              = " << (base - plocal)/rescale << std::endl;
      std::cout << "Basep             = " << (basep - plocal)/rescale << std::endl;
      //printf("U                 = %llu\n", (uint64_t)&[sU](Dir));
      std::cout << "----------------------------------------------------" << std::endl;
#endif

      ASM_LEG(Zm,Tm,PERMUTE_DIR1,DIR6_PROJ,DIR6_RECON);

#ifdef SHOW
      std::cout << "Dir = " << Zm << "        "  << WHERE<< std::endl;

      std::cout << "ent  nent  local  perm       = " << ent << "  " << nent << "  " << local << "  "  << perm << std::endl;
      std::cout << "st.same_node[Dir] = " << st.same_node[Zm] << std::endl;
      std::cout << "base              = " << (base - plocal)/rescale << std::endl;
      std::cout << "Basep             = " << (basep - plocal)/rescale << std::endl;
      //printf("U                 = %llu\n", (uint64_t)&[sU](Dir));
      std::cout << "----------------------------------------------------" << std::endl;
#endif

      ASM_LEG(Tm,Xp,PERMUTE_DIR0,DIR7_PROJ,DIR7_RECON);

#ifdef SHOW
      std::cout << "Dir = " << Tm << "        "  << WHERE<< std::endl;

      std::cout << "ent  nent  local  perm       = " << ent << "  " << nent << "  " << local << "  "  << perm << std::endl;
      std::cout << "st.same_node[Dir] = " << st.same_node[Tm] << std::endl;
      std::cout << "base              = " << (base - plocal)/rescale << std::endl;
      std::cout << "Basep             = " << (basep - plocal)/rescale << std::endl;
      //printf("U                 = %llu\n", (uint64_t)&[sU](Dir));
      std::cout << "----------------------------------------------------" << std::endl;
#endif

#ifdef EXTERIOR
      if (nmu==0) break;
      //      if (nmu!=0) std::cout << "EXT "<<sU<<std::endl;
#endif
      base = (uint64_t) &out[ss];
      basep= st.GetPFInfo(nent,plocal); ent++;
      basep = (uint64_t) &out[ssn];
      RESULT(base,basep);

#ifdef SHOW
      std::cout << "Dir = FINAL        " <<  WHERE<< std::endl;

      base_ss = base;
      std::cout << "base              = " << (base - (uint64_t) &out[0])/rescale << std::endl;
      std::cout << "Basep             = " << (basep - plocal)/rescale << std::endl;
      //printf("U                 = %llu\n", (uint64_t)&[sU](Dir));
      std::cout << "----------------------------------------------------" << std::endl;
#endif

    }
    ssU++;
    UNLOCK_GAUGE(0);
  }
}

#undef DIR0_PROJ
#undef DIR1_PROJ
#undef DIR2_PROJ
#undef DIR3_PROJ
#undef DIR4_PROJ
#undef DIR5_PROJ
#undef DIR6_PROJ
#undef DIR7_PROJ
#undef DIR0_RECON
#undef DIR1_RECON
#undef DIR2_RECON
#undef DIR3_RECON
#undef DIR4_RECON
#undef DIR5_RECON
#undef DIR6_RECON
#undef DIR7_RECON
#undef ASM_LEG
#undef ASM_LEG_XP
#undef RESULT

@@ -646,7 +646,7 @@ NAMESPACE_BEGIN(Grid);
  HAND_RESULT_EXT(ss,F)

#define HAND_SPECIALISE_GPARITY(IMPL)					\
  template<> void						\
  template<> accelerator_inline void						\
  WilsonKernels<IMPL>::HandDhopSite(StencilView &st, DoubledGaugeFieldView &U,SiteHalfSpinor  *buf, \
				    int ss,int sU,const FermionFieldView &in, FermionFieldView &out) \
  {									\
@@ -662,7 +662,7 @@ NAMESPACE_BEGIN(Grid);
    HAND_DOP_SITE(1, LOAD_CHI_GPARITY,LOAD_CHIMU_GPARITY,MULT_2SPIN_GPARITY); \
  }									\
									\
  template<> void						\
  template<> accelerator_inline void						\
  WilsonKernels<IMPL>::HandDhopSiteDag(StencilView &st, DoubledGaugeFieldView &U,SiteHalfSpinor *buf, \
				       int ss,int sU,const FermionFieldView &in, FermionFieldView &out) \
  {									\
@@ -678,7 +678,7 @@ NAMESPACE_BEGIN(Grid);
    HAND_DOP_SITE_DAG(1, LOAD_CHI_GPARITY,LOAD_CHIMU_GPARITY,MULT_2SPIN_GPARITY); \
  }									\
									\
  template<> void						\
  template<> accelerator_inline void						\
  WilsonKernels<IMPL>::HandDhopSiteInt(StencilView &st, DoubledGaugeFieldView &U,SiteHalfSpinor  *buf, \
				       int ss,int sU,const FermionFieldView &in, FermionFieldView &out) \
  {									\
@@ -694,7 +694,7 @@ NAMESPACE_BEGIN(Grid);
    HAND_DOP_SITE_INT(1, LOAD_CHI_GPARITY,LOAD_CHIMU_GPARITY,MULT_2SPIN_GPARITY); \
  }									\
									\
  template<> void						\
  template<> accelerator_inline void						\
  WilsonKernels<IMPL>::HandDhopSiteDagInt(StencilView &st, DoubledGaugeFieldView &U,SiteHalfSpinor *buf, \
					  int ss,int sU,const FermionFieldView &in, FermionFieldView &out) \
  {									\
@@ -710,7 +710,7 @@ NAMESPACE_BEGIN(Grid);
    HAND_DOP_SITE_DAG_INT(1, LOAD_CHI_GPARITY,LOAD_CHIMU_GPARITY,MULT_2SPIN_GPARITY); \
  }									\
									\
  template<> void							\
  template<> accelerator_inline void							\
  WilsonKernels<IMPL>::HandDhopSiteExt(StencilView &st, DoubledGaugeFieldView &U,SiteHalfSpinor  *buf, \
				       int ss,int sU,const FermionFieldView &in, FermionFieldView &out) \
  {									\
@@ -727,7 +727,7 @@ NAMESPACE_BEGIN(Grid);
    nmu = 0;								\
    HAND_DOP_SITE_EXT(1, LOAD_CHI_GPARITY,LOAD_CHIMU_GPARITY,MULT_2SPIN_GPARITY); \
  }									\
  template<> void						\
  template<> accelerator_inline void						\
  WilsonKernels<IMPL>::HandDhopSiteDagExt(StencilView &st, DoubledGaugeFieldView &U,SiteHalfSpinor *buf, \
					  int ss,int sU,const FermionFieldView &in, FermionFieldView &out) \
  {									\

@@ -495,7 +495,7 @@ Author: paboyle <paboyle@ph.ed.ac.uk>

NAMESPACE_BEGIN(Grid);

template<class Impl> void 
template<class Impl> accelerator_inline void 
WilsonKernels<Impl>::HandDhopSite(StencilView &st, DoubledGaugeFieldView &U,SiteHalfSpinor  *buf,
				  int ss,int sU,const FermionFieldView &in, FermionFieldView &out)
{
@@ -519,7 +519,7 @@ WilsonKernels<Impl>::HandDhopSite(StencilView &st, DoubledGaugeFieldView &U,Site
  HAND_RESULT(ss);
}

template<class Impl>
template<class Impl>  accelerator_inline
void WilsonKernels<Impl>::HandDhopSiteDag(StencilView &st,DoubledGaugeFieldView &U,SiteHalfSpinor *buf,
					  int ss,int sU,const FermionFieldView &in, FermionFieldView &out)
{
@@ -542,7 +542,7 @@ void WilsonKernels<Impl>::HandDhopSiteDag(StencilView &st,DoubledGaugeFieldView
  HAND_RESULT(ss);
}

template<class Impl> void 
template<class Impl>  accelerator_inline void 
WilsonKernels<Impl>::HandDhopSiteInt(StencilView &st,DoubledGaugeFieldView &U,SiteHalfSpinor  *buf,
					  int ss,int sU,const FermionFieldView &in, FermionFieldView &out)
{
@@ -566,7 +566,7 @@ WilsonKernels<Impl>::HandDhopSiteInt(StencilView &st,DoubledGaugeFieldView &U,Si
  HAND_RESULT(ss);
}

template<class Impl>
template<class Impl> accelerator_inline
void WilsonKernels<Impl>::HandDhopSiteDagInt(StencilView &st,DoubledGaugeFieldView &U,SiteHalfSpinor *buf,
						  int ss,int sU,const FermionFieldView &in, FermionFieldView &out)
{
@@ -589,7 +589,7 @@ void WilsonKernels<Impl>::HandDhopSiteDagInt(StencilView &st,DoubledGaugeFieldVi
  HAND_RESULT(ss);
}

template<class Impl> void 
template<class Impl>  accelerator_inline void 
WilsonKernels<Impl>::HandDhopSiteExt(StencilView &st,DoubledGaugeFieldView &U,SiteHalfSpinor  *buf,
					  int ss,int sU,const FermionFieldView &in, FermionFieldView &out)
{
@@ -614,7 +614,7 @@ WilsonKernels<Impl>::HandDhopSiteExt(StencilView &st,DoubledGaugeFieldView &U,Si
  HAND_RESULT_EXT(ss);
}

template<class Impl>
template<class Impl>  accelerator_inline
void WilsonKernels<Impl>::HandDhopSiteDagExt(StencilView &st,DoubledGaugeFieldView &U,SiteHalfSpinor *buf,
						  int ss,int sU,const FermionFieldView &in, FermionFieldView &out)
{


@@ -0,0 +1,943 @@
    /*************************************************************************************

    Grid physics library, www.github.com/paboyle/Grid

    Source file: ./lib/qcd/action/fermion/WilsonKernelsHand.cc

    Copyright (C) 2015

Author: Peter Boyle <paboyle@ph.ed.ac.uk>
Author: paboyle <paboyle@ph.ed.ac.uk>

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License along
    with this program; if not, write to the Free Software Foundation, Inc.,
    51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.

    See the full license in the file "LICENSE" in the top level distribution directory
    *************************************************************************************/
    /*  END LEGAL */

#pragma once

#include <Grid/qcd/action/fermion/FermionCore.h>
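
// What follows is a debug build of the hand-unrolled Wilson kernels: every
// macro (LOAD_CHIMU, MULT_2SPIN, the projectors and reconstructors, ...) is
// redefined to also stream its intermediate values to std::cout so that the
// per-site arithmetic can be inspected step by step.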


#undef LOAD_CHIMU
#undef LOAD_CHI
#undef MULT_2SPIN
#undef PERMUTE_DIR
#undef XP_PROJ
#undef YP_PROJ
#undef ZP_PROJ
#undef TP_PROJ
#undef XM_PROJ
#undef YM_PROJ
#undef ZM_PROJ
#undef TM_PROJ
#undef XP_RECON
#undef XP_RECON_ACCUM
#undef XM_RECON
#undef XM_RECON_ACCUM
#undef YP_RECON_ACCUM
#undef YM_RECON_ACCUM
#undef ZP_RECON_ACCUM
#undef ZM_RECON_ACCUM
#undef TP_RECON_ACCUM
#undef TM_RECON_ACCUM
#undef ZERO_RESULT
#undef Chimu_00
#undef Chimu_01
#undef Chimu_02
#undef Chimu_10
#undef Chimu_11
#undef Chimu_12
#undef Chimu_20
#undef Chimu_21
#undef Chimu_22
#undef Chimu_30
#undef Chimu_31
#undef Chimu_32
#undef HAND_STENCIL_LEG
#undef HAND_STENCIL_LEG_INT
#undef HAND_STENCIL_LEG_EXT
#undef HAND_RESULT
#undef HAND_RESULT_INT
#undef HAND_RESULT_EXT

#define REGISTER

#define LOAD_CHIMU \
  {const SiteSpinor & ref (in[offset]);	\
    Chimu_00=ref()(0)(0);\
    Chimu_01=ref()(0)(1);\
    Chimu_02=ref()(0)(2);\
    Chimu_10=ref()(1)(0);\
    Chimu_11=ref()(1)(1);\
    Chimu_12=ref()(1)(2);\
    Chimu_20=ref()(2)(0);\
    Chimu_21=ref()(2)(1);\
    Chimu_22=ref()(2)(2);\
    Chimu_30=ref()(3)(0);\
    Chimu_31=ref()(3)(1);\
    Chimu_32=ref()(3)(2);\
    std::cout << std::endl << "DEBUG -- LOAD_CHIMU" << std::endl; \
    std::cout << "Chimu_00 -- " <<  Chimu_00 << std::endl; \
    std::cout << "Chimu_01 -- " <<  Chimu_01 << std::endl; \
    std::cout << "Chimu_02 -- " <<  Chimu_02 << std::endl; \
    std::cout << "Chimu_10 -- " <<  Chimu_10 << std::endl; \
    std::cout << "Chimu_11 -- " <<  Chimu_11 << std::endl; \
    std::cout << "Chimu_12 -- " <<  Chimu_12 << std::endl; \
    std::cout << "Chimu_20 -- " <<  Chimu_20 << std::endl; \
    std::cout << "Chimu_21 -- " <<  Chimu_21 << std::endl; \
    std::cout << "Chimu_22 -- " <<  Chimu_22 << std::endl; \
    std::cout << "Chimu_30 -- " <<  Chimu_30 << std::endl; \
    std::cout << "Chimu_31 -- " <<  Chimu_31 << std::endl; \
    std::cout << "Chimu_32 -- " <<  Chimu_32 << std::endl; \
}
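
// LOAD_CHIMU reads the full 4-spinor from the local field in[offset];
// LOAD_CHI below instead pulls an already-projected half spinor out of the
// communication buffer buf[offset].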

#define LOAD_CHI\
  {const SiteHalfSpinor &ref(buf[offset]);	\
    Chi_00 = ref()(0)(0);\
    Chi_01 = ref()(0)(1);\
    Chi_02 = ref()(0)(2);\
    Chi_10 = ref()(1)(0);\
    Chi_11 = ref()(1)(1);\
    Chi_12 = ref()(1)(2);\
    std::cout << std::endl << "DEBUG -- LOAD_CHI" << std::endl; \
    std::cout << "Chi_00 -- " <<  Chi_00 << std::endl; \
    std::cout << "Chi_01 -- " <<  Chi_01 << std::endl; \
    std::cout << "Chi_02 -- " <<  Chi_02 << std::endl; \
    std::cout << "Chi_10 -- " <<  Chi_10 << std::endl; \
    std::cout << "Chi_11 -- " <<  Chi_11 << std::endl; \
    std::cout << "Chi_12 -- " <<  Chi_12 << std::endl; \
  }

// To splat or not to splat depends on the implementation
#define MULT_2SPIN(A)\
  {auto & ref(U[sU](A));			\
   Impl::loadLinkElement(U_00,ref()(0,0));	\
   Impl::loadLinkElement(U_10,ref()(1,0));	\
   Impl::loadLinkElement(U_20,ref()(2,0));	\
   Impl::loadLinkElement(U_01,ref()(0,1));	\
   Impl::loadLinkElement(U_11,ref()(1,1));	\
   Impl::loadLinkElement(U_21,ref()(2,1));	\
    UChi_00 = U_00*Chi_00;\
    UChi_10 = U_00*Chi_10;\
    UChi_01 = U_10*Chi_00;\
    UChi_11 = U_10*Chi_10;\
    UChi_02 = U_20*Chi_00;\
    UChi_12 = U_20*Chi_10;\
    UChi_00+= U_01*Chi_01;\
    UChi_10+= U_01*Chi_11;\
    UChi_01+= U_11*Chi_01;\
    UChi_11+= U_11*Chi_11;\
    UChi_02+= U_21*Chi_01;\
    UChi_12+= U_21*Chi_11;\
    Impl::loadLinkElement(U_00,ref()(0,2));	\
    Impl::loadLinkElement(U_10,ref()(1,2));	\
    Impl::loadLinkElement(U_20,ref()(2,2));	\
    UChi_00+= U_00*Chi_02;\
    UChi_10+= U_00*Chi_12;\
    UChi_01+= U_10*Chi_02;\
    UChi_11+= U_10*Chi_12;\
    UChi_02+= U_20*Chi_02;\
    UChi_12+= U_20*Chi_12;\
    std::cout << std::endl << "DEBUG -- MULT_2SPIN" << std::endl; \
    std::cout << "UChi_00 -- " <<  UChi_00 << std::endl; \
    std::cout << "UChi_01 -- " <<  UChi_01 << std::endl; \
    std::cout << "UChi_02 -- " <<  UChi_02 << std::endl; \
    std::cout << "UChi_10 -- " <<  UChi_10 << std::endl; \
    std::cout << "UChi_11 -- " <<  UChi_11 << std::endl; \
    std::cout << "UChi_12 -- " <<  UChi_12 << std::endl; \
    }
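
// MULT_2SPIN applies the gauge link U[sU](A) column by column to both spin
// components of the half spinor, i.e. UChi_{s,i} = sum_j ref()(i,j)*Chi_{s,j}
// for s = 0,1 and colour i = 0,1,2, reusing the U_x0 registers for the third
// column to keep the register count down.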


#define PERMUTE_DIR(dir)			\
std::cout << std::endl << "DEBUG -- PERM PRE" << std::endl; \
std::cout << "Chi_00 -- " <<  Chi_00 << std::endl; \
std::cout << "Chi_01 -- " <<  Chi_01 << std::endl; \
std::cout << "Chi_02 -- " <<  Chi_02 << std::endl; \
std::cout << "Chi_10 -- " <<  Chi_10 << std::endl; \
std::cout << "Chi_11 -- " <<  Chi_11 << std::endl; \
std::cout << "Chi_12 -- " <<  Chi_12 << std::endl; \
      permute##dir(Chi_00,Chi_00);\
      permute##dir(Chi_01,Chi_01);\
      permute##dir(Chi_02,Chi_02);\
      permute##dir(Chi_10,Chi_10);\
      permute##dir(Chi_11,Chi_11);\
      permute##dir(Chi_12,Chi_12);\
  std::cout << std::endl << "DEBUG -- PERM POST" << std::endl; \
  std::cout << "Chi_00 -- " <<  Chi_00 << std::endl; \
  std::cout << "Chi_01 -- " <<  Chi_01 << std::endl; \
  std::cout << "Chi_02 -- " <<  Chi_02 << std::endl; \
  std::cout << "Chi_10 -- " <<  Chi_10 << std::endl; \
  std::cout << "Chi_11 -- " <<  Chi_11 << std::endl; \
  std::cout << "Chi_12 -- " <<  Chi_12 << std::endl;

//      hspin(0)=fspin(0)+timesI(fspin(3));
//      hspin(1)=fspin(1)+timesI(fspin(2));
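// Each *_PROJ macro below forms the two spin components that survive the
// projection for one hopping direction (the half spinor Chi) from the full
// 4-spinor Chimu, as sketched in the hspin/fspin comments above.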
#define XP_PROJ \
    Chi_00 = Chimu_00+timesI(Chimu_30);\
    Chi_01 = Chimu_01+timesI(Chimu_31);\
    Chi_02 = Chimu_02+timesI(Chimu_32);\
    Chi_10 = Chimu_10+timesI(Chimu_20);\
    Chi_11 = Chimu_11+timesI(Chimu_21);\
    Chi_12 = Chimu_12+timesI(Chimu_22);\
    std::cout << std::endl << "DEBUG -- XP_PROJ" << std::endl; \
    std::cout << "Chi_00 -- " <<  Chi_00 << std::endl; \
    std::cout << "Chi_01 -- " <<  Chi_01 << std::endl; \
    std::cout << "Chi_02 -- " <<  Chi_02 << std::endl; \
    std::cout << "Chi_10 -- " <<  Chi_10 << std::endl; \
    std::cout << "Chi_11 -- " <<  Chi_11 << std::endl; \
    std::cout << "Chi_12 -- " <<  Chi_12 << std::endl;

#define YP_PROJ \
    Chi_00 = Chimu_00-Chimu_30;\
    Chi_01 = Chimu_01-Chimu_31;\
    Chi_02 = Chimu_02-Chimu_32;\
    Chi_10 = Chimu_10+Chimu_20;\
    Chi_11 = Chimu_11+Chimu_21;\
    Chi_12 = Chimu_12+Chimu_22;\
    std::cout << std::endl << "DEBUG -- YP_PROJ" << std::endl; \
    std::cout << "Chi_00 -- " <<  Chi_00 << std::endl; \
    std::cout << "Chi_01 -- " <<  Chi_01 << std::endl; \
    std::cout << "Chi_02 -- " <<  Chi_02 << std::endl; \
    std::cout << "Chi_10 -- " <<  Chi_10 << std::endl; \
    std::cout << "Chi_11 -- " <<  Chi_11 << std::endl; \
    std::cout << "Chi_12 -- " <<  Chi_12 << std::endl;

#define ZP_PROJ \
  Chi_00 = Chimu_00+timesI(Chimu_20);		\
  Chi_01 = Chimu_01+timesI(Chimu_21);		\
  Chi_02 = Chimu_02+timesI(Chimu_22);		\
  Chi_10 = Chimu_10-timesI(Chimu_30);		\
  Chi_11 = Chimu_11-timesI(Chimu_31);		\
  Chi_12 = Chimu_12-timesI(Chimu_32);\
  std::cout << std::endl << "DEBUG -- ZP_PROJ" << std::endl; \
  std::cout << "Chi_00 -- " <<  Chi_00 << std::endl; \
  std::cout << "Chi_01 -- " <<  Chi_01 << std::endl; \
  std::cout << "Chi_02 -- " <<  Chi_02 << std::endl; \
  std::cout << "Chi_10 -- " <<  Chi_10 << std::endl; \
  std::cout << "Chi_11 -- " <<  Chi_11 << std::endl; \
  std::cout << "Chi_12 -- " <<  Chi_12 << std::endl;

#define TP_PROJ \
  Chi_00 = Chimu_00+Chimu_20;		\
  Chi_01 = Chimu_01+Chimu_21;		\
  Chi_02 = Chimu_02+Chimu_22;		\
  Chi_10 = Chimu_10+Chimu_30;		\
  Chi_11 = Chimu_11+Chimu_31;		\
  Chi_12 = Chimu_12+Chimu_32;\
  std::cout << std::endl << "DEBUG -- TP_PROJ" << std::endl; \
  std::cout << "Chi_00 -- " <<  Chi_00 << std::endl; \
  std::cout << "Chi_01 -- " <<  Chi_01 << std::endl; \
  std::cout << "Chi_02 -- " <<  Chi_02 << std::endl; \
  std::cout << "Chi_10 -- " <<  Chi_10 << std::endl; \
  std::cout << "Chi_11 -- " <<  Chi_11 << std::endl; \
  std::cout << "Chi_12 -- " <<  Chi_12 << std::endl;


//      hspin(0)=fspin(0)-timesI(fspin(3));
//      hspin(1)=fspin(1)-timesI(fspin(2));
#define XM_PROJ \
    Chi_00 = Chimu_00-timesI(Chimu_30);\
    Chi_01 = Chimu_01-timesI(Chimu_31);\
    Chi_02 = Chimu_02-timesI(Chimu_32);\
    Chi_10 = Chimu_10-timesI(Chimu_20);\
    Chi_11 = Chimu_11-timesI(Chimu_21);\
    Chi_12 = Chimu_12-timesI(Chimu_22);\
    std::cout << std::endl << "DEBUG -- XM_PROJ" << std::endl; \
    std::cout << "Chi_00 -- " <<  Chi_00 << std::endl; \
    std::cout << "Chi_01 -- " <<  Chi_01 << std::endl; \
    std::cout << "Chi_02 -- " <<  Chi_02 << std::endl; \
    std::cout << "Chi_10 -- " <<  Chi_10 << std::endl; \
    std::cout << "Chi_11 -- " <<  Chi_11 << std::endl; \
    std::cout << "Chi_12 -- " <<  Chi_12 << std::endl;

#define YM_PROJ \
    Chi_00 = Chimu_00+Chimu_30;\
    Chi_01 = Chimu_01+Chimu_31;\
    Chi_02 = Chimu_02+Chimu_32;\
    Chi_10 = Chimu_10-Chimu_20;\
    Chi_11 = Chimu_11-Chimu_21;\
    Chi_12 = Chimu_12-Chimu_22;\
    std::cout << std::endl << "DEBUG -- YM_PROJ" << std::endl; \
    std::cout << "Chi_00 -- " <<  Chi_00 << std::endl; \
    std::cout << "Chi_01 -- " <<  Chi_01 << std::endl; \
    std::cout << "Chi_02 -- " <<  Chi_02 << std::endl; \
    std::cout << "Chi_10 -- " <<  Chi_10 << std::endl; \
    std::cout << "Chi_11 -- " <<  Chi_11 << std::endl; \
    std::cout << "Chi_12 -- " <<  Chi_12 << std::endl;

#define ZM_PROJ \
  Chi_00 = Chimu_00-timesI(Chimu_20);		\
  Chi_01 = Chimu_01-timesI(Chimu_21);		\
  Chi_02 = Chimu_02-timesI(Chimu_22);		\
  Chi_10 = Chimu_10+timesI(Chimu_30);		\
  Chi_11 = Chimu_11+timesI(Chimu_31);		\
  Chi_12 = Chimu_12+timesI(Chimu_32);\
  std::cout << std::endl << "DEBUG -- ZM_PROJ" << std::endl; \
  std::cout << "Chi_00 -- " <<  Chi_00 << std::endl; \
  std::cout << "Chi_01 -- " <<  Chi_01 << std::endl; \
  std::cout << "Chi_02 -- " <<  Chi_02 << std::endl; \
  std::cout << "Chi_10 -- " <<  Chi_10 << std::endl; \
  std::cout << "Chi_11 -- " <<  Chi_11 << std::endl; \
  std::cout << "Chi_12 -- " <<  Chi_12 << std::endl;

#define TM_PROJ \
  Chi_00 = Chimu_00-Chimu_20;		\
  Chi_01 = Chimu_01-Chimu_21;		\
  Chi_02 = Chimu_02-Chimu_22;		\
  Chi_10 = Chimu_10-Chimu_30;		\
  Chi_11 = Chimu_11-Chimu_31;		\
  Chi_12 = Chimu_12-Chimu_32;\
  std::cout << std::endl << "DEBUG -- TM_PROJ" << std::endl; \
  std::cout << "Chi_00 -- " <<  Chi_00 << std::endl; \
  std::cout << "Chi_01 -- " <<  Chi_01 << std::endl; \
  std::cout << "Chi_02 -- " <<  Chi_02 << std::endl; \
  std::cout << "Chi_10 -- " <<  Chi_10 << std::endl; \
  std::cout << "Chi_11 -- " <<  Chi_11 << std::endl; \
  std::cout << "Chi_12 -- " <<  Chi_12 << std::endl;

//      fspin(0)=hspin(0);
//      fspin(1)=hspin(1);
//      fspin(2)=timesMinusI(hspin(1));
//      fspin(3)=timesMinusI(hspin(0));
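// The *_RECON / *_RECON_ACCUM macros rebuild the four spin components of the
// result from the gauge-multiplied half spinor UChi (per the fspin/hspin
// relations above) and accumulate the contributions of successive directions.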
#define XP_RECON\
  result_00 = UChi_00;\
  result_01 = UChi_01;\
  result_02 = UChi_02;\
  result_10 = UChi_10;\
  result_11 = UChi_11;\
  result_12 = UChi_12;\
  result_20 = timesMinusI(UChi_10);\
  result_21 = timesMinusI(UChi_11);\
  result_22 = timesMinusI(UChi_12);\
  result_30 = timesMinusI(UChi_00);\
  result_31 = timesMinusI(UChi_01);\
  result_32 = timesMinusI(UChi_02);\
  std::cout << std::endl << "DEBUG -- XP_RECON" << std::endl; \
  std::cout << "result_00 -- " <<  result_00 << std::endl; \
  std::cout << "result_01 -- " <<  result_01 << std::endl; \
  std::cout << "result_02 -- " <<  result_02 << std::endl; \
  std::cout << "result_10 -- " <<  result_10 << std::endl; \
  std::cout << "result_11 -- " <<  result_11 << std::endl; \
  std::cout << "result_12 -- " <<  result_12 << std::endl; \
  std::cout << "result_20 -- " <<  result_20 << std::endl; \
  std::cout << "result_21 -- " <<  result_21 << std::endl; \
  std::cout << "result_22 -- " <<  result_22 << std::endl; \
  std::cout << "result_30 -- " <<  result_30 << std::endl; \
  std::cout << "result_31 -- " <<  result_31 << std::endl; \
  std::cout << "result_32 -- " <<  result_32 << std::endl;

#define XP_RECON_ACCUM\
  result_00+=UChi_00;\
  result_01+=UChi_01;\
  result_02+=UChi_02;\
  result_10+=UChi_10;\
  result_11+=UChi_11;\
  result_12+=UChi_12;\
  result_20-=timesI(UChi_10);\
  result_21-=timesI(UChi_11);\
  result_22-=timesI(UChi_12);\
  result_30-=timesI(UChi_00);\
  result_31-=timesI(UChi_01);\
  result_32-=timesI(UChi_02);\
  std::cout << std::endl << "DEBUG -- XP_RECON_ACCUM" << std::endl; \
  std::cout << "result_00 -- " <<  result_00 << std::endl; \
  std::cout << "result_01 -- " <<  result_01 << std::endl; \
  std::cout << "result_02 -- " <<  result_02 << std::endl; \
  std::cout << "result_10 -- " <<  result_10 << std::endl; \
  std::cout << "result_11 -- " <<  result_11 << std::endl; \
  std::cout << "result_12 -- " <<  result_12 << std::endl; \
  std::cout << "result_20 -- " <<  result_20 << std::endl; \
  std::cout << "result_21 -- " <<  result_21 << std::endl; \
  std::cout << "result_22 -- " <<  result_22 << std::endl; \
  std::cout << "result_30 -- " <<  result_30 << std::endl; \
  std::cout << "result_31 -- " <<  result_31 << std::endl; \
  std::cout << "result_32 -- " <<  result_32 << std::endl;

#define XM_RECON\
  result_00 = UChi_00;\
  result_01 = UChi_01;\
  result_02 = UChi_02;\
  result_10 = UChi_10;\
  result_11 = UChi_11;\
  result_12 = UChi_12;\
  result_20 = timesI(UChi_10);\
  result_21 = timesI(UChi_11);\
  result_22 = timesI(UChi_12);\
  result_30 = timesI(UChi_00);\
  result_31 = timesI(UChi_01);\
  result_32 = timesI(UChi_02);\
  std::cout << std::endl << "DEBUG -- XM_RECON" << std::endl; \
  std::cout << "result_00 -- " <<  result_00 << std::endl; \
  std::cout << "result_01 -- " <<  result_01 << std::endl; \
  std::cout << "result_02 -- " <<  result_02 << std::endl; \
  std::cout << "result_10 -- " <<  result_10 << std::endl; \
  std::cout << "result_11 -- " <<  result_11 << std::endl; \
  std::cout << "result_12 -- " <<  result_12 << std::endl; \
  std::cout << "result_20 -- " <<  result_20 << std::endl; \
  std::cout << "result_21 -- " <<  result_21 << std::endl; \
  std::cout << "result_22 -- " <<  result_22 << std::endl; \
  std::cout << "result_30 -- " <<  result_30 << std::endl; \
  std::cout << "result_31 -- " <<  result_31 << std::endl; \
  std::cout << "result_32 -- " <<  result_32 << std::endl;

#define XM_RECON_ACCUM\
  result_00+= UChi_00;\
  result_01+= UChi_01;\
  result_02+= UChi_02;\
  result_10+= UChi_10;\
  result_11+= UChi_11;\
  result_12+= UChi_12;\
  result_20+= timesI(UChi_10);\
  result_21+= timesI(UChi_11);\
  result_22+= timesI(UChi_12);\
  result_30+= timesI(UChi_00);\
  result_31+= timesI(UChi_01);\
  result_32+= timesI(UChi_02);\
  std::cout << std::endl << "DEBUG -- XM_RECON_ACCUM" << std::endl; \
  std::cout << "result_00 -- " <<  result_00 << std::endl; \
  std::cout << "result_01 -- " <<  result_01 << std::endl; \
  std::cout << "result_02 -- " <<  result_02 << std::endl; \
  std::cout << "result_10 -- " <<  result_10 << std::endl; \
  std::cout << "result_11 -- " <<  result_11 << std::endl; \
  std::cout << "result_12 -- " <<  result_12 << std::endl; \
  std::cout << "result_20 -- " <<  result_20 << std::endl; \
  std::cout << "result_21 -- " <<  result_21 << std::endl; \
  std::cout << "result_22 -- " <<  result_22 << std::endl; \
  std::cout << "result_30 -- " <<  result_30 << std::endl; \
  std::cout << "result_31 -- " <<  result_31 << std::endl; \
  std::cout << "result_32 -- " <<  result_32 << std::endl;

#define YP_RECON_ACCUM\
  result_00+= UChi_00;\
  result_01+= UChi_01;\
  result_02+= UChi_02;\
  result_10+= UChi_10;\
  result_11+= UChi_11;\
  result_12+= UChi_12;\
  result_20+= UChi_10;\
  result_21+= UChi_11;\
  result_22+= UChi_12;\
  result_30-= UChi_00;\
  result_31-= UChi_01;\
  result_32-= UChi_02;\
  std::cout << std::endl << "DEBUG -- YP_RECON_ACCUM" << std::endl; \
  std::cout << "result_00 -- " <<  result_00 << std::endl; \
  std::cout << "result_01 -- " <<  result_01 << std::endl; \
  std::cout << "result_02 -- " <<  result_02 << std::endl; \
  std::cout << "result_10 -- " <<  result_10 << std::endl; \
  std::cout << "result_11 -- " <<  result_11 << std::endl; \
  std::cout << "result_12 -- " <<  result_12 << std::endl; \
  std::cout << "result_20 -- " <<  result_20 << std::endl; \
  std::cout << "result_21 -- " <<  result_21 << std::endl; \
  std::cout << "result_22 -- " <<  result_22 << std::endl; \
  std::cout << "result_30 -- " <<  result_30 << std::endl; \
  std::cout << "result_31 -- " <<  result_31 << std::endl; \
  std::cout << "result_32 -- " <<  result_32 << std::endl;

#define YM_RECON_ACCUM\
  result_00+= UChi_00;\
  result_01+= UChi_01;\
  result_02+= UChi_02;\
  result_10+= UChi_10;\
  result_11+= UChi_11;\
  result_12+= UChi_12;\
  result_20-= UChi_10;\
  result_21-= UChi_11;\
  result_22-= UChi_12;\
  result_30+= UChi_00;\
  result_31+= UChi_01;\
  result_32+= UChi_02;\
  std::cout << std::endl << "DEBUG -- YM_RECON_ACCUM" << std::endl; \
  std::cout << "result_00 -- " <<  result_00 << std::endl; \
  std::cout << "result_01 -- " <<  result_01 << std::endl; \
  std::cout << "result_02 -- " <<  result_02 << std::endl; \
  std::cout << "result_10 -- " <<  result_10 << std::endl; \
  std::cout << "result_11 -- " <<  result_11 << std::endl; \
  std::cout << "result_12 -- " <<  result_12 << std::endl; \
  std::cout << "result_20 -- " <<  result_20 << std::endl; \
  std::cout << "result_21 -- " <<  result_21 << std::endl; \
  std::cout << "result_22 -- " <<  result_22 << std::endl; \
  std::cout << "result_30 -- " <<  result_30 << std::endl; \
  std::cout << "result_31 -- " <<  result_31 << std::endl; \
  std::cout << "result_32 -- " <<  result_32 << std::endl;

#define ZP_RECON_ACCUM\
  result_00+= UChi_00;\
  result_01+= UChi_01;\
  result_02+= UChi_02;\
  result_10+= UChi_10;\
  result_11+= UChi_11;\
  result_12+= UChi_12;\
  result_20-= timesI(UChi_00);			\
  result_21-= timesI(UChi_01);			\
  result_22-= timesI(UChi_02);			\
  result_30+= timesI(UChi_10);			\
  result_31+= timesI(UChi_11);			\
  result_32+= timesI(UChi_12);\
  std::cout << std::endl << "DEBUG -- ZP_RECON_ACCUM" << std::endl; \
  std::cout << "result_00 -- " <<  result_00 << std::endl; \
  std::cout << "result_01 -- " <<  result_01 << std::endl; \
  std::cout << "result_02 -- " <<  result_02 << std::endl; \
  std::cout << "result_10 -- " <<  result_10 << std::endl; \
  std::cout << "result_11 -- " <<  result_11 << std::endl; \
  std::cout << "result_12 -- " <<  result_12 << std::endl; \
  std::cout << "result_20 -- " <<  result_20 << std::endl; \
  std::cout << "result_21 -- " <<  result_21 << std::endl; \
  std::cout << "result_22 -- " <<  result_22 << std::endl; \
  std::cout << "result_30 -- " <<  result_30 << std::endl; \
  std::cout << "result_31 -- " <<  result_31 << std::endl; \
  std::cout << "result_32 -- " <<  result_32 << std::endl;

#define ZM_RECON_ACCUM\
  result_00+= UChi_00;\
  result_01+= UChi_01;\
  result_02+= UChi_02;\
  result_10+= UChi_10;\
  result_11+= UChi_11;\
  result_12+= UChi_12;\
  result_20+= timesI(UChi_00);			\
  result_21+= timesI(UChi_01);			\
  result_22+= timesI(UChi_02);			\
  result_30-= timesI(UChi_10);			\
  result_31-= timesI(UChi_11);			\
  result_32-= timesI(UChi_12);\
  std::cout << std::endl << "DEBUG -- ZM_RECON_ACCUM" << std::endl; \
  std::cout << "result_00 -- " <<  result_00 << std::endl; \
  std::cout << "result_01 -- " <<  result_01 << std::endl; \
  std::cout << "result_02 -- " <<  result_02 << std::endl; \
  std::cout << "result_10 -- " <<  result_10 << std::endl; \
  std::cout << "result_11 -- " <<  result_11 << std::endl; \
  std::cout << "result_12 -- " <<  result_12 << std::endl; \
  std::cout << "result_20 -- " <<  result_20 << std::endl; \
  std::cout << "result_21 -- " <<  result_21 << std::endl; \
  std::cout << "result_22 -- " <<  result_22 << std::endl; \
  std::cout << "result_30 -- " <<  result_30 << std::endl; \
  std::cout << "result_31 -- " <<  result_31 << std::endl; \
  std::cout << "result_32 -- " <<  result_32 << std::endl;

#define TP_RECON_ACCUM\
  result_00+= UChi_00;\
  result_01+= UChi_01;\
  result_02+= UChi_02;\
  result_10+= UChi_10;\
  result_11+= UChi_11;\
  result_12+= UChi_12;\
  result_20+= UChi_00;			\
  result_21+= UChi_01;			\
  result_22+= UChi_02;			\
  result_30+= UChi_10;			\
  result_31+= UChi_11;			\
  result_32+= UChi_12;\
  std::cout << std::endl << "DEBUG -- TP_RECON_ACCUM" << std::endl; \
  std::cout << "result_00 -- " <<  result_00 << std::endl; \
  std::cout << "result_01 -- " <<  result_01 << std::endl; \
  std::cout << "result_02 -- " <<  result_02 << std::endl; \
  std::cout << "result_10 -- " <<  result_10 << std::endl; \
  std::cout << "result_11 -- " <<  result_11 << std::endl; \
  std::cout << "result_12 -- " <<  result_12 << std::endl; \
  std::cout << "result_20 -- " <<  result_20 << std::endl; \
  std::cout << "result_21 -- " <<  result_21 << std::endl; \
  std::cout << "result_22 -- " <<  result_22 << std::endl; \
  std::cout << "result_30 -- " <<  result_30 << std::endl; \
  std::cout << "result_31 -- " <<  result_31 << std::endl; \
  std::cout << "result_32 -- " <<  result_32 << std::endl;

#define TM_RECON_ACCUM\
  result_00+= UChi_00;\
  result_01+= UChi_01;\
  result_02+= UChi_02;\
  result_10+= UChi_10;\
  result_11+= UChi_11;\
  result_12+= UChi_12;\
  result_20-= UChi_00;	\
  result_21-= UChi_01;	\
  result_22-= UChi_02;	\
  result_30-= UChi_10;	\
  result_31-= UChi_11;	\
  result_32-= UChi_12;\
  std::cout << std::endl << "DEBUG -- TM_RECON_ACCUM" << std::endl; \
  std::cout << "result_00 -- " <<  result_00 << std::endl; \
  std::cout << "result_01 -- " <<  result_01 << std::endl; \
  std::cout << "result_02 -- " <<  result_02 << std::endl; \
  std::cout << "result_10 -- " <<  result_10 << std::endl; \
  std::cout << "result_11 -- " <<  result_11 << std::endl; \
  std::cout << "result_12 -- " <<  result_12 << std::endl; \
  std::cout << "result_20 -- " <<  result_20 << std::endl; \
  std::cout << "result_21 -- " <<  result_21 << std::endl; \
  std::cout << "result_22 -- " <<  result_22 << std::endl; \
  std::cout << "result_30 -- " <<  result_30 << std::endl; \
  std::cout << "result_31 -- " <<  result_31 << std::endl; \
  std::cout << "result_32 -- " <<  result_32 << std::endl;

#define HAND_STENCIL_LEG(PROJ,PERM,DIR,RECON)	\
  SE=st.GetEntry(ptype,DIR,ss);			\
  offset = SE->_offset;				\
  local  = SE->_is_local;			\
  perm   = SE->_permute;			\
  if ( local ) {				\
    LOAD_CHIMU;					\
    PROJ;					\
    if ( perm) {				\
      PERMUTE_DIR(PERM);			\
    }						\
  } else {					\
    LOAD_CHI;					\
  }						\
  MULT_2SPIN(DIR);				\
  RECON;

#define HAND_STENCIL_LEG_INT(PROJ,PERM,DIR,RECON)	\
  SE=st.GetEntry(ptype,DIR,ss);			\
  offset = SE->_offset;				\
  local  = SE->_is_local;			\
  perm   = SE->_permute;			\
  if ( local ) {				\
    LOAD_CHIMU;					\
    PROJ;					\
    if ( perm) {				\
      PERMUTE_DIR(PERM);			\
    }						\
  } else if ( st.same_node[DIR] ) {		\
    LOAD_CHI;					\
  }						\
  if (local || st.same_node[DIR] ) {		\
    MULT_2SPIN(DIR);				\
    RECON;					\
  }

#define HAND_STENCIL_LEG_EXT(PROJ,PERM,DIR,RECON)	\
  SE=st.GetEntry(ptype,DIR,ss);			\
  offset = SE->_offset;				\
  if((!SE->_is_local)&&(!st.same_node[DIR]) ) {	\
    LOAD_CHI;					\
    MULT_2SPIN(DIR);				\
    RECON;					\
    nmu++;					\
  }

#define HAND_RESULT(ss)				\
  {						\
    SiteSpinor & ref (out[ss]);		\
    vstream(ref()(0)(0),result_00);		\
    vstream(ref()(0)(1),result_01);		\
    vstream(ref()(0)(2),result_02);		\
    vstream(ref()(1)(0),result_10);		\
    vstream(ref()(1)(1),result_11);		\
    vstream(ref()(1)(2),result_12);		\
    vstream(ref()(2)(0),result_20);		\
    vstream(ref()(2)(1),result_21);		\
    vstream(ref()(2)(2),result_22);		\
    vstream(ref()(3)(0),result_30);		\
    vstream(ref()(3)(1),result_31);		\
    vstream(ref()(3)(2),result_32);		\
    std::cout << std::endl << "DEBUG -- RESULT" << std::endl; \
    std::cout << "result_00 -- " <<  result_00 << std::endl; \
    std::cout << "result_01 -- " <<  result_01 << std::endl; \
    std::cout << "result_02 -- " <<  result_02 << std::endl; \
    std::cout << "result_10 -- " <<  result_10 << std::endl; \
    std::cout << "result_11 -- " <<  result_11 << std::endl; \
    std::cout << "result_12 -- " <<  result_12 << std::endl; \
    std::cout << "result_20 -- " <<  result_20 << std::endl; \
    std::cout << "result_21 -- " <<  result_21 << std::endl; \
    std::cout << "result_22 -- " <<  result_22 << std::endl; \
    std::cout << "result_30 -- " <<  result_30 << std::endl; \
    std::cout << "result_31 -- " <<  result_31 << std::endl; \
    std::cout << "result_32 -- " <<  result_32 << std::endl;\
  }
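
// HAND_RESULT stores the assembled spinor to out[ss] with vstream;
// HAND_RESULT_EXT below instead adds the exterior contribution onto what is
// already there, and only if at least one off-node leg arrived (nmu != 0).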

#define HAND_RESULT_EXT(ss)			\
  if (nmu){					\
    SiteSpinor & ref (out[ss]);		\
    ref()(0)(0)+=result_00;		\
    ref()(0)(1)+=result_01;		\
    ref()(0)(2)+=result_02;		\
    ref()(1)(0)+=result_10;		\
    ref()(1)(1)+=result_11;		\
    ref()(1)(2)+=result_12;		\
    ref()(2)(0)+=result_20;		\
    ref()(2)(1)+=result_21;		\
    ref()(2)(2)+=result_22;		\
    ref()(3)(0)+=result_30;		\
    ref()(3)(1)+=result_31;		\
    ref()(3)(2)+=result_32;		\
    std::cout << std::endl << "DEBUG -- RESULT EXT" << std::endl; \
    std::cout << "result_00 -- " <<  result_00 << std::endl; \
    std::cout << "result_01 -- " <<  result_01 << std::endl; \
    std::cout << "result_02 -- " <<  result_02 << std::endl; \
    std::cout << "result_10 -- " <<  result_10 << std::endl; \
    std::cout << "result_11 -- " <<  result_11 << std::endl; \
    std::cout << "result_12 -- " <<  result_12 << std::endl; \
    std::cout << "result_20 -- " <<  result_20 << std::endl; \
    std::cout << "result_21 -- " <<  result_21 << std::endl; \
    std::cout << "result_22 -- " <<  result_22 << std::endl; \
    std::cout << "result_30 -- " <<  result_30 << std::endl; \
    std::cout << "result_31 -- " <<  result_31 << std::endl; \
    std::cout << "result_32 -- " <<  result_32 << std::endl;\
  }


#define HAND_DECLARATIONS(a)			\
  Simd result_00;				\
  Simd result_01;				\
  Simd result_02;				\
  Simd result_10;				\
  Simd result_11;				\
  Simd result_12;				\
  Simd result_20;				\
  Simd result_21;				\
  Simd result_22;				\
  Simd result_30;				\
  Simd result_31;				\
  Simd result_32;				\
  Simd Chi_00;					\
  Simd Chi_01;					\
  Simd Chi_02;					\
  Simd Chi_10;					\
  Simd Chi_11;					\
  Simd Chi_12;					\
  Simd UChi_00;					\
  Simd UChi_01;					\
  Simd UChi_02;					\
  Simd UChi_10;					\
  Simd UChi_11;					\
  Simd UChi_12;					\
  Simd U_00;					\
  Simd U_10;					\
  Simd U_20;					\
  Simd U_01;					\
  Simd U_11;					\
  Simd U_21;\
  Simd debugreg;\
  svbool_t pg1;        \
  pg1 = svptrue_b64();        \

#define ZERO_RESULT				\
  result_00=Zero();				\
  result_01=Zero();				\
  result_02=Zero();				\
  result_10=Zero();				\
  result_11=Zero();				\
  result_12=Zero();				\
  result_20=Zero();				\
  result_21=Zero();				\
  result_22=Zero();				\
  result_30=Zero();				\
  result_31=Zero();				\
  result_32=Zero();

#define Chimu_00 Chi_00
#define Chimu_01 Chi_01
#define Chimu_02 Chi_02
#define Chimu_10 Chi_10
#define Chimu_11 Chi_11
#define Chimu_12 Chi_12
#define Chimu_20 UChi_00
#define Chimu_21 UChi_01
#define Chimu_22 UChi_02
#define Chimu_30 UChi_10
#define Chimu_31 UChi_11
#define Chimu_32 UChi_12
 | 
			
		||||
 | 
			
		||||
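The Chimu_2x/Chimu_3x aliases above map the lower two spin rows of the loaded spinor onto the UChi_xx registers. The apparent intent (an assumption on my part, not stated in the diff) is to cap the declared working set at the registers listed in HAND_DECLARATIONS, since the lower rows are dead once the two-spinor projection Chi has been formed. A toy C++ sketch of the same name-aliasing trick, using a hypothetical Vec type rather than Grid's Simd:

// Illustrative only: reuse one register's storage for two values whose lifetimes
// do not overlap, by aliasing the second name onto the first with the preprocessor.
#include <complex>
#include <cstdio>
using Vec = std::complex<double>;   // stand-in for a SIMD register type

#define Chimu_lower UChi_upper      // the "lower spinor row" borrows UChi's register

int main() {
  Vec Chimu_upper{1.0, 0.0};
  Vec UChi_upper{};                 // not live yet
  Chimu_lower = Vec{0.0, 1.0};      // load the lower row into the aliased register
  Vec Chi = Chimu_upper + Vec{0.0, 1.0} * Chimu_lower;  // projection consumes it ...
  UChi_upper = Vec{0.5, 0.0} * Chi; // ... so the same register now holds U*Chi
  std::printf("UChi = (%g, %g)\n", UChi_upper.real(), UChi_upper.imag());
  return 0;
}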
NAMESPACE_BEGIN(Grid);

template<class Impl> void
WilsonKernels<Impl>::HandDhopSite(StencilView &st, DoubledGaugeFieldView &U,SiteHalfSpinor  *buf,
				  int ss,int sU,const FermionFieldView &in, FermionFieldView &out)
{
// T==0, Z==1, Y==2, X==3 expect 1,2,2,2 simd layout etc...
  typedef typename Simd::scalar_type S;
  typedef typename Simd::vector_type V;

  HAND_DECLARATIONS(ignore);

  int offset,local,perm, ptype;
  StencilEntry *SE;

  HAND_STENCIL_LEG(XM_PROJ,3,Xp,XM_RECON);
  HAND_STENCIL_LEG(YM_PROJ,2,Yp,YM_RECON_ACCUM);
  HAND_STENCIL_LEG(ZM_PROJ,1,Zp,ZM_RECON_ACCUM);
  HAND_STENCIL_LEG(TM_PROJ,0,Tp,TM_RECON_ACCUM);
  HAND_STENCIL_LEG(XP_PROJ,3,Xm,XP_RECON_ACCUM);
  HAND_STENCIL_LEG(YP_PROJ,2,Ym,YP_RECON_ACCUM);
  HAND_STENCIL_LEG(ZP_PROJ,1,Zm,ZP_RECON_ACCUM);
  HAND_STENCIL_LEG(TP_PROJ,0,Tm,TP_RECON_ACCUM);
  HAND_RESULT(ss);
}
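Each HAND_STENCIL_LEG(PROJ,perm,Dir,RECON) call above covers one of the eight hopping directions: broadly, a leg fetches the stencil entry, spin-projects the neighbouring spinor (from local memory, with an optional SIMD permute, or from the comms buffer), multiplies by the gauge link for that direction, and reconstruct-accumulates into the twelve result registers. A stand-alone sketch of that project, link-multiply, reconstruct pattern, with hypothetical scalar types instead of Grid's Simd/SiteSpinor and the gamma-basis details omitted:

// Illustrative sketch only: one "leg" of a Wilson-type stencil update in scalar form.
#include <array>
#include <complex>
using cplx     = std::complex<double>;
using HalfSpin = std::array<cplx, 2>;            // 2 spin components (colour dropped)
using FullSpin = std::array<cplx, 4>;            // 4 spin components
using Link     = cplx;                           // 1x1 "gauge link" stand-in for SU(3)

// project: 4-spinor -> 2-spinor (generic projector, basis details omitted)
static HalfSpin proj(const FullSpin &chimu) {
  return { chimu[0] + chimu[2], chimu[1] + chimu[3] };
}
// reconstruct-accumulate: 2-spinor back onto the 4-spinor result
static void recon_accum(FullSpin &res, const HalfSpin &uchi) {
  res[0] += uchi[0]; res[1] += uchi[1];
  res[2] += uchi[0]; res[3] += uchi[1];
}

int main() {
  FullSpin neighbour{{ {1,0},{0,1},{2,0},{0,2} }};
  FullSpin result{};                             // ZERO_RESULT analogue
  Link U{0.5, 0.5};
  HalfSpin chi  = proj(neighbour);               // *_PROJ analogue
  HalfSpin uchi{ U*chi[0], U*chi[1] };           // MULT_2SPIN analogue
  recon_accum(result, uchi);                     // *_RECON_ACCUM analogue
  return (int)result.size();                     // 4
}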

template<class Impl>
void WilsonKernels<Impl>::HandDhopSiteDag(StencilView &st,DoubledGaugeFieldView &U,SiteHalfSpinor *buf,
					  int ss,int sU,const FermionFieldView &in, FermionFieldView &out)
{
  typedef typename Simd::scalar_type S;
  typedef typename Simd::vector_type V;

  HAND_DECLARATIONS(ignore);

  StencilEntry *SE;
  int offset,local,perm, ptype;

  HAND_STENCIL_LEG(XP_PROJ,3,Xp,XP_RECON);
  HAND_STENCIL_LEG(YP_PROJ,2,Yp,YP_RECON_ACCUM);
  HAND_STENCIL_LEG(ZP_PROJ,1,Zp,ZP_RECON_ACCUM);
  HAND_STENCIL_LEG(TP_PROJ,0,Tp,TP_RECON_ACCUM);
  HAND_STENCIL_LEG(XM_PROJ,3,Xm,XM_RECON_ACCUM);
  HAND_STENCIL_LEG(YM_PROJ,2,Ym,YM_RECON_ACCUM);
  HAND_STENCIL_LEG(ZM_PROJ,1,Zm,ZM_RECON_ACCUM);
  HAND_STENCIL_LEG(TM_PROJ,0,Tm,TM_RECON_ACCUM);
  HAND_RESULT(ss);
}

template<class Impl> void
WilsonKernels<Impl>::HandDhopSiteInt(StencilView &st,DoubledGaugeFieldView &U,SiteHalfSpinor  *buf,
					  int ss,int sU,const FermionFieldView &in, FermionFieldView &out)
{
// T==0, Z==1, Y==2, X==3 expect 1,2,2,2 simd layout etc...
  typedef typename Simd::scalar_type S;
  typedef typename Simd::vector_type V;

  HAND_DECLARATIONS(ignore);

  int offset,local,perm, ptype;
  StencilEntry *SE;
  ZERO_RESULT;
  HAND_STENCIL_LEG_INT(XM_PROJ,3,Xp,XM_RECON_ACCUM);
  HAND_STENCIL_LEG_INT(YM_PROJ,2,Yp,YM_RECON_ACCUM);
  HAND_STENCIL_LEG_INT(ZM_PROJ,1,Zp,ZM_RECON_ACCUM);
  HAND_STENCIL_LEG_INT(TM_PROJ,0,Tp,TM_RECON_ACCUM);
  HAND_STENCIL_LEG_INT(XP_PROJ,3,Xm,XP_RECON_ACCUM);
  HAND_STENCIL_LEG_INT(YP_PROJ,2,Ym,YP_RECON_ACCUM);
  HAND_STENCIL_LEG_INT(ZP_PROJ,1,Zm,ZP_RECON_ACCUM);
  HAND_STENCIL_LEG_INT(TP_PROJ,0,Tm,TP_RECON_ACCUM);
  HAND_RESULT(ss);
}

template<class Impl>
void WilsonKernels<Impl>::HandDhopSiteDagInt(StencilView &st,DoubledGaugeFieldView &U,SiteHalfSpinor *buf,
						  int ss,int sU,const FermionFieldView &in, FermionFieldView &out)
{
  typedef typename Simd::scalar_type S;
  typedef typename Simd::vector_type V;

  HAND_DECLARATIONS(ignore);

  StencilEntry *SE;
  int offset,local,perm, ptype;
  ZERO_RESULT;
  HAND_STENCIL_LEG_INT(XP_PROJ,3,Xp,XP_RECON_ACCUM);
  HAND_STENCIL_LEG_INT(YP_PROJ,2,Yp,YP_RECON_ACCUM);
  HAND_STENCIL_LEG_INT(ZP_PROJ,1,Zp,ZP_RECON_ACCUM);
  HAND_STENCIL_LEG_INT(TP_PROJ,0,Tp,TP_RECON_ACCUM);
  HAND_STENCIL_LEG_INT(XM_PROJ,3,Xm,XM_RECON_ACCUM);
  HAND_STENCIL_LEG_INT(YM_PROJ,2,Ym,YM_RECON_ACCUM);
  HAND_STENCIL_LEG_INT(ZM_PROJ,1,Zm,ZM_RECON_ACCUM);
  HAND_STENCIL_LEG_INT(TM_PROJ,0,Tm,TM_RECON_ACCUM);
  HAND_RESULT(ss);
}

template<class Impl> void
WilsonKernels<Impl>::HandDhopSiteExt(StencilView &st,DoubledGaugeFieldView &U,SiteHalfSpinor  *buf,
					  int ss,int sU,const FermionFieldView &in, FermionFieldView &out)
{
// T==0, Z==1, Y==2, X==3 expect 1,2,2,2 simd layout etc...
  typedef typename Simd::scalar_type S;
  typedef typename Simd::vector_type V;

  HAND_DECLARATIONS(ignore);

  int offset, ptype;
  StencilEntry *SE;
  int nmu=0;
  ZERO_RESULT;
  HAND_STENCIL_LEG_EXT(XM_PROJ,3,Xp,XM_RECON_ACCUM);
  HAND_STENCIL_LEG_EXT(YM_PROJ,2,Yp,YM_RECON_ACCUM);
  HAND_STENCIL_LEG_EXT(ZM_PROJ,1,Zp,ZM_RECON_ACCUM);
  HAND_STENCIL_LEG_EXT(TM_PROJ,0,Tp,TM_RECON_ACCUM);
  HAND_STENCIL_LEG_EXT(XP_PROJ,3,Xm,XP_RECON_ACCUM);
  HAND_STENCIL_LEG_EXT(YP_PROJ,2,Ym,YP_RECON_ACCUM);
  HAND_STENCIL_LEG_EXT(ZP_PROJ,1,Zm,ZP_RECON_ACCUM);
  HAND_STENCIL_LEG_EXT(TP_PROJ,0,Tm,TP_RECON_ACCUM);
  HAND_RESULT_EXT(ss);
}

template<class Impl>
void WilsonKernels<Impl>::HandDhopSiteDagExt(StencilView &st,DoubledGaugeFieldView &U,SiteHalfSpinor *buf,
						  int ss,int sU,const FermionFieldView &in, FermionFieldView &out)
{
  typedef typename Simd::scalar_type S;
  typedef typename Simd::vector_type V;

  HAND_DECLARATIONS(ignore);

  StencilEntry *SE;
  int offset, ptype;
  int nmu=0;
  ZERO_RESULT;
  HAND_STENCIL_LEG_EXT(XP_PROJ,3,Xp,XP_RECON_ACCUM);
  HAND_STENCIL_LEG_EXT(YP_PROJ,2,Yp,YP_RECON_ACCUM);
  HAND_STENCIL_LEG_EXT(ZP_PROJ,1,Zp,ZP_RECON_ACCUM);
  HAND_STENCIL_LEG_EXT(TP_PROJ,0,Tp,TP_RECON_ACCUM);
  HAND_STENCIL_LEG_EXT(XM_PROJ,3,Xm,XM_RECON_ACCUM);
  HAND_STENCIL_LEG_EXT(YM_PROJ,2,Ym,YM_RECON_ACCUM);
  HAND_STENCIL_LEG_EXT(ZM_PROJ,1,Zm,ZM_RECON_ACCUM);
  HAND_STENCIL_LEG_EXT(TM_PROJ,0,Tm,TM_RECON_ACCUM);
  HAND_RESULT_EXT(ss);
}

////////////// Wilson ; uses this implementation /////////////////////

NAMESPACE_END(Grid);
#undef LOAD_CHIMU
#undef LOAD_CHI
#undef MULT_2SPIN
#undef PERMUTE_DIR
#undef XP_PROJ
#undef YP_PROJ
#undef ZP_PROJ
#undef TP_PROJ
#undef XM_PROJ
#undef YM_PROJ
#undef ZM_PROJ
#undef TM_PROJ
#undef XP_RECON
#undef XP_RECON_ACCUM
#undef XM_RECON
#undef XM_RECON_ACCUM
#undef YP_RECON_ACCUM
#undef YM_RECON_ACCUM
#undef ZP_RECON_ACCUM
#undef ZM_RECON_ACCUM
#undef TP_RECON_ACCUM
#undef TM_RECON_ACCUM
#undef ZERO_RESULT
#undef Chimu_00
#undef Chimu_01
#undef Chimu_02
#undef Chimu_10
#undef Chimu_11
#undef Chimu_12
#undef Chimu_20
#undef Chimu_21
#undef Chimu_22
#undef Chimu_30
#undef Chimu_31
#undef Chimu_32
#undef HAND_STENCIL_LEG
#undef HAND_STENCIL_LEG_INT
#undef HAND_STENCIL_LEG_EXT
#undef HAND_RESULT
#undef HAND_RESULT_INT
#undef HAND_RESULT_EXT
@@ -43,11 +43,11 @@ NAMESPACE_BEGIN(Grid);
accelerator_inline void get_stencil(StencilEntry * mem, StencilEntry &chip)
{
#ifdef GRID_SIMT
  static_assert(sizeof(StencilEntry)==sizeof(uint4),"Unexpected Stencil Entry Size");
  uint4 * mem_pun  = (uint4 *)mem; // force 128 bit loads
  uint4 * chip_pun = (uint4 *)&chip;
  * chip_pun = * mem_pun;
#else
  chip = *mem;
#endif
  return;
@@ -66,7 +66,7 @@ accelerator_inline void get_stencil(StencilEntry * mem, StencilEntry &chip)
  acceleratorSynchronise();						\
  Impl::multLink(Uchi, U[sU], chi, Dir, SE, st);		\
  Recon(result, Uchi);

#define GENERIC_STENCIL_LEG_INT(Dir,spProj,Recon)		\
  SE = st.GetEntry(ptype, Dir, sF);				\
  if (SE->_is_local) {						\
@@ -81,7 +81,7 @@ accelerator_inline void get_stencil(StencilEntry * mem, StencilEntry &chip)
    Impl::multLink(Uchi, U[sU], chi, Dir, SE, st);		\
    Recon(result, Uchi);					\
  }								\
  acceleratorSynchronise();

#define GENERIC_STENCIL_LEG_EXT(Dir,spProj,Recon)		\
  SE = st.GetEntry(ptype, Dir, sF);				\
@@ -91,7 +91,7 @@ accelerator_inline void get_stencil(StencilEntry * mem, StencilEntry &chip)
    Recon(result, Uchi);					\
    nmu++;							\
  }								\
  acceleratorSynchronise();

#define GENERIC_DHOPDIR_LEG_BODY(Dir,spProj,Recon)		\
    if (SE->_is_local ) {					\
@@ -103,7 +103,7 @@ accelerator_inline void get_stencil(StencilEntry * mem, StencilEntry &chip)
    }								\
    acceleratorSynchronise();					\
    Impl::multLink(Uchi, U[sU], chi, dir, SE, st);		\
    Recon(result, Uchi);

#define GENERIC_DHOPDIR_LEG(Dir,spProj,Recon)			\
  if (gamma == Dir) {						\
@@ -114,7 +114,7 @@ accelerator_inline void get_stencil(StencilEntry * mem, StencilEntry &chip)
  ////////////////////////////////////////////////////////////////////
  // All legs kernels ; comms then compute
  ////////////////////////////////////////////////////////////////////
template <class Impl>
template <class Impl> accelerator_inline
void WilsonKernels<Impl>::GenericDhopSiteDag(StencilView &st, DoubledGaugeFieldView &U,
					     SiteHalfSpinor *buf, int sF,
					     int sU, const FermionFieldView &in, FermionFieldView &out)
@@ -140,10 +140,10 @@ void WilsonKernels<Impl>::GenericDhopSiteDag(StencilView &st, DoubledGaugeFieldV
  coalescedWrite(out[sF],result,lane);
};

template <class Impl>
template <class Impl> accelerator_inline
void WilsonKernels<Impl>::GenericDhopSite(StencilView &st, DoubledGaugeFieldView &U,
					  SiteHalfSpinor *buf, int sF,
					  int sU, const FermionFieldView &in, FermionFieldView &out)
{
  typedef decltype(coalescedRead(buf[0])) calcHalfSpinor;
  typedef decltype(coalescedRead(in[0]))  calcSpinor;
@@ -169,7 +169,7 @@ void WilsonKernels<Impl>::GenericDhopSite(StencilView &st, DoubledGaugeFieldView
  ////////////////////////////////////////////////////////////////////
  // Interior kernels
  ////////////////////////////////////////////////////////////////////
template <class Impl>
template <class Impl> accelerator_inline
void WilsonKernels<Impl>::GenericDhopSiteDagInt(StencilView &st,  DoubledGaugeFieldView &U,
						SiteHalfSpinor *buf, int sF,
						int sU, const FermionFieldView &in, FermionFieldView &out)
@@ -197,10 +197,10 @@ void WilsonKernels<Impl>::GenericDhopSiteDagInt(StencilView &st,  DoubledGaugeFi
  coalescedWrite(out[sF], result,lane);
};

template <class Impl>
template <class Impl> accelerator_inline
void WilsonKernels<Impl>::GenericDhopSiteInt(StencilView &st,  DoubledGaugeFieldView &U,
							 SiteHalfSpinor *buf, int sF,
							 int sU, const FermionFieldView &in, FermionFieldView &out)
{
  typedef decltype(coalescedRead(buf[0])) calcHalfSpinor;
  typedef decltype(coalescedRead(in[0]))  calcSpinor;
@@ -227,7 +227,7 @@ void WilsonKernels<Impl>::GenericDhopSiteInt(StencilView &st,  DoubledGaugeField
////////////////////////////////////////////////////////////////////
// Exterior kernels
////////////////////////////////////////////////////////////////////
template <class Impl>
template <class Impl> accelerator_inline
void WilsonKernels<Impl>::GenericDhopSiteDagExt(StencilView &st,  DoubledGaugeFieldView &U,
						SiteHalfSpinor *buf, int sF,
						int sU, const FermionFieldView &in, FermionFieldView &out)
@@ -251,17 +251,17 @@ void WilsonKernels<Impl>::GenericDhopSiteDagExt(StencilView &st,  DoubledGaugeFi
  GENERIC_STENCIL_LEG_EXT(Ym,spProjYm,accumReconYm);
  GENERIC_STENCIL_LEG_EXT(Zm,spProjZm,accumReconZm);
  GENERIC_STENCIL_LEG_EXT(Tm,spProjTm,accumReconTm);
  if ( nmu ) {
    auto out_t = coalescedRead(out[sF],lane);
    out_t = out_t + result;
    coalescedWrite(out[sF],out_t,lane);
  }
};

template <class Impl>
template <class Impl> accelerator_inline
void WilsonKernels<Impl>::GenericDhopSiteExt(StencilView &st,  DoubledGaugeFieldView &U,
					     SiteHalfSpinor *buf, int sF,
					     int sU, const FermionFieldView &in, FermionFieldView &out)
{
  typedef decltype(coalescedRead(buf[0])) calcHalfSpinor;
  typedef decltype(coalescedRead(in[0]))  calcSpinor;
@@ -282,7 +282,7 @@ void WilsonKernels<Impl>::GenericDhopSiteExt(StencilView &st,  DoubledGaugeField
  GENERIC_STENCIL_LEG_EXT(Yp,spProjYm,accumReconYm);
  GENERIC_STENCIL_LEG_EXT(Zp,spProjZm,accumReconZm);
  GENERIC_STENCIL_LEG_EXT(Tp,spProjTm,accumReconTm);
  if ( nmu ) {
    auto out_t = coalescedRead(out[sF],lane);
    out_t = out_t + result;
    coalescedWrite(out[sF],out_t,lane);
@@ -290,7 +290,7 @@ void WilsonKernels<Impl>::GenericDhopSiteExt(StencilView &st,  DoubledGaugeField
};

#define DhopDirMacro(Dir,spProj,spRecon)	\
  template <class Impl>							\
  template <class Impl> accelerator_inline				\
  void WilsonKernels<Impl>::DhopDir##Dir(StencilView &st, DoubledGaugeFieldView &U,SiteHalfSpinor *buf, int sF, \
					 int sU, const FermionFieldView &in, FermionFieldView &out, int dir) \
  {									\
@@ -307,7 +307,7 @@ void WilsonKernels<Impl>::GenericDhopSiteExt(StencilView &st,  DoubledGaugeField
  SE = st.GetEntry(ptype, dir, sF);					\
  GENERIC_DHOPDIR_LEG_BODY(Dir,spProj,spRecon);				\
  coalescedWrite(out[sF], result,lane);					\
  }

DhopDirMacro(Xp,spProjXp,spReconXp);
DhopDirMacro(Yp,spProjYp,spReconYp);
@@ -318,9 +318,9 @@ DhopDirMacro(Ym,spProjYm,spReconYm);
DhopDirMacro(Zm,spProjZm,spReconZm);
DhopDirMacro(Tm,spProjTm,spReconTm);

template <class Impl>
template <class Impl> accelerator_inline
void WilsonKernels<Impl>::DhopDirK( StencilView &st, DoubledGaugeFieldView &U,SiteHalfSpinor *buf, int sF,
				    int sU, const FermionFieldView &in, FermionFieldView &out, int dir, int gamma)
{
  typedef decltype(coalescedRead(buf[0])) calcHalfSpinor;
  typedef decltype(coalescedRead(in[0]))  calcSpinor;
@@ -346,7 +346,7 @@ void WilsonKernels<Impl>::DhopDirK( StencilView &st, DoubledGaugeFieldView &U,Si

template <class Impl>
void WilsonKernels<Impl>::DhopDirAll( StencilImpl &st, DoubledGaugeField &U,SiteHalfSpinor *buf, int Ls,
				      int Nsite, const FermionField &in, std::vector<FermionField> &out)
{
   autoView(U_v  ,U,AcceleratorRead);
   autoView(in_v ,in,AcceleratorRead);
@@ -362,8 +362,8 @@ void WilsonKernels<Impl>::DhopDirAll( StencilImpl &st, DoubledGaugeField &U,Site
   autoView(out_Tp,out[7],AcceleratorWrite);
   auto CBp=st.CommBuf();
   accelerator_for(sss,Nsite*Ls,Simd::Nsimd(),{
      int sU=sss/Ls;
      int sF =sss;
      DhopDirXm(st_v,U_v,CBp,sF,sU,in_v,out_Xm,0);
      DhopDirYm(st_v,U_v,CBp,sF,sU,in_v,out_Ym,1);
      DhopDirZm(st_v,U_v,CBp,sF,sU,in_v,out_Zm,2);
@@ -378,7 +378,7 @@ void WilsonKernels<Impl>::DhopDirAll( StencilImpl &st, DoubledGaugeField &U,Site

template <class Impl>
void WilsonKernels<Impl>::DhopDirKernel( StencilImpl &st, DoubledGaugeField &U,SiteHalfSpinor *buf, int Ls,
					 int Nsite, const FermionField &in, FermionField &out, int dirdisp, int gamma)
{
  assert(dirdisp<=7);
  assert(dirdisp>=0);
@@ -387,7 +387,7 @@ void WilsonKernels<Impl>::DhopDirKernel( StencilImpl &st, DoubledGaugeField &U,S
   autoView(in_v ,in ,AcceleratorRead);
   autoView(out_v,out,AcceleratorWrite);
   autoView(st_v ,st ,AcceleratorRead);
   auto CBp=st.CommBuf();
#define LoopBody(Dir)				\
   case Dir :					\
     accelerator_for(ss,Nsite,Simd::Nsimd(),{	\
@@ -414,7 +414,7 @@ void WilsonKernels<Impl>::DhopDirKernel( StencilImpl &st, DoubledGaugeField &U,S
     break;
   }
#undef LoopBody
}

#define KERNEL_CALLNB(A) \
  const uint64_t    NN = Nsite*Ls;					\
@@ -424,7 +424,7 @@ void WilsonKernels<Impl>::DhopDirKernel( StencilImpl &st, DoubledGaugeField &U,S
      WilsonKernels<Impl>::A(st_v,U_v,buf,sF,sU,in_v,out_v);		\
  });

#define KERNEL_CALL(A) KERNEL_CALLNB(A); accelerator_barrier();

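As the fragment above shows, KERNEL_CALLNB launches the named per-site kernel over all Nsite*Ls fermion sites, and KERNEL_CALL is the same launch followed by accelerator_barrier(), i.e. the blocking variant. A minimal stand-alone sketch of that macro-dispatch pattern, with a plain loop and a no-op barrier standing in for Grid's accelerator primitives:

// Illustrative sketch only: non-blocking vs blocking "kernel call" macros.
#include <cstdint>
#include <cstdio>

static void barrier() { /* stand-in for accelerator_barrier() */ }

// Expand into a loop over all sites, invoking the named per-site kernel.
#define MY_KERNEL_CALLNB(A)                         \
  {                                                 \
    const uint64_t NN = Nsite * Ls;                 \
    for (uint64_t ss = 0; ss < NN; ss++) {          \
      A(ss / Ls, ss);                               \
    }                                               \
  }
#define MY_KERNEL_CALL(A) MY_KERNEL_CALLNB(A); barrier();

static void DhopSiteToy(uint64_t sU, uint64_t sF) {
  std::printf("site sU=%llu sF=%llu\n",
              (unsigned long long)sU, (unsigned long long)sF);
}

int main() {
  const uint64_t Nsite = 2, Ls = 3;
  MY_KERNEL_CALL(DhopSiteToy);   // runs the toy kernel on all 6 sites, then "barriers"
  return 0;
}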
#define ASM_CALL(A)							\
  thread_for( ss, Nsite, {						\
@@ -436,14 +436,14 @@ void WilsonKernels<Impl>::DhopDirKernel( StencilImpl &st, DoubledGaugeField &U,S
template <class Impl>
void WilsonKernels<Impl>::DhopKernel(int Opt,StencilImpl &st,  DoubledGaugeField &U, SiteHalfSpinor * buf,
				     int Ls, int Nsite, const FermionField &in, FermionField &out,
				     int interior,int exterior)
{
    autoView(U_v  ,  U,AcceleratorRead);
    autoView(in_v , in,AcceleratorRead);
    autoView(out_v,out,AcceleratorWrite);
    autoView(st_v , st,AcceleratorRead);

   if( interior && exterior ) {
     if (Opt == WilsonKernelsStatic::OptGeneric    ) { KERNEL_CALL(GenericDhopSite); return;}
#ifndef GRID_CUDA
     if (Opt == WilsonKernelsStatic::OptHandUnroll ) { KERNEL_CALL(HandDhopSite);    return;}
@@ -455,7 +455,7 @@ void WilsonKernels<Impl>::DhopKernel(int Opt,StencilImpl &st,  DoubledGaugeField
     if (Opt == WilsonKernelsStatic::OptHandUnroll ) { KERNEL_CALLNB(HandDhopSiteInt);    return;}
     if (Opt == WilsonKernelsStatic::OptInlineAsm  ) {  ASM_CALL(AsmDhopSiteInt);    return;}
#endif
   } else if( exterior ) {
     if (Opt == WilsonKernelsStatic::OptGeneric    ) { KERNEL_CALL(GenericDhopSiteExt); return;}
#ifndef GRID_CUDA
     if (Opt == WilsonKernelsStatic::OptHandUnroll ) { KERNEL_CALL(HandDhopSiteExt);    return;}
@@ -467,14 +467,14 @@ void WilsonKernels<Impl>::DhopKernel(int Opt,StencilImpl &st,  DoubledGaugeField
  template <class Impl>
  void WilsonKernels<Impl>::DhopDagKernel(int Opt,StencilImpl &st,  DoubledGaugeField &U, SiteHalfSpinor * buf,
					  int Ls, int Nsite, const FermionField &in, FermionField &out,
					  int interior,int exterior)
  {
    autoView(U_v  ,U,AcceleratorRead);
    autoView(in_v ,in,AcceleratorRead);
    autoView(out_v,out,AcceleratorWrite);
    autoView(st_v ,st,AcceleratorRead);

   if( interior && exterior ) {
     if (Opt == WilsonKernelsStatic::OptGeneric    ) { KERNEL_CALL(GenericDhopSiteDag); return;}
#ifndef GRID_CUDA
     if (Opt == WilsonKernelsStatic::OptHandUnroll ) { KERNEL_CALL(HandDhopSiteDag);    return;}
@@ -486,7 +486,7 @@ void WilsonKernels<Impl>::DhopKernel(int Opt,StencilImpl &st,  DoubledGaugeField
     if (Opt == WilsonKernelsStatic::OptHandUnroll ) { KERNEL_CALL(HandDhopSiteDagInt);    return;}
     if (Opt == WilsonKernelsStatic::OptInlineAsm  ) {  ASM_CALL(AsmDhopSiteDagInt);     return;}
#endif
   } else if( exterior ) {
     if (Opt == WilsonKernelsStatic::OptGeneric    ) { KERNEL_CALL(GenericDhopSiteDagExt); return;}
#ifndef GRID_CUDA
     if (Opt == WilsonKernelsStatic::OptHandUnroll ) { KERNEL_CALL(HandDhopSiteDagExt);    return;}
@@ -501,4 +501,3 @@ void WilsonKernels<Impl>::DhopKernel(int Opt,StencilImpl &st,  DoubledGaugeField
#undef ASM_CALL

NAMESPACE_END(Grid);

@@ -1 +0,0 @@
../WilsonKernelsInstantiation.cc.master
@@ -0,0 +1,51 @@
/*************************************************************************************

Grid physics library, www.github.com/paboyle/Grid

Source file: ./lib/qcd/action/fermion/WilsonKernels.cc

Copyright (C) 2015, 2020

Author: Peter Boyle <paboyle@ph.ed.ac.uk>
Author: Peter Boyle <peterboyle@Peters-MacBook-Pro-2.local>
Author: paboyle <paboyle@ph.ed.ac.uk>
Author: Nils Meyer <nils.meyer@ur.de> Regensburg University

This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.

This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.

See the full license in the file "LICENSE" in the top level distribution
directory
*************************************************************************************/
/*  END LEGAL */
#include <Grid/qcd/action/fermion/FermionCore.h>
#include <Grid/qcd/action/fermion/implementation/WilsonKernelsImplementation.h>
#include <Grid/qcd/action/fermion/implementation/WilsonKernelsHandImplementation.h>

#ifndef AVX512
#ifndef QPX
#ifndef A64FX
#ifndef A64FXFIXEDSIZE
#include <Grid/qcd/action/fermion/implementation/WilsonKernelsAsmImplementation.h>
#endif
#endif
#endif
#endif

NAMESPACE_BEGIN(Grid);

#include "impl.h"
template class WilsonKernels<IMPLEMENTATION>;

NAMESPACE_END(Grid);
@@ -4,11 +4,12 @@ Grid physics library, www.github.com/paboyle/Grid

Source file: ./lib/qcd/action/fermion/WilsonKernels.cc

Copyright (C) 2015
Copyright (C) 2015, 2020

Author: Peter Boyle <paboyle@ph.ed.ac.uk>
Author: Peter Boyle <peterboyle@Peters-MacBook-Pro-2.local>
Author: paboyle <paboyle@ph.ed.ac.uk>
Author: Nils Meyer <nils.meyer@ur.de> Regensburg University

This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -34,14 +35,17 @@ directory

#ifndef AVX512
#ifndef QPX
#ifndef A64FX
#ifndef A64FXFIXEDSIZE
#include <Grid/qcd/action/fermion/implementation/WilsonKernelsAsmImplementation.h>
#endif
#endif
#endif
#endif

NAMESPACE_BEGIN(Grid);

#include "impl.h"
template class WilsonKernels<IMPLEMENTATION>;

NAMESPACE_END(Grid);

@@ -37,6 +37,7 @@ directory
////////////////////////////////////////////////////////////////////////
NAMESPACE_BEGIN(Grid);
#include <Grid/qcd/action/fermion/implementation/WilsonKernelsAsmAvx512.h>
#include <Grid/qcd/action/fermion/implementation/WilsonKernelsAsmA64FX.h>
#include <Grid/qcd/action/fermion/implementation/WilsonKernelsAsmQPX.h>
NAMESPACE_END(Grid);
@@ -1 +0,0 @@
 | 
			
		||||
../WilsonKernelsInstantiation.cc.master
 | 
			
		||||
@@ -0,0 +1,51 @@
 | 
			
		||||
/*************************************************************************************
 | 
			
		||||
 | 
			
		||||
Grid physics library, www.github.com/paboyle/Grid
 | 
			
		||||
 | 
			
		||||
Source file: ./lib/qcd/action/fermion/WilsonKernels.cc
 | 
			
		||||
 | 
			
		||||
Copyright (C) 2015, 2020
 | 
			
		||||
 | 
			
		||||
Author: Peter Boyle <paboyle@ph.ed.ac.uk>
 | 
			
		||||
Author: Peter Boyle <peterboyle@Peters-MacBook-Pro-2.local>
 | 
			
		||||
Author: paboyle <paboyle@ph.ed.ac.uk>
 | 
			
		||||
Author: Nils Meyer <nils.meyer@ur.de> Regensburg University
 | 
			
		||||
 | 
			
		||||
This program is free software; you can redistribute it and/or modify
 | 
			
		||||
it under the terms of the GNU General Public License as published by
 | 
			
		||||
the Free Software Foundation; either version 2 of the License, or
 | 
			
		||||
(at your option) any later version.
 | 
			
		||||
 | 
			
		||||
This program is distributed in the hope that it will be useful,
 | 
			
		||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
 | 
			
		||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 | 
			
		||||
GNU General Public License for more details.
 | 
			
		||||
 | 
			
		||||
You should have received a copy of the GNU General Public License along
 | 
			
		||||
with this program; if not, write to the Free Software Foundation, Inc.,
 | 
			
		||||
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 | 
			
		||||
 | 
			
		||||
See the full license in the file "LICENSE" in the top level distribution
 | 
			
		||||
directory
 | 
			
		||||
*************************************************************************************/
 | 
			
		||||
/*  END LEGAL */
 | 
			
		||||
#include <Grid/qcd/action/fermion/FermionCore.h>
 | 
			
		||||
#include <Grid/qcd/action/fermion/implementation/WilsonKernelsImplementation.h>
 | 
			
		||||
#include <Grid/qcd/action/fermion/implementation/WilsonKernelsHandImplementation.h>
 | 
			
		||||
 | 
			
		||||
#ifndef AVX512
 | 
			
		||||
#ifndef QPX
 | 
			
		||||
#ifndef A64FX
 | 
			
		||||
#ifndef A64FXFIXEDSIZE
 | 
			
		||||
#include <Grid/qcd/action/fermion/implementation/WilsonKernelsAsmImplementation.h>
 | 
			
		||||
#endif
 | 
			
		||||
#endif
 | 
			
		||||
#endif
 | 
			
		||||
#endif
 | 
			
		||||
 | 
			
		||||
NAMESPACE_BEGIN(Grid);
 | 
			
		||||
 | 
			
		||||
#include "impl.h"
 | 
			
		||||
template class WilsonKernels<IMPLEMENTATION>;
 | 
			
		||||
 | 
			
		||||
NAMESPACE_END(Grid);
 | 
			
		||||
@@ -1 +0,0 @@
 | 
			
		||||
../WilsonKernelsInstantiation.cc.master
 | 
			
		||||
@@ -0,0 +1,51 @@
 | 
			
		||||
/*************************************************************************************
 | 
			
		||||
 | 
			
		||||
Grid physics library, www.github.com/paboyle/Grid
 | 
			
		||||
 | 
			
		||||
Source file: ./lib/qcd/action/fermion/WilsonKernels.cc
 | 
			
		||||
 | 
			
		||||
Copyright (C) 2015, 2020
 | 
			
		||||
 | 
			
		||||
Author: Peter Boyle <paboyle@ph.ed.ac.uk>
 | 
			
		||||
Author: Peter Boyle <peterboyle@Peters-MacBook-Pro-2.local>
 | 
			
		||||
Author: paboyle <paboyle@ph.ed.ac.uk>
 | 
			
		||||
Author: Nils Meyer <nils.meyer@ur.de> Regensburg University
 | 
			
		||||
 | 
			
		||||
This program is free software; you can redistribute it and/or modify
 | 
			
		||||
it under the terms of the GNU General Public License as published by
 | 
			
		||||
the Free Software Foundation; either version 2 of the License, or
 | 
			
		||||
(at your option) any later version.
 | 
			
		||||
 | 
			
		||||
This program is distributed in the hope that it will be useful,
 | 
			
		||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
 | 
			
		||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 | 
			
		||||
GNU General Public License for more details.
 | 
			
		||||
 | 
			
		||||
You should have received a copy of the GNU General Public License along
 | 
			
		||||
with this program; if not, write to the Free Software Foundation, Inc.,
 | 
			
		||||
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 | 
			
		||||
 | 
			
		||||
See the full license in the file "LICENSE" in the top level distribution
 | 
			
		||||
directory
 | 
			
		||||
*************************************************************************************/
 | 
			
		||||
/*  END LEGAL */
 | 
			
		||||
#include <Grid/qcd/action/fermion/FermionCore.h>
 | 
			
		||||
#include <Grid/qcd/action/fermion/implementation/WilsonKernelsImplementation.h>
 | 
			
		||||
#include <Grid/qcd/action/fermion/implementation/WilsonKernelsHandImplementation.h>
 | 
			
		||||
 | 
			
		||||
#ifndef AVX512
 | 
			
		||||
#ifndef QPX
 | 
			
		||||
#ifndef A64FX
 | 
			
		||||
#ifndef A64FXFIXEDSIZE
 | 
			
		||||
#include <Grid/qcd/action/fermion/implementation/WilsonKernelsAsmImplementation.h>
 | 
			
		||||
#endif
 | 
			
		||||
#endif
 | 
			
		||||
#endif
 | 
			
		||||
#endif
 | 
			
		||||
 | 
			
		||||
NAMESPACE_BEGIN(Grid);
 | 
			
		||||
 | 
			
		||||
#include "impl.h"
 | 
			
		||||
template class WilsonKernels<IMPLEMENTATION>;
 | 
			
		||||
 | 
			
		||||
NAMESPACE_END(Grid);
 | 
			
		||||
@@ -1 +0,0 @@
 | 
			
		||||
../WilsonKernelsInstantiation.cc.master
 | 
			
		||||
@@ -0,0 +1,51 @@
 | 
			
		||||
/*************************************************************************************
 | 
			
		||||
 | 
			
		||||
Grid physics library, www.github.com/paboyle/Grid
 | 
			
		||||
 | 
			
		||||
Source file: ./lib/qcd/action/fermion/WilsonKernels.cc
 | 
			
		||||
 | 
			
		||||
Copyright (C) 2015, 2020
 | 
			
		||||
 | 
			
		||||
Author: Peter Boyle <paboyle@ph.ed.ac.uk>
 | 
			
		||||
Author: Peter Boyle <peterboyle@Peters-MacBook-Pro-2.local>
 | 
			
		||||
Author: paboyle <paboyle@ph.ed.ac.uk>
 | 
			
		||||
Author: Nils Meyer <nils.meyer@ur.de> Regensburg University
 | 
			
		||||
 | 
			
		||||
This program is free software; you can redistribute it and/or modify
 | 
			
		||||
it under the terms of the GNU General Public License as published by
 | 
			
		||||
the Free Software Foundation; either version 2 of the License, or
 | 
			
		||||
(at your option) any later version.
 | 
			
		||||
 | 
			
		||||
This program is distributed in the hope that it will be useful,
 | 
			
		||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
 | 
			
		||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 | 
			
		||||
GNU General Public License for more details.
 | 
			
		||||
 | 
			
		||||
You should have received a copy of the GNU General Public License along
 | 
			
		||||
with this program; if not, write to the Free Software Foundation, Inc.,
 | 
			
		||||
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 | 
			
		||||
 | 
			
		||||
See the full license in the file "LICENSE" in the top level distribution
 | 
			
		||||
directory
 | 
			
		||||
*************************************************************************************/
 | 
			
		||||
/*  END LEGAL */
 | 
			
		||||
#include <Grid/qcd/action/fermion/FermionCore.h>
 | 
			
		||||
#include <Grid/qcd/action/fermion/implementation/WilsonKernelsImplementation.h>
 | 
			
		||||
#include <Grid/qcd/action/fermion/implementation/WilsonKernelsHandImplementation.h>
 | 
			
		||||
 | 
			
		||||
#ifndef AVX512
 | 
			
		||||
#ifndef QPX
 | 
			
		||||
#ifndef A64FX
 | 
			
		||||
#ifndef A64FXFIXEDSIZE
 | 
			
		||||
#include <Grid/qcd/action/fermion/implementation/WilsonKernelsAsmImplementation.h>
 | 
			
		||||
#endif
 | 
			
		||||
#endif
 | 
			
		||||
#endif
 | 
			
		||||
#endif
 | 
			
		||||
 | 
			
		||||
NAMESPACE_BEGIN(Grid);
 | 
			
		||||
 | 
			
		||||
#include "impl.h"
 | 
			
		||||
template class WilsonKernels<IMPLEMENTATION>;
 | 
			
		||||
 | 
			
		||||
NAMESPACE_END(Grid);
 | 
			
		||||
@@ -1 +0,0 @@
 | 
			
		||||
../WilsonKernelsInstantiation.cc.master
 | 
			
		||||
@@ -0,0 +1,51 @@
 | 
			
		||||
/*************************************************************************************
 | 
			
		||||
 | 
			
		||||
Grid physics library, www.github.com/paboyle/Grid
 | 
			
		||||
 | 
			
		||||
Source file: ./lib/qcd/action/fermion/WilsonKernels.cc
 | 
			
		||||
 | 
			
		||||
Copyright (C) 2015, 2020
 | 
			
		||||
 | 
			
		||||
Author: Peter Boyle <paboyle@ph.ed.ac.uk>
 | 
			
		||||
Author: Peter Boyle <peterboyle@Peters-MacBook-Pro-2.local>
 | 
			
		||||
Author: paboyle <paboyle@ph.ed.ac.uk>
 | 
			
		||||
Author: Nils Meyer <nils.meyer@ur.de> Regensburg University
 | 
			
		||||
 | 
			
		||||
This program is free software; you can redistribute it and/or modify
 | 
			
		||||
it under the terms of the GNU General Public License as published by
 | 
			
		||||
the Free Software Foundation; either version 2 of the License, or
 | 
			
		||||
(at your option) any later version.
 | 
			
		||||
 | 
			
		||||
This program is distributed in the hope that it will be useful,
 | 
			
		||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
 | 
			
		||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 | 
			
		||||
GNU General Public License for more details.
 | 
			
		||||
 | 
			
		||||
You should have received a copy of the GNU General Public License along
 | 
			
		||||
with this program; if not, write to the Free Software Foundation, Inc.,
 | 
			
		||||
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 | 
			
		||||
 | 
			
		||||
See the full license in the file "LICENSE" in the top level distribution
 | 
			
		||||
directory
 | 
			
		||||
*************************************************************************************/
 | 
			
		||||
/*  END LEGAL */
 | 
			
		||||
#include <Grid/qcd/action/fermion/FermionCore.h>
 | 
			
		||||
#include <Grid/qcd/action/fermion/implementation/WilsonKernelsImplementation.h>
 | 
			
		||||
#include <Grid/qcd/action/fermion/implementation/WilsonKernelsHandImplementation.h>
 | 
			
		||||
 | 
			
		||||
#ifndef AVX512
 | 
			
		||||
#ifndef QPX
 | 
			
		||||
#ifndef A64FX
 | 
			
		||||
#ifndef A64FXFIXEDSIZE
 | 
			
		||||
#include <Grid/qcd/action/fermion/implementation/WilsonKernelsAsmImplementation.h>
 | 
			
		||||
#endif
 | 
			
		||||
#endif
 | 
			
		||||
#endif
 | 
			
		||||
#endif
 | 
			
		||||
 | 
			
		||||
NAMESPACE_BEGIN(Grid);
 | 
			
		||||
 | 
			
		||||
#include "impl.h"
 | 
			
		||||
template class WilsonKernels<IMPLEMENTATION>;
 | 
			
		||||
 | 
			
		||||
NAMESPACE_END(Grid);
 | 
			
		||||
@@ -1 +0,0 @@
 | 
			
		||||
../WilsonKernelsInstantiation.cc.master
 | 
			
		||||
@@ -0,0 +1,51 @@
 | 
			
		||||
/*************************************************************************************
 | 
			
		||||
 | 
			
		||||
Grid physics library, www.github.com/paboyle/Grid
 | 
			
		||||
 | 
			
		||||
Source file: ./lib/qcd/action/fermion/WilsonKernels.cc
 | 
			
		||||
 | 
			
		||||
Copyright (C) 2015, 2020
 | 
			
		||||
 | 
			
		||||
Author: Peter Boyle <paboyle@ph.ed.ac.uk>
 | 
			
		||||
Author: Peter Boyle <peterboyle@Peters-MacBook-Pro-2.local>
 | 
			
		||||
Author: paboyle <paboyle@ph.ed.ac.uk>
 | 
			
		||||
Author: Nils Meyer <nils.meyer@ur.de> Regensburg University
 | 
			
		||||
 | 
			
		||||
This program is free software; you can redistribute it and/or modify
 | 
			
		||||
it under the terms of the GNU General Public License as published by
 | 
			
		||||
the Free Software Foundation; either version 2 of the License, or
 | 
			
		||||
(at your option) any later version.
 | 
			
		||||
 | 
			
		||||
This program is distributed in the hope that it will be useful,
 | 
			
		||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
 | 
			
		||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 | 
			
		||||
GNU General Public License for more details.
 | 
			
		||||
 | 
			
		||||
You should have received a copy of the GNU General Public License along
 | 
			
		||||
with this program; if not, write to the Free Software Foundation, Inc.,
 | 
			
		||||
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 | 
			
		||||
 | 
			
		||||
See the full license in the file "LICENSE" in the top level distribution
 | 
			
		||||
directory
 | 
			
		||||
*************************************************************************************/
 | 
			
		||||
/*  END LEGAL */
 | 
			
		||||
#include <Grid/qcd/action/fermion/FermionCore.h>
 | 
			
		||||
#include <Grid/qcd/action/fermion/implementation/WilsonKernelsImplementation.h>
 | 
			
		||||
#include <Grid/qcd/action/fermion/implementation/WilsonKernelsHandImplementation.h>
 | 
			
		||||
 | 
			
		||||
#ifndef AVX512
 | 
			
		||||
#ifndef QPX
 | 
			
		||||
#ifndef A64FX
 | 
			
		||||
#ifndef A64FXFIXEDSIZE
 | 
			
		||||
#include <Grid/qcd/action/fermion/implementation/WilsonKernelsAsmImplementation.h>
 | 
			
		||||
#endif
 | 
			
		||||
#endif
 | 
			
		||||
#endif
 | 
			
		||||
#endif
 | 
			
		||||
 | 
			
		||||
NAMESPACE_BEGIN(Grid);
 | 
			
		||||
 | 
			
		||||
#include "impl.h"
 | 
			
		||||
template class WilsonKernels<IMPLEMENTATION>;
 | 
			
		||||
 | 
			
		||||
NAMESPACE_END(Grid);
 | 
			
		||||
@@ -59,7 +59,7 @@ public:
   }
   static inline GaugeLinkField
   CovShiftIdentityBackward(const GaugeLinkField &Link, int mu) {
-    return Cshift(closure(adj(Link)), mu, -1);
+    return Cshift(adj(Link), mu, -1);
   }
   static inline GaugeLinkField
   CovShiftIdentityForward(const GaugeLinkField &Link, int mu) {
 
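The change above drops the explicit closure() around adj(Link): the adjoint of a link field is an expression-template object, and the updated Cshift path can evaluate it without the caller materialising it by hand. A self-contained toy sketch of the closure idea, with simplified stand-in types rather than Grid's real classes:

// Toy illustration only (stand-in types, not Grid's real classes): adj(U) returns a
// lazy expression; closure() materialises it into a concrete lattice object.
#include <vector>

struct ToyLattice { std::vector<double> data; };

struct AdjExpr { const ToyLattice *src; };                  // lazy view, owns nothing
inline AdjExpr adj(const ToyLattice &l) { return {&l}; }

inline ToyLattice closure(const AdjExpr &e) {               // force evaluation here
  ToyLattice out; out.data = e.src->data;                   // (real code would conjugate-transpose)
  return out;
}

// A shift that only accepts a materialised lattice forces the old call pattern
// Cshift(closure(adj(U)), mu, -1); once the shift can close expressions itself,
// the call site shrinks to Cshift(adj(U), mu, -1) as in the hunk above.
inline ToyLattice Cshift(const ToyLattice &in, int /*mu*/, int /*disp*/) { return in; }

int main() {
  ToyLattice U{{1.0, 2.0}};
  ToyLattice shifted = Cshift(closure(adj(U)), 0, -1);      // old style: close by hand
  return shifted.data.size() == 2 ? 0 : 1;
}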
@@ -301,9 +301,9 @@ public:
       t_P[level] = 0;
     }
 
-    for (int step = 0; step < Params.MDsteps; ++step) {  // MD step
-      int first_step = (step == 0);
-      int last_step = (step == Params.MDsteps - 1);
+    for (int stp = 0; stp < Params.MDsteps; ++stp) {  // MD step
+      int first_step = (stp == 0);
+      int last_step = (stp == Params.MDsteps - 1);
       this->step(U, 0, first_step, last_step);
     }
 
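The loop index is renamed from step to stp, plausibly because the index shadowed the integrator's own step(...) member that is called inside the loop body. A toy sketch of the shadowing the rename sidesteps (illustrative only, not the real Integrator class):

struct ToyIntegrator {
  void step(int) {}                            // member called once per MD step
  void evolve(int MDsteps) {
    for (int step = 0; step < MDsteps; ++step) {
      // step(0);                              // would not compile: local 'step' shadows the member
      this->step(step == 0);                   // works, but only through an explicit this->
    }
    for (int stp = 0; stp < MDsteps; ++stp) {
      step(stp == 0);                          // unambiguous once the index is renamed
    }
  }
};

int main() { ToyIntegrator I; I.evolve(3); return 0; }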
@@ -53,23 +53,21 @@ namespace PeriodicBC {
     return Cshift(tmp,mu,-1);// moves towards positive mu
   }
 
-  template<class gauge,typename Op, typename T1> auto
-    CovShiftForward(const Lattice<gauge> &Link, 
-		    int mu,
-		    const LatticeUnaryExpression<Op,T1> &expr)
-    -> Lattice<decltype(expr.op.func(eval(0, expr.arg1)))> 
+  template<class gauge,class Expr,typename std::enable_if<is_lattice_expr<Expr>::value,void>::type * = nullptr>
+    auto  CovShiftForward(const Lattice<gauge> &Link, 
+			  int mu,
+			  const Expr &expr) -> decltype(closure(expr))
   {
-    Lattice<decltype(expr.op.func(eval(0, expr.arg1)))> arg(expr);
+    auto arg = closure(expr);
     return CovShiftForward(Link,mu,arg);
   }
-  template<class gauge,typename Op, typename T1> auto
-    CovShiftBackward(const Lattice<gauge> &Link, 
-		     int mu,
-		     const LatticeUnaryExpression<Op,T1> &expr)
-    -> Lattice<decltype(expr.op.func(eval(0, expr.arg1)))> 
+  template<class gauge,class Expr,typename std::enable_if<is_lattice_expr<Expr>::value,void>::type * = nullptr>
+    auto  CovShiftBackward(const Lattice<gauge> &Link, 
+			   int mu,
+			   const Expr &expr) -> decltype(closure(expr))
   {
-    Lattice<decltype(expr.op.func(eval(0, expr.arg1)))> arg(expr);
-    return CovShiftForward(Link,mu,arg);
+    auto arg = closure(expr);
+    return CovShiftBackward(Link,mu,arg);
   }
 
 }
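The rewritten overloads accept any lattice expression (not only LatticeUnaryExpression<Op,T1>), close it once, and forward to the plain-lattice overload; the old CovShiftBackward also forwarded to CovShiftForward, which this hunk corrects. A self-contained sketch of the same enable_if dispatch pattern, using toy types in place of Grid's:

#include <type_traits>

// Toy types standing in for Lattice<gauge> and a lattice expression.
struct Field {};
struct Expr  {};

template <class T> struct is_lattice_expr : std::false_type {};
template <>        struct is_lattice_expr<Expr> : std::true_type {};

inline Field closure(const Expr &) { return Field{}; }      // evaluate the expression once

// Concrete overload: operates on a materialised field.
inline Field CovShiftForward(const Field &link, int /*mu*/, const Field & /*field*/) { return link; }

// Expression overload: enabled only for expression types, closes and forwards.
template <class E,
          typename std::enable_if<is_lattice_expr<E>::value, void>::type * = nullptr>
Field CovShiftForward(const Field &link, int mu, const E &expr) {
  auto arg = closure(expr);                                  // one evaluation at the boundary
  return CovShiftForward(link, mu, arg);                     // reuse the concrete overload
}

int main() { Field U; Expr e; CovShiftForward(U, 0, e); return 0; }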
@@ -142,26 +140,23 @@ namespace ConjugateBC {
     return Cshift(tmp,mu,-1);// moves towards positive mu
   }
 
-  template<class gauge,typename Op, typename T1> auto
-    CovShiftForward(const Lattice<gauge> &Link, 
-		    int mu,
-		    const LatticeUnaryExpression<Op,T1> &expr)
-    -> Lattice<decltype(expr.op.func(eval(0, expr.arg1)))> 
+  template<class gauge,class Expr,typename std::enable_if<is_lattice_expr<Expr>::value,void>::type * = nullptr>
+    auto  CovShiftForward(const Lattice<gauge> &Link, 
+			  int mu,
+			  const Expr &expr) -> decltype(closure(expr))
   {
-    Lattice<decltype(expr.op.func(eval(0, expr.arg1)))> arg(expr);
+    auto arg = closure(expr);
     return CovShiftForward(Link,mu,arg);
   }
-  template<class gauge,typename Op, typename T1> auto
-    CovShiftBackward(const Lattice<gauge> &Link, 
-		     int mu,
-		     const LatticeUnaryExpression<Op,T1> &expr)
-    -> Lattice<decltype(expr.op.func(eval(0, expr.arg1)))> 
+  template<class gauge,class Expr,typename std::enable_if<is_lattice_expr<Expr>::value,void>::type * = nullptr>
+    auto  CovShiftBackward(const Lattice<gauge> &Link, 
+			   int mu,
+			   const Expr &expr)  -> decltype(closure(expr))
   {
-    Lattice<decltype(expr.op.func(eval(0, expr.arg1)))> arg(expr);
-    return CovShiftForward(Link,mu,arg);
+    auto arg = closure(expr);
+    return CovShiftBackward(Link,mu,arg);
   }
 
 
 }
 
 
 
@@ -39,7 +39,7 @@ public:
   typedef iSUnAdjointMatrix<ComplexF> AMatrixF;
   typedef iSUnAdjointMatrix<ComplexD> AMatrixD;
 
-  typedef iSUnAdjointMatrix<vComplex> vAMatrix;
+  typedef iSUnAdjointMatrix<vComplex>  vAMatrix;
   typedef iSUnAdjointMatrix<vComplexF> vAMatrixF;
   typedef iSUnAdjointMatrix<vComplexD> vAMatrixD;
 
@@ -47,14 +47,9 @@ public:
   typedef Lattice<vAMatrixF> LatticeAdjMatrixF;
   typedef Lattice<vAMatrixD> LatticeAdjMatrixD;
 
-  typedef Lattice<iVector<iScalar<iMatrix<vComplex, Dimension> >, Nd> >
-  LatticeAdjField;
-  typedef Lattice<iVector<iScalar<iMatrix<vComplexF, Dimension> >, Nd> >
-  LatticeAdjFieldF;
-  typedef Lattice<iVector<iScalar<iMatrix<vComplexD, Dimension> >, Nd> >
-  LatticeAdjFieldD;
-
-
+  typedef Lattice<iVector<iScalar<iMatrix<vComplex, Dimension> >, Nd> >  LatticeAdjField;
+  typedef Lattice<iVector<iScalar<iMatrix<vComplexF, Dimension> >, Nd> > LatticeAdjFieldF;
+  typedef Lattice<iVector<iScalar<iMatrix<vComplexD, Dimension> >, Nd> > LatticeAdjFieldD;
 
 
   template <class cplx>
@@ -128,7 +123,9 @@ public:
   }
 
   // Projects the algebra components a lattice matrix (of dimension ncol*ncol -1 )
-  static void projectOnAlgebra(typename SU<ncolour>::LatticeAlgebraVector &h_out, const LatticeAdjMatrix &in, Real scale = 1.0) {
+  static void projectOnAlgebra(typename SU<ncolour>::LatticeAlgebraVector &h_out, const LatticeAdjMatrix &in, Real scale = 1.0) 
+  {
+    
     conformable(h_out, in);
     h_out = Zero();
     AMatrix iTa;
@@ -136,7 +133,7 @@ public:
 
     for (int a = 0; a < Dimension; a++) {
       generator(a, iTa);
-      auto tmp = real(trace(iTa * in)) * coefficient;
+      LatticeComplex tmp = real(trace(iTa * in)) * coefficient;
       pokeColour(h_out, tmp, a);
     }
   }
 
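Naming the concrete type LatticeComplex instead of auto forces the right-hand side to be evaluated into a real field before pokeColour sees it; with auto, tmp deduces to the lazy expression type, which can keep references to temporaries produced by the trace. A toy sketch of the difference (stand-in types, not Grid's):

// Toy illustration: with expression templates, 'auto' deduces the lazy expression
// type rather than the evaluated field, which is easy to misuse.
struct ScaleExpr;                                            // lazy "field * scalar" view
struct Field {
  double x = 0;
  Field() = default;
  explicit Field(double v) : x(v) {}
  Field(const ScaleExpr &e);                                 // evaluating constructor
};
struct ScaleExpr { const Field &f; double s; };
inline ScaleExpr operator*(const Field &f, double s) { return {f, s}; }
Field::Field(const ScaleExpr &e) : x(e.f.x * e.s) {}

int main() {
  Field in(3.0);
  auto  lazy = in * 2.0;    // deduces ScaleExpr: nothing is computed yet
  Field tmp  = in * 2.0;    // declaring the concrete type forces evaluation now,
                            // which is what "LatticeComplex tmp = ..." does above
  return (tmp.x == 6.0 && lazy.s == 2.0) ? 0 : 1;
}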
@@ -485,7 +485,7 @@ public:
 
         // Up staple    ___ ___
         //             |       |
-        tmp = Cshift(closure(adj(U[nu])), nu, -1);
+        tmp = Cshift(adj(U[nu]), nu, -1);
         tmp = adj(U2[mu]) * tmp;
         tmp = Cshift(tmp, mu, -2);
 
@@ -519,7 +519,7 @@ public:
         //
         //      |  |
 
-        tmp = Cshift(closure(adj(U2[nu])), nu, -2);
+        tmp = Cshift(adj(U2[nu]), nu, -2);
         tmp = Gimpl::CovShiftBackward(U[mu], mu, tmp);
         tmp = U2[nu] * Cshift(tmp, nu, 2);
         Stap += Cshift(tmp, mu, 1);
 

Grid/simd/Fujitsu_A64FX_asm_double.h (new file, 779 lines)
@@ -0,0 +1,779 @@
/*************************************************************************************

    Grid physics library, www.github.com/paboyle/Grid

    Source file: Fujitsu_A64FX_asm_double.h

    Copyright (C) 2020

Author: Nils Meyer <nils.meyer@ur.de>

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License along
    with this program; if not, write to the Free Software Foundation, Inc.,
    51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.

    See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/*  END LEGAL */
#define LOAD_CHIMU(base)               LOAD_CHIMU_INTERLEAVED_A64FXd(base)
#define PREFETCH_CHIMU_L1(A)           PREFETCH_CHIMU_L1_INTERNAL_A64FXd(A)
#define PREFETCH_GAUGE_L1(A)           PREFETCH_GAUGE_L1_INTERNAL_A64FXd(A)
#define PREFETCH_CHIMU_L2(A)           PREFETCH_CHIMU_L2_INTERNAL_A64FXd(A)
#define PREFETCH_GAUGE_L2(A)           PREFETCH_GAUGE_L2_INTERNAL_A64FXd(A)
#define PF_GAUGE(A)
#define PREFETCH_RESULT_L2_STORE(A)    PREFETCH_RESULT_L2_STORE_INTERNAL_A64FXd(A)
#define PREFETCH_RESULT_L1_STORE(A)    PREFETCH_RESULT_L1_STORE_INTERNAL_A64FXd(A)
#define PREFETCH1_CHIMU(A)             PREFETCH_CHIMU_L1(A)
#define PREFETCH_CHIMU(A)              PREFETCH_CHIMU_L1(A)
#define LOCK_GAUGE(A)
#define UNLOCK_GAUGE(A)
#define MASK_REGS                      DECLARATIONS_A64FXd
#define SAVE_RESULT(A,B)               RESULT_A64FXd(A); PREFETCH_RESULT_L2_STORE(B)
#define MULT_2SPIN_1(Dir)              MULT_2SPIN_1_A64FXd(Dir)
#define MULT_2SPIN_2                   MULT_2SPIN_2_A64FXd
#define LOAD_CHI(base)                 LOAD_CHI_A64FXd(base)
#define ADD_RESULT(base,basep)         LOAD_CHIMU(base); ADD_RESULT_INTERNAL_A64FXd; RESULT_A64FXd(base)
#define XP_PROJ                        XP_PROJ_A64FXd
#define YP_PROJ                        YP_PROJ_A64FXd
#define ZP_PROJ                        ZP_PROJ_A64FXd
#define TP_PROJ                        TP_PROJ_A64FXd
#define XM_PROJ                        XM_PROJ_A64FXd
#define YM_PROJ                        YM_PROJ_A64FXd
#define ZM_PROJ                        ZM_PROJ_A64FXd
#define TM_PROJ                        TM_PROJ_A64FXd
#define XP_RECON                       XP_RECON_A64FXd
#define XM_RECON                       XM_RECON_A64FXd
#define XM_RECON_ACCUM                 XM_RECON_ACCUM_A64FXd
#define YM_RECON_ACCUM                 YM_RECON_ACCUM_A64FXd
#define ZM_RECON_ACCUM                 ZM_RECON_ACCUM_A64FXd
#define TM_RECON_ACCUM                 TM_RECON_ACCUM_A64FXd
#define XP_RECON_ACCUM                 XP_RECON_ACCUM_A64FXd
#define YP_RECON_ACCUM                 YP_RECON_ACCUM_A64FXd
#define ZP_RECON_ACCUM                 ZP_RECON_ACCUM_A64FXd
#define TP_RECON_ACCUM                 TP_RECON_ACCUM_A64FXd
#define PERMUTE_DIR0                   0
#define PERMUTE_DIR1                   1
#define PERMUTE_DIR2                   2
#define PERMUTE_DIR3                   3
#define PERMUTE                        PERMUTE_A64FXd;
#define LOAD_TABLE(Dir)                if (Dir == 0) { LOAD_TABLE0; } else if (Dir == 1) { LOAD_TABLE1; } else if (Dir == 2) { LOAD_TABLE2; }
#define MAYBEPERM(Dir,perm)            if (Dir != 3) { if (perm) { PERMUTE; } }
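This table maps the generic macro names used by the hand-written Wilson kernel body onto their A64FX SVE double-precision implementations; LOAD_TABLE/MAYBEPERM select one of the lane-permutation patterns declared in DECLARATIONS_A64FXd below, which the tbl instruction applies when a neighbouring site sits in a different SIMD lane. The sketch below reproduces the first three permutation rows for a 512-bit vector of 8 doubles; the reading of PERMUTE_DIR0/1/2 as progressively finer block swaps is an assumption made for illustration.

#include <array>
#include <cstdio>

// Sketch only: rebuilds the first three lane-permutation patterns that the
// LOAD_TABLE/PERMUTE macros feed to the SVE table lookup for 8 doubles per vector.
std::array<unsigned, 8> lanePermutation(int dir) {
  std::array<unsigned, 8> idx{};
  const unsigned block = 4u >> dir;                         // dir 0 -> 4, 1 -> 2, 2 -> 1
  for (unsigned i = 0; i < 8; ++i)
    idx[i] = (i / block % 2) ? i - block : i + block;       // swap with the partner block
  return idx;
}

int main() {
  for (int dir = 0; dir < 3; ++dir) {
    auto p = lanePermutation(dir);
    std::printf("dir %d:", dir);
    for (unsigned v : p) std::printf(" %u", v);
    std::printf("\n");                                      // dir 0: 4 5 6 7 0 1 2 3, etc.
  }
  return 0;
}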
// DECLARATIONS
#define DECLARATIONS_A64FXd  \
    const uint64_t lut[4][8] = { \
        {4, 5, 6, 7, 0, 1, 2, 3}, \
        {2, 3, 0, 1, 6, 7, 4, 5}, \
        {1, 0, 3, 2, 5, 4, 7, 6}, \
        {0, 1, 2, 4, 5, 6, 7, 8} };\
asm ( \
    "fmov z31.d , 0 \n\t" \
    :  \
    :  \
    : "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31" \
); 

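The direction macros that follow implement the Wilson spin-projection trick: each *_PROJ macro compresses the 4-spinor to two spin components per colour (the fcadd rotations by 90/270 degrees are the multiplications by plus or minus i), so only two components need the SU(3) multiply, and the matching *_RECON macros rebuild and accumulate the full spinor afterwards. A scalar sketch of the +x projection, mirroring the fcadd pattern of XP_PROJ_A64FXd below with the colour index suppressed (for orientation only, not a drop-in routine):

#include <array>
#include <complex>

using Cplx = std::complex<double>;

// chi_0 = psi_0 + i psi_3,  chi_1 = psi_1 + i psi_2  (one colour component shown)
std::array<Cplx, 2> projectXplus(const std::array<Cplx, 4> &psi) {
  const Cplx I(0.0, 1.0);
  return { psi[0] + I * psi[3],
           psi[1] + I * psi[2] };
}

int main() {
  std::array<Cplx, 4> psi{ Cplx(1,0), Cplx(0,1), Cplx(2,0), Cplx(0,2) };
  auto chi = projectXplus(psi);
  return (chi[0] == Cplx(-1,0) && chi[1] == Cplx(0,3)) ? 0 : 1;
}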
// RESULT
 | 
			
		||||
#define RESULT_A64FXd(base)  \
 | 
			
		||||
{ \
 | 
			
		||||
asm ( \
 | 
			
		||||
    "str z0, [%[storeptr], -6, mul vl] \n\t" \
 | 
			
		||||
    "str z1, [%[storeptr], -5, mul vl] \n\t" \
 | 
			
		||||
    "str z2, [%[storeptr], -4, mul vl] \n\t" \
 | 
			
		||||
    "str z3, [%[storeptr], -3, mul vl] \n\t" \
 | 
			
		||||
    "str z4, [%[storeptr], -2, mul vl] \n\t" \
 | 
			
		||||
    "str z5, [%[storeptr], -1, mul vl] \n\t" \
 | 
			
		||||
    "str z6, [%[storeptr], 0, mul vl] \n\t" \
 | 
			
		||||
    "str z7, [%[storeptr], 1, mul vl] \n\t" \
 | 
			
		||||
    "str z8, [%[storeptr], 2, mul vl] \n\t" \
 | 
			
		||||
    "str z9, [%[storeptr], 3, mul vl] \n\t" \
 | 
			
		||||
    "str z10, [%[storeptr], 4, mul vl] \n\t" \
 | 
			
		||||
    "str z11, [%[storeptr], 5, mul vl] \n\t" \
 | 
			
		||||
    :  \
 | 
			
		||||
    : [storeptr] "r" (base + 2 * 3 * 64) \
 | 
			
		||||
    : "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31","memory" \
 | 
			
		||||
); \
 | 
			
		||||
}
 | 
			
		||||
// PREFETCH_CHIMU_L2 (prefetch to L2)
 | 
			
		||||
#define PREFETCH_CHIMU_L2_INTERNAL_A64FXd(base)  \
 | 
			
		||||
{ \
 | 
			
		||||
asm ( \
 | 
			
		||||
    "prfd PLDL2STRM, p5, [%[fetchptr], 0, mul vl] \n\t" \
 | 
			
		||||
    "prfd PLDL2STRM, p5, [%[fetchptr], 4, mul vl] \n\t" \
 | 
			
		||||
    "prfd PLDL2STRM, p5, [%[fetchptr], 8, mul vl] \n\t" \
 | 
			
		||||
    :  \
 | 
			
		||||
    : [fetchptr] "r" (base) \
 | 
			
		||||
    : "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31","memory" \
 | 
			
		||||
); \
 | 
			
		||||
}
 | 
			
		||||
// PREFETCH_CHIMU_L1 (prefetch to L1)
 | 
			
		||||
#define PREFETCH_CHIMU_L1_INTERNAL_A64FXd(base)  \
 | 
			
		||||
{ \
 | 
			
		||||
asm ( \
 | 
			
		||||
    "prfd PLDL1STRM, p5, [%[fetchptr], 0, mul vl] \n\t" \
 | 
			
		||||
    "prfd PLDL1STRM, p5, [%[fetchptr], 4, mul vl] \n\t" \
 | 
			
		||||
    "prfd PLDL1STRM, p5, [%[fetchptr], 8, mul vl] \n\t" \
 | 
			
		||||
    :  \
 | 
			
		||||
    : [fetchptr] "r" (base) \
 | 
			
		||||
    : "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31","memory" \
 | 
			
		||||
); \
 | 
			
		||||
}
 | 
			
		||||
// PREFETCH_GAUGE_L2 (prefetch to L2)
 | 
			
		||||
#define PREFETCH_GAUGE_L2_INTERNAL_A64FXd(A)  \
 | 
			
		||||
{ \
 | 
			
		||||
    const auto & ref(U[sUn](A)); uint64_t baseU = (uint64_t)&ref + 3 * 3 * 64; \
 | 
			
		||||
asm ( \
 | 
			
		||||
    "prfd PLDL2STRM, p5, [%[fetchptr], -4, mul vl] \n\t" \
 | 
			
		||||
    "prfd PLDL2STRM, p5, [%[fetchptr], 0, mul vl] \n\t" \
 | 
			
		||||
    "prfd PLDL2STRM, p5, [%[fetchptr], 4, mul vl] \n\t" \
 | 
			
		||||
    "prfd PLDL2STRM, p5, [%[fetchptr], 8, mul vl] \n\t" \
 | 
			
		||||
    "prfd PLDL2STRM, p5, [%[fetchptr], 12, mul vl] \n\t" \
 | 
			
		||||
    "prfd PLDL2STRM, p5, [%[fetchptr], 16, mul vl] \n\t" \
 | 
			
		||||
    "prfd PLDL2STRM, p5, [%[fetchptr], 20, mul vl] \n\t" \
 | 
			
		||||
    "prfd PLDL2STRM, p5, [%[fetchptr], 24, mul vl] \n\t" \
 | 
			
		||||
    "prfd PLDL2STRM, p5, [%[fetchptr], 28, mul vl] \n\t" \
 | 
			
		||||
    :  \
 | 
			
		||||
    : [fetchptr] "r" (baseU) \
 | 
			
		||||
    : "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31","memory" \
 | 
			
		||||
); \
 | 
			
		||||
}
 | 
			
		||||
// PREFETCH_GAUGE_L1 (prefetch to L1)
 | 
			
		||||
#define PREFETCH_GAUGE_L1_INTERNAL_A64FXd(A)  \
 | 
			
		||||
{ \
 | 
			
		||||
    const auto & ref(U[sU](A)); uint64_t baseU = (uint64_t)&ref; \
 | 
			
		||||
asm ( \
 | 
			
		||||
    "prfd PLDL1STRM, p5, [%[fetchptr], 0, mul vl] \n\t" \
 | 
			
		||||
    "prfd PLDL1STRM, p5, [%[fetchptr], 4, mul vl] \n\t" \
 | 
			
		||||
    "prfd PLDL1STRM, p5, [%[fetchptr], 8, mul vl] \n\t" \
 | 
			
		||||
    :  \
 | 
			
		||||
    : [fetchptr] "r" (baseU) \
 | 
			
		||||
    : "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31","memory" \
 | 
			
		||||
); \
 | 
			
		||||
}
 | 
			
		||||
// LOAD_CHI
 | 
			
		||||
#define LOAD_CHI_A64FXd(base)  \
 | 
			
		||||
{ \
 | 
			
		||||
asm ( \
 | 
			
		||||
    "ldr z12, [%[fetchptr], 0, mul vl] \n\t" \
 | 
			
		||||
    "ldr z13, [%[fetchptr], 1, mul vl] \n\t" \
 | 
			
		||||
    "ldr z14, [%[fetchptr], 2, mul vl] \n\t" \
 | 
			
		||||
    "ldr z15, [%[fetchptr], 3, mul vl] \n\t" \
 | 
			
		||||
    "ldr z16, [%[fetchptr], 4, mul vl] \n\t" \
 | 
			
		||||
    "ldr z17, [%[fetchptr], 5, mul vl] \n\t" \
 | 
			
		||||
    :  \
 | 
			
		||||
    : [fetchptr] "r" (base) \
 | 
			
		||||
    : "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31","memory" \
 | 
			
		||||
); \
 | 
			
		||||
}
 | 
			
		||||
// LOAD_CHIMU
 | 
			
		||||
#define LOAD_CHIMU_INTERLEAVED_A64FXd(base)  \
 | 
			
		||||
{ \
 | 
			
		||||
asm ( \
 | 
			
		||||
    "ptrue p5.d \n\t" \
 | 
			
		||||
    "ldr z12, [%[fetchptr], -6, mul vl] \n\t" \
 | 
			
		||||
    "ldr z21, [%[fetchptr], 3, mul vl] \n\t" \
 | 
			
		||||
    "ldr z15, [%[fetchptr], -3, mul vl] \n\t" \
 | 
			
		||||
    "ldr z18, [%[fetchptr], 0, mul vl] \n\t" \
 | 
			
		||||
    "ldr z13, [%[fetchptr], -5, mul vl] \n\t" \
 | 
			
		||||
    "ldr z22, [%[fetchptr], 4, mul vl] \n\t" \
 | 
			
		||||
    "ldr z16, [%[fetchptr], -2, mul vl] \n\t" \
 | 
			
		||||
    "ldr z19, [%[fetchptr], 1, mul vl] \n\t" \
 | 
			
		||||
    "ldr z14, [%[fetchptr], -4, mul vl] \n\t" \
 | 
			
		||||
    "ldr z23, [%[fetchptr], 5, mul vl] \n\t" \
 | 
			
		||||
    "ldr z17, [%[fetchptr], -1, mul vl] \n\t" \
 | 
			
		||||
    "ldr z20, [%[fetchptr], 2, mul vl] \n\t" \
 | 
			
		||||
    :  \
 | 
			
		||||
    : [fetchptr] "r" (base + 2 * 3 * 64) \
 | 
			
		||||
    : "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31","memory" \
 | 
			
		||||
); \
 | 
			
		||||
}
 | 
			
		||||
// LOAD_CHIMU_0213
 | 
			
		||||
#define LOAD_CHIMU_0213_A64FXd  \
 | 
			
		||||
{ \
 | 
			
		||||
    const SiteSpinor & ref(in[offset]); \
 | 
			
		||||
asm ( \
 | 
			
		||||
    "ptrue p5.d \n\t" \
 | 
			
		||||
    "ldr z12, [%[fetchptr], -6, mul vl] \n\t" \
 | 
			
		||||
    "ldr z18, [%[fetchptr], 0, mul vl] \n\t" \
 | 
			
		||||
    "ldr z13, [%[fetchptr], -5, mul vl] \n\t" \
 | 
			
		||||
    "ldr z19, [%[fetchptr], 1, mul vl] \n\t" \
 | 
			
		||||
    "ldr z14, [%[fetchptr], -4, mul vl] \n\t" \
 | 
			
		||||
    "ldr z20, [%[fetchptr], 2, mul vl] \n\t" \
 | 
			
		||||
    "ldr z15, [%[fetchptr], -3, mul vl] \n\t" \
 | 
			
		||||
    "ldr z21, [%[fetchptr], 3, mul vl] \n\t" \
 | 
			
		||||
    "ldr z16, [%[fetchptr], -2, mul vl] \n\t" \
 | 
			
		||||
    "ldr z22, [%[fetchptr], 4, mul vl] \n\t" \
 | 
			
		||||
    "ldr z17, [%[fetchptr], -1, mul vl] \n\t" \
 | 
			
		||||
    "ldr z23, [%[fetchptr], 5, mul vl] \n\t" \
 | 
			
		||||
    :  \
 | 
			
		||||
    : [fetchptr] "r" (&ref[2][0]) \
 | 
			
		||||
    : "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31","memory" \
 | 
			
		||||
); \
 | 
			
		||||
}
 | 
			
		||||
// LOAD_CHIMU_0312
 | 
			
		||||
#define LOAD_CHIMU_0312_A64FXd  \
 | 
			
		||||
{ \
 | 
			
		||||
    const SiteSpinor & ref(in[offset]); \
 | 
			
		||||
asm ( \
 | 
			
		||||
    "ptrue p5.d \n\t" \
 | 
			
		||||
    "ldr z12, [%[fetchptr], -6, mul vl] \n\t" \
 | 
			
		||||
    "ldr z21, [%[fetchptr], 3, mul vl] \n\t" \
 | 
			
		||||
    "ldr z13, [%[fetchptr], -5, mul vl] \n\t" \
 | 
			
		||||
    "ldr z22, [%[fetchptr], 4, mul vl] \n\t" \
 | 
			
		||||
    "ldr z14, [%[fetchptr], -4, mul vl] \n\t" \
 | 
			
		||||
    "ldr z23, [%[fetchptr], 5, mul vl] \n\t" \
 | 
			
		||||
    "ldr z15, [%[fetchptr], -3, mul vl] \n\t" \
 | 
			
		||||
    "ldr z18, [%[fetchptr], 0, mul vl] \n\t" \
 | 
			
		||||
    "ldr z16, [%[fetchptr], -2, mul vl] \n\t" \
 | 
			
		||||
    "ldr z19, [%[fetchptr], 1, mul vl] \n\t" \
 | 
			
		||||
    "ldr z17, [%[fetchptr], -1, mul vl] \n\t" \
 | 
			
		||||
    "ldr z20, [%[fetchptr], 2, mul vl] \n\t" \
 | 
			
		||||
    :  \
 | 
			
		||||
    : [fetchptr] "r" (&ref[2][0]) \
 | 
			
		||||
    : "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31","memory" \
 | 
			
		||||
); \
 | 
			
		||||
}
 | 
			
		||||
// LOAD_TABLE0
 | 
			
		||||
#define LOAD_TABLE0  \
 | 
			
		||||
asm ( \
 | 
			
		||||
    "ldr z30, [%[tableptr], %[index], mul vl] \n\t" \
 | 
			
		||||
    :  \
 | 
			
		||||
    : [tableptr] "r" (&lut[0]),[index] "i" (0) \
 | 
			
		||||
    : "memory","cc","p5","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31" \
 | 
			
		||||
); 
 | 
			
		||||
 | 
			
		||||
// LOAD_TABLE1
 | 
			
		||||
#define LOAD_TABLE1  \
 | 
			
		||||
asm ( \
 | 
			
		||||
    "ldr z30, [%[tableptr], %[index], mul vl] \n\t" \
 | 
			
		||||
    :  \
 | 
			
		||||
    : [tableptr] "r" (&lut[0]),[index] "i" (1) \
 | 
			
		||||
    : "memory","cc","p5","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31" \
 | 
			
		||||
); 
 | 
			
		||||
 | 
			
		||||
// LOAD_TABLE2
 | 
			
		||||
#define LOAD_TABLE2  \
 | 
			
		||||
asm ( \
 | 
			
		||||
    "ldr z30, [%[tableptr], %[index], mul vl] \n\t" \
 | 
			
		||||
    :  \
 | 
			
		||||
    : [tableptr] "r" (&lut[0]),[index] "i" (2) \
 | 
			
		||||
    : "memory","cc","p5","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31" \
 | 
			
		||||
); 
 | 
			
		||||
 | 
			
		||||
// LOAD_TABLE3
 | 
			
		||||
#define LOAD_TABLE3  \
 | 
			
		||||
asm ( \
 | 
			
		||||
    "ldr z30, [%[tableptr], %[index], mul vl] \n\t" \
 | 
			
		||||
    :  \
 | 
			
		||||
    : [tableptr] "r" (&lut[0]),[index] "i" (3) \
 | 
			
		||||
    : "memory","cc","p5","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31" \
 | 
			
		||||
); 
 | 
			
		||||
 | 
			
		||||
// PERMUTE
 | 
			
		||||
#define PERMUTE_A64FXd  \
 | 
			
		||||
asm ( \
 | 
			
		||||
    "tbl z12.d, { z12.d }, z30.d \n\t"  \
 | 
			
		||||
    "tbl z13.d, { z13.d }, z30.d \n\t"  \
 | 
			
		||||
    "tbl z14.d, { z14.d }, z30.d \n\t"  \
 | 
			
		||||
    "tbl z15.d, { z15.d }, z30.d \n\t"  \
 | 
			
		||||
    "tbl z16.d, { z16.d }, z30.d \n\t"  \
 | 
			
		||||
    "tbl z17.d, { z17.d }, z30.d \n\t"  \
 | 
			
		||||
    :  \
 | 
			
		||||
    :  \
 | 
			
		||||
    : "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31" \
 | 
			
		||||
); 
 | 
			
		||||
 | 
			
		||||
// LOAD_GAUGE
 | 
			
		||||
#define LOAD_GAUGE  \
 | 
			
		||||
    const auto & ref(U[sU](A)); uint64_t baseU = (uint64_t)&ref; \
 | 
			
		||||
{ \
 | 
			
		||||
asm ( \
 | 
			
		||||
    "ptrue p5.d \n\t" \
 | 
			
		||||
    "ldr z24, [%[fetchptr], -6, mul vl] \n\t" \
 | 
			
		||||
    "ldr z25, [%[fetchptr], -3, mul vl] \n\t" \
 | 
			
		||||
    "ldr z26, [%[fetchptr], 0, mul vl] \n\t" \
 | 
			
		||||
    "ldr z27, [%[fetchptr], -5, mul vl] \n\t" \
 | 
			
		||||
    "ldr z28, [%[fetchptr], -2, mul vl] \n\t" \
 | 
			
		||||
    "ldr z29, [%[fetchptr], 1, mul vl] \n\t" \
 | 
			
		||||
    :  \
 | 
			
		||||
    : [fetchptr] "r" (baseU + 2 * 3 * 64) \
 | 
			
		||||
    : "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31","memory" \
 | 
			
		||||
); \
 | 
			
		||||
}
 | 
			
		||||
// MULT_2SPIN
 | 
			
		||||
#define MULT_2SPIN_1_A64FXd(A)  \
 | 
			
		||||
{ \
 | 
			
		||||
    const auto & ref(U[sU](A)); uint64_t baseU = (uint64_t)&ref; \
 | 
			
		||||
asm ( \
 | 
			
		||||
    "ldr z24, [%[fetchptr], -6, mul vl] \n\t" \
 | 
			
		||||
    "ldr z25, [%[fetchptr], -3, mul vl] \n\t" \
 | 
			
		||||
    "ldr z26, [%[fetchptr], 0, mul vl] \n\t" \
 | 
			
		||||
    "ldr z27, [%[fetchptr], -5, mul vl] \n\t" \
 | 
			
		||||
    "ldr z28, [%[fetchptr], -2, mul vl] \n\t" \
 | 
			
		||||
    "ldr z29, [%[fetchptr], 1, mul vl] \n\t" \
 | 
			
		||||
    "movprfx z18.d, p5/m, z31.d \n\t" \
 | 
			
		||||
    "fcmla z18.d, p5/m, z24.d, z12.d, 0 \n\t" \
 | 
			
		||||
    "movprfx z21.d, p5/m, z31.d \n\t" \
 | 
			
		||||
    "fcmla z21.d, p5/m, z24.d, z15.d, 0 \n\t" \
 | 
			
		||||
    "movprfx z19.d, p5/m, z31.d \n\t" \
 | 
			
		||||
    "fcmla z19.d, p5/m, z25.d, z12.d, 0 \n\t" \
 | 
			
		||||
    "movprfx z22.d, p5/m, z31.d \n\t" \
 | 
			
		||||
    "fcmla z22.d, p5/m, z25.d, z15.d, 0 \n\t" \
 | 
			
		||||
    "movprfx z20.d, p5/m, z31.d \n\t" \
 | 
			
		||||
    "fcmla z20.d, p5/m, z26.d, z12.d, 0 \n\t" \
 | 
			
		||||
    "movprfx z23.d, p5/m, z31.d \n\t" \
 | 
			
		||||
    "fcmla z23.d, p5/m, z26.d, z15.d, 0 \n\t" \
 | 
			
		||||
    "fcmla z18.d, p5/m, z24.d, z12.d, 90 \n\t" \
 | 
			
		||||
    "fcmla z21.d, p5/m, z24.d, z15.d, 90 \n\t" \
 | 
			
		||||
    "fcmla z19.d, p5/m, z25.d, z12.d, 90 \n\t" \
 | 
			
		||||
    "fcmla z22.d, p5/m, z25.d, z15.d, 90 \n\t" \
 | 
			
		||||
    "fcmla z20.d, p5/m, z26.d, z12.d, 90 \n\t" \
 | 
			
		||||
    "fcmla z23.d, p5/m, z26.d, z15.d, 90 \n\t" \
 | 
			
		||||
    "ldr z24, [%[fetchptr], -4, mul vl] \n\t" \
 | 
			
		||||
    "ldr z25, [%[fetchptr], -1, mul vl] \n\t" \
 | 
			
		||||
    "ldr z26, [%[fetchptr], 2, mul vl] \n\t" \
 | 
			
		||||
    :  \
 | 
			
		||||
    : [fetchptr] "r" (baseU + 2 * 3 * 64) \
 | 
			
		||||
    : "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31","memory" \
 | 
			
		||||
); \
 | 
			
		||||
}
 | 
			
		||||
// MULT_2SPIN_BACKEND
 | 
			
		||||
#define MULT_2SPIN_2_A64FXd  \
 | 
			
		||||
{ \
 | 
			
		||||
asm ( \
 | 
			
		||||
    "fcmla z18.d, p5/m, z27.d, z13.d, 0 \n\t" \
 | 
			
		||||
    "fcmla z21.d, p5/m, z27.d, z16.d, 0 \n\t" \
 | 
			
		||||
    "fcmla z19.d, p5/m, z28.d, z13.d, 0 \n\t" \
 | 
			
		||||
    "fcmla z22.d, p5/m, z28.d, z16.d, 0 \n\t" \
 | 
			
		||||
    "fcmla z20.d, p5/m, z29.d, z13.d, 0 \n\t" \
 | 
			
		||||
    "fcmla z23.d, p5/m, z29.d, z16.d, 0 \n\t" \
 | 
			
		||||
    "fcmla z18.d, p5/m, z27.d, z13.d, 90 \n\t" \
 | 
			
		||||
    "fcmla z21.d, p5/m, z27.d, z16.d, 90 \n\t" \
 | 
			
		||||
    "fcmla z19.d, p5/m, z28.d, z13.d, 90 \n\t" \
 | 
			
		||||
    "fcmla z22.d, p5/m, z28.d, z16.d, 90 \n\t" \
 | 
			
		||||
    "fcmla z20.d, p5/m, z29.d, z13.d, 90 \n\t" \
 | 
			
		||||
    "fcmla z23.d, p5/m, z29.d, z16.d, 90 \n\t" \
 | 
			
		||||
    "fcmla z18.d, p5/m, z24.d, z14.d, 0 \n\t" \
 | 
			
		||||
    "fcmla z21.d, p5/m, z24.d, z17.d, 0 \n\t" \
 | 
			
		||||
    "fcmla z19.d, p5/m, z25.d, z14.d, 0 \n\t" \
 | 
			
		||||
    "fcmla z22.d, p5/m, z25.d, z17.d, 0 \n\t" \
 | 
			
		||||
    "fcmla z20.d, p5/m, z26.d, z14.d, 0 \n\t" \
 | 
			
		||||
    "fcmla z23.d, p5/m, z26.d, z17.d, 0 \n\t" \
 | 
			
		||||
    "fcmla z18.d, p5/m, z24.d, z14.d, 90 \n\t" \
 | 
			
		||||
    "fcmla z21.d, p5/m, z24.d, z17.d, 90 \n\t" \
 | 
			
		||||
    "fcmla z19.d, p5/m, z25.d, z14.d, 90 \n\t" \
 | 
			
		||||
    "fcmla z22.d, p5/m, z25.d, z17.d, 90 \n\t" \
 | 
			
		||||
    "fcmla z20.d, p5/m, z26.d, z14.d, 90 \n\t" \
 | 
			
		||||
    "fcmla z23.d, p5/m, z26.d, z17.d, 90 \n\t" \
 | 
			
		||||
    :  \
 | 
			
		||||
    :  \
 | 
			
		||||
    : "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31" \
 | 
			
		||||
); \
 | 
			
		||||
}
 | 
			
		||||
// XP_PROJ
 | 
			
		||||
#define XP_PROJ_A64FXd  \
 | 
			
		||||
{ \
 | 
			
		||||
asm ( \
 | 
			
		||||
    "fcadd z12.d, p5/m, z12.d, z21.d, 90 \n\t" \
 | 
			
		||||
    "fcadd z13.d, p5/m, z13.d, z22.d, 90 \n\t" \
 | 
			
		||||
    "fcadd z14.d, p5/m, z14.d, z23.d, 90 \n\t" \
 | 
			
		||||
    "fcadd z15.d, p5/m, z15.d, z18.d, 90 \n\t" \
 | 
			
		||||
    "fcadd z16.d, p5/m, z16.d, z19.d, 90 \n\t" \
 | 
			
		||||
    "fcadd z17.d, p5/m, z17.d, z20.d, 90 \n\t" \
 | 
			
		||||
    :  \
 | 
			
		||||
    :  \
 | 
			
		||||
    : "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31" \
 | 
			
		||||
); \
 | 
			
		||||
}
 | 
			
		||||
// XP_RECON
 | 
			
		||||
#define XP_RECON_A64FXd  \
 | 
			
		||||
asm ( \
 | 
			
		||||
    "movprfx z6.d, p5/m, z31.d \n\t" \
 | 
			
		||||
    "fcadd z6.d, p5/m, z6.d, z21.d, 270 \n\t" \
 | 
			
		||||
    "movprfx z7.d, p5/m, z31.d \n\t" \
 | 
			
		||||
    "fcadd z7.d, p5/m, z7.d, z22.d, 270 \n\t" \
 | 
			
		||||
    "movprfx z8.d, p5/m, z31.d \n\t" \
 | 
			
		||||
    "fcadd z8.d, p5/m, z8.d, z23.d, 270 \n\t" \
 | 
			
		||||
    "movprfx z9.d, p5/m, z31.d \n\t" \
 | 
			
		||||
    "fcadd z9.d, p5/m, z9.d, z18.d, 270 \n\t" \
 | 
			
		||||
    "movprfx z10.d, p5/m, z31.d \n\t" \
 | 
			
		||||
    "fcadd z10.d, p5/m, z10.d, z19.d, 270 \n\t" \
 | 
			
		||||
    "movprfx z11.d, p5/m, z31.d \n\t" \
 | 
			
		||||
    "fcadd z11.d, p5/m, z11.d, z20.d, 270 \n\t" \
 | 
			
		||||
    "mov z0.d, p5/m, z18.d \n\t" \
 | 
			
		||||
    "mov z1.d, p5/m, z19.d \n\t" \
 | 
			
		||||
    "mov z2.d, p5/m, z20.d \n\t" \
 | 
			
		||||
    "mov z3.d, p5/m, z21.d \n\t" \
 | 
			
		||||
    "mov z4.d, p5/m, z22.d \n\t" \
 | 
			
		||||
    "mov z5.d, p5/m, z23.d \n\t" \
 | 
			
		||||
    :  \
 | 
			
		||||
    :  \
 | 
			
		||||
    : "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31" \
 | 
			
		||||
); 
 | 
			
		||||
 | 
			
		||||
// XP_RECON_ACCUM
 | 
			
		||||
#define XP_RECON_ACCUM_A64FXd  \
 | 
			
		||||
asm ( \
 | 
			
		||||
    "fcadd z9.d, p5/m, z9.d, z18.d, 270 \n\t" \
 | 
			
		||||
    "fadd z0.d, p5/m, z0.d, z18.d \n\t"  \
 | 
			
		||||
    "fcadd z10.d, p5/m, z10.d, z19.d, 270 \n\t" \
 | 
			
		||||
    "fadd z1.d, p5/m, z1.d, z19.d \n\t"  \
 | 
			
		||||
    "fcadd z11.d, p5/m, z11.d, z20.d, 270 \n\t" \
 | 
			
		||||
    "fadd z2.d, p5/m, z2.d, z20.d \n\t"  \
 | 
			
		||||
    "fcadd z6.d, p5/m, z6.d, z21.d, 270 \n\t" \
 | 
			
		||||
    "fadd z3.d, p5/m, z3.d, z21.d \n\t"  \
 | 
			
		||||
    "fcadd z7.d, p5/m, z7.d, z22.d, 270 \n\t" \
 | 
			
		||||
    "fadd z4.d, p5/m, z4.d, z22.d \n\t"  \
 | 
			
		||||
    "fcadd z8.d, p5/m, z8.d, z23.d, 270 \n\t" \
 | 
			
		||||
    "fadd z5.d, p5/m, z5.d, z23.d \n\t"  \
 | 
			
		||||
    :  \
 | 
			
		||||
    :  \
 | 
			
		||||
    : "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31" \
 | 
			
		||||
); 
 | 
			
		||||
 | 
			
		||||
// YP_PROJ
 | 
			
		||||
#define YP_PROJ_A64FXd  \
 | 
			
		||||
{ \
 | 
			
		||||
asm ( \
 | 
			
		||||
    "fsub z12.d, p5/m, z12.d, z21.d \n\t" \
 | 
			
		||||
    "fsub z13.d, p5/m, z13.d, z22.d \n\t" \
 | 
			
		||||
    "fsub z14.d, p5/m, z14.d, z23.d \n\t" \
 | 
			
		||||
    "fadd z15.d, p5/m, z15.d, z18.d \n\t"  \
 | 
			
		||||
    "fadd z16.d, p5/m, z16.d, z19.d \n\t"  \
 | 
			
		||||
    "fadd z17.d, p5/m, z17.d, z20.d \n\t"  \
 | 
			
		||||
    :  \
 | 
			
		||||
    :  \
 | 
			
		||||
    : "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31" \
 | 
			
		||||
); \
 | 
			
		||||
}
 | 
			
		||||
// ZP_PROJ
 | 
			
		||||
#define ZP_PROJ_A64FXd  \
 | 
			
		||||
{ \
 | 
			
		||||
asm ( \
 | 
			
		||||
    "fcadd z12.d, p5/m, z12.d, z18.d, 90 \n\t" \
 | 
			
		||||
    "fcadd z13.d, p5/m, z13.d, z19.d, 90 \n\t" \
 | 
			
		||||
    "fcadd z14.d, p5/m, z14.d, z20.d, 90 \n\t" \
 | 
			
		||||
    "fcadd z15.d, p5/m, z15.d, z21.d, 270 \n\t" \
 | 
			
		||||
    "fcadd z16.d, p5/m, z16.d, z22.d, 270 \n\t" \
 | 
			
		||||
    "fcadd z17.d, p5/m, z17.d, z23.d, 270 \n\t" \
 | 
			
		||||
    :  \
 | 
			
		||||
    :  \
 | 
			
		||||
    : "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31" \
 | 
			
		||||
); \
 | 
			
		||||
}
 | 
			
		||||
// TP_PROJ
 | 
			
		||||
#define TP_PROJ_A64FXd  \
 | 
			
		||||
{ \
 | 
			
		||||
asm ( \
 | 
			
		||||
    "fadd z12.d, p5/m, z12.d, z18.d \n\t"  \
 | 
			
		||||
    "fadd z13.d, p5/m, z13.d, z19.d \n\t"  \
 | 
			
		||||
    "fadd z14.d, p5/m, z14.d, z20.d \n\t"  \
 | 
			
		||||
    "fadd z15.d, p5/m, z15.d, z21.d \n\t"  \
 | 
			
		||||
    "fadd z16.d, p5/m, z16.d, z22.d \n\t"  \
 | 
			
		||||
    "fadd z17.d, p5/m, z17.d, z23.d \n\t"  \
 | 
			
		||||
    :  \
 | 
			
		||||
    :  \
 | 
			
		||||
    : "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31" \
 | 
			
		||||
); \
 | 
			
		||||
}
 | 
			
		||||
// XM_PROJ
 | 
			
		||||
#define XM_PROJ_A64FXd  \
 | 
			
		||||
{ \
 | 
			
		||||
asm ( \
 | 
			
		||||
    "fcadd z12.d, p5/m, z12.d, z21.d, 270 \n\t" \
 | 
			
		||||
    "fcadd z13.d, p5/m, z13.d, z22.d, 270 \n\t" \
 | 
			
		||||
    "fcadd z14.d, p5/m, z14.d, z23.d, 270 \n\t" \
 | 
			
		||||
    "fcadd z15.d, p5/m, z15.d, z18.d, 270 \n\t" \
 | 
			
		||||
    "fcadd z16.d, p5/m, z16.d, z19.d, 270 \n\t" \
 | 
			
		||||
    "fcadd z17.d, p5/m, z17.d, z20.d, 270 \n\t" \
 | 
			
		||||
    :  \
 | 
			
		||||
    :  \
 | 
			
		||||
    : "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31" \
 | 
			
		||||
); \
 | 
			
		||||
}
 | 
			
		||||
// XM_RECON
 | 
			
		||||
#define XM_RECON_A64FXd  \
 | 
			
		||||
asm ( \
 | 
			
		||||
    "movprfx z6.d, p5/m, z31.d \n\t" \
 | 
			
		||||
    "fcadd z6.d, p5/m, z6.d, z21.d, 90 \n\t" \
 | 
			
		||||
    "movprfx z7.d, p5/m, z31.d \n\t" \
 | 
			
		||||
    "fcadd z7.d, p5/m, z7.d, z22.d, 90 \n\t" \
 | 
			
		||||
    "movprfx z8.d, p5/m, z31.d \n\t" \
 | 
			
		||||
    "fcadd z8.d, p5/m, z8.d, z23.d, 90 \n\t" \
 | 
			
		||||
    "movprfx z9.d, p5/m, z31.d \n\t" \
 | 
			
		||||
    "fcadd z9.d, p5/m, z9.d, z18.d, 90 \n\t" \
 | 
			
		||||
    "movprfx z10.d, p5/m, z31.d \n\t" \
 | 
			
		||||
    "fcadd z10.d, p5/m, z10.d, z19.d, 90 \n\t" \
 | 
			
		||||
    "movprfx z11.d, p5/m, z31.d \n\t" \
 | 
			
		||||
    "fcadd z11.d, p5/m, z11.d, z20.d, 90 \n\t" \
 | 
			
		||||
    "mov z0.d, p5/m, z18.d \n\t" \
 | 
			
		||||
    "mov z1.d, p5/m, z19.d \n\t" \
 | 
			
		||||
    "mov z2.d, p5/m, z20.d \n\t" \
 | 
			
		||||
    "mov z3.d, p5/m, z21.d \n\t" \
 | 
			
		||||
    "mov z4.d, p5/m, z22.d \n\t" \
 | 
			
		||||
    "mov z5.d, p5/m, z23.d \n\t" \
 | 
			
		||||
    :  \
 | 
			
		||||
    :  \
 | 
			
		||||
    : "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31" \
 | 
			
		||||
); 
 | 
			
		||||
 | 
			
		||||
// YM_PROJ
 | 
			
		||||
#define YM_PROJ_A64FXd  \
 | 
			
		||||
{ \
 | 
			
		||||
asm ( \
 | 
			
		||||
    "fadd z12.d, p5/m, z12.d, z21.d \n\t"  \
 | 
			
		||||
    "fadd z13.d, p5/m, z13.d, z22.d \n\t"  \
 | 
			
		||||
    "fadd z14.d, p5/m, z14.d, z23.d \n\t"  \
 | 
			
		||||
    "fsub z15.d, p5/m, z15.d, z18.d \n\t" \
 | 
			
		||||
    "fsub z16.d, p5/m, z16.d, z19.d \n\t" \
 | 
			
		||||
    "fsub z17.d, p5/m, z17.d, z20.d \n\t" \
 | 
			
		||||
    :  \
 | 
			
		||||
    :  \
 | 
			
		||||
    : "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31" \
 | 
			
		||||
); \
 | 
			
		||||
}
 | 
			
		||||
// ZM_PROJ
 | 
			
		||||
#define ZM_PROJ_A64FXd  \
 | 
			
		||||
{ \
 | 
			
		||||
asm ( \
 | 
			
		||||
    "fcadd z12.d, p5/m, z12.d, z18.d, 270 \n\t" \
 | 
			
		||||
    "fcadd z13.d, p5/m, z13.d, z19.d, 270 \n\t" \
 | 
			
		||||
    "fcadd z14.d, p5/m, z14.d, z20.d, 270 \n\t" \
 | 
			
		||||
    "fcadd z15.d, p5/m, z15.d, z21.d, 90 \n\t" \
 | 
			
		||||
    "fcadd z16.d, p5/m, z16.d, z22.d, 90 \n\t" \
 | 
			
		||||
    "fcadd z17.d, p5/m, z17.d, z23.d, 90 \n\t" \
 | 
			
		||||
    :  \
 | 
			
		||||
    :  \
 | 
			
		||||
    : "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31" \
 | 
			
		||||
); \
 | 
			
		||||
}
 | 
			
		||||
// TM_PROJ
 | 
			
		||||
#define TM_PROJ_A64FXd  \
 | 
			
		||||
{ \
 | 
			
		||||
asm ( \
 | 
			
		||||
    "ptrue p5.d \n\t" \
 | 
			
		||||
    "fsub z12.d, p5/m, z12.d, z18.d \n\t" \
 | 
			
		||||
    "fsub z13.d, p5/m, z13.d, z19.d \n\t" \
 | 
			
		||||
    "fsub z14.d, p5/m, z14.d, z20.d \n\t" \
 | 
			
		||||
    "fsub z15.d, p5/m, z15.d, z21.d \n\t" \
 | 
			
		||||
    "fsub z16.d, p5/m, z16.d, z22.d \n\t" \
 | 
			
		||||
    "fsub z17.d, p5/m, z17.d, z23.d \n\t" \
 | 
			
		||||
    :  \
 | 
			
		||||
    :  \
 | 
			
		||||
    : "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31" \
 | 
			
		||||
); \
 | 
			
		||||
}
 | 
			
		||||
// XM_RECON_ACCUM
 | 
			
		||||
#define XM_RECON_ACCUM_A64FXd  \
 | 
			
		||||
asm ( \
 | 
			
		||||
    "fcadd z9.d, p5/m, z9.d, z18.d, 90 \n\t" \
 | 
			
		||||
    "fcadd z10.d, p5/m, z10.d, z19.d, 90 \n\t" \
 | 
			
		||||
    "fcadd z11.d, p5/m, z11.d, z20.d, 90 \n\t" \
 | 
			
		||||
    "fcadd z6.d, p5/m, z6.d, z21.d, 90 \n\t" \
 | 
			
		||||
    "fcadd z7.d, p5/m, z7.d, z22.d, 90 \n\t" \
 | 
			
		||||
    "fcadd z8.d, p5/m, z8.d, z23.d, 90 \n\t" \
 | 
			
		||||
    "fadd z0.d, p5/m, z0.d, z18.d \n\t"  \
 | 
			
		||||
    "fadd z1.d, p5/m, z1.d, z19.d \n\t"  \
 | 
			
		||||
    "fadd z2.d, p5/m, z2.d, z20.d \n\t"  \
 | 
			
		||||
    "fadd z3.d, p5/m, z3.d, z21.d \n\t"  \
 | 
			
		||||
    "fadd z4.d, p5/m, z4.d, z22.d \n\t"  \
 | 
			
		||||
    "fadd z5.d, p5/m, z5.d, z23.d \n\t"  \
 | 
			
		||||
    :  \
 | 
			
		||||
    :  \
 | 
			
		||||
    : "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31" \
 | 
			
		||||
); 
 | 
			
		||||
 | 
			
		||||
// YP_RECON_ACCUM
 | 
			
		||||
#define YP_RECON_ACCUM_A64FXd  \
 | 
			
		||||
asm ( \
 | 
			
		||||
    "fadd z0.d, p5/m, z0.d, z18.d \n\t"  \
 | 
			
		||||
    "fsub z9.d, p5/m, z9.d, z18.d \n\t" \
 | 
			
		||||
    "fadd z1.d, p5/m, z1.d, z19.d \n\t"  \
 | 
			
		||||
    "fsub z10.d, p5/m, z10.d, z19.d \n\t" \
 | 
			
		||||
    "fadd z2.d, p5/m, z2.d, z20.d \n\t"  \
 | 
			
		||||
    "fsub z11.d, p5/m, z11.d, z20.d \n\t" \
 | 
			
		||||
    "fadd z3.d, p5/m, z3.d, z21.d \n\t"  \
 | 
			
		||||
    "fadd z6.d, p5/m, z6.d, z21.d \n\t"  \
 | 
			
		||||
    "fadd z4.d, p5/m, z4.d, z22.d \n\t"  \
 | 
			
		||||
    "fadd z7.d, p5/m, z7.d, z22.d \n\t"  \
 | 
			
		||||
    "fadd z5.d, p5/m, z5.d, z23.d \n\t"  \
 | 
			
		||||
    "fadd z8.d, p5/m, z8.d, z23.d \n\t"  \
 | 
			
		||||
    :  \
 | 
			
		||||
    :  \
 | 
			
		||||
    : "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31" \
 | 
			
		||||
); 
 | 
			
		||||
 | 
			
		||||
// YM_RECON_ACCUM
 | 
			
		||||
#define YM_RECON_ACCUM_A64FXd  \
 | 
			
		||||
asm ( \
 | 
			
		||||
    "fadd z0.d, p5/m, z0.d, z18.d \n\t"  \
 | 
			
		||||
    "fadd z9.d, p5/m, z9.d, z18.d \n\t"  \
 | 
			
		||||
    "fadd z1.d, p5/m, z1.d, z19.d \n\t"  \
 | 
			
		||||
    "fadd z10.d, p5/m, z10.d, z19.d \n\t"  \
 | 
			
		||||
    "fadd z2.d, p5/m, z2.d, z20.d \n\t"  \
 | 
			
		||||
    "fadd z11.d, p5/m, z11.d, z20.d \n\t"  \
 | 
			
		||||
    "fadd z3.d, p5/m, z3.d, z21.d \n\t"  \
 | 
			
		||||
    "fsub z6.d, p5/m, z6.d, z21.d \n\t" \
 | 
			
		||||
    "fadd z4.d, p5/m, z4.d, z22.d \n\t"  \
 | 
			
		||||
    "fsub z7.d, p5/m, z7.d, z22.d \n\t" \
 | 
			
		||||
    "fadd z5.d, p5/m, z5.d, z23.d \n\t"  \
 | 
			
		||||
    "fsub z8.d, p5/m, z8.d, z23.d \n\t" \
 | 
			
		||||
    :  \
 | 
			
		||||
    :  \
 | 
			
		||||
    : "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31" \
 | 
			
		||||
); 
 | 
			
		||||
 | 
			
		||||
// ZP_RECON_ACCUM
 | 
			
		||||
#define ZP_RECON_ACCUM_A64FXd  \
 | 
			
		||||
asm ( \
 | 
			
		||||
    "fcadd z6.d, p5/m, z6.d, z18.d, 270 \n\t" \
 | 
			
		||||
    "fadd z0.d, p5/m, z0.d, z18.d \n\t"  \
 | 
			
		||||
    "fcadd z7.d, p5/m, z7.d, z19.d, 270 \n\t" \
 | 
			
		||||
    "fadd z1.d, p5/m, z1.d, z19.d \n\t"  \
 | 
			
		||||
    "fcadd z8.d, p5/m, z8.d, z20.d, 270 \n\t" \
 | 
			
		||||
    "fadd z2.d, p5/m, z2.d, z20.d \n\t"  \
 | 
			
		||||
    "fcadd z9.d, p5/m, z9.d, z21.d, 90 \n\t" \
 | 
			
		||||
    "fadd z3.d, p5/m, z3.d, z21.d \n\t"  \
 | 
			
		||||
    "fcadd z10.d, p5/m, z10.d, z22.d, 90 \n\t" \
 | 
			
		||||
    "fadd z4.d, p5/m, z4.d, z22.d \n\t"  \
 | 
			
		||||
    "fcadd z11.d, p5/m, z11.d, z23.d, 90 \n\t" \
 | 
			
		||||
    "fadd z5.d, p5/m, z5.d, z23.d \n\t"  \
 | 
			
		||||
    :  \
 | 
			
		||||
    :  \
 | 
			
		||||
    : "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31" \
 | 
			
		||||
); 
 | 
			
		||||
 | 
			
		||||
// ZM_RECON_ACCUM
 | 
			
		||||
#define ZM_RECON_ACCUM_A64FXd  \
 | 
			
		||||
asm ( \
 | 
			
		||||
    "fcadd z6.d, p5/m, z6.d, z18.d, 90 \n\t" \
 | 
			
		||||
    "fadd z0.d, p5/m, z0.d, z18.d \n\t"  \
 | 
			
		||||
    "fcadd z7.d, p5/m, z7.d, z19.d, 90 \n\t" \
 | 
			
		||||
    "fadd z1.d, p5/m, z1.d, z19.d \n\t"  \
 | 
			
		||||
    "fcadd z8.d, p5/m, z8.d, z20.d, 90 \n\t" \
 | 
			
		||||
    "fadd z2.d, p5/m, z2.d, z20.d \n\t"  \
 | 
			
		||||
    "fcadd z9.d, p5/m, z9.d, z21.d, 270 \n\t" \
 | 
			
		||||
    "fadd z3.d, p5/m, z3.d, z21.d \n\t"  \
 | 
			
		||||
    "fcadd z10.d, p5/m, z10.d, z22.d, 270 \n\t" \
 | 
			
		||||
    "fadd z4.d, p5/m, z4.d, z22.d \n\t"  \
 | 
			
		||||
    "fcadd z11.d, p5/m, z11.d, z23.d, 270 \n\t" \
 | 
			
		||||
    "fadd z5.d, p5/m, z5.d, z23.d \n\t"  \
 | 
			
		||||
    :  \
 | 
			
		||||
    :  \
 | 
			
		||||
    : "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31" \
 | 
			
		||||
); 
 | 
			
		||||
 | 
			
		||||
// TP_RECON_ACCUM
 | 
			
		||||
#define TP_RECON_ACCUM_A64FXd  \
 | 
			
		||||
asm ( \
 | 
			
		||||
    "fadd z0.d, p5/m, z0.d, z18.d \n\t"  \
 | 
			
		||||
    "fadd z6.d, p5/m, z6.d, z18.d \n\t"  \
 | 
			
		||||
    "fadd z1.d, p5/m, z1.d, z19.d \n\t"  \
 | 
			
		||||
    "fadd z7.d, p5/m, z7.d, z19.d \n\t"  \
 | 
			
		||||
    "fadd z2.d, p5/m, z2.d, z20.d \n\t"  \
 | 
			
		||||
    "fadd z8.d, p5/m, z8.d, z20.d \n\t"  \
 | 
			
		||||
    "fadd z3.d, p5/m, z3.d, z21.d \n\t"  \
 | 
			
		||||
    "fadd z9.d, p5/m, z9.d, z21.d \n\t"  \
 | 
			
		||||
    "fadd z4.d, p5/m, z4.d, z22.d \n\t"  \
 | 
			
		||||
    "fadd z10.d, p5/m, z10.d, z22.d \n\t"  \
 | 
			
		||||
    "fadd z5.d, p5/m, z5.d, z23.d \n\t"  \
 | 
			
		||||
    "fadd z11.d, p5/m, z11.d, z23.d \n\t"  \
 | 
			
		||||
    :  \
 | 
			
		||||
    :  \
 | 
			
		||||
    : "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31" \
 | 
			
		||||
); 
 | 
			
		||||
 | 
			
		||||
// TM_RECON_ACCUM
#define TM_RECON_ACCUM_A64FXd  \
asm ( \
    "fadd z0.d, p5/m, z0.d, z18.d \n\t"  \
    "fsub z6.d, p5/m, z6.d, z18.d \n\t" \
    "fadd z1.d, p5/m, z1.d, z19.d \n\t"  \
    "fsub z7.d, p5/m, z7.d, z19.d \n\t" \
    "fadd z2.d, p5/m, z2.d, z20.d \n\t"  \
    "fsub z8.d, p5/m, z8.d, z20.d \n\t" \
    "fadd z3.d, p5/m, z3.d, z21.d \n\t"  \
    "fsub z9.d, p5/m, z9.d, z21.d \n\t" \
    "fadd z4.d, p5/m, z4.d, z22.d \n\t"  \
    "fsub z10.d, p5/m, z10.d, z22.d \n\t" \
    "fadd z5.d, p5/m, z5.d, z23.d \n\t"  \
    "fsub z11.d, p5/m, z11.d, z23.d \n\t" \
    :  \
    :  \
    : "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31" \
); 

// ZERO_PSI
#define ZERO_PSI_A64FXd  \
asm ( \
    "ptrue p5.d \n\t" \
    "fmov z0.d , 0 \n\t" \
    "fmov z1.d , 0 \n\t" \
    "fmov z2.d , 0 \n\t" \
    "fmov z3.d , 0 \n\t" \
    "fmov z4.d , 0 \n\t" \
    "fmov z5.d , 0 \n\t" \
    "fmov z6.d , 0 \n\t" \
    "fmov z7.d , 0 \n\t" \
    "fmov z8.d , 0 \n\t" \
    "fmov z9.d , 0 \n\t" \
    "fmov z10.d , 0 \n\t" \
    "fmov z11.d , 0 \n\t" \
    :  \
    :  \
    : "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31" \
); 

// PREFETCH_RESULT_L2_STORE (prefetch store to L2)
#define PREFETCH_RESULT_L2_STORE_INTERNAL_A64FXd(base)  \
{ \
asm ( \
    "prfd PSTL2STRM, p5, [%[fetchptr], 0, mul vl] \n\t" \
    "prfd PSTL2STRM, p5, [%[fetchptr], 4, mul vl] \n\t" \
    "prfd PSTL2STRM, p5, [%[fetchptr], 8, mul vl] \n\t" \
    :  \
    : [fetchptr] "r" (base) \
    : "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31","memory" \
); \
}
// PREFETCH_RESULT_L1_STORE (prefetch store to L1)
#define PREFETCH_RESULT_L1_STORE_INTERNAL_A64FXd(base)  \
{ \
asm ( \
    "prfd PSTL1STRM, p5, [%[fetchptr], 0, mul vl] \n\t" \
    "prfd PSTL1STRM, p5, [%[fetchptr], 4, mul vl] \n\t" \
    "prfd PSTL1STRM, p5, [%[fetchptr], 8, mul vl] \n\t" \
    :  \
    : [fetchptr] "r" (base) \
    : "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31","memory" \
); \
}
// ADD_RESULT_INTERNAL
#define ADD_RESULT_INTERNAL_A64FXd  \
asm ( \
    "fadd z0.d, p5/m, z0.d, z12.d \n\t"  \
    "fadd z1.d, p5/m, z1.d, z13.d \n\t"  \
    "fadd z2.d, p5/m, z2.d, z14.d \n\t"  \
    "fadd z3.d, p5/m, z3.d, z15.d \n\t"  \
    "fadd z4.d, p5/m, z4.d, z16.d \n\t"  \
    "fadd z5.d, p5/m, z5.d, z17.d \n\t"  \
    "fadd z6.d, p5/m, z6.d, z18.d \n\t"  \
    "fadd z7.d, p5/m, z7.d, z19.d \n\t"  \
    "fadd z8.d, p5/m, z8.d, z20.d \n\t"  \
    "fadd z9.d, p5/m, z9.d, z21.d \n\t"  \
    "fadd z10.d, p5/m, z10.d, z22.d \n\t"  \
    "fadd z11.d, p5/m, z11.d, z23.d \n\t"  \
    :  \
    :  \
    : "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31" \
); 

779  Grid/simd/Fujitsu_A64FX_asm_single.h  Normal file
@@ -0,0 +1,779 @@
/*************************************************************************************

    Grid physics library, www.github.com/paboyle/Grid

    Source file: Fujitsu_A64FX_asm_single.h

    Copyright (C) 2020

Author: Nils Meyer <nils.meyer@ur.de>

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License along
    with this program; if not, write to the Free Software Foundation, Inc.,
    51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.

    See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/*  END LEGAL */
#define LOAD_CHIMU(base)               LOAD_CHIMU_INTERLEAVED_A64FXf(base)
#define PREFETCH_CHIMU_L1(A)           PREFETCH_CHIMU_L1_INTERNAL_A64FXf(A)
#define PREFETCH_GAUGE_L1(A)           PREFETCH_GAUGE_L1_INTERNAL_A64FXf(A)
#define PREFETCH_CHIMU_L2(A)           PREFETCH_CHIMU_L2_INTERNAL_A64FXf(A)
#define PREFETCH_GAUGE_L2(A)           PREFETCH_GAUGE_L2_INTERNAL_A64FXf(A)
#define PF_GAUGE(A)
#define PREFETCH_RESULT_L2_STORE(A)    PREFETCH_RESULT_L2_STORE_INTERNAL_A64FXf(A)
#define PREFETCH_RESULT_L1_STORE(A)    PREFETCH_RESULT_L1_STORE_INTERNAL_A64FXf(A)
#define PREFETCH1_CHIMU(A)             PREFETCH_CHIMU_L1(A)
#define PREFETCH_CHIMU(A)              PREFETCH_CHIMU_L1(A)
#define LOCK_GAUGE(A)
#define UNLOCK_GAUGE(A)
#define MASK_REGS                      DECLARATIONS_A64FXf
#define SAVE_RESULT(A,B)               RESULT_A64FXf(A); PREFETCH_RESULT_L2_STORE(B)
#define MULT_2SPIN_1(Dir)              MULT_2SPIN_1_A64FXf(Dir)
#define MULT_2SPIN_2                   MULT_2SPIN_2_A64FXf
#define LOAD_CHI(base)                 LOAD_CHI_A64FXf(base)
#define ADD_RESULT(base,basep)         LOAD_CHIMU(base); ADD_RESULT_INTERNAL_A64FXf; RESULT_A64FXf(base)
#define XP_PROJ                        XP_PROJ_A64FXf
#define YP_PROJ                        YP_PROJ_A64FXf
#define ZP_PROJ                        ZP_PROJ_A64FXf
#define TP_PROJ                        TP_PROJ_A64FXf
#define XM_PROJ                        XM_PROJ_A64FXf
#define YM_PROJ                        YM_PROJ_A64FXf
#define ZM_PROJ                        ZM_PROJ_A64FXf
#define TM_PROJ                        TM_PROJ_A64FXf
#define XP_RECON                       XP_RECON_A64FXf
#define XM_RECON                       XM_RECON_A64FXf
#define XM_RECON_ACCUM                 XM_RECON_ACCUM_A64FXf
#define YM_RECON_ACCUM                 YM_RECON_ACCUM_A64FXf
#define ZM_RECON_ACCUM                 ZM_RECON_ACCUM_A64FXf
#define TM_RECON_ACCUM                 TM_RECON_ACCUM_A64FXf
#define XP_RECON_ACCUM                 XP_RECON_ACCUM_A64FXf
#define YP_RECON_ACCUM                 YP_RECON_ACCUM_A64FXf
#define ZP_RECON_ACCUM                 ZP_RECON_ACCUM_A64FXf
#define TP_RECON_ACCUM                 TP_RECON_ACCUM_A64FXf
#define PERMUTE_DIR0                   0
#define PERMUTE_DIR1                   1
#define PERMUTE_DIR2                   2
#define PERMUTE_DIR3                   3
#define PERMUTE                        PERMUTE_A64FXf;
#define LOAD_TABLE(Dir)                if (Dir == 0) { LOAD_TABLE0; } else if (Dir == 1) { LOAD_TABLE1; } else if (Dir == 2) { LOAD_TABLE2; } else if (Dir == 3) { LOAD_TABLE3; }
#define MAYBEPERM(A,perm)              if (perm) { PERMUTE; }
// DECLARATIONS
#define DECLARATIONS_A64FXf  \
    const uint32_t lut[4][16] = { \
        {8, 9, 10, 11, 12, 13, 14, 15, 0, 1, 2, 3, 4, 5, 6, 7}, \
        {4, 5, 6, 7, 0, 1, 2, 3, 12, 13, 14, 15, 8, 9, 10, 11}, \
        {2, 3, 0, 1, 6, 7, 4, 5, 10, 11, 8, 9, 14, 15, 12, 13}, \
        {1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14} }; \
asm ( \
    "fmov z31.s , 0 \n\t" \
    :  \
    :  \
    : "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31" \
); 

// RESULT
#define RESULT_A64FXf(base)  \
{ \
asm ( \
    "str z0, [%[storeptr], -6, mul vl] \n\t" \
    "str z1, [%[storeptr], -5, mul vl] \n\t" \
    "str z2, [%[storeptr], -4, mul vl] \n\t" \
    "str z3, [%[storeptr], -3, mul vl] \n\t" \
    "str z4, [%[storeptr], -2, mul vl] \n\t" \
    "str z5, [%[storeptr], -1, mul vl] \n\t" \
    "str z6, [%[storeptr], 0, mul vl] \n\t" \
    "str z7, [%[storeptr], 1, mul vl] \n\t" \
    "str z8, [%[storeptr], 2, mul vl] \n\t" \
    "str z9, [%[storeptr], 3, mul vl] \n\t" \
    "str z10, [%[storeptr], 4, mul vl] \n\t" \
    "str z11, [%[storeptr], 5, mul vl] \n\t" \
    :  \
    : [storeptr] "r" (base + 2 * 3 * 64) \
    : "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31","memory" \
); \
}
// PREFETCH_CHIMU_L2 (prefetch to L2)
#define PREFETCH_CHIMU_L2_INTERNAL_A64FXf(base)  \
{ \
asm ( \
    "prfd PLDL2STRM, p5, [%[fetchptr], 0, mul vl] \n\t" \
    "prfd PLDL2STRM, p5, [%[fetchptr], 4, mul vl] \n\t" \
    "prfd PLDL2STRM, p5, [%[fetchptr], 8, mul vl] \n\t" \
    :  \
    : [fetchptr] "r" (base) \
    : "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31","memory" \
); \
}
// PREFETCH_CHIMU_L1 (prefetch to L1)
#define PREFETCH_CHIMU_L1_INTERNAL_A64FXf(base)  \
{ \
asm ( \
    "prfd PLDL1STRM, p5, [%[fetchptr], 0, mul vl] \n\t" \
    "prfd PLDL1STRM, p5, [%[fetchptr], 4, mul vl] \n\t" \
    "prfd PLDL1STRM, p5, [%[fetchptr], 8, mul vl] \n\t" \
    :  \
    : [fetchptr] "r" (base) \
    : "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31","memory" \
); \
}
// PREFETCH_GAUGE_L2 (prefetch to L2)
#define PREFETCH_GAUGE_L2_INTERNAL_A64FXf(A)  \
{ \
    const auto & ref(U[sUn](A)); uint64_t baseU = (uint64_t)&ref + 3 * 3 * 64; \
asm ( \
    "prfd PLDL2STRM, p5, [%[fetchptr], -4, mul vl] \n\t" \
    "prfd PLDL2STRM, p5, [%[fetchptr], 0, mul vl] \n\t" \
    "prfd PLDL2STRM, p5, [%[fetchptr], 4, mul vl] \n\t" \
    "prfd PLDL2STRM, p5, [%[fetchptr], 8, mul vl] \n\t" \
    "prfd PLDL2STRM, p5, [%[fetchptr], 12, mul vl] \n\t" \
    "prfd PLDL2STRM, p5, [%[fetchptr], 16, mul vl] \n\t" \
    "prfd PLDL2STRM, p5, [%[fetchptr], 20, mul vl] \n\t" \
    "prfd PLDL2STRM, p5, [%[fetchptr], 24, mul vl] \n\t" \
    "prfd PLDL2STRM, p5, [%[fetchptr], 28, mul vl] \n\t" \
    :  \
    : [fetchptr] "r" (baseU) \
    : "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31","memory" \
); \
}
// PREFETCH_GAUGE_L1 (prefetch to L1)
#define PREFETCH_GAUGE_L1_INTERNAL_A64FXf(A)  \
{ \
    const auto & ref(U[sU](A)); uint64_t baseU = (uint64_t)&ref; \
asm ( \
    "prfd PLDL1STRM, p5, [%[fetchptr], 0, mul vl] \n\t" \
    "prfd PLDL1STRM, p5, [%[fetchptr], 4, mul vl] \n\t" \
    "prfd PLDL1STRM, p5, [%[fetchptr], 8, mul vl] \n\t" \
    :  \
    : [fetchptr] "r" (baseU) \
    : "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31","memory" \
); \
}
// LOAD_CHI
#define LOAD_CHI_A64FXf(base)  \
{ \
asm ( \
    "ldr z12, [%[fetchptr], 0, mul vl] \n\t" \
    "ldr z13, [%[fetchptr], 1, mul vl] \n\t" \
    "ldr z14, [%[fetchptr], 2, mul vl] \n\t" \
    "ldr z15, [%[fetchptr], 3, mul vl] \n\t" \
    "ldr z16, [%[fetchptr], 4, mul vl] \n\t" \
    "ldr z17, [%[fetchptr], 5, mul vl] \n\t" \
    :  \
    : [fetchptr] "r" (base) \
    : "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31","memory" \
); \
}
// LOAD_CHIMU
#define LOAD_CHIMU_INTERLEAVED_A64FXf(base)  \
{ \
asm ( \
    "ptrue p5.s \n\t" \
    "ldr z12, [%[fetchptr], -6, mul vl] \n\t" \
    "ldr z21, [%[fetchptr], 3, mul vl] \n\t" \
    "ldr z15, [%[fetchptr], -3, mul vl] \n\t" \
    "ldr z18, [%[fetchptr], 0, mul vl] \n\t" \
    "ldr z13, [%[fetchptr], -5, mul vl] \n\t" \
    "ldr z22, [%[fetchptr], 4, mul vl] \n\t" \
    "ldr z16, [%[fetchptr], -2, mul vl] \n\t" \
    "ldr z19, [%[fetchptr], 1, mul vl] \n\t" \
    "ldr z14, [%[fetchptr], -4, mul vl] \n\t" \
    "ldr z23, [%[fetchptr], 5, mul vl] \n\t" \
    "ldr z17, [%[fetchptr], -1, mul vl] \n\t" \
    "ldr z20, [%[fetchptr], 2, mul vl] \n\t" \
    :  \
    : [fetchptr] "r" (base + 2 * 3 * 64) \
    : "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31","memory" \
); \
}
// LOAD_CHIMU_0213
#define LOAD_CHIMU_0213_A64FXf  \
{ \
    const SiteSpinor & ref(in[offset]); \
asm ( \
    "ptrue p5.s \n\t" \
    "ldr z12, [%[fetchptr], -6, mul vl] \n\t" \
    "ldr z18, [%[fetchptr], 0, mul vl] \n\t" \
    "ldr z13, [%[fetchptr], -5, mul vl] \n\t" \
    "ldr z19, [%[fetchptr], 1, mul vl] \n\t" \
    "ldr z14, [%[fetchptr], -4, mul vl] \n\t" \
    "ldr z20, [%[fetchptr], 2, mul vl] \n\t" \
    "ldr z15, [%[fetchptr], -3, mul vl] \n\t" \
    "ldr z21, [%[fetchptr], 3, mul vl] \n\t" \
    "ldr z16, [%[fetchptr], -2, mul vl] \n\t" \
    "ldr z22, [%[fetchptr], 4, mul vl] \n\t" \
    "ldr z17, [%[fetchptr], -1, mul vl] \n\t" \
    "ldr z23, [%[fetchptr], 5, mul vl] \n\t" \
    :  \
    : [fetchptr] "r" (&ref[2][0]) \
    : "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31","memory" \
); \
}
// LOAD_CHIMU_0312
#define LOAD_CHIMU_0312_A64FXf  \
{ \
    const SiteSpinor & ref(in[offset]); \
asm ( \
    "ptrue p5.s \n\t" \
    "ldr z12, [%[fetchptr], -6, mul vl] \n\t" \
    "ldr z21, [%[fetchptr], 3, mul vl] \n\t" \
    "ldr z13, [%[fetchptr], -5, mul vl] \n\t" \
    "ldr z22, [%[fetchptr], 4, mul vl] \n\t" \
    "ldr z14, [%[fetchptr], -4, mul vl] \n\t" \
    "ldr z23, [%[fetchptr], 5, mul vl] \n\t" \
    "ldr z15, [%[fetchptr], -3, mul vl] \n\t" \
    "ldr z18, [%[fetchptr], 0, mul vl] \n\t" \
    "ldr z16, [%[fetchptr], -2, mul vl] \n\t" \
    "ldr z19, [%[fetchptr], 1, mul vl] \n\t" \
    "ldr z17, [%[fetchptr], -1, mul vl] \n\t" \
    "ldr z20, [%[fetchptr], 2, mul vl] \n\t" \
    :  \
    : [fetchptr] "r" (&ref[2][0]) \
    : "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31","memory" \
); \
}
// LOAD_TABLE0
#define LOAD_TABLE0  \
asm ( \
    "ldr z30, [%[tableptr], %[index], mul vl] \n\t" \
    :  \
    : [tableptr] "r" (&lut[0]),[index] "i" (0) \
    : "memory","cc","p5","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31" \
); 

// LOAD_TABLE1
#define LOAD_TABLE1  \
asm ( \
    "ldr z30, [%[tableptr], %[index], mul vl] \n\t" \
    :  \
    : [tableptr] "r" (&lut[0]),[index] "i" (1) \
    : "memory","cc","p5","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31" \
); 

// LOAD_TABLE2
#define LOAD_TABLE2  \
asm ( \
    "ldr z30, [%[tableptr], %[index], mul vl] \n\t" \
    :  \
    : [tableptr] "r" (&lut[0]),[index] "i" (2) \
    : "memory","cc","p5","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31" \
); 

// LOAD_TABLE3
#define LOAD_TABLE3  \
asm ( \
    "ldr z30, [%[tableptr], %[index], mul vl] \n\t" \
    :  \
    : [tableptr] "r" (&lut[0]),[index] "i" (3) \
    : "memory","cc","p5","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31" \
); 

// PERMUTE
#define PERMUTE_A64FXf  \
asm ( \
    "tbl z12.s, { z12.s }, z30.s \n\t"  \
    "tbl z13.s, { z13.s }, z30.s \n\t"  \
    "tbl z14.s, { z14.s }, z30.s \n\t"  \
    "tbl z15.s, { z15.s }, z30.s \n\t"  \
    "tbl z16.s, { z16.s }, z30.s \n\t"  \
    "tbl z17.s, { z17.s }, z30.s \n\t"  \
    :  \
    :  \
    : "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31" \
); 

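// For readers more at home with the ACLE intrinsics than with the raw "tbl"
// strings above, the same table-driven lane permute can be sketched as below.
// This is a minimal, self-contained illustration and not part of Grid; the
// names permute_dir and lut_row are made up here, and a 512-bit vector length
// is assumed, as it is throughout these generated headers.
//
//   #include <arm_sve.h>
//   #include <stdint.h>
//
//   // One lut[] row (16 uint32 indices for single precision) selects the lane
//   // shuffle; LOAD_TABLE<n> loads it into z30 and PERMUTE applies it via tbl.
//   static inline svfloat32_t permute_dir(svfloat32_t v, const uint32_t *lut_row)
//   {
//       svbool_t   pg  = svptrue_b32();           // all lanes active, like p5
//       svuint32_t idx = svld1_u32(pg, lut_row);  // corresponds to LOAD_TABLE<n>
//       return svtbl_f32(v, idx);                 // the "tbl zX.s, { zX.s }, z30.s"
//   }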
// LOAD_GAUGE
#define LOAD_GAUGE  \
    const auto & ref(U[sU](A)); uint64_t baseU = (uint64_t)&ref; \
{ \
asm ( \
    "ptrue p5.s \n\t" \
    "ldr z24, [%[fetchptr], -6, mul vl] \n\t" \
    "ldr z25, [%[fetchptr], -3, mul vl] \n\t" \
    "ldr z26, [%[fetchptr], 0, mul vl] \n\t" \
    "ldr z27, [%[fetchptr], -5, mul vl] \n\t" \
    "ldr z28, [%[fetchptr], -2, mul vl] \n\t" \
    "ldr z29, [%[fetchptr], 1, mul vl] \n\t" \
    :  \
    : [fetchptr] "r" (baseU + 2 * 3 * 64) \
    : "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31","memory" \
); \
}
// MULT_2SPIN
 | 
			
		||||
#define MULT_2SPIN_1_A64FXf(A)  \
 | 
			
		||||
{ \
 | 
			
		||||
    const auto & ref(U[sU](A)); uint64_t baseU = (uint64_t)&ref; \
 | 
			
		||||
asm ( \
 | 
			
		||||
    "ldr z24, [%[fetchptr], -6, mul vl] \n\t" \
 | 
			
		||||
    "ldr z25, [%[fetchptr], -3, mul vl] \n\t" \
 | 
			
		||||
    "ldr z26, [%[fetchptr], 0, mul vl] \n\t" \
 | 
			
		||||
    "ldr z27, [%[fetchptr], -5, mul vl] \n\t" \
 | 
			
		||||
    "ldr z28, [%[fetchptr], -2, mul vl] \n\t" \
 | 
			
		||||
    "ldr z29, [%[fetchptr], 1, mul vl] \n\t" \
 | 
			
		||||
    "movprfx z18.s, p5/m, z31.s \n\t" \
 | 
			
		||||
    "fcmla z18.s, p5/m, z24.s, z12.s, 0 \n\t" \
 | 
			
		||||
    "movprfx z21.s, p5/m, z31.s \n\t" \
 | 
			
		||||
    "fcmla z21.s, p5/m, z24.s, z15.s, 0 \n\t" \
 | 
			
		||||
    "movprfx z19.s, p5/m, z31.s \n\t" \
 | 
			
		||||
    "fcmla z19.s, p5/m, z25.s, z12.s, 0 \n\t" \
 | 
			
		||||
    "movprfx z22.s, p5/m, z31.s \n\t" \
 | 
			
		||||
    "fcmla z22.s, p5/m, z25.s, z15.s, 0 \n\t" \
 | 
			
		||||
    "movprfx z20.s, p5/m, z31.s \n\t" \
 | 
			
		||||
    "fcmla z20.s, p5/m, z26.s, z12.s, 0 \n\t" \
 | 
			
		||||
    "movprfx z23.s, p5/m, z31.s \n\t" \
 | 
			
		||||
    "fcmla z23.s, p5/m, z26.s, z15.s, 0 \n\t" \
 | 
			
		||||
    "fcmla z18.s, p5/m, z24.s, z12.s, 90 \n\t" \
 | 
			
		||||
    "fcmla z21.s, p5/m, z24.s, z15.s, 90 \n\t" \
 | 
			
		||||
    "fcmla z19.s, p5/m, z25.s, z12.s, 90 \n\t" \
 | 
			
		||||
    "fcmla z22.s, p5/m, z25.s, z15.s, 90 \n\t" \
 | 
			
		||||
    "fcmla z20.s, p5/m, z26.s, z12.s, 90 \n\t" \
 | 
			
		||||
    "fcmla z23.s, p5/m, z26.s, z15.s, 90 \n\t" \
 | 
			
		||||
    "ldr z24, [%[fetchptr], -4, mul vl] \n\t" \
 | 
			
		||||
    "ldr z25, [%[fetchptr], -1, mul vl] \n\t" \
 | 
			
		||||
    "ldr z26, [%[fetchptr], 2, mul vl] \n\t" \
 | 
			
		||||
    :  \
 | 
			
		||||
    : [fetchptr] "r" (baseU + 2 * 3 * 64) \
 | 
			
		||||
    : "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31","memory" \
 | 
			
		||||
); \
 | 
			
		||||
}
 | 
			
		||||
// MULT_2SPIN_BACKEND
 | 
			
		||||
#define MULT_2SPIN_2_A64FXf  \
 | 
			
		||||
{ \
 | 
			
		||||
asm ( \
 | 
			
		||||
    "fcmla z18.s, p5/m, z27.s, z13.s, 0 \n\t" \
 | 
			
		||||
    "fcmla z21.s, p5/m, z27.s, z16.s, 0 \n\t" \
 | 
			
		||||
    "fcmla z19.s, p5/m, z28.s, z13.s, 0 \n\t" \
 | 
			
		||||
    "fcmla z22.s, p5/m, z28.s, z16.s, 0 \n\t" \
 | 
			
		||||
    "fcmla z20.s, p5/m, z29.s, z13.s, 0 \n\t" \
 | 
			
		||||
    "fcmla z23.s, p5/m, z29.s, z16.s, 0 \n\t" \
 | 
			
		||||
    "fcmla z18.s, p5/m, z27.s, z13.s, 90 \n\t" \
 | 
			
		||||
    "fcmla z21.s, p5/m, z27.s, z16.s, 90 \n\t" \
 | 
			
		||||
    "fcmla z19.s, p5/m, z28.s, z13.s, 90 \n\t" \
 | 
			
		||||
    "fcmla z22.s, p5/m, z28.s, z16.s, 90 \n\t" \
 | 
			
		||||
    "fcmla z20.s, p5/m, z29.s, z13.s, 90 \n\t" \
 | 
			
		||||
    "fcmla z23.s, p5/m, z29.s, z16.s, 90 \n\t" \
 | 
			
		||||
    "fcmla z18.s, p5/m, z24.s, z14.s, 0 \n\t" \
 | 
			
		||||
    "fcmla z21.s, p5/m, z24.s, z17.s, 0 \n\t" \
 | 
			
		||||
    "fcmla z19.s, p5/m, z25.s, z14.s, 0 \n\t" \
 | 
			
		||||
    "fcmla z22.s, p5/m, z25.s, z17.s, 0 \n\t" \
 | 
			
		||||
    "fcmla z20.s, p5/m, z26.s, z14.s, 0 \n\t" \
 | 
			
		||||
    "fcmla z23.s, p5/m, z26.s, z17.s, 0 \n\t" \
 | 
			
		||||
    "fcmla z18.s, p5/m, z24.s, z14.s, 90 \n\t" \
 | 
			
		||||
    "fcmla z21.s, p5/m, z24.s, z17.s, 90 \n\t" \
 | 
			
		||||
    "fcmla z19.s, p5/m, z25.s, z14.s, 90 \n\t" \
 | 
			
		||||
    "fcmla z22.s, p5/m, z25.s, z17.s, 90 \n\t" \
 | 
			
		||||
    "fcmla z20.s, p5/m, z26.s, z14.s, 90 \n\t" \
 | 
			
		||||
    "fcmla z23.s, p5/m, z26.s, z17.s, 90 \n\t" \
 | 
			
		||||
    :  \
 | 
			
		||||
    :  \
 | 
			
		||||
    : "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31" \
 | 
			
		||||
); \
 | 
			
		||||
}
 | 
			
		||||
// XP_PROJ
 | 
			
		||||
#define XP_PROJ_A64FXf  \
 | 
			
		||||
{ \
 | 
			
		||||
asm ( \
 | 
			
		||||
    "fcadd z12.s, p5/m, z12.s, z21.s, 90 \n\t" \
 | 
			
		||||
    "fcadd z13.s, p5/m, z13.s, z22.s, 90 \n\t" \
 | 
			
		||||
    "fcadd z14.s, p5/m, z14.s, z23.s, 90 \n\t" \
 | 
			
		||||
    "fcadd z15.s, p5/m, z15.s, z18.s, 90 \n\t" \
 | 
			
		||||
    "fcadd z16.s, p5/m, z16.s, z19.s, 90 \n\t" \
 | 
			
		||||
    "fcadd z17.s, p5/m, z17.s, z20.s, 90 \n\t" \
 | 
			
		||||
    :  \
 | 
			
		||||
    :  \
 | 
			
		||||
    : "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31" \
 | 
			
		||||
); \
 | 
			
		||||
}
 | 
			
		||||
// XP_RECON
 | 
			
		||||
#define XP_RECON_A64FXf  \
 | 
			
		||||
asm ( \
 | 
			
		||||
    "movprfx z6.s, p5/m, z31.s \n\t" \
 | 
			
		||||
    "fcadd z6.s, p5/m, z6.s, z21.s, 270 \n\t" \
 | 
			
		||||
    "movprfx z7.s, p5/m, z31.s \n\t" \
 | 
			
		||||
    "fcadd z7.s, p5/m, z7.s, z22.s, 270 \n\t" \
 | 
			
		||||
    "movprfx z8.s, p5/m, z31.s \n\t" \
 | 
			
		||||
    "fcadd z8.s, p5/m, z8.s, z23.s, 270 \n\t" \
 | 
			
		||||
    "movprfx z9.s, p5/m, z31.s \n\t" \
 | 
			
		||||
    "fcadd z9.s, p5/m, z9.s, z18.s, 270 \n\t" \
 | 
			
		||||
    "movprfx z10.s, p5/m, z31.s \n\t" \
 | 
			
		||||
    "fcadd z10.s, p5/m, z10.s, z19.s, 270 \n\t" \
 | 
			
		||||
    "movprfx z11.s, p5/m, z31.s \n\t" \
 | 
			
		||||
    "fcadd z11.s, p5/m, z11.s, z20.s, 270 \n\t" \
 | 
			
		||||
    "mov z0.s, p5/m, z18.s \n\t" \
 | 
			
		||||
    "mov z1.s, p5/m, z19.s \n\t" \
 | 
			
		||||
    "mov z2.s, p5/m, z20.s \n\t" \
 | 
			
		||||
    "mov z3.s, p5/m, z21.s \n\t" \
 | 
			
		||||
    "mov z4.s, p5/m, z22.s \n\t" \
 | 
			
		||||
    "mov z5.s, p5/m, z23.s \n\t" \
 | 
			
		||||
    :  \
 | 
			
		||||
    :  \
 | 
			
		||||
    : "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31" \
 | 
			
		||||
); 
 | 
			
		||||
 | 
			
		||||
// XP_RECON_ACCUM
 | 
			
		||||
#define XP_RECON_ACCUM_A64FXf  \
 | 
			
		||||
asm ( \
 | 
			
		||||
    "fcadd z9.s, p5/m, z9.s, z18.s, 270 \n\t" \
 | 
			
		||||
    "fadd z0.s, p5/m, z0.s, z18.s \n\t"  \
 | 
			
		||||
    "fcadd z10.s, p5/m, z10.s, z19.s, 270 \n\t" \
 | 
			
		||||
    "fadd z1.s, p5/m, z1.s, z19.s \n\t"  \
 | 
			
		||||
    "fcadd z11.s, p5/m, z11.s, z20.s, 270 \n\t" \
 | 
			
		||||
    "fadd z2.s, p5/m, z2.s, z20.s \n\t"  \
 | 
			
		||||
    "fcadd z6.s, p5/m, z6.s, z21.s, 270 \n\t" \
 | 
			
		||||
    "fadd z3.s, p5/m, z3.s, z21.s \n\t"  \
 | 
			
		||||
    "fcadd z7.s, p5/m, z7.s, z22.s, 270 \n\t" \
 | 
			
		||||
    "fadd z4.s, p5/m, z4.s, z22.s \n\t"  \
 | 
			
		||||
    "fcadd z8.s, p5/m, z8.s, z23.s, 270 \n\t" \
 | 
			
		||||
    "fadd z5.s, p5/m, z5.s, z23.s \n\t"  \
 | 
			
		||||
    :  \
 | 
			
		||||
    :  \
 | 
			
		||||
    : "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31" \
 | 
			
		||||
); 
 | 
			
		||||
 | 
			
		||||
// YP_PROJ
 | 
			
		||||
#define YP_PROJ_A64FXf  \
 | 
			
		||||
{ \
 | 
			
		||||
asm ( \
 | 
			
		||||
    "fsub z12.s, p5/m, z12.s, z21.s \n\t" \
 | 
			
		||||
    "fsub z13.s, p5/m, z13.s, z22.s \n\t" \
 | 
			
		||||
    "fsub z14.s, p5/m, z14.s, z23.s \n\t" \
 | 
			
		||||
    "fadd z15.s, p5/m, z15.s, z18.s \n\t"  \
 | 
			
		||||
    "fadd z16.s, p5/m, z16.s, z19.s \n\t"  \
 | 
			
		||||
    "fadd z17.s, p5/m, z17.s, z20.s \n\t"  \
 | 
			
		||||
    :  \
 | 
			
		||||
    :  \
 | 
			
		||||
    : "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31" \
 | 
			
		||||
); \
 | 
			
		||||
}
 | 
			
		||||
// ZP_PROJ
 | 
			
		||||
#define ZP_PROJ_A64FXf  \
 | 
			
		||||
{ \
 | 
			
		||||
asm ( \
 | 
			
		||||
    "fcadd z12.s, p5/m, z12.s, z18.s, 90 \n\t" \
 | 
			
		||||
    "fcadd z13.s, p5/m, z13.s, z19.s, 90 \n\t" \
 | 
			
		||||
    "fcadd z14.s, p5/m, z14.s, z20.s, 90 \n\t" \
 | 
			
		||||
    "fcadd z15.s, p5/m, z15.s, z21.s, 270 \n\t" \
 | 
			
		||||
    "fcadd z16.s, p5/m, z16.s, z22.s, 270 \n\t" \
 | 
			
		||||
    "fcadd z17.s, p5/m, z17.s, z23.s, 270 \n\t" \
 | 
			
		||||
    :  \
 | 
			
		||||
    :  \
 | 
			
		||||
    : "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31" \
 | 
			
		||||
); \
 | 
			
		||||
}
 | 
			
		||||
// TP_PROJ
 | 
			
		||||
#define TP_PROJ_A64FXf  \
 | 
			
		||||
{ \
 | 
			
		||||
asm ( \
 | 
			
		||||
    "fadd z12.s, p5/m, z12.s, z18.s \n\t"  \
 | 
			
		||||
    "fadd z13.s, p5/m, z13.s, z19.s \n\t"  \
 | 
			
		||||
    "fadd z14.s, p5/m, z14.s, z20.s \n\t"  \
 | 
			
		||||
    "fadd z15.s, p5/m, z15.s, z21.s \n\t"  \
 | 
			
		||||
    "fadd z16.s, p5/m, z16.s, z22.s \n\t"  \
 | 
			
		||||
    "fadd z17.s, p5/m, z17.s, z23.s \n\t"  \
 | 
			
		||||
    :  \
 | 
			
		||||
    :  \
 | 
			
		||||
    : "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31" \
 | 
			
		||||
); \
 | 
			
		||||
}
 | 
			
		||||
// XM_PROJ
 | 
			
		||||
#define XM_PROJ_A64FXf  \
 | 
			
		||||
{ \
 | 
			
		||||
asm ( \
 | 
			
		||||
    "fcadd z12.s, p5/m, z12.s, z21.s, 270 \n\t" \
 | 
			
		||||
    "fcadd z13.s, p5/m, z13.s, z22.s, 270 \n\t" \
 | 
			
		||||
    "fcadd z14.s, p5/m, z14.s, z23.s, 270 \n\t" \
 | 
			
		||||
    "fcadd z15.s, p5/m, z15.s, z18.s, 270 \n\t" \
 | 
			
		||||
    "fcadd z16.s, p5/m, z16.s, z19.s, 270 \n\t" \
 | 
			
		||||
    "fcadd z17.s, p5/m, z17.s, z20.s, 270 \n\t" \
 | 
			
		||||
    :  \
 | 
			
		||||
    :  \
 | 
			
		||||
    : "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31" \
 | 
			
		||||
); \
 | 
			
		||||
}
 | 
			
		||||
// XM_RECON
 | 
			
		||||
#define XM_RECON_A64FXf  \
 | 
			
		||||
asm ( \
 | 
			
		||||
    "movprfx z6.s, p5/m, z31.s \n\t" \
 | 
			
		||||
    "fcadd z6.s, p5/m, z6.s, z21.s, 90 \n\t" \
 | 
			
		||||
    "movprfx z7.s, p5/m, z31.s \n\t" \
 | 
			
		||||
    "fcadd z7.s, p5/m, z7.s, z22.s, 90 \n\t" \
 | 
			
		||||
    "movprfx z8.s, p5/m, z31.s \n\t" \
 | 
			
		||||
    "fcadd z8.s, p5/m, z8.s, z23.s, 90 \n\t" \
 | 
			
		||||
    "movprfx z9.s, p5/m, z31.s \n\t" \
 | 
			
		||||
    "fcadd z9.s, p5/m, z9.s, z18.s, 90 \n\t" \
 | 
			
		||||
    "movprfx z10.s, p5/m, z31.s \n\t" \
 | 
			
		||||
    "fcadd z10.s, p5/m, z10.s, z19.s, 90 \n\t" \
 | 
			
		||||
    "movprfx z11.s, p5/m, z31.s \n\t" \
 | 
			
		||||
    "fcadd z11.s, p5/m, z11.s, z20.s, 90 \n\t" \
 | 
			
		||||
    "mov z0.s, p5/m, z18.s \n\t" \
 | 
			
		||||
    "mov z1.s, p5/m, z19.s \n\t" \
 | 
			
		||||
    "mov z2.s, p5/m, z20.s \n\t" \
 | 
			
		||||
    "mov z3.s, p5/m, z21.s \n\t" \
 | 
			
		||||
    "mov z4.s, p5/m, z22.s \n\t" \
 | 
			
		||||
    "mov z5.s, p5/m, z23.s \n\t" \
 | 
			
		||||
    :  \
 | 
			
		||||
    :  \
 | 
			
		||||
    : "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31" \
 | 
			
		||||
); 
 | 
			
		||||
 | 
			
		||||
// YM_PROJ
 | 
			
		||||
#define YM_PROJ_A64FXf  \
 | 
			
		||||
{ \
 | 
			
		||||
asm ( \
 | 
			
		||||
    "fadd z12.s, p5/m, z12.s, z21.s \n\t"  \
 | 
			
		||||
    "fadd z13.s, p5/m, z13.s, z22.s \n\t"  \
 | 
			
		||||
    "fadd z14.s, p5/m, z14.s, z23.s \n\t"  \
 | 
			
		||||
    "fsub z15.s, p5/m, z15.s, z18.s \n\t" \
 | 
			
		||||
    "fsub z16.s, p5/m, z16.s, z19.s \n\t" \
 | 
			
		||||
    "fsub z17.s, p5/m, z17.s, z20.s \n\t" \
 | 
			
		||||
    :  \
 | 
			
		||||
    :  \
 | 
			
		||||
    : "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31" \
 | 
			
		||||
); \
 | 
			
		||||
}
 | 
			
		||||
// ZM_PROJ
 | 
			
		||||
#define ZM_PROJ_A64FXf  \
 | 
			
		||||
{ \
 | 
			
		||||
asm ( \
 | 
			
		||||
    "fcadd z12.s, p5/m, z12.s, z18.s, 270 \n\t" \
 | 
			
		||||
    "fcadd z13.s, p5/m, z13.s, z19.s, 270 \n\t" \
 | 
			
		||||
    "fcadd z14.s, p5/m, z14.s, z20.s, 270 \n\t" \
 | 
			
		||||
    "fcadd z15.s, p5/m, z15.s, z21.s, 90 \n\t" \
 | 
			
		||||
    "fcadd z16.s, p5/m, z16.s, z22.s, 90 \n\t" \
 | 
			
		||||
    "fcadd z17.s, p5/m, z17.s, z23.s, 90 \n\t" \
 | 
			
		||||
    :  \
 | 
			
		||||
    :  \
 | 
			
		||||
    : "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31" \
 | 
			
		||||
); \
 | 
			
		||||
}
 | 
			
		||||
// TM_PROJ
 | 
			
		||||
#define TM_PROJ_A64FXf  \
 | 
			
		||||
{ \
 | 
			
		||||
asm ( \
 | 
			
		||||
    "ptrue p5.s \n\t" \
 | 
			
		||||
    "fsub z12.s, p5/m, z12.s, z18.s \n\t" \
 | 
			
		||||
    "fsub z13.s, p5/m, z13.s, z19.s \n\t" \
 | 
			
		||||
    "fsub z14.s, p5/m, z14.s, z20.s \n\t" \
 | 
			
		||||
    "fsub z15.s, p5/m, z15.s, z21.s \n\t" \
 | 
			
		||||
    "fsub z16.s, p5/m, z16.s, z22.s \n\t" \
 | 
			
		||||
    "fsub z17.s, p5/m, z17.s, z23.s \n\t" \
 | 
			
		||||
    :  \
 | 
			
		||||
    :  \
 | 
			
		||||
    : "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31" \
 | 
			
		||||
); \
 | 
			
		||||
}
 | 
			
		||||
// XM_RECON_ACCUM
 | 
			
		||||
#define XM_RECON_ACCUM_A64FXf  \
 | 
			
		||||
asm ( \
 | 
			
		||||
    "fcadd z9.s, p5/m, z9.s, z18.s, 90 \n\t" \
 | 
			
		||||
    "fcadd z10.s, p5/m, z10.s, z19.s, 90 \n\t" \
 | 
			
		||||
    "fcadd z11.s, p5/m, z11.s, z20.s, 90 \n\t" \
 | 
			
		||||
    "fcadd z6.s, p5/m, z6.s, z21.s, 90 \n\t" \
 | 
			
		||||
    "fcadd z7.s, p5/m, z7.s, z22.s, 90 \n\t" \
 | 
			
		||||
    "fcadd z8.s, p5/m, z8.s, z23.s, 90 \n\t" \
 | 
			
		||||
    "fadd z0.s, p5/m, z0.s, z18.s \n\t"  \
 | 
			
		||||
    "fadd z1.s, p5/m, z1.s, z19.s \n\t"  \
 | 
			
		||||
    "fadd z2.s, p5/m, z2.s, z20.s \n\t"  \
 | 
			
		||||
    "fadd z3.s, p5/m, z3.s, z21.s \n\t"  \
 | 
			
		||||
    "fadd z4.s, p5/m, z4.s, z22.s \n\t"  \
 | 
			
		||||
    "fadd z5.s, p5/m, z5.s, z23.s \n\t"  \
 | 
			
		||||
    :  \
 | 
			
		||||
    :  \
 | 
			
		||||
    : "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31" \
 | 
			
		||||
); 
 | 
			
		||||
 | 
			
		||||
// YP_RECON_ACCUM
 | 
			
		||||
#define YP_RECON_ACCUM_A64FXf  \
 | 
			
		||||
asm ( \
 | 
			
		||||
    "fadd z0.s, p5/m, z0.s, z18.s \n\t"  \
 | 
			
		||||
    "fsub z9.s, p5/m, z9.s, z18.s \n\t" \
 | 
			
		||||
    "fadd z1.s, p5/m, z1.s, z19.s \n\t"  \
 | 
			
		||||
    "fsub z10.s, p5/m, z10.s, z19.s \n\t" \
 | 
			
		||||
    "fadd z2.s, p5/m, z2.s, z20.s \n\t"  \
 | 
			
		||||
    "fsub z11.s, p5/m, z11.s, z20.s \n\t" \
 | 
			
		||||
    "fadd z3.s, p5/m, z3.s, z21.s \n\t"  \
 | 
			
		||||
    "fadd z6.s, p5/m, z6.s, z21.s \n\t"  \
 | 
			
		||||
    "fadd z4.s, p5/m, z4.s, z22.s \n\t"  \
 | 
			
		||||
    "fadd z7.s, p5/m, z7.s, z22.s \n\t"  \
 | 
			
		||||
    "fadd z5.s, p5/m, z5.s, z23.s \n\t"  \
 | 
			
		||||
    "fadd z8.s, p5/m, z8.s, z23.s \n\t"  \
 | 
			
		||||
    :  \
 | 
			
		||||
    :  \
 | 
			
		||||
    : "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31" \
 | 
			
		||||
); 
 | 
			
		||||
 | 
			
		||||
// YM_RECON_ACCUM
 | 
			
		||||
#define YM_RECON_ACCUM_A64FXf  \
 | 
			
		||||
asm ( \
 | 
			
		||||
    "fadd z0.s, p5/m, z0.s, z18.s \n\t"  \
 | 
			
		||||
    "fadd z9.s, p5/m, z9.s, z18.s \n\t"  \
 | 
			
		||||
    "fadd z1.s, p5/m, z1.s, z19.s \n\t"  \
 | 
			
		||||
    "fadd z10.s, p5/m, z10.s, z19.s \n\t"  \
 | 
			
		||||
    "fadd z2.s, p5/m, z2.s, z20.s \n\t"  \
 | 
			
		||||
    "fadd z11.s, p5/m, z11.s, z20.s \n\t"  \
 | 
			
		||||
    "fadd z3.s, p5/m, z3.s, z21.s \n\t"  \
 | 
			
		||||
    "fsub z6.s, p5/m, z6.s, z21.s \n\t" \
 | 
			
		||||
    "fadd z4.s, p5/m, z4.s, z22.s \n\t"  \
 | 
			
		||||
    "fsub z7.s, p5/m, z7.s, z22.s \n\t" \
 | 
			
		||||
    "fadd z5.s, p5/m, z5.s, z23.s \n\t"  \
 | 
			
		||||
    "fsub z8.s, p5/m, z8.s, z23.s \n\t" \
 | 
			
		||||
    :  \
 | 
			
		||||
    :  \
 | 
			
		||||
    : "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31" \
 | 
			
		||||
); 
 | 
			
		||||
 | 
			
		||||
// ZP_RECON_ACCUM
 | 
			
		||||
#define ZP_RECON_ACCUM_A64FXf  \
 | 
			
		||||
asm ( \
 | 
			
		||||
    "fcadd z6.s, p5/m, z6.s, z18.s, 270 \n\t" \
 | 
			
		||||
    "fadd z0.s, p5/m, z0.s, z18.s \n\t"  \
 | 
			
		||||
    "fcadd z7.s, p5/m, z7.s, z19.s, 270 \n\t" \
 | 
			
		||||
    "fadd z1.s, p5/m, z1.s, z19.s \n\t"  \
 | 
			
		||||
    "fcadd z8.s, p5/m, z8.s, z20.s, 270 \n\t" \
 | 
			
		||||
    "fadd z2.s, p5/m, z2.s, z20.s \n\t"  \
 | 
			
		||||
    "fcadd z9.s, p5/m, z9.s, z21.s, 90 \n\t" \
 | 
			
		||||
    "fadd z3.s, p5/m, z3.s, z21.s \n\t"  \
 | 
			
		||||
    "fcadd z10.s, p5/m, z10.s, z22.s, 90 \n\t" \
 | 
			
		||||
    "fadd z4.s, p5/m, z4.s, z22.s \n\t"  \
 | 
			
		||||
    "fcadd z11.s, p5/m, z11.s, z23.s, 90 \n\t" \
 | 
			
		||||
    "fadd z5.s, p5/m, z5.s, z23.s \n\t"  \
 | 
			
		||||
    :  \
 | 
			
		||||
    :  \
 | 
			
		||||
    : "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31" \
 | 
			
		||||
); 
 | 
			
		||||
 | 
			
		||||
// ZM_RECON_ACCUM
 | 
			
		||||
#define ZM_RECON_ACCUM_A64FXf  \
 | 
			
		||||
asm ( \
 | 
			
		||||
    "fcadd z6.s, p5/m, z6.s, z18.s, 90 \n\t" \
 | 
			
		||||
    "fadd z0.s, p5/m, z0.s, z18.s \n\t"  \
 | 
			
		||||
    "fcadd z7.s, p5/m, z7.s, z19.s, 90 \n\t" \
 | 
			
		||||
    "fadd z1.s, p5/m, z1.s, z19.s \n\t"  \
 | 
			
		||||
    "fcadd z8.s, p5/m, z8.s, z20.s, 90 \n\t" \
 | 
			
		||||
    "fadd z2.s, p5/m, z2.s, z20.s \n\t"  \
 | 
			
		||||
    "fcadd z9.s, p5/m, z9.s, z21.s, 270 \n\t" \
 | 
			
		||||
    "fadd z3.s, p5/m, z3.s, z21.s \n\t"  \
 | 
			
		||||
    "fcadd z10.s, p5/m, z10.s, z22.s, 270 \n\t" \
 | 
			
		||||
    "fadd z4.s, p5/m, z4.s, z22.s \n\t"  \
 | 
			
		||||
    "fcadd z11.s, p5/m, z11.s, z23.s, 270 \n\t" \
 | 
			
		||||
    "fadd z5.s, p5/m, z5.s, z23.s \n\t"  \
 | 
			
		||||
    :  \
 | 
			
		||||
    :  \
 | 
			
		||||
    : "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31" \
 | 
			
		||||
); 
 | 
			
		||||
 | 
			
		||||
// TP_RECON_ACCUM
 | 
			
		||||
#define TP_RECON_ACCUM_A64FXf  \
 | 
			
		||||
asm ( \
 | 
			
		||||
    "fadd z0.s, p5/m, z0.s, z18.s \n\t"  \
 | 
			
		||||
    "fadd z6.s, p5/m, z6.s, z18.s \n\t"  \
 | 
			
		||||
    "fadd z1.s, p5/m, z1.s, z19.s \n\t"  \
 | 
			
		||||
    "fadd z7.s, p5/m, z7.s, z19.s \n\t"  \
 | 
			
		||||
    "fadd z2.s, p5/m, z2.s, z20.s \n\t"  \
 | 
			
		||||
    "fadd z8.s, p5/m, z8.s, z20.s \n\t"  \
 | 
			
		||||
    "fadd z3.s, p5/m, z3.s, z21.s \n\t"  \
 | 
			
		||||
    "fadd z9.s, p5/m, z9.s, z21.s \n\t"  \
 | 
			
		||||
    "fadd z4.s, p5/m, z4.s, z22.s \n\t"  \
 | 
			
		||||
    "fadd z10.s, p5/m, z10.s, z22.s \n\t"  \
 | 
			
		||||
    "fadd z5.s, p5/m, z5.s, z23.s \n\t"  \
 | 
			
		||||
    "fadd z11.s, p5/m, z11.s, z23.s \n\t"  \
 | 
			
		||||
    :  \
 | 
			
		||||
    :  \
 | 
			
		||||
    : "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31" \
 | 
			
		||||
); 
 | 
			
		||||
 | 
			
		||||
// TM_RECON_ACCUM
 | 
			
		||||
#define TM_RECON_ACCUM_A64FXf  \
 | 
			
		||||
asm ( \
 | 
			
		||||
    "fadd z0.s, p5/m, z0.s, z18.s \n\t"  \
 | 
			
		||||
    "fsub z6.s, p5/m, z6.s, z18.s \n\t" \
 | 
			
		||||
    "fadd z1.s, p5/m, z1.s, z19.s \n\t"  \
 | 
			
		||||
    "fsub z7.s, p5/m, z7.s, z19.s \n\t" \
 | 
			
		||||
    "fadd z2.s, p5/m, z2.s, z20.s \n\t"  \
 | 
			
		||||
    "fsub z8.s, p5/m, z8.s, z20.s \n\t" \
 | 
			
		||||
    "fadd z3.s, p5/m, z3.s, z21.s \n\t"  \
 | 
			
		||||
    "fsub z9.s, p5/m, z9.s, z21.s \n\t" \
 | 
			
		||||
    "fadd z4.s, p5/m, z4.s, z22.s \n\t"  \
 | 
			
		||||
    "fsub z10.s, p5/m, z10.s, z22.s \n\t" \
 | 
			
		||||
    "fadd z5.s, p5/m, z5.s, z23.s \n\t"  \
 | 
			
		||||
    "fsub z11.s, p5/m, z11.s, z23.s \n\t" \
 | 
			
		||||
    :  \
 | 
			
		||||
    :  \
 | 
			
		||||
    : "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31" \
 | 
			
		||||
); 
 | 
			
		||||
 | 
			
		||||
// ZERO_PSI
 | 
			
		||||
#define ZERO_PSI_A64FXf  \
 | 
			
		||||
asm ( \
 | 
			
		||||
    "ptrue p5.s \n\t" \
 | 
			
		||||
    "fmov z0.s , 0 \n\t" \
 | 
			
		||||
    "fmov z1.s , 0 \n\t" \
 | 
			
		||||
    "fmov z2.s , 0 \n\t" \
 | 
			
		||||
    "fmov z3.s , 0 \n\t" \
 | 
			
		||||
    "fmov z4.s , 0 \n\t" \
 | 
			
		||||
    "fmov z5.s , 0 \n\t" \
 | 
			
		||||
    "fmov z6.s , 0 \n\t" \
 | 
			
		||||
    "fmov z7.s , 0 \n\t" \
 | 
			
		||||
    "fmov z8.s , 0 \n\t" \
 | 
			
		||||
    "fmov z9.s , 0 \n\t" \
 | 
			
		||||
    "fmov z10.s , 0 \n\t" \
 | 
			
		||||
    "fmov z11.s , 0 \n\t" \
 | 
			
		||||
    :  \
 | 
			
		||||
    :  \
 | 
			
		||||
    : "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31" \
 | 
			
		||||
); 
 | 
			
		||||
 | 
			
		||||
// PREFETCH_RESULT_L2_STORE (prefetch store to L2)
 | 
			
		||||
#define PREFETCH_RESULT_L2_STORE_INTERNAL_A64FXf(base)  \
 | 
			
		||||
{ \
 | 
			
		||||
asm ( \
 | 
			
		||||
    "prfd PSTL2STRM, p5, [%[fetchptr], 0, mul vl] \n\t" \
 | 
			
		||||
    "prfd PSTL2STRM, p5, [%[fetchptr], 4, mul vl] \n\t" \
 | 
			
		||||
    "prfd PSTL2STRM, p5, [%[fetchptr], 8, mul vl] \n\t" \
 | 
			
		||||
    :  \
 | 
			
		||||
    : [fetchptr] "r" (base) \
 | 
			
		||||
    : "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31","memory" \
 | 
			
		||||
); \
 | 
			
		||||
}
 | 
			
		||||
// PREFETCH_RESULT_L1_STORE (prefetch store to L1)
 | 
			
		||||
#define PREFETCH_RESULT_L1_STORE_INTERNAL_A64FXf(base)  \
 | 
			
		||||
{ \
 | 
			
		||||
asm ( \
 | 
			
		||||
    "prfd PSTL1STRM, p5, [%[fetchptr], 0, mul vl] \n\t" \
 | 
			
		||||
    "prfd PSTL1STRM, p5, [%[fetchptr], 4, mul vl] \n\t" \
 | 
			
		||||
    "prfd PSTL1STRM, p5, [%[fetchptr], 8, mul vl] \n\t" \
 | 
			
		||||
    :  \
 | 
			
		||||
    : [fetchptr] "r" (base) \
 | 
			
		||||
    : "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31","memory" \
 | 
			
		||||
); \
 | 
			
		||||
}
 | 
			
		||||
// ADD_RESULT_INTERNAL
 | 
			
		||||
#define ADD_RESULT_INTERNAL_A64FXf  \
 | 
			
		||||
asm ( \
 | 
			
		||||
    "fadd z0.s, p5/m, z0.s, z12.s \n\t"  \
 | 
			
		||||
    "fadd z1.s, p5/m, z1.s, z13.s \n\t"  \
 | 
			
		||||
    "fadd z2.s, p5/m, z2.s, z14.s \n\t"  \
 | 
			
		||||
    "fadd z3.s, p5/m, z3.s, z15.s \n\t"  \
 | 
			
		||||
    "fadd z4.s, p5/m, z4.s, z16.s \n\t"  \
 | 
			
		||||
    "fadd z5.s, p5/m, z5.s, z17.s \n\t"  \
 | 
			
		||||
    "fadd z6.s, p5/m, z6.s, z18.s \n\t"  \
 | 
			
		||||
    "fadd z7.s, p5/m, z7.s, z19.s \n\t"  \
 | 
			
		||||
    "fadd z8.s, p5/m, z8.s, z20.s \n\t"  \
 | 
			
		||||
    "fadd z9.s, p5/m, z9.s, z21.s \n\t"  \
 | 
			
		||||
    "fadd z10.s, p5/m, z10.s, z22.s \n\t"  \
 | 
			
		||||
    "fadd z11.s, p5/m, z11.s, z23.s \n\t"  \
 | 
			
		||||
    :  \
 | 
			
		||||
    :  \
 | 
			
		||||
    : "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31" \
 | 
			
		||||
); 
 | 
			
		||||

601  Grid/simd/Fujitsu_A64FX_intrin_double.h  Normal file
@@ -0,0 +1,601 @@
/*************************************************************************************

    Grid physics library, www.github.com/paboyle/Grid

    Source file: Fujitsu_A64FX_intrin_double.h

    Copyright (C) 2020

Author: Nils Meyer <nils.meyer@ur.de>

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License along
    with this program; if not, write to the Free Software Foundation, Inc.,
    51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.

    See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/*  END LEGAL */
#define LOAD_CHIMU(base)               LOAD_CHIMU_INTERLEAVED_A64FXd(base)
#define PREFETCH_CHIMU_L1(A)           PREFETCH_CHIMU_L1_INTERNAL_A64FXd(A)
#define PREFETCH_GAUGE_L1(A)           PREFETCH_GAUGE_L1_INTERNAL_A64FXd(A)
#define PREFETCH_CHIMU_L2(A)           PREFETCH_CHIMU_L2_INTERNAL_A64FXd(A)
#define PREFETCH_GAUGE_L2(A)           PREFETCH_GAUGE_L2_INTERNAL_A64FXd(A)
#define PF_GAUGE(A)
#define PREFETCH_RESULT_L2_STORE(A)    PREFETCH_RESULT_L2_STORE_INTERNAL_A64FXd(A)
#define PREFETCH_RESULT_L1_STORE(A)    PREFETCH_RESULT_L1_STORE_INTERNAL_A64FXd(A)
#define PREFETCH1_CHIMU(A)             PREFETCH_CHIMU_L1(A)
#define PREFETCH_CHIMU(A)              PREFETCH_CHIMU_L1(A)
#define LOCK_GAUGE(A)
#define UNLOCK_GAUGE(A)
#define MASK_REGS                      DECLARATIONS_A64FXd
#define SAVE_RESULT(A,B)               RESULT_A64FXd(A); PREFETCH_RESULT_L2_STORE(B)
#define MULT_2SPIN_1(Dir)              MULT_2SPIN_1_A64FXd(Dir)
#define MULT_2SPIN_2                   MULT_2SPIN_2_A64FXd
#define LOAD_CHI(base)                 LOAD_CHI_A64FXd(base)
#define ADD_RESULT(base,basep)         LOAD_CHIMU(base); ADD_RESULT_INTERNAL_A64FXd; RESULT_A64FXd(base)
#define XP_PROJ                        XP_PROJ_A64FXd
#define YP_PROJ                        YP_PROJ_A64FXd
#define ZP_PROJ                        ZP_PROJ_A64FXd
#define TP_PROJ                        TP_PROJ_A64FXd
#define XM_PROJ                        XM_PROJ_A64FXd
#define YM_PROJ                        YM_PROJ_A64FXd
#define ZM_PROJ                        ZM_PROJ_A64FXd
#define TM_PROJ                        TM_PROJ_A64FXd
#define XP_RECON                       XP_RECON_A64FXd
#define XM_RECON                       XM_RECON_A64FXd
#define XM_RECON_ACCUM                 XM_RECON_ACCUM_A64FXd
#define YM_RECON_ACCUM                 YM_RECON_ACCUM_A64FXd
#define ZM_RECON_ACCUM                 ZM_RECON_ACCUM_A64FXd
#define TM_RECON_ACCUM                 TM_RECON_ACCUM_A64FXd
#define XP_RECON_ACCUM                 XP_RECON_ACCUM_A64FXd
#define YP_RECON_ACCUM                 YP_RECON_ACCUM_A64FXd
#define ZP_RECON_ACCUM                 ZP_RECON_ACCUM_A64FXd
#define TP_RECON_ACCUM                 TP_RECON_ACCUM_A64FXd
#define PERMUTE_DIR0                   0
#define PERMUTE_DIR1                   1
#define PERMUTE_DIR2                   2
#define PERMUTE_DIR3                   3
#define PERMUTE                        PERMUTE_A64FXd;
#define LOAD_TABLE(Dir)                if (Dir == 0) { LOAD_TABLE0; } else if (Dir == 1) { LOAD_TABLE1; } else if (Dir == 2) { LOAD_TABLE2; }
#define MAYBEPERM(Dir,perm)            if (Dir != 3) { if (perm) { PERMUTE; } }
// DECLARATIONS
#define DECLARATIONS_A64FXd  \
    const uint64_t lut[4][8] = { \
        {4, 5, 6, 7, 0, 1, 2, 3}, \
        {2, 3, 0, 1, 6, 7, 4, 5}, \
        {1, 0, 3, 2, 5, 4, 7, 6}, \
        {0, 1, 2, 4, 5, 6, 7, 8} };\
    svfloat64_t result_00;        \
    svfloat64_t result_01;        \
    svfloat64_t result_02;        \
    svfloat64_t result_10;        \
    svfloat64_t result_11;        \
    svfloat64_t result_12;        \
    svfloat64_t result_20;        \
    svfloat64_t result_21;        \
    svfloat64_t result_22;        \
    svfloat64_t result_30;        \
    svfloat64_t result_31;        \
    svfloat64_t result_32;        \
    svfloat64_t Chi_00;        \
    svfloat64_t Chi_01;        \
    svfloat64_t Chi_02;        \
    svfloat64_t Chi_10;        \
    svfloat64_t Chi_11;        \
    svfloat64_t Chi_12;        \
    svfloat64_t UChi_00;        \
    svfloat64_t UChi_01;        \
    svfloat64_t UChi_02;        \
    svfloat64_t UChi_10;        \
    svfloat64_t UChi_11;        \
    svfloat64_t UChi_12;        \
    svfloat64_t U_00;        \
    svfloat64_t U_10;        \
    svfloat64_t U_20;        \
    svfloat64_t U_01;        \
    svfloat64_t U_11;        \
    svfloat64_t U_21;        \
    svbool_t pg1;        \
    pg1 = svptrue_b64();        \
    svuint64_t table0; \
    svfloat64_t zero0;        \
    zero0 = svdup_f64(0.); 

#define Chimu_00 Chi_00
#define Chimu_01 Chi_01
#define Chimu_02 Chi_02
#define Chimu_10 Chi_10
#define Chimu_11 Chi_11
#define Chimu_12 Chi_12
#define Chimu_20 UChi_00
#define Chimu_21 UChi_01
#define Chimu_22 UChi_02
#define Chimu_30 UChi_10
#define Chimu_31 UChi_11
#define Chimu_32 UChi_12
// RESULT
#define RESULT_A64FXd(base)  \
{ \
    svst1(pg1, (float64_t*)(base + 2 * 3 * 64 + -6 * 64), result_00);  \
    svst1(pg1, (float64_t*)(base + 2 * 3 * 64 + -5 * 64), result_01);  \
    svst1(pg1, (float64_t*)(base + 2 * 3 * 64 + -4 * 64), result_02);  \
    svst1(pg1, (float64_t*)(base + 2 * 3 * 64 + -3 * 64), result_10);  \
    svst1(pg1, (float64_t*)(base + 2 * 3 * 64 + -2 * 64), result_11);  \
    svst1(pg1, (float64_t*)(base + 2 * 3 * 64 + -1 * 64), result_12);  \
    svst1(pg1, (float64_t*)(base + 2 * 3 * 64 + 0 * 64), result_20);  \
    svst1(pg1, (float64_t*)(base + 2 * 3 * 64 + 1 * 64), result_21);  \
    svst1(pg1, (float64_t*)(base + 2 * 3 * 64 + 2 * 64), result_22);  \
    svst1(pg1, (float64_t*)(base + 2 * 3 * 64 + 3 * 64), result_30);  \
    svst1(pg1, (float64_t*)(base + 2 * 3 * 64 + 4 * 64), result_31);  \
    svst1(pg1, (float64_t*)(base + 2 * 3 * 64 + 5 * 64), result_32);  \
}
// PREFETCH_CHIMU_L2 (prefetch to L2)
 | 
			
		||||
#define PREFETCH_CHIMU_L2_INTERNAL_A64FXd(base)  \
 | 
			
		||||
{ \
 | 
			
		||||
    svprfd(pg1, (int64_t*)(base + 0), SV_PLDL2STRM); \
 | 
			
		||||
    svprfd(pg1, (int64_t*)(base + 256), SV_PLDL2STRM); \
 | 
			
		||||
    svprfd(pg1, (int64_t*)(base + 512), SV_PLDL2STRM); \
 | 
			
		||||
}
 | 
			
		||||
// PREFETCH_CHIMU_L1 (prefetch to L1)
 | 
			
		||||
#define PREFETCH_CHIMU_L1_INTERNAL_A64FXd(base)  \
 | 
			
		||||
{ \
 | 
			
		||||
    svprfd(pg1, (int64_t*)(base + 0), SV_PLDL1STRM); \
 | 
			
		||||
    svprfd(pg1, (int64_t*)(base + 256), SV_PLDL1STRM); \
 | 
			
		||||
    svprfd(pg1, (int64_t*)(base + 512), SV_PLDL1STRM); \
 | 
			
		||||
}
 | 
			
		||||
// PREFETCH_GAUGE_L2 (prefetch to L2)
 | 
			
		||||
#define PREFETCH_GAUGE_L2_INTERNAL_A64FXd(A)  \
 | 
			
		||||
{ \
 | 
			
		||||
    const auto & ref(U[sUn](A)); uint64_t baseU = (uint64_t)&ref + 3 * 3 * 64; \
 | 
			
		||||
    svprfd(pg1, (int64_t*)(baseU + -256), SV_PLDL2STRM); \
 | 
			
		||||
    svprfd(pg1, (int64_t*)(baseU + 0), SV_PLDL2STRM); \
 | 
			
		||||
    svprfd(pg1, (int64_t*)(baseU + 256), SV_PLDL2STRM); \
 | 
			
		||||
    svprfd(pg1, (int64_t*)(baseU + 512), SV_PLDL2STRM); \
 | 
			
		||||
    svprfd(pg1, (int64_t*)(baseU + 768), SV_PLDL2STRM); \
 | 
			
		||||
    svprfd(pg1, (int64_t*)(baseU + 1024), SV_PLDL2STRM); \
 | 
			
		||||
    svprfd(pg1, (int64_t*)(baseU + 1280), SV_PLDL2STRM); \
 | 
			
		||||
    svprfd(pg1, (int64_t*)(baseU + 1536), SV_PLDL2STRM); \
 | 
			
		||||
    svprfd(pg1, (int64_t*)(baseU + 1792), SV_PLDL2STRM); \
 | 
			
		||||
}
 | 
			
		||||
// PREFETCH_GAUGE_L1 (prefetch to L1)
 | 
			
		||||
#define PREFETCH_GAUGE_L1_INTERNAL_A64FXd(A)  \
 | 
			
		||||
{ \
 | 
			
		||||
    const auto & ref(U[sU](A)); uint64_t baseU = (uint64_t)&ref; \
 | 
			
		||||
    svprfd(pg1, (int64_t*)(baseU + 0), SV_PLDL1STRM); \
 | 
			
		||||
    svprfd(pg1, (int64_t*)(baseU + 256), SV_PLDL1STRM); \
 | 
			
		||||
    svprfd(pg1, (int64_t*)(baseU + 512), SV_PLDL1STRM); \
 | 
			
		||||
}
 | 
			
		||||
// LOAD_CHI
 | 
			
		||||
#define LOAD_CHI_A64FXd(base)  \
 | 
			
		||||
{ \
 | 
			
		||||
    Chi_00 = svld1(pg1, (float64_t*)(base + 0 * 64));  \
 | 
			
		||||
    Chi_01 = svld1(pg1, (float64_t*)(base + 1 * 64));  \
 | 
			
		||||
    Chi_02 = svld1(pg1, (float64_t*)(base + 2 * 64));  \
 | 
			
		||||
    Chi_10 = svld1(pg1, (float64_t*)(base + 3 * 64));  \
 | 
			
		||||
    Chi_11 = svld1(pg1, (float64_t*)(base + 4 * 64));  \
 | 
			
		||||
    Chi_12 = svld1(pg1, (float64_t*)(base + 5 * 64));  \
 | 
			
		||||
}
 | 
			
		||||
// LOAD_CHIMU
#define LOAD_CHIMU_INTERLEAVED_A64FXd(base)  \
{ \
    Chimu_00 = svld1(pg1, (float64_t*)(base + 2 * 3 * 64 + -6 * 64));  \
    Chimu_30 = svld1(pg1, (float64_t*)(base + 2 * 3 * 64 + 3 * 64));  \
    Chimu_10 = svld1(pg1, (float64_t*)(base + 2 * 3 * 64 + -3 * 64));  \
    Chimu_20 = svld1(pg1, (float64_t*)(base + 2 * 3 * 64 + 0 * 64));  \
    Chimu_01 = svld1(pg1, (float64_t*)(base + 2 * 3 * 64 + -5 * 64));  \
    Chimu_31 = svld1(pg1, (float64_t*)(base + 2 * 3 * 64 + 4 * 64));  \
    Chimu_11 = svld1(pg1, (float64_t*)(base + 2 * 3 * 64 + -2 * 64));  \
    Chimu_21 = svld1(pg1, (float64_t*)(base + 2 * 3 * 64 + 1 * 64));  \
    Chimu_02 = svld1(pg1, (float64_t*)(base + 2 * 3 * 64 + -4 * 64));  \
    Chimu_32 = svld1(pg1, (float64_t*)(base + 2 * 3 * 64 + 5 * 64));  \
    Chimu_12 = svld1(pg1, (float64_t*)(base + 2 * 3 * 64 + -1 * 64));  \
    Chimu_22 = svld1(pg1, (float64_t*)(base + 2 * 3 * 64 + 2 * 64));  \
}
// LOAD_CHIMU_0213
#define LOAD_CHIMU_0213_A64FXd  \
{ \
    const SiteSpinor & ref(in[offset]); \
    Chimu_00 = svld1(pg1, (float64_t*)(base + 2 * 3 * 64 + -6 * 64));  \
    Chimu_20 = svld1(pg1, (float64_t*)(base + 2 * 3 * 64 + 0 * 64));  \
    Chimu_01 = svld1(pg1, (float64_t*)(base + 2 * 3 * 64 + -5 * 64));  \
    Chimu_21 = svld1(pg1, (float64_t*)(base + 2 * 3 * 64 + 1 * 64));  \
    Chimu_02 = svld1(pg1, (float64_t*)(base + 2 * 3 * 64 + -4 * 64));  \
    Chimu_22 = svld1(pg1, (float64_t*)(base + 2 * 3 * 64 + 2 * 64));  \
    Chimu_10 = svld1(pg1, (float64_t*)(base + 2 * 3 * 64 + -3 * 64));  \
    Chimu_30 = svld1(pg1, (float64_t*)(base + 2 * 3 * 64 + 3 * 64));  \
    Chimu_11 = svld1(pg1, (float64_t*)(base + 2 * 3 * 64 + -2 * 64));  \
    Chimu_31 = svld1(pg1, (float64_t*)(base + 2 * 3 * 64 + 4 * 64));  \
    Chimu_12 = svld1(pg1, (float64_t*)(base + 2 * 3 * 64 + -1 * 64));  \
    Chimu_32 = svld1(pg1, (float64_t*)(base + 2 * 3 * 64 + 5 * 64));  \
}
// LOAD_CHIMU_0312
#define LOAD_CHIMU_0312_A64FXd  \
{ \
    const SiteSpinor & ref(in[offset]); \
    Chimu_00 = svld1(pg1, (float64_t*)(base + 2 * 3 * 64 + -6 * 64));  \
    Chimu_30 = svld1(pg1, (float64_t*)(base + 2 * 3 * 64 + 3 * 64));  \
    Chimu_01 = svld1(pg1, (float64_t*)(base + 2 * 3 * 64 + -5 * 64));  \
    Chimu_31 = svld1(pg1, (float64_t*)(base + 2 * 3 * 64 + 4 * 64));  \
    Chimu_02 = svld1(pg1, (float64_t*)(base + 2 * 3 * 64 + -4 * 64));  \
    Chimu_32 = svld1(pg1, (float64_t*)(base + 2 * 3 * 64 + 5 * 64));  \
    Chimu_10 = svld1(pg1, (float64_t*)(base + 2 * 3 * 64 + -3 * 64));  \
    Chimu_20 = svld1(pg1, (float64_t*)(base + 2 * 3 * 64 + 0 * 64));  \
    Chimu_11 = svld1(pg1, (float64_t*)(base + 2 * 3 * 64 + -2 * 64));  \
    Chimu_21 = svld1(pg1, (float64_t*)(base + 2 * 3 * 64 + 1 * 64));  \
    Chimu_12 = svld1(pg1, (float64_t*)(base + 2 * 3 * 64 + -1 * 64));  \
    Chimu_22 = svld1(pg1, (float64_t*)(base + 2 * 3 * 64 + 2 * 64));  \
}
// LOAD_TABLE0
#define LOAD_TABLE0  \
    table0 = svld1(pg1, (uint64_t*)&lut[0]);  

// LOAD_TABLE1
#define LOAD_TABLE1  \
    table0 = svld1(pg1, (uint64_t*)&lut[1]);  

// LOAD_TABLE2
#define LOAD_TABLE2  \
    table0 = svld1(pg1, (uint64_t*)&lut[2]);  

// LOAD_TABLE3
#define LOAD_TABLE3  \
    table0 = svld1(pg1, (uint64_t*)&lut[3]);  

// PERMUTE
#define PERMUTE_A64FXd  \
    Chi_00 = svtbl(Chi_00, table0);    \
    Chi_01 = svtbl(Chi_01, table0);    \
    Chi_02 = svtbl(Chi_02, table0);    \
    Chi_10 = svtbl(Chi_10, table0);    \
    Chi_11 = svtbl(Chi_11, table0);    \
    Chi_12 = svtbl(Chi_12, table0);    
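
The lut rows above drive svtbl lane permutations: when a stencil neighbour lives in a different SIMD lane of the same vector, the Wilson kernels that include this header call LOAD_TABLE/PERMUTE to reorder the complex lanes before the gauge multiply. For double precision a 512-bit register holds four complex numbers, and LOAD_TABLE above only dispatches rows 0-2. A minimal standalone sketch of the same svtbl idiom, assuming an A64FX/SVE toolchain with 512-bit vectors (illustrative only, not part of the header):

// Hypothetical demo: apply lut[0] (swap the two 256-bit halves) with svtbl,
// the same operation PERMUTE_A64FXd performs after LOAD_TABLE0.
#include <arm_sve.h>
#include <stdio.h>

int main(void) {
  const uint64_t idx[8] = {4, 5, 6, 7, 0, 1, 2, 3};   // lut[0] above
  double in[8]  = {0, 1, 2, 3, 4, 5, 6, 7};           // four complex doubles
  double out[8];
  svbool_t pg = svptrue_b64();
  svuint64_t table = svld1(pg, idx);                  // LOAD_TABLE0
  svfloat64_t v = svld1(pg, in);
  v = svtbl(v, table);                                // lane permutation
  svst1(pg, out, v);
  for (int i = 0; i < 8; i++) printf("%g ", out[i]);  // prints 4 5 6 7 0 1 2 3
  printf("\n");
  return 0;
}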

// LOAD_GAUGE
#define LOAD_GAUGE  \
    const auto & ref(U[sU](A)); uint64_t baseU = (uint64_t)&ref; \
{ \
    U_00 = svld1(pg1, (float64_t*)(baseU + 2 * 3 * 64 + -6 * 64));  \
    U_10 = svld1(pg1, (float64_t*)(baseU + 2 * 3 * 64 + -3 * 64));  \
    U_20 = svld1(pg1, (float64_t*)(baseU + 2 * 3 * 64 + 0 * 64));  \
    U_01 = svld1(pg1, (float64_t*)(baseU + 2 * 3 * 64 + -5 * 64));  \
    U_11 = svld1(pg1, (float64_t*)(baseU + 2 * 3 * 64 + -2 * 64));  \
    U_21 = svld1(pg1, (float64_t*)(baseU + 2 * 3 * 64 + 1 * 64));  \
}
// MULT_2SPIN
#define MULT_2SPIN_1_A64FXd(A)  \
{ \
    const auto & ref(U[sU](A)); uint64_t baseU = (uint64_t)&ref; \
    U_00 = svld1(pg1, (float64_t*)(baseU + 2 * 3 * 64 + -6 * 64));  \
    U_10 = svld1(pg1, (float64_t*)(baseU + 2 * 3 * 64 + -3 * 64));  \
    U_20 = svld1(pg1, (float64_t*)(baseU + 2 * 3 * 64 + 0 * 64));  \
    U_01 = svld1(pg1, (float64_t*)(baseU + 2 * 3 * 64 + -5 * 64));  \
    U_11 = svld1(pg1, (float64_t*)(baseU + 2 * 3 * 64 + -2 * 64));  \
    U_21 = svld1(pg1, (float64_t*)(baseU + 2 * 3 * 64 + 1 * 64));  \
    UChi_00 = svcmla_x(pg1, zero0, U_00, Chi_00, 0); \
    UChi_10 = svcmla_x(pg1, zero0, U_00, Chi_10, 0); \
    UChi_01 = svcmla_x(pg1, zero0, U_10, Chi_00, 0); \
    UChi_11 = svcmla_x(pg1, zero0, U_10, Chi_10, 0); \
    UChi_02 = svcmla_x(pg1, zero0, U_20, Chi_00, 0); \
    UChi_12 = svcmla_x(pg1, zero0, U_20, Chi_10, 0); \
    UChi_00 = svcmla_x(pg1, UChi_00, U_00, Chi_00, 90); \
    UChi_10 = svcmla_x(pg1, UChi_10, U_00, Chi_10, 90); \
    UChi_01 = svcmla_x(pg1, UChi_01, U_10, Chi_00, 90); \
    UChi_11 = svcmla_x(pg1, UChi_11, U_10, Chi_10, 90); \
    UChi_02 = svcmla_x(pg1, UChi_02, U_20, Chi_00, 90); \
    UChi_12 = svcmla_x(pg1, UChi_12, U_20, Chi_10, 90); \
    U_00 = svld1(pg1, (float64_t*)(baseU + 2 * 3 * 64 + -4 * 64));  \
    U_10 = svld1(pg1, (float64_t*)(baseU + 2 * 3 * 64 + -1 * 64));  \
    U_20 = svld1(pg1, (float64_t*)(baseU + 2 * 3 * 64 + 2 * 64));  \
}
// MULT_2SPIN_BACKEND
#define MULT_2SPIN_2_A64FXd  \
{ \
    UChi_00 = svcmla_x(pg1, UChi_00, U_01, Chi_01, 0); \
    UChi_10 = svcmla_x(pg1, UChi_10, U_01, Chi_11, 0); \
    UChi_01 = svcmla_x(pg1, UChi_01, U_11, Chi_01, 0); \
    UChi_11 = svcmla_x(pg1, UChi_11, U_11, Chi_11, 0); \
    UChi_02 = svcmla_x(pg1, UChi_02, U_21, Chi_01, 0); \
    UChi_12 = svcmla_x(pg1, UChi_12, U_21, Chi_11, 0); \
    UChi_00 = svcmla_x(pg1, UChi_00, U_01, Chi_01, 90); \
    UChi_10 = svcmla_x(pg1, UChi_10, U_01, Chi_11, 90); \
    UChi_01 = svcmla_x(pg1, UChi_01, U_11, Chi_01, 90); \
    UChi_11 = svcmla_x(pg1, UChi_11, U_11, Chi_11, 90); \
    UChi_02 = svcmla_x(pg1, UChi_02, U_21, Chi_01, 90); \
    UChi_12 = svcmla_x(pg1, UChi_12, U_21, Chi_11, 90); \
    UChi_00 = svcmla_x(pg1, UChi_00, U_00, Chi_02, 0); \
    UChi_10 = svcmla_x(pg1, UChi_10, U_00, Chi_12, 0); \
    UChi_01 = svcmla_x(pg1, UChi_01, U_10, Chi_02, 0); \
    UChi_11 = svcmla_x(pg1, UChi_11, U_10, Chi_12, 0); \
    UChi_02 = svcmla_x(pg1, UChi_02, U_20, Chi_02, 0); \
    UChi_12 = svcmla_x(pg1, UChi_12, U_20, Chi_12, 0); \
    UChi_00 = svcmla_x(pg1, UChi_00, U_00, Chi_02, 90); \
    UChi_10 = svcmla_x(pg1, UChi_10, U_00, Chi_12, 90); \
    UChi_01 = svcmla_x(pg1, UChi_01, U_10, Chi_02, 90); \
    UChi_11 = svcmla_x(pg1, UChi_11, U_10, Chi_12, 90); \
    UChi_02 = svcmla_x(pg1, UChi_02, U_20, Chi_02, 90); \
    UChi_12 = svcmla_x(pg1, UChi_12, U_20, Chi_12, 90); \
}
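
MULT_2SPIN_1/MULT_2SPIN_2 assemble the SU(3) matrix times half-spinor product from FCMLA pairs: for each term, the svcmla_x call with rotation 0 accumulates Re(U)*Chi and the matching rotation-90 call accumulates i*Im(U)*Chi, so together the pair adds one full complex product into the UChi accumulator. A minimal standalone sketch of that idiom, assuming <arm_sve.h> and a 512-bit SVE target (values are made up for illustration):

// Hypothetical demo of the rotation-0 / rotation-90 FCMLA pairing.
#include <arm_sve.h>
#include <stdio.h>

int main(void) {
  double a[8] = {1, 2, 3, 4, 5, 6, 7, 8};   // a = (1+2i, 3+4i, 5+6i, 7+8i)
  double b[8] = {2, 1, 0, 1, 1, 0, 2, 2};   // b = (2+1i, 0+1i, 1+0i, 2+2i)
  double c[8];
  svbool_t pg = svptrue_b64();
  svfloat64_t va = svld1(pg, a), vb = svld1(pg, b);
  svfloat64_t acc = svdup_f64(0.);          // like zero0 above
  acc = svcmla_x(pg, acc, va, vb, 0);       // acc += Re(a) * b
  acc = svcmla_x(pg, acc, va, vb, 90);      // acc += i * Im(a) * b  => acc = a*b
  svst1(pg, c, acc);
  for (int i = 0; i < 4; i++) printf("%g%+gi\n", c[2*i], c[2*i+1]);
  return 0;
}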
// XP_PROJ
#define XP_PROJ_A64FXd  \
{ \
    Chi_00 = svcadd_x(pg1, Chimu_00, Chimu_30, 90);   \
    Chi_01 = svcadd_x(pg1, Chimu_01, Chimu_31, 90);   \
    Chi_02 = svcadd_x(pg1, Chimu_02, Chimu_32, 90);   \
    Chi_10 = svcadd_x(pg1, Chimu_10, Chimu_20, 90);   \
    Chi_11 = svcadd_x(pg1, Chimu_11, Chimu_21, 90);   \
    Chi_12 = svcadd_x(pg1, Chimu_12, Chimu_22, 90);   \
}
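
The *_PROJ blocks are the Wilson spin projections, built from FCADD: svcadd_x with rotation 90 computes a + i*b per complex lane and rotation 270 computes a - i*b. The block above therefore forms Chi_0c = Chimu_0c + i*Chimu_3c and Chi_1c = Chimu_1c + i*Chimu_2c, which matches Grid's spProjXp, while XM_PROJ further down uses rotation 270 for the opposite sign. A small standalone check of the two rotations, assuming <arm_sve.h> and 512-bit SVE (illustrative only):

// Hypothetical demo: the FCADD rotations used by the projectors.
#include <arm_sve.h>
#include <stdio.h>

int main(void) {
  double a[8] = {1, 0, 1, 0, 1, 0, 1, 0};   // four copies of 1+0i
  double b[8] = {0, 2, 0, 2, 0, 2, 0, 2};   // four copies of 0+2i
  double p[8], m[8];
  svbool_t pg = svptrue_b64();
  svfloat64_t va = svld1(pg, a), vb = svld1(pg, b);
  svst1(pg, p, svcadd_x(pg, va, vb, 90));   // a + i*b = 1 + i*(2i) = -1
  svst1(pg, m, svcadd_x(pg, va, vb, 270));  // a - i*b = 1 - i*(2i) =  3
  printf("rot  90: %g%+gi\n", p[0], p[1]);
  printf("rot 270: %g%+gi\n", m[0], m[1]);
  return 0;
}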
// XP_RECON
#define XP_RECON_A64FXd  \
    result_20 = svcadd_x(pg1, zero0, UChi_10, 270);   \
    result_21 = svcadd_x(pg1, zero0, UChi_11, 270);   \
    result_22 = svcadd_x(pg1, zero0, UChi_12, 270);   \
    result_30 = svcadd_x(pg1, zero0, UChi_00, 270);   \
    result_31 = svcadd_x(pg1, zero0, UChi_01, 270);   \
    result_32 = svcadd_x(pg1, zero0, UChi_02, 270);   \
    result_00 = UChi_00;        \
    result_01 = UChi_01;        \
    result_02 = UChi_02;        \
    result_10 = UChi_10;        \
    result_11 = UChi_11;        \
    result_12 = UChi_12;        

// XP_RECON_ACCUM
#define XP_RECON_ACCUM_A64FXd  \
    result_30 = svcadd_x(pg1, result_30, UChi_00, 270);   \
    result_00 = svadd_x(pg1, result_00, UChi_00); \
    result_31 = svcadd_x(pg1, result_31, UChi_01, 270);   \
    result_01 = svadd_x(pg1, result_01, UChi_01); \
    result_32 = svcadd_x(pg1, result_32, UChi_02, 270);   \
    result_02 = svadd_x(pg1, result_02, UChi_02); \
    result_20 = svcadd_x(pg1, result_20, UChi_10, 270);   \
    result_10 = svadd_x(pg1, result_10, UChi_10); \
    result_21 = svcadd_x(pg1, result_21, UChi_11, 270);   \
    result_11 = svadd_x(pg1, result_11, UChi_11); \
    result_22 = svcadd_x(pg1, result_22, UChi_12, 270);   \
    result_12 = svadd_x(pg1, result_12, UChi_12); 

// YP_PROJ
#define YP_PROJ_A64FXd  \
{ \
    Chi_00 = svsub_x(pg1, Chimu_00, Chimu_30);  \
    Chi_01 = svsub_x(pg1, Chimu_01, Chimu_31);  \
    Chi_02 = svsub_x(pg1, Chimu_02, Chimu_32);  \
    Chi_10 = svadd_x(pg1, Chimu_10, Chimu_20);  \
    Chi_11 = svadd_x(pg1, Chimu_11, Chimu_21);  \
    Chi_12 = svadd_x(pg1, Chimu_12, Chimu_22);  \
}
// ZP_PROJ
#define ZP_PROJ_A64FXd  \
{ \
    Chi_00 = svcadd_x(pg1, Chimu_00, Chimu_20, 90);   \
    Chi_01 = svcadd_x(pg1, Chimu_01, Chimu_21, 90);   \
    Chi_02 = svcadd_x(pg1, Chimu_02, Chimu_22, 90);   \
    Chi_10 = svcadd_x(pg1, Chimu_10, Chimu_30, 270);   \
    Chi_11 = svcadd_x(pg1, Chimu_11, Chimu_31, 270);   \
    Chi_12 = svcadd_x(pg1, Chimu_12, Chimu_32, 270);   \
}
// TP_PROJ
#define TP_PROJ_A64FXd  \
{ \
    Chi_00 = svadd_x(pg1, Chimu_00, Chimu_20);  \
    Chi_01 = svadd_x(pg1, Chimu_01, Chimu_21);  \
    Chi_02 = svadd_x(pg1, Chimu_02, Chimu_22);  \
    Chi_10 = svadd_x(pg1, Chimu_10, Chimu_30);  \
    Chi_11 = svadd_x(pg1, Chimu_11, Chimu_31);  \
    Chi_12 = svadd_x(pg1, Chimu_12, Chimu_32);  \
}
// XM_PROJ
#define XM_PROJ_A64FXd  \
{ \
    Chi_00 = svcadd_x(pg1, Chimu_00, Chimu_30, 270);   \
    Chi_01 = svcadd_x(pg1, Chimu_01, Chimu_31, 270);   \
    Chi_02 = svcadd_x(pg1, Chimu_02, Chimu_32, 270);   \
    Chi_10 = svcadd_x(pg1, Chimu_10, Chimu_20, 270);   \
    Chi_11 = svcadd_x(pg1, Chimu_11, Chimu_21, 270);   \
    Chi_12 = svcadd_x(pg1, Chimu_12, Chimu_22, 270);   \
}
// XM_RECON
#define XM_RECON_A64FXd  \
    result_20 = svcadd_x(pg1, zero0, UChi_10, 90);   \
    result_21 = svcadd_x(pg1, zero0, UChi_11, 90);   \
    result_22 = svcadd_x(pg1, zero0, UChi_12, 90);   \
    result_30 = svcadd_x(pg1, zero0, UChi_00, 90);   \
    result_31 = svcadd_x(pg1, zero0, UChi_01, 90);   \
    result_32 = svcadd_x(pg1, zero0, UChi_02, 90);   \
    result_00 = UChi_00;        \
    result_01 = UChi_01;        \
    result_02 = UChi_02;        \
    result_10 = UChi_10;        \
    result_11 = UChi_11;        \
    result_12 = UChi_12;        

// YM_PROJ
#define YM_PROJ_A64FXd  \
{ \
    Chi_00 = svadd_x(pg1, Chimu_00, Chimu_30);  \
    Chi_01 = svadd_x(pg1, Chimu_01, Chimu_31);  \
    Chi_02 = svadd_x(pg1, Chimu_02, Chimu_32);  \
    Chi_10 = svsub_x(pg1, Chimu_10, Chimu_20);  \
    Chi_11 = svsub_x(pg1, Chimu_11, Chimu_21);  \
    Chi_12 = svsub_x(pg1, Chimu_12, Chimu_22);  \
}
// ZM_PROJ
#define ZM_PROJ_A64FXd  \
{ \
    Chi_00 = svcadd_x(pg1, Chimu_00, Chimu_20, 270);   \
    Chi_01 = svcadd_x(pg1, Chimu_01, Chimu_21, 270);   \
    Chi_02 = svcadd_x(pg1, Chimu_02, Chimu_22, 270);   \
    Chi_10 = svcadd_x(pg1, Chimu_10, Chimu_30, 90);   \
    Chi_11 = svcadd_x(pg1, Chimu_11, Chimu_31, 90);   \
    Chi_12 = svcadd_x(pg1, Chimu_12, Chimu_32, 90);   \
}
// TM_PROJ
#define TM_PROJ_A64FXd  \
{ \
    Chi_00 = svsub_x(pg1, Chimu_00, Chimu_20);  \
    Chi_01 = svsub_x(pg1, Chimu_01, Chimu_21);  \
    Chi_02 = svsub_x(pg1, Chimu_02, Chimu_22);  \
    Chi_10 = svsub_x(pg1, Chimu_10, Chimu_30);  \
    Chi_11 = svsub_x(pg1, Chimu_11, Chimu_31);  \
    Chi_12 = svsub_x(pg1, Chimu_12, Chimu_32);  \
}
// XM_RECON_ACCUM
#define XM_RECON_ACCUM_A64FXd  \
    result_30 = svcadd_x(pg1, result_30, UChi_00, 90);   \
    result_31 = svcadd_x(pg1, result_31, UChi_01, 90);   \
    result_32 = svcadd_x(pg1, result_32, UChi_02, 90);   \
    result_20 = svcadd_x(pg1, result_20, UChi_10, 90);   \
    result_21 = svcadd_x(pg1, result_21, UChi_11, 90);   \
    result_22 = svcadd_x(pg1, result_22, UChi_12, 90);   \
    result_00 = svadd_x(pg1, result_00, UChi_00); \
    result_01 = svadd_x(pg1, result_01, UChi_01); \
    result_02 = svadd_x(pg1, result_02, UChi_02); \
    result_10 = svadd_x(pg1, result_10, UChi_10); \
    result_11 = svadd_x(pg1, result_11, UChi_11); \
    result_12 = svadd_x(pg1, result_12, UChi_12); 

// YP_RECON_ACCUM
#define YP_RECON_ACCUM_A64FXd  \
    result_00 = svadd_x(pg1, result_00, UChi_00); \
    result_30 = svsub_x(pg1, result_30, UChi_00); \
    result_01 = svadd_x(pg1, result_01, UChi_01); \
    result_31 = svsub_x(pg1, result_31, UChi_01); \
    result_02 = svadd_x(pg1, result_02, UChi_02); \
    result_32 = svsub_x(pg1, result_32, UChi_02); \
    result_10 = svadd_x(pg1, result_10, UChi_10); \
    result_20 = svadd_x(pg1, result_20, UChi_10); \
    result_11 = svadd_x(pg1, result_11, UChi_11); \
    result_21 = svadd_x(pg1, result_21, UChi_11); \
    result_12 = svadd_x(pg1, result_12, UChi_12); \
    result_22 = svadd_x(pg1, result_22, UChi_12); 

// YM_RECON_ACCUM
#define YM_RECON_ACCUM_A64FXd  \
    result_00 = svadd_x(pg1, result_00, UChi_00); \
    result_30 = svadd_x(pg1, result_30, UChi_00); \
    result_01 = svadd_x(pg1, result_01, UChi_01); \
    result_31 = svadd_x(pg1, result_31, UChi_01); \
    result_02 = svadd_x(pg1, result_02, UChi_02); \
    result_32 = svadd_x(pg1, result_32, UChi_02); \
    result_10 = svadd_x(pg1, result_10, UChi_10); \
    result_20 = svsub_x(pg1, result_20, UChi_10); \
    result_11 = svadd_x(pg1, result_11, UChi_11); \
    result_21 = svsub_x(pg1, result_21, UChi_11); \
    result_12 = svadd_x(pg1, result_12, UChi_12); \
    result_22 = svsub_x(pg1, result_22, UChi_12); 

// ZP_RECON_ACCUM
#define ZP_RECON_ACCUM_A64FXd  \
    result_20 = svcadd_x(pg1, result_20, UChi_00, 270);   \
    result_00 = svadd_x(pg1, result_00, UChi_00); \
    result_21 = svcadd_x(pg1, result_21, UChi_01, 270);   \
    result_01 = svadd_x(pg1, result_01, UChi_01); \
    result_22 = svcadd_x(pg1, result_22, UChi_02, 270);   \
    result_02 = svadd_x(pg1, result_02, UChi_02); \
    result_30 = svcadd_x(pg1, result_30, UChi_10, 90);   \
    result_10 = svadd_x(pg1, result_10, UChi_10); \
    result_31 = svcadd_x(pg1, result_31, UChi_11, 90);   \
    result_11 = svadd_x(pg1, result_11, UChi_11); \
    result_32 = svcadd_x(pg1, result_32, UChi_12, 90);   \
    result_12 = svadd_x(pg1, result_12, UChi_12); 

// ZM_RECON_ACCUM
#define ZM_RECON_ACCUM_A64FXd  \
    result_20 = svcadd_x(pg1, result_20, UChi_00, 90);   \
    result_00 = svadd_x(pg1, result_00, UChi_00); \
    result_21 = svcadd_x(pg1, result_21, UChi_01, 90);   \
    result_01 = svadd_x(pg1, result_01, UChi_01); \
    result_22 = svcadd_x(pg1, result_22, UChi_02, 90);   \
    result_02 = svadd_x(pg1, result_02, UChi_02); \
    result_30 = svcadd_x(pg1, result_30, UChi_10, 270);   \
    result_10 = svadd_x(pg1, result_10, UChi_10); \
    result_31 = svcadd_x(pg1, result_31, UChi_11, 270);   \
    result_11 = svadd_x(pg1, result_11, UChi_11); \
    result_32 = svcadd_x(pg1, result_32, UChi_12, 270);   \
    result_12 = svadd_x(pg1, result_12, UChi_12); 

// TP_RECON_ACCUM
#define TP_RECON_ACCUM_A64FXd  \
    result_00 = svadd_x(pg1, result_00, UChi_00); \
    result_20 = svadd_x(pg1, result_20, UChi_00); \
    result_01 = svadd_x(pg1, result_01, UChi_01); \
    result_21 = svadd_x(pg1, result_21, UChi_01); \
    result_02 = svadd_x(pg1, result_02, UChi_02); \
    result_22 = svadd_x(pg1, result_22, UChi_02); \
    result_10 = svadd_x(pg1, result_10, UChi_10); \
    result_30 = svadd_x(pg1, result_30, UChi_10); \
    result_11 = svadd_x(pg1, result_11, UChi_11); \
    result_31 = svadd_x(pg1, result_31, UChi_11); \
    result_12 = svadd_x(pg1, result_12, UChi_12); \
    result_32 = svadd_x(pg1, result_32, UChi_12); 

// TM_RECON_ACCUM
#define TM_RECON_ACCUM_A64FXd  \
    result_00 = svadd_x(pg1, result_00, UChi_00); \
    result_20 = svsub_x(pg1, result_20, UChi_00); \
    result_01 = svadd_x(pg1, result_01, UChi_01); \
    result_21 = svsub_x(pg1, result_21, UChi_01); \
    result_02 = svadd_x(pg1, result_02, UChi_02); \
    result_22 = svsub_x(pg1, result_22, UChi_02); \
    result_10 = svadd_x(pg1, result_10, UChi_10); \
    result_30 = svsub_x(pg1, result_30, UChi_10); \
    result_11 = svadd_x(pg1, result_11, UChi_11); \
    result_31 = svsub_x(pg1, result_31, UChi_11); \
    result_12 = svadd_x(pg1, result_12, UChi_12); \
    result_32 = svsub_x(pg1, result_32, UChi_12); 

// ZERO_PSI
#define ZERO_PSI_A64FXd  \
    result_00 = svdup_f64(0.); \
    result_01 = svdup_f64(0.); \
    result_02 = svdup_f64(0.); \
    result_10 = svdup_f64(0.); \
    result_11 = svdup_f64(0.); \
    result_12 = svdup_f64(0.); \
    result_20 = svdup_f64(0.); \
    result_21 = svdup_f64(0.); \
    result_22 = svdup_f64(0.); \
    result_30 = svdup_f64(0.); \
    result_31 = svdup_f64(0.); \
    result_32 = svdup_f64(0.); 

// PREFETCH_RESULT_L2_STORE (prefetch store to L2)
#define PREFETCH_RESULT_L2_STORE_INTERNAL_A64FXd(base)  \
{ \
    svprfd(pg1, (int64_t*)(base + 0), SV_PSTL2STRM); \
    svprfd(pg1, (int64_t*)(base + 256), SV_PSTL2STRM); \
    svprfd(pg1, (int64_t*)(base + 512), SV_PSTL2STRM); \
}
// PREFETCH_RESULT_L1_STORE (prefetch store to L1)
#define PREFETCH_RESULT_L1_STORE_INTERNAL_A64FXd(base)  \
{ \
    svprfd(pg1, (int64_t*)(base + 0), SV_PSTL1STRM); \
    svprfd(pg1, (int64_t*)(base + 256), SV_PSTL1STRM); \
    svprfd(pg1, (int64_t*)(base + 512), SV_PSTL1STRM); \
}
// ADD_RESULT_INTERNAL
#define ADD_RESULT_INTERNAL_A64FXd  \
    result_00 = svadd_x(pg1, result_00, Chimu_00); \
    result_01 = svadd_x(pg1, result_01, Chimu_01); \
    result_02 = svadd_x(pg1, result_02, Chimu_02); \
    result_10 = svadd_x(pg1, result_10, Chimu_10); \
    result_11 = svadd_x(pg1, result_11, Chimu_11); \
    result_12 = svadd_x(pg1, result_12, Chimu_12); \
    result_20 = svadd_x(pg1, result_20, Chimu_20); \
    result_21 = svadd_x(pg1, result_21, Chimu_21); \
    result_22 = svadd_x(pg1, result_22, Chimu_22); \
    result_30 = svadd_x(pg1, result_30, Chimu_30); \
    result_31 = svadd_x(pg1, result_31, Chimu_31); \
    result_32 = svadd_x(pg1, result_32, Chimu_32); 

Grid/simd/Fujitsu_A64FX_intrin_single.h (new file, 601 lines)
@@ -0,0 +1,601 @@
/*************************************************************************************

    Grid physics library, www.github.com/paboyle/Grid

    Source file: Fujitsu_A64FX_intrin_single.h

    Copyright (C) 2020

Author: Nils Meyer <nils.meyer@ur.de>

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License along
    with this program; if not, write to the Free Software Foundation, Inc.,
    51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.

    See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/*  END LEGAL */
#define LOAD_CHIMU(base)               LOAD_CHIMU_INTERLEAVED_A64FXf(base)  
#define PREFETCH_CHIMU_L1(A)           PREFETCH_CHIMU_L1_INTERNAL_A64FXf(A)  
#define PREFETCH_GAUGE_L1(A)           PREFETCH_GAUGE_L1_INTERNAL_A64FXf(A)  
#define PREFETCH_CHIMU_L2(A)           PREFETCH_CHIMU_L2_INTERNAL_A64FXf(A)  
#define PREFETCH_GAUGE_L2(A)           PREFETCH_GAUGE_L2_INTERNAL_A64FXf(A)  
#define PF_GAUGE(A)  
#define PREFETCH_RESULT_L2_STORE(A)    PREFETCH_RESULT_L2_STORE_INTERNAL_A64FXf(A)  
#define PREFETCH_RESULT_L1_STORE(A)    PREFETCH_RESULT_L1_STORE_INTERNAL_A64FXf(A)  
#define PREFETCH1_CHIMU(A)             PREFETCH_CHIMU_L1(A)  
#define PREFETCH_CHIMU(A)              PREFETCH_CHIMU_L1(A)  
#define LOCK_GAUGE(A)  
#define UNLOCK_GAUGE(A)  
#define MASK_REGS                      DECLARATIONS_A64FXf  
#define SAVE_RESULT(A,B)               RESULT_A64FXf(A); PREFETCH_RESULT_L2_STORE(B)  
#define MULT_2SPIN_1(Dir)              MULT_2SPIN_1_A64FXf(Dir)  
#define MULT_2SPIN_2                   MULT_2SPIN_2_A64FXf  
#define LOAD_CHI(base)                 LOAD_CHI_A64FXf(base)  
#define ADD_RESULT(base,basep)         LOAD_CHIMU(base); ADD_RESULT_INTERNAL_A64FXf; RESULT_A64FXf(base)  
#define XP_PROJ                        XP_PROJ_A64FXf  
#define YP_PROJ                        YP_PROJ_A64FXf  
#define ZP_PROJ                        ZP_PROJ_A64FXf  
#define TP_PROJ                        TP_PROJ_A64FXf  
#define XM_PROJ                        XM_PROJ_A64FXf  
#define YM_PROJ                        YM_PROJ_A64FXf  
#define ZM_PROJ                        ZM_PROJ_A64FXf  
#define TM_PROJ                        TM_PROJ_A64FXf  
#define XP_RECON                       XP_RECON_A64FXf  
#define XM_RECON                       XM_RECON_A64FXf  
#define XM_RECON_ACCUM                 XM_RECON_ACCUM_A64FXf  
#define YM_RECON_ACCUM                 YM_RECON_ACCUM_A64FXf  
#define ZM_RECON_ACCUM                 ZM_RECON_ACCUM_A64FXf  
#define TM_RECON_ACCUM                 TM_RECON_ACCUM_A64FXf  
#define XP_RECON_ACCUM                 XP_RECON_ACCUM_A64FXf  
#define YP_RECON_ACCUM                 YP_RECON_ACCUM_A64FXf  
#define ZP_RECON_ACCUM                 ZP_RECON_ACCUM_A64FXf  
#define TP_RECON_ACCUM                 TP_RECON_ACCUM_A64FXf  
#define PERMUTE_DIR0                   0  
#define PERMUTE_DIR1                   1  
#define PERMUTE_DIR2                   2  
#define PERMUTE_DIR3                   3  
#define PERMUTE                        PERMUTE_A64FXf;  
#define LOAD_TABLE(Dir)                if (Dir == 0) { LOAD_TABLE0; } else if (Dir == 1) { LOAD_TABLE1; } else if (Dir == 2) { LOAD_TABLE2; } else if (Dir == 3) { LOAD_TABLE3; }  
#define MAYBEPERM(A,perm)              if (perm) { PERMUTE; }  
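
Compared with the double-precision header earlier in this diff, this file carries 16-entry lut rows, a LOAD_TABLE that also dispatches Dir == 3, and a MAYBEPERM with no direction mask; a 512-bit SVE register holds sixteen float32 (eight complex) rather than eight float64 (four complex), which is presumably why one extra lane-permutation pattern is provided here. A tiny standalone probe of those lane counts, assuming <arm_sve.h> (on A64FX it prints 8 and 16):

// Hypothetical probe, not part of Grid: report SVE lane counts per precision.
#include <arm_sve.h>
#include <stdio.h>

int main(void) {
  printf("float64 lanes: %d (complex: %d)\n", (int)svcntd(), (int)svcntd() / 2);
  printf("float32 lanes: %d (complex: %d)\n", (int)svcntw(), (int)svcntw() / 2);
  return 0;
}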
// DECLARATIONS
#define DECLARATIONS_A64FXf  \
    const uint32_t lut[4][16] = { \
        {8, 9, 10, 11, 12, 13, 14, 15, 0, 1, 2, 3, 4, 5, 6, 7}, \
        {4, 5, 6, 7, 0, 1, 2, 3, 12, 13, 14, 15, 8, 9, 10, 11}, \
        {2, 3, 0, 1, 6, 7, 4, 5, 10, 11, 8, 9, 14, 15, 12, 13}, \
        {1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14} }; \
    svfloat32_t result_00;        \
    svfloat32_t result_01;        \
    svfloat32_t result_02;        \
    svfloat32_t result_10;        \
    svfloat32_t result_11;        \
    svfloat32_t result_12;        \
    svfloat32_t result_20;        \
    svfloat32_t result_21;        \
    svfloat32_t result_22;        \
    svfloat32_t result_30;        \
    svfloat32_t result_31;        \
    svfloat32_t result_32;        \
    svfloat32_t Chi_00;        \
    svfloat32_t Chi_01;        \
    svfloat32_t Chi_02;        \
    svfloat32_t Chi_10;        \
    svfloat32_t Chi_11;        \
    svfloat32_t Chi_12;        \
    svfloat32_t UChi_00;        \
    svfloat32_t UChi_01;        \
    svfloat32_t UChi_02;        \
    svfloat32_t UChi_10;        \
    svfloat32_t UChi_11;        \
    svfloat32_t UChi_12;        \
    svfloat32_t U_00;        \
    svfloat32_t U_10;        \
    svfloat32_t U_20;        \
    svfloat32_t U_01;        \
    svfloat32_t U_11;        \
    svfloat32_t U_21;        \
    svbool_t pg1;        \
    pg1 = svptrue_b32();        \
    svuint32_t table0; \
    svfloat32_t zero0;        \
    zero0 = svdup_f32(0.); 

#define Chimu_00 Chi_00  
#define Chimu_01 Chi_01  
#define Chimu_02 Chi_02  
#define Chimu_10 Chi_10  
#define Chimu_11 Chi_11  
#define Chimu_12 Chi_12  
#define Chimu_20 UChi_00  
#define Chimu_21 UChi_01  
#define Chimu_22 UChi_02  
#define Chimu_30 UChi_10  
#define Chimu_31 UChi_11  
#define Chimu_32 UChi_12  
// RESULT
#define RESULT_A64FXf(base)  \
{ \
    svst1(pg1, (float32_t*)(base + 2 * 3 * 64 + -6 * 64), result_00);  \
    svst1(pg1, (float32_t*)(base + 2 * 3 * 64 + -5 * 64), result_01);  \
    svst1(pg1, (float32_t*)(base + 2 * 3 * 64 + -4 * 64), result_02);  \
    svst1(pg1, (float32_t*)(base + 2 * 3 * 64 + -3 * 64), result_10);  \
    svst1(pg1, (float32_t*)(base + 2 * 3 * 64 + -2 * 64), result_11);  \
    svst1(pg1, (float32_t*)(base + 2 * 3 * 64 + -1 * 64), result_12);  \
    svst1(pg1, (float32_t*)(base + 2 * 3 * 64 + 0 * 64), result_20);  \
    svst1(pg1, (float32_t*)(base + 2 * 3 * 64 + 1 * 64), result_21);  \
    svst1(pg1, (float32_t*)(base + 2 * 3 * 64 + 2 * 64), result_22);  \
    svst1(pg1, (float32_t*)(base + 2 * 3 * 64 + 3 * 64), result_30);  \
    svst1(pg1, (float32_t*)(base + 2 * 3 * 64 + 4 * 64), result_31);  \
    svst1(pg1, (float32_t*)(base + 2 * 3 * 64 + 5 * 64), result_32);  \
}
// PREFETCH_CHIMU_L2 (prefetch to L2)
#define PREFETCH_CHIMU_L2_INTERNAL_A64FXf(base)  \
{ \
    svprfd(pg1, (int64_t*)(base + 0), SV_PLDL2STRM); \
    svprfd(pg1, (int64_t*)(base + 256), SV_PLDL2STRM); \
    svprfd(pg1, (int64_t*)(base + 512), SV_PLDL2STRM); \
}
// PREFETCH_CHIMU_L1 (prefetch to L1)
#define PREFETCH_CHIMU_L1_INTERNAL_A64FXf(base)  \
{ \
    svprfd(pg1, (int64_t*)(base + 0), SV_PLDL1STRM); \
    svprfd(pg1, (int64_t*)(base + 256), SV_PLDL1STRM); \
    svprfd(pg1, (int64_t*)(base + 512), SV_PLDL1STRM); \
}
// PREFETCH_GAUGE_L2 (prefetch to L2)
#define PREFETCH_GAUGE_L2_INTERNAL_A64FXf(A)  \
{ \
    const auto & ref(U[sUn](A)); uint64_t baseU = (uint64_t)&ref + 3 * 3 * 64; \
    svprfd(pg1, (int64_t*)(baseU + -256), SV_PLDL2STRM); \
    svprfd(pg1, (int64_t*)(baseU + 0), SV_PLDL2STRM); \
    svprfd(pg1, (int64_t*)(baseU + 256), SV_PLDL2STRM); \
    svprfd(pg1, (int64_t*)(baseU + 512), SV_PLDL2STRM); \
    svprfd(pg1, (int64_t*)(baseU + 768), SV_PLDL2STRM); \
    svprfd(pg1, (int64_t*)(baseU + 1024), SV_PLDL2STRM); \
    svprfd(pg1, (int64_t*)(baseU + 1280), SV_PLDL2STRM); \
    svprfd(pg1, (int64_t*)(baseU + 1536), SV_PLDL2STRM); \
    svprfd(pg1, (int64_t*)(baseU + 1792), SV_PLDL2STRM); \
}
// PREFETCH_GAUGE_L1 (prefetch to L1)
#define PREFETCH_GAUGE_L1_INTERNAL_A64FXf(A)  \
{ \
    const auto & ref(U[sU](A)); uint64_t baseU = (uint64_t)&ref; \
    svprfd(pg1, (int64_t*)(baseU + 0), SV_PLDL1STRM); \
    svprfd(pg1, (int64_t*)(baseU + 256), SV_PLDL1STRM); \
    svprfd(pg1, (int64_t*)(baseU + 512), SV_PLDL1STRM); \
}
// LOAD_CHI
#define LOAD_CHI_A64FXf(base)  \
{ \
    Chi_00 = svld1(pg1, (float32_t*)(base + 0 * 64));  \
    Chi_01 = svld1(pg1, (float32_t*)(base + 1 * 64));  \
    Chi_02 = svld1(pg1, (float32_t*)(base + 2 * 64));  \
    Chi_10 = svld1(pg1, (float32_t*)(base + 3 * 64));  \
    Chi_11 = svld1(pg1, (float32_t*)(base + 4 * 64));  \
    Chi_12 = svld1(pg1, (float32_t*)(base + 5 * 64));  \
}
// LOAD_CHIMU
#define LOAD_CHIMU_INTERLEAVED_A64FXf(base)  \
{ \
    Chimu_00 = svld1(pg1, (float32_t*)(base + 2 * 3 * 64 + -6 * 64));  \
    Chimu_30 = svld1(pg1, (float32_t*)(base + 2 * 3 * 64 + 3 * 64));  \
    Chimu_10 = svld1(pg1, (float32_t*)(base + 2 * 3 * 64 + -3 * 64));  \
    Chimu_20 = svld1(pg1, (float32_t*)(base + 2 * 3 * 64 + 0 * 64));  \
    Chimu_01 = svld1(pg1, (float32_t*)(base + 2 * 3 * 64 + -5 * 64));  \
    Chimu_31 = svld1(pg1, (float32_t*)(base + 2 * 3 * 64 + 4 * 64));  \
    Chimu_11 = svld1(pg1, (float32_t*)(base + 2 * 3 * 64 + -2 * 64));  \
    Chimu_21 = svld1(pg1, (float32_t*)(base + 2 * 3 * 64 + 1 * 64));  \
    Chimu_02 = svld1(pg1, (float32_t*)(base + 2 * 3 * 64 + -4 * 64));  \
    Chimu_32 = svld1(pg1, (float32_t*)(base + 2 * 3 * 64 + 5 * 64));  \
    Chimu_12 = svld1(pg1, (float32_t*)(base + 2 * 3 * 64 + -1 * 64));  \
    Chimu_22 = svld1(pg1, (float32_t*)(base + 2 * 3 * 64 + 2 * 64));  \
}
// LOAD_CHIMU_0213
#define LOAD_CHIMU_0213_A64FXf  \
{ \
    const SiteSpinor & ref(in[offset]); \
    Chimu_00 = svld1(pg1, (float32_t*)(base + 2 * 3 * 64 + -6 * 64));  \
    Chimu_20 = svld1(pg1, (float32_t*)(base + 2 * 3 * 64 + 0 * 64));  \
    Chimu_01 = svld1(pg1, (float32_t*)(base + 2 * 3 * 64 + -5 * 64));  \
    Chimu_21 = svld1(pg1, (float32_t*)(base + 2 * 3 * 64 + 1 * 64));  \
    Chimu_02 = svld1(pg1, (float32_t*)(base + 2 * 3 * 64 + -4 * 64));  \
    Chimu_22 = svld1(pg1, (float32_t*)(base + 2 * 3 * 64 + 2 * 64));  \
    Chimu_10 = svld1(pg1, (float32_t*)(base + 2 * 3 * 64 + -3 * 64));  \
    Chimu_30 = svld1(pg1, (float32_t*)(base + 2 * 3 * 64 + 3 * 64));  \
    Chimu_11 = svld1(pg1, (float32_t*)(base + 2 * 3 * 64 + -2 * 64));  \
    Chimu_31 = svld1(pg1, (float32_t*)(base + 2 * 3 * 64 + 4 * 64));  \
    Chimu_12 = svld1(pg1, (float32_t*)(base + 2 * 3 * 64 + -1 * 64));  \
    Chimu_32 = svld1(pg1, (float32_t*)(base + 2 * 3 * 64 + 5 * 64));  \
}
// LOAD_CHIMU_0312
#define LOAD_CHIMU_0312_A64FXf  \
{ \
    const SiteSpinor & ref(in[offset]); \
    Chimu_00 = svld1(pg1, (float32_t*)(base + 2 * 3 * 64 + -6 * 64));  \
    Chimu_30 = svld1(pg1, (float32_t*)(base + 2 * 3 * 64 + 3 * 64));  \
    Chimu_01 = svld1(pg1, (float32_t*)(base + 2 * 3 * 64 + -5 * 64));  \
    Chimu_31 = svld1(pg1, (float32_t*)(base + 2 * 3 * 64 + 4 * 64));  \
    Chimu_02 = svld1(pg1, (float32_t*)(base + 2 * 3 * 64 + -4 * 64));  \
    Chimu_32 = svld1(pg1, (float32_t*)(base + 2 * 3 * 64 + 5 * 64));  \
    Chimu_10 = svld1(pg1, (float32_t*)(base + 2 * 3 * 64 + -3 * 64));  \
    Chimu_20 = svld1(pg1, (float32_t*)(base + 2 * 3 * 64 + 0 * 64));  \
    Chimu_11 = svld1(pg1, (float32_t*)(base + 2 * 3 * 64 + -2 * 64));  \
    Chimu_21 = svld1(pg1, (float32_t*)(base + 2 * 3 * 64 + 1 * 64));  \
    Chimu_12 = svld1(pg1, (float32_t*)(base + 2 * 3 * 64 + -1 * 64));  \
    Chimu_22 = svld1(pg1, (float32_t*)(base + 2 * 3 * 64 + 2 * 64));  \
}
// LOAD_TABLE0
#define LOAD_TABLE0  \
    table0 = svld1(pg1, (uint32_t*)&lut[0]);  

// LOAD_TABLE1
#define LOAD_TABLE1  \
    table0 = svld1(pg1, (uint32_t*)&lut[1]);  

// LOAD_TABLE2
#define LOAD_TABLE2  \
    table0 = svld1(pg1, (uint32_t*)&lut[2]);  

// LOAD_TABLE3
#define LOAD_TABLE3  \
    table0 = svld1(pg1, (uint32_t*)&lut[3]);  

// PERMUTE
#define PERMUTE_A64FXf  \
    Chi_00 = svtbl(Chi_00, table0);    \
    Chi_01 = svtbl(Chi_01, table0);    \
    Chi_02 = svtbl(Chi_02, table0);    \
    Chi_10 = svtbl(Chi_10, table0);    \
    Chi_11 = svtbl(Chi_11, table0);    \
    Chi_12 = svtbl(Chi_12, table0);    

// LOAD_GAUGE
#define LOAD_GAUGE  \
    const auto & ref(U[sU](A)); uint64_t baseU = (uint64_t)&ref; \
{ \
    U_00 = svld1(pg1, (float32_t*)(baseU + 2 * 3 * 64 + -6 * 64));  \
    U_10 = svld1(pg1, (float32_t*)(baseU + 2 * 3 * 64 + -3 * 64));  \
    U_20 = svld1(pg1, (float32_t*)(baseU + 2 * 3 * 64 + 0 * 64));  \
    U_01 = svld1(pg1, (float32_t*)(baseU + 2 * 3 * 64 + -5 * 64));  \
    U_11 = svld1(pg1, (float32_t*)(baseU + 2 * 3 * 64 + -2 * 64));  \
    U_21 = svld1(pg1, (float32_t*)(baseU + 2 * 3 * 64 + 1 * 64));  \
}
// MULT_2SPIN
#define MULT_2SPIN_1_A64FXf(A)  \
{ \
    const auto & ref(U[sU](A)); uint64_t baseU = (uint64_t)&ref; \
    U_00 = svld1(pg1, (float32_t*)(baseU + 2 * 3 * 64 + -6 * 64));  \
    U_10 = svld1(pg1, (float32_t*)(baseU + 2 * 3 * 64 + -3 * 64));  \
    U_20 = svld1(pg1, (float32_t*)(baseU + 2 * 3 * 64 + 0 * 64));  \
    U_01 = svld1(pg1, (float32_t*)(baseU + 2 * 3 * 64 + -5 * 64));  \
    U_11 = svld1(pg1, (float32_t*)(baseU + 2 * 3 * 64 + -2 * 64));  \
    U_21 = svld1(pg1, (float32_t*)(baseU + 2 * 3 * 64 + 1 * 64));  \
    UChi_00 = svcmla_x(pg1, zero0, U_00, Chi_00, 0); \
    UChi_10 = svcmla_x(pg1, zero0, U_00, Chi_10, 0); \
    UChi_01 = svcmla_x(pg1, zero0, U_10, Chi_00, 0); \
    UChi_11 = svcmla_x(pg1, zero0, U_10, Chi_10, 0); \
    UChi_02 = svcmla_x(pg1, zero0, U_20, Chi_00, 0); \
    UChi_12 = svcmla_x(pg1, zero0, U_20, Chi_10, 0); \
    UChi_00 = svcmla_x(pg1, UChi_00, U_00, Chi_00, 90); \
    UChi_10 = svcmla_x(pg1, UChi_10, U_00, Chi_10, 90); \
    UChi_01 = svcmla_x(pg1, UChi_01, U_10, Chi_00, 90); \
    UChi_11 = svcmla_x(pg1, UChi_11, U_10, Chi_10, 90); \
    UChi_02 = svcmla_x(pg1, UChi_02, U_20, Chi_00, 90); \
    UChi_12 = svcmla_x(pg1, UChi_12, U_20, Chi_10, 90); \
    U_00 = svld1(pg1, (float32_t*)(baseU + 2 * 3 * 64 + -4 * 64));  \
    U_10 = svld1(pg1, (float32_t*)(baseU + 2 * 3 * 64 + -1 * 64));  \
    U_20 = svld1(pg1, (float32_t*)(baseU + 2 * 3 * 64 + 2 * 64));  \
}
// MULT_2SPIN_BACKEND
#define MULT_2SPIN_2_A64FXf  \
{ \
    UChi_00 = svcmla_x(pg1, UChi_00, U_01, Chi_01, 0); \
    UChi_10 = svcmla_x(pg1, UChi_10, U_01, Chi_11, 0); \
    UChi_01 = svcmla_x(pg1, UChi_01, U_11, Chi_01, 0); \
    UChi_11 = svcmla_x(pg1, UChi_11, U_11, Chi_11, 0); \
    UChi_02 = svcmla_x(pg1, UChi_02, U_21, Chi_01, 0); \
    UChi_12 = svcmla_x(pg1, UChi_12, U_21, Chi_11, 0); \
    UChi_00 = svcmla_x(pg1, UChi_00, U_01, Chi_01, 90); \
    UChi_10 = svcmla_x(pg1, UChi_10, U_01, Chi_11, 90); \
    UChi_01 = svcmla_x(pg1, UChi_01, U_11, Chi_01, 90); \
    UChi_11 = svcmla_x(pg1, UChi_11, U_11, Chi_11, 90); \
    UChi_02 = svcmla_x(pg1, UChi_02, U_21, Chi_01, 90); \
    UChi_12 = svcmla_x(pg1, UChi_12, U_21, Chi_11, 90); \
    UChi_00 = svcmla_x(pg1, UChi_00, U_00, Chi_02, 0); \
    UChi_10 = svcmla_x(pg1, UChi_10, U_00, Chi_12, 0); \
    UChi_01 = svcmla_x(pg1, UChi_01, U_10, Chi_02, 0); \
    UChi_11 = svcmla_x(pg1, UChi_11, U_10, Chi_12, 0); \
    UChi_02 = svcmla_x(pg1, UChi_02, U_20, Chi_02, 0); \
    UChi_12 = svcmla_x(pg1, UChi_12, U_20, Chi_12, 0); \
    UChi_00 = svcmla_x(pg1, UChi_00, U_00, Chi_02, 90); \
    UChi_10 = svcmla_x(pg1, UChi_10, U_00, Chi_12, 90); \
    UChi_01 = svcmla_x(pg1, UChi_01, U_10, Chi_02, 90); \
    UChi_11 = svcmla_x(pg1, UChi_11, U_10, Chi_12, 90); \
    UChi_02 = svcmla_x(pg1, UChi_02, U_20, Chi_02, 90); \
    UChi_12 = svcmla_x(pg1, UChi_12, U_20, Chi_12, 90); \
}
// XP_PROJ
#define XP_PROJ_A64FXf  \
{ \
    Chi_00 = svcadd_x(pg1, Chimu_00, Chimu_30, 90);   \
    Chi_01 = svcadd_x(pg1, Chimu_01, Chimu_31, 90);   \
    Chi_02 = svcadd_x(pg1, Chimu_02, Chimu_32, 90);   \
    Chi_10 = svcadd_x(pg1, Chimu_10, Chimu_20, 90);   \
    Chi_11 = svcadd_x(pg1, Chimu_11, Chimu_21, 90);   \
    Chi_12 = svcadd_x(pg1, Chimu_12, Chimu_22, 90);   \
}
// XP_RECON
#define XP_RECON_A64FXf  \
    result_20 = svcadd_x(pg1, zero0, UChi_10, 270);   \
    result_21 = svcadd_x(pg1, zero0, UChi_11, 270);   \
    result_22 = svcadd_x(pg1, zero0, UChi_12, 270);   \
    result_30 = svcadd_x(pg1, zero0, UChi_00, 270);   \
    result_31 = svcadd_x(pg1, zero0, UChi_01, 270);   \
    result_32 = svcadd_x(pg1, zero0, UChi_02, 270);   \
    result_00 = UChi_00;        \
    result_01 = UChi_01;        \
    result_02 = UChi_02;        \
    result_10 = UChi_10;        \
    result_11 = UChi_11;        \
    result_12 = UChi_12;        

// XP_RECON_ACCUM
#define XP_RECON_ACCUM_A64FXf  \
    result_30 = svcadd_x(pg1, result_30, UChi_00, 270);   \
    result_00 = svadd_x(pg1, result_00, UChi_00); \
    result_31 = svcadd_x(pg1, result_31, UChi_01, 270);   \
    result_01 = svadd_x(pg1, result_01, UChi_01); \
    result_32 = svcadd_x(pg1, result_32, UChi_02, 270);   \
    result_02 = svadd_x(pg1, result_02, UChi_02); \
    result_20 = svcadd_x(pg1, result_20, UChi_10, 270);   \
    result_10 = svadd_x(pg1, result_10, UChi_10); \
    result_21 = svcadd_x(pg1, result_21, UChi_11, 270);   \
    result_11 = svadd_x(pg1, result_11, UChi_11); \
    result_22 = svcadd_x(pg1, result_22, UChi_12, 270);   \
    result_12 = svadd_x(pg1, result_12, UChi_12); 

// YP_PROJ
#define YP_PROJ_A64FXf  \
{ \
    Chi_00 = svsub_x(pg1, Chimu_00, Chimu_30);  \
    Chi_01 = svsub_x(pg1, Chimu_01, Chimu_31);  \
    Chi_02 = svsub_x(pg1, Chimu_02, Chimu_32);  \
    Chi_10 = svadd_x(pg1, Chimu_10, Chimu_20);  \
    Chi_11 = svadd_x(pg1, Chimu_11, Chimu_21);  \
    Chi_12 = svadd_x(pg1, Chimu_12, Chimu_22);  \
}
// ZP_PROJ
#define ZP_PROJ_A64FXf  \
{ \
    Chi_00 = svcadd_x(pg1, Chimu_00, Chimu_20, 90);   \
    Chi_01 = svcadd_x(pg1, Chimu_01, Chimu_21, 90);   \
    Chi_02 = svcadd_x(pg1, Chimu_02, Chimu_22, 90);   \
    Chi_10 = svcadd_x(pg1, Chimu_10, Chimu_30, 270);   \
    Chi_11 = svcadd_x(pg1, Chimu_11, Chimu_31, 270);   \
    Chi_12 = svcadd_x(pg1, Chimu_12, Chimu_32, 270);   \
}
// TP_PROJ
#define TP_PROJ_A64FXf  \
{ \
    Chi_00 = svadd_x(pg1, Chimu_00, Chimu_20);  \
    Chi_01 = svadd_x(pg1, Chimu_01, Chimu_21);  \
    Chi_02 = svadd_x(pg1, Chimu_02, Chimu_22);  \
    Chi_10 = svadd_x(pg1, Chimu_10, Chimu_30);  \
    Chi_11 = svadd_x(pg1, Chimu_11, Chimu_31);  \
    Chi_12 = svadd_x(pg1, Chimu_12, Chimu_32);  \
}
// XM_PROJ
#define XM_PROJ_A64FXf  \
{ \
    Chi_00 = svcadd_x(pg1, Chimu_00, Chimu_30, 270);   \
    Chi_01 = svcadd_x(pg1, Chimu_01, Chimu_31, 270);   \
    Chi_02 = svcadd_x(pg1, Chimu_02, Chimu_32, 270);   \
    Chi_10 = svcadd_x(pg1, Chimu_10, Chimu_20, 270);   \
    Chi_11 = svcadd_x(pg1, Chimu_11, Chimu_21, 270);   \
    Chi_12 = svcadd_x(pg1, Chimu_12, Chimu_22, 270);   \
}
// XM_RECON
#define XM_RECON_A64FXf  \
    result_20 = svcadd_x(pg1, zero0, UChi_10, 90);   \
    result_21 = svcadd_x(pg1, zero0, UChi_11, 90);   \
    result_22 = svcadd_x(pg1, zero0, UChi_12, 90);   \
    result_30 = svcadd_x(pg1, zero0, UChi_00, 90);   \
    result_31 = svcadd_x(pg1, zero0, UChi_01, 90);   \
    result_32 = svcadd_x(pg1, zero0, UChi_02, 90);   \
    result_00 = UChi_00;        \
    result_01 = UChi_01;        \
    result_02 = UChi_02;        \
    result_10 = UChi_10;        \
    result_11 = UChi_11;        \
    result_12 = UChi_12;        

// YM_PROJ
#define YM_PROJ_A64FXf  \
{ \
    Chi_00 = svadd_x(pg1, Chimu_00, Chimu_30);  \
    Chi_01 = svadd_x(pg1, Chimu_01, Chimu_31);  \
    Chi_02 = svadd_x(pg1, Chimu_02, Chimu_32);  \
    Chi_10 = svsub_x(pg1, Chimu_10, Chimu_20);  \
    Chi_11 = svsub_x(pg1, Chimu_11, Chimu_21);  \
    Chi_12 = svsub_x(pg1, Chimu_12, Chimu_22);  \
}
// ZM_PROJ
#define ZM_PROJ_A64FXf  \
{ \
    Chi_00 = svcadd_x(pg1, Chimu_00, Chimu_20, 270);   \
    Chi_01 = svcadd_x(pg1, Chimu_01, Chimu_21, 270);   \
    Chi_02 = svcadd_x(pg1, Chimu_02, Chimu_22, 270);   \
    Chi_10 = svcadd_x(pg1, Chimu_10, Chimu_30, 90);   \
    Chi_11 = svcadd_x(pg1, Chimu_11, Chimu_31, 90);   \
    Chi_12 = svcadd_x(pg1, Chimu_12, Chimu_32, 90);   \
}
// TM_PROJ
#define TM_PROJ_A64FXf  \
{ \
    Chi_00 = svsub_x(pg1, Chimu_00, Chimu_20);  \
    Chi_01 = svsub_x(pg1, Chimu_01, Chimu_21);  \
    Chi_02 = svsub_x(pg1, Chimu_02, Chimu_22);  \
    Chi_10 = svsub_x(pg1, Chimu_10, Chimu_30);  \
    Chi_11 = svsub_x(pg1, Chimu_11, Chimu_31);  \
    Chi_12 = svsub_x(pg1, Chimu_12, Chimu_32);  \
}
// XM_RECON_ACCUM
#define XM_RECON_ACCUM_A64FXf  \
    result_30 = svcadd_x(pg1, result_30, UChi_00, 90);   \
    result_31 = svcadd_x(pg1, result_31, UChi_01, 90);   \
    result_32 = svcadd_x(pg1, result_32, UChi_02, 90);   \
    result_20 = svcadd_x(pg1, result_20, UChi_10, 90);   \
    result_21 = svcadd_x(pg1, result_21, UChi_11, 90);   \
    result_22 = svcadd_x(pg1, result_22, UChi_12, 90);   \
    result_00 = svadd_x(pg1, result_00, UChi_00); \
    result_01 = svadd_x(pg1, result_01, UChi_01); \
    result_02 = svadd_x(pg1, result_02, UChi_02); \
    result_10 = svadd_x(pg1, result_10, UChi_10); \
    result_11 = svadd_x(pg1, result_11, UChi_11); \
    result_12 = svadd_x(pg1, result_12, UChi_12);

// YP_RECON_ACCUM
#define YP_RECON_ACCUM_A64FXf  \
    result_00 = svadd_x(pg1, result_00, UChi_00); \
    result_30 = svsub_x(pg1, result_30, UChi_00); \
    result_01 = svadd_x(pg1, result_01, UChi_01); \
    result_31 = svsub_x(pg1, result_31, UChi_01); \
    result_02 = svadd_x(pg1, result_02, UChi_02); \
    result_32 = svsub_x(pg1, result_32, UChi_02); \
    result_10 = svadd_x(pg1, result_10, UChi_10); \
    result_20 = svadd_x(pg1, result_20, UChi_10); \
    result_11 = svadd_x(pg1, result_11, UChi_11); \
    result_21 = svadd_x(pg1, result_21, UChi_11); \
    result_12 = svadd_x(pg1, result_12, UChi_12); \
    result_22 = svadd_x(pg1, result_22, UChi_12);

// YM_RECON_ACCUM
#define YM_RECON_ACCUM_A64FXf  \
    result_00 = svadd_x(pg1, result_00, UChi_00); \
    result_30 = svadd_x(pg1, result_30, UChi_00); \
    result_01 = svadd_x(pg1, result_01, UChi_01); \
    result_31 = svadd_x(pg1, result_31, UChi_01); \
    result_02 = svadd_x(pg1, result_02, UChi_02); \
    result_32 = svadd_x(pg1, result_32, UChi_02); \
    result_10 = svadd_x(pg1, result_10, UChi_10); \
    result_20 = svsub_x(pg1, result_20, UChi_10); \
    result_11 = svadd_x(pg1, result_11, UChi_11); \
    result_21 = svsub_x(pg1, result_21, UChi_11); \
    result_12 = svadd_x(pg1, result_12, UChi_12); \
    result_22 = svsub_x(pg1, result_22, UChi_12);

// ZP_RECON_ACCUM
#define ZP_RECON_ACCUM_A64FXf  \
    result_20 = svcadd_x(pg1, result_20, UChi_00, 270);   \
    result_00 = svadd_x(pg1, result_00, UChi_00); \
    result_21 = svcadd_x(pg1, result_21, UChi_01, 270);   \
    result_01 = svadd_x(pg1, result_01, UChi_01); \
    result_22 = svcadd_x(pg1, result_22, UChi_02, 270);   \
    result_02 = svadd_x(pg1, result_02, UChi_02); \
    result_30 = svcadd_x(pg1, result_30, UChi_10, 90);   \
    result_10 = svadd_x(pg1, result_10, UChi_10); \
    result_31 = svcadd_x(pg1, result_31, UChi_11, 90);   \
    result_11 = svadd_x(pg1, result_11, UChi_11); \
    result_32 = svcadd_x(pg1, result_32, UChi_12, 90);   \
    result_12 = svadd_x(pg1, result_12, UChi_12);

// ZM_RECON_ACCUM
#define ZM_RECON_ACCUM_A64FXf  \
    result_20 = svcadd_x(pg1, result_20, UChi_00, 90);   \
    result_00 = svadd_x(pg1, result_00, UChi_00); \
    result_21 = svcadd_x(pg1, result_21, UChi_01, 90);   \
    result_01 = svadd_x(pg1, result_01, UChi_01); \
    result_22 = svcadd_x(pg1, result_22, UChi_02, 90);   \
    result_02 = svadd_x(pg1, result_02, UChi_02); \
    result_30 = svcadd_x(pg1, result_30, UChi_10, 270);   \
    result_10 = svadd_x(pg1, result_10, UChi_10); \
    result_31 = svcadd_x(pg1, result_31, UChi_11, 270);   \
    result_11 = svadd_x(pg1, result_11, UChi_11); \
    result_32 = svcadd_x(pg1, result_32, UChi_12, 270);   \
    result_12 = svadd_x(pg1, result_12, UChi_12);

// TP_RECON_ACCUM
#define TP_RECON_ACCUM_A64FXf  \
    result_00 = svadd_x(pg1, result_00, UChi_00); \
    result_20 = svadd_x(pg1, result_20, UChi_00); \
    result_01 = svadd_x(pg1, result_01, UChi_01); \
    result_21 = svadd_x(pg1, result_21, UChi_01); \
    result_02 = svadd_x(pg1, result_02, UChi_02); \
    result_22 = svadd_x(pg1, result_22, UChi_02); \
    result_10 = svadd_x(pg1, result_10, UChi_10); \
    result_30 = svadd_x(pg1, result_30, UChi_10); \
    result_11 = svadd_x(pg1, result_11, UChi_11); \
    result_31 = svadd_x(pg1, result_31, UChi_11); \
    result_12 = svadd_x(pg1, result_12, UChi_12); \
    result_32 = svadd_x(pg1, result_32, UChi_12);

// TM_RECON_ACCUM
#define TM_RECON_ACCUM_A64FXf  \
    result_00 = svadd_x(pg1, result_00, UChi_00); \
    result_20 = svsub_x(pg1, result_20, UChi_00); \
    result_01 = svadd_x(pg1, result_01, UChi_01); \
    result_21 = svsub_x(pg1, result_21, UChi_01); \
    result_02 = svadd_x(pg1, result_02, UChi_02); \
    result_22 = svsub_x(pg1, result_22, UChi_02); \
    result_10 = svadd_x(pg1, result_10, UChi_10); \
    result_30 = svsub_x(pg1, result_30, UChi_10); \
    result_11 = svadd_x(pg1, result_11, UChi_11); \
    result_31 = svsub_x(pg1, result_31, UChi_11); \
    result_12 = svadd_x(pg1, result_12, UChi_12); \
    result_32 = svsub_x(pg1, result_32, UChi_12);

// ZERO_PSI
#define ZERO_PSI_A64FXf  \
    result_00 = svdup_f32(0.); \
    result_01 = svdup_f32(0.); \
    result_02 = svdup_f32(0.); \
    result_10 = svdup_f32(0.); \
    result_11 = svdup_f32(0.); \
    result_12 = svdup_f32(0.); \
    result_20 = svdup_f32(0.); \
    result_21 = svdup_f32(0.); \
    result_22 = svdup_f32(0.); \
    result_30 = svdup_f32(0.); \
    result_31 = svdup_f32(0.); \
    result_32 = svdup_f32(0.);

// PREFETCH_RESULT_L2_STORE (prefetch store to L2)
#define PREFETCH_RESULT_L2_STORE_INTERNAL_A64FXf(base)  \
{ \
    svprfd(pg1, (int64_t*)(base + 0), SV_PSTL2STRM); \
    svprfd(pg1, (int64_t*)(base + 256), SV_PSTL2STRM); \
    svprfd(pg1, (int64_t*)(base + 512), SV_PSTL2STRM); \
}
// PREFETCH_RESULT_L1_STORE (prefetch store to L1)
#define PREFETCH_RESULT_L1_STORE_INTERNAL_A64FXf(base)  \
{ \
    svprfd(pg1, (int64_t*)(base + 0), SV_PSTL1STRM); \
    svprfd(pg1, (int64_t*)(base + 256), SV_PSTL1STRM); \
    svprfd(pg1, (int64_t*)(base + 512), SV_PSTL1STRM); \
}
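A rough sketch of the coverage, assuming 64-byte SVE vectors (the A64FX case): the 12 result_xy registers amount to 12 x 64 B = 768 B once stored, so the three svprfd calls at a 256 B stride touch the whole destination block ahead of the store.

// illustrative use only; 'base' is a hypothetical byte address of the result spinor:
//   PREFETCH_RESULT_L2_STORE_INTERNAL_A64FXf(base);   // prefetches base+0, +256, +512 for store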
// ADD_RESULT_INTERNAL
#define ADD_RESULT_INTERNAL_A64FXf  \
    result_00 = svadd_x(pg1, result_00, Chimu_00); \
    result_01 = svadd_x(pg1, result_01, Chimu_01); \
    result_02 = svadd_x(pg1, result_02, Chimu_02); \
    result_10 = svadd_x(pg1, result_10, Chimu_10); \
    result_11 = svadd_x(pg1, result_11, Chimu_11); \
    result_12 = svadd_x(pg1, result_12, Chimu_12); \
    result_20 = svadd_x(pg1, result_20, Chimu_20); \
    result_21 = svadd_x(pg1, result_21, Chimu_21); \
    result_22 = svadd_x(pg1, result_22, Chimu_22); \
    result_30 = svadd_x(pg1, result_30, Chimu_30); \
    result_31 = svadd_x(pg1, result_31, Chimu_31); \
    result_32 = svadd_x(pg1, result_32, Chimu_32);

Grid/simd/Fujitsu_A64FX_undef.h (new file, 76 lines)
@@ -0,0 +1,76 @@
/*************************************************************************************

    Grid physics library, www.github.com/paboyle/Grid

    Source file: Fujitsu_A64FX_undef.h

    Copyright (C) 2020

Author: Nils Meyer <nils.meyer@ur.de>

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License along
    with this program; if not, write to the Free Software Foundation, Inc.,
    51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.

    See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/*  END LEGAL */

#undef LOAD_CHIMU
#undef PREFETCH_CHIMU_L1
#undef PREFETCH_GAUGE_L1
#undef PREFETCH_CHIMU_L2
#undef PREFETCH_GAUGE_L2
#undef PREFETCH_GAUGE_L1_INTERNAL
#undef PREFETCH1_CHIMU
#undef PREFETCH_CHIMU
#undef PREFETCH_RESULT_L2_STORE
#undef PREFETCH_RESULT_L1_STORE
#undef LOAD_GAUGE
#undef LOCK_GAUGE
#undef UNLOCK_GAUGE
#undef MASK_REGS
#undef SAVE_RESULT
#undef ADD_RESULT
#undef MULT_2SPIN_1
#undef MULT_2SPIN_2
#undef MAYBEPERM
#undef LOAD_CHI
#undef XP_PROJ
#undef YP_PROJ
#undef ZP_PROJ
#undef TP_PROJ
#undef XM_PROJ
#undef YM_PROJ
#undef ZM_PROJ
#undef TM_PROJ
#undef XP_RECON
#undef XM_RECON
#undef XM_RECON_ACCUM
#undef YM_RECON_ACCUM
#undef ZM_RECON_ACCUM
#undef TM_RECON_ACCUM
#undef XP_RECON_ACCUM
#undef YP_RECON_ACCUM
#undef ZP_RECON_ACCUM
#undef TP_RECON_ACCUM
#undef PERMUTE
#undef PERMUTE_DIR0
#undef PERMUTE_DIR1
#undef PERMUTE_DIR2
#undef PERMUTE_DIR3
#undef LOAD_TABLE
#undef LOAD_TABLE0
#undef LOAD_TABLE1
#undef LOAD_TABLE2
#undef LOAD_TABLE3
Grid/simd/Grid_a64fx-2.h (new file, 942 lines)
@@ -0,0 +1,942 @@
    /*************************************************************************************

    Grid physics library, www.github.com/paboyle/Grid

    Source file: Grid_a64fx-2.h

    Copyright (C) 2020

    Author: Nils Meyer          <nils.meyer@ur.de>

    with support from Arm

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License along
    with this program; if not, write to the Free Software Foundation, Inc.,
    51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.

    See the full license in the file "LICENSE" in the top level distribution directory
    *************************************************************************************/
    /*  END LEGAL */

/////////////////////////////////////////////////////
// Using SVE ACLE
/////////////////////////////////////////////////////

static_assert(GEN_SIMD_WIDTH % 64u == 0, "A64FX SIMD vector size is 64 bytes");

NAMESPACE_BEGIN(Grid);
NAMESPACE_BEGIN(Optimization);

  // type traits giving the number of elements for each vector type
  template <typename T> struct W;
  template <> struct W<double> {
    constexpr static unsigned int c = GEN_SIMD_WIDTH/16u;
    constexpr static unsigned int r = GEN_SIMD_WIDTH/8u;
  };
  template <> struct W<float> {
    constexpr static unsigned int c = GEN_SIMD_WIDTH/8u;
    constexpr static unsigned int r = GEN_SIMD_WIDTH/4u;
  };
  template <> struct W<Integer> {
    constexpr static unsigned int r = GEN_SIMD_WIDTH/4u;
  };
  template <> struct W<uint16_t> {
    constexpr static unsigned int c = GEN_SIMD_WIDTH/4u;
    constexpr static unsigned int r = GEN_SIMD_WIDTH/2u;
  };
  template <> struct W<uint64_t> {
    constexpr static unsigned int c = GEN_SIMD_WIDTH/16u;
    constexpr static unsigned int r = GEN_SIMD_WIDTH/8u;
  };
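For orientation, the values these traits take when GEN_SIMD_WIDTH is 64 bytes (512-bit SVE, the case asserted above); r counts real elements per vector, c counts complex pairs:

  // W<double>::r   == 8,  W<double>::c == 4
  // W<float>::r    == 16, W<float>::c  == 8
  // W<uint16_t>::r == 32  (half-precision comms)
  // in every specialization with both members, 2*c == r, e.g.
  //   static_assert(2*W<float>::c == W<float>::r, "two reals per complex");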

  #ifdef ARMCLANGCOMPAT
  // SIMD vector immediate types
  template <typename T>
  struct vec_imm {
    alignas(GEN_SIMD_WIDTH) T v[W<T>::r];
  };

  // SIMD vector types
  template <typename T>
  struct vec {
    alignas(GEN_SIMD_WIDTH) T v[W<T>::r];
    vec() = default;
    vec(const vec &rhs) { this->operator=(rhs); }
    vec(const vec_imm<T> &rhs) {
      // v = rhs.v
      svst1(svptrue_b8(), (T*)this, svld1(svptrue_b8(), (T*)rhs.v));
    }

    inline vec &operator=(const vec &rhs) {
      // v = rhs.v
      svst1(svptrue_b8(), (T*)this, svld1(svptrue_b8(), (T*)rhs.v));
      return *this;
    };
  };

  #else //  no ARMCLANGCOMPAT
  #define vec_imm vec
  // SIMD vector types
  template <typename T>
  struct vec {
    alignas(GEN_SIMD_WIDTH) T v[W<T>::r];
  };
  #endif

  typedef vec<float>     vecf;
  typedef vec<double>    vecd;
  typedef vec<uint16_t>  vech; // half precision comms
  typedef vec<Integer>   veci;

NAMESPACE_END(Optimization)
NAMESPACE_END(Grid)

// low-level API
NAMESPACE_BEGIN(Grid);
NAMESPACE_BEGIN(Optimization);

template <typename T>
struct acle{};

template <>
struct acle<double>{
  typedef svfloat64_t vt;
  typedef svfloat64x2_t vt2;
  typedef svfloat64x4_t vt4;
  typedef float64_t pt;
  typedef uint64_t uint;
  typedef svuint64_t svuint;

  static inline svbool_t pg1(){return svptrue_b64();}
  static inline svbool_t pg2(){return svptrue_pat_b64(SV_VL4);}
  static inline svbool_t pg4(){return svptrue_pat_b64(SV_VL2);}
  static inline vec<uint64_t> tbl_swap(){
    //const vec<uint64_t> t = {1, 0, 3, 2, 5, 4, 7, 6};
    const vec_imm<uint64_t> t = {1, 0, 3, 2, 5, 4, 7, 6};
    return t;
  }
  static inline vec<uint64_t> tbl0(){
    //const vec<uint64_t> t = {4, 5, 6, 7, 0, 1, 2, 3};
    const vec_imm<uint64_t> t = {4, 5, 6, 7, 0, 1, 2, 3};
    return t;
  }
  static inline vec<uint64_t> tbl1(){
    //const vec<uint64_t> t = {2, 3, 0, 1, 6, 7, 4, 5};
    const vec_imm<uint64_t> t = {2, 3, 0, 1, 6, 7, 4, 5};
    return t;
  }
  static inline vec<uint64_t> tbl_exch1a(){ // Exchange1
    //const vec<uint64_t> t = {0, 1, 4, 5, 2, 3, 6, 7};
    const vec_imm<uint64_t> t = {0, 1, 4, 5, 2, 3, 6, 7};
    return t;
  }
  static inline vec<uint64_t> tbl_exch1b(){ // Exchange1
    //const vec<uint64_t> t = {2, 3, 6, 7, 0, 1, 4, 5};
    const vec_imm<uint64_t> t = {2, 3, 6, 7, 0, 1, 4, 5};
    return t;
  }
  static inline vec<uint64_t> tbl_exch1c(){ // Exchange1
    //const vec<uint64_t> t = {4, 5, 0, 1, 6, 7, 2, 3};
    const vec_imm<uint64_t> t = {4, 5, 0, 1, 6, 7, 2, 3};
    return t;
  }
  static inline svbool_t pg_even(){return svzip1_b64(svptrue_b64(), svpfalse_b());}
  static inline svbool_t pg_odd() {return svzip1_b64(svpfalse_b(), svptrue_b64());}
  static inline svfloat64_t zero(){return svdup_f64(0.);}
};

template <>
struct acle<float>{
  typedef svfloat32_t vt;
  typedef svfloat32x2_t vt2;
  typedef float32_t pt;
  typedef uint32_t uint;
  typedef svuint32_t svuint;

  static inline svbool_t pg1(){return svptrue_b32();}
  static inline svbool_t pg2(){return svptrue_pat_b32(SV_VL8);}
  // exchange neighboring elements
  static inline vec<uint32_t> tbl_swap(){
    //const vec<uint32_t> t = {1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14};
    const vec_imm<uint32_t> t = {1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14};
    return t;
  }
  static inline vec<uint32_t> tbl0(){
    //const vec<uint32_t> t = {8, 9, 10, 11, 12, 13, 14, 15, 0, 1, 2, 3, 4, 5, 6, 7};
    const vec_imm<uint32_t> t = {8, 9, 10, 11, 12, 13, 14, 15, 0, 1, 2, 3, 4, 5, 6, 7};
    return t;
  }
  static inline vec<uint32_t> tbl1(){
    //const vec<uint32_t> t = {4, 5, 6, 7, 0, 1, 2, 3, 12, 13, 14, 15, 8, 9, 10, 11};
    const vec_imm<uint32_t> t = {4, 5, 6, 7, 0, 1, 2, 3, 12, 13, 14, 15, 8, 9, 10, 11};
    return t;
  }
  static inline vec<uint32_t> tbl2(){
    //const vec<uint32_t> t = {2, 3, 0, 1, 6, 7, 4, 5, 10, 11, 8, 9, 14, 15, 12, 13};
    const vec_imm<uint32_t> t = {2, 3, 0, 1, 6, 7, 4, 5, 10, 11, 8, 9, 14, 15, 12, 13};
    return t;
  }
  static inline vec<uint32_t> tbl_exch1a(){ // Exchange1
    //const vec<uint32_t> t = {0, 1, 2, 3, 8, 9, 10, 11, 4, 5, 6, 7, 12, 13, 14, 15 };
    const vec_imm<uint32_t> t = {0, 1, 2, 3, 8, 9, 10, 11, 4, 5, 6, 7, 12, 13, 14, 15 };
    return t;
  }
  static inline vec<uint32_t> tbl_exch1b(){ // Exchange1
    //const vec<uint32_t> t = {4, 5, 6, 7, 12, 13, 14, 15, 0, 1, 2, 3, 8, 9, 10, 11 };
    const vec_imm<uint32_t> t = {4, 5, 6, 7, 12, 13, 14, 15, 0, 1, 2, 3, 8, 9, 10, 11 };
    return t;
  }
  static inline vec<uint32_t> tbl_exch1c(){ // Exchange1
    //const vec<uint32_t> t = {8, 9, 10, 11, 0, 1, 2, 3, 12, 13, 14, 15, 4, 5, 6, 7};
    const vec_imm<uint32_t> t = {8, 9, 10, 11, 0, 1, 2, 3, 12, 13, 14, 15, 4, 5, 6, 7};
    return t;
  }
  static inline svbool_t pg_even(){return svzip1_b32(svptrue_b32(), svpfalse_b());}
  static inline svbool_t pg_odd() {return svzip1_b32(svpfalse_b(), svptrue_b32());}
  static inline svfloat32_t zero(){return svdup_f32(0.);}
};

template <>
struct acle<uint16_t>{
  typedef svfloat16_t vt;
  typedef float16_t pt;
  typedef uint16_t uint;
  typedef svuint16_t svuint;

  static inline svbool_t pg1(){return svptrue_b16();}
  static inline svbool_t pg2(){return svptrue_pat_b16(SV_VL16);}
  static inline svbool_t pg_even(){return svzip1_b16(svptrue_b16(), svpfalse_b());}
  static inline svbool_t pg_odd() {return svzip1_b16(svpfalse_b(), svptrue_b16());}
  static inline svfloat16_t zero(){return svdup_f16(0.);}
};

template <>
struct acle<Integer>{
  typedef svuint32_t vt;
  typedef svuint32x2_t vt2;
  typedef Integer pt;
  typedef uint32_t uint;
  typedef svuint32_t svuint;

  //static inline svbool_t pg1(){return svptrue_b16();}
  static inline svbool_t pg1(){return svptrue_b32();}
  static inline svbool_t pg2(){return svptrue_pat_b32(SV_VL8);}
  static inline svbool_t pg_even(){return svzip1_b32(svptrue_b32(), svpfalse_b());}
  static inline svbool_t pg_odd() {return svzip1_b32(svpfalse_b(), svptrue_b32());}
};

// ---------------------------------------------------

struct Vsplat{
  // Complex float
  inline vecf operator()(float a, float b){
    vecf out;
    svbool_t pg1 = acle<float>::pg1();
    typename acle<float>::vt a_v = svdup_f32(a);
    typename acle<float>::vt b_v = svdup_f32(b);
    typename acle<float>::vt r_v = svzip1(a_v, b_v);
    svst1(pg1, out.v, r_v);
    return out;
  }

  // Real float
  inline vecf operator()(float a){
    vecf out;
    svbool_t pg1 = acle<float>::pg1();
    typename acle<float>::vt r_v = svdup_f32(a);
    svst1(pg1, out.v, r_v);
    return out;
  }

 // Complex double
  inline vecd operator()(double a, double b){
    vecd out;
    svbool_t pg1 = acle<double>::pg1();
    typename acle<double>::vt a_v = svdup_f64(a);
    typename acle<double>::vt b_v = svdup_f64(b);
    typename acle<double>::vt r_v = svzip1(a_v, b_v);
    svst1(pg1, out.v, r_v);
    return out;
  }

  // Real double
  inline vecd operator()(double a){
    vecd out;
    svbool_t pg1 = acle<double>::pg1();
    typename acle<double>::vt r_v = svdup_f64(a);
    svst1(pg1, out.v, r_v);
    return out;
  }

  // Integer
  inline vec<Integer> operator()(Integer a){
    vec<Integer> out;
    svbool_t pg1 = acle<Integer>::pg1();
    // Add check whether Integer is really a uint32_t???
    typename acle<Integer>::vt r_v = svdup_u32(a);
    svst1(pg1, out.v, r_v);
    return out;
  }
};
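A small usage sketch (illustrative values, 512-bit vectors assumed): the complex overloads broadcast the real and imaginary parts and interleave them with svzip1, while the real overloads are plain broadcasts.

  // Vsplat()(1.0f, 2.0f) -> { 1, 2, 1, 2, ..., 1, 2 }   (8 complex float lanes, re/im interleaved)
  // Vsplat()(3.0)        -> { 3, 3, 3, 3, 3, 3, 3, 3 }  (real double broadcast)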

struct Vstore{
  // Real
  template <typename T>
  inline void operator()(vec<T> a, T *D){
    svbool_t pg1 = acle<T>::pg1();
    typename acle<T>::vt a_v = svld1(pg1, (typename acle<T>::pt*)&a.v);
    svst1(pg1, D, a_v);
  }
};

struct Vstream{
  // Real
  template <typename T>
  inline void operator()(T * a, vec<T> b){
    svbool_t pg1 = acle<T>::pg1();
    typename acle<T>::vt b_v = svld1(pg1, b.v);
    svstnt1(pg1, a, b_v);
    //svst1(pg1, a, b_v);
  }
};

  struct Vset{
    // Complex
    template <typename T>
    inline vec<T> operator()(std::complex<T> *a){
      vec<T> out;
      svbool_t pg1 = acle<T>::pg1();
      typename acle<T>::vt a_v = svld1(pg1, (T*)a);
      svst1(pg1, out.v, a_v);

      return out;
    }

    // Real
    template <typename T>
    inline vec<T> operator()(T *a){
      vec<T> out;
      svbool_t pg1 = acle<T>::pg1();
      typename acle<T>::vt a_v = svld1(pg1, a);
      svst1(pg1, out.v, a_v);

      return out;
    }
  };

/////////////////////////////////////////////////////
// Arithmetic operations
/////////////////////////////////////////////////////

struct Sum{
  template <typename T>
  inline vec<T> operator()(vec<T> a, vec<T> b){
    vec<T> out;
    svbool_t pg1 = acle<T>::pg1();
    typename acle<T>::vt a_v = svld1(pg1, a.v);
    typename acle<T>::vt b_v = svld1(pg1, b.v);
    typename acle<T>::vt r_v = svadd_x(pg1, a_v, b_v);
    svst1(pg1, out.v, r_v);

    return out;
  }
};

struct Sub{
  template <typename T>
  inline vec<T> operator()(vec<T> a, vec<T> b){
    vec<T> out;
    svbool_t pg1 = acle<T>::pg1();
    typename acle<T>::vt a_v = svld1(pg1, a.v);
    typename acle<T>::vt b_v = svld1(pg1, b.v);
    typename acle<T>::vt r_v = svsub_x(pg1, a_v, b_v);
    svst1(pg1, out.v, r_v);

    return out;
  }
};

struct Mult{
  template <typename T>
  inline vec<T> operator()(vec<T> a, vec<T> b, vec<T> c){
    vec<T> out;
    svbool_t pg1 = acle<T>::pg1();
    typename acle<T>::vt a_v = svld1(pg1, a.v);
    typename acle<T>::vt b_v = svld1(pg1, b.v);
    typename acle<T>::vt c_v = svld1(pg1, c.v);
    typename acle<T>::vt r_v = svmla_x(pg1, c_v, a_v, b_v);
    svst1(pg1, out.v, r_v);

    return out;
  }
  template <typename T>
  inline vec<T> operator()(vec<T> a, vec<T> b){
    vec<T> out;
    svbool_t pg1 = acle<T>::pg1();
    typename acle<T>::vt a_v = svld1(pg1, a.v);
    typename acle<T>::vt b_v = svld1(pg1, b.v);
    typename acle<T>::vt r_v = svmul_x(pg1, a_v, b_v);
    svst1(pg1, out.v, r_v);

    return out;
  }
};

struct MultRealPart{
  template <typename T>
  inline vec<T> operator()(vec<T> a, vec<T> b){
    vec<T> out;
    svbool_t pg1 = acle<T>::pg1();
    typename acle<T>::vt a_v  = svld1(pg1, a.v);
    typename acle<T>::vt b_v  = svld1(pg1, b.v);

    // using FCMLA
    typename acle<T>::vt z_v = acle<T>::zero();
    typename acle<T>::vt r_v = svcmla_x(pg1, z_v, a_v, b_v, 0);

    svst1(pg1, out.v, r_v);

    return out;
  }
};

struct MaddRealPart{
  template <typename T>
  inline vec<T> operator()(vec<T> a, vec<T> b, vec<T> c){
    vec<T> out;
    svbool_t pg1 = acle<T>::pg1();
    typename acle<T>::vt a_v  = svld1(pg1, a.v);
    typename acle<T>::vt b_v  = svld1(pg1, b.v);
    typename acle<T>::vt c_v  = svld1(pg1, c.v);

    // using FCMLA
    typename acle<T>::vt r_v = svcmla_x(pg1, c_v, a_v, b_v, 0);

    svst1(pg1, out.v, r_v);

    return out;
  }
};

struct MultComplex{
  // Complex a*b
  template <typename T>
  inline vec<T> operator()(vec<T> a, vec<T> b){
    vec<T> out;
    svbool_t pg1 = acle<T>::pg1();
    typename acle<T>::vt a_v = svld1(pg1, a.v);
    typename acle<T>::vt b_v = svld1(pg1, b.v);
    typename acle<T>::vt z_v = acle<T>::zero();

    // using FCMLA
    typename acle<T>::vt r_v = svcmla_x(pg1, z_v, a_v, b_v, 0);
    r_v = svcmla_x(pg1, r_v, a_v, b_v, 90);

    svst1(pg1, out.v, r_v);

    return out;
  }
};
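A scalar model of the two FCMLA steps in MultComplex (standard svcmla_x rotation semantics), shown only to make the complex product explicit; MultRealPart and MaddRealPart above keep just the rotation-0 term:

    // per complex lane, starting from r = 0:
    //   rot 0  :  r.re += a.re*b.re;  r.im += a.re*b.im;   // contributes a.re * b
    //   rot 90 :  r.re -= a.im*b.im;  r.im += a.im*b.re;   // contributes i*a.im * b
    // summing both rotations gives r = a * b.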

struct MultAddComplex{
  // Complex a*b+c
  template <typename T>
  inline vec<T> operator()(vec<T> a, vec<T> b, vec<T> c){
    vec<T> out;
    svbool_t pg1 = acle<T>::pg1();
    typename acle<T>::vt a_v = svld1(pg1, a.v);
    typename acle<T>::vt b_v = svld1(pg1, b.v);
    typename acle<T>::vt c_v = svld1(pg1, c.v);

    // using FCMLA
    typename acle<T>::vt r_v = svcmla_x(pg1, c_v, a_v, b_v, 0);
    r_v = svcmla_x(pg1, r_v, a_v, b_v, 90);
    svst1(pg1, out.v, r_v);

    return out;
  }
};

struct Div{
  // Real
  template <typename T>
  inline vec<T> operator()(vec<T> a, vec<T> b){
    vec<T> out;
    svbool_t pg1 = acle<T>::pg1();
    typename acle<T>::vt a_v = svld1(pg1, a.v);
    typename acle<T>::vt b_v = svld1(pg1, b.v);
    typename acle<T>::vt r_v = svdiv_x(pg1, a_v, b_v);
    svst1(pg1, out.v, r_v);

    return out;
  }
};

struct Conj{
  // Complex
  template <typename T>
  inline vec<T> operator()(vec<T> a){
    vec<T> out;
    svbool_t pg1 = acle<T>::pg1();
    svbool_t pg_odd = acle<T>::pg_odd();
    typename acle<T>::vt a_v = svld1(pg1, a.v);
    //typename acle<T>::vt r_v = svneg_x(pg_odd, a_v);
    typename acle<T>::vt r_v = svneg_m(a_v, pg_odd, a_v);
    svst1(pg1, out.v, r_v);

    return out;
  }
};

struct TimesMinusI{
  // Complex
  template <typename T>
  inline vec<T> operator()(vec<T> a, vec<T> b){
    vec<T> out;
    const vec<typename acle<T>::uint> tbl_swap = acle<T>::tbl_swap();
    svbool_t pg1 = acle<T>::pg1();
    svbool_t pg_odd = acle<T>::pg_odd();

    typename acle<T>::svuint tbl_swap_v = svld1(pg1, tbl_swap.v);
    typename acle<T>::vt a_v = svld1(pg1, a.v);
    a_v = svtbl(a_v, tbl_swap_v);
    typename acle<T>::vt r_v = svneg_m(a_v, pg_odd, a_v);
    svst1(pg1, out.v, r_v);

    return out;
  }
};

struct TimesI{
  // Complex
  template <typename T>
  inline vec<T> operator()(vec<T> a, vec<T> b){
    vec<T> out;
    const vec<typename acle<T>::uint> tbl_swap = acle<T>::tbl_swap();
    svbool_t pg1 = acle<T>::pg1();
    svbool_t pg_even = acle<T>::pg_even();

    typename acle<T>::svuint tbl_swap_v = svld1(pg1, tbl_swap.v);
    typename acle<T>::vt a_v = svld1(pg1, a.v);
    a_v = svtbl(a_v, tbl_swap_v);
    //typename acle<T>::vt r_v = svneg_x(pg_even, a_v);
    typename acle<T>::vt r_v = svneg_m(a_v, pg_even, a_v);
    svst1(pg1, out.v, r_v);

    return out;
  }
};

struct PrecisionChange {
  static inline vech StoH (const vecf &sa,const vecf &sb) {
    vech ret;
    svbool_t pg1s = acle<float>::pg1();
    svbool_t pg1h = acle<uint16_t>::pg1();
    typename acle<float>::vt sa_v = svld1(pg1s, sa.v);
    typename acle<float>::vt sb_v = svld1(pg1s, sb.v);
    typename acle<uint16_t>::vt ha_v = svcvt_f16_x(pg1s, sa_v);
    typename acle<uint16_t>::vt hb_v = svcvt_f16_x(pg1s, sb_v);
    typename acle<uint16_t>::vt r_v = svuzp1(ha_v, hb_v);
    svst1(pg1h, (typename acle<uint16_t>::pt*)&ret.v, r_v);

    return ret;
  }
  static inline void HtoS(vech h,vecf &sa,vecf &sb) {
    svbool_t pg1h = acle<uint16_t>::pg1();
    svbool_t pg1s = acle<float>::pg1();
    typename acle<uint16_t>::vt h_v = svld1(pg1h, (typename acle<uint16_t>::pt*)&h.v);
    typename acle<uint16_t>::vt ha_v = svzip1(h_v, h_v);
    typename acle<uint16_t>::vt hb_v = svzip2(h_v, h_v);
    typename acle<float>::vt sa_v = svcvt_f32_x(pg1s, ha_v);
    typename acle<float>::vt sb_v = svcvt_f32_x(pg1s, hb_v);
    svst1(pg1s, sa.v, sa_v);
    svst1(pg1s, sb.v, sb_v);
  }
  static inline vecf DtoS (vecd a,vecd b) {
    vecf ret;
    svbool_t pg1d = acle<double>::pg1();
    svbool_t pg1s = acle<float>::pg1();
    typename acle<double>::vt a_v = svld1(pg1d, a.v);
    typename acle<double>::vt b_v = svld1(pg1d, b.v);
    typename acle<float>::vt sa_v = svcvt_f32_x(pg1d, a_v);
    typename acle<float>::vt sb_v = svcvt_f32_x(pg1d, b_v);
    typename acle<float>::vt r_v = svuzp1(sa_v, sb_v);
    svst1(pg1s, ret.v, r_v);

    return ret;
  }
  static inline void StoD (vecf s,vecd &a,vecd &b) {
    svbool_t pg1s = acle<float>::pg1();
    svbool_t pg1d = acle<double>::pg1();
    typename acle<float>::vt s_v = svld1(pg1s, s.v);
    typename acle<float>::vt sa_v = svzip1(s_v, s_v);
    typename acle<float>::vt sb_v = svzip2(s_v, s_v);
    typename acle<double>::vt a_v = svcvt_f64_x(pg1d, sa_v);
    typename acle<double>::vt b_v = svcvt_f64_x(pg1d, sb_v);
    svst1(pg1d, a.v, a_v);
    svst1(pg1d, b.v, b_v);
  }
  static inline vech DtoH (vecd a,vecd b,vecd c,vecd d) {
    vech ret;
    svbool_t pg1d = acle<double>::pg1();
    svbool_t pg1h = acle<uint16_t>::pg1();
    typename acle<double>::vt a_v = svld1(pg1d, a.v);
    typename acle<double>::vt b_v = svld1(pg1d, b.v);
    typename acle<double>::vt c_v = svld1(pg1d, c.v);
    typename acle<double>::vt d_v = svld1(pg1d, d.v);
    typename acle<uint16_t>::vt ha_v = svcvt_f16_x(pg1d, a_v);
    typename acle<uint16_t>::vt hb_v = svcvt_f16_x(pg1d, b_v);
    typename acle<uint16_t>::vt hc_v = svcvt_f16_x(pg1d, c_v);
    typename acle<uint16_t>::vt hd_v = svcvt_f16_x(pg1d, d_v);
    typename acle<uint16_t>::vt hab_v = svuzp1(ha_v, hb_v);
    typename acle<uint16_t>::vt hcd_v = svuzp1(hc_v, hd_v);
    typename acle<uint16_t>::vt r_v = svuzp1(hab_v, hcd_v);
    svst1(pg1h, (typename acle<uint16_t>::pt*)&ret.v, r_v);

    return ret;
/*
    vecf sa,sb;
    sa = DtoS(a,b);
    sb = DtoS(c,d);
    return StoH(sa,sb);
*/
  }
  static inline void HtoD(vech h,vecd &a,vecd &b,vecd &c,vecd &d) {
    svbool_t pg1h = acle<uint16_t>::pg1();
    svbool_t pg1d = acle<double>::pg1();
    typename acle<uint16_t>::vt h_v = svld1(pg1h, (typename acle<uint16_t>::pt*)&h.v);
    typename acle<uint16_t>::vt sa_v = svzip1(h_v, h_v);
    typename acle<uint16_t>::vt sb_v = svzip2(h_v, h_v);
    typename acle<uint16_t>::vt da_v = svzip1(sa_v, sa_v);
    typename acle<uint16_t>::vt db_v = svzip2(sa_v, sa_v);
    typename acle<uint16_t>::vt dc_v = svzip1(sb_v, sb_v);
    typename acle<uint16_t>::vt dd_v = svzip2(sb_v, sb_v);
    typename acle<double>::vt a_v = svcvt_f64_x(pg1d, da_v);
    typename acle<double>::vt b_v = svcvt_f64_x(pg1d, db_v);
    typename acle<double>::vt c_v = svcvt_f64_x(pg1d, dc_v);
    typename acle<double>::vt d_v = svcvt_f64_x(pg1d, dd_v);
    svst1(pg1d, a.v, a_v);
    svst1(pg1d, b.v, b_v);
    svst1(pg1d, c.v, c_v);
    svst1(pg1d, d.v, d_v);
/*
    vecf sa,sb;
    HtoS(h,sa,sb);
    StoD(sa,a,b);
    StoD(sb,c,d);
*/
  }
};

struct Exchange{

  // Exchange0 is valid for arbitrary SVE vector length
  template <typename T>
  static inline void Exchange0(vec<T> &out1, vec<T> &out2, const vec<T> &in1, const vec<T> &in2){
    svbool_t pg1 = acle<T>::pg1();
    typename acle<T>::vt a1_v = svld1(pg1, in1.v);
    typename acle<T>::vt a2_v = svld1(pg1, in2.v);
    typename acle<T>::vt r1_v = svext(a1_v, a1_v, (uint64_t)W<T>::c);
    r1_v = svext(r1_v, a2_v, (uint64_t)W<T>::c);
    typename acle<T>::vt r2_v = svext(a2_v, a2_v, (uint64_t)W<T>::c);
    r2_v = svext(a1_v, r2_v, (uint64_t)W<T>::c);
    svst1(pg1, out1.v, r1_v);
    svst1(pg1, out2.v, r2_v);
  }
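An illustrative trace of the svext sequence in Exchange0 for double precision on a 512-bit vector (8 lanes, W<double>::c == 4; the values are made up for the example):

    // in1  = {a0, a1, a2, a3, a4, a5, a6, a7}, in2 = {b0, b1, b2, b3, b4, b5, b6, b7}
    // out1 = {a0, a1, a2, a3, b0, b1, b2, b3}   (lower halves of both inputs)
    // out2 = {a4, a5, a6, a7, b4, b5, b6, b7}   (upper halves of both inputs)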

  template <typename T>
  static inline void Exchange1(vec<T> &out1, vec<T> &out2, const vec<T> &in1, const vec<T> &in2){
    // this one is tricky; svtrn2q* from SVE2 fits best, but it is not available in SVE1
    // alternative: use 4-el structure; expect translation into ldp + stp -> SFI
    svbool_t pg1 = acle<T>::pg1();
    const vec<typename acle<T>::uint> tbl_exch1a = acle<T>::tbl_exch1a();
    const vec<typename acle<T>::uint> tbl_exch1b = acle<T>::tbl_exch1b();
    const vec<typename acle<T>::uint> tbl_exch1c = acle<T>::tbl_exch1c();

    typename acle<T>::svuint tbl_exch1a_v = svld1(pg1, tbl_exch1a.v);
    typename acle<T>::svuint tbl_exch1b_v = svld1(pg1, tbl_exch1b.v);
    typename acle<T>::svuint tbl_exch1c_v = svld1(pg1, tbl_exch1c.v);

    typename acle<T>::vt in1_v  = svld1(pg1, in1.v);
    typename acle<T>::vt in2_v  = svld1(pg1, in2.v);

    typename acle<T>::vt a1_v   = svtbl(in1_v, tbl_exch1a_v);
    typename acle<T>::vt a2_v   = svtbl(in2_v, tbl_exch1b_v);
    typename acle<T>::vt b1_v   = svext(a2_v, a1_v, (uint64_t)(W<T>::r / 2u));
    typename acle<T>::vt b2_v   = svext(a1_v, a2_v, (uint64_t)(W<T>::r / 2u));
    typename acle<T>::vt out1_v = svtbl(b1_v, tbl_exch1c_v);
    typename acle<T>::vt out2_v = svtbl(b2_v, tbl_exch1a_v);

    svst1(pg1, out1.v, out1_v);
    svst1(pg1, out2.v, out2_v);
  }

  template <typename T>
  static inline void Exchange2(vec<T> &out1, vec<T> &out2, const vec<T> &in1, const vec<T> &in2){
    svbool_t pg1 = acle<double>::pg1();
    typename acle<double>::vt a1_v = svld1(pg1, (typename acle<double>::pt*)in1.v);
    typename acle<double>::vt a2_v = svld1(pg1, (typename acle<double>::pt*)in2.v);
    typename acle<double>::vt r1_v = svtrn1(a1_v, a2_v);
    typename acle<double>::vt r2_v = svtrn2(a1_v, a2_v);
    svst1(pg1, (typename acle<double>::pt*)out1.v, r1_v);
    svst1(pg1, (typename acle<double>::pt*)out2.v, r2_v);
  }

  static inline void Exchange3(vecf &out1, vecf &out2, const vecf &in1, const vecf &in2){
    svbool_t pg1 = acle<float>::pg1();
    typename acle<float>::vt a1_v = svld1(pg1, in1.v);
    typename acle<float>::vt a2_v = svld1(pg1, in2.v);
    typename acle<float>::vt r1_v = svtrn1(a1_v, a2_v);
    typename acle<float>::vt r2_v = svtrn2(a1_v, a2_v);
    svst1(pg1, out1.v, r1_v);
    svst1(pg1, out2.v, r2_v);
  }

  static inline void Exchange3(vecd &out1, vecd &out2, const vecd &in1, const vecd &in2){
    assert(0);
    return;
  }
};

struct Permute{

  // Permute0 is valid for any SVE vector width
  template <typename T>
  static inline vec<T> Permute0(vec<T> in) {
    vec<T> out;
    svbool_t pg1 = acle<T>::pg1();
    typename acle<T>::vt a_v = svld1(pg1, in.v);
    typename acle<T>::vt r_v = svext(a_v, a_v, (uint64_t)(W<T>::r / 2u));
    svst1(pg1, out.v, r_v);

    return out;
  }

  static inline vecd Permute1(vecd in) {
    vecd out;
    const vec<typename acle<double>::uint> tbl_swap = acle<double>::tbl1();
    svbool_t pg1 = acle<double>::pg1();
    typename acle<double>::vt a_v = svld1(pg1, in.v);
    typename acle<double>::svuint tbl_swap_v = svld1(pg1, tbl_swap.v);
    typename acle<double>::vt r_v = svtbl(a_v, tbl_swap_v);
    svst1(pg1, out.v, r_v);

    return out;
  }

  static inline vecf Permute1(vecf in) {
    vecf out;
    const vec<typename acle<float>::uint> tbl_swap = acle<float>::tbl1();
    svbool_t pg1 = acle<float>::pg1();
    typename acle<float>::vt a_v = svld1(pg1, in.v);
    typename acle<float>::svuint tbl_swap_v = svld1(pg1, tbl_swap.v);
    typename acle<float>::vt r_v = svtbl(a_v, tbl_swap_v);
    svst1(pg1, out.v, r_v);

    return out;
  }

  static inline vecd Permute2(vecd in) {
    vecd out;
    const vec<typename acle<double>::uint> tbl_swap = acle<double>::tbl_swap();
    svbool_t pg1 = acle<double>::pg1();
    typename acle<double>::vt a_v = svld1(pg1, in.v);
    typename acle<double>::svuint tbl_swap_v = svld1(pg1, tbl_swap.v);
    typename acle<double>::vt r_v = svtbl(a_v, tbl_swap_v);
    svst1(pg1, out.v, r_v);

    return out;
  }

  static inline vecf Permute2(vecf in) {
    vecf out;
    const vec<typename acle<float>::uint> tbl_swap = acle<float>::tbl2();
    svbool_t pg1 = acle<float>::pg1();
    typename acle<float>::vt a_v = svld1(pg1, in.v);
    typename acle<float>::svuint tbl_swap_v = svld1(pg1, tbl_swap.v);
    typename acle<float>::vt r_v = svtbl(a_v, tbl_swap_v);
    svst1(pg1, out.v, r_v);

    return out;
  }

  static inline vecf Permute3(vecf in) {
    vecf out;
    const vec<typename acle<float>::uint> tbl_swap = acle<float>::tbl_swap();
    svbool_t pg1 = acle<float>::pg1();
    typename acle<float>::vt a_v = svld1(pg1, in.v);
    typename acle<float>::svuint tbl_swap_v = svld1(pg1, tbl_swap.v);
    typename acle<float>::vt r_v = svtbl(a_v, tbl_swap_v);
    svst1(pg1, out.v, r_v);

    return out;
  }

  static inline vecd Permute3(vecd in) {
    return in;
  }

};

struct Rotate{

  template <int n, typename T> static inline vec<T> tRotate(vec<T> in){
    vec<T> out;
    svbool_t pg1 = acle<T>::pg1();
    typename acle<T>::vt a_v = svld1(pg1, in.v);
    typename acle<T>::vt r_v = svext(a_v, a_v, (uint64_t)(n%W<T>::r));
    svst1(pg1, out.v, r_v);

    return out;
  }

  template <typename T>
  static inline vec<T> rotate(vec<T> in, int n){

    switch(n){
    case 0:  return tRotate<0,  T>(in); break;
    case 1:  return tRotate<1,  T>(in); break;
    case 2:  return tRotate<2,  T>(in); break;
    case 3:  return tRotate<3,  T>(in); break;
    case 4:  return tRotate<4,  T>(in); break;
    case 5:  return tRotate<5,  T>(in); break;
    case 6:  return tRotate<6,  T>(in); break;
    case 7:  return tRotate<7,  T>(in); break;

    case 8:  return tRotate<8,  T>(in); break;
    case 9:  return tRotate<9,  T>(in); break;
    case 10: return tRotate<10, T>(in); break;
    case 11: return tRotate<11, T>(in); break;
    case 12: return tRotate<12, T>(in); break;
    case 13: return tRotate<13, T>(in); break;
    case 14: return tRotate<14, T>(in); break;
    case 15: return tRotate<15, T>(in); break;
    default: assert(0);
    }
  }
};
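A brief usage sketch (illustrative): svext requires a compile-time immediate, which is why rotate() dispatches through the templated tRotate; for 16 single-precision lanes,

// rotate(in, 3) -> { in[3], in[4], ..., in[15], in[0], in[1], in[2] }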

// tree-based reduction
#define svred(pg, v)\
svaddv(pg, v);

// left-to-right reduction
// #define svred(pg, v)\
// svadda(pg, 0, v)

template <typename Out_type, typename In_type>
struct Reduce{
  //Need templated class to overload output type
  //General form must generate error if compiled
  inline Out_type operator()(In_type in){
    printf("Error, using wrong Reduce function\n");
    exit(1);
    return 0;
  }
};

//Complex float Reduce
template <>
inline Grid::ComplexF Reduce<Grid::ComplexF, vecf>::operator()(vecf in){
  svbool_t pg1 = acle<float>::pg1();
  svbool_t pg_even = acle<float>::pg_even();
  svbool_t pg_odd  = acle<float>::pg_odd();
  typename acle<float>::vt a_v = svld1(pg1, in.v);
  float a = svred(pg_even, a_v);
  float b = svred(pg_odd, a_v);

  return Grid::ComplexF(a, b);
}

//Real float Reduce
template <>
inline Grid::RealF Reduce<Grid::RealF, vecf>::operator()(vecf in){
  svbool_t pg1 = acle<float>::pg1();
  typename acle<float>::vt a_v = svld1(pg1, in.v);
  float a = svred(pg1, a_v);

  return a;
}

//Complex double Reduce
template <>
inline Grid::ComplexD Reduce<Grid::ComplexD, vecd>::operator()(vecd in){
  svbool_t pg1 = acle<double>::pg1();
  svbool_t pg_even = acle<double>::pg_even();
  svbool_t pg_odd  = acle<double>::pg_odd();
  typename acle<double>::vt a_v = svld1(pg1, in.v);
  double a = svred(pg_even, a_v);
  double b = svred(pg_odd, a_v);

  return Grid::ComplexD(a, b);
}

//Real double Reduce
template <>
inline Grid::RealD Reduce<Grid::RealD, vecd>::operator()(vecd in){
  svbool_t pg1 = acle<double>::pg1();
  typename acle<double>::vt a_v = svld1(pg1, in.v);
  double a = svred(pg1, a_v);

  return a;
}

//Integer Reduce
template <>
inline Integer Reduce<Integer, veci>::operator()(veci in){
  svbool_t pg1 = acle<Integer>::pg1();
  typename acle<Integer>::vt a_v = svld1(pg1, in.v);
  Integer a = svred(pg1, a_v);

  return a;
}
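A small usage sketch (illustrative values, 512-bit vectors assumed): the complex specializations reduce the even lanes (real parts) and odd lanes (imaginary parts) separately through the pg_even / pg_odd predicates.

// vecf v = Vsplat()(1.0f, 2.0f);                          // {1, 2, 1, 2, ...}, 8 complex lanes
// Grid::ComplexF s = Reduce<Grid::ComplexF, vecf>()(v);   // s == ComplexF(8.f, 16.f)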

#undef svred
#undef vec_imm

NAMESPACE_END(Optimization)

//////////////////////////////////////////////////////////////////////////////////////
// Here assign types

typedef Optimization::vech SIMD_Htype; // Reduced precision type
typedef Optimization::vecf SIMD_Ftype; // Single precision type
typedef Optimization::vecd SIMD_Dtype; // Double precision type
typedef Optimization::veci SIMD_Itype; // Integer type

// prefetch utilities
inline void v_prefetch0(int size, const char *ptr){};
inline void prefetch_HINT_T0(const char *ptr){};

// Function name aliases
typedef Optimization::Vsplat   VsplatSIMD;
typedef Optimization::Vstore   VstoreSIMD;
typedef Optimization::Vset     VsetSIMD;
typedef Optimization::Vstream  VstreamSIMD;
template <typename S, typename T> using ReduceSIMD = Optimization::Reduce<S,T>;

// Arithmetic operations
typedef Optimization::Sum            SumSIMD;
typedef Optimization::Sub            SubSIMD;
typedef Optimization::Div            DivSIMD;
typedef Optimization::Mult           MultSIMD;
typedef Optimization::MultComplex    MultComplexSIMD;
typedef Optimization::MultAddComplex MultAddComplexSIMD;
typedef Optimization::MultRealPart   MultRealPartSIMD;
typedef Optimization::MaddRealPart   MaddRealPartSIMD;
typedef Optimization::Conj           ConjSIMD;
typedef Optimization::TimesMinusI    TimesMinusISIMD;
typedef Optimization::TimesI         TimesISIMD;

NAMESPACE_END(Grid)
Grid/simd/Grid_a64fx-fixedsize.h (new file, 769 lines)
@@ -0,0 +1,769 @@
 | 
			
		||||
    /*************************************************************************************
 | 
			
		||||
 | 
			
		||||
    Grid physics library, www.github.com/paboyle/Grid
 | 
			
		||||
 | 
			
		||||
    Source file: Grid_a64fx-fixedsize.h
 | 
			
		||||
 | 
			
		||||
    Copyright (C) 2020
 | 
			
		||||
 | 
			
		||||
    Author: Nils Meyer         <nils.meyer@ur.de>           Regensburg University
 | 
			
		||||
 | 
			
		||||
    with support from Arm
 | 
			
		||||
            Richard Sandiford  <richard.sandiford@arm.com>
 | 
			
		||||
 | 
			
		||||
    This program is free software; you can redistribute it and/or modify
 | 
			
		||||
    it under the terms of the GNU General Public License as published by
 | 
			
		||||
    the Free Software Foundation; either version 2 of the License, or
 | 
			
		||||
    (at your option) any later version.
 | 
			
		||||
 | 
			
		||||
    This program is distributed in the hope that it will be useful,
 | 
			
		||||
    but WITHOUT ANY WARRANTY; without even the implied warranty of
 | 
			
		||||
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 | 
			
		||||
    GNU General Public License for more details.
 | 
			
		||||
 | 
			
		||||
    You should have received a copy of the GNU General Public License along
 | 
			
		||||
    with this program; if not, write to the Free Software Foundation, Inc.,
 | 
			
		||||
    51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 | 
			
		||||
 | 
			
		||||
    See the full license in the file "LICENSE" in the top level distribution directory
 | 
			
		||||
    *************************************************************************************/
 | 
			
		||||
    /*  END LEGAL */
 | 
			
		||||
 | 
			
		||||
/////////////////////////////////////////////////////
 | 
			
		||||
// Using SVE ACLE with fixed-size data types
 | 
			
		||||
/////////////////////////////////////////////////////
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
// gcc 10 features
 | 
			
		||||
#if __ARM_FEATURE_SVE_BITS==512
 | 
			
		||||
/* gcc 10.0.1 and gcc 10.1 bug using ACLE data types  CAS-159553-Y1K4C6
 | 
			
		||||
   workaround: use gcc's internal data types, bugfix expected for gcc 10.2
 | 
			
		||||
typedef svbool_t    pred __attribute__((arm_sve_vector_bits(512)));
 | 
			
		||||
typedef svfloat16_t vech __attribute__((arm_sve_vector_bits(512)));
 | 
			
		||||
typedef svfloat32_t vecf __attribute__((arm_sve_vector_bits(512)));
 | 
			
		||||
typedef svfloat64_t vecd __attribute__((arm_sve_vector_bits(512)));
 | 
			
		||||
typedef svuint32_t  veci __attribute__((arm_sve_vector_bits(512)));
 | 
			
		||||
typedef svuint32_t  lutf __attribute__((arm_sve_vector_bits(512))); // LUTs for float
 | 
			
		||||
typedef svuint64_t  lutd __attribute__((arm_sve_vector_bits(512))); // LUTs for double
 | 
			
		||||
*/
 | 
			
		||||
typedef __SVBool_t    pred __attribute__((arm_sve_vector_bits(512)));
 | 
			
		||||
typedef __SVFloat16_t vech __attribute__((arm_sve_vector_bits(512)));
 | 
			
		||||
typedef __SVFloat32_t vecf __attribute__((arm_sve_vector_bits(512)));
 | 
			
		||||
typedef __SVFloat64_t vecd __attribute__((arm_sve_vector_bits(512)));
 | 
			
		||||
typedef __SVUint32_t  veci __attribute__((arm_sve_vector_bits(512)));
 | 
			
		||||
typedef __SVUint32_t  lutf __attribute__((arm_sve_vector_bits(512))); // LUTs for float
 | 
			
		||||
typedef __SVUint64_t  lutd __attribute__((arm_sve_vector_bits(512))); // LUTs for double
 | 
			
		||||
#else
 | 
			
		||||
#error "Unsupported SVE vector size: __ARM_FEATURE_SVE_BITS must be 512"
 | 
			
		||||
#endif /* __ARM_FEATURE_SVE_BITS */
 | 
			
		||||
 | 
			
		||||
// low-level API
 | 
			
		||||
NAMESPACE_BEGIN(Grid);
 | 
			
		||||
NAMESPACE_BEGIN(Optimization);
 | 
			
		||||
 | 
			
		||||
// convenience union types for tables eliminating loads
 | 
			
		||||
union ulutf {
 | 
			
		||||
  lutf v;
 | 
			
		||||
  uint32_t s[16];
 | 
			
		||||
};
 | 
			
		||||
union ulutd {
 | 
			
		||||
  lutd v;
 | 
			
		||||
  uint64_t s[8];
 | 
			
		||||
};
 | 
			
		||||
 | 
			
		||||
template <typename T>
 | 
			
		||||
struct acle{};
 | 
			
		||||
 | 
			
		||||
template <>
 | 
			
		||||
struct acle<double>{
 | 
			
		||||
  static inline lutd tbl_swap(){
 | 
			
		||||
    const ulutd t = { .s = {1, 0, 3, 2, 5, 4, 7, 6} };
 | 
			
		||||
    return t.v;
 | 
			
		||||
  }
 | 
			
		||||
  static inline lutd tbl0(){
 | 
			
		||||
    const ulutd t = { .s = {4, 5, 6, 7, 0, 1, 2, 3} };
 | 
			
		||||
    return t.v;
 | 
			
		||||
  }
 | 
			
		||||
  static inline lutd tbl1(){
 | 
			
		||||
    const ulutd t = { .s = {2, 3, 0, 1, 6, 7, 4, 5} };
 | 
			
		||||
    return t.v;
 | 
			
		||||
  }
 | 
			
		||||
  static inline lutd tbl_exch1a(){ // Exchange1
 | 
			
		||||
    const ulutd t = { .s = {0, 1, 4, 5, 2, 3, 6, 7} };
 | 
			
		||||
    return t.v;
 | 
			
		||||
  }
 | 
			
		||||
  static inline lutd tbl_exch1b(){ // Exchange1
 | 
			
		||||
    const ulutd t = { .s = {2, 3, 6, 7, 0, 1, 4, 5} };
 | 
			
		||||
    return t.v;
 | 
			
		||||
  }
 | 
			
		||||
  static inline lutd tbl_exch1c(){ // Exchange1
 | 
			
		||||
    const ulutd t = { .s = {4, 5, 0, 1, 6, 7, 2, 3} };
 | 
			
		||||
    return t.v;
 | 
			
		||||
  }
 | 
			
		||||
  static inline pred pg1(){return svptrue_b64();}
 | 
			
		||||
  static inline pred pg_even(){return svzip1_b64(svptrue_b64(), svpfalse_b());}
 | 
			
		||||
  static inline pred pg_odd() {return svzip1_b64(svpfalse_b(), svptrue_b64());}
 | 
			
		||||
  static inline vecd zero(){return svdup_f64(0.);}
 | 
			
		||||
};
 | 
			
		||||
 | 
			
		||||
template <>
 | 
			
		||||
struct acle<float>{
 | 
			
		||||
  // exchange neighboring elements
 | 
			
		||||
  static inline lutf tbl_swap(){
 | 
			
		||||
    const ulutf t = { .s = {1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14} };
 | 
			
		||||
    return t.v;
 | 
			
		||||
  }
 | 
			
		||||
  static inline lutf tbl0(){
 | 
			
		||||
    const ulutf t = { .s = {8, 9, 10, 11, 12, 13, 14, 15, 0, 1, 2, 3, 4, 5, 6, 7} };
 | 
			
		||||
    return t.v;
 | 
			
		||||
  }
 | 
			
		||||
  static inline lutf tbl1(){
 | 
			
		||||
    const ulutf t = { .s = {4, 5, 6, 7, 0, 1, 2, 3, 12, 13, 14, 15, 8, 9, 10, 11} };
 | 
			
		||||
    return t.v;
 | 
			
		||||
  }
 | 
			
		||||
  static inline lutf tbl2(){
 | 
			
		||||
    const ulutf t = { .s = {2, 3, 0, 1, 6, 7, 4, 5, 10, 11, 8, 9, 14, 15, 12, 13} };
 | 
			
		||||
    return t.v;
 | 
			
		||||
  }
 | 
			
		||||
  static inline lutf tbl_exch1a(){ // Exchange1
 | 
			
		||||
    const ulutf t = { .s = {0, 1, 2, 3, 8, 9, 10, 11, 4, 5, 6, 7, 12, 13, 14, 15 } };
 | 
			
		||||
    return t.v;
 | 
			
		||||
  }
 | 
			
		||||
  static inline lutf tbl_exch1b(){ // Exchange1
 | 
			
		||||
    const ulutf t = { .s = {4, 5, 6, 7, 12, 13, 14, 15, 0, 1, 2, 3, 8, 9, 10, 11 } };
 | 
			
		||||
    return t.v;
 | 
			
		||||
  }
 | 
			
		||||
  static inline lutf tbl_exch1c(){ // Exchange1
 | 
			
		||||
    const ulutf t = { .s = {8, 9, 10, 11, 0, 1, 2, 3, 12, 13, 14, 15, 4, 5, 6, 7} };
 | 
			
		||||
    return t.v;
 | 
			
		||||
  }
 | 
			
		||||
  static inline pred pg1(){return svptrue_b32();}
 | 
			
		||||
  static inline pred pg_even(){return svzip1_b32(svptrue_b32(), svpfalse_b());}
 | 
			
		||||
  static inline pred pg_odd() {return svzip1_b32(svpfalse_b(), svptrue_b32());}
 | 
			
		||||
  static inline vecf zero(){return svdup_f32(0.);}
 | 
			
		||||
};
 | 
			
		||||
 | 
			
		||||
template <>
 | 
			
		||||
struct acle<uint16_t>{
 | 
			
		||||
  static inline pred pg1(){return svptrue_b16();}
 | 
			
		||||
  static inline pred pg_even(){return svzip1_b16(svptrue_b16(), svpfalse_b());}
 | 
			
		||||
  static inline pred pg_odd() {return svzip1_b16(svpfalse_b(), svptrue_b16());}
 | 
			
		||||
  static inline vech zero(){return svdup_f16(0.);}
 | 
			
		||||
};
 | 
			
		||||
 | 
			
		||||
template <>
 | 
			
		||||
struct acle<Integer>{
 | 
			
		||||
  //static inline svbool_t pg1(){return svptrue_b16();}
 | 
			
		||||
  static inline pred pg1(){return svptrue_b32();}
 | 
			
		||||
  static inline pred pg_even(){return svzip1_b32(svptrue_b32(), svpfalse_b());}
 | 
			
		||||
  static inline pred pg_odd() {return svzip1_b32(svpfalse_b(), svptrue_b32());}
 | 
			
		||||
};
 | 
			
		||||
 | 
			
		||||
// ---------------------------------------------------
 | 
			
		||||
 | 
			
		||||
struct Vsplat{
 | 
			
		||||
  // Complex float
 | 
			
		||||
  inline vecf operator()(float a, float b){
 | 
			
		||||
    vecf a_v = svdup_f32(a);
 | 
			
		||||
    vecf b_v = svdup_f32(b);
 | 
			
		||||
    return svzip1(a_v, b_v);
 | 
			
		||||
  }
 | 
			
		||||
  // Real float
 | 
			
		||||
  inline vecf operator()(float a){
 | 
			
		||||
    return svdup_f32(a);
 | 
			
		||||
  }
 | 
			
		||||
  // Complex double
 | 
			
		||||
  inline vecd operator()(double a, double b){
 | 
			
		||||
    vecd a_v = svdup_f64(a);
 | 
			
		||||
    vecd b_v = svdup_f64(b);
 | 
			
		||||
    return svzip1(a_v, b_v);
 | 
			
		||||
  }
 | 
			
		||||
  // Real double
 | 
			
		||||
  inline vecd operator()(double a){
 | 
			
		||||
    return svdup_f64(a);
 | 
			
		||||
  }
 | 
			
		||||
  // Integer
 | 
			
		||||
  inline veci operator()(Integer a){
 | 
			
		||||
    return svdup_u32(a);
 | 
			
		||||
  }
 | 
			
		||||
};
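// Illustrative sketch (not part of the diff): the two-argument splat interleaves the
// real and imaginary parts into adjacent lanes via svzip1, e.g. assuming 512-bit SVE:
//   vecf v = Vsplat()(1.0f, 2.0f);   // lanes: 1,2,1,2,... (re,im pairs, 8 complex numbers)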
 | 
			
		||||
 | 
			
		||||
struct Vstore{
 | 
			
		||||
  // Real float
 | 
			
		||||
  inline void operator()(vecf a, float *D){
 | 
			
		||||
    pred pg1 = acle<float>::pg1();
 | 
			
		||||
    svst1(pg1, D, a);
 | 
			
		||||
  }
 | 
			
		||||
  // Real double
 | 
			
		||||
  inline void operator()(vecd a, double *D){
 | 
			
		||||
    pred pg1 = acle<double>::pg1();
 | 
			
		||||
    svst1(pg1, D, a);
 | 
			
		||||
  }
 | 
			
		||||
  // Real float
 | 
			
		||||
  inline void operator()(veci a, Integer *D){
 | 
			
		||||
    pred pg1 = acle<Integer>::pg1();
 | 
			
		||||
    svst1(pg1, D, a);
 | 
			
		||||
  }
 | 
			
		||||
};
 | 
			
		||||
 | 
			
		||||
struct Vstream{
 | 
			
		||||
  // Real float
 | 
			
		||||
  inline void operator()(float * a, vecf b){
 | 
			
		||||
    pred pg1 = acle<float>::pg1();
 | 
			
		||||
    svstnt1(pg1, a, b);
 | 
			
		||||
    //svst1(pg1, a, b);
 | 
			
		||||
  }
 | 
			
		||||
  // Real double
 | 
			
		||||
  inline void operator()(double * a, vecd b){
 | 
			
		||||
    pred pg1 = acle<double>::pg1();
 | 
			
		||||
    svstnt1(pg1, a, b);
 | 
			
		||||
    //svst1(pg1, a, b);
 | 
			
		||||
  }
 | 
			
		||||
};
 | 
			
		||||
 | 
			
		||||
struct Vset{
 | 
			
		||||
  // Complex float
 | 
			
		||||
  inline vecf operator()(Grid::ComplexF *a){
 | 
			
		||||
    pred pg1 = acle<float>::pg1();
 | 
			
		||||
    return svld1(pg1, (float*)a);
 | 
			
		||||
  }
 | 
			
		||||
  // Complex double
 | 
			
		||||
  inline vecd operator()(Grid::ComplexD *a){
 | 
			
		||||
    pred pg1 = acle<double>::pg1();
 | 
			
		||||
    return svld1(pg1, (double*)a);
 | 
			
		||||
  }
 | 
			
		||||
  // Real float
 | 
			
		||||
  inline vecf operator()(float *a){
 | 
			
		||||
    pred pg1 = acle<float>::pg1();
 | 
			
		||||
    return svld1(pg1, a);
 | 
			
		||||
  }
 | 
			
		||||
  // Real double
 | 
			
		||||
  inline vecd operator()(double *a){
 | 
			
		||||
    pred pg1 = acle<double>::pg1();
 | 
			
		||||
    return svld1(pg1, a);
 | 
			
		||||
  }
 | 
			
		||||
  // Integer
 | 
			
		||||
  inline veci operator()(Integer *a){
 | 
			
		||||
    pred pg1 = acle<Integer>::pg1();
 | 
			
		||||
    return svld1(pg1, a);
 | 
			
		||||
  }
 | 
			
		||||
};
 | 
			
		||||
 | 
			
		||||
/////////////////////////////////////////////////////
 | 
			
		||||
// Arithmetic operations
 | 
			
		||||
/////////////////////////////////////////////////////
 | 
			
		||||
 | 
			
		||||
struct Sum{
 | 
			
		||||
  // Complex/real float
 | 
			
		||||
  inline vecf operator()(vecf a, vecf b){
 | 
			
		||||
    pred pg1 = acle<float>::pg1();
 | 
			
		||||
    return svadd_x(pg1, a, b);
 | 
			
		||||
  }
 | 
			
		||||
  // Complex/real double
 | 
			
		||||
  inline vecd operator()(vecd a, vecd b){
 | 
			
		||||
    pred pg1 = acle<double>::pg1();
 | 
			
		||||
    return svadd_x(pg1, a, b);
 | 
			
		||||
  }
 | 
			
		||||
  // Integer
 | 
			
		||||
  inline veci operator()(veci a, veci b){
 | 
			
		||||
    pred pg1 = acle<Integer>::pg1();
 | 
			
		||||
    return svadd_x(pg1, a, b);
 | 
			
		||||
  }
 | 
			
		||||
};
 | 
			
		||||
 | 
			
		||||
struct Sub{
 | 
			
		||||
  // Complex/real float
 | 
			
		||||
  inline vecf operator()(vecf a, vecf b){
 | 
			
		||||
    pred pg1 = acle<float>::pg1();
 | 
			
		||||
    return svsub_x(pg1, a, b);
 | 
			
		||||
  }
 | 
			
		||||
  // Complex/real double
 | 
			
		||||
  inline vecd operator()(vecd a, vecd b){
 | 
			
		||||
    pred pg1 = acle<double>::pg1();
 | 
			
		||||
    return svsub_x(pg1, a, b);
 | 
			
		||||
  }
 | 
			
		||||
  // Integer
 | 
			
		||||
  inline veci operator()(veci a, veci b){
 | 
			
		||||
    pred pg1 = acle<Integer>::pg1();
 | 
			
		||||
    return svsub_x(pg1, a, b);
 | 
			
		||||
  }
 | 
			
		||||
 | 
			
		||||
};
 | 
			
		||||
 | 
			
		||||
struct Mult{
 | 
			
		||||
  // Real float fma
 | 
			
		||||
  inline vecf operator()(vecf a, vecf b, vecf c){
 | 
			
		||||
    pred pg1 = acle<float>::pg1();
 | 
			
		||||
    return svmad_x(pg1, b, c, a);
 | 
			
		||||
  }
 | 
			
		||||
  // Real double fma
 | 
			
		||||
  inline vecd operator()(vecd a, vecd b, vecd c){
 | 
			
		||||
    pred pg1 = acle<double>::pg1();
 | 
			
		||||
    return svmad_x(pg1, b, c, a);
 | 
			
		||||
  }
 | 
			
		||||
  // Real float
 | 
			
		||||
  inline vecf operator()(vecf a, vecf b){
 | 
			
		||||
    pred pg1 = acle<float>::pg1();
 | 
			
		||||
    return svmul_x(pg1, a, b);
 | 
			
		||||
  }
 | 
			
		||||
  // Real double
 | 
			
		||||
  inline vecd operator()(vecd a, vecd b){
 | 
			
		||||
    pred pg1 = acle<double>::pg1();
 | 
			
		||||
    return svmul_x(pg1, a, b);
 | 
			
		||||
  }
 | 
			
		||||
  // Integer
 | 
			
		||||
  inline veci operator()(veci a, veci b){
 | 
			
		||||
    pred pg1 = acle<Integer>::pg1();
 | 
			
		||||
    return svmul_x(pg1, a, b);
 | 
			
		||||
  }
 | 
			
		||||
};
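// Note on operand order (sketch, inferred from the svmad calls above): the three-operand
// overload computes b*c + a, not a*b + c:
//   vecf y = Mult()(a, b, c);   // y = svmad_x(pg, b, c, a) = b*c + a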
 | 
			
		||||
 | 
			
		||||
struct MultRealPart{
 | 
			
		||||
  // Complex float
 | 
			
		||||
  inline vecf operator()(vecf a, vecf b){
 | 
			
		||||
    pred pg1 = acle<float>::pg1();
 | 
			
		||||
    // using FCMLA
 | 
			
		||||
    vecf z_v = acle<float>::zero();
 | 
			
		||||
    return svcmla_x(pg1, z_v, a, b, 0);
 | 
			
		||||
  }
 | 
			
		||||
  // Complex double
 | 
			
		||||
  inline vecd operator()(vecd a, vecd b){
 | 
			
		||||
    pred pg1 = acle<double>::pg1();
 | 
			
		||||
    // using FCMLA
 | 
			
		||||
    vecd z_v = acle<double>::zero();
 | 
			
		||||
    return svcmla_x(pg1, z_v, a, b, 0);
 | 
			
		||||
  }
 | 
			
		||||
};
 | 
			
		||||
 | 
			
		||||
struct MaddRealPart{
 | 
			
		||||
  // Complex float
 | 
			
		||||
  inline vecf operator()(vecf a, vecf b, vecf c){
 | 
			
		||||
    pred pg1 = acle<float>::pg1();
 | 
			
		||||
    // using FCMLA
 | 
			
		||||
    return svcmla_x(pg1, c, a, b, 0);
 | 
			
		||||
  }
 | 
			
		||||
  // Complex double
 | 
			
		||||
  inline vecd operator()(vecd a, vecd b, vecd c){
 | 
			
		||||
    pred pg1 = acle<double>::pg1();
 | 
			
		||||
    // using FCMLA
 | 
			
		||||
    return svcmla_x(pg1, c, a, b, 0);
 | 
			
		||||
  }
 | 
			
		||||
};
 | 
			
		||||
 | 
			
		||||
struct MultComplex{
 | 
			
		||||
  // Complex a*b
 | 
			
		||||
  // Complex float
 | 
			
		||||
  inline vecf operator()(vecf a, vecf b){
 | 
			
		||||
    pred pg1 = acle<float>::pg1();
 | 
			
		||||
    vecf z = acle<float>::zero();
 | 
			
		||||
    // using FCMLA
 | 
			
		||||
    vecf r_v = svcmla_x(pg1, z, a, b, 0);
 | 
			
		||||
    return svcmla_x(pg1, r_v, a, b, 90);
 | 
			
		||||
  }
 | 
			
		||||
  // Complex double
 | 
			
		||||
  inline vecd operator()(vecd a, vecd b){
 | 
			
		||||
    pred pg1 = acle<double>::pg1();
 | 
			
		||||
    vecd z = acle<double>::zero();
 | 
			
		||||
    // using FCMLA
 | 
			
		||||
    vecd r_v = svcmla_x(pg1, z, a, b, 0);
 | 
			
		||||
    return svcmla_x(pg1, r_v, a, b, 90);
 | 
			
		||||
  }
 | 
			
		||||
};
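// Sketch of the FCMLA decomposition used above (standard complex-multiply identity):
//   (ar + i*ai)*(br + i*bi) = (ar*br - ai*bi) + i*(ar*bi + ai*br)
// rotation 0  accumulates  ar*br into re and ar*bi into im,
// rotation 90 accumulates -ai*bi into re and ai*br into im.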
 | 
			
		||||
 | 
			
		||||
struct MultAddComplex{
 | 
			
		||||
  // Complex a*b+c
 | 
			
		||||
  // Complex float
 | 
			
		||||
  inline vecf operator()(vecf a, vecf b, vecf c){
 | 
			
		||||
    pred pg1 = acle<float>::pg1();
 | 
			
		||||
    // using FCMLA
 | 
			
		||||
    vecf r_v = svcmla_x(pg1, c, a, b, 0);
 | 
			
		||||
    return svcmla_x(pg1, r_v, a, b, 90);
 | 
			
		||||
  }
 | 
			
		||||
  // Complex double
 | 
			
		||||
  inline vecd operator()(vecd a, vecd b, vecd c){
 | 
			
		||||
    pred pg1 = acle<double>::pg1();
 | 
			
		||||
    // using FCMLA
 | 
			
		||||
    vecd r_v = svcmla_x(pg1, c, a, b, 0);
 | 
			
		||||
    return svcmla_x(pg1, r_v, a, b, 90);
 | 
			
		||||
  }
 | 
			
		||||
};
 | 
			
		||||
 | 
			
		||||
struct Div{
 | 
			
		||||
  // Real float
 | 
			
		||||
  inline vecf operator()(vecf a, vecf b){
 | 
			
		||||
    pred pg1 = acle<float>::pg1();
 | 
			
		||||
    return svdiv_x(pg1, a, b);
 | 
			
		||||
  }
 | 
			
		||||
  // Real double
 | 
			
		||||
  inline vecd operator()(vecd a, vecd b){
 | 
			
		||||
    pred pg1 = acle<double>::pg1();
 | 
			
		||||
    return svdiv_x(pg1, a, b);
 | 
			
		||||
  }
 | 
			
		||||
};
 | 
			
		||||
 | 
			
		||||
struct Conj{
 | 
			
		||||
  // Complex float
 | 
			
		||||
  inline vecf operator()(vecf a){
 | 
			
		||||
    pred pg_odd = acle<float>::pg_odd();
 | 
			
		||||
    //return svneg_x(pg_odd, a);  this is unsafe
 | 
			
		||||
    return svneg_m(a, pg_odd, a);
 | 
			
		||||
  }
 | 
			
		||||
  // Complex double
 | 
			
		||||
  inline vecd operator()(vecd a){
 | 
			
		||||
    pred pg_odd = acle<double>::pg_odd();
 | 
			
		||||
    //return svneg_x(pg_odd, a);  this is unsafe
 | 
			
		||||
    return svneg_m(a, pg_odd, a);
 | 
			
		||||
  }
 | 
			
		||||
};
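// Why the merging form is used above (sketch): svneg_x leaves inactive lanes undefined,
// so conjugation negates only the odd (imaginary) lanes while merging the untouched
// real lanes from the first operand:
//   vecd c = svneg_m(a, acle<double>::pg_odd(), a);   // re kept, im negated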
 | 
			
		||||
 | 
			
		||||
struct TimesMinusI{
 | 
			
		||||
  // Complex float
 | 
			
		||||
  inline vecf operator()(vecf a, vecf b){
 | 
			
		||||
    lutf tbl_swap = acle<float>::tbl_swap();
 | 
			
		||||
    pred pg1 = acle<float>::pg1();
 | 
			
		||||
    pred pg_odd = acle<float>::pg_odd();
 | 
			
		||||
 | 
			
		||||
    vecf a_v = svtbl(a, tbl_swap);
 | 
			
		||||
    //return svneg_x(pg_odd, a_v);  this is unsafe
 | 
			
		||||
    return svneg_m(a_v, pg_odd, a_v);
 | 
			
		||||
  }
 | 
			
		||||
  // Complex double
 | 
			
		||||
  inline vecd operator()(vecd a, vecd b){
 | 
			
		||||
    lutd tbl_swap = acle<double>::tbl_swap();
 | 
			
		||||
    pred pg1 = acle<double>::pg1();
 | 
			
		||||
    pred pg_odd = acle<double>::pg_odd();
 | 
			
		||||
 | 
			
		||||
    vecd a_v = svtbl(a, tbl_swap);
 | 
			
		||||
    //return svneg_x(pg_odd, a_v);  this is unsafe
 | 
			
		||||
    return svneg_m(a_v, pg_odd, a_v);
 | 
			
		||||
  }
 | 
			
		||||
};
 | 
			
		||||
 | 
			
		||||
struct TimesI{
 | 
			
		||||
  // Complex float
 | 
			
		||||
  inline vecf operator()(vecf a, vecf b){
 | 
			
		||||
    lutf tbl_swap = acle<float>::tbl_swap();
 | 
			
		||||
    pred pg1 = acle<float>::pg1();
 | 
			
		||||
    pred pg_even = acle<float>::pg_even();
 | 
			
		||||
 | 
			
		||||
    vecf a_v = svtbl(a, tbl_swap);
 | 
			
		||||
    //return svneg_x(pg_even, a_v);  this is unsafe
 | 
			
		||||
    return svneg_m(a_v, pg_even, a_v);
 | 
			
		||||
  }
 | 
			
		||||
  // Complex double
 | 
			
		||||
  inline vecd operator()(vecd a, vecd b){
 | 
			
		||||
    lutd tbl_swap = acle<double>::tbl_swap();
 | 
			
		||||
    pred pg1 = acle<double>::pg1();
 | 
			
		||||
    pred pg_even = acle<double>::pg_even();
 | 
			
		||||
 | 
			
		||||
    vecd a_v = svtbl(a, tbl_swap);
 | 
			
		||||
    //return svneg_x(pg_even, a_v);  this is unsafe
 | 
			
		||||
    return svneg_m(a_v, pg_even, a_v);
 | 
			
		||||
  }
 | 
			
		||||
};
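// Lane-level sketch of TimesMinusI / TimesI (assumed (re,im) pairing of lanes):
//    i*(re + i*im) = -im + i*re  -> swap lanes, negate the new even (real) lanes
//   -i*(re + i*im) =  im - i*re  -> swap lanes, negate the new odd (imaginary) lanes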
 | 
			
		||||
 | 
			
		||||
struct PrecisionChange {
 | 
			
		||||
  static inline vech StoH (vecf sa, vecf sb) {
 | 
			
		||||
    pred pg1s = acle<float>::pg1();
 | 
			
		||||
    vech ha_v = svcvt_f16_x(pg1s, sa);
 | 
			
		||||
    vech hb_v = svcvt_f16_x(pg1s, sb);
 | 
			
		||||
    return svuzp1(ha_v, hb_v);
 | 
			
		||||
  }
 | 
			
		||||
  static inline void HtoS(vech h,vecf &sa,vecf &sb) {
 | 
			
		||||
    pred pg1s = acle<float>::pg1();
 | 
			
		||||
    vech ha_v = svzip1(h, h);
 | 
			
		||||
    vech hb_v = svzip2(h, h);
 | 
			
		||||
    sa = svcvt_f32_x(pg1s, ha_v);
 | 
			
		||||
    sb = svcvt_f32_x(pg1s, hb_v);
 | 
			
		||||
  }
 | 
			
		||||
  static inline vecf DtoS (vecd a,vecd b) {
 | 
			
		||||
    pred pg1d = acle<double>::pg1();
 | 
			
		||||
    vecf sa_v = svcvt_f32_x(pg1d, a);
 | 
			
		||||
    vecf sb_v = svcvt_f32_x(pg1d, b);
 | 
			
		||||
    return svuzp1(sa_v, sb_v);
 | 
			
		||||
  }
 | 
			
		||||
  static inline void StoD (vecf s,vecd &a,vecd &b) {
 | 
			
		||||
    pred pg1d = acle<double>::pg1();
 | 
			
		||||
    vecf sa_v = svzip1(s, s);
 | 
			
		||||
    vecf sb_v = svzip2(s, s);
 | 
			
		||||
    a = svcvt_f64_x(pg1d, sa_v);
 | 
			
		||||
    b = svcvt_f64_x(pg1d, sb_v);
 | 
			
		||||
  }
 | 
			
		||||
  static inline vech DtoH (vecd a,vecd b,vecd c,vecd d) {
 | 
			
		||||
    pred pg1d = acle<double>::pg1();
 | 
			
		||||
    pred pg1h = acle<uint16_t>::pg1();
 | 
			
		||||
    vech ha_v = svcvt_f16_x(pg1d, a);
 | 
			
		||||
    vech hb_v = svcvt_f16_x(pg1d, b);
 | 
			
		||||
    vech hc_v = svcvt_f16_x(pg1d, c);
 | 
			
		||||
    vech hd_v = svcvt_f16_x(pg1d, d);
 | 
			
		||||
    vech hab_v = svuzp1(ha_v, hb_v);
 | 
			
		||||
    vech hcd_v = svuzp1(hc_v, hd_v);
 | 
			
		||||
    return svuzp1(hab_v, hcd_v);
 | 
			
		||||
 | 
			
		||||
/*
 | 
			
		||||
    vecf sa,sb;
 | 
			
		||||
    sa = DtoS(a,b);
 | 
			
		||||
    sb = DtoS(c,d);
 | 
			
		||||
    return StoH(sa,sb);
 | 
			
		||||
*/
 | 
			
		||||
  }
 | 
			
		||||
  static inline void HtoD(vech h,vecd &a,vecd &b,vecd &c,vecd &d) {
 | 
			
		||||
    pred pg1h = acle<uint16_t>::pg1();
 | 
			
		||||
    pred pg1d = acle<double>::pg1();
 | 
			
		||||
    vech sa_v = svzip1(h, h);
 | 
			
		||||
    vech sb_v = svzip2(h, h);
 | 
			
		||||
    vech da_v = svzip1(sa_v, sa_v);
 | 
			
		||||
    vech db_v = svzip2(sa_v, sa_v);
 | 
			
		||||
    vech dc_v = svzip1(sb_v, sb_v);
 | 
			
		||||
    vech dd_v = svzip2(sb_v, sb_v);
 | 
			
		||||
    a = svcvt_f64_x(pg1d, da_v);
 | 
			
		||||
    b = svcvt_f64_x(pg1d, db_v);
 | 
			
		||||
    c = svcvt_f64_x(pg1d, dc_v);
 | 
			
		||||
    d = svcvt_f64_x(pg1d, dd_v);
 | 
			
		||||
 | 
			
		||||
/*
 | 
			
		||||
    vecf sa,sb;
 | 
			
		||||
    HtoS(h,sa,sb);
 | 
			
		||||
    StoD(sa,a,b);
 | 
			
		||||
    StoD(sb,c,d);
 | 
			
		||||
*/
 | 
			
		||||
  }
 | 
			
		||||
};
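// Packing sketch for the conversions above (assumed behaviour of svcvt/svuzp1/svzip):
// narrowing conversions leave results in every other lane, svuzp1 compacts two such
// vectors into one register, and svzip1/svzip2 re-expand before widening, e.g.
//   vech h = PrecisionChange::StoH(sa, sb);   // 2 x 16 floats -> 32 halves
//   PrecisionChange::HtoS(h, sa, sb);         // round trip, up to fp16 rounding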
 | 
			
		||||
 | 
			
		||||
struct Exchange{
 | 
			
		||||
  // float
 | 
			
		||||
  static inline void Exchange0(vecf &out1, vecf &out2, vecf in1, vecf in2){
 | 
			
		||||
    vecf r1_v = svext(in1, in1, (uint64_t)8u);
 | 
			
		||||
    vecf r2_v = svext(in2, in2, (uint64_t)8u);
 | 
			
		||||
    out1 = svext(r1_v, in2, (uint64_t)8u);
 | 
			
		||||
    out2 = svext(in1, r2_v, (uint64_t)8u);
 | 
			
		||||
  }
 | 
			
		||||
  static inline void Exchange1(vecf &out1, vecf &out2, vecf in1, vecf in2){
 | 
			
		||||
    // this one is tricky; svtrn2q* from SVE2 fits best, but it is not available in SVE1
 | 
			
		||||
    // alternative: use 4-el structure; expect translation into 4x ldp + 4x stp -> SFI
 | 
			
		||||
    lutf tbl_exch1a = acle<float>::tbl_exch1a();
 | 
			
		||||
    lutf tbl_exch1b = acle<float>::tbl_exch1b();
 | 
			
		||||
    lutf tbl_exch1c = acle<float>::tbl_exch1c();
 | 
			
		||||
 | 
			
		||||
    vecf a1_v = svtbl(in1, tbl_exch1a);
 | 
			
		||||
    vecf a2_v = svtbl(in2, tbl_exch1b);
 | 
			
		||||
    vecf b1_v  = svext(a2_v, a1_v, (uint64_t)8u);
 | 
			
		||||
    vecf b2_v  = svext(a1_v, a2_v, (uint64_t)8u);
 | 
			
		||||
    out1 = svtbl(b1_v, tbl_exch1c);
 | 
			
		||||
    out2 = svtbl(b2_v, tbl_exch1a);
 | 
			
		||||
  }
 | 
			
		||||
  static inline void Exchange2(vecf &out1, vecf &out2, vecf in1, vecf in2){
 | 
			
		||||
    out1 = (vecf)svtrn1((vecd)in1, (vecd)in2);
 | 
			
		||||
    out2 = (vecf)svtrn2((vecd)in1, (vecd)in2);
 | 
			
		||||
  }
 | 
			
		||||
  static inline void Exchange3(vecf &out1, vecf &out2, vecf in1, vecf in2){
 | 
			
		||||
    out1 = svtrn1(in1, in2);
 | 
			
		||||
    out2 = svtrn2(in1, in2);
 | 
			
		||||
  }
 | 
			
		||||
 | 
			
		||||
  // double
 | 
			
		||||
  static inline void Exchange0(vecd &out1, vecd &out2, vecd in1, vecd in2){
 | 
			
		||||
    vecd r1_v = svext(in1, in1, (uint64_t)4u);
 | 
			
		||||
    vecd r2_v = svext(in2, in2, (uint64_t)4u);
 | 
			
		||||
    out1 = svext(r1_v, in2, (uint64_t)4u);
 | 
			
		||||
    out2 = svext(in1, r2_v, (uint64_t)4u);
 | 
			
		||||
  }
 | 
			
		||||
  static inline void Exchange1(vecd &out1, vecd &out2, vecd in1, vecd in2){
 | 
			
		||||
    // this one is tricky; svtrn2q* from SVE2 fits best, but it is not available in SVE1
 | 
			
		||||
    // alternative: use 4-el structure; expect translation into 4x ldp + 4x stp -> SFI
 | 
			
		||||
    lutd tbl_exch1a = acle<double>::tbl_exch1a();
 | 
			
		||||
    lutd tbl_exch1b = acle<double>::tbl_exch1b();
 | 
			
		||||
    lutd tbl_exch1c = acle<double>::tbl_exch1c();
 | 
			
		||||
 | 
			
		||||
    vecd a1_v = svtbl(in1, tbl_exch1a);
 | 
			
		||||
    vecd a2_v = svtbl(in2, tbl_exch1b);
 | 
			
		||||
    vecd b1_v = svext(a2_v, a1_v, (uint64_t)4u);
 | 
			
		||||
    vecd b2_v = svext(a1_v, a2_v, (uint64_t)4u);
 | 
			
		||||
    out1 = svtbl(b1_v, tbl_exch1c);
 | 
			
		||||
    out2 = svtbl(b2_v, tbl_exch1a);
 | 
			
		||||
  }
 | 
			
		||||
  static inline void Exchange2(vecd &out1, vecd &out2, vecd in1, vecd in2){
 | 
			
		||||
    out1 = svtrn1(in1, in2);
 | 
			
		||||
    out2 = svtrn2(in1, in2);
 | 
			
		||||
  }
 | 
			
		||||
  static inline void Exchange3(vecd &out1, vecd &out2, vecd in1, vecd in2){
 | 
			
		||||
    assert(0);
 | 
			
		||||
    return;
 | 
			
		||||
  }
 | 
			
		||||
};
 | 
			
		||||
 | 
			
		||||
#undef VECTOR_FOR
 | 
			
		||||
 | 
			
		||||
struct Permute{
 | 
			
		||||
  // float
 | 
			
		||||
  static inline vecf Permute0(vecf in) {
 | 
			
		||||
    return svext(in, in, (uint64_t)8u);
 | 
			
		||||
  }
 | 
			
		||||
  static inline vecf Permute1(vecf in) {
 | 
			
		||||
    lutf tbl_swap = acle<float>::tbl1();
 | 
			
		||||
    return svtbl(in, tbl_swap);
 | 
			
		||||
  }
 | 
			
		||||
  static inline vecf Permute2(vecf in) {
 | 
			
		||||
    lutf tbl_swap = acle<float>::tbl2();
 | 
			
		||||
    return svtbl(in, tbl_swap);
 | 
			
		||||
  }
 | 
			
		||||
  static inline vecf Permute3(vecf in) {
 | 
			
		||||
    lutf tbl_swap = acle<float>::tbl_swap();
 | 
			
		||||
    return svtbl(in, tbl_swap);
 | 
			
		||||
  }
 | 
			
		||||
 | 
			
		||||
  // double
 | 
			
		||||
  static inline vecd Permute0(vecd in) {
 | 
			
		||||
    return svext(in, in, (uint64_t)(8u / 2u));
 | 
			
		||||
  }
 | 
			
		||||
  static inline vecd Permute1(vecd in) {
 | 
			
		||||
    lutd tbl_swap = acle<double>::tbl1();
 | 
			
		||||
    return svtbl(in, tbl_swap);
 | 
			
		||||
  }
 | 
			
		||||
  static inline vecd Permute2(vecd in) {
 | 
			
		||||
    lutd tbl_swap = acle<double>::tbl_swap();
 | 
			
		||||
    return svtbl(in, tbl_swap);
 | 
			
		||||
  }
 | 
			
		||||
  static inline vecd Permute3(vecd in) {
 | 
			
		||||
    return in;
 | 
			
		||||
  }
 | 
			
		||||
};
 | 
			
		||||
 | 
			
		||||
struct Rotate{
 | 
			
		||||
 | 
			
		||||
  static inline vecf rotate(vecf in, int n){
 | 
			
		||||
    switch(n){
 | 
			
		||||
    case 0:  return tRotate<0>(in); break;
 | 
			
		||||
    case 1:  return tRotate<1>(in); break;
 | 
			
		||||
    case 2:  return tRotate<2>(in); break;
 | 
			
		||||
    case 3:  return tRotate<3>(in); break;
 | 
			
		||||
    case 4:  return tRotate<4>(in); break;
 | 
			
		||||
    case 5:  return tRotate<5>(in); break;
 | 
			
		||||
    case 6:  return tRotate<6>(in); break;
 | 
			
		||||
    case 7:  return tRotate<7>(in); break;
 | 
			
		||||
 | 
			
		||||
    case 8:  return tRotate<8>(in); break;
 | 
			
		||||
    case 9:  return tRotate<9>(in); break;
 | 
			
		||||
    case 10: return tRotate<10>(in); break;
 | 
			
		||||
    case 11: return tRotate<11>(in); break;
 | 
			
		||||
    case 12: return tRotate<12>(in); break;
 | 
			
		||||
    case 13: return tRotate<13>(in); break;
 | 
			
		||||
    case 14: return tRotate<14>(in); break;
 | 
			
		||||
    case 15: return tRotate<15>(in); break;
 | 
			
		||||
    default: assert(0);
 | 
			
		||||
    }
 | 
			
		||||
  }
 | 
			
		||||
  static inline vecd rotate(vecd in, int n){
 | 
			
		||||
    switch(n){
 | 
			
		||||
    case 0:  return tRotate<0>(in); break;
 | 
			
		||||
    case 1:  return tRotate<1>(in); break;
 | 
			
		||||
    case 2:  return tRotate<2>(in); break;
 | 
			
		||||
    case 3:  return tRotate<3>(in); break;
 | 
			
		||||
    case 4:  return tRotate<4>(in); break;
 | 
			
		||||
    case 5:  return tRotate<5>(in); break;
 | 
			
		||||
    case 6:  return tRotate<6>(in); break;
 | 
			
		||||
    case 7:  return tRotate<7>(in); break;
 | 
			
		||||
    default: assert(0);
 | 
			
		||||
    }
 | 
			
		||||
  }
 | 
			
		||||
 | 
			
		||||
  template <int n> static inline vecf tRotate(vecf in){
 | 
			
		||||
    return svext(in, in, (uint64_t)n);
 | 
			
		||||
  }
 | 
			
		||||
  template <int n> static inline vecd tRotate(vecd in){
 | 
			
		||||
    return svext(in, in, (uint64_t)n);
 | 
			
		||||
  }
 | 
			
		||||
};
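// Illustrative behaviour (sketch): rotate() is a lane rotation with wrap-around via svext,
//   vecf r = Rotate::rotate(in, 3);   // lane i of r = lane (i+3) mod 16 of in (floats)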
 | 
			
		||||
 | 
			
		||||
// tree-based reduction
 | 
			
		||||
#define svred(pg, v) svaddv(pg, v)
 | 
			
		||||
 | 
			
		||||
// left-to-right reduction
 | 
			
		||||
// #define svred(pg, v)\
 | 
			
		||||
// svadda(pg, 0, v)
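// Sketch of the difference (assumed ACLE semantics): svaddv reduces in an unspecified
// tree order, while svadda performs a strictly ordered left-to-right accumulation,
// which is slower but bit-reproducible:
//   float tree    = svaddv(svptrue_b32(), v);
//   float ordered = svadda(svptrue_b32(), 0.f, v);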
 | 
			
		||||
 | 
			
		||||
template <typename Out_type, typename In_type>
 | 
			
		||||
struct Reduce{
 | 
			
		||||
  //Need templated class to overload output type
 | 
			
		||||
  //General form must generate error if compiled
 | 
			
		||||
  inline Out_type operator()(In_type in){
 | 
			
		||||
    printf("Error, using wrong Reduce function\n");
 | 
			
		||||
    //exit(1);
 | 
			
		||||
    return 0;
 | 
			
		||||
  }
 | 
			
		||||
};
 | 
			
		||||
//Complex float Reduce
 | 
			
		||||
template <>
 | 
			
		||||
inline Grid::ComplexF Reduce<Grid::ComplexF, vecf>::operator()(vecf in){
 | 
			
		||||
  pred pg_even = acle<float>::pg_even();
 | 
			
		||||
  pred pg_odd  = acle<float>::pg_odd();
 | 
			
		||||
  float a = svred(pg_even, in);
 | 
			
		||||
  float b = svred(pg_odd, in);
 | 
			
		||||
  return Grid::ComplexF(a, b);
 | 
			
		||||
}
 | 
			
		||||
//Real float Reduce
 | 
			
		||||
template <>
 | 
			
		||||
inline Grid::RealF Reduce<Grid::RealF, vecf>::operator()(vecf in){
 | 
			
		||||
  pred pg1 = acle<float>::pg1();
 | 
			
		||||
  return svred(pg1, in);
 | 
			
		||||
}
 | 
			
		||||
//Complex double Reduce
 | 
			
		||||
template <>
 | 
			
		||||
inline Grid::ComplexD Reduce<Grid::ComplexD, vecd>::operator()(vecd in){
 | 
			
		||||
  pred pg_even = acle<double>::pg_even();
 | 
			
		||||
  pred pg_odd  = acle<double>::pg_odd();
 | 
			
		||||
  double a = svred(pg_even, in);
 | 
			
		||||
  double b = svred(pg_odd, in);
 | 
			
		||||
  return Grid::ComplexD(a, b);
 | 
			
		||||
}
 | 
			
		||||
//Real double Reduce
 | 
			
		||||
template <>
 | 
			
		||||
inline Grid::RealD Reduce<Grid::RealD, vecd>::operator()(vecd in){
 | 
			
		||||
  pred pg1 = acle<double>::pg1();
 | 
			
		||||
  return svred(pg1, in);
 | 
			
		||||
}
 | 
			
		||||
//Integer Reduce
 | 
			
		||||
template <>
 | 
			
		||||
inline Integer Reduce<Integer, veci>::operator()(veci in){
 | 
			
		||||
  pred pg1 = acle<Integer>::pg1();
 | 
			
		||||
  return svred(pg1, in);
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
#undef svred
 | 
			
		||||
 | 
			
		||||
NAMESPACE_END(Optimization);
 | 
			
		||||
 | 
			
		||||
//////////////////////////////////////////////////////////////////////////////////////
 | 
			
		||||
// Here assign types
 | 
			
		||||
 | 
			
		||||
typedef vech SIMD_Htype; // Reduced precision type
 | 
			
		||||
typedef vecf SIMD_Ftype; // Single precision type
 | 
			
		||||
typedef vecd SIMD_Dtype; // Double precision type
 | 
			
		||||
typedef veci SIMD_Itype; // Integer type
 | 
			
		||||
 | 
			
		||||
// prefetch utilities
 | 
			
		||||
inline void v_prefetch0(int size, const char *ptr){};
 | 
			
		||||
inline void prefetch_HINT_T0(const char *ptr){};
 | 
			
		||||
 | 
			
		||||
// Function name aliases
 | 
			
		||||
typedef Optimization::Vsplat   VsplatSIMD;
 | 
			
		||||
typedef Optimization::Vstore   VstoreSIMD;
 | 
			
		||||
typedef Optimization::Vset     VsetSIMD;
 | 
			
		||||
typedef Optimization::Vstream  VstreamSIMD;
 | 
			
		||||
template <typename S, typename T> using ReduceSIMD = Optimization::Reduce<S,T>;
 | 
			
		||||
 | 
			
		||||
// Arithmetic operations
 | 
			
		||||
typedef Optimization::Sum            SumSIMD;
 | 
			
		||||
typedef Optimization::Sub            SubSIMD;
 | 
			
		||||
typedef Optimization::Div            DivSIMD;
 | 
			
		||||
typedef Optimization::Mult           MultSIMD;
 | 
			
		||||
typedef Optimization::MultComplex    MultComplexSIMD;
 | 
			
		||||
typedef Optimization::MultAddComplex MultAddComplexSIMD;
 | 
			
		||||
typedef Optimization::MultRealPart   MultRealPartSIMD;
 | 
			
		||||
typedef Optimization::MaddRealPart   MaddRealPartSIMD;
 | 
			
		||||
typedef Optimization::Conj           ConjSIMD;
 | 
			
		||||
typedef Optimization::TimesMinusI    TimesMinusISIMD;
 | 
			
		||||
typedef Optimization::TimesI         TimesISIMD;
 | 
			
		||||
 | 
			
		||||
NAMESPACE_END(Grid);
 | 
			
		||||
@@ -41,6 +41,11 @@ Author: Peter Boyle <paboyle@ph.ed.ac.uk>
 | 
			
		||||
 | 
			
		||||
namespace Grid {
 | 
			
		||||
 | 
			
		||||
#if (!defined(GRID_CUDA)) && (!defined(GRID_HIP))
 | 
			
		||||
typedef struct { uint16_t x;} half;
 | 
			
		||||
#endif
 | 
			
		||||
typedef struct Half2_t { half x; half y; } Half2;
 | 
			
		||||
 | 
			
		||||
#define COALESCE_GRANULARITY ( GEN_SIMD_WIDTH )
 | 
			
		||||
 | 
			
		||||
template<class pair>
 | 
			
		||||
@@ -125,14 +130,14 @@ inline accelerator GpuVector<N,datum> operator/(const GpuVector<N,datum> l,const
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
constexpr int NSIMD_RealH    = COALESCE_GRANULARITY / sizeof(half);
 | 
			
		||||
constexpr int NSIMD_ComplexH = COALESCE_GRANULARITY / sizeof(half2);
 | 
			
		||||
constexpr int NSIMD_ComplexH = COALESCE_GRANULARITY / sizeof(Half2);
 | 
			
		||||
constexpr int NSIMD_RealF    = COALESCE_GRANULARITY / sizeof(float);
 | 
			
		||||
constexpr int NSIMD_ComplexF = COALESCE_GRANULARITY / sizeof(float2);
 | 
			
		||||
constexpr int NSIMD_RealD    = COALESCE_GRANULARITY / sizeof(double);
 | 
			
		||||
constexpr int NSIMD_ComplexD = COALESCE_GRANULARITY / sizeof(double2);
 | 
			
		||||
constexpr int NSIMD_Integer  = COALESCE_GRANULARITY / sizeof(Integer);
 | 
			
		||||
 | 
			
		||||
typedef GpuComplex<half2  > GpuComplexH;
 | 
			
		||||
typedef GpuComplex<Half2  > GpuComplexH;
 | 
			
		||||
typedef GpuComplex<float2 > GpuComplexF;
 | 
			
		||||
typedef GpuComplex<double2> GpuComplexD;
 | 
			
		||||
 | 
			
		||||
@@ -147,11 +152,9 @@ typedef GpuVector<NSIMD_Integer,  Integer     > GpuVectorI;
 | 
			
		||||
accelerator_inline float half2float(half h)
 | 
			
		||||
{
 | 
			
		||||
  float f;
 | 
			
		||||
#ifdef GRID_SIMT
 | 
			
		||||
#if defined(GRID_CUDA) || defined(GRID_HIP)
 | 
			
		||||
  f = __half2float(h);
 | 
			
		||||
#else 
 | 
			
		||||
  //f = __half2float(h);
 | 
			
		||||
  __half_raw hr(h);
 | 
			
		||||
  Grid_half hh; 
 | 
			
		||||
  hh.x = hr.x;
 | 
			
		||||
  f=  sfw_half_to_float(hh);
 | 
			
		||||
@@ -161,13 +164,11 @@ accelerator_inline float half2float(half h)
 | 
			
		||||
accelerator_inline half float2half(float f)
 | 
			
		||||
{
 | 
			
		||||
  half h;
 | 
			
		||||
#ifdef GRID_SIMT
 | 
			
		||||
#if defined(GRID_CUDA) || defined(GRID_HIP)
 | 
			
		||||
  h = __float2half(f);
 | 
			
		||||
#else
 | 
			
		||||
  Grid_half hh = sfw_float_to_half(f);
 | 
			
		||||
  __half_raw hr;  
 | 
			
		||||
  hr.x = hh.x;
 | 
			
		||||
  h = __half(hr);
 | 
			
		||||
  h.x = hh.x;
 | 
			
		||||
#endif
 | 
			
		||||
  return h;
 | 
			
		||||
}
 | 
			
		||||
@@ -523,7 +524,7 @@ namespace Optimization {
 | 
			
		||||
    ////////////////////////////////////////////////////////////////////////////////////
 | 
			
		||||
    // Single / Half
 | 
			
		||||
    ////////////////////////////////////////////////////////////////////////////////////
 | 
			
		||||
    static accelerator_inline GpuVectorCH StoH (GpuVectorCF a,GpuVectorCF b) {
 | 
			
		||||
     static accelerator_inline GpuVectorCH StoH (GpuVectorCF a,GpuVectorCF b) {
 | 
			
		||||
      int N = GpuVectorCF::N;
 | 
			
		||||
      GpuVectorCH h;
 | 
			
		||||
      for(int i=0;i<N;i++) {
 | 
			
		||||
 
 | 
			
		||||
@@ -1,6 +1,6 @@
 | 
			
		||||
/*************************************************************************************
 | 
			
		||||
 | 
			
		||||
    Grid physics library, www.github.com/paboyle/Grid 
 | 
			
		||||
    Grid physics library, www.github.com/paboyle/Grid
 | 
			
		||||
 | 
			
		||||
    Source file: ./lib/simd/Grid_vector_types.h
 | 
			
		||||
 | 
			
		||||
@@ -73,7 +73,7 @@ accelerator_inline Grid_half sfw_float_to_half(float ff) {
 | 
			
		||||
  const FP32 denorm_magic = { ((127 - 15) + (23 - 10) + 1) << 23 };
 | 
			
		||||
  unsigned int sign_mask = 0x80000000u;
 | 
			
		||||
  Grid_half o;
 | 
			
		||||
    
 | 
			
		||||
 | 
			
		||||
  o.x = static_cast<unsigned short>(0x0u);
 | 
			
		||||
  unsigned int sign = f.u & sign_mask;
 | 
			
		||||
  f.u ^= sign;
 | 
			
		||||
@@ -93,7 +93,7 @@ accelerator_inline Grid_half sfw_float_to_half(float ff) {
 | 
			
		||||
      o.x = static_cast<unsigned short>(f.u - denorm_magic.u);
 | 
			
		||||
    } else {
 | 
			
		||||
      unsigned int mant_odd = (f.u >> 13) & 1; // resulting mantissa is odd
 | 
			
		||||
	
 | 
			
		||||
 | 
			
		||||
      // update exponent, rounding bias part 1
 | 
			
		||||
      f.u += ((unsigned int)(15 - 127) << 23) + 0xfff;
 | 
			
		||||
      // rounding bias part 2
 | 
			
		||||
@@ -101,7 +101,7 @@ accelerator_inline Grid_half sfw_float_to_half(float ff) {
 | 
			
		||||
      // take the bits!
 | 
			
		||||
      o.x = static_cast<unsigned short>(f.u >> 13);
 | 
			
		||||
    }
 | 
			
		||||
  } 
 | 
			
		||||
  }
 | 
			
		||||
  o.x |= static_cast<unsigned short>(sign >> 16);
 | 
			
		||||
  return o;
 | 
			
		||||
}
 | 
			
		||||
@@ -110,9 +110,63 @@ accelerator_inline Grid_half sfw_float_to_half(float ff) {
 | 
			
		||||
#ifdef GPU_VEC
 | 
			
		||||
#include "Grid_gpu_vec.h"
 | 
			
		||||
#endif
 | 
			
		||||
/*
 | 
			
		||||
#ifdef GEN
 | 
			
		||||
#include "Grid_generic.h"
 | 
			
		||||
#endif
 | 
			
		||||
*/
 | 
			
		||||
 | 
			
		||||
#ifdef GEN
 | 
			
		||||
  #if defined(A64FX) || defined(A64FXFIXEDSIZE) // breakout A64FX SVE ACLE here
 | 
			
		||||
    #include <arm_sve.h>
 | 
			
		||||
    #if defined(A64FX) // VLA
 | 
			
		||||
      #pragma message("building A64FX / SVE ACLE VLA")
 | 
			
		||||
      #if defined(ARMCLANGCOMPAT)
 | 
			
		||||
        #pragma message("applying data types patch")
 | 
			
		||||
      #endif
 | 
			
		||||
      #include "Grid_a64fx-2.h"
 | 
			
		||||
    #endif
 | 
			
		||||
    #if defined(A64FXFIXEDSIZE) // fixed size data types
 | 
			
		||||
      #pragma message("building for A64FX / SVE ACLE fixed size")
 | 
			
		||||
      #include "Grid_a64fx-fixedsize.h"
 | 
			
		||||
    #endif
 | 
			
		||||
  #else
 | 
			
		||||
    //#pragma message("building GEN") // generic
 | 
			
		||||
    #include "Grid_generic.h"
 | 
			
		||||
  #endif
 | 
			
		||||
#endif
 | 
			
		||||
 | 
			
		||||
#ifdef A64FX
 | 
			
		||||
  #include <arm_sve.h>
 | 
			
		||||
  #ifdef __ARM_FEATURE_SVE_BITS
 | 
			
		||||
    //#pragma message("building A64FX SVE VLS")
 | 
			
		||||
    #include "Grid_a64fx-fixedsize.h"
 | 
			
		||||
  #else
 | 
			
		||||
    #pragma message("building A64FX SVE VLA")
 | 
			
		||||
    #if defined(ARMCLANGCOMPAT)
 | 
			
		||||
      #pragma message("applying data types patch")
 | 
			
		||||
    #endif
 | 
			
		||||
    #include "Grid_a64fx-2.h"
 | 
			
		||||
  #endif
 | 
			
		||||
#endif
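// Illustrative build line (assumption, not from the diff): with gcc/armclang the
// fixed-size path above is selected when the compiler pins the SVE vector length, e.g.
//   g++ -DA64FX -march=armv8.2-a+sve -msve-vector-bits=512 ...
// which defines __ARM_FEATURE_SVE_BITS=512; without that flag the VLA header is used.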
 | 
			
		||||
 | 
			
		||||
/*
 | 
			
		||||
#ifdef A64FXVLA
 | 
			
		||||
#pragma message("building A64FX VLA")
 | 
			
		||||
#if defined(ARMCLANGCOMPAT)
 | 
			
		||||
  #pragma message("applying data types patch")
 | 
			
		||||
#endif
 | 
			
		||||
#include <arm_sve.h>
 | 
			
		||||
#include "Grid_a64fx-2.h"
 | 
			
		||||
#endif
 | 
			
		||||
 | 
			
		||||
#ifdef A64FXVLS
 | 
			
		||||
#pragma message("building A64FX VLS")
 | 
			
		||||
#include <arm_sve.h>
 | 
			
		||||
#include "Grid_a64fx-fixedsize.h"
 | 
			
		||||
#endif
 | 
			
		||||
*/
 | 
			
		||||
 | 
			
		||||
#ifdef SSE4
 | 
			
		||||
#include "Grid_sse4.h"
 | 
			
		||||
#endif
 | 
			
		||||
@@ -163,6 +217,12 @@ template <typename T> struct is_complex : public std::false_type {};
 | 
			
		||||
template <> struct is_complex<ComplexD> : public std::true_type {};
 | 
			
		||||
template <> struct is_complex<ComplexF> : public std::true_type {};
 | 
			
		||||
 | 
			
		||||
template <typename T> struct is_ComplexD : public std::false_type {};
 | 
			
		||||
template <> struct is_ComplexD<ComplexD> : public std::true_type {};
 | 
			
		||||
 | 
			
		||||
template <typename T> struct is_ComplexF : public std::false_type {};
 | 
			
		||||
template <> struct is_ComplexF<ComplexF> : public std::true_type {};
 | 
			
		||||
 | 
			
		||||
template<typename T, typename V=void> struct is_real : public std::false_type {};
 | 
			
		||||
template<typename T> struct is_real<T, typename std::enable_if<std::is_floating_point<T>::value,
 | 
			
		||||
  void>::type> : public std::true_type {};
 | 
			
		||||
@@ -170,7 +230,7 @@ template<typename T> struct is_real<T, typename std::enable_if<std::is_floating_
 | 
			
		||||
template<typename T, typename V=void> struct is_integer : public std::false_type {};
 | 
			
		||||
template<typename T> struct is_integer<T, typename std::enable_if<std::is_integral<T>::value,
 | 
			
		||||
  void>::type> : public std::true_type {};
 | 
			
		||||
  
 | 
			
		||||
 | 
			
		||||
template <typename T>              using IfReal    = Invoke<std::enable_if<is_real<T>::value, int> >;
 | 
			
		||||
template <typename T>              using IfComplex = Invoke<std::enable_if<is_complex<T>::value, int> >;
 | 
			
		||||
template <typename T>              using IfInteger = Invoke<std::enable_if<is_integer<T>::value, int> >;
 | 
			
		||||
@@ -223,6 +283,69 @@ public:
 | 
			
		||||
    return sizeof(Vector_type) / sizeof(Scalar_type);
 | 
			
		||||
  }
 | 
			
		||||
 | 
			
		||||
  #ifdef ARMCLANGCOMPAT
 | 
			
		||||
    template <class S = Scalar_type>
 | 
			
		||||
    accelerator_inline Grid_simd &operator=(const Grid_simd<typename std::enable_if<!is_complex<S>::value, S>::type, Vector_type> &&rhs) {
 | 
			
		||||
      //v = rhs.v;
 | 
			
		||||
      svst1(svptrue_b8(), (Scalar_type*)this, svld1(svptrue_b8(), (Scalar_type*)&(rhs.v)));
 | 
			
		||||
      return *this;
 | 
			
		||||
    };
 | 
			
		||||
 | 
			
		||||
    template <class S = Scalar_type>
 | 
			
		||||
    accelerator_inline Grid_simd &operator=(const Grid_simd<typename std::enable_if<!is_complex<S>::value, S>::type, Vector_type> &rhs) {
 | 
			
		||||
      //v = rhs.v;
 | 
			
		||||
      svst1(svptrue_b8(), (Scalar_type*)this, svld1(svptrue_b8(), (Scalar_type*)&(rhs.v)));
 | 
			
		||||
      return *this;
 | 
			
		||||
    };
 | 
			
		||||
 | 
			
		||||
    /*
 | 
			
		||||
    template <class S = Scalar_type>
 | 
			
		||||
    accelerator_inline Grid_simd &operator=(const Grid_simd<typename std::enable_if<is_complex<S>::value, S>::type, Vector_type> &&rhs) {
 | 
			
		||||
      //v = rhs.v;
 | 
			
		||||
      svst1(svptrue_b8(), (int8_t*)this, svld1(svptrue_b8(), (int8_t*)&(rhs.v)));
 | 
			
		||||
      return *this;
 | 
			
		||||
    };
 | 
			
		||||
 | 
			
		||||
    template <class S = Scalar_type>
 | 
			
		||||
    accelerator_inline Grid_simd &operator=(const Grid_simd<typename std::enable_if<is_complex<S>::value, S>::type, Vector_type> &rhs) {
 | 
			
		||||
      //v = rhs.v;
 | 
			
		||||
      svst1(svptrue_b8(), (int8_t*)this, svld1(svptrue_b8(), (int8_t*)&(rhs.v)));
 | 
			
		||||
      return *this;
 | 
			
		||||
    };
 | 
			
		||||
    */
 | 
			
		||||
 | 
			
		||||
    // ComplexF
 | 
			
		||||
    template <class S = Scalar_type>
 | 
			
		||||
    accelerator_inline Grid_simd &operator=(const Grid_simd<typename std::enable_if<is_ComplexF<S>::value, S>::type, Vector_type> &&rhs) {
 | 
			
		||||
      //v = rhs.v;
 | 
			
		||||
      svst1(svptrue_b32(), (float*)this, svld1(svptrue_b32(), (float*)&(rhs.v)));
 | 
			
		||||
      return *this;
 | 
			
		||||
    };
 | 
			
		||||
 | 
			
		||||
    template <class S = Scalar_type>
 | 
			
		||||
    accelerator_inline Grid_simd &operator=(const Grid_simd<typename std::enable_if<is_ComplexF<S>::value, S>::type, Vector_type> &rhs) {
 | 
			
		||||
      //v = rhs.v;
 | 
			
		||||
      svst1(svptrue_b32(), (float*)this, svld1(svptrue_b32(), (float*)&(rhs.v)));
 | 
			
		||||
      return *this;
 | 
			
		||||
    };
 | 
			
		||||
 | 
			
		||||
    // ComplexD
 | 
			
		||||
    template <class S = Scalar_type>
 | 
			
		||||
    accelerator_inline Grid_simd &operator=(const Grid_simd<typename std::enable_if<is_ComplexD<S>::value, S>::type, Vector_type> &&rhs) {
 | 
			
		||||
      //v = rhs.v;
 | 
			
		||||
      svst1(svptrue_b64(), (double*)this, svld1(svptrue_b64(), (double*)&(rhs.v)));
 | 
			
		||||
      return *this;
 | 
			
		||||
    };
 | 
			
		||||
 | 
			
		||||
    template <class S = Scalar_type>
 | 
			
		||||
    accelerator_inline Grid_simd &operator=(const Grid_simd<typename std::enable_if<is_ComplexD<S>::value, S>::type, Vector_type> &rhs) {
 | 
			
		||||
      //v = rhs.v;
 | 
			
		||||
      svst1(svptrue_b64(), (double*)this, svld1(svptrue_b64(), (double*)&(rhs.v)));
 | 
			
		||||
      return *this;
 | 
			
		||||
    };
 | 
			
		||||
 | 
			
		||||
  #else
 | 
			
		||||
 | 
			
		||||
  accelerator_inline Grid_simd &operator=(const Grid_simd &&rhs) {
 | 
			
		||||
    v = rhs.v;
 | 
			
		||||
    return *this;
 | 
			
		||||
@@ -232,10 +355,23 @@ public:
 | 
			
		||||
    return *this;
 | 
			
		||||
  };  // faster than not declaring it and leaving to the compiler
 | 
			
		||||
 | 
			
		||||
  #endif
 | 
			
		||||
 | 
			
		||||
  accelerator Grid_simd() = default;
 | 
			
		||||
  accelerator_inline Grid_simd(const Grid_simd &rhs) : v(rhs.v){};  // compiles in movaps
 | 
			
		||||
  accelerator_inline Grid_simd(const Grid_simd &&rhs) : v(rhs.v){};
 | 
			
		||||
 | 
			
		||||
  #ifdef ARMCLANGCOMPAT
 | 
			
		||||
    template <class S = Scalar_type>
 | 
			
		||||
    accelerator_inline Grid_simd(const Grid_simd<typename std::enable_if<!is_complex<S>::value, S>::type, Vector_type> &rhs) { this->operator=(rhs); }
 | 
			
		||||
    template <class S = Scalar_type>
 | 
			
		||||
    accelerator_inline Grid_simd(const Grid_simd<typename std::enable_if<!is_complex<S>::value, S>::type, Vector_type> &&rhs) { this->operator=(rhs); }
 | 
			
		||||
    template <class S = Scalar_type>
 | 
			
		||||
    accelerator_inline Grid_simd(const Grid_simd<typename std::enable_if<is_complex<S>::value, S>::type, Vector_type> &rhs) { this->operator=(rhs); }
 | 
			
		||||
    template <class S = Scalar_type>
 | 
			
		||||
    accelerator_inline Grid_simd(const Grid_simd<typename std::enable_if<is_complex<S>::value, S>::type, Vector_type> &&rhs) { this->operator=(rhs); }
 | 
			
		||||
  #else
 | 
			
		||||
    accelerator_inline Grid_simd(const Grid_simd &rhs) : v(rhs.v){};  // compiles in movaps
 | 
			
		||||
    accelerator_inline Grid_simd(const Grid_simd &&rhs) : v(rhs.v){};
 | 
			
		||||
  #endif
 | 
			
		||||
  accelerator_inline Grid_simd(const Real a) { vsplat(*this, Scalar_type(a)); };
 | 
			
		||||
  // Enable if complex type
 | 
			
		||||
  template <typename S = Scalar_type> accelerator_inline
 | 
			
		||||
@@ -258,12 +394,21 @@ public:
 | 
			
		||||
  ///////////////////////////////////////////////
 | 
			
		||||
 | 
			
		||||
  // FIXME -- alias this to an accelerator_inline MAC struct.
 | 
			
		||||
 | 
			
		||||
  #if defined(A64FX) || defined(A64FXFIXEDSIZE)
 | 
			
		||||
  friend accelerator_inline void mac(Grid_simd *__restrict__ y,
 | 
			
		||||
				     const Grid_simd *__restrict__ a,
 | 
			
		||||
				     const Grid_simd *__restrict__ x) {
 | 
			
		||||
    *y = fxmac((*a), (*x), (*y));
 | 
			
		||||
  };
 | 
			
		||||
  #else
 | 
			
		||||
  friend accelerator_inline void mac(Grid_simd *__restrict__ y,
 | 
			
		||||
				     const Grid_simd *__restrict__ a,
 | 
			
		||||
				     const Grid_simd *__restrict__ x) {
 | 
			
		||||
    *y = (*a) * (*x) + (*y);
 | 
			
		||||
  };
 | 
			
		||||
  
 | 
			
		||||
  #endif
 | 
			
		||||
 | 
			
		||||
  friend accelerator_inline void mult(Grid_simd *__restrict__ y,
 | 
			
		||||
				      const Grid_simd *__restrict__ l,
 | 
			
		||||
				      const Grid_simd *__restrict__ r) {
 | 
			
		||||
@@ -412,7 +557,7 @@ public:
 | 
			
		||||
    Grid_simd ret;
 | 
			
		||||
    Grid_simd::conv_t conv;
 | 
			
		||||
    Grid_simd::scalar_type s;
 | 
			
		||||
    
 | 
			
		||||
 | 
			
		||||
    conv.v = v.v;
 | 
			
		||||
    for (int i = 0; i < Nsimd(); i++) {
 | 
			
		||||
      s = conv.s[i];
 | 
			
		||||
@@ -441,7 +586,7 @@ public:
 | 
			
		||||
    return ret;
 | 
			
		||||
  }
 | 
			
		||||
  ///////////////////////
 | 
			
		||||
  // Exchange 
 | 
			
		||||
  // Exchange
 | 
			
		||||
  // Al Ah , Bl Bh -> Al Bl Ah,Bh
 | 
			
		||||
  ///////////////////////
 | 
			
		||||
  friend accelerator_inline void exchange(Grid_simd &out1,Grid_simd &out2,Grid_simd in1,Grid_simd in2,int n)
 | 
			
		||||
@@ -452,20 +597,20 @@ public:
 | 
			
		||||
      Optimization::Exchange::Exchange2(out1.v,out2.v,in1.v,in2.v);
 | 
			
		||||
    } else if(n==1) {
 | 
			
		||||
      Optimization::Exchange::Exchange1(out1.v,out2.v,in1.v,in2.v);
 | 
			
		||||
    } else if(n==0) { 
 | 
			
		||||
    } else if(n==0) {
 | 
			
		||||
      Optimization::Exchange::Exchange0(out1.v,out2.v,in1.v,in2.v);
 | 
			
		||||
    }
 | 
			
		||||
  }
 | 
			
		||||
  friend accelerator_inline void exchange0(Grid_simd &out1,Grid_simd &out2,Grid_simd in1,Grid_simd in2){    
 | 
			
		||||
  friend accelerator_inline void exchange0(Grid_simd &out1,Grid_simd &out2,Grid_simd in1,Grid_simd in2){
 | 
			
		||||
    Optimization::Exchange::Exchange0(out1.v,out2.v,in1.v,in2.v);
 | 
			
		||||
  }
 | 
			
		||||
  friend accelerator_inline void exchange1(Grid_simd &out1,Grid_simd &out2,Grid_simd in1,Grid_simd in2){    
 | 
			
		||||
  friend accelerator_inline void exchange1(Grid_simd &out1,Grid_simd &out2,Grid_simd in1,Grid_simd in2){
 | 
			
		||||
    Optimization::Exchange::Exchange1(out1.v,out2.v,in1.v,in2.v);
 | 
			
		||||
  }
 | 
			
		||||
  friend accelerator_inline void exchange2(Grid_simd &out1,Grid_simd &out2,Grid_simd in1,Grid_simd in2){    
 | 
			
		||||
  friend accelerator_inline void exchange2(Grid_simd &out1,Grid_simd &out2,Grid_simd in1,Grid_simd in2){
 | 
			
		||||
    Optimization::Exchange::Exchange2(out1.v,out2.v,in1.v,in2.v);
 | 
			
		||||
  }
 | 
			
		||||
  friend accelerator_inline void exchange3(Grid_simd &out1,Grid_simd &out2,Grid_simd in1,Grid_simd in2){    
 | 
			
		||||
  friend accelerator_inline void exchange3(Grid_simd &out1,Grid_simd &out2,Grid_simd in1,Grid_simd in2){
 | 
			
		||||
    Optimization::Exchange::Exchange3(out1.v,out2.v,in1.v,in2.v);
 | 
			
		||||
  }
 | 
			
		||||
  ////////////////////////////////////////////////////////////////////
 | 
			
		||||
@@ -490,7 +635,7 @@ public:
 | 
			
		||||
      int dist = perm & 0xF;
 | 
			
		||||
      y = rotate(b, dist);
 | 
			
		||||
      return;
 | 
			
		||||
    } 
 | 
			
		||||
    }
 | 
			
		||||
    else if(perm==3) permute3(y, b);
 | 
			
		||||
    else if(perm==2) permute2(y, b);
 | 
			
		||||
    else if(perm==1) permute1(y, b);
 | 
			
		||||
@@ -564,29 +709,29 @@ accelerator_inline Grid_simd<S, V> rotate(Grid_simd<S, V> b, int nrot) {
 | 
			
		||||
  ret.v = Optimization::Rotate::rotate(b.v, 2 * nrot);
 | 
			
		||||
  return ret;
 | 
			
		||||
}
 | 
			
		||||
template <class S, class V, IfNotComplex<S> =0> 
 | 
			
		||||
template <class S, class V, IfNotComplex<S> =0>
 | 
			
		||||
accelerator_inline void rotate( Grid_simd<S,V> &ret,Grid_simd<S,V> b,int nrot)
 | 
			
		||||
{
 | 
			
		||||
  nrot = nrot % Grid_simd<S,V>::Nsimd();
 | 
			
		||||
  ret.v = Optimization::Rotate::rotate(b.v,nrot);
 | 
			
		||||
}
 | 
			
		||||
template <class S, class V, IfComplex<S> =0> 
 | 
			
		||||
template <class S, class V, IfComplex<S> =0>
 | 
			
		||||
accelerator_inline void rotate(Grid_simd<S,V> &ret,Grid_simd<S,V> b,int nrot)
 | 
			
		||||
{
 | 
			
		||||
  nrot = nrot % Grid_simd<S,V>::Nsimd();
 | 
			
		||||
  ret.v = Optimization::Rotate::rotate(b.v,2*nrot);
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
template <class S, class V> 
 | 
			
		||||
template <class S, class V>
 | 
			
		||||
accelerator_inline void vbroadcast(Grid_simd<S,V> &ret,const Grid_simd<S,V> &src,int lane){
 | 
			
		||||
  S* typepun =(S*) &src;
 | 
			
		||||
  vsplat(ret,typepun[lane]);
 | 
			
		||||
}    
 | 
			
		||||
template <class S, class V, IfComplex<S> =0> 
 | 
			
		||||
}
 | 
			
		||||
template <class S, class V, IfComplex<S> =0>
 | 
			
		||||
accelerator_inline void rbroadcast(Grid_simd<S,V> &ret,const Grid_simd<S,V> &src,int lane){
 | 
			
		||||
  S* typepun =(S*) &src;
 | 
			
		||||
  ret.v = unary<V>(real(typepun[lane]), VsplatSIMD());
 | 
			
		||||
}    
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
@@ -741,6 +886,27 @@ accelerator_inline Grid_simd<S, V> operator*(Grid_simd<S, V> a, Grid_simd<S, V>
 | 
			
		||||
  return ret;
 | 
			
		||||
};
 | 
			
		||||
 | 
			
		||||
// ---------------- A64FX MAC -------------------
 | 
			
		||||
// Distinguish between complex types and others
 | 
			
		||||
#if defined(A64FX) || defined(A64FXFIXEDSIZE)
 | 
			
		||||
template <class S, class V, IfComplex<S> = 0>
 | 
			
		||||
accelerator_inline Grid_simd<S, V> fxmac(Grid_simd<S, V> a, Grid_simd<S, V> b, Grid_simd<S, V> c) {
 | 
			
		||||
  Grid_simd<S, V> ret;
 | 
			
		||||
  ret.v = trinary<V>(a.v, b.v, c.v, MultAddComplexSIMD());
 | 
			
		||||
  return ret;
 | 
			
		||||
};
 | 
			
		||||
 | 
			
		||||
// Real/Integer types
 | 
			
		||||
template <class S, class V, IfNotComplex<S> = 0>
 | 
			
		||||
accelerator_inline Grid_simd<S, V> fxmac(Grid_simd<S, V> a, Grid_simd<S, V> b, Grid_simd<S, V> c) {
 | 
			
		||||
  Grid_simd<S, V> ret;
 | 
			
		||||
  ret.v = trinary<V>(a.v, b.v, c.v, MultSIMD());
 | 
			
		||||
  return ret;
 | 
			
		||||
};
 | 
			
		||||
#endif
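// Usage sketch (assumed Grid types): on A64FX builds mac(&y,&a,&x) forwards to fxmac(),
// so complex types map onto the FCMLA-based MultAddComplexSIMD while real/integer
// types use the svmad-based MultSIMD:
//   vComplexF y, a, x;
//   mac(&y, &a, &x);   // y = a*x + y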
 | 
			
		||||
// ----------------------------------------------
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
// Distinguish between complex types and others
 | 
			
		||||
template <class S, class V, IfComplex<S> = 0>
 | 
			
		||||
accelerator_inline Grid_simd<S, V> operator/(Grid_simd<S, V> a, Grid_simd<S, V> b) {
 | 
			
		||||
@@ -877,7 +1043,7 @@ accelerator_inline typename toComplexMapper<Rsimd>::Complexified toComplex(const
 | 
			
		||||
 | 
			
		||||
  conv.v = in.v;
 | 
			
		||||
  for (int i = 0; i < Rsimd::Nsimd(); i += 2) {
 | 
			
		||||
    assert(conv.s[i + 1] == conv.s[i]);  
 | 
			
		||||
    assert(conv.s[i + 1] == conv.s[i]);
 | 
			
		||||
    // trap any cases where real was not duplicated
 | 
			
		||||
    // indicating the SIMD grids of real and imag assignment did not correctly
 | 
			
		||||
    // match
 | 
			
		||||
@@ -919,6 +1085,14 @@ accelerator_inline void precisionChange(vRealD    *out,vRealF    *in,int nvec)
  for(int m=0;m*2<nvec;m++){
    int n=m*2;
    Optimization::PrecisionChange::StoD(in[m].v,out[n].v,out[n+1].v);
    // Bug in gcc 10.0.1 and gcc 10.1 using fixed-size SVE ACLE data types  CAS-159553-Y1K4C6
    // function call results in compile-time error:
    // In function ‘void Grid::precisionChange(Grid::vRealD*, Grid::vRealF*, int)’:
    // .../Grid_vector_types.h:961:56: error:
    // cannot bind non-const lvalue reference of type ‘vecd&’ {aka ‘svfloat64_t&’}
    // to an rvalue of type ‘vecd’ {aka ‘svfloat64_t’}
    // 961 |     Optimization::PrecisionChange::StoD(in[m].v,out[n].v,out[n+1].v);
    //  |                                                 ~~~~~~~^
  }
}
accelerator_inline void precisionChange(vRealD    *out,vRealH    *in,int nvec)
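The comment block above records that gcc 10.0.1/10.1 with fixed-size SVE ACLE types refuses to bind the StoD output arguments as non-const lvalue references. Purely as a hypothetical workaround sketch (not the change committed here; the vecd typedef is assumed from the error text), the call could be routed through named locals so genuine lvalues are passed:

    // Hypothetical workaround sketch only; vecd is the typedef named in the
    // diagnostic above. Named locals give StoD real lvalues to bind to.
    vecd lo, hi;
    Optimization::PrecisionChange::StoD(in[m].v, lo, hi);
    out[n].v   = lo;
    out[n+1].v = hi;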
@@ -93,6 +93,11 @@ accelerator_inline ComplexF pow(const ComplexF& r,RealF y){ return(std::pow(r,y)
using std::abs;
using std::pow;
using std::sqrt;
using std::log;
using std::exp;
using std::sin;
using std::cos;


accelerator_inline RealF    conjugate(const RealF  & r){ return r; }
accelerator_inline RealD    conjugate(const RealD  & r){ return r; }
Grid/simd/gridverter.py (executable file, 2377 changed lines): file diff suppressed because it is too large.
@@ -1,6 +1,6 @@
/*************************************************************************************

     Grid physics library, www.github.com/paboyle/Grid
     Grid physics library, www.github.com/paboyle/Grid

     Source file: ./lib/Stencil.h

@@ -41,13 +41,13 @@
// Stencil based code will exchange haloes and use a table lookup for neighbours.
// This will be done with generality to allow easier efficient implementations.
// Overlap of comms and compute is enabled by tabulating off-node connected,
//
//
// Generic services
// 0) Prebuild neighbour tables
// 1) Compute sizes of all haloes/comms buffers; allocate them.
// 2) Gather all faces, and communicate.
// 3) Loop over result sites, giving nbr index/offnode info for each
//
//
//////////////////////////////////////////////////////////////////////////////////////////

NAMESPACE_BEGIN(Grid);
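Step 0 of the generic services listed above is to prebuild neighbour tables. A minimal standalone sketch, not Grid code, of such a table for a 1-D periodic lattice with the two stencil points +1 and -1:

// Minimal standalone sketch: a prebuilt neighbour table for a 1-D periodic
// lattice of L sites and the stencil displacements {+1,-1}.
#include <cstdio>
#include <vector>

int main() {
  const int L = 8;
  const int displacements[2] = {+1, -1};
  // table[point][site] = index of the neighbouring site
  std::vector<std::vector<int>> table(2, std::vector<int>(L));
  for (int p = 0; p < 2; p++)
    for (int s = 0; s < L; s++)
      table[p][s] = (s + displacements[p] + L) % L;   // periodic wrap
  // A stencil kernel then reads neighbours by lookup instead of recomputing them.
  std::printf("site 0, +1 neighbour: %d; site 0, -1 neighbour: %d\n",
              table[0][0], table[1][0]);
  return 0;
}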
@@ -59,10 +59,10 @@ NAMESPACE_BEGIN(Grid);
 | 
			
		||||
void Gather_plane_table_compute (GridBase *grid,int dimension,int plane,int cbmask,
 | 
			
		||||
				 int off,Vector<std::pair<int,int> > & table);
 | 
			
		||||
 | 
			
		||||
template<class vobj,class cobj,class compressor> 
 | 
			
		||||
template<class vobj,class cobj,class compressor>
 | 
			
		||||
void Gather_plane_simple_table (Vector<std::pair<int,int> >& table,const Lattice<vobj> &rhs,cobj *buffer,compressor &compress, int off,int so)   __attribute__((noinline));
 | 
			
		||||
 | 
			
		||||
template<class vobj,class cobj,class compressor> 
 | 
			
		||||
template<class vobj,class cobj,class compressor>
 | 
			
		||||
void Gather_plane_simple_table (Vector<std::pair<int,int> >& table,const Lattice<vobj> &rhs,cobj *buffer,compressor &compress, int off,int so)
 | 
			
		||||
{
 | 
			
		||||
  int num=table.size();
 | 
			
		||||
@@ -94,13 +94,13 @@ void Gather_plane_exchange_table(Vector<std::pair<int,int> >& table,const Lattic
 | 
			
		||||
{
 | 
			
		||||
  assert( (table.size()&0x1)==0);
 | 
			
		||||
  int num=table.size()/2;
 | 
			
		||||
  int so  = plane*rhs.Grid()->_ostride[dimension]; // base offset for start of plane 
 | 
			
		||||
  int so  = plane*rhs.Grid()->_ostride[dimension]; // base offset for start of plane
 | 
			
		||||
 | 
			
		||||
  auto rhs_v = rhs.View(AcceleratorRead);
 | 
			
		||||
  auto p0=&pointers[0][0];
 | 
			
		||||
  auto p1=&pointers[1][0];
 | 
			
		||||
  auto tp=&table[0];
 | 
			
		||||
  accelerator_forNB(j, num, 1, { 
 | 
			
		||||
  accelerator_forNB(j, num, 1, {
 | 
			
		||||
      compress.CompressExchange(p0,p1, &rhs_v[0], j,
 | 
			
		||||
			      so+tp[2*j  ].second,
 | 
			
		||||
			      so+tp[2*j+1].second,
 | 
			
		||||
@@ -109,20 +109,20 @@ void Gather_plane_exchange_table(Vector<std::pair<int,int> >& table,const Lattic
  rhs_v.ViewClose();
}

struct StencilEntry {
struct StencilEntry {
#ifdef GRID_CUDA
  uint64_t _byte_offset;       // 8 bytes
  uint32_t _offset;            // 4 bytes
  uint64_t _byte_offset;       // 8 bytes
  uint32_t _offset;            // 4 bytes
#else
  uint64_t _byte_offset;       // 8 bytes
  uint64_t _byte_offset;       // 8 bytes
  uint64_t _offset;            // 8 bytes (8 ever required?)
#endif
  uint8_t _is_local;           // 1 bytes
  uint8_t _is_local;           // 1 bytes
  uint8_t _permute;            // 1 bytes
  uint8_t _around_the_world;   // 1 bytes
  uint8_t _pad;   // 1 bytes
};
// Could pack to 8 + 4 + 4 = 128 bit and use
// Could pack to 8 + 4 + 4 = 128 bit and use

template<class vobj,class cobj,class Parameters>
class CartesianStencilAccelerator {
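The comment above notes the entry could be packed to 8 + 4 + 4 bytes. A standalone compile-time check, with my own struct name, that the GRID_CUDA layout shown does occupy 16 bytes (128 bits) once padding is counted:

// Standalone check (not Grid code): mirror of the GRID_CUDA layout above.
#include <cstdint>

struct StencilEntrySketch {
  uint64_t _byte_offset;        // 8 bytes
  uint32_t _offset;             // 4 bytes
  uint8_t  _is_local;           // 1 byte
  uint8_t  _permute;            // 1 byte
  uint8_t  _around_the_world;   // 1 byte
  uint8_t  _pad;                // 1 byte
};
static_assert(sizeof(StencilEntrySketch) == 16, "entry packs to 128 bits");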
@@ -149,18 +149,18 @@ class CartesianStencilAccelerator {
 | 
			
		||||
 | 
			
		||||
  accelerator_inline cobj *CommBuf(void) { return u_recv_buf_p; }
 | 
			
		||||
 | 
			
		||||
  accelerator_inline int GetNodeLocal(int osite,int point) { 
 | 
			
		||||
  accelerator_inline int GetNodeLocal(int osite,int point) {
 | 
			
		||||
    return this->_entries_p[point+this->_npoints*osite]._is_local;
 | 
			
		||||
  }
 | 
			
		||||
  accelerator_inline StencilEntry * GetEntry(int &ptype,int point,int osite) { 
 | 
			
		||||
    ptype = this->_permute_type[point]; return & this->_entries_p[point+this->_npoints*osite]; 
 | 
			
		||||
  accelerator_inline StencilEntry * GetEntry(int &ptype,int point,int osite) {
 | 
			
		||||
    ptype = this->_permute_type[point]; return & this->_entries_p[point+this->_npoints*osite];
 | 
			
		||||
  }
 | 
			
		||||
 | 
			
		||||
  accelerator_inline uint64_t GetInfo(int &ptype,int &local,int &perm,int point,int ent,uint64_t base) {
 | 
			
		||||
    uint64_t cbase = (uint64_t)&u_recv_buf_p[0];
 | 
			
		||||
    local = this->_entries_p[ent]._is_local;
 | 
			
		||||
    perm  = this->_entries_p[ent]._permute;
 | 
			
		||||
    if (perm)  ptype = this->_permute_type[point]; 
 | 
			
		||||
    if (perm)  ptype = this->_permute_type[point];
 | 
			
		||||
    if (local) {
 | 
			
		||||
      return  base + this->_entries_p[ent]._byte_offset;
 | 
			
		||||
    } else {
 | 
			
		||||
@@ -175,14 +175,14 @@ class CartesianStencilAccelerator {
 | 
			
		||||
    else       return cbase + this->_entries_p[ent]._byte_offset;
 | 
			
		||||
  }
 | 
			
		||||
 | 
			
		||||
  accelerator_inline void iCoorFromIindex(Coordinate &coor,int lane) 
 | 
			
		||||
  accelerator_inline void iCoorFromIindex(Coordinate &coor,int lane)
 | 
			
		||||
  {
 | 
			
		||||
    Lexicographic::CoorFromIndex(coor,lane,this->_simd_layout);
 | 
			
		||||
  }
 | 
			
		||||
};
 | 
			
		||||
 | 
			
		||||
template<class vobj,class cobj,class Parameters>
 | 
			
		||||
class CartesianStencilView : public CartesianStencilAccelerator<vobj,cobj,Parameters> 
 | 
			
		||||
class CartesianStencilView : public CartesianStencilAccelerator<vobj,cobj,Parameters>
 | 
			
		||||
{
 | 
			
		||||
 private:
 | 
			
		||||
  int *closed;
 | 
			
		||||
@@ -192,7 +192,7 @@ class CartesianStencilView : public CartesianStencilAccelerator<vobj,cobj,Parame
 | 
			
		||||
  // default copy constructor
 | 
			
		||||
  CartesianStencilView (const CartesianStencilView &refer_to_me) = default;
 | 
			
		||||
 | 
			
		||||
  CartesianStencilView (const CartesianStencilAccelerator<vobj,cobj,Parameters> &refer_to_me,ViewMode _mode) 
 | 
			
		||||
  CartesianStencilView (const CartesianStencilAccelerator<vobj,cobj,Parameters> &refer_to_me,ViewMode _mode)
 | 
			
		||||
    : CartesianStencilAccelerator<vobj,cobj,Parameters>(refer_to_me),
 | 
			
		||||
    cpu_ptr(this->_entries_p),
 | 
			
		||||
    mode(_mode)
 | 
			
		||||
@@ -201,14 +201,14 @@ class CartesianStencilView : public CartesianStencilAccelerator<vobj,cobj,Parame
 | 
			
		||||
      MemoryManager::ViewOpen(this->_entries_p,
 | 
			
		||||
			      this->_npoints*this->_osites*sizeof(StencilEntry),
 | 
			
		||||
			      mode,
 | 
			
		||||
			      AdviseDefault);    
 | 
			
		||||
			      AdviseDefault);
 | 
			
		||||
  }
 | 
			
		||||
  
 | 
			
		||||
 | 
			
		||||
  void ViewClose(void)
 | 
			
		||||
  {
 | 
			
		||||
    MemoryManager::ViewClose(this->cpu_ptr,this->mode);    
 | 
			
		||||
    MemoryManager::ViewClose(this->cpu_ptr,this->mode);
 | 
			
		||||
  }
 | 
			
		||||
  
 | 
			
		||||
 | 
			
		||||
};
 | 
			
		||||
 | 
			
		||||
////////////////////////////////////////
 | 
			
		||||
@@ -245,12 +245,12 @@ public:
 | 
			
		||||
    cobj * mpi_p;
 | 
			
		||||
    Integer buffer_size;
 | 
			
		||||
  };
 | 
			
		||||
  
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
protected:
 | 
			
		||||
  GridBase *                        _grid;
 | 
			
		||||
 | 
			
		||||
public: 
 | 
			
		||||
public:
 | 
			
		||||
  GridBase *Grid(void) const { return _grid; }
 | 
			
		||||
 | 
			
		||||
  ////////////////////////////////////////////////////////////////////////
 | 
			
		||||
@@ -264,7 +264,7 @@ public:
 | 
			
		||||
    View_type accessor(*( (View_type *) this),mode);
 | 
			
		||||
    return accessor;
 | 
			
		||||
  }
 | 
			
		||||
  
 | 
			
		||||
 | 
			
		||||
  int face_table_computed;
 | 
			
		||||
  std::vector<Vector<std::pair<int,int> > > face_table ;
 | 
			
		||||
  Vector<int> surface_list;
 | 
			
		||||
@@ -314,7 +314,7 @@ public:
 | 
			
		||||
  ////////////////////////////////////////
 | 
			
		||||
  // Stencil query
 | 
			
		||||
  ////////////////////////////////////////
 | 
			
		||||
  inline int SameNode(int point) { 
 | 
			
		||||
  inline int SameNode(int point) {
 | 
			
		||||
 | 
			
		||||
    int dimension    = this->_directions[point];
 | 
			
		||||
    int displacement = this->_distances[point];
 | 
			
		||||
@@ -338,7 +338,7 @@ public:
 | 
			
		||||
    // FIXME  this logic needs to be sorted for three link term
 | 
			
		||||
    //    assert( (displacement==1) || (displacement==-1));
 | 
			
		||||
    // Present hack only works for >= 4^4 subvol per node
 | 
			
		||||
    _grid->ShiftedRanks(dimension,nbr_proc,xmit_to_rank,recv_from_rank); 
 | 
			
		||||
    _grid->ShiftedRanks(dimension,nbr_proc,xmit_to_rank,recv_from_rank);
 | 
			
		||||
 | 
			
		||||
    void *shm = (void *) _grid->ShmBufferTranslate(recv_from_rank,this->u_recv_buf_p);
 | 
			
		||||
 | 
			
		||||
@@ -378,7 +378,7 @@ public:
 | 
			
		||||
      comm_time_thr[mythread] += comm_leave_thr[mythread] - comm_enter_thr[mythread];
 | 
			
		||||
    }
 | 
			
		||||
  }
 | 
			
		||||
  
 | 
			
		||||
 | 
			
		||||
  void CollateThreads(void)
 | 
			
		||||
  {
 | 
			
		||||
    int nthreads = CartesianCommunicator::nCommThreads;
 | 
			
		||||
@@ -402,7 +402,7 @@ public:
 | 
			
		||||
      if ( (t0 > 0.0) && ( t0 < first ) ) first = t0;   // min time seen
 | 
			
		||||
 | 
			
		||||
      if ( t1 > last ) last = t1;                       // max time seen
 | 
			
		||||
      
 | 
			
		||||
 | 
			
		||||
    }
 | 
			
		||||
    commtime+= last-first;
 | 
			
		||||
  }
 | 
			
		||||
@@ -464,30 +464,30 @@ public:
 | 
			
		||||
      this->CommunicateBegin(reqs);
 | 
			
		||||
      this->CommunicateComplete(reqs);
 | 
			
		||||
    }
 | 
			
		||||
  }    
 | 
			
		||||
  
 | 
			
		||||
  template<class compressor> void HaloExchange(const Lattice<vobj> &source,compressor &compress) 
 | 
			
		||||
  }
 | 
			
		||||
 | 
			
		||||
  template<class compressor> void HaloExchange(const Lattice<vobj> &source,compressor &compress)
 | 
			
		||||
  {
 | 
			
		||||
    Prepare();
 | 
			
		||||
    HaloGather(source,compress);
 | 
			
		||||
    Communicate();
 | 
			
		||||
    CommsMergeSHM(compress); 
 | 
			
		||||
    CommsMerge(compress); 
 | 
			
		||||
    CommsMergeSHM(compress);
 | 
			
		||||
    CommsMerge(compress);
 | 
			
		||||
  }
 | 
			
		||||
  
 | 
			
		||||
 | 
			
		||||
  template<class compressor> int HaloGatherDir(const Lattice<vobj> &source,compressor &compress,int point,int & face_idx)
 | 
			
		||||
  {
 | 
			
		||||
    int dimension    = this->_directions[point];
 | 
			
		||||
    int displacement = this->_distances[point];
 | 
			
		||||
    
 | 
			
		||||
 | 
			
		||||
    int fd = _grid->_fdimensions[dimension];
 | 
			
		||||
    int rd = _grid->_rdimensions[dimension];
 | 
			
		||||
    
 | 
			
		||||
 | 
			
		||||
    // Map to always positive shift modulo global full dimension.
 | 
			
		||||
    int shift = (displacement+fd)%fd;
 | 
			
		||||
 | 
			
		||||
    assert (source.Checkerboard()== this->_checkerboard);
 | 
			
		||||
    
 | 
			
		||||
 | 
			
		||||
    // the permute type
 | 
			
		||||
    int simd_layout     = _grid->_simd_layout[dimension];
 | 
			
		||||
    int comm_dim        = _grid->_processors[dimension] >1 ;
 | 
			
		||||
@@ -505,7 +505,7 @@ public:
 | 
			
		||||
	  auto tmp  = GatherSimd(source,dimension,shift,0x3,compress,face_idx);
 | 
			
		||||
	  is_same_node = is_same_node && tmp;
 | 
			
		||||
	  splicetime+=usecond();
 | 
			
		||||
	} else { 
 | 
			
		||||
	} else {
 | 
			
		||||
	  nosplicetime-=usecond();
 | 
			
		||||
	  auto tmp  = Gather(source,dimension,shift,0x3,compress,face_idx);
 | 
			
		||||
	  is_same_node = is_same_node && tmp;
 | 
			
		||||
@@ -531,7 +531,7 @@ public:
 | 
			
		||||
    }
 | 
			
		||||
    return is_same_node;
 | 
			
		||||
  }
 | 
			
		||||
  
 | 
			
		||||
 | 
			
		||||
  template<class compressor>
 | 
			
		||||
  void HaloGather(const Lattice<vobj> &source,compressor &compress)
 | 
			
		||||
  {
 | 
			
		||||
@@ -542,9 +542,9 @@ public:
 | 
			
		||||
    // conformable(source.Grid(),_grid);
 | 
			
		||||
    assert(source.Grid()==_grid);
 | 
			
		||||
    halogtime-=usecond();
 | 
			
		||||
    
 | 
			
		||||
 | 
			
		||||
    u_comm_offset=0;
 | 
			
		||||
    
 | 
			
		||||
 | 
			
		||||
    // Gather all comms buffers
 | 
			
		||||
    int face_idx=0;
 | 
			
		||||
    for(int point = 0 ; point < this->_npoints; point++) {
 | 
			
		||||
@@ -557,16 +557,16 @@ public:
 | 
			
		||||
    accelerator_barrier();
 | 
			
		||||
    halogtime+=usecond();
 | 
			
		||||
  }
 | 
			
		||||
 
 | 
			
		||||
 | 
			
		||||
  /////////////////////////
 | 
			
		||||
  // Implementation
 | 
			
		||||
  /////////////////////////
 | 
			
		||||
  void Prepare(void)
 | 
			
		||||
  {
 | 
			
		||||
    Decompressions.resize(0); 
 | 
			
		||||
    DecompressionsSHM.resize(0); 
 | 
			
		||||
    Mergers.resize(0); 
 | 
			
		||||
    MergersSHM.resize(0); 
 | 
			
		||||
    Decompressions.resize(0);
 | 
			
		||||
    DecompressionsSHM.resize(0);
 | 
			
		||||
    Mergers.resize(0);
 | 
			
		||||
    MergersSHM.resize(0);
 | 
			
		||||
    Packets.resize(0);
 | 
			
		||||
    calls++;
 | 
			
		||||
  }
 | 
			
		||||
@@ -595,22 +595,22 @@ public:
 | 
			
		||||
    mv.push_back(m);
 | 
			
		||||
  }
 | 
			
		||||
  template<class decompressor>  void CommsMerge(decompressor decompress)    {
 | 
			
		||||
    CommsMerge(decompress,Mergers,Decompressions); 
 | 
			
		||||
    CommsMerge(decompress,Mergers,Decompressions);
 | 
			
		||||
  }
 | 
			
		||||
  template<class decompressor>  void CommsMergeSHM(decompressor decompress) {
 | 
			
		||||
    mpi3synctime-=usecond();    
 | 
			
		||||
    mpi3synctime-=usecond();
 | 
			
		||||
    _grid->StencilBarrier();// Synch shared memory on a single nodes
 | 
			
		||||
    mpi3synctime+=usecond();    
 | 
			
		||||
    shmmergetime-=usecond();    
 | 
			
		||||
    mpi3synctime+=usecond();
 | 
			
		||||
    shmmergetime-=usecond();
 | 
			
		||||
    CommsMerge(decompress,MergersSHM,DecompressionsSHM);
 | 
			
		||||
    shmmergetime+=usecond();    
 | 
			
		||||
    shmmergetime+=usecond();
 | 
			
		||||
  }
 | 
			
		||||
 | 
			
		||||
  template<class decompressor>
 | 
			
		||||
  void CommsMerge(decompressor decompress,std::vector<Merge> &mm,std::vector<Decompress> &dd) { 
 | 
			
		||||
  void CommsMerge(decompressor decompress,std::vector<Merge> &mm,std::vector<Decompress> &dd) {
 | 
			
		||||
 | 
			
		||||
    mergetime-=usecond();
 | 
			
		||||
    for(int i=0;i<mm.size();i++){	
 | 
			
		||||
    for(int i=0;i<mm.size();i++){
 | 
			
		||||
      auto mp = &mm[i].mpointer[0];
 | 
			
		||||
      auto vp0= &mm[i].vpointers[0][0];
 | 
			
		||||
      auto vp1= &mm[i].vpointers[1][0];
 | 
			
		||||
@@ -622,7 +622,7 @@ public:
 | 
			
		||||
    mergetime+=usecond();
 | 
			
		||||
 | 
			
		||||
    decompresstime-=usecond();
 | 
			
		||||
    for(int i=0;i<dd.size();i++){	
 | 
			
		||||
    for(int i=0;i<dd.size();i++){
 | 
			
		||||
      auto kp = dd[i].kernel_p;
 | 
			
		||||
      auto mp = dd[i].mpi_p;
 | 
			
		||||
      accelerator_forNB(o,dd[i].buffer_size,1,{
 | 
			
		||||
@@ -638,7 +638,7 @@ public:
 | 
			
		||||
    for(int i=0;i<_entries.size();i++){
 | 
			
		||||
      if( _entries[i]._is_local ) {
 | 
			
		||||
	_entries[i]._byte_offset = _entries[i]._offset*sizeof(vobj);
 | 
			
		||||
      } else { 
 | 
			
		||||
      } else {
 | 
			
		||||
	_entries[i]._byte_offset = _entries[i]._offset*sizeof(cobj);
 | 
			
		||||
      }
 | 
			
		||||
    }
 | 
			
		||||
@@ -653,15 +653,15 @@ public:
 | 
			
		||||
    for(int point=0;point<this->_npoints;point++){
 | 
			
		||||
      this->same_node[point] = this->SameNode(point);
 | 
			
		||||
    }
 | 
			
		||||
    
 | 
			
		||||
 | 
			
		||||
    for(int site = 0 ;site< vol4;site++){
 | 
			
		||||
      int local = 1;
 | 
			
		||||
      for(int point=0;point<this->_npoints;point++){
 | 
			
		||||
	if( (!this->GetNodeLocal(site*Ls,point)) && (!this->same_node[point]) ){ 
 | 
			
		||||
	if( (!this->GetNodeLocal(site*Ls,point)) && (!this->same_node[point]) ){
 | 
			
		||||
	  local = 0;
 | 
			
		||||
	}
 | 
			
		||||
      }
 | 
			
		||||
      if(local == 0) { 
 | 
			
		||||
      if(local == 0) {
 | 
			
		||||
	surface_list.push_back(site);
 | 
			
		||||
      }
 | 
			
		||||
    }
 | 
			
		||||
@@ -672,11 +672,11 @@ public:
 | 
			
		||||
		   int checkerboard,
 | 
			
		||||
		   const std::vector<int> &directions,
 | 
			
		||||
		   const std::vector<int> &distances,
 | 
			
		||||
		   Parameters p) 
 | 
			
		||||
    : shm_bytes_thr(npoints), 
 | 
			
		||||
      comm_bytes_thr(npoints), 
 | 
			
		||||
		   Parameters p)
 | 
			
		||||
    : shm_bytes_thr(npoints),
 | 
			
		||||
      comm_bytes_thr(npoints),
 | 
			
		||||
      comm_enter_thr(npoints),
 | 
			
		||||
      comm_leave_thr(npoints), 
 | 
			
		||||
      comm_leave_thr(npoints),
 | 
			
		||||
      comm_time_thr(npoints)
 | 
			
		||||
  {
 | 
			
		||||
    face_table_computed=0;
 | 
			
		||||
@@ -687,7 +687,7 @@ public:
 | 
			
		||||
    /////////////////////////////////////
 | 
			
		||||
    this->_npoints = npoints;
 | 
			
		||||
    this->_comm_buf_size.resize(npoints),
 | 
			
		||||
    this->_permute_type.resize(npoints), 
 | 
			
		||||
    this->_permute_type.resize(npoints),
 | 
			
		||||
    this->_simd_layout = _grid->_simd_layout; // copy simd_layout to give access to Accelerator Kernels
 | 
			
		||||
    this->_directions = StencilVector(directions);
 | 
			
		||||
    this->_distances  = StencilVector(distances);
 | 
			
		||||
@@ -697,24 +697,24 @@ public:
 | 
			
		||||
    surface_list.resize(0);
 | 
			
		||||
 | 
			
		||||
    this->_osites  = _grid->oSites();
 | 
			
		||||
    
 | 
			
		||||
 | 
			
		||||
    _entries.resize(this->_npoints* this->_osites);
 | 
			
		||||
    this->_entries_p = &_entries[0];
 | 
			
		||||
    for(int ii=0;ii<npoints;ii++){
 | 
			
		||||
      
 | 
			
		||||
 | 
			
		||||
      int i = ii; // reverse direction to get SIMD comms done first
 | 
			
		||||
      int point = i;
 | 
			
		||||
      
 | 
			
		||||
 | 
			
		||||
      int dimension    = directions[i];
 | 
			
		||||
      int displacement = distances[i];
 | 
			
		||||
      int shift = displacement;
 | 
			
		||||
      
 | 
			
		||||
 | 
			
		||||
      int fd = _grid->_fdimensions[dimension];
 | 
			
		||||
      int rd = _grid->_rdimensions[dimension];
 | 
			
		||||
      this->_permute_type[point]=_grid->PermuteType(dimension);
 | 
			
		||||
      
 | 
			
		||||
 | 
			
		||||
      this->_checkerboard = checkerboard;
 | 
			
		||||
      
 | 
			
		||||
 | 
			
		||||
      //////////////////////////
 | 
			
		||||
      // the permute type
 | 
			
		||||
      //////////////////////////
 | 
			
		||||
@@ -724,25 +724,25 @@ public:
 | 
			
		||||
      int rotate_dim      = _grid->_simd_layout[dimension]>2;
 | 
			
		||||
 | 
			
		||||
      assert ( (rotate_dim && comm_dim) == false) ; // Do not think spread out is supported
 | 
			
		||||
      
 | 
			
		||||
 | 
			
		||||
      int sshift[2];
 | 
			
		||||
      
 | 
			
		||||
 | 
			
		||||
      //////////////////////////
 | 
			
		||||
      // Underlying approach. For each local site build
 | 
			
		||||
      // up a table containing the npoint "neighbours" and whether they 
 | 
			
		||||
      // up a table containing the npoint "neighbours" and whether they
 | 
			
		||||
      // live in lattice or a comms buffer.
 | 
			
		||||
      //////////////////////////
 | 
			
		||||
      if ( !comm_dim ) {
 | 
			
		||||
	sshift[0] = _grid->CheckerBoardShiftForCB(this->_checkerboard,dimension,shift,Even);
 | 
			
		||||
	sshift[1] = _grid->CheckerBoardShiftForCB(this->_checkerboard,dimension,shift,Odd);
 | 
			
		||||
	
 | 
			
		||||
 | 
			
		||||
	if ( sshift[0] == sshift[1] ) {
 | 
			
		||||
	  Local(point,dimension,shift,0x3);
 | 
			
		||||
	} else {
 | 
			
		||||
	  Local(point,dimension,shift,0x1);// if checkerboard is unfavourable take two passes
 | 
			
		||||
	  Local(point,dimension,shift,0x2);// both with block stride loop iteration
 | 
			
		||||
	}
 | 
			
		||||
      } else { 
 | 
			
		||||
      } else {
 | 
			
		||||
	// All permute extract done in comms phase prior to Stencil application
 | 
			
		||||
	//        So tables are the same whether comm_dim or splice_dim
 | 
			
		||||
	sshift[0] = _grid->CheckerBoardShiftForCB(this->_checkerboard,dimension,shift,Even);
 | 
			
		||||
@@ -784,23 +784,23 @@ public:
 | 
			
		||||
    int ld = _grid->_ldimensions[dimension];
 | 
			
		||||
    int gd = _grid->_gdimensions[dimension];
 | 
			
		||||
    int ly = _grid->_simd_layout[dimension];
 | 
			
		||||
    
 | 
			
		||||
 | 
			
		||||
    // Map to always positive shift modulo global full dimension.
 | 
			
		||||
    int shift = (shiftpm+fd)%fd;
 | 
			
		||||
 | 
			
		||||
    // the permute type
 | 
			
		||||
    int permute_dim =_grid->PermuteDim(dimension);
 | 
			
		||||
    
 | 
			
		||||
    for(int x=0;x<rd;x++){       
 | 
			
		||||
      
 | 
			
		||||
 | 
			
		||||
    for(int x=0;x<rd;x++){
 | 
			
		||||
 | 
			
		||||
      //      int o   = 0;
 | 
			
		||||
      int bo  = x * _grid->_ostride[dimension];
 | 
			
		||||
      
 | 
			
		||||
 | 
			
		||||
      int cb= (cbmask==0x2)? Odd : Even;
 | 
			
		||||
      
 | 
			
		||||
 | 
			
		||||
      int sshift = _grid->CheckerBoardShiftForCB(this->_checkerboard,dimension,shift,cb);
 | 
			
		||||
      int sx     = (x+sshift)%rd;
 | 
			
		||||
      
 | 
			
		||||
 | 
			
		||||
      int wraparound=0;
 | 
			
		||||
      if ( (shiftpm==-1) && (sx>x)  ) {
 | 
			
		||||
	wraparound = 1;
 | 
			
		||||
@@ -808,7 +808,7 @@ public:
 | 
			
		||||
      if ( (shiftpm== 1) && (sx<x)  ) {
 | 
			
		||||
	wraparound = 1;
 | 
			
		||||
      }
 | 
			
		||||
      
 | 
			
		||||
 | 
			
		||||
      int permute_slice=0;
 | 
			
		||||
      if(permute_dim){
 | 
			
		||||
	int wrap = sshift/rd; wrap=wrap % ly; // but it is local anyway
 | 
			
		||||
@@ -816,66 +816,66 @@ public:
 | 
			
		||||
	if ( x< rd-num ) permute_slice=wrap;
 | 
			
		||||
	else permute_slice = (wrap+1)%ly;
 | 
			
		||||
      }
 | 
			
		||||
      
 | 
			
		||||
 | 
			
		||||
      CopyPlane(point,dimension,x,sx,cbmask,permute_slice,wraparound);
 | 
			
		||||
      
 | 
			
		||||
 | 
			
		||||
    }
 | 
			
		||||
  }
 | 
			
		||||
  
 | 
			
		||||
 | 
			
		||||
  void Comms     (int point,int dimension,int shiftpm,int cbmask)
 | 
			
		||||
  {
 | 
			
		||||
    GridBase *grid=_grid;
 | 
			
		||||
    const int Nsimd = grid->Nsimd();
 | 
			
		||||
    
 | 
			
		||||
 | 
			
		||||
    int fd              = _grid->_fdimensions[dimension];
 | 
			
		||||
    int ld              = _grid->_ldimensions[dimension];
 | 
			
		||||
    int rd              = _grid->_rdimensions[dimension];
 | 
			
		||||
    int pd              = _grid->_processors[dimension];
 | 
			
		||||
    int simd_layout     = _grid->_simd_layout[dimension];
 | 
			
		||||
    int comm_dim        = _grid->_processors[dimension] >1 ;
 | 
			
		||||
    
 | 
			
		||||
 | 
			
		||||
    assert(comm_dim==1);
 | 
			
		||||
    int shift = (shiftpm + fd) %fd;
 | 
			
		||||
    assert(shift>=0);
 | 
			
		||||
    assert(shift<fd);
 | 
			
		||||
    
 | 
			
		||||
 | 
			
		||||
    // done in reduced dims, so SIMD factored
 | 
			
		||||
    int buffer_size = _grid->_slice_nblock[dimension]*_grid->_slice_block[dimension]; 
 | 
			
		||||
    int buffer_size = _grid->_slice_nblock[dimension]*_grid->_slice_block[dimension];
 | 
			
		||||
 | 
			
		||||
    this->_comm_buf_size[point] = buffer_size; // Size of _one_ plane. Multiple planes may be gathered and
 | 
			
		||||
 | 
			
		||||
    // send to one or more remote nodes.
 | 
			
		||||
    
 | 
			
		||||
 | 
			
		||||
    int cb= (cbmask==0x2)? Odd : Even;
 | 
			
		||||
    int sshift= _grid->CheckerBoardShiftForCB(this->_checkerboard,dimension,shift,cb);
 | 
			
		||||
    
 | 
			
		||||
    for(int x=0;x<rd;x++){       
 | 
			
		||||
      
 | 
			
		||||
 | 
			
		||||
    for(int x=0;x<rd;x++){
 | 
			
		||||
 | 
			
		||||
      int permute_type=grid->PermuteType(dimension);
 | 
			
		||||
      
 | 
			
		||||
 | 
			
		||||
      int sx        =  (x+sshift)%rd;
 | 
			
		||||
      
 | 
			
		||||
 | 
			
		||||
      int offnode = 0;
 | 
			
		||||
      if ( simd_layout > 1 ) {
 | 
			
		||||
	
 | 
			
		||||
 | 
			
		||||
	for(int i=0;i<Nsimd;i++){
 | 
			
		||||
	  
 | 
			
		||||
 | 
			
		||||
	  int inner_bit = (Nsimd>>(permute_type+1));
 | 
			
		||||
	  int ic= (i&inner_bit)? 1:0;
 | 
			
		||||
	  int my_coor          = rd*ic + x;
 | 
			
		||||
	  int nbr_coor         = my_coor+sshift;
 | 
			
		||||
	  int nbr_proc = ((nbr_coor)/ld) % pd;// relative shift in processors
 | 
			
		||||
	  
 | 
			
		||||
	  if ( nbr_proc ) { 
 | 
			
		||||
 | 
			
		||||
	  if ( nbr_proc ) {
 | 
			
		||||
	    offnode =1;
 | 
			
		||||
	  }
 | 
			
		||||
	}
 | 
			
		||||
	
 | 
			
		||||
      } else { 
 | 
			
		||||
 | 
			
		||||
      } else {
 | 
			
		||||
	int comm_proc = ((x+sshift)/rd)%pd;
 | 
			
		||||
	offnode = (comm_proc!= 0);
 | 
			
		||||
      }
 | 
			
		||||
      
 | 
			
		||||
 | 
			
		||||
      int wraparound=0;
 | 
			
		||||
      if ( (shiftpm==-1) && (sx>x) && (grid->_processor_coor[dimension]==0) ) {
 | 
			
		||||
	wraparound = 1;
 | 
			
		||||
@@ -884,24 +884,24 @@ public:
 | 
			
		||||
	wraparound = 1;
 | 
			
		||||
      }
 | 
			
		||||
      if (!offnode) {
 | 
			
		||||
	
 | 
			
		||||
 | 
			
		||||
	int permute_slice=0;
 | 
			
		||||
	CopyPlane(point,dimension,x,sx,cbmask,permute_slice,wraparound); 
 | 
			
		||||
	
 | 
			
		||||
	CopyPlane(point,dimension,x,sx,cbmask,permute_slice,wraparound);
 | 
			
		||||
 | 
			
		||||
      } else {
 | 
			
		||||
 | 
			
		||||
	int words = buffer_size;
 | 
			
		||||
	if (cbmask != 0x3) words=words>>1;
 | 
			
		||||
	
 | 
			
		||||
 | 
			
		||||
	//	int rank           = grid->_processor;
 | 
			
		||||
	//	int recv_from_rank;
 | 
			
		||||
	//	int xmit_to_rank;
 | 
			
		||||
	
 | 
			
		||||
 | 
			
		||||
	int unified_buffer_offset = _unified_buffer_size;
 | 
			
		||||
	_unified_buffer_size    += words;
 | 
			
		||||
	
 | 
			
		||||
 | 
			
		||||
	ScatterPlane(point,dimension,x,cbmask,unified_buffer_offset,wraparound); // permute/extract/merge is done in comms phase
 | 
			
		||||
	
 | 
			
		||||
 | 
			
		||||
      }
 | 
			
		||||
    }
 | 
			
		||||
  }
 | 
			
		||||
@@ -909,13 +909,13 @@ public:
 | 
			
		||||
  void CopyPlane(int point, int dimension,int lplane,int rplane,int cbmask,int permute,int wrap)
 | 
			
		||||
  {
 | 
			
		||||
    int rd = _grid->_rdimensions[dimension];
 | 
			
		||||
    
 | 
			
		||||
 | 
			
		||||
    if ( !_grid->CheckerBoarded(dimension) ) {
 | 
			
		||||
      
 | 
			
		||||
 | 
			
		||||
      int o   = 0;                                     // relative offset to base within plane
 | 
			
		||||
      int ro  = rplane*_grid->_ostride[dimension]; // base offset for start of plane 
 | 
			
		||||
      int ro  = rplane*_grid->_ostride[dimension]; // base offset for start of plane
 | 
			
		||||
      int lo  = lplane*_grid->_ostride[dimension]; // offset in buffer
 | 
			
		||||
      
 | 
			
		||||
 | 
			
		||||
      // Simple block stride gather of SIMD objects
 | 
			
		||||
      for(int n=0;n<_grid->_slice_nblock[dimension];n++){
 | 
			
		||||
	for(int b=0;b<_grid->_slice_block[dimension];b++){
 | 
			
		||||
@@ -927,18 +927,18 @@ public:
 | 
			
		||||
	}
 | 
			
		||||
	o +=_grid->_slice_stride[dimension];
 | 
			
		||||
      }
 | 
			
		||||
      
 | 
			
		||||
 | 
			
		||||
    } else {
 | 
			
		||||
      
 | 
			
		||||
      int ro  = rplane*_grid->_ostride[dimension]; // base offset for start of plane 
 | 
			
		||||
      int lo  = lplane*_grid->_ostride[dimension]; // base offset for start of plane 
 | 
			
		||||
 | 
			
		||||
      int ro  = rplane*_grid->_ostride[dimension]; // base offset for start of plane
 | 
			
		||||
      int lo  = lplane*_grid->_ostride[dimension]; // base offset for start of plane
 | 
			
		||||
      int o   = 0;                                     // relative offset to base within plane
 | 
			
		||||
      
 | 
			
		||||
 | 
			
		||||
      for(int n=0;n<_grid->_slice_nblock[dimension];n++){
 | 
			
		||||
	for(int b=0;b<_grid->_slice_block[dimension];b++){
 | 
			
		||||
	  
 | 
			
		||||
 | 
			
		||||
	  int ocb=1<<_grid->CheckerBoardFromOindex(o+b);
 | 
			
		||||
	  
 | 
			
		||||
 | 
			
		||||
	  if ( ocb&cbmask ) {
 | 
			
		||||
	    int idx = point+(lo+o+b)*this->_npoints;
 | 
			
		||||
	    _entries[idx]._offset =ro+o+b;
 | 
			
		||||
@@ -946,24 +946,24 @@ public:
 | 
			
		||||
	    _entries[idx]._permute=permute;
 | 
			
		||||
	    _entries[idx]._around_the_world=wrap;
 | 
			
		||||
	  }
 | 
			
		||||
	  
 | 
			
		||||
 | 
			
		||||
	}
 | 
			
		||||
	o +=_grid->_slice_stride[dimension];
 | 
			
		||||
      }
 | 
			
		||||
      
 | 
			
		||||
 | 
			
		||||
    }
 | 
			
		||||
  }
 | 
			
		||||
  // Routine builds up integer table for each site in _offsets, _is_local, _permute
 | 
			
		||||
  void ScatterPlane (int point,int dimension,int plane,int cbmask,int offset, int wrap)
 | 
			
		||||
  {
 | 
			
		||||
    int rd = _grid->_rdimensions[dimension];
 | 
			
		||||
    
 | 
			
		||||
 | 
			
		||||
    if ( !_grid->CheckerBoarded(dimension) ) {
 | 
			
		||||
      
 | 
			
		||||
      int so  = plane*_grid->_ostride[dimension]; // base offset for start of plane 
 | 
			
		||||
 | 
			
		||||
      int so  = plane*_grid->_ostride[dimension]; // base offset for start of plane
 | 
			
		||||
      int o   = 0;                                    // relative offset to base within plane
 | 
			
		||||
      int bo  = 0;                                    // offset in buffer
 | 
			
		||||
      
 | 
			
		||||
 | 
			
		||||
      // Simple block stride gather of SIMD objects
 | 
			
		||||
      for(int n=0;n<_grid->_slice_nblock[dimension];n++){
 | 
			
		||||
	for(int b=0;b<_grid->_slice_block[dimension];b++){
 | 
			
		||||
@@ -975,16 +975,16 @@ public:
 | 
			
		||||
	}
 | 
			
		||||
	o +=_grid->_slice_stride[dimension];
 | 
			
		||||
      }
 | 
			
		||||
      
 | 
			
		||||
    } else { 
 | 
			
		||||
      
 | 
			
		||||
      int so  = plane*_grid->_ostride[dimension]; // base offset for start of plane 
 | 
			
		||||
 | 
			
		||||
    } else {
 | 
			
		||||
 | 
			
		||||
      int so  = plane*_grid->_ostride[dimension]; // base offset for start of plane
 | 
			
		||||
      int o   = 0;                                      // relative offset to base within plane
 | 
			
		||||
      int bo  = 0;                                      // offset in buffer
 | 
			
		||||
      
 | 
			
		||||
 | 
			
		||||
      for(int n=0;n<_grid->_slice_nblock[dimension];n++){
 | 
			
		||||
	for(int b=0;b<_grid->_slice_block[dimension];b++){
 | 
			
		||||
	  
 | 
			
		||||
 | 
			
		||||
	  int ocb=1<<_grid->CheckerBoardFromOindex(o+b);// Could easily be a table lookup
 | 
			
		||||
	  if ( ocb & cbmask ) {
 | 
			
		||||
	    int idx = point+(so+o+b)*this->_npoints;
 | 
			
		||||
@@ -998,16 +998,16 @@ public:
 | 
			
		||||
      }
 | 
			
		||||
    }
 | 
			
		||||
  }
 | 
			
		||||
  
 | 
			
		||||
 | 
			
		||||
  template<class compressor>
 | 
			
		||||
  int Gather(const Lattice<vobj> &rhs,int dimension,int shift,int cbmask,compressor & compress,int &face_idx)
 | 
			
		||||
  {
 | 
			
		||||
    typedef typename cobj::vector_type vector_type;
 | 
			
		||||
    typedef typename cobj::scalar_type scalar_type;
 | 
			
		||||
    
 | 
			
		||||
 | 
			
		||||
    assert(rhs.Grid()==_grid);
 | 
			
		||||
    //	  conformable(_grid,rhs.Grid());
 | 
			
		||||
    
 | 
			
		||||
 | 
			
		||||
    int fd              = _grid->_fdimensions[dimension];
 | 
			
		||||
    int rd              = _grid->_rdimensions[dimension];
 | 
			
		||||
    int pd              = _grid->_processors[dimension];
 | 
			
		||||
@@ -1019,37 +1019,37 @@ public:
 | 
			
		||||
    assert(shift<fd);
 | 
			
		||||
 | 
			
		||||
    int buffer_size = _grid->_slice_nblock[dimension]*_grid->_slice_block[dimension];
 | 
			
		||||
    
 | 
			
		||||
 | 
			
		||||
    int cb= (cbmask==0x2)? Odd : Even;
 | 
			
		||||
    int sshift= _grid->CheckerBoardShiftForCB(rhs.Checkerboard(),dimension,shift,cb);
 | 
			
		||||
    
 | 
			
		||||
 | 
			
		||||
    int shm_receive_only = 1;
 | 
			
		||||
    for(int x=0;x<rd;x++){       
 | 
			
		||||
      
 | 
			
		||||
    for(int x=0;x<rd;x++){
 | 
			
		||||
 | 
			
		||||
      int sx        = (x+sshift)%rd;
 | 
			
		||||
      int comm_proc = ((x+sshift)/rd)%pd;
 | 
			
		||||
      
 | 
			
		||||
 | 
			
		||||
      if (comm_proc) {
 | 
			
		||||
 | 
			
		||||
	int words = buffer_size;
 | 
			
		||||
	if (cbmask != 0x3) words=words>>1;
 | 
			
		||||
	
 | 
			
		||||
 | 
			
		||||
	int bytes =  words * compress.CommDatumSize();
 | 
			
		||||
	
 | 
			
		||||
	int so  = sx*rhs.Grid()->_ostride[dimension]; // base offset for start of plane 
 | 
			
		||||
 | 
			
		||||
	int so  = sx*rhs.Grid()->_ostride[dimension]; // base offset for start of plane
 | 
			
		||||
	if ( !face_table_computed ) {
 | 
			
		||||
	  face_table.resize(face_idx+1);
 | 
			
		||||
	  Gather_plane_table_compute ((GridBase *)_grid,dimension,sx,cbmask,u_comm_offset,face_table[face_idx]);
 | 
			
		||||
	}
 | 
			
		||||
	
 | 
			
		||||
 | 
			
		||||
	//      	int rank           = _grid->_processor;
 | 
			
		||||
	int recv_from_rank;
 | 
			
		||||
	int xmit_to_rank;
 | 
			
		||||
	_grid->ShiftedRanks(dimension,comm_proc,xmit_to_rank,recv_from_rank);
 | 
			
		||||
	
 | 
			
		||||
 | 
			
		||||
	assert (xmit_to_rank   != _grid->ThisRank());
 | 
			
		||||
	assert (recv_from_rank != _grid->ThisRank());
 | 
			
		||||
	
 | 
			
		||||
 | 
			
		||||
	/////////////////////////////////////////////////////////
 | 
			
		||||
	// try the direct copy if possible
 | 
			
		||||
	/////////////////////////////////////////////////////////
 | 
			
		||||
@@ -1062,13 +1062,13 @@ public:
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	send_buf = (cobj *)_grid->ShmBufferTranslate(xmit_to_rank,recv_buf);
 | 
			
		||||
	if ( send_buf==NULL ) { 
 | 
			
		||||
	if ( send_buf==NULL ) {
 | 
			
		||||
	  send_buf = this->u_send_buf_p;
 | 
			
		||||
	} 
 | 
			
		||||
	
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	// Find out if we get the direct copy.
 | 
			
		||||
	void *success = (void *) _grid->ShmBufferTranslate(recv_from_rank,this->u_send_buf_p);
 | 
			
		||||
	if (success==NULL) { 
 | 
			
		||||
	if (success==NULL) {
 | 
			
		||||
	  // we found a packet that comes from MPI and contributes to this leg of stencil
 | 
			
		||||
	  shm_receive_only = 0;
 | 
			
		||||
	}
 | 
			
		||||
@@ -1077,9 +1077,9 @@ public:
 | 
			
		||||
	assert(send_buf!=NULL);
 | 
			
		||||
	Gather_plane_simple_table(face_table[face_idx],rhs,send_buf,compress,u_comm_offset,so);  face_idx++;
 | 
			
		||||
	gathertime+=usecond();
 | 
			
		||||
	
 | 
			
		||||
 | 
			
		||||
	if ( compress.DecompressionStep() ) {
 | 
			
		||||
	  
 | 
			
		||||
 | 
			
		||||
	  if ( shm_receive_only ) { // Early decompress before MPI is finished is possible
 | 
			
		||||
	    AddDecompress(&this->u_recv_buf_p[u_comm_offset],
 | 
			
		||||
			  &recv_buf[u_comm_offset],
 | 
			
		||||
@@ -1108,7 +1108,7 @@ public:
 | 
			
		||||
    }
 | 
			
		||||
    return shm_receive_only;
 | 
			
		||||
  }
 | 
			
		||||
  
 | 
			
		||||
 | 
			
		||||
  template<class compressor>
 | 
			
		||||
  int  GatherSimd(const Lattice<vobj> &rhs,int dimension,int shift,int cbmask,compressor &compress,int & face_idx)
 | 
			
		||||
  {
 | 
			
		||||
@@ -1136,7 +1136,7 @@ public:
 | 
			
		||||
    ///////////////////////////////////////////////
 | 
			
		||||
    int buffer_size = _grid->_slice_nblock[dimension]*_grid->_slice_block[dimension];
 | 
			
		||||
    //    int words = sizeof(cobj)/sizeof(vector_type);
 | 
			
		||||
    
 | 
			
		||||
 | 
			
		||||
    assert(cbmask==0x3); // Fixme think there is a latent bug if not true
 | 
			
		||||
                         // This assert will trap it if ever hit. Not hit normally so far
 | 
			
		||||
    int reduced_buffer_size = buffer_size;
 | 
			
		||||
@@ -1152,22 +1152,22 @@ public:
 | 
			
		||||
    ///////////////////////////////////////////
 | 
			
		||||
    // Work out what to send where
 | 
			
		||||
    ///////////////////////////////////////////
 | 
			
		||||
    
 | 
			
		||||
 | 
			
		||||
    int cb    = (cbmask==0x2)? Odd : Even;
 | 
			
		||||
    int sshift= _grid->CheckerBoardShiftForCB(rhs.Checkerboard(),dimension,shift,cb);
 | 
			
		||||
    
 | 
			
		||||
 | 
			
		||||
    // loop over outer coord planes orthog to dim
 | 
			
		||||
    int shm_receive_only = 1;
 | 
			
		||||
    for(int x=0;x<rd;x++){       
 | 
			
		||||
      
 | 
			
		||||
    for(int x=0;x<rd;x++){
 | 
			
		||||
 | 
			
		||||
      int any_offnode = ( ((x+sshift)%fd) >= rd );
 | 
			
		||||
 | 
			
		||||
      if ( any_offnode ) {
 | 
			
		||||
	
 | 
			
		||||
	for(int i=0;i<maxl;i++){       
 | 
			
		||||
 | 
			
		||||
	for(int i=0;i<maxl;i++){
 | 
			
		||||
	  spointers[i] = (cobj *) &u_simd_send_buf[i][u_comm_offset];
 | 
			
		||||
	}
 | 
			
		||||
	
 | 
			
		||||
 | 
			
		||||
	int sx   = (x+sshift)%rd;
 | 
			
		||||
 | 
			
		||||
	if ( !face_table_computed ) {
 | 
			
		||||
@@ -1202,13 +1202,13 @@ public:
 | 
			
		||||
 | 
			
		||||
	    int recv_from_rank;
 | 
			
		||||
	    int xmit_to_rank;
 | 
			
		||||
	    
 | 
			
		||||
	    _grid->ShiftedRanks(dimension,nbr_proc,xmit_to_rank,recv_from_rank); 
 | 
			
		||||
 
 | 
			
		||||
 | 
			
		||||
	    _grid->ShiftedRanks(dimension,nbr_proc,xmit_to_rank,recv_from_rank);
 | 
			
		||||
 | 
			
		||||
	    // shm == receive pointer         if offnode
 | 
			
		||||
	    // shm == Translate[send pointer] if on node -- my view of his send pointer
 | 
			
		||||
	    cobj *shm = (cobj *) _grid->ShmBufferTranslate(recv_from_rank,sp);
 | 
			
		||||
	    if (shm==NULL) { 
 | 
			
		||||
	    if (shm==NULL) {
 | 
			
		||||
	      shm = rp;
 | 
			
		||||
	      // we found a packet that comes from MPI and contributes to this shift.
 | 
			
		||||
	      // is_same_node is only used in the WilsonStencil, and gets set for this point in the stencil.
 | 
			
		||||
@@ -1222,15 +1222,15 @@ public:
 | 
			
		||||
 | 
			
		||||
	    AddPacket((void *)sp,(void *)rp,xmit_to_rank,recv_from_rank,bytes);
 | 
			
		||||
 | 
			
		||||
	    
 | 
			
		||||
	  } else { 
 | 
			
		||||
	    
 | 
			
		||||
 | 
			
		||||
	  } else {
 | 
			
		||||
 | 
			
		||||
	    rpointers[i] = sp;
 | 
			
		||||
	    
 | 
			
		||||
 | 
			
		||||
	  }
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	if ( shm_receive_only ) { 
 | 
			
		||||
	if ( shm_receive_only ) {
 | 
			
		||||
	  AddMerge(&this->u_recv_buf_p[u_comm_offset],rpointers,reduced_buffer_size,permute_type,MergersSHM);
 | 
			
		||||
	} else {
 | 
			
		||||
	  AddMerge(&this->u_recv_buf_p[u_comm_offset],rpointers,reduced_buffer_size,permute_type,Mergers);
 | 
			
		||||
@@ -1265,9 +1265,9 @@ public:
 | 
			
		||||
    shm_bytes = 0.;
 | 
			
		||||
    calls = 0.;
 | 
			
		||||
  };
 | 
			
		||||
  
 | 
			
		||||
 | 
			
		||||
  void Report(void) {
 | 
			
		||||
#define AVERAGE(A) 
 | 
			
		||||
#define AVERAGE(A)
 | 
			
		||||
#define PRINTIT(A) AVERAGE(A); std::cout << GridLogMessage << " Stencil " << #A << " "<< A/calls<<std::endl;
 | 
			
		||||
    RealD NP = _grid->_Nprocessors;
 | 
			
		||||
    RealD NN = _grid->NodeCount();
 | 
			
		||||
@@ -1284,7 +1284,7 @@ public:
 | 
			
		||||
      }
 | 
			
		||||
    }
 | 
			
		||||
    if (threaded) commtime += t;
 | 
			
		||||
    
 | 
			
		||||
 | 
			
		||||
    _grid->GlobalSum(commtime);    commtime/=NP;
 | 
			
		||||
    if ( calls > 0. ) {
 | 
			
		||||
      std::cout << GridLogMessage << " Stencil calls "<<calls<<std::endl;
 | 
			
		||||
@@ -1307,8 +1307,8 @@ public:
 | 
			
		||||
	std::cout << GridLogMessage << " Stencil SHM " << (shm_bytes)/gatheralltime/1000.*NP/NN << " GB/s per node"<<std::endl;
 | 
			
		||||
 | 
			
		||||
	auto all_bytes = comms_bytes+shm_bytes;
 | 
			
		||||
	std::cout << GridLogMessage << " Stencil SHM all" << (all_bytes)/gatheralltime/1000. << " GB/s per rank"<<std::endl;
 | 
			
		||||
	std::cout << GridLogMessage << " Stencil SHM all" << (all_bytes)/gatheralltime/1000.*NP/NN << " GB/s per node"<<std::endl;
 | 
			
		||||
	std::cout << GridLogMessage << " Stencil SHM all " << (all_bytes)/gatheralltime/1000. << " GB/s per rank"<<std::endl;
 | 
			
		||||
	std::cout << GridLogMessage << " Stencil SHM all " << (all_bytes)/gatheralltime/1000.*NP/NN << " GB/s per node"<<std::endl;
 | 
			
		||||
 | 
			
		||||
	auto membytes = (shm_bytes + comms_bytes/2) // read/write
 | 
			
		||||
	              + (shm_bytes+comms_bytes)/2 * sizeof(vobj)/sizeof(cobj);
 | 
			
		||||
@@ -1326,7 +1326,7 @@ public:
 | 
			
		||||
#undef PRINTIT
 | 
			
		||||
#undef AVERAGE
 | 
			
		||||
  };
 | 
			
		||||
  
 | 
			
		||||
 | 
			
		||||
};
 | 
			
		||||
NAMESPACE_END(Grid);
 | 
			
		||||
 | 
			
		||||
 
 | 
			
		||||
@@ -272,7 +272,7 @@ public:
  static auto traceIndex(const iVector<vtype,N> arg) ->  iScalar<RemoveCRV(arg._internal[0])>
  {
    iScalar<RemoveCRV(arg._internal[0])> ret;
    ret._internal=Zero();
    zeroit(ret);
    for(int i=0;i<N;i++){
      ret._internal = ret._internal+ arg._internal[i];
    }
@@ -190,6 +190,36 @@ NAMESPACE_BEGIN(Grid);
 | 
			
		||||
    typedef ComplexD DoublePrecision;
 | 
			
		||||
    typedef ComplexD DoublePrecision2;
 | 
			
		||||
  };
 | 
			
		||||
 | 
			
		||||
#ifdef GRID_CUDA
 | 
			
		||||
  template<> struct GridTypeMapper<std::complex<float> > : public GridTypeMapper_Base {
 | 
			
		||||
    typedef std::complex<float> scalar_type;
 | 
			
		||||
    typedef std::complex<double> scalar_typeD;
 | 
			
		||||
    typedef scalar_type vector_type;
 | 
			
		||||
    typedef scalar_typeD vector_typeD;
 | 
			
		||||
    typedef scalar_type tensor_reduced;
 | 
			
		||||
    typedef scalar_type scalar_object;
 | 
			
		||||
    typedef scalar_typeD scalar_objectD;
 | 
			
		||||
    typedef scalar_type Complexified;
 | 
			
		||||
    typedef RealF Realified;
 | 
			
		||||
    typedef scalar_typeD DoublePrecision;
 | 
			
		||||
    typedef scalar_typeD DoublePrecision2;
 | 
			
		||||
  };
 | 
			
		||||
  template<> struct GridTypeMapper<std::complex<double> > : public GridTypeMapper_Base {
 | 
			
		||||
    typedef std::complex<double> scalar_type;
 | 
			
		||||
    typedef std::complex<double> scalar_typeD;
 | 
			
		||||
    typedef scalar_type vector_type;
 | 
			
		||||
    typedef scalar_typeD vector_typeD;
 | 
			
		||||
    typedef scalar_type tensor_reduced;
 | 
			
		||||
    typedef scalar_type scalar_object;
 | 
			
		||||
    typedef scalar_typeD scalar_objectD;
 | 
			
		||||
    typedef scalar_type Complexified;
 | 
			
		||||
    typedef RealD Realified;
 | 
			
		||||
    typedef scalar_typeD DoublePrecision;
 | 
			
		||||
    typedef scalar_typeD DoublePrecision2;
 | 
			
		||||
  };
 | 
			
		||||
#endif
 | 
			
		||||
 | 
			
		||||
  template<> struct GridTypeMapper<ComplexD2> : public GridTypeMapper_Base {
 | 
			
		||||
    typedef ComplexD2 scalar_type;
 | 
			
		||||
    typedef ComplexD2 scalar_typeD;
 | 
			
		||||
 
 | 
			
		||||
@@ -16,40 +16,54 @@ void acceleratorInit(void)
  char * localRankStr = NULL;
  int rank = 0, world_rank=0;
#define ENV_LOCAL_RANK_OMPI    "OMPI_COMM_WORLD_LOCAL_RANK"
#define ENV_LOCAL_RANK_MVAPICH "MV2_COMM_WORLD_LOCAL_RANK"
#define ENV_RANK_OMPI          "OMPI_COMM_WORLD_RANK"
#define ENV_LOCAL_RANK_SLURM   "SLURM_LOCALID"
#define ENV_RANK_SLURM         "SLURM_PROCID"
#define ENV_LOCAL_RANK_MVAPICH "MV2_COMM_WORLD_LOCAL_RANK"
#define ENV_RANK_MVAPICH       "MV2_COMM_WORLD_RANK"
  // We extract the local rank initialization using an environment variable
  if ((localRankStr = getenv(ENV_LOCAL_RANK_OMPI)) != NULL)
  {
  if ((localRankStr = getenv(ENV_LOCAL_RANK_OMPI)) != NULL) {
    printf("OPENMPI detected\n");
    rank = atoi(localRankStr);
  }
  if ((localRankStr = getenv(ENV_LOCAL_RANK_MVAPICH)) != NULL)
  {
  } else if ((localRankStr = getenv(ENV_LOCAL_RANK_MVAPICH)) != NULL) {
    printf("MVAPICH detected\n");
    rank = atoi(localRankStr);
  } else if ((localRankStr = getenv(ENV_LOCAL_RANK_SLURM)) != NULL) {
    printf("SLURM detected\n");
    rank = atoi(localRankStr);
  } else {
    printf("MPI version is unknown - bad things may happen\n");
  }
  if ((localRankStr = getenv(ENV_RANK_OMPI   )) != NULL) { world_rank = atoi(localRankStr);}
  if ((localRankStr = getenv(ENV_RANK_MVAPICH)) != NULL) { world_rank = atoi(localRankStr);}
  if ((localRankStr = getenv(ENV_RANK_SLURM  )) != NULL) { world_rank = atoi(localRankStr);}

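The hunk above adds SLURM to the environment-variable probes used to determine the local and world rank. A standalone sketch of the same fallback chain (the rank_from_env helper is my name, not Grid's API):

// Standalone sketch of the fallback chain: OpenMPI, then MVAPICH, then SLURM,
// defaulting to local rank 0 if none of the variables is set.
#include <cstdio>
#include <cstdlib>

static int rank_from_env(void) {
  const char *vars[] = {"OMPI_COMM_WORLD_LOCAL_RANK",
                        "MV2_COMM_WORLD_LOCAL_RANK",
                        "SLURM_LOCALID"};
  for (const char *v : vars) {
    if (const char *s = getenv(v)) return atoi(s);
  }
  printf("MPI flavour not recognised - defaulting to local rank 0\n");
  return 0;
}

int main() { printf("local rank %d\n", rank_from_env()); return 0; }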
  size_t totalDeviceMem=0;
 | 
			
		||||
  for (int i = 0; i < nDevices; i++) {
 | 
			
		||||
 | 
			
		||||
#define GPU_PROP_FMT(canMapHostMemory,FMT)     printf("AcceleratorCudaInit:   " #canMapHostMemory ": " FMT" \n",prop.canMapHostMemory);
 | 
			
		||||
#define GPU_PROP_FMT(canMapHostMemory,FMT)     printf("AcceleratorCudaInit[%d]:   " #canMapHostMemory ": " FMT" \n",rank,prop.canMapHostMemory);
 | 
			
		||||
#define GPU_PROP(canMapHostMemory)             GPU_PROP_FMT(canMapHostMemory,"%d");
 | 
			
		||||
    cudaGetDeviceProperties(&gpu_props[i], i);
 | 
			
		||||
    cudaDeviceProp prop; 
 | 
			
		||||
    prop = gpu_props[i];
 | 
			
		||||
    totalDeviceMem = prop.totalGlobalMem;
 | 
			
		||||
    if ( world_rank == 0) {
 | 
			
		||||
      printf("AcceleratorCudaInit: ========================\n");
 | 
			
		||||
      printf("AcceleratorCudaInit: Device Number    : %d\n", i);
 | 
			
		||||
      printf("AcceleratorCudaInit: ========================\n");
 | 
			
		||||
      printf("AcceleratorCudaInit: Device identifier: %s\n", prop.name);
 | 
			
		||||
#ifndef GRID_IBM_SUMMIT
 | 
			
		||||
      if ( i==rank ) {
 | 
			
		||||
	printf("AcceleratorCudaInit[%d]: ========================\n",rank);
 | 
			
		||||
	printf("AcceleratorCudaInit[%d]: Device Number    : %d\n", rank,i);
 | 
			
		||||
	printf("AcceleratorCudaInit[%d]: ========================\n",rank);
 | 
			
		||||
	printf("AcceleratorCudaInit[%d]: Device identifier: %s\n",rank, prop.name);
 | 
			
		||||
 | 
			
		||||
      GPU_PROP_FMT(totalGlobalMem,"%lld");
 | 
			
		||||
      GPU_PROP(managedMemory);
 | 
			
		||||
      GPU_PROP(isMultiGpuBoard);
 | 
			
		||||
      GPU_PROP(warpSize);
 | 
			
		||||
 | 
			
		||||
	GPU_PROP_FMT(totalGlobalMem,"%lld");
 | 
			
		||||
	GPU_PROP(managedMemory);
 | 
			
		||||
	GPU_PROP(isMultiGpuBoard);
 | 
			
		||||
	GPU_PROP(warpSize);
 | 
			
		||||
	GPU_PROP(pciBusID);
 | 
			
		||||
	GPU_PROP(pciDeviceID);
 | 
			
		||||
      }
 | 
			
		||||
#endif
 | 
			
		||||
      //      GPU_PROP(unifiedAddressing);
 | 
			
		||||
      //      GPU_PROP(l2CacheSize);
 | 
			
		||||
      //      GPU_PROP(singleToDoublePrecisionPerfRatio);
 | 
			
		||||
@@ -61,9 +75,9 @@ void acceleratorInit(void)
 | 
			
		||||
 | 
			
		||||
#ifdef GRID_IBM_SUMMIT
 | 
			
		||||
  // IBM Jsrun makes cuda Device numbering screwy and not match rank
 | 
			
		||||
  if ( world_rank == 0 )  printf("AcceleratorCudaInit: IBM Summit or similar - NOT setting device to node rank\n");
 | 
			
		||||
  if ( world_rank == 0 )  printf("AcceleratorCudaInit: IBM Summit or similar - use default device\n");
 | 
			
		||||
#else
 | 
			
		||||
  if ( world_rank == 0 )  printf("AcceleratorCudaInit: setting device to node rank\n");
 | 
			
		||||
  printf("AcceleratorCudaInit: rank %d setting device to node rank %d\n",world_rank,rank);
 | 
			
		||||
  cudaSetDevice(rank);
 | 
			
		||||
#endif
 | 
			
		||||
  if ( world_rank == 0 )  printf("AcceleratorCudaInit: ================================================\n");
 | 
			
		||||
@@ -96,20 +110,24 @@ void acceleratorInit(void)
 | 
			
		||||
  if ((localRankStr = getenv(ENV_RANK_OMPI   )) != NULL) { world_rank = atoi(localRankStr);}
 | 
			
		||||
  if ((localRankStr = getenv(ENV_RANK_MVAPICH)) != NULL) { world_rank = atoi(localRankStr);}
 | 
			
		||||
 | 
			
		||||
  printf("world_rank %d has %d devices\n",world_rank,nDevices);
 | 
			
		||||
  size_t totalDeviceMem=0;
 | 
			
		||||
  for (int i = 0; i < nDevices; i++) {
 | 
			
		||||
 | 
			
		||||
#define GPU_PROP_FMT(canMapHostMemory,FMT)     printf("AcceleratorHipInit:   " #canMapHostMemory ": " FMT" \n",prop.canMapHostMemory);
 | 
			
		||||
#define GPU_PROP(canMapHostMemory)             GPU_PROP_FMT(canMapHostMemory,"%d");
 | 
			
		||||
    
 | 
			
		||||
    hipGetDeviceProperties(&gpu_props[i], i);
 | 
			
		||||
    hipDeviceProp_t prop; 
 | 
			
		||||
    prop = gpu_props[i];
 | 
			
		||||
    totalDeviceMem = prop.totalGlobalMem;
 | 
			
		||||
    if ( world_rank == 0) {
 | 
			
		||||
      hipDeviceProp_t prop; 
 | 
			
		||||
      prop = gpu_props[i];
 | 
			
		||||
      printf("AcceleratorHipInit: ========================\n");
 | 
			
		||||
      printf("AcceleratorHipInit: Device Number    : %d\n", i);
 | 
			
		||||
      printf("AcceleratorHipInit: ========================\n");
 | 
			
		||||
      printf("AcceleratorHipInit: Device identifier: %s\n", prop.name);
 | 
			
		||||
 | 
			
		||||
      GPU_PROP_FMT(totalGlobalMem,"%lu");
 | 
			
		||||
      //      GPU_PROP(managedMemory);
 | 
			
		||||
      GPU_PROP(isMultiGpuBoard);
 | 
			
		||||
      GPU_PROP(warpSize);
 | 
			
		||||
@@ -118,6 +136,7 @@ void acceleratorInit(void)
 | 
			
		||||
      //      GPU_PROP(singleToDoublePrecisionPerfRatio);
 | 
			
		||||
    }
 | 
			
		||||
  }
 | 
			
		||||
  MemoryManager::DeviceMaxBytes = (8*totalDeviceMem)/10; // Assume 80% ours
 | 
			
		||||
#undef GPU_PROP_FMT    
 | 
			
		||||
#undef GPU_PROP
 | 
			
		||||
#ifdef GRID_IBM_SUMMIT
 | 
			
		||||
 
 | 
			
		||||
@@ -70,6 +70,7 @@ NAMESPACE_BEGIN(Grid);
//
// Memory management:
//
//    int   acceleratorIsCommunicable(void *pointer);
//    void *acceleratorAllocShared(size_t bytes);
//    void acceleratorFreeShared(void *ptr);
//
@@ -90,6 +91,7 @@ void     acceleratorInit(void);
//////////////////////////////////////////////

#ifdef GRID_CUDA
#include <cuda.h>

#ifdef __CUDA_ARCH__
#define GRID_SIMT
@@ -165,6 +167,16 @@ inline void acceleratorFreeShared(void *ptr){ cudaFree(ptr);};
inline void acceleratorFreeDevice(void *ptr){ cudaFree(ptr);};
inline void acceleratorCopyToDevice(void *from,void *to,size_t bytes)  { cudaMemcpy(to,from,bytes, cudaMemcpyHostToDevice);}
inline void acceleratorCopyFromDevice(void *from,void *to,size_t bytes){ cudaMemcpy(to,from,bytes, cudaMemcpyDeviceToHost);}
inline int  acceleratorIsCommunicable(void *ptr)
{
  int uvm;
  auto
  cuerr = cuPointerGetAttribute( &uvm, CU_POINTER_ATTRIBUTE_IS_MANAGED, (CUdeviceptr) ptr);
  assert(cuerr == cudaSuccess );
  if(uvm) return 0;
  else    return 1;
}

#endif

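acceleratorIsCommunicable, added above for CUDA, returns 0 for managed (UVM) allocations and 1 otherwise. A hedged usage sketch of how a caller might act on that answer (the control flow is my own illustration, not Grid's comms path):

// Illustration only: decide whether a pointer can be handed straight to the
// transport, based on acceleratorIsCommunicable as declared above.
void send_region(void *ptr, size_t bytes) {
  if (acceleratorIsCommunicable(ptr)) {
    // hand ptr directly to the MPI / shared-memory transport
  } else {
    // copy into a communicable staging buffer first, then send that
  }
  (void)bytes;  // unused in this sketch
}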
//////////////////////////////////////////////
 | 
			
		||||
@@ -219,6 +231,15 @@ inline void acceleratorFreeShared(void *ptr){free(ptr,*theGridAccelerator);};
 | 
			
		||||
inline void acceleratorFreeDevice(void *ptr){free(ptr,*theGridAccelerator);};
 | 
			
		||||
inline void acceleratorCopyToDevice(void *from,void *to,size_t bytes)  { theGridAccelerator->memcpy(to,from,bytes); theGridAccelerator->wait();}
 | 
			
		||||
inline void acceleratorCopyFromDevice(void *from,void *to,size_t bytes){ theGridAccelerator->memcpy(to,from,bytes); theGridAccelerator->wait();}
 | 
			
		||||
inline int  acceleratorIsCommunicable(void *ptr)
 | 
			
		||||
{
 | 
			
		||||
#if 0
 | 
			
		||||
  auto uvm = cl::sycl::usm::get_pointer_type(ptr, theGridAccelerator->get_context());
 | 
			
		||||
  if ( uvm = cl::sycl::usm::alloc::shared ) return 1;
 | 
			
		||||
  else return 0;
 | 
			
		||||
#endif
 | 
			
		||||
  return 1;
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
#endif
 | 
			
		||||
 | 
			
		||||
@@ -286,18 +307,15 @@ void LambdaApply(uint64_t numx, uint64_t numy, uint64_t numz, lambda Lambda)

inline void *acceleratorAllocShared(size_t bytes)
{
#if 0
  void *ptr=NULL;
  auto err = hipMallocManaged((void **)&ptr,bytes);
  if( err != hipSuccess ) {
    ptr = (void *) NULL;
    printf(" hipMallocManaged failed for %d %s \n",bytes,hipGetErrorString(err));
    printf(" hipMallocManaged failed for %ld %s \n",bytes,hipGetErrorString(err));
  }
  return ptr;
#else
  return malloc(bytes);
#endif
};
inline int  acceleratorIsCommunicable(void *ptr){ return 1; }

inline void *acceleratorAllocDevice(size_t bytes)
{
@@ -305,7 +323,7 @@ inline void *acceleratorAllocDevice(size_t bytes)
  auto err = hipMalloc((void **)&ptr,bytes);
  if( err != hipSuccess ) {
    ptr = (void *) NULL;
    printf(" hipMalloc failed for %d %s \n",bytes,hipGetErrorString(err));
    printf(" hipMalloc failed for %ld %s \n",bytes,hipGetErrorString(err));
  }
  return ptr;
};
@@ -352,6 +370,7 @@ accelerator_inline int acceleratorSIMTlane(int Nsimd) { return 0; } // CUDA spec
inline void acceleratorCopyToDevice(void *from,void *to,size_t bytes)  { memcpy(to,from,bytes);}
inline void acceleratorCopyFromDevice(void *from,void *to,size_t bytes){ memcpy(to,from,bytes);}

inline int  acceleratorIsCommunicable(void *ptr){ return 1; }
#ifdef HAVE_MM_MALLOC_H
inline void *acceleratorAllocShared(size_t bytes){return _mm_malloc(bytes,GRID_ALLOC_ALIGN);};
inline void *acceleratorAllocDevice(size_t bytes){return _mm_malloc(bytes,GRID_ALLOC_ALIGN);};

@@ -99,10 +99,10 @@ inline std::ostream & operator<<(std::ostream &os, const AcceleratorVector<T,_nd
{
  os << "[";
  for(int s=0;s<v.size();s++) {
    os << v[s] << " ";
  }
  if (v.size() > 0) {
    os << "\b";
    os << v[s];
    if( s < (v.size()-1) ){
      os << " ";
    }
  }
  os << "]";
  return os;

@@ -318,6 +318,13 @@ void Grid_init(int *argc,char ***argv)
    Grid_debug_handler_init();
  }

#if defined(A64FX)
  if( GridCmdOptionExists(*argv,*argv+*argc,"--comms-overlap") ){
    std::cout << "Option --comms-overlap currently not supported on QPACE4. Exiting." << std::endl;
    exit(EXIT_FAILURE);
  }
#endif

  //////////////////////////////////////////////////////////
  // Memory manager
  //////////////////////////////////////////////////////////
@@ -370,9 +377,7 @@ void Grid_init(int *argc,char ***argv)
    std::cout << GridLogMessage << "Mapped stencil comms buffers as MAP_HUGETLB "<<std::endl;
  }

#ifndef GRID_UVM
  std::cout << GridLogMessage << "MemoryManager Cache "<< MemoryManager::DeviceMaxBytes <<" bytes "<<std::endl;
#endif
  MemoryManager::InitMessage();

  if( GridCmdOptionExists(*argv,*argv+*argc,"--debug-mem") ){
    MemoryProfiler::debug = true;
@@ -467,7 +472,7 @@ void Grid_init(int *argc,char ***argv)
  if( GridCmdOptionExists(*argv,*argv+*argc,"--lebesgue") ){
    LebesgueOrder::UseLebesgueOrder=1;
  }
  CartesianCommunicator::nCommThreads = -1;
  CartesianCommunicator::nCommThreads = 1;
  if( GridCmdOptionExists(*argv,*argv+*argc,"--comms-threads") ){
    arg= GridCmdOptionPayload(*argv,*argv+*argc,"--comms-threads");
    GridCmdOptionInt(arg,CartesianCommunicator::nCommThreads);

README (33 changed lines)
@@ -111,11 +111,10 @@ Now you can execute the `configure` script to generate makefiles (here from a bu
 | 
			
		||||
 | 
			
		||||
``` bash
 | 
			
		||||
mkdir build; cd build
 | 
			
		||||
../configure --enable-precision=double --enable-simd=AVX --enable-comms=mpi-auto --prefix=<path>
 | 
			
		||||
../configure --enable-simd=AVX --enable-comms=mpi-auto --prefix=<path>
 | 
			
		||||
```
 | 
			
		||||
 | 
			
		||||
where `--enable-precision=` set the default precision,
 | 
			
		||||
`--enable-simd=` set the SIMD type, `--enable-
 | 
			
		||||
where `--enable-simd=` set the SIMD type, `--enable-
 | 
			
		||||
comms=`, and `<path>` should be replaced by the prefix path where you want to
 | 
			
		||||
install Grid. Other options are detailed in the next section; you can also use `configure
--help` to display them. Like any other program using GNU autotools, the
 | 
			
		||||
@@ -146,8 +145,8 @@ If you want to build all the tests at once just use `make tests`.
 | 
			
		||||
- `--enable-numa`: enable NUMA first touch optimisation
 | 
			
		||||
- `--enable-simd=<code>`: setup Grid for the SIMD target `<code>` (default: `GEN`). A list of possible SIMD targets is detailed in a section below.
 | 
			
		||||
- `--enable-gen-simd-width=<size>`: select the size (in bytes) of the generic SIMD vector type (default: 32 bytes).
 | 
			
		||||
- `--enable-precision={single|double}`: set the default precision (default: `double`).
 | 
			
		||||
- `--enable-comms=<comm>`: Use `<comm>` for message passing (default: `none`). A list of possible SIMD targets is detailed in a section below.
 | 
			
		||||
- `--enable-precision={single|double}`: set the default precision (default: `double`). **Deprecated option**
 | 
			
		||||
- `--enable-comms=<comm>`: Use `<comm>` for message passing (default: `none`). A list of possible SIMD targets is detailed in a section below.
 | 
			
		||||
- `--enable-rng={sitmo|ranlux48|mt19937}`: choose the RNG (default: `sitmo `).
 | 
			
		||||
- `--disable-timers`: disable system dependent high-resolution timers.
 | 
			
		||||
- `--enable-chroma`: enable Chroma regression tests.
 | 
			
		||||
@@ -201,8 +200,7 @@ Alternatively, some CPU codenames can be directly used:
 | 
			
		||||
The following configuration is recommended for the Intel Knights Landing platform:
 | 
			
		||||
 | 
			
		||||
``` bash
 | 
			
		||||
../configure --enable-precision=double\
 | 
			
		||||
             --enable-simd=KNL        \
 | 
			
		||||
../configure --enable-simd=KNL        \
 | 
			
		||||
             --enable-comms=mpi-auto  \
 | 
			
		||||
             --enable-mkl             \
 | 
			
		||||
             CXX=icpc MPICXX=mpiicpc
 | 
			
		||||
@@ -212,8 +210,7 @@ The MKL flag enables use of BLAS and FFTW from the Intel Math Kernels Library.
 | 
			
		||||
If you are working on a Cray machine that does not use the `mpiicpc` wrapper, please use:
 | 
			
		||||
 | 
			
		||||
``` bash
 | 
			
		||||
../configure --enable-precision=double\
 | 
			
		||||
             --enable-simd=KNL        \
 | 
			
		||||
../configure --enable-simd=KNL        \
 | 
			
		||||
             --enable-comms=mpi       \
 | 
			
		||||
             --enable-mkl             \
 | 
			
		||||
             CXX=CC CC=cc
 | 
			
		||||
@@ -232,8 +229,7 @@ for interior communication. This is the mpi3 communications implementation.
 | 
			
		||||
We recommend four ranks per node for best performance, but optimum is local volume dependent.
 | 
			
		||||
 | 
			
		||||
``` bash
 | 
			
		||||
../configure --enable-precision=double\
 | 
			
		||||
             --enable-simd=KNL        \
 | 
			
		||||
../configure --enable-simd=KNL        \
 | 
			
		||||
             --enable-comms=mpi3-auto \
 | 
			
		||||
             --enable-mkl             \
 | 
			
		||||
             CC=icpc MPICXX=mpiicpc 
 | 
			
		||||
@@ -244,8 +240,7 @@ We recommend four ranks per node for best performance, but optimum is local volu
 | 
			
		||||
The following configuration is recommended for the Intel Haswell platform:
 | 
			
		||||
 | 
			
		||||
``` bash
 | 
			
		||||
../configure --enable-precision=double\
 | 
			
		||||
             --enable-simd=AVX2       \
 | 
			
		||||
../configure --enable-simd=AVX2       \
 | 
			
		||||
             --enable-comms=mpi3-auto \
 | 
			
		||||
             --enable-mkl             \
 | 
			
		||||
             CXX=icpc MPICXX=mpiicpc
 | 
			
		||||
@@ -262,8 +257,7 @@ where `<path>` is the UNIX prefix where GMP and MPFR are installed.
 | 
			
		||||
If you are working on a Cray machine that does not use the `mpiicpc` wrapper, please use:
 | 
			
		||||
 | 
			
		||||
``` bash
 | 
			
		||||
../configure --enable-precision=double\
 | 
			
		||||
             --enable-simd=AVX2       \
 | 
			
		||||
../configure --enable-simd=AVX2       \
 | 
			
		||||
             --enable-comms=mpi3      \
 | 
			
		||||
             --enable-mkl             \
 | 
			
		||||
             CXX=CC CC=cc
 | 
			
		||||
@@ -280,8 +274,7 @@ This is the default.
 | 
			
		||||
The following configuration is recommended for the Intel Skylake platform:
 | 
			
		||||
 | 
			
		||||
``` bash
 | 
			
		||||
../configure --enable-precision=double\
 | 
			
		||||
             --enable-simd=AVX512     \
 | 
			
		||||
../configure --enable-simd=AVX512     \
 | 
			
		||||
             --enable-comms=mpi3      \
 | 
			
		||||
             --enable-mkl             \
 | 
			
		||||
             CXX=mpiicpc
 | 
			
		||||
@@ -298,8 +291,7 @@ where `<path>` is the UNIX prefix where GMP and MPFR are installed.
 | 
			
		||||
If you are working on a Cray machine that does not use the `mpiicpc` wrapper, please use:
 | 
			
		||||
 | 
			
		||||
``` bash
 | 
			
		||||
../configure --enable-precision=double\
 | 
			
		||||
             --enable-simd=AVX512     \
 | 
			
		||||
../configure --enable-simd=AVX512     \
 | 
			
		||||
             --enable-comms=mpi3      \
 | 
			
		||||
             --enable-mkl             \
 | 
			
		||||
             CXX=CC CC=cc
 | 
			
		||||
@@ -330,8 +322,7 @@ and 8 threads per rank.
 | 
			
		||||
The following configuration is recommended for the AMD EPYC platform.
 | 
			
		||||
 | 
			
		||||
``` bash
 | 
			
		||||
../configure --enable-precision=double\
 | 
			
		||||
             --enable-simd=AVX2       \
 | 
			
		||||
../configure --enable-simd=AVX2       \
 | 
			
		||||
             --enable-comms=mpi3 \
 | 
			
		||||
             CXX=mpicxx 
 | 
			
		||||
```
 | 
			
		||||
 
README.md (33 changed lines)
@@ -115,11 +115,10 @@ Now you can execute the `configure` script to generate makefiles (here from a bu
 | 
			
		||||
 | 
			
		||||
``` bash
 | 
			
		||||
mkdir build; cd build
 | 
			
		||||
../configure --enable-precision=double --enable-simd=AVX --enable-comms=mpi-auto --prefix=<path>
 | 
			
		||||
../configure --enable-simd=AVX --enable-comms=mpi-auto --prefix=<path>
 | 
			
		||||
```
 | 
			
		||||
 | 
			
		||||
where `--enable-precision=` set the default precision,
 | 
			
		||||
`--enable-simd=` set the SIMD type, `--enable-
 | 
			
		||||
where `--enable-simd=` set the SIMD type, `--enable-
 | 
			
		||||
comms=`, and `<path>` should be replaced by the prefix path where you want to
 | 
			
		||||
install Grid. Other options are detailed in the next section; you can also use `configure
--help` to display them. Like any other program using GNU autotools, the
 | 
			
		||||
@@ -150,8 +149,8 @@ If you want to build all the tests at once just use `make tests`.
 | 
			
		||||
- `--enable-numa`: enable NUMA first touch optimisation
 | 
			
		||||
- `--enable-simd=<code>`: setup Grid for the SIMD target `<code>` (default: `GEN`). A list of possible SIMD targets is detailed in a section below.
 | 
			
		||||
- `--enable-gen-simd-width=<size>`: select the size (in bytes) of the generic SIMD vector type (default: 32 bytes).
 | 
			
		||||
- `--enable-precision={single|double}`: set the default precision (default: `double`).
 | 
			
		||||
- `--enable-comms=<comm>`: Use `<comm>` for message passing (default: `none`). A list of possible SIMD targets is detailed in a section below.
 | 
			
		||||
- `--enable-precision={single|double}`: set the default precision (default: `double`). **Deprecated option**
 | 
			
		||||
- `--enable-comms=<comm>`: Use `<comm>` for message passing (default: `none`). A list of possible SIMD targets is detailed in a section below.
 | 
			
		||||
- `--enable-rng={sitmo|ranlux48|mt19937}`: choose the RNG (default: `sitmo `).
 | 
			
		||||
- `--disable-timers`: disable system dependent high-resolution timers.
 | 
			
		||||
- `--enable-chroma`: enable Chroma regression tests.
 | 
			
		||||
@@ -205,8 +204,7 @@ Alternatively, some CPU codenames can be directly used:
 | 
			
		||||
The following configuration is recommended for the Intel Knights Landing platform:
 | 
			
		||||
 | 
			
		||||
``` bash
 | 
			
		||||
../configure --enable-precision=double\
 | 
			
		||||
             --enable-simd=KNL        \
 | 
			
		||||
../configure --enable-simd=KNL        \
 | 
			
		||||
             --enable-comms=mpi-auto  \
 | 
			
		||||
             --enable-mkl             \
 | 
			
		||||
             CXX=icpc MPICXX=mpiicpc
 | 
			
		||||
@@ -216,8 +214,7 @@ The MKL flag enables use of BLAS and FFTW from the Intel Math Kernels Library.
 | 
			
		||||
If you are working on a Cray machine that does not use the `mpiicpc` wrapper, please use:
 | 
			
		||||
 | 
			
		||||
``` bash
 | 
			
		||||
../configure --enable-precision=double\
 | 
			
		||||
             --enable-simd=KNL        \
 | 
			
		||||
../configure --enable-simd=KNL        \
 | 
			
		||||
             --enable-comms=mpi       \
 | 
			
		||||
             --enable-mkl             \
 | 
			
		||||
             CXX=CC CC=cc
 | 
			
		||||
@@ -236,8 +233,7 @@ for interior communication. This is the mpi3 communications implementation.
 | 
			
		||||
We recommend four ranks per node for best performance, but optimum is local volume dependent.
 | 
			
		||||
 | 
			
		||||
``` bash
 | 
			
		||||
../configure --enable-precision=double\
 | 
			
		||||
             --enable-simd=KNL        \
 | 
			
		||||
../configure --enable-simd=KNL        \
 | 
			
		||||
             --enable-comms=mpi3-auto \
 | 
			
		||||
             --enable-mkl             \
 | 
			
		||||
             CC=icpc MPICXX=mpiicpc 
 | 
			
		||||
@@ -248,8 +244,7 @@ We recommend four ranks per node for best performance, but optimum is local volu
 | 
			
		||||
The following configuration is recommended for the Intel Haswell platform:
 | 
			
		||||
 | 
			
		||||
``` bash
 | 
			
		||||
../configure --enable-precision=double\
 | 
			
		||||
             --enable-simd=AVX2       \
 | 
			
		||||
../configure --enable-simd=AVX2       \
 | 
			
		||||
             --enable-comms=mpi3-auto \
 | 
			
		||||
             --enable-mkl             \
 | 
			
		||||
             CXX=icpc MPICXX=mpiicpc
 | 
			
		||||
@@ -266,8 +261,7 @@ where `<path>` is the UNIX prefix where GMP and MPFR are installed.
 | 
			
		||||
If you are working on a Cray machine that does not use the `mpiicpc` wrapper, please use:
 | 
			
		||||
 | 
			
		||||
``` bash
 | 
			
		||||
../configure --enable-precision=double\
 | 
			
		||||
             --enable-simd=AVX2       \
 | 
			
		||||
../configure --enable-simd=AVX2       \
 | 
			
		||||
             --enable-comms=mpi3      \
 | 
			
		||||
             --enable-mkl             \
 | 
			
		||||
             CXX=CC CC=cc
 | 
			
		||||
@@ -284,8 +278,7 @@ This is the default.
 | 
			
		||||
The following configuration is recommended for the Intel Skylake platform:
 | 
			
		||||
 | 
			
		||||
``` bash
 | 
			
		||||
../configure --enable-precision=double\
 | 
			
		||||
             --enable-simd=AVX512     \
 | 
			
		||||
../configure --enable-simd=AVX512     \
 | 
			
		||||
             --enable-comms=mpi3      \
 | 
			
		||||
             --enable-mkl             \
 | 
			
		||||
             CXX=mpiicpc
 | 
			
		||||
@@ -302,8 +295,7 @@ where `<path>` is the UNIX prefix where GMP and MPFR are installed.
 | 
			
		||||
If you are working on a Cray machine that does not use the `mpiicpc` wrapper, please use:
 | 
			
		||||
 | 
			
		||||
``` bash
 | 
			
		||||
../configure --enable-precision=double\
 | 
			
		||||
             --enable-simd=AVX512     \
 | 
			
		||||
../configure --enable-simd=AVX512     \
 | 
			
		||||
             --enable-comms=mpi3      \
 | 
			
		||||
             --enable-mkl             \
 | 
			
		||||
             CXX=CC CC=cc
 | 
			
		||||
@@ -334,8 +326,7 @@ and 8 threads per rank.
 | 
			
		||||
The following configuration is recommended for the AMD EPYC platform.
 | 
			
		||||
 | 
			
		||||
``` bash
 | 
			
		||||
../configure --enable-precision=double\
 | 
			
		||||
             --enable-simd=AVX2       \
 | 
			
		||||
../configure --enable-simd=AVX2       \
 | 
			
		||||
             --enable-comms=mpi3 \
 | 
			
		||||
             CXX=mpicxx 
 | 
			
		||||
```
 | 
			
		||||
 
SVE_README.txt (new file, 89 lines)
@@ -0,0 +1,89 @@
 | 
			
		||||
* gcc 10.1 prebuild, QPACE4 interactive login w/ MPI
 | 
			
		||||
 | 
			
		||||
scl enable gcc-toolset-10 bash
 | 
			
		||||
module load mpi/openmpi-aarch64
 | 
			
		||||
 | 
			
		||||
../configure --enable-simd=A64FX --enable-comms=mpi3 --enable-shm=shmget CXX=mpicxx CC=mpicc
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
================================== deprecated ================================================
 | 
			
		||||
 | 
			
		||||
* gcc 10.1 prebuild, QPACE4 interactive login
 | 
			
		||||
 | 
			
		||||
scl enable gcc-toolset-10 bash
 | 
			
		||||
 | 
			
		||||
../configure --without-hdf5 --enable-gen-simd-width=64 --enable-simd=GEN --enable-comms=none --enable-openmp CXX=g++ CC=gcc CXXFLAGS="-std=c++11 -march=armv8-a+sve -msve-vector-bits=512 -fno-gcse -DA64FXFIXEDSIZE -DA64FXASM -DDSLASHINTRIN"
 | 
			
		||||
 | 
			
		||||
* gcc 10.1 prebuild w/ MPI, QPACE4 interactive login
 | 
			
		||||
 | 
			
		||||
scl enable gcc-toolset-10 bash
 | 
			
		||||
module load mpi/openmpi-aarch64
 | 
			
		||||
 | 
			
		||||
../configure --without-hdf5 --enable-gen-simd-width=64 --enable-simd=GEN --enable-comms=mpi-auto --enable-shm=shmget --enable-openmp CXX=mpicxx CC=mpicc CXXFLAGS="-std=c++11 -march=armv8-a+sve -msve-vector-bits=512 -fno-gcse -DA64FXFIXEDSIZE -DA64FXASM -DDSLASHINTRIN"
 | 
			
		||||
 | 
			
		||||
------------------------------------------------------------------------------
 | 
			
		||||
 | 
			
		||||
* armclang 20.2 (qp4)
 | 
			
		||||
 | 
			
		||||
../configure --without-hdf5 --enable-gen-simd-width=64 --enable-simd=GEN --enable-comms=none --enable-openmp CXX=armclang++ CC=armclang CXXFLAGS="-std=c++11 -mcpu=a64fx -DA64FX -DARMCLANGCOMPAT -DA64FXASM -DDSLASHINTRIN"
 | 
			
		||||
 | 
			
		||||
------------------------------------------------------------------------------
 | 
			
		||||
 | 
			
		||||
* gcc 10.0.1 VLA (merlin)
 | 
			
		||||
 | 
			
		||||
../configure --with-lime=/home/men04359/lime/c-lime --without-hdf5 --enable-gen-simd-width=64 --enable-simd=GEN --enable-comms=none --enable-openmp CXX=g++-10.0.1 CC=gcc-10.0.1 CXXFLAGS="-std=c++11 -march=armv8-a+sve -msve-vector-bits=512 -fno-gcse -DA64FX -DA64FXASM -DDSLASHINTRIN" LDFLAGS=-static GRID_LDFLAGS=-static MPI_CXXLDFLAGS=-static
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
* gcc 10.0.1 fixed-size ACLE (merlin)
 | 
			
		||||
 | 
			
		||||
../configure --with-lime=/home/men04359/lime/c-lime --without-hdf5 --enable-gen-simd-width=64 --enable-simd=GEN --enable-comms=none --enable-openmp CXX=g++-10.0.1 CC=gcc-10.0.1 CXXFLAGS="-std=c++11 -march=armv8-a+sve -msve-vector-bits=512 -fno-gcse -DA64FXFIXEDSIZE -DA64FXASM -DDSLASHINTRIN"
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
* gcc 10.0.1 fixed-size ACLE (fjt) w/ MPI
 | 
			
		||||
 | 
			
		||||
export OMPI_CC=gcc-10.0.1
 | 
			
		||||
export OMPI_CXX=g++-10.0.1
 | 
			
		||||
export MPICH_CC=gcc-10.0.1
 | 
			
		||||
export MPICH_CXX=g++-10.0.1
 | 
			
		||||
 | 
			
		||||
$ ../configure --without-hdf5 --enable-gen-simd-width=64 --enable-simd=GEN --enable-comms=mpi3 --enable-openmp CXX=mpiFCC CC=mpifcc CXXFLAGS="-std=c++11 -march=armv8-a+sve -msve-vector-bits=512 -fno-gcse -DA64FXFIXEDSIZE -DA64FXASM -DDSLASHINTRIN -DTOFU -I/opt/FJSVxtclanga/tcsds-1.2.25/include/mpi/fujitsu -lrt" LDFLAGS="-L/opt/FJSVxtclanga/tcsds-1.2.25/lib64 -lrt"
 | 
			
		||||
 | 
			
		||||
--------------------------------------------------------
 | 
			
		||||
 | 
			
		||||
* armclang 20.0 VLA (merlin)
 | 
			
		||||
 | 
			
		||||
../configure --with-lime=/home/men04359/lime/c-lime --without-hdf5 --enable-gen-simd-width=64 --enable-simd=GEN --enable-comms=none --enable-openmp CXX=armclang++ CC=armclang CXXFLAGS="-std=c++11 -fno-unroll-loops -mllvm -vectorizer-min-trip-count=2 -march=armv8-a+sve -DARMCLANGCOMPAT -DA64FX -DA64FXASM -DDSLASHINTRIN" LDFLAGS=-static GRID_LDFLAGS=-static MPI_CXXLDFLAGS=-static
 | 
			
		||||
 | 
			
		||||
TODO check ARMCLANGCOMPAT
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
* armclang 20.1 VLA (merlin)
 | 
			
		||||
 | 
			
		||||
../configure --with-lime=/home/men04359/lime/c-lime --without-hdf5 --enable-gen-simd-width=64 --enable-simd=GEN --enable-comms=none --enable-openmp CXX=armclang++ CC=armclang CXXFLAGS="-std=c++11 -mcpu=a64fx -DARMCLANGCOMPAT -DA64FX -DA64FXASM -DDSLASHINTRIN" LDFLAGS=-static GRID_LDFLAGS=-static MPI_CXXLDFLAGS=-static
 | 
			
		||||
 | 
			
		||||
TODO check ARMCLANGCOMPAT
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
* armclang 20.1 VLA (fjt cluster)
 | 
			
		||||
 | 
			
		||||
../configure --with-lime=$HOME/local --without-hdf5 --enable-gen-simd-width=64 --enable-simd=GEN --enable-comms=none --enable-openmp CXX=armclang++ CC=armclang CXXFLAGS="-std=c++11 -mcpu=a64fx -DARMCLANGCOMPAT -DA64FX -DA64FXASM -DDSLASHINTRIN -DTOFU"
 | 
			
		||||
 | 
			
		||||
TODO check ARMCLANGCOMPAT
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
* armclang 20.1 VLA w/MPI (fjt cluster)
 | 
			
		||||
 | 
			
		||||
../configure --with-lime=$HOME/local --without-hdf5 --enable-gen-simd-width=64 --enable-simd=GEN --enable-comms=mpi3 --enable-openmp CXX=mpiFCC CC=mpifcc CXXFLAGS="-std=c++11 -mcpu=a64fx -DA64FX -DA64FXASM -DDSLASHINTRIN -DTOFU -I/opt/FJSVxtclanga/tcsds-1.2.25/include/mpi/fujitsu -lrt" LDFLAGS="-L/opt/FJSVxtclanga/tcsds-1.2.25/lib64"
 | 
			
		||||
 | 
			
		||||
No ARMCLANGCOMPAT -> still correct ?
 | 
			
		||||
 | 
			
		||||
--------------------------------------------------------
 | 
			
		||||
 | 
			
		||||
* Fujitsu fcc
 | 
			
		||||
 | 
			
		||||
../configure --with-lime=$HOME/grid-a64fx/lime/c-lime --without-hdf5 --enable-gen-simd-width=64 --enable-simd=GEN --enable-comms=none --enable-openmp --with-mpfr=/home/users/gre/gre-1/grid-a64fx/mpfr-build/install CXX=FCC CC=fcc CXXFLAGS="-Nclang -Kfast -DA64FX -DA64FXASM -DDSLASHINTRIN"
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
* Fujitsu fcc w/ MPI
 | 
			
		||||
 | 
			
		||||
../configure --with-lime=$HOME/grid-a64fx/lime/c-lime --without-hdf5 --enable-gen-simd-width=64 --enable-simd=GEN --enable-comms=mpi --enable-openmp --with-mpfr=/home/users/gre/gre-1/grid-a64fx/mpfr-build/install CXX=mpiFCC CC=mpifcc CXXFLAGS="-Nclang -Kfast -DA64FX -DA64FXASM -DDSLASHINTRIN -DTOFU"
 | 
			
		||||
@@ -1,8 +1,16 @@
 | 
			
		||||
 | 
			
		||||
#include "Benchmark_IO.hpp"
 | 
			
		||||
 | 
			
		||||
#ifndef BENCH_IO_LMIN
 | 
			
		||||
#define BENCH_IO_LMIN 8
 | 
			
		||||
#endif
 | 
			
		||||
 | 
			
		||||
#ifndef BENCH_IO_LMAX
 | 
			
		||||
#define BENCH_IO_LMAX 40
 | 
			
		||||
#define BENCH_IO_LMAX 32
 | 
			
		||||
#endif
 | 
			
		||||
 | 
			
		||||
#ifndef BENCH_IO_NPASS
 | 
			
		||||
#define BENCH_IO_NPASS 10
 | 
			
		||||
#endif
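// BENCH_IO_LMIN/LMAX set the range of local lattice extents benchmarked (L^4 local
// volumes, stepping L by 2) and BENCH_IO_NPASS the number of complete passes used for
// the statistics below; all three can be overridden at compile time via the guards above.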
 | 
			
		||||
 | 
			
		||||
using namespace Grid;
 | 
			
		||||
@@ -12,37 +20,179 @@ std::string filestem(const int l)
 | 
			
		||||
  return "iobench_l" + std::to_string(l);
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
int vol(const int i)
 | 
			
		||||
{
 | 
			
		||||
  return BENCH_IO_LMIN + 2*i;
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
int volInd(const int l)
 | 
			
		||||
{
 | 
			
		||||
  return (l - BENCH_IO_LMIN)/2;
 | 
			
		||||
}
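// vol() and volInd() are inverse maps between a local lattice extent l
// (BENCH_IO_LMIN, BENCH_IO_LMIN+2, ..., BENCH_IO_LMAX) and its row index in the
// performance matrices filled in main().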
 | 
			
		||||
 | 
			
		||||
template <typename Mat>
 | 
			
		||||
void stats(Mat &mean, Mat &stdDev, const std::vector<Mat> &data)
 | 
			
		||||
{
 | 
			
		||||
  auto            nr = data[0].rows(), nc = data[0].cols();
 | 
			
		||||
  Eigen::MatrixXd sqSum(nr, nc);
 | 
			
		||||
  double          n = static_cast<double>(data.size());
 | 
			
		||||
 | 
			
		||||
  assert(n > 1.);
 | 
			
		||||
  mean  = Mat::Zero(nr, nc);
 | 
			
		||||
  sqSum = Mat::Zero(nr, nc);
 | 
			
		||||
  for (auto &d: data)
 | 
			
		||||
  {
 | 
			
		||||
    mean  += d;
 | 
			
		||||
    sqSum += d.cwiseProduct(d);
 | 
			
		||||
  }
 | 
			
		||||
  stdDev = ((sqSum - mean.cwiseProduct(mean)/n)/(n - 1.)).cwiseSqrt();
 | 
			
		||||
  mean  /= n;
 | 
			
		||||
}
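// stats() returns the element-wise sample mean and the unbiased (n-1) sample standard
// deviation of the data set: with n passes, mean = (sum d)/n and
// stdDev = sqrt((sum d^2 - (sum d)^2/n)/(n-1)), evaluated entry by entry.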
 | 
			
		||||
 | 
			
		||||
#define grid_printf(...) \
 | 
			
		||||
{\
 | 
			
		||||
  char _buf[1024];\
 | 
			
		||||
  sprintf(_buf, __VA_ARGS__);\
 | 
			
		||||
  MSG << _buf;\
 | 
			
		||||
}
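// grid_printf formats printf-style arguments into a local buffer and routes the result
// through the GridLogMessage stream, e.g. grid_printf("%4d %12.1f\n", l, mbps);
// (mbps here is just an illustrative variable name).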
 | 
			
		||||
 | 
			
		||||
enum {sRead = 0, sWrite = 1, gRead = 2, gWrite = 3};
 | 
			
		||||
 | 
			
		||||
int main (int argc, char ** argv)
 | 
			
		||||
{
 | 
			
		||||
#ifdef HAVE_LIME
 | 
			
		||||
  Grid_init(&argc,&argv);
 | 
			
		||||
 | 
			
		||||
  int64_t threads = GridThread::GetThreads();
 | 
			
		||||
  int64_t                      threads = GridThread::GetThreads();
 | 
			
		||||
  auto                         mpi     = GridDefaultMpi();
 | 
			
		||||
  unsigned int                 nVol    = (BENCH_IO_LMAX - BENCH_IO_LMIN)/2 + 1;
 | 
			
		||||
  unsigned int                 nRelVol = (BENCH_IO_LMAX - 24)/2 + 1;
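  // nVol counts all local volumes benchmarked; nRelVol counts only the larger volumes
  // (24^4 up to BENCH_IO_LMAX^4) that enter the volume-averaged summary further down.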
 | 
			
		||||
  std::vector<Eigen::MatrixXd> perf(BENCH_IO_NPASS, Eigen::MatrixXd::Zero(nVol, 4));
 | 
			
		||||
  std::vector<Eigen::VectorXd> avPerf(BENCH_IO_NPASS, Eigen::VectorXd::Zero(4));
 | 
			
		||||
  std::vector<int>             latt;
 | 
			
		||||
 | 
			
		||||
  MSG << "Grid is setup to use " << threads << " threads" << std::endl;
 | 
			
		||||
  MSG << SEP << std::endl;
 | 
			
		||||
  MSG << "Benchmark Lime write" << std::endl;
 | 
			
		||||
  MSG << SEP << std::endl;
 | 
			
		||||
  for (int l = 4; l <= BENCH_IO_LMAX; l += 2)
 | 
			
		||||
  MSG << "MPI partition " << mpi << std::endl;
 | 
			
		||||
  for (unsigned int i = 0; i < BENCH_IO_NPASS; ++i)
 | 
			
		||||
  {
 | 
			
		||||
    auto             mpi  = GridDefaultMpi();
 | 
			
		||||
    std::vector<int> latt = {l*mpi[0], l*mpi[1], l*mpi[2], l*mpi[3]};
 | 
			
		||||
    MSG << BIGSEP << std::endl;
 | 
			
		||||
    MSG << "Pass " << i + 1 << "/" << BENCH_IO_NPASS << std::endl;
 | 
			
		||||
    MSG << BIGSEP << std::endl;
 | 
			
		||||
    MSG << SEP << std::endl;
 | 
			
		||||
    MSG << "Benchmark std write" << std::endl;
 | 
			
		||||
    MSG << SEP << std::endl;
 | 
			
		||||
    for (int l = BENCH_IO_LMIN; l <= BENCH_IO_LMAX; l += 2)
 | 
			
		||||
    {
 | 
			
		||||
      latt = {l*mpi[0], l*mpi[1], l*mpi[2], l*mpi[3]};
 | 
			
		||||
 | 
			
		||||
    std::cout << "-- Local volume " << l << "^4" << std::endl;
 | 
			
		||||
    writeBenchmark<LatticeFermion>(latt, filestem(l), limeWrite<LatticeFermion>);
 | 
			
		||||
      MSG << "-- Local volume " << l << "^4" << std::endl;
 | 
			
		||||
      writeBenchmark<LatticeFermion>(latt, filestem(l), stdWrite<LatticeFermion>);
 | 
			
		||||
      perf[i](volInd(l), sWrite) = BinaryIO::lastPerf.mbytesPerSecond;
 | 
			
		||||
    }
 | 
			
		||||
 | 
			
		||||
    MSG << SEP << std::endl;
 | 
			
		||||
    MSG << "Benchmark std read" << std::endl;
 | 
			
		||||
    MSG << SEP << std::endl;
 | 
			
		||||
    for (int l = BENCH_IO_LMIN; l <= BENCH_IO_LMAX; l += 2)
 | 
			
		||||
    {
 | 
			
		||||
      latt = {l*mpi[0], l*mpi[1], l*mpi[2], l*mpi[3]};
 | 
			
		||||
 | 
			
		||||
      MSG << "-- Local volume " << l << "^4" << std::endl;
 | 
			
		||||
      readBenchmark<LatticeFermion>(latt, filestem(l), stdRead<LatticeFermion>);
 | 
			
		||||
      perf[i](volInd(l), sRead) = BinaryIO::lastPerf.mbytesPerSecond;
 | 
			
		||||
    }
 | 
			
		||||
 | 
			
		||||
  #ifdef HAVE_LIME
 | 
			
		||||
    MSG << SEP << std::endl;
 | 
			
		||||
    MSG << "Benchmark Grid C-Lime write" << std::endl;
 | 
			
		||||
    MSG << SEP << std::endl;
 | 
			
		||||
    for (int l = BENCH_IO_LMIN; l <= BENCH_IO_LMAX; l += 2)
 | 
			
		||||
    {
 | 
			
		||||
      latt = {l*mpi[0], l*mpi[1], l*mpi[2], l*mpi[3]};
 | 
			
		||||
 | 
			
		||||
      MSG << "-- Local volume " << l << "^4" << std::endl;
 | 
			
		||||
      writeBenchmark<LatticeFermion>(latt, filestem(l), limeWrite<LatticeFermion>);
 | 
			
		||||
      perf[i](volInd(l), gWrite) = BinaryIO::lastPerf.mbytesPerSecond;
 | 
			
		||||
    }
 | 
			
		||||
 | 
			
		||||
    MSG << SEP << std::endl;
 | 
			
		||||
    MSG << "Benchmark Grid C-Lime read" << std::endl;
 | 
			
		||||
    MSG << SEP << std::endl;
 | 
			
		||||
    for (int l = BENCH_IO_LMIN; l <= BENCH_IO_LMAX; l += 2)
 | 
			
		||||
    {
 | 
			
		||||
      latt = {l*mpi[0], l*mpi[1], l*mpi[2], l*mpi[3]};
 | 
			
		||||
 | 
			
		||||
      MSG << "-- Local volume " << l << "^4" << std::endl;
 | 
			
		||||
      readBenchmark<LatticeFermion>(latt, filestem(l), limeRead<LatticeFermion>);
 | 
			
		||||
      perf[i](volInd(l), gRead) = BinaryIO::lastPerf.mbytesPerSecond;
 | 
			
		||||
    }
 | 
			
		||||
#endif
 | 
			
		||||
    avPerf[i].fill(0.);
 | 
			
		||||
    for (int f = 0; f < 4; ++f)
 | 
			
		||||
    for (int l = 24; l <= BENCH_IO_LMAX; l += 2)
 | 
			
		||||
    {
 | 
			
		||||
      avPerf[i](f) += perf[i](volInd(l), f);
 | 
			
		||||
    }
 | 
			
		||||
    avPerf[i] /= nRelVol;
 | 
			
		||||
  }
 | 
			
		||||
 | 
			
		||||
  MSG << "Benchmark Lime read" << std::endl;
 | 
			
		||||
  MSG << SEP << std::endl;
 | 
			
		||||
  for (int l = 4; l <= BENCH_IO_LMAX; l += 2)
 | 
			
		||||
  {
 | 
			
		||||
    auto             mpi  = GridDefaultMpi();
 | 
			
		||||
    std::vector<int> latt = {l*mpi[0], l*mpi[1], l*mpi[2], l*mpi[3]};
 | 
			
		||||
  Eigen::MatrixXd mean(nVol, 4), stdDev(nVol, 4), rob(nVol, 4);
 | 
			
		||||
  Eigen::VectorXd avMean(4), avStdDev(4), avRob(4);
 | 
			
		||||
  double          n = BENCH_IO_NPASS;
 | 
			
		||||
 | 
			
		||||
    std::cout << "-- Local volume " << l << "^4" << std::endl;
 | 
			
		||||
    readBenchmark<LatticeFermion>(latt, filestem(l), limeRead<LatticeFermion>);
 | 
			
		||||
  stats(mean, stdDev, perf);
 | 
			
		||||
  stats(avMean, avStdDev, avPerf);
 | 
			
		||||
  rob.fill(100.);
 | 
			
		||||
  rob -= 100.*stdDev.cwiseQuotient(mean.cwiseAbs());
 | 
			
		||||
  avRob.fill(100.);
 | 
			
		||||
  avRob -= 100.*avStdDev.cwiseQuotient(avMean.cwiseAbs());
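  // rob/avRob are the "robustness" figures reported below: 100*(1 - stdDev/|mean|),
  // so 100 means identical throughput on every pass and lower values mean noisier I/O.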
 | 
			
		||||
 | 
			
		||||
  MSG << BIGSEP << std::endl;
 | 
			
		||||
  MSG << "SUMMARY" << std::endl;
 | 
			
		||||
  MSG << BIGSEP << std::endl;
 | 
			
		||||
  MSG << "Summary of individual results (all results in MB/s)." << std::endl;
 | 
			
		||||
  MSG << "Every second colum gives the standard deviation of the previous column." << std::endl;
 | 
			
		||||
  MSG << std::endl;
 | 
			
		||||
  grid_printf("%4s %12s %12s %12s %12s %12s %12s %12s %12s\n",
 | 
			
		||||
              "L", "std read", "std dev", "std write", "std dev",
 | 
			
		||||
              "Grid read", "std dev", "Grid write", "std dev");
 | 
			
		||||
  for (int l = BENCH_IO_LMIN; l <= BENCH_IO_LMAX; l += 2)
 | 
			
		||||
  {
 | 
			
		||||
    grid_printf("%4d %12.1f %12.1f %12.1f %12.1f %12.1f %12.1f %12.1f %12.1f\n",
 | 
			
		||||
                l, mean(volInd(l), sRead), stdDev(volInd(l), sRead),
 | 
			
		||||
                mean(volInd(l), sWrite), stdDev(volInd(l), sWrite),
 | 
			
		||||
                mean(volInd(l), gRead), stdDev(volInd(l), gRead),
 | 
			
		||||
                mean(volInd(l), gWrite), stdDev(volInd(l), gWrite));
 | 
			
		||||
  }
 | 
			
		||||
  MSG << std::endl;
 | 
			
		||||
  MSG << "Robustness of individual results, in \%. (rob = 100\% - std dev / mean)" << std::endl;
 | 
			
		||||
  MSG << std::endl;
 | 
			
		||||
  grid_printf("%4s %12s %12s %12s %12s\n",
 | 
			
		||||
              "L", "std read", "std write", "Grid read", "Grid write");
 | 
			
		||||
  for (int l = BENCH_IO_LMIN; l <= BENCH_IO_LMAX; l += 2)
 | 
			
		||||
  {
 | 
			
		||||
    grid_printf("%4d %12.1f %12.1f %12.1f %12.1f\n",
 | 
			
		||||
                l, rob(volInd(l), sRead), rob(volInd(l), sWrite),
 | 
			
		||||
                rob(volInd(l), gRead), rob(volInd(l), gWrite));
 | 
			
		||||
  }
 | 
			
		||||
  MSG << std::endl;
 | 
			
		||||
  MSG << "Summary of results averaged over local volumes 24^4-" << BENCH_IO_LMAX << "^4 (all results in MB/s)." << std::endl;
 | 
			
		||||
  MSG << "Every second colum gives the standard deviation of the previous column." << std::endl;
 | 
			
		||||
  MSG << std::endl;
 | 
			
		||||
  grid_printf("%12s %12s %12s %12s %12s %12s %12s %12s\n",
 | 
			
		||||
              "std read", "std dev", "std write", "std dev",
 | 
			
		||||
              "Grid read", "std dev", "Grid write", "std dev");
 | 
			
		||||
  grid_printf("%12.1f %12.1f %12.1f %12.1f %12.1f %12.1f %12.1f %12.1f\n",
 | 
			
		||||
              avMean(sRead), avStdDev(sRead), avMean(sWrite), avStdDev(sWrite),
 | 
			
		||||
              avMean(gRead), avStdDev(gRead), avMean(gWrite), avStdDev(gWrite));
 | 
			
		||||
  MSG << std::endl;
 | 
			
		||||
  MSG << "Robustness of volume-averaged results, in \%. (rob = 100\% - std dev / mean)" << std::endl;
 | 
			
		||||
  MSG << std::endl;
 | 
			
		||||
  grid_printf("%12s %12s %12s %12s\n",
 | 
			
		||||
              "std read", "std write", "Grid read", "Grid write");
 | 
			
		||||
  grid_printf("%12.1f %12.1f %12.1f %12.1f\n",
 | 
			
		||||
              avRob(sRead), avRob(sWrite), avRob(gRead), avRob(gWrite));
 | 
			
		||||
 | 
			
		||||
  Grid_finalize();
 | 
			
		||||
#endif
 | 
			
		||||
 | 
			
		||||
  return EXIT_SUCCESS;
 | 
			
		||||
}
 | 
			
		||||
 
 | 
			
		||||
@@ -5,6 +5,8 @@
 | 
			
		||||
#ifdef HAVE_LIME
 | 
			
		||||
#define MSG std::cout << GridLogMessage
 | 
			
		||||
#define SEP \
 | 
			
		||||
"-----------------------------------------------------------------------------"
 | 
			
		||||
#define BIGSEP \
 | 
			
		||||
"============================================================================="
 | 
			
		||||
 | 
			
		||||
namespace Grid {
 | 
			
		||||
@@ -14,13 +16,152 @@ using WriterFn = std::function<void(const std::string, Field &)> ;
 | 
			
		||||
template <typename Field>
 | 
			
		||||
using ReaderFn = std::function<void(Field &, const std::string)>;
 | 
			
		||||
 | 
			
		||||
// AP 06/10/2020: Standard C version in case one is suspicious of the C++ API
 | 
			
		||||
// 
 | 
			
		||||
// template <typename Field>
 | 
			
		||||
// void stdWrite(const std::string filestem, Field &vec)
 | 
			
		||||
// {
 | 
			
		||||
//   std::string   rankStr = std::to_string(vec.Grid()->ThisRank());
 | 
			
		||||
//   std::FILE     *file = std::fopen((filestem + "." + rankStr + ".bin").c_str(), "wb");
 | 
			
		||||
//   size_t        size;
 | 
			
		||||
//   uint32_t      crc;
 | 
			
		||||
//   GridStopWatch ioWatch, crcWatch;
 | 
			
		||||
 | 
			
		||||
//   size = vec.Grid()->lSites()*sizeof(typename Field::scalar_object);
 | 
			
		||||
//   autoView(vec_v, vec, CpuRead);
 | 
			
		||||
//   crcWatch.Start();
 | 
			
		||||
//   crc = GridChecksum::crc32(vec_v.cpu_ptr, size);
 | 
			
		||||
//   std::fwrite(&crc, sizeof(uint32_t), 1, file);
 | 
			
		||||
//   crcWatch.Stop();
 | 
			
		||||
//   MSG << "Std I/O write: Data CRC32 " << std::hex << crc << std::dec << std::endl;
 | 
			
		||||
//   ioWatch.Start();
 | 
			
		||||
//   std::fwrite(vec_v.cpu_ptr, sizeof(typename Field::scalar_object), vec.Grid()->lSites(), file);
 | 
			
		||||
//   ioWatch.Stop();
 | 
			
		||||
//   std::fclose(file);
 | 
			
		||||
//   size *= vec.Grid()->ProcessorCount();
 | 
			
		||||
//   auto &p = BinaryIO::lastPerf;
 | 
			
		||||
//   p.size            = size;
 | 
			
		||||
//   p.time            = ioWatch.useconds();
 | 
			
		||||
//   p.mbytesPerSecond = size/1024./1024./(ioWatch.useconds()/1.e6);
 | 
			
		||||
//   MSG << "Std I/O write: Wrote " << p.size << " bytes in " << ioWatch.Elapsed() 
 | 
			
		||||
//       << ", " << p.mbytesPerSecond << " MB/s" << std::endl;
 | 
			
		||||
//   MSG << "Std I/O write: checksum overhead " << crcWatch.Elapsed() << std::endl;
 | 
			
		||||
// }
 | 
			
		||||
//
 | 
			
		||||
// template <typename Field>
 | 
			
		||||
// void stdRead(Field &vec, const std::string filestem)
 | 
			
		||||
// {
 | 
			
		||||
//   std::string   rankStr = std::to_string(vec.Grid()->ThisRank());
 | 
			
		||||
//   std::FILE     *file = std::fopen((filestem + "." + rankStr + ".bin").c_str(), "rb");
 | 
			
		||||
//   size_t        size;
 | 
			
		||||
//   uint32_t      crcRead, crcData;
 | 
			
		||||
//   GridStopWatch ioWatch, crcWatch;
 | 
			
		||||
 | 
			
		||||
//   size = vec.Grid()->lSites()*sizeof(typename Field::scalar_object);
 | 
			
		||||
//   crcWatch.Start();
 | 
			
		||||
//   std::fread(&crcRead, sizeof(uint32_t), 1, file);
 | 
			
		||||
//   crcWatch.Stop();
 | 
			
		||||
//   {
 | 
			
		||||
//     autoView(vec_v, vec, CpuWrite);
 | 
			
		||||
//     ioWatch.Start();
 | 
			
		||||
//     std::fread(vec_v.cpu_ptr, sizeof(typename Field::scalar_object), vec.Grid()->lSites(), file);
 | 
			
		||||
//     ioWatch.Stop();
 | 
			
		||||
//     std::fclose(file);
 | 
			
		||||
//   }
 | 
			
		||||
//   {
 | 
			
		||||
//     autoView(vec_v, vec, CpuRead);
 | 
			
		||||
//     crcWatch.Start();
 | 
			
		||||
//     crcData = GridChecksum::crc32(vec_v.cpu_ptr, size);
 | 
			
		||||
//     crcWatch.Stop();
 | 
			
		||||
//   }
 | 
			
		||||
//   MSG << "Std I/O read: Data CRC32 " << std::hex << crcData << std::dec << std::endl;
 | 
			
		||||
//   assert(crcData == crcRead);
 | 
			
		||||
//   size *= vec.Grid()->ProcessorCount();
 | 
			
		||||
//   auto &p = BinaryIO::lastPerf;
 | 
			
		||||
//   p.size            = size;
 | 
			
		||||
//   p.time            = ioWatch.useconds();
 | 
			
		||||
//   p.mbytesPerSecond = size/1024./1024./(ioWatch.useconds()/1.e6);
 | 
			
		||||
//   MSG << "Std I/O read: Read " <<  p.size << " bytes in " << ioWatch.Elapsed() 
 | 
			
		||||
//       << ", " << p.mbytesPerSecond << " MB/s" << std::endl;
 | 
			
		||||
//   MSG << "Std I/O read: checksum overhead " << crcWatch.Elapsed() << std::endl;
 | 
			
		||||
// }
 | 
			
		||||
 | 
			
		||||
template <typename Field>
 | 
			
		||||
void stdWrite(const std::string filestem, Field &vec)
 | 
			
		||||
{
 | 
			
		||||
  std::string   rankStr = std::to_string(vec.Grid()->ThisRank());
 | 
			
		||||
  std::ofstream file(filestem + "." + rankStr + ".bin", std::ios::out | std::ios::binary);
 | 
			
		||||
  size_t        size, sizec;
 | 
			
		||||
  uint32_t      crc;
 | 
			
		||||
  GridStopWatch ioWatch, crcWatch;
 | 
			
		||||
 | 
			
		||||
  size  = vec.Grid()->lSites()*sizeof(typename Field::scalar_object);
 | 
			
		||||
  sizec = size/sizeof(char); // just in case of...
 | 
			
		||||
  autoView(vec_v, vec, CpuRead);
 | 
			
		||||
  crcWatch.Start();
 | 
			
		||||
  crc = GridChecksum::crc32(vec_v.cpu_ptr, size);
 | 
			
		||||
  file.write(reinterpret_cast<char *>(&crc), sizeof(uint32_t)/sizeof(char));
 | 
			
		||||
  crcWatch.Stop();
 | 
			
		||||
  MSG << "Std I/O write: Data CRC32 " << std::hex << crc << std::dec << std::endl;
 | 
			
		||||
  ioWatch.Start();
 | 
			
		||||
  file.write(reinterpret_cast<char *>(vec_v.cpu_ptr), sizec);
 | 
			
		||||
  file.flush();
 | 
			
		||||
  ioWatch.Stop();
 | 
			
		||||
  size *= vec.Grid()->ProcessorCount();
 | 
			
		||||
  auto &p = BinaryIO::lastPerf;
 | 
			
		||||
  p.size            = size;
 | 
			
		||||
  p.time            = ioWatch.useconds();
 | 
			
		||||
  p.mbytesPerSecond = size/1024./1024./(ioWatch.useconds()/1.e6);
 | 
			
		||||
  MSG << "Std I/O write: Wrote " << p.size << " bytes in " << ioWatch.Elapsed() 
 | 
			
		||||
      << ", " << p.mbytesPerSecond << " MB/s" << std::endl;
 | 
			
		||||
  MSG << "Std I/O write: checksum overhead " << crcWatch.Elapsed() << std::endl;
 | 
			
		||||
}
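// stdWrite/stdRead benchmark plain per-rank binary I/O through std::ofstream/std::ifstream:
// each rank writes (or reads back and CRC32-checks) its local lattice data in its own
// <filestem>.<rank>.bin file, and only the raw stream transfer is timed; the checksum
// cost is reported separately as overhead.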
 | 
			
		||||
 | 
			
		||||
template <typename Field>
 | 
			
		||||
void stdRead(Field &vec, const std::string filestem)
 | 
			
		||||
{
 | 
			
		||||
  std::string   rankStr = std::to_string(vec.Grid()->ThisRank());
 | 
			
		||||
  std::ifstream file(filestem + "." + rankStr + ".bin", std::ios::in | std::ios::binary);
 | 
			
		||||
  size_t        size, sizec;
 | 
			
		||||
  uint32_t      crcRead, crcData;
 | 
			
		||||
  GridStopWatch ioWatch, crcWatch;
 | 
			
		||||
 | 
			
		||||
  size  = vec.Grid()->lSites()*sizeof(typename Field::scalar_object);
 | 
			
		||||
  sizec = size/sizeof(char); // just in case of...
 | 
			
		||||
  crcWatch.Start();
 | 
			
		||||
  file.read(reinterpret_cast<char *>(&crcRead), sizeof(uint32_t)/sizeof(char));
 | 
			
		||||
  crcWatch.Stop();
 | 
			
		||||
  {
 | 
			
		||||
    autoView(vec_v, vec, CpuWrite);
 | 
			
		||||
    ioWatch.Start();
 | 
			
		||||
    file.read(reinterpret_cast<char *>(vec_v.cpu_ptr), sizec);
 | 
			
		||||
    ioWatch.Stop();
 | 
			
		||||
  }
 | 
			
		||||
  {
 | 
			
		||||
    autoView(vec_v, vec, CpuRead);
 | 
			
		||||
    crcWatch.Start();
 | 
			
		||||
    crcData = GridChecksum::crc32(vec_v.cpu_ptr, size);
 | 
			
		||||
    crcWatch.Stop();
 | 
			
		||||
  }
 | 
			
		||||
  MSG << "Std I/O read: Data CRC32 " << std::hex << crcData << std::dec << std::endl;
 | 
			
		||||
  assert(crcData == crcRead);
 | 
			
		||||
  size *= vec.Grid()->ProcessorCount();
 | 
			
		||||
  auto &p = BinaryIO::lastPerf;
 | 
			
		||||
  p.size            = size;
 | 
			
		||||
  p.time            = ioWatch.useconds();
 | 
			
		||||
  p.mbytesPerSecond = size/1024./1024./(ioWatch.useconds()/1.e6);
 | 
			
		||||
  MSG << "Std I/O read: Read " <<  p.size << " bytes in " << ioWatch.Elapsed() 
 | 
			
		||||
      << ", " << p.mbytesPerSecond << " MB/s" << std::endl;
 | 
			
		||||
  MSG << "Std I/O read: checksum overhead " << crcWatch.Elapsed() << std::endl;
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
template <typename Field>
 | 
			
		||||
void limeWrite(const std::string filestem, Field &vec)
 | 
			
		||||
{
 | 
			
		||||
  emptyUserRecord   record;
 | 
			
		||||
  ScidacWriter binWriter(vec.Grid()->IsBoss());
 | 
			
		||||
 | 
			
		||||
  binWriter.open(filestem + ".bin");
 | 
			
		||||
  binWriter.open(filestem + ".lime.bin");
 | 
			
		||||
  binWriter.writeScidacFieldRecord(vec, record);
 | 
			
		||||
  binWriter.close();
 | 
			
		||||
}
 | 
			
		||||
@@ -31,7 +172,7 @@ void limeRead(Field &vec, const std::string filestem)
 | 
			
		||||
  emptyUserRecord   record;
 | 
			
		||||
  ScidacReader binReader;
 | 
			
		||||
 | 
			
		||||
  binReader.open(filestem + ".bin");
 | 
			
		||||
  binReader.open(filestem + ".lime.bin");
 | 
			
		||||
  binReader.readScidacFieldRecord(vec, record);
 | 
			
		||||
  binReader.close();
 | 
			
		||||
}
 | 
			
		||||
@@ -73,12 +214,18 @@ void writeBenchmark(const Coordinate &latt, const std::string filename,
 | 
			
		||||
  auto                           simd = GridDefaultSimd(latt.size(), Field::vector_type::Nsimd());
 | 
			
		||||
  std::shared_ptr<GridCartesian> gBasePt(SpaceTimeGrid::makeFourDimGrid(latt, simd, mpi));
 | 
			
		||||
  std::shared_ptr<GridBase>      gPt;
 | 
			
		||||
  std::random_device             rd;
 | 
			
		||||
 | 
			
		||||
  makeGrid(gPt, gBasePt, Ls, rb);
 | 
			
		||||
 | 
			
		||||
  GridBase                       *g = gPt.get();
 | 
			
		||||
  GridParallelRNG                rng(g);
 | 
			
		||||
  Field                          vec(g);
 | 
			
		||||
  GridBase         *g = gPt.get();
 | 
			
		||||
  GridParallelRNG  rng(g);
 | 
			
		||||
  Field            vec(g);
 | 
			
		||||
 | 
			
		||||
  rng.SeedFixedIntegers({static_cast<int>(rd()), static_cast<int>(rd()),
 | 
			
		||||
                         static_cast<int>(rd()), static_cast<int>(rd()),
 | 
			
		||||
                         static_cast<int>(rd()), static_cast<int>(rd()),
 | 
			
		||||
                         static_cast<int>(rd()), static_cast<int>(rd())});
 | 
			
		||||
 | 
			
		||||
  random(rng, vec);
 | 
			
		||||
  write(filename, vec);
 | 
			
		||||
@@ -96,8 +243,8 @@ void readBenchmark(const Coordinate &latt, const std::string filename,
 | 
			
		||||
 | 
			
		||||
  makeGrid(gPt, gBasePt, Ls, rb);
 | 
			
		||||
 | 
			
		||||
  GridBase                       *g = gPt.get();
 | 
			
		||||
  Field                          vec(g);
 | 
			
		||||
  GridBase *g = gPt.get();
 | 
			
		||||
  Field    vec(g);
 | 
			
		||||
 | 
			
		||||
  read(vec, filename);
 | 
			
		||||
}
 | 
			
		||||
 
 | 
			
		||||
@@ -1,14 +1,9 @@
 | 
			
		||||
#include "Benchmark_IO.hpp"
 | 
			
		||||
 | 
			
		||||
#define MSG std::cout << GridLogMessage
 | 
			
		||||
#define SEP \
 | 
			
		||||
"============================================================================="
 | 
			
		||||
 | 
			
		||||
using namespace Grid;
 | 
			
		||||
 | 
			
		||||
int main (int argc, char ** argv)
 | 
			
		||||
{
 | 
			
		||||
#ifdef HAVE_LIME
 | 
			
		||||
  std::vector<std::string> dir;
 | 
			
		||||
  unsigned int             Ls;
 | 
			
		||||
  bool                     rb;
 | 
			
		||||
@@ -34,46 +29,71 @@ int main (int argc, char ** argv)
 | 
			
		||||
  }
 | 
			
		||||
  Grid_init(&argc,&argv);
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
  int64_t threads = GridThread::GetThreads();
 | 
			
		||||
  auto    mpi     = GridDefaultMpi();
 | 
			
		||||
 | 
			
		||||
  MSG << "Grid is setup to use " << threads << " threads" << std::endl;
 | 
			
		||||
  MSG << SEP << std::endl;
 | 
			
		||||
  MSG << "Benchmark double precision Lime write" << std::endl;
 | 
			
		||||
  MSG << SEP << std::endl;
 | 
			
		||||
  for (auto &d: dir)
 | 
			
		||||
  {
 | 
			
		||||
    MSG << "-- Directory " << d << std::endl;
 | 
			
		||||
    writeBenchmark<LatticeFermion>(GridDefaultLatt(), d + "/ioBench", limeWrite<LatticeFermion>, Ls, rb);
 | 
			
		||||
  }
 | 
			
		||||
  MSG << "MPI partition " << mpi << std::endl;
 | 
			
		||||
 | 
			
		||||
  MSG << SEP << std::endl;
 | 
			
		||||
  MSG << "Benchmark double precision Lime read" << std::endl;
 | 
			
		||||
  MSG << "Benchmark Grid std write" << std::endl;
 | 
			
		||||
  MSG << SEP << std::endl;
 | 
			
		||||
  for (auto &d: dir)
 | 
			
		||||
  {
 | 
			
		||||
    MSG << "-- Directory " << d << std::endl;
 | 
			
		||||
    readBenchmark<LatticeFermion>(GridDefaultLatt(), d + "/ioBench", limeRead<LatticeFermion>, Ls, rb);
 | 
			
		||||
    writeBenchmark<LatticeFermion>(GridDefaultLatt(), d + "/ioBench", 
 | 
			
		||||
                                   stdWrite<LatticeFermion>, Ls, rb);
 | 
			
		||||
  }
 | 
			
		||||
  MSG << SEP << std::endl;
 | 
			
		||||
  MSG << "Benchmark Grid std read" << std::endl;
 | 
			
		||||
  MSG << SEP << std::endl;
 | 
			
		||||
  for (auto &d: dir)
 | 
			
		||||
  {
 | 
			
		||||
    MSG << "-- Directory " << d << std::endl;
 | 
			
		||||
    readBenchmark<LatticeFermion>(GridDefaultLatt(), d + "/ioBench", 
 | 
			
		||||
                                  stdRead<LatticeFermion>, Ls, rb);
 | 
			
		||||
  }
 | 
			
		||||
 | 
			
		||||
#ifdef HAVE_LIME
 | 
			
		||||
  MSG << SEP << std::endl;
 | 
			
		||||
  MSG << "Benchmark single precision Lime write" << std::endl;
 | 
			
		||||
  MSG << "Benchmark Grid C-Lime write" << std::endl;
 | 
			
		||||
  MSG << SEP << std::endl;
 | 
			
		||||
  for (auto &d: dir)
 | 
			
		||||
  {
 | 
			
		||||
    MSG << "-- Directory " << d << std::endl;
 | 
			
		||||
    writeBenchmark<LatticeFermionF>(GridDefaultLatt(), d + "/ioBench", limeWrite<LatticeFermionF>, Ls, rb);
 | 
			
		||||
    writeBenchmark<LatticeFermion>(GridDefaultLatt(), d + "/ioBench", 
 | 
			
		||||
                                   limeWrite<LatticeFermion>, Ls, rb);
 | 
			
		||||
  }
 | 
			
		||||
  MSG << SEP << std::endl;
 | 
			
		||||
  MSG << "Benchmark Grid C-Lime read" << std::endl;
 | 
			
		||||
  MSG << SEP << std::endl;
 | 
			
		||||
  for (auto &d: dir)
 | 
			
		||||
  {
 | 
			
		||||
    MSG << "-- Directory " << d << std::endl;
 | 
			
		||||
    readBenchmark<LatticeFermion>(GridDefaultLatt(), d + "/ioBench", 
 | 
			
		||||
                                  limeRead<LatticeFermion>, Ls, rb);
 | 
			
		||||
  }
 | 
			
		||||
#endif
 | 
			
		||||
 | 
			
		||||
  MSG << SEP << std::endl;
 | 
			
		||||
  MSG << "Benchmark single precision Lime read" << std::endl;
 | 
			
		||||
  MSG << SEP << std::endl;
 | 
			
		||||
  for (auto &d: dir)
 | 
			
		||||
  {
 | 
			
		||||
    MSG << "-- Directory " << d << std::endl;
 | 
			
		||||
    readBenchmark<LatticeFermionF>(GridDefaultLatt(), d + "/ioBench", limeRead<LatticeFermionF>, Ls, rb);
 | 
			
		||||
  }
 | 
			
		||||
  // MSG << SEP << std::endl;
 | 
			
		||||
  // MSG << "Benchmark single precision Lime write" << std::endl;
 | 
			
		||||
  // MSG << SEP << std::endl;
 | 
			
		||||
  // for (auto &d: dir)
 | 
			
		||||
  // {
 | 
			
		||||
  //   MSG << "-- Directory " << d << std::endl;
 | 
			
		||||
  //   writeBenchmark<LatticeFermionF>(GridDefaultLatt(), d + "/ioBench", limeWrite<LatticeFermionF>, Ls, rb);
 | 
			
		||||
  // }
 | 
			
		||||
 | 
			
		||||
  // MSG << SEP << std::endl;
 | 
			
		||||
  // MSG << "Benchmark single precision Lime read" << std::endl;
 | 
			
		||||
  // MSG << SEP << std::endl;
 | 
			
		||||
  // for (auto &d: dir)
 | 
			
		||||
  // {
 | 
			
		||||
  //   MSG << "-- Directory " << d << std::endl;
 | 
			
		||||
  //   readBenchmark<LatticeFermionF>(GridDefaultLatt(), d + "/ioBench", limeRead<LatticeFermionF>, Ls, rb);
 | 
			
		||||
  // }
 | 
			
		||||
 | 
			
		||||
  Grid_finalize();
 | 
			
		||||
#endif
 | 
			
		||||
 | 
			
		||||
  return EXIT_SUCCESS;
 | 
			
		||||
}
 | 
			
		||||
 
 | 
			
		||||
@@ -1,4 +1,4 @@
 | 
			
		||||
    /*************************************************************************************
 | 
			
		||||
/*************************************************************************************
 | 
			
		||||
 | 
			
		||||
    Grid physics library, www.github.com/paboyle/Grid 
 | 
			
		||||
 | 
			
		||||
@@ -125,7 +125,7 @@ public:
 | 
			
		||||
	      lat*mpi_layout[1],
 | 
			
		||||
	      lat*mpi_layout[2],
 | 
			
		||||
	      lat*mpi_layout[3]});
 | 
			
		||||
	std::cout << GridLogMessage<< latt_size <<std::endl;
 | 
			
		||||
 | 
			
		||||
	GridCartesian     Grid(latt_size,simd_layout,mpi_layout);
 | 
			
		||||
	RealD Nrank = Grid._Nprocessors;
 | 
			
		||||
	RealD Nnode = Grid.NodeCount();
 | 
			
		||||
@@ -137,8 +137,8 @@ public:
 | 
			
		||||
	for(int d=0;d<8;d++){
 | 
			
		||||
	  xbuf[d] = (HalfSpinColourVectorD *)Grid.ShmBufferMalloc(lat*lat*lat*Ls*sizeof(HalfSpinColourVectorD));
 | 
			
		||||
	  rbuf[d] = (HalfSpinColourVectorD *)Grid.ShmBufferMalloc(lat*lat*lat*Ls*sizeof(HalfSpinColourVectorD));
 | 
			
		||||
	  bzero((void *)xbuf[d],lat*lat*lat*Ls*sizeof(HalfSpinColourVectorD));
 | 
			
		||||
	  bzero((void *)rbuf[d],lat*lat*lat*Ls*sizeof(HalfSpinColourVectorD));
 | 
			
		||||
	  //	  bzero((void *)xbuf[d],lat*lat*lat*Ls*sizeof(HalfSpinColourVectorD));
 | 
			
		||||
	  //	  bzero((void *)rbuf[d],lat*lat*lat*Ls*sizeof(HalfSpinColourVectorD));
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	int bytes=lat*lat*lat*Ls*sizeof(HalfSpinColourVectorD);
 | 
			
		||||
@@ -202,6 +202,8 @@ public:
 | 
			
		||||
    return;
 | 
			
		||||
  }
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
  
 | 
			
		||||
  static void Memory(void)
 | 
			
		||||
  {
 | 
			
		||||
    const int Nvec=8;
 | 
			
		||||
@@ -222,7 +224,7 @@ public:
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
  uint64_t lmax=32;
 | 
			
		||||
#define NLOOP (100*lmax*lmax*lmax*lmax/lat/lat/lat/lat)
 | 
			
		||||
#define NLOOP (1000*lmax*lmax*lmax*lmax/lat/lat/lat/lat)
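// NLOOP scales the number of iterations inversely with the local volume lat^4, so every
// lattice size performs a comparable amount of total work inside the timing loop.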
 | 
			
		||||
 | 
			
		||||
    GridSerialRNG          sRNG;      sRNG.SeedFixedIntegers(std::vector<int>({45,12,81,9}));
 | 
			
		||||
    for(int lat=8;lat<=lmax;lat+=8){
 | 
			
		||||
@@ -247,11 +249,6 @@ public:
 | 
			
		||||
      double start=usecond();
 | 
			
		||||
      for(int i=0;i<Nloop;i++){
 | 
			
		||||
	z=a*x-y;
 | 
			
		||||
	autoView( x_v , x, CpuWrite);
 | 
			
		||||
	autoView( y_v , y, CpuWrite);
 | 
			
		||||
	autoView( z_v , z, CpuRead);
 | 
			
		||||
        x_v[0]=z_v[0]; // force serial dependency to prevent optimise away
 | 
			
		||||
        y_v[4]=z_v[4];
 | 
			
		||||
      }
 | 
			
		||||
      double stop=usecond();
 | 
			
		||||
      double time = (stop-start)/Nloop*1000;
 | 
			
		||||
@@ -266,6 +263,61 @@ public:
 | 
			
		||||
  };
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
  static void SU4(void)
 | 
			
		||||
  {
 | 
			
		||||
    const int Nc4=4;
 | 
			
		||||
    typedef Lattice< iMatrix< vComplexF,Nc4> > LatticeSU4;
 | 
			
		||||
 | 
			
		||||
    Coordinate simd_layout = GridDefaultSimd(Nd,vComplexF::Nsimd());
 | 
			
		||||
    Coordinate mpi_layout  = GridDefaultMpi();
 | 
			
		||||
    
 | 
			
		||||
    std::cout<<GridLogMessage << "=================================================================================="<<std::endl;
 | 
			
		||||
    std::cout<<GridLogMessage << "= Benchmarking z = y*x SU(4) bandwidth"<<std::endl;
 | 
			
		||||
    std::cout<<GridLogMessage << "=================================================================================="<<std::endl;
 | 
			
		||||
    std::cout<<GridLogMessage << "  L  "<<"\t\t"<<"bytes"<<"\t\t\t"<<"GB/s"<<"\t\t"<<"Gflop/s"<<"\t\t seconds"<< "\t\tGB/s / node"<<std::endl;
 | 
			
		||||
    std::cout<<GridLogMessage << "----------------------------------------------------------"<<std::endl;
 | 
			
		||||
  
 | 
			
		||||
    uint64_t NN;
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
    uint64_t lmax=32;
 | 
			
		||||
#define NLOOP (1000*lmax*lmax*lmax*lmax/lat/lat/lat/lat)
 | 
			
		||||
 | 
			
		||||
    GridSerialRNG          sRNG;      sRNG.SeedFixedIntegers(std::vector<int>({45,12,81,9}));
 | 
			
		||||
    for(int lat=8;lat<=lmax;lat+=8){
 | 
			
		||||
 | 
			
		||||
      Coordinate latt_size  ({lat*mpi_layout[0],lat*mpi_layout[1],lat*mpi_layout[2],lat*mpi_layout[3]});
 | 
			
		||||
      int64_t vol= latt_size[0]*latt_size[1]*latt_size[2]*latt_size[3];
 | 
			
		||||
 | 
			
		||||
      GridCartesian     Grid(latt_size,simd_layout,mpi_layout);
 | 
			
		||||
 | 
			
		||||
      NN =Grid.NodeCount();
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
      LatticeSU4 z(&Grid); z=Zero();
 | 
			
		||||
      LatticeSU4 x(&Grid); x=Zero();
 | 
			
		||||
      LatticeSU4 y(&Grid); y=Zero();
 | 
			
		||||
      double a=2.0;
 | 
			
		||||
 | 
			
		||||
      uint64_t Nloop=NLOOP;
 | 
			
		||||
 | 
			
		||||
      double start=usecond();
 | 
			
		||||
      for(int i=0;i<Nloop;i++){
 | 
			
		||||
	z=x*y;
 | 
			
		||||
      }
 | 
			
		||||
      double stop=usecond();
 | 
			
		||||
      double time = (stop-start)/Nloop*1000;
 | 
			
		||||
     
 | 
			
		||||
      double flops=vol*Nc4*Nc4*(6+(Nc4-1)*8);// mul,add
 | 
			
		||||
      double bytes=3.0*vol*Nc4*Nc4*2*sizeof(RealF);
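      // Per output element of the Nc4 x Nc4 complex matrix product: Nc4 complex multiplies
      // and Nc4-1 complex adds, i.e. 6 + 8*(Nc4-1) flops; bytes counts streaming x, y and z
      // through memory once each in single precision.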
 | 
			
		||||
      std::cout<<GridLogMessage<<std::setprecision(3) 
 | 
			
		||||
	       << lat<<"\t\t"<<bytes<<"   \t\t"<<bytes/time<<"\t\t"<<flops/time<<"\t\t"<<(stop-start)/1000./1000.
 | 
			
		||||
	       << "\t\t"<< bytes/time/NN <<std::endl;
 | 
			
		||||
 | 
			
		||||
    }
 | 
			
		||||
  };

  static double DWF(int Ls,int L)
  {
    RealD mass=0.1;
@@ -296,6 +348,7 @@ public:
    ///////// Welcome message ////////////
    std::cout<<GridLogMessage << "=================================================================================="<<std::endl;
    std::cout<<GridLogMessage << "Benchmark DWF on "<<L<<"^4 local volume "<<std::endl;
    std::cout<<GridLogMessage << "* Nc             : "<<Nc<<std::endl;
    std::cout<<GridLogMessage << "* Global volume  : "<<GridCmdVectorIntToString(latt4)<<std::endl;
    std::cout<<GridLogMessage << "* Ls             : "<<Ls<<std::endl;
    std::cout<<GridLogMessage << "* ranks          : "<<NP  <<std::endl;
@@ -324,7 +377,7 @@ public:
    typedef LatticeGaugeFieldF Gauge;

    ///////// Source preparation ////////////
    Gauge Umu(UGrid);  SU3::HotConfiguration(RNG4,Umu); 
    Gauge Umu(UGrid);  SU<Nc>::HotConfiguration(RNG4,Umu); 
    Fermion src   (FGrid); random(RNG5,src);
    Fermion src_e (FrbGrid);
    Fermion src_o (FrbGrid);
@@ -369,7 +422,7 @@ public:
	}
	FGrid->Barrier();
	double t1=usecond();
	uint64_t ncall = 50;
	uint64_t ncall = 500;

	FGrid->Broadcast(0,&ncall,sizeof(ncall));

@@ -387,7 +440,13 @@ public:
	FGrid->Barrier();

	double volume=Ls;  for(int mu=0;mu<Nd;mu++) volume=volume*latt4[mu];
	double flops=(1344.0*volume)/2;

	// Nc=3 gives
	// 1344= 3*(2*8+6)*2*8 + 8*3*2*2 + 3*4*2*8
	// 1344 = Nc* (6+(Nc-1)*8)*2*Nd + Nd*Nc*2*2  + Nd*Nc*Ns*2
	//	double flops=(1344.0*volume)/2;
	double fps = Nc* (6+(Nc-1)*8)*Ns*Nd + Nd*Nc*Ns  + Nd*Nc*Ns*2;
	double flops=(fps*volume)/2;
	double mf_hi, mf_lo, mf_err;

	timestat.statistics(t_time);
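
To make the flop-count change in this hunk concrete: at the usual values Nc=3, Ns=4, Nd=4 the new per-site expression evaluates to 3*22*16 + 48 + 96 = 1200 flops per site, replacing the previously hard-coded 1344. A tiny standalone check of that arithmetic, assuming those standard values, not code from the diff:

// Standalone check of the per-site Deo flop count introduced in the hunk above,
// assuming the usual values Nc=3, Ns=4, Nd=4.
#include <cstdio>

int main() {
  const int Nc = 3, Ns = 4, Nd = 4;
  const double fps = Nc * (6 + (Nc - 1) * 8) * Ns * Nd   // 3 * 22 * 16 = 1056
                   + Nd * Nc * Ns                        // 48
                   + Nd * Nc * Ns * 2;                   // 96
  printf("Deo FlopsPerSite = %.0f\n", fps);              // prints 1200; the old constant was 1344
  return 0;
}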
@@ -402,6 +461,7 @@ public:
	if ( mflops>mflops_best ) mflops_best = mflops;
	if ( mflops<mflops_worst) mflops_worst= mflops;

	std::cout<<GridLogMessage<< "Deo FlopsPerSite is "<<fps<<std::endl;
	std::cout<<GridLogMessage << std::fixed << std::setprecision(1)<<"Deo mflop/s =   "<< mflops << " ("<<mf_err<<") " << mf_lo<<"-"<<mf_hi <<std::endl;
	std::cout<<GridLogMessage << std::fixed << std::setprecision(1)<<"Deo mflop/s per rank   "<< mflops/NP<<std::endl;
	std::cout<<GridLogMessage << std::fixed << std::setprecision(1)<<"Deo mflop/s per node   "<< mflops/NN<<std::endl;
@@ -478,7 +538,7 @@ public:
    typedef typename Action::FermionField Fermion; 
    typedef LatticeGaugeFieldF Gauge;

    Gauge Umu(FGrid);  SU3::HotConfiguration(RNG4,Umu); 
    Gauge Umu(FGrid);  SU<Nc>::HotConfiguration(RNG4,Umu); 

    typename Action::ImplParams params;
    Action Ds(Umu,Umu,*FGrid,*FrbGrid,mass,c1,c2,u0,params);
@@ -596,11 +656,12 @@ int main (int argc, char ** argv)
#endif
  Benchmark::Decomposition();

  int do_su4=1;
  int do_memory=1;
  int do_comms =1;

  int sel=2;
  std::vector<int> L_list({16,24,32});
  int sel=4;
  std::vector<int> L_list({8,12,16,24,32});
  int selm1=sel-1;

  std::vector<double> wilson;
@@ -624,7 +685,6 @@ int main (int argc, char ** argv)
    dwf4.push_back(result);
  }

  /*
  std::cout<<GridLogMessage << "=================================================================================="<<std::endl;
  std::cout<<GridLogMessage << " Improved Staggered dslash 4D vectorised" <<std::endl;
  std::cout<<GridLogMessage << "=================================================================================="<<std::endl;
@@ -632,14 +692,13 @@ int main (int argc, char ** argv)
    double result = Benchmark::Staggered(L_list[l]) ;
    staggered.push_back(result);
  }
  */

  std::cout<<GridLogMessage << "=================================================================================="<<std::endl;
  std::cout<<GridLogMessage << " Summary table Ls="<<Ls <<std::endl;
  std::cout<<GridLogMessage << "=================================================================================="<<std::endl;
  std::cout<<GridLogMessage << "L \t\t Wilson \t\t DWF4 \t\tt Staggered" <<std::endl;
  for(int l=0;l<L_list.size();l++){
    std::cout<<GridLogMessage << L_list[l] <<" \t\t "<< wilson[l]<<" \t\t "<<dwf4[l] <<std::endl;
    std::cout<<GridLogMessage << L_list[l] <<" \t\t "<< wilson[l]<<" \t\t "<<dwf4[l] << " \t\t "<< staggered[l]<<std::endl;
  }
  std::cout<<GridLogMessage << "=================================================================================="<<std::endl;

@@ -651,6 +710,13 @@ int main (int argc, char ** argv)
    Benchmark::Memory();
  }

  if ( do_su4 ) {
    std::cout<<GridLogMessage << "=================================================================================="<<std::endl;
    std::cout<<GridLogMessage << " Memory benchmark " <<std::endl;
    std::cout<<GridLogMessage << "=================================================================================="<<std::endl;
    Benchmark::SU4();
  }

  if ( do_comms && (NN>1) ) {
    std::cout<<GridLogMessage << "=================================================================================="<<std::endl;
    std::cout<<GridLogMessage << " Communications benchmark " <<std::endl;

@@ -74,90 +74,6 @@ int main (int argc, char ** argv)
  std::vector<double> t_time(Nloop);
  time_statistics timestat;

  std::cout<<GridLogMessage << "===================================================================================================="<<std::endl;
  std::cout<<GridLogMessage << "= Benchmarking concurrent halo exchange in "<<nmu<<" dimensions"<<std::endl;
  std::cout<<GridLogMessage << "===================================================================================================="<<std::endl;
  header();
  for(int lat=8;lat<=maxlat;lat+=4){
    for(int Ls=8;Ls<=8;Ls*=2){

      Coordinate latt_size  ({lat*mpi_layout[0],
	                      lat*mpi_layout[1],
      			      lat*mpi_layout[2],
      			      lat*mpi_layout[3]});

      GridCartesian     Grid(latt_size,simd_layout,mpi_layout);
      RealD Nrank = Grid._Nprocessors;
      RealD Nnode = Grid.NodeCount();
      RealD ppn = Nrank/Nnode;

      std::vector<Vector<HalfSpinColourVectorD> > xbuf(8);
      std::vector<Vector<HalfSpinColourVectorD> > rbuf(8);

      int ncomm;
      int bytes=lat*lat*lat*Ls*sizeof(HalfSpinColourVectorD);
      for(int mu=0;mu<8;mu++){
	xbuf[mu].resize(lat*lat*lat*Ls);
	rbuf[mu].resize(lat*lat*lat*Ls);
	//	std::cout << " buffers " << std::hex << (uint64_t)&xbuf[mu][0] <<" " << (uint64_t)&rbuf[mu][0] <<std::endl;
      }

      for(int i=0;i<Nloop;i++){
      double start=usecond();

	std::vector<CommsRequest_t> requests;

	ncomm=0;
	for(int mu=0;mu<4;mu++){

	  if (mpi_layout[mu]>1 ) {

	    ncomm++;
	    int comm_proc=1;
	    int xmit_to_rank;
	    int recv_from_rank;
	    Grid.ShiftedRanks(mu,comm_proc,xmit_to_rank,recv_from_rank);
	    Grid.SendToRecvFromBegin(requests,
				   (void *)&xbuf[mu][0],
				   xmit_to_rank,
				   (void *)&rbuf[mu][0],
				   recv_from_rank,
				   bytes);

	    comm_proc = mpi_layout[mu]-1;

	    Grid.ShiftedRanks(mu,comm_proc,xmit_to_rank,recv_from_rank);
	    Grid.SendToRecvFromBegin(requests,
				     (void *)&xbuf[mu+4][0],
				     xmit_to_rank,
				     (void *)&rbuf[mu+4][0],
				     recv_from_rank,
				     bytes);

	  }
	}
	Grid.SendToRecvFromComplete(requests);
	Grid.Barrier();
	double stop=usecond();
	t_time[i] = stop-start; // microseconds
      }

      timestat.statistics(t_time);

      double dbytes    = bytes*ppn;
      double xbytes    = dbytes*2.0*ncomm;
      double rbytes    = xbytes;
      double bidibytes = xbytes+rbytes;

      std::cout<<GridLogMessage << std::setw(4) << lat<<"\t"<<Ls<<"\t"
               <<std::setw(11) << bytes<< std::fixed << std::setprecision(1) << std::setw(7)
               <<std::right<< xbytes/timestat.mean<<"  "<< xbytes*timestat.err/(timestat.mean*timestat.mean)<< " "
               <<xbytes/timestat.max <<" "<< xbytes/timestat.min  
               << "\t\t"<<std::setw(7)<< bidibytes/timestat.mean<< "  " << bidibytes*timestat.err/(timestat.mean*timestat.mean) << " "
               << bidibytes/timestat.max << " " << bidibytes/timestat.min << std::endl;

    }
  }    
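
For reference, the rate columns printed by the loop above come from a simple accounting: each communicating direction moves `bytes` once in each orientation, scaled by the ranks-per-node factor ppn, and the per-iteration times are recorded in microseconds, so bytes per microsecond reads directly as MB/s. A standalone sketch of that accounting with placeholder values (none of the numbers below come from a real run):

// Sketch of the halo-exchange bandwidth accounting used above (illustrative numbers only).
#include <cstdio>

int main() {
  const double bytes   = 16.0 * 16 * 16 * 8 * 96; // per-direction payload: lat^3 * Ls * 96 bytes,
                                                  // taking 96 bytes per half spinor (2 spins x 3 colours, complex double)
  const int    ncomm   = 4;                       // directions with mpi_layout[mu] > 1 (assumed)
  const double ppn     = 4.0;                     // ranks per node (assumed)
  const double mean_us = 5000.0;                  // hypothetical mean loop time, microseconds

  const double dbytes    = bytes * ppn;           // per-node payload, one direction, one orientation
  const double xbytes    = dbytes * 2.0 * ncomm;  // both orientations, all communicating directions
  const double bidibytes = 2.0 * xbytes;          // send and receive traffic counted separately

  // bytes per microsecond == MB/s
  printf("uni-directional  %.1f MB/s\n", xbytes / mean_us);
  printf("bi-directional   %.1f MB/s\n", bidibytes / mean_us);
  return 0;
}

The bidirectional figure simply doubles xbytes, since rbytes is set equal to xbytes in the benchmark.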

  std::cout<<GridLogMessage << "===================================================================================================="<<std::endl;
@@ -206,26 +122,22 @@ int main (int argc, char ** argv)
	    {
	      std::vector<CommsRequest_t> requests;
	      Grid.ShiftedRanks(mu,comm_proc,xmit_to_rank,recv_from_rank);
	      Grid.SendToRecvFromBegin(requests,
				       (void *)&xbuf[mu][0],
				       xmit_to_rank,
				       (void *)&rbuf[mu][0],
				       recv_from_rank,
				       bytes);
	      Grid.SendToRecvFromComplete(requests);
	      Grid.SendToRecvFrom((void *)&xbuf[mu][0],
				  xmit_to_rank,
				  (void *)&rbuf[mu][0],
				  recv_from_rank,
				  bytes);
	    }

	    comm_proc = mpi_layout[mu]-1;
	    {
	      std::vector<CommsRequest_t> requests;
	      Grid.ShiftedRanks(mu,comm_proc,xmit_to_rank,recv_from_rank);
	      Grid.SendToRecvFromBegin(requests,
				       (void *)&xbuf[mu+4][0],
				       xmit_to_rank,
				       (void *)&rbuf[mu+4][0],
				       recv_from_rank,
				       bytes);
	      Grid.SendToRecvFromComplete(requests);
	      Grid.SendToRecvFrom((void *)&xbuf[mu+4][0],
				  xmit_to_rank,
				  (void *)&rbuf[mu+4][0],
				  recv_from_rank,
				  bytes);
	    }
	  }
	}
Some files were not shown because too many files have changed in this diff.