mirror of https://github.com/paboyle/Grid.git
synced 2025-06-14 22:07:05 +01:00
Merge branch 'develop' of https://github.com/paboyle/Grid into feature/staggering
This commit is contained in:
@@ -244,7 +244,10 @@ namespace Grid {
           pokeLocalSite(s,pgbuf,cbuf);
         }
       }
-      result = Cshift(result,dim,L);
+      if (p != processors[dim] - 1)
+      {
+        result = Cshift(result,dim,L);
+      }
     }

     // Loop over orthog coords
@@ -287,10 +290,10 @@ namespace Grid {
           cgbuf = clbuf;
           cgbuf[dim] = clbuf[dim]+L*pc;
           peekLocalSite(s,pgbuf,cgbuf);
-          s = s * div;
           pokeLocalSite(s,result,clbuf);
         }
       }
+      result = result*div;

       // destroying plan
       FFTW<scalar>::fftw_destroy_plan(p);
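The two FFT.h hunks change how the transformed field is assembled and normalised: the barrel-shift gather now skips the circular shift on the last pass (presumably because result is completely rewritten from pgbuf afterwards, so the final realignment would be wasted communication), and the FFTW normalisation div is applied once to the whole field rather than site by site. Below is a toy sketch of the gather pattern only, in plain C++ with std::rotate standing in for Cshift; the names P, L, field and pencil are illustrative and not Grid's API.

// Toy illustration (not Grid code): gather P slabs of length L into a full
// "pencil" while performing only P-1 circular shifts, mirroring the
// `if (p != processors[dim] - 1)` guard in the hunk above.
#include <algorithm>
#include <iostream>
#include <vector>

int main() {
  const int P = 4, L = 3;                 // 4 "ranks", 3 sites each
  std::vector<int> field(P * L);          // stand-in for the distributed field
  for (int i = 0; i < P * L; i++) field[i] = i;

  std::vector<int> pencil;                // stand-in for pgbuf
  for (int p = 0; p < P; p++) {
    // copy the slab currently sitting in the "local" window [0,L)
    pencil.insert(pencil.end(), field.begin(), field.begin() + L);
    // shift the next slab into view -- skipped on the last pass,
    // exactly as in the patched FFT loop
    if (p != P - 1)
      std::rotate(field.begin(), field.begin() + L, field.end());
  }
  for (int v : pencil) std::cout << v << ' ';  // prints 0 1 2 ... 11
  std::cout << '\n';
}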
@@ -203,6 +203,7 @@ typedef WilsonTMFermion<WilsonImplD> WilsonTMFermionD;
 typedef DomainWallFermion<WilsonImplR> DomainWallFermionR;
 typedef DomainWallFermion<WilsonImplF> DomainWallFermionF;
 typedef DomainWallFermion<WilsonImplD> DomainWallFermionD;
+
 typedef MobiusFermion<WilsonImplR> MobiusFermionR;
 typedef MobiusFermion<WilsonImplF> MobiusFermionF;
 typedef MobiusFermion<WilsonImplD> MobiusFermionD;
@@ -211,6 +212,20 @@ typedef ZMobiusFermion<ZWilsonImplR> ZMobiusFermionR;
 typedef ZMobiusFermion<ZWilsonImplF> ZMobiusFermionF;
 typedef ZMobiusFermion<ZWilsonImplD> ZMobiusFermionD;

+// Ls vectorised
+typedef DomainWallFermion<DomainWallVec5dImplR> DomainWallFermionVec5dR;
+typedef DomainWallFermion<DomainWallVec5dImplF> DomainWallFermionVec5dF;
+typedef DomainWallFermion<DomainWallVec5dImplD> DomainWallFermionVec5dD;
+
+typedef MobiusFermion<DomainWallVec5dImplR> MobiusFermionVec5dR;
+typedef MobiusFermion<DomainWallVec5dImplF> MobiusFermionVec5dF;
+typedef MobiusFermion<DomainWallVec5dImplD> MobiusFermionVec5dD;
+
+typedef ZMobiusFermion<ZDomainWallVec5dImplR> ZMobiusFermionVec5dR;
+typedef ZMobiusFermion<ZDomainWallVec5dImplF> ZMobiusFermionVec5dF;
+typedef ZMobiusFermion<ZDomainWallVec5dImplD> ZMobiusFermionVec5dD;
+
+
 typedef ScaledShamirFermion<WilsonImplR> ScaledShamirFermionR;
 typedef ScaledShamirFermion<WilsonImplF> ScaledShamirFermionF;
 typedef ScaledShamirFermion<WilsonImplD> ScaledShamirFermionD;
@@ -269,6 +284,7 @@ typedef ImprovedStaggeredFermion5D<StaggeredImplR> ImprovedStaggeredFermion5DR;
 typedef ImprovedStaggeredFermion5D<StaggeredImplF> ImprovedStaggeredFermion5DF;
 typedef ImprovedStaggeredFermion5D<StaggeredImplD> ImprovedStaggeredFermion5DD;

+
 }}
 ///////////////////////////////////////////////////////////////////////////////
 // G5 herm -- this has to live in QCD since dirac matrix is not in the broader sector of code
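Each typedef in the Fermion.h hunks instantiates one action template over an implementation policy: the R/F/D suffixes select default, single and double precision, and the new DomainWallVec5dImpl*/ZDomainWallVec5dImpl* policies select the Ls-vectorised data layout. A minimal sketch of that policy-template pattern, using made-up names (DomainWallLike, WilsonImplLike, Vec5dImplLike) rather than Grid's real classes:

#include <complex>

// Hypothetical implementation policies: each one only has to export the
// types the action template needs.
template <class Simd> struct WilsonImplLike { using vector_type = Simd; };
template <class Simd> struct Vec5dImplLike  { using vector_type = Simd; };

// One action template, written once against the policy's types.
template <class Impl> class DomainWallLike {
public:
  using vector_type = typename Impl::vector_type;
  vector_type dummy_site{};   // placeholder for per-site data
};

// One alias per precision/layout combination, mirroring the R/F/D scheme.
typedef DomainWallLike<WilsonImplLike<std::complex<double>>> DomainWallLikeD;
typedef DomainWallLike<Vec5dImplLike<std::complex<double>>>  DomainWallLikeVec5dD;

int main() {
  DomainWallLikeD d;
  DomainWallLikeVec5dD v;
  (void)d; (void)v;
}

The point of the pattern is that the action code never changes: adding a new layout only means adding a policy and a block of typedefs like the one above.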
@@ -194,6 +194,11 @@ void WilsonFermion5D<Impl>::Report(void)
     std::cout << GridLogMessage << "Average mflops/s per call : " << mflops << std::endl;
     std::cout << GridLogMessage << "Average mflops/s per call per rank : " << mflops/NP << std::endl;

+    RealD Fullmflops = 1344*volume*DhopCalls/(DhopComputeTime+DhopCommTime)/2; // 2 for red black counting
+    std::cout << GridLogMessage << "Average mflops/s per call (full) : " << Fullmflops << std::endl;
+    std::cout << GridLogMessage << "Average mflops/s per call per rank (full): " << Fullmflops/NP << std::endl;
+
+
   }

   if ( DerivCalls > 0 ) {
@@ -209,12 +214,15 @@ void WilsonFermion5D<Impl>::Report(void)
     RealD mflops = 144*volume*DerivCalls/DerivDhopComputeTime;
     std::cout << GridLogMessage << "Average mflops/s per call : " << mflops << std::endl;
     std::cout << GridLogMessage << "Average mflops/s per call per node : " << mflops/NP << std::endl;
-  }

+    RealD Fullmflops = 144*volume*DerivCalls/(DerivDhopComputeTime+DerivCommTime)/2; // 2 for red black counting
+    std::cout << GridLogMessage << "Average mflops/s per call (full) : " << Fullmflops << std::endl;
+    std::cout << GridLogMessage << "Average mflops/s per call per node (full): " << Fullmflops/NP << std::endl; }
+
   if (DerivCalls > 0 || DhopCalls > 0){
-    std::cout << GridLogMessage << "WilsonFermion5D Stencil"<<std::endl; Stencil.Report();
+    std::cout << GridLogMessage << "WilsonFermion5D Stencil" <<std::endl; Stencil.Report();
     std::cout << GridLogMessage << "WilsonFermion5D StencilEven"<<std::endl; StencilEven.Report();
-    std::cout << GridLogMessage << "WilsonFermion5D StencilOdd"<<std::endl; StencilOdd.Report();
+    std::cout << GridLogMessage << "WilsonFermion5D StencilOdd" <<std::endl; StencilOdd.Report();
   }
 }

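The Report() formulas above credit 1344 floating-point operations per site to each Dhop call and 144 per site to the force (Deriv) term, divide by the accumulated time, and halve the result because a red-black (checkerboarded) call only touches half of the sites, as the in-source comment says. A small arithmetic sketch with invented numbers, assuming the timers are accumulated in microseconds so that flop per microsecond reads directly as Mflop/s:

// Hedged arithmetic check of the report formula; all values are hypothetical.
#include <iostream>

int main() {
  const double volume  = 16.0 * 16 * 16 * 16 * 16;  // hypothetical Ls * L^4 sites
  const double calls   = 1000;                      // hypothetical DhopCalls
  const double compute = 2.0e7;                     // compute time in us (assumed unit)
  const double comm    = 5.0e6;                     // communication time in us

  // kernel-only rate, and the "full" rate that also charges communication
  double mflops     = 1344.0 * volume * calls / compute / 2;          // /2: red-black
  double fullmflops = 1344.0 * volume * calls / (compute + comm) / 2;

  std::cout << "kernel-only Mflop/s : " << mflops     << "\n";
  std::cout << "incl. comms Mflop/s : " << fullmflops << "\n";
}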
@@ -167,7 +167,7 @@ namespace Optimization {
     }
     //Integer
     inline __m256i operator()(__m256i a, __m256i b){
-#if defined (AVX1) || defined (AVXFMA4)
+#if defined (AVX1) || defined (AVXFMA) || defined (AVXFMA4)
       __m128i a0,a1;
       __m128i b0,b1;
       a0 = _mm256_extractf128_si256(a,0);
@@ -195,7 +195,7 @@ namespace Optimization {
     }
     //Integer
     inline __m256i operator()(__m256i a, __m256i b){
-#if defined (AVX1) || defined (AVXFMA4)
+#if defined (AVX1) || defined (AVXFMA) || defined (AVXFMA4)
       __m128i a0,a1;
       __m128i b0,b1;
       a0 = _mm256_extractf128_si256(a,0);
@@ -216,7 +216,7 @@ namespace Optimization {
   struct MultComplex{
     // Complex float
     inline __m256 operator()(__m256 a, __m256 b){
-#if defined (AVX1)
+#if defined (AVX1)
       __m256 ymm0,ymm1,ymm2;
       ymm0 = _mm256_shuffle_ps(a,a,_MM_SELECT_FOUR_FOUR(2,2,0,0)); // ymm0 <- ar ar,
       ymm0 = _mm256_mul_ps(ymm0,b); // ymm0 <- ar bi, ar br
@@ -233,7 +233,7 @@ namespace Optimization {
       a_imag = _mm256_mul_ps( a_imag,tmp ); // (Ai, Ai) * (Bi, Br) = Ai Bi, Ai Br
       return _mm256_maddsub_ps( a_real, b, a_imag ); // Ar Br , Ar Bi +- Ai Bi = ArBr-AiBi , ArBi+AiBr
 #endif
-#if defined (AVX2)
+#if defined (AVX2) || defined (AVXFMA)
       __m256 a_real = _mm256_moveldup_ps( a ); // Ar Ar
       __m256 a_imag = _mm256_movehdup_ps( a ); // Ai Ai
       a_imag = _mm256_mul_ps( a_imag, _mm256_shuffle_ps( b,b, _MM_SELECT_FOUR_FOUR(2,3,0,1) )); // (Ai, Ai) * (Bi, Br) = Ai Bi, Ai Br
@@ -264,7 +264,7 @@ namespace Optimization {
       IF IMM0[3] = 0
       THEN DEST[255:192]=SRC2[191:128] ELSE DEST[255:192]=SRC2[255:192] FI; // Ox5 r<->i ; 0xC unchanged
     */
-#if defined (AVX1)
+#if defined (AVX1)
       __m256d ymm0,ymm1,ymm2;
       ymm0 = _mm256_shuffle_pd(a,a,0x0); // ymm0 <- ar ar, ar,ar b'00,00
       ymm0 = _mm256_mul_pd(ymm0,b); // ymm0 <- ar bi, ar br
@@ -279,7 +279,7 @@ namespace Optimization {
       a_imag = _mm256_mul_pd( a_imag, _mm256_permute_pd( b, 0x5 ) ); // (Ai, Ai) * (Bi, Br) = Ai Bi, Ai Br
       return _mm256_maddsub_pd( a_real, b, a_imag ); // Ar Br , Ar Bi +- Ai Bi = ArBr-AiBi , ArBi+AiBr
 #endif
-#if defined (AVX2)
+#if defined (AVX2) || defined (AVXFMA)
       __m256d a_real = _mm256_movedup_pd( a ); // Ar Ar
       __m256d a_imag = _mm256_shuffle_pd(a,a,0xF);//aiai
       a_imag = _mm256_mul_pd( a_imag, _mm256_permute_pd( b, 0x5 ) ); // (Ai, Ai) * (Bi, Br) = Ai Bi, Ai Br
@@ -320,7 +320,7 @@ namespace Optimization {
 #if defined (AVXFMA4)
       a= _mm256_macc_ps(b,c,a);
 #endif
-#if defined (AVX2)
+#if defined (AVX2) || defined (AVXFMA)
       a= _mm256_fmadd_ps( b, c, a);
 #endif
     }
@@ -332,7 +332,7 @@ namespace Optimization {
 #if defined (AVXFMA4)
       a= _mm256_macc_pd(b,c,a);
 #endif
-#if defined (AVX2)
+#if defined (AVX2) || defined (AVXFMA)
       a= _mm256_fmadd_pd( b, c, a);
 #endif
     }
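The two hunks above let the AVXFMA target use the FMA3 fused multiply-add, a <- b*c + a, via _mm256_fmadd_ps/_mm256_fmadd_pd, alongside the existing FMA4 _mm256_macc_* path. A minimal standalone illustration of the FMA3 intrinsic (not Grid code; build with something like -mavx -mfma):

// Hedged sketch: one fused multiply-add per lane, a <- b*c + a.
#include <immintrin.h>
#include <cstdio>

int main() {
  __m256d a = _mm256_set1_pd(1.0);
  __m256d b = _mm256_set1_pd(2.0);
  __m256d c = _mm256_set1_pd(3.0);
  a = _mm256_fmadd_pd(b, c, a);     // 2*3 + 1 = 7 in every lane
  double out[4];
  _mm256_storeu_pd(out, a);
  std::printf("%g\n", out[0]);      // prints 7
}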
@@ -347,7 +347,7 @@ namespace Optimization {
     }
     // Integer
     inline __m256i operator()(__m256i a, __m256i b){
-#if defined (AVX1)
+#if defined (AVX1) || defined (AVXFMA)
       __m128i a0,a1;
       __m128i b0,b1;
       a0 = _mm256_extractf128_si256(a,0);
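Several of the Grid_avx.h hunks extend the AVX2 complex-multiply branch to the AVXFMA target. That branch packs complex numbers as re,im pairs, broadcasts the real and imaginary parts with moveldup/movehdup, swaps re and im of the second operand with a shuffle, and finishes with a fused multiply-add/subtract that yields ArBr-AiBi in the even slots and ArBi+AiBr in the odd slots. The sketch below reproduces the same idiom with standard intrinsics only (_MM_SHUFFLE and the FMA3 _mm256_fmaddsub_ps in place of the _MM_SELECT_FOUR_FOUR and FMA4 _mm256_maddsub_ps forms that appear in the file); it is illustrative, not Grid code, and builds with something like -mavx2 -mfma.

#include <immintrin.h>
#include <cstdio>

// Packed complex multiply: a and b hold four complex floats as re,im pairs.
static inline __m256 cmul_ps(__m256 a, __m256 b) {
  __m256 ar = _mm256_moveldup_ps(a);                         // Ar Ar ...
  __m256 ai = _mm256_movehdup_ps(a);                         // Ai Ai ...
  __m256 bs = _mm256_shuffle_ps(b, b, _MM_SHUFFLE(2,3,0,1)); // Bi Br ...
  ai = _mm256_mul_ps(ai, bs);                                // AiBi AiBr ...
  return _mm256_fmaddsub_ps(ar, b, ai);   // ArBr-AiBi , ArBi+AiBr
}

int main() {
  // (1+2i)*(3+4i) = -5+10i, repeated four times across the register
  __m256 a = _mm256_setr_ps(1,2, 1,2, 1,2, 1,2);
  __m256 b = _mm256_setr_ps(3,4, 3,4, 3,4, 3,4);
  float out[8];
  _mm256_storeu_ps(out, cmul_ps(a, b));
  std::printf("%g %g\n", out[0], out[1]);   // expect -5 10
}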
@@ -244,7 +244,22 @@ namespace Optimization {
       return a*b;
     }
   };
-
+
+  struct Div{
+    // Real double
+    inline vector4double operator()(vector4double a, vector4double b){
+      return vec_swdiv(a, b);
+    }
+
+    // Real float
+    FLOAT_WRAP_2(operator(), inline)
+
+    // Integer
+    inline int operator()(int a, int b){
+      return a/b;
+    }
+  };
+
   struct Conj{
     // Complex double
     inline vector4double operator()(vector4double v){
@@ -413,6 +428,7 @@ template <typename S, typename T> using ReduceSIMD = Optimization::Reduce<S,T>;
   typedef Optimization::Sum SumSIMD;
   typedef Optimization::Sub SubSIMD;
   typedef Optimization::Mult MultSIMD;
+  typedef Optimization::Div DivSIMD;
   typedef Optimization::MultComplex MultComplexSIMD;
   typedef Optimization::Conj ConjSIMD;
   typedef Optimization::TimesMinusI TimesMinusISIMD;
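The QPX hunks add a Div functor (vec_swdiv for vector4double, plain / for the scalar fallbacks) and register it as DivSIMD. The sketch below shows, with made-up wrapper names (Simd, binary) rather than Grid's Grid_simd machinery, why a functor plus a typedef is all a new arithmetic operation needs in this scheme: every binary operator funnels through one generic helper parameterised on the functor type.

#include <iostream>

namespace Optimization {
  // Architecture-neutral stand-ins for the per-target functor structs.
  struct Mult { template<class T> T operator()(T a, T b) { return a * b; } };
  struct Div  { template<class T> T operator()(T a, T b) { return a / b; } };
}

typedef Optimization::Mult MultSIMD;
typedef Optimization::Div  DivSIMD;

template <class V> struct Simd { V v; };   // toy vector wrapper

// One generic helper; each arithmetic operator just names a functor type.
template <class Op, class V> Simd<V> binary(Simd<V> a, Simd<V> b) {
  return { Op()(a.v, b.v) };
}
template <class V> Simd<V> operator*(Simd<V> a, Simd<V> b) { return binary<MultSIMD>(a, b); }
template <class V> Simd<V> operator/(Simd<V> a, Simd<V> b) { return binary<DivSIMD>(a, b); }

int main() {
  Simd<double> x{6.0}, y{3.0};
  std::cout << (x / y).v << "\n";   // prints 2
}

Swapping in an architecture-specific functor then changes the implementation without touching the operator layer, which is why only the functor and the typedef appear in the diff.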
@@ -44,7 +44,7 @@ directory
 #ifdef SSE4
 #include "Grid_sse4.h"
 #endif
-#if defined(AVX1) || defined(AVX2) || defined(AVXFMA4)
+#if defined(AVX1) || defined (AVXFMA) || defined(AVX2) || defined(AVXFMA4)
 #include "Grid_avx.h"
 #endif
 #if defined AVX512