mirror of https://github.com/paboyle/Grid.git synced 2025-06-12 20:27:06 +01:00

AddSub based alternate SU3 routine

paboyle
2016-03-28 17:55:22 -06:00
parent b6c3bc574b
commit c77b7ee897
9 changed files with 116 additions and 1086 deletions

File diff suppressed because one or more lines are too long

View File

@@ -30,7 +30,7 @@ Author: paboyle <paboyle@ph.ed.ac.uk>
#if defined(AVX512) || defined (IMCI)
//#if defined (IMCI)
#include <simd/Avx512Asm.h>
#include <simd/Intel512wilson.h>
#undef VLOAD
#undef VSTORE

File diff suppressed because it is too large

View File

@@ -84,6 +84,16 @@ Author: paboyle <paboyle@ph.ed.ac.uk>
#define ZEND2d(Criir,Ciirr, tmp) "vshufpd $0x55," #Ciirr "," #Ciirr "," #tmp ";\n"\
"vsubpd " #tmp "," #Ciirr "," #Criir"{%k7};\n" // ri+ir ; ri+ir,rr-ii
#define VMOVRDUPd(OFF,A,DEST) "vpshufd $0x44," #OFF "*64(" #A ")," #DEST ";\n" // 32 bit level: 1,0,3,2
#define VMOVIDUPd(OFF,A,DEST) "vpshufd $0xee," #OFF "*64(" #A ")," #DEST ";\n" // 32 bit level: 3,2,3,2
#define VMOVRDUPf(OFF,PTR,DEST) "vmovsldup " #OFF "*64(" #PTR "), " #DEST ";\n"
#define VMOVIDUPf(OFF,PTR,DEST) "vmovshdup " #OFF "*64(" #PTR "), " #DEST ";\n"
#define VMADDSUBf(A,B,accum) "vfmaddsub231ps " #A "," #B "," #accum ";\n"
#define VMADDSUBd(A,B,accum) "vfmaddsub231pd " #A "," #B "," #accum ";\n"
#define VTIMESI0f(A,DEST, Z) VSHUFf(A,DEST)
#define VTIMESI1f(A,DEST, Z) "vaddps " #DEST "," #Z "," #DEST"{%k6}" ";\n"
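These additions are the building blocks of the addsub-based complex multiply: vmovsldup/vmovshdup duplicate the real/imaginary halves of each interleaved complex lane, VSHUF swaps the re/im pairs of the other operand, and vfmaddsub231 folds the sign pattern of a complex product into one fused op. A minimal AVX-512 intrinsics sketch of the same idiom, with illustrative names that are not Grid's:

#include <immintrin.h>

// acc += u * chi for interleaved (re,im) complex lanes, built from the same
// instructions the macros above emit: vmovsldup (~VMOVRDUPf), vmovshdup
// (~VMOVIDUPf), a re/im pair swap (~VSHUFf) and vfmaddsub231ps (~VMADDSUBf).
// Illustrative sketch only, not Grid code.
static inline __m512 complex_madd(__m512 u, __m512 chi, __m512 acc) {
  __m512 u_re   = _mm512_moveldup_ps(u);        // (re,re,...) duplicate real parts
  __m512 u_im   = _mm512_movehdup_ps(u);        // (im,im,...) duplicate imag parts
  __m512 chi_sw = _mm512_permute_ps(chi, 0xB1); // (im,re,...) swap each pair
  acc = _mm512_fmaddsub_ps(u_im, chi_sw, acc);  // even: im*im - acc   odd: im*re + acc
  acc = _mm512_fmaddsub_ps(u_re, chi,    acc);  // even: acc + (re*re - im*im) = acc + Re(u*chi)
  return acc;                                   // odd:  acc + (re*im + im*re) = acc + Im(u*chi)
}

vfmaddsub subtracts the third operand on even lanes and adds it on odd lanes; chained twice per complex product the even-lane subtraction cancels, so the accumulator picks up Re and Im with the correct signs and no separate sign-flip or ZEND-style recombination is needed.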

View File

@@ -28,18 +28,6 @@ Author: paboyle <paboyle@ph.ed.ac.uk>
#ifndef GRID_ASM_AV512_ADDSUB_H
#define GRID_ASM_AV512_ADDSUB_H
////////////////////////////////////////////////////////////
// Knights Landing specials
////////////////////////////////////////////////////////////
#define VMOVRDUPd(OFF,A,DEST) "vpshufd $0x44," #OFF"*64("#A ")," #DEST ";\n" // 32 bit level: 1,0,3,2
#define VMOVIDUPd(OFF,A,DEST) "vpshufd $0xee," #OFF"*64("#A ")," #DEST ";\n" // 32 bit level: 3,2,3,2
#define VMOVRDUPf(O,P,DEST) "vmovsldup " #OFF "*64(" #PTR "), " #DEST ";\n"
#define VMOVIDUPf(O,P,DEST) "vmovshdup " #OFF "*64(" #PTR "), " #DEST ";\n"
#define VMADDSUBf(Aii,Bri,accum) "vfmaddsub231ps " #A "," #B "," #accum ";\n"
#define VMADDSUBd(Aii,Bri,accum) "vfmaddsub231pd " #A "," #B "," #accum ";\n"
////////////////////////////////////////////////////////////////
// Building blocks for SU3 x 2spinor
@@ -48,7 +36,7 @@ Author: paboyle <paboyle@ph.ed.ac.uk>
// 6 Chi shuffles ir,ri
// 6muls, 30 fmaddsubs
////////////////////////////////////////////////////////////////
#define MULT_ADDSUB_2SPIN_PF(ptr) \
#define MULT_ADDSUB_2SPIN(ptr) \
LOAD64(%r8,ptr) \
__asm__ ( \
VMOVIDUPf(0,%r8,Z0 ) \
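The "6 Chi shuffles" and "6muls, 30 fmaddsubs" budget quoted in the comment follows from the kernel's shape: a 3x3 complex matrix applied to two 3-component half-spinor columns is 18 complex multiply-accumulates, each taking two fused ops in the addsub scheme; the first product feeding each of the six accumulators is issued as a plain VMUL because the accumulator starts empty, and one re/im VSHUF per Chi component is reused across all three rows. A throwaway counting check (not Grid code):

#include <cassert>

// Reproduce the op budget quoted above for one SU3 x 2spinor application.
int main() {
  const int rows = 3, cols = 3, spins = 2;
  const int products  = rows * cols * spins;  // 18 complex multiply-accumulates
  const int shuffles  = cols * spins;         // one re/im VSHUF per Chi component  = 6
  const int muls      = rows * spins;         // first product per UChi accumulator = 6
  const int fmaddsubs = 2 * products - muls;  // two fused ops per product, less the muls = 30
  assert(shuffles == 6 && muls == 6 && fmaddsubs == 30);
  return 0;
}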

View File

@@ -66,6 +66,8 @@ Author: paboyle <paboyle@ph.ed.ac.uk>
#define Uir %zmm24
//#define ONE %zmm24
#define Uri %zmm25
#define T1 %zmm24
#define T2 %zmm25
#define Z0 %zmm26
#define Z1 %zmm27
@@ -288,7 +290,9 @@ Author: paboyle <paboyle@ph.ed.ac.uk>
ZEND2(UChi_02,Z4,Chi_02) \
ZEND2(UChi_12,Z5,Chi_12) );
#define MULT_2SPIN(ptr) MULT_2SPIN_PF(ptr,ptr,VPREFETCHG);
#define MULT_2SPINa(ptr) MULT_2SPIN_PF(ptr,ptr,VPREFETCHG);
#define MULT_2SPIN(ptr) MULT_ADDSUB_2SPIN(ptr);
#define MULT_2SPIN_PFXM(ptr,pf) MULT_2SPIN_PF(ptr,pf,VPREFETCHNTA)
#define MULT_2SPIN_PFYM(ptr,pf) MULT_2SPIN_PF(ptr,pf,VPREFETCHNTA)
#define MULT_2SPIN_PFZM(ptr,pf) MULT_2SPIN_PF(ptr,pf,VPREFETCHNTA)
@@ -750,8 +754,63 @@ Author: paboyle <paboyle@ph.ed.ac.uk>
VPERM3(Chi_11,Chi_11) \
VPERM3(Chi_12,Chi_12) );
#ifdef AVX512
#include <simd/Intel512avxAddsub.h>
#endif
#define MULT_ADDSUB_2SPIN1(ptr) \
LOAD64(%r8,ptr)
/*
* __asm__ ( \
);
VMUL(Z0,%zmm2,%zmm3) \
*/
#define MULT_ADDSUB_2SPIN(ptr) \
LOAD64(%r8,ptr) \
__asm__ ( \
VMOVIDUP(0,%r8,Z0 ) \
VMOVIDUP(3,%r8,Z1 )\
VMOVIDUP(6,%r8,Z2 )\
VSHUF(Chi_00,T1) \
VSHUF(Chi_10,T2) \
\
VMUL(Z0,T1,UChi_00) VMOVRDUP(0,%r8,Z3 ) \
VMUL(Z0,T2,UChi_10) VMOVRDUP(3,%r8,Z4 ) \
VMUL(Z1,T1,UChi_01) VMOVRDUP(6,%r8,Z5 ) \
VMUL(Z1,T2,UChi_11) VMOVIDUP(1,%r8,Z0 ) \
VMUL(Z2,T1,UChi_02) VMOVIDUP(4,%r8,Z1 ) \
VMUL(Z2,T2,UChi_12) VMOVIDUP(7,%r8,Z2 ) \
\
VMADDSUB(Z3,Chi_00,UChi_00) VSHUF(Chi_01,T1) \
VMADDSUB(Z3,Chi_10,UChi_10) VSHUF(Chi_11,T2) \
VMADDSUB(Z4,Chi_00,UChi_01) VMOVRDUP(1,%r8,Z3 ) \
VMADDSUB(Z4,Chi_10,UChi_11)\
VMADDSUB(Z5,Chi_00,UChi_02) VMOVRDUP(4,%r8,Z4 ) \
VMADDSUB(Z5,Chi_10,UChi_12)\
\
VMADDSUB(Z0,T1,UChi_00) VMOVRDUP(7,%r8,Z5 ) \
VMADDSUB(Z0,T2,UChi_10)\
VMADDSUB(Z1,T1,UChi_01) VMOVIDUP(2,%r8,Z0 ) \
VMADDSUB(Z1,T2,UChi_11)\
VMADDSUB(Z2,T1,UChi_02) VMOVIDUP(5,%r8,Z1 ) \
VMADDSUB(Z2,T2,UChi_12) VMOVIDUP(8,%r8,Z2 ) \
\
VMADDSUB(Z3,Chi_01,UChi_00) VSHUF(Chi_02,T1) \
VMADDSUB(Z3,Chi_11,UChi_10) VSHUF(Chi_12,T2) \
VMADDSUB(Z4,Chi_01,UChi_01) VMOVRDUP(2,%r8,Z3 ) \
VMADDSUB(Z4,Chi_11,UChi_11)\
VMADDSUB(Z5,Chi_01,UChi_02) VMOVRDUP(5,%r8,Z4 ) \
VMADDSUB(Z5,Chi_11,UChi_12)\
\
VMADDSUB(Z0,T1,UChi_00) VMOVRDUP(8,%r8,Z5 ) \
VMADDSUB(Z0,T2,UChi_10)\
VMADDSUB(Z1,T1,UChi_01)\
VMADDSUB(Z1,T2,UChi_11)\
VMADDSUB(Z2,T1,UChi_02)\
VMADDSUB(Z2,T2,UChi_12)\
\
VMADDSUB(Z3,Chi_02,UChi_00)\
VMADDSUB(Z3,Chi_12,UChi_10)\
VMADDSUB(Z4,Chi_02,UChi_01)\
VMADDSUB(Z4,Chi_12,UChi_11)\
VMADDSUB(Z5,Chi_02,UChi_02)\
VMADDSUB(Z5,Chi_12,UChi_12)\
);
#endif
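Stripped of register scheduling and prefetch, MULT_ADDSUB_2SPIN applies one SU(3) link to both half-spinor columns. A scalar std::complex reference of the arithmetic it performs; the row-major indexing of the nine elements at ptr is an assumption made for readability, the real layout being the SIMD-interleaved one the macros address at 64-byte-stride offsets 0..8:

#include <array>
#include <complex>

using cplx = std::complex<double>;

// Scalar model of MULT_ADDSUB_2SPIN: UChi[s][i] = sum_j U(i,j) * Chi[s][j]
// for the two half-spinor columns s = 0,1 and colour indices i,j = 0..2.
// Illustrative reference only, not Grid code.
std::array<std::array<cplx,3>,2>
su3_mult_2spin(const std::array<std::array<cplx,3>,3>& U,
               const std::array<std::array<cplx,3>,2>& Chi) {
  std::array<std::array<cplx,3>,2> UChi{};   // the six accumulators (UChi_00..UChi_12)
  for (int s = 0; s < 2; ++s)
    for (int i = 0; i < 3; ++i)
      for (int j = 0; j < 3; ++j)
        UChi[s][i] += U[i][j] * Chi[s][j];
  return UChi;
}

The asm version keeps all six UChi accumulators resident in zmm registers and interleaves the VMOVRDUP/VMOVIDUP loads of the next U elements with the arithmetic, which is the scheduling visible in the macro body above.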