mirror of
https://github.com/paboyle/Grid.git
Moved code from summation into transfer and reduction
commit 74432432b6 (parent b8eef54fa7)
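For orientation, the hunks below touch four headers, identified here by their include guards and the commit message: the summation header (GRID_SUMMATION_H) loses essentially all of its body; the lattice reduction header (GRID_LATTICE_REDUCTION_H) gains sliceSum; the lattice transfer header gains subdivides, projectBlockBasis, promoteBlockBasis and sumBlocks; and the QCD Dirac gamma header regroups the iVector overloads of multGamma*/multMinusGamma* and adds right-multiplication variants rmultGamma*/rmultMinusGamma* for iMatrix arguments.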
@@ -2,213 +2,5 @@
#define GRID_SUMMATION_H
namespace Grid {

inline void subdivides(GridBase *coarse,GridBase *fine)
{
  assert(coarse->_ndimension == fine->_ndimension);

  int _ndimension = coarse->_ndimension;

  // local and global volumes subdivide cleanly after SIMDization
  for(int d=0;d<_ndimension;d++){
    assert(coarse->_processors[d] == fine->_processors[d]);
    assert(coarse->_simd_layout[d] == fine->_simd_layout[d]);
    assert((fine->_rdimensions[d] / coarse->_rdimensions[d])* coarse->_rdimensions[d]==fine->_rdimensions[d]);
  }
}

template<class vobj,int nbasis>
inline void projectBlockBasis(Lattice<iVector<vComplex,nbasis > > &coarseData,
                              const Lattice<vobj> &fineData,
                              const std::vector<Lattice<vobj> > &Basis)
{
  GridBase * fine = fineData._grid;
  GridBase * coarse= coarseData._grid;
  int _ndimension = coarse->_ndimension;

  // checks
  assert( nbasis == Basis.size() );
  subdivides(coarse,fine);
  for(int i=0;i<nbasis;i++){
    conformable(Basis,fineData);
  }

  std::vector<int> block_r (_ndimension);

  for(int d=0 ; d<_ndimension;d++){
    block_r[d] = fine->_rdimensions[d] / coarse->_rdimensions[d];
  }

  coarseData=zero;

  // Loop with a cache friendly loop ordering
  for(int sf=0;sf<fine->oSites();sf++){

    int sc;
    std::vector<int> coor_c(_ndimension);
    std::vector<int> coor_f(_ndimension);
    GridBase::CoorFromIndex(coor_f,sf,fine->_rdimensions);
    for(int d=0;d<_ndimension;d++) coor_c[d]=coor_f[d]/block_r[d];
    GridBase::IndexFromCoor(coor_c,sc,coarse->_rdimensions);

    for(int i=0;i<nbasis;i++) {
      coarseData._odata[sc][i]=coarseData._odata[sc][i]
        + innerProduct(Basis[i]._odata[sf],fineData._odata[sf]);
    }
  }
  return;
}

template<class vobj,int nbasis>
inline void promoteBlockBasis(const Lattice<iVector<vComplex,nbasis > > &coarseData,
                              Lattice<vobj> &fineData,
                              const std::vector<Lattice<vobj> > &Basis)
{
  GridBase * fine = fineData._grid;
  GridBase * coarse= coarseData._grid;
  int _ndimension = coarse->_ndimension;

  // checks
  assert( nbasis == Basis.size() );
  subdivides(coarse,fine);
  for(int i=0;i<nbasis;i++){
    conformable(Basis,fineData);
  }

  std::vector<int> block_r (_ndimension);

  for(int d=0 ; d<_ndimension;d++){
    block_r[d] = fine->_rdimensions[d] / coarse->_rdimensions[d];
  }

  // Loop with a cache friendly loop ordering
  for(int sf=0;sf<fine->oSites();sf++){

    int sc;
    std::vector<int> coor_c(_ndimension);
    std::vector<int> coor_f(_ndimension);

    GridBase::CoorFromIndex(coor_f,sf,fine->_rdimensions);
    for(int d=0;d<_ndimension;d++) coor_c[d]=coor_f[d]/block_r[d];
    GridBase::IndexFromCoor(coor_c,sc,coarse->_rdimensions);

    for(int i=0;i<nbasis;i++) {
      if(i==0) fineData._odata[sf]= coarseData._odata[sc][i]*Basis[i]._odata[sf];
      else     fineData._odata[sf]=fineData._odata[sf]+coarseData._odata[sc][i]*Basis[i]._odata[sf];
    }
  }
  return;
}

// useful in multigrid project;
// Generic name : Coarsen?
template<class vobj>
inline void sumBlocks(Lattice<vobj> &coarseData,const Lattice<vobj> &fineData)
{
  GridBase * fine = fineData._grid;
  GridBase * coarse= coarseData._grid;

  subdivides(coarse,fine); // require they map

  int _ndimension = coarse->_ndimension;

  std::vector<int> block_r (_ndimension);

  for(int d=0 ; d<_ndimension;d++){
    block_r[d] = fine->_rdimensions[d] / coarse->_rdimensions[d];
  }

  coarseData=zero;
  for(int sf=0;sf<fine->oSites();sf++){

    int sc;
    std::vector<int> coor_c(_ndimension);
    std::vector<int> coor_f(_ndimension);

    GridBase::CoorFromIndex(coor_f,sf,fine->_rdimensions);
    for(int d=0;d<_ndimension;d++) coor_c[d]=coor_f[d]/block_r[d];
    GridBase::IndexFromCoor(coor_c,sc,coarse->_rdimensions);

    coarseData._odata[sc]=coarseData._odata[sc]+fineData._odata[sf];

  }
  return;
}

template<class vobj> inline void sliceSum(const Lattice<vobj> &Data,std::vector<typename vobj::scalar_object> &result,int orthogdim)
{
  typedef typename vobj::scalar_object sobj;

  GridBase *grid = Data._grid;
  const int Nd = grid->_ndimension;
  const int Nsimd = grid->Nsimd();

  assert(orthogdim >= 0);
  assert(orthogdim < Nd);

  int fd=grid->_fdimensions[orthogdim];
  int ld=grid->_ldimensions[orthogdim];
  int rd=grid->_rdimensions[orthogdim];

  std::vector<vobj,alignedAllocator<vobj> > lvSum(rd); // will locally sum vectors first
  std::vector<sobj> lsSum(ld,sobj(zero));              // sum across these down to scalars
  std::vector<sobj> extracted(Nsimd);                  // splitting the SIMD

  result.resize(fd); // And then global sum to return the same vector to every node for IO to file
  for(int r=0;r<rd;r++){
    lvSum[r]=zero;
  }

  std::vector<int> coor(Nd);

  // sum over reduced dimension planes, breaking out orthog dir
  for(int ss=0;ss<grid->oSites();ss++){
    GridBase::CoorFromIndex(coor,ss,grid->_rdimensions);
    int r = coor[orthogdim];
    lvSum[r]=lvSum[r]+Data._odata[ss];
  }

  // Sum across simd lanes in the plane, breaking out orthog dir.
  std::vector<int> icoor(Nd);

  for(int rt=0;rt<rd;rt++){

    extract(lvSum[rt],extracted);

    for(int idx=0;idx<Nsimd;idx++){

      grid->iCoorFromIindex(icoor,idx);

      int ldx =rt+icoor[orthogdim]*rd;

      lsSum[ldx]=lsSum[ldx]+extracted[idx];

    }
  }

  // sum over nodes.
  sobj gsum;
  for(int t=0;t<fd;t++){
    int pt = t/ld; // processor plane
    int lt = t%ld;
    if ( pt == grid->_processor_coor[orthogdim] ) {
      gsum=lsSum[lt];
    } else {
      gsum=zero;
    }

    grid->GlobalSum(gsum);

    result[t]=gsum;
  }

}

}
#endif
@@ -2,6 +2,9 @@
#define GRID_LATTICE_REDUCTION_H

namespace Grid {
#ifdef GRID_WARN_SUBOPTIMAL
#warning "Optimisation alert all these reduction loops are NOT threaded "
#endif

////////////////////////////////////////////////////////////////////////////////////////////////////
// Reduction operations
@@ -73,6 +76,79 @@ namespace Grid {
  return ssum;
}

template<class vobj> inline void sliceSum(const Lattice<vobj> &Data,std::vector<typename vobj::scalar_object> &result,int orthogdim)
{
  typedef typename vobj::scalar_object sobj;

  GridBase *grid = Data._grid;
  const int Nd = grid->_ndimension;
  const int Nsimd = grid->Nsimd();

  assert(orthogdim >= 0);
  assert(orthogdim < Nd);

  int fd=grid->_fdimensions[orthogdim];
  int ld=grid->_ldimensions[orthogdim];
  int rd=grid->_rdimensions[orthogdim];

  std::vector<vobj,alignedAllocator<vobj> > lvSum(rd); // will locally sum vectors first
  std::vector<sobj> lsSum(ld,sobj(zero));              // sum across these down to scalars
  std::vector<sobj> extracted(Nsimd);                  // splitting the SIMD

  result.resize(fd); // And then global sum to return the same vector to every node for IO to file
  for(int r=0;r<rd;r++){
    lvSum[r]=zero;
  }

  std::vector<int> coor(Nd);

  // sum over reduced dimension planes, breaking out orthog dir
  for(int ss=0;ss<grid->oSites();ss++){
    GridBase::CoorFromIndex(coor,ss,grid->_rdimensions);
    int r = coor[orthogdim];
    lvSum[r]=lvSum[r]+Data._odata[ss];
  }

  // Sum across simd lanes in the plane, breaking out orthog dir.
  std::vector<int> icoor(Nd);

  for(int rt=0;rt<rd;rt++){

    extract(lvSum[rt],extracted);

    for(int idx=0;idx<Nsimd;idx++){

      grid->iCoorFromIindex(icoor,idx);

      int ldx =rt+icoor[orthogdim]*rd;

      lsSum[ldx]=lsSum[ldx]+extracted[idx];

    }
  }

  // sum over nodes.
  sobj gsum;
  for(int t=0;t<fd;t++){
    int pt = t/ld; // processor plane
    int lt = t%ld;
    if ( pt == grid->_processor_coor[orthogdim] ) {
      gsum=lsSum[lt];
    } else {
      gsum=zero;
    }

    grid->GlobalSum(gsum);

    result[t]=gsum;
  }

}

}
#endif
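As a usage illustration of the relocated sliceSum, here is a minimal sketch (the wrapper name and the choice of orthogdim=3 are assumptions, not part of the commit):

// Sum a lattice field over the planes orthogonal to direction 3;
// sliceSum's internal GlobalSum means every node gets the full result vector.
template<class vobj>
std::vector<typename vobj::scalar_object> sliceSumsAlongDim3(const Lattice<vobj> &field)
{
  std::vector<typename vobj::scalar_object> sums;
  sliceSum(field,sums,3);   // one scalar_object per global slice in direction 3
  return sums;
}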
@@ -3,6 +3,20 @@

namespace Grid {

inline void subdivides(GridBase *coarse,GridBase *fine)
{
  assert(coarse->_ndimension == fine->_ndimension);

  int _ndimension = coarse->_ndimension;

  // local and global volumes subdivide cleanly after SIMDization
  for(int d=0;d<_ndimension;d++){
    assert(coarse->_processors[d] == fine->_processors[d]);
    assert(coarse->_simd_layout[d] == fine->_simd_layout[d]);
    assert((fine->_rdimensions[d] / coarse->_rdimensions[d])* coarse->_rdimensions[d]==fine->_rdimensions[d]);
  }
}

////////////////////////////////////////////////////////////////////////////////////////////
// remove and insert a half checkerboard
////////////////////////////////////////////////////////////////////////////////////////////
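As a worked example of the divisibility requirement (the numbers are chosen purely for illustration): with fine reduced dimensions of 16 and coarse reduced dimensions of 4 in every direction, (16/4)*4 == 16, so the asserts pass, and the block routines in the next hunk compute block_r[d] = 16/4 = 4, i.e. each coarse site aggregates a 4^4 block of fine sites.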
@@ -42,5 +56,131 @@ namespace Grid {
  }
}

template<class vobj,int nbasis>
inline void projectBlockBasis(Lattice<iVector<vComplex,nbasis > > &coarseData,
                              const Lattice<vobj> &fineData,
                              const std::vector<Lattice<vobj> > &Basis)
{
  GridBase * fine = fineData._grid;
  GridBase * coarse= coarseData._grid;
  int _ndimension = coarse->_ndimension;

  // checks
  assert( nbasis == Basis.size() );
  subdivides(coarse,fine);
  for(int i=0;i<nbasis;i++){
    conformable(Basis,fineData);
  }

  std::vector<int> block_r (_ndimension);

  for(int d=0 ; d<_ndimension;d++){
    block_r[d] = fine->_rdimensions[d] / coarse->_rdimensions[d];
  }

  coarseData=zero;

  // Loop with a cache friendly loop ordering
  for(int sf=0;sf<fine->oSites();sf++){

    int sc;
    std::vector<int> coor_c(_ndimension);
    std::vector<int> coor_f(_ndimension);
    GridBase::CoorFromIndex(coor_f,sf,fine->_rdimensions);
    for(int d=0;d<_ndimension;d++) coor_c[d]=coor_f[d]/block_r[d];
    GridBase::IndexFromCoor(coor_c,sc,coarse->_rdimensions);

    for(int i=0;i<nbasis;i++) {
      coarseData._odata[sc][i]=coarseData._odata[sc][i]
        + innerProduct(Basis[i]._odata[sf],fineData._odata[sf]);
    }
  }
  return;
}

template<class vobj,int nbasis>
inline void promoteBlockBasis(const Lattice<iVector<vComplex,nbasis > > &coarseData,
                              Lattice<vobj> &fineData,
                              const std::vector<Lattice<vobj> > &Basis)
{
  GridBase * fine = fineData._grid;
  GridBase * coarse= coarseData._grid;
  int _ndimension = coarse->_ndimension;

  // checks
  assert( nbasis == Basis.size() );
  subdivides(coarse,fine);
  for(int i=0;i<nbasis;i++){
    conformable(Basis,fineData);
  }

  std::vector<int> block_r (_ndimension);

  for(int d=0 ; d<_ndimension;d++){
    block_r[d] = fine->_rdimensions[d] / coarse->_rdimensions[d];
  }

  // Loop with a cache friendly loop ordering
  for(int sf=0;sf<fine->oSites();sf++){

    int sc;
    std::vector<int> coor_c(_ndimension);
    std::vector<int> coor_f(_ndimension);

    GridBase::CoorFromIndex(coor_f,sf,fine->_rdimensions);
    for(int d=0;d<_ndimension;d++) coor_c[d]=coor_f[d]/block_r[d];
    GridBase::IndexFromCoor(coor_c,sc,coarse->_rdimensions);

    for(int i=0;i<nbasis;i++) {
      if(i==0) fineData._odata[sf]= coarseData._odata[sc][i]*Basis[i]._odata[sf];
      else     fineData._odata[sf]=fineData._odata[sf]+coarseData._odata[sc][i]*Basis[i]._odata[sf];
    }
  }
  return;
}

// useful in multigrid project;
// Generic name : Coarsen?
template<class vobj>
inline void sumBlocks(Lattice<vobj> &coarseData,const Lattice<vobj> &fineData)
{
  GridBase * fine = fineData._grid;
  GridBase * coarse= coarseData._grid;

  subdivides(coarse,fine); // require they map

  int _ndimension = coarse->_ndimension;

  std::vector<int> block_r (_ndimension);

  for(int d=0 ; d<_ndimension;d++){
    block_r[d] = fine->_rdimensions[d] / coarse->_rdimensions[d];
  }

  coarseData=zero;
  for(int sf=0;sf<fine->oSites();sf++){

    int sc;
    std::vector<int> coor_c(_ndimension);
    std::vector<int> coor_f(_ndimension);

    GridBase::CoorFromIndex(coor_f,sf,fine->_rdimensions);
    for(int d=0;d<_ndimension;d++) coor_c[d]=coor_f[d]/block_r[d];
    GridBase::IndexFromCoor(coor_c,sc,coarse->_rdimensions);

    coarseData._odata[sc]=coarseData._odata[sc]+fineData._odata[sf];

  }
  return;
}

}
#endif
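A minimal sketch of how the block routines above compose in a multigrid-style restrict/prolongate step (the wrapper name is an assumption; only calls that appear in this diff are used):

template<class vobj,int nbasis>
void restrictThenProlongate(Lattice<iVector<vComplex,nbasis> > &coarseVec,
                            Lattice<vobj> &fineVec,
                            const std::vector<Lattice<vobj> > &basis)
{
  // coarseVec[sc][i] accumulates the per-block inner product of basis[i] with fineVec
  projectBlockBasis(coarseVec,fineVec,basis);
  // fineVec is then rebuilt block by block as sum_i coarseVec[i]*basis[i]
  promoteBlockBasis(coarseVec,fineVec,basis);
}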
@@ -59,6 +59,8 @@ namespace QCD {
 * 0 -i 0 0
 * -i 0 0 0
 */
// right multiplication makes sense for matrix args, not for vector since there is
// no concept of row versus columnar indices
template<class vtype> inline void rmultMinusGammaX(iMatrix<vtype,Ns> &ret,const iMatrix<vtype,Ns> &rhs){
  for(int i=0;i<Ns;i++){
    ret(i,0) = timesI(rhs(i,3));
@@ -75,19 +77,6 @@
    ret(i,3) = timesI(rhs(i,1));
  }
};

template<class vtype> inline void multGammaX(iVector<vtype,Ns> &ret, iVector<vtype,Ns> &rhs){
  ret._internal[0] = timesI(rhs._internal[3]);
  ret._internal[1] = timesI(rhs._internal[2]);
  ret._internal[2] = timesMinusI(rhs._internal[1]);
  ret._internal[3] = timesMinusI(rhs._internal[0]);
};
template<class vtype> inline void multMinusGammaX(iVector<vtype,Ns> &ret, iVector<vtype,Ns> &rhs){
  ret(0) = timesMinusI(rhs(3));
  ret(1) = timesMinusI(rhs(2));
  ret(2) = timesI(rhs(1));
  ret(3) = timesI(rhs(0));
};
template<class vtype> inline void multGammaX(iMatrix<vtype,Ns> &ret, const iMatrix<vtype,Ns> &rhs){
  for(int i=0;i<Ns;i++){
    ret(0,i) = timesI(rhs(3,i));
@@ -105,7 +94,18 @@
  }
};

template<class vtype> inline void multGammaX(iVector<vtype,Ns> &ret, iVector<vtype,Ns> &rhs){
  ret._internal[0] = timesI(rhs._internal[3]);
  ret._internal[1] = timesI(rhs._internal[2]);
  ret._internal[2] = timesMinusI(rhs._internal[1]);
  ret._internal[3] = timesMinusI(rhs._internal[0]);
};
template<class vtype> inline void multMinusGammaX(iVector<vtype,Ns> &ret, iVector<vtype,Ns> &rhs){
  ret(0) = timesMinusI(rhs(3));
  ret(1) = timesMinusI(rhs(2));
  ret(2) = timesI(rhs(1));
  ret(3) = timesI(rhs(0));
};

/*Gy
@@ -114,17 +114,21 @@
 * 0 1 0 0
 * -1 0 0 0
 */
template<class vtype> inline void multGammaY(iVector<vtype,Ns> &ret, iVector<vtype,Ns> &rhs){
  ret(0) = -rhs(3);
  ret(1) = rhs(2);
  ret(2) = rhs(1);
  ret(3) = -rhs(0);
template<class vtype> inline void rmultGammaY(iMatrix<vtype,Ns> &ret, const iMatrix<vtype,Ns> &rhs){
  for(int i=0;i<Ns;i++){
    ret(i,0) = -rhs(i,3);
    ret(i,1) = rhs(i,2);
    ret(i,2) = rhs(i,1);
    ret(i,3) = -rhs(i,0);
  }
};
template<class vtype> inline void multMinusGammaY(iVector<vtype,Ns> &ret, iVector<vtype,Ns> &rhs){
  ret(0) = rhs(3);
  ret(1) = -rhs(2);
  ret(2) = -rhs(1);
  ret(3) = rhs(0);
template<class vtype> inline void rmultMinusGammaY(iMatrix<vtype,Ns> &ret, const iMatrix<vtype,Ns> &rhs){
  for(int i=0;i<Ns;i++){
    ret(i,0) = rhs(i,3);
    ret(i,1) = -rhs(i,2);
    ret(i,2) = -rhs(i,1);
    ret(i,3) = rhs(i,0);
  }
};
template<class vtype> inline void multGammaY(iMatrix<vtype,Ns> &ret, const iMatrix<vtype,Ns> &rhs){
  for(int i=0;i<Ns;i++){
@@ -142,23 +146,39 @@
    ret(3,i) = rhs(0,i);
  }
};
template<class vtype> inline void multGammaY(iVector<vtype,Ns> &ret, iVector<vtype,Ns> &rhs){
  ret(0) = -rhs(3);
  ret(1) = rhs(2);
  ret(2) = rhs(1);
  ret(3) = -rhs(0);
};
template<class vtype> inline void multMinusGammaY(iVector<vtype,Ns> &ret, iVector<vtype,Ns> &rhs){
  ret(0) = rhs(3);
  ret(1) = -rhs(2);
  ret(2) = -rhs(1);
  ret(3) = rhs(0);
};
/*Gz
 * 0 0 i 0 [0]+-i[2]
 * 0 0 0 -i [1]-+i[3]
 * -i 0 0 0
 * 0 i 0 0
 */
template<class vtype> inline void multGammaZ(iVector<vtype,Ns> &ret, iVector<vtype,Ns> &rhs){
  ret(0) = timesI(rhs(2));
  ret(1) =timesMinusI(rhs(3));
  ret(2) =timesMinusI(rhs(0));
  ret(3) = timesI(rhs(1));
template<class vtype> inline void rmultGammaZ(iMatrix<vtype,Ns> &ret, const iMatrix<vtype,Ns> &rhs){
  for(int i=0;i<Ns;i++){
    ret(i,0) = timesMinusI(rhs(i,2));
    ret(i,1) = timesI(rhs(i,3));
    ret(i,2) = timesI(rhs(i,0));
    ret(i,3) = timesMinusI(rhs(i,1));
  }
};
template<class vtype> inline void multMinusGammaZ(iVector<vtype,Ns> &ret, iVector<vtype,Ns> &rhs){
  ret(0) = timesMinusI(rhs(2));
  ret(1) = timesI(rhs(3));
  ret(2) = timesI(rhs(0));
  ret(3) = timesMinusI(rhs(1));
template<class vtype> inline void rmultMinusGammaZ(iMatrix<vtype,Ns> &ret, const iMatrix<vtype,Ns> &rhs){
  for(int i=0;i<Ns;i++){
    ret(i,0) = timesI(rhs(i,2));
    ret(i,1) = timesMinusI(rhs(i,3));
    ret(i,2) = timesMinusI(rhs(i,0));
    ret(i,3) = timesI(rhs(i,1));
  }
};
template<class vtype> inline void multGammaZ(iMatrix<vtype,Ns> &ret, const iMatrix<vtype,Ns> &rhs){
  for(int i=0;i<Ns;i++){
@@ -176,23 +196,39 @@
    ret(3,i) = timesMinusI(rhs(1,i));
  }
};
template<class vtype> inline void multGammaZ(iVector<vtype,Ns> &ret, iVector<vtype,Ns> &rhs){
  ret(0) = timesI(rhs(2));
  ret(1) =timesMinusI(rhs(3));
  ret(2) =timesMinusI(rhs(0));
  ret(3) = timesI(rhs(1));
};
template<class vtype> inline void multMinusGammaZ(iVector<vtype,Ns> &ret, iVector<vtype,Ns> &rhs){
  ret(0) = timesMinusI(rhs(2));
  ret(1) = timesI(rhs(3));
  ret(2) = timesI(rhs(0));
  ret(3) = timesMinusI(rhs(1));
};
/*Gt
 * 0 0 1 0 [0]+-[2]
 * 0 0 0 1 [1]+-[3]
 * 1 0 0 0
 * 0 1 0 0
 */
template<class vtype> inline void multGammaT(iVector<vtype,Ns> &ret, iVector<vtype,Ns> &rhs){
  ret(0) = rhs(2);
  ret(1) = rhs(3);
  ret(2) = rhs(0);
  ret(3) = rhs(1);
template<class vtype> inline void rmultGammaT(iMatrix<vtype,Ns> &ret, const iMatrix<vtype,Ns> &rhs){
  for(int i=0;i<Ns;i++){
    ret(i,0) = rhs(i,2);
    ret(i,1) = rhs(i,3);
    ret(i,2) = rhs(i,0);
    ret(i,3) = rhs(i,1);
  }
};
template<class vtype> inline void multMinusGammaT(iVector<vtype,Ns> &ret, iVector<vtype,Ns> &rhs){
  ret(0) =-rhs(2);
  ret(1) =-rhs(3);
  ret(2) =-rhs(0);
  ret(3) =-rhs(1);
template<class vtype> inline void rmultMinusGammaT(iMatrix<vtype,Ns> &ret, const iMatrix<vtype,Ns> &rhs){
  for(int i=0;i<Ns;i++){
    ret(i,0) =- rhs(i,2);
    ret(i,1) =- rhs(i,3);
    ret(i,2) =- rhs(i,0);
    ret(i,3) =- rhs(i,1);
  }
};
template<class vtype> inline void multGammaT(iMatrix<vtype,Ns> &ret, const iMatrix<vtype,Ns> &rhs){
  for(int i=0;i<Ns;i++){
@@ -210,24 +246,41 @@
    ret(3,i) =-rhs(1,i);
  }
};
template<class vtype> inline void multGammaT(iVector<vtype,Ns> &ret, iVector<vtype,Ns> &rhs){
  ret(0) = rhs(2);
  ret(1) = rhs(3);
  ret(2) = rhs(0);
  ret(3) = rhs(1);
};
template<class vtype> inline void multMinusGammaT(iVector<vtype,Ns> &ret, iVector<vtype,Ns> &rhs){
  ret(0) =-rhs(2);
  ret(1) =-rhs(3);
  ret(2) =-rhs(0);
  ret(3) =-rhs(1);
};
/*G5
 * 1 0 0 0 [0]+-[2]
 * 0 1 0 0 [1]+-[3]
 * 0 0 -1 0
 * 0 0 0 -1
 */
template<class vtype> inline void multGamma5(iVector<vtype,Ns> &ret, iVector<vtype,Ns> &rhs){
  ret(0) = rhs(0);
  ret(1) = rhs(1);
  ret(2) =-rhs(2);
  ret(3) =-rhs(3);
template<class vtype> inline void rmultGamma5(iMatrix<vtype,Ns> &ret, const iMatrix<vtype,Ns> &rhs){
  for(int i=0;i<Ns;i++){
    ret(i,0) = rhs(i,0);
    ret(i,1) = rhs(i,1);
    ret(i,2) =-rhs(i,2);
    ret(i,3) =-rhs(i,3);
  }
};
template<class vtype> inline void multMinusGamma5(iVector<vtype,Ns> &ret, iVector<vtype,Ns> &rhs){
  ret(0) =-rhs(0);
  ret(1) =-rhs(1);
  ret(2) = rhs(2);
  ret(3) = rhs(3);
template<class vtype> inline void rmultMinusGamma5(iMatrix<vtype,Ns> &ret, const iMatrix<vtype,Ns> &rhs){
  for(int i=0;i<Ns;i++){
    ret(i,0) =-rhs(i,0);
    ret(i,1) =-rhs(i,1);
    ret(i,2) = rhs(i,2);
    ret(i,3) = rhs(i,3);
  }
};

template<class vtype> inline void multGamma5(iMatrix<vtype,Ns> &ret, const iMatrix<vtype,Ns> &rhs){
  for(int i=0;i<Ns;i++){
    ret(0,i) = rhs(0,i);
@@ -245,6 +298,19 @@
  }
};

template<class vtype> inline void multGamma5(iVector<vtype,Ns> &ret, iVector<vtype,Ns> &rhs){
  ret(0) = rhs(0);
  ret(1) = rhs(1);
  ret(2) =-rhs(2);
  ret(3) =-rhs(3);
};
template<class vtype> inline void multMinusGamma5(iVector<vtype,Ns> &ret, iVector<vtype,Ns> &rhs){
  ret(0) =-rhs(0);
  ret(1) =-rhs(1);
  ret(2) = rhs(2);
  ret(3) = rhs(3);
};

///////////////////////////////////////////////////////////////////////////////////////////////////
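For reference, the convention behind the new rmult* routines, reconstructed from the assignments above (an editorial inference, not text from the commit): multGammaX and friends apply the gamma matrix from the left, acting on a spin vector or on the row index of a spin matrix, while rmultGammaX and friends apply it from the right, acting on the column index; as the comment in the diff notes, the right-multiplied form only makes sense for matrix arguments. Reading the coefficients off multGammaX,

\gamma_x = \begin{pmatrix} 0 & 0 & 0 & i \\ 0 & 0 & i & 0 \\ 0 & -i & 0 & 0 \\ -i & 0 & 0 & 0 \end{pmatrix},
\qquad (\gamma_x \psi)_a = \sum_b (\gamma_x)_{ab}\,\psi_b,
\qquad (M\,\gamma_x)_{ab} = \sum_c M_{ac}\,(\gamma_x)_{cb},

so, for example, rmultMinusGammaX sets ret(i,0) = timesI(rhs(i,3)) because (-\gamma_x)_{30} = i.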