/*************************************************************************************

    Grid physics library, www.github.com/paboyle/Grid

    Source file: ./lib/qcd/utils/WilsonLoops.h

    Copyright (C) 2015

Author: Azusa Yamaguchi <ayamaguc@staffmail.ed.ac.uk>
Author: Peter Boyle <paboyle@ph.ed.ac.uk>
Author: neo <cossu@post.kek.jp>
Author: paboyle <paboyle@ph.ed.ac.uk>

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License along
    with this program; if not, write to the Free Software Foundation, Inc.,
    51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.

    See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/*  END LEGAL */
#ifndef QCD_UTILS_WILSON_LOOPS_H
#define QCD_UTILS_WILSON_LOOPS_H

namespace Grid {
namespace QCD {

// Common Wilson loop observables
template<class Gimpl>
class WilsonLoops : public Gimpl {
public:

  INHERIT_GIMPL_TYPES(Gimpl);
  typedef typename Gimpl::GaugeLinkField GaugeMat;
  typedef typename Gimpl::GaugeField     GaugeLorentz;

  //////////////////////////////////////////////////
  // directed plaquette oriented in mu,nu plane
  //////////////////////////////////////////////////
  static void dirPlaquette(GaugeMat &plaq, const std::vector<GaugeMat> &U, const int mu, const int nu)
  {
    // Annoyingly, a static method has no "this", so the dependent base class
    // must be found by explicit scope resolution. This forces Gimpl:: scope
    // resolution throughout the usage in this file, and rather defeats the
    // purpose of deriving from Gimpl.
    plaq = Gimpl::CovShiftBackward(U[mu],mu,
           Gimpl::CovShiftBackward(U[nu],nu,
           Gimpl::CovShiftForward (U[mu],mu,U[nu])));
  }
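  // Unrolled (a sketch of the intended identity, not additional code): with
  // CovShiftForward(L,mu,f)(x) = L(x) f(x+mu) and the matching backward
  // transport, the nested shifts above build, up to the base-point shift
  // introduced by the two backward hops, the usual plaquette
  //
  //   plaq ~ U_mu(x) U_nu(x+mu) U_mu^dag(x+nu) U_nu^dag(x)
  //
  // under the trace, which is cyclic and blind to the overall translation.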
  //////////////////////////////////////////////////
  // trace of directed plaquette oriented in mu,nu plane
  //////////////////////////////////////////////////
  static void traceDirPlaquette(LatticeComplex &plaq, const std::vector<GaugeMat> &U, const int mu, const int nu)
  {
    GaugeMat sp(U[0]._grid);
    dirPlaquette(sp,U,mu,nu);
    plaq = trace(sp);
  }
  //////////////////////////////////////////////////
  // sum over all planes of plaquette
  //////////////////////////////////////////////////
  static void sitePlaquette(LatticeComplex &Plaq, const std::vector<GaugeMat> &U)
  {
    LatticeComplex sitePlaq(U[0]._grid);
    Plaq = zero;
    for(int mu=1;mu<Nd;mu++){
      for(int nu=0;nu<mu;nu++){
        traceDirPlaquette(sitePlaq,U,mu,nu);
        Plaq = Plaq + sitePlaq;
      }
    }
  }
  //////////////////////////////////////////////////
  // sum over all x,y,z,t and over all planes of plaquette
  //////////////////////////////////////////////////
  static RealD sumPlaquette(const GaugeLorentz &Umu){
    std::vector<GaugeMat> U(Nd,Umu._grid);

    for(int mu=0;mu<Nd;mu++){
      U[mu] = PeekIndex<LorentzIndex>(Umu,mu);
    }

    LatticeComplex Plaq(Umu._grid);

    sitePlaquette(Plaq,U);

    TComplex Tp = sum(Plaq);
    Complex  p  = TensorRemove(Tp);
    return p.real();
  }
  //////////////////////////////////////////////////
  // average over all x,y,z,t and over all planes of plaquette
  //////////////////////////////////////////////////
  static RealD avgPlaquette(const GaugeLorentz &Umu){
    RealD sumplaq = sumPlaquette(Umu);
    double vol    = Umu._grid->gSites();
    double faces  = (1.0*Nd*(Nd-1))/2.0;
    return sumplaq/vol/faces/Nc; // Nd, Nc dependent... FIXME
  }
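  // Normalisation check (illustrative): sitePlaquette sums Nd(Nd-1)/2 planes
  // per site (6 in Nd=4), and each cold (unit gauge) plaquette trace is Nc,
  // so avgPlaquette == 1.0 on a cold configuration and ~0 on a hot one.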
  static RealD linkTrace(const GaugeLorentz &Umu){
    std::vector<GaugeMat> U(Nd,Umu._grid);

    LatticeComplex Tr(Umu._grid); Tr = zero;
    for(int mu=0;mu<Nd;mu++){
      U[mu] = PeekIndex<LorentzIndex>(Umu,mu);
      Tr = Tr + trace(U[mu]);
    }

    TComplex Tp = sum(Tr);
    Complex  p  = TensorRemove(Tp);

    double vol = Umu._grid->gSites();

    return p.real()/vol/((double)(Nd*(Nd-1)));
  }
  //////////////////////////////////////////////////
  // the sum over all staples on each site
  //////////////////////////////////////////////////
  static void Staple(GaugeMat &staple, const GaugeLorentz &Umu, int mu){

    GridBase *grid = Umu._grid;

    std::vector<GaugeMat> U(Nd,grid);
    for(int d=0;d<Nd;d++){
      U[d] = PeekIndex<LorentzIndex>(Umu,d);
    }
    staple = zero;
    GaugeMat tmp(grid);

    for(int nu=0;nu<Nd;nu++){

      if(nu != mu) {

        // mu
        // ^
        // |__>  nu

        //    __
        //       |
        //    __|
        //
        staple += Gimpl::ShiftStaple(
                    Gimpl::CovShiftForward (U[nu],nu,
                    Gimpl::CovShiftBackward(U[mu],mu,
                    Gimpl::CovShiftIdentityBackward(U[nu],nu))), mu);

        //    __
        //   |
        //   |__
        //
        staple += Gimpl::ShiftStaple(
                    Gimpl::CovShiftBackward(U[nu],nu,
                    Gimpl::CovShiftBackward(U[mu],mu,U[nu])), mu);
      }
    }
  }
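  // Consumption sketch (illustrative; types as defined above): the staple is
  // what a heatbath update or gauge force multiplies against the link, e.g.
  //
  //   GaugeMat staple(Umu._grid);
  //   Staple(staple, Umu, mu);
  //   GaugeMat Ulink = PeekIndex<LorentzIndex>(Umu, mu);
  //   LatticeComplex w = trace(Ulink*staple);
  //
  // and, summed over sites, real(w) counts each mu-nu plaquette twice
  // (once via the up staple, once via the down staple).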
  //////////////////////////////////////////////////////
  // Similar to the above, but for the 2x1 rectangle
  //////////////////////////////////////////////////////
  static void dirRectangle(GaugeMat &rect, const std::vector<GaugeMat> &U, const int mu, const int nu)
  {
    rect = Gimpl::CovShiftForward(U[mu],mu,Gimpl::CovShiftForward(U[mu],mu,U[nu]))* // ->->|
           adj(Gimpl::CovShiftForward(U[nu],nu,Gimpl::CovShiftForward(U[mu],mu,U[mu]))) ;
    rect = rect +
           Gimpl::CovShiftForward(U[mu],mu,Gimpl::CovShiftForward(U[nu],nu,U[nu]))* // ->||
           adj(Gimpl::CovShiftForward(U[nu],nu,Gimpl::CovShiftForward(U[nu],nu,U[mu]))) ;
  }
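  // Unrolled (sketch): the first term is the 2x1 loop
  //   U_mu(x) U_mu(x+mu) U_nu(x+2mu) . [U_nu(x) U_mu(x+nu) U_mu(x+mu+nu)]^dag
  // and the second is the 1x2 loop with the long and short sides exchanged,
  // so both orientations are accumulated for each (mu,nu) pair.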
  static void traceDirRectangle(LatticeComplex &rect, const std::vector<GaugeMat> &U, const int mu, const int nu)
  {
    GaugeMat sp(U[0]._grid);
    dirRectangle(sp,U,mu,nu);
    rect = trace(sp);
  }
  static void siteRectangle(LatticeComplex &Rect, const std::vector<GaugeMat> &U)
  {
    LatticeComplex siteRect(U[0]._grid);
    Rect = zero;
    for(int mu=1;mu<Nd;mu++){
      for(int nu=0;nu<mu;nu++){
        traceDirRectangle(siteRect,U,mu,nu);
        Rect = Rect + siteRect;
      }
    }
  }
  //////////////////////////////////////////////////
  // sum over all x,y,z,t and over all planes of rectangle
  //////////////////////////////////////////////////
  static RealD sumRectangle(const GaugeLorentz &Umu){
    std::vector<GaugeMat> U(Nd,Umu._grid);

    for(int mu=0;mu<Nd;mu++){
      U[mu] = PeekIndex<LorentzIndex>(Umu,mu);
    }

    LatticeComplex Rect(Umu._grid);

    siteRectangle(Rect,U);

    TComplex Tp = sum(Rect);
    Complex  p  = TensorRemove(Tp);
    return p.real();
  }
  //////////////////////////////////////////////////
  // average over all x,y,z,t and over all planes of rectangle
  //////////////////////////////////////////////////
  static RealD avgRectangle(const GaugeLorentz &Umu){
    RealD sumrect = sumRectangle(Umu);
    double vol    = Umu._grid->gSites();
    double faces  = (1.0*Nd*(Nd-1)); // 2 distinct orientations summed
    return sumrect/vol/faces/Nc; // Nd, Nc dependent... FIXME
  }
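  // Counting check (illustrative): siteRectangle accumulates both the 2x1
  // and the 1x2 loop for each of the Nd(Nd-1)/2 unordered planes, i.e.
  // Nd(Nd-1) = 12 rectangles per site in Nd=4, matching "faces" above, so a
  // cold configuration again gives avgRectangle == 1.0.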
  //////////////////////////////////////////////////
  // the sum over all rectangular staples on each site
  //////////////////////////////////////////////////
  static void RectStapleDouble(GaugeMat &U2, const GaugeMat &U, int mu){
    U2 = U * Cshift(U,mu,1);
  }
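  // Here U2(x) = U_mu(x) U_mu(x+mu): a pre-multiplied "double link", so the
  // optimised rectangle staple below pays one multiply per length-2 side
  // rather than re-walking two single links for every nu.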
  ////////////////////////////////////////////////////////////////////////////
  // Hop by two optimisation strategy does not work nicely with Gparity. (could do,
  // but need to track two deep where cross boundary and apply a conjugation).
  // Must differentiate this in Gimpl, and use Gimpl::isPeriodicGaugeField to do so.
  ////////////////////////////////////////////////////////////////////////////
  static void RectStapleOptimised(GaugeMat &Stap, std::vector<GaugeMat> &U2, std::vector<GaugeMat> &U, int mu){

    Stap = zero;

    GridBase *grid = U[0]._grid;

    GaugeMat Staple2x1(grid);
    GaugeMat tmp(grid);

    for(int nu=0;nu<Nd;nu++){
      if ( nu!=mu) {

        // Up staple    ___ ___
        //             |       |
        tmp = Cshift(adj(U[nu]),nu,-1);
        tmp = adj(U2[mu])*tmp;
        tmp = Cshift(tmp,mu,-2);

        Staple2x1 = Gimpl::CovShiftForward (U[nu],nu,tmp);

        // Down staple
        //             |___ ___|
        //
        tmp = adj(U2[mu])*U[nu];
        Staple2x1 += Gimpl::CovShiftBackward(U[nu],nu,Cshift(tmp,mu,-2));

        //              ___ ___
        //             |    ___|
        //             |___ ___|
        //
        Stap += Cshift(Gimpl::CovShiftForward (U[mu],mu,Staple2x1),mu,1);

        //              ___ ___
        //             |___    |
        //             |___ ___|
        //

        // tmp  = Staple2x1 * Cshift(U[mu],mu,-2);
        // Stap += Cshift(tmp,mu,1) ;
        Stap += Cshift(Staple2x1,mu,1)*Cshift(U[mu],mu,-1);

        //       --
        //      |  |
        //
        //      |  |
        tmp = Cshift(adj(U2[nu]),nu,-2);
        tmp = Gimpl::CovShiftBackward(U[mu],mu,tmp);
        tmp = U2[nu]*Cshift(tmp,nu,2);
        Stap += Cshift(tmp, mu, 1);

        //      |  |
        //
        //      |  |
        //       --
        tmp = Gimpl::CovShiftBackward(U[mu],mu,U2[nu]);
        tmp = adj(U2[nu])*tmp;
        tmp = Cshift(tmp,nu,-2);
        Stap += Cshift(tmp, mu, 1);
      }
    }
  }
  static void RectStaple(GaugeMat &Stap, const GaugeLorentz &Umu, int mu)
  {
    RectStapleUnoptimised(Stap,Umu,mu);
  }
  static void RectStaple(const GaugeLorentz &Umu, GaugeMat &Stap,
                         std::vector<GaugeMat> &U2,
                         std::vector<GaugeMat> &U, int mu)
  {
    if ( Gimpl::isPeriodicGaugeField() ){
      RectStapleOptimised(Stap,U2,U,mu);
    } else {
      RectStapleUnoptimised(Stap,Umu,mu);
    }
  }
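  // Caller-side sketch for the optimised path (illustrative; the caller is
  // assumed to own and reuse the U and U2 workspaces across mu):
  //
  //   std::vector<GaugeMat> U(Nd,grid), U2(Nd,grid);
  //   for(int d=0;d<Nd;d++){
  //     U[d] = PeekIndex<LorentzIndex>(Umu,d);
  //     RectStapleDouble(U2[d],U[d],d);   // cache the doubled links
  //   }
  //   RectStaple(Umu,Stap,U2,U,mu);       // dispatches on periodicity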
  static void RectStapleUnoptimised(GaugeMat &Stap, const GaugeLorentz &Umu, int mu){
    GridBase *grid = Umu._grid;

    std::vector<GaugeMat> U(Nd,grid);
    for(int d=0;d<Nd;d++){
      U[d] = PeekIndex<LorentzIndex>(Umu,d);
    }

    Stap = zero;

    for(int nu=0;nu<Nd;nu++){
      if ( nu!=mu) {
        //           __ ___
        //          |    __ |
        //
        Stap += Gimpl::ShiftStaple(
                  Gimpl::CovShiftForward (U[mu],mu,
                  Gimpl::CovShiftForward (U[nu],nu,
                  Gimpl::CovShiftBackward(U[mu],mu,
                  Gimpl::CovShiftBackward(U[mu],mu,
                  Gimpl::CovShiftIdentityBackward(U[nu],nu))))) , mu);

        //              __
        //          |__ __ |
        Stap += Gimpl::ShiftStaple(
                  Gimpl::CovShiftForward (U[mu],mu,
                  Gimpl::CovShiftBackward(U[nu],nu,
                  Gimpl::CovShiftBackward(U[mu],mu,
                  Gimpl::CovShiftBackward(U[mu],mu, U[nu])))) , mu);

        //           __
        //          |__ __ |
        Stap += Gimpl::ShiftStaple(
                  Gimpl::CovShiftBackward(U[nu],nu,
                  Gimpl::CovShiftBackward(U[mu],mu,
                  Gimpl::CovShiftBackward(U[mu],mu,
                  Gimpl::CovShiftForward (U[nu],nu,U[mu])))) , mu);

        //           __ ___
        //          |__    |
        Stap += Gimpl::ShiftStaple(
                  Gimpl::CovShiftForward (U[nu],nu,
                  Gimpl::CovShiftBackward(U[mu],mu,
                  Gimpl::CovShiftBackward(U[mu],mu,
                  Gimpl::CovShiftBackward(U[nu],nu,U[mu])))) , mu);

        //       --
        //      |  |
        //
        //      |  |
        Stap += Gimpl::ShiftStaple(
                  Gimpl::CovShiftForward (U[nu],nu,
                  Gimpl::CovShiftForward (U[nu],nu,
                  Gimpl::CovShiftBackward(U[mu],mu,
                  Gimpl::CovShiftBackward(U[nu],nu,
                  Gimpl::CovShiftIdentityBackward(U[nu],nu))))) , mu);

        //      |  |
        //
        //      |  |
        //       --
        Stap += Gimpl::ShiftStaple(
                  Gimpl::CovShiftBackward(U[nu],nu,
                  Gimpl::CovShiftBackward(U[nu],nu,
                  Gimpl::CovShiftBackward(U[mu],mu,
                  Gimpl::CovShiftForward (U[nu],nu,U[nu])))) , mu);
      }
    }
  }
};

typedef WilsonLoops<PeriodicGimplR> ColourWilsonLoops;
typedef WilsonLoops<PeriodicGimplR> U1WilsonLoops;
typedef WilsonLoops<PeriodicGimplR> SU2WilsonLoops;
typedef WilsonLoops<PeriodicGimplR> SU3WilsonLoops;
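// Usage sketch (illustrative; lattice/RNG setup names follow the conventions
// used in Grid's tests, assumed rather than prescribed here):
//
//   GridCartesian     Grid(latt_size,simd_layout,mpi_layout);
//   GridParallelRNG   pRNG(&Grid);  pRNG.SeedFixedIntegers(seeds);
//   LatticeGaugeField Umu(&Grid);
//   SU3::HotConfiguration(pRNG, Umu);
//   RealD plaq = ColourWilsonLoops::avgPlaquette(Umu); // ~0 hot, 1.0 cold
//   RealD rect = ColourWilsonLoops::avgRectangle(Umu);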
}}

#endif