From 164d3691db0da45060553c5f6dbe2abb5e47d08b Mon Sep 17 00:00:00 2001 From: Azusa Yamaguchi Date: Tue, 1 Nov 2016 14:24:22 +0000 Subject: [PATCH 001/101] Staggered --- lib/qcd/action/ActionParams.h | 4 + lib/qcd/action/Actions.h | 7 + lib/qcd/action/fermion/FermionOperatorImpl.h | 97 +++++- .../fermion/ImprovedStaggeredFermion.cc | 309 ++++++++++++++++++ .../action/fermion/ImprovedStaggeredFermion.h | 152 +++++++++ lib/qcd/action/fermion/StaggeredKernels.cc | 223 +++++++++++++ lib/qcd/action/fermion/StaggeredKernels.h | 70 ++++ lib/qcd/action/fermion/WilsonFermion.cc | 19 +- lib/qcd/action/pseudofermion/TwoFlavour.h | 10 +- 9 files changed, 872 insertions(+), 19 deletions(-) create mode 100644 lib/qcd/action/fermion/ImprovedStaggeredFermion.cc create mode 100644 lib/qcd/action/fermion/ImprovedStaggeredFermion.h create mode 100644 lib/qcd/action/fermion/StaggeredKernels.cc create mode 100644 lib/qcd/action/fermion/StaggeredKernels.h diff --git a/lib/qcd/action/ActionParams.h b/lib/qcd/action/ActionParams.h index dcbdfce8..91e94741 100644 --- a/lib/qcd/action/ActionParams.h +++ b/lib/qcd/action/ActionParams.h @@ -45,6 +45,10 @@ namespace QCD { WilsonImplParams() : overlapCommsCompute(false) {}; }; + struct StaggeredImplParams { + StaggeredImplParams() {}; + }; + struct OneFlavourRationalParams { RealD lo; RealD hi; diff --git a/lib/qcd/action/Actions.h b/lib/qcd/action/Actions.h index ba6e577d..1c4acd86 100644 --- a/lib/qcd/action/Actions.h +++ b/lib/qcd/action/Actions.h @@ -53,6 +53,7 @@ Author: paboyle #include #include #include //used by all wilson type fermions +#include //used by all wilson type fermions //////////////////////////////////////////// // Gauge Actions @@ -108,6 +109,10 @@ typedef SymanzikGaugeAction ConjugateSymanzikGaugeAction //////////////////////////////////////////////////////////////////////////////////////////////////// +#define FermOpStaggeredTemplateInstantiate(A) \ + template class A; \ + template class A; + #define FermOp4dVecTemplateInstantiate(A) \ template class A; \ template class A; \ @@ -147,6 +152,8 @@ typedef SymanzikGaugeAction ConjugateSymanzikGaugeAction //#include +#include + #include // Cayley types #include #include diff --git a/lib/qcd/action/fermion/FermionOperatorImpl.h b/lib/qcd/action/fermion/FermionOperatorImpl.h index 0800dea6..0b162f77 100644 --- a/lib/qcd/action/fermion/FermionOperatorImpl.h +++ b/lib/qcd/action/fermion/FermionOperatorImpl.h @@ -343,7 +343,7 @@ class GparityWilsonImpl : public ConjugateGaugeImpl + class StaggeredImpl : public PeriodicGaugeImpl > { + + public: + + typedef RealD _Coeff_t ; + static const int Dimension = Representation::Dimension; + typedef PeriodicGaugeImpl > Gimpl; + + //Necessary? + constexpr bool is_fundamental() const{return Dimension == Nc ? 
1 : 0;} + + const bool LsVectorised=false; + typedef _Coeff_t Coeff_t; + + INHERIT_GIMPL_TYPES(Gimpl); + + template using iImplSpinor = iScalar > >; + template using iImplHalfSpinor = iVector >, Ngp>; + template using iImplDoubledGaugeField = iVector >, Nds>; + + typedef iImplSpinor SiteSpinor; + typedef iImplHalfSpinor SiteHalfSpinor; + typedef iImplDoubledGaugeField SiteDoubledGaugeField; + + typedef Lattice FermionField; + typedef Lattice DoubledGaugeField; + + typedef SimpleCompressor Compressor; + typedef StaggeredImplParams ImplParams; + typedef CartesianStencil StencilImpl; + + ImplParams Params; + + StaggeredImpl(const ImplParams &p = ImplParams()) : Params(p){}; + + inline void multLink(SiteSpinor &phi, + const SiteDoubledGaugeField &U, + const SiteSpinor &chi, + int mu){ + mult(&phi(), &U(mu), &chi()); + } + inline void multLinkAdd(SiteSpinor &phi, + const SiteDoubledGaugeField &U, + const SiteSpinor &chi, + int mu){ + mac(&phi(), &U(mu), &chi()); + } + + template + inline void loadLinkElement(Simd &reg, ref &memory) { + reg = memory; + } + + inline void DoubleStore(GridBase *GaugeGrid, + DoubledGaugeField &Uds, + DoubledGaugeField &UUUds, // for Naik term + const GaugeField &Umu) { + conformable(Uds._grid, GaugeGrid); + conformable(Umu._grid, GaugeGrid); + GaugeLinkField U(GaugeGrid); + for (int mu = 0; mu < Nd; mu++) { + U = PeekIndex(Umu, mu); + PokeIndex(Uds, U, mu); + PokeIndex(UUUds, U, mu); + std::cout << GridLogMessage << " NOT created the treble links for staggered yet" <(Uds, U, mu + 4); + PokeIndex(UUUds, U, mu+4); + } + } + + inline void InsertForce4D(GaugeField &mat, FermionField &Btilde, FermionField &A,int mu){ + GaugeLinkField link(mat._grid); + link = TraceIndex(outerProduct(Btilde,A)); + PokeIndex(mat,link,mu); + } + + inline void InsertForce5D(GaugeField &mat, FermionField &Btilde, FermionField &Atilde,int mu){ + assert (0); + // Must never hit + } + }; + + + typedef WilsonImpl WilsonImplR; // Real.. whichever prec typedef WilsonImpl WilsonImplF; // Float typedef WilsonImpl WilsonImplD; // Double @@ -527,6 +618,10 @@ PARALLEL_FOR_LOOP typedef GparityWilsonImpl GparityWilsonImplF; // Float typedef GparityWilsonImpl GparityWilsonImplD; // Double + typedef StaggeredImpl StaggeredImplR; // Real.. whichever prec + typedef StaggeredImpl StaggeredImplF; // Float + typedef StaggeredImpl StaggeredImplD; // Double + }} #endif diff --git a/lib/qcd/action/fermion/ImprovedStaggeredFermion.cc b/lib/qcd/action/fermion/ImprovedStaggeredFermion.cc new file mode 100644 index 00000000..73cc272a --- /dev/null +++ b/lib/qcd/action/fermion/ImprovedStaggeredFermion.cc @@ -0,0 +1,309 @@ +/************************************************************************************* + +Grid physics library, www.github.com/paboyle/Grid + +Source file: ./lib/qcd/action/fermion/ImprovedStaggeredFermion.cc + +Copyright (C) 2015 + +Author: Azusa Yamaguchi, Peter Boyle + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 2 of the License, or +(at your option) any later version. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details.
+ +You should have received a copy of the GNU General Public License along +with this program; if not, write to the Free Software Foundation, Inc., +51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + +See the full license in the file "LICENSE" in the top level distribution +directory +*************************************************************************************/ +/* END LEGAL */ +#include + +namespace Grid { +namespace QCD { + +const std::vector +ImprovedStaggeredFermionStatic::directions({0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3}); +const std::vector +ImprovedStaggeredFermionStatic::displacements({1, 1, 1, 1, -1, -1, -1, -1, 3, 3, 3, 3, -3, -3, -3, -3}); + +///////////////////////////////// +// Constructor and gauge import +///////////////////////////////// + +template +ImprovedStaggeredFermion::ImprovedStaggeredFermion(GaugeField &_Umu, GridCartesian &Fgrid, + GridRedBlackCartesian &Hgrid, RealD _mass, + const ImplParams &p) + : Kernels(p), + _grid(&Fgrid), + _cbgrid(&Hgrid), + Stencil(&Fgrid, npoint, Even, directions, displacements), + StencilEven(&Hgrid, npoint, Even, directions, displacements), // source is Even + StencilOdd(&Hgrid, npoint, Odd, directions, displacements), // source is Odd + mass(_mass), + Lebesgue(_grid), + LebesgueEvenOdd(_cbgrid), + Umu(&Fgrid), + UmuEven(&Hgrid), + UmuOdd(&Hgrid), + UUUmu(&Fgrid), + UUUmuEven(&Hgrid), + UUUmuOdd(&Hgrid) { + // Allocate the required comms buffer + ImportGauge(_Umu); +} + +template +void ImprovedStaggeredFermion::ImportGauge(const GaugeField &_Umu) { + GaugeField HUmu(_Umu._grid); + HUmu = _Umu * (-0.5); + Impl::DoubleStore(GaugeGrid(), Umu, UUUmu, HUmu); + pickCheckerboard(Even, UmuEven, Umu); + pickCheckerboard(Odd, UmuOdd, Umu); + pickCheckerboard(Even, UUUmuEven, UUUmu); + pickCheckerboard(Odd, UUUmuOdd, UUUmu); +} + +///////////////////////////// +// Implement the interface +///////////////////////////// + +template +RealD ImprovedStaggeredFermion::M(const FermionField &in, FermionField &out) { + out.checkerboard = in.checkerboard; + Dhop(in, out, DaggerNo); + return axpy_norm(out, mass, in, out); +} + +template +RealD ImprovedStaggeredFermion::Mdag(const FermionField &in, FermionField &out) { + out.checkerboard = in.checkerboard; + Dhop(in, out, DaggerYes); + return axpy_norm(out, mass, in, out); +} + +template +void ImprovedStaggeredFermion::Meooe(const FermionField &in, FermionField &out) { + if (in.checkerboard == Odd) { + DhopEO(in, out, DaggerNo); + } else { + DhopOE(in, out, DaggerNo); + } +} +template +void ImprovedStaggeredFermion::MeooeDag(const FermionField &in, FermionField &out) { + if (in.checkerboard == Odd) { + DhopEO(in, out, DaggerYes); + } else { + DhopOE(in, out, DaggerYes); + } +} + +template +void ImprovedStaggeredFermion::Mooee(const FermionField &in, FermionField &out) { + out.checkerboard = in.checkerboard; + typename FermionField::scalar_type scal(mass); + out = scal * in; +} + +template +void ImprovedStaggeredFermion::MooeeDag(const FermionField &in, FermionField &out) { + out.checkerboard = in.checkerboard; + Mooee(in, out); +} + +template +void ImprovedStaggeredFermion::MooeeInv(const FermionField &in, FermionField &out) { + out.checkerboard = in.checkerboard; + out = (1.0 / (mass)) * in; +} + +template +void ImprovedStaggeredFermion::MooeeInvDag(const FermionField &in, + FermionField &out) { + out.checkerboard = in.checkerboard; + MooeeInv(in, out); +} + +/////////////////////////////////// +// Internal +/////////////////////////////////// + +template +void 
ImprovedStaggeredFermion::DerivInternal(StencilImpl &st, DoubledGaugeField &U, DoubledGaugeField &UUU, + GaugeField & mat, + const FermionField &A, const FermionField &B, int dag) { + assert((dag == DaggerNo) || (dag == DaggerYes)); + + Compressor compressor; + + FermionField Btilde(B._grid); + FermionField Atilde(B._grid); + Atilde = A; + + st.HaloExchange(B, compressor); + + for (int mu = 0; mu < Nd; mu++) { + + //////////////////////// + // Call the single hop + //////////////////////// + PARALLEL_FOR_LOOP + for (int sss = 0; sss < B._grid->oSites(); sss++) { + Kernels::DhopDir(st, U, UUU, st.CommBuf(), sss, sss, B, Btilde, mu,1); + } + + // Force in three link terms + // + // Impl::InsertForce4D(mat, Btilde, Atilde, mu); + // + // dU_ac(x)/dt = i p_ab U_bc(x) + // + // => dS_f/dt = dS_f/dU_ac(x) . dU_ac(x)/dt = i p_ab U_bc(x) dS_f/dU_ac(x) + // + // One link: form fragments S_f = A U B + // + // write Btilde = U(x) B(x+mu) + // + // mat+= TraceIndex(outerProduct(Btilde,A)); + // + // Three link: form fragments S_f = A UUU B + // + // mat+= outer ( A, UUUB) <-- Best take DhopDeriv with one linke or identity matrix + // mat+= outer ( AU, UUB) <-- and then use covariant cshift? + // mat+= outer ( AUU, UB) <-- Returned from call to DhopDir + + assert(0);// need to figure out the force interface with a blasted three link term. + + } +} + +template +void ImprovedStaggeredFermion::DhopDeriv(GaugeField &mat, const FermionField &U, const FermionField &V, int dag) { + + conformable(U._grid, _grid); + conformable(U._grid, V._grid); + conformable(U._grid, mat._grid); + + mat.checkerboard = U.checkerboard; + + DerivInternal(Stencil, Umu, UUUmu, mat, U, V, dag); +} + +template +void ImprovedStaggeredFermion::DhopDerivOE(GaugeField &mat, const FermionField &U, const FermionField &V, int dag) { + + conformable(U._grid, _cbgrid); + conformable(U._grid, V._grid); + conformable(U._grid, mat._grid); + + assert(V.checkerboard == Even); + assert(U.checkerboard == Odd); + mat.checkerboard = Odd; + + DerivInternal(StencilEven, UmuOdd, UUUmuOdd, mat, U, V, dag); +} + +template +void ImprovedStaggeredFermion::DhopDerivEO(GaugeField &mat, const FermionField &U, const FermionField &V, int dag) { + + conformable(U._grid, _cbgrid); + conformable(U._grid, V._grid); + conformable(U._grid, mat._grid); + + assert(V.checkerboard == Odd); + assert(U.checkerboard == Even); + mat.checkerboard = Even; + + DerivInternal(StencilOdd, UmuEven, UUUmuEven, mat, U, V, dag); +} + +template +void ImprovedStaggeredFermion::Dhop(const FermionField &in, FermionField &out, int dag) { + conformable(in._grid, _grid); // verifies full grid + conformable(in._grid, out._grid); + + out.checkerboard = in.checkerboard; + + DhopInternal(Stencil, Lebesgue, Umu, UUUmu, in, out, dag); +} + +template +void ImprovedStaggeredFermion::DhopOE(const FermionField &in, FermionField &out, int dag) { + conformable(in._grid, _cbgrid); // verifies half grid + conformable(in._grid, out._grid); // drops the cb check + + assert(in.checkerboard == Even); + out.checkerboard = Odd; + + DhopInternal(StencilEven, LebesgueEvenOdd, UmuOdd, UUUmuOdd, in, out, dag); +} + +template +void ImprovedStaggeredFermion::DhopEO(const FermionField &in, FermionField &out, int dag) { + conformable(in._grid, _cbgrid); // verifies half grid + conformable(in._grid, out._grid); // drops the cb check + + assert(in.checkerboard == Odd); + out.checkerboard = Even; + + DhopInternal(StencilOdd, LebesgueEvenOdd, UmuEven, UUUmuEven, in, out, dag); +} + +template +void 
ImprovedStaggeredFermion::Mdir(const FermionField &in, FermionField &out, int dir, int disp) { + DhopDir(in, out, dir, disp); +} + +template +void ImprovedStaggeredFermion::DhopDir(const FermionField &in, FermionField &out, int dir, int disp) { + + Compressor compressor; + Stencil.HaloExchange(in, compressor); + +PARALLEL_FOR_LOOP + for (int sss = 0; sss < in._grid->oSites(); sss++) { + Kernels::DhopDir(Stencil, Umu, UUUmu, Stencil.CommBuf(), sss, sss, in, out, dir, disp); + } +}; + +template +void ImprovedStaggeredFermion::DhopInternal(StencilImpl &st, LebesgueOrder &lo, + DoubledGaugeField &U, + DoubledGaugeField &UUU, + const FermionField &in, + FermionField &out, int dag) { + assert((dag == DaggerNo) || (dag == DaggerYes)); + + Compressor compressor; + st.HaloExchange(in, compressor); + + if (dag == DaggerYes) { + PARALLEL_FOR_LOOP + for (int sss = 0; sss < in._grid->oSites(); sss++) { + Kernels::DhopSiteDag(st, lo, U, UUU, st.CommBuf(), sss, sss, in, out); + } + } else { + PARALLEL_FOR_LOOP + for (int sss = 0; sss < in._grid->oSites(); sss++) { + Kernels::DhopSite(st, lo, U, UUU, st.CommBuf(), sss, sss, in, out); + } + } +}; + +FermOpStaggeredTemplateInstantiate(ImprovedStaggeredFermion); + + //AdjointFermOpTemplateInstantiate(ImprovedStaggeredFermion); + //TwoIndexFermOpTemplateInstantiate(ImprovedStaggeredFermion); + +}} diff --git a/lib/qcd/action/fermion/ImprovedStaggeredFermion.h b/lib/qcd/action/fermion/ImprovedStaggeredFermion.h new file mode 100644 index 00000000..bf05240e --- /dev/null +++ b/lib/qcd/action/fermion/ImprovedStaggeredFermion.h @@ -0,0 +1,152 @@ +/************************************************************************************* + +Grid physics library, www.github.com/paboyle/Grid + +Source file: ./lib/qcd/action/fermion/ImprovedStaggered.h + +Copyright (C) 2015 + +Author: Azusa Yamaguchi, Peter Boyle + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 2 of the License, or +(at your option) any later version. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License along +with this program; if not, write to the Free Software Foundation, Inc., +51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
+ +See the full license in the file "LICENSE" in the top level distribution +directory +*************************************************************************************/ +/* END LEGAL */ +#ifndef GRID_QCD_IMPR_STAG_FERMION_H +#define GRID_QCD_IMPR_STAG_FERMION_H + +namespace Grid { + +namespace QCD { + +class ImprovedStaggeredFermionStatic { + public: + static const std::vector directions; + static const std::vector displacements; + static const int npoint = 16; +}; + +template +class ImprovedStaggeredFermion : public StaggeredKernels, public ImprovedStaggeredFermionStatic { + public: + INHERIT_IMPL_TYPES(Impl); + typedef StaggeredKernels Kernels; + + /////////////////////////////////////////////////////////////// + // Implement the abstract base + /////////////////////////////////////////////////////////////// + GridBase *GaugeGrid(void) { return _grid; } + GridBase *GaugeRedBlackGrid(void) { return _cbgrid; } + GridBase *FermionGrid(void) { return _grid; } + GridBase *FermionRedBlackGrid(void) { return _cbgrid; } + + ////////////////////////////////////////////////////////////////// + // override multiply; cut number routines if pass dagger argument + // and also make interface more uniformly consistent + ////////////////////////////////////////////////////////////////// + RealD M(const FermionField &in, FermionField &out); + RealD Mdag(const FermionField &in, FermionField &out); + + ///////////////////////////////////////////////////////// + // half checkerboard operations + ///////////////////////////////////////////////////////// + void Meooe(const FermionField &in, FermionField &out); + void MeooeDag(const FermionField &in, FermionField &out); + + // allow override for twisted mass and clover + virtual void Mooee(const FermionField &in, FermionField &out); + virtual void MooeeDag(const FermionField &in, FermionField &out); + virtual void MooeeInv(const FermionField &in, FermionField &out); + virtual void MooeeInvDag(const FermionField &in, FermionField &out); + + //////////////////////// + // Derivative interface + //////////////////////// + // Interface calls an internal routine + void DhopDeriv (GaugeField &mat, const FermionField &U, const FermionField &V, int dag); + void DhopDerivOE(GaugeField &mat, const FermionField &U, const FermionField &V, int dag); + void DhopDerivEO(GaugeField &mat, const FermionField &U, const FermionField &V, int dag); + + /////////////////////////////////////////////////////////////// + // non-hermitian hopping term; half cb or both + /////////////////////////////////////////////////////////////// + void Dhop (const FermionField &in, FermionField &out, int dag); + void DhopOE(const FermionField &in, FermionField &out, int dag); + void DhopEO(const FermionField &in, FermionField &out, int dag); + + /////////////////////////////////////////////////////////////// + // Multigrid assistance; force term uses too + /////////////////////////////////////////////////////////////// + void Mdir(const FermionField &in, FermionField &out, int dir, int disp); + void DhopDir(const FermionField &in, FermionField &out, int dir, int disp); + + /////////////////////////////////////////////////////////////// + // Extra methods added by derived + /////////////////////////////////////////////////////////////// + void DerivInternal(StencilImpl &st, + DoubledGaugeField &U,DoubledGaugeField &UUU, + GaugeField &mat, + const FermionField &A, const FermionField &B, int dag); + + void DhopInternal(StencilImpl &st, LebesgueOrder &lo, DoubledGaugeField &U,DoubledGaugeField &UUU, 
+ const FermionField &in, FermionField &out, int dag); + + // Constructor + ImprovedStaggeredFermion(GaugeField &_Umu, GridCartesian &Fgrid, + GridRedBlackCartesian &Hgrid, RealD _mass, + const ImplParams &p = ImplParams()); + + // DoubleStore impl dependent + void ImportGauge(const GaugeField &_Umu); + + /////////////////////////////////////////////////////////////// + // Data members require to support the functionality + /////////////////////////////////////////////////////////////// + + // protected: + public: + // any other parameters of action ??? + + RealD mass; + + GridBase *_grid; + GridBase *_cbgrid; + + // Defines the stencils for even and odd + StencilImpl Stencil; + StencilImpl StencilEven; + StencilImpl StencilOdd; + + // Copy of the gauge field , with even and odd subsets + DoubledGaugeField Umu; + DoubledGaugeField UmuEven; + DoubledGaugeField UmuOdd; + + DoubledGaugeField UUUmu; + DoubledGaugeField UUUmuEven; + DoubledGaugeField UUUmuOdd; + + LebesgueOrder Lebesgue; + LebesgueOrder LebesgueEvenOdd; +}; + +typedef ImprovedStaggeredFermion ImprovedStaggeredFermionF; +typedef ImprovedStaggeredFermion ImprovedStaggeredFermionD; + +} +} +#endif diff --git a/lib/qcd/action/fermion/StaggeredKernels.cc b/lib/qcd/action/fermion/StaggeredKernels.cc new file mode 100644 index 00000000..8df7f3e4 --- /dev/null +++ b/lib/qcd/action/fermion/StaggeredKernels.cc @@ -0,0 +1,223 @@ +/************************************************************************************* + +Grid physics library, www.github.com/paboyle/Grid + +Source file: ./lib/qcd/action/fermion/WilsonKernels.cc + +Copyright (C) 2015 + +Author: Azusa Yamaguchi, Peter Boyle + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 2 of the License, or +(at your option) any later version. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License along +with this program; if not, write to the Free Software Foundation, Inc., +51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + +See the full license in the file "LICENSE" in the top level distribution +directory +*************************************************************************************/ +/* END LEGAL */ +#include +namespace Grid { +namespace QCD { + +template +StaggeredKernels::StaggeredKernels(const ImplParams &p) : Base(p){}; + +//////////////////////////////////////////// +// Generic implementation; move to different file? 
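+// DhopSiteDepth below walks the eight stencil legs at a single hop depth: the stencil
+// packs the one-hop entries in slots 0-7 and the three-hop (Naik) entries in slots 8-15,
+// so threeLink=1 simply skews the stencil lookup by 8.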
+//////////////////////////////////////////// + +template +void StaggeredKernels::DhopSiteDepth(StencilImpl &st, LebesgueOrder &lo, DoubledGaugeField &U, + SiteSpinor *buf, int sF, + int sU, const FermionField &in, SiteSpinor &out,int threeLink) { + const SiteSpinor *chi_p; + SiteSpinor chi; + SiteSpinor Uchi; + StencilEntry *SE; + int ptype; + int skew = 0; + if (threeLink) skew=8; + /////////////////////////// + // Xp + /////////////////////////// + + SE = st.GetEntry(ptype, Xp+skew, sF); + if (SE->_is_local) { + if (SE->_permute) { + chi_p = &chi; + permute(chi, in._odata[SE->_offset], ptype); + } else { + chi_p = &in._odata[SE->_offset]; + } + } else { + chi_p = &buf[SE->_offset]; + } + Impl::multLink(Uchi, U._odata[sU], *chi_p, Xp); + + /////////////////////////// + // Yp + /////////////////////////// + SE = st.GetEntry(ptype, Yp+skew, sF); + if (SE->_is_local) { + if (SE->_permute) { + chi_p = &chi; + permute(chi, in._odata[SE->_offset], ptype); + } else { + chi_p = &in._odata[SE->_offset]; + } + } else { + chi_p = &buf[SE->_offset]; + } + Impl::multLinkAdd(Uchi, U._odata[sU], *chi_p, Yp); + + /////////////////////////// + // Zp + /////////////////////////// + SE = st.GetEntry(ptype, Zp+skew, sF); + if (SE->_is_local) { + if (SE->_permute) { + chi_p = &chi; + permute(chi, in._odata[SE->_offset], ptype); + } else { + chi_p = &in._odata[SE->_offset]; + } + } else { + chi_p = &buf[SE->_offset]; + } + Impl::multLinkAdd(Uchi, U._odata[sU], *chi_p, Zp); + + /////////////////////////// + // Tp + /////////////////////////// + SE = st.GetEntry(ptype, Tp+skew, sF); + if (SE->_is_local) { + if (SE->_permute) { + chi_p = &chi; + permute(chi, in._odata[SE->_offset], ptype); + } else { + chi_p = &in._odata[SE->_offset]; + } + } else { + chi_p = &buf[SE->_offset]; + } + Impl::multLinkAdd(Uchi, U._odata[sU], *chi_p, Tp); + + /////////////////////////// + // Xm + /////////////////////////// + SE = st.GetEntry(ptype, Xm+skew, sF); + if (SE->_is_local) { + if (SE->_permute) { + chi_p = &chi; + permute(chi, in._odata[SE->_offset], ptype); + } else { + chi_p = &in._odata[SE->_offset]; + } + } else { + chi_p = &buf[SE->_offset]; + } + Impl::multLinkAdd(Uchi, U._odata[sU], *chi_p, Xm); + + /////////////////////////// + // Ym + /////////////////////////// + SE = st.GetEntry(ptype, Ym+skew, sF); + if (SE->_is_local) { + if (SE->_permute) { + chi_p = &chi; + permute(chi, in._odata[SE->_offset], ptype); + } else { + chi_p = &in._odata[SE->_offset]; + } + } else { + chi_p = &buf[SE->_offset]; + } + Impl::multLinkAdd(Uchi, U._odata[sU], *chi_p, Ym); + + /////////////////////////// + // Zm + /////////////////////////// + SE = st.GetEntry(ptype, Zm+skew, sF); + if (SE->_is_local) { + if (SE->_permute) { + chi_p = &chi; + permute(chi, in._odata[SE->_offset], ptype); + } else { + chi_p = &in._odata[SE->_offset]; + } + } else { + chi_p = &buf[SE->_offset]; + } + Impl::multLinkAdd(Uchi, U._odata[sU], *chi_p, Zm); + + /////////////////////////// + // Tm + /////////////////////////// + SE = st.GetEntry(ptype, Tm+skew, sF); + if (SE->_is_local) { + if (SE->_permute) { + chi_p = &chi; + permute(chi, in._odata[SE->_offset], ptype); + } else { + chi_p = &in._odata[SE->_offset]; + } + } else { + chi_p = &buf[SE->_offset]; + } + Impl::multLinkAdd(Uchi, U._odata[sU], *chi_p, Tm); + + vstream(out, Uchi); +}; + +// Need controls to do interior, exterior, or both +template +void StaggeredKernels::DhopSiteDag(StencilImpl &st, LebesgueOrder &lo, DoubledGaugeField &U, DoubledGaugeField &UUU, + SiteSpinor *buf, int sF, + int sU, const FermionField &in, FermionField
&out) { + SiteSpinor naik; + SiteSpinor naive; + int oneLink =0; + int threeLink=1; + DhopSiteDepth(st,lo,U,buf,sF,sU,in,naive,oneLink); + DhopSiteDepth(st,lo,UUU,buf,sF,sU,in,naik,threeLink); + out._odata[sF] =naive+naik; +}; +template +void StaggeredKernels::DhopSite(StencilImpl &st, LebesgueOrder &lo, DoubledGaugeField &U, DoubledGaugeField &UUU, + SiteSpinor *buf, int sF, + int sU, const FermionField &in, FermionField &out) { + SiteSpinor naik; + SiteSpinor naive; + int oneLink =0; + int threeLink=1; + DhopSiteDepth(st,lo,U,buf,sF,sU,in,naive,oneLink); + DhopSiteDepth(st,lo,UUU,buf,sF,sU,in,naik,threeLink); + out._odata[sF] =-naive-naik; +}; + +template +void StaggeredKernels::DhopDir( StencilImpl &st, DoubledGaugeField &U, DoubledGaugeField &UUU, SiteSpinor *buf, int sF, + int sU, const FermionField &in, FermionField &out, int dir, int disp) +{ + // Disp should be either +1,-1,+3,-3 + // What about "dag" ? + // Because we work out pU . dS/dU + // U + assert(0); +} + +FermOpStaggeredTemplateInstantiate(StaggeredKernels); + +}} + diff --git a/lib/qcd/action/fermion/StaggeredKernels.h b/lib/qcd/action/fermion/StaggeredKernels.h new file mode 100644 index 00000000..f51a4b37 --- /dev/null +++ b/lib/qcd/action/fermion/StaggeredKernels.h @@ -0,0 +1,70 @@ +/************************************************************************************* + +Grid physics library, www.github.com/paboyle/Grid + +Source file: ./lib/qcd/action/fermion/StaggeredKernels.h + +Copyright (C) 2015 + +Author: Azusa Yamaguchi, Peter Boyle + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 2 of the License, or +(at your option) any later version. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License along +with this program; if not, write to the Free Software Foundation, Inc., +51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + +See the full license in the file "LICENSE" in the top level distribution +directory +*************************************************************************************/ +/* END LEGAL */ +#ifndef GRID_QCD_STAGGERED_KERNELS_H +#define GRID_QCD_STAGGERED_KERNELS_H + +namespace Grid { +namespace QCD { + + //////////////////////////////////////////////////////////////////////////////////////////////////////////////// + // Helper routines that implement Staggered stencil for a single site. 
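+ // DhopSite and DhopSiteDag assemble the full operator from two DhopSiteDepth calls,
+ // one-link with U and three-link with UUU; the dagger variant differs only by an overall sign.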
+ //////////////////////////////////////////////////////////////////////////////////////////////////////////////// +class StaggeredKernelsStatic { + public: +}; + +template class StaggeredKernels : public FermionOperator , public StaggeredKernelsStatic { + public: + + INHERIT_IMPL_TYPES(Impl); + typedef FermionOperator Base; + +public: + + void DhopDir(StencilImpl &st, DoubledGaugeField &U, DoubledGaugeField &UUU, SiteSpinor * buf, + int sF, int sU, const FermionField &in, FermionField &out, int dir,int disp); + + void DhopSiteDepth(StencilImpl &st, LebesgueOrder &lo, DoubledGaugeField &U, SiteSpinor * buf, + int sF, int sU, const FermionField &in, SiteSpinor &out,int threeLink); + + void DhopSite(StencilImpl &st, LebesgueOrder &lo, DoubledGaugeField &U, DoubledGaugeField &UUU, SiteSpinor * buf, + int sF, int sU, const FermionField &in, FermionField &out); + + void DhopSiteDag(StencilImpl &st, LebesgueOrder &lo, DoubledGaugeField &U, DoubledGaugeField &UUU, SiteSpinor * buf, + int sF, int sU, const FermionField &in, FermionField &out); + +public: + + StaggeredKernels(const ImplParams &p = ImplParams()); + +}; + +}} + +#endif diff --git a/lib/qcd/action/fermion/WilsonFermion.cc b/lib/qcd/action/fermion/WilsonFermion.cc index 4bc28bc7..845250fc 100644 --- a/lib/qcd/action/fermion/WilsonFermion.cc +++ b/lib/qcd/action/fermion/WilsonFermion.cc @@ -34,10 +34,9 @@ directory namespace Grid { namespace QCD { -const std::vector WilsonFermionStatic::directions({0, 1, 2, 3, 0, 1, 2, - 3}); -const std::vector WilsonFermionStatic::displacements({1, 1, 1, 1, -1, -1, - -1, -1}); +const std::vector WilsonFermionStatic::directions({0, 1, 2, 3, 0, 1, 2, 3}); +const std::vector WilsonFermionStatic::displacements({1, 1, 1, 1, -1, -1, -1, -1}); + int WilsonFermionStatic::HandOptDslash; ///////////////////////////////// @@ -166,8 +165,7 @@ void WilsonFermion::DerivInternal(StencilImpl &st, DoubledGaugeField &U, //////////////////////// PARALLEL_FOR_LOOP for (int sss = 0; sss < B._grid->oSites(); sss++) { - Kernels::DiracOptDhopDir(st, U, st.CommBuf(), sss, sss, B, Btilde, mu, - gamma); + Kernels::DiracOptDhopDir(st, U, st.CommBuf(), sss, sss, B, Btilde, mu, gamma); } ////////////////////////////////////////////////// @@ -277,8 +275,7 @@ void WilsonFermion::DhopDirDisp(const FermionField &in, FermionField &out, PARALLEL_FOR_LOOP for (int sss = 0; sss < in._grid->oSites(); sss++) { - Kernels::DiracOptDhopDir(Stencil, Umu, Stencil.CommBuf(), sss, sss, in, out, - dirdisp, gamma); + Kernels::DiracOptDhopDir(Stencil, Umu, Stencil.CommBuf(), sss, sss, in, out, dirdisp, gamma); } }; @@ -295,14 +292,12 @@ void WilsonFermion::DhopInternal(StencilImpl &st, LebesgueOrder &lo, if (dag == DaggerYes) { PARALLEL_FOR_LOOP for (int sss = 0; sss < in._grid->oSites(); sss++) { - Kernels::DiracOptDhopSiteDag(st, lo, U, st.CommBuf(), sss, sss, 1, 1, in, - out); + Kernels::DiracOptDhopSiteDag(st, lo, U, st.CommBuf(), sss, sss, 1, 1, in, out); } } else { PARALLEL_FOR_LOOP for (int sss = 0; sss < in._grid->oSites(); sss++) { - Kernels::DiracOptDhopSite(st, lo, U, st.CommBuf(), sss, sss, 1, 1, in, - out); + Kernels::DiracOptDhopSite(st, lo, U, st.CommBuf(), sss, sss, 1, 1, in, out); } } }; diff --git a/lib/qcd/action/pseudofermion/TwoFlavour.h b/lib/qcd/action/pseudofermion/TwoFlavour.h index 6b65a95d..ddc17d42 100644 --- a/lib/qcd/action/pseudofermion/TwoFlavour.h +++ b/lib/qcd/action/pseudofermion/TwoFlavour.h @@ -63,8 +63,7 @@ class TwoFlavourPseudoFermionAction : public Action { Phi(Op.FermionGrid()){}; 
////////////////////////////////////////////////////////////////////////////////////// - // Push the gauge field in to the dops. Assume any BC's and smearing already - // applied + // Push the gauge field in to the dops. Assume any BC's and smearing already applied ////////////////////////////////////////////////////////////////////////////////////// virtual void refresh(const GaugeField &U, GridParallelRNG &pRNG) { // P(phi) = e^{- phi^dag (MdagM)^-1 phi} @@ -107,8 +106,7 @@ class TwoFlavourPseudoFermionAction : public Action { MdagMOp.Op(X, Y); RealD action = norm2(Y); - std::cout << GridLogMessage << "Pseudofermion action " << action - << std::endl; + std::cout << GridLogMessage << "Pseudofermion action " << action << std::endl; return action; }; @@ -119,6 +117,7 @@ class TwoFlavourPseudoFermionAction : public Action { // // = - Ydag dM X - Xdag dMdag Y // + // ////////////////////////////////////////////////////// virtual void deriv(const GaugeField &U, GaugeField &dSdU) { FermOp.ImportGauge(U); @@ -133,8 +132,7 @@ class TwoFlavourPseudoFermionAction : public Action { DerivativeSolver(MdagMOp, Phi, X); // X = (MdagM)^-1 phi MdagMOp.Op(X, Y); // Y = M X = (Mdag)^-1 phi - // Our conventions really make this UdSdU; We do not differentiate wrt Udag - // here. + // Our conventions really make this UdSdU; We do not differentiate wrt Udag here. // So must take dSdU - adj(dSdU) and left multiply by mom to get dS/dt. FermOp.MDeriv(tmp, Y, X, DaggerNo); From 1c5b7a6be5d2d65d268cb0af048b963d078109ec Mon Sep 17 00:00:00 2001 From: Azusa Yamaguchi Date: Thu, 3 Nov 2016 16:26:56 +0000 Subject: [PATCH 002/101] Staggered phases first cut, c1, c2, u0 --- benchmarks/Benchmark_staggered.cc | 133 ++++++++++++++++++ lib/qcd/action/Actions.h | 3 + lib/qcd/action/fermion/FermionOperatorImpl.h | 48 ++++++- .../fermion/ImprovedStaggeredFermion.cc | 51 ++++++- .../action/fermion/ImprovedStaggeredFermion.h | 4 + 5 files changed, 227 insertions(+), 12 deletions(-) create mode 100644 benchmarks/Benchmark_staggered.cc diff --git a/benchmarks/Benchmark_staggered.cc b/benchmarks/Benchmark_staggered.cc new file mode 100644 index 00000000..c647cda0 --- /dev/null +++ b/benchmarks/Benchmark_staggered.cc @@ -0,0 +1,133 @@ + /************************************************************************************* + + Grid physics library, www.github.com/paboyle/Grid + + Source file: ./benchmarks/Benchmark_wilson.cc + + Copyright (C) 2015 + +Author: Peter Boyle +Author: paboyle + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License along + with this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
+ + See the full license in the file "LICENSE" in the top level distribution directory + *************************************************************************************/ + /* END LEGAL */ +#include + +using namespace std; +using namespace Grid; +using namespace Grid::QCD; + +int main (int argc, char ** argv) +{ + Grid_init(&argc,&argv); + + std::vector latt_size = GridDefaultLatt(); + std::vector simd_layout = GridDefaultSimd(Nd,vComplex::Nsimd()); + std::vector mpi_layout = GridDefaultMpi(); + GridCartesian Grid(latt_size,simd_layout,mpi_layout); + GridRedBlackCartesian RBGrid(latt_size,simd_layout,mpi_layout); + + int threads = GridThread::GetThreads(); + std::cout< seeds({1,2,3,4}); + GridParallelRNG pRNG(&Grid); + pRNG.SeedFixedIntegers(seeds); + // pRNG.SeedRandomDevice(); + + typedef typename ImprovedStaggeredFermionR::FermionField FermionField; + typename ImprovedStaggeredFermionR::ImplParams params; + + FermionField src (&Grid); random(pRNG,src); + FermionField result(&Grid); result=zero; + FermionField ref(&Grid); ref=zero; + FermionField tmp(&Grid); tmp=zero; + FermionField err(&Grid); tmp=zero; + LatticeGaugeField Umu(&Grid); random(pRNG,Umu); + std::vector U(4,&Grid); + + double volume=1; + for(int mu=0;mu(Umu,U[nn],nn); + } +#endif + + for(int mu=0;mu(Umu,mu); + } + ref = zero; + /* + { // Naive wilson implementation + ref = zero; + for(int mu=0;mu GparityMobiusFermionR; typedef MobiusFermion GparityMobiusFermionF; typedef MobiusFermion GparityMobiusFermionD; +typedef ImprovedStaggeredFermion ImprovedStaggeredFermionR; +typedef ImprovedStaggeredFermion ImprovedStaggeredFermionF; +typedef ImprovedStaggeredFermion ImprovedStaggeredFermionD; }} /////////////////////////////////////////////////////////////////////////////// diff --git a/lib/qcd/action/fermion/FermionOperatorImpl.h b/lib/qcd/action/fermion/FermionOperatorImpl.h index 0b162f77..98d2f859 100644 --- a/lib/qcd/action/fermion/FermionOperatorImpl.h +++ b/lib/qcd/action/fermion/FermionOperatorImpl.h @@ -520,14 +520,17 @@ PARALLEL_FOR_LOOP INHERIT_GIMPL_TYPES(Gimpl); + template using iImplScalar = iScalar > >; template using iImplSpinor = iScalar > >; template using iImplHalfSpinor = iVector >, Ngp>; template using iImplDoubledGaugeField = iVector >, Nds>; + typedef iImplScalar SiteComplex; typedef iImplSpinor SiteSpinor; typedef iImplHalfSpinor SiteHalfSpinor; typedef iImplDoubledGaugeField SiteDoubledGaugeField; + typedef Lattice ComplexField; typedef Lattice FermionField; typedef Lattice DoubledGaugeField; @@ -564,15 +567,46 @@ PARALLEL_FOR_LOOP conformable(Uds._grid, GaugeGrid); conformable(Umu._grid, GaugeGrid); GaugeLinkField U(GaugeGrid); + GaugeLinkField UU(GaugeGrid); + GaugeLinkField UUU(GaugeGrid); + GaugeLinkField Udag(GaugeGrid); + GaugeLinkField UUUdag(GaugeGrid); for (int mu = 0; mu < Nd; mu++) { - U = PeekIndex(Umu, mu); + + // Staggered Phase. 
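+ // The usual Kawamoto-Smit phases: eta_mu(x) = (-1)^{x_0+...+x_{mu-1}}, i.e.
+ // eta_x = 1, eta_y = (-1)^x, eta_z = (-1)^{x+y}, eta_t = (-1)^{x+y+z};
+ // the mod-2 tests below implement exactly these signs.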
+ ComplexField coor(GaugeGrid); + ComplexField phases(GaugeGrid); + ComplexField x(GaugeGrid); LatticeCoordinate(x,0); + ComplexField y(GaugeGrid); LatticeCoordinate(y,1); + ComplexField z(GaugeGrid); LatticeCoordinate(z,2); + ComplexField t(GaugeGrid); LatticeCoordinate(t,3); + + SiteComplex zz(0.0,0.0); + SiteComplex one(1.0,0.0); + + phases = one; + if ( mu == 1 ) phases = where( mod(x ,2)== zz, phases,-phases); + if ( mu == 2 ) phases = where( mod(x+y ,2)== zz, phases,-phases); + if ( mu == 3 ) phases = where( mod(x+y+z,2)== zz, phases,-phases); + + U = PeekIndex(Umu, mu); + UU = Gimpl::CovShiftForward(U,mu,U); + UUU= Gimpl::CovShiftForward(U,mu,UU); + + U = U *phases; + UUU = UUU *phases; + PokeIndex(Uds, U, mu); - PokeIndex(UUUds, U, mu); - std::cout << GridLogMessage << " NOT created the treble links for staggered yet" <(Uds, U, mu + 4); - PokeIndex(UUUds, U, mu+4); + PokeIndex(UUUds, UUU, mu); + + std::cout << GridLogMessage << " Created the treble links for staggered Naik term" <(Uds, Udag, mu + 4); + PokeIndex(UUUds, UUUdag, mu+4); + } } diff --git a/lib/qcd/action/fermion/ImprovedStaggeredFermion.cc b/lib/qcd/action/fermion/ImprovedStaggeredFermion.cc index 73cc272a..d0927222 100644 --- a/lib/qcd/action/fermion/ImprovedStaggeredFermion.cc +++ b/lib/qcd/action/fermion/ImprovedStaggeredFermion.cc @@ -43,6 +43,7 @@ ImprovedStaggeredFermionStatic::displacements({1, 1, 1, 1, -1, -1, -1, -1, 3, 3, template ImprovedStaggeredFermion::ImprovedStaggeredFermion(GaugeField &_Umu, GridCartesian &Fgrid, GridRedBlackCartesian &Hgrid, RealD _mass, + RealD _c1, RealD _c2,RealD _u0, const ImplParams &p) : Kernels(p), _grid(&Fgrid), @@ -51,6 +52,9 @@ ImprovedStaggeredFermion::ImprovedStaggeredFermion(GaugeField &_Umu, GridC StencilEven(&Hgrid, npoint, Even, directions, displacements), // source is Even StencilOdd(&Hgrid, npoint, Odd, directions, displacements), // source is Odd mass(_mass), + c1(_c1), + c2(_c2), + u0(_u0), Lebesgue(_grid), LebesgueEvenOdd(_cbgrid), Umu(&Fgrid), @@ -63,15 +67,52 @@ ImprovedStaggeredFermion::ImprovedStaggeredFermion(GaugeField &_Umu, GridC ImportGauge(_Umu); } + //////////////////////////////////////////////////////////// + // Momentum space propagator should be + // https://arxiv.org/pdf/hep-lat/9712010.pdf + // + // mom space action. + // gamma_mu i ( c1 sin pmu + c2 sin 3 pmu ) + m + // + // must track through staggered flavour/spin reduction in literature to + // turn to free propagator for the one component chi field, a la page 4/5 + // of above link to implmement fourier based solver. + //////////////////////////////////////////////////////////// template void ImprovedStaggeredFermion::ImportGauge(const GaugeField &_Umu) { - GaugeField HUmu(_Umu._grid); - HUmu = _Umu * (-0.5); - Impl::DoubleStore(GaugeGrid(), Umu, UUUmu, HUmu); + + GaugeLinkField U(GaugeGrid); + + //////////////////////////////////////////////////////// + // Double Store should take two fields for Naik and one hop separately. 
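+ // i.e. presumably fat links for the one-hop term and thin links for the
+ // three-hop Naik term; for now a single input field feeds both.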
+ //////////////////////////////////////////////////////// + Impl::DoubleStore(GaugeGrid(), Umu, UUUmu, _Umu); + + + //////////////////////////////////////////////////////// + // Apply scale factors to get the right fermion Kinetic term + // + // 0.5 ( U p(x+mu) - Udag(x-mu) p(x-mu) ) + //////////////////////////////////////////////////////// + for (int mu = 0; mu < Nd; mu++) { + + U = PeekIndex(Umu, mu); + PokeIndex(Umu, U*( 0.5*c1/u0), mu ); + + U = PeekIndex(Umu, mu+4); + PokeIndex(Umu, U*(-0.5*c1/u0), mu+4); + + U = PeekIndex(UUUmu, mu); + PokeIndex(UUUmu, U*( 0.5*c2/u0/u0/u0), mu ); + + U = PeekIndex(UUUmu, mu+4); + PokeIndex(UUUmu, U*(-0.5*c2/u0/u0/u0), mu+4); + } + pickCheckerboard(Even, UmuEven, Umu); - pickCheckerboard(Odd, UmuOdd, Umu); + pickCheckerboard(Odd, UmuOdd , Umu); pickCheckerboard(Even, UUUmuEven, UUUmu); - pickCheckerboard(Odd, UUUmuOdd, UUUmu); + pickCheckerboard(Odd, UUUmuOdd, UUUmu); } ///////////////////////////// diff --git a/lib/qcd/action/fermion/ImprovedStaggeredFermion.h b/lib/qcd/action/fermion/ImprovedStaggeredFermion.h index bf05240e..72f01bf3 100644 --- a/lib/qcd/action/fermion/ImprovedStaggeredFermion.h +++ b/lib/qcd/action/fermion/ImprovedStaggeredFermion.h @@ -108,6 +108,7 @@ class ImprovedStaggeredFermion : public StaggeredKernels, public ImprovedS // Constructor ImprovedStaggeredFermion(GaugeField &_Umu, GridCartesian &Fgrid, GridRedBlackCartesian &Hgrid, RealD _mass, + RealD _c1=9.0/8.0, RealD _c2=-1.0/24.0,RealD _u0, const ImplParams &p = ImplParams()); // DoubleStore impl dependent @@ -122,6 +123,9 @@ class ImprovedStaggeredFermion : public StaggeredKernels, public ImprovedS // any other parameters of action ??? RealD mass; + RealD u0; + RealD c1; + RealD c2; GridBase *_grid; GridBase *_cbgrid; From ee686a7d85d5cb357d7d76fa649b87b9e7ade881 Mon Sep 17 00:00:00 2001 From: Azusa Yamaguchi Date: Thu, 3 Nov 2016 16:58:23 +0000 Subject: [PATCH 003/101] Compiles now --- benchmarks/Benchmark_staggered.cc | 3 ++- lib/communicator/Communicator_none.cc | 2 +- lib/qcd/action/fermion/FermionOperatorImpl.h | 24 +++++++++---------- .../fermion/ImprovedStaggeredFermion.cc | 2 +- .../action/fermion/ImprovedStaggeredFermion.h | 2 +- 5 files changed, 17 insertions(+), 16 deletions(-) diff --git a/benchmarks/Benchmark_staggered.cc b/benchmarks/Benchmark_staggered.cc index c647cda0..53b8e222 100644 --- a/benchmarks/Benchmark_staggered.cc +++ b/benchmarks/Benchmark_staggered.cc @@ -111,7 +111,8 @@ int main (int argc, char ** argv) RealD mass=0.1; RealD c1=9.0/8.0; RealD c2=-1.0/24.0; - ImprovedStaggeredFermionR Ds(Umu,Grid,RBGrid,mass,c1,c2,params); + RealD u0=1.0; + ImprovedStaggeredFermionR Ds(Umu,Grid,RBGrid,mass,c1,c2,u0,params); std::cout< &coor) { return 0;} -void CartesianCommunicator::ProcessorCoorFromRank(int rank, std::vector &coor){ assert(0);} +void CartesianCommunicator::ProcessorCoorFromRank(int rank, std::vector &coor){ coor = _processor_coor; } void CartesianCommunicator::ShiftedRanks(int dim,int shift,int &source,int &dest) { source =0; diff --git a/lib/qcd/action/fermion/FermionOperatorImpl.h b/lib/qcd/action/fermion/FermionOperatorImpl.h index 98d2f859..cc1370e8 100644 --- a/lib/qcd/action/fermion/FermionOperatorImpl.h +++ b/lib/qcd/action/fermion/FermionOperatorImpl.h @@ -574,20 +574,20 @@ PARALLEL_FOR_LOOP for (int mu = 0; mu < Nd; mu++) { // Staggered Phase. 
- ComplexField coor(GaugeGrid); - ComplexField phases(GaugeGrid); - ComplexField x(GaugeGrid); LatticeCoordinate(x,0); - ComplexField y(GaugeGrid); LatticeCoordinate(y,1); - ComplexField z(GaugeGrid); LatticeCoordinate(z,2); - ComplexField t(GaugeGrid); LatticeCoordinate(t,3); + Lattice > coor(GaugeGrid); + Lattice > x(GaugeGrid); LatticeCoordinate(x,0); + Lattice > y(GaugeGrid); LatticeCoordinate(y,1); + Lattice > z(GaugeGrid); LatticeCoordinate(z,2); + Lattice > t(GaugeGrid); LatticeCoordinate(t,3); - SiteComplex zz(0.0,0.0); - SiteComplex one(1.0,0.0); + Lattice > lin_z(GaugeGrid); lin_z=x+y; + Lattice > lin_t(GaugeGrid); lin_t=x+y+z; - phases = one; - if ( mu == 1 ) phases = where( mod(x ,2)== zz, phases,-phases); - if ( mu == 2 ) phases = where( mod(x+y ,2)== zz, phases,-phases); - if ( mu == 3 ) phases = where( mod(x+y+z,2)== zz, phases,-phases); + ComplexField phases(GaugeGrid); phases=1.0; + + if ( mu == 1 ) phases = where( mod(x ,2)==(Integer)0, phases,-phases); + if ( mu == 2 ) phases = where( mod(lin_z,2)==(Integer)0, phases,-phases); + if ( mu == 3 ) phases = where( mod(lin_t,2)==(Integer)0, phases,-phases); U = PeekIndex(Umu, mu); UU = Gimpl::CovShiftForward(U,mu,U); diff --git a/lib/qcd/action/fermion/ImprovedStaggeredFermion.cc b/lib/qcd/action/fermion/ImprovedStaggeredFermion.cc index d0927222..bf9eb8bd 100644 --- a/lib/qcd/action/fermion/ImprovedStaggeredFermion.cc +++ b/lib/qcd/action/fermion/ImprovedStaggeredFermion.cc @@ -81,7 +81,7 @@ ImprovedStaggeredFermion::ImprovedStaggeredFermion(GaugeField &_Umu, GridC template void ImprovedStaggeredFermion::ImportGauge(const GaugeField &_Umu) { - GaugeLinkField U(GaugeGrid); + GaugeLinkField U(GaugeGrid()); //////////////////////////////////////////////////////// // Double Store should take two fields for Naik and one hop separately. 
diff --git a/lib/qcd/action/fermion/ImprovedStaggeredFermion.h b/lib/qcd/action/fermion/ImprovedStaggeredFermion.h index 72f01bf3..d659155f 100644 --- a/lib/qcd/action/fermion/ImprovedStaggeredFermion.h +++ b/lib/qcd/action/fermion/ImprovedStaggeredFermion.h @@ -108,7 +108,7 @@ class ImprovedStaggeredFermion : public StaggeredKernels, public ImprovedS // Constructor ImprovedStaggeredFermion(GaugeField &_Umu, GridCartesian &Fgrid, GridRedBlackCartesian &Hgrid, RealD _mass, - RealD _c1=9.0/8.0, RealD _c2=-1.0/24.0,RealD _u0, + RealD _c1=9.0/8.0, RealD _c2=-1.0/24.0,RealD _u0=1.0, const ImplParams &p = ImplParams()); // DoubleStore impl dependent From 389e0a77bd201cc6351b7b556dcc6f28e5b6aa05 Mon Sep 17 00:00:00 2001 From: Azusa Yamaguchi Date: Tue, 29 Nov 2016 13:13:56 +0000 Subject: [PATCH 004/101] Staggerd Fermion 5D --- benchmarks/Benchmark_staggered.cc | 4 +-- lib/qcd/action/Actions.h | 5 +++ lib/qcd/action/fermion/FermionOperatorImpl.h | 34 ++++++++++++------- .../fermion/ImprovedStaggeredFermion.cc | 22 +++++++----- .../action/fermion/ImprovedStaggeredFermion.h | 15 ++++---- lib/qcd/action/fermion/StaggeredKernels.cc | 4 +-- tests/core/Test_cf_coarsen_support.cc | 2 +- tests/core/Test_contfrac_even_odd.cc | 2 +- tests/core/Test_dwf_even_odd.cc | 2 +- tests/core/Test_dwf_rb5d.cc | 2 +- tests/core/Test_gpwilson_even_odd.cc | 2 +- tests/core/Test_wilson_even_odd.cc | 2 +- tests/core/Test_wilson_tm_even_odd.cc | 2 +- tests/debug/Test_cayley_coarsen_support.cc | 2 +- tests/debug/Test_cayley_even_odd.cc | 2 +- tests/debug/Test_cayley_even_odd_vec.cc | 2 +- tests/debug/Test_zmm.cc | 2 +- tests/solver/Test_cf_cr_unprec.cc | 2 +- tests/solver/Test_contfrac_cg.cc | 2 +- tests/solver/Test_dwf_cg_schur.cc | 2 +- tests/solver/Test_dwf_cg_unprec.cc | 2 +- tests/solver/Test_dwf_cr_unprec.cc | 2 +- tests/solver/Test_wilson_cg_prec.cc | 2 +- tests/solver/Test_wilson_cg_schur.cc | 2 +- tests/solver/Test_wilson_cg_unprec.cc | 2 +- tests/solver/Test_wilson_cr_unprec.cc | 2 +- 26 files changed, 72 insertions(+), 52 deletions(-) diff --git a/benchmarks/Benchmark_staggered.cc b/benchmarks/Benchmark_staggered.cc index 53b8e222..121dc0d5 100644 --- a/benchmarks/Benchmark_staggered.cc +++ b/benchmarks/Benchmark_staggered.cc @@ -2,7 +2,7 @@ Grid physics library, www.github.com/paboyle/Grid - Source file: ./benchmarks/Benchmark_wilson.cc + Source file: ./benchmarks/Benchmark_staggered.cc Copyright (C) 2015 @@ -112,7 +112,7 @@ int main (int argc, char ** argv) RealD c1=9.0/8.0; RealD c2=-1.0/24.0; RealD u0=1.0; - ImprovedStaggeredFermionR Ds(Umu,Grid,RBGrid,mass,c1,c2,u0,params); + ImprovedStaggeredFermionR Ds(Umu,Umu,Grid,RBGrid,mass,c1,c2,u0,params); std::cout< ConjugateSymanzikGaugeAction //#include #include +#include #include // Cayley types #include @@ -264,6 +265,10 @@ typedef ImprovedStaggeredFermion ImprovedStaggeredFermionR; typedef ImprovedStaggeredFermion ImprovedStaggeredFermionF; typedef ImprovedStaggeredFermion ImprovedStaggeredFermionD; +typedef ImprovedStaggeredFermion5D ImprovedStaggeredFermion5DR; +typedef ImprovedStaggeredFermion5D ImprovedStaggeredFermion5DF; +typedef ImprovedStaggeredFermion5D ImprovedStaggeredFermion5DD; + }} /////////////////////////////////////////////////////////////////////////////// // G5 herm -- this has to live in QCD since dirac matrix is not in the broader sector of code diff --git a/lib/qcd/action/fermion/FermionOperatorImpl.h b/lib/qcd/action/fermion/FermionOperatorImpl.h index cc1370e8..42f89157 100644 --- a/lib/qcd/action/fermion/FermionOperatorImpl.h +++ 
b/lib/qcd/action/fermion/FermionOperatorImpl.h @@ -561,11 +561,13 @@ PARALLEL_FOR_LOOP } inline void DoubleStore(GridBase *GaugeGrid, - DoubledGaugeField &Uds, DoubledGaugeField &UUUds, // for Naik term - const GaugeField &Umu) { + DoubledGaugeField &Uds, + const GaugeField &Uthin, + const GaugeField &Ufat) { conformable(Uds._grid, GaugeGrid); - conformable(Umu._grid, GaugeGrid); + conformable(Uthin._grid, GaugeGrid); + conformable(Ufat._grid, GaugeGrid); GaugeLinkField U(GaugeGrid); GaugeLinkField UU(GaugeGrid); GaugeLinkField UUU(GaugeGrid); @@ -589,24 +591,32 @@ PARALLEL_FOR_LOOP if ( mu == 2 ) phases = where( mod(lin_z,2)==(Integer)0, phases,-phases); if ( mu == 3 ) phases = where( mod(lin_t,2)==(Integer)0, phases,-phases); - U = PeekIndex(Umu, mu); + // 1 hop based on fat links + U = PeekIndex(Ufat, mu); + Udag = adj( Cshift(U, mu, -1)); + + U = U *phases; + Udag = Udag *phases; + + PokeIndex(Uds, U, mu); + PokeIndex(Uds, Udag, mu + 4); + + // 3 hop based on thin links. Crazy huh ? + U = PeekIndex(Uthin, mu); UU = Gimpl::CovShiftForward(U,mu,U); UUU= Gimpl::CovShiftForward(U,mu,UU); - U = U *phases; - UUU = UUU *phases; + UUUdag = adj( Cshift(UUU, mu, -3)); + + UUU = UUU *phases; + UUUdag = UUUdag *phases; - PokeIndex(Uds, U, mu); PokeIndex(UUUds, UUU, mu); + PokeIndex(UUUds, UUUdag, mu+4); std::cout << GridLogMessage << " Created the treble links for staggered Naik term" <(Uds, Udag, mu + 4); - PokeIndex(UUUds, UUUdag, mu+4); - } } diff --git a/lib/qcd/action/fermion/ImprovedStaggeredFermion.cc b/lib/qcd/action/fermion/ImprovedStaggeredFermion.cc index bf9eb8bd..42dff5b2 100644 --- a/lib/qcd/action/fermion/ImprovedStaggeredFermion.cc +++ b/lib/qcd/action/fermion/ImprovedStaggeredFermion.cc @@ -41,7 +41,7 @@ ImprovedStaggeredFermionStatic::displacements({1, 1, 1, 1, -1, -1, -1, -1, 3, 3, ///////////////////////////////// template -ImprovedStaggeredFermion::ImprovedStaggeredFermion(GaugeField &_Umu, GridCartesian &Fgrid, +ImprovedStaggeredFermion::ImprovedStaggeredFermion(GaugeField &_Uthin, GaugeField &_Ufat, GridCartesian &Fgrid, GridRedBlackCartesian &Hgrid, RealD _mass, RealD _c1, RealD _c2,RealD _u0, const ImplParams &p) @@ -62,9 +62,10 @@ ImprovedStaggeredFermion::ImprovedStaggeredFermion(GaugeField &_Umu, GridC UmuOdd(&Hgrid), UUUmu(&Fgrid), UUUmuEven(&Hgrid), - UUUmuOdd(&Hgrid) { + UUUmuOdd(&Hgrid) +{ // Allocate the required comms buffer - ImportGauge(_Umu); + ImportGauge(_Uthin,_Ufat); } //////////////////////////////////////////////////////////// @@ -79,19 +80,24 @@ ImprovedStaggeredFermion::ImprovedStaggeredFermion(GaugeField &_Umu, GridC // of above link to implmement fourier based solver. //////////////////////////////////////////////////////////// template -void ImprovedStaggeredFermion::ImportGauge(const GaugeField &_Umu) { - +void ImprovedStaggeredFermion::ImportGauge(const GaugeField &_Uthin) +{ + ImportGauge(_Uthin,_Uthin); +}; +template +void ImprovedStaggeredFermion::ImportGauge(const GaugeField &_Uthin,const GaugeField &_Ufat) +{ GaugeLinkField U(GaugeGrid()); //////////////////////////////////////////////////////// // Double Store should take two fields for Naik and one hop separately. //////////////////////////////////////////////////////// - Impl::DoubleStore(GaugeGrid(), Umu, UUUmu, _Umu); + Impl::DoubleStore(GaugeGrid(), UUUmu, Umu, _Uthin, _Ufat ); //////////////////////////////////////////////////////// // Apply scale factors to get the right fermion Kinetic term - // + // Could pass coeffs into the double store to save work. 
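+ // At tree level c1 = 9/8 and c2 = -1/24, the Naik choice that cancels the O(a^2)
+ // error in the free dispersion relation; u0 is the tadpole factor, one power per
+ // link crossed, hence 1/u0 on the one-hop and 1/u0^3 on the three-hop term.
+ // With u0 = 1 the forward one-hop links pick up +0.5*c1 = +0.5625 and the forward
+ // three-hop links +0.5*c2 = -1/48 ~ -0.0208, with opposite signs on the backward links.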
// 0.5 ( U p(x+mu) - Udag(x-mu) p(x-mu) ) //////////////////////////////////////////////////////// for (int mu = 0; mu < Nd; mu++) { @@ -312,7 +318,7 @@ void ImprovedStaggeredFermion::DhopDir(const FermionField &in, FermionFiel Compressor compressor; Stencil.HaloExchange(in, compressor); -PARALLEL_FOR_LOOP + PARALLEL_FOR_LOOP for (int sss = 0; sss < in._grid->oSites(); sss++) { Kernels::DhopDir(Stencil, Umu, UUUmu, Stencil.CommBuf(), sss, sss, in, out, dir, disp); } diff --git a/lib/qcd/action/fermion/ImprovedStaggeredFermion.h b/lib/qcd/action/fermion/ImprovedStaggeredFermion.h index d659155f..ad298d29 100644 --- a/lib/qcd/action/fermion/ImprovedStaggeredFermion.h +++ b/lib/qcd/action/fermion/ImprovedStaggeredFermion.h @@ -66,12 +66,10 @@ class ImprovedStaggeredFermion : public StaggeredKernels, public ImprovedS ///////////////////////////////////////////////////////// void Meooe(const FermionField &in, FermionField &out); void MeooeDag(const FermionField &in, FermionField &out); - - // allow override for twisted mass and clover - virtual void Mooee(const FermionField &in, FermionField &out); - virtual void MooeeDag(const FermionField &in, FermionField &out); - virtual void MooeeInv(const FermionField &in, FermionField &out); - virtual void MooeeInvDag(const FermionField &in, FermionField &out); + void Mooee(const FermionField &in, FermionField &out); + void MooeeDag(const FermionField &in, FermionField &out); + void MooeeInv(const FermionField &in, FermionField &out); + void MooeeInvDag(const FermionField &in, FermionField &out); //////////////////////// // Derivative interface @@ -106,13 +104,14 @@ class ImprovedStaggeredFermion : public StaggeredKernels, public ImprovedS const FermionField &in, FermionField &out, int dag); // Constructor - ImprovedStaggeredFermion(GaugeField &_Umu, GridCartesian &Fgrid, + ImprovedStaggeredFermion(GaugeField &_Uthin, GaugeField &_Ufat, GridCartesian &Fgrid, GridRedBlackCartesian &Hgrid, RealD _mass, RealD _c1=9.0/8.0, RealD _c2=-1.0/24.0,RealD _u0=1.0, const ImplParams &p = ImplParams()); // DoubleStore impl dependent - void ImportGauge(const GaugeField &_Umu); + void ImportGauge(const GaugeField &_Uthin, const GaugeField &_Ufat); + void ImportGauge(const GaugeField &_Uthin); /////////////////////////////////////////////////////////////// // Data members require to support the functionality diff --git a/lib/qcd/action/fermion/StaggeredKernels.cc b/lib/qcd/action/fermion/StaggeredKernels.cc index 8df7f3e4..b437199c 100644 --- a/lib/qcd/action/fermion/StaggeredKernels.cc +++ b/lib/qcd/action/fermion/StaggeredKernels.cc @@ -191,7 +191,7 @@ void StaggeredKernels::DhopSiteDag(StencilImpl &st, LebesgueOrder &lo, Dou int threeLink=1; DhopSiteDepth(st,lo,U,buf,sF,sU,in,naive,oneLink); DhopSiteDepth(st,lo,UUU,buf,sF,sU,in,naik,threeLink); - out._odata[sF] =naive+naik; + out._odata[sF] =-naive-naik; }; template void StaggeredKernels::DhopSite(StencilImpl &st, LebesgueOrder &lo, DoubledGaugeField &U, DoubledGaugeField &UUU, @@ -203,7 +203,7 @@ void StaggeredKernels::DhopSite(StencilImpl &st, LebesgueOrder &lo, Double int threeLink=1; DhopSiteDepth(st,lo,U,buf,sF,sU,in,naive,oneLink); DhopSiteDepth(st,lo,UUU,buf,sF,sU,in,naik,threeLink); - out._odata[sF] =-naive-naik; + out._odata[sF] =naive+naik; }; template diff --git a/tests/core/Test_cf_coarsen_support.cc b/tests/core/Test_cf_coarsen_support.cc index 325cc3a5..9edfc71d 100644 --- a/tests/core/Test_cf_coarsen_support.cc +++ b/tests/core/Test_cf_coarsen_support.cc @@ -64,7 +64,7 @@ int main (int argc, char ** 
argv) LatticeFermion ref(FGrid); ref=zero; LatticeFermion tmp(FGrid); LatticeFermion err(FGrid); - LatticeGaugeField Umu(UGrid); random(RNG4,Umu); + LatticeGaugeField Umu(UGrid); SU3::HotConfiguration(RNG4,Umu); std::vector U(4,UGrid); for(int mu=0;mu U(4,UGrid); RealD mass=0.1; diff --git a/tests/core/Test_dwf_even_odd.cc b/tests/core/Test_dwf_even_odd.cc index 5c60d84b..dd34bea9 100644 --- a/tests/core/Test_dwf_even_odd.cc +++ b/tests/core/Test_dwf_even_odd.cc @@ -72,7 +72,7 @@ int main (int argc, char ** argv) LatticeFermion ref(FGrid); ref=zero; LatticeFermion tmp(FGrid); tmp=zero; LatticeFermion err(FGrid); tmp=zero; - LatticeGaugeField Umu(UGrid); random(RNG4,Umu); + LatticeGaugeField Umu(UGrid); SU3::HotConfiguration(RNG4,Umu); std::vector U(4,UGrid); // Only one non-zero (y) diff --git a/tests/core/Test_dwf_rb5d.cc b/tests/core/Test_dwf_rb5d.cc index ab9e4d6e..5f776aec 100644 --- a/tests/core/Test_dwf_rb5d.cc +++ b/tests/core/Test_dwf_rb5d.cc @@ -81,7 +81,7 @@ int main (int argc, char ** argv) LatticeFermion tmp(FGrid); LatticeFermion err(FGrid); - LatticeGaugeField Umu(UGrid); random(RNG4,Umu); + LatticeGaugeField Umu(UGrid); SU3::HotConfiguration(RNG4,Umu); std::vector U(4,UGrid); // Only one non-zero (y) diff --git a/tests/core/Test_gpwilson_even_odd.cc b/tests/core/Test_gpwilson_even_odd.cc index b69bf266..b8b320d8 100644 --- a/tests/core/Test_gpwilson_even_odd.cc +++ b/tests/core/Test_gpwilson_even_odd.cc @@ -61,7 +61,7 @@ int main (int argc, char ** argv) FermionField ref(&Grid); ref=zero; FermionField tmp(&Grid); tmp=zero; FermionField err(&Grid); tmp=zero; - LatticeGaugeField Umu(&Grid); random(pRNG,Umu); + LatticeGaugeField Umu(&Grid); SU3::HotConfiguration(pRNG,Umu); std::vector U(4,&Grid); double volume=1; diff --git a/tests/core/Test_wilson_even_odd.cc b/tests/core/Test_wilson_even_odd.cc index 8028e6de..8218068b 100644 --- a/tests/core/Test_wilson_even_odd.cc +++ b/tests/core/Test_wilson_even_odd.cc @@ -71,7 +71,7 @@ int main (int argc, char ** argv) LatticeFermion ref(&Grid); ref=zero; LatticeFermion tmp(&Grid); tmp=zero; LatticeFermion err(&Grid); tmp=zero; - LatticeGaugeField Umu(&Grid); random(pRNG,Umu); + LatticeGaugeField Umu(&Grid); SU3::HotConfiguration(pRNG,Umu); std::vector U(4,&Grid); double volume=1; diff --git a/tests/core/Test_wilson_tm_even_odd.cc b/tests/core/Test_wilson_tm_even_odd.cc index 2d28240c..2084b249 100644 --- a/tests/core/Test_wilson_tm_even_odd.cc +++ b/tests/core/Test_wilson_tm_even_odd.cc @@ -70,7 +70,7 @@ int main (int argc, char ** argv) LatticeFermion ref(&Grid); ref=zero; LatticeFermion tmp(&Grid); tmp=zero; LatticeFermion err(&Grid); tmp=zero; - LatticeGaugeField Umu(&Grid); random(pRNG,Umu); + LatticeGaugeField Umu(&Grid); SU3::HotConfiguration(pRNG,Umu); std::vector U(4,&Grid); double volume=1; diff --git a/tests/debug/Test_cayley_coarsen_support.cc b/tests/debug/Test_cayley_coarsen_support.cc index 9c740b8b..4cfe0e22 100644 --- a/tests/debug/Test_cayley_coarsen_support.cc +++ b/tests/debug/Test_cayley_coarsen_support.cc @@ -77,7 +77,7 @@ int main (int argc, char ** argv) LatticeFermion ref(FGrid); ref=zero; LatticeFermion tmp(FGrid); LatticeFermion err(FGrid); - LatticeGaugeField Umu(UGrid); random(RNG4,Umu); + LatticeGaugeField Umu(UGrid); SU3::HotConfiguration(RNG4,Umu); #if 0 std::vector U(4,UGrid); diff --git a/tests/debug/Test_cayley_even_odd.cc b/tests/debug/Test_cayley_even_odd.cc index 5c15450c..81c2e1d4 100644 --- a/tests/debug/Test_cayley_even_odd.cc +++ b/tests/debug/Test_cayley_even_odd.cc @@ -70,7 +70,7 @@ int main 
(int argc, char ** argv) GridParallelRNG RNG5(FGrid); RNG5.SeedFixedIntegers(seeds5); GridParallelRNG RNG4(UGrid); RNG4.SeedFixedIntegers(seeds4); - LatticeGaugeField Umu(UGrid); random(RNG4,Umu); + LatticeGaugeField Umu(UGrid); SU3::HotConfiguration(RNG4,Umu); std::vector U(4,UGrid); RealD mass=0.1; diff --git a/tests/debug/Test_cayley_even_odd_vec.cc b/tests/debug/Test_cayley_even_odd_vec.cc index f8600782..99ae610d 100644 --- a/tests/debug/Test_cayley_even_odd_vec.cc +++ b/tests/debug/Test_cayley_even_odd_vec.cc @@ -91,7 +91,7 @@ int main (int argc, char ** argv) GridParallelRNG sRNG4(sUGrid); sRNG4.SeedFixedIntegers(seeds4); GridParallelRNG sRNG5(sFGrid); sRNG5.SeedFixedIntegers(seeds5); - LatticeGaugeField Umu(UGrid); random(RNG4,Umu); + LatticeGaugeField Umu(UGrid); SU3::HotConfiguration(RNG4,Umu); RealD mass=0.1; RealD M5 =1.8; diff --git a/tests/debug/Test_zmm.cc b/tests/debug/Test_zmm.cc index 40263cb9..4f372296 100644 --- a/tests/debug/Test_zmm.cc +++ b/tests/debug/Test_zmm.cc @@ -187,7 +187,7 @@ int main(int argc,char **argv) GridParallelRNG RNG5(FGrid); RNG5.SeedFixedIntegers(seeds5); random(RNG5,src); #if 1 - random(RNG4,Umu); + SU3::HotConfiguration(RNG4,Umu); #else int mmu=2; std::vector U(4,UGrid); diff --git a/tests/solver/Test_cf_cr_unprec.cc b/tests/solver/Test_cf_cr_unprec.cc index 59031ba8..045d6d75 100644 --- a/tests/solver/Test_cf_cr_unprec.cc +++ b/tests/solver/Test_cf_cr_unprec.cc @@ -61,7 +61,7 @@ int main (int argc, char ** argv) LatticeFermion src(FGrid); random(RNG5,src); LatticeFermion result(FGrid); result=zero; - LatticeGaugeField Umu(UGrid); random(RNG4,Umu); + LatticeGaugeField Umu(UGrid); SU3::HotConfiguration(RNG4,Umu); std::vector U(4,UGrid); for(int mu=0;mu U(4,UGrid); RealD mass=0.1; diff --git a/tests/solver/Test_dwf_cg_schur.cc b/tests/solver/Test_dwf_cg_schur.cc index 75ca3521..d61580dc 100644 --- a/tests/solver/Test_dwf_cg_schur.cc +++ b/tests/solver/Test_dwf_cg_schur.cc @@ -61,7 +61,7 @@ int main (int argc, char ** argv) LatticeFermion src(FGrid); random(RNG5,src); LatticeFermion result(FGrid); result=zero; - LatticeGaugeField Umu(UGrid); random(RNG4,Umu); + LatticeGaugeField Umu(UGrid); SU3::HotConfiguration(RNG4,Umu); std::vector U(4,UGrid); for(int mu=0;mu U(4,UGrid); for(int mu=0;mu U(4,UGrid); diff --git a/tests/solver/Test_wilson_cg_prec.cc b/tests/solver/Test_wilson_cg_prec.cc index 7cc9d574..0f9ed5b3 100644 --- a/tests/solver/Test_wilson_cg_prec.cc +++ b/tests/solver/Test_wilson_cg_prec.cc @@ -60,7 +60,7 @@ int main (int argc, char ** argv) LatticeFermion src(&Grid); random(pRNG,src); RealD nrm = norm2(src); LatticeFermion result(&Grid); result=zero; - LatticeGaugeField Umu(&Grid); random(pRNG,Umu); + LatticeGaugeField Umu(&Grid); SU3::HotConfiguration(pRNG,Umu); std::vector U(4,&Grid); diff --git a/tests/solver/Test_wilson_cg_schur.cc b/tests/solver/Test_wilson_cg_schur.cc index 1ea6a07c..a23cf9b7 100644 --- a/tests/solver/Test_wilson_cg_schur.cc +++ b/tests/solver/Test_wilson_cg_schur.cc @@ -57,7 +57,7 @@ int main (int argc, char ** argv) std::vector seeds({1,2,3,4}); GridParallelRNG pRNG(&Grid); pRNG.SeedFixedIntegers(seeds); - LatticeGaugeField Umu(&Grid); random(pRNG,Umu); + LatticeGaugeField Umu(&Grid); SU3::HotConfiguration(pRNG,Umu); LatticeFermion src(&Grid); random(pRNG,src); LatticeFermion result(&Grid); result=zero; diff --git a/tests/solver/Test_wilson_cg_unprec.cc b/tests/solver/Test_wilson_cg_unprec.cc index 34b0a687..bad72ea4 100644 --- a/tests/solver/Test_wilson_cg_unprec.cc +++ b/tests/solver/Test_wilson_cg_unprec.cc @@ 
-60,7 +60,7 @@ int main (int argc, char ** argv) LatticeFermion src(&Grid); random(pRNG,src); RealD nrm = norm2(src); LatticeFermion result(&Grid); result=zero; - LatticeGaugeField Umu(&Grid); random(pRNG,Umu); + LatticeGaugeField Umu(&Grid); SU3::HotConfiguration(pRNG,Umu); double volume=1; for(int mu=0;mu U(4,&Grid); From 77fb25fb292878f4bdeae5442f9270b6a38d2e46 Mon Sep 17 00:00:00 2001 From: Azusa Yamaguchi Date: Tue, 29 Nov 2016 13:43:56 +0000 Subject: [PATCH 005/101] Push 5d tests --- .../fermion/ImprovedStaggeredFermion5D.cc | 344 ++++++++++++++++++ .../fermion/ImprovedStaggeredFermion5D.h | 164 +++++++++ tests/core/Test_staggered.cc | 291 +++++++++++++++ tests/core/Test_staggered5D.cc | 314 ++++++++++++++++ 4 files changed, 1113 insertions(+) create mode 100644 lib/qcd/action/fermion/ImprovedStaggeredFermion5D.cc create mode 100644 lib/qcd/action/fermion/ImprovedStaggeredFermion5D.h create mode 100644 tests/core/Test_staggered.cc create mode 100644 tests/core/Test_staggered5D.cc diff --git a/lib/qcd/action/fermion/ImprovedStaggeredFermion5D.cc b/lib/qcd/action/fermion/ImprovedStaggeredFermion5D.cc new file mode 100644 index 00000000..71a6bf06 --- /dev/null +++ b/lib/qcd/action/fermion/ImprovedStaggeredFermion5D.cc @@ -0,0 +1,344 @@ +/************************************************************************************* + + Grid physics library, www.github.com/paboyle/Grid + + Source file: ./lib/qcd/action/fermion/ImprovedStaggeredFermion5D.cc + + Copyright (C) 2015 + +Author: Azusa Yamaguchi +Author: Peter Boyle + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License along + with this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + + See the full license in the file "LICENSE" in the top level distribution directory + *************************************************************************************/ + /* END LEGAL */ +#include +#include + +namespace Grid { +namespace QCD { + +// S-direction is INNERMOST and takes no part in the parity. +const std::vector +ImprovedStaggeredFermion5DStatic::directions({1,2,3,4,1,2,3,4,1,2,3,4,1,2,3,4}); +const std::vector +ImprovedStaggeredFermion5DStatic::displacements({1, 1, 1, 1, -1, -1, -1, -1, 3, 3, 3, 3, -3, -3, -3, -3}); + + // 5d lattice for DWF. 
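+// The npoint=16 stencil carries both the one-hop and the three-hop (Naik)
+// neighbours: entries 0-3 of "directions" are the +1 hops in x,y,z,t
+// (dimensions 1-4 of the five-d grid, dimension 0 being Ls), entries 4-7
+// the matching -1 hops, entries 8-11 the +3 hops and entries 12-15 the
+// -3 hops. The kernels address the three-link half of the stencil with a
+// skew of 8.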
+template +ImprovedStaggeredFermion5D::ImprovedStaggeredFermion5D(GaugeField &_Uthin,GaugeField &_Ufat, + GridCartesian &FiveDimGrid, + GridRedBlackCartesian &FiveDimRedBlackGrid, + GridCartesian &FourDimGrid, + GridRedBlackCartesian &FourDimRedBlackGrid, + RealD _mass, + RealD _c1,RealD _c2, RealD _u0, + const ImplParams &p) : + Kernels(p), + _FiveDimGrid (&FiveDimGrid), + _FiveDimRedBlackGrid(&FiveDimRedBlackGrid), + _FourDimGrid (&FourDimGrid), + _FourDimRedBlackGrid(&FourDimRedBlackGrid), + Stencil (_FiveDimGrid,npoint,Even,directions,displacements), + StencilEven(_FiveDimRedBlackGrid,npoint,Even,directions,displacements), // source is Even + StencilOdd (_FiveDimRedBlackGrid,npoint,Odd ,directions,displacements), // source is Odd + mass(_mass), + c1(_c1), + c2(_c2), + u0(_u0), + Umu(_FourDimGrid), + UmuEven(_FourDimRedBlackGrid), + UmuOdd (_FourDimRedBlackGrid), + UUUmu(_FourDimGrid), + UUUmuEven(_FourDimRedBlackGrid), + UUUmuOdd(_FourDimRedBlackGrid), + Lebesgue(_FourDimGrid), + LebesgueEvenOdd(_FourDimRedBlackGrid) +{ + // some assertions + assert(FiveDimGrid._ndimension==5); + assert(FourDimGrid._ndimension==4); + assert(FiveDimRedBlackGrid._ndimension==5); + assert(FourDimRedBlackGrid._ndimension==4); + assert(FiveDimRedBlackGrid._checker_dim==1); + + // Dimension zero of the five-d is the Ls direction + Ls=FiveDimGrid._fdimensions[0]; + assert(FiveDimRedBlackGrid._fdimensions[0]==Ls); + assert(FiveDimRedBlackGrid._processors[0] ==1); + assert(FiveDimRedBlackGrid._simd_layout[0]==1); + assert(FiveDimGrid._processors[0] ==1); + assert(FiveDimGrid._simd_layout[0] ==1); + + // Other dimensions must match the decomposition of the four-D fields + for(int d=0;d<4;d++){ + assert(FourDimRedBlackGrid._fdimensions[d] ==FourDimGrid._fdimensions[d]); + assert(FiveDimRedBlackGrid._fdimensions[d+1]==FourDimGrid._fdimensions[d]); + + assert(FourDimRedBlackGrid._processors[d] ==FourDimGrid._processors[d]); + assert(FiveDimRedBlackGrid._processors[d+1] ==FourDimGrid._processors[d]); + + assert(FourDimRedBlackGrid._simd_layout[d] ==FourDimGrid._simd_layout[d]); + assert(FiveDimRedBlackGrid._simd_layout[d+1]==FourDimGrid._simd_layout[d]); + + assert(FiveDimGrid._fdimensions[d+1] ==FourDimGrid._fdimensions[d]); + assert(FiveDimGrid._processors[d+1] ==FourDimGrid._processors[d]); + assert(FiveDimGrid._simd_layout[d+1] ==FourDimGrid._simd_layout[d]); + } + + // Allocate the required comms buffer + ImportGauge(_Uthin,_Ufat); +} + +template +void ImprovedStaggeredFermion5D::ImportGauge(const GaugeField &_Uthin) +{ + ImportGauge(_Uthin,_Uthin); +}; +template +void ImprovedStaggeredFermion5D::ImportGauge(const GaugeField &_Uthin,const GaugeField &_Ufat) +{ + GaugeLinkField U(GaugeGrid()); + + //////////////////////////////////////////////////////// + // Double Store should take two fields for Naik and one hop separately. + //////////////////////////////////////////////////////// + Impl::DoubleStore(GaugeGrid(), UUUmu, Umu, _Uthin, _Ufat ); + + //////////////////////////////////////////////////////// + // Apply scale factors to get the right fermion Kinetic term + // Could pass coeffs into the double store to save work. 
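+  // One-hop links are scaled by +/- 0.5*c1/u0, three-hop links by
+  // +/- 0.5*c2/u0^3. With the default c1=9/8, c2=-1/24 these are the
+  // tree-level Naik coefficients (0.5625 and -1/48 at u0=1), u0 being
+  // the tadpole factor.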
+ // 0.5 ( U p(x+mu) - Udag(x-mu) p(x-mu) ) + //////////////////////////////////////////////////////// + for (int mu = 0; mu < Nd; mu++) { + + U = PeekIndex(Umu, mu); + PokeIndex(Umu, U*( 0.5*c1/u0), mu ); + + U = PeekIndex(Umu, mu+4); + PokeIndex(Umu, U*(-0.5*c1/u0), mu+4); + + U = PeekIndex(UUUmu, mu); + PokeIndex(UUUmu, U*( 0.5*c2/u0/u0/u0), mu ); + + U = PeekIndex(UUUmu, mu+4); + PokeIndex(UUUmu, U*(-0.5*c2/u0/u0/u0), mu+4); + } + + pickCheckerboard(Even, UmuEven, Umu); + pickCheckerboard(Odd, UmuOdd , Umu); + pickCheckerboard(Even, UUUmuEven, UUUmu); + pickCheckerboard(Odd, UUUmuOdd, UUUmu); +} +template +void ImprovedStaggeredFermion5D::DhopDir(const FermionField &in, FermionField &out,int dir5,int disp) +{ + int dir = dir5-1; // Maps to the ordering above in "directions" that is passed to stencil + // we drop off the innermost fifth dimension + + Compressor compressor; + Stencil.HaloExchange(in,compressor); + + PARALLEL_FOR_LOOP + for(int ss=0;ssoSites();ss++){ + for(int s=0;s +void ImprovedStaggeredFermion5D::DerivInternal(StencilImpl & st, + DoubledGaugeField & U, + DoubledGaugeField & UUU, + GaugeField &mat, + const FermionField &A, + const FermionField &B, + int dag) +{ + // No force terms in multi-rhs solver staggered + assert(0); +} + +template +void ImprovedStaggeredFermion5D::DhopDeriv(GaugeField &mat, + const FermionField &A, + const FermionField &B, + int dag) +{ + assert(0); +} + +template +void ImprovedStaggeredFermion5D::DhopDerivEO(GaugeField &mat, + const FermionField &A, + const FermionField &B, + int dag) +{ + assert(0); +} + + +template +void ImprovedStaggeredFermion5D::DhopDerivOE(GaugeField &mat, + const FermionField &A, + const FermionField &B, + int dag) +{ + assert(0); +} + +template +void ImprovedStaggeredFermion5D::DhopInternal(StencilImpl & st, LebesgueOrder &lo, + DoubledGaugeField & U,DoubledGaugeField & UUU, + const FermionField &in, FermionField &out,int dag) +{ + Compressor compressor; + + int LLs = in._grid->_rdimensions[0]; + + st.HaloExchange(in,compressor); + + // Dhop takes the 4d grid from U, and makes a 5d index for fermion + if (dag == DaggerYes) { + PARALLEL_FOR_LOOP + for (int ss = 0; ss < U._grid->oSites(); ss++) { + for(int s=0;soSites(); ss++) { + for(int s=0;s +void ImprovedStaggeredFermion5D::DhopOE(const FermionField &in, FermionField &out,int dag) +{ + conformable(in._grid,FermionRedBlackGrid()); // verifies half grid + conformable(in._grid,out._grid); // drops the cb check + + assert(in.checkerboard==Even); + out.checkerboard = Odd; + + DhopInternal(StencilEven,LebesgueEvenOdd,UmuOdd,UUUmuOdd,in,out,dag); +} +template +void ImprovedStaggeredFermion5D::DhopEO(const FermionField &in, FermionField &out,int dag) +{ + conformable(in._grid,FermionRedBlackGrid()); // verifies half grid + conformable(in._grid,out._grid); // drops the cb check + + assert(in.checkerboard==Odd); + out.checkerboard = Even; + + DhopInternal(StencilOdd,LebesgueEvenOdd,UmuEven,UUUmuEven,in,out,dag); +} +template +void ImprovedStaggeredFermion5D::Dhop(const FermionField &in, FermionField &out,int dag) +{ + conformable(in._grid,FermionGrid()); // verifies full grid + conformable(in._grid,out._grid); + + out.checkerboard = in.checkerboard; + + DhopInternal(Stencil,Lebesgue,Umu,UUUmu,in,out,dag); +} + + +///////////////////////////////////////////////////////////////////////// +// Implement the general interface. 
Here we use SAME mass on all slices +///////////////////////////////////////////////////////////////////////// +template +void ImprovedStaggeredFermion5D::Mdir(const FermionField &in, FermionField &out, int dir, int disp) { + DhopDir(in, out, dir, disp); +} +template +RealD ImprovedStaggeredFermion5D::M(const FermionField &in, FermionField &out) { + out.checkerboard = in.checkerboard; + Dhop(in, out, DaggerNo); + return axpy_norm(out, mass, in, out); +} + +template +RealD ImprovedStaggeredFermion5D::Mdag(const FermionField &in, FermionField &out) { + out.checkerboard = in.checkerboard; + Dhop(in, out, DaggerYes); + return axpy_norm(out, mass, in, out); +} + +template +void ImprovedStaggeredFermion5D::Meooe(const FermionField &in, FermionField &out) { + if (in.checkerboard == Odd) { + DhopEO(in, out, DaggerNo); + } else { + DhopOE(in, out, DaggerNo); + } +} +template +void ImprovedStaggeredFermion5D::MeooeDag(const FermionField &in, FermionField &out) { + if (in.checkerboard == Odd) { + DhopEO(in, out, DaggerYes); + } else { + DhopOE(in, out, DaggerYes); + } +} + +template +void ImprovedStaggeredFermion5D::Mooee(const FermionField &in, FermionField &out) { + out.checkerboard = in.checkerboard; + typename FermionField::scalar_type scal(mass); + out = scal * in; +} + +template +void ImprovedStaggeredFermion5D::MooeeDag(const FermionField &in, FermionField &out) { + out.checkerboard = in.checkerboard; + Mooee(in, out); +} + +template +void ImprovedStaggeredFermion5D::MooeeInv(const FermionField &in, FermionField &out) { + out.checkerboard = in.checkerboard; + out = (1.0 / (mass)) * in; +} + +template +void ImprovedStaggeredFermion5D::MooeeInvDag(const FermionField &in, + FermionField &out) { + out.checkerboard = in.checkerboard; + MooeeInv(in, out); +} + + + +FermOpStaggeredTemplateInstantiate(ImprovedStaggeredFermion5D); + +}} + + + diff --git a/lib/qcd/action/fermion/ImprovedStaggeredFermion5D.h b/lib/qcd/action/fermion/ImprovedStaggeredFermion5D.h new file mode 100644 index 00000000..c3502229 --- /dev/null +++ b/lib/qcd/action/fermion/ImprovedStaggeredFermion5D.h @@ -0,0 +1,164 @@ + + /************************************************************************************* + + Grid physics library, www.github.com/paboyle/Grid + + Source file: ./lib/qcd/action/fermion/ImprovedStaggeredFermion5D.h + + Copyright (C) 2015 + +Author: Peter Boyle +Author: AzusaYamaguchi + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License along + with this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
+ + See the full license in the file "LICENSE" in the top level distribution directory + *************************************************************************************/ + /* END LEGAL */ +#ifndef GRID_QCD_IMPROVED_STAGGERED_FERMION_5D_H +#define GRID_QCD_IMPROVED_STAGGERED_FERMION_5D_H + +namespace Grid { +namespace QCD { + + //////////////////////////////////////////////////////////////////////////////// + // This is the 4d red black case appropriate to support + //////////////////////////////////////////////////////////////////////////////// + + class ImprovedStaggeredFermion5DStatic { + public: + // S-direction is INNERMOST and takes no part in the parity. + static const std::vector directions; + static const std::vector displacements; + const int npoint = 16; + }; + + template + class ImprovedStaggeredFermion5D : public StaggeredKernels, public ImprovedStaggeredFermion5DStatic + { + public: + INHERIT_IMPL_TYPES(Impl); + typedef StaggeredKernels Kernels; + + /////////////////////////////////////////////////////////////// + // Implement the abstract base + /////////////////////////////////////////////////////////////// + GridBase *GaugeGrid(void) { return _FourDimGrid ;} + GridBase *GaugeRedBlackGrid(void) { return _FourDimRedBlackGrid ;} + GridBase *FermionGrid(void) { return _FiveDimGrid;} + GridBase *FermionRedBlackGrid(void) { return _FiveDimRedBlackGrid;} + + // full checkerboard operations; leave unimplemented as abstract for now + RealD M (const FermionField &in, FermionField &out); + RealD Mdag (const FermionField &in, FermionField &out); + + // half checkerboard operations + void Meooe (const FermionField &in, FermionField &out); + void Mooee (const FermionField &in, FermionField &out); + void MooeeInv (const FermionField &in, FermionField &out); + + void MeooeDag (const FermionField &in, FermionField &out); + void MooeeDag (const FermionField &in, FermionField &out); + void MooeeInvDag (const FermionField &in, FermionField &out); + + void Mdir (const FermionField &in, FermionField &out,int dir,int disp); + void DhopDir(const FermionField &in, FermionField &out,int dir,int disp); + + // These can be overridden by fancy 5d chiral action + void DhopDeriv (GaugeField &mat,const FermionField &U,const FermionField &V,int dag); + void DhopDerivEO(GaugeField &mat,const FermionField &U,const FermionField &V,int dag); + void DhopDerivOE(GaugeField &mat,const FermionField &U,const FermionField &V,int dag); + + // Implement hopping term non-hermitian hopping term; half cb or both + void Dhop (const FermionField &in, FermionField &out,int dag); + void DhopOE(const FermionField &in, FermionField &out,int dag); + void DhopEO(const FermionField &in, FermionField &out,int dag); + + + /////////////////////////////////////////////////////////////// + // New methods added + /////////////////////////////////////////////////////////////// + void DerivInternal(StencilImpl & st, + DoubledGaugeField & U, + DoubledGaugeField & UUU, + GaugeField &mat, + const FermionField &A, + const FermionField &B, + int dag); + + void DhopInternal(StencilImpl & st, + LebesgueOrder &lo, + DoubledGaugeField &U, + DoubledGaugeField &UUU, + const FermionField &in, + FermionField &out, + int dag); + + // Constructors + ImprovedStaggeredFermion5D(GaugeField &_Uthin, + GaugeField &_Ufat, + GridCartesian &FiveDimGrid, + GridRedBlackCartesian &FiveDimRedBlackGrid, + GridCartesian &FourDimGrid, + GridRedBlackCartesian &FourDimRedBlackGrid, + double _mass, + RealD _c1=9.0/8.0, RealD _c2=-1.0/24.0,RealD _u0=1.0, + const 
ImplParams &p= ImplParams()); + + // DoubleStore + void ImportGauge(const GaugeField &_U); + void ImportGauge(const GaugeField &_Uthin,const GaugeField &_Ufat); + + /////////////////////////////////////////////////////////////// + // Data members require to support the functionality + /////////////////////////////////////////////////////////////// + public: + + GridBase *_FourDimGrid; + GridBase *_FourDimRedBlackGrid; + GridBase *_FiveDimGrid; + GridBase *_FiveDimRedBlackGrid; + + RealD mass; + RealD c1; + RealD c2; + RealD u0; + int Ls; + + //Defines the stencils for even and odd + StencilImpl Stencil; + StencilImpl StencilEven; + StencilImpl StencilOdd; + + // Copy of the gauge field , with even and odd subsets + DoubledGaugeField Umu; + DoubledGaugeField UmuEven; + DoubledGaugeField UmuOdd; + + DoubledGaugeField UUUmu; + DoubledGaugeField UUUmuEven; + DoubledGaugeField UUUmuOdd; + + LebesgueOrder Lebesgue; + LebesgueOrder LebesgueEvenOdd; + + // Comms buffer + std::vector > comm_buf; + + }; + +}} + +#endif diff --git a/tests/core/Test_staggered.cc b/tests/core/Test_staggered.cc new file mode 100644 index 00000000..89055fc7 --- /dev/null +++ b/tests/core/Test_staggered.cc @@ -0,0 +1,291 @@ + /************************************************************************************* + + Grid physics library, www.github.com/paboyle/Grid + + Source file: ./benchmarks/Benchmark_wilson.cc + + Copyright (C) 2015 + +Author: Peter Boyle +Author: paboyle + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License along + with this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
+ + See the full license in the file "LICENSE" in the top level distribution directory + *************************************************************************************/ + /* END LEGAL */ +#include + +using namespace std; +using namespace Grid; +using namespace Grid::QCD; + +int main (int argc, char ** argv) +{ + Grid_init(&argc,&argv); + + std::vector latt_size = GridDefaultLatt(); + std::vector simd_layout = GridDefaultSimd(Nd,vComplex::Nsimd()); + std::vector mpi_layout = GridDefaultMpi(); + GridCartesian Grid(latt_size,simd_layout,mpi_layout); + GridRedBlackCartesian RBGrid(latt_size,simd_layout,mpi_layout); + + int threads = GridThread::GetThreads(); + std::cout< seeds({1,2,3,4}); + GridParallelRNG pRNG(&Grid); + pRNG.SeedFixedIntegers(seeds); + // pRNG.SeedRandomDevice(); + + typedef typename ImprovedStaggeredFermionR::FermionField FermionField; + typedef typename ImprovedStaggeredFermionR::ComplexField ComplexField; + typename ImprovedStaggeredFermionR::ImplParams params; + + FermionField src (&Grid); random(pRNG,src); + FermionField result(&Grid); result=zero; + FermionField ref(&Grid); ref=zero; + FermionField tmp(&Grid); tmp=zero; + FermionField err(&Grid); tmp=zero; + FermionField phi (&Grid); random(pRNG,phi); + FermionField chi (&Grid); random(pRNG,chi); + LatticeGaugeField Umu(&Grid); SU3::HotConfiguration(pRNG,Umu); + std::vector U(4,&Grid); + + + double volume=1; + for(int mu=0;mu(Umu,mu); + /* Debug force unit + U[mu] = 1.0; + PokeIndex(Umu,U[mu],mu); + */ + } + + ref = zero; + + RealD mass=0.1; + RealD c1=9.0/8.0; + RealD c2=-1.0/24.0; + RealD u0=1.0; + + { // Simple improved staggered implementation + ref = zero; + RealD c1tad = 0.5*c1/u0; + RealD c2tad = 0.5*c2/u0/u0/u0; + + Lattice > coor(&Grid); + + Lattice > x(&Grid); LatticeCoordinate(x,0); + Lattice > y(&Grid); LatticeCoordinate(y,1); + Lattice > z(&Grid); LatticeCoordinate(z,2); + Lattice > t(&Grid); LatticeCoordinate(t,3); + + Lattice > lin_z(&Grid); lin_z=x+y; + Lattice > lin_t(&Grid); lin_t=x+y+z; + + for(int mu=0;mu * = < chi | Deo^dag| phi> "< HermOpEO(Ds); + HermOpEO.MpcDagMpc(chi_e,dchi_e,t1,t2); + HermOpEO.MpcDagMpc(chi_o,dchi_o,t1,t2); + + HermOpEO.MpcDagMpc(phi_e,dphi_e,t1,t2); + HermOpEO.MpcDagMpc(phi_o,dphi_o,t1,t2); + + pDce = innerProduct(phi_e,dchi_e); + pDco = innerProduct(phi_o,dchi_o); + cDpe = innerProduct(chi_e,dphi_e); + cDpo = innerProduct(chi_o,dphi_o); + + std::cout< +Author: paboyle + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License along + with this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
+ + See the full license in the file "LICENSE" in the top level distribution directory + *************************************************************************************/ + /* END LEGAL */ +#include + +using namespace std; +using namespace Grid; +using namespace Grid::QCD; + +int main (int argc, char ** argv) +{ + Grid_init(&argc,&argv); + + std::vector latt_size = GridDefaultLatt(); + std::vector simd_layout = GridDefaultSimd(Nd,vComplex::Nsimd()); + std::vector mpi_layout = GridDefaultMpi(); + + std::cout << GridLogMessage << "Making s innermost grids"< seeds({1,2,3,4}); + GridParallelRNG pRNG4(UGrid); + GridParallelRNG pRNG5(FGrid); + pRNG4.SeedFixedIntegers(seeds); + pRNG5.SeedFixedIntegers(seeds); + + typedef typename ImprovedStaggeredFermion5DR::FermionField FermionField; + typedef typename ImprovedStaggeredFermion5DR::ComplexField ComplexField; + typename ImprovedStaggeredFermion5DR::ImplParams params; + + FermionField src (FGrid); random(pRNG5,src); + FermionField result(FGrid); result=zero; + FermionField ref(FGrid); ref=zero; + FermionField tmp(FGrid); tmp=zero; + FermionField err(FGrid); tmp=zero; + FermionField phi (FGrid); random(pRNG5,phi); + FermionField chi (FGrid); random(pRNG5,chi); + + LatticeGaugeField Umu(UGrid); SU3::ColdConfiguration(pRNG4,Umu); + + double volume=Ls; + for(int mu=0;muoSites();ss++){ + for(int s=0;s U(4,FGrid); + for(int mu=0;mu(Umu5d,mu); + if ( mu!=0 ) U[mu]=zero; + PokeIndex(Umu5d,U[mu],mu); + } + + std::vector Ua(4,UGrid); + for(int mu=0;mu(Umu,mu); + if ( mu!=0 ) { + Ua[mu]=zero; + } + PokeIndex(Umu,Ua[mu],mu); + } + + RealD mass=0.1; + RealD c1=9.0/8.0; + RealD c2=-1.0/24.0; + RealD u0=1.0; + + { // Simple improved staggered implementation + ref = zero; + RealD c1tad = 0.5*c1/u0; + RealD c2tad = 0.5*c2/u0/u0/u0; + + Lattice > coor(FGrid); + + Lattice > x(FGrid); LatticeCoordinate(x,1); // s innermost + Lattice > y(FGrid); LatticeCoordinate(y,2); + Lattice > z(FGrid); LatticeCoordinate(z,3); + Lattice > t(FGrid); LatticeCoordinate(t,4); + + Lattice > lin_z(FGrid); lin_z=x+y; + Lattice > lin_t(FGrid); lin_t=x+y+z; + + for(int mu=0;mu * = < chi | Deo^dag| phi> "< HermOpEO(Ds); + HermOpEO.MpcDagMpc(chi_e,dchi_e,t1,t2); + HermOpEO.MpcDagMpc(chi_o,dchi_o,t1,t2); + + HermOpEO.MpcDagMpc(phi_e,dphi_e,t1,t2); + HermOpEO.MpcDagMpc(phi_o,dphi_o,t1,t2); + + pDce = innerProduct(phi_e,dchi_e); + pDco = innerProduct(phi_o,dchi_o); + cDpe = innerProduct(chi_e,dphi_e); + cDpo = innerProduct(chi_o,dphi_o); + + std::cout< Date: Mon, 12 Dec 2016 09:07:38 +0000 Subject: [PATCH 006/101] Staggered kernels options --- lib/Init.cc | 3 +++ 1 file changed, 3 insertions(+) diff --git a/lib/Init.cc b/lib/Init.cc index d6d6b9f8..c1bc4b4a 100644 --- a/lib/Init.cc +++ b/lib/Init.cc @@ -324,12 +324,15 @@ void Grid_init(int *argc,char ***argv) } if( GridCmdOptionExists(*argv,*argv+*argc,"--dslash-unroll") ){ QCD::WilsonKernelsStatic::Opt=QCD::WilsonKernelsStatic::OptHandUnroll; + QCD::StaggeredKernelsStatic::Opt=QCD::StaggeredKernelsStatic::OptHandUnroll; } if( GridCmdOptionExists(*argv,*argv+*argc,"--dslash-asm") ){ QCD::WilsonKernelsStatic::Opt=QCD::WilsonKernelsStatic::OptInlineAsm; + QCD::StaggeredKernelsStatic::Opt=QCD::StaggeredKernelsStatic::OptInlineAsm; } if( GridCmdOptionExists(*argv,*argv+*argc,"--dslash-generic") ){ QCD::WilsonKernelsStatic::Opt=QCD::WilsonKernelsStatic::OptGeneric; + QCD::StaggeredKernelsStatic::Opt=QCD::StaggeredKernelsStatic::OptGeneric; } if( GridCmdOptionExists(*argv,*argv+*argc,"--lebesgue") ){ LebesgueOrder::UseLebesgueOrder=1; From 
1440565a10946375e7ca5ac72627bdf727687199 Mon Sep 17 00:00:00 2001 From: Azusa Yamaguchi Date: Mon, 12 Dec 2016 09:08:04 +0000 Subject: [PATCH 007/101] Decrease verbosity --- lib/qcd/action/fermion/FermionOperatorImpl.h | 3 --- 1 file changed, 3 deletions(-) diff --git a/lib/qcd/action/fermion/FermionOperatorImpl.h b/lib/qcd/action/fermion/FermionOperatorImpl.h index 42f89157..36ab35ca 100644 --- a/lib/qcd/action/fermion/FermionOperatorImpl.h +++ b/lib/qcd/action/fermion/FermionOperatorImpl.h @@ -614,9 +614,6 @@ PARALLEL_FOR_LOOP PokeIndex(UUUds, UUU, mu); PokeIndex(UUUds, UUUdag, mu+4); - std::cout << GridLogMessage << " Created the treble links for staggered Naik term" < Date: Mon, 12 Dec 2016 09:08:53 +0000 Subject: [PATCH 008/101] Kernels options --- lib/qcd/action/fermion/StaggeredKernels.cc | 28 ++++++++++++++++++---- lib/qcd/action/fermion/StaggeredKernels.h | 6 +++++ 2 files changed, 30 insertions(+), 4 deletions(-) diff --git a/lib/qcd/action/fermion/StaggeredKernels.cc b/lib/qcd/action/fermion/StaggeredKernels.cc index b437199c..6608f8de 100644 --- a/lib/qcd/action/fermion/StaggeredKernels.cc +++ b/lib/qcd/action/fermion/StaggeredKernels.cc @@ -30,6 +30,8 @@ directory namespace Grid { namespace QCD { +int StaggeredKernelsStatic::Opt; + template StaggeredKernels::StaggeredKernels(const ImplParams &p) : Base(p){}; @@ -189,8 +191,17 @@ void StaggeredKernels::DhopSiteDag(StencilImpl &st, LebesgueOrder &lo, Dou SiteSpinor naive; int oneLink =0; int threeLink=1; - DhopSiteDepth(st,lo,U,buf,sF,sU,in,naive,oneLink); - DhopSiteDepth(st,lo,UUU,buf,sF,sU,in,naik,threeLink); + switch(Opt) { + case OptHandUnroll: + DhopSiteDepthHand(st,lo,U,buf,sF,sU,in,naive,oneLink); + DhopSiteDepthHand(st,lo,UUU,buf,sF,sU,in,naik,threeLink); + break; + case OptGeneric: + default: + DhopSiteDepth(st,lo,U,buf,sF,sU,in,naive,oneLink); + DhopSiteDepth(st,lo,UUU,buf,sF,sU,in,naik,threeLink); + break; + } out._odata[sF] =-naive-naik; }; template @@ -201,8 +212,17 @@ void StaggeredKernels::DhopSite(StencilImpl &st, LebesgueOrder &lo, Double SiteSpinor naive; int oneLink =0; int threeLink=1; - DhopSiteDepth(st,lo,U,buf,sF,sU,in,naive,oneLink); - DhopSiteDepth(st,lo,UUU,buf,sF,sU,in,naik,threeLink); + switch(Opt) { + case OptHandUnroll: + DhopSiteDepthHand(st,lo,U,buf,sF,sU,in,naive,oneLink); + DhopSiteDepthHand(st,lo,UUU,buf,sF,sU,in,naik,threeLink); + break; + case OptGeneric: + default: + DhopSiteDepth(st,lo,U,buf,sF,sU,in,naive,oneLink); + DhopSiteDepth(st,lo,UUU,buf,sF,sU,in,naik,threeLink); + break; + } out._odata[sF] =naive+naik; }; diff --git a/lib/qcd/action/fermion/StaggeredKernels.h b/lib/qcd/action/fermion/StaggeredKernels.h index f51a4b37..5a6cb45c 100644 --- a/lib/qcd/action/fermion/StaggeredKernels.h +++ b/lib/qcd/action/fermion/StaggeredKernels.h @@ -37,6 +37,9 @@ namespace QCD { //////////////////////////////////////////////////////////////////////////////////////////////////////////////// class StaggeredKernelsStatic { public: + enum { OptGeneric, OptHandUnroll, OptInlineAsm }; + // S-direction is INNERMOST and takes no part in the parity. 
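+  // Opt selects among the generic, hand-unrolled and inline-assembler
+  // Dhop bodies; it is set at start-up in Grid_init() by the command
+  // line flags --dslash-generic, --dslash-unroll and --dslash-asm.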
+ static int Opt; // these are a temporary hack }; template class StaggeredKernels : public FermionOperator , public StaggeredKernelsStatic { @@ -52,6 +55,9 @@ public: void DhopSiteDepth(StencilImpl &st, LebesgueOrder &lo, DoubledGaugeField &U, SiteSpinor * buf, int sF, int sU, const FermionField &in, SiteSpinor &out,int threeLink); + + void DhopSiteDepthHand(StencilImpl &st, LebesgueOrder &lo, DoubledGaugeField &U, SiteSpinor * buf, + int sF, int sU, const FermionField &in, SiteSpinor &out,int threeLink); void DhopSite(StencilImpl &st, LebesgueOrder &lo, DoubledGaugeField &U, DoubledGaugeField &UUU, SiteSpinor * buf, int sF, int sU, const FermionField &in, FermionField &out); From 426197e4468081de37c7b64cd439cb683cad7973 Mon Sep 17 00:00:00 2001 From: Azusa Yamaguchi Date: Mon, 12 Dec 2016 09:10:54 +0000 Subject: [PATCH 009/101] Nc=3 --- .../action/fermion/StaggeredKernelsHand.cc | 283 ++++++++++++++++++ 1 file changed, 283 insertions(+) create mode 100644 lib/qcd/action/fermion/StaggeredKernelsHand.cc diff --git a/lib/qcd/action/fermion/StaggeredKernelsHand.cc b/lib/qcd/action/fermion/StaggeredKernelsHand.cc new file mode 100644 index 00000000..5f9e11e5 --- /dev/null +++ b/lib/qcd/action/fermion/StaggeredKernelsHand.cc @@ -0,0 +1,283 @@ + /************************************************************************************* + + Grid physics library, www.github.com/paboyle/Grid + + Source file: ./lib/qcd/action/fermion/StaggerdKernelsHand.cc + + Copyright (C) 2015 + +Author: Peter Boyle +Author: paboyle + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License along + with this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
+ + See the full license in the file "LICENSE" in the top level distribution directory + *************************************************************************************/ + /* END LEGAL */ +#include + +#define REGISTER + +#define LOAD_CHI(b) \ + const SiteSpinor & ref (b[offset]); \ + Chi_0=ref()()(0);\ + Chi_1=ref()()(1);\ + Chi_2=ref()()(2); + + +// To splat or not to splat depends on the implementation +#define MULT(A,UChi) \ + auto & ref(U._odata[sU](A)); \ + Impl::loadLinkElement(U_00,ref()(0,0)); \ + Impl::loadLinkElement(U_10,ref()(1,0)); \ + Impl::loadLinkElement(U_20,ref()(2,0)); \ + Impl::loadLinkElement(U_01,ref()(0,1)); \ + Impl::loadLinkElement(U_11,ref()(1,1)); \ + Impl::loadLinkElement(U_21,ref()(2,1)); \ + Impl::loadLinkElement(U_02,ref()(0,2)); \ + Impl::loadLinkElement(U_12,ref()(1,2)); \ + Impl::loadLinkElement(U_22,ref()(2,2)); \ + UChi ## _0 = U_00*Chi_0; \ + UChi ## _1 = U_10*Chi_0;\ + UChi ## _2 = U_20*Chi_0;\ + UChi ## _0 += U_01*Chi_1;\ + UChi ## _1 += U_11*Chi_1;\ + UChi ## _2 += U_21*Chi_1;\ + UChi ## _0 += U_02*Chi_2;\ + UChi ## _1 += U_12*Chi_2;\ + UChi ## _2 += U_22*Chi_2; + +#define MULT_ADD(A,UChi) \ + auto & ref(U._odata[sU](A)); \ + Impl::loadLinkElement(U_00,ref()(0,0)); \ + Impl::loadLinkElement(U_10,ref()(1,0)); \ + Impl::loadLinkElement(U_20,ref()(2,0)); \ + Impl::loadLinkElement(U_01,ref()(0,1)); \ + Impl::loadLinkElement(U_11,ref()(1,1)); \ + Impl::loadLinkElement(U_21,ref()(2,1)); \ + Impl::loadLinkElement(U_02,ref()(0,2)); \ + Impl::loadLinkElement(U_12,ref()(1,2)); \ + Impl::loadLinkElement(U_22,ref()(2,2)); \ + UChi ## _0 += U_00*Chi_0; \ + UChi ## _1 += U_10*Chi_0;\ + UChi ## _2 += U_20*Chi_0;\ + UChi ## _0 += U_01*Chi_1;\ + UChi ## _1 += U_11*Chi_1;\ + UChi ## _2 += U_21*Chi_1;\ + UChi ## _0 += U_02*Chi_2;\ + UChi ## _1 += U_12*Chi_2;\ + UChi ## _2 += U_22*Chi_2; + + +#define PERMUTE_DIR(dir) \ + permute##dir(Chi_0,Chi_0);\ + permute##dir(Chi_1,Chi_1);\ + permute##dir(Chi_2,Chi_2); + +namespace Grid { +namespace QCD { + + +template +void StaggeredKernels::DhopSiteDepthHand(StencilImpl &st, LebesgueOrder &lo, DoubledGaugeField &U, + SiteSpinor *buf, int sF, + int sU, const FermionField &in, SiteSpinor &out,int threeLink) { +{ + typedef typename Simd::scalar_type S; + typedef typename Simd::vector_type V; + + REGISTER Simd even_0; // 12 regs on knc + REGISTER Simd even_1; + REGISTER Simd even_2; + REGISTER Simd odd_0; // 12 regs on knc + REGISTER Simd odd_1; + REGISTER Simd odd_2; + + REGISTER Simd Chi_0; // two spinor; 6 regs + REGISTER Simd Chi_1; + REGISTER Simd Chi_2; + + REGISTER Simd U_00; // two rows of U matrix + REGISTER Simd U_10; + REGISTER Simd U_20; + REGISTER Simd U_01; + REGISTER Simd U_11; + REGISTER Simd U_21; // 2 reg left. + REGISTER Simd U_02; + REGISTER Simd U_12; + REGISTER Simd U_22; + + int skew = 0; + if (threeLink) skew=8; + + int offset,local,perm, ptype; + StencilEntry *SE; + + // Xp + SE=st.GetEntry(ptype,Xp+skew,sF); + offset = SE->_offset; + local = SE->_is_local; + perm = SE->_permute; + + if ( local ) { + LOAD_CHI(in._odata); + if ( perm) { + PERMUTE_DIR(3); // T==0, Z==1, Y==2, Z==3 expect 1,2,2,2 simd layout etc... + } + } else { + LOAD_CHI(buf); + } + { + MULT(Xp,even); + } + + // Yp + SE=st.GetEntry(ptype,Yp+skew,sF); + offset = SE->_offset; + local = SE->_is_local; + perm = SE->_permute; + + if ( local ) { + LOAD_CHI(in._odata); + if ( perm) { + PERMUTE_DIR(2); // T==0, Z==1, Y==2, Z==3 expect 1,2,2,2 simd layout etc... 
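+      // perm is set when the neighbour is on-node but held in a
+      // different SIMD lane of the same vector word, so the lanes of
+      // Chi_0..Chi_2 must be permuted before the link multiply.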
+ } + } else { + LOAD_CHI(buf); + } + { + MULT(Yp,odd); + } + + + // Zp + SE=st.GetEntry(ptype,Zp+skew,sF); + offset = SE->_offset; + local = SE->_is_local; + perm = SE->_permute; + + if ( local ) { + LOAD_CHI(in._odata); + if ( perm) { + PERMUTE_DIR(1); // T==0, Z==1, Y==2, Z==3 expect 1,2,2,2 simd layout etc... + } + } else { + LOAD_CHI(buf); + } + { + MULT_ADD(Zp,even); + } + + // Tp + SE=st.GetEntry(ptype,Tp+skew,sF); + offset = SE->_offset; + local = SE->_is_local; + perm = SE->_permute; + + if ( local ) { + LOAD_CHI(in._odata); + if ( perm) { + PERMUTE_DIR(0); // T==0, Z==1, Y==2, Z==3 expect 1,2,2,2 simd layout etc... + } + } else { + LOAD_CHI(buf); + } + { + MULT_ADD(Tp,odd); + } + + // Xm + SE=st.GetEntry(ptype,Xm+skew,sF); + offset = SE->_offset; + local = SE->_is_local; + perm = SE->_permute; + + if ( local ) { + LOAD_CHI(in._odata); + if ( perm) { + PERMUTE_DIR(3); // T==0, Z==1, Y==2, Z==3 expect 1,2,2,2 simd layout etc... + } + } else { + LOAD_CHI(buf); + } + { + MULT_ADD(Xm,even); + } + + + // Ym + SE=st.GetEntry(ptype,Ym+skew,sF); + offset = SE->_offset; + local = SE->_is_local; + perm = SE->_permute; + + if ( local ) { + LOAD_CHI(in._odata); + if ( perm) { + PERMUTE_DIR(2); // T==0, Z==1, Y==2, Z==3 expect 1,2,2,2 simd layout etc... + } + } else { + LOAD_CHI(buf); + } + { + MULT_ADD(Ym,odd); + } + + // Zm + SE=st.GetEntry(ptype,Zm+skew,sF); + offset = SE->_offset; + local = SE->_is_local; + perm = SE->_permute; + + if ( local ) { + LOAD_CHI(in._odata); + if ( perm) { + PERMUTE_DIR(1); // T==0, Z==1, Y==2, Z==3 expect 1,2,2,2 simd layout etc... + } + } else { + LOAD_CHI(buf); + } + { + MULT_ADD(Zm,even); + } + + // Tm + SE=st.GetEntry(ptype,Tm+skew,sF); + offset = SE->_offset; + local = SE->_is_local; + perm = SE->_permute; + + if ( local ) { + LOAD_CHI(in._odata); + if ( perm) { + PERMUTE_DIR(0); // T==0, Z==1, Y==2, Z==3 expect 1,2,2,2 simd layout etc... 
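+      // Last of the eight legs. The even_* and odd_* register sets
+      // accumulate alternate directions so the multiply-add chains stay
+      // independent; they are summed once, below, as the result is
+      // streamed out.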
+ } + } else { + LOAD_CHI(buf); + } + { + MULT_ADD(Tm,odd); + } + + vstream(out()()(0),even_0+odd_0); + vstream(out()()(1),even_1+odd_1); + vstream(out()()(2),even_2+odd_2); + + } +} + +FermOpStaggeredTemplateInstantiate(StaggeredKernels); + +}} From eabc5779407ea45e7900e94a5725d953a43ba3b6 Mon Sep 17 00:00:00 2001 From: azusayamaguchi Date: Fri, 16 Dec 2016 16:55:36 +0000 Subject: [PATCH 010/101] Assembler possibly working --- lib/Init.cc | 2 +- lib/qcd/action/Actions.h | 8 + lib/qcd/action/fermion/FermionOperatorImpl.h | 191 +++++++++- .../fermion/ImprovedStaggeredFermion5D.cc | 66 ++-- lib/qcd/action/fermion/StaggeredKernels.cc | 26 +- lib/qcd/action/fermion/StaggeredKernels.h | 3 + lib/qcd/action/fermion/StaggeredKernelsAsm.cc | 327 ++++++++++++++++++ .../action/fermion/StaggeredKernelsHand.cc | 1 + lib/qcd/action/fermion/WilsonFermion5D.cc | 76 ++-- tests/core/Test_staggered5D.cc | 21 +- tests/core/Test_staggered5Dvec.cc | 157 +++++++++ 11 files changed, 784 insertions(+), 94 deletions(-) create mode 100644 lib/qcd/action/fermion/StaggeredKernelsAsm.cc create mode 100644 tests/core/Test_staggered5Dvec.cc diff --git a/lib/Init.cc b/lib/Init.cc index c1bc4b4a..fffc6c31 100644 --- a/lib/Init.cc +++ b/lib/Init.cc @@ -416,7 +416,7 @@ void Grid_sa_signal_handler(int sig,siginfo_t *si,void * ptr) #endif #endif BACKTRACE(); - exit(0); + if ( si->si_signo != SIGTRAP ) exit(0); return; }; diff --git a/lib/qcd/action/Actions.h b/lib/qcd/action/Actions.h index f7471cf0..34656602 100644 --- a/lib/qcd/action/Actions.h +++ b/lib/qcd/action/Actions.h @@ -113,6 +113,10 @@ typedef SymanzikGaugeAction ConjugateSymanzikGaugeAction template class A; \ template class A; +#define FermOpStaggeredVec5dTemplateInstantiate(A) \ + template class A; \ + template class A; + #define FermOp4dVecTemplateInstantiate(A) \ template class A; \ template class A; \ @@ -284,6 +288,10 @@ typedef ImprovedStaggeredFermion5D ImprovedStaggeredFermion5DR; typedef ImprovedStaggeredFermion5D ImprovedStaggeredFermion5DF; typedef ImprovedStaggeredFermion5D ImprovedStaggeredFermion5DD; +typedef ImprovedStaggeredFermion5D ImprovedStaggeredFermionVec5dR; +typedef ImprovedStaggeredFermion5D ImprovedStaggeredFermionVec5dF; +typedef ImprovedStaggeredFermion5D ImprovedStaggeredFermionVec5dD; + }} /////////////////////////////////////////////////////////////////////////////// diff --git a/lib/qcd/action/fermion/FermionOperatorImpl.h b/lib/qcd/action/fermion/FermionOperatorImpl.h index 36ab35ca..dc4d0879 100644 --- a/lib/qcd/action/fermion/FermionOperatorImpl.h +++ b/lib/qcd/action/fermion/FermionOperatorImpl.h @@ -224,12 +224,13 @@ class DomainWallVec5dImpl : public PeriodicGaugeImpl< GaugeImplTypes< S,Nrepres typedef iImplSpinor SiteSpinor; typedef iImplHalfSpinor SiteHalfSpinor; typedef Lattice FermionField; - + + ///////////////////////////////////////////////// // Make the doubled gauge field a *scalar* + ///////////////////////////////////////////////// typedef iImplDoubledGaugeField SiteDoubledGaugeField; // This is a scalar typedef iImplGaugeField SiteScalarGaugeField; // scalar typedef iImplGaugeLink SiteScalarGaugeLink; // scalar - typedef Lattice DoubledGaugeField; typedef WilsonCompressor Compressor; @@ -261,11 +262,11 @@ class DomainWallVec5dImpl : public PeriodicGaugeImpl< GaugeImplTypes< S,Nrepres inline void DoubleStore(GridBase *GaugeGrid, DoubledGaugeField &Uds,const GaugeField &Umu) { - SiteScalarGaugeField ScalarUmu; + SiteScalarGaugeField ScalarUmu; SiteDoubledGaugeField ScalarUds; GaugeLinkField U(Umu._grid); - 
GaugeField Uadj(Umu._grid); + GaugeField Uadj(Umu._grid); for (int mu = 0; mu < Nd; mu++) { U = PeekIndex(Umu, mu); U = adj(Cshift(U, mu, -1)); @@ -631,6 +632,184 @@ PARALLEL_FOR_LOOP + ///////////////////////////////////////////////////////////////////////////// + // Single flavour one component spinors with colour index. 5d vec + ///////////////////////////////////////////////////////////////////////////// + template + class StaggeredVec5dImpl : public PeriodicGaugeImpl > { + + public: + + typedef RealD _Coeff_t ; + static const int Dimension = Representation::Dimension; + typedef PeriodicGaugeImpl > Gimpl; + + //Necessary? + constexpr bool is_fundamental() const{return Dimension == Nc ? 1 : 0;} + + const bool LsVectorised=true; + + typedef _Coeff_t Coeff_t; + + INHERIT_GIMPL_TYPES(Gimpl); + + template using iImplScalar = iScalar > >; + template using iImplSpinor = iScalar > >; + template using iImplHalfSpinor = iScalar > >; + template using iImplDoubledGaugeField = iVector >, Nds>; + template using iImplGaugeField = iVector >, Nd>; + template using iImplGaugeLink = iScalar > >; + + // Make the doubled gauge field a *scalar* + typedef iImplDoubledGaugeField SiteDoubledGaugeField; // This is a scalar + typedef iImplGaugeField SiteScalarGaugeField; // scalar + typedef iImplGaugeLink SiteScalarGaugeLink; // scalar + typedef Lattice DoubledGaugeField; + + typedef iImplScalar SiteComplex; + typedef iImplSpinor SiteSpinor; + typedef iImplHalfSpinor SiteHalfSpinor; + + + typedef Lattice ComplexField; + typedef Lattice FermionField; + + typedef SimpleCompressor Compressor; + typedef StaggeredImplParams ImplParams; + typedef CartesianStencil StencilImpl; + + ImplParams Params; + + StaggeredVec5dImpl(const ImplParams &p = ImplParams()) : Params(p){}; + + template + inline void loadLinkElement(Simd ®, ref &memory) { + vsplat(reg, memory); + } + + inline void multLink(SiteHalfSpinor &phi, const SiteDoubledGaugeField &U, + const SiteHalfSpinor &chi, int mu) { + SiteGaugeLink UU; + for (int i = 0; i < Dimension; i++) { + for (int j = 0; j < Dimension; j++) { + vsplat(UU()()(i, j), U(mu)()(i, j)); + } + } + mult(&phi(), &UU(), &chi()); + } + inline void multLinkAdd(SiteHalfSpinor &phi, const SiteDoubledGaugeField &U, + const SiteHalfSpinor &chi, int mu) { + SiteGaugeLink UU; + for (int i = 0; i < Dimension; i++) { + for (int j = 0; j < Dimension; j++) { + vsplat(UU()()(i, j), U(mu)()(i, j)); + } + } + mac(&phi(), &UU(), &chi()); + } + + inline void DoubleStore(GridBase *GaugeGrid, + DoubledGaugeField &UUUds, // for Naik term + DoubledGaugeField &Uds, + const GaugeField &Uthin, + const GaugeField &Ufat) + { + + GridBase * InputGrid = Uthin._grid; + conformable(InputGrid,Ufat._grid); + + GaugeLinkField U(InputGrid); + GaugeLinkField UU(InputGrid); + GaugeLinkField UUU(InputGrid); + GaugeLinkField Udag(InputGrid); + GaugeLinkField UUUdag(InputGrid); + + for (int mu = 0; mu < Nd; mu++) { + + // Staggered Phase. 
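+      // Kogut-Susskind phases: eta_x = 1, eta_y = (-1)^x,
+      // eta_z = (-1)^(x+y), eta_t = (-1)^(x+y+z); implemented below by
+      // flipping the sign of "phases" wherever the summed coordinate
+      // is odd.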
+ Lattice > coor(InputGrid); + Lattice > x(InputGrid); LatticeCoordinate(x,0); + Lattice > y(InputGrid); LatticeCoordinate(y,1); + Lattice > z(InputGrid); LatticeCoordinate(z,2); + Lattice > t(InputGrid); LatticeCoordinate(t,3); + + Lattice > lin_z(InputGrid); lin_z=x+y; + Lattice > lin_t(InputGrid); lin_t=x+y+z; + + ComplexField phases(InputGrid); phases=1.0; + + if ( mu == 1 ) phases = where( mod(x ,2)==(Integer)0, phases,-phases); + if ( mu == 2 ) phases = where( mod(lin_z,2)==(Integer)0, phases,-phases); + if ( mu == 3 ) phases = where( mod(lin_t,2)==(Integer)0, phases,-phases); + + // 1 hop based on fat links + U = PeekIndex(Ufat, mu); + Udag = adj( Cshift(U, mu, -1)); + + U = U *phases; + Udag = Udag *phases; + + + for (int lidx = 0; lidx < GaugeGrid->lSites(); lidx++) { + SiteScalarGaugeLink ScalarU; + SiteDoubledGaugeField ScalarUds; + + std::vector lcoor; + GaugeGrid->LocalIndexToLocalCoor(lidx, lcoor); + peekLocalSite(ScalarUds, Uds, lcoor); + + peekLocalSite(ScalarU, U, lcoor); + ScalarUds(mu) = ScalarU(); + + peekLocalSite(ScalarU, Udag, lcoor); + ScalarUds(mu + 4) = ScalarU(); + + pokeLocalSite(ScalarUds, Uds, lcoor); + } + + // 3 hop based on thin links. Crazy huh ? + U = PeekIndex(Uthin, mu); + UU = Gimpl::CovShiftForward(U,mu,U); + UUU= Gimpl::CovShiftForward(U,mu,UU); + + UUUdag = adj( Cshift(UUU, mu, -3)); + + UUU = UUU *phases; + UUUdag = UUUdag *phases; + + for (int lidx = 0; lidx < GaugeGrid->lSites(); lidx++) { + + SiteScalarGaugeLink ScalarU; + SiteDoubledGaugeField ScalarUds; + + std::vector lcoor; + GaugeGrid->LocalIndexToLocalCoor(lidx, lcoor); + + peekLocalSite(ScalarUds, UUUds, lcoor); + + peekLocalSite(ScalarU, UUU, lcoor); + ScalarUds(mu) = ScalarU(); + + peekLocalSite(ScalarU, UUUdag, lcoor); + ScalarUds(mu + 4) = ScalarU(); + + pokeLocalSite(ScalarUds, UUUds, lcoor); + } + + } + } + + inline void InsertForce4D(GaugeField &mat, FermionField &Btilde, FermionField &A,int mu){ + assert(0); + } + + inline void InsertForce5D(GaugeField &mat, FermionField &Btilde, FermionField Ã,int mu){ + assert (0); + } + }; + + + typedef WilsonImpl WilsonImplR; // Real.. whichever prec typedef WilsonImpl WilsonImplF; // Float typedef WilsonImpl WilsonImplD; // Double @@ -663,6 +842,10 @@ PARALLEL_FOR_LOOP typedef StaggeredImpl StaggeredImplF; // Float typedef StaggeredImpl StaggeredImplD; // Double + typedef StaggeredVec5dImpl StaggeredVec5dImplR; // Real.. 
whichever prec + typedef StaggeredVec5dImpl StaggeredVec5dImplF; // Float + typedef StaggeredVec5dImpl StaggeredVec5dImplD; // Double + }} #endif diff --git a/lib/qcd/action/fermion/ImprovedStaggeredFermion5D.cc b/lib/qcd/action/fermion/ImprovedStaggeredFermion5D.cc index 71a6bf06..0455df0d 100644 --- a/lib/qcd/action/fermion/ImprovedStaggeredFermion5D.cc +++ b/lib/qcd/action/fermion/ImprovedStaggeredFermion5D.cc @@ -69,37 +69,57 @@ ImprovedStaggeredFermion5D::ImprovedStaggeredFermion5D(GaugeField &_Uthin, Lebesgue(_FourDimGrid), LebesgueEvenOdd(_FourDimRedBlackGrid) { + // some assertions assert(FiveDimGrid._ndimension==5); assert(FourDimGrid._ndimension==4); - assert(FiveDimRedBlackGrid._ndimension==5); assert(FourDimRedBlackGrid._ndimension==4); - assert(FiveDimRedBlackGrid._checker_dim==1); - - // Dimension zero of the five-d is the Ls direction + assert(FiveDimRedBlackGrid._ndimension==5); + assert(FiveDimRedBlackGrid._checker_dim==1); // Don't checker the s direction + + // extent of fifth dim and not spread out Ls=FiveDimGrid._fdimensions[0]; assert(FiveDimRedBlackGrid._fdimensions[0]==Ls); - assert(FiveDimRedBlackGrid._processors[0] ==1); - assert(FiveDimRedBlackGrid._simd_layout[0]==1); assert(FiveDimGrid._processors[0] ==1); - assert(FiveDimGrid._simd_layout[0] ==1); - + assert(FiveDimRedBlackGrid._processors[0] ==1); + // Other dimensions must match the decomposition of the four-D fields for(int d=0;d<4;d++){ - assert(FourDimRedBlackGrid._fdimensions[d] ==FourDimGrid._fdimensions[d]); - assert(FiveDimRedBlackGrid._fdimensions[d+1]==FourDimGrid._fdimensions[d]); - - assert(FourDimRedBlackGrid._processors[d] ==FourDimGrid._processors[d]); - assert(FiveDimRedBlackGrid._processors[d+1] ==FourDimGrid._processors[d]); - - assert(FourDimRedBlackGrid._simd_layout[d] ==FourDimGrid._simd_layout[d]); - assert(FiveDimRedBlackGrid._simd_layout[d+1]==FourDimGrid._simd_layout[d]); - - assert(FiveDimGrid._fdimensions[d+1] ==FourDimGrid._fdimensions[d]); assert(FiveDimGrid._processors[d+1] ==FourDimGrid._processors[d]); + assert(FiveDimRedBlackGrid._processors[d+1] ==FourDimGrid._processors[d]); + assert(FourDimRedBlackGrid._processors[d] ==FourDimGrid._processors[d]); + + assert(FiveDimGrid._fdimensions[d+1] ==FourDimGrid._fdimensions[d]); + assert(FiveDimRedBlackGrid._fdimensions[d+1]==FourDimGrid._fdimensions[d]); + assert(FourDimRedBlackGrid._fdimensions[d] ==FourDimGrid._fdimensions[d]); + assert(FiveDimGrid._simd_layout[d+1] ==FourDimGrid._simd_layout[d]); + assert(FiveDimRedBlackGrid._simd_layout[d+1]==FourDimGrid._simd_layout[d]); + assert(FourDimRedBlackGrid._simd_layout[d] ==FourDimGrid._simd_layout[d]); } + + if (Impl::LsVectorised) { + + int nsimd = Simd::Nsimd(); + // Dimension zero of the five-d is the Ls direction + assert(FiveDimGrid._simd_layout[0] ==nsimd); + assert(FiveDimRedBlackGrid._simd_layout[0]==nsimd); + + for(int d=0;d<4;d++){ + assert(FourDimGrid._simd_layout[d]=1); + assert(FourDimRedBlackGrid._simd_layout[d]=1); + assert(FiveDimRedBlackGrid._simd_layout[d+1]==1); + } + + } else { + + // Dimension zero of the five-d is the Ls direction + assert(FiveDimRedBlackGrid._simd_layout[0]==1); + assert(FiveDimGrid._simd_layout[0] ==1); + + } + // Allocate the required comms buffer ImportGauge(_Uthin,_Ufat); } @@ -112,8 +132,6 @@ void ImprovedStaggeredFermion5D::ImportGauge(const GaugeField &_Uthin) template void ImprovedStaggeredFermion5D::ImportGauge(const GaugeField &_Uthin,const GaugeField &_Ufat) { - GaugeLinkField U(GaugeGrid()); - 
//////////////////////////////////////////////////////// // Double Store should take two fields for Naik and one hop separately. //////////////////////////////////////////////////////// @@ -126,7 +144,7 @@ void ImprovedStaggeredFermion5D::ImportGauge(const GaugeField &_Uthin,cons //////////////////////////////////////////////////////// for (int mu = 0; mu < Nd; mu++) { - U = PeekIndex(Umu, mu); + auto U = PeekIndex(Umu, mu); PokeIndex(Umu, U*( 0.5*c1/u0), mu ); U = PeekIndex(Umu, mu+4); @@ -221,7 +239,7 @@ void ImprovedStaggeredFermion5D::DhopInternal(StencilImpl & st, LebesgueOr for (int ss = 0; ss < U._grid->oSites(); ss++) { for(int s=0;s::DhopInternal(StencilImpl & st, LebesgueOr for (int ss = 0; ss < U._grid->oSites(); ss++) { for(int s=0;s::MooeeInvDag(const FermionField &in, } - FermOpStaggeredTemplateInstantiate(ImprovedStaggeredFermion5D); +FermOpStaggeredVec5dTemplateInstantiate(ImprovedStaggeredFermion5D); }} diff --git a/lib/qcd/action/fermion/StaggeredKernels.cc b/lib/qcd/action/fermion/StaggeredKernels.cc index 6608f8de..bb8dee8c 100644 --- a/lib/qcd/action/fermion/StaggeredKernels.cc +++ b/lib/qcd/action/fermion/StaggeredKernels.cc @@ -192,38 +192,51 @@ void StaggeredKernels::DhopSiteDag(StencilImpl &st, LebesgueOrder &lo, Dou int oneLink =0; int threeLink=1; switch(Opt) { + case OptInlineAsm: + DhopSiteAsm(st,lo,U,UUU,buf,sF,sU,in,out._odata[sF]); + break; case OptHandUnroll: DhopSiteDepthHand(st,lo,U,buf,sF,sU,in,naive,oneLink); DhopSiteDepthHand(st,lo,UUU,buf,sF,sU,in,naik,threeLink); + out._odata[sF] =-naive-naik; break; case OptGeneric: - default: DhopSiteDepth(st,lo,U,buf,sF,sU,in,naive,oneLink); DhopSiteDepth(st,lo,UUU,buf,sF,sU,in,naik,threeLink); + out._odata[sF] =-naive-naik; + break; + default: + assert(0); break; } - out._odata[sF] =-naive-naik; }; template void StaggeredKernels::DhopSite(StencilImpl &st, LebesgueOrder &lo, DoubledGaugeField &U, DoubledGaugeField &UUU, SiteSpinor *buf, int sF, int sU, const FermionField &in, FermionField &out) { - SiteSpinor naik; - SiteSpinor naive; int oneLink =0; int threeLink=1; + SiteSpinor naik; + SiteSpinor naive; + static int once; switch(Opt) { + case OptInlineAsm: + DhopSiteAsm(st,lo,U,UUU,buf,sF,sU,in,out._odata[sF]); + break; case OptHandUnroll: DhopSiteDepthHand(st,lo,U,buf,sF,sU,in,naive,oneLink); DhopSiteDepthHand(st,lo,UUU,buf,sF,sU,in,naik,threeLink); + out._odata[sF] =naive+naik; break; case OptGeneric: - default: DhopSiteDepth(st,lo,U,buf,sF,sU,in,naive,oneLink); DhopSiteDepth(st,lo,UUU,buf,sF,sU,in,naik,threeLink); + out._odata[sF] =naive+naik; + break; + default: + assert(0); break; } - out._odata[sF] =naive+naik; }; template @@ -238,6 +251,7 @@ void StaggeredKernels::DhopDir( StencilImpl &st, DoubledGaugeField &U, Do } FermOpStaggeredTemplateInstantiate(StaggeredKernels); +FermOpStaggeredVec5dTemplateInstantiate(StaggeredKernels); }} diff --git a/lib/qcd/action/fermion/StaggeredKernels.h b/lib/qcd/action/fermion/StaggeredKernels.h index 5a6cb45c..e4cc8cdd 100644 --- a/lib/qcd/action/fermion/StaggeredKernels.h +++ b/lib/qcd/action/fermion/StaggeredKernels.h @@ -58,6 +58,9 @@ public: void DhopSiteDepthHand(StencilImpl &st, LebesgueOrder &lo, DoubledGaugeField &U, SiteSpinor * buf, int sF, int sU, const FermionField &in, SiteSpinor &out,int threeLink); + + void DhopSiteAsm(StencilImpl &st, LebesgueOrder &lo, DoubledGaugeField &U,DoubledGaugeField &UUU, SiteSpinor * buf, + int sF, int sU, const FermionField &in, SiteSpinor &out); void DhopSite(StencilImpl &st, LebesgueOrder &lo, DoubledGaugeField &U, 
DoubledGaugeField &UUU, SiteSpinor * buf, int sF, int sU, const FermionField &in, FermionField &out); diff --git a/lib/qcd/action/fermion/StaggeredKernelsAsm.cc b/lib/qcd/action/fermion/StaggeredKernelsAsm.cc new file mode 100644 index 00000000..bb38a6c9 --- /dev/null +++ b/lib/qcd/action/fermion/StaggeredKernelsAsm.cc @@ -0,0 +1,327 @@ + /************************************************************************************* + + Grid physics library, www.github.com/paboyle/Grid + + Source file: ./lib/qcd/action/fermion/StaggerdKernelsHand.cc + + Copyright (C) 2015 + +Author: Peter Boyle +Author: paboyle + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License along + with this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + + See the full license in the file "LICENSE" in the top level distribution directory + *************************************************************************************/ + /* END LEGAL */ +#include +#include +#include +#include + +// Interleave operations from two directions +// This looks just like a 2 spin multiply and reuse same sequence from the Wilson +// Kernel. But the spin index becomes a mu index instead. +#define Chi_00 %zmm0 +#define Chi_01 %zmm1 +#define Chi_02 %zmm2 +#define Chi_10 %zmm3 +#define Chi_11 %zmm4 +#define Chi_12 %zmm5 +#define Chi_20 %zmm6 +#define Chi_21 %zmm7 +#define Chi_22 %zmm8 +#define Chi_30 %zmm9 +#define Chi_31 %zmm10 +#define Chi_32 %zmm11 + +#define UChi_00 %zmm12 +#define UChi_01 %zmm13 +#define UChi_02 %zmm14 +#define UChi_10 %zmm15 +#define UChi_11 %zmm16 +#define UChi_12 %zmm17 +#define UChi_20 %zmm18 +#define UChi_21 %zmm19 +#define UChi_22 %zmm20 +#define UChi_30 %zmm21 +#define UChi_31 %zmm22 +#define UChi_32 %zmm23 + +#define pChi_00 %%zmm0 +#define pChi_01 %%zmm1 +#define pChi_02 %%zmm2 +#define pChi_10 %%zmm3 +#define pChi_11 %%zmm4 +#define pChi_12 %%zmm5 +#define pChi_20 %%zmm6 +#define pChi_21 %%zmm7 +#define pChi_22 %%zmm8 +#define pChi_30 %%zmm9 +#define pChi_31 %%zmm10 +#define pChi_32 %%zmm11 + +#define pUChi_00 %%zmm12 +#define pUChi_01 %%zmm13 +#define pUChi_02 %%zmm14 +#define pUChi_10 %%zmm15 +#define pUChi_11 %%zmm16 +#define pUChi_12 %%zmm17 +#define pUChi_20 %%zmm18 +#define pUChi_21 %%zmm19 +#define pUChi_22 %%zmm20 +#define pUChi_30 %%zmm21 +#define pUChi_31 %%zmm22 +#define pUChi_32 %%zmm23 + +#define T0 %zmm24 +#define T1 %zmm25 +#define T2 %zmm26 +#define T3 %zmm27 + +#define MULT_ADD_LS(g0,g1,g2,g3) \ + asm ( "movq %0, %%r8 \n\t" \ + "movq %1, %%r9 \n\t" \ + "movq %2, %%r10 \n\t" \ + "movq %3, %%r11 \n\t" : : "r"(g0), "r"(g1), "r"(g2), "r"(g3) : "%r8","%r9","%r10","%r11" );\ + asm ( \ + VSHUF(Chi_00,T0) VSHUF(Chi_10,T1) \ + VSHUF(Chi_20,T2) VSHUF(Chi_30,T3) \ + VMADDSUBIDUP(0,%r8,T0,UChi_00) VMADDSUBIDUP(0,%r9,T1,UChi_10) \ + VMADDSUBIDUP(3,%r8,T0,UChi_01) VMADDSUBIDUP(3,%r9,T1,UChi_11) \ + VMADDSUBIDUP(6,%r8,T0,UChi_02) VMADDSUBIDUP(6,%r9,T1,UChi_12) \ + VMADDSUBIDUP(0,%r10,T2,UChi_20) VMADDSUBIDUP(0,%r11,T3,UChi_30) \ + 
VMADDSUBIDUP(3,%r10,T2,UChi_21) VMADDSUBIDUP(3,%r11,T3,UChi_31) \ + VMADDSUBIDUP(6,%r10,T2,UChi_22) VMADDSUBIDUP(6,%r11,T3,UChi_32) \ + VMADDSUBRDUP(0,%r8,Chi_00,UChi_00) VMADDSUBRDUP(0,%r9,Chi_10,UChi_10) \ + VMADDSUBRDUP(3,%r8,Chi_00,UChi_01) VMADDSUBRDUP(3,%r9,Chi_10,UChi_11) \ + VMADDSUBRDUP(6,%r8,Chi_00,UChi_02) VMADDSUBRDUP(6,%r9,Chi_10,UChi_12) \ + VMADDSUBRDUP(0,%r10,Chi_20,UChi_20) VMADDSUBRDUP(0,%r11,Chi_30,UChi_30) \ + VMADDSUBRDUP(3,%r10,Chi_20,UChi_21) VMADDSUBRDUP(3,%r11,Chi_30,UChi_31) \ + VMADDSUBRDUP(6,%r10,Chi_20,UChi_22) VMADDSUBRDUP(6,%r11,Chi_30,UChi_32) \ + VSHUF(Chi_01,T0) VSHUF(Chi_11,T1) \ + VSHUF(Chi_21,T2) VSHUF(Chi_31,T3) \ + VMADDSUBIDUP(1,%r8,T0,UChi_00) VMADDSUBIDUP(1,%r9,T1,UChi_10) \ + VMADDSUBIDUP(4,%r8,T0,UChi_01) VMADDSUBIDUP(4,%r9,T1,UChi_11) \ + VMADDSUBIDUP(7,%r8,T0,UChi_02) VMADDSUBIDUP(7,%r9,T1,UChi_12) \ + VMADDSUBIDUP(1,%r10,T2,UChi_20) VMADDSUBIDUP(1,%r11,T3,UChi_30) \ + VMADDSUBIDUP(4,%r10,T2,UChi_21) VMADDSUBIDUP(4,%r11,T3,UChi_31) \ + VMADDSUBIDUP(7,%r10,T2,UChi_22) VMADDSUBIDUP(7,%r11,T3,UChi_32) \ + VMADDSUBRDUP(1,%r8,Chi_01,UChi_00) VMADDSUBRDUP(1,%r9,Chi_11,UChi_10) \ + VMADDSUBRDUP(4,%r8,Chi_01,UChi_01) VMADDSUBRDUP(4,%r9,Chi_11,UChi_11) \ + VMADDSUBRDUP(7,%r8,Chi_01,UChi_02) VMADDSUBRDUP(7,%r9,Chi_11,UChi_12) \ + VMADDSUBRDUP(1,%r10,Chi_21,UChi_20) VMADDSUBRDUP(1,%r11,Chi_31,UChi_30) \ + VMADDSUBRDUP(4,%r10,Chi_21,UChi_21) VMADDSUBRDUP(4,%r11,Chi_31,UChi_31) \ + VMADDSUBRDUP(7,%r10,Chi_21,UChi_22) VMADDSUBRDUP(7,%r11,Chi_31,UChi_32) \ + VSHUF(Chi_02,T0) VSHUF(Chi_12,T1) \ + VSHUF(Chi_22,T2) VSHUF(Chi_32,T3) \ + VMADDSUBIDUP(2,%r8,T0,UChi_00) VMADDSUBIDUP(2,%r9,T1,UChi_10) \ + VMADDSUBIDUP(5,%r8,T0,UChi_01) VMADDSUBIDUP(5,%r9,T1,UChi_11) \ + VMADDSUBIDUP(8,%r8,T0,UChi_02) VMADDSUBIDUP(8,%r9,T1,UChi_12) \ + VMADDSUBIDUP(2,%r10,T2,UChi_20) VMADDSUBIDUP(2,%r11,T3,UChi_30) \ + VMADDSUBIDUP(5,%r10,T2,UChi_21) VMADDSUBIDUP(5,%r11,T3,UChi_31) \ + VMADDSUBIDUP(8,%r10,T2,UChi_22) VMADDSUBIDUP(8,%r11,T3,UChi_32) \ + VMADDSUBRDUP(2,%r8,Chi_02,UChi_00) VMADDSUBRDUP(2,%r9,Chi_12,UChi_10) \ + VMADDSUBRDUP(5,%r8,Chi_02,UChi_01) VMADDSUBRDUP(5,%r9,Chi_12,UChi_11) \ + VMADDSUBRDUP(8,%r8,Chi_02,UChi_02) VMADDSUBRDUP(8,%r9,Chi_12,UChi_12) \ + VMADDSUBRDUP(2,%r10,Chi_22,UChi_20) VMADDSUBRDUP(2,%r11,Chi_32,UChi_30) \ + VMADDSUBRDUP(5,%r10,Chi_22,UChi_21) VMADDSUBRDUP(5,%r11,Chi_32,UChi_31) \ + VMADDSUBRDUP(8,%r10,Chi_22,UChi_22) VMADDSUBRDUP(8,%r11,Chi_32,UChi_32) ); + +#define MULT_LS(g0,g1,g2,g3) \ + asm ( "movq %0, %%r8 \n\t" \ + "movq %1, %%r9 \n\t" \ + "movq %2, %%r10 \n\t" \ + "movq %3, %%r11 \n\t" : : "r"(g0), "r"(g1), "r"(g2), "r"(g3) : "%r8","%r9","%r10","%r11" );\ + asm ( \ + VSHUF(Chi_00,T0) VSHUF(Chi_10,T1) \ + VSHUF(Chi_20,T2) VSHUF(Chi_30,T3) \ + VMULIDUP(0,%r8,T0,UChi_00) VMULIDUP(0,%r9,T1,UChi_10) \ + VMULIDUP(3,%r8,T0,UChi_01) VMULIDUP(3,%r9,T1,UChi_11) \ + VMULIDUP(6,%r8,T0,UChi_02) VMULIDUP(6,%r9,T1,UChi_12) \ + VMULIDUP(0,%r10,T2,UChi_20) VMULIDUP(0,%r11,T3,UChi_30) \ + VMULIDUP(3,%r10,T2,UChi_21) VMULIDUP(3,%r11,T3,UChi_31) \ + VMULIDUP(6,%r10,T2,UChi_22) VMULIDUP(6,%r11,T3,UChi_32) \ + VMADDSUBRDUP(0,%r8,Chi_00,UChi_00) VMADDSUBRDUP(0,%r9,Chi_10,UChi_10) \ + VMADDSUBRDUP(3,%r8,Chi_00,UChi_01) VMADDSUBRDUP(3,%r9,Chi_10,UChi_11) \ + VMADDSUBRDUP(6,%r8,Chi_00,UChi_02) VMADDSUBRDUP(6,%r9,Chi_10,UChi_12) \ + VMADDSUBRDUP(0,%r10,Chi_20,UChi_20) VMADDSUBRDUP(0,%r11,Chi_30,UChi_30) \ + VMADDSUBRDUP(3,%r10,Chi_20,UChi_21) VMADDSUBRDUP(3,%r11,Chi_30,UChi_31) \ + VMADDSUBRDUP(6,%r10,Chi_20,UChi_22) VMADDSUBRDUP(6,%r11,Chi_30,UChi_32) \ + 
VSHUF(Chi_01,T0) VSHUF(Chi_11,T1) \ + VSHUF(Chi_21,T2) VSHUF(Chi_31,T3) \ + VMADDSUBIDUP(1,%r8,T0,UChi_00) VMADDSUBIDUP(1,%r9,T1,UChi_10) \ + VMADDSUBIDUP(4,%r8,T0,UChi_01) VMADDSUBIDUP(4,%r9,T1,UChi_11) \ + VMADDSUBIDUP(7,%r8,T0,UChi_02) VMADDSUBIDUP(7,%r9,T1,UChi_12) \ + VMADDSUBIDUP(1,%r10,T2,UChi_20) VMADDSUBIDUP(1,%r11,T3,UChi_30) \ + VMADDSUBIDUP(4,%r10,T2,UChi_21) VMADDSUBIDUP(4,%r11,T3,UChi_31) \ + VMADDSUBIDUP(7,%r10,T2,UChi_22) VMADDSUBIDUP(7,%r11,T3,UChi_32) \ + VMADDSUBRDUP(1,%r8,Chi_01,UChi_00) VMADDSUBRDUP(1,%r9,Chi_11,UChi_10) \ + VMADDSUBRDUP(4,%r8,Chi_01,UChi_01) VMADDSUBRDUP(4,%r9,Chi_11,UChi_11) \ + VMADDSUBRDUP(7,%r8,Chi_01,UChi_02) VMADDSUBRDUP(7,%r9,Chi_11,UChi_12) \ + VMADDSUBRDUP(1,%r10,Chi_21,UChi_20) VMADDSUBRDUP(1,%r11,Chi_31,UChi_30) \ + VMADDSUBRDUP(4,%r10,Chi_21,UChi_21) VMADDSUBRDUP(4,%r11,Chi_31,UChi_31) \ + VMADDSUBRDUP(7,%r10,Chi_21,UChi_22) VMADDSUBRDUP(7,%r11,Chi_31,UChi_32) \ + VSHUF(Chi_02,T0) VSHUF(Chi_12,T1) \ + VSHUF(Chi_22,T2) VSHUF(Chi_32,T3) \ + VMADDSUBIDUP(2,%r8,T0,UChi_00) VMADDSUBIDUP(2,%r9,T1,UChi_10) \ + VMADDSUBIDUP(5,%r8,T0,UChi_01) VMADDSUBIDUP(5,%r9,T1,UChi_11) \ + VMADDSUBIDUP(8,%r8,T0,UChi_02) VMADDSUBIDUP(8,%r9,T1,UChi_12) \ + VMADDSUBIDUP(2,%r10,T2,UChi_20) VMADDSUBIDUP(2,%r11,T3,UChi_30) \ + VMADDSUBIDUP(5,%r10,T2,UChi_21) VMADDSUBIDUP(5,%r11,T3,UChi_31) \ + VMADDSUBIDUP(8,%r10,T2,UChi_22) VMADDSUBIDUP(8,%r11,T3,UChi_32) \ + VMADDSUBRDUP(2,%r8,Chi_02,UChi_00) VMADDSUBRDUP(2,%r9,Chi_12,UChi_10) \ + VMADDSUBRDUP(5,%r8,Chi_02,UChi_01) VMADDSUBRDUP(5,%r9,Chi_12,UChi_11) \ + VMADDSUBRDUP(8,%r8,Chi_02,UChi_02) VMADDSUBRDUP(8,%r9,Chi_12,UChi_12) \ + VMADDSUBRDUP(2,%r10,Chi_22,UChi_20) VMADDSUBRDUP(2,%r11,Chi_32,UChi_30) \ + VMADDSUBRDUP(5,%r10,Chi_22,UChi_21) VMADDSUBRDUP(5,%r11,Chi_32,UChi_31) \ + VMADDSUBRDUP(8,%r10,Chi_22,UChi_22) VMADDSUBRDUP(8,%r11,Chi_32,UChi_32) ); + +#define LOAD_CHI(a0,a1,a2,a3) \ + asm ( \ + "movq %0, %%r8 \n\t" \ + VLOAD(0,%%r8,pChi_00) \ + VLOAD(1,%%r8,pChi_01) \ + VLOAD(2,%%r8,pChi_02) \ + : : "r" (a0) : "%r8" ); \ + asm ( \ + "movq %0, %%r8 \n\t" \ + VLOAD(0,%%r8,pChi_10) \ + VLOAD(1,%%r8,pChi_11) \ + VLOAD(2,%%r8,pChi_12) \ + : : "r" (a1) : "%r8" ); \ + asm ( \ + "movq %0, %%r8 \n\t" \ + VLOAD(0,%%r8,pChi_20) \ + VLOAD(1,%%r8,pChi_21) \ + VLOAD(2,%%r8,pChi_22) \ + : : "r" (a2) : "%r8" ); \ + asm ( \ + "movq %0, %%r8 \n\t" \ + VLOAD(0,%%r8,pChi_30) \ + VLOAD(1,%%r8,pChi_31) \ + VLOAD(2,%%r8,pChi_32) \ + : : "r" (a3) : "%r8" ); + +#define PF_CHI(a0) \ + asm ( \ + "movq %0, %%r8 \n\t" \ + VPREFETCH1(0,%%r8) \ + VPREFETCH1(1,%%r8) \ + VPREFETCH1(2,%%r8) \ + : : "r" (a0) : "%r8" ); \ + + +#define REDUCE(out) \ + asm ( \ + VADD(UChi_00,UChi_10,UChi_00) \ + VADD(UChi_01,UChi_11,UChi_01) \ + VADD(UChi_02,UChi_12,UChi_02) \ + VADD(UChi_30,UChi_20,UChi_30) \ + VADD(UChi_31,UChi_21,UChi_31) \ + VADD(UChi_32,UChi_22,UChi_32) \ + VADD(UChi_00,UChi_30,UChi_00) \ + VADD(UChi_01,UChi_31,UChi_01) \ + VADD(UChi_02,UChi_32,UChi_02) ); \ + asm ( \ + VSTORE(0,%0,pUChi_00) \ + VSTORE(1,%0,pUChi_01) \ + VSTORE(2,%0,pUChi_02) \ + : : "r" (out) : "memory" ); + +#define PERMUTE_DIR(dir) \ + permute##dir(Chi_0,Chi_0);\ + permute##dir(Chi_1,Chi_1);\ + permute##dir(Chi_2,Chi_2); + +namespace Grid { +namespace QCD { + + // This is the single precision 5th direction vectorised kernel +template +void StaggeredKernels::DhopSiteAsm(StencilImpl &st, LebesgueOrder &lo, + DoubledGaugeField &U, + DoubledGaugeField &UUU, + SiteSpinor *buf, int sF, + int sU, const FermionField &in, SiteSpinor &out) +{ + uint64_t gauge0,gauge1,gauge2,gauge3; + 
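// Set by the PREPARE macro below: gauge0..gauge3 point at the four direction links
// of site sU, while addr0..addr3 hold the matching neighbour-spinor addresses,
// taken from the local field when the stencil entry is local and from the
// halo-exchange buffer otherwise.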
uint64_t addr0,addr1,addr2,addr3; + + int o0,o1,o2,o3; // offsets + int l0,l1,l2,l3; // local + int p0,p1,p2,p3; // perm + int ptype; + StencilEntry *SE0; + StencilEntry *SE1; + StencilEntry *SE2; + StencilEntry *SE3; + + // Xp, Yp, Zp, Tp +#define PREPARE(X,Y,Z,T,skew,UU) \ + SE0=st.GetEntry(ptype,X+skew,sF); \ + o0 = SE0->_offset; \ + l0 = SE0->_is_local; \ + addr0 = l0 ? (uint64_t) &in._odata[o0] : (uint64_t) &buf[o0]; \ + PF_CHI(addr0); \ + \ + SE1=st.GetEntry(ptype,Y+skew,sF); \ + o1 = SE1->_offset; \ + l1 = SE1->_is_local; \ + addr1 = l1 ? (uint64_t) &in._odata[o1] : (uint64_t) &buf[o1]; \ + PF_CHI(addr1); \ + \ + SE2=st.GetEntry(ptype,Z+skew,sF); \ + o2 = SE2->_offset; \ + l2 = SE2->_is_local; \ + addr2 = l2 ? (uint64_t) &in._odata[o2] : (uint64_t) &buf[o2]; \ + PF_CHI(addr2); \ + \ + SE3=st.GetEntry(ptype,T+skew,sF); \ + o3 = SE3->_offset; \ + l3 = SE3->_is_local; \ + addr3 = l3 ? (uint64_t) &in._odata[o3] : (uint64_t) &buf[o3]; \ + PF_CHI(addr3); \ + \ + gauge0 =(uint64_t)&UU._odata[sU]( X ); \ + gauge1 =(uint64_t)&UU._odata[sU]( Y ); \ + gauge2 =(uint64_t)&UU._odata[sU]( Z ); \ + gauge3 =(uint64_t)&UU._odata[sU]( T ); + + PREPARE(Xp,Yp,Zp,Tp,0,U); + LOAD_CHI(addr0,addr1,addr2,addr3); + MULT_LS(gauge0,gauge1,gauge2,gauge3); + + PREPARE(Xm,Ym,Zm,Tm,0,U); + LOAD_CHI(addr0,addr1,addr2,addr3); + MULT_ADD_LS(gauge0,gauge1,gauge2,gauge3); + + PREPARE(Xp,Yp,Zp,Tp,8,UUU); + LOAD_CHI(addr0,addr1,addr2,addr3); + MULT_ADD_LS(gauge0,gauge1,gauge2,gauge3); + + PREPARE(Xm,Ym,Zm,Tm,8,UUU); + LOAD_CHI(addr0,addr1,addr2,addr3); + MULT_ADD_LS(gauge0,gauge1,gauge2,gauge3); + + addr0 = (uint64_t) &out; + REDUCE(addr0); +} + +FermOpStaggeredTemplateInstantiate(StaggeredKernels); +FermOpStaggeredVec5dTemplateInstantiate(StaggeredKernels); + +}} + diff --git a/lib/qcd/action/fermion/StaggeredKernelsHand.cc b/lib/qcd/action/fermion/StaggeredKernelsHand.cc index 5f9e11e5..06e9d219 100644 --- a/lib/qcd/action/fermion/StaggeredKernelsHand.cc +++ b/lib/qcd/action/fermion/StaggeredKernelsHand.cc @@ -279,5 +279,6 @@ void StaggeredKernels::DhopSiteDepthHand(StencilImpl &st, LebesgueOrder &l } FermOpStaggeredTemplateInstantiate(StaggeredKernels); +FermOpStaggeredVec5dTemplateInstantiate(StaggeredKernels); }} diff --git a/lib/qcd/action/fermion/WilsonFermion5D.cc b/lib/qcd/action/fermion/WilsonFermion5D.cc index d2ac96e3..d3a7f941 100644 --- a/lib/qcd/action/fermion/WilsonFermion5D.cc +++ b/lib/qcd/action/fermion/WilsonFermion5D.cc @@ -62,71 +62,55 @@ WilsonFermion5D::WilsonFermion5D(GaugeField &_Umu, Lebesgue(_FourDimGrid), LebesgueEvenOdd(_FourDimRedBlackGrid) { + // some assertions + assert(FiveDimGrid._ndimension==5); + assert(FourDimGrid._ndimension==4); + assert(FourDimRedBlackGrid._ndimension==4); + assert(FiveDimRedBlackGrid._ndimension==5); + assert(FiveDimRedBlackGrid._checker_dim==1); // Don't checker the s direction + + // extent of fifth dim and not spread out + Ls=FiveDimGrid._fdimensions[0]; + assert(FiveDimRedBlackGrid._fdimensions[0]==Ls); + assert(FiveDimGrid._processors[0] ==1); + assert(FiveDimRedBlackGrid._processors[0] ==1); + + // Other dimensions must match the decomposition of the four-D fields + for(int d=0;d<4;d++){ + + assert(FiveDimGrid._processors[d+1] ==FourDimGrid._processors[d]); + assert(FiveDimRedBlackGrid._processors[d+1] ==FourDimGrid._processors[d]); + assert(FourDimRedBlackGrid._processors[d] ==FourDimGrid._processors[d]); + + assert(FiveDimGrid._fdimensions[d+1] ==FourDimGrid._fdimensions[d]); + assert(FiveDimRedBlackGrid._fdimensions[d+1]==FourDimGrid._fdimensions[d]); + 
assert(FourDimRedBlackGrid._fdimensions[d] ==FourDimGrid._fdimensions[d]); + + assert(FiveDimGrid._simd_layout[d+1] ==FourDimGrid._simd_layout[d]); + assert(FiveDimRedBlackGrid._simd_layout[d+1]==FourDimGrid._simd_layout[d]); + assert(FourDimRedBlackGrid._simd_layout[d] ==FourDimGrid._simd_layout[d]); + } + if (Impl::LsVectorised) { int nsimd = Simd::Nsimd(); - // some assertions - assert(FiveDimGrid._ndimension==5); - assert(FiveDimRedBlackGrid._ndimension==5); - assert(FiveDimRedBlackGrid._checker_dim==1); // Don't checker the s direction - assert(FourDimGrid._ndimension==4); - // Dimension zero of the five-d is the Ls direction - Ls=FiveDimGrid._fdimensions[0]; - assert(FiveDimGrid._processors[0] ==1); assert(FiveDimGrid._simd_layout[0] ==nsimd); - - assert(FiveDimRedBlackGrid._fdimensions[0]==Ls); - assert(FiveDimRedBlackGrid._processors[0] ==1); assert(FiveDimRedBlackGrid._simd_layout[0]==nsimd); - // Other dimensions must match the decomposition of the four-D fields for(int d=0;d<4;d++){ - assert(FiveDimRedBlackGrid._fdimensions[d+1]==FourDimGrid._fdimensions[d]); - assert(FiveDimRedBlackGrid._processors[d+1] ==FourDimGrid._processors[d]); - assert(FourDimGrid._simd_layout[d]=1); assert(FourDimRedBlackGrid._simd_layout[d]=1); assert(FiveDimRedBlackGrid._simd_layout[d+1]==1); - - assert(FiveDimGrid._fdimensions[d+1] ==FourDimGrid._fdimensions[d]); - assert(FiveDimGrid._processors[d+1] ==FourDimGrid._processors[d]); - assert(FiveDimGrid._simd_layout[d+1] ==FourDimGrid._simd_layout[d]); } } else { - - // some assertions - assert(FiveDimGrid._ndimension==5); - assert(FourDimGrid._ndimension==4); - assert(FiveDimRedBlackGrid._ndimension==5); - assert(FourDimRedBlackGrid._ndimension==4); - assert(FiveDimRedBlackGrid._checker_dim==1); // Dimension zero of the five-d is the Ls direction - Ls=FiveDimGrid._fdimensions[0]; - assert(FiveDimRedBlackGrid._fdimensions[0]==Ls); - assert(FiveDimRedBlackGrid._processors[0] ==1); assert(FiveDimRedBlackGrid._simd_layout[0]==1); - assert(FiveDimGrid._processors[0] ==1); assert(FiveDimGrid._simd_layout[0] ==1); - - // Other dimensions must match the decomposition of the four-D fields - for(int d=0;d<4;d++){ - assert(FourDimRedBlackGrid._fdimensions[d] ==FourDimGrid._fdimensions[d]); - assert(FiveDimRedBlackGrid._fdimensions[d+1]==FourDimGrid._fdimensions[d]); - - assert(FourDimRedBlackGrid._processors[d] ==FourDimGrid._processors[d]); - assert(FiveDimRedBlackGrid._processors[d+1] ==FourDimGrid._processors[d]); - - assert(FourDimRedBlackGrid._simd_layout[d] ==FourDimGrid._simd_layout[d]); - assert(FiveDimRedBlackGrid._simd_layout[d+1]==FourDimGrid._simd_layout[d]); - - assert(FiveDimGrid._fdimensions[d+1] ==FourDimGrid._fdimensions[d]); - assert(FiveDimGrid._processors[d+1] ==FourDimGrid._processors[d]); - assert(FiveDimGrid._simd_layout[d+1] ==FourDimGrid._simd_layout[d]); - } + } // Allocate the required comms buffer diff --git a/tests/core/Test_staggered5D.cc b/tests/core/Test_staggered5D.cc index b467e9b0..be31c438 100644 --- a/tests/core/Test_staggered5D.cc +++ b/tests/core/Test_staggered5D.cc @@ -51,7 +51,6 @@ int main (int argc, char ** argv) int threads = GridThread::GetThreads(); std::cout< U(4,FGrid); + for(int mu=0;mu(Umu5d,mu); - if ( mu!=0 ) U[mu]=zero; - PokeIndex(Umu5d,U[mu],mu); - } - - std::vector Ua(4,UGrid); - for(int mu=0;mu(Umu,mu); - if ( mu!=0 ) { - Ua[mu]=zero; - } - PokeIndex(Umu,Ua[mu],mu); } RealD mass=0.1; @@ -171,6 +164,8 @@ int main (int argc, char ** argv) double flops=(16*(3*(6+8+8)) + 15*3*2)*volume*ncall; // == 66*16 + == 
1146 std::cout< + +using namespace std; +using namespace Grid; +using namespace Grid::QCD; + +int main (int argc, char ** argv) +{ + Grid_init(&argc,&argv); + + std::vector latt_size = GridDefaultLatt(); + std::vector simd_layout = GridDefaultSimd(Nd,vComplex::Nsimd()); + std::vector mpi_layout = GridDefaultMpi(); + + const int Ls=16; + GridCartesian * UGrid = SpaceTimeGrid::makeFourDimGrid(GridDefaultLatt(), GridDefaultSimd(Nd,vComplex::Nsimd()),GridDefaultMpi()); + GridRedBlackCartesian * UrbGrid = SpaceTimeGrid::makeFourDimRedBlackGrid(UGrid); + GridCartesian * FGrid = SpaceTimeGrid::makeFiveDimGrid(Ls,UGrid); + GridRedBlackCartesian * FrbGrid = SpaceTimeGrid::makeFiveDimRedBlackGrid(Ls,UGrid); + + std::cout << GridLogMessage << "Making s innermost grids"< seeds({1,2,3,4}); + GridParallelRNG pRNG4(UGrid); + GridParallelRNG pRNG5(FGrid); + pRNG4.SeedFixedIntegers(seeds); + pRNG5.SeedFixedIntegers(seeds); + + typedef typename ImprovedStaggeredFermion5DR::FermionField FermionField; + typedef typename ImprovedStaggeredFermion5DR::ComplexField ComplexField; + typename ImprovedStaggeredFermion5DR::ImplParams params; + + FermionField src (FGrid); + + //random(pRNG5,src); + std::vector site({0,0,0,0,0}); + ColourVector cv = zero; + cv()()(0)=1.0; + src = zero; + pokeSite(cv,src,site); + + FermionField result(FGrid); result=zero; + FermionField tmp(FGrid); tmp=zero; + FermionField err(FGrid); tmp=zero; + FermionField phi (FGrid); random(pRNG5,phi); + FermionField chi (FGrid); random(pRNG5,chi); + + LatticeGaugeField Umu(UGrid); SU3::ColdConfiguration(pRNG4,Umu); + + double volume=Ls; + for(int mu=0;mu Date: Fri, 16 Dec 2016 22:03:29 +0000 Subject: [PATCH 011/101] AVX512 only for ASM compilation --- lib/qcd/action/fermion/StaggeredKernels.cc | 4 ++++ lib/qcd/action/fermion/StaggeredKernelsAsm.cc | 4 ++++ 2 files changed, 8 insertions(+) diff --git a/lib/qcd/action/fermion/StaggeredKernels.cc b/lib/qcd/action/fermion/StaggeredKernels.cc index bb8dee8c..06720c64 100644 --- a/lib/qcd/action/fermion/StaggeredKernels.cc +++ b/lib/qcd/action/fermion/StaggeredKernels.cc @@ -192,9 +192,11 @@ void StaggeredKernels::DhopSiteDag(StencilImpl &st, LebesgueOrder &lo, Dou int oneLink =0; int threeLink=1; switch(Opt) { +#ifdef AVX512 case OptInlineAsm: DhopSiteAsm(st,lo,U,UUU,buf,sF,sU,in,out._odata[sF]); break; +#endif case OptHandUnroll: DhopSiteDepthHand(st,lo,U,buf,sF,sU,in,naive,oneLink); DhopSiteDepthHand(st,lo,UUU,buf,sF,sU,in,naik,threeLink); @@ -220,9 +222,11 @@ void StaggeredKernels::DhopSite(StencilImpl &st, LebesgueOrder &lo, Double SiteSpinor naive; static int once; switch(Opt) { +#ifdef AVX512 case OptInlineAsm: DhopSiteAsm(st,lo,U,UUU,buf,sF,sU,in,out._odata[sF]); break; +#endif case OptHandUnroll: DhopSiteDepthHand(st,lo,U,buf,sF,sU,in,naive,oneLink); DhopSiteDepthHand(st,lo,UUU,buf,sF,sU,in,naik,threeLink); diff --git a/lib/qcd/action/fermion/StaggeredKernelsAsm.cc b/lib/qcd/action/fermion/StaggeredKernelsAsm.cc index bb38a6c9..ad4bd17d 100644 --- a/lib/qcd/action/fermion/StaggeredKernelsAsm.cc +++ b/lib/qcd/action/fermion/StaggeredKernelsAsm.cc @@ -257,6 +257,7 @@ void StaggeredKernels::DhopSiteAsm(StencilImpl &st, LebesgueOrder &lo, SiteSpinor *buf, int sF, int sU, const FermionField &in, SiteSpinor &out) { +#ifdef AVX512 uint64_t gauge0,gauge1,gauge2,gauge3; uint64_t addr0,addr1,addr2,addr3; @@ -318,6 +319,9 @@ void StaggeredKernels::DhopSiteAsm(StencilImpl &st, LebesgueOrder &lo, addr0 = (uint64_t) &out; REDUCE(addr0); +#else + assert(0); +#endif } 
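The dispatch idiom established here, and tightened in the next patch, is worth spelling out: the generic DhopSiteAsm template traps with assert(0), and only a template specialisation for the vectorised implementation, compiled when AVX512 is defined, supplies a real body. A minimal self-contained sketch of that pattern follows; the names Kernel and VecImplF are hypothetical stand-ins for illustration, not Grid types, and the real code specialises a member function rather than the whole class.

#include <cassert>
#include <iostream>

template <class Impl> struct Kernel {
  // generic path: no assembler exists for this Impl, so trap at run time
  static void DhopSiteAsm() { assert(0 && "no asm kernel for this Impl"); }
};

struct VecImplF {};  // hypothetical stand-in for the Ls-vectorised single-precision impl

template <> struct Kernel<VecImplF> {
  static void DhopSiteAsm() {
#ifdef AVX512
    std::cout << "AVX512 assembler path" << std::endl;  // the real kernel emits the asm sequence here
#else
    assert(0);  // built without AVX512: this path must never be entered
#endif
  }
};

int main() { Kernel<VecImplF>::DhopSiteAsm(); }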
FermOpStaggeredTemplateInstantiate(StaggeredKernels); From d4071daf2ac4427d839da67f4a5877ea8a11216a Mon Sep 17 00:00:00 2001 From: azusayamaguchi Date: Fri, 16 Dec 2016 22:28:29 +0000 Subject: [PATCH 012/101] Template specialise --- lib/qcd/action/fermion/StaggeredKernelsAsm.cc | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/lib/qcd/action/fermion/StaggeredKernelsAsm.cc b/lib/qcd/action/fermion/StaggeredKernelsAsm.cc index ad4bd17d..25bde931 100644 --- a/lib/qcd/action/fermion/StaggeredKernelsAsm.cc +++ b/lib/qcd/action/fermion/StaggeredKernelsAsm.cc @@ -249,13 +249,22 @@ Author: paboyle namespace Grid { namespace QCD { - // This is the single precision 5th direction vectorised kernel template void StaggeredKernels::DhopSiteAsm(StencilImpl &st, LebesgueOrder &lo, DoubledGaugeField &U, DoubledGaugeField &UUU, SiteSpinor *buf, int sF, int sU, const FermionField &in, SiteSpinor &out) +{ + assert(0); + +} + // This is the single precision 5th direction vectorised kernel +template <> void StaggeredKernels::DhopSiteAsm(StencilImpl &st, LebesgueOrder &lo, + DoubledGaugeField &U, + DoubledGaugeField &UUU, + SiteSpinor *buf, int sF, + int sU, const FermionField &in, SiteSpinor &out) { #ifdef AVX512 uint64_t gauge0,gauge1,gauge2,gauge3; From b3e7f600da93e55dd01797583ae1becb788a967c Mon Sep 17 00:00:00 2001 From: azusayamaguchi Date: Fri, 16 Dec 2016 23:50:30 +0000 Subject: [PATCH 013/101] Partial implementation of 4d vectorisation assembler --- lib/qcd/action/fermion/StaggeredKernelsAsm.cc | 298 +++++++++++++++--- 1 file changed, 261 insertions(+), 37 deletions(-) diff --git a/lib/qcd/action/fermion/StaggeredKernelsAsm.cc b/lib/qcd/action/fermion/StaggeredKernelsAsm.cc index 25bde931..6c213b51 100644 --- a/lib/qcd/action/fermion/StaggeredKernelsAsm.cc +++ b/lib/qcd/action/fermion/StaggeredKernelsAsm.cc @@ -29,7 +29,6 @@ Author: paboyle #include #include #include -#include // Interleave operations from two directions // This looks just like a 2 spin multiply and reuse same sequence from the Wilson @@ -91,6 +90,16 @@ Author: paboyle #define T2 %zmm26 #define T3 %zmm27 +#define Z00 %zmm28 +#define Z10 %zmm29 +#define Z1 %zmm30 +#define Z2 %zmm31 + +#define Z3 Chi_22 +#define Z4 Chi_30 +#define Z5 Chi_31 +#define Z6 Chi_32 + #define MULT_ADD_LS(g0,g1,g2,g3) \ asm ( "movq %0, %%r8 \n\t" \ "movq %1, %%r9 \n\t" \ @@ -189,37 +198,136 @@ Author: paboyle VMADDSUBRDUP(5,%r10,Chi_22,UChi_21) VMADDSUBRDUP(5,%r11,Chi_32,UChi_31) \ VMADDSUBRDUP(8,%r10,Chi_22,UChi_22) VMADDSUBRDUP(8,%r11,Chi_32,UChi_32) ); -#define LOAD_CHI(a0,a1,a2,a3) \ + +#define MULT_ADD_XYZT(g0,g1) \ + asm ( "movq %0, %%r8 \n\t" \ + "movq %1, %%r9 \n\t" : : "r"(g0), "r"(g1) : "%r8","%r9");\ + __asm__ ( \ + VSHUFMEM(0,%r8,Z00) VSHUFMEM(0,%r9,Z10) \ + VRDUP(Chi_00,T1) VIDUP(Chi_00,Chi_00) \ + VRDUP(Chi_10,T2) VIDUP(Chi_10,Chi_10) \ + VMUL(Z00,Chi_00,Z1) VMUL(Z10,Chi_10,Z2) \ + VSHUFMEM(3,%r8,Z00) VSHUFMEM(3,%r9,Z10) \ + VMUL(Z00,Chi_00,Z3) VMUL(Z10,Chi_10,Z4) \ + VSHUFMEM(6,%r8,Z00) VSHUFMEM(6,%r9,Z10) \ + VMUL(Z00,Chi_00,Z5) VMUL(Z10,Chi_10,Z6) \ + VMADDMEM(0,%r8,T1,UChi_00) VMADDMEM(0,%r8,T2,UChi_10) \ + VMADDMEM(3,%r8,T1,UChi_01) VMADDMEM(3,%r8,T2,UChi_11) \ + VMADDMEM(6,%r8,T1,UChi_02) VMADDMEM(6,%r8,T2,UChi_12) \ + VSHUFMEM(1,%r8,Z00) VSHUFMEM(1,%r9,Z10) \ + VRDUP(Chi_01,T1) VIDUP(Chi_01,Chi_01) \ + VRDUP(Chi_11,T2) VIDUP(Chi_11,Chi_11) \ + VMADD(Z00,Chi_01,Z1) VMADD(Z10,Chi_11,Z2) \ + VSHUFMEM(4,%r8,Z00) VSHUFMEM(4,%r9,Z10) \ + VMADD(Z00,Chi_01,Z3) VMADD(Z10,Chi_11,Z4) \ + VSHUFMEM(7,%r8,Z00) 
VSHUFMEM(7,%r9,Z10) \ + VMADD(Z00,Chi_01,Z5) VMADD(Z10,Chi_11,Z6) \ + VMADDMEM(1,%r8,T1,UChi_00) VMADDMEM(1,%r8,T2,UChi_10) \ + VMADDMEM(4,%r8,T1,UChi_01) VMADDMEM(4,%r8,T2,UChi_11) \ + VMADDMEM(7,%r8,T1,UChi_02) VMADDMEM(7,%r8,T2,UChi_12) \ + VSHUFMEM(2,%r8,Z00) VSHUFMEM(2,%r9,Z10) \ + VRDUP(Chi_02,T1) VIDUP(Chi_02,Chi_02) \ + VRDUP(Chi_12,T2) VIDUP(Chi_12,Chi_12) \ + VMADD(Z00,Chi_02,Z1) VMADD(Z10,Chi_12,Z2) \ + VSHUFMEM(5,%r8,Z00) VSHUFMEM(5,%r9,Z10) \ + VMADD(Z00,Chi_02,Z3) VMADD(Z10,Chi_12,Z4) \ + VSHUFMEM(8,%r8,Z00) VSHUFMEM(8,%r9,Z10) \ + VMADD(Z00,Chi_02,Z5) VMADD(Z10,Chi_12,Z6) \ + VMADDSUBMEM(2,%r8,T1,Z1) VMADDSUBMEM(2,%r8,T2,Z2) \ + VMADDSUBMEM(5,%r8,T1,Z3) VMADDSUBMEM(5,%r8,T2,Z4) \ + VMADDSUBMEM(8,%r8,T1,Z5) VMADDSUBMEM(8,%r8,T2,Z6) \ + VADD(Z1,UChi_00,UChi_00) VADD(Z2,UChi_10,UChi_10) \ + VADD(Z3,UChi_01,UChi_01) VADD(Z4,UChi_11,UChi_11) \ + VADD(Z5,UChi_02,UChi_02) VADD(Z6,UChi_12,UChi_12) ); + + +#define MULT_XYZT(g0,g1) \ + asm ( "movq %0, %%r8 \n\t" \ + "movq %1, %%r9 \n\t" : : "r"(g0), "r"(g1) : "%r8","%r9" ); \ + __asm__ ( \ + VSHUFMEM(0,%r8,Z00) VSHUFMEM(0,%r9,Z10) \ + VRDUP(Chi_00,T1) VIDUP(Chi_00,Chi_00) \ + VRDUP(Chi_10,T2) VIDUP(Chi_10,Chi_10) \ + VMUL(Z00,Chi_00,Z1) VMUL(Z10,Chi_10,Z2) \ + VSHUFMEM(3,%r8,Z00) VSHUFMEM(3,%r9,Z10) \ + VMUL(Z00,Chi_00,Z3) VMUL(Z10,Chi_10,Z4) \ + VSHUFMEM(6,%r8,Z00) VSHUFMEM(6,%r9,Z10) \ + VMUL(Z00,Chi_00,Z5) VMUL(Z10,Chi_10,Z6) \ + VMULMEM(0,%r8,T1,UChi_00) VMULMEM(0,%r8,T2,UChi_10) \ + VMULMEM(3,%r8,T1,UChi_01) VMULMEM(3,%r8,T2,UChi_11) \ + VMULMEM(6,%r8,T1,UChi_02) VMULMEM(6,%r8,T2,UChi_12) \ + VSHUFMEM(1,%r8,Z00) VSHUFMEM(1,%r9,Z10) \ + VRDUP(Chi_01,T1) VIDUP(Chi_01,Chi_01) \ + VRDUP(Chi_11,T2) VIDUP(Chi_11,Chi_11) \ + VMADD(Z00,Chi_01,Z1) VMADD(Z10,Chi_11,Z2) \ + VSHUFMEM(4,%r8,Z00) VSHUFMEM(4,%r9,Z10) \ + VMADD(Z00,Chi_01,Z3) VMADD(Z10,Chi_11,Z4) \ + VSHUFMEM(7,%r8,Z00) VSHUFMEM(7,%r9,Z10) \ + VMADD(Z00,Chi_01,Z5) VMADD(Z10,Chi_11,Z6) \ + VMADDMEM(1,%r8,T1,UChi_00) VMADDMEM(1,%r8,T2,UChi_10) \ + VMADDMEM(4,%r8,T1,UChi_01) VMADDMEM(4,%r8,T2,UChi_11) \ + VMADDMEM(7,%r8,T1,UChi_02) VMADDMEM(7,%r8,T2,UChi_12) \ + VSHUFMEM(2,%r8,Z00) VSHUFMEM(2,%r9,Z10) \ + VRDUP(Chi_02,T1) VIDUP(Chi_02,Chi_02) \ + VRDUP(Chi_12,T2) VIDUP(Chi_12,Chi_12) \ + VMADD(Z00,Chi_02,Z1) VMADD(Z10,Chi_12,Z2) \ + VSHUFMEM(5,%r8,Z00) VSHUFMEM(5,%r9,Z10) \ + VMADD(Z00,Chi_02,Z3) VMADD(Z10,Chi_12,Z4) \ + VSHUFMEM(8,%r8,Z00) VSHUFMEM(8,%r9,Z10) \ + VMADD(Z00,Chi_02,Z5) VMADD(Z10,Chi_12,Z6) \ + VMADDSUBMEM(2,%r8,T1,Z1) VMADDSUBMEM(2,%r8,T2,Z2) \ + VMADDSUBMEM(5,%r8,T1,Z3) VMADDSUBMEM(5,%r8,T2,Z4) \ + VMADDSUBMEM(8,%r8,T1,Z5) VMADDSUBMEM(8,%r8,T2,Z6) \ + VADD(Z1,UChi_00,UChi_00) VADD(Z2,UChi_10,UChi_10) \ + VADD(Z3,UChi_01,UChi_01) VADD(Z4,UChi_11,UChi_11) \ + VADD(Z5,UChi_02,UChi_02) VADD(Z6,UChi_12,UChi_12) ); + + +#define LOAD_CHI(a0,a1,a2,a3) \ asm ( \ "movq %0, %%r8 \n\t" \ - VLOAD(0,%%r8,pChi_00) \ - VLOAD(1,%%r8,pChi_01) \ - VLOAD(2,%%r8,pChi_02) \ + VLOAD(0,%%r8,pChi_00) \ + VLOAD(1,%%r8,pChi_01) \ + VLOAD(2,%%r8,pChi_02) \ : : "r" (a0) : "%r8" ); \ asm ( \ "movq %0, %%r8 \n\t" \ - VLOAD(0,%%r8,pChi_10) \ - VLOAD(1,%%r8,pChi_11) \ - VLOAD(2,%%r8,pChi_12) \ + VLOAD(0,%%r8,pChi_10) \ + VLOAD(1,%%r8,pChi_11) \ + VLOAD(2,%%r8,pChi_12) \ : : "r" (a1) : "%r8" ); \ asm ( \ "movq %0, %%r8 \n\t" \ - VLOAD(0,%%r8,pChi_20) \ - VLOAD(1,%%r8,pChi_21) \ - VLOAD(2,%%r8,pChi_22) \ + VLOAD(0,%%r8,pChi_20) \ + VLOAD(1,%%r8,pChi_21) \ + VLOAD(2,%%r8,pChi_22) \ : : "r" (a2) : "%r8" ); \ asm ( \ "movq %0, %%r8 \n\t" \ - VLOAD(0,%%r8,pChi_30) \ - VLOAD(1,%%r8,pChi_31) \ - 
VLOAD(2,%%r8,pChi_32) \ + VLOAD(0,%%r8,pChi_30) \ + VLOAD(1,%%r8,pChi_31) \ + VLOAD(2,%%r8,pChi_32) \ : : "r" (a3) : "%r8" ); -#define PF_CHI(a0) \ +#define LOAD_CHIa(a0,a1) \ asm ( \ "movq %0, %%r8 \n\t" \ - VPREFETCH1(0,%%r8) \ - VPREFETCH1(1,%%r8) \ + VLOAD(0,%%r8,pChi_00) \ + VLOAD(1,%%r8,pChi_01) \ + VLOAD(2,%%r8,pChi_02) \ + : : "r" (a0) : "%r8" ); \ + asm ( \ + "movq %0, %%r8 \n\t" \ + VLOAD(0,%%r8,pChi_10) \ + VLOAD(1,%%r8,pChi_11) \ + VLOAD(2,%%r8,pChi_12) \ + : : "r" (a1) : "%r8" ); + +#define PF_CHI(a0) \ + asm ( \ + "movq %0, %%r8 \n\t" \ + VPREFETCH1(0,%%r8) \ + VPREFETCH1(1,%%r8) \ VPREFETCH1(2,%%r8) \ : : "r" (a0) : "%r8" ); \ @@ -235,6 +343,17 @@ Author: paboyle VADD(UChi_00,UChi_30,UChi_00) \ VADD(UChi_01,UChi_31,UChi_01) \ VADD(UChi_02,UChi_32,UChi_02) ); \ + asm ( \ + VSTORE(0,%0,pUChi_00) \ + VSTORE(1,%0,pUChi_01) \ + VSTORE(2,%0,pUChi_02) \ + : : "r" (out) : "memory" ); + +#define REDUCEa(out) \ + asm ( \ + VADD(UChi_00,UChi_10,UChi_00) \ + VADD(UChi_01,UChi_11,UChi_01) \ + VADD(UChi_02,UChi_12,UChi_02) ); \ asm ( \ VSTORE(0,%0,pUChi_00) \ VSTORE(1,%0,pUChi_01) \ @@ -259,27 +378,7 @@ void StaggeredKernels::DhopSiteAsm(StencilImpl &st, LebesgueOrder &lo, assert(0); } - // This is the single precision 5th direction vectorised kernel -template <> void StaggeredKernels::DhopSiteAsm(StencilImpl &st, LebesgueOrder &lo, - DoubledGaugeField &U, - DoubledGaugeField &UUU, - SiteSpinor *buf, int sF, - int sU, const FermionField &in, SiteSpinor &out) -{ -#ifdef AVX512 - uint64_t gauge0,gauge1,gauge2,gauge3; - uint64_t addr0,addr1,addr2,addr3; - int o0,o1,o2,o3; // offsets - int l0,l1,l2,l3; // local - int p0,p1,p2,p3; // perm - int ptype; - StencilEntry *SE0; - StencilEntry *SE1; - StencilEntry *SE2; - StencilEntry *SE3; - - // Xp, Yp, Zp, Tp #define PREPARE(X,Y,Z,T,skew,UU) \ SE0=st.GetEntry(ptype,X+skew,sF); \ o0 = SE0->_offset; \ @@ -310,6 +409,29 @@ template <> void StaggeredKernels::DhopSiteAsm(StencilImpl gauge2 =(uint64_t)&UU._odata[sU]( Z ); \ gauge3 =(uint64_t)&UU._odata[sU]( T ); + // This is the single precision 5th direction vectorised kernel +#include +template <> void StaggeredKernels::DhopSiteAsm(StencilImpl &st, LebesgueOrder &lo, + DoubledGaugeField &U, + DoubledGaugeField &UUU, + SiteSpinor *buf, int sF, + int sU, const FermionField &in, SiteSpinor &out) +{ +#ifdef AVX512 + uint64_t gauge0,gauge1,gauge2,gauge3; + uint64_t addr0,addr1,addr2,addr3; + + int o0,o1,o2,o3; // offsets + int l0,l1,l2,l3; // local + int p0,p1,p2,p3; // perm + int ptype; + StencilEntry *SE0; + StencilEntry *SE1; + StencilEntry *SE2; + StencilEntry *SE3; + + // Xp, Yp, Zp, Tp + PREPARE(Xp,Yp,Zp,Tp,0,U); LOAD_CHI(addr0,addr1,addr2,addr3); MULT_LS(gauge0,gauge1,gauge2,gauge3); @@ -333,6 +455,108 @@ template <> void StaggeredKernels::DhopSiteAsm(StencilImpl #endif } + // This is the single precision 5th direction vectorised kernel +#include +template <> void StaggeredKernels::DhopSiteAsm(StencilImpl &st, LebesgueOrder &lo, + DoubledGaugeField &U, + DoubledGaugeField &UUU, + SiteSpinor *buf, int sF, + int sU, const FermionField &in, SiteSpinor &out) +{ +#ifdef AVX512 + uint64_t gauge0,gauge1,gauge2,gauge3; + uint64_t addr0,addr1,addr2,addr3; + + int o0,o1,o2,o3; // offsets + int l0,l1,l2,l3; // local + int p0,p1,p2,p3; // perm + int ptype; + StencilEntry *SE0; + StencilEntry *SE1; + StencilEntry *SE2; + StencilEntry *SE3; + + // Xp, Yp, Zp, Tp + + PREPARE(Xp,Yp,Zp,Tp,0,U); + LOAD_CHI(addr0,addr1,addr2,addr3); + MULT_LS(gauge0,gauge1,gauge2,gauge3); + + PREPARE(Xm,Ym,Zm,Tm,0,U); + 
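// Second quadrant: the backward one-hop neighbours. From here on every product is
// accumulated (MULT_ADD_LS) onto the UChi registers that MULT_LS initialised above;
// the two skew-8 PREPARE calls that follow index the three-hop stencil entries and
// fold the Naik (UUU) terms in the same way, before REDUCE sums the partial results.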
LOAD_CHI(addr0,addr1,addr2,addr3); + MULT_ADD_LS(gauge0,gauge1,gauge2,gauge3); + + PREPARE(Xp,Yp,Zp,Tp,8,UUU); + LOAD_CHI(addr0,addr1,addr2,addr3); + MULT_ADD_LS(gauge0,gauge1,gauge2,gauge3); + + PREPARE(Xm,Ym,Zm,Tm,8,UUU); + LOAD_CHI(addr0,addr1,addr2,addr3); + MULT_ADD_LS(gauge0,gauge1,gauge2,gauge3); + + addr0 = (uint64_t) &out; + REDUCE(addr0); +#else + assert(0); +#endif +} + + // This is the single precision 5th direction vectorised kernel +#include +template <> void StaggeredKernels::DhopSiteAsm(StencilImpl &st, LebesgueOrder &lo, + DoubledGaugeField &U, + DoubledGaugeField &UUU, + SiteSpinor *buf, int sF, + int sU, const FermionField &in, SiteSpinor &out) +{ +#ifdef AVX512 + uint64_t gauge0,gauge1,gauge2,gauge3; + uint64_t addr0,addr1,addr2,addr3; + + int o0,o1,o2,o3; // offsets + int l0,l1,l2,l3; // local + int p0,p1,p2,p3; // perm + int ptype; + StencilEntry *SE0; + StencilEntry *SE1; + StencilEntry *SE2; + StencilEntry *SE3; + + // Xp, Yp, Zp, Tp + + PREPARE(Xp,Yp,Zp,Tp,0,U); + LOAD_CHIa(addr0,addr1); + MULT_XYZT(gauge0,gauge1); + LOAD_CHIa(addr2,addr3); + MULT_XYZT(gauge2,gauge3); + + PREPARE(Xm,Ym,Zm,Tm,0,U); + LOAD_CHIa(addr0,addr1); + MULT_ADD_XYZT(gauge0,gauge1); + LOAD_CHIa(addr2,addr3); + MULT_ADD_XYZT(gauge2,gauge3); + + PREPARE(Xp,Yp,Zp,Tp,8,UUU); + LOAD_CHIa(addr0,addr1); + MULT_ADD_XYZT(gauge0,gauge1); + LOAD_CHIa(addr2,addr3); + MULT_ADD_XYZT(gauge2,gauge3); + + PREPARE(Xm,Ym,Zm,Tm,8,UUU); + LOAD_CHIa(addr0,addr1); + MULT_ADD_XYZT(gauge0,gauge1); + LOAD_CHIa(addr2,addr3); + MULT_ADD_XYZT(gauge2,gauge3); + + addr0 = (uint64_t) &out; + REDUCEa(addr0); +#else + assert(0); +#endif +} + + + FermOpStaggeredTemplateInstantiate(StaggeredKernels); FermOpStaggeredVec5dTemplateInstantiate(StaggeredKernels); From df9108154daa740119c22f7a23c3f5230b3a0780 Mon Sep 17 00:00:00 2001 From: azusayamaguchi Date: Sat, 17 Dec 2016 23:47:51 +0000 Subject: [PATCH 014/101] Debugged 2 versions of assembler; ls vectorised, xyzt vectorised --- lib/qcd/action/fermion/StaggeredKernelsAsm.cc | 377 ++++++++++++++++-- tests/core/Test_staggered5Dvec.cc | 27 +- 2 files changed, 361 insertions(+), 43 deletions(-) diff --git a/lib/qcd/action/fermion/StaggeredKernelsAsm.cc b/lib/qcd/action/fermion/StaggeredKernelsAsm.cc index 6c213b51..890cf4e5 100644 --- a/lib/qcd/action/fermion/StaggeredKernelsAsm.cc +++ b/lib/qcd/action/fermion/StaggeredKernelsAsm.cc @@ -90,13 +90,14 @@ Author: paboyle #define T2 %zmm26 #define T3 %zmm27 -#define Z00 %zmm28 -#define Z10 %zmm29 -#define Z1 %zmm30 -#define Z2 %zmm31 +#define Z00 %zmm26 +#define Z10 %zmm27 +#define Z0 Z00 +#define Z1 %zmm28 +#define Z2 %zmm29 -#define Z3 Chi_22 -#define Z4 Chi_30 +#define Z3 %zmm30 +#define Z4 %zmm31 #define Z5 Chi_31 #define Z6 Chi_32 @@ -198,86 +199,269 @@ Author: paboyle VMADDSUBRDUP(5,%r10,Chi_22,UChi_21) VMADDSUBRDUP(5,%r11,Chi_32,UChi_31) \ VMADDSUBRDUP(8,%r10,Chi_22,UChi_22) VMADDSUBRDUP(8,%r11,Chi_32,UChi_32) ); +#define MULT_ADD_XYZTa(g0,g1) \ + asm ( "movq %0, %%r8 \n\t" \ + "movq %1, %%r9 \n\t" : : "r"(g0), "r"(g1) : "%r8","%r9");\ + __asm__ ( \ + VSHUF(Chi_00,T0) \ + VSHUF(Chi_10,T1) \ + VMOVIDUP(0,%r8,Z0 ) \ + VMOVIDUP(3,%r8,Z1 ) \ + VMOVIDUP(6,%r8,Z2 ) \ + VMADDSUB(Z0,T0,UChi_00) \ + VMADDSUB(Z1,T0,UChi_01) \ + VMADDSUB(Z2,T0,UChi_02) \ + \ + VMOVIDUP(0,%r9,Z0 ) \ + VMOVIDUP(3,%r9,Z1 ) \ + VMOVIDUP(6,%r9,Z2 ) \ + VMADDSUB(Z0,T1,UChi_10) \ + VMADDSUB(Z1,T1,UChi_11) \ + VMADDSUB(Z2,T1,UChi_12) \ + \ + \ + VMOVRDUP(0,%r8,Z3 ) \ + VMOVRDUP(3,%r8,Z4 ) \ + VMOVRDUP(6,%r8,Z5 ) \ + VMADDSUB(Z3,Chi_00,UChi_00)/*rr * ir = ri rr*/ \ + 
VMADDSUB(Z4,Chi_00,UChi_01) \ + VMADDSUB(Z5,Chi_00,UChi_02) \ + \ + VMOVRDUP(0,%r9,Z3 ) \ + VMOVRDUP(3,%r9,Z4 ) \ + VMOVRDUP(6,%r9,Z5 ) \ + VMADDSUB(Z3,Chi_10,UChi_10) \ + VMADDSUB(Z4,Chi_10,UChi_11)\ + VMADDSUB(Z5,Chi_10,UChi_12) \ + \ + \ + VMOVIDUP(1,%r8,Z0 ) \ + VMOVIDUP(4,%r8,Z1 ) \ + VMOVIDUP(7,%r8,Z2 ) \ + VSHUF(Chi_01,T0) \ + VMADDSUB(Z0,T0,UChi_00) \ + VMADDSUB(Z1,T0,UChi_01) \ + VMADDSUB(Z2,T0,UChi_02) \ + \ + VMOVIDUP(1,%r9,Z0 ) \ + VMOVIDUP(4,%r9,Z1 ) \ + VMOVIDUP(7,%r9,Z2 ) \ + VSHUF(Chi_11,T1) \ + VMADDSUB(Z0,T1,UChi_10) \ + VMADDSUB(Z1,T1,UChi_11) \ + VMADDSUB(Z2,T1,UChi_12) \ + \ + VMOVRDUP(1,%r8,Z3 ) \ + VMOVRDUP(4,%r8,Z4 ) \ + VMOVRDUP(7,%r8,Z5 ) \ + VMADDSUB(Z3,Chi_01,UChi_00) \ + VMADDSUB(Z4,Chi_01,UChi_01) \ + VMADDSUB(Z5,Chi_01,UChi_02) \ + \ + VMOVRDUP(1,%r9,Z3 ) \ + VMOVRDUP(4,%r9,Z4 ) \ + VMOVRDUP(7,%r9,Z5 ) \ + VMADDSUB(Z3,Chi_11,UChi_10) \ + VMADDSUB(Z4,Chi_11,UChi_11) \ + VMADDSUB(Z5,Chi_11,UChi_12) \ + \ + VSHUF(Chi_02,T0) \ + VSHUF(Chi_12,T1) \ + VMOVIDUP(2,%r8,Z0 ) \ + VMOVIDUP(5,%r8,Z1 ) \ + VMOVIDUP(8,%r8,Z2 ) \ + VMADDSUB(Z0,T0,UChi_00) \ + VMADDSUB(Z1,T0,UChi_01) \ + VMADDSUB(Z2,T0,UChi_02) \ + VMOVIDUP(2,%r9,Z0 ) \ + VMOVIDUP(5,%r9,Z1 ) \ + VMOVIDUP(8,%r9,Z2 ) \ + VMADDSUB(Z0,T1,UChi_10) \ + VMADDSUB(Z1,T1,UChi_11) \ + VMADDSUB(Z2,T1,UChi_12) \ + /*55*/ \ + VMOVRDUP(2,%r8,Z3 ) \ + VMOVRDUP(5,%r8,Z4 ) \ + VMOVRDUP(8,%r8,Z5 ) \ + VMADDSUB(Z3,Chi_02,UChi_00) \ + VMADDSUB(Z4,Chi_02,UChi_01) \ + VMADDSUB(Z5,Chi_02,UChi_02) \ + VMOVRDUP(2,%r9,Z3 ) \ + VMOVRDUP(5,%r9,Z4 ) \ + VMOVRDUP(8,%r9,Z5 ) \ + VMADDSUB(Z3,Chi_12,UChi_10) \ + VMADDSUB(Z4,Chi_12,UChi_11) \ + VMADDSUB(Z5,Chi_12,UChi_12) \ + /*61 insns*/ ); #define MULT_ADD_XYZT(g0,g1) \ asm ( "movq %0, %%r8 \n\t" \ "movq %1, %%r9 \n\t" : : "r"(g0), "r"(g1) : "%r8","%r9");\ __asm__ ( \ - VSHUFMEM(0,%r8,Z00) VSHUFMEM(0,%r9,Z10) \ - VRDUP(Chi_00,T1) VIDUP(Chi_00,Chi_00) \ - VRDUP(Chi_10,T2) VIDUP(Chi_10,Chi_10) \ + VSHUFMEM(0,%r8,Z00) VSHUFMEM(0,%r9,Z10) \ + VRDUP(Chi_00,T0) VIDUP(Chi_00,Chi_00) \ + VRDUP(Chi_10,T1) VIDUP(Chi_10,Chi_10) \ VMUL(Z00,Chi_00,Z1) VMUL(Z10,Chi_10,Z2) \ - VSHUFMEM(3,%r8,Z00) VSHUFMEM(3,%r9,Z10) \ + VSHUFMEM(3,%r8,Z00) VSHUFMEM(3,%r9,Z10) \ VMUL(Z00,Chi_00,Z3) VMUL(Z10,Chi_10,Z4) \ VSHUFMEM(6,%r8,Z00) VSHUFMEM(6,%r9,Z10) \ VMUL(Z00,Chi_00,Z5) VMUL(Z10,Chi_10,Z6) \ - VMADDMEM(0,%r8,T1,UChi_00) VMADDMEM(0,%r8,T2,UChi_10) \ - VMADDMEM(3,%r8,T1,UChi_01) VMADDMEM(3,%r8,T2,UChi_11) \ - VMADDMEM(6,%r8,T1,UChi_02) VMADDMEM(6,%r8,T2,UChi_12) \ + VMADDMEM(0,%r8,T0,UChi_00) VMADDMEM(0,%r9,T1,UChi_10) \ + VMADDMEM(3,%r8,T0,UChi_01) VMADDMEM(3,%r9,T1,UChi_11) \ + VMADDMEM(6,%r8,T0,UChi_02) VMADDMEM(6,%r9,T1,UChi_12) \ VSHUFMEM(1,%r8,Z00) VSHUFMEM(1,%r9,Z10) \ - VRDUP(Chi_01,T1) VIDUP(Chi_01,Chi_01) \ - VRDUP(Chi_11,T2) VIDUP(Chi_11,Chi_11) \ + VRDUP(Chi_01,T0) VIDUP(Chi_01,Chi_01) \ + VRDUP(Chi_11,T1) VIDUP(Chi_11,Chi_11) \ VMADD(Z00,Chi_01,Z1) VMADD(Z10,Chi_11,Z2) \ VSHUFMEM(4,%r8,Z00) VSHUFMEM(4,%r9,Z10) \ VMADD(Z00,Chi_01,Z3) VMADD(Z10,Chi_11,Z4) \ VSHUFMEM(7,%r8,Z00) VSHUFMEM(7,%r9,Z10) \ VMADD(Z00,Chi_01,Z5) VMADD(Z10,Chi_11,Z6) \ - VMADDMEM(1,%r8,T1,UChi_00) VMADDMEM(1,%r8,T2,UChi_10) \ - VMADDMEM(4,%r8,T1,UChi_01) VMADDMEM(4,%r8,T2,UChi_11) \ - VMADDMEM(7,%r8,T1,UChi_02) VMADDMEM(7,%r8,T2,UChi_12) \ + VMADDMEM(1,%r8,T0,UChi_00) VMADDMEM(1,%r9,T1,UChi_10) \ + VMADDMEM(4,%r8,T0,UChi_01) VMADDMEM(4,%r9,T1,UChi_11) \ + VMADDMEM(7,%r8,T0,UChi_02) VMADDMEM(7,%r9,T1,UChi_12) \ VSHUFMEM(2,%r8,Z00) VSHUFMEM(2,%r9,Z10) \ - VRDUP(Chi_02,T1) VIDUP(Chi_02,Chi_02) \ - VRDUP(Chi_12,T2) VIDUP(Chi_12,Chi_12) 
\ + VRDUP(Chi_02,T0) VIDUP(Chi_02,Chi_02) \ + VRDUP(Chi_12,T1) VIDUP(Chi_12,Chi_12) \ VMADD(Z00,Chi_02,Z1) VMADD(Z10,Chi_12,Z2) \ VSHUFMEM(5,%r8,Z00) VSHUFMEM(5,%r9,Z10) \ VMADD(Z00,Chi_02,Z3) VMADD(Z10,Chi_12,Z4) \ VSHUFMEM(8,%r8,Z00) VSHUFMEM(8,%r9,Z10) \ VMADD(Z00,Chi_02,Z5) VMADD(Z10,Chi_12,Z6) \ - VMADDSUBMEM(2,%r8,T1,Z1) VMADDSUBMEM(2,%r8,T2,Z2) \ - VMADDSUBMEM(5,%r8,T1,Z3) VMADDSUBMEM(5,%r8,T2,Z4) \ - VMADDSUBMEM(8,%r8,T1,Z5) VMADDSUBMEM(8,%r8,T2,Z6) \ + VMADDSUBMEM(2,%r8,T0,Z1) VMADDSUBMEM(2,%r9,T1,Z2) \ + VMADDSUBMEM(5,%r8,T0,Z3) VMADDSUBMEM(5,%r9,T1,Z4) \ + VMADDSUBMEM(8,%r8,T0,Z5) VMADDSUBMEM(8,%r9,T1,Z6) \ VADD(Z1,UChi_00,UChi_00) VADD(Z2,UChi_10,UChi_10) \ VADD(Z3,UChi_01,UChi_01) VADD(Z4,UChi_11,UChi_11) \ VADD(Z5,UChi_02,UChi_02) VADD(Z6,UChi_12,UChi_12) ); - #define MULT_XYZT(g0,g1) \ + asm ( "movq %0, %%r8 \n\t" \ + "movq %1, %%r9 \n\t" : : "r"(g0), "r"(g1) : "%r8","%r9" ); \ + __asm__ ( \ + VSHUF(Chi_00,T0) \ + VSHUF(Chi_10,T1) \ + VMOVIDUP(0,%r8,Z0 ) \ + VMOVIDUP(3,%r8,Z1 ) \ + VMOVIDUP(6,%r8,Z2 ) \ + /*6*/ \ + VMUL(Z0,T0,UChi_00) \ + VMUL(Z1,T0,UChi_01) \ + VMUL(Z2,T0,UChi_02) \ + VMOVIDUP(0,%r9,Z0 ) \ + VMOVIDUP(3,%r9,Z1 ) \ + VMOVIDUP(6,%r9,Z2 ) \ + VMUL(Z0,T1,UChi_10) \ + VMUL(Z1,T1,UChi_11) \ + VMUL(Z2,T1,UChi_12) \ + VMOVRDUP(0,%r8,Z3 ) \ + VMOVRDUP(3,%r8,Z4 ) \ + VMOVRDUP(6,%r8,Z5 ) \ + /*18*/ \ + VMADDSUB(Z3,Chi_00,UChi_00) \ + VMADDSUB(Z4,Chi_00,UChi_01)\ + VMADDSUB(Z5,Chi_00,UChi_02) \ + VMOVRDUP(0,%r9,Z3 ) \ + VMOVRDUP(3,%r9,Z4 ) \ + VMOVRDUP(6,%r9,Z5 ) \ + VMADDSUB(Z3,Chi_10,UChi_10) \ + VMADDSUB(Z4,Chi_10,UChi_11)\ + VMADDSUB(Z5,Chi_10,UChi_12) \ + VMOVIDUP(1,%r8,Z0 ) \ + VMOVIDUP(4,%r8,Z1 ) \ + VMOVIDUP(7,%r8,Z2 ) \ + /*28*/ \ + VSHUF(Chi_01,T0) \ + VMADDSUB(Z0,T0,UChi_00) \ + VMADDSUB(Z1,T0,UChi_01) \ + VMADDSUB(Z2,T0,UChi_02) \ + VMOVIDUP(1,%r9,Z0 ) \ + VMOVIDUP(4,%r9,Z1 ) \ + VMOVIDUP(7,%r9,Z2 ) \ + VSHUF(Chi_11,T1) \ + VMADDSUB(Z0,T1,UChi_10) \ + VMADDSUB(Z1,T1,UChi_11) \ + VMADDSUB(Z2,T1,UChi_12) \ + VMOVRDUP(1,%r8,Z3 ) \ + VMOVRDUP(4,%r8,Z4 ) \ + VMOVRDUP(7,%r8,Z5 ) \ + /*38*/ \ + VMADDSUB(Z3,Chi_01,UChi_00) \ + VMADDSUB(Z4,Chi_01,UChi_01) \ + VMADDSUB(Z5,Chi_01,UChi_02) \ + VMOVRDUP(1,%r9,Z3 ) \ + VMOVRDUP(4,%r9,Z4 ) \ + VMOVRDUP(7,%r9,Z5 ) \ + VMADDSUB(Z3,Chi_11,UChi_10) \ + VMADDSUB(Z4,Chi_11,UChi_11) \ + VMADDSUB(Z5,Chi_11,UChi_12) \ + /*48*/ \ + VSHUF(Chi_02,T0) \ + VSHUF(Chi_12,T1) \ + VMOVIDUP(2,%r8,Z0 ) \ + VMOVIDUP(5,%r8,Z1 ) \ + VMOVIDUP(8,%r8,Z2 ) \ + VMADDSUB(Z0,T0,UChi_00) \ + VMADDSUB(Z1,T0,UChi_01) \ + VMADDSUB(Z2,T0,UChi_02) \ + VMOVIDUP(2,%r9,Z0 ) \ + VMOVIDUP(5,%r9,Z1 ) \ + VMOVIDUP(8,%r9,Z2 ) \ + VMADDSUB(Z0,T1,UChi_10) \ + VMADDSUB(Z1,T1,UChi_11) \ + VMADDSUB(Z2,T1,UChi_12) \ + /*55*/ \ + VMOVRDUP(2,%r8,Z3 ) \ + VMOVRDUP(5,%r8,Z4 ) \ + VMOVRDUP(8,%r8,Z5 ) \ + VMADDSUB(Z3,Chi_02,UChi_00) \ + VMADDSUB(Z4,Chi_02,UChi_01) \ + VMADDSUB(Z5,Chi_02,UChi_02) \ + VMOVRDUP(2,%r9,Z3 ) \ + VMOVRDUP(5,%r9,Z4 ) \ + VMOVRDUP(8,%r9,Z5 ) \ + VMADDSUB(Z3,Chi_12,UChi_10) \ + VMADDSUB(Z4,Chi_12,UChi_11) \ + VMADDSUB(Z5,Chi_12,UChi_12) \ + /*61 insns*/ ); + +#define MULT_XYZTa(g0,g1) \ asm ( "movq %0, %%r8 \n\t" \ "movq %1, %%r9 \n\t" : : "r"(g0), "r"(g1) : "%r8","%r9" ); \ __asm__ ( \ VSHUFMEM(0,%r8,Z00) VSHUFMEM(0,%r9,Z10) \ - VRDUP(Chi_00,T1) VIDUP(Chi_00,Chi_00) \ - VRDUP(Chi_10,T2) VIDUP(Chi_10,Chi_10) \ + VRDUP(Chi_00,T0) VIDUP(Chi_00,Chi_00) \ + VRDUP(Chi_10,T1) VIDUP(Chi_10,Chi_10) \ VMUL(Z00,Chi_00,Z1) VMUL(Z10,Chi_10,Z2) \ VSHUFMEM(3,%r8,Z00) VSHUFMEM(3,%r9,Z10) \ VMUL(Z00,Chi_00,Z3) VMUL(Z10,Chi_10,Z4) \ VSHUFMEM(6,%r8,Z00) 
VSHUFMEM(6,%r9,Z10) \ VMUL(Z00,Chi_00,Z5) VMUL(Z10,Chi_10,Z6) \ - VMULMEM(0,%r8,T1,UChi_00) VMULMEM(0,%r8,T2,UChi_10) \ - VMULMEM(3,%r8,T1,UChi_01) VMULMEM(3,%r8,T2,UChi_11) \ - VMULMEM(6,%r8,T1,UChi_02) VMULMEM(6,%r8,T2,UChi_12) \ + VMULMEM(0,%r8,T0,UChi_00) VMULMEM(0,%r9,T1,UChi_10) \ + VMULMEM(3,%r8,T0,UChi_01) VMULMEM(3,%r9,T1,UChi_11) \ + VMULMEM(6,%r8,T0,UChi_02) VMULMEM(6,%r9,T1,UChi_12) \ VSHUFMEM(1,%r8,Z00) VSHUFMEM(1,%r9,Z10) \ - VRDUP(Chi_01,T1) VIDUP(Chi_01,Chi_01) \ - VRDUP(Chi_11,T2) VIDUP(Chi_11,Chi_11) \ + VRDUP(Chi_01,T0) VIDUP(Chi_01,Chi_01) \ + VRDUP(Chi_11,T1) VIDUP(Chi_11,Chi_11) \ VMADD(Z00,Chi_01,Z1) VMADD(Z10,Chi_11,Z2) \ VSHUFMEM(4,%r8,Z00) VSHUFMEM(4,%r9,Z10) \ VMADD(Z00,Chi_01,Z3) VMADD(Z10,Chi_11,Z4) \ VSHUFMEM(7,%r8,Z00) VSHUFMEM(7,%r9,Z10) \ VMADD(Z00,Chi_01,Z5) VMADD(Z10,Chi_11,Z6) \ - VMADDMEM(1,%r8,T1,UChi_00) VMADDMEM(1,%r8,T2,UChi_10) \ - VMADDMEM(4,%r8,T1,UChi_01) VMADDMEM(4,%r8,T2,UChi_11) \ - VMADDMEM(7,%r8,T1,UChi_02) VMADDMEM(7,%r8,T2,UChi_12) \ + VMADDMEM(1,%r8,T0,UChi_00) VMADDMEM(1,%r9,T1,UChi_10) \ + VMADDMEM(4,%r8,T0,UChi_01) VMADDMEM(4,%r9,T1,UChi_11) \ + VMADDMEM(7,%r8,T0,UChi_02) VMADDMEM(7,%r9,T1,UChi_12) \ VSHUFMEM(2,%r8,Z00) VSHUFMEM(2,%r9,Z10) \ - VRDUP(Chi_02,T1) VIDUP(Chi_02,Chi_02) \ - VRDUP(Chi_12,T2) VIDUP(Chi_12,Chi_12) \ + VRDUP(Chi_02,T0) VIDUP(Chi_02,Chi_02) \ + VRDUP(Chi_12,T1) VIDUP(Chi_12,Chi_12) \ VMADD(Z00,Chi_02,Z1) VMADD(Z10,Chi_12,Z2) \ VSHUFMEM(5,%r8,Z00) VSHUFMEM(5,%r9,Z10) \ VMADD(Z00,Chi_02,Z3) VMADD(Z10,Chi_12,Z4) \ VSHUFMEM(8,%r8,Z00) VSHUFMEM(8,%r9,Z10) \ VMADD(Z00,Chi_02,Z5) VMADD(Z10,Chi_12,Z6) \ - VMADDSUBMEM(2,%r8,T1,Z1) VMADDSUBMEM(2,%r8,T2,Z2) \ - VMADDSUBMEM(5,%r8,T1,Z3) VMADDSUBMEM(5,%r8,T2,Z4) \ - VMADDSUBMEM(8,%r8,T1,Z5) VMADDSUBMEM(8,%r8,T2,Z6) \ + VMADDSUBMEM(2,%r8,T0,Z1) VMADDSUBMEM(2,%r9,T1,Z2) \ + VMADDSUBMEM(5,%r8,T0,Z3) VMADDSUBMEM(5,%r9,T1,Z4) \ + VMADDSUBMEM(8,%r8,T0,Z5) VMADDSUBMEM(8,%r9,T1,Z6) \ VADD(Z1,UChi_00,UChi_00) VADD(Z2,UChi_10,UChi_10) \ VADD(Z3,UChi_01,UChi_01) VADD(Z4,UChi_11,UChi_11) \ VADD(Z5,UChi_02,UChi_02) VADD(Z6,UChi_12,UChi_12) ); @@ -383,24 +567,28 @@ void StaggeredKernels::DhopSiteAsm(StencilImpl &st, LebesgueOrder &lo, SE0=st.GetEntry(ptype,X+skew,sF); \ o0 = SE0->_offset; \ l0 = SE0->_is_local; \ + p0 = SE0->_permute; \ addr0 = l0 ? (uint64_t) &in._odata[o0] : (uint64_t) &buf[o0]; \ PF_CHI(addr0); \ \ SE1=st.GetEntry(ptype,Y+skew,sF); \ o1 = SE1->_offset; \ l1 = SE1->_is_local; \ + p1 = SE1->_permute; \ addr1 = l1 ? (uint64_t) &in._odata[o1] : (uint64_t) &buf[o1]; \ PF_CHI(addr1); \ \ SE2=st.GetEntry(ptype,Z+skew,sF); \ o2 = SE2->_offset; \ l2 = SE2->_is_local; \ + p2 = SE2->_permute; \ addr2 = l2 ? (uint64_t) &in._odata[o2] : (uint64_t) &buf[o2]; \ PF_CHI(addr2); \ \ SE3=st.GetEntry(ptype,T+skew,sF); \ o3 = SE3->_offset; \ l3 = SE3->_is_local; \ + p3 = SE3->_permute; \ addr3 = l3 ? 
(uint64_t) &in._odata[o3] : (uint64_t) &buf[o3]; \ PF_CHI(addr3); \ \ @@ -501,6 +689,27 @@ template <> void StaggeredKernels::DhopSiteAsm(StencilImpl #endif } + +#define PERMUTE_DIR3 __asm__ ( \ + VPERM3(Chi_00,Chi_00) \ + VPERM3(Chi_01,Chi_01) \ + VPERM3(Chi_02,Chi_02) ); + +#define PERMUTE_DIR2 __asm__ ( \ + VPERM2(Chi_10,Chi_10) \ + VPERM2(Chi_11,Chi_11) \ + VPERM2(Chi_12,Chi_12) ); + +#define PERMUTE_DIR1 __asm__ ( \ + VPERM1(Chi_00,Chi_00) \ + VPERM1(Chi_01,Chi_01) \ + VPERM1(Chi_02,Chi_02) ); + +#define PERMUTE_DIR0 __asm__ ( \ + VPERM0(Chi_10,Chi_10) \ + VPERM0(Chi_11,Chi_11) \ + VPERM0(Chi_12,Chi_12) ); + // This is the single precision 5th direction vectorised kernel #include template <> void StaggeredKernels::DhopSiteAsm(StencilImpl &st, LebesgueOrder &lo, @@ -523,29 +732,115 @@ template <> void StaggeredKernels::DhopSiteAsm(StencilImpl &st, StencilEntry *SE3; // Xp, Yp, Zp, Tp - PREPARE(Xp,Yp,Zp,Tp,0,U); LOAD_CHIa(addr0,addr1); + if (l0&&p0) { PERMUTE_DIR3; } + if (l1&&p1) { PERMUTE_DIR2; } MULT_XYZT(gauge0,gauge1); LOAD_CHIa(addr2,addr3); - MULT_XYZT(gauge2,gauge3); + if (l2&&p2) { PERMUTE_DIR1; } + if (l3&&p3) { PERMUTE_DIR0; } + MULT_ADD_XYZT(gauge2,gauge3); PREPARE(Xm,Ym,Zm,Tm,0,U); LOAD_CHIa(addr0,addr1); + if (l0&&p0) { PERMUTE_DIR3; } + if (l1&&p1) { PERMUTE_DIR2; } MULT_ADD_XYZT(gauge0,gauge1); LOAD_CHIa(addr2,addr3); + if (l2&&p2) { PERMUTE_DIR1; } + if (l3&&p3) { PERMUTE_DIR0; } MULT_ADD_XYZT(gauge2,gauge3); PREPARE(Xp,Yp,Zp,Tp,8,UUU); LOAD_CHIa(addr0,addr1); + if (l0&&p0) { PERMUTE_DIR3; } + if (l1&&p1) { PERMUTE_DIR2; } MULT_ADD_XYZT(gauge0,gauge1); LOAD_CHIa(addr2,addr3); + if (l2&&p2) { PERMUTE_DIR1; } + if (l3&&p3) { PERMUTE_DIR0; } MULT_ADD_XYZT(gauge2,gauge3); PREPARE(Xm,Ym,Zm,Tm,8,UUU); LOAD_CHIa(addr0,addr1); + if (l0&&p0) { PERMUTE_DIR3; } + if (l1&&p1) { PERMUTE_DIR2; } MULT_ADD_XYZT(gauge0,gauge1); LOAD_CHIa(addr2,addr3); + if (l2&&p2) { PERMUTE_DIR1; } + if (l3&&p3) { PERMUTE_DIR0; } + MULT_ADD_XYZT(gauge2,gauge3); + + addr0 = (uint64_t) &out; + REDUCEa(addr0); +#else + assert(0); +#endif +} + + + // This is the single precision 5th direction vectorised kernel +#include +template <> void StaggeredKernels::DhopSiteAsm(StencilImpl &st, LebesgueOrder &lo, + DoubledGaugeField &U, + DoubledGaugeField &UUU, + SiteSpinor *buf, int sF, + int sU, const FermionField &in, SiteSpinor &out) +{ +#ifdef AVX512 + uint64_t gauge0,gauge1,gauge2,gauge3; + uint64_t addr0,addr1,addr2,addr3; + + int o0,o1,o2,o3; // offsets + int l0,l1,l2,l3; // local + int p0,p1,p2,p3; // perm + int ptype; + StencilEntry *SE0; + StencilEntry *SE1; + StencilEntry *SE2; + StencilEntry *SE3; + + // Xp, Yp, Zp, Tp + + PREPARE(Xp,Yp,Zp,Tp,0,U); + LOAD_CHIa(addr0,addr1); + if (p0) { PERMUTE_DIR3; } + if (p1) { PERMUTE_DIR2; } + MULT_XYZT(gauge0,gauge1); + LOAD_CHIa(addr2,addr3); + if (p2) { PERMUTE_DIR1; } + if (p3) { PERMUTE_DIR0; } + MULT_ADD_XYZT(gauge2,gauge3); + + PREPARE(Xm,Ym,Zm,Tm,0,U); + LOAD_CHIa(addr0,addr1); + if (p0) { PERMUTE_DIR3; } + if (p1) { PERMUTE_DIR2; } + MULT_ADD_XYZT(gauge0,gauge1); + LOAD_CHIa(addr2,addr3); + if (p2) { PERMUTE_DIR1; } + if (p3) { PERMUTE_DIR0; } + MULT_ADD_XYZT(gauge2,gauge3); + + PREPARE(Xp,Yp,Zp,Tp,8,UUU); + LOAD_CHIa(addr0,addr1); + if (p0) { PERMUTE_DIR3; } + if (p1) { PERMUTE_DIR2; } + MULT_ADD_XYZT(gauge0,gauge1); + LOAD_CHIa(addr2,addr3); + if (p2) { PERMUTE_DIR1; } + if (p3) { PERMUTE_DIR0; } + MULT_ADD_XYZT(gauge2,gauge3); + + PREPARE(Xm,Ym,Zm,Tm,8,UUU); + LOAD_CHIa(addr0,addr1); + if (p0) { PERMUTE_DIR3; } + if (p1) { PERMUTE_DIR2; } + 
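// Fold the backward X,Y pair onto the running sums; Z,T follow the same
// two-at-a-time pattern, each MULT_ADD_XYZT consuming two links and the two
// colour vectors just loaded (permuted across SIMD lanes where required).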
MULT_ADD_XYZT(gauge0,gauge1); + LOAD_CHIa(addr2,addr3); + if (p2) { PERMUTE_DIR1; } + if (p3) { PERMUTE_DIR0; } MULT_ADD_XYZT(gauge2,gauge3); addr0 = (uint64_t) &out; diff --git a/tests/core/Test_staggered5Dvec.cc b/tests/core/Test_staggered5Dvec.cc index be08c8b4..f3da3a1c 100644 --- a/tests/core/Test_staggered5Dvec.cc +++ b/tests/core/Test_staggered5Dvec.cc @@ -68,12 +68,14 @@ int main (int argc, char ** argv) FermionField src (FGrid); - //random(pRNG5,src); + random(pRNG5,src); + /* std::vector site({0,0,0,0,0}); ColourVector cv = zero; cv()()(0)=1.0; src = zero; pokeSite(cv,src,site); + */ FermionField result(FGrid); result=zero; FermionField tmp(FGrid); tmp=zero; @@ -81,8 +83,15 @@ int main (int argc, char ** argv) FermionField phi (FGrid); random(pRNG5,phi); FermionField chi (FGrid); random(pRNG5,chi); - LatticeGaugeField Umu(UGrid); SU3::ColdConfiguration(pRNG4,Umu); + LatticeGaugeField Umu(UGrid); SU3::HotConfiguration(pRNG4,Umu); + /* + for(int mu=1;mu<4;mu++){ + auto tmp = PeekIndex(Umu,mu); + tmp = zero; + PokeIndex(Umu,tmp,mu); + } + */ double volume=Ls; for(int mu=0;mu + +/********************************************************* + * Architectural macros + *********************************************************/ +#define VLOADf(OFF,PTR,DEST) "qvlfsux " #DEST "," #OFF "," #PTR ") ;\n" +#define VLOADd(OFF,PTR,DEST) "qvlfdux " #DEST "," #OFF "," #PTR ") ;\n" +#define VSTOREf(OFF,PTR,SRC) "qvstfsux " #SRC "," #OFF "," #PTR ") ;\n" +#define VSTOREd(OFF,PTR,SRC) "qvstfdux " #SRC "," #OFF "," #PTR ") ;\n" +#define VSPLATf(A,B,DEST) "qvlfcdxa " #A "," #B "," #DEST ";\n" +#define VSPLATd(A,B,DEST) "qvlfcsxa " #A "," #B "," #DEST ";\n" + +#define LOAD64(A,ptr) +#define VZERO(DEST) "qvfclr " #DEST "; \n" +#define VONE (DEST) "qvfset " #DEST "; \n" +#define VNEG (SRC,DEST) "qvfneg " #DEST "," #SRC "; \n" +#define VMOV(A,DEST) "qvfmr " #DEST, "," #A ";\n" + +#define VADD(A,B,DEST) "qvfadd " #DEST "," #A "," #B ";\n" +#define VSUB(A,B,DEST) "qvfsub " #DEST "," #A "," #B ";\n" +#define VMUL(A,B,DEST) "qvfmul " #DEST "," #A "," #B ";\n" +#define VMUL_RR_RI(A,B,DEST) "qvfxmul " #DEST "," #A "," #B ";\n" +#define VMADD(A,B,C,DEST) "qvfmadd " #DEST "," #A "," #B ","#C ";\n" +#define VMADD_RR_RI(A,B,C,DEST) "qvfxmadd " #DEST "," #A "," #B ","#C ";\n" +#define VMADD_MII_IR(A,B,C,DEST) "qvfxxnpmadd " #DEST "," #A "," #B ","#C ";\n" +#define VMADD_II_MIR(A,B,C,DEST) "qvfmadd " #DEST "," #A "," #B ","#C ";\n" + +#define CACHE_LOCK (PTR) asm (" dcbtls %%r0, %0 \n" : : "r" (PTR) ); +#define CACHE_UNLOCK(PTR) asm (" dcblc %%r0, %0 \n" : : "r" (PTR) ); +#define CACHE_FLUSH (PTR) asm (" dcbf %%r0, %0 \n" : : "r" (PTR) ); +#define CACHE_TOUCH (PTR) asm (" dcbt %%r0, %0 \n" : : "r" (PTR) ); + +// Gauge field locking 2 x 9 complex == 18*8 / 16 bytes per link +// This is 144/288 bytes == 4.5; 9 lines +#define MASK_REGS /*NOOP ON BGQ*/ +#define PF_GAUGE(A) /*NOOP ON BGQ*/ +#define PREFETCH1_CHIMU(base) /*NOOP ON BGQ*/ +#define PREFETCH_CHIMU(base) /*NOOP ON BGQ*/ + +/********************************************************* + * Register definitions + *********************************************************/ +#define psi_00 0 +#define psi_01 1 +#define psi_02 2 + +#define psi_10 3 +#define psi_11 4 +#define psi_12 5 + +#define psi_20 6 +#define psi_21 7 +#define psi_22 8 + +#define psi_30 9 +#define psi_31 10 +#define psi_32 11 + +#define Chi_00 12 +#define Chi_01 13 +#define Chi_02 14 + +#define Chi_10 15 +#define Chi_11 16 +#define Chi_12 17 + +#define UChi_00 18 +#define UChi_01 19 +#define UChi_02 20 + 
+#define UChi_10 21 +#define UChi_11 22 +#define UChi_12 23 + +#define U0 24 +#define U1 25 +#define U2 26 +#define one 27 + +#define REP %%r16 +#define IMM %%r17 + +/*Alias regs*/ +#define Chimu_00 Chi_00 +#define Chimu_01 Chi_01 +#define Chimu_02 Chi_02 +#define Chimu_10 Chi_10 +#define Chimu_11 Chi_11 +#define Chimu_12 Chi_02 +#define Chimu_20 UChi_00 +#define Chimu_21 UChi_01 +#define Chimu_22 UChi_02 +#define Chimu_30 UChi_10 +#define Chimu_31 UChi_11 +#define Chimu_32 UChi_02 + +/********************************************************* + * Macro sequences encoding QCD + *********************************************************/ +#define LOCK_GAUGE(dir) \ + { \ + uint8_t *byte_addr = (uint8_t *)&U._odata[sU](dir); \ + for(int i=0;i< 18*2*BYTES_PER_WORD*8;i+=32){ \ + CACHE_LOCK(&byte_addr[i]); \ + } \ + } + +#define UNLOCK_GAUGE(dir) \ + { \ + uint8_t *byte_addr = (uint8_t *)&U._odata[sU](dir); \ + for(int i=0;i< 18*2*BYTES_PER_WORD*8;i+=32){ \ + CACHE_UNLOCK(&byte_addr[i]); \ + } \ + } + +#define MAYBEPERM(A,B) + +#define PERMUTE_DIR3 +#define PERMUTE_DIR2 +#define PERMUTE_DIR1 +#define PERMUTE_DIR0 + +#define MULT_2SPIN_DIR_PFXP(A,p) MULT_2SPIN(&U._odata[sU](A),p) +#define MULT_2SPIN_DIR_PFYP(A,p) MULT_2SPIN(&U._odata[sU](A),p) +#define MULT_2SPIN_DIR_PFZP(A,p) MULT_2SPIN(&U._odata[sU](A),p) +#define MULT_2SPIN_DIR_PFTP(A,p) MULT_2SPIN(&U._odata[sU](A),p) +#define MULT_2SPIN_DIR_PFXM(A,p) MULT_2SPIN(&U._odata[sU](A),p) +#define MULT_2SPIN_DIR_PFYM(A,p) MULT_2SPIN(&U._odata[sU](A),p) +#define MULT_2SPIN_DIR_PFZM(A,p) MULT_2SPIN(&U._odata[sU](A),p) +#define MULT_2SPIN_DIR_PFTM(A,p) MULT_2SPIN(&U._odata[sU](A),p) + +#define MULT_SPIN(ptr,p) { \ + uint64_t ub = ((uint64_t)base); \ + asm ( \ + VLOAD(%0,%3,U0) \ + VLOAD(%1,%3,U1) \ + VLOAD(%2,%3,U2) \ + VMUL_RR_RI(U0,Chi_00,UChi_00) \ + VMUL_RR_RI(U1,Chi_00,UChi_01) \ + VMUL_RR_RI(U2,Chi_00,UChi_02) \ + VMUL_RR_RI(U0,Chi_10,UChi_10) \ + VMUL_RR_RI(U1,Chi_10,UChi_11) \ + VMUL_RR_RI(U2,Chi_10,UChi_12) \ + VMADD_MII_IR(U0,Chi_00,UChi_00,UChi_00) \ + VMADD_MII_IR(U1,Chi_00,UChi_01,UChi_01) \ + VMADD_MII_IR(U2,Chi_00,UChi_02,UChi_02) \ + VMADD_MII_IR(U0,Chi_10,UChi_10,UChi_10) \ + VMADD_MII_IR(U1,Chi_10,UChi_11,UChi_11) \ + VMADD_MII_IR(U2,Chi_10,UChi_12,UChi_12) \ + : : "r" (0), "r" (32*3), "r" (32*6), "r" (ub )); \ + asm ( \ + VLOAD(%0,%3,U0) \ + VLOAD(%1,%3,U1) \ + VLOAD(%2,%3,U2) \ + VMADD_RR_RI(U0,Chi_01,UChi_00,UChi_00) \ + VMADD_RR_RI(U1,Chi_01,UChi_01,UChi_01) \ + VMADD_RR_RI(U2,Chi_01,UChi_02,UChi_02) \ + VMADD_RR_RI(U0,Chi_11,UChi_10,UChi_10) \ + VMADD_RR_RI(U1,Chi_11,UChi_11,UChi_11) \ + VMADD_RR_RI(U2,Chi_11,UChi_12,UChi_12) \ + VMADD_MII_IR(U0,Chi_01,UChi_00,UChi_00) \ + VMADD_MII_IR(U1,Chi_01,UChi_01,UChi_01) \ + VMADD_MII_IR(U2,Chi_01,UChi_02,UChi_02) \ + VMADD_MII_IR(U0,Chi_11,UChi_10,UChi_10) \ + VMADD_MII_IR(U1,Chi_11,UChi_11,UChi_11) \ + VMADD_MII_IR(U2,Chi_11,UChi_12,UChi_12) \ + : : "r" (32), "r" (32*4), "r" (32*7), "r" (ub )); \ + asm ( \ + VLOAD(%0,%3,U0) \ + VLOAD(%1,%3,U1) \ + VLOAD(%2,%3,U2) \ + VMADD_RR_RI(U0,Chi_02,UChi_00,UChi_00) \ + VMADD_RR_RI(U1,Chi_02,UChi_01,UChi_01) \ + VMADD_RR_RI(U2,Chi_02,UChi_02,UChi_02) \ + VMADD_RR_RI(U0,Chi_12,UChi_10,UChi_10) \ + VMADD_RR_RI(U1,Chi_12,UChi_11,UChi_11) \ + VMADD_RR_RI(U2,Chi_12,UChi_12,UChi_12) \ + VMADD_MII_IR(U0,Chi_02,UChi_00,UChi_00) \ + VMADD_MII_IR(U1,Chi_02,UChi_01,UChi_01) \ + VMADD_MII_IR(U2,Chi_02,UChi_02,UChi_02) \ + VMADD_MII_IR(U0,Chi_12,UChi_10,UChi_10) \ + VMADD_MII_IR(U1,Chi_12,UChi_11,UChi_11) \ + VMADD_MII_IR(U2,Chi_12,UChi_12,UChi_12) \ + : : "r" 
(32*2), "r" (32*5), "r" (32*8), "r" (ub )); \ + } + +#define SAVE_RESULT(base,basep) {\ + uint64_t ub = ((uint64_t)base) - 32; \ + asm("mr %0,"REP";\n\t" \ + "li " IMM ",32;\n\t" \ + VSTORE(IMM,REP,psi_00) \ + VSTORE(IMM,REP,psi_01) \ + VSTORE(IMM,REP,psi_02) \ + VSTORE(IMM,REP,psi_10) \ + VSTORE(IMM,REP,psi_11) \ + VSTORE(IMM,REP,psi_12) \ + VSTORE(IMM,REP,psi_20) \ + VSTORE(IMM,REP,psi_21) \ + VSTORE(IMM,REP,psi_22) \ + VSTORE(IMM,REP,psi_30) \ + VSTORE(IMM,REP,psi_31) \ + VSTORE(IMM,REP,psi_32) \ + ); \ +} + +/* + *Annoying BG/Q loads with no immediat indexing and big performance hit + *when second miss to a L1 line occurs + */ +#define LOAD_CHI(base) { \ + uint64_t ub = ((uint64_t)base) - 64; \ + asm("mr %0,"REP";\n\t" \ + "li " IMM ",64;\n\t" \ + VLOAD(IMM,REP,Chi_00) \ + VLOAD(IMM,REP,Chi_02) \ + VLOAD(IMM,REP,Chi_11) : : "r" (ub) ); \ + ub = ((uint64_t)base) - 32; \ + asm("mr %0,"REP";\n\t" \ + "li IMM,64;\n\t" \ + VLOAD(IMM,REP,Chimu_01) \ + VLOAD(IMM,REP,Chimu_10) \ + VLOAD(IMM,REP,Chimu_12) : : "r" (ub) ); \ + } + +#define LOAD_CHIMU(base) { \ + uint64_t ub = ((uint64_t)base) - 64; \ + asm("mr %0,"REP";\n\t" \ + "li IMM,64;\n\t" \ + VLOAD(IMM,REP,Chimu_00) \ + VLOAD(IMM,REP,Chimu_02) \ + VLOAD(IMM,REP,Chimu_11) \ + VLOAD(IMM,REP,Chimu_20) \ + VLOAD(IMM,REP,Chimu_22) \ + VLOAD(IMM,REP,Chimu_31) : : "r" (ub) ); \ + ub = ((uint64_t)base) - 32; \ + asm("mr %0,"REP";\n\t" \ + "li IMM,64;\n\t" \ + VLOAD(IMM,REP,Chimu_01) \ + VLOAD(IMM,REP,Chimu_10) \ + VLOAD(IMM,REP,Chimu_12) \ + VLOAD(IMM,REP,Chimu_21) \ + VLOAD(IMM,REP,Chimu_30) \ + VLOAD(IMM,REP,Chimu_32) : : "r" (ub) ); \ + } + +// hspin(0)=fspin(0)+timesI(fspin(3)); +// hspin(1)=fspin(1)+timesI(fspin(2)); +#define XP_PROJMEM(base) { \ + LOAD_CHIMU(base); \ + asm ( \ + VONE(one) \ + VMADD_MII_IR(one,Chimu_30,Chimu_00,Chi_00) \ + VMADD_MII_IR(one,Chimu_31,Chimu_01,Chi_01) \ + VMADD_MII_IR(one,Chimu_32,Chimu_02,Chi_02) \ + VMADD_MII_IR(one,Chimu_20,Chimu_10,Chi_10) \ + VMADD_MII_IR(one,Chimu_21,Chimu_11,Chi_11) \ + VMADD_MII_IR(one,Chimu_22,Chimu_12,Chi_12) \ + ); \ + } + +#define XM_PROJMEM(base) { \ + LOAD_CHIMU(base); \ + asm ( \ + VONE(one) \ + VMADD_II_MIR(one,Chimu_30,Chimu_00,Chi_00) \ + VMADD_II_MIR(one,Chimu_31,Chimu_01,Chi_01) \ + VMADD_II_MIR(one,Chimu_32,Chimu_02,Chi_02) \ + VMADD_II_MIR(one,Chimu_20,Chimu_10,Chi_10) \ + VMADD_II_MIR(one,Chimu_21,Chimu_11,Chi_11) \ + VMADD_II_MIR(one,Chimu_22,Chimu_12,Chi_12) \ + ); \ + } + +// hspin(0)=fspin(0)-fspin(3); +// hspin(1)=fspin(1)+fspin(2); +#define YP_PROJMEM(base) { \ + LOAD_CHIMU(base); \ + asm ( \ + VSUB(Chimu_00,Chimu_00,Chi_30) \ + VSUB(Chimu_01,Chimu_01,Chi_31) \ + VSUB(Chimu_02,Chimu_02,Chi_32) \ + VADD(Chimu_10,Chimu_10,Chi_20) \ + VADD(Chimu_11,Chimu_11,Chi_21) \ + VADD(Chimu_12,Chimu_12,Chi_22) \ + ); \ + } + +#define YM_PROJMEM(base) { \ + LOAD_CHIMU(base); \ + asm ( \ + VADD(Chimu_00,Chimu_00,Chi_30) \ + VADD(Chimu_01,Chimu_01,Chi_31) \ + VADD(Chimu_02,Chimu_02,Chi_32) \ + VSUB(Chimu_10,Chimu_10,Chi_20) \ + VSUB(Chimu_11,Chimu_11,Chi_21) \ + VSUB(Chimu_12,Chimu_12,Chi_22) \ + ); \ + } + + /*Gz + * 0 0 i 0 [0]+-i[2] + * 0 0 0 -i [1]-+i[3] + * -i 0 0 0 + * 0 i 0 0 + */ +#define ZP_PROJMEM(base) { \ + LOAD_CHIMU(base); \ + asm ( \ + VONE(one) \ + VMADD_MII_IR(one,Chimu_20,Chimu_00,Chi_00) \ + VMADD_MII_IR(one,Chimu_21,Chimu_01,Chi_01) \ + VMADD_MII_IR(one,Chimu_22,Chimu_02,Chi_02) \ + VMADD_II_MIR(one,Chimu_30,Chimu_10,Chi_10) \ + VMADD_II_MIR(one,Chimu_31,Chimu_11,Chi_11) \ + VMADD_II_MIR(one,Chimu_32,Chimu_12,Chi_12) \ + ); \ + } + +#define ZM_PROJMEM(base) { \ + 
LOAD_CHIMU(base); \ + asm ( \ + VONE(one) \ + VMADD_II_MIR(one,Chimu_20,Chimu_00,Chi_00) \ + VMADD_II_MIR(one,Chimu_21,Chimu_01,Chi_01) \ + VMADD_II_MIR(one,Chimu_22,Chimu_02,Chi_02) \ + VMADD_MII_IR(one,Chimu_30,Chimu_10,Chi_10) \ + VMADD_MII_IR(one,Chimu_31,Chimu_11,Chi_11) \ + VMADD_MII_IR(one,Chimu_32,Chimu_12,Chi_12) \ + ); \ + } + /*Gt + * 0 0 1 0 [0]+-[2] + * 0 0 0 1 [1]+-[3] + * 1 0 0 0 + * 0 1 0 0 + */ +#define TP_PROJMEM(base) { \ + LOAD_CHIMU(base); \ + asm ( \ + VADD(Chimu_00,Chimu_00,Chi_20) \ + VADD(Chimu_01,Chimu_01,Chi_21) \ + VADD(Chimu_02,Chimu_02,Chi_22) \ + VADD(Chimu_10,Chimu_10,Chi_30) \ + VADD(Chimu_11,Chimu_11,Chi_31) \ + VADD(Chimu_12,Chimu_12,Chi_32) \ + ); \ + } + +#define TM_PROJMEM(base) { \ + LOAD_CHIMU(base); \ + asm ( \ + VSUB(Chimu_00,Chimu_00,Chi_20) \ + VSUB(Chimu_01,Chimu_01,Chi_21) \ + VSUB(Chimu_02,Chimu_02,Chi_22) \ + VSUB(Chimu_10,Chimu_10,Chi_30) \ + VSUB(Chimu_11,Chimu_11,Chi_31) \ + VSUB(Chimu_12,Chimu_12,Chi_32) \ + ); \ + } + +/* + fspin(0)=hspin(0); + fspin(1)=hspin(1); + fspin(2)=timesMinusI(hspin(1)); + fspin(3)=timesMinusI(hspin(0)); + + fspin(0)+=hspin(0); + fspin(1)+=hspin(1); + fspin(2)-=timesI(hspin(1)); + fspin(3)-=timesI(hspin(0)); + */ +#define XP_RECON { \ + asm(\ + VONE(one)\ + VMOV(psi_00,UChi_00) VMOV(psi_01,UChi_01) VMOV(psi_02,UChi_02)\ + VMOV(psi_10,UChi_10) VMOV(psi_11,UChi_11) VMOV(psi_12,UChi_12)\ + VZERO(psi_20) VZERO(psi_21) VZERO(psi_22) \ + VZERO(psi_30) VZERO(psi_31) VZERO(psi_32) \ + VMADD_II_MIR(one,UChi_10,psi_20,psi_20) \ + VMADD_II_MIR(one,UChi_11,psi_21,psi_21) \ + VMADD_II_MIR(one,UChi_12,psi_22,psi_22) \ + VMADD_II_MIR(one,UChi_00,psi_30,psi_30) \ + VMADD_II_MIR(one,UChi_01,psi_31,psi_31) \ + VMADD_II_MIR(one,UChi_02,psi_32,psi_32) \ + ); \ + } + +#define XM_RECON { \ + asm(\ + VONE(one)\ + VMOV(psi_00,UChi_00) VMOV(psi_01,UChi_01) VMOV(psi_02,UChi_02)\ + VMOV(psi_10,UChi_10) VMOV(psi_11,UChi_11) VMOV(psi_12,UChi_12)\ + VZERO(psi_20) VZERO(psi_21) VZERO(psi_22) \ + VZERO(psi_30) VZERO(psi_31) VZERO(psi_32) \ + VMADD_MII_IR(one,UChi_10,psi_20,psi_20) \ + VMADD_MII_IR(one,UChi_11,psi_21,psi_21) \ + VMADD_MII_IR(one,UChi_12,psi_22,psi_22) \ + VMADD_MII_IR(one,UChi_00,psi_30,psi_30) \ + VMADD_MII_IR(one,UChi_01,psi_31,psi_31) \ + VMADD_MII_IR(one,UChi_02,psi_32,psi_32) \ + ); \ + } + +#define XP_RECON_ACCUM { \ + asm(\ + VONE(one)\ + VADD(psi_00,UChi_00,psi_00) VADD(psi_01,UChi_01,psi_01) VADD(psi_02,UChi_02,psi_02) \ + VADD(psi_10,UChi_10,psi_10) VADD(psi_11,UChi_11,psi_11) VADD(psi_12,UChi_12,psi_12) \ + VMADD_II_MIR(one,UChi_10,psi_20,psi_20) \ + VMADD_II_MIR(one,UChi_11,psi_21,psi_21) \ + VMADD_II_MIR(one,UChi_12,psi_22,psi_22) \ + VMADD_II_MIR(one,UChi_00,psi_30,psi_30) \ + VMADD_II_MIR(one,UChi_01,psi_31,psi_31) \ + VMADD_II_MIR(one,UChi_02,psi_32,psi_32) \ + ); \ + } + +#define XM_RECON_ACCUM { \ + asm(\ + VONE(one)\ + VADD(psi_00,UChi_00,psi_00) VADD(psi_01,UChi_01,psi_01) VADD(psi_02,UChi_02,psi_02) \ + VADD(psi_10,UChi_10,psi_10) VADD(psi_11,UChi_11,psi_11) VADD(psi_12,UChi_12,psi_12) \ + VMADD_MII_IR(one,UChi_10,psi_20,psi_20) \ + VMADD_MII_IR(one,UChi_11,psi_21,psi_21) \ + VMADD_MII_IR(one,UChi_12,psi_22,psi_22) \ + VMADD_MII_IR(one,UChi_00,psi_30,psi_30) \ + VMADD_MII_IR(one,UChi_01,psi_31,psi_31) \ + VMADD_MII_IR(one,UChi_02,psi_32,psi_32) \ + ); \ + } + +// fspin(2)+=hspin(1); +// fspin(3)-=hspin(0); +#define YP_RECON_ACCUM {\ + asm(\ + VADD(psi_00,UChi_00,psi_00) VADD(psi_01,UChi_01,psi_01) VADD(psi_02,UChi_02,psi_02) \ + VADD(psi_10,UChi_10,psi_10) VADD(psi_11,UChi_11,psi_11) 
+ VADD(psi_20,UChi_10,psi_20) VADD(psi_21,UChi_11,psi_21) VADD(psi_22,UChi_12,psi_22) \
+ VSUB(psi_30,UChi_00,psi_30) VSUB(psi_31,UChi_01,psi_31) VSUB(psi_32,UChi_02,psi_32) \
+ );\
+ }
+#define YM_RECON_ACCUM {\
+ asm(\
+ VADD(psi_00,UChi_00,psi_00) VADD(psi_01,UChi_01,psi_01) VADD(psi_02,UChi_02,psi_02) \
+ VADD(psi_10,UChi_10,psi_10) VADD(psi_11,UChi_11,psi_11) VADD(psi_12,UChi_12,psi_12) \
+ VSUB(psi_20,UChi_10,psi_20) VSUB(psi_21,UChi_11,psi_21) VSUB(psi_22,UChi_12,psi_22) \
+ VADD(psi_30,UChi_00,psi_30) VADD(psi_31,UChi_01,psi_31) VADD(psi_32,UChi_02,psi_32) \
+ );\
+ }
+
+// fspin(2)-=timesI(hspin(0));
+// fspin(3)+=timesI(hspin(1));
+#define ZP_RECON_ACCUM {\
+ asm(\
+ VONE(one)\
+ VADD(psi_00,UChi_00,psi_00) VADD(psi_01,UChi_01,psi_01) VADD(psi_02,UChi_02,psi_02) \
+ VADD(psi_10,UChi_10,psi_10) VADD(psi_11,UChi_11,psi_11) VADD(psi_12,UChi_12,psi_12) \
+ VMADD_II_MIR(one,UChi_00,psi_20,psi_20) \
+ VMADD_II_MIR(one,UChi_01,psi_21,psi_21) \
+ VMADD_II_MIR(one,UChi_02,psi_22,psi_22) \
+ VMADD_MII_IR(one,UChi_10,psi_30,psi_30) \
+ VMADD_MII_IR(one,UChi_11,psi_31,psi_31) \
+ VMADD_MII_IR(one,UChi_12,psi_32,psi_32) \
+ );\
+ }
+
+#define ZM_RECON_ACCUM {\
+ asm(\
+ VONE(one)\
+ VADD(psi_00,UChi_00,psi_00) VADD(psi_01,UChi_01,psi_01) VADD(psi_02,UChi_02,psi_02) \
+ VADD(psi_10,UChi_10,psi_10) VADD(psi_11,UChi_11,psi_11) VADD(psi_12,UChi_12,psi_12) \
+ VMADD_MII_IR(one,UChi_00,psi_20,psi_20) \
+ VMADD_MII_IR(one,UChi_01,psi_21,psi_21) \
+ VMADD_MII_IR(one,UChi_02,psi_22,psi_22) \
+ VMADD_II_MIR(one,UChi_10,psi_30,psi_30) \
+ VMADD_II_MIR(one,UChi_11,psi_31,psi_31) \
+ VMADD_II_MIR(one,UChi_12,psi_32,psi_32) \
+ );\
+ }
+
+// fspin(2)+=hspin(0);
+// fspin(3)+=hspin(1);
+#define TP_RECON_ACCUM {\
+ asm(\
+ VADD(psi_00,UChi_00,psi_00) VADD(psi_01,UChi_01,psi_01) VADD(psi_02,UChi_02,psi_02) \
+ VADD(psi_10,UChi_10,psi_10) VADD(psi_11,UChi_11,psi_11) VADD(psi_12,UChi_12,psi_12) \
+ VADD(psi_20,UChi_00,psi_20) VADD(psi_21,UChi_01,psi_21) VADD(psi_22,UChi_02,psi_22) \
+ VADD(psi_30,UChi_10,psi_30) VADD(psi_31,UChi_11,psi_31) VADD(psi_32,UChi_12,psi_32) \
+ );\
+ }
+
+#define TM_RECON_ACCUM {\
+ asm(\
+ VADD(psi_00,UChi_00,psi_00) VADD(psi_01,UChi_01,psi_01) VADD(psi_02,UChi_02,psi_02) \
+ VADD(psi_10,UChi_10,psi_10) VADD(psi_11,UChi_11,psi_11) VADD(psi_12,UChi_12,psi_12) \
+ VSUB(psi_20,UChi_00,psi_20) VSUB(psi_21,UChi_01,psi_21) VSUB(psi_22,UChi_02,psi_22) \
+ VSUB(psi_30,UChi_10,psi_30) VSUB(psi_31,UChi_11,psi_31) VSUB(psi_32,UChi_12,psi_32) \
+ );\
+ }
+
+uint64_t GetPFInfo(int nent,int plocal);
+uint64_t GetInfo(int ptype,int local,int perm,int Xp,int ent,int plocal);
+
+#define COMPLEX_TYPE int
+int signs[4];
+
+void testme(int osites,int ssU)
+{
+ int local,perm, ptype;
+ uint64_t base;
+ uint64_t basep;
+ const uint64_t plocal =(uint64_t) & in._odata[0];
+
+ // vComplexF isigns[2] = { signs[0], signs[1] };
+ //COMPLEX_TYPE is vComplexF or vComplexD depending
+ //on the chosen precision
+ COMPLEX_TYPE *isigns = &signs[0];
+
+ MASK_REGS;
+ int nmax=osites;
+ for(int site=0;site=nmax) ssn=0;
+ int sUn=ssn;
+ for(int s=0;s shuffle and xor the real part sign bit
+#ifdef KERNEL_DAG
+ YP_PROJMEM(base);
+#else
+ YM_PROJMEM(base);
+#endif
+ MAYBEPERM(PERMUTE_DIR2,perm);
+ } else {
+ LOAD_CHI(base);
+ }
+ base = GetInfo(ptype,local,perm,Zp,ent,plocal); ent++;
+ PREFETCH_CHIMU(base);
+ {
+ MULT_2SPIN_DIR_PFYP(Yp,basep);
+ }
+ LOAD64(%r10,isigns); // times i => shuffle and xor the real part sign bit
+#ifdef KERNEL_DAG
+ YP_RECON_ACCUM;
+#else
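+ // (editorial note) KERNEL_DAG selects the daggered operator: every +/-
+ // projector and reconstruct pair is swapped, here YP <-> YM.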
YM_RECON_ACCUM; +#endif + + //////////////////////////////// + // Zp + //////////////////////////////// + basep = GetPFInfo(nent,plocal); nent++; + if ( local ) { + LOAD64(%r10,isigns); // times i => shuffle and xor the real part sign bit +#ifdef KERNEL_DAG + ZP_PROJMEM(base); +#else + ZM_PROJMEM(base); +#endif + MAYBEPERM(PERMUTE_DIR1,perm); + } else { + LOAD_CHI(base); + } + base = GetInfo(ptype,local,perm,Tp,ent,plocal); ent++; + PREFETCH_CHIMU(base); + { + MULT_2SPIN_DIR_PFZP(Zp,basep); + } + LOAD64(%r10,isigns); // times i => shuffle and xor the real part sign bit +#ifdef KERNEL_DAG + ZP_RECON_ACCUM; +#else + ZM_RECON_ACCUM; +#endif + + //////////////////////////////// + // Tp + //////////////////////////////// + basep = GetPFInfo(nent,plocal); nent++; + if ( local ) { + LOAD64(%r10,isigns); // times i => shuffle and xor the real part sign bit +#ifdef KERNEL_DAG + TP_PROJMEM(base); +#else + TM_PROJMEM(base); +#endif + MAYBEPERM(PERMUTE_DIR0,perm); + } else { + LOAD_CHI(base); + } + base = GetInfo(ptype,local,perm,Xm,ent,plocal); ent++; + PREFETCH_CHIMU(base); + { + MULT_2SPIN_DIR_PFTP(Tp,basep); + } + LOAD64(%r10,isigns); // times i => shuffle and xor the real part sign bit +#ifdef KERNEL_DAG + TP_RECON_ACCUM; +#else + TM_RECON_ACCUM; +#endif + + //////////////////////////////// + // Xm + //////////////////////////////// +#ifndef STREAM_STORE + basep= (uint64_t) &out._odata[ss]; +#endif + // basep= GetPFInfo(nent,plocal); nent++; + if ( local ) { + LOAD64(%r10,isigns); // times i => shuffle and xor the real part sign bit +#ifdef KERNEL_DAG + XM_PROJMEM(base); +#else + XP_PROJMEM(base); +#endif + MAYBEPERM(PERMUTE_DIR3,perm); + } else { + LOAD_CHI(base); + } + base = GetInfo(ptype,local,perm,Ym,ent,plocal); ent++; + PREFETCH_CHIMU(base); + { + MULT_2SPIN_DIR_PFXM(Xm,basep); + } + LOAD64(%r10,isigns); // times i => shuffle and xor the real part sign bit +#ifdef KERNEL_DAG + XM_RECON_ACCUM; +#else + XP_RECON_ACCUM; +#endif + + //////////////////////////////// + // Ym + //////////////////////////////// + basep= GetPFInfo(nent,plocal); nent++; + if ( local ) { + LOAD64(%r10,isigns); // times i => shuffle and xor the real part sign bit +#ifdef KERNEL_DAG + YM_PROJMEM(base); +#else + YP_PROJMEM(base); +#endif + MAYBEPERM(PERMUTE_DIR2,perm); + } else { + LOAD_CHI(base); + } + base = GetInfo(ptype,local,perm,Zm,ent,plocal); ent++; + PREFETCH_CHIMU(base); + { + MULT_2SPIN_DIR_PFYM(Ym,basep); + } + LOAD64(%r10,isigns); // times i => shuffle and xor the real part sign bit +#ifdef KERNEL_DAG + YM_RECON_ACCUM; +#else + YP_RECON_ACCUM; +#endif + + //////////////////////////////// + // Zm + //////////////////////////////// + basep= GetPFInfo(nent,plocal); nent++; + if ( local ) { + LOAD64(%r10,isigns); // times i => shuffle and xor the real part sign bit +#ifdef KERNEL_DAG + ZM_PROJMEM(base); +#else + ZP_PROJMEM(base); +#endif + MAYBEPERM(PERMUTE_DIR1,perm); + } else { + LOAD_CHI(base); + } + base = GetInfo(ptype,local,perm,Tm,ent,plocal); ent++; + PREFETCH_CHIMU(base); + { + MULT_2SPIN_DIR_PFZM(Zm,basep); + } + LOAD64(%r10,isigns); // times i => shuffle and xor the real part sign bit +#ifdef KERNEL_DAG + ZM_RECON_ACCUM; +#else + ZP_RECON_ACCUM; +#endif + + //////////////////////////////// + // Tm + //////////////////////////////// + basep= GetPFInfo(nent,plocal); nent++; + if ( local ) { + LOAD64(%r10,isigns); // times i => shuffle and xor the real part sign bit +#ifdef KERNEL_DAG + TM_PROJMEM(base); +#else + TP_PROJMEM(base); +#endif + MAYBEPERM(PERMUTE_DIR0,perm); + } else { + LOAD_CHI(base); + } 
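+ // (editorial note) Final leg: 'base' is repointed at the output site below,
+ // so that unless STREAM_STORE bypasses the cache the destination line is
+ // prefetched before the last reconstruct-accumulate and SAVE_RESULT.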
+ base= (uint64_t) &out._odata[ss]; +#ifndef STREAM_STORE + PREFETCH_CHIMU(base); +#endif + { + MULT_2SPIN_DIR_PFTM(Tm,basep); + } + LOAD64(%r10,isigns); // times i => shuffle and xor the real part sign bit +#ifdef KERNEL_DAG + TM_RECON_ACCUM; +#else + TP_RECON_ACCUM; +#endif + + basep= GetPFInfo(nent,plocal); nent++; + SAVE_RESULT(base,basep); + + } + ssU++; + } +} + + +#endif From 960316e20796a1fb3cf9b67f0b4c37dd0bb679ca Mon Sep 17 00:00:00 2001 From: Dr Peter Boyle Date: Thu, 22 Dec 2016 17:27:01 +0000 Subject: [PATCH 016/101] type conversion in printf --- lib/PerfCount.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/PerfCount.h b/lib/PerfCount.h index 749441c5..fca80b8d 100644 --- a/lib/PerfCount.h +++ b/lib/PerfCount.h @@ -172,7 +172,7 @@ public: const char * name = PerformanceCounterConfigs[PCT].name; fd = perf_event_open(&pe, 0, -1, -1, 0); // pid 0, cpu -1 current process any cpu. group -1 if (fd == -1) { - fprintf(stderr, "Error opening leader %llx for event %s\n", pe.config,name); + fprintf(stderr, "Error opening leader %llx for event %s\n",(long long) pe.config,name); perror("Error is"); } int norm = PerformanceCounterConfigs[PCT].normalisation; @@ -181,7 +181,7 @@ public: name = PerformanceCounterConfigs[norm].name; cyclefd = perf_event_open(&pe, 0, -1, -1, 0); // pid 0, cpu -1 current process any cpu. group -1 if (cyclefd == -1) { - fprintf(stderr, "Error opening leader %llx for event %s\n", pe.config,name); + fprintf(stderr, "Error opening leader %llx for event %s\n",(long long) pe.config,name); perror("Error is"); } #endif From 5241245534893885c105166066207ee52509464e Mon Sep 17 00:00:00 2001 From: Peter Boyle Date: Thu, 22 Dec 2016 17:49:21 +0000 Subject: [PATCH 017/101] Default to static scheduling --- lib/Threads.h | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/lib/Threads.h b/lib/Threads.h index 2f072633..de02fe4d 100644 --- a/lib/Threads.h +++ b/lib/Threads.h @@ -37,13 +37,9 @@ Author: paboyle #ifdef GRID_OMP #include -#ifdef GRID_NUMA + #define PARALLEL_FOR_LOOP _Pragma("omp parallel for schedule(static)") #define PARALLEL_FOR_LOOP_INTERN _Pragma("omp for schedule(static)") -#else -#define PARALLEL_FOR_LOOP _Pragma("omp parallel for schedule(runtime)") -#define PARALLEL_FOR_LOOP_INTERN _Pragma("omp for schedule(runtime)") -#endif #define PARALLEL_NESTED_LOOP2 _Pragma("omp parallel for collapse(2)") #define PARALLEL_REGION _Pragma("omp parallel") #define PARALLEL_CRITICAL _Pragma("omp critical") From b8cdb3e90ae933985f7462dfa7077906bb2a1ac9 Mon Sep 17 00:00:00 2001 From: Peter Boyle Date: Thu, 22 Dec 2016 17:50:14 +0000 Subject: [PATCH 018/101] Debug hack; raises from 62GF/s to 72 GF/s per node on BG/Q --- lib/qcd/action/fermion/WilsonFermion5D.cc | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/lib/qcd/action/fermion/WilsonFermion5D.cc b/lib/qcd/action/fermion/WilsonFermion5D.cc index d70c98c3..1c0e0b0e 100644 --- a/lib/qcd/action/fermion/WilsonFermion5D.cc +++ b/lib/qcd/action/fermion/WilsonFermion5D.cc @@ -437,12 +437,25 @@ void WilsonFermion5D::DhopInternal(StencilImpl & st, LebesgueOrder &lo, stat.accum(nthreads); #endif } else { +#if 0 PARALLEL_FOR_LOOP for (int ss = 0; ss < U._grid->oSites(); ss++) { int sU = ss; int sF = LLs * sU; Kernels::DiracOptDhopSite(st,lo,U,st.CommBuf(),sF,sU,LLs,1,in,out); } +#else +#pragma omp parallel + { + for(int i=0;i<10;i++){ + int me, myoff,mywork; + int len = U._grid->oSites(); + GridThread::GetWorkBarrier(len,me, mywork,myoff); + int sF = LLs * myoff; + 
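+ // (editorial note) GetWorkBarrier statically hands thread 'me' a contiguous
+ // block of 'mywork' outer sites starting at 'myoff'; the single kernel call
+ // below then sweeps the whole block (Ns=mywork), so the OpenMP runtime does
+ // no per-site scheduling.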
+ Kernels::DiracOptDhopSite(st,lo,U,st.CommBuf(),sF,myoff,LLs,mywork,in,out);
+ }
+ }
+#endif
 }
 DhopComputeTime+=usecond();
 }

From 7dc36628a13b61e20e9f05deca3c162742f869d2 Mon Sep 17 00:00:00 2001
From: Peter Boyle
Date: Thu, 22 Dec 2016 17:50:48 +0000
Subject: [PATCH 019/101] QPX finishing

---
 lib/simd/Grid_qpx.h | 49 ++++++++++++++++++++++++++++++++++++++++-----
 1 file changed, 44 insertions(+), 5 deletions(-)

diff --git a/lib/simd/Grid_qpx.h b/lib/simd/Grid_qpx.h
index 99a9ea68..e2fe0b63 100644
--- a/lib/simd/Grid_qpx.h
+++ b/lib/simd/Grid_qpx.h
@@ -163,6 +163,22 @@ namespace Optimization {
 /////////////////////////////////////////////////////
 // Arithmetic operations
 /////////////////////////////////////////////////////
+
+ #define FLOAT_WRAP_3(fn, pref)\
+ pref vector4float fn(vector4float a, vector4float b, vector4float c) \
+ {\
+ vector4double ad, bd, rd, cd; \
+ vector4float r;\
+ \
+ ad = Vset()(a);\
+ bd = Vset()(b);\
+ cd = Vset()(c);\
+ rd = fn(ad, bd, cd); \
+ Vstore()(rd, r);\
+ \
+ return r;\
+ }
+
 #define FLOAT_WRAP_2(fn, pref)\
 pref vector4float fn(vector4float a, vector4float b)\
 {\
@@ -228,6 +244,13 @@ namespace Optimization {
 }
 FLOAT_WRAP_2(operator(), inline)
 };
+ struct MaddRealPart{
+ // Complex double
+ inline vector4double operator()(vector4double a, vector4double b,vector4double c){
+ return vec_xmadd(a, b, c);
+ }
+ FLOAT_WRAP_3(operator(), inline)
+ };
 struct MultComplex{
 // Complex double
 inline vector4double operator()(vector4double a, vector4double b){
@@ -323,19 +346,36 @@ namespace Optimization {
 };
 
 struct Rotate{
+
+ template <int n> static inline vector4double tRotate(vector4double v){
+ if ( n==1 ) return vec_perm(v, v, vec_gpci(01230));
+ if ( n==2 ) return vec_perm(v, v, vec_gpci(02301));
+ if ( n==3 ) return vec_perm(v, v, vec_gpci(03012));
+ return v;
+ };
+ template <int n> static inline vector4float tRotate(vector4float a)
+ {
+ vector4double ad, rd;
+ vector4float r;
+ ad = Vset()(a);
+ rd = tRotate<n>(ad);
+ Vstore()(rd, r);
+ return r;
+ };
+
 static inline vector4double rotate(vector4double v, int n){
 switch(n){
 case 0:
 return v;
 break;
 case 1:
- return vec_perm(v, v, vec_gpci(01230));
+ return tRotate<1>(v);
 break;
 case 2:
- return vec_perm(v, v, vec_gpci(02301));
+ return tRotate<2>(v);
 break;
 case 3:
- return vec_perm(v, v, vec_gpci(03012));
+ return tRotate<3>(v);
 break;
 default: assert(0);
 }
@@ -344,11 +384,9 @@ namespace Optimization {
 static inline vector4float rotate(vector4float v, int n){
 vector4double vd, rd;
 vector4float r;
-
 vd = Vset()(v);
 rd = rotate(vd, n);
 Vstore()(rd, r);
-
 return r;
 }
 };
@@ -439,6 +477,7 @@ typedef Optimization::Mult MultSIMD;
 typedef Optimization::Div DivSIMD;
 typedef Optimization::MultComplex MultComplexSIMD;
 typedef Optimization::MultRealPart MultRealPartSIMD;
+typedef Optimization::MaddRealPart MaddRealPartSIMD;
 typedef Optimization::Conj ConjSIMD;
 typedef Optimization::TimesMinusI TimesMinusISIMD;
 typedef Optimization::TimesI TimesISIMD;

From 0903c48caab20729fe3baed65cc485426bdc6363 Mon Sep 17 00:00:00 2001
From: Peter Boyle
Date: Thu, 22 Dec 2016 17:51:45 +0000
Subject: [PATCH 020/101] Hot start SU3

---
 tests/Test_cayley_even_odd_vec.cc | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/tests/Test_cayley_even_odd_vec.cc b/tests/Test_cayley_even_odd_vec.cc
index f8600782..e7f85b4e 100644
--- a/tests/Test_cayley_even_odd_vec.cc
+++ b/tests/Test_cayley_even_odd_vec.cc
@@ -91,7 +91,8 @@ int main (int argc, char ** argv)
 GridParallelRNG sRNG4(sUGrid); sRNG4.SeedFixedIntegers(seeds4);
 GridParallelRNG
sRNG5(sFGrid); sRNG5.SeedFixedIntegers(seeds5); - LatticeGaugeField Umu(UGrid); random(RNG4,Umu); + LatticeGaugeField Umu(UGrid); + SU3::HotConfiguration(RNG4,Umu); RealD mass=0.1; RealD M5 =1.8; From 9ae81c06d2ffd929ec49db8e5d20b9cf35ce0dc6 Mon Sep 17 00:00:00 2001 From: Peter Boyle Date: Thu, 22 Dec 2016 17:52:21 +0000 Subject: [PATCH 021/101] L1p controls for BG/Q --- lib/qcd/action/fermion/WilsonKernels.cc | 45 +++++++++++++++++++++++++ 1 file changed, 45 insertions(+) diff --git a/lib/qcd/action/fermion/WilsonKernels.cc b/lib/qcd/action/fermion/WilsonKernels.cc index 43776c86..392c7029 100644 --- a/lib/qcd/action/fermion/WilsonKernels.cc +++ b/lib/qcd/action/fermion/WilsonKernels.cc @@ -34,6 +34,51 @@ namespace QCD { int WilsonKernelsStatic::Opt; + +#ifdef QPX +#include +#include +#include +#include +#endif + +void bgq_l1p_optimisation(int mode) +{ +#ifdef QPX +#undef L1P_CFG_PF_USR +#define L1P_CFG_PF_USR (0x3fde8000108ll) /* (64 bit reg, 23 bits wide, user/unpriv) */ + + uint64_t cfg_pf_usr; + if ( mode ) { + cfg_pf_usr = + L1P_CFG_PF_USR_ifetch_depth(0) + | L1P_CFG_PF_USR_ifetch_max_footprint(1) + | L1P_CFG_PF_USR_pf_stream_est_on_dcbt + | L1P_CFG_PF_USR_pf_stream_establish_enable + | L1P_CFG_PF_USR_pf_stream_optimistic + | L1P_CFG_PF_USR_pf_adaptive_throttle(0xF) ; + // if ( sizeof(Float) == sizeof(double) ) { + cfg_pf_usr |= L1P_CFG_PF_USR_dfetch_depth(2)| L1P_CFG_PF_USR_dfetch_max_footprint(3) ; + // } else { + // cfg_pf_usr |= L1P_CFG_PF_USR_dfetch_depth(1)| L1P_CFG_PF_USR_dfetch_max_footprint(2) ; + // } + } else { + cfg_pf_usr = L1P_CFG_PF_USR_dfetch_depth(1) + | L1P_CFG_PF_USR_dfetch_max_footprint(2) + | L1P_CFG_PF_USR_ifetch_depth(0) + | L1P_CFG_PF_USR_ifetch_max_footprint(1) + | L1P_CFG_PF_USR_pf_stream_est_on_dcbt + | L1P_CFG_PF_USR_pf_stream_establish_enable + | L1P_CFG_PF_USR_pf_stream_optimistic + | L1P_CFG_PF_USR_pf_stream_prefetch_enable; + } + *((uint64_t *)L1P_CFG_PF_USR) = cfg_pf_usr; + +#endif + +} + + template WilsonKernels::WilsonKernels(const ImplParams &p) : Base(p){}; From caba0d42a557b810dd4985a96973643dd817ab2e Mon Sep 17 00:00:00 2001 From: Peter Boyle Date: Thu, 22 Dec 2016 17:52:55 +0000 Subject: [PATCH 022/101] L1p controls --- lib/qcd/action/fermion/WilsonKernels.h | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/lib/qcd/action/fermion/WilsonKernels.h b/lib/qcd/action/fermion/WilsonKernels.h index 47da2b14..c859b33d 100644 --- a/lib/qcd/action/fermion/WilsonKernels.h +++ b/lib/qcd/action/fermion/WilsonKernels.h @@ -34,6 +34,8 @@ directory namespace Grid { namespace QCD { +void bgq_l1p_optimisation(int mode); + //////////////////////////////////////////////////////////////////////////////////////////////////////////////// // Helper routines that implement Wilson stencil for a single site. 
// Common to both the WilsonFermion and WilsonFermion5D @@ -58,8 +60,9 @@ public: DiracOptDhopSite(StencilImpl &st, LebesgueOrder &lo, DoubledGaugeField &U, SiteHalfSpinor * buf, int sF, int sU, int Ls, int Ns, const FermionField &in, FermionField &out) { + bgq_l1p_optimisation(1); switch(Opt) { -#ifdef AVX512 +#if defined(AVX512) || defined (QPX) case OptInlineAsm: WilsonKernels::DiracOptAsmDhopSite(st,lo,U,buf,sF,sU,Ls,Ns,in,out); break; @@ -85,6 +88,7 @@ public: default: assert(0); } + bgq_l1p_optimisation(0); } template @@ -106,8 +110,9 @@ public: DiracOptDhopSiteDag(StencilImpl &st, LebesgueOrder &lo, DoubledGaugeField &U, SiteHalfSpinor * buf, int sF, int sU, int Ls, int Ns, const FermionField &in, FermionField &out) { + bgq_l1p_optimisation(1); switch(Opt) { -#ifdef AVX512 +#if defined(AVX512) || defined (QPX) case OptInlineAsm: WilsonKernels::DiracOptAsmDhopSiteDag(st,lo,U,buf,sF,sU,Ls,Ns,in,out); break; @@ -133,6 +138,7 @@ public: default: assert(0); } + bgq_l1p_optimisation(0); } template From 04ae7929a39795cfd623c3b47e855da5c4f53fed Mon Sep 17 00:00:00 2001 From: Peter Boyle Date: Thu, 22 Dec 2016 17:53:22 +0000 Subject: [PATCH 023/101] BGQ or KNL assembler now --- lib/qcd/action/fermion/WilsonKernelsAsm.cc | 132 +-------------------- 1 file changed, 2 insertions(+), 130 deletions(-) diff --git a/lib/qcd/action/fermion/WilsonKernelsAsm.cc b/lib/qcd/action/fermion/WilsonKernelsAsm.cc index d7a9edd3..ab805f4f 100644 --- a/lib/qcd/action/fermion/WilsonKernelsAsm.cc +++ b/lib/qcd/action/fermion/WilsonKernelsAsm.cc @@ -53,136 +53,8 @@ WilsonKernels::DiracOptAsmDhopSiteDag(StencilImpl &st,LebesgueOrder & lo, assert(0); } -#if defined(AVX512) -#include - - /////////////////////////////////////////////////////////// - // If we are AVX512 specialise the single precision routine - /////////////////////////////////////////////////////////// - -#include - -static Vector signsF; - - template - int setupSigns(Vector& signs ){ - Vector bother(2); - signs = bother; - vrsign(signs[0]); - visign(signs[1]); - return 1; - } - - static int signInitF = setupSigns(signsF); - -#define label(A) ilabel(A) -#define ilabel(A) ".globl\n" #A ":\n" - -#define MAYBEPERM(A,perm) if (perm) { A ; } -#define MULT_2SPIN(ptr,pf) MULT_ADDSUB_2SPIN(ptr,pf) -#define FX(A) WILSONASM_ ##A -#define COMPLEX_TYPE vComplexF -#define signs signsF - -#undef KERNEL_DAG -template<> void -WilsonKernels::DiracOptAsmDhopSite(StencilImpl &st,LebesgueOrder & lo,DoubledGaugeField &U, SiteHalfSpinor *buf, - int ss,int ssU,int Ls,int Ns,const FermionField &in, FermionField &out) -#include - -#define KERNEL_DAG -template<> void -WilsonKernels::DiracOptAsmDhopSiteDag(StencilImpl &st,LebesgueOrder & lo,DoubledGaugeField &U,SiteHalfSpinor *buf, - int ss,int ssU,int Ls,int Ns,const FermionField &in, FermionField &out) -#include - -#undef VMOVIDUP -#undef VMOVRDUP -#undef MAYBEPERM -#undef MULT_2SPIN -#undef FX -#define FX(A) DWFASM_ ## A -#define MAYBEPERM(A,B) -//#define VMOVIDUP(A,B,C) VBCASTIDUPf(A,B,C) -//#define VMOVRDUP(A,B,C) VBCASTRDUPf(A,B,C) -#define MULT_2SPIN(ptr,pf) MULT_ADDSUB_2SPIN_LS(ptr,pf) - -#undef KERNEL_DAG -template<> void -WilsonKernels::DiracOptAsmDhopSite(StencilImpl &st,LebesgueOrder & lo,DoubledGaugeField &U, SiteHalfSpinor *buf, - int ss,int ssU,int Ls,int Ns,const FermionField &in, FermionField &out) -#include - -#define KERNEL_DAG -template<> void -WilsonKernels::DiracOptAsmDhopSiteDag(StencilImpl &st,LebesgueOrder & lo,DoubledGaugeField &U,SiteHalfSpinor *buf, - int ss,int ssU,int Ls,int Ns,const FermionField 
&in, FermionField &out) -#include -#undef COMPLEX_TYPE -#undef signs -#undef VMOVRDUP -#undef MAYBEPERM -#undef MULT_2SPIN -#undef FX - -/////////////////////////////////////////////////////////// -// If we are AVX512 specialise the double precision routine -/////////////////////////////////////////////////////////// - -#include - -static Vector signsD; -#define signs signsD -static int signInitD = setupSigns(signsD); - -#define MAYBEPERM(A,perm) if (perm) { A ; } -#define MULT_2SPIN(ptr,pf) MULT_ADDSUB_2SPIN(ptr,pf) -#define FX(A) WILSONASM_ ##A -#define COMPLEX_TYPE vComplexD - -#undef KERNEL_DAG -template<> void -WilsonKernels::DiracOptAsmDhopSite(StencilImpl &st,LebesgueOrder & lo,DoubledGaugeField &U, SiteHalfSpinor *buf, - int ss,int ssU,int Ls,int Ns,const FermionField &in, FermionField &out) -#include - -#define KERNEL_DAG -template<> void -WilsonKernels::DiracOptAsmDhopSiteDag(StencilImpl &st,LebesgueOrder & lo,DoubledGaugeField &U,SiteHalfSpinor *buf, - int ss,int ssU,int Ls,int Ns,const FermionField &in, FermionField &out) -#include - -#undef VMOVIDUP -#undef VMOVRDUP -#undef MAYBEPERM -#undef MULT_2SPIN -#undef FX -#define FX(A) DWFASM_ ## A -#define MAYBEPERM(A,B) -//#define VMOVIDUP(A,B,C) VBCASTIDUPd(A,B,C) -//#define VMOVRDUP(A,B,C) VBCASTRDUPd(A,B,C) -#define MULT_2SPIN(ptr,pf) MULT_ADDSUB_2SPIN_LS(ptr,pf) - -#undef KERNEL_DAG -template<> void -WilsonKernels::DiracOptAsmDhopSite(StencilImpl &st,LebesgueOrder & lo,DoubledGaugeField &U, SiteHalfSpinor *buf, - int ss,int ssU,int Ls,int Ns,const FermionField &in, FermionField &out) -#include - -#define KERNEL_DAG -template<> void -WilsonKernels::DiracOptAsmDhopSiteDag(StencilImpl &st,LebesgueOrder & lo,DoubledGaugeField &U,SiteHalfSpinor *buf, - int ss,int ssU,int Ls,int Ns,const FermionField &in, FermionField &out) -#include - -#undef COMPLEX_TYPE -#undef signs -#undef VMOVRDUP -#undef MAYBEPERM -#undef MULT_2SPIN -#undef FX - -#endif //AVX512 +#include +#include #define INSTANTIATE_ASM(A)\ template void WilsonKernels::DiracOptAsmDhopSite(StencilImpl &st,LebesgueOrder & lo,DoubledGaugeField &U, SiteHalfSpinor *buf,\ From eabf316ed915f738a75516284072800e1af67d4f Mon Sep 17 00:00:00 2001 From: Peter Boyle Date: Thu, 22 Dec 2016 21:56:08 +0000 Subject: [PATCH 024/101] BGQ performance ASM --- .../action/fermion/WilsonKernelsAsmAvx512.h | 162 +++++ lib/qcd/action/fermion/WilsonKernelsAsmBody.h | 8 +- lib/qcd/action/fermion/WilsonKernelsAsmQPX.h | 146 +++++ lib/simd/IBM_qpx.h | 619 ++++++++++++++++++ lib/simd/IBM_qpx_double.h | 46 ++ lib/simd/IBM_qpx_single.h | 46 ++ 6 files changed, 1025 insertions(+), 2 deletions(-) create mode 100644 lib/qcd/action/fermion/WilsonKernelsAsmAvx512.h create mode 100644 lib/qcd/action/fermion/WilsonKernelsAsmQPX.h create mode 100644 lib/simd/IBM_qpx.h create mode 100644 lib/simd/IBM_qpx_double.h create mode 100644 lib/simd/IBM_qpx_single.h diff --git a/lib/qcd/action/fermion/WilsonKernelsAsmAvx512.h b/lib/qcd/action/fermion/WilsonKernelsAsmAvx512.h new file mode 100644 index 00000000..7b5b9803 --- /dev/null +++ b/lib/qcd/action/fermion/WilsonKernelsAsmAvx512.h @@ -0,0 +1,162 @@ +/************************************************************************************* + + Grid physics library, www.github.com/paboyle/Grid + + + + Source file: ./lib/qcd/action/fermion/WilsonKernelsAsmAvx512.h + + Copyright (C) 2015 + +Author: Peter Boyle +Author: paboyle + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the 
Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License along + with this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + + See the full license in the file "LICENSE" in the top level distribution directory +*************************************************************************************/ +/* END LEGAL */ + + +#if defined(AVX512) + /////////////////////////////////////////////////////////// + // If we are AVX512 specialise the single precision routine + /////////////////////////////////////////////////////////// +#include +#include + +static Vector signsF; + + template + int setupSigns(Vector& signs ){ + Vector bother(2); + signs = bother; + vrsign(signs[0]); + visign(signs[1]); + return 1; + } + + static int signInitF = setupSigns(signsF); +#define MAYBEPERM(A,perm) if (perm) { A ; } +#define MULT_2SPIN(ptr,pf) MULT_ADDSUB_2SPIN(ptr,pf) +#define COMPLEX_SIGNS(isigns) vComplexF *isigns = &signsF[0]; + +///////////////////////////////////////////////////////////////// +// XYZT vectorised, undag Kernel, single +///////////////////////////////////////////////////////////////// +#undef KERNEL_DAG +template<> void +WilsonKernels::DiracOptAsmDhopSite(StencilImpl &st,LebesgueOrder & lo,DoubledGaugeField &U, SiteHalfSpinor *buf, + int ss,int ssU,int Ls,int Ns,const FermionField &in, FermionField &out) +#include + +///////////////////////////////////////////////////////////////// +// XYZT vectorised, dag Kernel, single +///////////////////////////////////////////////////////////////// +#define KERNEL_DAG +template<> void +WilsonKernels::DiracOptAsmDhopSiteDag(StencilImpl &st,LebesgueOrder & lo,DoubledGaugeField &U,SiteHalfSpinor *buf, + int ss,int ssU,int Ls,int Ns,const FermionField &in, FermionField &out) +#include + +#undef MAYBEPERM +#undef MULT_2SPIN +#define MAYBEPERM(A,B) +#define MULT_2SPIN(ptr,pf) MULT_ADDSUB_2SPIN_LS(ptr,pf) + +///////////////////////////////////////////////////////////////// +// Ls vectorised, undag Kernel, single +///////////////////////////////////////////////////////////////// +#undef KERNEL_DAG +template<> void +WilsonKernels::DiracOptAsmDhopSite(StencilImpl &st,LebesgueOrder & lo,DoubledGaugeField &U, SiteHalfSpinor *buf, + int ss,int ssU,int Ls,int Ns,const FermionField &in, FermionField &out) +#include + +///////////////////////////////////////////////////////////////// +// Ls vectorised, dag Kernel, single +///////////////////////////////////////////////////////////////// +#define KERNEL_DAG +template<> void +WilsonKernels::DiracOptAsmDhopSiteDag(StencilImpl &st,LebesgueOrder & lo,DoubledGaugeField &U,SiteHalfSpinor *buf, + int ss,int ssU,int Ls,int Ns,const FermionField &in, FermionField &out) +#include +#undef COMPLEX_SIGNS +#undef MAYBEPERM +#undef MULT_2SPIN + +/////////////////////////////////////////////////////////// +// If we are AVX512 specialise the double precision routine +/////////////////////////////////////////////////////////// + +#include + +static Vector signsD; +static int signInitD = setupSigns(signsD); + +#define MAYBEPERM(A,perm) if (perm) { A ; } +#define MULT_2SPIN(ptr,pf) MULT_ADDSUB_2SPIN(ptr,pf) +#define 
COMPLEX_SIGNS(isigns) vComplexD *isigns = &signsD[0]; + +///////////////////////////////////////////////////////////////// +// XYZT Vectorised, undag Kernel, double +///////////////////////////////////////////////////////////////// +#undef KERNEL_DAG +template<> void +WilsonKernels::DiracOptAsmDhopSite(StencilImpl &st,LebesgueOrder & lo,DoubledGaugeField &U, SiteHalfSpinor *buf, + int ss,int ssU,int Ls,int Ns,const FermionField &in, FermionField &out) +#include +///////////////////////////////////////////////////////////////// + + +///////////////////////////////////////////////////////////////// +// XYZT Vectorised, dag Kernel, double +///////////////////////////////////////////////////////////////// +#define KERNEL_DAG +template<> void +WilsonKernels::DiracOptAsmDhopSiteDag(StencilImpl &st,LebesgueOrder & lo,DoubledGaugeField &U,SiteHalfSpinor *buf, + int ss,int ssU,int Ls,int Ns,const FermionField &in, FermionField &out) +#include +///////////////////////////////////////////////////////////////// + +#undef MAYBEPERM +#undef MULT_2SPIN +#define MAYBEPERM(A,B) +#define MULT_2SPIN(ptr,pf) MULT_ADDSUB_2SPIN_LS(ptr,pf) +///////////////////////////////////////////////////////////////// +// Ls vectorised, undag Kernel, double +///////////////////////////////////////////////////////////////// +#undef KERNEL_DAG +template<> void +WilsonKernels::DiracOptAsmDhopSite(StencilImpl &st,LebesgueOrder & lo,DoubledGaugeField &U, SiteHalfSpinor *buf, + int ss,int ssU,int Ls,int Ns,const FermionField &in, FermionField &out) +#include +///////////////////////////////////////////////////////////////// + +///////////////////////////////////////////////////////////////// +// Ls vectorised, dag Kernel, double +///////////////////////////////////////////////////////////////// +#define KERNEL_DAG +template<> void +WilsonKernels::DiracOptAsmDhopSiteDag(StencilImpl &st,LebesgueOrder & lo,DoubledGaugeField &U,SiteHalfSpinor *buf, + int ss,int ssU,int Ls,int Ns,const FermionField &in, FermionField &out) +#include +///////////////////////////////////////////////////////////////// + +#undef COMPLEX_SIGNS +#undef MAYBEPERM +#undef MULT_2SPIN + +#endif //AVX512 diff --git a/lib/qcd/action/fermion/WilsonKernelsAsmBody.h b/lib/qcd/action/fermion/WilsonKernelsAsmBody.h index 72e13754..8ec68997 100644 --- a/lib/qcd/action/fermion/WilsonKernelsAsmBody.h +++ b/lib/qcd/action/fermion/WilsonKernelsAsmBody.h @@ -7,12 +7,15 @@ // vComplexF isigns[2] = { signs[0], signs[1] }; //COMPLEX_TYPE is vComplexF of vComplexD depending //on the chosen precision - COMPLEX_TYPE *isigns = &signs[0]; - + COMPLEX_SIGNS(isigns); MASK_REGS; int nmax=U._grid->oSites(); for(int site=0;site=nmax) ssn=0; int sUn=lo.Reorder(ssn); @@ -251,5 +254,6 @@ } ssU++; + UNLOCK_GAUGE(0); } } diff --git a/lib/qcd/action/fermion/WilsonKernelsAsmQPX.h b/lib/qcd/action/fermion/WilsonKernelsAsmQPX.h new file mode 100644 index 00000000..947538ca --- /dev/null +++ b/lib/qcd/action/fermion/WilsonKernelsAsmQPX.h @@ -0,0 +1,146 @@ +/************************************************************************************* + + Grid physics library, www.github.com/paboyle/Grid + + + + Source file: ./lib/qcd/action/fermion/WilsonKernelsAsmQPX.h + + Copyright (C) 2015 + +Author: Peter Boyle +Author: paboyle + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. 
+ + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License along + with this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + + See the full license in the file "LICENSE" in the top level distribution directory +*************************************************************************************/ +/* END LEGAL */ + + +#if defined(QPX) + + /////////////////////////////////////////////////////////// + // If we are QPX specialise the single precision routine + /////////////////////////////////////////////////////////// + +#include +#include + +#define MAYBEPERM(A,perm) if (perm) { A ; } +#define MULT_2SPIN(ptr,pf) MULT_2SPIN_QPX(ptr,pf) +#define COMPLEX_SIGNS(isigns) + +///////////////////////////////////////////////////////////////// +// XYZT vectorised, undag Kernel, single +///////////////////////////////////////////////////////////////// +#undef KERNEL_DAG +template<> void +WilsonKernels::DiracOptAsmDhopSite(StencilImpl &st,LebesgueOrder & lo,DoubledGaugeField &U, SiteHalfSpinor *buf, + int ss,int ssU,int Ls,int Ns,const FermionField &in, FermionField &out) +#include + +///////////////////////////////////////////////////////////////// +// XYZT vectorised, dag Kernel, single +///////////////////////////////////////////////////////////////// +#define KERNEL_DAG +template<> void +WilsonKernels::DiracOptAsmDhopSiteDag(StencilImpl &st,LebesgueOrder & lo,DoubledGaugeField &U,SiteHalfSpinor *buf, + int ss,int ssU,int Ls,int Ns,const FermionField &in, FermionField &out) +#include + +#undef MAYBEPERM +#undef MULT_2SPIN +#define MAYBEPERM(A,B) +#define MULT_2SPIN(ptr,pf) MULT_2SPIN_QPX_LS(ptr,pf) + +///////////////////////////////////////////////////////////////// +// Ls vectorised, undag Kernel, single +///////////////////////////////////////////////////////////////// +#undef KERNEL_DAG +template<> void +WilsonKernels::DiracOptAsmDhopSite(StencilImpl &st,LebesgueOrder & lo,DoubledGaugeField &U, SiteHalfSpinor *buf, + int ss,int ssU,int Ls,int Ns,const FermionField &in, FermionField &out) +#include + +///////////////////////////////////////////////////////////////// +// Ls vectorised, dag Kernel, single +///////////////////////////////////////////////////////////////// +#define KERNEL_DAG +template<> void +WilsonKernels::DiracOptAsmDhopSiteDag(StencilImpl &st,LebesgueOrder & lo,DoubledGaugeField &U,SiteHalfSpinor *buf, + int ss,int ssU,int Ls,int Ns,const FermionField &in, FermionField &out) +#include +#undef MAYBEPERM +#undef MULT_2SPIN + +/////////////////////////////////////////////////////////// +// DP routines +/////////////////////////////////////////////////////////// + +#include + +#define MAYBEPERM(A,perm) if (perm) { A ; } +#define MULT_2SPIN(ptr,pf) MULT_2SPIN_QPX(ptr,pf) + +///////////////////////////////////////////////////////////////// +// XYZT Vectorised, undag Kernel, double +///////////////////////////////////////////////////////////////// +#undef KERNEL_DAG +template<> void +WilsonKernels::DiracOptAsmDhopSite(StencilImpl &st,LebesgueOrder & lo,DoubledGaugeField &U, SiteHalfSpinor *buf, + int ss,int ssU,int Ls,int Ns,const FermionField &in, FermionField &out) +#include +///////////////////////////////////////////////////////////////// + + 
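+// (Editorial note) Each specialisation here is generated by re-including the
+// shared WilsonKernelsAsmBody header: defining KERNEL_DAG before the include
+// flips every projector/reconstruct pair inside the body, so the same source
+// produces both the Dhop and DhopDag kernels.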
+///////////////////////////////////////////////////////////////// +// XYZT Vectorised, dag Kernel, double +///////////////////////////////////////////////////////////////// +#define KERNEL_DAG +template<> void +WilsonKernels::DiracOptAsmDhopSiteDag(StencilImpl &st,LebesgueOrder & lo,DoubledGaugeField &U,SiteHalfSpinor *buf, + int ss,int ssU,int Ls,int Ns,const FermionField &in, FermionField &out) +#include +///////////////////////////////////////////////////////////////// + +#undef MAYBEPERM +#undef MULT_2SPIN +#define MAYBEPERM(A,B) +#define MULT_2SPIN(ptr,pf) MULT_2SPIN_QPX_LS(ptr,pf) +///////////////////////////////////////////////////////////////// +// Ls vectorised, undag Kernel, double +///////////////////////////////////////////////////////////////// +#undef KERNEL_DAG +template<> void +WilsonKernels::DiracOptAsmDhopSite(StencilImpl &st,LebesgueOrder & lo,DoubledGaugeField &U, SiteHalfSpinor *buf, + int ss,int ssU,int Ls,int Ns,const FermionField &in, FermionField &out) +#include +///////////////////////////////////////////////////////////////// + +///////////////////////////////////////////////////////////////// +// Ls vectorised, dag Kernel, double +///////////////////////////////////////////////////////////////// +#define KERNEL_DAG +template<> void +WilsonKernels::DiracOptAsmDhopSiteDag(StencilImpl &st,LebesgueOrder & lo,DoubledGaugeField &U,SiteHalfSpinor *buf, + int ss,int ssU,int Ls,int Ns,const FermionField &in, FermionField &out) +#include +///////////////////////////////////////////////////////////////// + +#undef MAYBEPERM +#undef MULT_2SPIN + +#endif diff --git a/lib/simd/IBM_qpx.h b/lib/simd/IBM_qpx.h new file mode 100644 index 00000000..187991c8 --- /dev/null +++ b/lib/simd/IBM_qpx.h @@ -0,0 +1,619 @@ + /************************************************************************************* + + Grid physics library, www.github.com/paboyle/Grid + + Source file: ./lib/simd/BGQQPX.h + + Copyright (C) 2015 + +Author: paboyle + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License along + with this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
+ + See the full license in the file "LICENSE" in the top level distribution directory + *************************************************************************************/ + /* END LEGAL */ +#ifndef GRID_ASM_BGQ_QPX_H +#define GRID_ASM_BGQ_QPX_H + +#include + +/********************************************************* + * Register definitions + *********************************************************/ +#define psi_00 0 +#define psi_01 1 +#define psi_02 2 + +#define psi_10 3 +#define psi_11 4 +#define psi_12 5 + +#define psi_20 6 +#define psi_21 7 +#define psi_22 8 + +#define psi_30 9 +#define psi_31 10 +#define psi_32 11 + +#define Chi_00 12 +#define Chi_01 13 +#define Chi_02 14 + +#define Chi_10 15 +#define Chi_11 16 +#define Chi_12 17 + +#define UChi_00 18 +#define UChi_01 19 +#define UChi_02 20 + +#define UChi_10 21 +#define UChi_11 22 +#define UChi_12 23 + +#define U0 24 +#define U1 25 +#define U2 26 +#define one 27 +#define perm_reg 28 + +#define REP %%r16 +#define IMM %%r17 +#define pREP %r16 +#define pIMM %r17 + +#define PPC_INST_DCBTLS 0x7c00014c +#define PPC_INST_DCBLC 0x7c00030c +#define __PPC_CT(t) (((t) & 0x0f) << 21) +#define ___PPC_RA(a) (((a) & 0x1f) << 16) +#define ___PPC_RB(b) (((b) & 0x1f) << 11) + +#define LOCK_SET ".long (" HASH(PPC_INST_DCBTLS) "|" HASH(___PPC_RB(16)) ")\n" +#define LOCK_CLEAR ".long (" HASH(PPC_INST_DCBLC) "|" HASH(___PPC_RB(16)) ")\n" + +/*Alias regs for incoming fourspinor on neighbour site*/ +#define Chi_20 UChi_00 +#define Chi_21 UChi_01 +#define Chi_22 UChi_02 +#define Chi_30 UChi_10 +#define Chi_31 UChi_11 +#define Chi_32 UChi_12 + +/********************************************************* + * Architectural macros + *********************************************************/ +#define HASHit(A) #A +#define HASH(A) HASHit(A) +#define LOAD64(A,ptr) + + +#define MASK_REGS /*NOOP ON BGQ*/ +#define PF_GAUGE(A) /*NOOP ON BGQ*/ +#define PREFETCH1_CHIMU(base) /*NOOP ON BGQ*/ +#define PREFETCH_CHIMU(base) /*NOOP ON BGQ*/ + +#define VLOADf(OFF,PTR,DEST) "qvlfsx " #DEST "," #PTR "," #OFF " ;\n" +#define VLOADuf(OFF,PTR,DEST) "qvlfsux " #DEST "," #PTR "," #OFF " ;\n" +#define VSTOREf(OFF,PTR,SRC) "qvstfsx " #SRC "," #PTR "," #OFF " ;\n" +#define VSTOREuf(OFF,PTR,SRC) "qvstfsux " #SRC "," #PTR "," #OFF " ;\n" +#define VSPLATf(A,B,DEST) "qvlfcsxa " #DEST "," #A "," #B ";\n" +#define VSIZEf (16) + +#define VPERMIi(p) "qvgpci " #p ", 1217;\n" +#define VPERMi(A,p) "qvfperm " #A "," #A "," #A "," #p ";\n" +#define VPERMI(p) VPERMIi(p) +#define VPERM(A,p) VPERMi(A,p) + +#define VLOADd(OFF,PTR,DEST) "qvlfdx " #DEST "," #PTR "," #OFF " ;\n" +#define VLOADud(OFF,PTR,DEST) "qvlfdux " #DEST "," #PTR "," #OFF " ;\n" +#define VSTOREd(OFF,PTR,SRC) "qvstfdx " #SRC "," #PTR "," #OFF " ;\n" +#define VSTOREud(OFF,PTR,SRC) "qvstfdux " #SRC "," #PTR "," #OFF " ;\n" +#define VSPLATd(A,B,DEST) "qvlfcdxa " #DEST "," #A "," #B ";\n" +#define VSIZEd (32) + +// QPX manual ordering QRT comes first (dest) +#define VZEROi(DEST) "qvfset " #DEST "; \n qvfsub " #DEST "," #DEST "," #DEST ";\n" +#define VONEi(DEST) "qvfset " #DEST "; \n" +#define VMOVi(DEST,A) "qvfmr " #DEST "," #A ";\n" +#define VADDi(DEST,A,B) "qvfadd " #DEST "," #A "," #B ";\n" +#define VSUBi(DEST,A,B) "qvfsub " #DEST "," #A "," #B ";\n" +#define VMULi(DEST,A,B) "qvfmul " #DEST "," #A "," #B ";\n" +#define VMUL_RR_RIi(DEST,A,B) "qvfxmul " #DEST "," #A "," #B ";\n" +#define VMADDi(DEST,A,B,C) "qvfmadd " #DEST "," #A "," #B ","#C ";\n" +#define VMADD_RR_RIi(DEST,A,B,C) "qvfxmadd " #DEST "," #A "," #B ","#C ";\n" 
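+// (Editorial note) Naming gloss: the _RR_RI form uses qvfxmul/qvfxmadd to
+// multiply the real part of A into both halves of B, while the _MII_IR and
+// _II_MIR forms below use the cross multiply-adds (qvfxxnpmadd/qvfxxcpnmadd)
+// to accumulate +/- i times an operand; one of each composes a full complex
+// multiply-accumulate.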
+#define VMADD_MII_IRi(DEST,A,B,C) "qvfxxnpmadd " #DEST "," #B "," #A ","#C ";\n" +#define VMADD_II_MIRi(DEST,A,B,C) "qvfxxcpnmadd " #DEST "," #B "," #A ","#C ";\n" + +#define VZERO(C) VZEROi(C) +#define VONE(C) VONEi(C) +#define VMOV(C,A) VMOVi(C,A) +#define VADD(A,B,C) VADDi(A,B,C) +#define VSUB(A,B,C) VSUBi(A,B,C) +#define VMUL(A,B,C) VMULi(A,B,C) +#define VMUL_RR_RI(A,B,C) VMUL_RR_RIi(A,B,C) +#define VMADD(A,B,C,D) VMADDi(A,B,C,D) +#define VMADD_RR_RI(A,B,C,D) VMADD_RR_RIi(A,B,C,D) +#define VMADD_MII_IR(A,B,C,D) VMADD_MII_IRi(A,B,C,D) +#define VMADD_II_MIR(A,B,C,D) VMADD_II_MIRi(A,B,C,D) + +/********************************************************* + * Macro sequences encoding QCD + *********************************************************/ +#define LOCK_GAUGEa(dir) +#define LOCK_GAUGE(dir) \ + { \ + uint64_t byte_addr = (uint64_t)&U._odata[sU]; \ + int count = (sizeof(U._odata[0])+63)/64; \ + asm (" mtctr %0 \n" \ + " mr " HASH(REP) ", %1\n" \ + " li " HASH(IMM) ", 64\n" \ + "0:\n" \ + LOCK_SET \ + " add " HASH(REP) "," HASH(IMM) "," HASH(REP) "\n" \ + " bdnz 0b\n" \ + : : "b" (count), "b" (byte_addr) ); \ + } + +#define UNLOCK_GAUGEa(dir) + +#define UNLOCK_GAUGE(dir) \ + { \ + uint64_t byte_addr = (uint64_t)&U._odata[sU]; \ + int count = (sizeof(U._odata[0])+63)/64; \ + asm (" mtctr %0 \n" \ + " mr " HASH(REP) ", %1\n" \ + " li " HASH(IMM) ", 64\n" \ + "0:\n" \ + LOCK_CLEAR \ + " add " HASH(REP) "," HASH(IMM) "," HASH(REP) "\n" \ + " bdnz 0b\n" \ + : : "b" (count), "b" (byte_addr) ); \ + } + +#define MULT_2SPIN_QPX_LSd(ptr,p) MULT_2SPIN_QPX_INTERNAL(ptr,p,VSPLAT,16) +#define MULT_2SPIN_QPX_LSf(ptr,p) MULT_2SPIN_QPX_INTERNAL(ptr,p,VSPLAT,8) +#define MULT_2SPIN_QPXd(ptr,p) MULT_2SPIN_QPX_INTERNAL(ptr,p,VLOAD,32) +#define MULT_2SPIN_QPXf(ptr,p) MULT_2SPIN_QPX_INTERNAL(ptr,p,VLOAD,16) + +#define MULT_2SPIN_QPX_INTERNALa(ptr,p,ULOAD,USKIP) { \ + asm (VMOV(UChi_00,Chi_00) \ + VMOV(UChi_01,Chi_01) \ + VMOV(UChi_02,Chi_02) \ + VMOV(UChi_10,Chi_10) \ + VMOV(UChi_11,Chi_11) \ + VMOV(UChi_12,Chi_12) ); \ + } + +#define MULT_2SPIN_QPX_INTERNAL(ptr,p,ULOAD,USKIP) { \ + uint64_t ub = ((uint64_t)ptr); \ + asm ( \ + ULOAD(%0,%3,U0) \ + ULOAD(%1,%3,U1) \ + ULOAD(%2,%3,U2) \ + VMUL_RR_RI(UChi_00,U0,Chi_00) \ + VMUL_RR_RI(UChi_01,U1,Chi_00) \ + VMUL_RR_RI(UChi_02,U2,Chi_00) \ + VMUL_RR_RI(UChi_10,U0,Chi_10) \ + VMUL_RR_RI(UChi_11,U1,Chi_10) \ + VMUL_RR_RI(UChi_12,U2,Chi_10) \ + VMADD_MII_IR(UChi_00,U0,Chi_00,UChi_00) \ + VMADD_MII_IR(UChi_01,U1,Chi_00,UChi_01) \ + VMADD_MII_IR(UChi_02,U2,Chi_00,UChi_02) \ + VMADD_MII_IR(UChi_10,U0,Chi_10,UChi_10) \ + VMADD_MII_IR(UChi_11,U1,Chi_10,UChi_11) \ + VMADD_MII_IR(UChi_12,U2,Chi_10,UChi_12) \ + : : "b" (0), "b" (USKIP*3), "b" (USKIP*6), "b" (ub )); \ + asm ( \ + ULOAD(%0,%3,U0) \ + ULOAD(%1,%3,U1) \ + ULOAD(%2,%3,U2) \ + VMADD_RR_RI(UChi_00,U0,Chi_01,UChi_00) \ + VMADD_RR_RI(UChi_01,U1,Chi_01,UChi_01) \ + VMADD_RR_RI(UChi_02,U2,Chi_01,UChi_02) \ + VMADD_RR_RI(UChi_10,U0,Chi_11,UChi_10) \ + VMADD_RR_RI(UChi_11,U1,Chi_11,UChi_11) \ + VMADD_RR_RI(UChi_12,U2,Chi_11,UChi_12) \ + VMADD_MII_IR(UChi_00,U0,Chi_01,UChi_00) \ + VMADD_MII_IR(UChi_01,U1,Chi_01,UChi_01) \ + VMADD_MII_IR(UChi_02,U2,Chi_01,UChi_02) \ + VMADD_MII_IR(UChi_10,U0,Chi_11,UChi_10) \ + VMADD_MII_IR(UChi_11,U1,Chi_11,UChi_11) \ + VMADD_MII_IR(UChi_12,U2,Chi_11,UChi_12) \ + : : "b" (USKIP*1), "b" (USKIP*4), "b" (USKIP*7), "b" (ub )); \ + asm ( \ + ULOAD(%0,%3,U0) \ + ULOAD(%1,%3,U1) \ + ULOAD(%2,%3,U2) \ + VMADD_RR_RI(UChi_00,U0,Chi_02,UChi_00) \ + VMADD_RR_RI(UChi_01,U1,Chi_02,UChi_01) \ + 
VMADD_RR_RI(UChi_02,U2,Chi_02,UChi_02) \ + VMADD_RR_RI(UChi_10,U0,Chi_12,UChi_10) \ + VMADD_RR_RI(UChi_11,U1,Chi_12,UChi_11) \ + VMADD_RR_RI(UChi_12,U2,Chi_12,UChi_12) \ + VMADD_MII_IR(UChi_00,U0,Chi_02,UChi_00) \ + VMADD_MII_IR(UChi_01,U1,Chi_02,UChi_01) \ + VMADD_MII_IR(UChi_02,U2,Chi_02,UChi_02) \ + VMADD_MII_IR(UChi_10,U0,Chi_12,UChi_10) \ + VMADD_MII_IR(UChi_11,U1,Chi_12,UChi_11) \ + VMADD_MII_IR(UChi_12,U2,Chi_12,UChi_12) \ + : : "b" (USKIP*2), "b" (USKIP*5), "b" (USKIP*8), "b" (ub )); \ + } + +#define MULT_2SPIN_DIR_PFXP(A,p) MULT_2SPIN(&U._odata[sU](A),p) +#define MULT_2SPIN_DIR_PFYP(A,p) MULT_2SPIN(&U._odata[sU](A),p) +#define MULT_2SPIN_DIR_PFZP(A,p) MULT_2SPIN(&U._odata[sU](A),p) +#define MULT_2SPIN_DIR_PFTP(A,p) MULT_2SPIN(&U._odata[sU](A),p) +#define MULT_2SPIN_DIR_PFXM(A,p) MULT_2SPIN(&U._odata[sU](A),p) +#define MULT_2SPIN_DIR_PFYM(A,p) MULT_2SPIN(&U._odata[sU](A),p) +#define MULT_2SPIN_DIR_PFZM(A,p) MULT_2SPIN(&U._odata[sU](A),p) +#define MULT_2SPIN_DIR_PFTM(A,p) MULT_2SPIN(&U._odata[sU](A),p) + +#define SAVE_RESULT(base,basep) {\ + uint64_t ub = ((uint64_t)base) - (VSIZE); \ + asm("mr " HASH(REP) ", %0;\n" \ + "li " HASH(IMM) "," HASH(VSIZE)" ;\n" \ + VSTOREu(IMM,REP,psi_00) \ + VSTOREu(IMM,REP,psi_01) \ + VSTOREu(IMM,REP,psi_02) \ + VSTOREu(IMM,REP,psi_10) \ + VSTOREu(IMM,REP,psi_11) \ + VSTOREu(IMM,REP,psi_12) \ + VSTOREu(IMM,REP,psi_20) \ + VSTOREu(IMM,REP,psi_21) \ + VSTOREu(IMM,REP,psi_22) \ + VSTOREu(IMM,REP,psi_30) \ + VSTOREu(IMM,REP,psi_31) \ + VSTOREu(IMM,REP,psi_32) \ + : : "b" (ub) : HASH(pIMM), HASH(pREP) ); \ + } + +/* + *Annoying BG/Q loads with no immediat indexing and big performance hit + *when second miss to a L1 line occurs + */ +#define LOAD_CHI(base) { \ + uint64_t ub = ((uint64_t)base) - (2*VSIZE); \ + asm("mr " HASH(REP) ",%0 ;\n" \ + "li " HASH(IMM) ",(2*" HASH(VSIZE) ");\n" \ + VLOADu(IMM,REP,Chi_00) \ + VLOADu(IMM,REP,Chi_02) \ + VLOADu(IMM,REP,Chi_11) : : "b" (ub) : HASH(pIMM), HASH(pREP) ); \ + ub = ((uint64_t)base) - VSIZE; \ + asm("mr " HASH(REP) ", %0;\n" \ + "li " HASH(IMM) ",(2*" HASH(VSIZE) ");\n" \ + VLOADu(IMM,REP,Chi_01) \ + VLOADu(IMM,REP,Chi_10) \ + VLOADu(IMM,REP,Chi_12) : : "b" (ub) : HASH(pIMM), HASH(pREP) ); \ + } + +#define LOAD_CHIa(base) { \ + uint64_t ub = ((uint64_t)base) - (VSIZE); \ + asm("mr " HASH(REP) ",%0 ;\n" \ + "li " HASH(IMM) "," HASH(VSIZE) ";\n" \ + VLOADu(IMM,REP,Chi_00) \ + VLOADu(IMM,REP,Chi_01) \ + VLOADu(IMM,REP,Chi_02) \ + VLOADu(IMM,REP,Chi_10) \ + VLOADu(IMM,REP,Chi_11) \ + VLOADu(IMM,REP,Chi_12) : : "b" (ub) : HASH(pIMM), HASH(pREP) ); \ + } + +#define LOAD_CHIMUa(base) { \ + uint64_t ub = ((uint64_t)base) - (VSIZE); \ + asm("mr " HASH(REP) ",%0 ;\n" \ + "li " HASH(IMM) "," HASH(VSIZE) ";\n" \ + VLOADu(IMM,REP,Chi_00) \ + VLOADu(IMM,REP,Chi_01) \ + VLOADu(IMM,REP,Chi_02) \ + VLOADu(IMM,REP,Chi_10) \ + VLOADu(IMM,REP,Chi_11) \ + VLOADu(IMM,REP,Chi_12) \ + VLOADu(IMM,REP,Chi_20) \ + VLOADu(IMM,REP,Chi_21) \ + VLOADu(IMM,REP,Chi_22) \ + VLOADu(IMM,REP,Chi_30) \ + VLOADu(IMM,REP,Chi_31) \ + VLOADu(IMM,REP,Chi_32) : : "b" (ub) : HASH(pIMM), HASH(pREP) ); \ + } + +#define LOAD_CHIMU(base) { \ + uint64_t ub = ((uint64_t)base) - (2*VSIZE); \ + asm("mr " HASH(REP) ",%0;\n" \ + "li " HASH(IMM) ",(2*" HASH(VSIZE) ");\n" \ + VLOADu(IMM,REP,Chi_00) \ + VLOADu(IMM,REP,Chi_02) \ + VLOADu(IMM,REP,Chi_11) \ + VLOADu(IMM,REP,Chi_20) \ + VLOADu(IMM,REP,Chi_22) \ + VLOADu(IMM,REP,Chi_31) : : "b" (ub) : HASH(pIMM), HASH(pREP) ); \ + ub = ((uint64_t)base) - VSIZE; \ + asm("mr " HASH(REP) ", %0;\n" \ + "li " HASH(IMM) ", (2*" 
HASH(VSIZE) ");\n" \ + VLOADu(IMM,REP,Chi_01) \ + VLOADu(IMM,REP,Chi_10) \ + VLOADu(IMM,REP,Chi_12) \ + VLOADu(IMM,REP,Chi_21) \ + VLOADu(IMM,REP,Chi_30) \ + VLOADu(IMM,REP,Chi_32) : : "b" (ub) : HASH(pIMM), HASH(pREP) ); \ + } + +// hspin(0)=fspin(0)+timesI(fspin(3)); +// hspin(1)=fspin(1)+timesI(fspin(2)); +#define XP_PROJMEM(base) { \ + LOAD_CHIMU(base); \ + asm ( \ + VONE(one) \ + VMADD_MII_IR(Chi_00,one,Chi_30,Chi_00) \ + VMADD_MII_IR(Chi_01,one,Chi_31,Chi_01) \ + VMADD_MII_IR(Chi_02,one,Chi_32,Chi_02) \ + VMADD_MII_IR(Chi_10,one,Chi_20,Chi_10) \ + VMADD_MII_IR(Chi_11,one,Chi_21,Chi_11) \ + VMADD_MII_IR(Chi_12,one,Chi_22,Chi_12) \ + ); \ + } + +#define XM_PROJMEM(base) { \ + LOAD_CHIMU(base); \ + asm ( \ + VONE(one) \ + VMADD_II_MIR(Chi_00,one,Chi_30,Chi_00) \ + VMADD_II_MIR(Chi_01,one,Chi_31,Chi_01) \ + VMADD_II_MIR(Chi_02,one,Chi_32,Chi_02) \ + VMADD_II_MIR(Chi_10,one,Chi_20,Chi_10) \ + VMADD_II_MIR(Chi_11,one,Chi_21,Chi_11) \ + VMADD_II_MIR(Chi_12,one,Chi_22,Chi_12) \ + ); \ + } + +// hspin(0)=fspin(0)-fspin(3); +// hspin(1)=fspin(1)+fspin(2); +#define YP_PROJMEM(base) { \ + LOAD_CHIMU(base); \ + asm ( \ + VSUB(Chi_00,Chi_00,Chi_30) \ + VSUB(Chi_01,Chi_01,Chi_31) \ + VSUB(Chi_02,Chi_02,Chi_32) \ + VADD(Chi_10,Chi_10,Chi_20) \ + VADD(Chi_11,Chi_11,Chi_21) \ + VADD(Chi_12,Chi_12,Chi_22) \ + ); \ + } + +#define YM_PROJMEM(base) { \ + LOAD_CHIMU(base); \ + asm ( \ + VADD(Chi_00,Chi_00,Chi_30) \ + VADD(Chi_01,Chi_01,Chi_31) \ + VADD(Chi_02,Chi_02,Chi_32) \ + VSUB(Chi_10,Chi_10,Chi_20) \ + VSUB(Chi_11,Chi_11,Chi_21) \ + VSUB(Chi_12,Chi_12,Chi_22) ); \ + } + + /*Gz + * 0 0 i 0 [0]+-i[2] + * 0 0 0 -i [1]-+i[3] + * -i 0 0 0 + * 0 i 0 0 + */ +#define ZP_PROJMEM(base) { \ + LOAD_CHIMU(base); \ + asm ( \ + VONE(one) \ + VMADD_MII_IR(Chi_00,one,Chi_20,Chi_00) \ + VMADD_MII_IR(Chi_01,one,Chi_21,Chi_01) \ + VMADD_MII_IR(Chi_02,one,Chi_22,Chi_02) \ + VMADD_II_MIR(Chi_10,one,Chi_30,Chi_10) \ + VMADD_II_MIR(Chi_11,one,Chi_31,Chi_11) \ + VMADD_II_MIR(Chi_12,one,Chi_32,Chi_12) \ + ); \ + } + +#define ZM_PROJMEM(base) { \ + LOAD_CHIMU(base); \ + asm ( \ + VONE(one) \ + VMADD_II_MIR(Chi_00,one,Chi_20,Chi_00) \ + VMADD_II_MIR(Chi_01,one,Chi_21,Chi_01) \ + VMADD_II_MIR(Chi_02,one,Chi_22,Chi_02) \ + VMADD_MII_IR(Chi_10,one,Chi_30,Chi_10) \ + VMADD_MII_IR(Chi_11,one,Chi_31,Chi_11) \ + VMADD_MII_IR(Chi_12,one,Chi_32,Chi_12) \ + ); \ + } + /*Gt + * 0 0 1 0 [0]+-[2] + * 0 0 0 1 [1]+-[3] + * 1 0 0 0 + * 0 1 0 0 + */ +#define TP_PROJMEM(base) { \ + LOAD_CHIMU(base); \ + asm ( \ + VADD(Chi_00,Chi_00,Chi_20) \ + VADD(Chi_01,Chi_01,Chi_21) \ + VADD(Chi_02,Chi_02,Chi_22) \ + VADD(Chi_10,Chi_10,Chi_30) \ + VADD(Chi_11,Chi_11,Chi_31) \ + VADD(Chi_12,Chi_12,Chi_32) \ + ); \ + } + +#define TM_PROJMEM(base) { \ + LOAD_CHIMU(base); \ + asm ( \ + VSUB(Chi_00,Chi_00,Chi_20) \ + VSUB(Chi_01,Chi_01,Chi_21) \ + VSUB(Chi_02,Chi_02,Chi_22) \ + VSUB(Chi_10,Chi_10,Chi_30) \ + VSUB(Chi_11,Chi_11,Chi_31) \ + VSUB(Chi_12,Chi_12,Chi_32) \ + ); \ + } + +/* + fspin(0)=hspin(0); + fspin(1)=hspin(1); + fspin(2)=timesMinusI(hspin(1)); + fspin(3)=timesMinusI(hspin(0)); + + fspin(0)+=hspin(0); + fspin(1)+=hspin(1); + fspin(2)-=timesI(hspin(1)); + fspin(3)-=timesI(hspin(0)); + */ +#define XP_RECON { \ + asm(\ + VONE(one)\ + VMOV(psi_00,UChi_00) VMOV(psi_01,UChi_01) VMOV(psi_02,UChi_02)\ + VMOV(psi_10,UChi_10) VMOV(psi_11,UChi_11) VMOV(psi_12,UChi_12)\ + VZERO(psi_20) VZERO(psi_21) VZERO(psi_22) \ + VZERO(psi_30) VZERO(psi_31) VZERO(psi_32) \ + VMADD_II_MIR(psi_20,one,UChi_10,psi_20) \ + VMADD_II_MIR(psi_21,one,UChi_11,psi_21) \ + 
VMADD_II_MIR(psi_22,one,UChi_12,psi_22) \ + VMADD_II_MIR(psi_30,one,UChi_00,psi_30) \ + VMADD_II_MIR(psi_31,one,UChi_01,psi_31) \ + VMADD_II_MIR(psi_32,one,UChi_02,psi_32) \ + ); \ + } + +#define XM_RECON { \ + asm(\ + VONE(one)\ + VMOV(psi_00,UChi_00) VMOV(psi_01,UChi_01) VMOV(psi_02,UChi_02)\ + VMOV(psi_10,UChi_10) VMOV(psi_11,UChi_11) VMOV(psi_12,UChi_12)\ + VZERO(psi_20) VZERO(psi_21) VZERO(psi_22) \ + VZERO(psi_30) VZERO(psi_31) VZERO(psi_32) \ + VMADD_MII_IR(psi_20,one,UChi_10,psi_20) \ + VMADD_MII_IR(psi_21,one,UChi_11,psi_21) \ + VMADD_MII_IR(psi_22,one,UChi_12,psi_22) \ + VMADD_MII_IR(psi_30,one,UChi_00,psi_30) \ + VMADD_MII_IR(psi_31,one,UChi_01,psi_31) \ + VMADD_MII_IR(psi_32,one,UChi_02,psi_32) \ + ); \ + } + +#define XP_RECON_ACCUM { \ + asm(\ + VONE(one)\ + VADD(psi_00,psi_00,UChi_00) VADD(psi_01,psi_01,UChi_01) VADD(psi_02,psi_02,UChi_02) \ + VADD(psi_10,psi_10,UChi_10) VADD(psi_11,psi_11,UChi_11) VADD(psi_12,psi_12,UChi_12) \ + VMADD_II_MIR(psi_20,one,UChi_10,psi_20) \ + VMADD_II_MIR(psi_21,one,UChi_11,psi_21) \ + VMADD_II_MIR(psi_22,one,UChi_12,psi_22) \ + VMADD_II_MIR(psi_30,one,UChi_00,psi_30) \ + VMADD_II_MIR(psi_31,one,UChi_01,psi_31) \ + VMADD_II_MIR(psi_32,one,UChi_02,psi_32) \ + ); \ + } + +#define XM_RECON_ACCUM { \ + asm(\ + VONE(one)\ + VADD(psi_00,psi_00,UChi_00) VADD(psi_01,psi_01,UChi_01) VADD(psi_02,psi_02,UChi_02) \ + VADD(psi_10,psi_10,UChi_10) VADD(psi_11,psi_11,UChi_11) VADD(psi_12,psi_12,UChi_12) \ + VMADD_MII_IR(psi_20,one,UChi_10,psi_20) \ + VMADD_MII_IR(psi_21,one,UChi_11,psi_21) \ + VMADD_MII_IR(psi_22,one,UChi_12,psi_22) \ + VMADD_MII_IR(psi_30,one,UChi_00,psi_30) \ + VMADD_MII_IR(psi_31,one,UChi_01,psi_31) \ + VMADD_MII_IR(psi_32,one,UChi_02,psi_32) \ + ); \ + } + +// fspin(2)+=hspin(1); +// fspin(3)-=hspin(0); +#define YP_RECON_ACCUM {\ + asm(\ + VADD(psi_00,psi_00,UChi_00) VADD(psi_01,psi_01,UChi_01) VADD(psi_02,psi_02,UChi_02) \ + VADD(psi_10,psi_10,UChi_10) VADD(psi_11,psi_11,UChi_11) VADD(psi_12,psi_12,UChi_12) \ + VADD(psi_20,psi_20,UChi_10) VADD(psi_21,psi_21,UChi_11) VADD(psi_22,psi_22,UChi_12) \ + VSUB(psi_30,psi_30,UChi_00) VSUB(psi_31,psi_31,UChi_01) VSUB(psi_32,psi_32,UChi_02) \ + );\ + } +#define YM_RECON_ACCUM {\ + asm(\ + VADD(psi_00,psi_00,UChi_00) VADD(psi_01,psi_01,UChi_01) VADD(psi_02,psi_02,UChi_02) \ + VADD(psi_10,psi_10,UChi_10) VADD(psi_11,psi_11,UChi_11) VADD(psi_12,psi_12,UChi_12) \ + VSUB(psi_20,psi_20,UChi_10) VSUB(psi_21,psi_21,UChi_11) VSUB(psi_22,psi_22,UChi_12) \ + VADD(psi_30,psi_30,UChi_00) VADD(psi_31,psi_31,UChi_01) VADD(psi_32,psi_32,UChi_02) \ + );\ + } + +// fspin(2)-=timesI(hspin(0)); +// fspin(3)+=timesI(hspin(1)); +#define ZP_RECON_ACCUM {\ + asm(\ + VONE(one)\ + VADD(psi_00,psi_00,UChi_00) VADD(psi_01,psi_01,UChi_01) VADD(psi_02,psi_02,UChi_02) \ + VADD(psi_10,psi_10,UChi_10) VADD(psi_11,psi_11,UChi_11) VADD(psi_12,psi_12,UChi_12) \ + VMADD_II_MIR(psi_20,one,UChi_00,psi_20) \ + VMADD_II_MIR(psi_21,one,UChi_01,psi_21) \ + VMADD_II_MIR(psi_22,one,UChi_02,psi_22) \ + VMADD_MII_IR(psi_30,one,UChi_10,psi_30) \ + VMADD_MII_IR(psi_31,one,UChi_11,psi_31) \ + VMADD_MII_IR(psi_32,one,UChi_12,psi_32) \ + );\ + } + +#define ZM_RECON_ACCUM {\ + asm(\ + VONE(one)\ + VADD(psi_00,psi_00,UChi_00) VADD(psi_01,psi_01,UChi_01) VADD(psi_02,psi_02,UChi_02) \ + VADD(psi_10,psi_10,UChi_10) VADD(psi_11,psi_11,UChi_11) VADD(psi_12,psi_12,UChi_12) \ + VMADD_MII_IR(psi_20,one,UChi_00,psi_20) \ + VMADD_MII_IR(psi_21,one,UChi_01,psi_21) \ + VMADD_MII_IR(psi_22,one,UChi_02,psi_22) \ + VMADD_II_MIR(psi_30,one,UChi_10,psi_30) \ + 
VMADD_II_MIR(psi_31,one,UChi_11,psi_31) \ + VMADD_II_MIR(psi_32,one,UChi_12,psi_32) \ + );\ + } + +// fspin(2)+=hspin(0); +// fspin(3)+=hspin(1); +#define TP_RECON_ACCUM {\ + asm(\ + VADD(psi_00,psi_00,UChi_00) VADD(psi_01,psi_01,UChi_01) VADD(psi_02,psi_02,UChi_02) \ + VADD(psi_10,psi_10,UChi_10) VADD(psi_11,psi_11,UChi_11) VADD(psi_12,psi_12,UChi_12) \ + VADD(psi_20,psi_20,UChi_00) VADD(psi_21,psi_21,UChi_01) VADD(psi_22,psi_22,UChi_02) \ + VADD(psi_30,psi_30,UChi_10) VADD(psi_31,psi_31,UChi_11) VADD(psi_32,psi_32,UChi_12) \ + );\ + } + +#define TM_RECON_ACCUM {\ + asm(\ + VADD(psi_00,psi_00,UChi_00) VADD(psi_01,psi_01,UChi_01) VADD(psi_02,psi_02,UChi_02) \ + VADD(psi_10,psi_10,UChi_10) VADD(psi_11,psi_11,UChi_11) VADD(psi_12,psi_12,UChi_12) \ + VSUB(psi_20,psi_20,UChi_00) VSUB(psi_21,psi_21,UChi_01) VSUB(psi_22,psi_22,UChi_02) \ + VSUB(psi_30,psi_30,UChi_10) VSUB(psi_31,psi_31,UChi_11) VSUB(psi_32,psi_32,UChi_12) \ + );\ + } + +#define PERMUTE_DIR3 +#define PERMUTE_DIR2 +#define PERMUTE_DIR1 + +#define PERMUTE_DIR0 { \ + asm( \ + VPERMI(perm_reg) \ + VPERM(Chi_00,perm_reg) VPERM(Chi_01,perm_reg) VPERM(Chi_02,perm_reg) \ + VPERM(Chi_10,perm_reg) VPERM(Chi_11,perm_reg) VPERM(Chi_12,perm_reg) ); \ + } + +#endif diff --git a/lib/simd/IBM_qpx_double.h b/lib/simd/IBM_qpx_double.h new file mode 100644 index 00000000..60709102 --- /dev/null +++ b/lib/simd/IBM_qpx_double.h @@ -0,0 +1,46 @@ + /************************************************************************************* + + Grid physics library, www.github.com/paboyle/Grid + + Source file: ./lib/simd/Avx512Asm.h + + Copyright (C) 2015 + +Author: paboyle + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License along + with this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
+ + See the full license in the file "LICENSE" in the top level distribution directory + *************************************************************************************/ + /* END LEGAL */ +// No guard; ok multi-include +#undef VSIZE +#undef VLOAD +#undef VLOADu +#undef VSPLAT +#undef VSTORE +#undef VSTOREu +#undef MULT_2SPIN_QPX_LS +#undef MULT_2SPIN_QPX + +#define VSIZE VSIZEd +#define VLOAD(A,B,C) VLOADd(A,B,C) +#define VLOADu(A,B,C) VLOADud(A,B,C) +#define VSPLAT(A,B,DEST) VSPLATd(A,B,DEST) +#define VSTORE(A,B,C) VSTOREd(A,B,C) +#define VSTOREu(A,B,C) VSTOREud(A,B,C) +#define MULT_2SPIN_QPX_LS(ptr,p) MULT_2SPIN_QPX_LSd(ptr,p) +#define MULT_2SPIN_QPX(ptr,p) MULT_2SPIN_QPXd(ptr,p) + diff --git a/lib/simd/IBM_qpx_single.h b/lib/simd/IBM_qpx_single.h new file mode 100644 index 00000000..ab903ea7 --- /dev/null +++ b/lib/simd/IBM_qpx_single.h @@ -0,0 +1,46 @@ + /************************************************************************************* + + Grid physics library, www.github.com/paboyle/Grid + + Source file: ./lib/simd/Avx512Asm.h + + Copyright (C) 2015 + +Author: paboyle + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License along + with this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
+ + See the full license in the file "LICENSE" in the top level distribution directory + *************************************************************************************/ + /* END LEGAL */ +// No guard; ok multi-include +#undef VSIZE +#undef VLOAD +#undef VLOADu +#undef VSPLAT +#undef VSTORE +#undef VSTOREu +#undef MULT_2SPIN_QPX_LS +#undef MULT_2SPIN_QPX + +#define VSIZE VSIZEf +#define VLOAD(A,B,C) VLOADf(A,B,C) +#define VLOADu(A,B,C) VLOADuf(A,B,C) +#define VSPLAT(A,B,DEST) VSPLATf(A,B,DEST) +#define VSTORE(A,B,C) VSTOREf(A,B,C) +#define VSTOREu(A,B,C) VSTOREuf(A,B,C) +#define MULT_2SPIN_QPX_LS(ptr,p) MULT_2SPIN_QPX_LSf(ptr,p) +#define MULT_2SPIN_QPX(ptr,p) MULT_2SPIN_QPXf(ptr,p) + From 25efefc5b448eabc3ffef3c7f7434b026a4e30ca Mon Sep 17 00:00:00 2001 From: Peter Boyle Date: Fri, 23 Dec 2016 09:49:04 +0000 Subject: [PATCH 025/101] Back to original thread policy post test --- lib/qcd/action/fermion/WilsonFermion5D.cc | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/lib/qcd/action/fermion/WilsonFermion5D.cc b/lib/qcd/action/fermion/WilsonFermion5D.cc index 1c0e0b0e..ab4ba9c0 100644 --- a/lib/qcd/action/fermion/WilsonFermion5D.cc +++ b/lib/qcd/action/fermion/WilsonFermion5D.cc @@ -437,7 +437,7 @@ void WilsonFermion5D::DhopInternal(StencilImpl & st, LebesgueOrder &lo, stat.accum(nthreads); #endif } else { -#if 0 +#if 1 PARALLEL_FOR_LOOP for (int ss = 0; ss < U._grid->oSites(); ss++) { int sU = ss; @@ -447,13 +447,11 @@ void WilsonFermion5D::DhopInternal(StencilImpl & st, LebesgueOrder &lo, #else #pragma omp parallel { - for(int i=0;i<10;i++){ - int me, myoff,mywork; int len = U._grid->oSites(); + int me, myoff,mywork; GridThread::GetWorkBarrier(len,me, mywork,myoff); int sF = LLs * myoff; Kernels::DiracOptDhopSite(st,lo,U,st.CommBuf(),sF,myoff,LLs,mywork,in,out); - } } #endif } From 3d21297bbbb8c46029a0aba04ed0db8cfd4f7969 Mon Sep 17 00:00:00 2001 From: Peter Boyle Date: Tue, 27 Dec 2016 11:23:13 +0000 Subject: [PATCH 026/101] Call the fast path compressor for wilson kernels to avoid if else on projector --- lib/Stencil.h | 3 - lib/qcd/action/fermion/WilsonCompressor.h | 114 ++++++++++------------ lib/qcd/action/fermion/WilsonFermion5D.cc | 4 +- 3 files changed, 56 insertions(+), 65 deletions(-) diff --git a/lib/Stencil.h b/lib/Stencil.h index 89533b82..96b40c50 100644 --- a/lib/Stencil.h +++ b/lib/Stencil.h @@ -678,12 +678,9 @@ PARALLEL_FOR_LOOP calls++; Mergers.resize(0); Packets.resize(0); - _grid->StencilBarrier(); HaloGather(source,compress); this->CommunicateBegin(reqs); - _grid->StencilBarrier(); this->CommunicateComplete(reqs); - _grid->StencilBarrier(); CommsMerge(); // spins } diff --git a/lib/qcd/action/fermion/WilsonCompressor.h b/lib/qcd/action/fermion/WilsonCompressor.h index 41f24e1b..5b29c103 100644 --- a/lib/qcd/action/fermion/WilsonCompressor.h +++ b/lib/qcd/action/fermion/WilsonCompressor.h @@ -171,6 +171,8 @@ namespace QCD { class WilsonStencil : public CartesianStencil { public: + typedef CartesianCommunicator::CommsRequest_t CommsRequest_t; + WilsonStencil(GridBase *grid, int npoints, int checkerboard, @@ -178,79 +180,71 @@ namespace QCD { const std::vector &distances) : CartesianStencil (grid,npoints,checkerboard,directions,distances) { }; - template < class compressor> - std::thread HaloExchangeOptBegin(const Lattice &source,compressor &compress) { - this->Mergers.resize(0); - this->Packets.resize(0); - this->HaloGatherOpt(source,compress); - return std::thread([&] { this->Communicate(); }); - } template < class compressor> void 
HaloExchangeOpt(const Lattice &source,compressor &compress) { - auto thr = this->HaloExchangeOptBegin(source,compress); - this->HaloExchangeOptComplete(thr); + std::vector > reqs; + this->Mergers.resize(0); + this->Packets.resize(0); + this->HaloGatherOpt(source,compress); + this->CommunicateBegin(reqs); + this->CommunicateComplete(reqs); + this->CommsMerge(); // spins + this->calls++; } - void HaloExchangeOptComplete(std::thread &thr) - { - this->CommsMerge(); // spins - this->jointime-=usecond(); - thr.join(); - this->jointime+=usecond(); - } template < class compressor> void HaloGatherOpt(const Lattice &source,compressor &compress) { - // conformable(source._grid,_grid); - assert(source._grid==this->_grid); - this->halogtime-=usecond(); + int face_idx=0; - assert (this->comm_buf.size() == this->_unified_buffer_size ); - this->u_comm_offset=0; + // conformable(source._grid,_grid); + assert(source._grid==this->_grid); + this->halogtime-=usecond(); + + this->u_comm_offset=0; + + int dag = compress.dag; + + WilsonXpCompressor XpCompress; + WilsonYpCompressor YpCompress; + WilsonZpCompressor ZpCompress; + WilsonTpCompressor TpCompress; + WilsonXmCompressor XmCompress; + WilsonYmCompressor YmCompress; + WilsonZmCompressor ZmCompress; + WilsonTmCompressor TmCompress; - int dag = compress.dag; - static std::vector dirs(Nd*2); - for(int mu=0;mu XpCompress; - this->HaloGatherDir(source,XpCompress,dirs[0]); - - WilsonYpCompressor YpCompress; - this->HaloGatherDir(source,YpCompress,dirs[1]); - - WilsonZpCompressor ZpCompress; - this->HaloGatherDir(source,ZpCompress,dirs[2]); - - WilsonTpCompressor TpCompress; - this->HaloGatherDir(source,TpCompress,dirs[3]); - - WilsonXmCompressor XmCompress; - this->HaloGatherDir(source,XmCompress,dirs[4]); - - WilsonYmCompressor YmCompress; - this->HaloGatherDir(source,YmCompress,dirs[5]); - - WilsonZmCompressor ZmCompress; - this->HaloGatherDir(source,ZmCompress,dirs[6]); - - WilsonTmCompressor TmCompress; - this->HaloGatherDir(source,TmCompress,dirs[7]); - - assert(this->u_comm_offset==this->_unified_buffer_size); - this->halogtime+=usecond(); + // Gather all comms buffers + // for(int point = 0 ; point < _npoints; point++) { + // compress.Point(point); + // HaloGatherDir(source,compress,point,face_idx); + // } + if ( dag ) { + this->HaloGatherDir(source,XpCompress,Xp,face_idx); + this->HaloGatherDir(source,YpCompress,Yp,face_idx); + this->HaloGatherDir(source,ZpCompress,Zp,face_idx); + this->HaloGatherDir(source,TpCompress,Tp,face_idx); + this->HaloGatherDir(source,XmCompress,Xm,face_idx); + this->HaloGatherDir(source,YmCompress,Ym,face_idx); + this->HaloGatherDir(source,ZmCompress,Zm,face_idx); + this->HaloGatherDir(source,TmCompress,Tm,face_idx); + } else { + this->HaloGatherDir(source,XmCompress,Xp,face_idx); + this->HaloGatherDir(source,YmCompress,Yp,face_idx); + this->HaloGatherDir(source,ZmCompress,Zp,face_idx); + this->HaloGatherDir(source,TmCompress,Tp,face_idx); + this->HaloGatherDir(source,XpCompress,Xm,face_idx); + this->HaloGatherDir(source,YpCompress,Ym,face_idx); + this->HaloGatherDir(source,ZpCompress,Zm,face_idx); + this->HaloGatherDir(source,TpCompress,Tm,face_idx); } + this->face_table_computed=1; + assert(this->u_comm_offset==this->_unified_buffer_size); + this->halogtime+=usecond(); + } }; diff --git a/lib/qcd/action/fermion/WilsonFermion5D.cc b/lib/qcd/action/fermion/WilsonFermion5D.cc index ab4ba9c0..d0acf006 100644 --- a/lib/qcd/action/fermion/WilsonFermion5D.cc +++ b/lib/qcd/action/fermion/WilsonFermion5D.cc @@ -403,7 +403,7 @@ void 
WilsonFermion5D::DhopInternal(StencilImpl & st, LebesgueOrder &lo, int LLs = in._grid->_rdimensions[0]; DhopCommTime-=usecond(); - st.HaloExchange(in,compressor); + st.HaloExchangeOpt(in,compressor); DhopCommTime+=usecond(); DhopComputeTime-=usecond(); @@ -437,7 +437,7 @@ void WilsonFermion5D::DhopInternal(StencilImpl & st, LebesgueOrder &lo, stat.accum(nthreads); #endif } else { -#if 1 +#if 0 PARALLEL_FOR_LOOP for (int ss = 0; ss < U._grid->oSites(); ss++) { int sU = ss; From 1caa3fbc2df88b1fa667a1bb5515424a0e96aac1 Mon Sep 17 00:00:00 2001 From: Peter Boyle Date: Tue, 27 Dec 2016 11:24:45 +0000 Subject: [PATCH 027/101] LOCK UNLOCK only --- lib/simd/Intel512wilson.h | 2 ++ 1 file changed, 2 insertions(+) diff --git a/lib/simd/Intel512wilson.h b/lib/simd/Intel512wilson.h index 6d0d6f63..3ca0b648 100644 --- a/lib/simd/Intel512wilson.h +++ b/lib/simd/Intel512wilson.h @@ -98,6 +98,8 @@ Author: paboyle // a little as some duplication developed during trying different // variants during optimisation. Could cut back to only those used. ////////////////////////////////////////////////////////////////// +#define LOCK_GAUGE(dir) +#define UNLOCK_GAUGE(dir) // const SiteSpinor * ptr = & in._odata[offset]; #define LOAD_CHIMU(PTR) LOAD_CHIMUi(PTR) From a869addef1cebad465c9377b839511376436aeda Mon Sep 17 00:00:00 2001 From: Peter Boyle Date: Tue, 27 Dec 2016 11:25:22 +0000 Subject: [PATCH 028/101] Stats switch off --- lib/qcd/action/fermion/WilsonFermion5D.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/qcd/action/fermion/WilsonFermion5D.cc b/lib/qcd/action/fermion/WilsonFermion5D.cc index 1c0e0b0e..012c361b 100644 --- a/lib/qcd/action/fermion/WilsonFermion5D.cc +++ b/lib/qcd/action/fermion/WilsonFermion5D.cc @@ -415,7 +415,7 @@ void WilsonFermion5D::DhopInternal(StencilImpl & st, LebesgueOrder &lo, int sF = LLs * sU; Kernels::DiracOptDhopSiteDag(st, lo, U, st.CommBuf(), sF, sU, LLs, 1, in, out); } -#ifdef AVX512 +#ifdef AVX512_SWITCHOFF } else if (stat.is_init() ) { int nthreads; From ff2f559a574d41b73d28fc5341336750ebe0b6aa Mon Sep 17 00:00:00 2001 From: Peter Boyle Date: Tue, 27 Dec 2016 17:45:19 +0000 Subject: [PATCH 029/101] Remove inline on gather optimised path --- lib/Stencil.h | 55 +++++++++++---------------------------------------- 1 file changed, 12 insertions(+), 43 deletions(-) diff --git a/lib/Stencil.h b/lib/Stencil.h index 96b40c50..82e818d2 100644 --- a/lib/Stencil.h +++ b/lib/Stencil.h @@ -70,51 +70,20 @@ namespace Grid { -inline void Gather_plane_simple_table_compute (GridBase *grid,int dimension,int plane,int cbmask, - int off,std::vector > & table) +void Gather_plane_simple_table_compute (GridBase *grid,int dimension,int plane,int cbmask, + int off,std::vector > & table); + +template +void Gather_plane_simple_table (std::vector >& table,const Lattice &rhs,cobj *buffer,compressor &compress, int off,int so) __attribute__((noinline)); + +template +void Gather_plane_simple_table (std::vector >& table,const Lattice &rhs,cobj *buffer,compressor &compress, int off,int so) { - table.resize(0); - int rd = grid->_rdimensions[dimension]; - - if ( !grid->CheckerBoarded(dimension) ) { - cbmask = 0x3; + int num=table.size(); + PARALLEL_FOR_LOOP + for(int i=0;i_ostride[dimension]; // base offset for start of plane - int e1=grid->_slice_nblock[dimension]; - int e2=grid->_slice_block[dimension]; - - int stride=grid->_slice_stride[dimension]; - if ( cbmask == 0x3 ) { - table.resize(e1*e2); - for(int n=0;n(bo+b,o+b); - } - } - } else { - int bo=0; - table.resize(e1*e2/2); - 
for(int n=0;nCheckerBoardFromOindexTable(o+b); - if ( ocb &cbmask ) { - table[bo]=std::pair(bo,o+b); bo++; - } - } - } - } -} - -template void -Gather_plane_simple_table (std::vector >& table,const Lattice &rhs,cobj *buffer,compressor &compress, int off,int so) -{ -PARALLEL_FOR_LOOP - for(int i=0;i Date: Tue, 27 Dec 2016 17:45:40 +0000 Subject: [PATCH 030/101] No inline --- lib/Stencil.cc | 69 ++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 69 insertions(+) create mode 100644 lib/Stencil.cc diff --git a/lib/Stencil.cc b/lib/Stencil.cc new file mode 100644 index 00000000..16fb736f --- /dev/null +++ b/lib/Stencil.cc @@ -0,0 +1,69 @@ + /************************************************************************************* + + Grid physics library, www.github.com/paboyle/Grid + + Source file: ./lib/Stencil.cc + + Copyright (C) 2015 + + Author: Peter Boyle + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License along + with this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + + See the full license in the file "LICENSE" in the top level distribution directory + *************************************************************************************/ + /* END LEGAL */ +#include "Grid.h" + +namespace Grid { + +void Gather_plane_simple_table_compute (GridBase *grid,int dimension,int plane,int cbmask, + int off,std::vector > & table) +{ + table.resize(0); + int rd = grid->_rdimensions[dimension]; + + if ( !grid->CheckerBoarded(dimension) ) { + cbmask = 0x3; + } + int so= plane*grid->_ostride[dimension]; // base offset for start of plane + int e1=grid->_slice_nblock[dimension]; + int e2=grid->_slice_block[dimension]; + + int stride=grid->_slice_stride[dimension]; + if ( cbmask == 0x3 ) { + table.resize(e1*e2); + for(int n=0;n(bo+b,o+b); + } + } + } else { + int bo=0; + table.resize(e1*e2/2); + for(int n=0;nCheckerBoardFromOindexTable(o+b); + if ( ocb &cbmask ) { + table[bo]=std::pair(bo,o+b); bo++; + } + } + } + } +} +} From 1e179c903dc176487893456467b315f01c303d6e Mon Sep 17 00:00:00 2001 From: Peter Boyle Date: Tue, 27 Dec 2016 17:46:38 +0000 Subject: [PATCH 031/101] Worried about integer; suspect where statements are broken --- lib/simd/Grid_qpx.h | 1 + 1 file changed, 1 insertion(+) diff --git a/lib/simd/Grid_qpx.h b/lib/simd/Grid_qpx.h index e2fe0b63..fd897b9c 100644 --- a/lib/simd/Grid_qpx.h +++ b/lib/simd/Grid_qpx.h @@ -89,6 +89,7 @@ namespace Optimization { vec_st(a, 0, d); } //Integer + // PAB: fixme -- is this right ; just looks like scalar not vector inline void operator()(int a, Integer *i){ i[0] = a; } From 05c1924819a111762cf4db162e0b48674da5db61 Mon Sep 17 00:00:00 2001 From: azusayamaguchi Date: Mon, 23 Jan 2017 10:43:45 +0000 Subject: [PATCH 032/101] Timing loop change --- benchmarks/Benchmark_staggered.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/benchmarks/Benchmark_staggered.cc b/benchmarks/Benchmark_staggered.cc index 121dc0d5..9860e59d 100644 --- 
a/benchmarks/Benchmark_staggered.cc +++ b/benchmarks/Benchmark_staggered.cc @@ -115,7 +115,7 @@ int main (int argc, char ** argv) ImprovedStaggeredFermionR Ds(Umu,Umu,Grid,RBGrid,mass,c1,c2,u0,params); std::cout< Date: Tue, 7 Feb 2017 00:57:01 -0500 Subject: [PATCH 033/101] Overlap comms compute modifications --- lib/simd/IBM_qpx.h | 79 +++++++++++++++++----------------------------- 1 file changed, 29 insertions(+), 50 deletions(-) diff --git a/lib/simd/IBM_qpx.h b/lib/simd/IBM_qpx.h index 187991c8..df91d8e6 100644 --- a/lib/simd/IBM_qpx.h +++ b/lib/simd/IBM_qpx.h @@ -153,7 +153,6 @@ Author: paboyle /********************************************************* * Macro sequences encoding QCD *********************************************************/ -#define LOCK_GAUGEa(dir) #define LOCK_GAUGE(dir) \ { \ uint64_t byte_addr = (uint64_t)&U._odata[sU]; \ @@ -168,8 +167,6 @@ Author: paboyle : : "b" (count), "b" (byte_addr) ); \ } -#define UNLOCK_GAUGEa(dir) - #define UNLOCK_GAUGE(dir) \ { \ uint64_t byte_addr = (uint64_t)&U._odata[sU]; \ @@ -184,20 +181,25 @@ Author: paboyle : : "b" (count), "b" (byte_addr) ); \ } +#define ZERO_PSI \ + VZERO(psi_00) \ + VZERO(psi_01) \ + VZERO(psi_02) \ + VZERO(psi_10) \ + VZERO(psi_11) \ + VZERO(psi_12) \ + VZERO(psi_20) \ + VZERO(psi_21) \ + VZERO(psi_22) \ + VZERO(psi_30) \ + VZERO(psi_31) \ + VZERO(psi_32) + #define MULT_2SPIN_QPX_LSd(ptr,p) MULT_2SPIN_QPX_INTERNAL(ptr,p,VSPLAT,16) #define MULT_2SPIN_QPX_LSf(ptr,p) MULT_2SPIN_QPX_INTERNAL(ptr,p,VSPLAT,8) #define MULT_2SPIN_QPXd(ptr,p) MULT_2SPIN_QPX_INTERNAL(ptr,p,VLOAD,32) #define MULT_2SPIN_QPXf(ptr,p) MULT_2SPIN_QPX_INTERNAL(ptr,p,VLOAD,16) -#define MULT_2SPIN_QPX_INTERNALa(ptr,p,ULOAD,USKIP) { \ - asm (VMOV(UChi_00,Chi_00) \ - VMOV(UChi_01,Chi_01) \ - VMOV(UChi_02,Chi_02) \ - VMOV(UChi_10,Chi_10) \ - VMOV(UChi_11,Chi_11) \ - VMOV(UChi_12,Chi_12) ); \ - } - #define MULT_2SPIN_QPX_INTERNAL(ptr,p,ULOAD,USKIP) { \ uint64_t ub = ((uint64_t)ptr); \ asm ( \ @@ -253,14 +255,9 @@ Author: paboyle : : "b" (USKIP*2), "b" (USKIP*5), "b" (USKIP*8), "b" (ub )); \ } -#define MULT_2SPIN_DIR_PFXP(A,p) MULT_2SPIN(&U._odata[sU](A),p) -#define MULT_2SPIN_DIR_PFYP(A,p) MULT_2SPIN(&U._odata[sU](A),p) -#define MULT_2SPIN_DIR_PFZP(A,p) MULT_2SPIN(&U._odata[sU](A),p) -#define MULT_2SPIN_DIR_PFTP(A,p) MULT_2SPIN(&U._odata[sU](A),p) -#define MULT_2SPIN_DIR_PFXM(A,p) MULT_2SPIN(&U._odata[sU](A),p) -#define MULT_2SPIN_DIR_PFYM(A,p) MULT_2SPIN(&U._odata[sU](A),p) -#define MULT_2SPIN_DIR_PFZM(A,p) MULT_2SPIN(&U._odata[sU](A),p) -#define MULT_2SPIN_DIR_PFTM(A,p) MULT_2SPIN(&U._odata[sU](A),p) + +#define MULT_2SPIN_DIR_PF(A,p) MULT_2SPIN_PF(&U._odata[sU](A),p) +#define MULT_2SPIN_PF(ptr,pf) MULT_2SPIN(ptr,pf) #define SAVE_RESULT(base,basep) {\ uint64_t ub = ((uint64_t)base) - (VSIZE); \ @@ -281,6 +278,7 @@ Author: paboyle : : "b" (ub) : HASH(pIMM), HASH(pREP) ); \ } + /* *Annoying BG/Q loads with no immediat indexing and big performance hit *when second miss to a L1 line occurs @@ -300,36 +298,6 @@ Author: paboyle VLOADu(IMM,REP,Chi_12) : : "b" (ub) : HASH(pIMM), HASH(pREP) ); \ } -#define LOAD_CHIa(base) { \ - uint64_t ub = ((uint64_t)base) - (VSIZE); \ - asm("mr " HASH(REP) ",%0 ;\n" \ - "li " HASH(IMM) "," HASH(VSIZE) ";\n" \ - VLOADu(IMM,REP,Chi_00) \ - VLOADu(IMM,REP,Chi_01) \ - VLOADu(IMM,REP,Chi_02) \ - VLOADu(IMM,REP,Chi_10) \ - VLOADu(IMM,REP,Chi_11) \ - VLOADu(IMM,REP,Chi_12) : : "b" (ub) : HASH(pIMM), HASH(pREP) ); \ - } - -#define LOAD_CHIMUa(base) { \ - uint64_t ub = ((uint64_t)base) - (VSIZE); \ - asm("mr " HASH(REP) ",%0 
;\n" \ - "li " HASH(IMM) "," HASH(VSIZE) ";\n" \ - VLOADu(IMM,REP,Chi_00) \ - VLOADu(IMM,REP,Chi_01) \ - VLOADu(IMM,REP,Chi_02) \ - VLOADu(IMM,REP,Chi_10) \ - VLOADu(IMM,REP,Chi_11) \ - VLOADu(IMM,REP,Chi_12) \ - VLOADu(IMM,REP,Chi_20) \ - VLOADu(IMM,REP,Chi_21) \ - VLOADu(IMM,REP,Chi_22) \ - VLOADu(IMM,REP,Chi_30) \ - VLOADu(IMM,REP,Chi_31) \ - VLOADu(IMM,REP,Chi_32) : : "b" (ub) : HASH(pIMM), HASH(pREP) ); \ - } - #define LOAD_CHIMU(base) { \ uint64_t ub = ((uint64_t)base) - (2*VSIZE); \ asm("mr " HASH(REP) ",%0;\n" \ @@ -605,6 +573,17 @@ Author: paboyle );\ } + +#define ADD_RESULTi(PTR,pf) \ + LOAD_CHIMU(PTR) \ + asm( \ + VADD(psi_00,chi_00,psi_00) VADD(psi_01,chi_01,psi_01) VADD(psi_02,chi_02,psi_02) \ + VADD(psi_10,chi_10,psi_10) VADD(psi_11,chi_11,psi_11) VADD(psi_12,chi_12,psi_12) \ + VADD(psi_20,chi_20,psi_20) VADD(psi_21,chi_21,psi_21) VADD(psi_22,chi_22,psi_22) \ + VADD(psi_30,chi_30,psi_30) VADD(psi_31,chi_31,psi_31) VADD(psi_32,chi_32,psi_32) ); \ + SAVE_RESULT(PTR,pf); + + #define PERMUTE_DIR3 #define PERMUTE_DIR2 #define PERMUTE_DIR1 From b5e9c900a4f0359e390e393db02b4e36cc8fb353 Mon Sep 17 00:00:00 2001 From: paboyle Date: Tue, 7 Feb 2017 00:57:55 -0500 Subject: [PATCH 034/101] Better printing and signal handling options --- lib/Init.cc | 117 +++++++++++++++++++++++++++------------------------- 1 file changed, 61 insertions(+), 56 deletions(-) diff --git a/lib/Init.cc b/lib/Init.cc index d6d6b9f8..6c31e950 100644 --- a/lib/Init.cc +++ b/lib/Init.cc @@ -1,4 +1,4 @@ - /************************************************************************************* +/************************************************************************************* Grid physics library, www.github.com/paboyle/Grid @@ -219,8 +219,57 @@ void Grid_init(int *argc,char ***argv) CartesianCommunicator::MAX_MPI_SHM_BYTES = MB*1024*1024; } + if( GridCmdOptionExists(*argv,*argv+*argc,"--debug-signals") ){ + Grid_debug_handler_init(); + } + CartesianCommunicator::Init(argc,argv); + if( !GridCmdOptionExists(*argv,*argv+*argc,"--debug-stdout") ){ + Grid_quiesce_nodes(); + } else { + std::ostringstream fname; + fname<<"Grid.stdout."; + fname<si_signo); - printf(" mem address %llx\n",(unsigned long long)si->si_addr); - printf(" code %d\n",si->si_code); - + fprintf(stderr,"Caught signal %d\n",si->si_signo); + fprintf(stderr," mem address %llx\n",(unsigned long long)si->si_addr); + fprintf(stderr," code %d\n",si->si_code); // Linux/Posix #ifdef __linux__ // And x86 64bit #ifdef __x86_64__ ucontext_t * uc= (ucontext_t *)ptr; struct sigcontext *sc = (struct sigcontext *)&uc->uc_mcontext; - printf(" instruction %llx\n",(unsigned long long)sc->rip); + fprintf(stderr," instruction %llx\n",(unsigned long long)sc->rip); #define REG(A) printf(" %s %lx\n",#A,sc-> A); REG(rdi); REG(rsi); @@ -412,7 +411,11 @@ void Grid_sa_signal_handler(int sig,siginfo_t *si,void * ptr) REG(r15); #endif #endif - BACKTRACE(); + fflush(stderr); + BACKTRACEFP(stderr); + fprintf(stderr,"Called backtrace\n"); + fflush(stdout); + fflush(stderr); exit(0); return; }; @@ -425,9 +428,11 @@ void Grid_debug_handler_init(void) sa.sa_flags = SA_SIGINFO; sigaction(SIGSEGV,&sa,NULL); sigaction(SIGTRAP,&sa,NULL); + sigaction(SIGBUS,&sa,NULL); feenableexcept( FE_INVALID|FE_OVERFLOW|FE_DIVBYZERO); sigaction(SIGFPE,&sa,NULL); + sigaction(SIGKILL,&sa,NULL); } } From 9ff97b47116324f23515c8c1cfeead7c84eac9e0 Mon Sep 17 00:00:00 2001 From: paboyle Date: Tue, 7 Feb 2017 00:58:34 -0500 Subject: [PATCH 035/101] Improved stencil tests passing all on KNL multinode --- 
tests/Test_stencil.cc | 11 ++-- tests/core/Test_cshift_red_black.cc | 75 +++++++++++++++------- tests/core/Test_cshift_red_black_rotate.cc | 74 ++++++++++++++------- tests/core/Test_cshift_rotate.cc | 1 - 4 files changed, 109 insertions(+), 52 deletions(-) diff --git a/tests/Test_stencil.cc b/tests/Test_stencil.cc index 1b71b8a5..c2a2580f 100644 --- a/tests/Test_stencil.cc +++ b/tests/Test_stencil.cc @@ -66,7 +66,7 @@ int main (int argc, char ** argv) random(fRNG,Foo); gaussian(fRNG,Bar); - /* + Integer stride =1000; { double nrm; @@ -79,7 +79,6 @@ int main (int argc, char ** argv) } Foo=lex; } - */ typedef CartesianStencil Stencil; for(int dir=0;dir<4;dir++){ @@ -92,7 +91,6 @@ int main (int argc, char ** argv) std::vector displacements(npoint,disp); Stencil myStencil(&Fine,npoint,0,directions,displacements); - std::vector ocoor(4); for(int o=0;o 1.0e-4) exit(-1); } } @@ -182,8 +180,6 @@ int main (int argc, char ** argv) SimpleCompressor compress; - EStencil.HaloExchange(EFoo,compress); - OStencil.HaloExchange(OFoo,compress); Bar = Cshift(Foo,dir,disp); @@ -196,6 +192,7 @@ int main (int argc, char ** argv) } // Implement a stencil code that should agree with that darn cshift! + EStencil.HaloExchange(EFoo,compress); for(int i=0;ioSites();i++){ int permute_type; StencilEntry *SE; @@ -209,6 +206,7 @@ int main (int argc, char ** argv) else OCheck._odata[i] = EStencil.CommBuf()[SE->_offset]; } + OStencil.HaloExchange(OFoo,compress); for(int i=0;ioSites();i++){ int permute_type; StencilEntry *SE; @@ -254,6 +252,7 @@ int main (int argc, char ** argv) }}}} + if (nrm > 1.0e-4) exit(-1); } } diff --git a/tests/core/Test_cshift_red_black.cc b/tests/core/Test_cshift_red_black.cc index 43f12e77..ae55cece 100644 --- a/tests/core/Test_cshift_red_black.cc +++ b/tests/core/Test_cshift_red_black.cc @@ -32,6 +32,8 @@ Author: paboyle using namespace Grid; using namespace Grid::QCD; +#define POWER10 + int main (int argc, char ** argv) { Grid_init(&argc,&argv); @@ -52,6 +54,7 @@ int main (int argc, char ** argv) LatticeComplex U(&Fine); LatticeComplex ShiftU(&Fine); LatticeComplex rbShiftU(&Fine); + LatticeComplex err(&Fine); LatticeComplex Ue(&RBFine); LatticeComplex Uo(&RBFine); LatticeComplex ShiftUe(&RBFine); @@ -68,7 +71,11 @@ int main (int argc, char ** argv) Integer i=0; LatticeCoordinate(coor,d); lex = lex + coor*stride+i; +#ifndef POWER10 stride=stride*latt_size[d]; +#else + stride=stride*10; +#endif } U=lex; } @@ -87,28 +94,31 @@ int main (int argc, char ** argv) // if ( dir!=1 ) continue; for(int shift=0;shift coor(4); - std::cout< scoor(coor); scoor[dir] = (scoor[dir]+shift)%latt_size[dir]; +#ifndef POWER10 + std::vector powers=latt_size; Integer slex = scoor[0] + latt_size[0]*scoor[1] + latt_size[0]*latt_size[1]*scoor[2] + latt_size[0]*latt_size[1]*latt_size[2]*scoor[3]; - +#else + std::vector powers({1,10,100,1000}); + Integer slex = scoor[0] + + 10 *scoor[1] + + 100 *scoor[2] + + 1000 *scoor[3]; +#endif Complex scm(slex); double nrm = abs(scm-cm()()()); std::vector peer(4); Complex ctmp = cm; Integer index=real(ctmp); - Lexicographic::CoorFromIndex(peer,index,latt_size); + Lexicographic::CoorFromIndex(peer,index,powers); if (nrm > 0){ std::cout<<"FAIL shift "<< shift<<" in dir "<< dir @@ -145,9 +163,10 @@ int main (int argc, char ** argv) exit(-1); } }}}} + std::cout << " OK !"< scoor(coor); scoor[dir] = (scoor[dir]+shift)%latt_size[dir]; +#ifndef POWER10 + std::vector powers=latt_size; Integer slex = scoor[0] + latt_size[0]*scoor[1] + latt_size[0]*latt_size[1]*scoor[2] + 
latt_size[0]*latt_size[1]*latt_size[2]*scoor[3]; - +#else + std::vector powers({1,10,100,1000}); + Integer slex = scoor[0] + + 10 *scoor[1] + + 100 *scoor[2] + + 1000 *scoor[3]; +#endif Complex scm(slex); std::vector peer(4); Complex ctmp=cmeo; Integer index=real(ctmp); - Lexicographic::CoorFromIndex(peer,index,latt_size); + Lexicographic::CoorFromIndex(peer,index,powers); double nrm = abs(cmeo()()()-scm); if (nrm != 0) { + + std::cout << " coor "<<" ["< using namespace Grid; using namespace Grid::QCD; +#define POWER10 + int main (int argc, char ** argv) { Grid_init(&argc,&argv); @@ -49,6 +51,7 @@ int main (int argc, char ** argv) GridParallelRNG FineRNG(&Fine); FineRNG.SeedRandomDevice(); + LatticeComplex err(&Fine); LatticeComplex U(&Fine); LatticeComplex ShiftU(&Fine); LatticeComplex rbShiftU(&Fine); @@ -66,9 +69,15 @@ int main (int argc, char ** argv) for(int d=0;d coor(4); - std::cout< scoor(coor); scoor[dir] = (scoor[dir]+shift)%latt_size[dir]; - + +#ifdef POWER10 + std::vector powers({1,10,100,1000}); + Integer slex = scoor[3] + + 10 *scoor[2] + + 100 *scoor[1] + + 1000 *scoor[0]; +#else + std::vector powers=latt_size; Integer slex = scoor[0] + latt_size[0]*scoor[1] + latt_size[0]*latt_size[1]*scoor[2] + latt_size[0]*latt_size[1]*latt_size[2]*scoor[3]; +#endif Complex scm(slex); @@ -132,7 +152,7 @@ int main (int argc, char ** argv) std::vector peer(4); Complex ctmp = cm; Integer index=real(ctmp); - Lexicographic::CoorFromIndex(peer,index,latt_size); + Lexicographic::CoorFromIndex(peer,index,powers); if (nrm > 0){ std::cout<<"FAIL shift "<< shift<<" in dir "<< dir @@ -140,14 +160,16 @@ int main (int argc, char ** argv) << cm()()()<<" expect "< scoor(coor); scoor[dir] = (scoor[dir]+shift)%latt_size[dir]; - + +#ifdef POWER10 + std::vector powers({1,10,100,1000}); + Integer slex = scoor[3] + + 10 *scoor[2] + + 100 *scoor[1] + + 1000 *scoor[0]; +#else + std::vector powers = latt_size; Integer slex = scoor[0] + latt_size[0]*scoor[1] + latt_size[0]*latt_size[1]*scoor[2] + latt_size[0]*latt_size[1]*latt_size[2]*scoor[3]; - +#endif Complex scm(slex); std::vector peer(4); Complex ctmp=cmeo; Integer index=real(ctmp); - Lexicographic::CoorFromIndex(peer,index,latt_size); + Lexicographic::CoorFromIndex(peer,index,powers); double nrm = abs(cmeo()()()-scm); if (nrm != 0) { @@ -189,10 +219,9 @@ int main (int argc, char ** argv) << cmeo()()()<<" expect "< Date: Tue, 7 Feb 2017 00:59:32 -0500 Subject: [PATCH 036/101] Overlap comms compute support; make reg naming consistent with bgq aasm --- lib/simd/Intel512wilson.h | 557 ++++++++++++++++++++------------------ 1 file changed, 293 insertions(+), 264 deletions(-) diff --git a/lib/simd/Intel512wilson.h b/lib/simd/Intel512wilson.h index 3ca0b648..64142a2e 100644 --- a/lib/simd/Intel512wilson.h +++ b/lib/simd/Intel512wilson.h @@ -31,21 +31,21 @@ Author: paboyle ////////////////////////////////////////////////////////////////////////////////////////// // Register allocations for Wilson Kernel are precision indept ////////////////////////////////////////////////////////////////////////////////////////// -#define result_00 %zmm0 -#define result_01 %zmm1 -#define result_02 %zmm2 +#define psi_00 %zmm0 +#define psi_01 %zmm1 +#define psi_02 %zmm2 -#define result_10 %zmm3 -#define result_11 %zmm4 -#define result_12 %zmm5 +#define psi_10 %zmm3 +#define psi_11 %zmm4 +#define psi_12 %zmm5 -#define result_20 %zmm6 -#define result_21 %zmm7 -#define result_22 %zmm8 +#define psi_20 %zmm6 +#define psi_21 %zmm7 +#define psi_22 %zmm8 -#define result_30 %zmm9 -#define 
result_31 %zmm10 -#define result_32 %zmm11 +#define psi_30 %zmm9 +#define psi_31 %zmm10 +#define psi_32 %zmm11 #define Chi_00 %zmm12 #define Chi_01 %zmm13 @@ -102,32 +102,46 @@ Author: paboyle #define UNLOCK_GAUGE(dir) // const SiteSpinor * ptr = & in._odata[offset]; -#define LOAD_CHIMU(PTR) LOAD_CHIMUi(PTR) +#define LOAD_CHIMU(PTR) LOAD64(%r8,PTR) __asm__ ( LOAD_CHIMUi ); #define LOAD_CHI(PTR) LOAD64(%r8,PTR) __asm__ ( LOAD_CHIi ); #define SAVE_UCHI(PTR) SAVE_UCHIi(PTR) #define SAVE_CHI(PTR) SAVE_CHIi(PTR) #define SAVE_RESULT(PT,R) SAVE_RESULTi(PT,R) +#define ADD_RESULT(PT,R) ADD_RESULTi(PT,R) -#define LOAD_CHIMUi \ - LOAD_CHIMU01i \ - LOAD_CHIMU23i ); +#define ZERO_PSI \ + asm( VZERO(psi_00) \ + VZERO(psi_01) \ + VZERO(psi_02) \ + VZERO(psi_10) \ + VZERO(psi_11) \ + VZERO(psi_12) \ + VZERO(psi_20) \ + VZERO(psi_21) \ + VZERO(psi_22) \ + VZERO(psi_30) \ + VZERO(psi_31) \ + VZERO(psi_32)); +#define LOAD_CHIMUi \ + LOAD_CHIMU01i \ + LOAD_CHIMU23i -#define LOAD_CHIMU01i\ - VLOAD(0,%r8,Chimu_00) \ - VLOAD(1,%r8,Chimu_01) \ - VLOAD(2,%r8,Chimu_02) \ - VLOAD(3,%r8,Chimu_10) \ - VLOAD(4,%r8,Chimu_11) \ - VLOAD(5,%r8,Chimu_12) +#define LOAD_CHIMU01i \ + VLOAD(0,%r8,Chimu_00) \ + VLOAD(1,%r8,Chimu_01) \ + VLOAD(2,%r8,Chimu_02) \ + VLOAD(3,%r8,Chimu_10) \ + VLOAD(4,%r8,Chimu_11) \ + VLOAD(5,%r8,Chimu_12) -#define LOAD_CHIMU23i\ - VLOAD(6,%r8,Chimu_20) \ - VLOAD(7,%r8,Chimu_21) \ - VLOAD(8,%r8,Chimu_22) \ - VLOAD(9,%r8,Chimu_30) \ - VLOAD(10,%r8,Chimu_31) \ - VLOAD(11,%r8,Chimu_32) +#define LOAD_CHIMU23i \ + VLOAD(6,%r8,Chimu_20) \ + VLOAD(7,%r8,Chimu_21) \ + VLOAD(8,%r8,Chimu_22) \ + VLOAD(9,%r8,Chimu_30) \ + VLOAD(10,%r8,Chimu_31) \ + VLOAD(11,%r8,Chimu_32) #define SHUF_CHIMU23i\ VSHUFMEM(6,%r8,Chimu_20) \ @@ -137,9 +151,6 @@ Author: paboyle VSHUFMEM(10,%r8,Chimu_31) \ VSHUFMEM(11,%r8,Chimu_32) - -// const SiteHalfSpinor *ptr = &buf[offset]; - #define LOAD_CHIi \ VLOAD(0,%r8,Chi_00) \ VLOAD(1,%r8,Chi_01) \ @@ -147,7 +158,6 @@ Author: paboyle VLOAD(3,%r8,Chi_10) \ VLOAD(4,%r8,Chi_11) \ VLOAD(5,%r8,Chi_12) - #define SAVE_UCHIi(PTR) \ LOAD64(%r8,PTR) \ @@ -157,8 +167,7 @@ Author: paboyle VSTORE(2,%r8,UChi_02) \ VSTORE(3,%r8,UChi_10) \ VSTORE(4,%r8,UChi_11) \ - VSTORE(5,%r8,UChi_12) \ - ); + VSTORE(5,%r8,UChi_12) ); #define SAVE_CHIi(PTR) \ LOAD64(%r8,PTR) \ @@ -168,33 +177,14 @@ Author: paboyle VSTORE(2,%r8,Chi_02) \ VSTORE(3,%r8,Chi_10) \ VSTORE(4,%r8,Chi_11) \ - VSTORE(5,%r8,Chi_12) \ - ); + VSTORE(5,%r8,Chi_12) ); - -#define MULT_2SPIN_DIR_PFXP(A,p) MULT_2SPIN_PFXP(&U._odata[sU](A),p) -#define MULT_2SPIN_DIR_PFYP(A,p) MULT_2SPIN_PFYP(&U._odata[sU](A),p) -#define MULT_2SPIN_DIR_PFZP(A,p) MULT_2SPIN_PFZP(&U._odata[sU](A),p) -#define MULT_2SPIN_DIR_PFTP(A,p) MULT_2SPIN_PFTP(&U._odata[sU](A),p) - -#define MULT_2SPIN_DIR_PFXM(A,p) MULT_2SPIN_PFXM(&U._odata[sU](A),p) -#define MULT_2SPIN_DIR_PFYM(A,p) MULT_2SPIN_PFYM(&U._odata[sU](A),p) -#define MULT_2SPIN_DIR_PFZM(A,p) MULT_2SPIN_PFZM(&U._odata[sU](A),p) -#define MULT_2SPIN_DIR_PFTM(A,p) MULT_2SPIN_PFTM(&U._odata[sU](A),p) - -#define MULT_2SPIN_PFXM(ptr,pf) MULT_2SPIN(ptr,pf) -#define MULT_2SPIN_PFYM(ptr,pf) MULT_2SPIN(ptr,pf) -#define MULT_2SPIN_PFZM(ptr,pf) MULT_2SPIN(ptr,pf) -#define MULT_2SPIN_PFTM(ptr,pf) MULT_2SPIN(ptr,pf) -#define MULT_2SPIN_PFTP(ptr,pf) MULT_2SPIN(ptr,pf) -#define MULT_2SPIN_PFZP(ptr,pf) MULT_2SPIN(ptr,pf) -#define MULT_2SPIN_PFYP(ptr,pf) MULT_2SPIN(ptr,pf) -#define MULT_2SPIN_PFXP(ptr,pf) MULT_2SPIN(ptr,pf) +#define MULT_2SPIN_DIR_PF(A,p) MULT_2SPIN_PF(&U._odata[sU](A),p) +#define MULT_2SPIN_PF(ptr,pf) MULT_2SPIN(ptr,pf) 
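// [Series annotation, not part of the patch] The eight per-direction
// MULT_2SPIN_DIR_PF{X,Y,Z,T}{P,M} wrappers deleted above were textually
// identical; the direction is already selected by the &U._odata[sU](A)
// argument, so the single MULT_2SPIN_DIR_PF(A,p) suffices. The result_*
// register aliases become psi_* purely for naming consistency with the
// BGQ assembler. For orientation, the scalar semantics that the
// MULT_2SPIN asm implements are roughly UChi_s = U(mu) * Chi_s for both
// half-spinor components (a sketch only, not compiled here):
//
//   for (int s = 0; s < 2; s++)               // two spin components
//     for (int i = 0; i < 3; i++) {           // colour row
//       UChi[s][i] = 0;
//       for (int j = 0; j < 3; j++)           // colour column
//         UChi[s][i] += U[i][j] * Chi[s][j];  // complex multiply-add
//     }
//
// with Chi_00..Chi_12 / UChi_00..UChi_12 holding these 2x3 complex
// components one per zmm register; the psi_* registers then accumulate
// the spin reconstruction in the *_RECON_* macros that follow.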
////////////////////////////////////////////////////////////////// // Dirac algebra ////////////////////////////////////////////////////////////////// - // hspin(0)=fspin(0)+timesI(fspin(3)); // hspin(1)=fspin(1)+timesI(fspin(2)); #define XP_PROJMEM(PTR) \ @@ -259,7 +249,6 @@ Author: paboyle // hspin(0)=fspin(0)-timesI(fspin(3)) // hspin(1)=fspin(1)-timesI(fspin(2)) - #define XM_PROJMEM(PTR) \ LOAD64(%r8,PTR)\ __asm__ ( \ @@ -324,226 +313,226 @@ Author: paboyle // fspin(3)=timesMinusI(hspin(0)) #define XP_RECON __asm__ ( \ VZERO(TMP) \ - VTIMESMINUSI0(UChi_00,result_30,TMP) \ - VTIMESMINUSI0(UChi_10,result_20,TMP) \ - VTIMESMINUSI0(UChi_01,result_31,TMP) \ - VTIMESMINUSI0(UChi_11,result_21,TMP) \ - VTIMESMINUSI0(UChi_02,result_32,TMP) \ - VTIMESMINUSI0(UChi_12,result_22,TMP) \ - VMOV(UChi_00,result_00) \ - VMOV(UChi_10,result_10) \ - VMOV(UChi_01,result_01) \ - VMOV(UChi_11,result_11) \ - VMOV(UChi_02,result_02) \ - VMOV(UChi_12,result_12) \ - VTIMESMINUSI1(UChi_10,result_20,TMP) \ - VTIMESMINUSI1(UChi_11,result_21,TMP) \ - VTIMESMINUSI1(UChi_12,result_22,TMP) \ - VTIMESMINUSI1(UChi_00,result_30,TMP) \ - VTIMESMINUSI1(UChi_01,result_31,TMP) \ - VTIMESMINUSI1(UChi_02,result_32,TMP) \ - VTIMESMINUSI2(UChi_10,result_20,TMP) \ - VTIMESMINUSI2(UChi_11,result_21,TMP) \ - VTIMESMINUSI2(UChi_12,result_22,TMP) \ - VTIMESMINUSI2(UChi_00,result_30,TMP) \ - VTIMESMINUSI2(UChi_01,result_31,TMP) \ - VTIMESMINUSI2(UChi_02,result_32,TMP) \ + VTIMESMINUSI0(UChi_00,psi_30,TMP) \ + VTIMESMINUSI0(UChi_10,psi_20,TMP) \ + VTIMESMINUSI0(UChi_01,psi_31,TMP) \ + VTIMESMINUSI0(UChi_11,psi_21,TMP) \ + VTIMESMINUSI0(UChi_02,psi_32,TMP) \ + VTIMESMINUSI0(UChi_12,psi_22,TMP) \ + VMOV(UChi_00,psi_00) \ + VMOV(UChi_10,psi_10) \ + VMOV(UChi_01,psi_01) \ + VMOV(UChi_11,psi_11) \ + VMOV(UChi_02,psi_02) \ + VMOV(UChi_12,psi_12) \ + VTIMESMINUSI1(UChi_10,psi_20,TMP) \ + VTIMESMINUSI1(UChi_11,psi_21,TMP) \ + VTIMESMINUSI1(UChi_12,psi_22,TMP) \ + VTIMESMINUSI1(UChi_00,psi_30,TMP) \ + VTIMESMINUSI1(UChi_01,psi_31,TMP) \ + VTIMESMINUSI1(UChi_02,psi_32,TMP) \ + VTIMESMINUSI2(UChi_10,psi_20,TMP) \ + VTIMESMINUSI2(UChi_11,psi_21,TMP) \ + VTIMESMINUSI2(UChi_12,psi_22,TMP) \ + VTIMESMINUSI2(UChi_00,psi_30,TMP) \ + VTIMESMINUSI2(UChi_01,psi_31,TMP) \ + VTIMESMINUSI2(UChi_02,psi_32,TMP) \ ); // NB could save 6 ops using addsub => 12 cycles #define XP_RECON_ACCUM __asm__ ( \ VZERO(TMP)\ - VACCTIMESMINUSI0(UChi_00,result_30,Z3)\ - VACCTIMESMINUSI0(UChi_10,result_20,Z0)\ - VACCTIMESMINUSI0(UChi_01,result_31,Z4)\ - VACCTIMESMINUSI0(UChi_11,result_21,Z1)\ - VACCTIMESMINUSI0(UChi_02,result_32,Z5)\ - VACCTIMESMINUSI0(UChi_12,result_22,Z2)\ - VADD(UChi_00,result_00,result_00)\ - VADD(UChi_10,result_10,result_10)\ - VADD(UChi_01,result_01,result_01)\ - VADD(UChi_11,result_11,result_11)\ - VADD(UChi_02,result_02,result_02)\ - VADD(UChi_12,result_12,result_12)\ - VACCTIMESMINUSI1(UChi_00,result_30,Z3)\ - VACCTIMESMINUSI1(UChi_10,result_20,Z0)\ - VACCTIMESMINUSI1(UChi_01,result_31,Z4)\ - VACCTIMESMINUSI1(UChi_11,result_21,Z1)\ - VACCTIMESMINUSI1(UChi_02,result_32,Z5)\ - VACCTIMESMINUSI1(UChi_12,result_22,Z2)\ - VACCTIMESMINUSI2(UChi_10,result_20,Z0)\ - VACCTIMESMINUSI2(UChi_11,result_21,Z1)\ - VACCTIMESMINUSI2(UChi_12,result_22,Z2)\ - VACCTIMESMINUSI2(UChi_00,result_30,Z3)\ - VACCTIMESMINUSI2(UChi_01,result_31,Z4)\ - VACCTIMESMINUSI2(UChi_02,result_32,Z5)\ + VACCTIMESMINUSI0(UChi_00,psi_30,Z3)\ + VACCTIMESMINUSI0(UChi_10,psi_20,Z0)\ + VACCTIMESMINUSI0(UChi_01,psi_31,Z4)\ + VACCTIMESMINUSI0(UChi_11,psi_21,Z1)\ + VACCTIMESMINUSI0(UChi_02,psi_32,Z5)\ 
+ VACCTIMESMINUSI0(UChi_12,psi_22,Z2)\ + VADD(UChi_00,psi_00,psi_00)\ + VADD(UChi_10,psi_10,psi_10)\ + VADD(UChi_01,psi_01,psi_01)\ + VADD(UChi_11,psi_11,psi_11)\ + VADD(UChi_02,psi_02,psi_02)\ + VADD(UChi_12,psi_12,psi_12)\ + VACCTIMESMINUSI1(UChi_00,psi_30,Z3)\ + VACCTIMESMINUSI1(UChi_10,psi_20,Z0)\ + VACCTIMESMINUSI1(UChi_01,psi_31,Z4)\ + VACCTIMESMINUSI1(UChi_11,psi_21,Z1)\ + VACCTIMESMINUSI1(UChi_02,psi_32,Z5)\ + VACCTIMESMINUSI1(UChi_12,psi_22,Z2)\ + VACCTIMESMINUSI2(UChi_10,psi_20,Z0)\ + VACCTIMESMINUSI2(UChi_11,psi_21,Z1)\ + VACCTIMESMINUSI2(UChi_12,psi_22,Z2)\ + VACCTIMESMINUSI2(UChi_00,psi_30,Z3)\ + VACCTIMESMINUSI2(UChi_01,psi_31,Z4)\ + VACCTIMESMINUSI2(UChi_02,psi_32,Z5)\ ); #define XM_RECON __asm__ ( \ VZERO(TMP)\ - VTIMESI0(UChi_00,result_30,TMP)\ - VTIMESI0(UChi_10,result_20,TMP)\ - VTIMESI0(UChi_01,result_31,TMP)\ - VTIMESI0(UChi_11,result_21,TMP)\ - VTIMESI0(UChi_02,result_32,TMP)\ - VTIMESI0(UChi_12,result_22,TMP)\ - VMOV(UChi_00,result_00)\ - VMOV(UChi_10,result_10)\ - VMOV(UChi_01,result_01)\ - VMOV(UChi_11,result_11)\ - VMOV(UChi_02,result_02)\ - VMOV(UChi_12,result_12)\ - VTIMESI1(UChi_00,result_30,TMP)\ - VTIMESI1(UChi_10,result_20,TMP)\ - VTIMESI1(UChi_01,result_31,TMP)\ - VTIMESI1(UChi_11,result_21,TMP)\ - VTIMESI1(UChi_02,result_32,TMP)\ - VTIMESI1(UChi_12,result_22,TMP)\ - VTIMESI2(UChi_10,result_20,TMP)\ - VTIMESI2(UChi_11,result_21,TMP)\ - VTIMESI2(UChi_12,result_22,TMP)\ - VTIMESI2(UChi_00,result_30,TMP)\ - VTIMESI2(UChi_01,result_31,TMP)\ - VTIMESI2(UChi_02,result_32,TMP)\ + VTIMESI0(UChi_00,psi_30,TMP)\ + VTIMESI0(UChi_10,psi_20,TMP)\ + VTIMESI0(UChi_01,psi_31,TMP)\ + VTIMESI0(UChi_11,psi_21,TMP)\ + VTIMESI0(UChi_02,psi_32,TMP)\ + VTIMESI0(UChi_12,psi_22,TMP)\ + VMOV(UChi_00,psi_00)\ + VMOV(UChi_10,psi_10)\ + VMOV(UChi_01,psi_01)\ + VMOV(UChi_11,psi_11)\ + VMOV(UChi_02,psi_02)\ + VMOV(UChi_12,psi_12)\ + VTIMESI1(UChi_00,psi_30,TMP)\ + VTIMESI1(UChi_10,psi_20,TMP)\ + VTIMESI1(UChi_01,psi_31,TMP)\ + VTIMESI1(UChi_11,psi_21,TMP)\ + VTIMESI1(UChi_02,psi_32,TMP)\ + VTIMESI1(UChi_12,psi_22,TMP)\ + VTIMESI2(UChi_10,psi_20,TMP)\ + VTIMESI2(UChi_11,psi_21,TMP)\ + VTIMESI2(UChi_12,psi_22,TMP)\ + VTIMESI2(UChi_00,psi_30,TMP)\ + VTIMESI2(UChi_01,psi_31,TMP)\ + VTIMESI2(UChi_02,psi_32,TMP)\ ); #define XM_RECON_ACCUM __asm__ ( \ - VACCTIMESI0(UChi_10,result_20,Z0)\ - VACCTIMESI0(UChi_00,result_30,Z3)\ - VACCTIMESI0(UChi_11,result_21,Z1)\ - VACCTIMESI0(UChi_01,result_31,Z4)\ - VACCTIMESI0(UChi_12,result_22,Z2)\ - VACCTIMESI0(UChi_02,result_32,Z5)\ + VACCTIMESI0(UChi_10,psi_20,Z0)\ + VACCTIMESI0(UChi_00,psi_30,Z3)\ + VACCTIMESI0(UChi_11,psi_21,Z1)\ + VACCTIMESI0(UChi_01,psi_31,Z4)\ + VACCTIMESI0(UChi_12,psi_22,Z2)\ + VACCTIMESI0(UChi_02,psi_32,Z5)\ \ - VADD(UChi_10,result_10,result_10)\ - VADD(UChi_00,result_00,result_00)\ - VADD(UChi_11,result_11,result_11)\ - VADD(UChi_01,result_01,result_01)\ - VADD(UChi_12,result_12,result_12)\ - VADD(UChi_02,result_02,result_02)\ + VADD(UChi_10,psi_10,psi_10)\ + VADD(UChi_00,psi_00,psi_00)\ + VADD(UChi_11,psi_11,psi_11)\ + VADD(UChi_01,psi_01,psi_01)\ + VADD(UChi_12,psi_12,psi_12)\ + VADD(UChi_02,psi_02,psi_02)\ \ - VACCTIMESI1(UChi_10,result_20,Z0)\ - VACCTIMESI1(UChi_00,result_30,Z3)\ - VACCTIMESI1(UChi_11,result_21,Z1)\ - VACCTIMESI1(UChi_01,result_31,Z4)\ - VACCTIMESI1(UChi_12,result_22,Z2)\ - VACCTIMESI1(UChi_02,result_32,Z5)\ - VACCTIMESI2(UChi_10,result_20,Z0)\ - VACCTIMESI2(UChi_11,result_21,Z1)\ - VACCTIMESI2(UChi_12,result_22,Z2)\ - VACCTIMESI2(UChi_00,result_30,Z3)\ - VACCTIMESI2(UChi_01,result_31,Z4)\ - 
VACCTIMESI2(UChi_02,result_32,Z5)\ + VACCTIMESI1(UChi_10,psi_20,Z0)\ + VACCTIMESI1(UChi_00,psi_30,Z3)\ + VACCTIMESI1(UChi_11,psi_21,Z1)\ + VACCTIMESI1(UChi_01,psi_31,Z4)\ + VACCTIMESI1(UChi_12,psi_22,Z2)\ + VACCTIMESI1(UChi_02,psi_32,Z5)\ + VACCTIMESI2(UChi_10,psi_20,Z0)\ + VACCTIMESI2(UChi_11,psi_21,Z1)\ + VACCTIMESI2(UChi_12,psi_22,Z2)\ + VACCTIMESI2(UChi_00,psi_30,Z3)\ + VACCTIMESI2(UChi_01,psi_31,Z4)\ + VACCTIMESI2(UChi_02,psi_32,Z5)\ ); #define YP_RECON_ACCUM __asm__ ( \ - VADD(UChi_00,result_00,result_00)\ - VADD(UChi_10,result_10,result_10)\ - VADD(UChi_01,result_01,result_01)\ - VADD(UChi_11,result_11,result_11)\ - VADD(UChi_02,result_02,result_02)\ - VADD(UChi_12,result_12,result_12)\ - VADD(UChi_10,result_20,result_20)\ - VADD(UChi_11,result_21,result_21)\ - VADD(UChi_12,result_22,result_22)\ - VSUB(UChi_00,result_30,result_30)\ - VSUB(UChi_01,result_31,result_31)\ - VSUB(UChi_02,result_32,result_32) ); + VADD(UChi_00,psi_00,psi_00)\ + VADD(UChi_10,psi_10,psi_10)\ + VADD(UChi_01,psi_01,psi_01)\ + VADD(UChi_11,psi_11,psi_11)\ + VADD(UChi_02,psi_02,psi_02)\ + VADD(UChi_12,psi_12,psi_12)\ + VADD(UChi_10,psi_20,psi_20)\ + VADD(UChi_11,psi_21,psi_21)\ + VADD(UChi_12,psi_22,psi_22)\ + VSUB(UChi_00,psi_30,psi_30)\ + VSUB(UChi_01,psi_31,psi_31)\ + VSUB(UChi_02,psi_32,psi_32) ); #define YM_RECON_ACCUM __asm__ ( \ - VADD(UChi_00,result_00,result_00)\ - VADD(UChi_10,result_10,result_10)\ - VADD(UChi_01,result_01,result_01)\ - VADD(UChi_11,result_11,result_11)\ - VADD(UChi_02,result_02,result_02)\ - VADD(UChi_12,result_12,result_12)\ - VSUB(UChi_10,result_20,result_20)\ - VSUB(UChi_11,result_21,result_21)\ - VSUB(UChi_12,result_22,result_22)\ - VADD(UChi_00,result_30,result_30)\ - VADD(UChi_01,result_31,result_31)\ - VADD(UChi_02,result_32,result_32) ); + VADD(UChi_00,psi_00,psi_00)\ + VADD(UChi_10,psi_10,psi_10)\ + VADD(UChi_01,psi_01,psi_01)\ + VADD(UChi_11,psi_11,psi_11)\ + VADD(UChi_02,psi_02,psi_02)\ + VADD(UChi_12,psi_12,psi_12)\ + VSUB(UChi_10,psi_20,psi_20)\ + VSUB(UChi_11,psi_21,psi_21)\ + VSUB(UChi_12,psi_22,psi_22)\ + VADD(UChi_00,psi_30,psi_30)\ + VADD(UChi_01,psi_31,psi_31)\ + VADD(UChi_02,psi_32,psi_32) ); #define ZP_RECON_ACCUM __asm__ ( \ - VACCTIMESMINUSI0(UChi_00,result_20,Z0)\ - VACCTIMESI0(UChi_10,result_30,Z3)\ - VACCTIMESMINUSI0(UChi_01,result_21,Z1)\ - VACCTIMESI0(UChi_11,result_31,Z4)\ - VACCTIMESMINUSI0(UChi_02,result_22,Z2)\ - VACCTIMESI0(UChi_12,result_32,Z5)\ - VADD(UChi_00,result_00,result_00)\ - VADD(UChi_10,result_10,result_10)\ - VADD(UChi_01,result_01,result_01)\ - VADD(UChi_11,result_11,result_11)\ - VADD(UChi_02,result_02,result_02)\ - VADD(UChi_12,result_12,result_12)\ - VACCTIMESMINUSI1(UChi_00,result_20,Z0)\ - VACCTIMESI1(UChi_10,result_30,Z3)\ - VACCTIMESMINUSI1(UChi_01,result_21,Z1)\ - VACCTIMESI1(UChi_11,result_31,Z4)\ - VACCTIMESMINUSI1(UChi_02,result_22,Z2)\ - VACCTIMESI1(UChi_12,result_32,Z5)\ - VACCTIMESMINUSI2(UChi_00,result_20,Z0)\ - VACCTIMESMINUSI2(UChi_01,result_21,Z1)\ - VACCTIMESMINUSI2(UChi_02,result_22,Z2)\ - VACCTIMESI2(UChi_10,result_30,Z3)\ - VACCTIMESI2(UChi_11,result_31,Z4)\ - VACCTIMESI2(UChi_12,result_32,Z5)\ + VACCTIMESMINUSI0(UChi_00,psi_20,Z0)\ + VACCTIMESI0(UChi_10,psi_30,Z3)\ + VACCTIMESMINUSI0(UChi_01,psi_21,Z1)\ + VACCTIMESI0(UChi_11,psi_31,Z4)\ + VACCTIMESMINUSI0(UChi_02,psi_22,Z2)\ + VACCTIMESI0(UChi_12,psi_32,Z5)\ + VADD(UChi_00,psi_00,psi_00)\ + VADD(UChi_10,psi_10,psi_10)\ + VADD(UChi_01,psi_01,psi_01)\ + VADD(UChi_11,psi_11,psi_11)\ + VADD(UChi_02,psi_02,psi_02)\ + VADD(UChi_12,psi_12,psi_12)\ + 
VACCTIMESMINUSI1(UChi_00,psi_20,Z0)\ + VACCTIMESI1(UChi_10,psi_30,Z3)\ + VACCTIMESMINUSI1(UChi_01,psi_21,Z1)\ + VACCTIMESI1(UChi_11,psi_31,Z4)\ + VACCTIMESMINUSI1(UChi_02,psi_22,Z2)\ + VACCTIMESI1(UChi_12,psi_32,Z5)\ + VACCTIMESMINUSI2(UChi_00,psi_20,Z0)\ + VACCTIMESMINUSI2(UChi_01,psi_21,Z1)\ + VACCTIMESMINUSI2(UChi_02,psi_22,Z2)\ + VACCTIMESI2(UChi_10,psi_30,Z3)\ + VACCTIMESI2(UChi_11,psi_31,Z4)\ + VACCTIMESI2(UChi_12,psi_32,Z5)\ ); #define ZM_RECON_ACCUM __asm__ ( \ - VACCTIMESI0(UChi_00,result_20,Z0)\ - VACCTIMESMINUSI0(UChi_10,result_30,Z3)\ - VACCTIMESI0(UChi_01,result_21,Z1)\ - VACCTIMESMINUSI0(UChi_11,result_31,Z4)\ - VACCTIMESI0(UChi_02,result_22,Z2)\ - VACCTIMESMINUSI0(UChi_12,result_32,Z5)\ - VADD(UChi_00,result_00,result_00)\ - VADD(UChi_10,result_10,result_10)\ - VADD(UChi_01,result_01,result_01)\ - VADD(UChi_11,result_11,result_11)\ - VADD(UChi_02,result_02,result_02)\ - VADD(UChi_12,result_12,result_12)\ - VACCTIMESI1(UChi_00,result_20,Z0)\ - VACCTIMESMINUSI1(UChi_10,result_30,Z3)\ - VACCTIMESI1(UChi_01,result_21,Z1)\ - VACCTIMESMINUSI1(UChi_11,result_31,Z4)\ - VACCTIMESI1(UChi_02,result_22,Z2)\ - VACCTIMESMINUSI1(UChi_12,result_32,Z5)\ - VACCTIMESI2(UChi_00,result_20,Z0)\ - VACCTIMESI2(UChi_01,result_21,Z1)\ - VACCTIMESI2(UChi_02,result_22,Z2)\ - VACCTIMESMINUSI2(UChi_10,result_30,Z3)\ - VACCTIMESMINUSI2(UChi_11,result_31,Z4)\ - VACCTIMESMINUSI2(UChi_12,result_32,Z5)\ + VACCTIMESI0(UChi_00,psi_20,Z0)\ + VACCTIMESMINUSI0(UChi_10,psi_30,Z3)\ + VACCTIMESI0(UChi_01,psi_21,Z1)\ + VACCTIMESMINUSI0(UChi_11,psi_31,Z4)\ + VACCTIMESI0(UChi_02,psi_22,Z2)\ + VACCTIMESMINUSI0(UChi_12,psi_32,Z5)\ + VADD(UChi_00,psi_00,psi_00)\ + VADD(UChi_10,psi_10,psi_10)\ + VADD(UChi_01,psi_01,psi_01)\ + VADD(UChi_11,psi_11,psi_11)\ + VADD(UChi_02,psi_02,psi_02)\ + VADD(UChi_12,psi_12,psi_12)\ + VACCTIMESI1(UChi_00,psi_20,Z0)\ + VACCTIMESMINUSI1(UChi_10,psi_30,Z3)\ + VACCTIMESI1(UChi_01,psi_21,Z1)\ + VACCTIMESMINUSI1(UChi_11,psi_31,Z4)\ + VACCTIMESI1(UChi_02,psi_22,Z2)\ + VACCTIMESMINUSI1(UChi_12,psi_32,Z5)\ + VACCTIMESI2(UChi_00,psi_20,Z0)\ + VACCTIMESI2(UChi_01,psi_21,Z1)\ + VACCTIMESI2(UChi_02,psi_22,Z2)\ + VACCTIMESMINUSI2(UChi_10,psi_30,Z3)\ + VACCTIMESMINUSI2(UChi_11,psi_31,Z4)\ + VACCTIMESMINUSI2(UChi_12,psi_32,Z5)\ ); #define TP_RECON_ACCUM __asm__ ( \ - VADD(UChi_00,result_00,result_00)\ - VADD(UChi_10,result_10,result_10)\ - VADD(UChi_01,result_01,result_01)\ - VADD(UChi_11,result_11,result_11)\ - VADD(UChi_02,result_02,result_02)\ - VADD(UChi_12,result_12,result_12)\ - VADD(UChi_00,result_20,result_20)\ - VADD(UChi_10,result_30,result_30)\ - VADD(UChi_01,result_21,result_21)\ - VADD(UChi_11,result_31,result_31)\ - VADD(UChi_02,result_22,result_22)\ - VADD(UChi_12,result_32,result_32) ); + VADD(UChi_00,psi_00,psi_00)\ + VADD(UChi_10,psi_10,psi_10)\ + VADD(UChi_01,psi_01,psi_01)\ + VADD(UChi_11,psi_11,psi_11)\ + VADD(UChi_02,psi_02,psi_02)\ + VADD(UChi_12,psi_12,psi_12)\ + VADD(UChi_00,psi_20,psi_20)\ + VADD(UChi_10,psi_30,psi_30)\ + VADD(UChi_01,psi_21,psi_21)\ + VADD(UChi_11,psi_31,psi_31)\ + VADD(UChi_02,psi_22,psi_22)\ + VADD(UChi_12,psi_32,psi_32) ); #define TM_RECON_ACCUM __asm__ ( \ - VADD(UChi_00,result_00,result_00)\ - VADD(UChi_10,result_10,result_10)\ - VADD(UChi_01,result_01,result_01)\ - VADD(UChi_11,result_11,result_11)\ - VADD(UChi_02,result_02,result_02)\ - VADD(UChi_12,result_12,result_12)\ - VSUB(UChi_00,result_20,result_20)\ - VSUB(UChi_10,result_30,result_30)\ - VSUB(UChi_01,result_21,result_21)\ - VSUB(UChi_11,result_31,result_31)\ - VSUB(UChi_02,result_22,result_22)\ - 
VSUB(UChi_12,result_32,result_32) ); + VADD(UChi_00,psi_00,psi_00)\ + VADD(UChi_10,psi_10,psi_10)\ + VADD(UChi_01,psi_01,psi_01)\ + VADD(UChi_11,psi_11,psi_11)\ + VADD(UChi_02,psi_02,psi_02)\ + VADD(UChi_12,psi_12,psi_12)\ + VSUB(UChi_00,psi_20,psi_20)\ + VSUB(UChi_10,psi_30,psi_30)\ + VSUB(UChi_01,psi_21,psi_21)\ + VSUB(UChi_11,psi_31,psi_31)\ + VSUB(UChi_02,psi_22,psi_22)\ + VSUB(UChi_12,psi_32,psi_32) ); #define AVX512_PF_L1 #define AVX512_PF_L2_GAUGE @@ -582,22 +571,62 @@ Author: paboyle LOAD64(%r8,PTR) \ LOAD64(%r9,pf) \ __asm__ ( \ - VSTORE(0,%r8,result_00) VPREFETCH_M1(0,%r9) \ - VSTORE(1,%r8,result_01) VPREFETCH_M1(1,%r9) \ - VSTORE(2,%r8,result_02) VPREFETCH_M1(2,%r9) \ - VSTORE(3,%r8,result_10) VPREFETCH_M1(3,%r9) \ - VSTORE(4,%r8,result_11) VPREFETCH_M1(4,%r9) \ - VSTORE(5,%r8,result_12) VPREFETCH_M1(5,%r9) \ - VSTORE(6,%r8,result_20) VPREFETCH_M1(6,%r9) \ - VSTORE(7,%r8,result_21) VPREFETCH_M1(7,%r9) \ - VSTORE(8,%r8,result_22) VPREFETCH_M1(8,%r9) \ - VSTORE(9,%r8,result_30) VPREFETCH_M1(9,%r9) \ - VSTORE(10,%r8,result_31) VPREFETCH_M1(10,%r9) \ - VSTORE(11,%r8,result_32) VPREFETCH_M1(11,%r9) \ + VSTORE(0,%r8,psi_00) VPREFETCH_M1(0,%r9) \ + VSTORE(1,%r8,psi_01) VPREFETCH_M1(1,%r9) \ + VSTORE(2,%r8,psi_02) VPREFETCH_M1(2,%r9) \ + VSTORE(3,%r8,psi_10) VPREFETCH_M1(3,%r9) \ + VSTORE(4,%r8,psi_11) VPREFETCH_M1(4,%r9) \ + VSTORE(5,%r8,psi_12) VPREFETCH_M1(5,%r9) \ + VSTORE(6,%r8,psi_20) VPREFETCH_M1(6,%r9) \ + VSTORE(7,%r8,psi_21) VPREFETCH_M1(7,%r9) \ + VSTORE(8,%r8,psi_22) VPREFETCH_M1(8,%r9) \ + VSTORE(9,%r8,psi_30) VPREFETCH_M1(9,%r9) \ + VSTORE(10,%r8,psi_31) VPREFETCH_M1(10,%r9) \ + VSTORE(11,%r8,psi_32) VPREFETCH_M1(11,%r9) \ ); +#define ADD_RESULTi(PTR,pf) \ + LOAD_CHIMU(PTR); \ + asm(VADD(psi_00,Chimu_00,psi_00) VADD(psi_01,Chimu_01,psi_01) VADD(psi_02,Chimu_02,psi_02) \ + VADD(psi_10,Chimu_10,psi_10) VADD(psi_11,Chimu_11,psi_11) VADD(psi_12,Chimu_12,psi_12) \ + VADD(psi_20,Chimu_20,psi_20) VADD(psi_21,Chimu_21,psi_21) VADD(psi_22,Chimu_22,psi_22) \ + VADD(psi_30,Chimu_30,psi_30) VADD(psi_31,Chimu_31,psi_31) VADD(psi_32,Chimu_32,psi_32) ); \ + SAVE_RESULT(PTR,pf); + + + +#define ADD_RESULTia(PTR,pf) \ + LOAD64(%r8,PTR) \ + __asm__ ( \ + VADDMEM(0,%r8,psi_00,psi_00) \ + VADDMEM(1,%r8,psi_01,psi_01) \ + VADDMEM(2,%r8,psi_02,psi_02) \ + VADDMEM(3,%r8,psi_10,psi_10) \ + VADDMEM(4,%r8,psi_11,psi_11) \ + VADDMEM(5,%r8,psi_12,psi_12) \ + VADDMEM(6,%r8,psi_20,psi_20) \ + VADDMEM(7,%r8,psi_21,psi_21) \ + VADDMEM(8,%r8,psi_22,psi_22) \ + VADDMEM(9,%r8,psi_30,psi_30) \ + VADDMEM(10,%r8,psi_31,psi_31) \ + VADDMEM(11,%r8,psi_32,psi_32) \ + VSTORE(0,%r8,psi_00) \ + VSTORE(1,%r8,psi_01) \ + VSTORE(2,%r8,psi_02) \ + VSTORE(3,%r8,psi_10) \ + VSTORE(4,%r8,psi_11) \ + VSTORE(5,%r8,psi_12) \ + VSTORE(6,%r8,psi_20) \ + VSTORE(7,%r8,psi_21) \ + VSTORE(8,%r8,psi_22) \ + VSTORE(9,%r8,psi_30) \ + VSTORE(10,%r8,psi_31) \ + VSTORE(11,%r8,psi_32) \ + ); + + #ifdef AVX512_PF_L2_TABLE -#define PREFETCH_CHIMU(A) \ +#define PREFETCH_CHIMU(A) \ LOAD64(%r9,A) \ __asm__ ( \ VPREFETCH_P1(0,%r9) \ From 85c7bc43212f8316fa5afa9ea6db269e66f4a03e Mon Sep 17 00:00:00 2001 From: paboyle Date: Tue, 7 Feb 2017 01:01:15 -0500 Subject: [PATCH 037/101] Bug fixes for cases that physics code couldn't hit but latent and discovered on KNL (long vector, y SIMD dir) and checker dir set to y. Remove the assertions on these code paths now they are tested. 
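[Series annotation, not part of the commit] Two independent fixes follow.
In Cshift_local the slice-permute count is reduced modulo the SIMD layout
ly; a numeric example is placed next to the new comment block in the hunk
below. In the checkerboarded Scatter_plane_simple the site selection moves
out of the copy loop: a serial pass builds an index table (the bo counter
is data dependent, so that pass cannot be parallelised directly) and the
copy then runs under PARALLEL_FOR_LOOP. A sketch of that two-pass pattern,
with names as in the hunk:

    std::vector<std::pair<int,int> > table;    // (destination, buffer) pairs
    int bo = 0;
    for (int n = 0; n < e1; n++) {
      for (int b = 0; b < e2; b++) {
        int o   = n * stride;
        int ocb = 1 << rhs._grid->CheckerBoardFromOindex(o + b);
        if (ocb & cbmask) table.push_back(std::pair<int,int>(so + o + b, bo++));
      }
    }
    PARALLEL_FOR_LOOP
    for (int i = 0; i < table.size(); i++)
      rhs._odata[table[i].first] = buffer[table[i].second];
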
--- lib/cshift/Cshift_common.h | 59 ++++++++++++++++++++++++++------------ lib/cshift/Cshift_mpi.h | 22 ++++++++++---- 2 files changed, 56 insertions(+), 25 deletions(-) diff --git a/lib/cshift/Cshift_common.h b/lib/cshift/Cshift_common.h index 2b146daa..813929d8 100644 --- a/lib/cshift/Cshift_common.h +++ b/lib/cshift/Cshift_common.h @@ -1,5 +1,4 @@ - - /************************************************************************************* +/************************************************************************************* Grid physics library, www.github.com/paboyle/Grid @@ -53,8 +52,7 @@ Gather_plane_simple (const Lattice &rhs,commVector &buffer,int dimen cbmask = 0x3; } - int so = plane*rhs._grid->_ostride[dimension]; // base offset for start of plane - + int so=plane*rhs._grid->_ostride[dimension]; // base offset for start of plane int e1=rhs._grid->_slice_nblock[dimension]; int e2=rhs._grid->_slice_block[dimension]; @@ -74,7 +72,7 @@ PARALLEL_NESTED_LOOP2 for(int n=0;nCheckerBoardFromOindexTable(o+b); + int ocb=1<CheckerBoardFromOindex(o+b); if ( ocb &cbmask ) { table.push_back(std::pair (bo++,o+b)); } @@ -105,29 +103,30 @@ Gather_plane_extract(const Lattice &rhs,std::vector_slice_nblock[dimension]; int e2=rhs._grid->_slice_block[dimension]; int n1=rhs._grid->_slice_stride[dimension]; - int n2=rhs._grid->_slice_block[dimension]; if ( cbmask ==0x3){ PARALLEL_NESTED_LOOP2 for(int n=0;n(temp,pointers,offset); } } } else { - assert(0); //Fixme think this is buggy - + // Case of SIMD split AND checker dim cannot currently be hit, except in + // Test_cshift_red_black code. + std::cout << " Dense packed buffer WARNING " <_slice_stride[dimension]; + + int o=n*n1; int ocb=1<CheckerBoardFromOindex(o+b); - int offset = b+n*rhs._grid->_slice_block[dimension]; + int offset = b+n*e2; if ( ocb & cbmask ) { cobj temp =compress(rhs._odata[so+o+b]); @@ -171,6 +170,7 @@ template void Scatter_plane_simple (Lattice &rhs,commVector_slice_nblock[dimension]; int e2=rhs._grid->_slice_block[dimension]; + int stride=rhs._grid->_slice_stride[dimension]; if ( cbmask ==0x3 ) { PARALLEL_NESTED_LOOP2 @@ -182,17 +182,22 @@ PARALLEL_NESTED_LOOP2 } } } else { + std::vector > table; int bo=0; for(int n=0;n_slice_stride[dimension]; - int bo =n*rhs._grid->_slice_block[dimension]; int ocb=1<CheckerBoardFromOindex(o+b);// Could easily be a table lookup if ( ocb & cbmask ) { - rhs._odata[so+o+b]=buffer[bo++]; + table.push_back(std::pair (so+o+b,bo++)); } } } +PARALLEL_FOR_LOOP + for(int i=0;i_slice_stride[dimension]; @@ -338,8 +347,8 @@ template Lattice Cshift_local(Lattice &ret,const Lattice // Map to always positive shift modulo global full dimension. shift = (shift+fd)%fd; - ret.checkerboard = grid->CheckerBoardDestination(rhs.checkerboard,shift,dimension); // the permute type + ret.checkerboard = grid->CheckerBoardDestination(rhs.checkerboard,shift,dimension); int permute_dim =grid->PermuteDim(dimension); int permute_type=grid->PermuteType(dimension); int permute_type_dist; @@ -348,7 +357,6 @@ template Lattice Cshift_local(Lattice &ret,const Lattice int o = 0; int bo = x * grid->_ostride[dimension]; - int cb= (cbmask==0x2)? Odd : Even; int sshift = grid->CheckerBoardShiftForCB(rhs.checkerboard,dimension,shift,cb); @@ -361,9 +369,23 @@ template Lattice Cshift_local(Lattice &ret,const Lattice // wrap is whether sshift > rd. // num is sshift mod rd. 
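      // [Series annotation] Numeric example of the modulo-ly reduction
      // introduced just below, for illustrative sizes rd=2, ly=2
      // (one dimension of fd=8 split over 2 ranks):
      //   sshift      : 0 1 2 3 4 5 6 7
      //   sshift/rd   : 0 0 1 1 2 2 3 3   raw slab crossings
      //   wrap %= ly  : 0 0 1 1 0 0 1 1   lane permutes actually required
      // Pre-fix, sshift=4,5 left wrap=2: non-zero, so a permute was applied,
      // even though two swaps of ly=2 lanes is the identity.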
// + // shift 7 + // + // XoXo YcYc + // oXoX cYcY + // XoXo YcYc + // oXoX cYcY + // + // sshift -- + // + // XX YY ; 3 + // XX YY ; 0 + // XX YY ; 3 + // XX YY ; 0 + // int permute_slice=0; if(permute_dim){ - int wrap = sshift/rd; + int wrap = sshift/rd; wrap=wrap % ly; int num = sshift%rd; if ( x< rd-num ) permute_slice=wrap; @@ -375,7 +397,6 @@ template Lattice Cshift_local(Lattice &ret,const Lattice } else { permute_type_dist = permute_type; } - } if ( permute_slice ) Copy_plane_permute(ret,rhs,dimension,x,sx,cbmask,permute_type_dist); diff --git a/lib/cshift/Cshift_mpi.h b/lib/cshift/Cshift_mpi.h index b3c07cd6..b2a44961 100644 --- a/lib/cshift/Cshift_mpi.h +++ b/lib/cshift/Cshift_mpi.h @@ -74,7 +74,6 @@ template void Cshift_comms(Lattice& ret,const Lattice &r sshift[1] = rhs._grid->CheckerBoardShiftForCB(rhs.checkerboard,dimension,shift,Odd); // std::cout << "Cshift_comms dim "< void Cshift_comms(Lattice &ret,const Lattice &r (void *)&recv_buf[0], recv_from_rank, bytes); - - // for(int i=0;iBarrier(); + /* + for(int i=0;i void Cshift_comms_simd(Lattice &ret,const LatticeBarrier(); rpointers[i] = &recv_buf_extract[i][0]; } else { rpointers[i] = &send_buf_extract[nbr_lane][0]; From 060da786e99dae77e55d64ba31768b527dfeec6a Mon Sep 17 00:00:00 2001 From: paboyle Date: Tue, 7 Feb 2017 01:07:39 -0500 Subject: [PATCH 038/101] Comms benchmark improvements --- benchmarks/Benchmark_comms.cc | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/benchmarks/Benchmark_comms.cc b/benchmarks/Benchmark_comms.cc index 969a2a42..99ab190b 100644 --- a/benchmarks/Benchmark_comms.cc +++ b/benchmarks/Benchmark_comms.cc @@ -48,9 +48,9 @@ int main (int argc, char ** argv) std::cout< latt_size ({lat*mpi_layout[0], lat*mpi_layout[1], @@ -124,8 +124,8 @@ int main (int argc, char ** argv) std::cout< latt_size ({lat,lat,lat,lat}); @@ -194,14 +194,14 @@ int main (int argc, char ** argv) } - Nloop=100; + Nloop=10; std::cout< latt_size ({lat*mpi_layout[0], lat*mpi_layout[1], @@ -281,8 +281,8 @@ int main (int argc, char ** argv) std::cout< latt_size ({lat*mpi_layout[0], lat*mpi_layout[1], @@ -324,8 +324,8 @@ int main (int argc, char ** argv) (void *)&rbuf[mu][0], recv_from_rank, bytes); - // Grid.StencilSendToRecvFromComplete(requests); - // requests.resize(0); + Grid.StencilSendToRecvFromComplete(requests); + requests.resize(0); comm_proc = mpi_layout[mu]-1; From fdc170b8a3f37a90f3c734baace297515fafe1a7 Mon Sep 17 00:00:00 2001 From: paboyle Date: Tue, 7 Feb 2017 01:16:39 -0500 Subject: [PATCH 039/101] Parallel fors in lattice transfer --- lib/lattice/Lattice_transfer.h | 25 +++++++++++++------------ 1 file changed, 13 insertions(+), 12 deletions(-) diff --git a/lib/lattice/Lattice_transfer.h b/lib/lattice/Lattice_transfer.h index cc4617de..4dec1f0c 100644 --- a/lib/lattice/Lattice_transfer.h +++ b/lib/lattice/Lattice_transfer.h @@ -333,9 +333,6 @@ void localConvert(const Lattice &in,Lattice &out) typedef typename vobj::scalar_object sobj; typedef typename vvobj::scalar_object ssobj; - sobj s; - ssobj ss; - GridBase *ig = in._grid; GridBase *og = out._grid; @@ -347,10 +344,14 @@ void localConvert(const Lattice &in,Lattice &out) for(int d=0;d_processors[d] == og->_processors[d]); assert(ig->_ldimensions[d] == og->_ldimensions[d]); + assert(ig->lSites() == og->lSites()); } - //PARALLEL_FOR_LOOP + PARALLEL_FOR_LOOP for(int idx=0;idxlSites();idx++){ + sobj s; + ssobj ss; + std::vector lcoor(ni); ig->LocalIndexToLocalCoor(idx,lcoor); peekLocalSite(s,in,lcoor); @@ -364,7 +365,6 @@ 
template void InsertSlice(Lattice &lowDim,Lattice & higherDim,int slice, int orthog) { typedef typename vobj::scalar_object sobj; - sobj s; GridBase *lg = lowDim._grid; GridBase *hg = higherDim._grid; @@ -386,8 +386,9 @@ void InsertSlice(Lattice &lowDim,Lattice & higherDim,int slice, int } // the above should guarantee that the operations are local - //PARALLEL_FOR_LOOP + PARALLEL_FOR_LOOP for(int idx=0;idxlSites();idx++){ + sobj s; std::vector lcoor(nl); std::vector hcoor(nh); lg->LocalIndexToLocalCoor(idx,lcoor); @@ -407,7 +408,6 @@ template void ExtractSlice(Lattice &lowDim, Lattice & higherDim,int slice, int orthog) { typedef typename vobj::scalar_object sobj; - sobj s; GridBase *lg = lowDim._grid; GridBase *hg = higherDim._grid; @@ -428,8 +428,9 @@ void ExtractSlice(Lattice &lowDim, Lattice & higherDim,int slice, in } } // the above should guarantee that the operations are local - //PARALLEL_FOR_LOOP + PARALLEL_FOR_LOOP for(int idx=0;idxlSites();idx++){ + sobj s; std::vector lcoor(nl); std::vector hcoor(nh); lg->LocalIndexToLocalCoor(idx,lcoor); @@ -451,7 +452,6 @@ template void InsertSliceLocal(Lattice &lowDim, Lattice & higherDim,int slice_lo,int slice_hi, int orthog) { typedef typename vobj::scalar_object sobj; - sobj s; GridBase *lg = lowDim._grid; GridBase *hg = higherDim._grid; @@ -468,8 +468,9 @@ void InsertSliceLocal(Lattice &lowDim, Lattice & higherDim,int slice } // the above should guarantee that the operations are local - //PARALLEL_FOR_LOOP + PARALLEL_FOR_LOOP for(int idx=0;idxlSites();idx++){ + sobj s; std::vector lcoor(nl); std::vector hcoor(nh); lg->LocalIndexToLocalCoor(idx,lcoor); @@ -487,7 +488,6 @@ template void ExtractSliceLocal(Lattice &lowDim, Lattice & higherDim,int slice_lo,int slice_hi, int orthog) { typedef typename vobj::scalar_object sobj; - sobj s; GridBase *lg = lowDim._grid; GridBase *hg = higherDim._grid; @@ -504,8 +504,9 @@ void ExtractSliceLocal(Lattice &lowDim, Lattice & higherDim,int slic } // the above should guarantee that the operations are local - //PARALLEL_FOR_LOOP + PARALLEL_FOR_LOOP for(int idx=0;idxlSites();idx++){ + sobj s; std::vector lcoor(nl); std::vector hcoor(nh); lg->LocalIndexToLocalCoor(idx,lcoor); From 6ea2184e181de36461ed9f5a056a87f0ac949fb7 Mon Sep 17 00:00:00 2001 From: paboyle Date: Tue, 7 Feb 2017 01:17:16 -0500 Subject: [PATCH 040/101] OMP define change --- lib/AlignedAllocator.cc | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/lib/AlignedAllocator.cc b/lib/AlignedAllocator.cc index 9df4ec1c..f6d234d5 100644 --- a/lib/AlignedAllocator.cc +++ b/lib/AlignedAllocator.cc @@ -13,9 +13,10 @@ void *PointerCache::Insert(void *ptr,size_t bytes) { if (bytes < 4096 ) return NULL; -#ifdef _OPENMP +#ifdef GRID_OMP assert(omp_in_parallel()==0); #endif + void * ret = NULL; int v = -1; From 485ad6fde09c1ae7150ecdd554be6865a2f3a348 Mon Sep 17 00:00:00 2001 From: paboyle Date: Tue, 7 Feb 2017 01:20:39 -0500 Subject: [PATCH 041/101] Stencil working in SHM MPI3 --- lib/Stencil.h | 126 ++++++++++++++++++++++++++++++-------------------- 1 file changed, 77 insertions(+), 49 deletions(-) diff --git a/lib/Stencil.h b/lib/Stencil.h index 82e818d2..71f086af 100644 --- a/lib/Stencil.h +++ b/lib/Stencil.h @@ -1,4 +1,4 @@ - /************************************************************************************* +/************************************************************************************* Grid physics library, www.github.com/paboyle/Grid @@ -25,12 +25,10 @@ See the full license in the file "LICENSE" in the top level 
distribution directory *************************************************************************************/ /* END LEGAL */ - #ifndef GRID_STENCIL_H - #define GRID_STENCIL_H +#ifndef GRID_STENCIL_H +#define GRID_STENCIL_H - #include - - #include // subdir aggregate +#include // subdir aggregate ////////////////////////////////////////////////////////////////////////////////////////// // Must not lose sight that goal is to be able to construct really efficient @@ -80,9 +78,10 @@ template void Gather_plane_simple_table (std::vector >& table,const Lattice &rhs,cobj *buffer,compressor &compress, int off,int so) { int num=table.size(); - PARALLEL_FOR_LOOP +PARALLEL_FOR_LOOP for(int i=0;i class CartesianStencil { // Stencil runs along coordinate axes only; NO diagonal fill in. public: @@ -143,30 +144,38 @@ class CartesianStencil { // Stencil runs along coordinate axes only; NO diagonal Packets[i].recv_buf, Packets[i].from_rank, Packets[i].bytes); - /* - }else{ - _grid->SendToRecvFromBegin(reqs[i], - Packets[i].send_buf, - Packets[i].to_rank, - Packets[i].recv_buf, - Packets[i].from_rank, - Packets[i].bytes); - } - */ } commtime+=usecond(); } void CommunicateComplete(std::vector > &reqs) { commtime-=usecond(); - for(int i=0;iStencilSendToRecvFromComplete(reqs[i]); - // else - // _grid->SendToRecvFromComplete(reqs[i]); } + _grid->StencilBarrier();// Synch shared memory on a single nodes commtime+=usecond(); + /* + if(dump){ + for(int i=0;i_ndimension;d++){ + ss<<"."<<_grid->_processor_coor[d]; + } + ss<<"_mu_"<_ndimension;d++){ + // ss<<"."<<_grid->_processor_coor[d]; + // } + // ss<<"_m_"<_simd_layout[dimension]; int comm_dim = _grid->_processors[dimension] >1 ; int splice_dim = _grid->_simd_layout[dimension]>1 && (comm_dim); @@ -373,9 +394,11 @@ PARALLEL_FOR_LOOP int sshift[2]; + ////////////////////////// // Underlying approach. For each local site build // up a table containing the npoint "neighbours" and whether they // live in lattice or a comms buffer. + ////////////////////////// if ( !comm_dim ) { sshift[0] = _grid->CheckerBoardShiftForCB(_checkerboard,dimension,shift,Even); sshift[1] = _grid->CheckerBoardShiftForCB(_checkerboard,dimension,shift,Odd); @@ -386,11 +409,11 @@ PARALLEL_FOR_LOOP Local(point,dimension,shift,0x1);// if checkerboard is unfavourable take two passes Local(point,dimension,shift,0x2);// both with block stride loop iteration } - } else { // All permute extract done in comms phase prior to Stencil application + } else { + // All permute extract done in comms phase prior to Stencil application // So tables are the same whether comm_dim or splice_dim sshift[0] = _grid->CheckerBoardShiftForCB(_checkerboard,dimension,shift,Even); sshift[1] = _grid->CheckerBoardShiftForCB(_checkerboard,dimension,shift,Odd); - if ( sshift[0] == sshift[1] ) { Comms(point,dimension,shift,0x3); } else { @@ -482,9 +505,11 @@ PARALLEL_FOR_LOOP assert(shift>=0); assert(shift_slice_nblock[dimension]*_grid->_slice_block[dimension]; // done in reduced dims, so SIMD factored - + // done in reduced dims, so SIMD factored + int buffer_size = _grid->_slice_nblock[dimension]*_grid->_slice_block[dimension]; + _comm_buf_size[point] = buffer_size; // Size of _one_ plane. Multiple planes may be gathered and + // send to one or more remote nodes. int cb= (cbmask==0x2)? 
Odd : Even; @@ -707,6 +732,8 @@ PARALLEL_FOR_LOOP template void HaloGather(const Lattice &source,compressor &compress) { + _grid->StencilBarrier();// Synch shared memory on a single nodes + // conformable(source._grid,_grid); assert(source._grid==_grid); halogtime-=usecond(); @@ -767,8 +794,7 @@ PARALLEL_FOR_LOOP if ( !face_table_computed ) { t_table-=usecond(); face_table.resize(face_idx+1); - Gather_plane_simple_table_compute ((GridBase *)_grid,dimension,sx,cbmask,u_comm_offset, - face_table[face_idx]); + Gather_plane_simple_table_compute ((GridBase *)_grid,dimension,sx,cbmask,u_comm_offset,face_table[face_idx]); t_table+=usecond(); } @@ -789,12 +815,11 @@ PARALLEL_FOR_LOOP cobj *send_buf = (cobj *)_grid->ShmBufferTranslate(xmit_to_rank,u_recv_buf_p); if ( send_buf==NULL ) { send_buf = u_send_buf_p; - } - // std::cout << " send_bufs "< rpointers(Nsimd); std::vector spointers(Nsimd); - + + // std::cout << "GatherSimd " << dimension << " shift "<= rd ); - + if ( any_offnode ) { for(int i=0;i2 - // std::cout << "GatherSimd : lane 1st elem " << i << u_simd_send_buf[i ][u_comm_offset]<2 + // for(int w=0;w : lane " << i <<" elem "<>(permute_type+1)); int ic= (i&inner_bit)? 1:0; - int my_coor = rd*ic + x; - int nbr_coor = my_coor+sshift; + int my_coor = rd*ic + x; + int nbr_coor = my_coor+sshift; int nbr_proc = ((nbr_coor)/ld) % pd;// relative shift in processors int nbr_lcoor= (nbr_coor%ld); int nbr_ic = (nbr_lcoor)/rd; // inner coord of peer @@ -885,10 +912,10 @@ PARALLEL_FOR_LOOP if (nbr_ic) nbr_lane|=inner_bit; assert (sx == nbr_ox); - + auto rp = &u_simd_recv_buf[i ][u_comm_offset]; auto sp = &u_simd_send_buf[nbr_lane][u_comm_offset]; - + if(nbr_proc){ int recv_from_rank; @@ -896,16 +923,17 @@ PARALLEL_FOR_LOOP _grid->ShiftedRanks(dimension,nbr_proc,xmit_to_rank,recv_from_rank); + // shm == receive pointer if offnode + // shm == Translate[send pointer] if on node -- my view of his send pointer scalar_object *shm = (scalar_object *) _grid->ShmBufferTranslate(recv_from_rank,sp); - // if ((ShmDirectCopy==0)||(shm==NULL)) { if (shm==NULL) { shm = rp; - } - + } + // if Direct, StencilSendToRecvFrom will suppress copy to a peer on node // assuming above pointer flip AddPacket((void *)sp,(void *)rp,xmit_to_rank,recv_from_rank,bytes); - + rpointers[i] = shm; } else { From 8e7ca922786f62732e95fcebe8269cd04d2524c0 Mon Sep 17 00:00:00 2001 From: paboyle Date: Tue, 7 Feb 2017 01:21:32 -0500 Subject: [PATCH 042/101] Debugged cshift case --- lib/cartesian/Cartesian_base.h | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/lib/cartesian/Cartesian_base.h b/lib/cartesian/Cartesian_base.h index 72b21ee3..9d43837d 100644 --- a/lib/cartesian/Cartesian_base.h +++ b/lib/cartesian/Cartesian_base.h @@ -52,7 +52,7 @@ public: // Physics Grid information. std::vector _simd_layout;// Which dimensions get relayed out over simd lanes. 
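// Illustrative relation between the dimension arrays below (example
// numbers, not from any test): a checkerboarded direction with
// _fdimensions = 16 has _gdimensions = 16/2 = 8; split over 2 ranks,
// _ldimensions = 8/2 = 4; with a SIMD layout of 2 in that direction,
// _rdimensions = 4/2 = 2.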
- std::vector _fdimensions;// Global dimensions of array prior to cb removal + std::vector _fdimensions;// (full) Global dimensions of array prior to cb removal std::vector _gdimensions;// Global dimensions of array after cb removal std::vector _ldimensions;// local dimensions of array with processor images removed std::vector _rdimensions;// Reduced local dimensions with simd lane images and processor images removed @@ -121,7 +121,6 @@ public: Lexicographic::CoorFromIndex(coor,Oindex,_rdimensions); } - ////////////////////////////////////////////////////////// // SIMD lane addressing ////////////////////////////////////////////////////////// @@ -207,16 +206,16 @@ public: std::vector lcoor; GlobalCoorToProcessorCoorLocalCoor(pcoor,lcoor,gcoor); rank = RankFromProcessorCoor(pcoor); - + /* std::vector cblcoor(lcoor); for(int d=0;dCheckerBoarded(d) ) { cblcoor[d] = lcoor[d]/2; } } - - i_idx= iIndex(cblcoor);// this does not imply divide by 2 on checker dim - o_idx= oIndex(lcoor); // this implies divide by 2 on checkerdim + */ + i_idx= iIndex(lcoor); + o_idx= oIndex(lcoor); } void RankIndexToGlobalCoor(int rank, int o_idx, int i_idx , std::vector &gcoor) From 61f82216e2a65efbf1bcf5de379c86de6ca4b009 Mon Sep 17 00:00:00 2001 From: paboyle Date: Tue, 7 Feb 2017 01:22:53 -0500 Subject: [PATCH 043/101] Communicator Policy, NodeCount distinct from Rank count --- lib/communicator/Communicator_base.cc | 3 +++ lib/communicator/Communicator_base.h | 7 +++++++ 2 files changed, 10 insertions(+) diff --git a/lib/communicator/Communicator_base.cc b/lib/communicator/Communicator_base.cc index b003d867..abafb3f7 100644 --- a/lib/communicator/Communicator_base.cc +++ b/lib/communicator/Communicator_base.cc @@ -33,6 +33,7 @@ namespace Grid { /////////////////////////////////////////////////////////////// void * CartesianCommunicator::ShmCommBuf; uint64_t CartesianCommunicator::MAX_MPI_SHM_BYTES = 128*1024*1024; +CartesianCommunicator::CommunicatorPolicy_t CartesianCommunicator::CommunicatorPolicy= CartesianCommunicator::CommunicatorPolicySendrecv; ///////////////////////////////// // Alloc, free shmem region @@ -88,6 +89,8 @@ void CartesianCommunicator::GlobalSumVector(ComplexD *c,int N) #if !defined( GRID_COMMS_MPI3) && !defined (GRID_COMMS_MPI3L) +int CartesianCommunicator::NodeCount(void) { return ProcessorCount();}; + void CartesianCommunicator::StencilSendToRecvFromBegin(std::vector &list, void *xmit, int xmit_to_rank, diff --git a/lib/communicator/Communicator_base.h b/lib/communicator/Communicator_base.h index 94ad1093..6ae48b54 100644 --- a/lib/communicator/Communicator_base.h +++ b/lib/communicator/Communicator_base.h @@ -116,6 +116,12 @@ class CartesianCommunicator { // Implemented in Communicator_base.C ///////////////////////////////// static void * ShmCommBuf; + + // Isend/Irecv/Wait, or Sendrecv blocking + enum CommunicatorPolicy_t { CommunicatorPolicyIsend , CommunicatorPolicySendrecv }; + static CommunicatorPolicy_t CommunicatorPolicy; + static void SetCommunicatorPolicy(CommunicatorPolicy_t policy ) { CommunicatorPolicy = policy; } + size_t heap_top; size_t heap_bytes; @@ -148,6 +154,7 @@ class CartesianCommunicator { const std::vector & ThisProcessorCoor(void) ; const std::vector & ProcessorGrid(void) ; int ProcessorCount(void) ; + int NodeCount(void) ; //////////////////////////////////////////////////////////////////////////////// // very VERY rarely (Log, serial RNG) we need world without a grid From 123c673db7da188ec4d0d8fd47d853932b6b84ac Mon Sep 17 00:00:00 2001 From: paboyle Date: Tue, 
7 Feb 2017 01:24:54 -0500 Subject: [PATCH 044/101] Policy to control async or sync SendRecv --- lib/communicator/Communicator_mpi.cc | 42 ++++++++++++++++++---------- 1 file changed, 28 insertions(+), 14 deletions(-) diff --git a/lib/communicator/Communicator_mpi.cc b/lib/communicator/Communicator_mpi.cc index 65ced9c7..61126a17 100644 --- a/lib/communicator/Communicator_mpi.cc +++ b/lib/communicator/Communicator_mpi.cc @@ -39,9 +39,13 @@ MPI_Comm CartesianCommunicator::communicator_world; // Should error check all MPI calls. void CartesianCommunicator::Init(int *argc, char ***argv) { int flag; + int provided; MPI_Initialized(&flag); // needed to coexist with other libs apparently if ( !flag ) { - MPI_Init(argc,argv); + // MPI_Init_thread(argc,argv,MPI_THREAD_SERIALIZED,&provided); + // assert (provided == MPI_THREAD_SERIALIZED); + MPI_Init_thread(argc,argv,MPI_THREAD_MULTIPLE,&provided); + assert (provided == MPI_THREAD_MULTIPLE); } MPI_Comm_dup (MPI_COMM_WORLD,&communicator_world); ShmInitGeneric(); @@ -152,24 +156,34 @@ void CartesianCommunicator::SendToRecvFromBegin(std::vector &lis int from, int bytes) { - MPI_Request xrq; - MPI_Request rrq; - int rank = _processor; + int myrank = _processor; int ierr; - ierr =MPI_Isend(xmit, bytes, MPI_CHAR,dest,_processor,communicator,&xrq); - ierr|=MPI_Irecv(recv, bytes, MPI_CHAR,from,from,communicator,&rrq); - - assert(ierr==0); + if ( CommunicatorPolicy == CommunicatorPolicyIsend ) { + MPI_Request xrq; + MPI_Request rrq; - list.push_back(xrq); - list.push_back(rrq); + ierr =MPI_Irecv(recv, bytes, MPI_CHAR,from,from,communicator,&rrq); + ierr|=MPI_Isend(xmit, bytes, MPI_CHAR,dest,_processor,communicator,&xrq); + + assert(ierr==0); + list.push_back(xrq); + list.push_back(rrq); + } else { + // Give the CPU to MPI immediately; can use threads to overlap optionally + ierr=MPI_Sendrecv(xmit,bytes,MPI_CHAR,dest,myrank, + recv,bytes,MPI_CHAR,from, from, + communicator,MPI_STATUS_IGNORE); + assert(ierr==0); + } } void CartesianCommunicator::SendToRecvFromComplete(std::vector &list) { - int nreq=list.size(); - std::vector status(nreq); - int ierr = MPI_Waitall(nreq,&list[0],&status[0]); - assert(ierr==0); + if ( CommunicatorPolicy == CommunicatorPolicyIsend ) { + int nreq=list.size(); + std::vector status(nreq); + int ierr = MPI_Waitall(nreq,&list[0],&status[0]); + assert(ierr==0); + } } void CartesianCommunicator::Barrier(void) From 73547cca66e0170b1974910847bc10c443339346 Mon Sep 17 00:00:00 2001 From: paboyle Date: Tue, 7 Feb 2017 01:30:02 -0500 Subject: [PATCH 045/101] MPI3 working i think --- lib/communicator/Communicator_mpi3.cc | 367 ++++++++++++++------------ 1 file changed, 202 insertions(+), 165 deletions(-) diff --git a/lib/communicator/Communicator_mpi3.cc b/lib/communicator/Communicator_mpi3.cc index c707ec1f..b86d5259 100644 --- a/lib/communicator/Communicator_mpi3.cc +++ b/lib/communicator/Communicator_mpi3.cc @@ -1,4 +1,4 @@ - /************************************************************************************* +/************************************************************************************* Grid physics library, www.github.com/paboyle/Grid @@ -26,8 +26,16 @@ Author: Peter Boyle *************************************************************************************/ /* END LEGAL */ #include "Grid.h" +//#include #include +#include +#include +#include +#include +#include +//#include + namespace Grid { /////////////////////////////////////////////////////////////////////////////////////////////////// @@ -50,6 +58,10 @@ std::vector 
CartesianCommunicator::GroupRanks; std::vector CartesianCommunicator::MyGroup; std::vector CartesianCommunicator::ShmCommBufs; +int CartesianCommunicator::NodeCount(void) { return GroupSize;}; + + +#undef FORCE_COMMS void *CartesianCommunicator::ShmBufferSelf(void) { return ShmCommBufs[ShmRank]; @@ -57,6 +69,9 @@ void *CartesianCommunicator::ShmBufferSelf(void) void *CartesianCommunicator::ShmBuffer(int rank) { int gpeer = GroupRanks[rank]; +#ifdef FORCE_COMMS + return NULL; +#endif if (gpeer == MPI_UNDEFINED){ return NULL; } else { @@ -65,7 +80,13 @@ void *CartesianCommunicator::ShmBuffer(int rank) } void *CartesianCommunicator::ShmBufferTranslate(int rank,void * local_p) { + static int count =0; int gpeer = GroupRanks[rank]; + assert(gpeer!=ShmRank); // never send to self + assert(rank!=WorldRank);// never send to self +#ifdef FORCE_COMMS + return NULL; +#endif if (gpeer == MPI_UNDEFINED){ return NULL; } else { @@ -76,16 +97,27 @@ void *CartesianCommunicator::ShmBufferTranslate(int rank,void * local_p) } void CartesianCommunicator::Init(int *argc, char ***argv) { + int flag; + int provided; + mtrace(); + MPI_Initialized(&flag); // needed to coexist with other libs apparently if ( !flag ) { - MPI_Init(argc,argv); + MPI_Init_thread(argc,argv,MPI_THREAD_MULTIPLE,&provided); + assert (provided == MPI_THREAD_MULTIPLE); } + Grid_quiesce_nodes(); + MPI_Comm_dup (MPI_COMM_WORLD,&communicator_world); MPI_Comm_rank(communicator_world,&WorldRank); MPI_Comm_size(communicator_world,&WorldSize); + if ( WorldRank == 0 ) { + std::cout << GridLogMessage<< "Initialising MPI "<< WorldRank <<"/"< - for(uint64_t page=0;page coor = _processor_coor; - + std::vector coor = _processor_coor; // my coord assert(std::abs(shift) <_processors[dim]); coor[dim] = (_processor_coor[dim] + shift + _processors[dim])%_processors[dim]; @@ -242,28 +304,31 @@ void CartesianCommunicator::ShiftedRanks(int dim,int shift,int &source,int &dest coor[dim] = (_processor_coor[dim] - shift + _processors[dim])%_processors[dim]; Lexicographic::IndexFromCoor(coor,dest,_processors); dest = LexicographicToWorldRank[dest]; -} +}// rank is world rank. + int CartesianCommunicator::RankFromProcessorCoor(std::vector &coor) { int rank; Lexicographic::IndexFromCoor(coor,rank,_processors); rank = LexicographicToWorldRank[rank]; return rank; -} +}// rank is world rank + void CartesianCommunicator::ProcessorCoorFromRank(int rank, std::vector &coor) { - Lexicographic::CoorFromIndex(coor,rank,_processors); - rank = LexicographicToWorldRank[rank]; + int lr=-1; + for(int r=0;r &processors) { int ierr; - communicator=communicator_world; - _ndimension = processors.size(); - + //////////////////////////////////////////////////////////////// // Assert power of two shm_size. 
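// Example (illustrative values): ShmSize = 8 ranks per node passes with
// log2size = 3, which the loop below then spreads across the lattice
// dimensions (e.g. ShmDims = {2,2,2,1} in 4d when every direction has
// room); ShmSize = 6 has no power-of-two decomposition and the assert
// fires.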
//////////////////////////////////////////////////////////////// @@ -275,24 +340,22 @@ CartesianCommunicator::CartesianCommunicator(const std::vector &processors) } } assert(log2size != -1); - + //////////////////////////////////////////////////////////////// // Identify subblock of ranks on node spreading across dims // in a maximally symmetrical way //////////////////////////////////////////////////////////////// - int dim = 0; - std::vector WorldDims = processors; - ShmDims.resize(_ndimension,1); + ShmDims.resize (_ndimension,1); GroupDims.resize(_ndimension); - - ShmCoor.resize(_ndimension); + ShmCoor.resize (_ndimension); GroupCoor.resize(_ndimension); WorldCoor.resize(_ndimension); + int dim = 0; for(int l2=0;l2 &processors) GroupDims[d] = WorldDims[d]/ShmDims[d]; } + //////////////////////////////////////////////////////////////// + // Verbose + //////////////////////////////////////////////////////////////// +#if 0 + std::cout<< GridLogMessage << "MPI-3 usage "< &processors) //////////////////////////////////////////////////////////////// // Establish mapping between lexico physics coord and WorldRank - // //////////////////////////////////////////////////////////////// - LexicographicToWorldRank.resize(WorldSize,0); Lexicographic::CoorFromIndex(GroupCoor,GroupRank,GroupDims); Lexicographic::CoorFromIndex(ShmCoor,ShmRank,ShmDims); for(int d=0;d<_ndimension;d++){ WorldCoor[d] = GroupCoor[d]*ShmDims[d]+ShmCoor[d]; } _processor_coor = WorldCoor; - - int lexico; - Lexicographic::IndexFromCoor(WorldCoor,lexico,WorldDims); - LexicographicToWorldRank[lexico]=WorldRank; - _processor = lexico; + _processor = WorldRank; /////////////////////////////////////////////////////////////////// // global sum Lexico to World mapping /////////////////////////////////////////////////////////////////// + int lexico; + LexicographicToWorldRank.resize(WorldSize,0); + Lexicographic::IndexFromCoor(WorldCoor,lexico,WorldDims); + LexicographicToWorldRank[lexico] = WorldRank; ierr=MPI_Allreduce(MPI_IN_PLACE,&LexicographicToWorldRank[0],WorldSize,MPI_INT,MPI_SUM,communicator); assert(ierr==0); - -}; + for(int i=0;i coor(_ndimension); + ProcessorCoorFromRank(wr,coor); // from world rank + int ck = RankFromProcessorCoor(coor); + assert(ck==wr); + + ///////////////////////////////////////////////////// + // Check everyone agrees on everyone elses coords + ///////////////////////////////////////////////////// + std::vector mcoor = coor; + this->Broadcast(0,(void *)&mcoor[0],mcoor.size()*sizeof(int)); + for(int d = 0 ; d< _ndimension; d++) { + assert(coor[d] == mcoor[d]); + } + } +}; void CartesianCommunicator::GlobalSum(uint32_t &u){ int ierr=MPI_Allreduce(MPI_IN_PLACE,&u,1,MPI_UINT32_T,MPI_SUM,communicator); assert(ierr==0); @@ -367,8 +468,6 @@ void CartesianCommunicator::GlobalSumVector(double *d,int N) int ierr = MPI_Allreduce(MPI_IN_PLACE,d,N,MPI_DOUBLE,MPI_SUM,communicator); assert(ierr==0); } - - // Basic Halo comms primitive void CartesianCommunicator::SendToRecvFrom(void *xmit, int dest, @@ -377,10 +476,14 @@ void CartesianCommunicator::SendToRecvFrom(void *xmit, int bytes) { std::vector reqs(0); + // unsigned long xcrc = crc32(0L, Z_NULL, 0); + // unsigned long rcrc = crc32(0L, Z_NULL, 0); + // xcrc = crc32(xcrc,(unsigned char *)xmit,bytes); SendToRecvFromBegin(reqs,xmit,dest,recv,from,bytes); SendToRecvFromComplete(reqs); + // rcrc = crc32(rcrc,(unsigned char *)recv,bytes); + // printf("proc %d SendToRecvFrom %d bytes %lx %lx\n",_processor,bytes,xcrc,rcrc); } - void 
CartesianCommunicator::SendRecvPacket(void *xmit, void *recv, int sender, @@ -397,7 +500,6 @@ void CartesianCommunicator::SendRecvPacket(void *xmit, MPI_Recv(recv, bytes, MPI_CHAR,sender,tag,communicator,&stat); } } - // Basic Halo comms primitive void CartesianCommunicator::SendToRecvFromBegin(std::vector &list, void *xmit, @@ -406,92 +508,26 @@ void CartesianCommunicator::SendToRecvFromBegin(std::vector &lis int from, int bytes) { -#if 0 - this->StencilBarrier(); - - MPI_Request xrq; - MPI_Request rrq; - - static int sequence; - + int myrank = _processor; int ierr; - int tag; - int check; - assert(dest != _processor); - assert(from != _processor); - - int gdest = GroupRanks[dest]; - int gfrom = GroupRanks[from]; - int gme = GroupRanks[_processor]; + if ( (CommunicatorPolicy == CommunicatorPolicyIsend) ) { + MPI_Request xrq; + MPI_Request rrq; - sequence++; - - char *from_ptr = (char *)ShmCommBufs[ShmRank]; - - int small = (bytesStencilBarrier(); - - if (small && (gfrom !=MPI_UNDEFINED) ) { - T *ip = (T *)from_ptr; - T *op = (T *)recv; -PARALLEL_FOR_LOOP - for(int w=0;wStencilBarrier(); - -#else - MPI_Request xrq; - MPI_Request rrq; - int rank = _processor; - int ierr; - ierr =MPI_Isend(xmit, bytes, MPI_CHAR,dest,_processor,communicator,&xrq); - ierr|=MPI_Irecv(recv, bytes, MPI_CHAR,from,from,communicator,&rrq); - - assert(ierr==0); - - list.push_back(xrq); - list.push_back(rrq); -#endif } void CartesianCommunicator::StencilSendToRecvFromBegin(std::vector &list, @@ -505,57 +541,54 @@ void CartesianCommunicator::StencilSendToRecvFromBegin(std::vector &list) +void CartesianCommunicator::StencilSendToRecvFromComplete(std::vector &waitall) { - SendToRecvFromComplete(list); + SendToRecvFromComplete(waitall); } - void CartesianCommunicator::StencilBarrier(void) { - MPI_Win_sync (ShmWindow); MPI_Barrier (ShmComm); - MPI_Win_sync (ShmWindow); } - void CartesianCommunicator::SendToRecvFromComplete(std::vector &list) { int nreq=list.size(); + + if (nreq==0) return; + std::vector status(nreq); int ierr = MPI_Waitall(nreq,&list[0],&status[0]); + list.resize(0); assert(ierr==0); } - void CartesianCommunicator::Barrier(void) { int ierr = MPI_Barrier(communicator); assert(ierr==0); } - void CartesianCommunicator::Broadcast(int root,void* data, int bytes) { int ierr=MPI_Bcast(data, @@ -565,7 +598,11 @@ void CartesianCommunicator::Broadcast(int root,void* data, int bytes) communicator); assert(ierr==0); } - +int CartesianCommunicator::RankWorld(void){ + int r; + MPI_Comm_rank(communicator_world,&r); + return r; +} void CartesianCommunicator::BroadcastWorld(int root,void* data, int bytes) { int ierr= MPI_Bcast(data, From a48ee6f0f2da77d27c9993a3a5d57a595a7233b6 Mon Sep 17 00:00:00 2001 From: paboyle Date: Tue, 7 Feb 2017 01:31:24 -0500 Subject: [PATCH 046/101] Don't use MPI3_leader any more. 
No real gain and complex --- lib/communicator/Communicator_mpi3_leader.cc | 168 ++++++++++++++++--- 1 file changed, 141 insertions(+), 27 deletions(-) diff --git a/lib/communicator/Communicator_mpi3_leader.cc b/lib/communicator/Communicator_mpi3_leader.cc index 71f1a913..6e26bd3e 100644 --- a/lib/communicator/Communicator_mpi3_leader.cc +++ b/lib/communicator/Communicator_mpi3_leader.cc @@ -27,6 +27,7 @@ Author: Peter Boyle /* END LEGAL */ #include "Grid.h" #include +//#include //////////////////////////////////////////////////////////////////////////////////////////////////////////////// /// Workarounds: @@ -42,19 +43,27 @@ Author: Peter Boyle #include #include #include - typedef sem_t *Grid_semaphore; + +#error /*THis is deprecated*/ + +#if 0 #define SEM_INIT(S) S = sem_open(sem_name,0,0600,0); assert ( S != SEM_FAILED ); #define SEM_INIT_EXCL(S) sem_unlink(sem_name); S = sem_open(sem_name,O_CREAT|O_EXCL,0600,0); assert ( S != SEM_FAILED ); #define SEM_POST(S) assert ( sem_post(S) == 0 ); #define SEM_WAIT(S) assert ( sem_wait(S) == 0 ); - +#else +#define SEM_INIT(S) ; +#define SEM_INIT_EXCL(S) ; +#define SEM_POST(S) ; +#define SEM_WAIT(S) ; +#endif #include namespace Grid { -enum { COMMAND_ISEND, COMMAND_IRECV, COMMAND_WAITALL }; +enum { COMMAND_ISEND, COMMAND_IRECV, COMMAND_WAITALL, COMMAND_SENDRECV }; struct Descriptor { uint64_t buf; @@ -62,6 +71,12 @@ struct Descriptor { int rank; int tag; int command; + uint64_t xbuf; + uint64_t rbuf; + int xtag; + int rtag; + int src; + int dest; MPI_Request request; }; @@ -94,18 +109,14 @@ public: void SemInit(void) { sprintf(sem_name,"/Grid_mpi3_sem_head_%d",universe_rank); - // printf("SEM_NAME: %s \n",sem_name); SEM_INIT(sem_head); sprintf(sem_name,"/Grid_mpi3_sem_tail_%d",universe_rank); - // printf("SEM_NAME: %s \n",sem_name); SEM_INIT(sem_tail); } void SemInitExcl(void) { sprintf(sem_name,"/Grid_mpi3_sem_head_%d",universe_rank); - // printf("SEM_INIT_EXCL: %s \n",sem_name); SEM_INIT_EXCL(sem_head); sprintf(sem_name,"/Grid_mpi3_sem_tail_%d",universe_rank); - // printf("SEM_INIT_EXCL: %s \n",sem_name); SEM_INIT_EXCL(sem_tail); } void WakeUpDMA(void) { @@ -125,6 +136,13 @@ public: while(1){ WaitForCommand(); // std::cout << "Getting command "<head,0,0); + int s=state->start; + if ( s != state->head ) { + _mm_mwait(0,0); + } +#endif Event(); } } @@ -132,6 +150,7 @@ public: int Event (void) ; uint64_t QueueCommand(int command,void *buf, int bytes, int hashtag, MPI_Comm comm,int u_rank) ; + void QueueSendRecv(void *xbuf, void *rbuf, int bytes, int xtag, int rtag, MPI_Comm comm,int dest,int src) ; void WaitAll() { // std::cout << "Queueing WAIT command "<tail == state->head ); + while ( state->tail != state->head ); } }; @@ -196,6 +215,12 @@ public: // std::cout << "Waking up DMA "<< slave< MPIoffloadEngine::VerticalShmBufs; std::vector > MPIoffloadEngine::UniverseRanks; std::vector MPIoffloadEngine::UserCommunicatorToWorldRanks; +int CartesianCommunicator::NodeCount(void) { return HorizontalSize;}; int MPIoffloadEngine::ShmSetup = 0; void MPIoffloadEngine::CommunicatorInit (MPI_Comm &communicator_world, @@ -370,12 +418,22 @@ void MPIoffloadEngine::CommunicatorInit (MPI_Comm &communicator_world, ftruncate(fd, size); VerticalShmBufs[r] = mmap(NULL,size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0); - if ( VerticalShmBufs[r] == MAP_FAILED ) { perror("failed mmap"); assert(0); } + /* + for(uint64_t page=0;pagehead ) { switch ( state->Descrs[s].command ) { case COMMAND_ISEND: - /* - std::cout<< " Send "<Descrs[s].buf<< "["<Descrs[s].bytes<<"]" - << " to 
" << state->Descrs[s].rank<< " tag" << state->Descrs[s].tag - << " Comm " << MPIoffloadEngine::communicator_universe<< " me " <Descrs[s].buf+base), state->Descrs[s].bytes, MPI_CHAR, @@ -568,11 +623,6 @@ int Slave::Event (void) { break; case COMMAND_IRECV: - /* - std::cout<< " Recv "<Descrs[s].buf<< "["<Descrs[s].bytes<<"]" - << " from " << state->Descrs[s].rank<< " tag" << state->Descrs[s].tag - << " Comm " << MPIoffloadEngine::communicator_universe<< " me "<< universe_rank<< std::endl; - */ ierr=MPI_Irecv((void *)(state->Descrs[s].buf+base), state->Descrs[s].bytes, MPI_CHAR, @@ -588,10 +638,32 @@ int Slave::Event (void) { return 1; break; + case COMMAND_SENDRECV: + + // fprintf(stderr,"Sendrecv ->%d %d : <-%d %d \n",state->Descrs[s].dest, state->Descrs[s].xtag+i*10,state->Descrs[s].src, state->Descrs[s].rtag+i*10); + + ierr=MPI_Sendrecv((void *)(state->Descrs[s].xbuf+base), state->Descrs[s].bytes, MPI_CHAR, state->Descrs[s].dest, state->Descrs[s].xtag+i*10, + (void *)(state->Descrs[s].rbuf+base), state->Descrs[s].bytes, MPI_CHAR, state->Descrs[s].src , state->Descrs[s].rtag+i*10, + MPIoffloadEngine::communicator_universe,MPI_STATUS_IGNORE); + + assert(ierr==0); + + // fprintf(stderr,"Sendrecv done %d %d\n",ierr,i); + // MPI_Barrier(MPIoffloadEngine::HorizontalComm); + // fprintf(stderr,"Barrier\n"); + i++; + + state->start = PERI_PLUS(s); + + return 1; + break; + case COMMAND_WAITALL: for(int t=state->tail;t!=s; t=PERI_PLUS(t) ){ - MPI_Wait((MPI_Request *)&state->Descrs[t].request,MPI_STATUS_IGNORE); + if ( state->Descrs[t].command != COMMAND_SENDRECV ) { + MPI_Wait((MPI_Request *)&state->Descrs[t].request,MPI_STATUS_IGNORE); + } }; s=PERI_PLUS(s); state->start = s; @@ -613,6 +685,45 @@ int Slave::Event (void) { // External interaction with the queue ////////////////////////////////////////////////////////////////////////////// +void Slave::QueueSendRecv(void *xbuf, void *rbuf, int bytes, int xtag, int rtag, MPI_Comm comm,int dest,int src) +{ + int head =state->head; + int next = PERI_PLUS(head); + + // Set up descriptor + int worldrank; + int hashtag; + MPI_Comm communicator; + MPI_Request request; + uint64_t relative; + + relative = (uint64_t)xbuf - base; + state->Descrs[head].xbuf = relative; + + relative= (uint64_t)rbuf - base; + state->Descrs[head].rbuf = relative; + + state->Descrs[head].bytes = bytes; + + MPIoffloadEngine::MapCommRankToWorldRank(hashtag,worldrank,xtag,comm,dest); + state->Descrs[head].dest = MPIoffloadEngine::UniverseRanks[worldrank][vertical_rank]; + state->Descrs[head].xtag = hashtag; + + MPIoffloadEngine::MapCommRankToWorldRank(hashtag,worldrank,rtag,comm,src); + state->Descrs[head].src = MPIoffloadEngine::UniverseRanks[worldrank][vertical_rank]; + state->Descrs[head].rtag = hashtag; + + state->Descrs[head].command= COMMAND_SENDRECV; + + // Block until FIFO has space + while( state->tail==next ); + + // Msync on weak order architectures + + // Advance pointer + state->head = next; + +}; uint64_t Slave::QueueCommand(int command,void *buf, int bytes, int tag, MPI_Comm comm,int commrank) { ///////////////////////////////////////// @@ -812,19 +923,22 @@ void CartesianCommunicator::StencilSendToRecvFromBegin(std::vector= shm) && (recv_i+bytes <= shm+MAX_MPI_SHM_BYTES) ); assert(from!=_processor); assert(dest!=_processor); - MPIoffloadEngine::QueueMultiplexedSend(xmit,bytes,_processor,communicator,dest); - MPIoffloadEngine::QueueMultiplexedRecv(recv,bytes,from,communicator,from); -} + 
MPIoffloadEngine::QueueMultiplexedSendRecv(xmit,recv,bytes,_processor,from,communicator,dest,from); + + //MPIoffloadEngine::QueueRoundRobinSendRecv(xmit,recv,bytes,_processor,from,communicator,dest,from); + + //MPIoffloadEngine::QueueMultiplexedSend(xmit,bytes,_processor,communicator,dest); + //MPIoffloadEngine::QueueMultiplexedRecv(recv,bytes,from,communicator,from); +} void CartesianCommunicator::StencilSendToRecvFromComplete(std::vector &list) { MPIoffloadEngine::WaitAll(); + //this->Barrier(); } -void CartesianCommunicator::StencilBarrier(void) -{ -} +void CartesianCommunicator::StencilBarrier(void) { } void CartesianCommunicator::SendToRecvFromComplete(std::vector &list) { From 2bf4688e831ce2bc358e15a1c10e7edf07e8880b Mon Sep 17 00:00:00 2001 From: paboyle Date: Tue, 7 Feb 2017 01:32:10 -0500 Subject: [PATCH 047/101] Running on BNL KNL --- benchmarks/Benchmark_dwf.cc | 228 ++++++++++++++++++++++-------------- 1 file changed, 138 insertions(+), 90 deletions(-) diff --git a/benchmarks/Benchmark_dwf.cc b/benchmarks/Benchmark_dwf.cc index 10e4521b..a5532d3d 100644 --- a/benchmarks/Benchmark_dwf.cc +++ b/benchmarks/Benchmark_dwf.cc @@ -48,16 +48,18 @@ typedef WilsonFermion5D WilsonFermion5DR; typedef WilsonFermion5D WilsonFermion5DF; typedef WilsonFermion5D WilsonFermion5DD; - int main (int argc, char ** argv) { Grid_init(&argc,&argv); + CartesianCommunicator::SetCommunicatorPolicy(CartesianCommunicator::CommunicatorPolicySendrecv); + WilsonKernelsStatic::Comms = WilsonKernelsStatic::CommsAndCompute; + int threads = GridThread::GetThreads(); std::cout< latt4 = GridDefaultLatt(); - const int Ls=8; + const int Ls=16; GridCartesian * UGrid = SpaceTimeGrid::makeFourDimGrid(GridDefaultLatt(), GridDefaultSimd(Nd,vComplex::Nsimd()),GridDefaultMpi()); GridRedBlackCartesian * UrbGrid = SpaceTimeGrid::makeFourDimRedBlackGrid(UGrid); GridCartesian * FGrid = SpaceTimeGrid::makeFiveDimGrid(Ls,UGrid); @@ -71,35 +73,66 @@ int main (int argc, char ** argv) std::vector seeds4({1,2,3,4}); std::vector seeds5({5,6,7,8}); - + + std::cout << GridLogMessage << "Initialising 4d RNG" << std::endl; GridParallelRNG RNG4(UGrid); RNG4.SeedFixedIntegers(seeds4); + std::cout << GridLogMessage << "Initialising 5d RNG" << std::endl; GridParallelRNG RNG5(FGrid); RNG5.SeedFixedIntegers(seeds5); + std::cout << GridLogMessage << "Initialised RNGs" << std::endl; LatticeFermion src (FGrid); random(RNG5,src); +#if 0 + src = zero; + { + std::vector origin({0,0,0,latt4[2]-1,0}); + SpinColourVectorF tmp; + tmp=zero; + tmp()(0)(0)=Complex(-2.0,0.0); + std::cout << " source site 0 " << tmp<(Umu,mu); + // if (mu !=2 ) ttmp = 0; + // ttmp = ttmp* pow(10.0,mu); + PokeIndex(Umu,ttmp,mu); + } + std::cout << GridLogMessage << "Forced to diagonal " << std::endl; +#endif + //////////////////////////////////// + // Naive wilson implementation + //////////////////////////////////// // replicate across fifth dimension + LatticeGaugeField Umu5d(FGrid); + std::vector U(4,FGrid); for(int ss=0;ssoSites();ss++){ for(int s=0;s U(4,FGrid); for(int mu=0;mu(Umu5d,mu); } + std::cout << GridLogMessage << "Setting up Cshift based reference " << std::endl; if (1) { @@ -121,6 +154,7 @@ int main (int argc, char ** argv) RealD NP = UGrid->_Nprocessors; + std::cout << GridLogMessage << "Creating action operator " << std::endl; DomainWallFermionR Dw(Umu,*FGrid,*FrbGrid,*UGrid,*UrbGrid,mass,M5); std::cout << GridLogMessage<< "*****************************************************************" <Barrier(); Dw.ZeroCounters(); + Dw.Dhop(src,result,0); double 
t0=usecond(); for(int i=0;i1.0e-4) ) { + std::cout << "RESULT\n " << result<Barrier(); + exit(-1); + } + */ assert (norm2(err)< 1.0e-4 ); Dw.Report(); } @@ -182,21 +227,13 @@ int main (int argc, char ** argv) LatticeFermion sresult(sFGrid); WilsonFermion5DR sDw(Umu,*sFGrid,*sFrbGrid,*sUGrid,*sUrbGrid,M5); - - for(int x=0;x site({s,x,y,z,t}); - SpinColourVector tmp; - peekSite(tmp,src,site); - pokeSite(tmp,ssrc,site); - }}}}} + + localConvert(src,ssrc); std::cout<Barrier(); - double t0=usecond(); + sDw.Dhop(ssrc,sresult,0); sDw.ZeroCounters(); + double t0=usecond(); for(int i=0;i site({s,x,y,z,t}); - SpinColourVector normal, simd; - peekSite(normal,result,site); - peekSite(simd,sresult,site); - sum=sum+norm2(normal-simd); - if (norm2(normal-simd) > 1.0e-6 ) { - std::cout << "site "< 1.0e-4 ){ + std::cout<< "sD REF\n " < 1.0e-4 ){ + std::cout<< "sD REF\n " <::DhopEO "<::DhopEO "<Barrier(); + sDw.DhopEO(ssrc_o, sr_e, DaggerNo); sDw.ZeroCounters(); - sDw.stat.init("DhopEO"); + // sDw.stat.init("DhopEO"); double t0=usecond(); for (int i = 0; i < ncall; i++) { sDw.DhopEO(ssrc_o, sr_e, DaggerNo); } double t1=usecond(); FGrid->Barrier(); - sDw.stat.print(); + // sDw.stat.print(); double volume=Ls; for(int mu=0;mu1.0e-4) { + + if(( error>1.0e-4) ) { setCheckerboard(ssrc,ssrc_o); setCheckerboard(ssrc,ssrc_e); - std::cout<< ssrc << std::endl; + std::cout<< "DIFF\n " <1.0e-4)){ + std::cout<< "DAG RESULT\n " <Barrier(); + Dw.DhopEO(src_o,r_e,DaggerNo); double t0=usecond(); for(int i=0;i1.0e-4)){ + std::cout<< "Deo RESULT\n " < Date: Tue, 7 Feb 2017 01:33:23 -0500 Subject: [PATCH 048/101] Faster RNG init --- lib/lattice/Lattice_rng.h | 159 +++++++++++++++++++++++--------------- 1 file changed, 97 insertions(+), 62 deletions(-) diff --git a/lib/lattice/Lattice_rng.h b/lib/lattice/Lattice_rng.h index 51cc16ec..88f508d9 100644 --- a/lib/lattice/Lattice_rng.h +++ b/lib/lattice/Lattice_rng.h @@ -33,6 +33,7 @@ Author: paboyle namespace Grid { + //http://nvlpubs.nist.gov/nistpubs/SpecialPublications/NIST.SP.800-90Ar1.pdf ? ////////////////////////////////////////////////////////////// // Allow the RNG state to be less dense than the fine grid @@ -68,6 +69,7 @@ namespace Grid { } // Wrap seed_seq to give common interface with random_device + // Should rather wrap random_device and have a generate class fixedSeed { public: @@ -75,20 +77,31 @@ namespace Grid { std::seed_seq src; - fixedSeed(const std::vector &seeds) : src(seeds.begin(),seeds.end()) {}; - - result_type operator () (void){ - - std::vector list(1); - - src.generate(list.begin(),list.end()); - - return list[0]; + template fixedSeed(const std::vector &seeds) : src(seeds.begin(),seeds.end()) {}; + template< class RandomIt > void generate( RandomIt begin, RandomIt end ) { + src.generate(begin,end); } }; + + class deviceSeed { + public: + + std::random_device rd; + + typedef std::random_device::result_type result_type; + + deviceSeed(void) : rd(){}; + + template< class RandomIt > void generate( RandomIt begin, RandomIt end ) { + for(RandomIt it=begin; it!=end;it++){ + *it = rd(); + } + } + }; + // real scalars are one component template void fillScalar(scalar &s,distribution &dist,generator & gen) { @@ -122,7 +135,7 @@ namespace Grid { std::vector _generators; std::vector> _uniform; std::vector> _gaussian; - std::vector> _bernoulli; + std::vector> _bernoulli; void GetState(std::vector & saved,int gen) { saved.resize(RngStateCount); @@ -150,13 +163,6 @@ namespace Grid { // FIXME ... 
do we require lockstep draws of randoms // from all nodes keeping seeds consistent. // place a barrier/broadcast in the fill routine - template void Seed(source &src) - { - typename source::result_type init = src(); - CartesianCommunicator::BroadcastWorld(0,(void *)&init,sizeof(init)); - _generators[0] = RngEngine(init); - _seeded=1; - } GridSerialRNG() : GridRNGbase() { _generators.resize(1); @@ -239,12 +245,17 @@ namespace Grid { CartesianCommunicator::BroadcastWorld(0,(void *)&l,sizeof(l)); } - + template void Seed(source &src) + { + _generators[0] = RngEngine(src); + _seeded=1; + } void SeedRandomDevice(void){ - std::random_device rd; - Seed(rd); + deviceSeed src; + Seed(src); } void SeedFixedIntegers(const std::vector &seeds){ + CartesianCommunicator::BroadcastWorld(0,(void *)&seeds[0],sizeof(int)*seeds.size()); fixedSeed src(seeds); Seed(src); } @@ -273,46 +284,6 @@ namespace Grid { } - // This loop could be made faster to avoid the Ahmdahl by - // i) seed generators on each timeslice, for x=y=z=0; - // ii) seed generators on each z for x=y=0 - // iii)seed generators on each y,z for x=0 - // iv) seed generators on each y,z,x - // made possible by physical indexing. - template void Seed(source &src) - { - std::vector gcoor; - - int gsites = _grid->_gsites; - - typename source::result_type init = src(); - RngEngine pseeder(init); - std::uniform_int_distribution ui; - - for(int gidx=0;gidxGlobalIndexToGlobalCoor(gidx,gcoor); - _grid->GlobalCoorToRankIndex(rank,o_idx,i_idx,gcoor); - - int l_idx=generator_idx(o_idx,i_idx); - - const int num_rand_seed=16; - std::vector site_seeds(num_rand_seed); - for(int i=0;iBroadcast(0,(void *)&site_seeds[0],sizeof(int)*site_seeds.size()); - - if( rank == _grid->ThisRank() ){ - fixedSeed ssrc(site_seeds); - typename source::result_type sinit = ssrc(); - _generators[l_idx] = RngEngine(sinit); - } - } - _seeded=1; - } //FIXME implement generic IO and create state save/restore //void SaveState(const std::string &file); @@ -354,11 +325,75 @@ PARALLEL_FOR_LOOP } }; + // This loop could be made faster to avoid the Ahmdahl by + // i) seed generators on each timeslice, for x=y=z=0; + // ii) seed generators on each z for x=y=0 + // iii)seed generators on each y,z for x=0 + // iv) seed generators on each y,z,x + // made possible by physical indexing. 
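// The Seed below is hierarchical: a master engine, seeded identically on
// every rank via the broadcast, deterministically spawns one engine per
// rank, which in turn seeds one engine per local site. A minimal
// stand-alone sketch of the same idea (std::mt19937 stands in for
// RngEngine; the seed value, myrank and the batch size of 4 are
// illustrative, not from this patch):
//   std::mt19937 master(42);                    // identical on every rank
//   std::vector<std::uint32_t> rank_init(4);
//   for (int r = 0; r <= myrank; r++)           // ranks draw batches in
//     for (auto &s : rank_init) s = master();   // order, so streams differ
//   std::seed_seq sseq(rank_init.begin(), rank_init.end());
//   std::mt19937 node(sseq);                    // per-rank engine
//   // per-site engines are then seeded from 'node' in the same fashion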
+ template void Seed(source &src) + { + + typedef typename source::result_type seed_t; + std::uniform_int_distribution uid; + + int numseed=4; + int gsites = _grid->_gsites; + std::vector site_init(numseed); + std::vector gcoor; + + + // Master RngEngine + std::vector master_init(numseed); src.generate(master_init.begin(),master_init.end()); + _grid->Broadcast(0,(void *)&master_init[0],sizeof(seed_t)*numseed); + fixedSeed master_seed(master_init); + RngEngine master_engine(master_seed); + + // Per node RngEngine + std::vector node_init(numseed); + for(int r=0;r<_grid->ProcessorCount();r++) { + + std::vector rank_init(numseed); + for(int i=0;iThisRank() ) { + for(int i=0;iGlobalIndexToGlobalCoor(gidx,gcoor); + _grid->GlobalCoorToRankIndex(rank,o_idx,i_idx,gcoor); + + if( rank == _grid->ThisRank() ){ + int l_idx=generator_idx(o_idx,i_idx); + for(int i=0;i &seeds){ + CartesianCommunicator::BroadcastWorld(0,(void *)&seeds[0],sizeof(int)*seeds.size()); fixedSeed src(seeds); Seed(src); } From 2c246551d0b4265ac2da1e7e03f676e3d11d5d61 Mon Sep 17 00:00:00 2001 From: paboyle Date: Tue, 7 Feb 2017 01:37:10 -0500 Subject: [PATCH 049/101] Overlap comms and compute options in wilson kernels --- lib/qcd/action/fermion/WilsonCompressor.h | 21 +- lib/qcd/action/fermion/WilsonFermion.cc | 11 +- lib/qcd/action/fermion/WilsonFermion5D.cc | 139 ++++- lib/qcd/action/fermion/WilsonFermion5D.h | 17 + lib/qcd/action/fermion/WilsonKernels.cc | 15 +- lib/qcd/action/fermion/WilsonKernels.h | 77 ++- lib/qcd/action/fermion/WilsonKernelsAsm.cc | 50 +- .../action/fermion/WilsonKernelsAsmAvx512.h | 215 +++++++- lib/qcd/action/fermion/WilsonKernelsAsmBody.h | 498 +++++++++--------- lib/qcd/action/fermion/WilsonKernelsAsmQPX.h | 20 +- lib/qcd/action/fermion/WilsonKernelsHand.cc | 32 +- 11 files changed, 729 insertions(+), 366 deletions(-) diff --git a/lib/qcd/action/fermion/WilsonCompressor.h b/lib/qcd/action/fermion/WilsonCompressor.h index 5b29c103..0257b880 100644 --- a/lib/qcd/action/fermion/WilsonCompressor.h +++ b/lib/qcd/action/fermion/WilsonCompressor.h @@ -180,26 +180,31 @@ namespace QCD { const std::vector &distances) : CartesianStencil (grid,npoints,checkerboard,directions,distances) { }; - template < class compressor> void HaloExchangeOpt(const Lattice &source,compressor &compress) { std::vector > reqs; + HaloExchangeOptGather(source,compress); + this->CommunicateBegin(reqs); + this->calls++; + this->CommunicateComplete(reqs); + this->CommsMerge(); + } + + template < class compressor> + void HaloExchangeOptGather(const Lattice &source,compressor &compress) + { + this->calls++; this->Mergers.resize(0); this->Packets.resize(0); this->HaloGatherOpt(source,compress); - this->CommunicateBegin(reqs); - this->CommunicateComplete(reqs); - this->CommsMerge(); // spins - this->calls++; } template < class compressor> void HaloGatherOpt(const Lattice &source,compressor &compress) { - int face_idx=0; - + this->_grid->StencilBarrier(); // conformable(source._grid,_grid); assert(source._grid==this->_grid); this->halogtime-=usecond(); @@ -222,7 +227,9 @@ namespace QCD { // compress.Point(point); // HaloGatherDir(source,compress,point,face_idx); // } + int face_idx=0; if ( dag ) { + std::cout << " Optimised Dagger compress " <HaloGatherDir(source,XpCompress,Xp,face_idx); this->HaloGatherDir(source,YpCompress,Yp,face_idx); this->HaloGatherDir(source,ZpCompress,Zp,face_idx); diff --git a/lib/qcd/action/fermion/WilsonFermion.cc b/lib/qcd/action/fermion/WilsonFermion.cc index 04c3671f..f5b76c1a 100644 --- 
a/lib/qcd/action/fermion/WilsonFermion.cc +++ b/lib/qcd/action/fermion/WilsonFermion.cc @@ -224,7 +224,7 @@ void WilsonFermion::DerivInternal(StencilImpl &st, DoubledGaugeField &U, //////////////////////// PARALLEL_FOR_LOOP for (int sss = 0; sss < B._grid->oSites(); sss++) { - Kernels::DiracOptDhopDir(st, U, st.CommBuf(), sss, sss, B, Btilde, mu, + Kernels::DhopDir(st, U, st.CommBuf(), sss, sss, B, Btilde, mu, gamma); } @@ -335,8 +335,7 @@ void WilsonFermion::DhopDirDisp(const FermionField &in, FermionField &out, PARALLEL_FOR_LOOP for (int sss = 0; sss < in._grid->oSites(); sss++) { - Kernels::DiracOptDhopDir(Stencil, Umu, Stencil.CommBuf(), sss, sss, in, out, - dirdisp, gamma); + Kernels::DhopDir(Stencil, Umu, Stencil.CommBuf(), sss, sss, in, out, dirdisp, gamma); } }; @@ -353,14 +352,12 @@ void WilsonFermion::DhopInternal(StencilImpl &st, LebesgueOrder &lo, if (dag == DaggerYes) { PARALLEL_FOR_LOOP for (int sss = 0; sss < in._grid->oSites(); sss++) { - Kernels::DiracOptDhopSiteDag(st, lo, U, st.CommBuf(), sss, sss, 1, 1, in, - out); + Kernels::DhopSiteDag(st, lo, U, st.CommBuf(), sss, sss, 1, 1, in, out); } } else { PARALLEL_FOR_LOOP for (int sss = 0; sss < in._grid->oSites(); sss++) { - Kernels::DiracOptDhopSite(st, lo, U, st.CommBuf(), sss, sss, 1, 1, in, - out); + Kernels::DhopSite(st, lo, U, st.CommBuf(), sss, sss, 1, 1, in, out); } } }; diff --git a/lib/qcd/action/fermion/WilsonFermion5D.cc b/lib/qcd/action/fermion/WilsonFermion5D.cc index 7fdceb2f..ad65b345 100644 --- a/lib/qcd/action/fermion/WilsonFermion5D.cc +++ b/lib/qcd/action/fermion/WilsonFermion5D.cc @@ -182,34 +182,34 @@ void WilsonFermion5D::Report(void) std::vector latt = GridDefaultLatt(); RealD volume = Ls; for(int mu=0;mu_Nprocessors; + RealD NN = _FourDimGrid->NodeCount(); if ( DhopCalls > 0 ) { std::cout << GridLogMessage << "#### Dhop calls report " << std::endl; - std::cout << GridLogMessage << "WilsonFermion5D Number of Dhop Calls : " << DhopCalls << std::endl; - std::cout << GridLogMessage << "WilsonFermion5D Total Communication time : " << DhopCommTime<< " us" << std::endl; - std::cout << GridLogMessage << "WilsonFermion5D CommTime/Calls : " << DhopCommTime / DhopCalls << " us" << std::endl; - std::cout << GridLogMessage << "WilsonFermion5D Total Compute time : " << DhopComputeTime << " us" << std::endl; - std::cout << GridLogMessage << "WilsonFermion5D ComputeTime/Calls : " << DhopComputeTime / DhopCalls << " us" << std::endl; + std::cout << GridLogMessage << "WilsonFermion5D Number of DhopEO Calls : " << DhopCalls << std::endl; + std::cout << GridLogMessage << "WilsonFermion5D TotalTime /Calls : " << DhopTotalTime / DhopCalls << " us" << std::endl; + std::cout << GridLogMessage << "WilsonFermion5D CommTime /Calls : " << DhopCommTime / DhopCalls << " us" << std::endl; + std::cout << GridLogMessage << "WilsonFermion5D FaceTime /Calls : " << DhopFaceTime / DhopCalls << " us" << std::endl; + std::cout << GridLogMessage << "WilsonFermion5D ComputeTime1/Calls : " << DhopComputeTime / DhopCalls << " us" << std::endl; + std::cout << GridLogMessage << "WilsonFermion5D ComputeTime2/Calls : " << DhopComputeTime2/ DhopCalls << " us" << std::endl; RealD mflops = 1344*volume*DhopCalls/DhopComputeTime/2; // 2 for red black counting std::cout << GridLogMessage << "Average mflops/s per call : " << mflops << std::endl; std::cout << GridLogMessage << "Average mflops/s per call per rank : " << mflops/NP << std::endl; + std::cout << GridLogMessage << "Average mflops/s per call per node : " << mflops/NN << std::endl; - RealD 
Fullmflops = 1344*volume*DhopCalls/(DhopComputeTime+DhopCommTime)/2; // 2 for red black counting + RealD Fullmflops = 1344*volume*DhopCalls/(DhopTotalTime)/2; // 2 for red black counting std::cout << GridLogMessage << "Average mflops/s per call (full) : " << Fullmflops << std::endl; std::cout << GridLogMessage << "Average mflops/s per call per rank (full): " << Fullmflops/NP << std::endl; - + std::cout << GridLogMessage << "Average mflops/s per call per node (full): " << Fullmflops/NN << std::endl; } if ( DerivCalls > 0 ) { std::cout << GridLogMessage << "#### Deriv calls report "<< std::endl; std::cout << GridLogMessage << "WilsonFermion5D Number of Deriv Calls : " <::ZeroCounters(void) { DhopCalls = 0; DhopCommTime = 0; DhopComputeTime = 0; + DhopComputeTime2= 0; + DhopFaceTime = 0; + DhopTotalTime = 0; DerivCalls = 0; DerivCommTime = 0; @@ -277,7 +280,7 @@ PARALLEL_FOR_LOOP for(int s=0;s::DerivInternal(StencilImpl & st, assert(sF < B._grid->oSites()); assert(sU < U._grid->oSites()); - Kernels::DiracOptDhopDir(st, U, st.CommBuf(), sF, sU, B, Btilde, mu, gamma); + Kernels::DhopDir(st, U, st.CommBuf(), sF, sU, B, Btilde, mu, gamma); //////////////////////////// // spin trace outer product @@ -396,6 +399,86 @@ template void WilsonFermion5D::DhopInternal(StencilImpl & st, LebesgueOrder &lo, DoubledGaugeField & U, const FermionField &in, FermionField &out,int dag) +{ + DhopTotalTime-=usecond(); +#ifdef GRID_OMP + if ( WilsonKernelsStatic::Comms == WilsonKernelsStatic::CommsAndCompute ) + DhopInternalOverlappedComms(st,lo,U,in,out,dag); + else +#endif + DhopInternalSerialComms(st,lo,U,in,out,dag); + DhopTotalTime+=usecond(); +} + +template +void WilsonFermion5D::DhopInternalOverlappedComms(StencilImpl & st, LebesgueOrder &lo, + DoubledGaugeField & U, + const FermionField &in, FermionField &out,int dag) +{ +#ifdef GRID_OMP + // assert((dag==DaggerNo) ||(dag==DaggerYes)); + typedef CartesianCommunicator::CommsRequest_t CommsRequest_t; + + Compressor compressor(dag); + + int LLs = in._grid->_rdimensions[0]; + int len = U._grid->oSites(); + + DhopFaceTime-=usecond(); + st.HaloExchangeOptGather(in,compressor); + DhopFaceTime+=usecond(); + std::vector > reqs; + +#pragma omp parallel + { + int nthreads = omp_get_num_threads(); + int me = omp_get_thread_num(); + int myoff, mywork; + + GridThread::GetWork(len,me-1,mywork,myoff,nthreads-1); + int sF = LLs * myoff; + + if ( me == 0 ) { + DhopCommTime-=usecond(); + st.CommunicateBegin(reqs); + st.CommunicateComplete(reqs); + DhopCommTime+=usecond(); + } else { + // Interior links in stencil + if ( me==1 ) DhopComputeTime-=usecond(); + if (dag == DaggerYes) Kernels::DhopSiteDag(st,lo,U,st.CommBuf(),sF,myoff,LLs,mywork,in,out,1,0); + else Kernels::DhopSite(st,lo,U,st.CommBuf(),sF,myoff,LLs,mywork,in,out,1,0); + if ( me==1 ) DhopComputeTime+=usecond(); + } + } + + DhopFaceTime-=usecond(); + st.CommsMerge(); + DhopFaceTime+=usecond(); + +#pragma omp parallel + { + int nthreads = omp_get_num_threads(); + int me = omp_get_thread_num(); + int myoff, mywork; + + GridThread::GetWork(len,me,mywork,myoff,nthreads); + int sF = LLs * myoff; + + // Exterior links in stencil + if ( me==0 ) DhopComputeTime2-=usecond(); + if (dag == DaggerYes) Kernels::DhopSiteDag(st,lo,U,st.CommBuf(),sF,myoff,LLs,mywork,in,out,0,1); + else Kernels::DhopSite (st,lo,U,st.CommBuf(),sF,myoff,LLs,mywork,in,out,0,1); + if ( me==0 ) DhopComputeTime2+=usecond(); + }// end parallel region +#else + assert(0); +#endif +} +template +void WilsonFermion5D::DhopInternalSerialComms(StencilImpl & st, 
LebesgueOrder &lo, + DoubledGaugeField & U, + const FermionField &in, FermionField &out,int dag) { // assert((dag==DaggerNo) ||(dag==DaggerYes)); Compressor compressor(dag); @@ -408,12 +491,30 @@ void WilsonFermion5D::DhopInternal(StencilImpl & st, LebesgueOrder &lo, DhopComputeTime-=usecond(); // Dhop takes the 4d grid from U, and makes a 5d index for fermion + if (dag == DaggerYes) { PARALLEL_FOR_LOOP for (int ss = 0; ss < U._grid->oSites(); ss++) { int sU = ss; int sF = LLs * sU; - Kernels::DiracOptDhopSiteDag(st, lo, U, st.CommBuf(), sF, sU, LLs, 1, in, out); + Kernels::DhopSiteDag(st,lo,U,st.CommBuf(),sF,sU,LLs,1,in,out); + } + } else { + PARALLEL_FOR_LOOP + for (int ss = 0; ss < U._grid->oSites(); ss++) { + int sU = ss; + int sF = LLs * sU; + Kernels::DhopSite(st,lo,U,st.CommBuf(),sF,sU,LLs,1,in,out); + } + } + /* + + if (dag == DaggerYes) { + PARALLEL_FOR_LOOP + for (int ss = 0; ss < U._grid->oSites(); ss++) { + int sU = ss; + int sF = LLs * sU; + Kernels::DhopSiteDag(st,lo,U,st.CommBuf(),sF,sU,LLs,1,in,out); } #ifdef AVX512_SWITCHOFF } else if (stat.is_init() ) { @@ -430,31 +531,35 @@ void WilsonFermion5D::DhopInternal(StencilImpl & st, LebesgueOrder &lo, for(int ss=0;ssoSites();ss++) { int sU=ss; int sF=LLs*sU; - Kernels::DiracOptDhopSite(st,lo,U,st.CommBuf(),sF,sU,LLs,1,in,out); + Kernels::DhopSite(st,lo,U,st.CommBuf(),sF,sU,LLs,1,in,out); } stat.exit(mythread); } stat.accum(nthreads); #endif } else { -#if 0 +#if 1 PARALLEL_FOR_LOOP for (int ss = 0; ss < U._grid->oSites(); ss++) { int sU = ss; int sF = LLs * sU; - Kernels::DiracOptDhopSite(st,lo,U,st.CommBuf(),sF,sU,LLs,1,in,out); + Kernels::DhopSite(st,lo,U,st.CommBuf(),sF,sU,LLs,1,in,out); } #else +#ifdef GRID_OMP #pragma omp parallel +#endif { int len = U._grid->oSites(); int me, myoff,mywork; GridThread::GetWorkBarrier(len,me, mywork,myoff); int sF = LLs * myoff; - Kernels::DiracOptDhopSite(st,lo,U,st.CommBuf(),sF,myoff,LLs,mywork,in,out); + Kernels::DhopSite(st,lo,U,st.CommBuf(),sF,myoff,LLs,mywork,in,out); } #endif } + */ + DhopComputeTime+=usecond(); } diff --git a/lib/qcd/action/fermion/WilsonFermion5D.h b/lib/qcd/action/fermion/WilsonFermion5D.h index fb4fa925..76a70d4d 100644 --- a/lib/qcd/action/fermion/WilsonFermion5D.h +++ b/lib/qcd/action/fermion/WilsonFermion5D.h @@ -82,6 +82,9 @@ namespace QCD { double DhopCalls; double DhopCommTime; double DhopComputeTime; + double DhopComputeTime2; + double DhopFaceTime; + double DhopTotalTime; double DerivCalls; double DerivCommTime; @@ -145,6 +148,20 @@ namespace QCD { const FermionField &in, FermionField &out, int dag); + + void DhopInternalOverlappedComms(StencilImpl & st, + LebesgueOrder &lo, + DoubledGaugeField &U, + const FermionField &in, + FermionField &out, + int dag); + + void DhopInternalSerialComms(StencilImpl & st, + LebesgueOrder &lo, + DoubledGaugeField &U, + const FermionField &in, + FermionField &out, + int dag); // Constructors WilsonFermion5D(GaugeField &_Umu, diff --git a/lib/qcd/action/fermion/WilsonKernels.cc b/lib/qcd/action/fermion/WilsonKernels.cc index 392c7029..3a70bb5b 100644 --- a/lib/qcd/action/fermion/WilsonKernels.cc +++ b/lib/qcd/action/fermion/WilsonKernels.cc @@ -32,8 +32,8 @@ directory namespace Grid { namespace QCD { -int WilsonKernelsStatic::Opt; - + int WilsonKernelsStatic::Opt = WilsonKernelsStatic::OptGeneric; + int WilsonKernelsStatic::Comms = WilsonKernelsStatic::CommsAndCompute; #ifdef QPX #include @@ -87,9 +87,10 @@ WilsonKernels::WilsonKernels(const ImplParams &p) : Base(p){}; //////////////////////////////////////////// template 
-void WilsonKernels::DiracOptGenericDhopSiteDag(StencilImpl &st, LebesgueOrder &lo, DoubledGaugeField &U, +void WilsonKernels::GenericDhopSiteDag(StencilImpl &st, LebesgueOrder &lo, DoubledGaugeField &U, SiteHalfSpinor *buf, int sF, - int sU, const FermionField &in, FermionField &out) { + int sU, const FermionField &in, FermionField &out, + int interior,int exterior) { SiteHalfSpinor tmp; SiteHalfSpinor chi; SiteHalfSpinor *chi_p; @@ -263,9 +264,9 @@ void WilsonKernels::DiracOptGenericDhopSiteDag(StencilImpl &st, LebesgueOr // Need controls to do interior, exterior, or both template -void WilsonKernels::DiracOptGenericDhopSite(StencilImpl &st, LebesgueOrder &lo, DoubledGaugeField &U, +void WilsonKernels::GenericDhopSite(StencilImpl &st, LebesgueOrder &lo, DoubledGaugeField &U, SiteHalfSpinor *buf, int sF, - int sU, const FermionField &in, FermionField &out) { + int sU, const FermionField &in, FermionField &out,int interior,int exterior) { SiteHalfSpinor tmp; SiteHalfSpinor chi; SiteHalfSpinor *chi_p; @@ -438,7 +439,7 @@ void WilsonKernels::DiracOptGenericDhopSite(StencilImpl &st, LebesgueOrder }; template -void WilsonKernels::DiracOptDhopDir( StencilImpl &st, DoubledGaugeField &U,SiteHalfSpinor *buf, int sF, +void WilsonKernels::DhopDir( StencilImpl &st, DoubledGaugeField &U,SiteHalfSpinor *buf, int sF, int sU, const FermionField &in, FermionField &out, int dir, int gamma) { SiteHalfSpinor tmp; diff --git a/lib/qcd/action/fermion/WilsonKernels.h b/lib/qcd/action/fermion/WilsonKernels.h index c859b33d..20ee87f2 100644 --- a/lib/qcd/action/fermion/WilsonKernels.h +++ b/lib/qcd/action/fermion/WilsonKernels.h @@ -43,8 +43,10 @@ void bgq_l1p_optimisation(int mode); class WilsonKernelsStatic { public: enum { OptGeneric, OptHandUnroll, OptInlineAsm }; + enum { CommsAndCompute, CommsThenCompute }; // S-direction is INNERMOST and takes no part in the parity. 
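// Usage, as set up in Benchmark_dwf.cc earlier in this series: choose the
// communication policy before the first Dhop call, e.g.
//   CartesianCommunicator::SetCommunicatorPolicy(
//       CartesianCommunicator::CommunicatorPolicySendrecv);
//   WilsonKernelsStatic::Comms = WilsonKernelsStatic::CommsAndCompute;
// With CommsAndCompute, DhopInternalOverlappedComms dedicates one OpenMP
// thread to communication while the others apply the interior stencil,
// then all threads finish the exterior sites once the halos arrive. In
// outline (a sketch of the control flow above, not a verbatim copy):
//   #pragma omp parallel
//   {
//     if (omp_get_thread_num() == 0) {     // comms thread
//       st.CommunicateBegin(reqs); st.CommunicateComplete(reqs);
//     } else {                             // compute threads: interior only
//       Kernels::DhopSite(..., /*interior*/1, /*exterior*/0);
//     }
//   }
//   st.CommsMerge();                       // merge received halo data
//   // second parallel region: DhopSite(..., /*interior*/0, /*exterior*/1)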
static int Opt; // these are a temporary hack + static int Comms; // these are a temporary hack }; template class WilsonKernels : public FermionOperator , public WilsonKernelsStatic { @@ -57,20 +59,23 @@ public: template typename std::enable_if::type - DiracOptDhopSite(StencilImpl &st, LebesgueOrder &lo, DoubledGaugeField &U, SiteHalfSpinor * buf, - int sF, int sU, int Ls, int Ns, const FermionField &in, FermionField &out) + DhopSite(StencilImpl &st, LebesgueOrder &lo, DoubledGaugeField &U, SiteHalfSpinor * buf, + int sF, int sU, int Ls, int Ns, const FermionField &in, FermionField &out,int interior=1,int exterior=1) { bgq_l1p_optimisation(1); switch(Opt) { #if defined(AVX512) || defined (QPX) case OptInlineAsm: - WilsonKernels::DiracOptAsmDhopSite(st,lo,U,buf,sF,sU,Ls,Ns,in,out); - break; + if(interior&&exterior) WilsonKernels::AsmDhopSite(st,lo,U,buf,sF,sU,Ls,Ns,in,out); + else if (interior) WilsonKernels::AsmDhopSiteInt(st,lo,U,buf,sF,sU,Ls,Ns,in,out); + else if (exterior) WilsonKernels::AsmDhopSiteExt(st,lo,U,buf,sF,sU,Ls,Ns,in,out); + else assert(0); + break; #endif case OptHandUnroll: for (int site = 0; site < Ns; site++) { for (int s = 0; s < Ls; s++) { - WilsonKernels::DiracOptHandDhopSite(st,lo,U,buf,sF,sU,in,out); + if( exterior) WilsonKernels::HandDhopSite(st,lo,U,buf,sF,sU,in,out,interior,exterior); sF++; } sU++; @@ -79,7 +84,7 @@ public: case OptGeneric: for (int site = 0; site < Ns; site++) { for (int s = 0; s < Ls; s++) { - WilsonKernels::DiracOptGenericDhopSite(st,lo,U,buf,sF,sU,in,out); + if( exterior) WilsonKernels::GenericDhopSite(st,lo,U,buf,sF,sU,in,out,interior,exterior); sF++; } sU++; @@ -93,12 +98,12 @@ public: template typename std::enable_if<(Impl::Dimension != 3 || (Impl::Dimension == 3 && Nc != 3)) && EnableBool, void>::type - DiracOptDhopSite(StencilImpl &st, LebesgueOrder &lo, DoubledGaugeField &U, SiteHalfSpinor * buf, - int sF, int sU, int Ls, int Ns, const FermionField &in, FermionField &out) { + DhopSite(StencilImpl &st, LebesgueOrder &lo, DoubledGaugeField &U, SiteHalfSpinor * buf, + int sF, int sU, int Ls, int Ns, const FermionField &in, FermionField &out,int interior=1,int exterior=1 ) { // no kernel choice for (int site = 0; site < Ns; site++) { for (int s = 0; s < Ls; s++) { - WilsonKernels::DiracOptGenericDhopSite(st, lo, U, buf, sF, sU, in, out); + if( exterior) WilsonKernels::GenericDhopSite(st, lo, U, buf, sF, sU, in, out,interior,exterior); sF++; } sU++; @@ -107,20 +112,23 @@ public: template typename std::enable_if::type - DiracOptDhopSiteDag(StencilImpl &st, LebesgueOrder &lo, DoubledGaugeField &U, SiteHalfSpinor * buf, - int sF, int sU, int Ls, int Ns, const FermionField &in, FermionField &out) { + DhopSiteDag(StencilImpl &st, LebesgueOrder &lo, DoubledGaugeField &U, SiteHalfSpinor * buf, + int sF, int sU, int Ls, int Ns, const FermionField &in, FermionField &out,int interior=1,int exterior=1) { bgq_l1p_optimisation(1); switch(Opt) { #if defined(AVX512) || defined (QPX) case OptInlineAsm: - WilsonKernels::DiracOptAsmDhopSiteDag(st,lo,U,buf,sF,sU,Ls,Ns,in,out); + if(interior&&exterior) WilsonKernels::AsmDhopSiteDag(st,lo,U,buf,sF,sU,Ls,Ns,in,out); + else if (interior) WilsonKernels::AsmDhopSiteDagInt(st,lo,U,buf,sF,sU,Ls,Ns,in,out); + else if (exterior) WilsonKernels::AsmDhopSiteDagExt(st,lo,U,buf,sF,sU,Ls,Ns,in,out); + else assert(0); break; #endif case OptHandUnroll: for (int site = 0; site < Ns; site++) { for (int s = 0; s < Ls; s++) { - WilsonKernels::DiracOptHandDhopSiteDag(st,lo,U,buf,sF,sU,in,out); + if( exterior) 
WilsonKernels::HandDhopSiteDag(st,lo,U,buf,sF,sU,in,out,interior,exterior); sF++; } sU++; @@ -129,7 +137,7 @@ public: case OptGeneric: for (int site = 0; site < Ns; site++) { for (int s = 0; s < Ls; s++) { - WilsonKernels::DiracOptGenericDhopSiteDag(st,lo,U,buf,sF,sU,in,out); + if( exterior) WilsonKernels::GenericDhopSiteDag(st,lo,U,buf,sF,sU,in,out,interior,exterior); sF++; } sU++; @@ -143,40 +151,53 @@ public: template typename std::enable_if<(Impl::Dimension != 3 || (Impl::Dimension == 3 && Nc != 3)) && EnableBool,void>::type - DiracOptDhopSiteDag(StencilImpl &st, LebesgueOrder &lo, DoubledGaugeField &U,SiteHalfSpinor * buf, - int sF, int sU, int Ls, int Ns, const FermionField &in, FermionField &out) { + DhopSiteDag(StencilImpl &st, LebesgueOrder &lo, DoubledGaugeField &U,SiteHalfSpinor * buf, + int sF, int sU, int Ls, int Ns, const FermionField &in, FermionField &out,int interior=1,int exterior=1) { for (int site = 0; site < Ns; site++) { for (int s = 0; s < Ls; s++) { - WilsonKernels::DiracOptGenericDhopSiteDag(st,lo,U,buf,sF,sU,in,out); + if( exterior) WilsonKernels::GenericDhopSiteDag(st,lo,U,buf,sF,sU,in,out,interior,exterior); sF++; } sU++; } } - void DiracOptDhopDir(StencilImpl &st, DoubledGaugeField &U,SiteHalfSpinor * buf, + void DhopDir(StencilImpl &st, DoubledGaugeField &U,SiteHalfSpinor * buf, int sF, int sU, const FermionField &in, FermionField &out, int dirdisp, int gamma); private: // Specialised variants - void DiracOptGenericDhopSite(StencilImpl &st, LebesgueOrder &lo, DoubledGaugeField &U, SiteHalfSpinor * buf, - int sF, int sU, const FermionField &in, FermionField &out); + void GenericDhopSite(StencilImpl &st, LebesgueOrder &lo, DoubledGaugeField &U, SiteHalfSpinor * buf, + int sF, int sU, const FermionField &in, FermionField &out,int interior,int exterior); - void DiracOptGenericDhopSiteDag(StencilImpl &st, LebesgueOrder &lo, DoubledGaugeField &U, SiteHalfSpinor * buf, - int sF, int sU, const FermionField &in, FermionField &out); + void GenericDhopSiteDag(StencilImpl &st, LebesgueOrder &lo, DoubledGaugeField &U, SiteHalfSpinor * buf, + int sF, int sU, const FermionField &in, FermionField &out,int interior,int exterior); - void DiracOptAsmDhopSite(StencilImpl &st, LebesgueOrder &lo, DoubledGaugeField &U, SiteHalfSpinor * buf, + void AsmDhopSite(StencilImpl &st, LebesgueOrder &lo, DoubledGaugeField &U, SiteHalfSpinor * buf, int sF, int sU, int Ls, int Ns, const FermionField &in,FermionField &out); - void DiracOptAsmDhopSiteDag(StencilImpl &st, LebesgueOrder &lo, DoubledGaugeField &U, SiteHalfSpinor * buf, + void AsmDhopSiteDag(StencilImpl &st, LebesgueOrder &lo, DoubledGaugeField &U, SiteHalfSpinor * buf, int sF, int sU, int Ls, int Ns, const FermionField &in, FermionField &out); - void DiracOptHandDhopSite(StencilImpl &st, LebesgueOrder &lo, DoubledGaugeField &U, SiteHalfSpinor * buf, - int sF, int sU, const FermionField &in, FermionField &out); + void AsmDhopSiteInt(StencilImpl &st, LebesgueOrder &lo, DoubledGaugeField &U, SiteHalfSpinor * buf, + int sF, int sU, int Ls, int Ns, const FermionField &in,FermionField &out); - void DiracOptHandDhopSiteDag(StencilImpl &st, LebesgueOrder &lo, DoubledGaugeField &U, SiteHalfSpinor * buf, - int sF, int sU, const FermionField &in, FermionField &out); + void AsmDhopSiteDagInt(StencilImpl &st, LebesgueOrder &lo, DoubledGaugeField &U, SiteHalfSpinor * buf, + int sF, int sU, int Ls, int Ns, const FermionField &in, FermionField &out); + + void AsmDhopSiteExt(StencilImpl &st, LebesgueOrder &lo, DoubledGaugeField &U, SiteHalfSpinor * 
buf, + int sF, int sU, int Ls, int Ns, const FermionField &in,FermionField &out); + + void AsmDhopSiteDagExt(StencilImpl &st, LebesgueOrder &lo, DoubledGaugeField &U, SiteHalfSpinor * buf, + int sF, int sU, int Ls, int Ns, const FermionField &in, FermionField &out); + + + void HandDhopSite(StencilImpl &st, LebesgueOrder &lo, DoubledGaugeField &U, SiteHalfSpinor * buf, + int sF, int sU, const FermionField &in, FermionField &out,int interior,int exterior); + + void HandDhopSiteDag(StencilImpl &st, LebesgueOrder &lo, DoubledGaugeField &U, SiteHalfSpinor * buf, + int sF, int sU, const FermionField &in, FermionField &out,int interior,int exterior); public: diff --git a/lib/qcd/action/fermion/WilsonKernelsAsm.cc b/lib/qcd/action/fermion/WilsonKernelsAsm.cc index ab805f4f..f627a939 100644 --- a/lib/qcd/action/fermion/WilsonKernelsAsm.cc +++ b/lib/qcd/action/fermion/WilsonKernelsAsm.cc @@ -35,19 +35,48 @@ Author: Guido Cossu namespace Grid { namespace QCD { - + + /////////////////////////////////////////////////////////// // Default to no assembler implementation /////////////////////////////////////////////////////////// template void -WilsonKernels::DiracOptAsmDhopSite(StencilImpl &st,LebesgueOrder & lo,DoubledGaugeField &U,SiteHalfSpinor *buf, +WilsonKernels::AsmDhopSite(StencilImpl &st,LebesgueOrder & lo,DoubledGaugeField &U,SiteHalfSpinor *buf, int ss,int ssU,int Ls,int Ns,const FermionField &in, FermionField &out) { assert(0); } template void -WilsonKernels::DiracOptAsmDhopSiteDag(StencilImpl &st,LebesgueOrder & lo,DoubledGaugeField &U,SiteHalfSpinor *buf, +WilsonKernels::AsmDhopSiteDag(StencilImpl &st,LebesgueOrder & lo,DoubledGaugeField &U,SiteHalfSpinor *buf, + int ss,int ssU,int Ls,int Ns,const FermionField &in, FermionField &out) +{ + assert(0); +} + +template void +WilsonKernels::AsmDhopSiteInt(StencilImpl &st,LebesgueOrder & lo,DoubledGaugeField &U,SiteHalfSpinor *buf, + int ss,int ssU,int Ls,int Ns,const FermionField &in, FermionField &out) +{ + assert(0); +} + +template void +WilsonKernels::AsmDhopSiteDagInt(StencilImpl &st,LebesgueOrder & lo,DoubledGaugeField &U,SiteHalfSpinor *buf, + int ss,int ssU,int Ls,int Ns,const FermionField &in, FermionField &out) +{ + assert(0); +} + +template void +WilsonKernels::AsmDhopSiteExt(StencilImpl &st,LebesgueOrder & lo,DoubledGaugeField &U,SiteHalfSpinor *buf, + int ss,int ssU,int Ls,int Ns,const FermionField &in, FermionField &out) +{ + assert(0); +} + +template void +WilsonKernels::AsmDhopSiteDagExt(StencilImpl &st,LebesgueOrder & lo,DoubledGaugeField &U,SiteHalfSpinor *buf, int ss,int ssU,int Ls,int Ns,const FermionField &in, FermionField &out) { assert(0); @@ -57,11 +86,22 @@ WilsonKernels::DiracOptAsmDhopSiteDag(StencilImpl &st,LebesgueOrder & lo, #include #define INSTANTIATE_ASM(A)\ -template void WilsonKernels::DiracOptAsmDhopSite(StencilImpl &st,LebesgueOrder & lo,DoubledGaugeField &U, SiteHalfSpinor *buf,\ +template void WilsonKernels::AsmDhopSite(StencilImpl &st,LebesgueOrder & lo,DoubledGaugeField &U, SiteHalfSpinor *buf,\ int ss,int ssU,int Ls,int Ns,const FermionField &in, FermionField &out);\ \ -template void WilsonKernels::DiracOptAsmDhopSiteDag(StencilImpl &st,LebesgueOrder & lo,DoubledGaugeField &U, SiteHalfSpinor *buf,\ +template void WilsonKernels::AsmDhopSiteDag(StencilImpl &st,LebesgueOrder & lo,DoubledGaugeField &U, SiteHalfSpinor *buf,\ int ss,int ssU,int Ls,int Ns,const FermionField &in, FermionField &out);\ +template void WilsonKernels::AsmDhopSiteInt(StencilImpl &st,LebesgueOrder & lo,DoubledGaugeField &U, 
SiteHalfSpinor *buf,\ + int ss,int ssU,int Ls,int Ns,const FermionField &in, FermionField &out);\ + \ +template void WilsonKernels::AsmDhopSiteDagInt(StencilImpl &st,LebesgueOrder & lo,DoubledGaugeField &U, SiteHalfSpinor *buf,\ + int ss,int ssU,int Ls,int Ns,const FermionField &in, FermionField &out);\ +template void WilsonKernels::AsmDhopSiteExt(StencilImpl &st,LebesgueOrder & lo,DoubledGaugeField &U, SiteHalfSpinor *buf,\ + int ss,int ssU,int Ls,int Ns,const FermionField &in, FermionField &out);\ + \ +template void WilsonKernels::AsmDhopSiteDagExt(StencilImpl &st,LebesgueOrder & lo,DoubledGaugeField &U, SiteHalfSpinor *buf,\ + int ss,int ssU,int Ls,int Ns,const FermionField &in, FermionField &out);\ + INSTANTIATE_ASM(WilsonImplF); INSTANTIATE_ASM(WilsonImplD); diff --git a/lib/qcd/action/fermion/WilsonKernelsAsmAvx512.h b/lib/qcd/action/fermion/WilsonKernelsAsmAvx512.h index 7b5b9803..6d602a2b 100644 --- a/lib/qcd/action/fermion/WilsonKernelsAsmAvx512.h +++ b/lib/qcd/action/fermion/WilsonKernelsAsmAvx512.h @@ -52,13 +52,37 @@ static Vector signsF; #define MAYBEPERM(A,perm) if (perm) { A ; } #define MULT_2SPIN(ptr,pf) MULT_ADDSUB_2SPIN(ptr,pf) #define COMPLEX_SIGNS(isigns) vComplexF *isigns = &signsF[0]; + + +#define INTERIOR_AND_EXTERIOR +#undef INTERIOR +#undef EXTERIOR ///////////////////////////////////////////////////////////////// // XYZT vectorised, undag Kernel, single ///////////////////////////////////////////////////////////////// #undef KERNEL_DAG +#define INTERIOR_AND_EXTERIOR +#undef INTERIOR +#undef EXTERIOR template<> void -WilsonKernels::DiracOptAsmDhopSite(StencilImpl &st,LebesgueOrder & lo,DoubledGaugeField &U, SiteHalfSpinor *buf, +WilsonKernels::AsmDhopSite(StencilImpl &st,LebesgueOrder & lo,DoubledGaugeField &U, SiteHalfSpinor *buf, + int ss,int ssU,int Ls,int Ns,const FermionField &in, FermionField &out) +#include + +#undef INTERIOR_AND_EXTERIOR +#define INTERIOR +#undef EXTERIOR +template<> void +WilsonKernels::AsmDhopSiteInt(StencilImpl &st,LebesgueOrder & lo,DoubledGaugeField &U, SiteHalfSpinor *buf, + int ss,int ssU,int Ls,int Ns,const FermionField &in, FermionField &out) +#include + +#undef INTERIOR_AND_EXTERIOR +#undef INTERIOR +#define EXTERIOR +template<> void +WilsonKernels::AsmDhopSiteExt(StencilImpl &st,LebesgueOrder & lo,DoubledGaugeField &U, SiteHalfSpinor *buf, int ss,int ssU,int Ls,int Ns,const FermionField &in, FermionField &out) #include @@ -66,9 +90,28 @@ WilsonKernels::DiracOptAsmDhopSite(StencilImpl &st,LebesgueOrder & // XYZT vectorised, dag Kernel, single ///////////////////////////////////////////////////////////////// #define KERNEL_DAG +#define INTERIOR_AND_EXTERIOR +#undef INTERIOR +#undef EXTERIOR template<> void -WilsonKernels::DiracOptAsmDhopSiteDag(StencilImpl &st,LebesgueOrder & lo,DoubledGaugeField &U,SiteHalfSpinor *buf, - int ss,int ssU,int Ls,int Ns,const FermionField &in, FermionField &out) +WilsonKernels::AsmDhopSiteDag(StencilImpl &st,LebesgueOrder & lo,DoubledGaugeField &U, SiteHalfSpinor *buf, + int ss,int ssU,int Ls,int Ns,const FermionField &in, FermionField &out) +#include + +#undef INTERIOR_AND_EXTERIOR +#define INTERIOR +#undef EXTERIOR +template<> void +WilsonKernels::AsmDhopSiteDagInt(StencilImpl &st,LebesgueOrder & lo,DoubledGaugeField &U, SiteHalfSpinor *buf, + int ss,int ssU,int Ls,int Ns,const FermionField &in, FermionField &out) +#include + +#undef INTERIOR_AND_EXTERIOR +#undef INTERIOR +#define EXTERIOR +template<> void +WilsonKernels::AsmDhopSiteDagExt(StencilImpl &st,LebesgueOrder & lo,DoubledGaugeField &U, 
SiteHalfSpinor *buf, + int ss,int ssU,int Ls,int Ns,const FermionField &in, FermionField &out) #include #undef MAYBEPERM @@ -80,8 +123,29 @@ WilsonKernels::DiracOptAsmDhopSiteDag(StencilImpl &st,Lebe ///////////////////////////////////////////////////////////////// // Ls vectorised, undag Kernel, single ///////////////////////////////////////////////////////////////// #undef KERNEL_DAG +#define INTERIOR_AND_EXTERIOR +#undef INTERIOR +#undef EXTERIOR template<> void -WilsonKernels::DiracOptAsmDhopSite(StencilImpl &st,LebesgueOrder & lo,DoubledGaugeField &U, SiteHalfSpinor *buf, +WilsonKernels::AsmDhopSite(StencilImpl &st,LebesgueOrder & lo,DoubledGaugeField &U, SiteHalfSpinor *buf, + int ss,int ssU,int Ls,int Ns,const FermionField &in, FermionField &out) +#include + +#undef INTERIOR_AND_EXTERIOR +#define INTERIOR +#undef EXTERIOR +template<> void +WilsonKernels::AsmDhopSiteInt(StencilImpl &st,LebesgueOrder & lo,DoubledGaugeField &U, SiteHalfSpinor *buf, + int ss,int ssU,int Ls,int Ns,const FermionField &in, FermionField &out) +#include + +#undef INTERIOR_AND_EXTERIOR +#undef INTERIOR +#define EXTERIOR +#undef MULT_2SPIN +#define MULT_2SPIN(ptr,pf) MULT_ADDSUB_2SPIN_LSNOPF(ptr,pf) +template<> void +WilsonKernels::AsmDhopSiteExt(StencilImpl &st,LebesgueOrder & lo,DoubledGaugeField &U, SiteHalfSpinor *buf, int ss,int ssU,int Ls,int Ns,const FermionField &in, FermionField &out) #include @@ -89,10 +153,30 @@ WilsonKernels::DiracOptAsmDhopSite(StencilImpl &st,Lebesgu // Ls vectorised, dag Kernel, single ///////////////////////////////////////////////////////////////// #define KERNEL_DAG +#define INTERIOR_AND_EXTERIOR +#undef INTERIOR +#undef EXTERIOR template<> void -WilsonKernels::DiracOptAsmDhopSiteDag(StencilImpl &st,LebesgueOrder & lo,DoubledGaugeField &U,SiteHalfSpinor *buf, +WilsonKernels::AsmDhopSiteDag(StencilImpl &st,LebesgueOrder & lo,DoubledGaugeField &U,SiteHalfSpinor *buf, int ss,int ssU,int Ls,int Ns,const FermionField &in, FermionField &out) #include + +#undef INTERIOR_AND_EXTERIOR +#define INTERIOR +#undef EXTERIOR +template<> void +WilsonKernels::AsmDhopSiteDagInt(StencilImpl &st,LebesgueOrder & lo,DoubledGaugeField &U,SiteHalfSpinor *buf, + int ss,int ssU,int Ls,int Ns,const FermionField &in, FermionField &out) +#include + +#undef INTERIOR_AND_EXTERIOR +#undef INTERIOR +#define EXTERIOR +template<> void +WilsonKernels::AsmDhopSiteDagExt(StencilImpl &st,LebesgueOrder & lo,DoubledGaugeField &U,SiteHalfSpinor *buf, + int ss,int ssU,int Ls,int Ns,const FermionField &in, FermionField &out) +#include + #undef COMPLEX_SIGNS #undef MAYBEPERM #undef MULT_2SPIN @@ -110,51 +194,130 @@ static int signInitD = setupSigns(signsD); #define MULT_2SPIN(ptr,pf) MULT_ADDSUB_2SPIN(ptr,pf) #define COMPLEX_SIGNS(isigns) vComplexD *isigns = &signsD[0]; + +#define INTERIOR_AND_EXTERIOR +#undef INTERIOR +#undef EXTERIOR + ///////////////////////////////////////////////////////////////// -// XYZT Vectorised, undag Kernel, double +// XYZT vectorised, undag Kernel, double ///////////////////////////////////////////////////////////////// #undef KERNEL_DAG +#define INTERIOR_AND_EXTERIOR +#undef INTERIOR +#undef EXTERIOR template<> void -WilsonKernels::DiracOptAsmDhopSite(StencilImpl &st,LebesgueOrder & lo,DoubledGaugeField &U, SiteHalfSpinor *buf, +WilsonKernels::AsmDhopSite(StencilImpl &st,LebesgueOrder & lo,DoubledGaugeField &U, SiteHalfSpinor *buf, int ss,int ssU,int Ls,int Ns,const FermionField &in, FermionField &out) #include -///////////////////////////////////////////////////////////////// - +#undef INTERIOR_AND_EXTERIOR +#define INTERIOR +#undef EXTERIOR
+template<> void +WilsonKernels::AsmDhopSiteInt(StencilImpl &st,LebesgueOrder & lo,DoubledGaugeField &U, SiteHalfSpinor *buf, + int ss,int ssU,int Ls,int Ns,const FermionField &in, FermionField &out) +#include + +#undef INTERIOR_AND_EXTERIOR +#undef INTERIOR +#define EXTERIOR +template<> void +WilsonKernels::AsmDhopSiteExt(StencilImpl &st,LebesgueOrder & lo,DoubledGaugeField &U, SiteHalfSpinor *buf, + int ss,int ssU,int Ls,int Ns,const FermionField &in, FermionField &out) +#include + ///////////////////////////////////////////////////////////////// -// XYZT Vectorised, dag Kernel, double +// XYZT vectorised, dag Kernel, double ///////////////////////////////////////////////////////////////// #define KERNEL_DAG +#define INTERIOR_AND_EXTERIOR +#undef INTERIOR +#undef EXTERIOR template<> void -WilsonKernels::DiracOptAsmDhopSiteDag(StencilImpl &st,LebesgueOrder & lo,DoubledGaugeField &U,SiteHalfSpinor *buf, - int ss,int ssU,int Ls,int Ns,const FermionField &in, FermionField &out) +WilsonKernels::AsmDhopSiteDag(StencilImpl &st,LebesgueOrder & lo,DoubledGaugeField &U, SiteHalfSpinor *buf, + int ss,int ssU,int Ls,int Ns,const FermionField &in, FermionField &out) #include -///////////////////////////////////////////////////////////////// +#undef INTERIOR_AND_EXTERIOR +#define INTERIOR +#undef EXTERIOR +template<> void +WilsonKernels::AsmDhopSiteDagInt(StencilImpl &st,LebesgueOrder & lo,DoubledGaugeField &U, SiteHalfSpinor *buf, + int ss,int ssU,int Ls,int Ns,const FermionField &in, FermionField &out) +#include + +#undef INTERIOR_AND_EXTERIOR +#undef INTERIOR +#define EXTERIOR +template<> void +WilsonKernels::AsmDhopSiteDagExt(StencilImpl &st,LebesgueOrder & lo,DoubledGaugeField &U, SiteHalfSpinor *buf, + int ss,int ssU,int Ls,int Ns,const FermionField &in, FermionField &out) +#include + #undef MAYBEPERM #undef MULT_2SPIN #define MAYBEPERM(A,B) #define MULT_2SPIN(ptr,pf) MULT_ADDSUB_2SPIN_LS(ptr,pf) -///////////////////////////////////////////////////////////////// -// Ls vectorised, undag Kernel, double -///////////////////////////////////////////////////////////////// -#undef KERNEL_DAG -template<> void -WilsonKernels::DiracOptAsmDhopSite(StencilImpl &st,LebesgueOrder & lo,DoubledGaugeField &U, SiteHalfSpinor *buf, - int ss,int ssU,int Ls,int Ns,const FermionField &in, FermionField &out) -#include -///////////////////////////////////////////////////////////////// ///////////////////////////////////////////////////////////////// -// Ls vectorised, dag Kernel, double +// Ls vectorised, undag Kernel, double +///////////////////////////////////////////////////////////////// +#undef KERNEL_DAG +#define INTERIOR_AND_EXTERIOR +#undef INTERIOR +#undef EXTERIOR +template<> void +WilsonKernels::AsmDhopSite(StencilImpl &st,LebesgueOrder & lo,DoubledGaugeField &U, SiteHalfSpinor *buf, + int ss,int ssU,int Ls,int Ns,const FermionField &in, FermionField &out) +#include + +#undef INTERIOR_AND_EXTERIOR +#define INTERIOR +#undef EXTERIOR +template<> void +WilsonKernels::AsmDhopSiteInt(StencilImpl &st,LebesgueOrder & lo,DoubledGaugeField &U, SiteHalfSpinor *buf, + int ss,int ssU,int Ls,int Ns,const FermionField &in, FermionField &out) +#include + +#undef INTERIOR_AND_EXTERIOR +#undef INTERIOR +#define EXTERIOR +#undef MULT_2SPIN +#define MULT_2SPIN(ptr,pf) MULT_ADDSUB_2SPIN_LSNOPF(ptr,pf) +template<> void +WilsonKernels::AsmDhopSiteExt(StencilImpl &st,LebesgueOrder & lo,DoubledGaugeField &U, SiteHalfSpinor *buf, + int ss,int ssU,int Ls,int Ns,const FermionField &in, FermionField &out) +#include +
+///////////////////////////////////////////////////////////////// +// Ls vectorised, dag Kernel, double ///////////////////////////////////////////////////////////////// #define KERNEL_DAG +#define INTERIOR_AND_EXTERIOR +#undef INTERIOR +#undef EXTERIOR template<> void -WilsonKernels::DiracOptAsmDhopSiteDag(StencilImpl &st,LebesgueOrder & lo,DoubledGaugeField &U,SiteHalfSpinor *buf, +WilsonKernels::AsmDhopSiteDag(StencilImpl &st,LebesgueOrder & lo,DoubledGaugeField &U,SiteHalfSpinor *buf, int ss,int ssU,int Ls,int Ns,const FermionField &in, FermionField &out) #include -///////////////////////////////////////////////////////////////// - + +#undef INTERIOR_AND_EXTERIOR +#define INTERIOR +#undef EXTERIOR +template<> void +WilsonKernels::AsmDhopSiteDagInt(StencilImpl &st,LebesgueOrder & lo,DoubledGaugeField &U,SiteHalfSpinor *buf, + int ss,int ssU,int Ls,int Ns,const FermionField &in, FermionField &out) +#include + +#undef INTERIOR_AND_EXTERIOR +#undef INTERIOR +#define EXTERIOR +template<> void +WilsonKernels::AsmDhopSiteDagExt(StencilImpl &st,LebesgueOrder & lo,DoubledGaugeField &U,SiteHalfSpinor *buf, + int ss,int ssU,int Ls,int Ns,const FermionField &in, FermionField &out) +#include + #undef COMPLEX_SIGNS #undef MAYBEPERM #undef MULT_2SPIN diff --git a/lib/qcd/action/fermion/WilsonKernelsAsmBody.h b/lib/qcd/action/fermion/WilsonKernelsAsmBody.h index 8ec68997..34aba472 100644 --- a/lib/qcd/action/fermion/WilsonKernelsAsmBody.h +++ b/lib/qcd/action/fermion/WilsonKernelsAsmBody.h @@ -1,259 +1,267 @@ +#ifdef KERNEL_DAG +#define DIR0_PROJMEM(base) XP_PROJMEM(base); +#define DIR1_PROJMEM(base) YP_PROJMEM(base); +#define DIR2_PROJMEM(base) ZP_PROJMEM(base); +#define DIR3_PROJMEM(base) TP_PROJMEM(base); +#define DIR4_PROJMEM(base) XM_PROJMEM(base); +#define DIR5_PROJMEM(base) YM_PROJMEM(base); +#define DIR6_PROJMEM(base) ZM_PROJMEM(base); +#define DIR7_PROJMEM(base) TM_PROJMEM(base); +#define DIR0_RECON XP_RECON +#define DIR1_RECON YP_RECON_ACCUM +#define DIR2_RECON ZP_RECON_ACCUM +#define DIR3_RECON TP_RECON_ACCUM +#define DIR4_RECON XM_RECON_ACCUM +#define DIR5_RECON YM_RECON_ACCUM +#define DIR6_RECON ZM_RECON_ACCUM +#define DIR7_RECON TM_RECON_ACCUM +#else +#define DIR0_PROJMEM(base) XM_PROJMEM(base); +#define DIR1_PROJMEM(base) YM_PROJMEM(base); +#define DIR2_PROJMEM(base) ZM_PROJMEM(base); +#define DIR3_PROJMEM(base) TM_PROJMEM(base); +#define DIR4_PROJMEM(base) XP_PROJMEM(base); +#define DIR5_PROJMEM(base) YP_PROJMEM(base); +#define DIR6_PROJMEM(base) ZP_PROJMEM(base); +#define DIR7_PROJMEM(base) TP_PROJMEM(base); +#define DIR0_RECON XM_RECON +#define DIR1_RECON YM_RECON_ACCUM +#define DIR2_RECON ZM_RECON_ACCUM +#define DIR3_RECON TM_RECON_ACCUM +#define DIR4_RECON XP_RECON_ACCUM +#define DIR5_RECON YP_RECON_ACCUM +#define DIR6_RECON ZP_RECON_ACCUM +#define DIR7_RECON TP_RECON_ACCUM +#endif + +//////////////////////////////////////////////////////////////////////////////// +// Comms then compute kernel +//////////////////////////////////////////////////////////////////////////////// +#ifdef INTERIOR_AND_EXTERIOR + +#define ZERO_NMU(A) +#define INTERIOR_BLOCK_XP(a,b,PERMUTE_DIR,PROJMEM,RECON) INTERIOR_BLOCK(a,b,PERMUTE_DIR,PROJMEM,RECON) +#define EXTERIOR_BLOCK_XP(a,b,RECON) EXTERIOR_BLOCK(a,b,RECON) + +#define INTERIOR_BLOCK(a,b,PERMUTE_DIR,PROJMEM,RECON) \ + LOAD64(%r10,isigns); \ + PROJMEM(base); \ + MAYBEPERM(PERMUTE_DIR,perm); + +#define EXTERIOR_BLOCK(a,b,RECON) \ + LOAD_CHI(base); + +#define COMMON_BLOCK(a,b,RECON) \ + base = st.GetInfo(ptype,local,perm,b,ent,plocal); ent++; \ +
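/* base now holds the next direction's neighbour; prefetch it while the current 2-spin multiply runs */ \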
PREFETCH_CHIMU(base); \ + MULT_2SPIN_DIR_PF(a,basep); \ + LOAD64(%r10,isigns); \ + RECON; + +#define RESULT(base,basep) SAVE_RESULT(base,basep); + +#endif + +//////////////////////////////////////////////////////////////////////////////// +// Pre comms kernel -- prefetch like normal because it is mostly right +//////////////////////////////////////////////////////////////////////////////// +#ifdef INTERIOR + +#define COMMON_BLOCK(a,b,RECON) +#define ZERO_NMU(A) + +// No accumulate for DIR0 +#define EXTERIOR_BLOCK_XP(a,b,RECON) \ + ZERO_PSI; \ + base = st.GetInfo(ptype,local,perm,b,ent,plocal); ent++; + +#define EXTERIOR_BLOCK(a,b,RECON) \ + base = st.GetInfo(ptype,local,perm,b,ent,plocal); ent++; + +#define INTERIOR_BLOCK_XP(a,b,PERMUTE_DIR,PROJMEM,RECON) INTERIOR_BLOCK(a,b,PERMUTE_DIR,PROJMEM,RECON) + +#define INTERIOR_BLOCK(a,b,PERMUTE_DIR,PROJMEM,RECON) \ + LOAD64(%r10,isigns); \ + PROJMEM(base); \ + MAYBEPERM(PERMUTE_DIR,perm); \ + base = st.GetInfo(ptype,local,perm,b,ent,plocal); ent++; \ + PREFETCH_CHIMU(base); \ + MULT_2SPIN_DIR_PF(a,basep); \ + LOAD64(%r10,isigns); \ + RECON; + +#define RESULT(base,basep) SAVE_RESULT(base,basep); + +#endif + +//////////////////////////////////////////////////////////////////////////////// +// Post comms kernel +//////////////////////////////////////////////////////////////////////////////// +#ifdef EXTERIOR + +#define ZERO_NMU(A) nmu=0; + +#define INTERIOR_BLOCK_XP(a,b,PERMUTE_DIR,PROJMEM,RECON) \ + ZERO_PSI; base = st.GetInfo(ptype,local,perm,b,ent,plocal); ent++; + +#define EXTERIOR_BLOCK_XP(a,b,RECON) EXTERIOR_BLOCK(a,b,RECON) + +#define INTERIOR_BLOCK(a,b,PERMUTE_DIR,PROJMEM,RECON) \ + base = st.GetInfo(ptype,local,perm,b,ent,plocal); ent++; + +#define EXTERIOR_BLOCK(a,b,RECON) \ + nmu++; \ + LOAD_CHI(base); \ + MULT_2SPIN_DIR_PF(a,base); \ + base = st.GetInfo(ptype,local,perm,b,ent,plocal); ent++; \ + LOAD64(%r10,isigns); \ + RECON; + +#define COMMON_BLOCK(a,b,RECON) + +#define RESULT(base,basep) if (nmu){ ADD_RESULT(base,base);} + +#endif + { + int nmu; int local,perm, ptype; uint64_t base; uint64_t basep; const uint64_t plocal =(uint64_t) & in._odata[0]; - // vComplexF isigns[2] = { signs[0], signs[1] }; - //COMPLEX_TYPE is vComplexF of vComplexD depending - //on the chosen precision COMPLEX_SIGNS(isigns); MASK_REGS; int nmax=U._grid->oSites(); for(int site=0;site=nmax) ssn=0; + int sUn=lo.Reorder(ssn); +#ifndef EXTERIOR + LOCK_GAUGE(0); +#endif + for(int s=0;s=nmax) ssn=0; - int sUn=lo.Reorder(ssn); - for(int s=0;s shuffle and xor the real part sign bit -#ifdef KERNEL_DAG - YP_PROJMEM(base); -#else - YM_PROJMEM(base); -#endif - MAYBEPERM(PERMUTE_DIR2,perm); - } else { - LOAD_CHI(base); - } - base = st.GetInfo(ptype,local,perm,Zp,ent,plocal); ent++; - PREFETCH_CHIMU(base); - { - MULT_2SPIN_DIR_PFYP(Yp,basep); - } - LOAD64(%r10,isigns); // times i => shuffle and xor the real part sign bit -#ifdef KERNEL_DAG - YP_RECON_ACCUM; -#else - YM_RECON_ACCUM; -#endif - - //////////////////////////////// - // Zp - //////////////////////////////// - basep = st.GetPFInfo(nent,plocal); nent++; - if ( local ) { - LOAD64(%r10,isigns); // times i => shuffle and xor the real part sign bit -#ifdef KERNEL_DAG - ZP_PROJMEM(base); -#else - ZM_PROJMEM(base); -#endif - MAYBEPERM(PERMUTE_DIR1,perm); - } else { - LOAD_CHI(base); - } - base = st.GetInfo(ptype,local,perm,Tp,ent,plocal); ent++; - PREFETCH_CHIMU(base); - { - MULT_2SPIN_DIR_PFZP(Zp,basep); - } - LOAD64(%r10,isigns); // times i => shuffle and xor the real part sign bit -#ifdef KERNEL_DAG - ZP_RECON_ACCUM; 
-#else - ZM_RECON_ACCUM; -#endif - - //////////////////////////////// - // Tp - //////////////////////////////// - basep = st.GetPFInfo(nent,plocal); nent++; - if ( local ) { - LOAD64(%r10,isigns); // times i => shuffle and xor the real part sign bit -#ifdef KERNEL_DAG - TP_PROJMEM(base); -#else - TM_PROJMEM(base); -#endif - MAYBEPERM(PERMUTE_DIR0,perm); - } else { - LOAD_CHI(base); - } - base = st.GetInfo(ptype,local,perm,Xm,ent,plocal); ent++; - PREFETCH_CHIMU(base); - { - MULT_2SPIN_DIR_PFTP(Tp,basep); - } - LOAD64(%r10,isigns); // times i => shuffle and xor the real part sign bit -#ifdef KERNEL_DAG - TP_RECON_ACCUM; -#else - TM_RECON_ACCUM; -#endif - - //////////////////////////////// - // Xm - //////////////////////////////// -#ifndef STREAM_STORE - basep= (uint64_t) &out._odata[ss]; -#endif - // basep= st.GetPFInfo(nent,plocal); nent++; - if ( local ) { - LOAD64(%r10,isigns); // times i => shuffle and xor the real part sign bit -#ifdef KERNEL_DAG - XM_PROJMEM(base); -#else - XP_PROJMEM(base); -#endif - MAYBEPERM(PERMUTE_DIR3,perm); - } else { - LOAD_CHI(base); - } - base = st.GetInfo(ptype,local,perm,Ym,ent,plocal); ent++; - PREFETCH_CHIMU(base); - { - MULT_2SPIN_DIR_PFXM(Xm,basep); - } - LOAD64(%r10,isigns); // times i => shuffle and xor the real part sign bit -#ifdef KERNEL_DAG - XM_RECON_ACCUM; -#else - XP_RECON_ACCUM; -#endif - - //////////////////////////////// - // Ym - //////////////////////////////// - basep= st.GetPFInfo(nent,plocal); nent++; - if ( local ) { - LOAD64(%r10,isigns); // times i => shuffle and xor the real part sign bit -#ifdef KERNEL_DAG - YM_PROJMEM(base); -#else - YP_PROJMEM(base); -#endif - MAYBEPERM(PERMUTE_DIR2,perm); - } else { - LOAD_CHI(base); - } - base = st.GetInfo(ptype,local,perm,Zm,ent,plocal); ent++; - PREFETCH_CHIMU(base); - { - MULT_2SPIN_DIR_PFYM(Ym,basep); - } - LOAD64(%r10,isigns); // times i => shuffle and xor the real part sign bit -#ifdef KERNEL_DAG - YM_RECON_ACCUM; -#else - YP_RECON_ACCUM; -#endif - - //////////////////////////////// - // Zm - //////////////////////////////// - basep= st.GetPFInfo(nent,plocal); nent++; - if ( local ) { - LOAD64(%r10,isigns); // times i => shuffle and xor the real part sign bit -#ifdef KERNEL_DAG - ZM_PROJMEM(base); -#else - ZP_PROJMEM(base); -#endif - MAYBEPERM(PERMUTE_DIR1,perm); - } else { - LOAD_CHI(base); - } - base = st.GetInfo(ptype,local,perm,Tm,ent,plocal); ent++; - PREFETCH_CHIMU(base); - { - MULT_2SPIN_DIR_PFZM(Zm,basep); - } - LOAD64(%r10,isigns); // times i => shuffle and xor the real part sign bit -#ifdef KERNEL_DAG - ZM_RECON_ACCUM; -#else - ZP_RECON_ACCUM; -#endif - - //////////////////////////////// - // Tm - //////////////////////////////// - basep= st.GetPFInfo(nent,plocal); nent++; - if ( local ) { - LOAD64(%r10,isigns); // times i => shuffle and xor the real part sign bit -#ifdef KERNEL_DAG - TM_PROJMEM(base); -#else - TP_PROJMEM(base); -#endif - MAYBEPERM(PERMUTE_DIR0,perm); - } else { - LOAD_CHI(base); - } - base= (uint64_t) &out._odata[ss]; -#ifndef STREAM_STORE - PREFETCH_CHIMU(base); -#endif - { - MULT_2SPIN_DIR_PFTM(Tm,basep); - } - LOAD64(%r10,isigns); // times i => shuffle and xor the real part sign bit -#ifdef KERNEL_DAG - TM_RECON_ACCUM; -#else - TP_RECON_ACCUM; -#endif - - basep= st.GetPFInfo(nent,plocal); nent++; - SAVE_RESULT(base,basep); - - } - ssU++; - UNLOCK_GAUGE(0); + base = (uint64_t) &out._odata[ss]; + basep= st.GetPFInfo(nent,plocal); nent++; + RESULT(base,basep); + } + ssU++; + UNLOCK_GAUGE(0); } } + +#undef DIR0_PROJMEM +#undef DIR1_PROJMEM +#undef 
DIR2_PROJMEM +#undef DIR3_PROJMEM +#undef DIR4_PROJMEM +#undef DIR5_PROJMEM +#undef DIR6_PROJMEM +#undef DIR7_PROJMEM +#undef DIR0_RECON +#undef DIR1_RECON +#undef DIR2_RECON +#undef DIR3_RECON +#undef DIR4_RECON +#undef DIR5_RECON +#undef DIR6_RECON +#undef DIR7_RECON +#undef EXTERIOR_BLOCK +#undef INTERIOR_BLOCK +#undef EXTERIOR_BLOCK_XP +#undef INTERIOR_BLOCK_XP +#undef COMMON_BLOCK +#undef ZERO_NMU +#undef RESULT diff --git a/lib/qcd/action/fermion/WilsonKernelsAsmQPX.h b/lib/qcd/action/fermion/WilsonKernelsAsmQPX.h index 947538ca..612234d7 100644 --- a/lib/qcd/action/fermion/WilsonKernelsAsmQPX.h +++ b/lib/qcd/action/fermion/WilsonKernelsAsmQPX.h @@ -42,13 +42,17 @@ Author: paboyle #define MAYBEPERM(A,perm) if (perm) { A ; } #define MULT_2SPIN(ptr,pf) MULT_2SPIN_QPX(ptr,pf) #define COMPLEX_SIGNS(isigns) + +#define INTERIOR_AND_EXTERIOR +#undef INTERIOR +#undef EXTERIOR ///////////////////////////////////////////////////////////////// // XYZT vectorised, undag Kernel, single ///////////////////////////////////////////////////////////////// #undef KERNEL_DAG template<> void -WilsonKernels::DiracOptAsmDhopSite(StencilImpl &st,LebesgueOrder & lo,DoubledGaugeField &U, SiteHalfSpinor *buf, +WilsonKernels::AsmDhopSite(StencilImpl &st,LebesgueOrder & lo,DoubledGaugeField &U, SiteHalfSpinor *buf, int ss,int ssU,int Ls,int Ns,const FermionField &in, FermionField &out) #include @@ -57,7 +61,7 @@ WilsonKernels::DiracOptAsmDhopSite(StencilImpl &st,LebesgueOrder & ///////////////////////////////////////////////////////////////// #define KERNEL_DAG template<> void -WilsonKernels::DiracOptAsmDhopSiteDag(StencilImpl &st,LebesgueOrder & lo,DoubledGaugeField &U,SiteHalfSpinor *buf, +WilsonKernels::AsmDhopSiteDag(StencilImpl &st,LebesgueOrder & lo,DoubledGaugeField &U,SiteHalfSpinor *buf, int ss,int ssU,int Ls,int Ns,const FermionField &in, FermionField &out) #include @@ -71,7 +75,7 @@ WilsonKernels::DiracOptAsmDhopSiteDag(StencilImpl &st,LebesgueOrder ///////////////////////////////////////////////////////////////// #undef KERNEL_DAG template<> void -WilsonKernels::DiracOptAsmDhopSite(StencilImpl &st,LebesgueOrder & lo,DoubledGaugeField &U, SiteHalfSpinor *buf, +WilsonKernels::AsmDhopSite(StencilImpl &st,LebesgueOrder & lo,DoubledGaugeField &U, SiteHalfSpinor *buf, int ss,int ssU,int Ls,int Ns,const FermionField &in, FermionField &out) #include @@ -80,7 +84,7 @@ WilsonKernels::DiracOptAsmDhopSite(StencilImpl &st,Lebesgu ///////////////////////////////////////////////////////////////// #define KERNEL_DAG template<> void -WilsonKernels::DiracOptAsmDhopSiteDag(StencilImpl &st,LebesgueOrder & lo,DoubledGaugeField &U,SiteHalfSpinor *buf, +WilsonKernels::AsmDhopSiteDag(StencilImpl &st,LebesgueOrder & lo,DoubledGaugeField &U,SiteHalfSpinor *buf, int ss,int ssU,int Ls,int Ns,const FermionField &in, FermionField &out) #include #undef MAYBEPERM @@ -100,7 +104,7 @@ WilsonKernels::DiracOptAsmDhopSiteDag(StencilImpl &st,Lebe ///////////////////////////////////////////////////////////////// #undef KERNEL_DAG template<> void -WilsonKernels::DiracOptAsmDhopSite(StencilImpl &st,LebesgueOrder & lo,DoubledGaugeField &U, SiteHalfSpinor *buf, +WilsonKernels::AsmDhopSite(StencilImpl &st,LebesgueOrder & lo,DoubledGaugeField &U, SiteHalfSpinor *buf, int ss,int ssU,int Ls,int Ns,const FermionField &in, FermionField &out) #include ///////////////////////////////////////////////////////////////// @@ -111,7 +115,7 @@ WilsonKernels::DiracOptAsmDhopSite(StencilImpl &st,LebesgueOrder & 
///////////////////////////////////////////////////////////////// #define KERNEL_DAG template<> void -WilsonKernels::DiracOptAsmDhopSiteDag(StencilImpl &st,LebesgueOrder & lo,DoubledGaugeField &U,SiteHalfSpinor *buf, +WilsonKernels::AsmDhopSiteDag(StencilImpl &st,LebesgueOrder & lo,DoubledGaugeField &U,SiteHalfSpinor *buf, int ss,int ssU,int Ls,int Ns,const FermionField &in, FermionField &out) #include ///////////////////////////////////////////////////////////////// @@ -125,7 +129,7 @@ WilsonKernels::DiracOptAsmDhopSiteDag(StencilImpl &st,LebesgueOrder ///////////////////////////////////////////////////////////////// #undef KERNEL_DAG template<> void -WilsonKernels::DiracOptAsmDhopSite(StencilImpl &st,LebesgueOrder & lo,DoubledGaugeField &U, SiteHalfSpinor *buf, +WilsonKernels::AsmDhopSite(StencilImpl &st,LebesgueOrder & lo,DoubledGaugeField &U, SiteHalfSpinor *buf, int ss,int ssU,int Ls,int Ns,const FermionField &in, FermionField &out) #include ///////////////////////////////////////////////////////////////// @@ -135,7 +139,7 @@ WilsonKernels::DiracOptAsmDhopSite(StencilImpl &st,Lebesgu ///////////////////////////////////////////////////////////////// #define KERNEL_DAG template<> void -WilsonKernels::DiracOptAsmDhopSiteDag(StencilImpl &st,LebesgueOrder & lo,DoubledGaugeField &U,SiteHalfSpinor *buf, +WilsonKernels::AsmDhopSiteDag(StencilImpl &st,LebesgueOrder & lo,DoubledGaugeField &U,SiteHalfSpinor *buf, int ss,int ssU,int Ls,int Ns,const FermionField &in, FermionField &out) #include ///////////////////////////////////////////////////////////////// diff --git a/lib/qcd/action/fermion/WilsonKernelsHand.cc b/lib/qcd/action/fermion/WilsonKernelsHand.cc index f5900832..90496bdf 100644 --- a/lib/qcd/action/fermion/WilsonKernelsHand.cc +++ b/lib/qcd/action/fermion/WilsonKernelsHand.cc @@ -312,8 +312,8 @@ namespace QCD { template void -WilsonKernels::DiracOptHandDhopSite(StencilImpl &st,LebesgueOrder &lo,DoubledGaugeField &U,SiteHalfSpinor *buf, - int ss,int sU,const FermionField &in, FermionField &out) +WilsonKernels::HandDhopSite(StencilImpl &st,LebesgueOrder &lo,DoubledGaugeField &U,SiteHalfSpinor *buf, + int ss,int sU,const FermionField &in, FermionField &out,int interior,int exterior) { typedef typename Simd::scalar_type S; typedef typename Simd::vector_type V; @@ -554,8 +554,8 @@ WilsonKernels::DiracOptHandDhopSite(StencilImpl &st,LebesgueOrder &lo,Doub } template -void WilsonKernels::DiracOptHandDhopSiteDag(StencilImpl &st,LebesgueOrder &lo,DoubledGaugeField &U,SiteHalfSpinor *buf, - int ss,int sU,const FermionField &in, FermionField &out) +void WilsonKernels::HandDhopSiteDag(StencilImpl &st,LebesgueOrder &lo,DoubledGaugeField &U,SiteHalfSpinor *buf, + int ss,int sU,const FermionField &in, FermionField &out,int interior,int exterior) { // std::cout << "Hand op Dhop "<::DiracOptHandDhopSiteDag(StencilImpl &st,LebesgueOrder // Specialise Gparity to simple implementation //////////////////////////////////////////////// template<> void -WilsonKernels::DiracOptHandDhopSite(StencilImpl &st,LebesgueOrder &lo,DoubledGaugeField &U, +WilsonKernels::HandDhopSite(StencilImpl &st,LebesgueOrder &lo,DoubledGaugeField &U, SiteHalfSpinor *buf, - int sF,int sU,const FermionField &in, FermionField &out) + int sF,int sU,const FermionField &in, FermionField &out,int internal,int external) { assert(0); } template<> void -WilsonKernels::DiracOptHandDhopSiteDag(StencilImpl &st,LebesgueOrder &lo,DoubledGaugeField &U, +WilsonKernels::HandDhopSiteDag(StencilImpl &st,LebesgueOrder &lo,DoubledGaugeField &U, 
SiteHalfSpinor *buf, - int sF,int sU,const FermionField &in, FermionField &out) + int sF,int sU,const FermionField &in, FermionField &out,int internal,int external) { assert(0); } template<> void -WilsonKernels::DiracOptHandDhopSite(StencilImpl &st,LebesgueOrder &lo,DoubledGaugeField &U,SiteHalfSpinor *buf, - int sF,int sU,const FermionField &in, FermionField &out) +WilsonKernels::HandDhopSite(StencilImpl &st,LebesgueOrder &lo,DoubledGaugeField &U,SiteHalfSpinor *buf, + int sF,int sU,const FermionField &in, FermionField &out,int internal,int external) { assert(0); } template<> void -WilsonKernels::DiracOptHandDhopSiteDag(StencilImpl &st,LebesgueOrder &lo,DoubledGaugeField &U,SiteHalfSpinor *buf, - int sF,int sU,const FermionField &in, FermionField &out) +WilsonKernels::HandDhopSiteDag(StencilImpl &st,LebesgueOrder &lo,DoubledGaugeField &U,SiteHalfSpinor *buf, + int sF,int sU,const FermionField &in, FermionField &out,int internal,int external) { assert(0); } @@ -835,10 +835,10 @@ WilsonKernels::DiracOptHandDhopSiteDag(StencilImpl &st,Lebes // Need Nc=3 though // #define INSTANTIATE_THEM(A) \ -template void WilsonKernels::DiracOptHandDhopSite(StencilImpl &st,LebesgueOrder &lo,DoubledGaugeField &U,SiteHalfSpinor *buf,\ - int ss,int sU,const FermionField &in, FermionField &out); \ -template void WilsonKernels::DiracOptHandDhopSiteDag(StencilImpl &st,LebesgueOrder &lo,DoubledGaugeField &U,SiteHalfSpinor *buf,\ - int ss,int sU,const FermionField &in, FermionField &out); +template void WilsonKernels::HandDhopSite(StencilImpl &st,LebesgueOrder &lo,DoubledGaugeField &U,SiteHalfSpinor *buf,\ + int ss,int sU,const FermionField &in, FermionField &out,int interior,int exterior); \ +template void WilsonKernels::HandDhopSiteDag(StencilImpl &st,LebesgueOrder &lo,DoubledGaugeField &U,SiteHalfSpinor *buf,\ + int ss,int sU,const FermionField &in, FermionField &out,int interior,int exterior); INSTANTIATE_THEM(WilsonImplF); INSTANTIATE_THEM(WilsonImplD); From aca7a3ef0a9a4408c563d4659c80ba9d50283ffe Mon Sep 17 00:00:00 2001 From: paboyle Date: Fri, 10 Feb 2017 18:22:31 -0500 Subject: [PATCH 050/101] Optimisation control improvements --- benchmarks/Benchmark_dwf.cc | 2 -- lib/Init.cc | 24 ++++++++++++++++++++---- lib/stencil/Stencil_common.cc | 34 ---------------------------------- 3 files changed, 20 insertions(+), 40 deletions(-) delete mode 100644 lib/stencil/Stencil_common.cc diff --git a/benchmarks/Benchmark_dwf.cc b/benchmarks/Benchmark_dwf.cc index a5532d3d..95d4f016 100644 --- a/benchmarks/Benchmark_dwf.cc +++ b/benchmarks/Benchmark_dwf.cc @@ -52,8 +52,6 @@ int main (int argc, char ** argv) { Grid_init(&argc,&argv); - CartesianCommunicator::SetCommunicatorPolicy(CartesianCommunicator::CommunicatorPolicySendrecv); - WilsonKernelsStatic::Comms = WilsonKernelsStatic::CommsAndCompute; int threads = GridThread::GetThreads(); std::cout< -Author: Peter Boyle - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
- - You should have received a copy of the GNU General Public License along - with this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. - - See the full license in the file "LICENSE" in the top level distribution directory - *************************************************************************************/ - /* END LEGAL */ -#include "Grid.h" - -namespace Grid { -} - - From bd600702cf2a0e5bd8335c1192cbe480dc76a7af Mon Sep 17 00:00:00 2001 From: paboyle Date: Wed, 15 Feb 2017 11:11:04 +0000 Subject: [PATCH 051/101] Vectorise the XYZT face gathering better. Hard coded for simd_layout <= 2 in any given spread out direction; full generality is inconsistent with efficiency. --- lib/Init.cc | 4 +- lib/Stencil.h | 161 +++++++++++++++++++++++++-- lib/communicator/Communicator_mpi.cc | 6 +- lib/cshift/Cshift_common.h | 58 ++++++++++ lib/simd/Grid_avx.h | 40 ++++++- lib/simd/Grid_avx512.h | 40 +++++++ lib/simd/Grid_sse4.h | 36 ++++++ lib/simd/Grid_vector_types.h | 36 +++--- lib/tensors/Tensor_class.h | 20 ++++ tests/Test_simd.cc | 143 +++++++++++++++++++++++- 10 files changed, 510 insertions(+), 34 deletions(-) diff --git a/lib/Init.cc b/lib/Init.cc index 34dc1720..aeab5835 100644 --- a/lib/Init.cc +++ b/lib/Init.cc @@ -338,9 +338,9 @@ void Grid_init(int *argc,char ***argv) QCD::WilsonKernelsStatic::Opt=QCD::WilsonKernelsStatic::OptGeneric; } if( GridCmdOptionExists(*argv,*argv+*argc,"--comms-overlap") ){ - WilsonKernelsStatic::Comms = WilsonKernelsStatic::CommsAndCompute; + QCD::WilsonKernelsStatic::Comms = QCD::WilsonKernelsStatic::CommsAndCompute; } else { - WilsonKernelsStatic::Comms = WilsonKernelsStatic::CommsThenCompute; + QCD::WilsonKernelsStatic::Comms = QCD::WilsonKernelsStatic::CommsThenCompute; } if( GridCmdOptionExists(*argv,*argv+*argc,"--comms-isend") ){ CartesianCommunicator::SetCommunicatorPolicy(CartesianCommunicator::CommunicatorPolicyIsend); diff --git a/lib/Stencil.h b/lib/Stencil.h index 71f086af..8b5eac2d 100644 --- a/lib/Stencil.h +++ b/lib/Stencil.h @@ -184,14 +184,18 @@ class CartesianStencil { // Stencil runs along coordinate axes only; NO diagonal struct Merge { cobj * mpointer; std::vector rpointers; + std::vector vpointers; Integer buffer_size; Integer packet_id; + Integer exchange; + Integer type; }; std::vector Mergers; void AddMerge(cobj *merge_p,std::vector &rpointers,Integer buffer_size,Integer packet_id) { Merge m; + m.exchange = 0; m.mpointer = merge_p; m.rpointers= rpointers; m.buffer_size = buffer_size; @@ -199,6 +203,17 @@ class CartesianStencil { // Stencil runs along coordinate axes only; NO diagonal Mergers.push_back(m); } + void AddMergeNew(cobj *merge_p,std::vector &rpointers,Integer buffer_size,Integer packet_id,Integer type) { + Merge m; + m.exchange = 1; + m.type = type; + m.mpointer = merge_p; + m.vpointers= rpointers; + m.buffer_size = buffer_size; + m.packet_id = packet_id; + Mergers.push_back(m); + } + void CommsMerge(void ) { for(int i=0;i u_simd_send_buf; std::vector u_simd_recv_buf; + std::vector new_simd_send_buf; + std::vector new_simd_recv_buf; int u_comm_offset; int _unified_buffer_size; @@ -432,12 +457,15 @@ PARALLEL_FOR_LOOP u_simd_send_buf.resize(Nsimd); u_simd_recv_buf.resize(Nsimd); - + new_simd_send_buf.resize(Nsimd); + new_simd_recv_buf.resize(Nsimd); u_send_buf_p=(cobj *)_grid->ShmBufferMalloc(_unified_buffer_size*sizeof(cobj)); u_recv_buf_p=(cobj *)_grid->ShmBufferMalloc(_unified_buffer_size*sizeof(cobj)); for(int 
l=0;lShmBufferMalloc(_unified_buffer_size*sizeof(scalar_object)); u_simd_send_buf[l] = (scalar_object *)_grid->ShmBufferMalloc(_unified_buffer_size*sizeof(scalar_object)); + new_simd_recv_buf[l] = (cobj *)_grid->ShmBufferMalloc(_unified_buffer_size*sizeof(cobj)); + new_simd_send_buf[l] = (cobj *)_grid->ShmBufferMalloc(_unified_buffer_size*sizeof(cobj)); } PrecomputeByteOffsets(); @@ -675,7 +703,7 @@ PARALLEL_FOR_LOOP HaloGather(source,compress); this->CommunicateBegin(reqs); this->CommunicateComplete(reqs); - CommsMerge(); // spins + CommsMerge(); } template void HaloGatherDir(const Lattice &source,compressor &compress,int point,int & face_idx) @@ -706,7 +734,9 @@ PARALLEL_FOR_LOOP if ( sshift[0] == sshift[1] ) { if (splice_dim) { splicetime-=usecond(); - GatherSimd(source,dimension,shift,0x3,compress,face_idx); + // GatherSimd(source,dimension,shift,0x3,compress,face_idx); + // std::cout << "GatherSimdNew"< + void GatherSimdNew(const Lattice &rhs,int dimension,int shift,int cbmask,compressor &compress,int & face_idx) + { + const int Nsimd = _grid->Nsimd(); + + const int maxl =2;// max layout in a direction + int fd = _grid->_fdimensions[dimension]; + int rd = _grid->_rdimensions[dimension]; + int ld = _grid->_ldimensions[dimension]; + int pd = _grid->_processors[dimension]; + int simd_layout = _grid->_simd_layout[dimension]; + int comm_dim = _grid->_processors[dimension] >1 ; + assert(comm_dim==1); + // This will not work with a rotate dim + assert(simd_layout==maxl); + assert(shift>=0); + assert(shiftPermuteType(dimension); + // std::cout << "SimdNew permute type "<_slice_nblock[dimension]*_grid->_slice_block[dimension]; + int words = sizeof(cobj)/sizeof(vector_type); + + assert(cbmask==0x3); // Fixme think there is a latent bug if not true + + int bytes = (buffer_size*sizeof(cobj))/simd_layout; + assert(bytes*simd_layout == buffer_size*sizeof(cobj)); + + std::vector rpointers(maxl); + std::vector spointers(maxl); + + /////////////////////////////////////////// + // Work out what to send where + /////////////////////////////////////////// + + int cb = (cbmask==0x2)? 
Odd : Even; + int sshift= _grid->CheckerBoardShiftForCB(rhs.checkerboard,dimension,shift,cb); + + // loop over outer coord planes orthog to dim + for(int x=0;x= rd ); + + if ( any_offnode ) { + + for(int i=0;iShiftedRanks(dimension,nbr_proc,xmit_to_rank,recv_from_rank); + + // shm == receive pointer if offnode + // shm == Translate[send pointer] if on node -- my view of his send pointer + cobj *shm = (cobj *) _grid->ShmBufferTranslate(recv_from_rank,sp); + if (shm==NULL) { + shm = rp; + } + + // if Direct, StencilSendToRecvFrom will suppress copy to a peer on node + // assuming above pointer flip + AddPacket((void *)sp,(void *)rp,xmit_to_rank,recv_from_rank,bytes); + + rpointers[i] = shm; + + } else { + + rpointers[i] = sp; + + } + } + + AddMergeNew(&u_recv_buf_p[u_comm_offset],rpointers,buffer_size,Packets.size()-1,permute_type); + + u_comm_offset +=buffer_size; + } + } + } + }; } diff --git a/lib/communicator/Communicator_mpi.cc b/lib/communicator/Communicator_mpi.cc index 61126a17..2033a446 100644 --- a/lib/communicator/Communicator_mpi.cc +++ b/lib/communicator/Communicator_mpi.cc @@ -42,10 +42,10 @@ void CartesianCommunicator::Init(int *argc, char ***argv) { int provided; MPI_Initialized(&flag); // needed to coexist with other libs apparently if ( !flag ) { - // MPI_Init_thread(argc,argv,MPI_THREAD_SERIALIZED,&provided); - // assert (provided == MPI_THREAD_SERIALIZED); MPI_Init_thread(argc,argv,MPI_THREAD_MULTIPLE,&provided); - assert (provided == MPI_THREAD_MULTIPLE); + if ( provided != MPI_THREAD_MULTIPLE ) { + QCD::WilsonKernelsStatic::Comms = QCD::WilsonKernelsStatic::CommsThenCompute; + } } MPI_Comm_dup (MPI_COMM_WORLD,&communicator_world); ShmInitGeneric(); diff --git a/lib/cshift/Cshift_common.h b/lib/cshift/Cshift_common.h index 813929d8..c01187de 100644 --- a/lib/cshift/Cshift_common.h +++ b/lib/cshift/Cshift_common.h @@ -103,6 +103,7 @@ Gather_plane_extract(const Lattice &rhs,std::vector_slice_nblock[dimension]; int e2=rhs._grid->_slice_block[dimension]; int n1=rhs._grid->_slice_stride[dimension]; + if ( cbmask ==0x3){ PARALLEL_NESTED_LOOP2 for(int n=0;n(temp,pointers,offset); @@ -137,6 +139,62 @@ PARALLEL_NESTED_LOOP2 } } +/////////////////////////////////////////////////////////////////// +// Gather for when there *is* need to SIMD split with compression +/////////////////////////////////////////////////////////////////// +template void +Gather_plane_exchange(const Lattice &rhs, + std::vector pointers,int dimension,int plane,int cbmask,compressor &compress,int type) +{ + int rd = rhs._grid->_rdimensions[dimension]; + + if ( !rhs._grid->CheckerBoarded(dimension) ) { + cbmask = 0x3; + } + + int so = plane*rhs._grid->_ostride[dimension]; // base offset for start of plane + + int e1=rhs._grid->_slice_nblock[dimension]; + int e2=rhs._grid->_slice_block[dimension]; + int n1=rhs._grid->_slice_stride[dimension]; + + // Need to switch to a table loop + std::vector > table; + + if ( cbmask ==0x3){ + for(int n=0;n (offset,o+b)); + } + } + } else { + // Case of SIMD split AND checker dim cannot currently be hit, except in + // Test_cshift_red_black code. 
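+ // Both branches tabulate (buffer offset, lattice index) pairs, so the
+ // exchange below runs as one flat parallel loop over the table.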
+ for(int n=0;nCheckerBoardFromOindex(o+b); + int offset = b+n*e2; + + if ( ocb & cbmask ) { + table.push_back(std::pair (offset,o+b)); + } + } + } + } + + assert( (table.size()&0x1)==0); +PARALLEL_FOR_LOOP + for(int j=0;j Ah Bh, Al Bl + // On merging buffers: Ah,Bh , Al Bl -> Ah Al, Bh, Bl + // The operation is its own inverse + struct Exchange{ + // 3210 ordering + static inline void Exchange0(__m512 &out1,__m512 &out2,__m512 in1,__m512 in2){ + out1= _mm512_shuffle_f32x4(in1,in2,_MM_SELECT_FOUR_FOUR(1,0,1,0)); + out2= _mm512_shuffle_f32x4(in1,in2,_MM_SELECT_FOUR_FOUR(3,2,3,2)); + }; + static inline void Exchange1(__m512 &out1,__m512 &out2,__m512 in1,__m512 in2){ + out1= _mm512_shuffle_f32x4(in1,in2,_MM_SELECT_FOUR_FOUR(2,0,2,0)); + out2= _mm512_shuffle_f32x4(in1,in2,_MM_SELECT_FOUR_FOUR(3,1,3,1)); + }; + static inline void Exchange2(__m512 &out1,__m512 &out2,__m512 in1,__m512 in2){ + out1= _mm512_shuffle_ps(in1,in2,_MM_SELECT_FOUR_FOUR(1,0,1,0)); + out2= _mm512_shuffle_ps(in1,in2,_MM_SELECT_FOUR_FOUR(3,2,3,2)); + }; + static inline void Exchange3(__m512 &out1,__m512 &out2,__m512 in1,__m512 in2){ + out1= _mm512_shuffle_ps(in1,in2,_MM_SELECT_FOUR_FOUR(2,0,2,0)); + out2= _mm512_shuffle_ps(in1,in2,_MM_SELECT_FOUR_FOUR(3,1,3,1)); + }; + + static inline void Exchange0(__m512d &out1,__m512d &out2,__m512d in1,__m512d in2){ + out1= _mm512_shuffle_f64x2(in1,in2,_MM_SELECT_FOUR_FOUR(1,0,1,0)); + out2= _mm512_shuffle_f64x2(in1,in2,_MM_SELECT_FOUR_FOUR(3,2,3,2)); + }; + static inline void Exchange1(__m512d &out1,__m512d &out2,__m512d in1,__m512d in2){ + out1= _mm512_shuffle_f64x2(in1,in2,_MM_SELECT_FOUR_FOUR(2,0,2,0)); + out2= _mm512_shuffle_f64x2(in1,in2,_MM_SELECT_FOUR_FOUR(3,1,3,1)); + }; + static inline void Exchange2(__m512d &out1,__m512d &out2,__m512d in1,__m512d in2){ + out1 = _mm512_shuffle_pd(in1,in2,0x00); + out2 = _mm512_shuffle_pd(in1,in2,0xFF); + }; + static inline void Exchange3(__m512d &out1,__m512d &out2,__m512d in1,__m512d in2){ + assert(0); + return; + }; + }; + struct Rotate{ diff --git a/lib/simd/Grid_sse4.h b/lib/simd/Grid_sse4.h index 943756b2..fcad4c28 100644 --- a/lib/simd/Grid_sse4.h +++ b/lib/simd/Grid_sse4.h @@ -326,7 +326,43 @@ namespace Optimization { static inline __m128d Permute3(__m128d in){ return in; }; + }; + struct Exchange{ + // 3210 ordering + static inline void Exchange0(__m128 &out1,__m128 &out2,__m128 in1,__m128 in2){ + out1= _mm_shuffle_ps(in1,in2,_MM_SELECT_FOUR_FOUR(1,0,1,0)); + out2= _mm_shuffle_ps(in1,in2,_MM_SELECT_FOUR_FOUR(3,2,3,2)); + }; + static inline void Exchange1(__m128 &out1,__m128 &out2,__m128 in1,__m128 in2){ + out1= _mm_shuffle_ps(in1,in2,_MM_SELECT_FOUR_FOUR(2,0,2,0)); + out2= _mm_shuffle_ps(in1,in2,_MM_SELECT_FOUR_FOUR(3,1,3,1)); + }; + static inline void Exchange2(__m128 &out1,__m128 &out2,__m128 in1,__m128 in2){ + assert(0); + return; + }; + static inline void Exchange3(__m128 &out1,__m128 &out2,__m128 in1,__m128 in2){ + assert(0); + return; + }; + + static inline void Exchange0(__m128d &out1,__m128d &out2,__m128d in1,__m128d in2){ + out1= _mm_shuffle_pd(in1,in2,0x0); + out2= _mm_shuffle_pd(in1,in2,0x3); + }; + static inline void Exchange1(__m128d &out1,__m128d &out2,__m128d in1,__m128d in2){ + assert(0); + return; + }; + static inline void Exchange2(__m128d &out1,__m128d &out2,__m128d in1,__m128d in2){ + assert(0); + return; + }; + static inline void Exchange3(__m128d &out1,__m128d &out2,__m128d in1,__m128d in2){ + assert(0); + return; + }; }; struct Rotate{ diff --git a/lib/simd/Grid_vector_types.h b/lib/simd/Grid_vector_types.h index 
8a6ab2e7..cd499d88 100644 --- a/lib/simd/Grid_vector_types.h +++ b/lib/simd/Grid_vector_types.h @@ -350,6 +350,18 @@ class Grid_simd { return ret; } + /////////////////////// + // Exchange + // Al Ah , Bl Bh -> Al Bl Ah,Bh + /////////////////////// + friend inline void exchange(Grid_simd &out1,Grid_simd &out2,Grid_simd in1,Grid_simd in2,int n) + { + if (n==3) Optimization::Exchange::Exchange3(out1.v,out2.v,in1.v,in2.v); + else if(n==2) Optimization::Exchange::Exchange2(out1.v,out2.v,in1.v,in2.v); + else if(n==1) Optimization::Exchange::Exchange1(out1.v,out2.v,in1.v,in2.v); + else if(n==0) Optimization::Exchange::Exchange0(out1.v,out2.v,in1.v,in2.v); + } + //////////////////////////////////////////////////////////////////// // General permute; assumes vector length is same across // all subtypes; may not be a good assumption, but could @@ -372,23 +384,11 @@ class Grid_simd { int dist = perm & 0xF; y = rotate(b, dist); return; - } - switch (perm) { - case 3: - permute3(y, b); - break; - case 2: - permute2(y, b); - break; - case 1: - permute1(y, b); - break; - case 0: - permute0(y, b); - break; - default: - assert(0); - } + } + else if(perm==3) permute3(y, b); + else if(perm==2) permute2(y, b); + else if(perm==1) permute1(y, b); + else if(perm==0) permute0(y, b); } }; // end of Grid_simd class definition @@ -444,6 +444,8 @@ inline void rbroadcast(Grid_simd &ret,const Grid_simd &src,int lane){ ret.v = unary(real(typepun[lane]), VsplatSIMD()); } + + /////////////////////// // Splat /////////////////////// diff --git a/lib/tensors/Tensor_class.h b/lib/tensors/Tensor_class.h index 473dd6b1..e0b69eb0 100644 --- a/lib/tensors/Tensor_class.h +++ b/lib/tensors/Tensor_class.h @@ -105,6 +105,11 @@ class iScalar { friend strong_inline void rotate(iScalar &out,const iScalar &in,int rot){ rotate(out._internal,in._internal,rot); } + friend strong_inline void exchange(iScalar &out1,iScalar &out2, + const iScalar &in1,const iScalar &in2,int type){ + exchange(out1._internal,out2._internal, + in1._internal, in2._internal,type); + } // Unary negation friend strong_inline iScalar operator-(const iScalar &r) { @@ -248,6 +253,13 @@ class iVector { rotate(out._internal[i],in._internal[i],rot); } } + friend strong_inline void exchange(iVector &out1,iVector &out2, + const iVector &in1,const iVector &in2,int type){ + for(int i=0;i operator-(const iVector &r) { @@ -374,6 +386,14 @@ class iMatrix { rotate(out._internal[i][j],in._internal[i][j],rot); }} } + friend strong_inline void exchange(iMatrix &out1,iMatrix &out2, + const iMatrix &in1,const iMatrix &in2,int type){ + for(int i=0;i operator-(const iMatrix &r) { diff --git a/tests/Test_simd.cc b/tests/Test_simd.cc index 92f9bcd8..d840140e 100644 --- a/tests/Test_simd.cc +++ b/tests/Test_simd.cc @@ -113,8 +113,6 @@ public: // outerproduct, // zeroit // permute - - class funcReduce { public: funcReduce() {}; @@ -168,7 +166,7 @@ void Tester(const functor &func) int ok=0; for(int i=0;i1.0e-7){ + if ( abs(reference[i]-result[i])>1.0e-6){ std::cout< void operator()(vec &r1,vec &r2,vec &i1,vec &i2) const { exchange(r1,r2,i1,i2,n);} + template void apply(std::vector &r1,std::vector &r2,std::vector &in1,std::vector &in2) const { + int sz=in1.size(); + + + int msk = sz>>(n+1); + + int j1=0; + int j2=0; + for(int i=0;i +void ExchangeTester(const functor &func) +{ + GridSerialRNG sRNG; + sRNG.SeedRandomDevice(); + + int Nsimd = vec::Nsimd(); + + std::vector input1(Nsimd); + std::vector input2(Nsimd); + std::vector result1(Nsimd); + std::vector result2(Nsimd); + std::vector 
reference1(Nsimd); + std::vector reference2(Nsimd); + std::vector test1(Nsimd); + std::vector test2(Nsimd); + + std::vector > buf(6); + vec & v_input1 = buf[0]; + vec & v_input2 = buf[1]; + vec & v_result1 = buf[2]; + vec & v_result2 = buf[3]; + vec & v_test1 = buf[4]; + vec & v_test2 = buf[5]; + + for(int i=0;i(v_input1,input1); + merge(v_input2,input2); + merge(v_result1,result1); + merge(v_result2,result1); + + func(v_result1,v_result2,v_input1,v_input2); + func.apply(reference1,reference2,input1,input2); + + func(v_test1,v_test2,v_result1,v_result2); + + extract(v_result1,result1); + extract(v_result2,result2); + extract(v_test1,test1); + extract(v_test2,test2); + + std::cout<(funcPermute(i)); } + std::cout<(funcExchange(i)); + } + std::cout<(funcPermute(i)); } + std::cout<(funcExchange(i)); + } + std::cout<(funcPermute(i)); } + + std::cout<(funcExchange(i)); + } + + std::cout<(funcExchange(i)); + } + + std::cout< Date: Thu, 16 Feb 2017 23:51:15 +0000 Subject: [PATCH 052/101] Faster gather --- lib/Stencil.h | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/lib/Stencil.h b/lib/Stencil.h index 8b5eac2d..00c9f7aa 100644 --- a/lib/Stencil.h +++ b/lib/Stencil.h @@ -240,6 +240,10 @@ PARALLEL_FOR_LOOP for(int o=0;o Date: Thu, 16 Feb 2017 23:51:33 +0000 Subject: [PATCH 053/101] Make clang happy with parenthesis --- lib/communicator/Communicator_mpi3.cc | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/communicator/Communicator_mpi3.cc b/lib/communicator/Communicator_mpi3.cc index b86d5259..23626c5b 100644 --- a/lib/communicator/Communicator_mpi3.cc +++ b/lib/communicator/Communicator_mpi3.cc @@ -100,7 +100,7 @@ void CartesianCommunicator::Init(int *argc, char ***argv) { int flag; int provided; - mtrace(); + // mtrace(); MPI_Initialized(&flag); // needed to coexist with other libs apparently if ( !flag ) { @@ -511,7 +511,7 @@ void CartesianCommunicator::SendToRecvFromBegin(std::vector &lis int myrank = _processor; int ierr; - if ( (CommunicatorPolicy == CommunicatorPolicyIsend) ) { + if ( CommunicatorPolicy == CommunicatorPolicyIsend ) { MPI_Request xrq; MPI_Request rrq; From d68907fc3e494020b9f895119356578b792685a8 Mon Sep 17 00:00:00 2001 From: paboyle Date: Thu, 16 Feb 2017 18:51:35 -0500 Subject: [PATCH 054/101] Debug temp --- lib/Stencil.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/Stencil.h b/lib/Stencil.h index 8b5eac2d..a1f09d6b 100644 --- a/lib/Stencil.h +++ b/lib/Stencil.h @@ -155,7 +155,7 @@ class CartesianStencil { // Stencil runs along coordinate axes only; NO diagonal } _grid->StencilBarrier();// Synch shared memory on a single nodes commtime+=usecond(); - /* + int dump=1; if(dump){ for(int i=0;i Date: Thu, 16 Feb 2017 23:52:22 +0000 Subject: [PATCH 055/101] Faster gather exchange --- lib/cshift/Cshift_common.h | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/lib/cshift/Cshift_common.h b/lib/cshift/Cshift_common.h index c01187de..e2fa0481 100644 --- a/lib/cshift/Cshift_common.h +++ b/lib/cshift/Cshift_common.h @@ -142,12 +142,12 @@ PARALLEL_NESTED_LOOP2 /////////////////////////////////////////////////////////////////// // Gather for when there *is* need to SIMD split with compression /////////////////////////////////////////////////////////////////// -template void +template double Gather_plane_exchange(const Lattice &rhs, std::vector pointers,int dimension,int plane,int cbmask,compressor &compress,int type) { int rd = rhs._grid->_rdimensions[dimension]; - + double 
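// The ExchangeTester above merges scalar inputs into SIMD words, applies the
// kernel, and checks it both against the scalar reference and against itself
// applied twice: the receive side of the stencil relies on exchange being its
// own inverse. The involution check in condensed, standalone form (scalar
// model restated locally so this compiles on its own):
#include <cstdio>
#include <vector>
using vecf = std::vector<float>;

static void exch(vecf &o1, vecf &o2, const vecf &a, const vecf &b, int n) {
  int msk = (int)a.size() >> (n + 1);
  for (int i = 0; i < (int)a.size(); i++) {
    o1[i] = (i & msk) ? b[i & ~msk] : a[i & ~msk];
    o2[i] = (i & msk) ? b[i | msk] : a[i | msk];
  }
}

int main() {
  const int N = 8;
  vecf A(N), B(N), r1(N), r2(N), t1(N), t2(N);
  for (int i = 0; i < N; i++) { A[i] = (float)i; B[i] = 100.f + i; }
  for (int n = 0; n < 3; n++) {      // every level down to adjacent lanes
    exch(r1, r2, A, B, n);           // forward split
    exch(t1, t2, r1, r2, n);         // applied twice ...
    if (t1 != A || t2 != B) { std::printf("not an involution at n=%d\n", n); return 1; }
  }
  std::printf("exchange is self-inverse at every level\n");
  return 0;
}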
t1,t2; if ( !rhs._grid->CheckerBoarded(dimension) ) { cbmask = 0x3; } @@ -186,13 +186,20 @@ Gather_plane_exchange(const Lattice &rhs, } assert( (table.size()&0x1)==0); + t1=usecond(); PARALLEL_FOR_LOOP for(int j=0;j Date: Thu, 16 Feb 2017 23:52:44 +0000 Subject: [PATCH 056/101] Improvements to avx for invertible to avoid latent bug --- lib/simd/Grid_avx.h | 19 +++++++++++++++++-- tests/Test_simd.cc | 6 ++++-- 2 files changed, 21 insertions(+), 4 deletions(-) diff --git a/lib/simd/Grid_avx.h b/lib/simd/Grid_avx.h index 7abe4d5a..2dbe26f4 100644 --- a/lib/simd/Grid_avx.h +++ b/lib/simd/Grid_avx.h @@ -474,16 +474,31 @@ namespace Optimization { struct Exchange{ // 3210 ordering static inline void Exchange0(__m256 &out1,__m256 &out2,__m256 in1,__m256 in2){ + //Invertible + //AB CD -> AC BD + //AC BD -> AB CD out1= _mm256_permute2f128_ps(in1,in2,0x20); out2= _mm256_permute2f128_ps(in1,in2,0x31); }; static inline void Exchange1(__m256 &out1,__m256 &out2,__m256 in1,__m256 in2){ + //Invertible + // ABCD EFGH ->ABEF CDGH + // ABEF CDGH ->ABCD EFGH out1= _mm256_shuffle_ps(in1,in2,_MM_SELECT_FOUR_FOUR(1,0,1,0)); out2= _mm256_shuffle_ps(in1,in2,_MM_SELECT_FOUR_FOUR(3,2,3,2)); }; static inline void Exchange2(__m256 &out1,__m256 &out2,__m256 in1,__m256 in2){ - out1= _mm256_shuffle_ps(in1,in2,_MM_SELECT_FOUR_FOUR(2,0,2,0)); - out2= _mm256_shuffle_ps(in1,in2,_MM_SELECT_FOUR_FOUR(3,1,3,1)); + // Invertible ? + // ABCD EFGH -> ACEG BDFH + // ACEG BDFH -> AEBF CGDH + // out1= _mm256_shuffle_ps(in1,in2,_MM_SELECT_FOUR_FOUR(2,0,2,0)); + // out2= _mm256_shuffle_ps(in1,in2,_MM_SELECT_FOUR_FOUR(3,1,3,1)); + // Bollocks; need + // AECG BFDH -> ABCD EFGH + out1= _mm256_shuffle_ps(in1,in2,_MM_SELECT_FOUR_FOUR(2,0,2,0)); /*ACEG*/ + out2= _mm256_shuffle_ps(in1,in2,_MM_SELECT_FOUR_FOUR(3,1,3,1)); /*BDFH*/ + out1= _mm256_shuffle_ps(out1,out1,_MM_SELECT_FOUR_FOUR(3,1,2,0)); /*AECG*/ + out2= _mm256_shuffle_ps(out2,out2,_MM_SELECT_FOUR_FOUR(3,1,2,0)); /*AECG*/ }; static inline void Exchange3(__m256 &out1,__m256 &out2,__m256 in1,__m256 in2){ assert(0); diff --git a/tests/Test_simd.cc b/tests/Test_simd.cc index d840140e..b94febb5 100644 --- a/tests/Test_simd.cc +++ b/tests/Test_simd.cc @@ -419,8 +419,10 @@ void ExchangeTester(const functor &func) assert(found==1); } - // for(int i=0;i Date: Mon, 20 Feb 2017 17:47:01 -0500 Subject: [PATCH 057/101] 1000 iters on bmark for more accurate timing --- benchmarks/Benchmark_dwf.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/benchmarks/Benchmark_dwf.cc b/benchmarks/Benchmark_dwf.cc index 95d4f016..09893d8f 100644 --- a/benchmarks/Benchmark_dwf.cc +++ b/benchmarks/Benchmark_dwf.cc @@ -168,7 +168,7 @@ int main (int argc, char ** argv) if ( WilsonKernelsStatic::Opt == WilsonKernelsStatic::OptInlineAsm ) std::cout << GridLogMessage<< "* Using Asm Nc=3 WilsonKernels" <Barrier(); Dw.ZeroCounters(); From 37720c4db75e624ef962f58d37fa85b7fa3959cb Mon Sep 17 00:00:00 2001 From: paboyle Date: Mon, 20 Feb 2017 17:47:40 -0500 Subject: [PATCH 058/101] Count bytes off node only --- lib/communicator/Communicator_base.cc | 3 ++- lib/communicator/Communicator_base.h | 2 +- lib/communicator/Communicator_mpi3.cc | 6 +++++- 3 files changed, 8 insertions(+), 3 deletions(-) diff --git a/lib/communicator/Communicator_base.cc b/lib/communicator/Communicator_base.cc index abafb3f7..83e1bd72 100644 --- a/lib/communicator/Communicator_base.cc +++ b/lib/communicator/Communicator_base.cc @@ -91,7 +91,7 @@ void CartesianCommunicator::GlobalSumVector(ComplexD *c,int N) int 
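// Why the Exchange2 fix above needs the extra in-lane shuffle: a plain
// per-half even/odd split is not its own inverse, and the stencil merge uses
// exchange as an involution. Scalar demonstration on 8 lanes split into two
// 128-bit halves (illustrative only):
#include <array>
#include <cstdio>
#include <utility>
using V8 = std::array<float, 8>;

// old: per 4-lane half, out1 = evens(in1)++evens(in2), out2 = odds
static void exch2_old(V8 &o1, V8 &o2, const V8 &a, const V8 &b) {
  for (int h = 0; h < 8; h += 4) {
    o1[h+0]=a[h+0]; o1[h+1]=a[h+2]; o1[h+2]=b[h+0]; o1[h+3]=b[h+2];
    o2[h+0]=a[h+1]; o2[h+1]=a[h+3]; o2[h+2]=b[h+1]; o2[h+3]=b[h+3];
  }
}
// fixed: additionally permute each half by (0,2,1,3), interleaving the pairs,
// which is what the appended _MM_SELECT_FOUR_FOUR(3,1,2,0) shuffle does
static void exch2_fixed(V8 &o1, V8 &o2, const V8 &a, const V8 &b) {
  exch2_old(o1, o2, a, b);
  for (int h = 0; h < 8; h += 4) { std::swap(o1[h+1], o1[h+2]); std::swap(o2[h+1], o2[h+2]); }
}

int main() {
  V8 A, B, r1, r2, t1, t2;
  for (int i = 0; i < 8; i++) { A[i] = (float)i; B[i] = 10.f + i; }
  exch2_old(r1, r2, A, B);   exch2_old(t1, t2, r1, r2);
  std::printf("old   self-inverse: %d\n", (int)(t1 == A && t2 == B)); // 0
  exch2_fixed(r1, r2, A, B); exch2_fixed(t1, t2, r1, r2);
  std::printf("fixed self-inverse: %d\n", (int)(t1 == A && t2 == B)); // 1
  return 0;
}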
CartesianCommunicator::NodeCount(void) { return ProcessorCount();}; -void CartesianCommunicator::StencilSendToRecvFromBegin(std::vector &list, +double CartesianCommunicator::StencilSendToRecvFromBegin(std::vector &list, void *xmit, int xmit_to_rank, void *recv, @@ -99,6 +99,7 @@ void CartesianCommunicator::StencilSendToRecvFromBegin(std::vector &waitall) { diff --git a/lib/communicator/Communicator_base.h b/lib/communicator/Communicator_base.h index 6ae48b54..51468007 100644 --- a/lib/communicator/Communicator_base.h +++ b/lib/communicator/Communicator_base.h @@ -207,7 +207,7 @@ class CartesianCommunicator { void SendToRecvFromComplete(std::vector &waitall); - void StencilSendToRecvFromBegin(std::vector &list, + double StencilSendToRecvFromBegin(std::vector &list, void *xmit, int xmit_to_rank, void *recv, diff --git a/lib/communicator/Communicator_mpi3.cc b/lib/communicator/Communicator_mpi3.cc index 23626c5b..30065b4d 100644 --- a/lib/communicator/Communicator_mpi3.cc +++ b/lib/communicator/Communicator_mpi3.cc @@ -530,7 +530,7 @@ void CartesianCommunicator::SendToRecvFromBegin(std::vector &lis } } -void CartesianCommunicator::StencilSendToRecvFromBegin(std::vector &list, +double CartesianCommunicator::StencilSendToRecvFromBegin(std::vector &list, void *xmit, int dest, void *recv, @@ -548,6 +548,7 @@ void CartesianCommunicator::StencilSendToRecvFromBegin(std::vector &waitall) { From 41009cc1426788e2522fa0e7a893efcc73fc555d Mon Sep 17 00:00:00 2001 From: paboyle Date: Mon, 20 Feb 2017 17:48:04 -0500 Subject: [PATCH 059/101] Move excange into the stencil only; keep Cshift fully general --- lib/cshift/Cshift_common.h | 63 -------------------------------------- 1 file changed, 63 deletions(-) diff --git a/lib/cshift/Cshift_common.h b/lib/cshift/Cshift_common.h index e2fa0481..7dc081ca 100644 --- a/lib/cshift/Cshift_common.h +++ b/lib/cshift/Cshift_common.h @@ -139,69 +139,6 @@ PARALLEL_NESTED_LOOP2 } } -/////////////////////////////////////////////////////////////////// -// Gather for when there *is* need to SIMD split with compression -/////////////////////////////////////////////////////////////////// -template double -Gather_plane_exchange(const Lattice &rhs, - std::vector pointers,int dimension,int plane,int cbmask,compressor &compress,int type) -{ - int rd = rhs._grid->_rdimensions[dimension]; - double t1,t2; - if ( !rhs._grid->CheckerBoarded(dimension) ) { - cbmask = 0x3; - } - - int so = plane*rhs._grid->_ostride[dimension]; // base offset for start of plane - - int e1=rhs._grid->_slice_nblock[dimension]; - int e2=rhs._grid->_slice_block[dimension]; - int n1=rhs._grid->_slice_stride[dimension]; - - // Need to switch to a table loop - std::vector > table; - - if ( cbmask ==0x3){ - for(int n=0;n (offset,o+b)); - } - } - } else { - // Case of SIMD split AND checker dim cannot currently be hit, except in - // Test_cshift_red_black code. 
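// "Count bytes off node only": the begin-call now returns a double so the
// caller can accumulate genuinely communicated bytes; a transfer whose peer
// lives on the same shared-memory node costs no network traffic and adds 0.
// A sketch of the accounting -- on_node() is an illustrative stand-in for
// the shared-memory rank lookup the real communicator performs:
#include <cstddef>

struct Packet { int to_rank, from_rank; std::size_t bytes; };

static bool on_node(int rank) { return rank == 0; } // stand-in, not Grid API

double stencil_send_recv_begin(const Packet &p) {
  double off_node_bytes = 0.0;
  if (!on_node(p.from_rank)) off_node_bytes += p.bytes; // MPI_Irecv posted
  if (!on_node(p.to_rank))   off_node_bytes += p.bytes; // MPI_Isend posted
  // on-node peers are serviced through the shared-memory window instead
  return off_node_bytes;  // caller: comms_bytes += stencil_send_recv_begin(p);
}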
- for(int n=0;nCheckerBoardFromOindex(o+b); - int offset = b+n*e2; - - if ( ocb & cbmask ) { - table.push_back(std::pair (offset,o+b)); - } - } - } - } - - assert( (table.size()&0x1)==0); - t1=usecond(); -PARALLEL_FOR_LOOP - for(int j=0;j Date: Mon, 20 Feb 2017 17:48:36 -0500 Subject: [PATCH 060/101] Debug AVX512 exchange code paths --- lib/simd/Grid_avx512.h | 6 ++++++ lib/simd/Grid_vector_types.h | 17 +++++++++++++---- 2 files changed, 19 insertions(+), 4 deletions(-) diff --git a/lib/simd/Grid_avx512.h b/lib/simd/Grid_avx512.h index 22d45aeb..f39c4033 100644 --- a/lib/simd/Grid_avx512.h +++ b/lib/simd/Grid_avx512.h @@ -355,6 +355,8 @@ namespace Optimization { static inline void Exchange1(__m512 &out1,__m512 &out2,__m512 in1,__m512 in2){ out1= _mm512_shuffle_f32x4(in1,in2,_MM_SELECT_FOUR_FOUR(2,0,2,0)); out2= _mm512_shuffle_f32x4(in1,in2,_MM_SELECT_FOUR_FOUR(3,1,3,1)); + out1= _mm512_shuffle_f32x4(out1,out1,_MM_SELECT_FOUR_FOUR(3,1,2,0)); /*AECG*/ + out2= _mm512_shuffle_f32x4(out2,out2,_MM_SELECT_FOUR_FOUR(3,1,2,0)); /*AECG*/ }; static inline void Exchange2(__m512 &out1,__m512 &out2,__m512 in1,__m512 in2){ out1= _mm512_shuffle_ps(in1,in2,_MM_SELECT_FOUR_FOUR(1,0,1,0)); @@ -363,6 +365,8 @@ namespace Optimization { static inline void Exchange3(__m512 &out1,__m512 &out2,__m512 in1,__m512 in2){ out1= _mm512_shuffle_ps(in1,in2,_MM_SELECT_FOUR_FOUR(2,0,2,0)); out2= _mm512_shuffle_ps(in1,in2,_MM_SELECT_FOUR_FOUR(3,1,3,1)); + out1= _mm512_shuffle_ps(out1,out1,_MM_SELECT_FOUR_FOUR(3,1,2,0)); /*AECG*/ + out2= _mm512_shuffle_ps(out2,out2,_MM_SELECT_FOUR_FOUR(3,1,2,0)); /*AECG*/ }; static inline void Exchange0(__m512d &out1,__m512d &out2,__m512d in1,__m512d in2){ @@ -372,6 +376,8 @@ namespace Optimization { static inline void Exchange1(__m512d &out1,__m512d &out2,__m512d in1,__m512d in2){ out1= _mm512_shuffle_f64x2(in1,in2,_MM_SELECT_FOUR_FOUR(2,0,2,0)); out2= _mm512_shuffle_f64x2(in1,in2,_MM_SELECT_FOUR_FOUR(3,1,3,1)); + out1= _mm512_shuffle_f64x2(out1,out1,_MM_SELECT_FOUR_FOUR(3,1,2,0)); /*AECG*/ + out2= _mm512_shuffle_f64x2(out2,out2,_MM_SELECT_FOUR_FOUR(3,1,2,0)); /*AECG*/ }; static inline void Exchange2(__m512d &out1,__m512d &out2,__m512d in1,__m512d in2){ out1 = _mm512_shuffle_pd(in1,in2,0x00); diff --git a/lib/simd/Grid_vector_types.h b/lib/simd/Grid_vector_types.h index cd499d88..dcdacbe0 100644 --- a/lib/simd/Grid_vector_types.h +++ b/lib/simd/Grid_vector_types.h @@ -356,10 +356,19 @@ class Grid_simd { /////////////////////// friend inline void exchange(Grid_simd &out1,Grid_simd &out2,Grid_simd in1,Grid_simd in2,int n) { - if (n==3) Optimization::Exchange::Exchange3(out1.v,out2.v,in1.v,in2.v); - else if(n==2) Optimization::Exchange::Exchange2(out1.v,out2.v,in1.v,in2.v); - else if(n==1) Optimization::Exchange::Exchange1(out1.v,out2.v,in1.v,in2.v); - else if(n==0) Optimization::Exchange::Exchange0(out1.v,out2.v,in1.v,in2.v); + if (n==3) { + Optimization::Exchange::Exchange3(out1.v,out2.v,in1.v,in2.v); + // std::cout << " Exchange3 "<< out1<<" "<< out2<<" <- " << in1 << " "< Date: Mon, 20 Feb 2017 17:49:23 -0500 Subject: [PATCH 061/101] Useful debug code info to preserve --- tests/Test_stencil.cc | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) diff --git a/tests/Test_stencil.cc b/tests/Test_stencil.cc index c2a2580f..38873310 100644 --- a/tests/Test_stencil.cc +++ b/tests/Test_stencil.cc @@ -66,7 +66,9 @@ int main (int argc, char ** argv) random(fRNG,Foo); gaussian(fRNG,Bar); - + for (int i=0;i 1.0e-4) { + for(int i=0;i 1.0e-4) exit(-1); } From 
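// The "useful debug code" preserved above follows a pattern worth keeping:
// when the stencil result disagrees with the Cshift reference, do not stop
// at the global norm -- dump the first offending site, then abort. Generic
// form of that check (plain scalar field standing in for the SU(3)-valued
// lattices of the test):
#include <cmath>
#include <cstdio>
#include <cstdlib>
#include <vector>

void require_equal(const std::vector<double> &check, const std::vector<double> &ref) {
  double n2 = 0.0;
  for (std::size_t i = 0; i < check.size(); i++) {
    double d = check[i] - ref[i];
    n2 += d * d;                       // norm2 of the difference, as above
  }
  if (n2 > 1.0e-4) {                   // global mismatch ...
    for (std::size_t i = 0; i < check.size(); i++) {
      if (std::fabs(check[i] - ref[i]) > 1.0e-4) {   // ... locate the site
        std::printf("site %zu: got %g expected %g\n", i, check[i], ref[i]);
        break;
      }
    }
    std::exit(-1);
  }
}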
3906cd21495d6b756873f4cc9d90bb62fd1c377b Mon Sep 17 00:00:00 2001 From: paboyle Date: Mon, 20 Feb 2017 17:51:31 -0500 Subject: [PATCH 062/101] Stencil fix on BNL KNL system --- lib/Stencil.cc | 6 +- lib/Stencil.h | 178 ++++++++++++++++++++++++++++++++++++++++++------- 2 files changed, 157 insertions(+), 27 deletions(-) diff --git a/lib/Stencil.cc b/lib/Stencil.cc index 16fb736f..c492efa0 100644 --- a/lib/Stencil.cc +++ b/lib/Stencil.cc @@ -29,19 +29,18 @@ namespace Grid { -void Gather_plane_simple_table_compute (GridBase *grid,int dimension,int plane,int cbmask, +void Gather_plane_table_compute (GridBase *grid,int dimension,int plane,int cbmask, int off,std::vector > & table) { table.resize(0); - int rd = grid->_rdimensions[dimension]; if ( !grid->CheckerBoarded(dimension) ) { cbmask = 0x3; } + int rd = grid->_rdimensions[dimension]; int so= plane*grid->_ostride[dimension]; // base offset for start of plane int e1=grid->_slice_nblock[dimension]; int e2=grid->_slice_block[dimension]; - int stride=grid->_slice_stride[dimension]; if ( cbmask == 0x3 ) { table.resize(e1*e2); @@ -66,4 +65,5 @@ void Gather_plane_simple_table_compute (GridBase *grid,int dimension,int plane,i } } } + } diff --git a/lib/Stencil.h b/lib/Stencil.h index 1821419a..e5afa251 100644 --- a/lib/Stencil.h +++ b/lib/Stencil.h @@ -29,7 +29,7 @@ #define GRID_STENCIL_H #include // subdir aggregate - +#define NEW_XYZT_GATHER ////////////////////////////////////////////////////////////////////////////////////////// // Must not lose sight that goal is to be able to construct really efficient // gather to a point stencil code. CSHIFT is not the best way, so need @@ -68,7 +68,10 @@ namespace Grid { -void Gather_plane_simple_table_compute (GridBase *grid,int dimension,int plane,int cbmask, +/////////////////////////////////////////////////////////////////// +// Gather for when there *is* need to SIMD split with compression +/////////////////////////////////////////////////////////////////// +void Gather_plane_table_compute (GridBase *grid,int dimension,int plane,int cbmask, int off,std::vector > & table); template @@ -85,6 +88,95 @@ PARALLEL_FOR_LOOP } } +/////////////////////////////////////////////////////////////////// +// Gather for when there *is* need to SIMD split with compression +/////////////////////////////////////////////////////////////////// +/* +template double +Gather_plane_exchange(const Lattice &rhs, + std::vector pointers,int dimension,int plane,int cbmask,compressor &compress,int type) +{ + int rd = rhs._grid->_rdimensions[dimension]; + double t1,t2; + if ( !rhs._grid->CheckerBoarded(dimension) ) { + cbmask = 0x3; + } + + int so = plane*rhs._grid->_ostride[dimension]; // base offset for start of plane + int e1 =rhs._grid->_slice_nblock[dimension]; + int e2 =rhs._grid->_slice_block [dimension]; + int n1 =rhs._grid->_slice_stride[dimension]; + + // Need to switch to a table loop + std::vector > table; + + if ( cbmask ==0x3){ + for(int n=0;n (offset,o+b)); + } + } + } else { + // Case of SIMD split AND checker dim cannot currently be hit, except in + // Test_cshift_red_black code. 
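// The renamed Gather_plane_table_compute separates "where does each face
// element live" (pure geometry, computed once) from the per-exchange copy.
// For the full-checkerboard case the table is a dense enumeration of the
// slice; a standalone sketch of that build, with entries kept relative to
// the buffer/plane bases off and so, which the copy loop adds back:
#include <utility>
#include <vector>

void build_plane_table(int e1, int e2, int stride,
                       std::vector<std::pair<int,int> > &table) {
  table.resize(e1 * e2);
  for (int n = 0; n < e1; n++) {      // blocks along the slice
    for (int b = 0; b < e2; b++) {    // contiguous elements within a block
      table[n*e2 + b] = std::pair<int,int>(n*e2 + b,       // buffer slot
                                           n*stride + b);  // offset within plane
    }
  }
}
// hot path per halo exchange, as in Gather_plane_simple_table below:
//   buffer[off + table[i].first] = compress(field[so + table[i].second]);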
+ for(int n=0;nCheckerBoardFromOindex(o+b); + int offset = b+n*e2; + + if ( ocb & cbmask ) { + table.push_back(std::pair (offset,o+b)); + } + } + } + } + + assert( (table.size()&0x1)==0); + t1=usecond(); +PARALLEL_FOR_LOOP + for(int j=0;j +void Gather_plane_exchange_table(const Lattice &rhs, + std::vector pointers,int dimension,int plane,int cbmask,compressor &compress,int type) __attribute__((noinline)); + +template +void Gather_plane_exchange_table(std::vector >& table,const Lattice &rhs, + std::vector pointers,int dimension,int plane,int cbmask, + compressor &compress,int type) +{ + assert( (table.size()&0x1)==0); + int num=table.size()/2; + int so = plane*rhs._grid->_ostride[dimension]; // base offset for start of plane +PARALLEL_FOR_LOOP + for(int j=0;jStencilSendToRecvFromBegin(reqs[i], + comms_bytes+=_grid->StencilSendToRecvFromBegin(reqs[i], Packets[i].send_buf, Packets[i].to_rank, Packets[i].recv_buf, Packets[i].from_rank, Packets[i].bytes); + if( _grid->CommunicatorPolicy == CartesianCommunicator::CommunicatorPolicySendrecv ) { + _grid->StencilSendToRecvFromComplete(reqs[i]); + } } commtime+=usecond(); } void CommunicateComplete(std::vector > &reqs) { commtime-=usecond(); - for(int i=0;iCommunicatorPolicy == CartesianCommunicator::CommunicatorPolicyIsend ) { + for(int i=0;iStencilSendToRecvFromComplete(reqs[i]); + } } _grid->StencilBarrier();// Synch shared memory on a single nodes commtime+=usecond(); + /* int dump=1; if(dump){ for(int i=0;i u_simd_send_buf; - std::vector u_simd_recv_buf; std::vector new_simd_send_buf; std::vector new_simd_recv_buf; + std::vector u_simd_send_buf; + std::vector u_simd_recv_buf; int u_comm_offset; int _unified_buffer_size; @@ -358,6 +455,10 @@ PARALLEL_FOR_LOOP void Report(void) { #define PRINTIT(A) \ std::cout << GridLogMessage << " Stencil " << #A << " "<< A/calls<_Nprocessors; + RealD NN = _grid->NodeCount(); + if ( calls > 0. ) { std::cout << GridLogMessage << " Stencil calls "<1.0){ PRINTIT(comms_bytes); PRINTIT(commtime); - std::cout << GridLogMessage << " Stencil " << comms_bytes/commtime/1000. 
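// Gather_plane_exchange_table above is where the new primitive pays off:
// table entries are consumed in pairs (hence the assert that the size is
// even), and each pair of neighbouring sites is split by exchange() into two
// output words that land directly in the two half-buffers, with no scalar
// extract/merge step. A plausible shape of that loop, templated on the
// exchange callable (illustrative; the real call goes through
// compress.CompressExchange):
#include <utility>
#include <vector>

template <class vobj, class Exch>
void gather_exchange(vobj *p0, vobj *p1, const vobj *field, int so, int type,
                     const std::vector<std::pair<int, int> > &table, Exch exch) {
  int num = (int)table.size() / 2;            // pairs of table entries
  for (int j = 0; j < num; j++) {
    exch(p0[j], p1[j],
         field[so + table[2 * j].second],     // first site of the pair
         field[so + table[2 * j + 1].second], // its partner
         type);
  }
}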
<< " GB/s "<ShmBufferMalloc(_unified_buffer_size*sizeof(cobj)); u_recv_buf_p=(cobj *)_grid->ShmBufferMalloc(_unified_buffer_size*sizeof(cobj)); - for(int l=0;lShmBufferMalloc(_unified_buffer_size*sizeof(scalar_object)); - u_simd_send_buf[l] = (scalar_object *)_grid->ShmBufferMalloc(_unified_buffer_size*sizeof(scalar_object)); +#ifdef NEW_XYZT_GATHER + for(int l=0;l<2;l++){ new_simd_recv_buf[l] = (cobj *)_grid->ShmBufferMalloc(_unified_buffer_size*sizeof(cobj)); new_simd_send_buf[l] = (cobj *)_grid->ShmBufferMalloc(_unified_buffer_size*sizeof(cobj)); } +#else + for(int l=0;lShmBufferMalloc(_unified_buffer_size*sizeof(scalar_object)); + u_simd_send_buf[l] = (scalar_object *)_grid->ShmBufferMalloc(_unified_buffer_size*sizeof(scalar_object)); + } +#endif PrecomputeByteOffsets(); } @@ -740,7 +847,11 @@ PARALLEL_FOR_LOOP splicetime-=usecond(); // GatherSimd(source,dimension,shift,0x3,compress,face_idx); // std::cout << "GatherSimdNew"<>1; + + int bytes = (reduced_buffer_size*sizeof(cobj))/simd_layout; + assert(bytes*simd_layout == reduced_buffer_size*sizeof(cobj)); std::vector rpointers(maxl); std::vector spointers(maxl); @@ -1034,15 +1152,28 @@ PARALLEL_FOR_LOOP int any_offnode = ( ((x+sshift)%fd) >= rd ); if ( any_offnode ) { + for(int i=0;i > table; + t_table-=usecond(); + if ( !face_table_computed ) { + face_table.resize(face_idx+1); + Gather_plane_table_compute ((GridBase *)_grid,dimension,sx,cbmask,u_comm_offset,face_table[face_idx]); + // std::cout << " face table size "< void Gather_plane_simple_table (std::vector >& table,const Lattice &rhs,cobj *buffer,compressor &compress, int off,int so) { int num=table.size(); -PARALLEL_FOR_LOOP - for(int i=0;i double -Gather_plane_exchange(const Lattice &rhs, - std::vector pointers,int dimension,int plane,int cbmask,compressor &compress,int type) -{ - int rd = rhs._grid->_rdimensions[dimension]; - double t1,t2; - if ( !rhs._grid->CheckerBoarded(dimension) ) { - cbmask = 0x3; - } - - int so = plane*rhs._grid->_ostride[dimension]; // base offset for start of plane - int e1 =rhs._grid->_slice_nblock[dimension]; - int e2 =rhs._grid->_slice_block [dimension]; - int n1 =rhs._grid->_slice_stride[dimension]; - - // Need to switch to a table loop - std::vector > table; - - if ( cbmask ==0x3){ - for(int n=0;n (offset,o+b)); - } - } - } else { - // Case of SIMD split AND checker dim cannot currently be hit, except in - // Test_cshift_red_black code. 
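// Units in the bandwidth line above: commtime is accumulated with usecond(),
// so bytes per microsecond is MB/s and the trailing /1000. yields GB/s; the
// RealD NN = NodeCount() introduced alongside it suggests the per-node figure
// is NN-normalised, consistent with counting off-node bytes only. The
// arithmetic as a plain function:
#include <cstdio>

void report_comms(double comms_bytes, double commtime_us, double nodes, double calls) {
  if (calls <= 0.0 || commtime_us <= 0.0) return;
  std::printf("bytes/call     : %g\n", comms_bytes / calls);  // PRINTIT-style average
  std::printf("aggregate GB/s : %g\n", comms_bytes / commtime_us / 1000.0);
  std::printf("per-node GB/s  : %g\n", comms_bytes / commtime_us / 1000.0 / nodes);
}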
- for(int n=0;nCheckerBoardFromOindex(o+b); - int offset = b+n*e2; - - if ( ocb & cbmask ) { - table.push_back(std::pair (offset,o+b)); - } - } - } - } - - assert( (table.size()&0x1)==0); - t1=usecond(); -PARALLEL_FOR_LOOP - for(int j=0;j void Gather_plane_exchange_table(const Lattice &rhs, std::vector pointers,int dimension,int plane,int cbmask,compressor &compress,int type) __attribute__((noinline)); @@ -164,8 +101,7 @@ void Gather_plane_exchange_table(std::vector >& table,const L assert( (table.size()&0x1)==0); int num=table.size()/2; int so = plane*rhs._grid->_ostride[dimension]; // base offset for start of plane -PARALLEL_FOR_LOOP - for(int j=0;jCommunicatorPolicy == CartesianCommunicator::CommunicatorPolicySendrecv ) { - _grid->StencilSendToRecvFromComplete(reqs[i]); - } } commtime+=usecond(); } void CommunicateComplete(std::vector > &reqs) { commtime-=usecond(); - if( _grid->CommunicatorPolicy == CartesianCommunicator::CommunicatorPolicyIsend ) { - for(int i=0;iStencilSendToRecvFromComplete(reqs[i]); - } + for(int i=0;iStencilSendToRecvFromComplete(reqs[i]); } _grid->StencilBarrier();// Synch shared memory on a single nodes commtime+=usecond(); @@ -327,14 +258,12 @@ class CartesianStencil { // Stencil runs along coordinate axes only; NO diagonal // std::ofstream fout(fname); if ( Mergers[i].exchange == 0 ) { -PARALLEL_FOR_LOOP - for(int o=0;o #define PARALLEL_CRITICAL #endif +#define parallel_for PARALLEL_FOR_LOOP for +#define parallel_for_nest2 PARALLEL_NESTED_LOOP2 for + namespace Grid { // Introduce a class to gain deterministic bit reproducible reduction. diff --git a/lib/algorithms/CoarsenedMatrix.h b/lib/algorithms/CoarsenedMatrix.h index fd9acc91..73f6baff 100644 --- a/lib/algorithms/CoarsenedMatrix.h +++ b/lib/algorithms/CoarsenedMatrix.h @@ -267,8 +267,7 @@ namespace Grid { SimpleCompressor compressor; Stencil.HaloExchange(in,compressor); -PARALLEL_FOR_LOOP - for(int ss=0;ssoSites();ss++){ + parallel_for(int ss=0;ssoSites();ss++){ siteVector res = zero; siteVector nbr; int ptype; @@ -380,8 +379,7 @@ PARALLEL_FOR_LOOP Subspace.ProjectToSubspace(oProj,oblock); // blockProject(iProj,iblock,Subspace.subspace); // blockProject(oProj,oblock,Subspace.subspace); -PARALLEL_FOR_LOOP - for(int ss=0;ssoSites();ss++){ + parallel_for(int ss=0;ssoSites();ss++){ for(int j=0;j &lis { int myrank = _processor; int ierr; - if ( CommunicatorPolicy == CommunicatorPolicyIsend ) { + if ( CommunicatorPolicy == CommunicatorPolicyConcurrent ) { MPI_Request xrq; MPI_Request rrq; @@ -178,7 +178,7 @@ void CartesianCommunicator::SendToRecvFromBegin(std::vector &lis } void CartesianCommunicator::SendToRecvFromComplete(std::vector &list) { - if ( CommunicatorPolicy == CommunicatorPolicyIsend ) { + if ( CommunicatorPolicy == CommunicatorPolicyConcurrent ) { int nreq=list.size(); std::vector status(nreq); int ierr = MPI_Waitall(nreq,&list[0],&status[0]); diff --git a/lib/communicator/Communicator_mpi3.cc b/lib/communicator/Communicator_mpi3.cc index 30065b4d..557e4ebf 100644 --- a/lib/communicator/Communicator_mpi3.cc +++ b/lib/communicator/Communicator_mpi3.cc @@ -511,7 +511,7 @@ void CartesianCommunicator::SendToRecvFromBegin(std::vector &lis int myrank = _processor; int ierr; - if ( CommunicatorPolicy == CommunicatorPolicyIsend ) { + if ( CommunicatorPolicy == CommunicatorPolicyConcurrent ) { MPI_Request xrq; MPI_Request rrq; @@ -567,6 +567,11 @@ double CartesianCommunicator::StencilSendToRecvFromBegin(std::vectorStencilSendToRecvFromComplete(list); + } + return off_node_bytes; } void 
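// The macro pair above is what turns every "PARALLEL_FOR_LOOP for(...)" in
// this series into "parallel_for(...)". It is only legal because the pragma
// itself is generated with the C++11 _Pragma operator -- a plain
// "#pragma omp parallel for" cannot come out of a macro expansion. A minimal
// demonstration, assuming the usual Grid definition under OpenMP:
#include <cstdio>
#define PARALLEL_FOR_LOOP _Pragma("omp parallel for")
#define parallel_for PARALLEL_FOR_LOOP for

int main() {
  double sum[16];
  parallel_for(int ss = 0; ss < 16; ss++) { // expands to the pragma plus a for
    sum[ss] = 2.0 * ss;
  }
  std::printf("%g\n", sum[15]);             // 30; threaded when built with -fopenmp
  return 0;
}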
CartesianCommunicator::StencilSendToRecvFromComplete(std::vector &waitall) @@ -585,8 +590,8 @@ void CartesianCommunicator::SendToRecvFromComplete(std::vector & std::vector status(nreq); int ierr = MPI_Waitall(nreq,&list[0],&status[0]); - list.resize(0); assert(ierr==0); + list.resize(0); } void CartesianCommunicator::Barrier(void) { diff --git a/lib/cshift/Cshift_common.h b/lib/cshift/Cshift_common.h index 7dc081ca..48708800 100644 --- a/lib/cshift/Cshift_common.h +++ b/lib/cshift/Cshift_common.h @@ -58,8 +58,7 @@ Gather_plane_simple (const Lattice &rhs,commVector &buffer,int dimen int stride=rhs._grid->_slice_stride[dimension]; if ( cbmask == 0x3 ) { -PARALLEL_NESTED_LOOP2 - for(int n=0;n &rhs,std::vector_slice_stride[dimension]; if ( cbmask ==0x3){ -PARALLEL_NESTED_LOOP2 - for(int n=0;n void Scatter_plane_simple (Lattice &rhs,commVector_slice_stride[dimension]; if ( cbmask ==0x3 ) { -PARALLEL_NESTED_LOOP2 - for(int n=0;n_slice_stride[dimension]; int bo =n*rhs._grid->_slice_block[dimension]; @@ -195,8 +190,7 @@ PARALLEL_NESTED_LOOP2 } } } -PARALLEL_FOR_LOOP - for(int i=0;i_slice_block[dimension]; if(cbmask ==0x3 ) { -PARALLEL_NESTED_LOOP2 - for(int n=0;n_slice_stride[dimension]; int offset = b+n*rhs._grid->_slice_block[dimension]; @@ -265,8 +258,7 @@ template void Copy_plane(Lattice& lhs,const Lattice &rhs int e2=rhs._grid->_slice_block[dimension]; int stride = rhs._grid->_slice_stride[dimension]; if(cbmask == 0x3 ){ -PARALLEL_NESTED_LOOP2 - for(int n=0;n void Copy_plane_permute(Lattice& lhs,const Lattice_slice_nblock[dimension]; int e2=rhs._grid->_slice_block [dimension]; int stride = rhs._grid->_slice_stride[dimension]; -PARALLEL_NESTED_LOOP2 - for(int n=0;noSites();ss++){ + parallel_for(int ss=0;ssoSites();ss++){ #ifdef STREAMING_STORES obj1 tmp; mult(&tmp,&lhs._odata[ss],&rhs._odata[ss]); @@ -56,8 +55,7 @@ PARALLEL_FOR_LOOP ret.checkerboard = lhs.checkerboard; conformable(ret,rhs); conformable(lhs,rhs); -PARALLEL_FOR_LOOP - for(int ss=0;ssoSites();ss++){ + parallel_for(int ss=0;ssoSites();ss++){ #ifdef STREAMING_STORES obj1 tmp; mac(&tmp,&lhs._odata[ss],&rhs._odata[ss]); @@ -73,8 +71,7 @@ PARALLEL_FOR_LOOP ret.checkerboard = lhs.checkerboard; conformable(ret,rhs); conformable(lhs,rhs); -PARALLEL_FOR_LOOP - for(int ss=0;ssoSites();ss++){ + parallel_for(int ss=0;ssoSites();ss++){ #ifdef STREAMING_STORES obj1 tmp; sub(&tmp,&lhs._odata[ss],&rhs._odata[ss]); @@ -89,8 +86,7 @@ PARALLEL_FOR_LOOP ret.checkerboard = lhs.checkerboard; conformable(ret,rhs); conformable(lhs,rhs); -PARALLEL_FOR_LOOP - for(int ss=0;ssoSites();ss++){ + parallel_for(int ss=0;ssoSites();ss++){ #ifdef STREAMING_STORES obj1 tmp; add(&tmp,&lhs._odata[ss],&rhs._odata[ss]); @@ -108,8 +104,7 @@ PARALLEL_FOR_LOOP void mult(Lattice &ret,const Lattice &lhs,const obj3 &rhs){ ret.checkerboard = lhs.checkerboard; conformable(lhs,ret); -PARALLEL_FOR_LOOP - for(int ss=0;ssoSites();ss++){ + parallel_for(int ss=0;ssoSites();ss++){ obj1 tmp; mult(&tmp,&lhs._odata[ss],&rhs); vstream(ret._odata[ss],tmp); @@ -120,8 +115,7 @@ PARALLEL_FOR_LOOP void mac(Lattice &ret,const Lattice &lhs,const obj3 &rhs){ ret.checkerboard = lhs.checkerboard; conformable(ret,lhs); -PARALLEL_FOR_LOOP - for(int ss=0;ssoSites();ss++){ + parallel_for(int ss=0;ssoSites();ss++){ obj1 tmp; mac(&tmp,&lhs._odata[ss],&rhs); vstream(ret._odata[ss],tmp); @@ -132,8 +126,7 @@ PARALLEL_FOR_LOOP void sub(Lattice &ret,const Lattice &lhs,const obj3 &rhs){ ret.checkerboard = lhs.checkerboard; conformable(ret,lhs); -PARALLEL_FOR_LOOP - for(int ss=0;ssoSites();ss++){ + 
parallel_for(int ss=0;ssoSites();ss++){ #ifdef STREAMING_STORES obj1 tmp; sub(&tmp,&lhs._odata[ss],&rhs); @@ -147,8 +140,7 @@ PARALLEL_FOR_LOOP void add(Lattice &ret,const Lattice &lhs,const obj3 &rhs){ ret.checkerboard = lhs.checkerboard; conformable(lhs,ret); -PARALLEL_FOR_LOOP - for(int ss=0;ssoSites();ss++){ + parallel_for(int ss=0;ssoSites();ss++){ #ifdef STREAMING_STORES obj1 tmp; add(&tmp,&lhs._odata[ss],&rhs); @@ -166,8 +158,7 @@ PARALLEL_FOR_LOOP void mult(Lattice &ret,const obj2 &lhs,const Lattice &rhs){ ret.checkerboard = rhs.checkerboard; conformable(ret,rhs); -PARALLEL_FOR_LOOP - for(int ss=0;ssoSites();ss++){ + parallel_for(int ss=0;ssoSites();ss++){ #ifdef STREAMING_STORES obj1 tmp; mult(&tmp,&lhs,&rhs._odata[ss]); @@ -182,8 +173,7 @@ PARALLEL_FOR_LOOP void mac(Lattice &ret,const obj2 &lhs,const Lattice &rhs){ ret.checkerboard = rhs.checkerboard; conformable(ret,rhs); -PARALLEL_FOR_LOOP - for(int ss=0;ssoSites();ss++){ + parallel_for(int ss=0;ssoSites();ss++){ #ifdef STREAMING_STORES obj1 tmp; mac(&tmp,&lhs,&rhs._odata[ss]); @@ -198,8 +188,7 @@ PARALLEL_FOR_LOOP void sub(Lattice &ret,const obj2 &lhs,const Lattice &rhs){ ret.checkerboard = rhs.checkerboard; conformable(ret,rhs); -PARALLEL_FOR_LOOP - for(int ss=0;ssoSites();ss++){ + parallel_for(int ss=0;ssoSites();ss++){ #ifdef STREAMING_STORES obj1 tmp; sub(&tmp,&lhs,&rhs._odata[ss]); @@ -213,8 +202,7 @@ PARALLEL_FOR_LOOP void add(Lattice &ret,const obj2 &lhs,const Lattice &rhs){ ret.checkerboard = rhs.checkerboard; conformable(ret,rhs); -PARALLEL_FOR_LOOP - for(int ss=0;ssoSites();ss++){ + parallel_for(int ss=0;ssoSites();ss++){ #ifdef STREAMING_STORES obj1 tmp; add(&tmp,&lhs,&rhs._odata[ss]); @@ -230,8 +218,7 @@ PARALLEL_FOR_LOOP ret.checkerboard = x.checkerboard; conformable(ret,x); conformable(x,y); -PARALLEL_FOR_LOOP - for(int ss=0;ssoSites();ss++){ + parallel_for(int ss=0;ssoSites();ss++){ #ifdef STREAMING_STORES vobj tmp = a*x._odata[ss]+y._odata[ss]; vstream(ret._odata[ss],tmp); @@ -245,8 +232,7 @@ PARALLEL_FOR_LOOP ret.checkerboard = x.checkerboard; conformable(ret,x); conformable(x,y); -PARALLEL_FOR_LOOP - for(int ss=0;ssoSites();ss++){ + parallel_for(int ss=0;ssoSites();ss++){ #ifdef STREAMING_STORES vobj tmp = a*x._odata[ss]+b*y._odata[ss]; vstream(ret._odata[ss],tmp); diff --git a/lib/lattice/Lattice_base.h b/lib/lattice/Lattice_base.h index e4dc1ca8..0c345545 100644 --- a/lib/lattice/Lattice_base.h +++ b/lib/lattice/Lattice_base.h @@ -121,8 +121,7 @@ public: assert( (cb==Odd) || (cb==Even)); checkerboard=cb; -PARALLEL_FOR_LOOP - for(int ss=0;ss<_grid->oSites();ss++){ + parallel_for(int ss=0;ss<_grid->oSites();ss++){ #ifdef STREAMING_STORES vobj tmp = eval(ss,expr); vstream(_odata[ss] ,tmp); @@ -144,8 +143,7 @@ PARALLEL_FOR_LOOP assert( (cb==Odd) || (cb==Even)); checkerboard=cb; -PARALLEL_FOR_LOOP - for(int ss=0;ss<_grid->oSites();ss++){ + parallel_for(int ss=0;ss<_grid->oSites();ss++){ #ifdef STREAMING_STORES vobj tmp = eval(ss,expr); vstream(_odata[ss] ,tmp); @@ -167,8 +165,7 @@ PARALLEL_FOR_LOOP assert( (cb==Odd) || (cb==Even)); checkerboard=cb; -PARALLEL_FOR_LOOP - for(int ss=0;ss<_grid->oSites();ss++){ + parallel_for(int ss=0;ss<_grid->oSites();ss++){ #ifdef STREAMING_STORES //vobj tmp = eval(ss,expr); vstream(_odata[ss] ,eval(ss,expr)); @@ -191,8 +188,7 @@ PARALLEL_FOR_LOOP checkerboard=cb; _odata.resize(_grid->oSites()); -PARALLEL_FOR_LOOP - for(int ss=0;ss<_grid->oSites();ss++){ + parallel_for(int ss=0;ss<_grid->oSites();ss++){ #ifdef STREAMING_STORES vobj tmp = eval(ss,expr); vstream(_odata[ss] ,tmp); 
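// Every converted loop keeps the same two-step body: build the site result in
// a register temporary, then vstream() it out. With streaming (non-temporal)
// stores the destination cache line is written without first being read into
// cache, which pays off because these kernels never re-read ret. A
// scalar-shaped sketch (vstream here is a stand-in for the SIMD
// non-temporal store):
#include <vector>

#ifndef parallel_for
#define parallel_for _Pragma("omp parallel for") for
#endif

template <class T> inline void vstream(T &out, const T &in) { out = in; } // NT-store stand-in

template <class T>
void site_mult(std::vector<T> &ret, const std::vector<T> &lhs, const std::vector<T> &rhs) {
  parallel_for(int ss = 0; ss < (int)ret.size(); ss++) {
    T tmp = lhs[ss] * rhs[ss];  // compute in registers
    vstream(ret[ss], tmp);      // single write to memory, no read-for-ownership
  }
}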
@@ -213,8 +209,7 @@ PARALLEL_FOR_LOOP checkerboard=cb; _odata.resize(_grid->oSites()); -PARALLEL_FOR_LOOP - for(int ss=0;ss<_grid->oSites();ss++){ + parallel_for(int ss=0;ss<_grid->oSites();ss++){ #ifdef STREAMING_STORES vobj tmp = eval(ss,expr); vstream(_odata[ss] ,tmp); @@ -235,8 +230,7 @@ PARALLEL_FOR_LOOP checkerboard=cb; _odata.resize(_grid->oSites()); -PARALLEL_FOR_LOOP - for(int ss=0;ss<_grid->oSites();ss++){ + parallel_for(int ss=0;ss<_grid->oSites();ss++){ vstream(_odata[ss] ,eval(ss,expr)); } }; @@ -258,8 +252,7 @@ PARALLEL_FOR_LOOP _grid = r._grid; checkerboard = r.checkerboard; _odata.resize(_grid->oSites());// essential - PARALLEL_FOR_LOOP - for(int ss=0;ss<_grid->oSites();ss++){ + parallel_for(int ss=0;ss<_grid->oSites();ss++){ _odata[ss]=r._odata[ss]; } } @@ -269,8 +262,7 @@ PARALLEL_FOR_LOOP virtual ~Lattice(void) = default; template strong_inline Lattice & operator = (const sobj & r){ -PARALLEL_FOR_LOOP - for(int ss=0;ss<_grid->oSites();ss++){ + parallel_for(int ss=0;ss<_grid->oSites();ss++){ this->_odata[ss]=r; } return *this; @@ -279,8 +271,7 @@ PARALLEL_FOR_LOOP this->checkerboard = r.checkerboard; conformable(*this,r); -PARALLEL_FOR_LOOP - for(int ss=0;ss<_grid->oSites();ss++){ + parallel_for(int ss=0;ss<_grid->oSites();ss++){ this->_odata[ss]=r._odata[ss]; } return *this; diff --git a/lib/lattice/Lattice_comparison.h b/lib/lattice/Lattice_comparison.h index 1b5b0624..9bf1fb2d 100644 --- a/lib/lattice/Lattice_comparison.h +++ b/lib/lattice/Lattice_comparison.h @@ -45,90 +45,87 @@ namespace Grid { ////////////////////////////////////////////////////////////////////////// template inline Lattice LLComparison(vfunctor op,const Lattice &lhs,const Lattice &rhs) - { - Lattice ret(rhs._grid); -PARALLEL_FOR_LOOP - for(int ss=0;ssoSites(); ss++){ - ret._odata[ss]=op(lhs._odata[ss],rhs._odata[ss]); - } - return ret; + { + Lattice ret(rhs._grid); + parallel_for(int ss=0;ssoSites(); ss++){ + ret._odata[ss]=op(lhs._odata[ss],rhs._odata[ss]); } + return ret; + } ////////////////////////////////////////////////////////////////////////// // compare lattice to scalar ////////////////////////////////////////////////////////////////////////// - template + template inline Lattice LSComparison(vfunctor op,const Lattice &lhs,const robj &rhs) - { - Lattice ret(lhs._grid); -PARALLEL_FOR_LOOP - for(int ss=0;ssoSites(); ss++){ - ret._odata[ss]=op(lhs._odata[ss],rhs); - } - return ret; + { + Lattice ret(lhs._grid); + parallel_for(int ss=0;ssoSites(); ss++){ + ret._odata[ss]=op(lhs._odata[ss],rhs); } + return ret; + } ////////////////////////////////////////////////////////////////////////// // compare scalar to lattice ////////////////////////////////////////////////////////////////////////// - template + template inline Lattice SLComparison(vfunctor op,const lobj &lhs,const Lattice &rhs) - { - Lattice ret(rhs._grid); -PARALLEL_FOR_LOOP - for(int ss=0;ssoSites(); ss++){ - ret._odata[ss]=op(lhs._odata[ss],rhs); - } - return ret; + { + Lattice ret(rhs._grid); + parallel_for(int ss=0;ssoSites(); ss++){ + ret._odata[ss]=op(lhs._odata[ss],rhs); } - + return ret; + } + ////////////////////////////////////////////////////////////////////////// // Map to functors ////////////////////////////////////////////////////////////////////////// - // Less than - template - inline Lattice operator < (const Lattice & lhs, const Lattice & rhs) { - return LLComparison(vlt(),lhs,rhs); - } - template - inline Lattice operator < (const Lattice & lhs, const robj & rhs) { - return LSComparison(vlt(),lhs,rhs); - } - 
template - inline Lattice operator < (const lobj & lhs, const Lattice & rhs) { - return SLComparison(vlt(),lhs,rhs); - } - - // Less than equal - template - inline Lattice operator <= (const Lattice & lhs, const Lattice & rhs) { - return LLComparison(vle(),lhs,rhs); - } - template - inline Lattice operator <= (const Lattice & lhs, const robj & rhs) { - return LSComparison(vle(),lhs,rhs); - } - template - inline Lattice operator <= (const lobj & lhs, const Lattice & rhs) { - return SLComparison(vle(),lhs,rhs); - } - - // Greater than - template - inline Lattice operator > (const Lattice & lhs, const Lattice & rhs) { - return LLComparison(vgt(),lhs,rhs); - } - template - inline Lattice operator > (const Lattice & lhs, const robj & rhs) { - return LSComparison(vgt(),lhs,rhs); - } - template - inline Lattice operator > (const lobj & lhs, const Lattice & rhs) { + // Less than + template + inline Lattice operator < (const Lattice & lhs, const Lattice & rhs) { + return LLComparison(vlt(),lhs,rhs); + } + template + inline Lattice operator < (const Lattice & lhs, const robj & rhs) { + return LSComparison(vlt(),lhs,rhs); + } + template + inline Lattice operator < (const lobj & lhs, const Lattice & rhs) { + return SLComparison(vlt(),lhs,rhs); + } + + // Less than equal + template + inline Lattice operator <= (const Lattice & lhs, const Lattice & rhs) { + return LLComparison(vle(),lhs,rhs); + } + template + inline Lattice operator <= (const Lattice & lhs, const robj & rhs) { + return LSComparison(vle(),lhs,rhs); + } + template + inline Lattice operator <= (const lobj & lhs, const Lattice & rhs) { + return SLComparison(vle(),lhs,rhs); + } + + // Greater than + template + inline Lattice operator > (const Lattice & lhs, const Lattice & rhs) { + return LLComparison(vgt(),lhs,rhs); + } + template + inline Lattice operator > (const Lattice & lhs, const robj & rhs) { + return LSComparison(vgt(),lhs,rhs); + } + template + inline Lattice operator > (const lobj & lhs, const Lattice & rhs) { return SLComparison(vgt(),lhs,rhs); - } - - - // Greater than equal + } + + + // Greater than equal template - inline Lattice operator >= (const Lattice & lhs, const Lattice & rhs) { + inline Lattice operator >= (const Lattice & lhs, const Lattice & rhs) { return LLComparison(vge(),lhs,rhs); } template @@ -136,38 +133,37 @@ PARALLEL_FOR_LOOP return LSComparison(vge(),lhs,rhs); } template - inline Lattice operator >= (const lobj & lhs, const Lattice & rhs) { + inline Lattice operator >= (const lobj & lhs, const Lattice & rhs) { return SLComparison(vge(),lhs,rhs); } - + // equal template - inline Lattice operator == (const Lattice & lhs, const Lattice & rhs) { + inline Lattice operator == (const Lattice & lhs, const Lattice & rhs) { return LLComparison(veq(),lhs,rhs); } template - inline Lattice operator == (const Lattice & lhs, const robj & rhs) { + inline Lattice operator == (const Lattice & lhs, const robj & rhs) { return LSComparison(veq(),lhs,rhs); } template - inline Lattice operator == (const lobj & lhs, const Lattice & rhs) { + inline Lattice operator == (const lobj & lhs, const Lattice & rhs) { return SLComparison(veq(),lhs,rhs); } - - + + // not equal template - inline Lattice operator != (const Lattice & lhs, const Lattice & rhs) { + inline Lattice operator != (const Lattice & lhs, const Lattice & rhs) { return LLComparison(vne(),lhs,rhs); } template - inline Lattice operator != (const Lattice & lhs, const robj & rhs) { + inline Lattice operator != (const Lattice & lhs, const robj & rhs) { return 
LSComparison(vne(),lhs,rhs); } template - inline Lattice operator != (const lobj & lhs, const Lattice & rhs) { + inline Lattice operator != (const lobj & lhs, const Lattice & rhs) { return SLComparison(vne(),lhs,rhs); } - } #endif diff --git a/lib/lattice/Lattice_local.h b/lib/lattice/Lattice_local.h index 65d1d929..9dae1cd9 100644 --- a/lib/lattice/Lattice_local.h +++ b/lib/lattice/Lattice_local.h @@ -34,47 +34,42 @@ Author: Peter Boyle namespace Grid { - ///////////////////////////////////////////////////// - // Non site, reduced locally reduced routines - ///////////////////////////////////////////////////// - - // localNorm2, - template + ///////////////////////////////////////////////////// + // Non site, reduced locally reduced routines + ///////////////////////////////////////////////////// + + // localNorm2, + template inline auto localNorm2 (const Lattice &rhs)-> Lattice { Lattice ret(rhs._grid); -PARALLEL_FOR_LOOP - for(int ss=0;ssoSites(); ss++){ - ret._odata[ss]=innerProduct(rhs._odata[ss],rhs._odata[ss]); - } - return ret; + parallel_for(int ss=0;ssoSites(); ss++){ + ret._odata[ss]=innerProduct(rhs._odata[ss],rhs._odata[ss]); + } + return ret; } - - // localInnerProduct - template + + // localInnerProduct + template inline auto localInnerProduct (const Lattice &lhs,const Lattice &rhs) -> Lattice { Lattice ret(rhs._grid); -PARALLEL_FOR_LOOP - for(int ss=0;ssoSites(); ss++){ + parallel_for(int ss=0;ssoSites(); ss++){ ret._odata[ss]=innerProduct(lhs._odata[ss],rhs._odata[ss]); } return ret; } - - // outerProduct Scalar x Scalar -> Scalar - // Vector x Vector -> Matrix - template + + // outerProduct Scalar x Scalar -> Scalar + // Vector x Vector -> Matrix + template inline auto outerProduct (const Lattice &lhs,const Lattice &rhs) -> Lattice - { - Lattice ret(rhs._grid); -PARALLEL_FOR_LOOP - for(int ss=0;ssoSites(); ss++){ - ret._odata[ss]=outerProduct(lhs._odata[ss],rhs._odata[ss]); - } - return ret; - } - + { + Lattice ret(rhs._grid); + parallel_for(int ss=0;ssoSites(); ss++){ + ret._odata[ss]=outerProduct(lhs._odata[ss],rhs._odata[ss]); + } + return ret; + } } - #endif diff --git a/lib/lattice/Lattice_overload.h b/lib/lattice/Lattice_overload.h index 2a5d16a1..0906b610 100644 --- a/lib/lattice/Lattice_overload.h +++ b/lib/lattice/Lattice_overload.h @@ -37,8 +37,7 @@ namespace Grid { inline Lattice operator -(const Lattice &r) { Lattice ret(r._grid); -PARALLEL_FOR_LOOP - for(int ss=0;ssoSites();ss++){ + parallel_for(int ss=0;ssoSites();ss++){ vstream(ret._odata[ss], -r._odata[ss]); } return ret; @@ -74,8 +73,7 @@ PARALLEL_FOR_LOOP inline auto operator * (const left &lhs,const Lattice &rhs) -> Lattice { Lattice ret(rhs._grid); -PARALLEL_FOR_LOOP - for(int ss=0;ssoSites(); ss++){ + parallel_for(int ss=0;ssoSites(); ss++){ decltype(lhs*rhs._odata[0]) tmp=lhs*rhs._odata[ss]; vstream(ret._odata[ss],tmp); // ret._odata[ss]=lhs*rhs._odata[ss]; @@ -86,8 +84,7 @@ PARALLEL_FOR_LOOP inline auto operator + (const left &lhs,const Lattice &rhs) -> Lattice { Lattice ret(rhs._grid); -PARALLEL_FOR_LOOP - for(int ss=0;ssoSites(); ss++){ + parallel_for(int ss=0;ssoSites(); ss++){ decltype(lhs+rhs._odata[0]) tmp =lhs-rhs._odata[ss]; vstream(ret._odata[ss],tmp); // ret._odata[ss]=lhs+rhs._odata[ss]; @@ -98,11 +95,9 @@ PARALLEL_FOR_LOOP inline auto operator - (const left &lhs,const Lattice &rhs) -> Lattice { Lattice ret(rhs._grid); -PARALLEL_FOR_LOOP - for(int ss=0;ssoSites(); ss++){ + parallel_for(int ss=0;ssoSites(); ss++){ decltype(lhs-rhs._odata[0]) tmp=lhs-rhs._odata[ss]; 
vstream(ret._odata[ss],tmp); - // ret._odata[ss]=lhs-rhs._odata[ss]; } return ret; } @@ -110,8 +105,7 @@ PARALLEL_FOR_LOOP inline auto operator * (const Lattice &lhs,const right &rhs) -> Lattice { Lattice ret(lhs._grid); -PARALLEL_FOR_LOOP - for(int ss=0;ssoSites(); ss++){ + parallel_for(int ss=0;ssoSites(); ss++){ decltype(lhs._odata[0]*rhs) tmp =lhs._odata[ss]*rhs; vstream(ret._odata[ss],tmp); // ret._odata[ss]=lhs._odata[ss]*rhs; @@ -122,8 +116,7 @@ PARALLEL_FOR_LOOP inline auto operator + (const Lattice &lhs,const right &rhs) -> Lattice { Lattice ret(lhs._grid); -PARALLEL_FOR_LOOP - for(int ss=0;ssoSites(); ss++){ + parallel_for(int ss=0;ssoSites(); ss++){ decltype(lhs._odata[0]+rhs) tmp=lhs._odata[ss]+rhs; vstream(ret._odata[ss],tmp); // ret._odata[ss]=lhs._odata[ss]+rhs; @@ -134,15 +127,12 @@ PARALLEL_FOR_LOOP inline auto operator - (const Lattice &lhs,const right &rhs) -> Lattice { Lattice ret(lhs._grid); -PARALLEL_FOR_LOOP - for(int ss=0;ssoSites(); ss++){ + parallel_for(int ss=0;ssoSites(); ss++){ decltype(lhs._odata[0]-rhs) tmp=lhs._odata[ss]-rhs; vstream(ret._odata[ss],tmp); // ret._odata[ss]=lhs._odata[ss]-rhs; } return ret; } - - } #endif diff --git a/lib/lattice/Lattice_peekpoke.h b/lib/lattice/Lattice_peekpoke.h index 19d349c4..6010fd65 100644 --- a/lib/lattice/Lattice_peekpoke.h +++ b/lib/lattice/Lattice_peekpoke.h @@ -44,22 +44,20 @@ namespace Grid { { Lattice(lhs._odata[0],i))> ret(lhs._grid); ret.checkerboard=lhs.checkerboard; -PARALLEL_FOR_LOOP - for(int ss=0;ssoSites();ss++){ - ret._odata[ss] = peekIndex(lhs._odata[ss],i); - } - return ret; + parallel_for(int ss=0;ssoSites();ss++){ + ret._odata[ss] = peekIndex(lhs._odata[ss],i); + } + return ret; }; template - auto PeekIndex(const Lattice &lhs,int i,int j) -> Lattice(lhs._odata[0],i,j))> + auto PeekIndex(const Lattice &lhs,int i,int j) -> Lattice(lhs._odata[0],i,j))> { Lattice(lhs._odata[0],i,j))> ret(lhs._grid); ret.checkerboard=lhs.checkerboard; -PARALLEL_FOR_LOOP - for(int ss=0;ssoSites();ss++){ - ret._odata[ss] = peekIndex(lhs._odata[ss],i,j); - } - return ret; + parallel_for(int ss=0;ssoSites();ss++){ + ret._odata[ss] = peekIndex(lhs._odata[ss],i,j); + } + return ret; }; //////////////////////////////////////////////////////////////////////////////////////////////////// @@ -68,18 +66,16 @@ PARALLEL_FOR_LOOP template void PokeIndex(Lattice &lhs,const Lattice(lhs._odata[0],0))> & rhs,int i) { -PARALLEL_FOR_LOOP - for(int ss=0;ssoSites();ss++){ - pokeIndex(lhs._odata[ss],rhs._odata[ss],i); - } + parallel_for(int ss=0;ssoSites();ss++){ + pokeIndex(lhs._odata[ss],rhs._odata[ss],i); + } } template void PokeIndex(Lattice &lhs,const Lattice(lhs._odata[0],0,0))> & rhs,int i,int j) { -PARALLEL_FOR_LOOP - for(int ss=0;ssoSites();ss++){ - pokeIndex(lhs._odata[ss],rhs._odata[ss],i,j); - } + parallel_for(int ss=0;ssoSites();ss++){ + pokeIndex(lhs._odata[ss],rhs._odata[ss],i,j); + } } ////////////////////////////////////////////////////// @@ -131,9 +127,6 @@ PARALLEL_FOR_LOOP assert( l.checkerboard == l._grid->CheckerBoard(site)); - // FIXME - // assert( sizeof(sobj)*Nsimd == sizeof(vobj)); - int rank,odx,idx; grid->GlobalCoorToRankIndex(rank,odx,idx,site); diff --git a/lib/lattice/Lattice_reality.h b/lib/lattice/Lattice_reality.h index 10add8cd..7e7b2631 100644 --- a/lib/lattice/Lattice_reality.h +++ b/lib/lattice/Lattice_reality.h @@ -40,8 +40,7 @@ namespace Grid { template inline Lattice adj(const Lattice &lhs){ Lattice ret(lhs._grid); -PARALLEL_FOR_LOOP - for(int ss=0;ssoSites();ss++){ + parallel_for(int 
ss=0;ssoSites();ss++){ ret._odata[ss] = adj(lhs._odata[ss]); } return ret; @@ -49,13 +48,10 @@ PARALLEL_FOR_LOOP template inline Lattice conjugate(const Lattice &lhs){ Lattice ret(lhs._grid); -PARALLEL_FOR_LOOP - for(int ss=0;ssoSites();ss++){ - ret._odata[ss] = conjugate(lhs._odata[ss]); + parallel_for(int ss=0;ssoSites();ss++){ + ret._odata[ss] = conjugate(lhs._odata[ss]); } return ret; }; - - } #endif diff --git a/lib/lattice/Lattice_reduction.h b/lib/lattice/Lattice_reduction.h index 2615af48..45a88a64 100644 --- a/lib/lattice/Lattice_reduction.h +++ b/lib/lattice/Lattice_reduction.h @@ -57,8 +57,7 @@ namespace Grid { sumarray[i]=zero; } -PARALLEL_FOR_LOOP - for(int thr=0;thrSumArraySize();thr++){ + parallel_for(int thr=0;thrSumArraySize();thr++){ int nwork, mywork, myoff; GridThread::GetWork(left._grid->oSites(),thr,mywork,myoff); @@ -68,7 +67,7 @@ PARALLEL_FOR_LOOP } sumarray[thr]=TensorRemove(vnrm) ; } - + vector_type vvnrm; vvnrm=zero; // sum across threads for(int i=0;iSumArraySize();i++){ vvnrm = vvnrm+sumarray[i]; @@ -114,18 +113,17 @@ PARALLEL_FOR_LOOP sumarray[i]=zero; } -PARALLEL_FOR_LOOP - for(int thr=0;thrSumArraySize();thr++){ + parallel_for(int thr=0;thrSumArraySize();thr++){ int nwork, mywork, myoff; GridThread::GetWork(grid->oSites(),thr,mywork,myoff); - + vobj vvsum=zero; for(int ss=myoff;ssSumArraySize();i++){ vsum = vsum+sumarray[i]; diff --git a/lib/lattice/Lattice_rng.h b/lib/lattice/Lattice_rng.h index 88f508d9..a1e423b8 100644 --- a/lib/lattice/Lattice_rng.h +++ b/lib/lattice/Lattice_rng.h @@ -302,8 +302,7 @@ namespace Grid { int words=sizeof(scalar_object)/sizeof(scalar_type); -PARALLEL_FOR_LOOP - for(int ss=0;ss buf(Nsimd); for(int m=0;m Lattice { Lattice ret(lhs._grid); -PARALLEL_FOR_LOOP - for(int ss=0;ssoSites();ss++){ + parallel_for(int ss=0;ssoSites();ss++){ ret._odata[ss] = trace(lhs._odata[ss]); } return ret; @@ -56,8 +55,7 @@ PARALLEL_FOR_LOOP inline auto TraceIndex(const Lattice &lhs) -> Lattice(lhs._odata[0]))> { Lattice(lhs._odata[0]))> ret(lhs._grid); -PARALLEL_FOR_LOOP - for(int ss=0;ssoSites();ss++){ + parallel_for(int ss=0;ssoSites();ss++){ ret._odata[ss] = traceIndex(lhs._odata[ss]); } return ret; diff --git a/lib/lattice/Lattice_transfer.h b/lib/lattice/Lattice_transfer.h index 4dec1f0c..c2bd6cf7 100644 --- a/lib/lattice/Lattice_transfer.h +++ b/lib/lattice/Lattice_transfer.h @@ -51,7 +51,7 @@ inline void subdivides(GridBase *coarse,GridBase *fine) template inline void pickCheckerboard(int cb,Lattice &half,const Lattice &full){ half.checkerboard = cb; int ssh=0; - //PARALLEL_FOR_LOOP + //parallel_for for(int ss=0;ssoSites();ss++){ std::vector coor; int cbos; @@ -68,7 +68,7 @@ inline void subdivides(GridBase *coarse,GridBase *fine) template inline void setCheckerboard(Lattice &full,const Lattice &half){ int cb = half.checkerboard; int ssh=0; - //PARALLEL_FOR_LOOP + //parallel_for for(int ss=0;ssoSites();ss++){ std::vector coor; int cbos; @@ -153,8 +153,7 @@ inline void blockZAXPY(Lattice &fineZ, assert(block_r[d]*coarse->_rdimensions[d]==fine->_rdimensions[d]); } -PARALLEL_FOR_LOOP - for(int sf=0;sfoSites();sf++){ + parallel_for(int sf=0;sfoSites();sf++){ int sc; std::vector coor_c(_ndimension); @@ -186,8 +185,7 @@ template fine_inner = localInnerProduct(fineX,fineY); blockSum(coarse_inner,fine_inner); -PARALLEL_FOR_LOOP - for(int ss=0;ssoSites();ss++){ + parallel_for(int ss=0;ssoSites();ss++){ CoarseInner._odata[ss] = coarse_inner._odata[ss]; } } @@ -347,8 +345,7 @@ void localConvert(const Lattice &in,Lattice &out) assert(ig->lSites() == 
og->lSites()); } - PARALLEL_FOR_LOOP - for(int idx=0;idxlSites();idx++){ + parallel_for(int idx=0;idxlSites();idx++){ sobj s; ssobj ss; @@ -386,8 +383,7 @@ void InsertSlice(Lattice &lowDim,Lattice & higherDim,int slice, int } // the above should guarantee that the operations are local - PARALLEL_FOR_LOOP - for(int idx=0;idxlSites();idx++){ + parallel_for(int idx=0;idxlSites();idx++){ sobj s; std::vector lcoor(nl); std::vector hcoor(nh); @@ -428,8 +424,7 @@ void ExtractSlice(Lattice &lowDim, Lattice & higherDim,int slice, in } } // the above should guarantee that the operations are local - PARALLEL_FOR_LOOP - for(int idx=0;idxlSites();idx++){ + parallel_for(int idx=0;idxlSites();idx++){ sobj s; std::vector lcoor(nl); std::vector hcoor(nh); @@ -468,8 +463,7 @@ void InsertSliceLocal(Lattice &lowDim, Lattice & higherDim,int slice } // the above should guarantee that the operations are local - PARALLEL_FOR_LOOP - for(int idx=0;idxlSites();idx++){ + parallel_for(int idx=0;idxlSites();idx++){ sobj s; std::vector lcoor(nl); std::vector hcoor(nh); @@ -504,8 +498,7 @@ void ExtractSliceLocal(Lattice &lowDim, Lattice & higherDim,int slic } // the above should guarantee that the operations are local - PARALLEL_FOR_LOOP - for(int idx=0;idxlSites();idx++){ + parallel_for(int idx=0;idxlSites();idx++){ sobj s; std::vector lcoor(nl); std::vector hcoor(nh); @@ -574,8 +567,7 @@ typename std::enable_if::value && !isSIMDvectorized in_grid->iCoorFromIindex(in_icoor[lane], lane); } -PARALLEL_FOR_LOOP - for(int in_oidx = 0; in_oidx < in_grid->oSites(); in_oidx++){ //loop over outer index + parallel_for(int in_oidx = 0; in_oidx < in_grid->oSites(); in_oidx++){ //loop over outer index //Assemble vector of pointers to output elements std::vector out_ptrs(in_nsimd); @@ -623,8 +615,7 @@ void precisionChange(Lattice &out, const Lattice &in){ std::vector in_slex_conv(in_grid->lSites()); unvectorizeToLexOrdArray(in_slex_conv, in); - PARALLEL_FOR_LOOP - for(int out_oidx=0;out_oidxoSites();out_oidx++){ + parallel_for(int out_oidx=0;out_oidxoSites();out_oidx++){ std::vector out_ocoor(ndim); out_grid->oCoorFromOindex(out_ocoor, out_oidx); @@ -642,10 +633,6 @@ void precisionChange(Lattice &out, const Lattice &in){ merge(out._odata[out_oidx], ptrs, 0); } } - - - - } #endif diff --git a/lib/lattice/Lattice_transpose.h b/lib/lattice/Lattice_transpose.h index c8d349a6..0ae7c6b3 100644 --- a/lib/lattice/Lattice_transpose.h +++ b/lib/lattice/Lattice_transpose.h @@ -40,27 +40,24 @@ namespace Grid { //////////////////////////////////////////////////////////////////////////////////////////////////// template inline Lattice transpose(const Lattice &lhs){ - Lattice ret(lhs._grid); -PARALLEL_FOR_LOOP - for(int ss=0;ssoSites();ss++){ - ret._odata[ss] = transpose(lhs._odata[ss]); - } - return ret; - }; + Lattice ret(lhs._grid); + parallel_for(int ss=0;ssoSites();ss++){ + ret._odata[ss] = transpose(lhs._odata[ss]); + } + return ret; + }; - //////////////////////////////////////////////////////////////////////////////////////////////////// - // Index level dependent transpose - //////////////////////////////////////////////////////////////////////////////////////////////////// - template + //////////////////////////////////////////////////////////////////////////////////////////////////// + // Index level dependent transpose + //////////////////////////////////////////////////////////////////////////////////////////////////// + template inline auto TransposeIndex(const Lattice &lhs) -> Lattice(lhs._odata[0]))> - { - Lattice(lhs._odata[0]))> 
ret(lhs._grid); -PARALLEL_FOR_LOOP - for(int ss=0;ssoSites();ss++){ - ret._odata[ss] = transposeIndex(lhs._odata[ss]); - } - return ret; - }; - + { + Lattice(lhs._odata[0]))> ret(lhs._grid); + parallel_for(int ss=0;ssoSites();ss++){ + ret._odata[ss] = transposeIndex(lhs._odata[ss]); + } + return ret; + }; } #endif diff --git a/lib/lattice/Lattice_unary.h b/lib/lattice/Lattice_unary.h index f3c54896..f5b324ec 100644 --- a/lib/lattice/Lattice_unary.h +++ b/lib/lattice/Lattice_unary.h @@ -37,8 +37,7 @@ namespace Grid { Lattice ret(rhs._grid); ret.checkerboard = rhs.checkerboard; conformable(ret,rhs); -PARALLEL_FOR_LOOP - for(int ss=0;ssoSites();ss++){ + parallel_for(int ss=0;ssoSites();ss++){ ret._odata[ss]=pow(rhs._odata[ss],y); } return ret; @@ -47,8 +46,7 @@ PARALLEL_FOR_LOOP Lattice ret(rhs._grid); ret.checkerboard = rhs.checkerboard; conformable(ret,rhs); -PARALLEL_FOR_LOOP - for(int ss=0;ssoSites();ss++){ + parallel_for(int ss=0;ssoSites();ss++){ ret._odata[ss]=mod(rhs._odata[ss],y); } return ret; @@ -58,8 +56,7 @@ PARALLEL_FOR_LOOP Lattice ret(rhs._grid); ret.checkerboard = rhs.checkerboard; conformable(ret,rhs); -PARALLEL_FOR_LOOP - for(int ss=0;ssoSites();ss++){ + parallel_for(int ss=0;ssoSites();ss++){ ret._odata[ss]=div(rhs._odata[ss],y); } return ret; @@ -69,8 +66,7 @@ PARALLEL_FOR_LOOP Lattice ret(rhs._grid); ret.checkerboard = rhs.checkerboard; conformable(ret,rhs); -PARALLEL_FOR_LOOP - for(int ss=0;ssoSites();ss++){ + parallel_for(int ss=0;ssoSites();ss++){ ret._odata[ss]=Exponentiate(rhs._odata[ss],alpha, Nexp); } return ret; diff --git a/lib/lattice/Lattice_where.h b/lib/lattice/Lattice_where.h index cff372a0..6686d1b3 100644 --- a/lib/lattice/Lattice_where.h +++ b/lib/lattice/Lattice_where.h @@ -56,8 +56,7 @@ inline void whereWolf(Lattice &ret,const Lattice &predicate,Lattice< std::vector truevals (Nsimd); std::vector falsevals(Nsimd); -PARALLEL_FOR_LOOP - for(int ss=0;ssoSites(); ss++){ + parallel_for(int ss=0;ssoSites(); ss++){ extract(iftrue._odata[ss] ,truevals); extract(iffalse._odata[ss] ,falsevals); diff --git a/lib/qcd/action/fermion/CayleyFermion5Dcache.cc b/lib/qcd/action/fermion/CayleyFermion5Dcache.cc index 8e7df945..319f8d3c 100644 --- a/lib/qcd/action/fermion/CayleyFermion5Dcache.cc +++ b/lib/qcd/action/fermion/CayleyFermion5Dcache.cc @@ -54,8 +54,8 @@ void CayleyFermion5D::M5D(const FermionField &psi, // Flops = 6.0*(Nc*Ns) *Ls*vol M5Dcalls++; M5Dtime-=usecond(); -PARALLEL_FOR_LOOP - for(int ss=0;ssoSites();ss+=Ls){ // adds Ls + + parallel_for(int ss=0;ssoSites();ss+=Ls){ // adds Ls for(int s=0;s::M5Ddag(const FermionField &psi, // Flops = 6.0*(Nc*Ns) *Ls*vol M5Dcalls++; M5Dtime-=usecond(); -PARALLEL_FOR_LOOP - for(int ss=0;ssoSites();ss+=Ls){ // adds Ls + + parallel_for(int ss=0;ssoSites();ss+=Ls){ // adds Ls auto tmp = psi._odata[0]; for(int s=0;s::MooeeInv (const FermionField &psi, FermionField & MooeeInvCalls++; MooeeInvTime-=usecond(); -PARALLEL_FOR_LOOP - for(int ss=0;ssoSites();ss+=Ls){ // adds Ls + parallel_for(int ss=0;ssoSites();ss+=Ls){ // adds Ls auto tmp = psi._odata[0]; // flops = 12*2*Ls + 12*2*Ls + 3*12*Ls + 12*2*Ls = 12*Ls * (9) = 108*Ls flops @@ -184,8 +183,7 @@ void CayleyFermion5D::MooeeInvDag (const FermionField &psi, FermionField & MooeeInvCalls++; MooeeInvTime-=usecond(); -PARALLEL_FOR_LOOP - for(int ss=0;ssoSites();ss+=Ls){ // adds Ls + parallel_for(int ss=0;ssoSites();ss+=Ls){ // adds Ls auto tmp = psi._odata[0]; diff --git a/lib/qcd/action/fermion/CayleyFermion5Dvec.cc b/lib/qcd/action/fermion/CayleyFermion5Dvec.cc index 
ed742ea3..6d7b252e 100644 --- a/lib/qcd/action/fermion/CayleyFermion5Dvec.cc +++ b/lib/qcd/action/fermion/CayleyFermion5Dvec.cc @@ -91,8 +91,7 @@ void CayleyFermion5D::M5D(const FermionField &psi, assert(Nc==3); -PARALLEL_FOR_LOOP - for(int ss=0;ssoSites();ss+=LLs){ // adds LLs + parallel_for(int ss=0;ssoSites();ss+=LLs){ // adds LLs #if 0 alignas(64) SiteHalfSpinor hp; alignas(64) SiteHalfSpinor hm; @@ -232,8 +231,7 @@ void CayleyFermion5D::M5Ddag(const FermionField &psi, M5Dcalls++; M5Dtime-=usecond(); -PARALLEL_FOR_LOOP - for(int ss=0;ssoSites();ss+=LLs){ // adds LLs + parallel_for(int ss=0;ssoSites();ss+=LLs){ // adds LLs #if 0 alignas(64) SiteHalfSpinor hp; alignas(64) SiteHalfSpinor hm; @@ -792,13 +790,11 @@ void CayleyFermion5D::MooeeInternal(const FermionField &psi, FermionField MooeeInvTime-=usecond(); if ( switcheroo::iscomplex() ) { - PARALLEL_FOR_LOOP - for(auto site=0;siteoSites();sss++){ + parallel_for(int sss=0;sssoSites();sss++){ int sU=sss; for(int s=0;s(outerProduct(Btilde, A)); -PARALLEL_FOR_LOOP - for (auto ss = tmp.begin(); ss < tmp.end(); ss++) { + parallel_for(auto ss = tmp.begin(); ss < tmp.end(); ss++) { link[ss]() = tmp[ss](0, 0) - conjugate(tmp[ss](1, 1)); } PokeIndex(mat, link, mu); @@ -498,8 +493,7 @@ PARALLEL_FOR_LOOP GaugeLinkField tmp(mat._grid); tmp = zero; -PARALLEL_FOR_LOOP - for (int ss = 0; ss < tmp._grid->oSites(); ss++) { + parallel_for(int ss = 0; ss < tmp._grid->oSites(); ss++) { for (int s = 0; s < Ls; s++) { int sF = s + Ls * ss; auto ttmp = traceIndex(outerProduct(Btilde[sF], Atilde[sF])); diff --git a/lib/qcd/action/fermion/WilsonFermion.cc b/lib/qcd/action/fermion/WilsonFermion.cc index f5b76c1a..ac5f8945 100644 --- a/lib/qcd/action/fermion/WilsonFermion.cc +++ b/lib/qcd/action/fermion/WilsonFermion.cc @@ -222,8 +222,7 @@ void WilsonFermion::DerivInternal(StencilImpl &st, DoubledGaugeField &U, //////////////////////// // Call the single hop //////////////////////// - PARALLEL_FOR_LOOP - for (int sss = 0; sss < B._grid->oSites(); sss++) { + parallel_for (int sss = 0; sss < B._grid->oSites(); sss++) { Kernels::DhopDir(st, U, st.CommBuf(), sss, sss, B, Btilde, mu, gamma); } @@ -333,8 +332,7 @@ void WilsonFermion::DhopDirDisp(const FermionField &in, FermionField &out, Stencil.HaloExchange(in, compressor); - PARALLEL_FOR_LOOP - for (int sss = 0; sss < in._grid->oSites(); sss++) { + parallel_for (int sss = 0; sss < in._grid->oSites(); sss++) { Kernels::DhopDir(Stencil, Umu, Stencil.CommBuf(), sss, sss, in, out, dirdisp, gamma); } }; @@ -350,13 +348,11 @@ void WilsonFermion::DhopInternal(StencilImpl &st, LebesgueOrder &lo, st.HaloExchange(in, compressor); if (dag == DaggerYes) { - PARALLEL_FOR_LOOP - for (int sss = 0; sss < in._grid->oSites(); sss++) { + parallel_for (int sss = 0; sss < in._grid->oSites(); sss++) { Kernels::DhopSiteDag(st, lo, U, st.CommBuf(), sss, sss, 1, 1, in, out); } } else { - PARALLEL_FOR_LOOP - for (int sss = 0; sss < in._grid->oSites(); sss++) { + parallel_for (int sss = 0; sss < in._grid->oSites(); sss++) { Kernels::DhopSite(st, lo, U, st.CommBuf(), sss, sss, 1, 1, in, out); } } diff --git a/lib/qcd/action/fermion/WilsonFermion5D.cc b/lib/qcd/action/fermion/WilsonFermion5D.cc index ad65b345..39e61bc6 100644 --- a/lib/qcd/action/fermion/WilsonFermion5D.cc +++ b/lib/qcd/action/fermion/WilsonFermion5D.cc @@ -275,8 +275,7 @@ void WilsonFermion5D::DhopDir(const FermionField &in, FermionField &out,in assert(dirdisp<=7); assert(dirdisp>=0); -PARALLEL_FOR_LOOP - for(int ss=0;ssoSites();ss++){ + parallel_for(int ss=0;ssoSites();ss++){ 
for(int s=0;s::DerivInternal(StencilImpl & st, //////////////////////// DerivDhopComputeTime -= usecond(); - PARALLEL_FOR_LOOP - for (int sss = 0; sss < U._grid->oSites(); sss++) { + parallel_for (int sss = 0; sss < U._grid->oSites(); sss++) { for (int s = 0; s < Ls; s++) { int sU = sss; int sF = s + Ls * sU; @@ -493,73 +491,18 @@ void WilsonFermion5D::DhopInternalSerialComms(StencilImpl & st, LebesgueOr // Dhop takes the 4d grid from U, and makes a 5d index for fermion if (dag == DaggerYes) { - PARALLEL_FOR_LOOP - for (int ss = 0; ss < U._grid->oSites(); ss++) { + parallel_for (int ss = 0; ss < U._grid->oSites(); ss++) { int sU = ss; int sF = LLs * sU; Kernels::DhopSiteDag(st,lo,U,st.CommBuf(),sF,sU,LLs,1,in,out); } } else { - PARALLEL_FOR_LOOP - for (int ss = 0; ss < U._grid->oSites(); ss++) { + parallel_for (int ss = 0; ss < U._grid->oSites(); ss++) { int sU = ss; int sF = LLs * sU; Kernels::DhopSite(st,lo,U,st.CommBuf(),sF,sU,LLs,1,in,out); } } - /* - - if (dag == DaggerYes) { - PARALLEL_FOR_LOOP - for (int ss = 0; ss < U._grid->oSites(); ss++) { - int sU = ss; - int sF = LLs * sU; - Kernels::DhopSiteDag(st,lo,U,st.CommBuf(),sF,sU,LLs,1,in,out); - } -#ifdef AVX512_SWITCHOFF - } else if (stat.is_init() ) { - - int nthreads; - stat.start(); -#pragma omp parallel - { -#pragma omp master - nthreads = omp_get_num_threads(); - int mythread = omp_get_thread_num(); - stat.enter(mythread); -#pragma omp for nowait - for(int ss=0;ssoSites();ss++) { - int sU=ss; - int sF=LLs*sU; - Kernels::DhopSite(st,lo,U,st.CommBuf(),sF,sU,LLs,1,in,out); - } - stat.exit(mythread); - } - stat.accum(nthreads); -#endif - } else { -#if 1 - PARALLEL_FOR_LOOP - for (int ss = 0; ss < U._grid->oSites(); ss++) { - int sU = ss; - int sF = LLs * sU; - Kernels::DhopSite(st,lo,U,st.CommBuf(),sF,sU,LLs,1,in,out); - } -#else -#ifdef GRID_OMP -#pragma omp parallel -#endif - { - int len = U._grid->oSites(); - int me, myoff,mywork; - GridThread::GetWorkBarrier(len,me, mywork,myoff); - int sF = LLs * myoff; - Kernels::DhopSite(st,lo,U,st.CommBuf(),sF,myoff,LLs,mywork,in,out); - } -#endif - } - */ - DhopComputeTime+=usecond(); } diff --git a/lib/qcd/action/gauge/GaugeImpl.h b/lib/qcd/action/gauge/GaugeImpl.h index 400381bb..6041c006 100644 --- a/lib/qcd/action/gauge/GaugeImpl.h +++ b/lib/qcd/action/gauge/GaugeImpl.h @@ -66,8 +66,7 @@ public: // Move this elsewhere? 
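// Aside on the experimental code deleted from WilsonFermion5D.cc above: the
// commented-out variant handed each thread one contiguous chunk of sites via
// GridThread::GetWorkBarrier(len,me,mywork,myoff) and called the kernel once
// per chunk, instead of once per site under parallel_for. A hedged,
// self-contained sketch of what such a static partition computes (assumed
// semantics; the real GridThread API may differ):
#include <algorithm>
inline void getWorkChunk(int nwork, int me, int nthreads,
                         int &mywork, int &myoff) {
  int chunk = (nwork + nthreads - 1) / nthreads; // ceil(nwork/nthreads)
  myoff  = std::min(me * chunk, nwork);          // start of this thread's chunk
  mywork = std::min(chunk, nwork - myoff);       // clipped at the upper end
}
// The kernel then strides sF = LLs*myoff .. LLs*(myoff+mywork)-1 internally,
// which is exactly the (sF, myoff, LLs, mywork) call shape in the code above.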
FIXME static inline void AddGaugeLink(GaugeField &U, GaugeLinkField &W, int mu) { // U[mu] += W - PARALLEL_FOR_LOOP - for (auto ss = 0; ss < U._grid->oSites(); ss++) { + parallel_for (auto ss = 0; ss < U._grid->oSites(); ss++) { U._odata[ss]._internal[mu] = U._odata[ss]._internal[mu] + W._odata[ss]._internal; } diff --git a/lib/qcd/utils/LinalgUtils.h b/lib/qcd/utils/LinalgUtils.h index e7e6a794..6555511c 100644 --- a/lib/qcd/utils/LinalgUtils.h +++ b/lib/qcd/utils/LinalgUtils.h @@ -48,8 +48,7 @@ void axpibg5x(Lattice &z,const Lattice &x,Coeff a,Coeff b) GridBase *grid=x._grid; Gamma G5(Gamma::Gamma5); -PARALLEL_FOR_LOOP - for(int ss=0;ssoSites();ss++){ + parallel_for(int ss=0;ssoSites();ss++){ vobj tmp; tmp = a*x._odata[ss]; tmp = tmp + G5*(b*timesI(x._odata[ss])); @@ -65,8 +64,7 @@ void axpby_ssp(Lattice &z, Coeff a,const Lattice &x,Coeff b,const La conformable(x,z); GridBase *grid=x._grid; int Ls = grid->_rdimensions[0]; -PARALLEL_FOR_LOOP - for(int ss=0;ssoSites();ss+=Ls){ // adds Ls + parallel_for(int ss=0;ssoSites();ss+=Ls){ // adds Ls vobj tmp = a*x._odata[ss+s]+b*y._odata[ss+sp]; vstream(z._odata[ss+s],tmp); } @@ -81,8 +79,7 @@ void ag5xpby_ssp(Lattice &z,Coeff a,const Lattice &x,Coeff b,const L GridBase *grid=x._grid; int Ls = grid->_rdimensions[0]; Gamma G5(Gamma::Gamma5); -PARALLEL_FOR_LOOP - for(int ss=0;ssoSites();ss+=Ls){ // adds Ls + parallel_for(int ss=0;ssoSites();ss+=Ls){ // adds Ls vobj tmp; tmp = G5*x._odata[ss+s]*a; tmp = tmp + b*y._odata[ss+sp]; @@ -99,8 +96,7 @@ void axpbg5y_ssp(Lattice &z,Coeff a,const Lattice &x,Coeff b,const L GridBase *grid=x._grid; int Ls = grid->_rdimensions[0]; Gamma G5(Gamma::Gamma5); -PARALLEL_FOR_LOOP - for(int ss=0;ssoSites();ss+=Ls){ // adds Ls + parallel_for(int ss=0;ssoSites();ss+=Ls){ // adds Ls vobj tmp; tmp = G5*y._odata[ss+sp]*b; tmp = tmp + a*x._odata[ss+s]; @@ -117,8 +113,7 @@ void ag5xpbg5y_ssp(Lattice &z,Coeff a,const Lattice &x,Coeff b,const GridBase *grid=x._grid; int Ls = grid->_rdimensions[0]; Gamma G5(Gamma::Gamma5); -PARALLEL_FOR_LOOP - for(int ss=0;ssoSites();ss+=Ls){ // adds Ls + parallel_for(int ss=0;ssoSites();ss+=Ls){ // adds Ls vobj tmp1; vobj tmp2; tmp1 = a*x._odata[ss+s]+b*y._odata[ss+sp]; @@ -135,8 +130,7 @@ void axpby_ssp_pminus(Lattice &z,Coeff a,const Lattice &x,Coeff b,co conformable(x,z); GridBase *grid=x._grid; int Ls = grid->_rdimensions[0]; -PARALLEL_FOR_LOOP - for(int ss=0;ssoSites();ss+=Ls){ // adds Ls + parallel_for(int ss=0;ssoSites();ss+=Ls){ // adds Ls vobj tmp; spProj5m(tmp,y._odata[ss+sp]); tmp = a*x._odata[ss+s]+b*tmp; @@ -152,8 +146,7 @@ void axpby_ssp_pplus(Lattice &z,Coeff a,const Lattice &x,Coeff b,con conformable(x,z); GridBase *grid=x._grid; int Ls = grid->_rdimensions[0]; -PARALLEL_FOR_LOOP - for(int ss=0;ssoSites();ss+=Ls){ // adds Ls + parallel_for(int ss=0;ssoSites();ss+=Ls){ // adds Ls vobj tmp; spProj5p(tmp,y._odata[ss+sp]); tmp = a*x._odata[ss+s]+b*tmp; @@ -169,8 +162,7 @@ void G5R5(Lattice &z,const Lattice &x) conformable(x,z); int Ls = grid->_rdimensions[0]; Gamma G5(Gamma::Gamma5); -PARALLEL_FOR_LOOP - for(int ss=0;ssoSites();ss+=Ls){ // adds Ls + parallel_for(int ss=0;ssoSites();ss+=Ls){ // adds Ls vobj tmp; for(int s=0;soSites(); ss++) { + parallel_for (int ss = 0; ss < grid->oSites(); ss++) { subgroup._odata[ss]()()(0, 0) = source._odata[ss]()()(i0, i0); subgroup._odata[ss]()()(0, 1) = source._odata[ss]()()(i0, i1); subgroup._odata[ss]()()(1, 0) = source._odata[ss]()()(i1, i0); @@ -252,8 +251,7 @@ class SU { su2SubGroupIndex(i0, i1, su2_index); dest = 1.0; // start out with 
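// The su2Extract/su2Insert pair in this hunk pulls out, and re-embeds, the
// 2x2 SU(2) block at rows/columns (i0,i1) of an SU(Nc) matrix that is
// otherwise the identity -- the standard Cabibbo-Marinari building block for
// heatbath and overrelaxation updates. A hedged, self-contained sketch of
// the embedding on plain arrays (names illustrative, not Grid's API):
#include <complex>
template <int Nc>
void su2Embed(std::complex<double> V[Nc][Nc],
              const std::complex<double> a[2][2], int i0, int i1) {
  for (int r = 0; r < Nc; r++)
    for (int c = 0; c < Nc; c++)
      V[r][c] = (r == c) ? 1.0 : 0.0;            // start out with the identity
  V[i0][i0] = a[0][0];  V[i0][i1] = a[0][1];
  V[i1][i0] = a[1][0];  V[i1][i1] = a[1][1];
}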
identity - PARALLEL_FOR_LOOP - for (int ss = 0; ss < grid->oSites(); ss++) { + parallel_for (int ss = 0; ss < grid->oSites(); ss++) { dest._odata[ss]()()(i0, i0) = subgroup._odata[ss]()()(0, 0); dest._odata[ss]()()(i0, i1) = subgroup._odata[ss]()()(0, 1); dest._odata[ss]()()(i1, i0) = subgroup._odata[ss]()()(1, 0); diff --git a/tests/forces/Test_contfrac_force.cc b/tests/forces/Test_contfrac_force.cc index 0779b710..4cfe5ca3 100644 --- a/tests/forces/Test_contfrac_force.cc +++ b/tests/forces/Test_contfrac_force.cc @@ -31,8 +31,6 @@ using namespace std; using namespace Grid; using namespace Grid::QCD; -#define parallel_for PARALLEL_FOR_LOOP for - int main (int argc, char ** argv) { Grid_init(&argc,&argv); diff --git a/tests/forces/Test_dwf_force.cc b/tests/forces/Test_dwf_force.cc index c817eed1..f7a3ba67 100644 --- a/tests/forces/Test_dwf_force.cc +++ b/tests/forces/Test_dwf_force.cc @@ -31,8 +31,6 @@ using namespace std; using namespace Grid; using namespace Grid::QCD; -#define parallel_for PARALLEL_FOR_LOOP for - int main (int argc, char ** argv) { Grid_init(&argc,&argv); diff --git a/tests/forces/Test_dwf_gpforce.cc b/tests/forces/Test_dwf_gpforce.cc index fb041f7a..5094b8a7 100644 --- a/tests/forces/Test_dwf_gpforce.cc +++ b/tests/forces/Test_dwf_gpforce.cc @@ -31,7 +31,7 @@ using namespace std; using namespace Grid; using namespace Grid::QCD; -#define parallel_for PARALLEL_FOR_LOOP for + int main (int argc, char ** argv) { diff --git a/tests/forces/Test_gp_rect_force.cc b/tests/forces/Test_gp_rect_force.cc index a48dddc8..551c3a20 100644 --- a/tests/forces/Test_gp_rect_force.cc +++ b/tests/forces/Test_gp_rect_force.cc @@ -31,7 +31,7 @@ using namespace std; using namespace Grid; using namespace Grid::QCD; -#define parallel_for PARALLEL_FOR_LOOP for + int main (int argc, char ** argv) { diff --git a/tests/forces/Test_gpdwf_force.cc b/tests/forces/Test_gpdwf_force.cc index 6c6f19f0..ee0df5dd 100644 --- a/tests/forces/Test_gpdwf_force.cc +++ b/tests/forces/Test_gpdwf_force.cc @@ -31,7 +31,7 @@ using namespace std; using namespace Grid; using namespace Grid::QCD; -#define parallel_for PARALLEL_FOR_LOOP for + int main (int argc, char ** argv) { diff --git a/tests/forces/Test_gpwilson_force.cc b/tests/forces/Test_gpwilson_force.cc index 25d57684..bae46800 100644 --- a/tests/forces/Test_gpwilson_force.cc +++ b/tests/forces/Test_gpwilson_force.cc @@ -31,7 +31,7 @@ using namespace std; using namespace Grid; using namespace Grid::QCD; -#define parallel_for PARALLEL_FOR_LOOP for + int main (int argc, char ** argv) { diff --git a/tests/forces/Test_partfrac_force.cc b/tests/forces/Test_partfrac_force.cc index 8fc0fb9b..0562fe3d 100644 --- a/tests/forces/Test_partfrac_force.cc +++ b/tests/forces/Test_partfrac_force.cc @@ -31,7 +31,7 @@ using namespace std; using namespace Grid; using namespace Grid::QCD; -#define parallel_for PARALLEL_FOR_LOOP for + int main (int argc, char ** argv) { diff --git a/tests/forces/Test_rect_force.cc b/tests/forces/Test_rect_force.cc index 67edba1b..97281854 100644 --- a/tests/forces/Test_rect_force.cc +++ b/tests/forces/Test_rect_force.cc @@ -31,7 +31,7 @@ using namespace std; using namespace Grid; using namespace Grid::QCD; -#define parallel_for PARALLEL_FOR_LOOP for + int main (int argc, char ** argv) { diff --git a/tests/forces/Test_wilson_force.cc b/tests/forces/Test_wilson_force.cc index 1af156cc..60d31b51 100644 --- a/tests/forces/Test_wilson_force.cc +++ b/tests/forces/Test_wilson_force.cc @@ -31,7 +31,7 @@ using namespace std; using namespace Grid; using 
namespace Grid::QCD; -#define parallel_for PARALLEL_FOR_LOOP for + int main (int argc, char ** argv) { diff --git a/tests/forces/Test_wilson_force_phiMdagMphi.cc b/tests/forces/Test_wilson_force_phiMdagMphi.cc index 1022cf52..7717e9bc 100644 --- a/tests/forces/Test_wilson_force_phiMdagMphi.cc +++ b/tests/forces/Test_wilson_force_phiMdagMphi.cc @@ -31,7 +31,7 @@ using namespace std; using namespace Grid; using namespace Grid::QCD; -#define parallel_for PARALLEL_FOR_LOOP for + int main (int argc, char ** argv) { diff --git a/tests/forces/Test_wilson_force_phiMphi.cc b/tests/forces/Test_wilson_force_phiMphi.cc index d55d1bea..c9e56c32 100644 --- a/tests/forces/Test_wilson_force_phiMphi.cc +++ b/tests/forces/Test_wilson_force_phiMphi.cc @@ -31,7 +31,7 @@ using namespace std; using namespace Grid; using namespace Grid::QCD; -#define parallel_for PARALLEL_FOR_LOOP for + int main (int argc, char ** argv) { From bf7e3f20d4f7bc26439ef5c310495b67bb9d2fdc Mon Sep 17 00:00:00 2001 From: azusayamaguchi Date: Tue, 21 Feb 2017 14:35:42 +0000 Subject: [PATCH 064/101] Staggaered fermion optimised version --- .../fermion/ImprovedStaggeredFermion5D.cc | 29 +- lib/qcd/action/fermion/StaggeredKernels.cc | 64 ++-- lib/qcd/action/fermion/StaggeredKernels.h | 14 +- lib/qcd/action/fermion/StaggeredKernelsAsm.cc | 294 +++++++++--------- .../action/fermion/StaggeredKernelsHand.cc | 27 +- tests/core/Test_staggered5D.cc | 2 +- tests/core/Test_staggered5Dvec.cc | 40 ++- 7 files changed, 273 insertions(+), 197 deletions(-) diff --git a/lib/qcd/action/fermion/ImprovedStaggeredFermion5D.cc b/lib/qcd/action/fermion/ImprovedStaggeredFermion5D.cc index 0455df0d..7068fc3f 100644 --- a/lib/qcd/action/fermion/ImprovedStaggeredFermion5D.cc +++ b/lib/qcd/action/fermion/ImprovedStaggeredFermion5D.cc @@ -237,19 +237,32 @@ void ImprovedStaggeredFermion5D::DhopInternal(StencilImpl & st, LebesgueOr if (dag == DaggerYes) { PARALLEL_FOR_LOOP for (int ss = 0; ss < U._grid->oSites(); ss++) { - for(int s=0;soSites(); ss++) { - for(int s=0;soSites(); + int me,mywork,myoff; + GridThread::GetWorkBarrier(len,me, mywork,myoff); + for (int ss = myoff; ss < myoff+mywork; ss++) { + int sU=ss; + int sF=LLs*sU; + Kernels::DhopSite(st,lo,U,UUU,st.CommBuf(),LLs,sU,in,out); + } + GridThread::ThreadBarrier(); + } + } +#endif } } diff --git a/lib/qcd/action/fermion/StaggeredKernels.cc b/lib/qcd/action/fermion/StaggeredKernels.cc index 06720c64..597b14ea 100644 --- a/lib/qcd/action/fermion/StaggeredKernels.cc +++ b/lib/qcd/action/fermion/StaggeredKernels.cc @@ -182,65 +182,81 @@ void StaggeredKernels::DhopSiteDepth(StencilImpl &st, LebesgueOrder &lo, D vstream(out, Uchi); }; -// Need controls to do interior, exterior, or both template void StaggeredKernels::DhopSiteDag(StencilImpl &st, LebesgueOrder &lo, DoubledGaugeField &U, DoubledGaugeField &UUU, - SiteSpinor *buf, int sF, - int sU, const FermionField &in, FermionField &out) { + SiteSpinor *buf, int LLs, int sU, + const FermionField &in, FermionField &out) { + int dag(1); SiteSpinor naik; SiteSpinor naive; int oneLink =0; int threeLink=1; + Real scale; + if(dag) scale = -1.0; + else scale = 1.0; + switch(Opt) { #ifdef AVX512 case OptInlineAsm: - DhopSiteAsm(st,lo,U,UUU,buf,sF,sU,in,out._odata[sF]); + DhopSiteAsm(st,lo,U,UUU,buf,LLs,sU,in,out); break; #endif case OptHandUnroll: - DhopSiteDepthHand(st,lo,U,buf,sF,sU,in,naive,oneLink); - DhopSiteDepthHand(st,lo,UUU,buf,sF,sU,in,naik,threeLink); - out._odata[sF] =-naive-naik; + DhopSiteDepthHand(st,lo,U,UUU,buf,LLs,sU,in,out,dag); break; case OptGeneric: - 
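// The generic path below makes the structure of the improved staggered
// operator explicit: one stencil pass with the thin links U (one-link term)
// plus one with the tripled links UUU (three-link Naik term), summed per
// site. Schematically, hedged -- the staggered phases \eta_\mu(x) and the
// coefficient c_1 are not spelled out in these hunks (the 5D test further on
// sets c_2 = -1/24, consistent with the Naik term):
//
//   (D\psi)(x) = \sum_\mu \eta_\mu(x)\Big[
//        c_1\big(U_\mu(x)\,\psi(x+\hat\mu)  - U_\mu^\dagger(x-\hat\mu)\,\psi(x-\hat\mu)\big)
//      + c_2\big(W_\mu(x)\,\psi(x+3\hat\mu) - W_\mu^\dagger(x-3\hat\mu)\,\psi(x-3\hat\mu)\big)\Big]
//
// This hopping term is anti-Hermitian, which is why the dagger is applied as
// an overall sign flip (the scale = -1.0 used when dag is set) rather than by
// a separate kernel; no mass term appears in these kernels, so it must be
// added by the outer fermion operator.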
DhopSiteDepth(st,lo,U,buf,sF,sU,in,naive,oneLink); - DhopSiteDepth(st,lo,UUU,buf,sF,sU,in,naik,threeLink); - out._odata[sF] =-naive-naik; + for(int s=0;s void StaggeredKernels::DhopSite(StencilImpl &st, LebesgueOrder &lo, DoubledGaugeField &U, DoubledGaugeField &UUU, - SiteSpinor *buf, int sF, + SiteSpinor *buf, int LLs, int sU, const FermionField &in, FermionField &out) { - int oneLink =0; - int threeLink=1; - SiteSpinor naik; - SiteSpinor naive; - static int once; + + int dag(0); + + int oneLink =0; + int threeLink=1; + SiteSpinor naik; + SiteSpinor naive; + static int once; + int sF=LLs*sU; + switch(Opt) { #ifdef AVX512 case OptInlineAsm: - DhopSiteAsm(st,lo,U,UUU,buf,sF,sU,in,out._odata[sF]); + DhopSiteAsm(st,lo,U,UUU,buf,LLs,sU,in,out); break; #endif case OptHandUnroll: - DhopSiteDepthHand(st,lo,U,buf,sF,sU,in,naive,oneLink); - DhopSiteDepthHand(st,lo,UUU,buf,sF,sU,in,naik,threeLink); - out._odata[sF] =naive+naik; - break; + DhopSiteDepthHand(st,lo,U,UUU,buf,LLs,sU,in,out,dag); + break; case OptGeneric: - DhopSiteDepth(st,lo,U,buf,sF,sU,in,naive,oneLink); - DhopSiteDepth(st,lo,UUU,buf,sF,sU,in,naik,threeLink); - out._odata[sF] =naive+naik; + + for(int s=0;s diff --git a/lib/qcd/action/fermion/StaggeredKernels.h b/lib/qcd/action/fermion/StaggeredKernels.h index e4cc8cdd..dc91a30c 100644 --- a/lib/qcd/action/fermion/StaggeredKernels.h +++ b/lib/qcd/action/fermion/StaggeredKernels.h @@ -56,17 +56,21 @@ public: void DhopSiteDepth(StencilImpl &st, LebesgueOrder &lo, DoubledGaugeField &U, SiteSpinor * buf, int sF, int sU, const FermionField &in, SiteSpinor &out,int threeLink); - void DhopSiteDepthHand(StencilImpl &st, LebesgueOrder &lo, DoubledGaugeField &U, SiteSpinor * buf, - int sF, int sU, const FermionField &in, SiteSpinor &out,int threeLink); + + void DhopSiteDepthHandLocal(StencilImpl &st, LebesgueOrder &lo, DoubledGaugeField &U, SiteSpinor * buf, + int sF, int sU, const FermionField &in, SiteSpinor&out,int threeLink); + + void DhopSiteDepthHand(StencilImpl &st, LebesgueOrder &lo, DoubledGaugeField &U, DoubledGaugeField &UUU,SiteSpinor * buf, + int Lls, int sU, const FermionField &in, FermionField &out, int dag); void DhopSiteAsm(StencilImpl &st, LebesgueOrder &lo, DoubledGaugeField &U,DoubledGaugeField &UUU, SiteSpinor * buf, - int sF, int sU, const FermionField &in, SiteSpinor &out); + int LLs, int sU, const FermionField &in, FermionField &out); void DhopSite(StencilImpl &st, LebesgueOrder &lo, DoubledGaugeField &U, DoubledGaugeField &UUU, SiteSpinor * buf, int sF, int sU, const FermionField &in, FermionField &out); - void DhopSiteDag(StencilImpl &st, LebesgueOrder &lo, DoubledGaugeField &U, DoubledGaugeField &UUU, SiteSpinor * buf, - int sF, int sU, const FermionField &in, FermionField &out); + void DhopSiteDag(StencilImpl &st, LebesgueOrder &lo, DoubledGaugeField &U, DoubledGaugeField &UUU, SiteSpinor *buf, + int LLs, int sU, const FermionField &in, FermionField &out); public: diff --git a/lib/qcd/action/fermion/StaggeredKernelsAsm.cc b/lib/qcd/action/fermion/StaggeredKernelsAsm.cc index 890cf4e5..7f3624a1 100644 --- a/lib/qcd/action/fermion/StaggeredKernelsAsm.cc +++ b/lib/qcd/action/fermion/StaggeredKernelsAsm.cc @@ -1,4 +1,4 @@ - /************************************************************************************* +/************************************************************************************* Grid physics library, www.github.com/paboyle/Grid @@ -507,13 +507,37 @@ Author: paboyle VLOAD(2,%%r8,pChi_12) \ : : "r" (a1) : "%r8" ); -#define PF_CHI(a0) \ +#define PF_CHI(a0) 
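// Note the disabling pattern used for the prefetch macros here: PF_CHI(a0)
// is redefined to expand to nothing, while the real body is parked under the
// unused name PF_CHIa(a0); PF_GAUGE_XYZT/PF_GAUGE_LS below get the same
// treatment, so the software prefetches are switched off but kept on hand
// for tuning. The retained asm bodies issue VPREFETCH1 on consecutive cache
// lines of the next spinor or gauge operand. A hedged, self-contained
// analogue using the standard intrinsic (prefetch into L2 and above, roughly
// prefetcht1; the 64-byte line spacing is an assumption):
#include <xmmintrin.h>
inline void prefetchSpinor(const void *p) {
  const char *b = static_cast<const char *>(p);
  _mm_prefetch(b,       _MM_HINT_T1); // three cache lines, matching the
  _mm_prefetch(b + 64,  _MM_HINT_T1); // VPREFETCH1(0..2) calls in PF_CHIa
  _mm_prefetch(b + 128, _MM_HINT_T1); // (one line per colour component)
}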
+#define PF_CHIa(a0) \ asm ( \ "movq %0, %%r8 \n\t" \ VPREFETCH1(0,%%r8) \ VPREFETCH1(1,%%r8) \ VPREFETCH1(2,%%r8) \ : : "r" (a0) : "%r8" ); \ + +#define PF_GAUGE_XYZT(a0) +#define PF_GAUGE_XYZTa(a0) \ + asm ( \ + "movq %0, %%r8 \n\t" \ + VPREFETCH1(0,%%r8) \ + VPREFETCH1(1,%%r8) \ + VPREFETCH1(2,%%r8) \ + VPREFETCH1(3,%%r8) \ + VPREFETCH1(4,%%r8) \ + VPREFETCH1(5,%%r8) \ + VPREFETCH1(6,%%r8) \ + VPREFETCH1(7,%%r8) \ + VPREFETCH1(8,%%r8) \ + : : "r" (a0) : "%r8" ); \ + +#define PF_GAUGE_LS(a0) +#define PF_GAUGE_LSa(a0) \ + asm ( \ + "movq %0, %%r8 \n\t" \ + VPREFETCH1(0,%%r8) \ + VPREFETCH1(1,%%r8) \ + : : "r" (a0) : "%r8" ); \ #define REDUCE(out) \ @@ -556,40 +580,59 @@ template void StaggeredKernels::DhopSiteAsm(StencilImpl &st, LebesgueOrder &lo, DoubledGaugeField &U, DoubledGaugeField &UUU, - SiteSpinor *buf, int sF, - int sU, const FermionField &in, SiteSpinor &out) + SiteSpinor *buf, int LLs, + int sU, const FermionField &in, FermionField &out) { assert(0); -} +}; -#define PREPARE(X,Y,Z,T,skew,UU) \ - SE0=st.GetEntry(ptype,X+skew,sF); \ - o0 = SE0->_offset; \ - l0 = SE0->_is_local; \ - p0 = SE0->_permute; \ - addr0 = l0 ? (uint64_t) &in._odata[o0] : (uint64_t) &buf[o0]; \ + +//#define CONDITIONAL_MOVE(l,o,out) if ( l ) { out = (uint64_t) &in._odata[o] ; } else { out =(uint64_t) &buf[o]; } + +#define CONDITIONAL_MOVE(l,o,out) { const SiteSpinor *ptr = l? in_p : buf; out = (uint64_t) &ptr[o]; } + +#define PREPARE_XYZT(X,Y,Z,T,skew,UU) \ + PREPARE(X,Y,Z,T,skew,UU); \ + PF_GAUGE_XYZT(gauge0); \ + PF_GAUGE_XYZT(gauge1); \ + PF_GAUGE_XYZT(gauge2); \ + PF_GAUGE_XYZT(gauge3); + +#define PREPARE_LS(X,Y,Z,T,skew,UU) \ + PREPARE(X,Y,Z,T,skew,UU); \ + PF_GAUGE_LS(gauge0); \ + PF_GAUGE_LS(gauge1); \ + PF_GAUGE_LS(gauge2); \ + PF_GAUGE_LS(gauge3); + +#define PREPARE(X,Y,Z,T,skew,UU) \ + SE0=st.GetEntry(ptype,X+skew,sF); \ + o0 = SE0->_offset; \ + l0 = SE0->_is_local; \ + p0 = SE0->_permute; \ + CONDITIONAL_MOVE(l0,o0,addr0); \ PF_CHI(addr0); \ \ SE1=st.GetEntry(ptype,Y+skew,sF); \ o1 = SE1->_offset; \ l1 = SE1->_is_local; \ p1 = SE1->_permute; \ - addr1 = l1 ? (uint64_t) &in._odata[o1] : (uint64_t) &buf[o1]; \ + CONDITIONAL_MOVE(l1,o1,addr1); \ PF_CHI(addr1); \ \ SE2=st.GetEntry(ptype,Z+skew,sF); \ o2 = SE2->_offset; \ l2 = SE2->_is_local; \ p2 = SE2->_permute; \ - addr2 = l2 ? (uint64_t) &in._odata[o2] : (uint64_t) &buf[o2]; \ + CONDITIONAL_MOVE(l2,o2,addr2); \ PF_CHI(addr2); \ \ SE3=st.GetEntry(ptype,T+skew,sF); \ o3 = SE3->_offset; \ l3 = SE3->_is_local; \ p3 = SE3->_permute; \ - addr3 = l3 ? 
(uint64_t) &in._odata[o3] : (uint64_t) &buf[o3]; \ + CONDITIONAL_MOVE(l3,o3,addr3); \ PF_CHI(addr3); \ \ gauge0 =(uint64_t)&UU._odata[sU]( X ); \ @@ -602,12 +645,13 @@ void StaggeredKernels::DhopSiteAsm(StencilImpl &st, LebesgueOrder &lo, template <> void StaggeredKernels::DhopSiteAsm(StencilImpl &st, LebesgueOrder &lo, DoubledGaugeField &U, DoubledGaugeField &UUU, - SiteSpinor *buf, int sF, - int sU, const FermionField &in, SiteSpinor &out) + SiteSpinor *buf, int LLs, + int sU, const FermionField &in, FermionField &out) { #ifdef AVX512 uint64_t gauge0,gauge1,gauge2,gauge3; uint64_t addr0,addr1,addr2,addr3; + const SiteSpinor *in_p; in_p = &in._odata[0]; int o0,o1,o2,o3; // offsets int l0,l1,l2,l3; // local @@ -618,42 +662,46 @@ template <> void StaggeredKernels::DhopSiteAsm(StencilImpl StencilEntry *SE2; StencilEntry *SE3; - // Xp, Yp, Zp, Tp + for(int s=0;s template <> void StaggeredKernels::DhopSiteAsm(StencilImpl &st, LebesgueOrder &lo, DoubledGaugeField &U, DoubledGaugeField &UUU, - SiteSpinor *buf, int sF, - int sU, const FermionField &in, SiteSpinor &out) + SiteSpinor *buf, int LLs, + int sU, const FermionField &in, FermionField &out) { #ifdef AVX512 uint64_t gauge0,gauge1,gauge2,gauge3; uint64_t addr0,addr1,addr2,addr3; + const SiteSpinor *in_p; in_p = &in._odata[0]; int o0,o1,o2,o3; // offsets int l0,l1,l2,l3; // local @@ -664,30 +712,34 @@ template <> void StaggeredKernels::DhopSiteAsm(StencilImpl StencilEntry *SE2; StencilEntry *SE3; - // Xp, Yp, Zp, Tp + for(int s=0;s void StaggeredKernels::DhopSiteAsm(StencilImpl VPERM0(Chi_12,Chi_12) ); // This is the single precision 5th direction vectorised kernel + #include template <> void StaggeredKernels::DhopSiteAsm(StencilImpl &st, LebesgueOrder &lo, - DoubledGaugeField &U, - DoubledGaugeField &UUU, - SiteSpinor *buf, int sF, - int sU, const FermionField &in, SiteSpinor &out) + DoubledGaugeField &U, + DoubledGaugeField &UUU, + SiteSpinor *buf, int LLs, + int sU, const FermionField &in, FermionField &out) { #ifdef AVX512 uint64_t gauge0,gauge1,gauge2,gauge3; uint64_t addr0,addr1,addr2,addr3; + const SiteSpinor *in_p; in_p = &in._odata[0]; int o0,o1,o2,o3; // offsets int l0,l1,l2,l3; // local @@ -731,66 +785,46 @@ template <> void StaggeredKernels::DhopSiteAsm(StencilImpl &st, StencilEntry *SE2; StencilEntry *SE3; - // Xp, Yp, Zp, Tp - PREPARE(Xp,Yp,Zp,Tp,0,U); - LOAD_CHIa(addr0,addr1); - if (l0&&p0) { PERMUTE_DIR3; } - if (l1&&p1) { PERMUTE_DIR2; } - MULT_XYZT(gauge0,gauge1); - LOAD_CHIa(addr2,addr3); - if (l2&&p2) { PERMUTE_DIR1; } - if (l3&&p3) { PERMUTE_DIR0; } - MULT_ADD_XYZT(gauge2,gauge3); + for(int s=0;s template <> void StaggeredKernels::DhopSiteAsm(StencilImpl &st, LebesgueOrder &lo, - DoubledGaugeField &U, - DoubledGaugeField &UUU, - SiteSpinor *buf, int sF, - int sU, const FermionField &in, SiteSpinor &out) + DoubledGaugeField &U, + DoubledGaugeField &UUU, + SiteSpinor *buf, int LLs, + int sU, const FermionField &in, FermionField &out) { #ifdef AVX512 uint64_t gauge0,gauge1,gauge2,gauge3; uint64_t addr0,addr1,addr2,addr3; + const SiteSpinor *in_p; in_p = &in._odata[0]; int o0,o1,o2,o3; // offsets int l0,l1,l2,l3; // local @@ -801,57 +835,35 @@ template <> void StaggeredKernels::DhopSiteAsm(StencilImpl &st, StencilEntry *SE2; StencilEntry *SE3; - // Xp, Yp, Zp, Tp + for(int s=0;s -void StaggeredKernels::DhopSiteDepthHand(StencilImpl &st, LebesgueOrder &lo, DoubledGaugeField &U, +void StaggeredKernels::DhopSiteDepthHand(StencilImpl &st, LebesgueOrder &lo, DoubledGaugeField &U,DoubledGaugeField &UUU, + SiteSpinor *buf, int LLs, 
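// (parameter list continues below) The CONDITIONAL_MOVE rewrite in the
// PREPARE macro above replaces the inline ternary on two differently-typed
// expressions with a select between two base pointers of one type:
//
//   #define CONDITIONAL_MOVE(l,o,out) \
//     { const SiteSpinor *ptr = l ? in_p : buf; out = (uint64_t)&ptr[o]; }
//
// Local stencil entries read from the fermion field itself, non-local ones
// from the comms buffer filled by HaloExchange; hoisting
// in_p = &in._odata[0] out of the site loop lets both base pointers live in
// registers across the 5th-dimension sweep.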
+ int sU, const FermionField &in, FermionField &out, int dag) { + + SiteSpinor naik; + SiteSpinor naive; + int oneLink =0; + int threeLink=1; + int skew(0); + Real scale(1.0); + + if(dag) scale = -1.0; + + for(int s=0;s +void StaggeredKernels::DhopSiteDepthHandLocal(StencilImpl &st, LebesgueOrder &lo, DoubledGaugeField &U, SiteSpinor *buf, int sF, int sU, const FermionField &in, SiteSpinor &out,int threeLink) { { diff --git a/tests/core/Test_staggered5D.cc b/tests/core/Test_staggered5D.cc index be31c438..a7b00399 100644 --- a/tests/core/Test_staggered5D.cc +++ b/tests/core/Test_staggered5D.cc @@ -153,7 +153,7 @@ int main (int argc, char ** argv) std::cout< seeds({1,2,3,4}); + /* GridParallelRNG pRNG4(UGrid); GridParallelRNG pRNG5(FGrid); pRNG4.SeedFixedIntegers(seeds); pRNG5.SeedFixedIntegers(seeds); - + */ typedef typename ImprovedStaggeredFermion5DR::FermionField FermionField; typedef typename ImprovedStaggeredFermion5DR::ComplexField ComplexField; typename ImprovedStaggeredFermion5DR::ImplParams params; - FermionField src (FGrid); + FermionField src (FGrid); src=zero; - random(pRNG5,src); + // random(pRNG5,src); /* std::vector site({0,0,0,0,0}); ColourVector cv = zero; @@ -80,10 +81,10 @@ int main (int argc, char ** argv) FermionField result(FGrid); result=zero; FermionField tmp(FGrid); tmp=zero; FermionField err(FGrid); tmp=zero; - FermionField phi (FGrid); random(pRNG5,phi); - FermionField chi (FGrid); random(pRNG5,chi); + FermionField phi (FGrid); phi=1.0;//random(pRNG5,phi); + FermionField chi (FGrid); chi=1.0;//random(pRNG5,chi); - LatticeGaugeField Umu(UGrid); SU3::HotConfiguration(pRNG4,Umu); + LatticeGaugeField Umu(UGrid); Umu=1.0; //SU3::HotConfiguration(pRNG4,Umu); /* for(int mu=1;mu<4;mu++){ @@ -109,15 +110,18 @@ int main (int argc, char ** argv) std::cout< Date: Tue, 21 Feb 2017 23:01:25 +0000 Subject: [PATCH 065/101] Verified --- benchmarks/Benchmark_staggered.cc | 2 +- .../fermion/ImprovedStaggeredFermion.cc | 4 +- .../fermion/ImprovedStaggeredFermion5D.cc | 19 --- lib/qcd/action/fermion/StaggeredKernels.cc | 56 +++--- lib/qcd/action/fermion/StaggeredKernels.h | 6 +- lib/qcd/action/fermion/StaggeredKernelsAsm.cc | 159 +++++++++++------- .../action/fermion/StaggeredKernelsHand.cc | 48 +++--- tests/core/Test_staggered5Dvec.cc | 22 ++- 8 files changed, 163 insertions(+), 153 deletions(-) diff --git a/benchmarks/Benchmark_staggered.cc b/benchmarks/Benchmark_staggered.cc index 9860e59d..121dc0d5 100644 --- a/benchmarks/Benchmark_staggered.cc +++ b/benchmarks/Benchmark_staggered.cc @@ -115,7 +115,7 @@ int main (int argc, char ** argv) ImprovedStaggeredFermionR Ds(Umu,Umu,Grid,RBGrid,mass,c1,c2,u0,params); std::cout<::DhopInternal(StencilImpl &st, LebesgueOrder if (dag == DaggerYes) { PARALLEL_FOR_LOOP for (int sss = 0; sss < in._grid->oSites(); sss++) { - Kernels::DhopSiteDag(st, lo, U, UUU, st.CommBuf(), sss, sss, in, out); + Kernels::DhopSiteDag(st, lo, U, UUU, st.CommBuf(), 1, sss, in, out); } } else { PARALLEL_FOR_LOOP for (int sss = 0; sss < in._grid->oSites(); sss++) { - Kernels::DhopSite(st, lo, U, UUU, st.CommBuf(), sss, sss, in, out); + Kernels::DhopSite(st, lo, U, UUU, st.CommBuf(), 1, sss, in, out); } } }; diff --git a/lib/qcd/action/fermion/ImprovedStaggeredFermion5D.cc b/lib/qcd/action/fermion/ImprovedStaggeredFermion5D.cc index 7068fc3f..fdbbc441 100644 --- a/lib/qcd/action/fermion/ImprovedStaggeredFermion5D.cc +++ b/lib/qcd/action/fermion/ImprovedStaggeredFermion5D.cc @@ -228,9 +228,7 @@ void ImprovedStaggeredFermion5D::DhopInternal(StencilImpl & st, LebesgueOr 
const FermionField &in, FermionField &out,int dag) { Compressor compressor; - int LLs = in._grid->_rdimensions[0]; - st.HaloExchange(in,compressor); // Dhop takes the 4d grid from U, and makes a 5d index for fermion @@ -241,28 +239,11 @@ void ImprovedStaggeredFermion5D::DhopInternal(StencilImpl & st, LebesgueOr Kernels::DhopSiteDag(st, lo, U, UUU, st.CommBuf(), LLs, sU,in, out); } } else { -#if 1 PARALLEL_FOR_LOOP for (int ss = 0; ss < U._grid->oSites(); ss++) { int sU=ss; Kernels::DhopSite(st,lo,U,UUU,st.CommBuf(),LLs,sU,in,out); } -#else -#pragma omp parallel - { - for(int i=0;i<10;i++){ - int len = U._grid->oSites(); - int me,mywork,myoff; - GridThread::GetWorkBarrier(len,me, mywork,myoff); - for (int ss = myoff; ss < myoff+mywork; ss++) { - int sU=ss; - int sF=LLs*sU; - Kernels::DhopSite(st,lo,U,UUU,st.CommBuf(),LLs,sU,in,out); - } - GridThread::ThreadBarrier(); - } - } -#endif } } diff --git a/lib/qcd/action/fermion/StaggeredKernels.cc b/lib/qcd/action/fermion/StaggeredKernels.cc index 597b14ea..a62daa13 100644 --- a/lib/qcd/action/fermion/StaggeredKernels.cc +++ b/lib/qcd/action/fermion/StaggeredKernels.cc @@ -186,32 +186,31 @@ template void StaggeredKernels::DhopSiteDag(StencilImpl &st, LebesgueOrder &lo, DoubledGaugeField &U, DoubledGaugeField &UUU, SiteSpinor *buf, int LLs, int sU, const FermionField &in, FermionField &out) { - int dag(1); SiteSpinor naik; SiteSpinor naive; int oneLink =0; int threeLink=1; - Real scale; - if(dag) scale = -1.0; - else scale = 1.0; - + int dag=1; switch(Opt) { #ifdef AVX512 + //FIXME; move the sign into the Asm routine case OptInlineAsm: DhopSiteAsm(st,lo,U,UUU,buf,LLs,sU,in,out); + for(int s=0;s::DhopSiteDag(StencilImpl &st, LebesgueOrder &lo, Dou template void StaggeredKernels::DhopSite(StencilImpl &st, LebesgueOrder &lo, DoubledGaugeField &U, DoubledGaugeField &UUU, SiteSpinor *buf, int LLs, - int sU, const FermionField &in, FermionField &out) { - - int dag(0); - - int oneLink =0; - int threeLink=1; - SiteSpinor naik; - SiteSpinor naive; - static int once; - int sF=LLs*sU; - + int sU, const FermionField &in, FermionField &out) +{ + int oneLink =0; + int threeLink=1; + SiteSpinor naik; + SiteSpinor naive; + int dag=0; switch(Opt) { #ifdef AVX512 case OptInlineAsm: @@ -241,22 +236,23 @@ void StaggeredKernels::DhopSite(StencilImpl &st, LebesgueOrder &lo, Double break; #endif case OptHandUnroll: - DhopSiteDepthHand(st,lo,U,UUU,buf,LLs,sU,in,out,dag); - break; + DhopSiteHand(st,lo,U,UUU,buf,LLs,sU,in,out,dag); + break; case OptGeneric: - - for(int s=0;s=0); assert(sU>=0); + DhopSiteDepth(st,lo,U,buf,sF,sU,in,naive,oneLink); + DhopSiteDepth(st,lo,UUU,buf,sF,sU,in,naik,threeLink); + out._odata[sF] =naive+naik; + } break; default: assert(0); break; } - }; template diff --git a/lib/qcd/action/fermion/StaggeredKernels.h b/lib/qcd/action/fermion/StaggeredKernels.h index dc91a30c..a45214d3 100644 --- a/lib/qcd/action/fermion/StaggeredKernels.h +++ b/lib/qcd/action/fermion/StaggeredKernels.h @@ -57,11 +57,11 @@ public: int sF, int sU, const FermionField &in, SiteSpinor &out,int threeLink); - void DhopSiteDepthHandLocal(StencilImpl &st, LebesgueOrder &lo, DoubledGaugeField &U, SiteSpinor * buf, + void DhopSiteDepthHand(StencilImpl &st, LebesgueOrder &lo, DoubledGaugeField &U, SiteSpinor * buf, int sF, int sU, const FermionField &in, SiteSpinor&out,int threeLink); - void DhopSiteDepthHand(StencilImpl &st, LebesgueOrder &lo, DoubledGaugeField &U, DoubledGaugeField &UUU,SiteSpinor * buf, - int Lls, int sU, const FermionField &in, FermionField &out, int dag); + 
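// The signature shift in this header is the heart of the optimisation: the
// per-site entry points DhopSite/DhopSiteDag/DhopSiteHand/DhopSiteAsm now
// take the 4d gauge site sU together with the local fifth-dimension extent
// LLs and sweep s = 0..LLs-1 internally, so the gauge links at sU are loaded
// once and reused across the whole 5th dimension. The fermion-site index
// convention used throughout these hunks:
inline int fermionSite(int LLs, int sU, int s) {
  return LLs * sU + s; // 5th dimension runs fastest: sF = LLs*sU .. LLs*sU+LLs-1
}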
void DhopSiteHand(StencilImpl &st, LebesgueOrder &lo, DoubledGaugeField &U, DoubledGaugeField &UUU,SiteSpinor * buf, + int LLs, int sU, const FermionField &in, FermionField &out, int dag); void DhopSiteAsm(StencilImpl &st, LebesgueOrder &lo, DoubledGaugeField &U,DoubledGaugeField &UUU, SiteSpinor * buf, int LLs, int sU, const FermionField &in, FermionField &out); diff --git a/lib/qcd/action/fermion/StaggeredKernelsAsm.cc b/lib/qcd/action/fermion/StaggeredKernelsAsm.cc index 7f3624a1..0c62b2a0 100644 --- a/lib/qcd/action/fermion/StaggeredKernelsAsm.cc +++ b/lib/qcd/action/fermion/StaggeredKernelsAsm.cc @@ -517,7 +517,7 @@ Author: paboyle : : "r" (a0) : "%r8" ); \ #define PF_GAUGE_XYZT(a0) -#define PF_GAUGE_XYZTa(a0) \ +#define PF_GAUGE_XYZTa(a0) \ asm ( \ "movq %0, %%r8 \n\t" \ VPREFETCH1(0,%%r8) \ @@ -578,10 +578,10 @@ namespace QCD { template void StaggeredKernels::DhopSiteAsm(StencilImpl &st, LebesgueOrder &lo, - DoubledGaugeField &U, - DoubledGaugeField &UUU, - SiteSpinor *buf, int LLs, - int sU, const FermionField &in, FermionField &out) + DoubledGaugeField &U, + DoubledGaugeField &UUU, + SiteSpinor *buf, int LLs, + int sU, const FermionField &in, FermionField &out) { assert(0); @@ -611,35 +611,35 @@ void StaggeredKernels::DhopSiteAsm(StencilImpl &st, LebesgueOrder &lo, o0 = SE0->_offset; \ l0 = SE0->_is_local; \ p0 = SE0->_permute; \ - CONDITIONAL_MOVE(l0,o0,addr0); \ + CONDITIONAL_MOVE(l0,o0,addr0); \ PF_CHI(addr0); \ - \ - SE1=st.GetEntry(ptype,Y+skew,sF); \ - o1 = SE1->_offset; \ - l1 = SE1->_is_local; \ - p1 = SE1->_permute; \ - CONDITIONAL_MOVE(l1,o1,addr1); \ + \ + SE1=st.GetEntry(ptype,Y+skew,sF); \ + o1 = SE1->_offset; \ + l1 = SE1->_is_local; \ + p1 = SE1->_permute; \ + CONDITIONAL_MOVE(l1,o1,addr1); \ PF_CHI(addr1); \ - \ - SE2=st.GetEntry(ptype,Z+skew,sF); \ - o2 = SE2->_offset; \ - l2 = SE2->_is_local; \ - p2 = SE2->_permute; \ - CONDITIONAL_MOVE(l2,o2,addr2); \ + \ + SE2=st.GetEntry(ptype,Z+skew,sF); \ + o2 = SE2->_offset; \ + l2 = SE2->_is_local; \ + p2 = SE2->_permute; \ + CONDITIONAL_MOVE(l2,o2,addr2); \ PF_CHI(addr2); \ - \ - SE3=st.GetEntry(ptype,T+skew,sF); \ - o3 = SE3->_offset; \ - l3 = SE3->_is_local; \ - p3 = SE3->_permute; \ - CONDITIONAL_MOVE(l3,o3,addr3); \ + \ + SE3=st.GetEntry(ptype,T+skew,sF); \ + o3 = SE3->_offset; \ + l3 = SE3->_is_local; \ + p3 = SE3->_permute; \ + CONDITIONAL_MOVE(l3,o3,addr3); \ PF_CHI(addr3); \ \ - gauge0 =(uint64_t)&UU._odata[sU]( X ); \ - gauge1 =(uint64_t)&UU._odata[sU]( Y ); \ - gauge2 =(uint64_t)&UU._odata[sU]( Z ); \ + gauge0 =(uint64_t)&UU._odata[sU]( X ); \ + gauge1 =(uint64_t)&UU._odata[sU]( Y ); \ + gauge2 =(uint64_t)&UU._odata[sU]( Z ); \ gauge3 =(uint64_t)&UU._odata[sU]( T ); - + // This is the single precision 5th direction vectorised kernel #include template <> void StaggeredKernels::DhopSiteAsm(StencilImpl &st, LebesgueOrder &lo, @@ -762,6 +762,14 @@ template <> void StaggeredKernels::DhopSiteAsm(StencilImpl VPERM0(Chi_11,Chi_11) \ VPERM0(Chi_12,Chi_12) ); +#define PERMUTE01 \ + if ( p0 ) { PERMUTE_DIR3; }\ + if ( p1 ) { PERMUTE_DIR2; } + +#define PERMUTE23 \ + if ( p2 ) { PERMUTE_DIR1; }\ + if ( p3 ) { PERMUTE_DIR0; } + // This is the single precision 5th direction vectorised kernel #include @@ -785,35 +793,50 @@ template <> void StaggeredKernels::DhopSiteAsm(StencilImpl &st, StencilEntry *SE2; StencilEntry *SE3; - for(int s=0;s template <> void StaggeredKernels::DhopSiteAsm(StencilImpl &st, LebesgueOrder &lo, DoubledGaugeField &U, @@ -835,31 +858,47 @@ template <> void StaggeredKernels::DhopSiteAsm(StencilImpl &st, 
StencilEntry *SE2; StencilEntry *SE3; - for(int s=0;s -void StaggeredKernels::DhopSiteDepthHand(StencilImpl &st, LebesgueOrder &lo, DoubledGaugeField &U,DoubledGaugeField &UUU, - SiteSpinor *buf, int LLs, - int sU, const FermionField &in, FermionField &out, int dag) { - - SiteSpinor naik; - SiteSpinor naive; - int oneLink =0; - int threeLink=1; - int skew(0); - Real scale(1.0); - - if(dag) scale = -1.0; - - for(int s=0;s::DhopSiteHand(StencilImpl &st, LebesgueOrder &lo, DoubledGaugeField &U,DoubledGaugeField &UUU, + SiteSpinor *buf, int LLs, + int sU, const FermionField &in, FermionField &out, int dag) +{ + SiteSpinor naik; + SiteSpinor naive; + int oneLink =0; + int threeLink=1; + int skew(0); + Real scale(1.0); + + if(dag) scale = -1.0; + + for(int s=0;s -void StaggeredKernels::DhopSiteDepthHandLocal(StencilImpl &st, LebesgueOrder &lo, DoubledGaugeField &U, - SiteSpinor *buf, int sF, - int sU, const FermionField &in, SiteSpinor &out,int threeLink) { +void StaggeredKernels::DhopSiteDepthHand(StencilImpl &st, LebesgueOrder &lo, DoubledGaugeField &U, + SiteSpinor *buf, int sF, + int sU, const FermionField &in, SiteSpinor &out,int threeLink) { typedef typename Simd::scalar_type S; typedef typename Simd::vector_type V; @@ -300,7 +297,6 @@ void StaggeredKernels::DhopSiteDepthHandLocal(StencilImpl &st, LebesgueOrd vstream(out()()(1),even_1+odd_1); vstream(out()()(2),even_2+odd_2); - } } FermOpStaggeredTemplateInstantiate(StaggeredKernels); diff --git a/tests/core/Test_staggered5Dvec.cc b/tests/core/Test_staggered5Dvec.cc index 13374578..ef215ed4 100644 --- a/tests/core/Test_staggered5Dvec.cc +++ b/tests/core/Test_staggered5Dvec.cc @@ -57,34 +57,33 @@ int main (int argc, char ** argv) std::cout< seeds({1,2,3,4}); - /* + GridParallelRNG pRNG4(UGrid); GridParallelRNG pRNG5(FGrid); pRNG4.SeedFixedIntegers(seeds); pRNG5.SeedFixedIntegers(seeds); - */ + typedef typename ImprovedStaggeredFermion5DR::FermionField FermionField; typedef typename ImprovedStaggeredFermion5DR::ComplexField ComplexField; typename ImprovedStaggeredFermion5DR::ImplParams params; - FermionField src (FGrid); src=zero; - - // random(pRNG5,src); + FermionField src (FGrid); + random(pRNG5,src); /* - std::vector site({0,0,0,0,0}); + std::vector site({0,1,2,0,0}); ColourVector cv = zero; cv()()(0)=1.0; src = zero; pokeSite(cv,src,site); */ - FermionField result(FGrid); result=zero; FermionField tmp(FGrid); tmp=zero; FermionField err(FGrid); tmp=zero; - FermionField phi (FGrid); phi=1.0;//random(pRNG5,phi); - FermionField chi (FGrid); chi=1.0;//random(pRNG5,chi); + FermionField phi (FGrid); random(pRNG5,phi); + FermionField chi (FGrid); random(pRNG5,chi); - LatticeGaugeField Umu(UGrid); Umu=1.0; //SU3::HotConfiguration(pRNG4,Umu); + LatticeGaugeField Umu(UGrid); + SU3::HotConfiguration(pRNG4,Umu); /* for(int mu=1;mu<4;mu++){ @@ -103,7 +102,7 @@ int main (int argc, char ** argv) RealD c2=-1.0/24.0; RealD u0=1.0; - ImprovedStaggeredFermion5DR Ds(Umu,Umu,*FGrid,*FrbGrid,*UGrid,*UrbGrid,mass,c1,c2,u0,params); + ImprovedStaggeredFermion5DR Ds(Umu,Umu,*FGrid,*FrbGrid,*UGrid,*UrbGrid,mass,c1,c2,u0,params); ImprovedStaggeredFermionVec5dR sDs(Umu,Umu,*sFGrid,*sFrbGrid,*sUGrid,*sUrbGrid,mass,c1,c2,u0,params); std::cout< Date: Wed, 22 Feb 2017 12:19:09 -0500 Subject: [PATCH 066/101] Bug fix from Chris K --- lib/lattice/Lattice_transfer.h | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/lib/lattice/Lattice_transfer.h b/lib/lattice/Lattice_transfer.h index c2bd6cf7..3f4719c5 100644 --- a/lib/lattice/Lattice_transfer.h +++ 
b/lib/lattice/Lattice_transfer.h @@ -388,11 +388,11 @@ void InsertSlice(Lattice &lowDim,Lattice & higherDim,int slice, int std::vector lcoor(nl); std::vector hcoor(nh); lg->LocalIndexToLocalCoor(idx,lcoor); - dl=0; + int ddl=0; hcoor[orthog] = slice; for(int d=0;d &lowDim, Lattice & higherDim,int slice, in std::vector lcoor(nl); std::vector hcoor(nh); lg->LocalIndexToLocalCoor(idx,lcoor); - dl=0; + ddl=0; hcoor[orthog] = slice; for(int d=0;d Date: Wed, 22 Feb 2017 18:09:33 +0000 Subject: [PATCH 067/101] Refactoring header layout --- lib/Grid.h | 52 +---- lib/GridCore.h | 81 ++++++++ lib/{qcd/hmc/HMC.cc => GridQCDcore.h} | 21 +- lib/Old/Endeavour.tgz | Bin 550067 -> 0 bytes lib/Old/Tensor_peek.h | 154 --------------- lib/Old/Tensor_poke.h | 127 ------------ lib/{ => algorithms}/Algorithms.h | 3 +- lib/{ => algorithms}/FFT.h | 0 lib/algorithms/approx/MultiShiftFunction.cc | 2 +- lib/algorithms/approx/Remez.cc | 2 +- lib/algorithms/approx/Remez.h | 2 +- lib/{ => allocator}/AlignedAllocator.cc | 2 +- lib/{ => allocator}/AlignedAllocator.h | 0 lib/{ => cartesian}/Cartesian.h | 0 lib/{ => communicator}/Communicator.h | 0 lib/communicator/Communicator_base.cc | 3 +- lib/communicator/Communicator_mpi.cc | 2 +- lib/communicator/Communicator_mpi3.cc | 4 +- lib/communicator/Communicator_none.cc | 3 +- lib/communicator/Communicator_shmem.cc | 2 +- lib/{ => cshift}/Cshift.h | 0 lib/{ => lattice}/Lattice.h | 0 lib/{ => log}/Log.cc | 2 +- lib/{ => log}/Log.h | 0 lib/{ => perfmon}/PerfCount.cc | 4 +- lib/{ => perfmon}/PerfCount.h | 0 lib/{ => perfmon}/Stat.cc | 6 +- lib/{ => perfmon}/Stat.h | 0 lib/{ => perfmon}/Timer.h | 0 lib/qcd/QCD.h | 26 +-- lib/qcd/action/Action.h | 55 ++++++ lib/qcd/action/ActionBase.h | 1 + lib/qcd/action/ActionCore.h | 45 +++++ lib/qcd/action/fermion/CayleyFermion5D.cc | 4 +- lib/qcd/action/fermion/CayleyFermion5D.h | 2 + .../action/fermion/CayleyFermion5Dcache.cc | 3 +- .../action/fermion/CayleyFermion5Ddense.cc | 3 +- lib/qcd/action/fermion/CayleyFermion5Dssp.cc | 3 +- lib/qcd/action/fermion/CayleyFermion5Dvec.cc | 3 +- .../fermion/ContinuedFractionFermion5D.cc | 3 +- .../fermion/ContinuedFractionFermion5D.h | 2 + lib/qcd/action/fermion/DomainWallFermion.h | 2 +- .../action/{Actions.h => fermion/Fermion.h} | 127 +----------- lib/qcd/action/fermion/FermionCore.h | 71 +++++++ lib/qcd/action/fermion/MobiusFermion.h | 2 +- .../action/fermion/MobiusZolotarevFermion.h | 2 +- .../fermion/OverlapWilsonCayleyTanhFermion.h | 2 +- .../OverlapWilsonCayleyZolotarevFermion.h | 2 +- .../OverlapWilsonContfracTanhFermion.h | 2 +- .../OverlapWilsonContfracZolotarevFermion.h | 2 +- .../OverlapWilsonPartialFractionTanhFermion.h | 2 +- ...lapWilsonPartialFractionZolotarevFermion.h | 2 +- .../fermion/PartialFractionFermion5D.cc | 3 +- .../action/fermion/PartialFractionFermion5D.h | 2 + lib/qcd/action/fermion/ScaledShamirFermion.h | 2 +- .../action/fermion/ShamirZolotarevFermion.h | 2 +- lib/qcd/action/fermion/WilsonFermion.cc | 187 +++++++++--------- lib/qcd/action/fermion/WilsonFermion5D.cc | 5 +- lib/qcd/action/fermion/WilsonFermion5D.h | 2 +- lib/qcd/action/fermion/WilsonKernels.cc | 2 +- lib/qcd/action/fermion/WilsonKernelsAsm.cc | 2 +- lib/qcd/action/fermion/WilsonKernelsHand.cc | 2 +- lib/qcd/action/fermion/WilsonTMFermion.cc | 3 +- lib/qcd/action/fermion/WilsonTMFermion.h | 3 +- lib/qcd/action/fermion/ZMobiusFermion.h | 2 +- lib/qcd/action/gauge/Gauge.h | 70 +++++++ lib/qcd/action/pseudofermion/PseudoFermion.h | 42 ++++ lib/qcd/hmc/HMC.h | 8 + lib/qcd/hmc/HMC_aggregate.h | 42 ++++ 
lib/qcd/representations/Representations.h | 9 + lib/qcd/representations/adjoint.h | 2 +- lib/qcd/spin/Dirac.cc | 3 +- lib/qcd/spin/Spin.h | 5 + lib/qcd/utils/SUnAdjoint.h | 2 +- lib/qcd/utils/SpaceTimeGrid.cc | 3 +- lib/qcd/utils/Utils.h | 9 + lib/serialisation/BinaryIO.cc | 3 +- lib/serialisation/TextIO.cc | 2 +- lib/serialisation/XmlIO.cc | 2 +- lib/{ => simd}/Simd.h | 4 +- lib/stencil/Lebesgue.cc | 2 +- lib/{ => stencil}/Stencil.cc | 2 +- lib/{ => stencil}/Stencil.h | 0 lib/{ => tensors}/Tensors.h | 0 lib/{ => threads}/Threads.h | 0 lib/{ => util}/Init.cc | 3 +- lib/{ => util}/Init.h | 0 lib/{ => util}/Lexicographic.h | 0 lib/util/Util.h | 5 + 89 files changed, 637 insertions(+), 636 deletions(-) create mode 100644 lib/GridCore.h rename lib/{qcd/hmc/HMC.cc => GridQCDcore.h} (75%) delete mode 100644 lib/Old/Endeavour.tgz delete mode 100644 lib/Old/Tensor_peek.h delete mode 100644 lib/Old/Tensor_poke.h rename lib/{ => algorithms}/Algorithms.h (98%) rename lib/{ => algorithms}/FFT.h (100%) rename lib/{ => allocator}/AlignedAllocator.cc (97%) rename lib/{ => allocator}/AlignedAllocator.h (100%) rename lib/{ => cartesian}/Cartesian.h (100%) rename lib/{ => communicator}/Communicator.h (100%) rename lib/{ => cshift}/Cshift.h (100%) rename lib/{ => lattice}/Lattice.h (100%) rename lib/{ => log}/Log.cc (99%) rename lib/{ => log}/Log.h (100%) rename lib/{ => perfmon}/PerfCount.cc (98%) rename lib/{ => perfmon}/PerfCount.h (100%) rename lib/{ => perfmon}/Stat.cc (98%) rename lib/{ => perfmon}/Stat.h (100%) rename lib/{ => perfmon}/Timer.h (100%) create mode 100644 lib/qcd/action/Action.h create mode 100644 lib/qcd/action/ActionCore.h rename lib/qcd/action/{Actions.h => fermion/Fermion.h} (64%) create mode 100644 lib/qcd/action/fermion/FermionCore.h create mode 100644 lib/qcd/action/gauge/Gauge.h create mode 100644 lib/qcd/action/pseudofermion/PseudoFermion.h create mode 100644 lib/qcd/hmc/HMC_aggregate.h create mode 100644 lib/qcd/representations/Representations.h create mode 100644 lib/qcd/spin/Spin.h create mode 100644 lib/qcd/utils/Utils.h rename lib/{ => simd}/Simd.h (99%) rename lib/{ => stencil}/Stencil.cc (98%) rename lib/{ => stencil}/Stencil.h (100%) rename lib/{ => tensors}/Tensors.h (100%) rename lib/{ => threads}/Threads.h (100%) rename lib/{ => util}/Init.cc (99%) rename lib/{ => util}/Init.h (100%) rename lib/{ => util}/Lexicographic.h (100%) create mode 100644 lib/util/Util.h diff --git a/lib/Grid.h b/lib/Grid.h index 0c5983f3..6430703a 100644 --- a/lib/Grid.h +++ b/lib/Grid.h @@ -38,52 +38,10 @@ Author: paboyle #ifndef GRID_H #define GRID_H -/////////////////// -// Std C++ dependencies -/////////////////// -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -/////////////////// -// Grid headers -/////////////////// -#include -#include "Config.h" -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include - - +#include +#include +#include +#include +#include #endif diff --git a/lib/GridCore.h b/lib/GridCore.h new file mode 100644 index 00000000..6ec23594 --- /dev/null +++ b/lib/GridCore.h @@ -0,0 +1,81 @@ + /************************************************************************************* + + Grid physics library, www.github.com/paboyle/Grid + + Source file: ./lib/Grid.h + + Copyright (C) 2015 + +Author: Peter Boyle +Author: azusayamaguchi 
+Author: paboyle + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License along + with this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + + See the full license in the file "LICENSE" in the top level distribution directory + *************************************************************************************/ + /* END LEGAL */ +// +// Grid.h +// simd +// +// Created by Peter Boyle on 09/05/2014. +// Copyright (c) 2014 University of Edinburgh. All rights reserved. +// + +#ifndef GRID_BASE_H +#define GRID_BASE_H + +/////////////////// +// Std C++ dependencies +/////////////////// +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/////////////////// +// Grid headers +/////////////////// +#include "Config.h" + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#endif diff --git a/lib/qcd/hmc/HMC.cc b/lib/GridQCDcore.h similarity index 75% rename from lib/qcd/hmc/HMC.cc rename to lib/GridQCDcore.h index 3cb39111..0a9fdbd8 100644 --- a/lib/qcd/hmc/HMC.cc +++ b/lib/GridQCDcore.h @@ -2,12 +2,12 @@ Grid physics library, www.github.com/paboyle/Grid - Source file: ./lib/qcd/hmc/HMC.cc + Source file: ./lib/Grid.h Copyright (C) 2015 Author: Peter Boyle -Author: neo +Author: azusayamaguchi Author: paboyle This program is free software; you can redistribute it and/or modify @@ -27,10 +27,17 @@ Author: paboyle See the full license in the file "LICENSE" in the top level distribution directory *************************************************************************************/ /* END LEGAL */ -#include -namespace Grid{ - namespace QCD{ +#ifndef GRID_QCD_CORE_H +#define GRID_QCD_CORE_H - } -} +///////////////////////// +// Core Grid QCD headers +///////////////////////// +#include +#include +#include +#include +#include + +#endif diff --git a/lib/Old/Endeavour.tgz b/lib/Old/Endeavour.tgz deleted file mode 100644 index 33bfbc010799bdb4f6ce8102e2ad0c082f7db192..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 550067 zcmV)6K*+xziwFRkSTt7v1MFOTSKG*v&;A*GiayER8DnJmfemvoWX9M)m@$NF!{lb? z=J>Rww%tKe>#8Nd$!5=fru~Fl)h)?3wlPcylbMT7qDMcftE;N3e*M7vzQgFHh{NKu zuXdG6rOH;V^z47}&q(d-u{612 z8PSlCX94YtvCFQPm1Cn|*bAAA!k*;bV{rB;*M6q|myrm#Enhx-8>ryT>Qeo0ls7A= ze>9TX29zt6H_K(vf9c^_nZN$c^-i`!u1e4@yL?graiR_)U38epg zbVlmrM}w#f5>7)FMPo7)!Xa`*12)O(@}bWM+@^j+n8PD!kx6EU+at0UiYpe9NQes} zE?HH^q@l|q;j7;H-$%sd7mPI5*JtSi(?Hi}A&pe-6h&yprjijcAkEYB?e>07D5i8Vd699$oj(O6oC$tf$$y6vX+;58*-4i6dqGOM(NpLVr*g8RgKp? 
[remaining base85-encoded binary delta for the deleted lib/Old/Endeavour.tgz (550067 bytes) elided; it contains no human-readable content]
z8)Ne8;vnA_vm2MeSkRqNViy9vDsZ_fnB8(%{w&>e3V_hgUIX#!z3h;qE!E`FlkW;5 zkykz+WCkdSF=`-VQ??yg&&F($#3DtAbI3%SLFHZc!=7WQFc)!9W4$6~!X^|K1t1Dq zb$jc0s%s$5Wi5SiChc9gLffj`2ON#aih%EI^SnanWuk^r6)cFX0)G~!8T}YHM9nP1 zyTSSEC30ubyER)0tLh;CSL4^{btT7~iJAIDzwCf;ri}P z>0|LJzo@zavh=IuvXMtg%u2AamLkJAgcU&w-?na`A70_bw;4T_?% zyx{)sBgj7k?vCZgQszkghOrWSZANi{l|Bc3j17hFwJ z^K+47nr6kHqYfO{j-ZN*fhsIS*5;7CZ3_H)O-;n3gR7tPSr*)q{at!$nCDZYw)h^B z=FoY0aqi3rPLGdBCM}d~^U0ec_$+_Sw$tDemuMl*vaX5}}k`+Wdk_B)U z0X^6i_-jkD#!2*nLg)M+qE1@v+8x#0eGXC;u~b*$8X(DmmbJGpN9bK-$I{+WH0xvP zYUZoiwST&rrdWi1$rj(Was@ai-W&LS-xuex3jaUn*7R-C9V^w!BeaHgK;eyGemm~1$7Mn8$3rOmz&3@rqiH4u|C z=9vJD+=7>!3%m)%@;KBu-|c53x7Z321cK91*o(LfW+i5$8Db*8$f5z#G9gO*A5u*U zf-zD_(!l`O?f)}EQ|i&Fh>Mt@*wd(uOxbv$^w$^8Cf+tdoW()6e6&#QS7}&h1Pds} z!-iTvEuM3Yu8gUy11#gJpc+TQ(}WtN8Lj`rcHGucVcG12on}oxo9=V)x4#zL=)WQ> z=`u-tm`en$@r>B53J(=%)BrD`sH!sY>5N*0>#-HmC;w|N{MZh8DPNgS8MK2g;-@{% zT#%9Q{@O>=D(ZjDMTb?88Y!a;#zva>{IZOFUPnjyMb!}rU3)Iv?sf5f;m@21S(Dzq zoy>apuTL{2an5`@{ZfD`ivg(vaK+r5^--`RAJhJv-1EfQno;Xu^Y?bv{W7dXEJLGL zxjy6nuzA39)i&0^MHm;=t;^f^ADP8LmBEeQohAq2ln734nKwwWVkNWvMYf&5R-zfB zBp(m~1%4+LetjW_ozY>Wy(>w8dQwGK$3de zOm;cnUW=aj=%881H2%q*$%v2;O^bIVen=FH8-9d~lTav6Q!XUvu_(l$-&6->X+}bhfQFfIqOxR6$to-xKq+Dptutad>2y_V!LK92^!mgGvmp~R znc=o|ddm3W?3>#v?1k#rTAP$H;?&n&{EV)d2@&ODWKo-znPrFW6CYY z!Vc)f%r1;Bz~%V#al-#ax_>fLoT&+oQ&6zbXx#sED7f<$$M&d}kq&QJanPe(;ZMU# z_xj~L;oqPu&PG0CJFAy=TWimzFSr0(?)Nq6yA;(hfig@I_>0;WSh=yuyhA<7yIko1 z;^;r;Pzo1jAVV=|{L9IIbptB}(Xle5ED86*G=OTip^-XBCYB9`iHN`M#RS^_Ti};I zE(-5#40*Pf$88-OGSHacQxQ#K%Mo(F1``u8;nLS_+=`x4Tk0xe)&q9et+YC!Q>*&_WUo9)+qUqeK_QQ zp~K)Lu;>5!D)Y3oMCNIQ@`Q#&BPmMUvn}UYEFYC^{oj!Al$M^ccsSxQpiKCG^}fe$ z`f6>f#{h_puN3`X#ZYCuYj;c@_h^n%-Wrr?aG7~<4CckAR)1g=O*BlTgMO=_zssbO zQm~x;6@`IJY}3N&3eZo7@&Yw9c`v$?LZL$Ty*z|Uwn(w1R+MFBmddM2b2a6dUqw|z zNqJvL4!u!DnOV4|qZ~Y;$(BR)&DFZvcf^*+p+dwjT=^Z}Qor+On;nT-i}K*g{XCb_ zRuLZZ7+5Bij2wi2nb;Jw;izTmKBFl0hgHCtQv;rP!SlJRNY84_A@SGLN(LMw-`|1j z7c_CWtaA0}EO+sSIBRW6Qq27*^wIwh_RKs?>BXQY^9ByxK(Z?ue3el(;lnZfu7FTm zN_(6wD$TcSUrt%gDJtCfYiS*^XNmi|XLM$Wul79k6f`MR` zDvK9lYwd^7biEG?mOvG40lb)r-gFh;}TR|;vqGNX-PFM6G+3VU_BRe zgU=S46~k%x%ehb5tW+f*${bZ;-d9DrbRRgNcn=#q&au+slS^fp7Ljxp*6pa5?Xg5Z z)~?=&dR^bqVo50CD%AQ3fYp`A9wp@sEUQPUuUh{8@f3jH-}q?HW;K;!2UB&I=H<{t zH+d!vYklU?+LZkle?>Gb+R#K{z5Ej5dYhxT*GiiWsT)oe-%9MGzLg9p`K?z>QHWAh zv{HQ}!w8K4EoFjQ_=UeqxkyAdfW0)1n?ZGQ;}L~y{}VYsN9IUEcry<@-C#z*LrtwV zBsiScm||n_T0E836d~I}&!os=VMV|aJ&!-oA}u$weaO)1P&`_);a{9?5MAAv5j`XO z7?kU)F&`uM+(^~Vd_PQj1mLCHu%OMAD#qq`9XYI=a~)5aFKTvkc$q&nNlp=1EwDt3IBz$AXL(_Ia_eRm_*8XMwQFxhj8U4p@cOSI3v;kPv+=|+r~2)+-(?&*mT^_EFW_{Bx@PzGy!|r#cLXy4(%~~BJ!wQ z+m@CQa?SGPFi}$$_3r$myn#C5cn++92vdo^4S>xT4zTat)@7#4H8w7_YA~;#V6)A6 zl<-)EcoufT{T1LAC3en{<3@uMA&uEAuq)C@u^f_d)O^PVexE&9{JHPakRBid3@3D> zUOoaBCEH*rbvRCTEo{WiLZy6rt_`WQ_AISt!$hsv){@jY%A759a)D)K3Da_$>03hERso`a8n@5OS83iNE5oh0qL#XM5CfFz0{O?MjV4f^6P8qU>bBoF(O(a?0EXfGW|x*Ep2~N)pEHwJo(^PTF!wn~@{6k-JcCdu%a!gjycD#R3I3e5DP|eiyr5xK!S4 zW|NjNycXF`H!h$yk}?gftAy#Hj`ln<(tllqhlDZRs#q$g<3>8Gxf+h5-OrO$Cph## za!t`iA{eNfv`YTBE**;PVJbAZkFMJ*iNggV@##S&f7kpH-hh8~fE`5)F=khNINeE! 
zLkR_$POsEipnsQX-ug*MjnK~ZiPZ_VRHUJB7-n5tFG2Z<9$&i%%xM|!)H5`Qlpl#8qi4o5 zvC~3&jRmetbulT+QERT;N$s|%32GLDn-QfEAEX1ljv;Dk0uPtk()2|Gak+q7w}nt4 zo1QM!AqS$oFD#*5Z-9~_t4VV|(c&^pZw-xC;=x42o_G8to)C<@OrC)XOv{5t+eYyi zctZc`>I$FVhK1u(bT&}9ns8mlvhwdXtH9WIGGp&(O>)J~Lh^dS2@N=2E*a@D627#g z2{7R}PIz1bP9qdzlm0+^0F}SJ2#JrZLFwBp?}~W8Fg2F}Qs9bs%n3#04eMG0cryVcEuvWHbSq!bhCC zLx(eEf9EAhCSt5#P1NpfiPj=M*RK*TV&~6&X2Yn0K{m4U5|>}j$|{`R)hY`ZK9=%% zTG^0$!v(}qb;})rk|hC8kd$a|ML&5j8U=eTKGu3p?KPuxU+Nf2mBzgjvdTqB7b&p> zHy_;Qe1!hMb*pBxyc1^PrbVv1u zxm~nH`m2qhxXuo{%_cKei$B~)X3f^8|88S`BB;K4y1^Z;OAlp4J^wP}4{ysO{9xj* zmcY*2S3msiXW>OG-_Fm!hmxy7K%D2{nKoY-r|Cv~82!jbDme;lhYjJCP?jjpaP;7) zWQ>aG6(^C)&f5x=PI_!A)?f8%`DyQqKUVyRfR1s$&|AKfZxoFwF;wRh(U*+B>@%f&SKa2tgSgWzwFVh_jL^o#gOap8rGuN9u z{kIr9j)!9;_QSIH1{sFf_}NQ3#yHP}Hhp4^qVmWocO|4S z5r@)4P23g7r{=FbI@TU=xCnxA!ms!9(|9QH!B?9QS?730NFE*q)`9tiTHdIaY@kkB z2R4U<0@v#QmOFes>pTqszTpLo+5Cv zx(V5A8f9WCMT`BX_3BBWLOr@kEiQSICe0kmDz~i+GA<$7UM;#IN#91II4hJ2md?41 zxNsH<7tK5JVl81mV%!-i#s;z3snlXgc)WWkseyt61eJM%Es?UvwiuZz3bmb6goh&P z>FuqsNW2hR>usj2y+9-Abw-%oQ3Q(x)6tn5=8KJ6fmn9v&|$zkrP;Tm%A7hocku39 zTJfKuj^}kIg^u2|3ib`~4kU7K9hRR5zvIt_`<27{yik%6ENw^;7+bF{%A}MOUqtC3 zzzwQ`nNvvGu?0vHT{P4d>VWDxQGQ{$@JHuu0X`SM26$>Ank>k1cAs&-3eA=Cr;ha& zfJ%6iR1X@nC8CLJp}{K#15^UzkV!W_@_j?+0u#-7$xOfI*gr%^$29A#6HeQU2}XBK z3nzeiL6Lx!cXvnTE_2G1Fef3NcIJO>ijm|C8J24>SwD;xiMfUEo@aQUS3-7uiZgx_ zB8RHbw9RD$N!tPM1+O!W+@`A!w@z(AJAe4pa~chJns=5GcHf1A%sg|Kr-g5x-osdQ zcJ844F!pnG=Q6lp$}N zJ!;~u#;R>zwK*Yv`alXMw#SaSQ`r?Lgqyy{zpu2G|`T-8KDKFI7`HkAt z*Dz_iT!4~jcsHoODA0PS*xQ3(yCyJ(@zyme8f*lgb~%{>CBjqUJ!epMCULVw|Dor_ zz*dZpdz8_UVm(mr0|43zicl0vgc^ZBKSZHeXSiJwFn;8yszfAK7v{o;*v2%fV!~&t z@7srX`)|xiNs3WMFV1V#4gcYJJVd<-SN{BIe9%>fDz_2S=<+Oys1OSzYcAQ=skf{_ zwFTOyGdHi&X@e?*O%8LKx&W6eR~G2PTJ5UYeB=#-R40x0xkKXtzF`Oph1vpIAz;tN zI6h;yb)iRuHaPrePaekU9O}yp_>W<7vYaec?#!R+TcTprjblc)M0?w8#LUr<2!ENe zXpHrBg)-B#)6fSwd3Ff5p>TsA=ygc*?3MPwR$Jg+R4+kHB$Jv{y$V?9hRt zv`YHAUQrNe*sF`C8mvXWrig+@d3(73Clr>& zWj`_I>0;SJrEpHdi<|J6dTD=!)1S55c z(=PZFBH5R5D?FKG^DB0+I6b?I=m_JID@C$_?w^A<+w7Ntc+++>2T*QOj^q7us^CJp z&dtG}1taT*7wJa4;JOwMB*^g7=jP~-o;MpCJ?oqQiHzWB6T+r{1g1fMU|R(d?+@++ z)khu_eWqMBL1g_+aB+<*)!!+?wxLnhC0da0b!3EBR91on9q2P>1>nP62no#7Vn(Vq z4{Lbd364f&3Oyz&5X;R`TzPm^b1y$KH^F!DNIhlt*k0>P2K3MJOzF*ZPPD96K_Na( zbyuitn3g|6!o2gxGxZ31nAS2c(^=-87V8{C-D6cabH^BZlYHdmTSikm!|TPtd z%%3csYwLbvI@==YMCQ4`z3U`vVxv^DG?8!mg#RHDwmUApZO@Eb-rAn_jV@Q2N0Krm ziFGm|EEsgXmQTQiQJ74x89$P3dQ-8YG)`B$hlm10UD9c}P@t^AM$r;-&UP*!an`qm z@bYjGitQAk4_eez0t!&^AV_`cQ9?TfBf*kJ3q()B&IZkvff)`vmLie?lV1Ov>fg4` zjvQR;JeX9?RpyNG#d4D{?NpBSJt=+NP1u#m!V9NvUEJ)N1#ZaH{OW+fhPTU(4~Qhs z;pxDd+h<+#Xw3^b)R86AhCjnOqz3%1Vms_!*yD0%{CdpHLj6ee+}y+xQ{!bEx1iE1 zTt691uEkHJm*g?Q(KS-+5zdENh*X9F{sV)gg?o}9WS`4HCb4kVh;V@S7?afoYPNl~ zP}PQ6gp(^aW6*pnyI@at{ze({A-iyYo8*L{%@TZiMp^HrnL|Wi-UORyNNd`F7O#cB z+2jUkX8)_VI=YT5{s(7rNequWf@P#9+c2>~cq=L^e#$|4S5Tegs z&ZU*O_FsmU?f^dUU^Q+A3}feO1y}0EOYxQ3a{pJuc00uxE$P`>~20oTMw4TMwZIaru3*%I)AEus3N8fK{E99x*94%U!&G^=iF{ zh;aizAPRpz|18Cb2HrlEsA5S<=Uz9t0WMs<`G4FM?}gI6aJ#ZjjS7Iz#kw;qA@%Qm zIn+aVj$M6k_JiVet}opX+Wp(K@wCK+$lL}x=@|4py z=jLt$>L!XlIaC4Dn&*KWPrD{(7cfOOBzxiO!nTQp|T1~GMnJ0gqlA-UwFT-+T=T&t?ap(a?_ zK}cZ2f?-64p(%@hWTE~c!U^w?29G*RFpV)4*f@>bc}ySZu_>rN{ccwH8}4F?(1Yu+ zu2}wO5uNK&P3W68TgzHxj)uLa-fVQ zW`sdbja^e%m_KcwjIk6j*jH8U6@x2g6JlB#{M5&H4jOIvv3nshwLtUQG=;t2c^(`H zxm&z`M_#K{V09$!ab9|@X%=f_5Pc>UO}!9aAPNwsRB8R0Nx8F z{(M1Xygedq3li&uE)@=fgL%2UMhm92FgUTlvNzn$2wJ71&jd7=V2u1yQV zWG0k@jv)@z%Dd^Cs=G=e-Gf3rLEeBHNI_-lmysNe{Dao(otc%kzC9Hi6F8mAO3P)8 zWQ_9s`sF(jGhG&Ud11$^}?Ib$9(fNkT)P>0n^+8!LM)fC#eQ&=DyApcIJ? 
zt91op8^~|MkT)eoq5t|~-j}d(RIYReb5PTr1*SgghpXd#^O$v${+$;&&@NY=9hAW}>*&pIHTKu`C0{7=&~R zIv>~oKt}rzA1Tk021IJk04@yJPT;1{8#m$soF^I^Tp-h!@s@{Rmxds-S+_V40&fNi z%6sa#9JP5qDvbyDyN=K%v_yULHQeV3Ly)ErRrP;ZVnn3!{(@1<_o65$?XKV+6LbX4 z;7e!=lk&Ei+-ceMh~}(zFve+4i=brt0OD^iZgWD5y8;r(IrnaUn)7R1h71?Lwc#wQ z3kp)0_9_vVv4;weurRL3(UVJ@ybFQ}vmzM~sIi8af@worBq8+<{ z+Q@!yu-#Z1P${jWE>M?wUjhR7V!?_5epi0k2?7NY-fe`SX?z{w56Iuw)o%Ct6IZ3! z!stQQ&gr+HQ#P;pcRNmCi1VM~3^04-yWk6js6^#MmbGsdfI_G}S=SsVu&(AGCFb@l zw1vx{f}Mq0eK+ffgJH21ivSFuC4=G2T1N^eM$)Q9rHMj+K1|Gtc2KJwVOxPV?4ulA zHI#!iq!URwOj+{dxl8#TsUELqV;Wpng~fgT-W`)FZAS}nlKyLgLSbiz@C(Xr8N%Em zr%$|?a~(Ja+m1p(PlYBAf4ms*x;6z`KUcH!e`!rigk40jao751b}il{I)R6ffokU7 z5CF7%iL^kF?V}4yM+p9R*||(W6(PxMvGokyJopg1&Qfhcf-P5=ObSQQYR>Ga|jc#<>u6OST0G`NgnDt&f1meNq z99bfH=h~}XA(Y|4#HV!=HRy53d zu|TKsPdnU`QXh__5rA7l?WgWl08dbLBtIFkp`cWiU9k3F2TA{`YTxv-cJxwltyT?) zh5K{h0MW`ldvT}?hHoAxXgf8Wzo42T_LqBu%UxG{%g+^?c*$M=P2~5phk`v1g!VLr zVBj$^RToijBJ7l)r{LPNLEz?-VGmi9c8Avx7FH{=YHEyMv5q74Pa!G0B;2Luwz?2r* zowoAoN;LFom|na-MaYVHVb2iDCbOmNF3TnQlTg)50ZjH&ULP;u3H`p&?tC$y12 zrHKp8%zTrVOdL0I@j0aY+w*BQBC_+4CXXsA5>`Ak9@dLo~7eek;OajG?OHd`XewXPjq;tywEHKh^nTk?5X0rrR z^@jDHdyL8ZB@RGIxKi_J7@K8->0ylhdpNm+$=wu20FPGLyQo|O)O(RKWT!Zi2+3t5 z_p~ORU}Pt=vuOC!po)a^-M`XFt2xV=W9tEst$dCdhfG{)LilXj;*2MpeLj`9c-A*J z>V3vj&VGMzZh(Kc(jJ6e`guGYtowM|x&F)rlpNLo-wppu0fp*uU~ROa+QSCrks%wG zp0Yg<&l59Ti6_jbNUw$>FoNMwHPNLawP(arLj}VwIGFKf}?ZA)iuYK*9zAfcV)W&c%c5#ZDdjMhI>Cg?ZB4WZXa zVuuD)bzp8eI6eImMPd0IG;MUWgqfezaaah~VRk+L%BY7%d~>g_!}IuV-iHSG5oxtY zL;4|y>a;E&jv$*e>`EG$^7U5Jq(`V^Sr*9{8vQc3OMav?2qqe{9Fmc@<(F7HgeO1z zzm6qEAtka4&2!m4uOv0cNO~9OrRn+)?;24jZ+we>)nc%Usxws8F2(1K+>@@FNCKsR z$a>@K0{pr0zIgeL&!U0Rxa3LrqZ!C%P;u2!x+?bQ)RjFiJ)wCI6-E(@$7%BUdFL?WUH z88ttgrLXLG=F~*gJse?YHi9P+6gin{^D0#>udO|_jo(?!RBrz1V?1|Woo93}=`J)_ z32L`o#UK#J3qgxV6a8)@R7)5_rOGR70xFL#G}tAjg|>!C8;U*i=Q4Y;92P2KAWK6^ z{2H@aI_8PlAWlETUSpn3D1tvq9I%f58f*az)FgZBTzv}txnim^%naW>+)$mZiEgQXT>lv1HR z)YJ;5JagRm=(Cd$t0c2fuZ!H7^z25`CN`;q>3?%V5#gZ;mFCP83459uKmOL5PHdMB z0NII>TklAo8q?QDL;1|ip@2NbT2VlotBzZlAc2UCHmX&&c!~dN&6K6p?A8_@=XyD zIsmaHsrx}ww@+SbpE*KdvzP4oshQgaC+3g=2c<kz1Aah(ztQX_nsbz~RHm+vBmikCEwO9>d0p!XiRwqzI}$rh+S!)m ztx7@qaiIkNA-(8n(oc4)8v;2B^dphxc>2STM3 zc-KF7TI9v2-Oh-i0OUnH|mM;Dm;+StNkvLGC5^V;$mS&O?s48Bpx zm+wkbhuo1@+mohv=6*z4;s#q1p-9hDvTl z3V+(2NL5bVh%$%ShpLF$hVuJ%ccC;gG21>&XP&+jIsa|!MTRtaH*&hOM4as?(=XqU zc173Pk~V;ku_+n;gl%aOQyWvx-F3I7f}F8A-LdDdSvRQ6&09|no!q4I>(g#iS?Sb` zs`HqwYKp1Ns$-^YS5_RRr6T2D{Hz)7N$k=D+CHSmHoY#sji*Pbj44DgYT<2-1hzP_ zdaAmAqaKb?QgF8pQJ`{{N-MWgl&?SIamW4~4gD}iK5G-(g`1Bbxu8fM|HX^v@E-$q zA=dKm?8-lsJP*>E{-E2Gg6CSHTCP?Lo-~03SI3hm(pPK+j&qMs$y#GD#NZ_uK^kSzg3{eeH~qVlkki?84t zt&8|KL3$3dS&M?+>&5uUPDWw2akzWHDOO&zH@m<2QY3S)VO%1r-#zgW#JZ_SKCkCb zxF1B(=(rcP-o#`Dp%)DX7sxtm#S-KLp`Q}ULdS7ifoVrUOb^PU;Ts{Ji@h!1b>MI zl}W`i&uHR%ZSEW#u5E28rdR6x20*8Z*>?T=_h)!a3TwI5ipC+ol^g^S&dC|f0_to7 zLhJTO8>i<3-X}DU^@%`Quqb<4iylvR<0`0J0c2jT;P1+-*81xTs&gPVq%Dsh1~cd{ zo#aNFJ*I$2y^ttgroVFBlWHTo2b?VDPA5|{)ZRng5@|zTj>qo7(<<=>57u|Z?#7|G zUtVJM3Ybb5^yn;<=MIjTS~;@thGt?JtiqcGz)CS;cVzHXJc1(x5$_NXmV|Lkc0^Eh ztZeJQ<95OUK2cjJZbcYWz$~j{vIN>NAf7jrDzH|oak+pAK}fipO5P(oLKeR{){J7T zId8$ERfHMHI@tLonYF1Q6ocB&8L2}xjS=8jqz7{3@UtBc`tiwS%}6lyOf|Hw0n4k| zfz2^uY#6%77wR`kGZk;q?i{|-rSdBzwC&B>-oY7~jJZ*JCAusz7mKX9sd=V@ny52a z8_)M-ZKaZCnYxW;iF(?IKA!se1D`S*^ZMPPe5Z&A81wz?6tfbs+|LdePVB!yjFg7h zy>u~^T2q>sI~ts|k196YoF01NDCBDZS^n-3)|6uEZeqchEwApsnM!q&vd@It)G;7I z%spEYBMr@zOC+XM7aY5UCl{vNn@E?y znKHsVTQnRkYFa18C-wXYU&$UNQe{0iUqaxE7a}@Ysd|#>D>;i6tt{$CqC|+`JwRbv z8f&6>#lUa9A*(jVB?8xo1A*lD6O2u2#sulTjbw){`!Ku)e>! 
zcjy%5s7p(3tR1d7J$O*K4wp!oCuDZJE%HZKNPhBlW~RmZiDe!7bWZ$t zOs797jCgSm4RKUGM$+9rntkgJY*bAurkYA0hq2!YESVxrILljcJ>x|B$8ppfj{>>~ zVGC_>C9P_K&Ugk?^|o^#i0?BiEQ~ zyzA46H(DN^#1>Ip=oDemuo!s@KHZ361|KPINjQQY1KT~ImZgDC* zVQ?-|u+6G?b7qE$WitGYV^A?f78P?L}uMuWJ?)UzBHI_XVF|~;G(i9)QcQq<3b^N z_<(zyt}sv^*;v#7bR|Oabn)Jk(0o$$6EQAVsRqCeLQipUGPTfBg5+zo$bf_PCkoe= zb}%HRJH)7JTAkg>lZycTVXQQFaR+I20`dd-geYd^%n@dk*%E{(1S9a#+H1gjjicW3M2Db!lM>X)A%p+;^L|@rSt^J5Ny@f?(tpvAA z2T5F*HTiyyXxmyLuD}_Te_i?Z+sesmTv=rnalTS1coSM|YoShJJWGw#ysjvB zfScEn$4EQ8`Zz_39S>@yPj=LDvjeiiW7|BNI*04Q_U6IC=FT&DC%{K~_N_oNh%`*) z-cPV!zkBkel|X7}v6%492UPzu#VXP}FDilA{F;n%8KqnVAPN@GeiTI{jxm0VFE#sd z1vsdJis;RrW^-S;{w&+-h-ShH_KX>&!Gb%}*=4hf=B&xan+qo(|D+rG#9k)_!I+68 z#H(;lC~tU2^j$!5Jk+u!fPxdtI=H$b$_R(b^XQCR8mOXdc}JdCWVIM0(C;Z~JxbO- zQf|X2j!Ud=%7%Na11DbOBdQZg{X?u<39?S3P!j1!_U?SdI#AZ zDRG0Zg;k{e$flBpCx9=JTx*Z}{R@!|X%!hRSR2YduW58$Lb%NIy396Rn2-ISH|2Xb z7U+> z#jdjlG<(HT?irU?CmM%sF}=y^g|gAURu3vBvhtWw#Ldhn8}3b8HdkjZ$yJp&R&*>a z{;R&CRlvh237BJ)hivL;QuW;Fth*sba_YEwmtmx9BcLxA-&liiJ6Oj8n2CGrQFn9S z;h`JjA!l&EFy!NGx9m21m#?JjX?5u(mZ8*TrF~q~lzd=pYPQwwgpkv>lCy;$((Hjt zbFFB?3Ll@++kOu&+$+V|(TgVYnTVwdh}>lU2A^Zf**ED$;i~FoDaa-64c7sP7YMgY z=)XaWs?6xvVswN0ISY9k+ACfhDa1v$r%B#U|r-0OMtR)KcX#-na8E{X!{sJMA9 zOGk#FN`FwUE)=z+lH>}iO>3|&C8f~Pe6h4%tR>Dy`aJod(957L$`vyVFzUlTHra~2 z>XK*15APa-A#?%Up&w7&EbIL$#Ldna8LS9%lDmP-S>dbXxnae#%$a7RIjET=DTI>= z%p^j+th{6HM`>hLAf#acULQpRen0|I#fpMrJpOq}O)3BEjp*aA5P!ix##qyt6%xR; z*b}ns7kNQg`~BD3xflST%nJd;*Esd=0&wFC01u{Gi(d@wkIl-v&GaTOj;rGqT0IH8 zBI7J9ZkX`AI;&UaV<$)ce1QY zrZ8C`UE_BF@u>&L+KPAYF?F)=`0?Za{O3P4IZ8GDEGR3QbL1S1;E~%rRFnbC-bDkM zIgEZ)eZ3;;=(GS$<(JDj?PZLe2tbqYY#%G#=Obr^Z0!K5#PTSlv|VHae_4=ilkyE^ z<7~=U;~mNtiOff{ zNOMm10cmSYBnj^f=&K~OYvf$ZIe?T{_k&2kq+SOfbe`;j!oy0Hrgd>bLUQK@zy8NF zqFH`-?(p+6uek&kT@7#@wtA>R)e5@!S;4-}m>kmjw`?%#Ca^7Gq_Tk8g1hSILTP2m z`v%ZJ(zbA|Z~`P5K$eD-@qyV=!MlR3VxjF6TQ-U>FKtXYr%X6YTdT}uQW8%Q20~q6WHBf zYgZnzt1Vi{vGPSo&o0Z_D%)US*=2kAa;j;zmoM2i+sl_%w$6q{CzxkTL%WfIHuPWY zm}vXg^wcFgUcO8V4cOfN`ns(N4H;_~XOj^#sr4ioYTtb)TJY~=FcALwx`6+og`$OV ztzH=hK|F=HU*X?A{5$WhH;4F}xC)Yqc?s*yYQ{}DaT%b#NX8t@&h0Sfob}*VP~+4) z#@vk=b3B+6R_YhSr=uP3w%VJ#R<6#cqs5dsNM+nd=hKNqHw>pv4oXsUD77y}O~!~X zU)~~)`xlEA^qAhJ=Fuc|aUjRB7WiDRMxH3%?_QMo9+6{X)q=`Z;$(aSqQKAlIy z3&kpt_2vy2s9B2abMy0g&Ye?FJ97NA*v36%pkA|!1Nf5S3Mb7Z(cmj7hEC6+ z7-`y|<*nR}TrROURSIeCFv6LaPkN^G6J5RP8ycJ)eCXJ|mlOUURYq%rkz}p94cO-_VZQkWI4JMLR%SOl<)I2Bd?E^ z=jQ>(;HJZM!_SPND({2h5&Mo$NIE8X*P6&g9=W_NN0NS-?0Mnua{J(qZ3rS+cZ?DE z#P4u;VVS`rPkHK?*4Dt_{!i51B8nA;!3nZJ86sOs;72;>dK*To-StmGHk3dwWYrp! 
z?#|IzI-&q-6?se_cX3?;Bv?MoTDXpdEXCJE#N?nBd6>=hZ;>^lEH?OMY9QsY10zI21 z??R5aM8iIVQutDN-OH;=n9S4WAZ^EF3z~5Q2(NtJiaH$%joLB&ZPV{T`zmNZ?EK)&@f)J-_G>xeI(5=gvmyNU=85LMLJr?G>*xsJA ze;kT;I!7+NAEvSSM?r|zoo7zF3@9y^tM}y-_E_Fgn1+>HL;JPfQ5Y4-Cq10-x{|$XFu)z&G#YJCMvn#;5u6SgU$E3w}Lc@x5+Qm?|z39=}T$0Lc- zH!G&^U$9YzHjg4d%g1#|f;c=Rk4mt-?-}DL%Bdz54cLw?E<5s}IpVcDh`MNkleGlO zSh|kTNR<{X>;x^M276KT23j*5cSaX0SRQRv(57v_^}~^Wf_stbR#sM|6`JIP;dGe@ zgQ7F%2B(HN2liBWmTl$^$t?<5@8T@ao`idD_a2(Qim|a~&9wqdv7)XiwnNq|ISL9F zKZ84{cT#l(R;$9;-+%8p>Ld=KBdwK664314*gFUUs6Z;H^rDjjE$TjE>UIFzw-=XP zOF(!kT%SX5iNe%X6u2i}H5Hb!=q_nTX)1?sCKwL2cr7kWwu{x4l3JcTZ(6p7*UVOi$vlcL6?tf4!^``Ox~-tb;)pNmPvAFvk1L^B!TxC3A@dXI0hn2 z9N9M!_PVB{3S?p8XaEZtPN#yHKMT;na&!!|(_5UmA`n9@wAPdW12o!G%Lo&*v(_(8 z&br|unXEE3(9@DRwo(nacn}QXESm3@gECs4K@;Fe5IWF{$}~C>c=XN%;RBGPVZV@~ z$+B-+=1wJ9NnbjlCTt4o8nljNS6i2|tW#ZM_|I^>`5BIq5Hq7qpG>r|BCK599$W6i zZFQaVjTKmJ4pcT)y|yO@MI&W;axjVf>L|erQ;a9R7bQ*y>(t=%+u&u*b;DL~440~( z;!(RBl}~?8aJQarZq>y?2T)4~HZ4TvjAhpGD7PqbXSpIQF3JAt@Vw4Y-JamAnMFAkmG8>-<`Q1gS#wcpmJynrC6yq6FMV9tHt# zj#w;UAuoPO+&C(gP~r#_)OC(e(h=x-*ME_lnuNrtWTa0!D<|BE?Bn8-T#bNuLrcX3 zapt>`U1+-M3g$e&zR#^sDfLRslVz|SI+1nllgslTcM7R~eXh&jqzjTeBUim5t$v0( zQqgrzulGadl?ZM^<;J|c57{OnvhP6u1dicsS1KFZB=1zWbCoLR?b%7+Yi{5mk3mkj zR+&V?p1V)}Yh9l>5f1^mX!4wXb5h=z!@zgCHivO`s(vzz=?6km+x`nW8s@s45Ryf+ z30+!mHq|DCX1oc2Hu2^FNVhZP2tjA$EnL#rOGL8j=C2o^)kTA8`8fhoZI+ivI|&Fw zYt7nr&QXoU>F%T@oIU!re4?rwIfG0>pRnq)vN?+-*{H;W=w~lq+Jl9kQhSkfGu88u zTK=61(jwDNAWa4px{2Wde5m4+e}-6;X1Nw8O3a}8ivxCY6OZH6ZG>`0xVDKN_IT>u zI8TZx6FKF5BH1FouLwPyxf1KLeoCV4$~(^KfVGNDFLH9ShaCWGZKx*EA|8IHdxKZER*xax2yK&oI#)oz=h;#OfZz$LI z!?ye6TSWq;JzxfcM~}_PB#vVzhb$Jb&4{oCxJDG}I~xUhnBI=MBW+lQbVRyR6GSG; z=k#8A@A>Y|A1gZE0}vhw3bl`)4teb*_k$!HV6zq7x51+L--V$<0(zezd;JA%sz){- zRk-C9tKV_G3P}r%3or!@J)I3X&t!JT?ymeRV`Zm>u(}Ps^o)zXIk6L+QSZ@eT%ps*44B++;dZ@j`Y!D5=NgJiu|@%U%W=$e z(5@M-ah4dUsZ+#FqP1!6(zv6KbQhdRbXZ_5E*4x z9j_a?^C?-`2Z!aS0Xh%a5Q#D(R9-P;Bu>iOHlS74*aPyNuU8)dYaZ^usMqw@)3vRG zdaW>5P)DDY31i)SyaYUKU`stv#%}0?px2F22kz=f(6ry+vOLu@1@lcVH1QtP$FmFC zZ3e6x#HRhDi$QSnCy}V+XT~vS+?#e7OQ;F9Y(V`UFmfS(b7Xi44xe_UA4A4$011fY zU9C(k1-eL1j|T(=K;XC>M2v`(~=X<3WFR5)Ot(G*qgwH3^P4VSxSgl>w7DpQe*uPrg^r*BA5h$#Q#nC{X zrM6|OP;3X?dzyy%n8TH)BYMjsB;^qYUZxcbM zgAIr}!g7Ga>d^>~daGtI(MJL@vb-gl?y`Clwa)?3SN%{DZC(H=sZTIm+ovP zX>9U7iM#0Bc3cvsu-g$*R;7@5;ByO&`B&N&ksojWL6cy?uud?{ zX&gwbh{F>A4ySdnD5Sc$)_>{y7ZMP@w>TSiN3!2+o2@TBO+i_rH?S+waol33k#vng z)vQ1@N;yzOUx_g)x5Moy9vT0g==glydWnjKR)8>srEUCH^P5-F$Hy71ie+(MJP;4X zBg>CGlbLf4nK_C_g?zVAK1E_P6DS)YBu`pSwQzy#mQM>(sXAuEgjQ;`e9GF03dlhO za}Qnp0T9i*$~Yb_evlSF)zisQgl-}eHBg$iRW8YfcNXpXLGULLtRI{n>_v8xb?Y!I zp~a*lj`mThCDlGlQ9+wpi}B=@X7_S!KR)fpqc*8nF$`RJ?2(?GsPc_ZDqS6n!@XgY zP_2#v^~RZ%JS8A@*o;RW+|mpDmhj9hRf*tfIRoLzoXu zH@NC~z2Qmi)3U;qOA{V(%TN7S!%^xr7s~8l9H^P)A?(o;i!d6WoT@V#JrWySP80*a z?(vOwA{^D7aYzq#5Yt$g9!9y_AD~a`wss6F7wyCan_RNVg%Ji)=T}8(s{t@qvNJWk zpp|Q1c?P5*SI0`h(1*LrBww*?pxU6BGr2tUsr736yOh2-iBP z!%8L}s`PUL7Rc|#l;V%BQCyGQ6$-jmnr%)L-nCKLE&?uDr4kE3X^JKtrLK30RtDBSdsJ@`rC?)M!EX%!p=7zY}-8h(3?$1sjhL z+YBIe6QG^?K^*%hfd)mpAV!0nzG?(rt@z0(dGo}rAW3qw5#Rtr+{3TvO51yj)V=S^ zOLTU8U#`-I@A+4WZ5qRip(y zGZrFF3uN1_R2mdzT0^rk0&i!(r;!d>VZqtpUXI?xj?J%(D|%rt;B=kTP%> zb}9UZwB|`LYKG%}liifTX;dU-;7KBJ6b8;B59dLu6%eh^B?Pdhtr3usbsQQ5i|?#v z`9E#xs3Vtm>0AZWUkc-|zhbBkLfTE3NJ@z3~Vh z3^8(Qkldk9i}LTuGu?SYpJ*r{9cyK&8ZZ_`D4g0ETyz?}kY1!9aM|$zt3c3!KanAzw1-AB* zxIyc?+k2Z^_2$9h27Xqn^I-%FeGvuRZ66g0idsY0($AfyoV(7txmVWAU7x6!d#h@> z6TFYy9{qmOYTXL)4sgE6lXHFjmVA4xXP%Pl>orQdI}vYuBGOQ1Ezd9JRD>4hod$ao zm&i3ddaS1%daQq5ay+@_M>t1$a$__V$y^hse5oizb^TVIYh*+C#>=LqKbcVKBGz 
zl7ZbYIX>yMc>Z#V@~SlDEs>O8=y7mIQGP9GKa-;Tk`91>Btfa3%1*>RQ<2H_aK|00 zm<3e`588fRs8kEPa@qOjC2 zOU&w*fu!tL`%ZTfPv|Y53qHP9W3`YGdA%Q<(st&jw)mog6y))p8ooa1j&MD=UKTBE z2uY{i&dAyh`8l?_YLh1PCO|$e>2^8+JxJ1u)AS4(+rrQ2pr`5?$Mfh68?tD-7>E(Y z90R=>k_I$ulFL5v^DkMMs+`dmy*;CTtgoYz6RDw6NeOCIC~piMR9Q6E*$Is5WqNlf zr}SO*yMicu$Un|$Jo^tmN~WD6*YTgs{zGZO&p_6?K-DJr$9qHmvd*l#(`h<+J7JN2 z0jI}57%$;SIDNR>1@dH8YBr9#^JaX>FM?n%wMKa=8tKTc_xF*H#5f!$D(bH89p6z%-R@pmMLcSEbQiHnAfjr zh1o7J%M-=cZP;K=TVT$zzkHeX<(4tN{3V!PzLHVjR>JP`7h!ez3O1J~nD9;7TmAy9 zEnjO|KDVjlTQapg%Piga* zD#UDX`kr*%N9pUEL61}F}Y4G$*9+h&WNN8&?p>|nDHp;!BCLgZGZhh zJS=}-h($bXk*Sz+EL#_khU3;q9FLJXg)gelk)0%cGm80zVJpUmtVJfu+ng;q?4Vj&Ro14xjTlsIl-AT8=)P2E+%k{Uq{UUgq1j&xvj#5@O}B_~Do zA?sVzKo{|PQY2^`6=OATeKN`?-pUVvJRFJnw~GVxDk<<*a6)Z_G?*7mL9j)c9p`fX zZPCKvF}4rKe)r(1cu-zKTPy?rFSP!Me$(-i9ivSJw8Pv(Z}z3Nor6uZizU56v$->A z6-wD{LO$^*q6sJSGx2EL>9AEPWQi0S=g*%P#A$F&eM5szglEwd<%n(JNn=uEnhJou zMWP!`m=cp=jMx6rIP^zB?_!Z`SO$KZmk69XAzI!VktsT|730UEa9&^wRo1%dhV1%M zfn5Z+qizo_m!mdz-fy?r+8UYx2QX}+O;G=(p||z%up6N<9qiK5QsJ8#R0IQchW2~l z3=E_RxZyZhloo%eLBpYshQY&_aVi!wWW&i1q{-bgi}|+d{K4I)yGE zEVLH{<;b=pnIdD;L(m7_47e(zxbpz<0oVJA0g4*jAjL1DDpo|P3hNYC!RC`~xVk-} zwxY2?9EQwk=gI7%^`Ak{$6@UItzjevFrCmkY48zdhIln2w9dmzA#1`JrvPNM|MPTr zcmBMHGe&Jc@3kK3w!gwezlcr8fwmiuc?qA+(>`2;wL=E)&S?+x#;%HYm^i=CDRMTy zxcb2)FWbqoIOo9Vf!`gX*dwN*dZR81#w+s+#mmtN{Z^QT8ZbO*t$Gp^e7s#vP+ZDU zte8iuX^>%2ERlOy48K}>EzszllUy~)I4~0_Hp#9+EWy0~(qBD4Jn6md)d2Iu(P2gS zFps=18s5Fq8qjs6HD8oAn$P!MylfuqzSv){=OFTB3nQELmxpy&F1tGi&1Q~e5{hFl z4_hHGNl^vsN^PE^)Z?3Pc zKd(1`TicTCyUePHk$2AL$u-#XmT8A1pWaDAf`hF#K))F!RVOUZna zh{O@NUR$z;awaH>vwkB@9ehU z*=_&l*=?V;vG2^b-bLJG1R~X4~(~wyOr;nQgx_+kR)ZUCefi z_=r~9FTcIq)M)#(8YbCnzpMLK3W(2+ULYCvx`Av5_#*O(@c&BF!7K@^VQHb4S&H zg$^0@*5r9}i~5Hd^7EAPjbJHrG=N4f!UgvnejVel7XE6VOC|wJq&o6TG@E`5Yj{3K zZO>`6wjy#hKLwyC9Z}1}s=cbcn<}3f1Rxs=qu^ zIRJ2$E3OjS|E5z^lBlXGIDivi)S2%hfWgZYm7HsYBF-Cmk} z)?}S2t;^38uK&u1iglcPhC{@jarY$Q*z{4t@!4ld#`>95{-1H8SRKCPSV`pk*PSh% zk~Zb>nIWFCij_Nv&Vu2fwV?Lf^`AWWJrtg@c3`$XNan+btYXuV^(iXU$YMAq*TmEo z6Tu+uBeJg(lj!0iZjE}P)P3deCpO4JWpH!hW2Z-&uL* zGZeAKSv2kko7)>rwP?(Ty=vsJuSlD$3GB4GDovbpU)_K3@X_lZ9xc4bgZt5gh4+5H z4S$N>9bfG-z8cWi{8POuhb>j|D!evR_;YdHObfn%o0DnG9VhOSapJ5Ma63G?^pg4) zaOX^?XX4Yj+5c^~D&AnCyye&Fp$eFr*MOF1;FZ#Xi6@DCEkk` zEVNQv>eE(Y%3KfgFOm=4mK%2#R8@x;jcN!nl%3|{{ zuRp0Vnjyd1q$Z2ZFsV@FmM2w0ymV3@&N`_I;B8N;ggb3gtt_`Tsms@x)#YnU>++(z!`{)W2yZo_cc6o-WUB30Xy?>3ly?>3ly?^_2d;hYzZJl3XUhjXbdA&cwyxza< zdA-)eUTbDQ#?;O@#c4?3H}H(96Tsjud#QNrDof`530AneHrFub6mZ` zBrA{~VG}dtOLnozbc>UGox?X-YQ{4k9PmC)(?H(Cf&%g?Y5p!`Qz z1{TyykNf3Wmw^rRwpW1#d)gvMWOLR)O7A3%K5CE%Emzh#?!-=)YRYTw#bFAt6KKy)%CB@w>Mp%UGv^8^KTPS={+Y$12#r2 zXYJM0Ie5tk;+tqA!AhS0RwbE2tW2OONmlj9p%zs5cJFX#C&wpqO7TTJ3i{W)$|b?f zc%!?O_XBB|NWNJaGOu&rPo>~eh0K&fxkp^=QRV}IjOHXC1~`ubZtGe&!s#|`OaH~C?M z$~)tu2F{*((7?In7It(GGT$+z>PeJL-C+$HoA`8s*WsjRPutrsr#^XdY}?!>vLx-2 zkWHAAH9BRxpRk+S^^0Vm7NP{KX5oiqi&dxoa&26CYqeho)oPW2Pl9Tpw(Fx>D122^ z3q?LTs>MEjji?sdJqxPEY+oCyO)a}As8;5k71dJqOHi$?5p*?FYwgY}qgq;rlThu! 
zLH+ZfTGILYY^e6uYQH9`UH-JVcKJrgcKI6EcKLIl+vTqn-!9LHaF@SEjJtdll)HRI zoVz>=(p|nj)?K~{+FhQCcb7jK;$8l{nD>^dzb5Lv|7lV0{Trd)``19d_df^fz5lhM z-upA6-uqu8>b-vz)O-JmsQ3OXsQ3Q$QSbempx*maQSbfFhI;RRUetR_)n5Il~XiH{%UM`D@|j&%z(Z&VIFc zc~%}V=KL!0@|F0+GW(VA@=Uzq2RC3AKe!RMm{#Xhy!`n%#`Ko(^Wx>t$Thaq^*#Ys zhT5)=lA-Wb@i7$nu{p=?cY9=8XBp9e>QA;Yqej3YA52_DJ1Ym!L~P`fU)#7@a?CkeyN*ZDaM_N z@}+!VAQ$MqfORjYc-3~BjCZFZ-pdHy>8N)Ky$iT^I<=d`zPF2hKPR<&d)3faoL+&d)a)pzTW(8ZNIs8c(}j$~OA`1q zCpMz-F<|z;#u3yYj)Rszjsqd5J`93(j9oYmL~uUv!?tuwjtfZU>4w~3QL5)et^&)k z0?UVAaJQAU7HGBXc{6%b>-g9Lat+_28B%HJj-3!TLM;@KhVTf+oOl7P^6f>-F63cu zq1ohz{-o7f6fv)=?#Y-urJq1^T2!90_@V>I{Zx?YjFP4a&Z6O)xJ=phhCv5ne(H7m z-4P(LpRxBp`EeW!NB9LZ(QF+U>oY~hOK}gSVe>LZVVVAKwtGyh;_px z!XRSHROPbXU0xzP!OKHH6)hM{TsAR9qR9&g45RVMsf^&Vg;)&yAvA4~U?9@@-x!vO z$_w)VE9~GR9Qo%0j#6B#&}hIs0d@;WiWUMy@AZBhly8U1nCs%F9wk6J+8~G?8E=l( zpp~RY7?NOsI^Ml|d>UDepG=+%1{tJ@TIX}($uN2oge#)scY86;#9P3LZKxb=xOmWk zQ8ye1HVra~{J?(meEqt}%Lc*4KAeI$gfR@k4s3eD0Gj?pKlQj0f^Z}ka7-}pM1YOe z4n{!>pX!1#sqty%ikT)BQ z!Vx6QI}Nr*L^oXGxsWkx3Om;%E+Xeiv<$jS8%tFt@0;5F$T1OdQg6j|;?Cj}z3`^XZ3b%^=PIkN=BLqy(q2K{Sp zZ`U;hNbZU0NIHdm3^!iUiy#RL#k8|a)2s^22w@^Ok_p?REst^B>041Wa84-Lo_sptp$knXsMyH>s>O{98}-@F+IGFPwRv!e;A7^c zZ9YAy<>A+=pwQmh;q&G}eQ#}l?QnOWNrt%D?hbKpP--FzIe9)GoII!;cSEKf2#^II zqD&_lT26fXtpr>v>+2`&{fKB0nwt~+y{fL9G6$?nUTaLT$fepEk9@fFVnPtc$h28? zuc~S&YpMyAysR#70aaGY-pLksu*FtsYBvI4n|H}nSS8~9d!aG2SS2d0{76t}Qbl}Ap_kCLq`mEOW(2i+ITKYv?2Lf_{LP?nsYC)R8& zSvF(At*owkx-7(u$kCs*te2|G=*9N?AAS@+Ycq=FZiwS}c8WwJ?gKdyc*Z)J5?>aC zQD3qf`fr6BE0Wa!+`&Y!VUM$D1XIA?6~xa!XJkqZX299=1qzm63x`PZ^^uG#mI~r0 z7l_?~2rZFX?$mKv@Yze3i*BbQ5}6aAOi7$cw>Te2{tzkbyqX&Eov6~TQd5O}27)#sIdk6k&r3@m9QHAu{ZI_Z03`G~{tyUn*wwgHktg~lfL+>)B8^ggpsz+F zAi>n_>C$DFu|=^VHQiFr;+@Wk>e9me@}JeEA{+)liNjDZIRrZ?=*N7n0!j~oLPOk0 zr80V6UBaPKBP#ED2@(bHi@oxMH9x|hI@lx3#^s*m)B8x*CGOjIOZ!g-&UDU(#3BPk9;Y{*y?dfjUy-OFv61&=!viATflUS{=d(xG(&vzv)-rO2~%I$P8;OUX&U zHZ>98P+dYVP@Wkmg8EHK0&2W3-U}LCyqXr~{rAVkLJ}?D{1fP8?X{dLN*m0uhn+=L z&!#x)|4x!4xMgaielv2TKAqmEUy0zT-;Cm@XObNCE72VF1op4HR7d@vO?K30qC4c6 zaPIs+iNL6T8VVzOk!5Yne+ZFL|5#K;{R(78@KNauo>M-o6nh^2+?3v^Uq*1$U5cZg zCOPUiqdC@g4mQQQBelPT0?8;bogU$w&3M#qU7H}eZ3hCq?lG~Q?KppUl8i%ff*aTA zMLuy;Ka$o{a>N{6r&QuCXgGNe)^xxpwC(1^!8qJK5D&`dLJbYsQzjWiJ81QMXhA>d zFDw!a&j)Z6$PF|Yr23jG;xY43yvHTS)JP7R+d3Vlj-nLP z_p-6)nil8^a+M+7JXjaY<)!knP^&UMq$ooABtxpU#m$BkoBR0&6j6Gm)k4(2IqaU7 z9>Cx}6eqz5$22Sv_G*nT2C!0nln9D(*hAUII7S3l@`02td$wxKrT$P>J_?eeHl}ct z##=Bm^94~5-=Zs$r%#KbMlMg5mx@4tO9i^kgRR})>-+PqqL?rEK*`kvhT(|DKk!xf zyI^HM+5lHpT7yg%( zZJ5dyPw2%g-JlMAC2niE&jOZg8j<{%ep<^&VZz`)g^r?hWV!(!72t#Yz5fdbF6KZ`xSi!VT7y#7_ZYK&#XXJNtXFj6cA5`0zp%(Yn6!-3&u?tO&{ji%XxG(@%^vX$UB8R z>pGY_mzP9VPtLj68O&V!yoM82uRFX4$3y0&8sH)9JQ%$!WFJt25SnDCq9wgXS zaHZ+lgQQC4664SD&C>}(mGkVO$S!!Tk>Q8nhZ~zaht21iy-#(%--k+734Pj_b>hLir_IwL^ElAVXhYxG-|x_+0RZSPI!ZL&Xj+qHWUj-u0E z&vXOXg_x(Q6D(!7;lQszoy3{R>P@O}MvrjlRlZu)B8*M!619KEu5Z-Wx0*YLyU(|_ zh?LY|^k7^z1joUn2hiS(F)%6D83bMWQ;out`lAOh5EF*MeL#dHDMDNn{P<}MH2orx z$69q4IpTh66byk@?Y3CM&>2X&-`j*!_wS%N(&LE9O&vGmj&KU422MW5ynIj?0?^qp zJ)c4%+}RwmD^1AUZjK@KWpiYA<^HnnMvKz#FGGV^E|yQmBNCaLhxrSm_t4s+SY74s zg<{tArTGFVIVg9SzPrrOGhJz_8LETUbF<63`RGBSn~xr3cN62U(@iLM{ccjFsb*w! 
zGwV&=h3Je!e$-d?ux0d%^L0u0Prkh4_?6jz^bvv;&;b&RM%{i8b=v;Lq;rJw+NXzA zM$Z9eYC0+D4U*1tlXIhfc(}O@(Hm=jxW|XQR?V%gH`kxn*MDtpZ9bU*azKs0e+38C z>%~`1Jg_>jo4cdei;FP$^5+#~-adHNtncsd?$4uGc^fu)cK~Lyi;?KnrSx^_&F%F9Upj07!cau?g7a;cH1>%@GDP_{X(fyzE-Ul;IPc!vCeRYX zAFYVf(P*$zshpjim7(c)bB~56#n;7D2ccg>=pp{OX8*SLp6-9v4r1G}dll{3cVRP_5+|vKsD)J?mF}Bt08M>*2F{_Pi?n zV6Xl5a{3%VhO8-o=<3y{V1hlUnTDT!KEfQJDXCkXf-Hd3XW;4f+9f+-@+@H4yNS4^ ziCDgL7M3SW!}7J}Ak#+A?33_O=3x2OCm{`S`Yb%GUOo?yXTnTC%EY-aBbp;&ro2ee zNv&O?%d-oKHMeP-+v}Ut8-_WunuYNnsZl%i-JQ2TOm9|jK7boyKL~*=5o`O~Z-4Mr zty!&QkT+?sY8=rAliP-mQc+Mw#E)7qB@u|*v&?`KMqB?qTbpDkn1$)4fxs)bYZWHT67(m zJ$B!e=`rG)k)e-ZhKW4Fo11r!I$6iMajYOs#IV;|{2&&uR$-)aZFJ+w*`%f#;2}vG zzSNby%%QYA_Yx|Epw=CD@?vvq12Z%a_g~a&PR!G_t%G{4Fjr8tU?MZ7r1LY4IFX7S zXwrNtDXX*1OwZ~PtGI*q_Ur4L2b;S)wKPbZwl(?MWV}OidDJxNWJAlM%0w@4SGr`N z2-CT4EHh@U;%EDt8_kXS)3p~{ho%Zxm^;t5>Jt{WTG!mn8LF54nZ5dr-4{=$RnLnH zZ!qgozd!4(rA2wC&NXG3d7ddZ&GGu~_Vz)|1!K=Gr{jZQUXx~8DaA}FW|@vwcXkm& z^~YhiMaVCnIIaY`xwE@-*+c@qn_lPk-saUR#RAjoj8FSP|B{Q?^X>Zf)#~LU(`%lL zj^$j~s5r5B7_&}AtSOYoP!-aney7o%)%*ndvr{IZ!Bk-z9j42(0`2r>!jhQONSNEq zCSv^52I_+8=9#jyG!8e(q}Jhb&uktgOl_Yjnr@&~TAnI^XZClVIjCnM<{2W{F%9oP zhMBPrXz;&Mntmn?rN0~~7*pInKCB-X11WNY(DR%=2)s9+m1=~?>bxvLvy*V5| zOHw!t{1IEYVS<%NQ+|qbb(tXw?+8wl0La%xR*cw&(W8O$X2^_&8n0AxVCFtWSFQZ{^Q2*oaxMX31H9|rh#ZFCj{p#y>|jHMEUV!oiM zutj_hXyOX$h5)#oBTLl4G6316ceco+9LGUt+{5Ouxma!wnz7oX=eYRN#P5c&?h)FT zZAW1Nx7e8vn+r~3d=$AzDSiI}a`vOQ0m=!3Ha_SbmFc|UHk45{c^or`Yv5l%M@DFr ziYTCnC5kliJkTA2iU$$E!2vu)MiboU>F|>ML;I%z4yx`u*LcLT=pcOC>_kyPy!D4& zMg;S4WMKng)Phk_oONL)Vc4PB!(q4Gh9N(r-6(pIPsZ9tYVH^1I4w}o1yu*ZS77{D z>T^I&fYFJ1z32>~z@a!sOSoEujf*OmjPPN&=IqG2$U!bXb(2fJd% zORfTJld2vwzvzvI-E&y2NCHr+12W|rkJ|X=jFzYmO9cl7)^m5rX3jX!yTEqy6Yvr5 zn$zydsW|SE4o6}o$d@^1YIN0Nfi^-@P`^W}J3s^$KbohR<)_3@LmSa;A+G${n1AJ$ z{$4BnU-PvLER}w2zFue)YvqMXqgojhj`ZVs)+%plt#SfSDO&jib10P)ARI{g5cWQ3 z3c|`bv@$|gTsI5FYltv-Sk&xj;Cie2YLYG$U0ey$^6CdoAhAlzB|U7y$ZtM9fZsGG zMimEto~#`_Z?5e>`wfyQe6agsf4yGhF_++KP7xX`CwY~3u=-9mP<_e9vbKm2_R_E? z6B~In51?Y8@`74H6jp`KdXzQGI&Z^}jDUBbP{GK7$nLSylYmu^$lho$&oD4 zr-l)akK<8yG-d=h9>Qca;0IO2A_y(Yjnmd?GNmjb$Q2J)^ITjhR0>$QP^g@&diNj^ zwgK#eCU&_sZ2G-$+<#5$F&Wo@Z2?)!7^pNF)fxa=E>s3eDr2T6)qiR3S@#|amyAQ8nbc*?kx+66lufF(L?AwZhsw+srK?pFsSiII) z{vb=NK!Ji}QqW$-%NW~I$kTUI5wVJ3nefzdRU{3(ow*>ahxJO6%dP;5O(1(KOZPcG0)Jhwqk0&oE&pkHScpM%7FRoE|q`%~8+(oRpLS`6upZxa#4^~@K zuuGM%V9nZO3>myAi+!dd@r>72Z5Zq2U*IbCdvNn^Otf(6dpwV$!c1~IijUkodEL~U9wqyF3gU+QviHa7QxO#U7B*BFbi zf#LBe!qNY^RxMYTOils;KZ-^~)F4S1CWj(a#3RGJa;yYCjBFvL4{^uiZf`_AWJ@Rm zYKIwqkrR#lJ6+oc>I)2j^};fjReCh7e69chdQpMZEM>E3(aBPtw{_@5k92}3oNI3V zWxf9L&@%1RZ#Fqj^BJtD6pta9o%su?63+W@G9dD9ZF{4>M}zyg}IXk4p3e!5J1q@g17?%75o%4VQekTd% zgTuAM7Y8+^JSSPAQsa z77`F9TzjTuf5ay1ovsm^-OWmme-6+>&U<1QS^2Bp+&3bB@VvgY1s9xWK^P4E5pxl^ zeq=`Ok3++zWu7oKNOKMR@!0>%V<18lX`<;)E>7OxaKD>Dn2=hnu6o; z$qDpTGqov4mpHO=N_%L+lNjji0;KGDR7~10#Yu{rl8x=e#n6^;B%?g)a;;%j4t|$- zBtCd>!J`KcG%h@L^37UmWE)e4c21NsegsO`=EU}c_*86$u(JJL_ir9GnKY>sIcQ*` zgioUJFl2WnP!#pv#QnAmUTcxjwx6zk}z^-|G8_S|xV{ zZSMsAaStarJ}pkPF(rh_1s^*vwzk%{H)`zE0xoo=2jyiU4Th4u=Ko?eyM^rYwulG4 z?ueFk6vFyKbV2Pa$QN%=1VZvax~RikpeRY8Mb7h4 zFvK&5M+DK!$NDr8Df=AdNwh!|OnfsmDIp~VJ+y4)RNN8&iEmUb z0%(L;)nZB^>A(W&ou}0}ZXH+Xr~*U+|EoIP-NWs%k`snTslMZ=eNiir{2>K7m#?z? zAXjk8KpT^J3pQUe12*ivAr4M8CsyFE;=U&VTSX)*)*qhK@>M1OkTQ+oi7Rx<$!0E~ z;=HU0GP!lhJgeS4gn5+AS6<)@x@kv_)J2CUfkz(O2o_OWsMb&k?w~In{6jbWw zq0?s`<6wCJx?IIyg;nvCqR2q)s^I3Wq)s;6)bD6OEC?%Cg7E&moT5at3adDFAJkI? 
zG{`$Aeh;oc}eUz-XgCU%N|8E=-D(l89x@WIr zkohfXX zx_t(F7=_S7JmI&Z=#7$9)B9&w6=#7GpP~@n@OI;1-~-he4a-ueZHYG~oyV^ljmql< zQN;Dvzkq@4)F7R9)k5+5cIt0ni=J}~b&g7YNo7(E)Cno8(y1C`qp{XIJx6 zb_^A$o{$W}K6UgA*tgPeq&0k9N|#7nhA35g@RBZCsF$3yDwS1OTmaoVj-%dqB=sCZ zzt6&U!e6kEs}dExAVIfL$)JsP7AWBNgVbGY+L_sdJ- z#qXOtNV61S4L^acCSh2GV9If&a3_ris9YD5vBC?HI|xqbaUCn4 zk<}EqGRH^>^Tvmo3TUO2jB+D306cm^v=h`vD5Q-<)Wh;}x%!~27PHhSATp=E^P9kb z+iRO!^h>UA)(GGSrf^eY`+h@0!#J@xYDf$rDJ15Vx&Y-NOP<`POf2hU#=I*m3E4ZS z)(Uwzul5d>YlWXF@Ss-s4g%1#ovi~Ajsfw6EtF($tvA=;4!5poYX0`N)^?t~SbJ8t z5@ZZL8rs0ll|9*B{+!ZA&Ok`pW_pkb2=R*V^kQrGS@Y?}YcKoe=KP>(O1=)~j$j=G zxGDi-9qDCFkaI@Yr;O@ivL0N8To>*~T6SbEqP+-bXdJ1FHc9rVnE-|JFA5^4gRq5& znZcFu5Ds~oSf5I&+|mA#+GP1(7NybNDp7#6HvnHOl^th!(mE?*CMs9U|FWvgd#X55 z3jPvH64l9URhzKP-%k*S%sQ#rY?NGnIrqV&Aso2pC%^NF2m&}Gdq&O!Qi(r*o4>C} zo-|{|$+&E|VmO|r*WL%=EFEhp!TM4|!|a5~3kdbcaL4?DQ_7ha^}Pn~Gi1Fq1y)Xr zG+djnT8%NethJ3zpWGCPtINeA?_{)6RCCA|E8rAgagG%Ii?2PYG&n_9Z__o@HoN@l5!A7>MrQ00@`7k4_PzGnhWSPXjm< zz!U3_yA&^y{6Fw@n?@IdE>MNyud&}oCSOYdP`cnqm-*ed#7`vl#v(Z{No%F3w*Bp zLgz{KC^caS!y|14D&@lsxzL#R8)>InQrSweH7NTM{8X_BT&U+^j-ePaF4BT|CFb=RUn+@8CY_;bcXF~~Br{Jcj@(6MtEqrhzfN=8Ag>=s@Jm;4 zWL2%ND$vVx8|}&^gqr+oRjpD?&2&n#7LeVw9A08Gd5v$1Br0OE-D^~u5nRY8~K+TnR-aXbQ zMAc;u|Al`(;IGP`FR={#!?w(tw+KC)nl-LKj&fyH{8czYl(R;=xlOuJr1drco zGKzgaJV8p>CuPkZc}t3G1^>7O2gu23_kVAC{V*E*HH=5&w`b=Ue-~udt@`0%egB|V zSbMU*QGfdE`R0Fr-P+#S-TQz02Zt|y`~Bq~|EIG`sClQ7yaPvOZFf()BdFi_>{38T zk5qx!azJn)&#SLL-)*kf1pVESe-CTY6`A4&o=bK-{&=O3RAa3ih4YI>t8m0G*%n%G z!IaT1MSy5bd?39gsBq1Zn71L#k3hJM6#Q>D<40-b6YX<|@1SC-|M zi(=!y&-V7vfkb)ffjA|DeImZ-qr2>QRl+N<6c+E~;%w-X&j{ljPyyQvxees5o*sLk z6;>qgl)VfH9<>IdG$?SHNduLf0*$P{v1SezhUPcwF=X(>3Fn!(@-+0q1Rp&C*mB*K zNON>;Ggok8MLG+6*q^cn4FN_=wK+)AUU;`>p|oMW`8Z#iQHT^R+Mh*4I`C0u9phpR z12iw?4=>cUKvtkVPU$xck^x98{}OW#iLFcB{8R}fH+O(Kulbt z59!y?ax|IQ1d=*53fz#t=!ADKsa!29-Rh?b>bU}n7Q|e^dI6%K*u7Db2eZnknYlt< z<}Rpa#W^Prx5c73?18Wje-q6oczFTOU^x3L3;IA#&;f1%BnB|o^c>RwTuw7sZMW$L ztGP%BmAKS=5UOI$`5SlMR;d^J*oXD+>FE%vie?gi64?$|8u93Le1n)ske_T+Z( z9W5ee=SsyA!`)W}00Wr&zrq2qeYwKYz@0yb>CBmH1)Wv5LSH{r`^ptahF&u+!Y>zw zLs)a6bD-9zkj$8=d0s2X%(gfc?ro*^)|BFmVdLgjliy?3wvw3HN>hv~t4?#jWr0-z zcD|fzi;BqQtLlbv&x)&*a~cC+mG>A4RVDtp2N)2!BsoBTP=3|B#}Z1;c7%QFwFymF zjVw@Q8R``P@0VYGc@>|(UMQ}tt}Ya5pjTH3rZnS+t!GseI%x=nX#czeLR;K%kGN%g znN=o0BGLB;__ml8EFyf;<)csv=<9Vj{}D~m0l;2nYB?S@+~^noS#XKp|LnQs?|)X~ z|GfIseKYl+=6_BKxvREBzZbNd6Q3qp^WA?2gK2eoI4el_%9KX@u-Ig6Ve&?m@6}im@7tt?$&5buThb`&QF7JjBI-XA{e#G z37%OO8EZ1T-yv7FY_m_$lVj%DwwlnkTFx_%;E}&hFLT(N>Z53 zJFzwP&?5EPcoZQC1?23bhxpb!d>gS*EoF~^*nd@(T%DiY5a)@0bc(X32mqQ*8K?7WlOF z{j{qkmaY{5Ttj`R0$G7&Z@R&ToR?@+F5Ly4N3J3_lZtfa;i|ip)ig=Rua;|gjU!(2 z1%8MYj=FRKl{AYf%Qo9VHBX|1KrwMt!UVm7LFz>tqSpiAH$F2OJVn*yj!yjeH z!m8UTnyCLTtpBfZ7ga(^CDyD}ED!rfioY2kDd^$oN<55ahf8e)KnlF71X?wahAwad zBlhJ$lcCzGtW>}cJqQgHc05KK8LHu=AH%%%7}1B;BR7B$Yz;BptChq0_TJXoVf}Sh zKdh;hruma5VFW_aaCy_*9I|v$M6kW}RHgBXm$x@^GLPK5CYOvz=2P3(WK=Ppx6daa z=`O(L`Dx%EM;E=|@!+%^w99_0Jbv?Y+3fo(<1pU!Tx~6vcr>b&z?=T?BvzMMG&UhB zaCM0*uNsY9u5r}JzpmiL_S?!y;V36z%IyHSq~XyK>S9PZNj(jpHQwQ9oqnJ$dV*!1 z>Kupa8+p465P_pRxFBsdF~1H+*3#0C;^{C5#6i>@_JVwnJ#1TM&bvqYmO@^)e z49?oMLY@N!FCMn=I$y|hpx|xpz^rau8Kt2DlbPs+ z8gn@%2RczDNycr!B)YT;({BSnT2M1KMWl(JE<1F7E(ua4?Uf50*_0aHzPC`X37en} zset^;e{=_|_ZchPHglG}*I^UXAr+8+`49g7O%zia9ySWdt+Eb*tV|wt?^RHTR6zda zKa*fHlmuu#$==J53dq0w2YDS`COx~+^M1z58#=L zj)t}IF-hvYnpCi2@|)PG?CBK8P^8&U!Jm;aDjAcUAOq_|{Ihg3lRp?=2 ztBjYm_ReTX9JulieIj&f#9qwX{ntD}$=k6djgz1xNGOw{=Oag~%%111(7D`Wby14hEYi}gDDI(j0u_RArzuvmhW&Jl7_qeoUg{!L$(Ea zEot}=oUEGX=?4vVvKnh3TOQ9cPE&{#g7c7#4e=93QaPiyPEwk1x*Au?l~Uyd{{9J< 
z!phv7K(i_k%vZ$RTxE5zdXE1J{F?L0?n`9E?aOY<$C{GqhKb3&4VC7+5A??L+7phZ z|2B6v4@;V9Gv^sIAh6!dM-B`7<8zV)Q3K@1jaUSpOQ>kwChfT0*_x!SpW3fBO_x!E zHgXQ_Yw{hbe3~mJjsoaU;%soTt5YS{4hGC$DJJ<}27=~@>CC#gww+mY4qa20&tRQR z7j%q+PA}|a)c_L4LZ^dBWKIA{m=B!=RHBFmm}ORUI=rkBNwhA{i*Cpqc(tVHut+YC zf@{QDlhw*uBC2YeyN7&+ftvp#TM>_RyXAcZ6S`;?6eBHOz#>2E38iA_TUOT}1;dWt zA}gtA7pW{rDVQG1k4_`12h~0KsYaP!gQ)81c`0cnDnByn^2BBzv1u@1{8E85N|6UW z4EjOu0^d<~|MrJuE{Swr4#wf`fp}1^mhYF>F3`Xo?qx$KEansIaH(B){P0LU#j z0fgG2Nrp!XjM{e-;OLE-Vu@B^@GSTwq3~@Gc47J9^9)>j!r0!#hpcS@ zLrW_6s49D*2cDH($bzhZ!!Jgb<_Q{%9HA}=akL;545fil)V6LjQwvv&HWk3vddkoI zQsReJM*RUPiN+BsrS{Q=Ak~VGf_iA17JG&-udokYG7S$<{GPHAs^8~h#+_&+7o2Xi zFJg4zF3!4PJ32Fuh!VEA&`=ldjN^ce0fs?)1+_a!K!UY$H=ygN{&O63h%|$E5(1!x z&M{KY#ujWOj`Gm8LfOm9``3FqUm*^qqNI#oK!VmhPVi9S+sLsYr_n6^h6SK1jx<_^ zGGbMHU}J8mk#2$X3SvoDWhuB<3!*BPMfJX@KJ-2q9TguHF`o0sS|)^r)e1?BXT;Ko!^{R+b7nrTjkZ zfZc6~PS%aGaV}f*K3pQn&)y`kG!m9Pz`1xE>2*FEb==r+>bC&qCw3-->*kU4i5`0dqS1?0N^BS4Y31HQYML| ziD`b6!F|VO$j6$n(fARj&mskUITXQhLN-G2S*1^g_VCFVG^rc+S#t_5FTG>G^~QbN zhj878eaeLw`zgq zLJEKGcB3iA9Y?k)lU8EvJ*MhvqBic)qz0Z0g8^xayy;crhsrMdcqf?f>d$XpFMLy}cq=gW zJrBiv@F%VmOb2s?Jfx2VeRwNjk3cr&%Z+!{2R}4EG>T=mB@TIzWmARJSGh(5#XsM| zr>G*@0bygsHZA+vzkxcvN^fbhsc zQ5-b8F+Ko%1yQeI52BRgDe8;l{=b|D{lJcq0t;+UBDvsb4tnhvDR{I&Fwd3~C~{Hc z9_NsiNcauLr$!}^`&h%@C!^DO%J{1Kx~Q7c#RpESqAPwu|?*|3D+5Dt}-oH6+{!e?VYiPVw*vlYcL#LrHWxf-d%^a9x` zrAmC?>iexz7?k0Ki`|WPir?^AIXU%^7r!Cds+VuC;eJL(MXV?^sSuH$XDMtEZ+rQ7 z{PBUVdyo0!F<!m=6W%Xz!}an;t1nxISEd|53cg8{B{-i0j;e6#s0(9Y{h}ggA{2Zz4AO z7;EZ5;(j3gB++fVY#I7tOgyTV_8rLH7w}~(9CK19;>ZG8ScD~Z9JJe{k_AAp-=%ic zB4(RqIFLKC$Se!(1VVZh8Vncr}h;JWc_Gbmjt^6E5p)o`j5!M#FY<`2T0`-PhYVu6)s) zKZB>}5xpZq0YOrh9GkX0fk;S1OcFEz>N1wYG=L`A5rBrf8X_Irv7FVIOAsh=LhvS zb!LtfrLuhr@9u(NfA7u1dv9t7`d9Gg%X{1NWsSKFIIld!S73~vb5!i|CtYA(-t*svUoCA%0AToDoux=5+4nN~;x;&7g-2Jq5Q?(wqRYshF%rGwx=exX-p(TBW9 zu8p7vN;vkk!DJ&@16?cR#0^Ynhsnuo&<=_|gj8Nl8ZkA@*i&^*^^T;sagq{7e%uR! 
zy=Q(d34C%`lm~FOd;6takb5JK0GQ_9K7LT`C&o9~Lkz!F_=T~-pSS8V{3ly`TlV7z z=3{w(Pkw@+=?)hyvo1PejS&a;B);Z@lA@+agd-P;P=jRPQwnpZfKmRiND86B5sfQL z7U>8dw*xleSlG}xWkE@*vaHYrbHhTqGJI}o3I|M!izlL5=fGs{nG|E7$OdCRs&kq` zAur-It8kl2GJ?kT(|FmR`g>?*Q=BIZ8_CkQA_3Nk$3zwxkRB3;)i4SUy8W#zzeZMr zO{rIwr=or>!Jz!x9_T{9mjiLZvhxLHH~+pw0{jG#D8In9*BHUy^Tcffte9>~Lx_{c z1oJ+Z7r6i%_f_S6BUnVvS3^>2IKxH7Rtyj^y^=L`s*Hu@4x=~ZT9P){N4zDy}V!h{on3;$ob9tdJ8dn7B7m{l>4yd zXlUgznTKK6UlhL7`{*MI8kiBRDrMk2sO9D)%XcFB4xRPRDRaKrPLws=E4l!f|GClfUN$(n_v0In+JiZP>#$n8dC_6kj*^;MXCh~ z#i1U)pC}{cS0^*Yt?5|Nda+42I@y_wdv8HYds}-n^gZCPj@^Yhte}Kby;K6!B&v}fw5cN0gMPj~)`R~>CD=pxksF&>3+};8pk@~Ie-_?@pmeZ07^nBX zEg&n<>e7|V$i4${bAR5o4XKPU zdiy?1$ELza3`*-Y)v+(NT^y{YN~`YyX#txhpm;>Bxj2^eOyb*36)+{+%>0c_%V$*- z(8qMj&Z96fCXXP1{~G-Jo&f{Ly`v%^LoN9I%g}wwd|Ndr{<&-%LIGV_)PE;H%y&zA zt)OM)NLEqDfA?l0Z99W>caZax|6*Y%%an;{c)B;}m^xY@eP-T}1VVm8Ra2geSb=-ql6V}KD+fQPy`r* z`j5jrOp^p|{j{M4YGVole%5()+}=}<&*&{Efhzd2hi{CrUeWvDIto(-s(9G^YKPqq zw%v1l`w5{KxSwWGG@~N5D9-2?Pwv;Yorl;!hu6w4Xy^MgeuYlB?K%FDjLh$tf@A)e zB%^Q=nLjZAmwvymsDK1Hqy@P-6W1NYn|nsA5q0ApVFNyJM#hU-n$LBV;T2^D`ry|G zI!^pmhl#)2QQ}`RNV?o^Ux2-RV zU)n`fkm(PGynpyYe*43ve8IGa&*>NaSn2+UFXT7hL+R)y>;!+0mWaP;a&+({_*?Tn9=bk-Vvd*}I!YT{)lj<3ZiB=toZV+7~*IkGC-H<#0fM z3b+2z+WL?4pY@lWt#5vQ@aOG+8`iR4|6I^+{`{*RSE~a3`Yp`21}o-X^nd&$(`{_V9=e9ZsMJnGL-|MFV$A8|h^by$AX z{nR`z*CIb9f0!@j-r_Ii5B~Ctic?&>S2Ms=4*R9u^yZ#b1eN*1ZAwwVWbN`JY`&+z z@glo9*5)Q{RSLVSBfB`-;%CU`C06%8DT{bvJ?8hf3%mg?9T_t`cJFWBI)-q5RZ;`0 z8)Hwc(;of3_u$ErX8%#y?8jav)~g+wG7ym>;TXuVH-B@+cfZDFHFHaVut1IrD&QFe zrxOfZU>|?BNF%Y!=>u!uM*o_j;dNG199-92;>xl#JcfK=!-`!eMhLVo(>jS%s-K$; ziWw8{+?VZlR(<<~P0ups-eF1JW*>`ux{jGp{Z@9}wjY#jXCMGuvl8aQ5azpouHB%Y z`(-}BUM{s%sd@ENja6z69mf~m0u*~$j<$G>`hx`QOjj8%|~)Kl86yI}fLr zQvSptDSnjY;dBn7$6}UaXdd*Bp9(%jxg8T&#r(m0mdPgl;2kHw?pa$h#Zjp>)qY$Y z2!q3XQTP}uFZ?Yiq?_^*Q5GLQNk$v(7`9f~C~Lke7-h}pYa3+?Z&Vp&D;~MAiFJ&! 
zb^`w+jk4Rcj$2p7)vrm$^4@;^7W|2!QC(k)LHSz7rB=K`yvniv7w<~n{^O><7mFiNC(q&;SI7dA|RBMFGQJ= z8^OAQ!4gx6!-Bn2@XfU=wi;nG&oE;^Md2VU{`d5 zT$q%vsqx)zMd+#8YhqxtpicpVC4?uSDkT->7H$O>h~z4ZlBRLNSn7$A&bbLrJRh_2 znuD|^N$LWtNJAV4f77>{@$5dWr@T&)IpU>FxpWA|HGhi}R)|#$GRKGkPCyQfGzptT zNAWz3=Jz$2iGv9^gk55CXUXF7YR~I!o@DvfJWVkENLnG+f-_RuG~pPnW?HI~YWgH| zBsx8yP{M~%CO*8D9jjpdbsoGYd!>+ za}Csnovm4+RCoZT;uPKDV3&z_(F> z7oO*oB7;2#%=@6zYXopbopUt>K_b8ph+|6=48RKS8wR1gSdn`EhTBW(U~scmVa~Sy z${z(!f^F21zvJD*Q=S!a%pbFa`u~`@!U^!!9agE^Zlcu`1f)#!!n}$65Rj53(R;!GRXe zqK>g#Q!&~?HaUX}m@p}SC&*(AS+Eg|7t`tWeacp>cuuNy(2sxCTnIY+DA)*`;M$s7 zlXFL%0PrQjvnUd}|2Ku2)@mkgzJTpt#wnl&sQ40|1Utu@68udP)`9_Munj@+wpeNn zo3V&h%L_QVO`FIischsuMP`${$G|Z@j|9cb5sIY_np7kj=#?*KARkSx8z}D`bo=6u zfhi#jc;>u96~-uV@oh8*X${&%5G5ykk1{(8nC*mpJ;d--79W{p#2UZF5knKg$EkoB zAqS1)Y!s$rMuTE?0MX_MB=`W|HHQyVouZ2ioa{R+C_kDc8K-?(N}_ztg(QcX6bDak zrcoWIz>)Nb)kB10`Mx3$lCC6Q%hQEqodyP%l85V9@dy;@-`&#g>K}2@ zI1xcLvoJ*WzF^f`@QBX|N0Qvmu+Fo@Ec5N%9~BUw8QWqjhO@$7am%*Q!GJl!f-iHg z_P;;wU#$;39+8uhiz4Pcn)^;$`#-dvw}W?IylD6Oo$kp;_fx~D_=^M!1=AK{7c-YU z;Pn~r)oNSt_XzWaM)+;8NxsE;VEzF`*ylQ$FW})MD?d{4aFUf8X1Kk&`z(Ltv|M~9 zsCb&1=EQ-6?yJ|&+b4l$1~VD);lDY_=du^!Axc0ue1L7{D*t>IzrSX?ChWL=cc3%aVA(6UT69WY?$B+^LtQ-meHEHn8hQlO?^F&f>O(v z1JwT~JgxAAb2wkI7P=@tecEg9cW~$98@-d~pjsrTj#Tdod;=cBXOCKVcz(a+A^!HC zaDMOAe)n7dYs1Y79DR{)yI~`D+Xq%T{_4@r@BUx>Csoe+kN<_-N*3x-13neXT+2e%+}?s_TbMTox8#NW}v zO*Jy+Ag*C)@c_O+M22`8ZILr#FZk}^&hF!_oj+~u?gYDk+I#fX-osrG>xcGcwitq}zLgAdv>Cs|0@iVBzt=qng?#g;>7$?BZ~DD``WSI;aU{(v z?`ugzHF&7Xj0|6GT+u;JtW^YfGL zptIlJ{Ha#k`}BXNR0}Cfr5m3uAzxsztUKuqry(8ycC}E|o)ED?VRlEYljocJfBa*e zSG@cDeE-!e{JneHJ~=)$pY~cON1dm&jn*rC0D1Z;Pz(g&1N?J$cXO6RA975B#kwU) zdJPS}I&Z%k6gr1xNy9WoI0bC$`TqX-i&pQvH5l|dPtOMJ&2T~(pv6IpYgRxsalH@Q z`#+fFreKe}c~vieHJzR#glRIW{+7PlUin1jSEyQsv+A!v)W?9v$}cacbMNzm_Wlv< zx>=rFO(s7#e!@M8|Nm)24PM`c={(#9!5HTF{m(y_-yKF@J%ZQQd)X%~-r5K&Kv+Tx zlGn0^)S8;kf#9jl)`~#|9`OPUUZ2AHcmAV&J}`ftbxsCfJ}i9j)uZ$3U2q*-EM_CA zNE*INQTJ3HA20$pP4>oiI(#^E)Qz&rr^8or*+fp+KT@Dn2! z~!qB1n zW|CgkD<%l@N@0fh<0I#HeEO{SFP$Kc9jCX=^TU{26r3Gp7K5NCZ2N)laSyb5FsqZ+ z(<9*CDE|Tp;8RfMPl0#!Koi{jX={rGfD%!-%q~yPf;as zB>#k=>K6y?r)SS`(nkD#GD`@L=0A>);n#09LB~q+nx>7zG5L>tzMSg^4A$QQo*OYLMmL}+fgcf_u+PQa>n2h>G!_}<)=U?`gc2W;SAQM7M_rg zq!_+hNi>#s>FfXLDIKm|+%27954-FmOm|=X`IJCRrwO5#3bk!U=>IqqWB$I5PU1hmB8E zjF~SR8ZrL5Y`ptPI*Zaf8}3QFd-CqK(uPIy6gKF%fQ3rh8oU*JhxKRFjS+5v;b%o^H4bWS)Tidd`P%zBGi(6lBcEefW9D;P0{ zW@a&N|6KnzFhq0fAO9!s1rixxKh$f#C;GLqM^8ZP}64&tS9im~KivI6U|@hQ$xAKdgi9+0!+K zC6777;!U%`Vc`HiK*7J$n1$qDkM);Oi%8IGZd!Jo@_AX}i0((6pkT++qvTper|EiU7e&wjZ&;t2CKnA|2IPr<%`LQLVga zBYN;gR;W*)*o7>Rn->r|a#Z^|8cc6)!db*m|L2>=$H|B5OVGw; zd)6pvg+TUaxp?w9kmnu+`{3gob19iUwGFbQZ}_J4LaTv%e;bSXhPr|z?gHlS1qI2+ z_i7sy-?L$Ta+Cl17|H^=^=ON|YtVYV^o?6E zv;3`J3v=1`eu2yKQx#<^>7nk~;PhSefZU%9zTBg=;5QUez&vx_^U7fZ~V`1@~>i>p^p{# zOuN@Q3Qo_S9(DEu_<#GP->y|WjsIRmR4hGs*a-fzm_-3-A-lC2DBAPul<vSR5a^47@#TV#%nIuWt z2%aWcj?W*rf}MxEyE|LEU+(M%XZ=x2_8=TSP1^PGdfM=%^=aERJE#I$~R zb_iW#4VZb1UV}tJk|%HmXIW&JE$2R(Tq4dqBD80`aiVdvwv0IR?-V46DH^G2j0r8x zvs^WW$po2_1OGpcY`NJSWvTFecul_O+67Ppby!UJyrSnZ8iG(B5P~fTo>GWBLM-K! zd&TQXs_rB>U4RffsvXJfVurZY==#e=2vpBuamej`Q&;$1O~<^orLpxU@yJXEBgJsJ zDFb9Op(uPlLF{Hj!5PgaoLx()QVA=< zG+1nTkzXY#nd~vSHBV}sC`Z)W>|=f|zqV|fdXpO^u!ks(wWh>A0u|7h7fKV8lw!oF ze4Q*P{Q>8C9q?GL#VL{uFJra92YAy}JK^iNqoir%ly z)0pZ*cNfb>mX)@iyQHA+z|d-x>q{BVc5^uAiH}ife8Hw{o%E7+LXFNC)`h^U4nzxR zI(Q#vSM`SJ3g$A35WUi|++(x`5)33G%u>`B5xFeRomcT0*{I%Tgx9GAb&Mq+aFp1{ zj54cfyh`_PCCf*eqbz4_cT9LeR6d)LpYcdj;3K8()A|wD%V<@+)DyZtVvWhb=7AW) zcwB?Mn#`o`!;DV4biyqKY@@=u$n;f0)iWY8i{J$LsV2(6S+N%^b45Uo$O5C3pxxk! 
zIef`LtFf>lY?&!qt6Wi+k_{Y@kbjztvAR6sKLRP9IVCjoiOS}(#po&smE;YD0H|G} zen2xOmI0DHG(`QtRUKT$vj0$~f~F9p0ojVB0?`$Z5#^;f`IOPSh3$-asdT^^W~({d z3zI>eod@l-U`v>BWLK~ysQj612DoXE1#z}3Z5Ya9t?AO#D3!Hhmk5-?dsx#vnrC~# z=5C$tD!w{>s-qUY`LGW3gL5w1#JQ~)tO{wA(U+6x5{^0Df?}fMYp~(2FtoW%w-7C( z+aHa(HOZ2Og*|5OiMSR#6>OD&Efy(nc#9*52|v8yYDDlzZ)S@XkQIu$&k}e)#Y;%4 zlV8dGVeT4Oh|WdnilM>Bg3=Bg9D5-h1cx>cGtBoydEtFj6K;{YI|zm=OBN!+hinc8 zHjsNBdrV+S;u)fWWC>3R?-(s;k5?&2KhrSi4#{k37hug~gHC2%TQ#|k?jx-Hf|WYn zF3<35%>TnqT=vBIQk0;Ej2doxDn|fU2+dyZ#yNhSOA0meR-3#`hk;ra{wglPtI{L(lAfg5X^5z~Y{ zG2=Snw0Vf)({N0=-BiRXP&&(m`1lQt2sIXr7T#L-McP0v&mbZ8xsZglrix&!mFdal zm{OP_b4~WV&#z$y4a>e{uh)3Djns~T65tZ~?`Bzg(C!`ggVxDGV61)p;90j9h%VL$ z4my2~K1QD;K6u`kIPOowHf_euIz*o30t)k35bUc>~C zO`DfPA(aPRB@?`9KsgrKS8xO@6B*YQ<}zY}svuo0Ye;F7d#60Udmvs%K*dIl1Q;`X zx+9!9;OGScBDhCW8HRkcsb_7iHjUuY6L=H%8^%1ihD$b$-!V2ewIU7%GvP`2evc1$ zoC&lFbj!~Q*%VsEZ8<_xNCb>Li|PEsbqcNPRzZl6`iHd(re1VSUjUi1<&lf(cuVlul;mT0h=?`n-!1vhEt ze&z}_xV)~UjWRSr4qXRRxDpoAhQ>x)VYr5*na86=vdAXfJE3g=W2bcWV2%epLl7nw z7-W#{Q#E_ig!9NGMw2j}0##tpDh7NTyp5tc9tqq5BI?!nwM?%s6ov@L)8&M$1?F6V ziJ(3cDywOmuHiEzQCeNmkqUg$QA*SN+AuchrxsEPRfC5mPs37~RbsV=Bv}&yHNU#f z;BZW2U-MyB-Hm%sBL>a@f$55g-|x2IXX+EfIE#N#D9tIWh!O>P^{QQVbbp(*Ye* z4m3v9OEw-qC8ENxG!ELENC4BULoduz;FBeKuJ}M1+KXx`V+C1LGK|PcP_%H>X*HoGEjrRMC&eIsmg`%MBvt=*PRAQVw56pY;Q`( zaMMyc32BK^9ueK3aYmSap;RQE&634T%0clZ%&)gA2g?C}gWND)l@5uPPA zyv_E9QAF%&Pl|-wD?87k$%WbfiRa>SQc7(G8=R)2`ma6 zfh<9P7Fl&sGD=B6v|R2I&#XhLMc z4-f)quxu27=e`YKm99u{uw9XMQ;R-CDXYS2L=z01JWVE*#CG(jBn4@1GGU+}*?(1% z0L-~FL(L*)PtOz?U3htkRMKWe<>7fymCTBUYDH0t_#-ExFBe4W_;(n*OC}4#D+Q%D z%aatd&zYs$`LXD0nb0s*_UDX_xhCytR8lH0xi4>))QeLnjvh5gzDrg5c}VzpoReB+4a) z679=$4@!OCExgM^4bygX5UruCr`EA4+#m?@RpU>G7FZT<1g$00FpXjVFVsk64TgB< zurT&5kg#C{vhvJAbDl`zZUkJBmjr8tIYnC>&x?v0k+fj63X+eAAxM4cPz9(NdbZdY z%{i(p7m2e=6$HHeqEtm~A)LEk8$oDjPc0NF?p)Eu#qrjG|`*y;aJ7_k@nWN9W*Qj^yQhhU}alz-%$znqAct&Xf z#cs05NHGbsEE&aW4}&9srQ98WSV0%1=a~;9%1EYA&^5ikoXjj1dz@r$oxYtB%<>Qx z{v8q>WxJYIg^g5}ZItHY9v5==;jJO&IP~n1qtIA-j7;mkwuxFhTMMOSEQ?4A2$l&u*kY4~?Z3rl7soo;H1SR;4~+c=uAXp$itugeT;2(HIED<{IUjR_gAKYvt|fU1kx zgn?ed+;&wHsXdev8cVpRLp`|UFG|va(>jV+&Iv%zjhM-3qr>=WAtSwMzy?Xjp}3`K zH;|QDq*{F|Kye*dGmf&*Kxy}>xJ|++H5L%|Jqo0?DzVbzlIXBN`FL@8JhCDKm;XjH5F2Yi`y&%Ni?6QG;)xP9-l({F5tS7vLF$v- z^vca0Nd+C7XkNmCe|e2WocAOQCQG|t-aE2SbTRQ3B)1WRd*^bO;UyB|I(jb9iec5v zF;x1Z1Wo=S!&}*6MZ{2#JFPBW5GtTK7D3~R(;$X8wShQhV03b5l~B7uR;&X6br5}C z@V|@2gc-{u4re1?y2re9s@=J|Cho|2UQofZ7)_Gur>31Fkr%{k&GbbpfxDZik=US!a;>MRHZB>f8A9)j0K&2623Q%e9;rj^Wk>jZb{Hh zJ-lWg8Cfel`MO}b-xTxWQWJZfO*`m&9GP9i{fA=G_0~8i`4v6ZXm8YjqucoSc*JJ- zwE__f8jeIgeAQ$x3gWnkd~%bdzhcI@8{bekK(hH6r=RDn9Id84W%-7KM)(l>mPE(b zuj7K#O{^$K&vvxLFRIZL!k%Kd7{qRQu}H~V<;9+e3T92}_XDGA3U3t#K>Hdj@NxlEaN@9tDN$PtBln!S^~@Q{adizX09=ptS#eF}@^A z9?K|>a?0-K8VI6z;Y9k-g{-G~571l_Y_j8d8cXP*xUS*2E~2d7s5!fkWMQHsZ3Nsq zn_3Nn&~9;5Ao!6$0@|{6Y&msZuW1bWgJY75pl`Y;oDo4}=d+C01)4swUvo1qf0v^# zBoqo?cP;vf2;&(B@WA%U;^|@n$24MB5xXzo4!IPu(sKQp>$Gv=_99qIGQK;n$<1F{ zM3lUBV=g&+5&=*SQVMl-O(2b_4#gp)L7H5HmT&p9|lcmJFW@*}Fha7^pS^=eFXPL0a9z_Dho zBi$HH{5?AGQ}RG!pkHZ7(YU6&omiKsBovq^{5+Yly*neuNl`5$M`a5^SfZD{6&vbe zZk)XoPqjTxX1oAnxP8VHWJ*z2L3YLH?lHMFYQC+&j8(^HnUM^VgHKGfgs?%m37HX7 z0teZ6{Zr#^Q$Zq6s29RV$5^NLq9YE04ADD20K=$!Tk)-$OBP^b&WvP$DQE&UO9c{dJyV#c+VEL1$oL5yV04gV#Lt}}!ZAnCbTKcA?{S!nOCbids^2sz3}$Ie zI#NnQFVg5;oH2erE;)ubzvC2*tR`!}BsiT#02E^JG{H{<`p3DsZP3}leF+zQjJzGj zp3UQw0@&5aL`k9Y1}9J82(erympmLtIYPiAhv#S`>cYfQvXcWgA_YT|Y%Pu9QlWJf zt&O;Zu*M6RG_pl~Vn+G>Hd0hoW3uI3kgi^MwxmZer*xv}pprJ=_kpitSLEy901VM7XXZLBU0KZ30*l z74!8KR86Z#Q-aZ5W*l^r!2)j1s`DwKjxC=ia>uN#k=iTBPs!3CL`cN#f}O=Syv 
zJ=7$coWW0DlP=ad=(SD;uW3{4HiM__{nlB(9SjcJ0cLUcTE{`BuW_;m z!Lwew9dw@s`-iRG^L7KD>$UL-w=ImCbsB)Dy7YJZ)u4Sc2u|C*tgdSJ1`!sqyaBdz`t9C}&OXwi)@z@(IzWDmn(p!-9bzL2!uf7;&f z;4kn!Y&}@(Bc=+Rq94z272yMEBxu2M;?!^-%97z3#JzT+dj$F}p36%q~&C-4mzP5%rITjzuprH1pR)7-g=+tzZ=b?C}tPEvBepSR|a|KX@D^!IS zI5ixxd3&l4Qxu>nm@2Lqt(!{Weon0_w9dVYuEn(lQhp}#oDE5)NFP&sHM=5%3rT<) z6hOlMJtH^Vf{`G>;0`si1b#O#bldL z#&MQ;bx>$kZnBNv&O8Cr#PltfsBF3gXQOK;N7iXZt!(#-Gm?Fxf` za-p+IXK<`wIU5{R0-BFr_{f;_(*M@vX^2OW%)k>0EY!fyuJAx1QN;7Ygd~7mf@awLomw8-zeery_C=$$YoGSLf6}eu7hZT{UBWn41!va zj4NYkQr+!ax4Y4OcOXrUaJjmkqee*nFdG}LbL z6ZVPBIPM*t(wJ^%t#~$3=bp`HQ3)izsLe3DKI0_MLy2b(G2CK4fop?;t>_&p@k_h= zb#k4IuV;~*baDd@uT4iBMq~#-CnTa`fo|u}(BGVWejgoa6j}(UG2<-YERcW}jF-vk z#`*@G{RKx89EPK}QA$kkJB~QPd~w(%gKIcx$?W?^unW>!8czt&1jQEqqJbD6S*)30 zFJK3W1$YU6GzJ;*Fj->=?jLu3$qZ6+l4ot59F+%CmlYx6^)OA)mmO(U3}elyX&5=> zaU$dA+lt(jJQCoo6a?n>=vd%06TVWD#?{)yOyRvolWHdT7(_bFvnk_a)T;3@DvY52 zYOI|9iT^DA6Jg+emOik^+n*jU!+)ZGSpRM3@#BXNzk2i#@t+>=KKfno__K_r@bCX{ z{HMbDD+c17_HiTr*OxnA{%Lo&fdBQU-9J70zxZFj#lMZtb*g5UT1|)oZN&oZV-m8f6k1Yj&rPh6B!5s9D2Yv;b2UIN4cNk660KwgWMh#le#Z8Q$4-vWr zmvqSp_uA#nJ*u_`QUY9~<0es#7 z5W2hw2?~X`VPtDw6Eu~!^v5Mly}-Xrv@S(b2ictbt0EK*Ex1FJzsL0SUlvIY-*3LT z7rePwhud}I<1rV@AdI%qV;V80fe-#Eg&*#=Q{B3Gco|q%ADMALwxZ~NlZQY8rIwD% zRFD)bNZk9-m;f{pyaz3<0y;t;XM<;3Upwh)6Ndg-(i846EXhvck!OH1tBX&lN5yxlAQqm!sZt7BxfHPJTs~rVVsi5c^pl$1GuHwW(z7* zrdr#!HjjsE9E6peFC1_PT#* z?+?yTTE}f#FBV(*^ik~#&WLh@9`NIgfUnRpz(%1BVG>)solW4nYN3qmhU9!7|07Jt z5-TUCLXtI!g+Wuojff03Hr{7#=zM0_cy#^Kb|Y;&mc#?zNVs|8d_uz}^P+hN)R0Tb z4o7W2xQO0s5D^7LEGO{swVo-UvPVDr?@4eE3y+mb_BQlEApI$l=5k zN*#%vK;7sB)3+hI>JYi(Y|Ke9gn4URjdqeg%@`^Sbz?PLP-~o?p|L+qKX+<8W;?ar z<_^y5EHgpG^5oNAr~Qm3r-o;(=oc!d0rkrmQTbAvbA7EF@MgqC1L;kX=NWWZwRvMgvms4a>w2h zlCvpzwDZkZXpTUUKu3eOGA2Adgkisf#jiWwf#MRIlcz{NEL{fw`N6?O67RHW9MVUPL4@n(2kX)dIP|6obMz2*UW2vIZ{|-X;r@mg)X4 z4obug%0P0~h|Y)2z#%9L9GfJ7QedQskkZSVT~J!xbjjs$(uGAYETiSDC*8tv!P3iT zh|7M%X;EA%oQy&grCnN+ywA5jiR@W;BYa!3{&hE zkLqj;%s-=1bYR@{%U{BINFz7DZJ{=%f3P7>M>j>I?WK(C2M|=T1^!8I@vqC?368@2 z9d&vH4=lo&jO!S+j)ZA@lBDmWOZru28En$5;q}vmv29M_`deHsXwbc6%IyTbMV3i} z{U{yMXY$L|{&kA6ys(P>N%Agy%ReKuCimyE5dHV)gMeQKi}Wr2f`Y_bjEs*%P78ez zM>!d!qvfYaW^MVWIVa{Q$s-Q)qznA~xQ)`x`9ZtCU*?}a=hWKR?8}`-D1_m;vO+>8 z`YPSA^t~v5DDtB4ZvB(Q9mw@Q3A3zpJU2Cj69)2^@LdSs)=_1kk)3oUlbWLXDI9Gn zXJGJ`%5t~@8Ca;79|h`i6Mulxtbn5xu((r46V0{ zlpc*-313KQG+h(Qj~di3i!h6~&_4nv;u1xG(zrmf8KQ{Kg|Ap1g!YuhQ&u?p=|2!> zeFrQCBmE~FYnw@?8(z8y1}Vrg_$|4OC~yH~K-Rn`Q8?!MNFR&@SsGHqkP2G`>DvLt z5!G2mN+2Fkt04zK>bI&wzzHET6uGrz92BOH_Yg{x&}1{{>OI@29+cC<76xL%HnCEc zuDT6Ru@?-#wfTkWg~j|mF~pc0VDZ+99F7`keqGvbQ_!Jq|7 z{=s>_y-y7kk2F()&Xserb1J{%A8>oK+k>h}6XY0W=qPq0Qe=Ku#Ryi&K2_s(WFMJb z=j5n!(mwB;9JYHH)JDg?O5XKmKrUSte&QL7lI-y*uOf#{5%mFLIEIhZNHE zeiccpiB%-&v$Lb4^V8PgkZctd%ENMYPa#pFeF-~f@tIGSbHS0pN^B|=l>=M{W5)4m zI=Pv>(dgWgmW>`{(#L24*h_4hq&^n2_?JcGDNifdc5%98wjBi+26}q_tk*rJC#%dd z#0O#HAmG{-fnG#o1C~XyE{q%3r%w!%II*0LG1b1fa4~$85sZavBkBSOrjfGfi+t;f zHL6kQxpvu z&i9>@{g2)_w8jWQjQ313vw~uYD}{RTh|L2eicGHHkdwHWZq8YgoDPy+DSc|pQH}&| z$;}n+QQorUThMK^0JzNT&jEr_)1kf5Y45YS$y|fio@HA{$vecz&K7TwA>+jiqX8o% zCh0h~I#j~SUpliig=@2a*zFCLalpos3vr5xSIntWtV5QQp(o1%MRuxNR+Wh{+P7cC z5-8-@2r>#hncc_6v^Ex@T~I3=n+8NZ=)$l-cO!6({evx-+=}bKouVhOde^~US}$5C zVYki(-DUEB*_cEDKX5hVDt3|ggsi!n3)sjL$EPw*@;dKdQE0ZZ{CW~u*Il+um+)^2 z77P70ghxf0=#T~eU>Oh6%1TT^s>E#pLyuOmwRfLB2s!9E3DXDKmXpe;DJ&tc4 zpB)W4BpB_Vo}PF5j+QHD)1|ot(<|9?G@6nl0g)5pEu!`QAO9FA3YF4yI7votgUz@Z zHE|yF7f^qD``_@qZf|cJy3`TXXrpqn%rW%l7^Ig|BOmQxt-e?jguM>~PR8K0101}#DT&UWbfI^2_Zin5^K**Gn zf@SZ7WU*zikx_Cvi~kWZ4h06tt>Al07QCcckd%B5$FxcYro#u@19WG(hm8uMIz=YD 
z&%dY{9t%qvl-|>*^fcPCHiA`V2XCAeW(U2`+NUi{xKX>vDJ2!HnSsC~%tL$>7P1YP*=X1<5SDG>du+;t=}U=;gg& zw_&_*`pH)ff0}#2BMhn5@%#Ut1dlu`Q*mY{iP+8!ZO)mBy#&_pVRu*i8Cx@c&!AqQ z{R!h;aLu03I3-p_@LC8$c6JFL-ovmQ&f9tNXIS~5k1pRfeURpOq;6t8Ge=ne^|fGs^&YzV_~d*i&fEv$>=*-xCeiM3NSR}IF8o~{0% zV)}hKhuzab=NR_76gNJ9*6p3|moJ~xgaZf!3E4~b#m`61LLfxJbw!NMi0y6`N-0TJY1jZ|Oh)o>!jKk?Aj(H8 zm|Zn>46|ECL=-;Pr6Eew0Zse#I=`YyibT>+BWO({moWLxRLglL(gG9LTmhCeVh3e2 z1J-m$M^=M^IN+eMxdgGD<&8;_yme6pZck96)C~khg2Vp@z1GRP1cgaruosHaOv&32 z(c6&H(z;V}$GUotyf{2KT6aqL>>bPpo4Mus&?xO>wBw8t!PF9!X4a;`ApDf>!>ozm z;xl8`ReNQevuu*#d|sMtI3$SLuGmg17mMEu>na3!f?*@tg{90D3E314xE0ToHM7Y} z+s817G*-}I0=1`6sXap@4+mFH1S>xIL{F4NHU#7r!Vbx(V20C>m<$^hkhvyEo|02r z+ldF*3pxi!4Pp-bUse*x-M-NX_8X{IG=hUhaC&&!2)YDR2;|s;Z)gw-*&sj zomF_~L&Yz2JI;V8qOV`uY|^aSNaGmyCsA&80J)V+3Z zrdoB6au>YmM@`u1*yN@|gZj776b2Jx8M5Mc!GHd&Gp&pJng4xFdKk-Pp?i+>!C(gefv zK@)_FZ+ri!6WB|*Vr?{b(HIp`OrWz#*Dis+^R>GwK=AbR9C|Bk(2{i5@0@z9eGQ>5 zi|4^+7DcGpJIM%G04W`Yv$sf6!d`BUlIG&A%}#(9x2djfy;@gL^^xQY9Nm=d#O%o- zgCrgeRxyF~O(lX*)|0A%bN;DV7V*?0^c5mx`8$S!L1&4zDLX~?RA1&4BUCF^S(Y3c zR#=izMsW8*2jyoHzwlhCF{Ur~s13{(_&tF@?d)5GxE>7dfnXgsIX1EbwyhPu=X(}uWW$QY%V zOBp2swMELtGqP|Zle)%=p-wikLcKCLBj>rB%wz)$VEUO|Q@u9?6W+MtoUP{euwv(@ zJ;Ws%ymnUFJ@8__yiAjYdhnebl7hig1E4I5)cNyX_v{ocwZfcNnqC>UZAlD@_3#+G zGp{QSkKhA_lUVbVJVDq3)b!(6SY5uwr=bxPmDj1mmK4y2gPGO}v>U zPAe*KuG-lyS*u(GFRuj8MgoNB;r0RCEG+{2s@SSV3gqA=4qaNzI7N`-EioVoR{?cc zyDejn#gUW>v`a3J)tpsTz#H3~KkHZ`d0nubkfF;a^X_Lc<^$Qc+DQifs!fJeUG}`AK;SSZ|xtp$=7I8O6dhB2mxeg(z&=@1$t&v zfPuM_vhaN>pyOP1#EnskK=T*ZH>65XM5v2Aa+0oO+?wf0Aq>R15RKQm{kY_l%1~H{ zoT^UHs2OT^OGZ4_Asg^@Gq*eeL366fQI0q^j0hns!%A~$^?MVn6o|z#rx<>SVse;C z=5b``M+;(5vAGfoSLa?#!oA?(UpIHZ`eSJk>TK-g8V&^3kC$T>>@=I-G`vs{{IF55 z#Gnx6b`R8e_}AU9o_zKA@t0qrbfVN~u`CwR8ek^mq9l}344R$#A~Q+0i_6Q1L{gGC zLHeGOA;a^E+fq1aEV-?rae2Ku$oqe07;Kt!5xuwsBNsDiFXwSyP=v@ny>U%F*gDR*kl`|AUirS{SWe{rpP3wOm1mG#?x;3v~vT zQQ&Ub+lX+vu0O5eIx+aRqRIhUcxd? 
[GIT binary patch: base85-encoded literal data (zlib-compressed, not human-readable); blob omitted]
z%kQV~QTSsr^zs3{#pJz>w`b6bmXtp@HnvLaYO6$fzpz${K)R8DRavCu7d+bB!^mAL zgphpUS&Lj~)N<0}qG-GsOcO?NELqKlAxcq_mQ%l;U6E&Qz|fz5!YnI-%9=hp?~q4h zgVG!zGZ*kBCs<$;5==1RgkA?Ki6MzixN@9Z(3JihYfdL78E=i2W@*kNCf*hiIPLX_ zBt@26&>KikHW0F_Bg%pjCsB|tstkIhj$GW|YFdA*$H{fFtcqG%6}7o4RvhbE>favr zZP9=}%r{9?!3JyLm}Hp9-22k*0#N3PTDe5UIe!AZn3SWMqCXmq6*-1+CP2j8#mbMk zDeK>cx<<=s3mv7J60ZGL*j$tU(DvUM0juM)9=01!U{82|7Pil@bG-=f211swOOd~) zCG|3xQrEjdN6p53%MFW?{#5j|cmWEH%A=CB^J~CE9YJl8K86him; zQ->K|iDy4=;H;2*P%F5i^G#F#6uA5nUjvx5M*hSwJiU0bF&xJN=gC#J=44QiZWdhL)8iNu4|2_ zYHWk0^BZ_r3Ey0 z>Wg6RImwChm@B%nI2}M-$v(x)9_M)3BiDc@#viORH?Nk1TIT53Q{JvIgTvy{9IqrA zq!*Zua1?d>{%Mq@1b91Xfnb8_k`TiNcf#YZyYUFfSHoLJflCWBYw zZ2TMl%NTtzoS`H_hVU_^gA`%Tn5!wp3bGNVhF5s3YZp>T?$My~@&%h&lK6)P|N49W zsaB7eDlcNHFQ=~tj3uBU?GSJKXR8l&p5)~n7uN5y4|BHwJ-KE;#>`&(nXfrugBX4B zhH9@pzD@Q0pgYak{pC2N`c?JY7Hi##C6cyW=kl@TM?J|hDw z@}0=1D?a*fa1NqpWAvUyT7Z^5m-ejn72ksnCfU9ZgbXB;Db&c9eFIkDu@GaNk z9Mh`)G(giWJ2KV{fNMHOGMIrVdRWBfi5(A7QL8&LfH1EAS zvV}{ej$X01zpOT1`lpW+P|g&tOj1aQ7sF77ecgTnq2K-F#jfPd5h$7sp;1aX^;-*> z+FPE^@Ypp;5oIay4D;W$nleFqnX%SgPF3s!ob*21f#<{7VBmv1u;ys*=QSnU=9;L| z3LPTsQsa`6nvuXCmlO`<#*)&4Q!=&9k2l-{z3F~vv*VAEwI)ZZC_Q}_X&(!s)zd~P zpV1Ctgj_d1c|nyDl5l`Ywsb{XEA97Sy7c%#Jy5N8*7QSP4t>z-CX0!mI<1c0GB_xI z<`jUfcaHimb_PSYjS}}$qIa!nGEKa8&MCb)x@w3FoE$+|j+wum<2Q4Wbc^k4gpZm# zDsWeR9Iy=3GLnl`Qly|54Y|sIL68`yMV0_)`JK=pY6G^n&pd_)%?R4Aps#n}&qrGC zM9lhcln0$jR~?%PlDZu?dE5_kd9E9=%W?cNr)O$)5lgyq7tjZ`lIo(!sPs+OZgS|hw%`x^ zzo8K%(Ku@Qwz3tklEbZ)x*#Ry)E%tRAdR9%3+=Ykeq@}=B=H8tYCI7d_BcnQQK4DH zDQ+~-d!;!`iu^%PQb!<%&=HbTK^OLYs0MvTbtbTTtzzj_Yh(J8op-8O<@5zoYV(hc zO3pdm%i5lpd{}BO8zvI=$gRps9T%Y1pT@yS&5{$&8FDEo@eq1bQ2;QGlxV01(29Ye zb7!-bC1hKXc8tiFqW_mtG;w?nGW8bp+({oVlyh8~viOrV%d$S_y3bXDn?U;z0t{(a z-iNSm1BzfSM>50HIy>zGC=WYI&j8K3RxJbOk^%`KMqDDX^?Jmd6P-9CoN{@njoF+nbd--qGL0?z!#Rm+vg$Coy3ft(w(HET_#IVKSDMJnnrlig zc(+xO4dJohe9Gsa?a_eDEZ_TRA=w^H(B~7{N-{!#Ec4S(HxZR{Au8Ce&Pd6Ys+=EZ z=4b?U`&ceRJHO?fcgP1+DyIWox(Oe{O>=P2HcAcdJ^4eOo%+*2++DNpX@%~(sU?Op zqb*gOj_9`{lB=d`l+G{k@ZH~r-_=P+T;A4xqwp#Qa1+|<)v-UAUa4Q8inl^R@ee>k zl}aUg-aS^we1HU#gNWv!~jBv!LA zMQAHP8?j#qby{6aa~U0dv3&!k>N0yymm(6%pdL9^?OAgM$jRECB;NL9RcY-H{)hDM zq>I*uAU0!{bE2S-yLZ2+m@=7TL}P<}+n^YyxET5FZQls}{^j@ztFC%@C4@eCl0l&r zNWdSl+myCKjpI?-FSs5s9Wmdns^-BgdGvr;b9MAwvm%`?%oY5oVQm)VF9za(Qo#GS zrW2r&r0DN<$$cOyb4No7pA(H?i~GRUepHUmZ;Dq*u~#RgFaplAXnLk$o%B#otCBkh z9TzsvS#SJ(+RmA;q84NfM*U!iM5JF zeM2R5g+y#794|^u!$A=Wkj{O z*;fJy;(g?E!IH$Y#=|*%Z0NFLDU#ck@iD%|=PQ;mwA189e9k$tO}QCG;pvdikRoSB ze#SF8=Y6-aH*nwg8+&8--Dpl4u7~oUfvi|3dh6;@j?9O$yO92Ku}aF)(J-*=vmIm{ z`rNCmfvZBEab?QsD`x>gRf)FI{|1`;=MUk!uz zYKz##cZ+Apjb#s5c(iLeYHBkGyLYO% z0sp+#hMY)NPk3_z3IG#FlY8st{4Q!NnJw(96z4K5Hj`t0!Gu>37)hL2gJ$+`_Ky_qgcAp3>HJ?Y^kB%SXZal0Fd)#x7S6Tv7o9XsZdWV0p{ zlTg7q5TKY@z|~(V7I*nT0k#LSzsStZ3R?fh7s1cycJyM6ZLLXl)kQ5SAsHRr@5WgH ztqCXVjk7EDo=zi3Iz02o<6x-O79ZV#fIK~#4Moc!2#Y}zNo{Wg;-tE7s;ynnrvRz! 
z@hrpLCh&xiEMRhKUgXz9Giqm8NE^I{!lZw-7422_gpcj;QT^m>Noyo2rC<(whMg4rPGQd~B_Ie2$efG+*!A z&u~211@v$AIc)SQ;eF6y+<(H z;d{Hsc9xfQXyz^1d0f#W8G}@i*>db8+dv0UJ+j9sVnE%-TljpLcYkXi1=IKbm1`r4 zh(8}9PGfVs=i$B68b!I&dYoi@A&dGK9)%C8cy=K6v4%l>KBIJ%Zv&Q_?e=USzCf%O zjl~tezE#EA#5Q^_8X+t7+EO6??iOpJE5R~6+32;1xnD@SlBAkVa$?SEYZmm7RCtKa zB6f>atp~Z5q7RRak&RK0D(+det}bN$r6$%*EXS2=Ru;vPg*B@;PJJ#NUY2kb?uJZMgD(Boa8Cq#}kOkxtk>T)Ajtg(*#pbf(DHn+UPLzf?jy(vqzo4Db<45o*(*6#EY@W_;)X>#A}k%K8=_x$ zQLvKS-{4amn&itWM0SYM_edK}?-jh(96H+|+Bpkplt;WWDV@eM<~^#;sr^{xRNnS~ z{;11vbFlKl0WNStMgV@b7&<27)E>P8+n1#zh8dc5V0tcz#Ki7#F=wf$587ykLH5Bl z1G_;Nu}jPgGmOZ#p9Q3@#$!R+i1EV?)QSw?YtQ!2v+WBCFnVHnp}}dQaqFFP_k|km zl8Qhy1pJ2uTQ>X}$`#%; zvh;E^T|$slT%oYy(rKV8#5Y@_&202yRi!rPx^s+jSv^?VU8Vn)-P_IY?WVA_B0r*_xT{0Ft|aYf<*Tt7 zK`Sx6tj~|rc1?<-sW2c?Frm}lXfz0PjLX+HATr%q#N!Y#^q|-{uS>xR9?x8nGVnDP zN2lEWFzHF6YE@e1`=dHmO@yCM{nPC`&!Ck@A% zJa1IdoF^849xE=Gn$M}`bE{N@s*^Py7iG90V~1eGNP-9*`8F8t74v(PrhfoSR@RWdl+-Kk4fB@pv*CPeK4@|B8aa zbttwO=AnMk^{7;ZdL|ae*%Aer2igf}^G(hKY{n=uZb84)V0yWNoOH@;VIC~6j`W=% z%y`7Gz;od;TeYyQGVZl#Z@i*8ajrlD68U}D3p-a%E{AdSbg54Fl%5SQk;{BoA*WX5 z@{7@=9UNJQkl8J!>zdKtDs@6SUrNUB4cdlbLF?7sBJ_=mCdM3Rw<({=ox)ht5Lx32 ze^YCG?eI#k6E$WWZI3d~jPLWKpzX6BR~Fm8C7O+E6GfSrY}|6^N6eY0D(EzXdweJ- zj-CM-HK&UlQqdfBu}05s4OQ40s7+^kkUH?ZD5VY)T%JtQFw;7>m;2BoQ*_-reyejJ zTZNM>msTR}klX|>5Sk&x1*G7hO-+=`l(dU!Fn&(f6OGp|(0FTzQ<4YF&>xyxd6?A< zZ5(!-G{W`WRIhLN?Hsvy2rcBVKt6$b*QUU=&8UQM54Ckl_F5;p$D`OAGLO)^qKxnQ zZI8xdcGj)zIVvS`I`{?eXjV0It(iFE3EUnT#5s{`;b)2x5uLN*zXig!YeKP)#y1>( zS!Z-FXSG7gRsxIwpq_EEQ~xL|gWa*-03c2G62RVS*>j(k0XGnf6}d2=`_2ZJKA?&3 zfJuLC(bv|xR8Vg*OA721=^f;sPj~`Q^|Jp+Tb&F-y-wp1sZ-o2nbhy9%dT@Vj;Qo3! z+<|W11@2pR$K7f#hr0&XGU)Gq4&0f%5WE3~x%`%yge3eWCjj2hVaBXv)cD_CHV=tu zAf5PvDc~vU!xYVZ0zKrw)seV>p2e2e(sI#TH;$CzFo|GW11 z;gjkgsciT7{O@n^LzMbwj{mKZ^Gn3~W?tq#iuZjK@B1j; z|FBWKcbi8C+kbIlc$)+#kajzoc7SAT%mwv!Q`!T68VkS8nlCpc1h`HDe-KX__``4< ztBGYyHwis2qFa484apk?@kXVUIz(&Pu0(ZJIBu=NQAUTun$ltRNR4wJME8|d<1z{` z;~W`{e_n*{J6$nNO;uarIXc=tJo^8$_wRje)DKyLsV?%Q>A;~VrY#bmN z-r@EDnad_wZ)3MjC$`(^wh1J|`#YcMe8N*zy0p|cw~ZmmjJ-R-mP#e5R4SE9rP6`C z!3g4nW*ADFk1oB3+I8$+kpEWe19mEAHv>V2Zf+u8Zpt5y$3R%CZ$)s!tQ0e-K`dds4hWxWAM>((KKN0-egW2osv z9ywm7Q3UG4b)$9VYW2uSIfN8~Apw$KrvuozqA;nIMVt=ZNDT6| z6R(Xq(`u*IING?2R^i!Z*c8&*D88vpMun87(VV7JQBB_UEogE>KS54vmUy$KQ9(&6 zhmWE`jU))w+LPc$)toTO2-`35TV6B_U3rqpMz(7eM5k()Xw1b8c@y;PH|t=UIxJCdGzbVk__~ zm4T_eMh9DO+C?~M`07ujmA27J1VTpUwc=b`yfAsXE8C15Z?qI|MW(hpRqy0;R8{X?y1DMEWTe*8+J^0jPtNz*|WNIugcEReS)&kvBF! 
zY@vdQwwfx)YOK(!ca#}<$2zoBTa!_RI_g)Z!4h3V=hb#sLNo!%GupkB55j+IzV^-MjeL+&Hu6K2Z@?$~4a zhj+EsY_c7;hyhEy%uW`llP8^FfE_gkvu5#?k9QRh;Em4KEJ#Y|ECNz@sfjf4<|>GpBygeS7$WoqQTM9x#NsAR?mMsIFpXn1=S3t}Oi3n4oMN$FF<5MrJ_h#^ao zTvvQqGRMhePn{s@RZP&Sd>8Lu2BsDfia-R_Nl+^lKBo{M94@W6>U}d`&zr`0R>T3l zY`5yjfzyJiHS7%Be&+^tjg;o|Jq9N0de3t!xktaCoS<`Pk}1!vU&1X?P00|k7y5LT=LvbfbgMH!bV`(0$KV0C zcU6z_0jrhn#s|6B0He!fumW~Sh+7axuCj=^y$+f9RC5&@EHQ(bbv}3=@&PO)j8($j zcMXGvrY|m|7Rjt-QlHII;w~{7<#2eDCN$G~uVQ74BET|>2w;N%_tgLN-pBjCZ1ny1 zHri>u*J6Jm!~jM9AJzZGX#e#E2=xg$*&n96i;YBG!WdieXYyXA4Dv75fu(CuH49Qg zc_&6(O1pz<^(c)XzsNg?CPjY9IRRo)FnSbzL`UIL021lmV>H_L`Wu&uw(59YdUxqU z(4edkcIlLjuOm^)>Whk^EG!h(FB)AX!-R`UYz;|YK(fS2>iVIR*`V7yq?;t#4i~a% z3x=3%XXyAV&j~?g62zk*Qb?=SMMxO10w@mj&&X6Q;TWc(;XZ!vC%>^4L!8hL{~HJ+}Nr_n7^Z`PM-Ik&ROKb?{N)T zd&*Mj26R|Uk%f8SU>-2|C;IhMieX2YSg=bwmtMavFq$)mk3Y9td#zT0hm)^_-)IgM zQ#r&ycai4&mJ~-+fz)lf*4bHJ#?7t%nVXa3E50gmvUc9S?$F(z65C>s${V5X|8W2e zlm~g5kdmZLls0Q$lHNDnZW7U1Hr3gb$vzpK2>|c*8nmv74D=q0=8YZ)7zPn$$PQm> zrKhqOkAw|l;$&=Id12wYZ4wlT;d>wJfjIiX?-VVUyRmmv27adut^671bp2it$A!U} z1txjF$dH1x_3IGeyDfp)d~7`2y`Ry;;v6p|Q9vXtYIp;4UVZ7HtmbC|A)%5DYTAA| zbZa+e__(Cu#L%|kY9$mURW~eAVwq!!C?DBW%uPe|omNy6HNCCYl9kh+k;wkO<&6QhwwB|dcZFXv4&4`fI#``nKn1Mb22R15E)nF8sg!< zi@M8db@v^7d51!hVyzDSzK@SAg22O&;_njI^S%H8%flt(g9qIkw)2u6B-eswrLCSXc&trf zkTs5>RMx@=V;cNlsHG%DWKq^zVvG-{%CnFy4*71XAZ{r`{{NA(e><^=vXZ~}jMy1!c& zj^D4&_YclAfAFNdLeB<0i{2I{8AE5kVVWjH(1U_cxY#a37)KNQQ{`4H^i7pI%~7?c z9$=R`)pi(y@330a{Zx5kvYA*u3XQu1Q9#<9!!5#X-nK`bUc(+lL!6=(DHSx+3XUJs z{WW(JOdl3*&ssq55>dMzLs1-Uq%C`GyVB5`pKdHa|hKu zU|sU@9)AxoO8U;n9s&>AAd=%RUB;+UV1s>|QAw>^7=XP#!q=bfU{su^o9qlpx$^n_a@le?&CswB3 zK6to>MpR4@$Cv+zNL{wcg;j$c3-_8A;$Eu=|BG5@-*>M79e)q^tZ(}=R0tZe!#79k zJdtakDd-SP`SzX>X~+i88?iazQ>~)kG(^AocAwav`@8Td@En6p^b#}mMWI$S%qLALpZxV70nrhIPSRA zz5){izX6*Xei#vk-4yLvIc(Z($hH7VZ9Za6gG>ui(>s6+i?Bn63r3ayaC@T8TPn|Y zDYI?LxSt&Q{|QL?9qDU-DxwmPvkMrZ|37&e)Bm58*PhPw|A+W|>-v8RbIx`Fvt7V! 
z7w~u41)QAj?bLUUkN%%mFKg#}_0zqx{oPlf_D41Y>0pU_f;%ePQk5hh4N{vO9u`Fj zFJy&=L+KZGz(dVTjVqAFF~CR%N2d?dgd%E)6qq(5X=MtHO;z{_H&02FwIg+ZGC2#) zoQ?UR%Ed!~?A+@>1-*vDb;kYPz(F^>L;G4i;1vj$>K@X?zwiP9djLFO$2Di{kk{k) zjl>p$CHJx@;*EM8OV-=9uib%_fw=^vQEe_l(UN60wn!)-mC1U;#uN#Id7L1+R28>8 zZ!0O9I&YI44r4RH-vBpMc~rFJ_kQ5YaIuo^w#62P7Ck7uZKlmUsjC{Wa zg2=D&fBn8$xLow`3MBjq`^y$A^UsYQ3t2Nbcph2~-|D@j< z0UCW)<#9QDg#qX*()6toP`s-6YBBs4;7G`mrxq&;cG2GW-J%dw2VVOSq>kNowzlRI zxqVa0C=f{a;KUj}^{3t=ncv!K^!Z!B=mVS~+VMM6K2Z)QKQ-IJVWWz8Yr1KU98>VslJyc;1gc zJoMmSe+$yZLoa&2iF(AybK+5OpFg~fqC%z%f#?sNI#tc&>*RiEl{isj_lJYA^@zZ16vqcCT%L$Xa{60-5_q=X<9|beahr z`G1aIo#Nl)7wqU2u=xHC{HQ_7A7}e#@MV`B9`EiQ>>s^^qC2EC(WxTf-S6S4BUi6} z#g~g5e988RztaQXPC!EmSb5{=a`{OaN*_I60k(XP&lBNQa1h09dR^LgT8{6aP87KK zFWqZH-fMY{VLU>;cT{VPCM3>$AQM z``QN^j3m2IqMamSCdpcoz6py7CUm>@b@&S}Y4yXO_)8Q<(wxBr|IHpWE~)$HZWrw- z@ytVekeYXZ?9>+7*!hZPm6 zI+a8<7DdHh)EE>sounFpCa_s(oEw^Xl>H|(!LX3GR$yG6LFhA0WFI7EX>1i{X;!C@ z0r|y4XHjbdpsYe zb=3SYZUtNpQ3d*}nqCFs&M>baoSW0DfKO|xK%e316(m_`8r75w&}a?G-DKFH zCWYf2jAy3N7HC}*OfI7>VJHPgjhOBm5vzNeDgRULlLi0OKAFjW?SsYq54lYANj7@Z zaQ`VOanS82Ca0v=6cUj7me_h@wWYkBIyW5 zgA>5|Bf!LS#UD7j=-EXMWmR?7?V%@ChqZ_JwCD92y&r172TdAvYp;h3`AP`xuT^0v zmum<~qdIPtYI>_BlBXx0m7s0;ll6)ItEtxl4J3T|;mJO4+owF_2)l&q9?n=;e{vKE z1z@BDu>+w`Mpk6G%ukHsCkt3SqwgXeCqfnelQ_iBWj~w&i=%~@pCFeqz(0)#j%^&X zL}G!()o~~&+38bGW>Q?xf4%nB`au8Szv^fZ->GJb%RbQ0kn2A11TG{KPwW!4K)z>^ z+8^mE_}o0`UkK8t5>@tosa)YnumZUBh-+P`Cx5+%edOc6+xlnVHG-eF>%5e;i%}T zBTa2|w?K6Z1|G>{7L+=1oI4W_4I_+c0?GMrVAG|ZG1W&9$LvXzB%DLBwpGcjG5W~S z>%Xlcf6w*9-=8G6_V{9nLA#3Y`=xDKq+no6|CZl2F`c{X0>6jO!~B#89PDFV|lX8e_Ss}}X4aX-H zU&{KeP#ogvAJhdxauob=v&qZ3#Jsz8;O+l@enYk455!dWu-!Zh8+0Ie2GG~NL@t&e zg80cSU8F>|%;W?iWSHpi-tEB>Bie+G!fA?)aM%)#o@!9wH|mZ)?OAmjtpNa7WruY> z^A>^L1c>!PVa)efX=P<4ESc;yFxr+nl}{n_f;q`N(v(JDGiR^(qh6=a8NOqT?i*M1 za%0zik0lJniw>#q9E9qDB@N$CY}!7m3ZPoFfEEu*gRHyjM#C}Y5lC0Z;XR1mQ_ph& z{q(uKf{rC9c>8ncF_dz-H^swUOXn-u-7lwS(5N{29=aq5%r;KN1Y#8vpb1tvIA}za zhwReHAu_!DtZ`eEi+MnZqevB6z&uTh-DD3B5nk;7ez?bWUdpKcC_2!Din=O2V+ynB z%Jm006!IbOXlb{;1W#0(Qv9^ zlmoaW$RWnB&P|hh$3=k!s}Ts)t^T@`bS{S(USbBy&$Nuh5-jaZkR7IEkiE0mG7kp( zFavfjoyL3gk^wnX#8tqe4p?qaVTggj`o##$<6v2bN4X{~zr%+JW#9r>Lju~|>Z{g% z?}~|3C-cNAXQe9C0uDGJFY>PQLnr(Qt09qlrJ<34RjWv=FffC}bGC7$==NNCI&_S0ICWgXA5c%e-IrE7! 
z)VL5ACS~E0t#by3j0Bca=<*{=MdL-$Buz!8GYI?D@!i%9SA=@#SFGH(qC}ut`1LMz zHY~5J(cfjaXL`MW7A1Yxw1qVpPg9{izS0V5AUCc(qGnL8*>4*UQ4@$T`RY|JC#zre5ci2Z-P{PfAp{{Il4Wv2Ws zU;1{_1+DfFU!yp?bX$W0NX?zo0{l7g1{hKn)`uJ17`BET_T2M8i{>VM*9j1cO4fYe zzJVXLj@w30IVj^bP~WVuuG2*ytiI=U0N@mK=ud1Jin;IcT$p40x5R_9j^6)VUw=}b#eaH`53Q*8V!*%8^S>x_&hB{54hz}ruyA%*n0#2s zj{|E5FON@w!4B&O`$v1V)B5p=c4C+X^Y9jZZh|+(Tp;cGf!B3_OT;}+;oYSruRKWW zcoTnyX^k9mgU_}>V_leyz`p_TW6;7micLi3s5Rdm_c6_$yxuZ=6*tU-6U_{-Pn&&6g^tWl4$E z1$gd;9Ns@)UjJ$wyUUgTHp zSmC;(8}xn}a2tLF^PwkQ*qNaqwy_haa_%fGfyQ?O6?5!OXaFcRs5kDzK-w5t5or2h z2-IqZY~=O;ojah5nMi@Id(E{{vr{5MzSTIC6F13x7+SWDCZm(&yXC zq$^KVNRDy*3;7Rz65WX%y2 zxZ47#S0meP0R&`a)HbT+!p@IqDup?-g&B27&YIc;OPtpzy#ln$qM)PgD0vA4UC%?2 z`30vqS&NvKVe|G}^T@pOAi06idISi@Pajthk85vaO3UlzM%8bipVe@AP~oq2?Wa`z*xD2Ue}=~mKa%dxw}}9!T_B=7`krg*DJ#K_Hvtto7Mm?tqSK2 z+&0=b0c|hQPZDUrceP~w_W+;n9pVbh`5l^@ZdMb7(cFd&+uA0}D?oH%5gKfSZgbdg zu+i+y;s&3v{4NND@yip(NH?!WF6MXDH;;rs7{x_Hi*ac#|s{d18iAhbKxm z$WZd=(8x<1T!+^dXnDG(!ku6Vih7qS$|e!dX@j9RR!r=q-kG8`QYN{NhL{+;n|GX2 z$Gr!gP3rH1oDl z4kkVS=shHp>eYQQCY219G=`}Im_xRH9Zaf8(WGgW$fUAN+FYChliuAnB6m))x5lJ% z0{M|e-xj-Tg;~A`(OZI-QgI(`Evo8QvW$#@2Ue{LB#qOY&PsfD{$vq6WNM;LpK@og0 zwO8j0ta0hew!<$BuE(6-u**(5!{E^E4e{*TZTCt7Mzwt!NG+0{%mILoC#x{Ab0lZO zi@+Hyuz>y+(;GAF1>CD2eHK~oMaSz03jDM5MO&*#Ic{{K>GZ^%>Vy!dFKEB5Faka?&EWx{{KG)`O zAKydTOMJh{$j$$tkM@|47F;)a<`a9lhhH*I(cR4bf*!{owI1P_%azuwHl2eS<=P$b zhC|pSv#myxbS-?mM#&>^9s)`|mdiCFqa{T|EX>lQt{u-lU&$v`|KmQ>;vW57w7-%1 z-{U7wWA{HFt*_1WzlZpIoBE&0oU@>>GmUSi@%;@nzUUpzGvI@>z4~G8{B-~KjJups zI6&BIbcRh31}Jh?F1Nqf70-PcA2BpE(u*DY%(?CZxsmT|))pKW=ogtR7Qj{(#fHsZ z826YSpy?xGENb#O;T|G0Y=dDGJ38GbEmCt=LgOv6uIBb7v{yXWh~N^ZMI`j{T!M?0 zY>x7EN%5oN7g6r!M8#rWR1qJlW|jw)JTT*KUFi5Z0TonfpN%T5d~ld@!ZG7C-Zu*x zv&SB`MMTDn8<-Ei3hw-^Oc@ay>p6NgI?dPwQbz4)P99IgaE2LvG&IwUiFRpsT84im zpJ@pbO)!cG3OFH~Q538@mBzS#fRBVYK=QRaw3-S5dr_c=;LUWS^WDa85|l7?J;-g?e#cwA>^34Pemtq`41BNe!E&cs1WlTqEluYW)&I%6t0qIXPG=V| z;{Ct1^{D>8zV>u&7XR}>KHrr7A7#$jE?~9`nC${)yMXLn!2ZjlJ^Xucyi+?rj`;{B z0U!#*@N?}~JT}4{`R8X%5$jTH^Hu(bG(4`9B$SR9)8Nelhlypn_0(e2jh*8|*v_2& ze7JX5aIP_0I-B2_NADjENx#1uTy~u?JenqW3;JQ&oQ7Pil6~7B+e}r>y*cTvx8vR<$N0kM}_%xwtu(_ooKF1rxnV6 zKe*{T*i%+N?{62l0pE@+PtbQ^jYkJ8EP-RipLHO@l1Z3F!ooMB4 zd^3ZhTlvm~+XGHfsG@}2;_wvXsv^#oqA*gp;r_s&oELEP@uPGps7 z*)(M_=w?F-&WnZ}h>M*fgRaJ~esI%kyygwYH)vn&>;bZ)o%-4SfA-#r105=i9T1X~ zL4O1;>dJ-h*-eCbv|f}j3slI8{1jakiK`pd8vT!>>DBvAP%K&0Ci!R$s2D}jfbXhU zF)OKfM_d5;Flw#hJS-nDKCrZ7pide?1_SqFll|DarvKjb=0!!_8>k5?;%x$)r3#^H zwK~J#vIyVxqwD9jonOvRYdd={_7C>LSM8i@Hw@w+LO93=f&{@#ieZoV`U5QEbTAfh z+JdSWhnbvHSLZg*{lL3R2z=$9Q(z))EQ^zf&Mhw^T9t>SJ$dn)eI zlJMk(hQD^-R;g|zuOu9o|1_Hg6B6mg#Vs{+LwLr+K42g<%*aj_^s?pLq=6Jd!j9J^*StIB$F$dg#<`QBLMMk5+D#LS#!YGLNTZ~ zyTBK5q)T?Snz6KOFUZCw=a!UA#%HBS{*F`kchjl6Wy9FWOMpNCT$W7DaAv>zIzFcT zuYp$Qz_oj;)2ss@`FZ*{{r#^svj06TKVEyhvA$l$^M8Q<2YWotMx=fI=Jvm|@!yf( z>KGmSZg$`i?>}uk+IWiNkE6f#kXLk3CmRo zhQFw!6-O4%Q*FKBjm9x0GI8^+@}X9q*lHERfgJY4P+Bq@3!GnKAZ)aP8&5Eyg9Qi; zb|4Jeb_1K99ZA3X2UB8@CwI7DCeSy9LmL#HJUcaG$l{ zRP02o;j~6;Y`RiN9x70hn)Qwi>&c*kp}{ZR#wAm{70|owz5@Wjc_{5YdPP>JR;YfrX6CrMr zlkw3C5OS{Dwc8*B2)WZtLGGK#PaTYq+>BR3xD`PqZhS)`twNSkqlePH=~M(K91oEQ zJdKt~$JU271&M1S*~bvq5{bLwtWbO1UfAM-lps()EQUe!O>~Y2@M7ko0Af|_9mUNK zNl{t}adGfUn9!Oc^0tONVMgGpTEBpSdSBuAG2##=>hmeHu%KZkOpn)~Chj4r#69um z9x5OMIwa#(c?p%w$K-}qk>ubkMrk1n!;rO67zZm+@%*~^c2S=S@H@LUkZ_fd_`Eih zdhxo!CH;3D5|+y`L44=cL4*%8LXwjM9KV+Y%ALIRfm08q91LL-G~0_CrWON8 zQX-l0-_mnYrBAQHvO&nR=Cz|Dzsl8BcUICsi?QKD)*o4H7xvF?kNBLb3TT&CZ(Pu2 zGIB(;@-~4SGZU~t*$6f-SKGMjtLN`L&xN#cp)it~qoaD^d);Gv=kl5nF8;k_oUZ7! 
z^GKT!x!w-^2r(2X(-J5uOh5@kt1%`7X`Y+hm0xmj6S~FHROO33Y<)Y8VMn{2`7mub zC)wsh3qT(X=vjgH&dmx&cWb(+T$3^_p70Dn7%FoD<0Azl;c4=Tv3YuiVP0l8@g2U_ z4Zop_%1pSnVr)%P)V8i(H28xM+c@(n; zS3o7bWrZ$$AA&lEVg$5?BX8hX?9%&)Pe@o)jL4*p#KNn>RIf3pSUTq(Y#O$b-KoplNRDd%5P)QVH=dRi!$I3uaNQoImPXCHz)r zu}aWDQ9baFI6De^Db&z}&U*3QhJBUd40=p(#LMFKAYW*+wF2T z-t(KjlY$si`y7fq0rczS;hd!CyPF9a&+%Pt{-`Uc`t)Qi8ALzIXSgA~4=hKnC#-7+ z@0mH%wWV0w`=Yq^y#Y#V^`0+JFhm9VFRZZrmUxN?>h*fC26{awf-yuk;IGxPclf|b zh{={jSYZjh+`%VtH+5>F3rpErw>GSQhG4tEpY%+vFfc>+h3sFm0C;63ike(_QpS{R zvvo#(8FSfl<8xt7U}~ZxP}t{^e7CpAie@EmtJQLeF$_y_TbR>_AGXM+M5^cRpu*pW z|48ZtK9$_MAxklKv7!ix>n;(K+l|ShB!j5ym?Lkx=rdmH(Q>)c+}8THyo~+B-bH$_ zU55VY_?MQV5j9d9aXZz$V~w`9z>2t3eHc2v>r6*)F2scF8JZ_VfR%{npEQif7{^+% zBcz-tl~D{AikqI-yXk^PBwnee3S$aGRx`ZRW^_ikeo-sVqUGE9%P-cCbOUz+vZE#@ zq#)BKn2bO!il&)pF~H5P5?_+m;u9j)jh`lg(hzf7W(w*MaeHjUwB<`{a)`JyHsW8o z0-cOM74cMOq%Z(aP1(_~lh(lT(>j0d@gKPOlQe6CwqeDoLT0agY9WDIghwA&Lv^5L z5Q#-A=(}n}C%F-w{Eg^v{e>FQ$!bK{eMt(b|D+sdw3SA+K*Un*X&V|L0^kO%TCPOa zi|!Dhx<&>$gq5OAiXs+Vq>5g5hoI@tQ9@NGC(qCqjFe8DNI`->Lo&yE)~Al8 zP~A$SM?0<`6QUJhU3A&jdZ{$q&M1J9q{20ojOU=Gz>=>ECM3+Nc0MR5*q77ex4Rl*YQp=XNBUWLM zyfizQL=FlRv`=lJ)=3eRBPC}b(z_~mcA{Vj)dW)gnx4azKW=vtHoA)&bWiEY7*wc@ zO0y_O=V|gKM8-Wtf)p$giD(+}kK&n_1P<9p;~v30`S_+iYJSDX3@bMw1*kFh$PC!< zBDFDbeAA^{TbpFYK?x)+hO$nMUSwy>r7luoqAHiF#=&KBoiRfRRqEZP(y$I*JSs*M z8|+-1d#HuwW(NMXagiXgTXB|1W;epl5eo)u8G{NFGBOj_N;pvo>h&6qr2M+LzSOu# zm6pj8$Y@Kv8z(6X(m_i4Br{<=wmp&G7o!ZkbYPD3EqUF~LXeOAjQ0OUhp_wf|6P0f z^vNUP|GV*IeSHo6e>Z0F-yY_JljR%s|K-d%3jjXz?45b`{{1|AU%WavNcZd&xiL3h z(gW{m%}F=&2!9^=()x5l45T~*^p-Ugu}uJ^-X+RjMc!~5v9vwv~Bn}G_o^)iIz-L+3u*Qdg^M(i#~uIv>DtMWnj%)LVw}0+P#x zzn05y7tKCEN+Q!r{-ZJHEEwp_bi`(+qnYU_m+9zqZ@2#Zptkc%y8TE8Nq!$rg?!`N zX4?E;oI2P)dw#$dT{i#? z61NcO1uNk~|5ChgsXveO92~`Texet+m3(arILXI z{NV&h2sD|Up@XOm0!%mn<4g-5@xrZ7KzCAbOF*wwA+ym6E0(FYEMlU7`H-b9oS#@+l)^2?nx|;@98Qao^&KK~zTGi2%q#a19$Fzx= zqE(K&d*P+1(057%+e0{EMX6S^4sTO*YepU=G3ud@J#LOft^CgtR;FpM5g*(lhgw*> zF2`;+uwAE*iY+jPh?vm0pb)Vx-VFMPBB|Z(IKlu2nTp4e$G9&2{e`8jT78dB%tcHn zNkBkC_oAj{R*YH|flL5^-q(qPFN}cNh@m{j5>?X{;dT`P`J)cd5&E(u3Y2~nUlcZ! z8oqa7UvzHh)(kA`dVHQtBvZWqqFylmHzvt>9AS}K0k@m$}pz~?;VCO3$Y}VoQ78A=-&_Kh(J)uzd#QYziNf5dGeUqEc^HUT@*L>pI?iL_!|xY1qdLgvl=I2R zVcj~NP@Yl*CcceTN;2K9j2y}(V2B*|6-akVJwMEG>^b(qAg|KybmS(Ahq&U^a^VM? z1CWMf9Sbl7b!0QJx=KW3370g*R4ztZMS5Dbx`rW9pvhTBka-RusRv2Njq$9`p zfunxhIR0VdxJQlQP{0tz*PdaiftTMo?SadWr~d zIo0FwQe?oC)%8+B%oJnRj}vSqVFJg3PiPFs-+`aX)dutYS8Y6|`TYAR|Nr&J<@M%! zM!b+9f?DfF%{(~bZ($yu>>Dp}yYctJV=rf>yxc8H#XJ(Z*IV+lBxoR$&rzcZsi2;q zQ6Pft-}k(a$fWd!wqSQD!0CXK2gx*_d|8oU?xH?%Mj$;JRUQ+vi?Rn8O-5SIequ(J z%=d~&SW=X(lWgukEBT4N1b2{W^{n)7L<55R|WGBgm=q zVTb`$5i}4#>I3n}Dl@>z9ViCIpQ+`&4IeqCLv<{$X#4Gk#Tu8;Lzt%U>#7ubpYC|# zl~l3ZBtJ5fdWs&7YTfN=Xn*t3rIjdv4^PMl@8Ow0c||!*46zePx5|hU(}HlX5uw27 zvDT92#+e*xE5+bk39C+y&ZGhdr6R~WppTqwBpmOL7bP^A`Q>LnfQ2I(agymF92F~9 zF<$p=B^w`NBO*jFeh!^hN8&*BBNK-{a|N>0c!7w_9M)(k$>=m5g;8O{anahCfH4@| zS*>lu-XKI5VvEEgy?d-n$yXG!eU!gQevDdNzO!MscYFE-Ld^BB%gaq$zqN zr$qvq-O*@vN1z!(=A-rcAO#cUKiLrZNdPjp_o43gdz1e2`m#;~&H_x`CeStK#;3-E` zxr(V5ts#GNGicZy+vjnpweR|2>_=FP1M+#G_t~LXX;pmJN`!&>G>q!acO$X1z`ROI z#{u4=3XLvLW9Gg;`_c-2rIZxwVfRZ1@~D?5=tQW*ST-F!E(^nl1G`yV<~3L2Nl&@Wz4+WZ=L? 
z!EaBuC$dJa4tlWb$Mi+Iv}DDGHB|;C;2~j%XiT{qLK2jiA|-;fgpi|BCFM~NQ}q>& z-#ow59NqK+#zw#W#?L_h&Y!&Ye>7M$8k5yA!v6ngtz3@T{~woU_Wy_YkifeaBi;kr z|2cEc?Ef?S|IGgXm$v^4bND=eqJ2JYP*1$3kJrojkUGAPCRvKTh3@KyC;RwbC5}F+ zKZz#G&CN^$DCu8xDYs*{QL{o*C6>VJ?4O)waRD<*9cfrz-OdE_M{W?B8 ztp9d=da!FPsGYBR`iSE$q|jwd3vYO#uz)QyFARmb+J&L`3k;6$f_!c0uGn;&tYDrs zaCd9X5NFh2=GC);lv4{DD6Y1)^n(TXMOmQtb`VaCJ5r?H@Es7UQR?dbwHEYC8*6Wo z^t@yEqiikcYh6h)RXA{ZwvHLQ{%C#v+HcB^X@*YKZqRs1ZS z@QYQwXYO0hKtvxza|Sf7FugxSb+bPRR1!oLw?8ncTGcB80AS3lS({}18N_8TA|I+G+c9Qa)F^5PRZMk-M!*!KE|wg<*kzl?GrJr(xJNb9 z#J0yzB?ZZXfjt-oWIxnKm7~MJC=!X{odGDaRZ(9>>bIf3fRCbz(z?2fT`N*ocmpi1 zJp5ccwNx_!b6hOlCRZT~@z+shEl9i~5hqrQAW*ap5HHa4Q2m}32aj{uppHJYp`MFy zd-QNbKxmvkmoT`}m*m?2P>Ce@JW0wEyrCK%$ZKMPzSAN@t4feQj?`LBH8sf$O(;oR ze{x)Y`e;$V=pPe{FR&!Zp8|%X3I;3vVQ^W$fGJdzbw-jok`c?>1cx(NO}K*eV9r;m z=H#tVIQh0|_6>s~7BOVBSlNH$r#_T#Q8l#P;H8w7pCpj8x@a8JBqHIoQe?~1*}J-6!D7uML>U?@2LbrFMmw0>AB8Q%=WXC4n%qo?}FI}X6`{#y8-|I z(uU~S=Zk#up8xc`o-^qI(1`oLPs)#eAZQp4j0*J|dE)#Xqo&j` zuWmO`X>ws(MzGpJX9>@s-%wvkb!x$Wr#EP!vIb4G=6}S?_hE646kol-FvaZ0&NcfH z{seD&^ZMy2ucZFs)zQxR{_#=0ZhcQ}tT0~H^V-fY=cl!uy%+ljd&N?PHZhoW`$lM% z5g$_ZrU`bbdsFX|<>9s118)Vgu(xot~hrL{ojI>kS-0 z7M89Q3gSu<-_mY{S!|pAzT@?J4qaOgDp`Q12H+k2dwJJggd@|1-U?}O;V?a*PEh1` ztWI-5Nkd8>wR~2Azgq@SrSB+LOTFILedq0pqO3HWq`+`GE11(9coJa@JhzH67ksBy z$D?LqjlZkMaMV;5nKNXDJ8-Vu1}+EvOSb_u5ql{f#>679xz=eTrqVi4hsDvcDj|MJlyy~XA<)|osa(e*F0nnKHB$cXRm&^e^meN^q1Y* zd98l7|DV0L(USD(2RFS&q~zHjM?10zUo)gj2Kq;GE|G~qBiK9)_}*M5cD(i(#eBwO zZ3EASat9AJiwVSujSa&8>cOy!1~r^SWC2hPjAd&L{-;ykqF!6ngHk1B9`Y*(SOT=} zf1~KdR}%p7q2lS24O5OGQUq@41YTRwC`>K56M0+X zQZB>Md>k?YqdXuf`O6uJ`ew^eHknuxQpnpPNBNONmHMDsJW(ESbnBA z){Jnf0p~|Z6Z!88`ioTFf=Vo%_br}&|1+kUCX#py?nK4rgfk}bmO&CB6DSs!J@-w@ zj2Y9+m@#qsHtCMUG^b2B%4&whXVJ7qn^7%e%$Yl!W5k|`^GHFChTqckO)6qv1mf{e zwq+b6yItYD%1@5*b;&A=s?R)qNj!(T#9|nJjHXoL3OXs~{G^LEE1-&tKE^ci(QR3t zdRrpS(>yH4{atGr-5@#(G8^=8_g6AES82`rM(&@4HYr5UFO8@q#1-2D|IKfyh;>1? 
zIOzfgYx%CKXnEyf&@5PYLNn4Lw)V{VfYUiCyTleuI|*;*+cb1YfDz~n^NsAGwrr)| zmmzsB42bh*Je}Bs^#|+y0sFDZmbc+wv=$0#8aH%!5Kpp+qIn-rA-rHfhHnAKZQ7U6 z=%VPz@^)wx!sZysWKWV&oFWYq7_m{jmgH8+X4RTkb`O?CJi9tb&ZVVd zV%-QKksyGu#uNYXjD#5^SJ?h=0Dt&B6XH_teIKEkP+%QsUooY(0X{6=w{JknZK@IY zps0T24cyiZyL7wkqw^DZ?++G`_{x)%G zzHmAn7HMpb8&ziiPi2nDh|Aj}*zm-oP+dCTsc|+sua9#Iu@YD{fmpmL(eYF5Kkhp{ zBn=2VJT1HMt_{){e0Jo0WEgFY0gJjFIhkzE!cF2~>=+%vaYh!A$N1Dr7+kzfm)Py( z#=i6hbbiydq59|u{-tiJ}6t)YOclR|f~0T_$Dzi`&Ha=_3`k_`}e3 z@X&$VzVLnZpSn^$Bw3FzzP9sIKp<@3d80g?VmG-`4Eoc%q&3!N%~-?9e2ft_o@cD`%C-!rM>o)_0W^~5a@^(-R>}1o>Btvn@PR;^L;*7Q$4frFfTe%~dme=xN0vL= zKip*jOgRyAydgT7^F`B<%}AiC#lJT6uadIjb(;cjdcBVib>hMbF z*J@SQG!GcG&O$R*>KHF$dAsh1Z&tomU$vw9?=)aKSaAmV& zrMKmjD?MdL4>KuLakt7oe#_{wRS{W~pHO&J;kcLG>eOblq;~Vf#znQfwUxqWc=CAc zlIIh&md*xu`29CKa631+c-w4XldGlYy&v`wD~<%9g}K0o1KD$qmUf*kOQYyACupLG z!>J=8ecWB%q>8;oH(2~qQcn|gSGlF0b3O3w#vq^6Q!gGi^d}GDJ7lxPZPv&23Dmkt z1>YBxcK0Le?qAaNl29j0IfTxQ+7O9AC6ENu6^BY5;-?D#^%i?&ArS>dtL$P4Qc*4< z!_hN!Y}Cvb49(Z#A{Ma}TXMY8b0t}DQc@Tt21faXd)-Av*6qFLsx|!oV%Toq+;J(W z_7-yu-Q4)HHi3+J#vO_&0nAJZoPPiq%QXaZxA8&X?m^R)$Z3k`q!lG8@Z!PPiM7eA zL~ZgaqD?aWGg`t5AEn5qLSSury@4D@obQQk$7YJU!>V{SLWt{Vq$l%qhc9J8z30)J zY*oCv^Qt)cn8`UutVI7bokjf*|47hplCFiARK!mxcGbQs5>4WJs z)qB=xu%`RfYLc`M>g4@w$JkiQ@P(IBAl{R?`0`j4+Zfu~joQ7Lo42pCi}$h%Ow~a$ z3A*(5K6Hu{;Wgz|Df$3&#D0}1$GVQ+c1B4t(%L3jf@bn6r@W%R7avMiF>Zz#W7{z9 z7GRF^ReY)?sPNhH%h*FDZJ*eS{ofDwz6qjCv;z}jEMm$UgTB)jXFSLb$SjOl%c!Hm zR*ILJ`mPg9U;V<;%=S6Ur+8YZF^Oi7kJMa*KLQSb4t}I}dG- zLX3@$WBag9!{XeBvRAh6+C7fkZS)(H8|Nc~lxdzXMvU{*maj}2i-oPbDlOeh7ubNg z)zwZoUpp{g-&VB=>|+AV)+;FE*xh0j){4Srip~e^nU=++TW7R%D`^E2L|kriF*Xjt z5@jJQ?YGHCs!`EX?>l~XIJj34#MLQYPaSvOB$H-R>7lLU?9MVLZ4Rc+z%(1TKJ~)6 zS1C*~??|gipLcVT!y~f^Rg1`cB5gzG(@-8E^W0I#I52b!?l5%^S_OB+B84V8seNaV zKB?n#ng{q$`a?j!U$2X}H~iPyTa$P6L87oEFFb$eIjMCGo^^p)nthGODA~Jm#M9m! zZ+S|1kDJYsc6~5xbXBWK3JZgFNGzmF6eFJbRJ4_8oRVeQ=R;HUHlyA@?6t_>llJ=q zj5;v*=&?R-6@qapSf>guyul?uS=BXJp{fkfCk9Vih0>hJN+y4#^Axbo1UwZ}PtghJ zISRu?dhUz(+DgqMJIhzUI# z%^2&Ye=`s&Y?GnLs)SVqa!l?fuo`kTBb?V_7hk+y;`mEH`pqx^2N?>WDdZ&D9l|7R zT-v?1gC;p2{&l$Hq^*J=oO%P8>Wminm9U?Xv+Fg6T?~kE>3!tph>xECK42BVbpy=T z?EN%g`>g4SOtks!vzF^ar5}Bd+-_G^RwTNQKCm~3qth!voo$$&3WoKDKYGL%FnME?OQW#un?QJ0pWaUtd zyxVX522s}ik8aq{f~$EkO;gNckX+>|3S<-jmsYrQe@7q zh|I2t%&v(11+IwT)8}dTLquNub_g3|mzwUHlxg6Qk7h)CmiTcoSIe~TmGlzA%ZW;8 z-$^Mn8wI*bQSEb4n^6a{t*A{j$Bs9r3H3oYo+xn~2dyR*rRMWK~913XqpNJlJzKzqO@`Z*9#tE-)op17vVP19|iX5~P^` z2RS4RW+0I4kih^s&Av%&b;cjCMGGvO0*fVEjgu-q7k1w%)p}=(^Qa1@WpR$pD=r&m zlWUR_jM2h1^B5yn@#%2Zs9Nrgz3$~4<3Y4Y^ROg5?5)H!Or(UaV#)($&@YY8n962G z+3W+CerDN3MN27JUq-)^ZytpBtJQKz%aOPv$;2MpBJQM~w$*CN2gm3*g4}fL0e|Hy z$0>g&f2e746vMp&*xGm2VMVgc*3QP9KkaTJAI=O?;ko{GGr1C;zHMB<2r*dlt& zB4KEdkxy{r6T+0vZ_4A^Z(CHBv!;UiL>oBHSTNBLs(T0d#DhF*Pd}2>k&pjW$ zBag5WFHxm`x3|j`9T^XhS`|nanTWfMqZ-}>Ouhdko1oJ_?1Nqp+Chv-ADwvm0S)mI zhL*0&mn~!thI+;)RrGX@$$0)GY>pZTOD}4fHHe*;hRl37LIrY_{e`8MqFzG*1Fc9? 
z@N!aU@Mg_2RX*Y%TETU~=vy>AD6OmcwXLqsMKo6NiUWHM`mhUn@mm)3nxq`drn{sr z`v5~qv}&UJmM}128Oq?J7*jdJ#>u0iCt zZVt3>9?$)YKb(R5$j!TG=pBN3&Fr4MWV}lL^q_XT{zMhTwUZHwNiG*>wSZ}B&8qwn zP7+QPT>}ws2^*6Fbq}W2LUlKlrlyrxz|c@wOwCEJM2fSCDk+%jt(Ltg-@4Ed#5*6m zIjin1@pf=a-l#%;QXzFy6V< z8R;SjY$-i_t^>LQ|Ki(JP415yFwY-cdTs0+bUp`s(Ep2P4g z$!oRHH{BVA>#^w#Lxf8apNz>EktJnB!dPLuDF`g8}llP>3fpdQ*K7b^u5$3 z7mOwoNUsOgT)1?WK#<$ph8N6Ixm42CKeqvZ?t&@@>`>lZGZfA44ZY~5tD1gu(QU9L zcCDl8UHB$oRTdTGhyp9sUzRMbcA6#jBP?wdnnUYiXa^aFRq8#EyT?>d6OZ^~y#5P{CM$q2}`oM>76e?w|URQ8-&xVG=$ohzv- z{;VqG3XZRqtO)E^t%}}g6}O6#*m#RAQ@59Z*{VIv!MS`|y_znv`END7Jz3LF(Dltp z7Xl_L{+jAP=SQY-GRCNH7mF9n?2zHkF6BYe?OD>`6K1@j!XuV%-_`1?{ABrm;c77r z2e1+EKRklmsQ=gcqqUj;*F$_rnBR-9#y$Igaps&w|DCyZ&0M?wZmwNN$47gqzFjgi z7WntR9lJ7pt_t_+wv^8ar|?&EUY~vL{E58(MVvCdm%sRD)capg*CX$Lm7hG_SfA;C z5Apei^gqd*Go5dy^UZX=znjjNrtYB|zxLu+;u|se&X?OV9uqqwJu$T;-(7A?{V1XM z;mLk#X}X&v3$vus^eXL$@@QnGqqYE#VClpgugwz6Ab;wwo2k0SRp`w19|iEVSO1ag z{|2o4V*P*o=*jvTuKycrv-h7KdG)VGnuva~orT{S?+iSYLi=p4XTwyh!7QqI<3W4JTDN4??%iM^QnOxYm=nABUdedOR?knbx&?ZeLFVrfPZy zP;1yDCn1!YVQMgFx*n-SdJ+irSXvCsZ38Yd4Jzn52GC8gifIwBOW*5x+o|NF)#lQ% zo9K&@O-7)hYlBp!nYDVR|1#^`@p>({ZKj<=%X~BG#PM4@FjWU;@_`pAT8lVBo4M$g z6{+eH2CW%HusYXn14T&x(rrYWt3Ro|&GanWA2@+)V+V0U>9xT0_xwO)7+Jywnv^BC z?;=OfIvXG}calogI&BZc-DOu)r)BkR5TG5Wvwyt$-0gv0v43p#?n9%w`hPpSW};T= z2nN0pEwtZr2V%#dD#esFaIt2T<+c+JfM*^Yw%AXiE^?&MoqcP<=vi1K* z<&6#g{_n=q^(Rjr)BC^p4D3w*e}K=oqy7ut9IgM?P^)KuB&hW;)U8$*6t;xDK>l5W z35h>A@S)v!?s@N*Pke8-Z?bpU>l0h{cM^jZdT33M`cBE2{uHB2LVuvHDXRiQLIIs2Fqv;bHaF`7z87KDawVR5dDDk&DThb|Xn>TYoC0^oh&5pW!FPER zaMwXDvL%`qpM{r0yhFE@Jm?Plu7)ic9dGt6 zQ~>JZG6PlMnC*0%$E2p5*f$-|ZWbGtusK*<1U;ke!a65^!j8^j+Sm&e3sT9U!Ii-7 zz{h(?2-k9Z&7#0XI0QUhGJzpO(C@04g+Y6kmcrjM?1ElhUo>(Nz{~*=Igw~HTG`_K zJUuFR;dvdi=X=*c=zuzI_B*xPX0P?S(z+}9#2H2oISU+=a;WZsm>U%MlYn~DcFqop zdjH{%@zj!*2NN2vLh>fz)u;LZ<4?&5y*ieG_nzPK(G!~>YP+}`6z8pZesKWMt$<9z zYtZ~g-p5PSV2VYK7YeRWe^t>E^;5&Tx!D6)Ig(VbC>RVc;7@VQ!bXar#QOTf;Ie*U zH{KTsTI65}Sf~O=6ktnXdsjM6uRXXdmQ<)At)5CD9W5U>ibfQt{C;o1FQ6IpUx*F> zm!u)#V=y?w5oF zgA`C4U^OX{~ODZ;pTrJO>!J&OEjDJP#R-YIsXgilM#l*QzeZ+s3 zsFU-{%h1aC5)+h%L|~sp@t9JwqW1m?90ZUSl0))m{#K!*q|vP0t>Ps|HVX+qLA!(= z6E-b2{$95)fW$-?ulcHikv#}2lGX4 zLtq|B3xZ4T6Ew^-^&?+v>bc-_3kjt@_mTvR5gfSyiK%?~ReQwgpwlOP4gC&gd3 zx)E*)6d9zC zn6hG&w4nBn+W}(^hg}iI4#=xtQruRb3wJ{k_-@=4gFkMA1A4lM3X^P)jb2wU)gZpnMz=JWl@-&Bxnj03!vbiZj zR87{v-Sh_TPNjjncWB@acb0mQ&_J1mZJfM;63gA&xSQU_-Kn%OgO0aK-xfjQj9fiK zs_dtGyNp7?)$4z1|A)3S{WC9f=EI_R>UyvT-I2wjg|J)}tu5mxvTK4xQ}5N4dh$IN(s@M@1l z4%^?~PwT-heA4?CYx%?te;KEuY&fKzrZk-Gr;;9}#O}A+aJGBbhEr9e8a@H5jMH$c ziQPxTSxUo+;CE-0dun*b88$O<(GwPe7pfj5^)(WMXH02fQYi?4wy5(Io z!-31SXbQCHO=78BmYyTq^2gr6!SQdIIOgMSkh*uG6?KKQ*vp~Y^w^FU1VhN}4ZNP) zV<(GP}*w@Q%!` z;m&T8+30ilQ%;NeV|rj_xBG6cnc2AS$0cu~@kDdcj&QEJQd-z~VNQuC%bIYVOLKb=S=C%d@xxJQGAF##1wFLJ5 zz~{$IbVSf9&hOYm6pkDR`?2|^HxDJ3w*!~(dejXG%vjgyGP~LI(JAl8PUjlIi^E<( zmv9)m*wQ~GfdWAz7{sRmbT_NR6m_n3;P!^st0!LIUdyqq*E^V4uXBhRNaE{%K4DKb zV9y@HKvMn51_2Gx1IoD`AO+w@l@;}rQlEQ!h*5L2t*S=qC z@Y;7<``*yL2U=NbTS5N%XPEJ?@N0QZ`(D<*uWR2QY2P=r?~lU?C;l*|7@W^j=h_{J z!*At@QZeTFi<3Pq;K9+W`q}ZT(;YoxLCfh~X)y}IS(9M4my$&12%C>(U|U{b6SKH& zr`sRg1P{{0BT9y$(VgG3VJQ_wpq$OPBVJx4+TE&1gH)UdTM7O{p-IDhakaKaQmzTLd zWT2Q}H)FOTO|dhf%*|f{Ky-g7Knjdaex0R~UOM@vPR;`AE=~xKl^UOg@Nhnm7k5;T z6Fvy8dAcILL5a!D*$xE_Q#Vf3(q}&jfL`9o0^qJJ1=_oP%~2lGKc@cQ;o&=0r&|Zbczv9z|Ko?^ zKRjN0ys^Gc@gJT(F8_l)o^CUeKYw%mKY9E`7?-+@S7h(JAiu1|dIz<082i0??G%_Sy61~#CT#u+W4};2>8&V!dS;WV zN<4(eUd)Fn;NlB^#E7uLu~^q`SS%P`Sa|mIknM{+h~ZtlD_iR8dieFxHY)hQ-mvAw zOIS2a)GzFSdMFpmCq9&m*ihZR4B{D&_{$o?ojc))j85eJ3+@sNc<749MMphyU5e*m 
zjXVcLM(yBW@1PD6q<(OGd_u4?jzSPr*D67<)d`>D@XlbS;?Zdk(6fT#;Nk52bgy=} zfAq3`c7A-icUIs#1ME5*bo&+Yo9MCt3DyGP^k8P&uY=&Nwcs1S)x(2V{2B*#MgXip zQ@;lX6m%ZZ2^<4;1Z;FI>L5x!hfMcfroaJqW5xgp3;<-gr}0P3U(N7?*qRN0eE5+p z_cQ#2c^boi_pyi7YBu)qu}8Ap&)5^@DNX$cQ!-{sP|8F=oOkAb;&Nv&4KANVlgY=! zUy0H&6@p_uo#}{`2{eZf6-wyl!Pe^W15oiRcz`1qGM<%2~!xuG$mousTe?mE&a*r=-Ba*yYAnpp_4CG z14*$ovm+xW@~@MVZ_#VhCm3U{t()8fdp+5(V&idGp_TUHwMOTq8|}x~HWw41 znu&V!l*~Pr7g>Wze=h#kV|uZ8kE0qzo}|lCl=XU;GG9@Oj>(ag1M z=34gGaV>jZJKIb1EE9PuGp4vEJ{-i(k9UtZ8NDA%AGExJrrNPw5JUv=LcN>n4sJFj z_P5AFWzjkmAMPcSyb*w$vK^SKSH4|A^j7}$Z{UIMW~a%WGav2&rnj$cE8 ztGQxXg328B8J`ByQ^Sf>ej%bUpKn96@b(M85F?+A5%*2bEj#iS>=qQ*t}ye1p(8jP zNDTq_?|-LOb!fvZypB|rqgt#ur7Bey+Zx>T9h?sAxNkA_bpXzx1mJDBRDO&8c`xc78a4D9-WfmA0^$*3sh;4H-fc4S#>zK;4@0F}F zTwwDu+IO=IHCE)pSNKBt_i*;hPoWdmSJvow!4SqO820gds%Ck8 z^4EDm{Fh&u6?5;Rx-1^|+j$cx!1rpRHdtCR4U0hr2dag4e6ZFMz1Xs9Z|R3*kX?+XD~v6Cqt4HM_;IR7amO&qIw6le7^E>Q ze1-u1WTQSPEeP&dds|vqyM9q()#_R)EMYM3n&cO5KZrYbY9Y`1%$7f$Qu{+hD>llF;!t|rWMNZtS6wP zQ7&wj8HoIziJjnwHHz+X1}xI?)^b%fJpieE>G&?1=q}MKg+KGy@O+p|eXrMKi>phk z%K{#xbkY!>j~G8ir$xChI%f;Mob~vj#^UJGm(rrfP^sOSv=PfW8?ij15qD|BQqDFk zru2~e?TvV5fQWxH&nUQfN zX)fwcDDFgs35$L2W6^Z}wU)~(y6lb37x*KnimUZ49k&@a_yi|J>1fwfFGz>73Rah* znZy6e2!c`8NPD)s-S9j=(-18T)?(E{q0hld{0D+pBz@aJ-u9L!X*}Mwk>Luq9&eE;~1gt<3;j#r$K1Go|j3oDY)h}w+LLC_?vuy zWg*IY-2^c==eCNVQR=|U8J zwD~~6;gh@=_I^KsEyUUW@lpMJ@9+eYVM8G{8o40TD&ITj0qMydJ{`=va(oP#sYS0( z2Nq$zwlhG;PhlhocXY@GBiDp%5Z@8UOW9z^g?;b+i2Beq!yVjY1L0X0P$BSo_2B>= zi*iDbg7f?)Y{3R2nn)*WBYM$mX9EmzrN;Q=C}9B_W#v+Y4SR<{eR1k z*PpIO{D0S4T!`S1g7bc_Im@T?fBd5;9y5BU`2@wMm}Qn=%iUg{LmUg z70LZ_ye2ez_!F5uk}g@ZG`mOD>lY)zP*N^aXWBa+hDErQV%l|$%_AdNEXd*FvdyN^sD2pUYE3-HNLd{Y#( zUpldfa7C023Rz5`?B-ihySWepXnc-Zb=A2PkXequAg#F!bq0jSq5{Q|Wmwd2AGGbh zI4(?R^4XVcGM{E+Qu%BgDi#Evbe!a2^v3`3sA=VqOUZu2yFC%esp zcV7Aq?b3;Trq+?|`(T{*!_412ZO_X$58dP`u+4}jC-Tj7#m@GlxddCz~P=fNa(j5z;&vcB;+a{jxqzBYUR??FCfp17AInFl=o zmCQLi1)iM(v)L)|>=ZcT6gd6#cl4?6n095F4l3}YEkjb+cpl68=sH>ur?vom-0P^5 zSp6tip2#Yf_kb0*EVj*mKGPu`mumW{EzSBZJMUjKU(5GoTfbjzk=C@3S zu%78{IO7c59^y3xVnAX=H=qaBspQr+b9xkZ!)^V0Y?AP5XaWG|gRC0h#4rr+g^^0B z@Pmd81}TzyJPoOwD&4eLGBnee4jmqf$necVDa8Yd5^adWqlvT|aA^(fIrrYc@9xyV z&fwk~xOJ!Y-9G`h?nJ=${S$EePVIBVUh7_4m`uWvb3I=lf`n=QHO=xMVU~YQlRQY6 z&P3RnXBVmZ&A`VtFkrI<3kB%%^oJv!9G&@c~EiF z$|jRyJ|6KqS_qjGVa&K`DkDKs&NeK>mqAgKJix5NDFI#?A+P#SV_FGCP~|_uIM*}=d=M;AFWnrx>S#_V$zVQ$>Aot; z4+h;}w#;T!g!>CM@ipBGgh4D~HSmk@yw;rJq|-~+@!O%9)sj7>c=Loxl}L1s`5M?U zi`%|oCcSs2y8}1rO!=*HQa0N>ptwsCLSHo}O+zkq1dgf8Bxz*X-P<{+o$l37_YP|3 z`^QJMgCY#Qpk|4^iL;z!u`~FiGuN9$=2GWTUSFO^jaVuorrVY$NhL$b`rVR)Q_*-`&}e@31EYr!NYt|R;W ze{KC~?ER-l<+Vq%^Z$qVpy2ps&;K>%9DVdZJH($I;?EB8{~Cw*;YIBuV1~om^MlkE zn@o_D%iP&7CvWp-S@lR`rgNjmpGr=A8hJhG>Ye`f2ER8aUf@ty?-Jn5AcFBXm6e5l z93RP1>wg#{X!12?#QMMSWIZ1L<3CgQdT`+v0%u3b8}(K-hh6AcXlw5dM;t~V^oYluQcI;i?<9(uk*+ZW z>5BI37LRsL_J7FY_w-$PPhXTw0Ysu$mXK5>7*u;rZc#RUyJW zoH3{+k{@K&JIH#sMU_|HMWRN;mCU|S@`Yj=@QEDDgNck0{K z6=QF!%!MzDkJr{Jc}p#oqoshyZf~W^{-y3UW$XVK&-LzgWb6Nro~%8Mz5fmi*i8R_ zh|f2y|08pL`Pbijj>ondUJwkQ^+63=dGEYmdDoxqCT6>d*>2+7+D+`8r|lvrM~hwU ziXi%WJZr8f>M8VWuaN+XquXnF+jqQOWcr_i=6Hev1Qo@mcn9}xZ>z@7z}?XM_$uLw zc(_}-c*pvxF~4D3xm-4JaC02C$@BEc7p-%mRw z)+j1QD_Pi4*#Ofmj@nky-?#yxT10sCfeSkUzpApKujMN@;@KD41Ck$Hw56se9Dt8C z1t}hEV+d<0EFFk;$nOurs^O?N%3x2pg9{8o2e3voE^#4fiz_#F9~KK6(rER#vVGSE zngkXsKph*4)OsN2-QG|1rU*N8yR_7z!1f)GP45wzEXI-ay*{uymZf)jNW$T=e(5$H zZ-~uVS}JcKkNO@uMh2Woy7qgg?&2=J9ylL{POrgNOD)hh`mkPsP;I@0DRAEp?>EFhm+``x5>A`owlJFZJ zdQZ4vqVVe%Mn+c?300!%(`z)IiK5v6YEiEVL)G(|4nF+Ef}7sJzGkSCB#|}xfOo+j z0)vVM=Wb-;3#ClHdP_l!h_!m+L9HG>8^62_%3Y&_wHFa?rRDmd>!WvHQ9iRrL)&%+ 
zTWi}KCUBtQW(htz@y9Pt3GPZDD+W=XE(vo~FbR?M zACD%ac;+jZakd;>qK5x)gpWh4KlIRyZUQR_i88GZ3a8@>*v#?m3t7}@!LxhX>TAJT z6AXP6^tvk&-}82dFvcyf)A2sSdW#}WuIFBFnk|h!`KEKIcv1>mRd-uk;cB!6(i2oS zP%2 zQw8CH(wY;*Et7aEM58w>5)!E@D#A&C z-=75EdH%PS-?6u?Q59KeT#O;KQ{KH+a$NYSVFpU-m|q$Ra>$KlR=5Q#*8{h-@oQY* zyT^?SS`l&(a?~;~y1PfP&F`39&gyaA)nr85^DiMuFQw;OK1TPQs;3=c+CblJ z0m?^&oU6{HgcwycNurJ-9B*%Bw9d^c>P!%Z64IX)t5(NnTnF#Hu+OT8zX4r^QIc53d4@1d#uxcAcc zhJDNYnOX|{Vk``Rk_xfkwcC~p|D?CeBP;{@lK~|z^#h>zmOcO%fn{F=srhFg{DBp~ zKk5C#T+7ddIdgkf;Px_x8NKh*TLClH>UimN0eN7-%;))I&sHIS!-v}@o}_^3Sf8a0 zuX};6VtCFTpRFCIHIT<~=mD|)rZDc@MHI&CQRGQMmlpI1t+zwiup&36#a!0_!`scb zg-G8x!D`JWUWH!bu?rGGP&5F@at6rZu!A6r93-VC5TA@8*g^)TW1k7AKQo{J7yO@a zWg|m$giK(YXy<_JC07_+lU(&k;AH4@W<$JVaGQ318*(4h*>7%q{!>W#`LOk%Tv>=x zK~S2`uC4GKksoGng#>_V=H}*vXJO*;wC}W_NDt3Npf6%`F;+QFQeJSIQDao#fQh8k zvAnNNM=gy3MHX$t_Gdgy%yg4@b##0}+=IIw{JRyJq|tV}W}$viDn(wSf4;AkOD=}N zaY`18r4s=n=*~|ye97o2Qb`iN5%riA?e4wcUG_kGrt01#o&uIFiY745ZvM|dBS(hG zJ5+6hsyx+ca;Y!`uxF!VIGPOld`a2nMaD7Q&u$mP6WqS-`o#!jvBXv9mgj$j?R?!1 z>U1hxq!BM+1C$b&vu+l4;f}Re^zbtw&(#n1UYw5}J$)}Eg-LwUPJcw-6jFyY6o=>Q zVa$kx@r~_ZqcbiXVTCY_+*&E1H;FI#QEniy0iYl&Tuu=WKs!w?7~MjR57hVZ8=0el z6-FyT0;8Pnzx>(Y6gD2Kr1DC#5sED=u;h%r9sF#A4Z87>D6FK8> zs0dFo9Djl>(Y z0ZHHCf=(S#lSJWVOWJyH$7H~ujYk@LVwvGXL+LPf^PMHFL9t9_{IQflC@QAk7){u0iN3e!j5G{tUd{rpjZ*K_E^gJ6U#@kVoFk7oEu0Fjm+)9@1~ z@kIc;S!wYBrdeIZB-pa}9nWrZkN!s29$cau;J^M^VY|miKb^BC_e}@b@K#P_#a%9xEU@6l?I;uY{X_!cdS=Y$vX@Ebqc?IzX8QB6(t%RhueFtU{<&MPcKfg;OG*sx(K9ccruX_9&a;nFnxRlImHoaR;Ew3#bt7o0pnZW3Zo9V2 z55jG~-LM*$w$Bz9;rkW87lHpN;(J5bP;RrUa>V&p1v%yW}G6^?XYGMN|aV`yox2`1Q|j`O(g!_K93Ux<)7MSy|^2xsc-|-rD=guJjM#TSm{P=M+{#W_w+T+>%|A+XX zH2g;I{|n}vh5njd)}LM0|GQn*KRDj09h~m%zS_Zf;b|B5V^HH-0?^<*#+~$KcRcBgR#-3gd!UgjX=grGV~k9Xit`Sm88jA+qqG{eP}b#D7ZsivNA=8 zbN04{eaY*GZ$>=}4zPKR(DtBO>G5=_HOWpox#Ivb5+3b#kp4)LMIsrW$?3 z6O1-vpt`5Ys?$8e7HN%}aMtor%wP9jIeY&n&vx#%1+K9$~-dKM!+y6bp zha}OxXmj_z{|lLOwgsGR0cTslzuOk@_}9JDgX7w6+U8HopIW@N33g1R=?(e)FsChF zgnY2dQ?>;{Uo^G*&{HR6dxmF)T!S)mNK+f3r^R%%n1*I3XjeSMUeQ$1EVk^&)iADt zMFMWI`=WBexF$`vKIAWp@{XRf?yxgpKux?=9_ZK08C`x}VY?oR1ANT7--jJKMeReu zB+A&mhI4PPSjV{{c8e&!r;x0%7~4_c<%YyP1!0P9>eW`j(Eb-q?SDJQX!(l8_6&#u z2ZKVisYe%+_~lMiS-KypEKNq0Xt6}9*v;lW6J_~+h_XBxQKH2Xi4qJiauJ2g0I{z( zL;$N${5|xJ6~(YA`RJHE;i5O9l8BeMeHT~ba?@<&q{=DaM{dG1D#bJQY9tv$$Fc0? 
z0ZQ=*|H`})+2SXlOkYB#BNFa1ZV3V)ZcxiAa6jg>j2dy^SXEruG)9gU=CVg^bv0_I zi=bDdHOf{x1(|i1Q5#D_jg(9GG251tXIsLU<(O_9Bz?j~v1U%VoP#&9cB0)QO&dN_ zy~r`?q6KIOI}=R0PTolusZmWjjm(%)89n8g_K6Q&44*msl8xba8Nkts)K~+!6d%Am z%F`G_IQB_VMy+xhYE;K`SoG#buXrFmM~vW-HiDy1o9;4-qcy*=Msa!EX}Em1X*kgc zE~kv(GM|UJ1~7B@mUB(OF<+9&w*U7X=Y8LMf2Y?UN80}%uRlin|I@X{8|#nO*SY8?@q2w}3~+Lxub=-=&#lZzYm zk(mTROtDqUpb=ZO8htfB}aku!|p` z^mDhoq9n^g*7UwN62*ulM8g**6-~4VvF^aQ#w4-Dn`L4?;H2T&;MxA+E<;WD=B`yG zxEWabyG3e9kuFFrA+PxhMS&F^Un}++^#b~~mp&&ycwJS`U5o5V-f^4oCAXx7Pwq4Y5Th!c&QR=xwEZ*XVErWcEI|y58 zJRwITpYV`4zJ0@AeOu=H-DP@ojWuo>{Pi)%4qOMi@X@{@9h$a5@PBl$Kz|C>fE^Hq zI+V1*fcDq{BwzHYaEfR@Cm*55#+}`{ zrYwianmrN?P)^Pxl^7Kr)C^5r{7oj1nmz|;D4flq2VFEHvjo1%=DB=s7>k`3W_w2V zI160XbTNi6aneL5#dur>+E~bCGnMQw$X!!cpK`OSsV9lVbZsexpTnRdU;3=$}G3i~{s#CP3*Mg<1g@nN z%00feF)vgwQqmsY{c0CwW7qA$AOf47s@C#LK(v>6z$RrW>iF!*E=&-=yPm9^<7;uB z#S%iBEP(dApeGw~;sDA*$jF^=yHf<<65UL`P2V+e$;}(2YBBI9?;GgaRbns>TO z)-KikhdE;5rF5i7u1fMT5Ks~O5IGCdPcAgvmfdmNJ*Qdg;2nub<~G}A!LUn5vBIn5 zY}l>g&olRvQw&dMLHuxarM|03>NP!PUF7sUfFWdz@}6%2m%tiD`VBf_MW``u>a(3TH?R88**2C`p};x zmfAe>gVWo`x1e*k+Q2}g-d4{kM&v2Ik6J{mC1&1)la41k%OYHb{}J8A{sLlVH+(M$ zM1O;^ndVA%5C1M{ETq~b+r!~VHbT3p;T5wDcvdG5zBnaxzYM6K3m4;TjL$Z~rU~*U z@<5Fyc*tcMY9I5R8vHD8EAeoLCuWo^(mZoNo1LTPiPuc10i~RMm0#9#bvoB;G}G$1 zHTqGQyHO@DY2=qjxoDL8{g|oIiNCy(^MRI8&$Do-dtkcQf}XtI3QwXKSxvPg*?9u; zXKr9(wzmdjix7v*-a`%{i15o zsf3f9+)PxL)Wp%(P=t(nm{9RhfJPtQGIkK;D;41O_AG5O@4}E99#UD9MD^VXhMfW3 zJ@EX&rPpq{UHPI1vHX$e zIgWtQ%34U8be#YZbea}UPL9QpCTL)2dLQYPM9x@2A|5e0ou-_$G=UIXzvBY`i(^MR zj4a^HLZ)7UKgb3x(f%4al{r1urU^+cfqG?lG(0jTHs|N2^ll@bR?=kzw5RPK(*uug z2d!^XDB!Z;Z~4uNua3KjYOnop4Z4j|oxB!-vjE2tbwy_6<8>T_zBSCWY8Dx&HX@dr3c0~)3QQK6uV ze9_dwTTAhol+sG}H?L9pH?O7qZ(fIZq{q0c2dn{N|Nl;a=9_b?8Y7r#EHCyy;AcJu+rvnnf010&> zp`=6?)wZ^%@`?X?@U|*D%wylryf?q{&xrHiUi;3kzl}QoU4QcQ>00#tw~e(~{Qn2} z&~kn+r!Dt?{)@~xjnMzIGu+u3?(7WrFL;J~^fLAIma?E&uhAL8He(A2*z~&FNq&)q zyB=b41rc|7ABlQUIVjNL&;{m32UIrTHKaOB8%cL)GHP7Ys9jhwP=$oLVMsd|ioO_{ z92*{N039n#!0LIh|8VEgLrpTE{RcuvHtYqlF$8Y7a%rauN;9Z_TrOOKa0Xd|2rSP0 zw4ZfhMz@DHPHo4*okmC+m>L9vG(wd+RD!*aj!!LN&=zc{(Gs?t+&;1maic(pL759e z1-%IhdUH#GRGOMMr6@1z#QjLUV}sZnyl%d&R?E@bVFFGm<$(cq8 zbSv+|tf4qMEfjk+JGnTlH=Qe34$v4!Hqn-Q?KEMvXwpuRRFTG}I3W>6OEL_|N;Wt7 zzK|ctYTt#pFo0R$H~83a=8|<46Rw>?a!_KMSfoflRxX^j+r!`z^b6!pu>@P*TVs35 zo1uc#7E7#%hTYJbpx2}o*@i_i7!=FeQH$SLkzxLen5pWgAH4dgy&1D!uZ7lwf^;*7 zu|YQyX^10FAFoG4(}%x3H1S2O(;QTs=ILv&ND__n{B3T#Mw!oFD=3OuELb$-ZNNESt8WL%TELl_yvedV!wrzJbjgp(To7prq zn{c&-_3pwZ%S&`Zq3En5H#@@GgOOWMCs4HmAJcoUD>D04&&9W6NTk2Che6=lJ)`S^ zifRyOh(Ny;?rNwNfhKUU#K51}njHtGY;&8NNg8LrUh$`{C(`ye?w!|RQIBHUgN zCWQdPRUK5}(5f%-?0`~>cCGT`O5uAnL&b4+2jzeL>(SFFO1*uL)4_ee_g<&U_ei{)UvV} zV*G`)Ar^n>45-h=NIw+4@JK&^txc1TrI2ftNsyAIVc#I0T2Q$(hbHyfJkm7_)M_6D zTU#1wUc=Y7VcrVx`TI5=<&z*!Ie|86RY%Xk_BORmKh@Oq9P0Sl_#Ka^e7yVAd06Df z?|-hIY;7^w6+A$jj|TfrClpd4nY_1Zl?jE;nd5_oj;i*ikU0@q6cI(*MYk2!^w>vS z%RdZZvxhq5`(WVoS<|;ak_m%r4uN_Rmn05)wUS+&y?2~GD*Cie)0_k@h9yo3F%a7g zj97MI<0UzNHH=Ud?npcGC2KAQB1&71@ijnh3n)NbXj)T4SwwY68^vNcb+wx01P-?V^yk?bewY~^x6Ty-$ zsETVVfMI2=#9|#?)6V4e;bmK41d{~C$2_Y?CJ{k%%(x|H5n9kg{V#U%9*l6G)wVew zJ->;YV?83ss{9ca(sW1->*B?*1!apyS_yoRX1LuJuJ#`{t%} zA#!6PQd9#PSY9Fd#klEe5?R4n(4^+>U8uSHSE1(El4wed$t(UVQSnPKB(L}@q@uR{ z#=XcX?T&WItSXI{zK{hfrOXz*CJKkd}Syfc$it3`(D>Xe~vxNv!_gk1bl`U6@=&rji z{7X@2=m^g1!!8}qye}Nqwb3IJSl=NxO`BK{_vE$9ZqvT(NS_1@OH*RB$Jfk^%*`JQlWYJMg|cZyKdBOHA-G#g1HZ9kfe`#E9Xo(q*UEB1z3qEI zWqh~$;zqc!@V5q>Yq!^4xug9dHdou|wrBV(w$Z|i`%1xZb)H}Z)f~%&q09`dPebpU z<_3cmp#dfvNlWlH8@e$!{Wjl&;-4+i%C>UXoD0Jc!@#2*{Xc-}_jOWb_5y2_1;=;Gsa9k~F&YO;S!M;Hx|grItGcK|euuA=*$ 
z=R2Y}pJ0&AZM!0PZD@quYFyertbgdjQmW{M4qy!O6-v~Cu5w$TGDGN&XNpR^1fu_{ z^+!pwQtUOvVe=mjNz|%De%!`CE4!Y`f2vY*yR{kP8fFvPkN<7~5Vsw+R;P=|G zSmL5F=)y~avSZ;_sT`M-+sf!#r5=2zdL*bD*|NdiS{O8AOPajA9|DTp;@t+^$LuQ`X&vi=q;Pml= z2O8>S%)c+hU+E8t>YPSB$bKw1`GMepDwz1DXg;RPET5YJ$9aS_nddSdC5;_{WrzNr zUFrVzKO^^ly`bgzciRGvxc|NJ=uve42Wd}d`@e_yeB1j!!JM-#;A{&x+XDU#w}3}y zFZNC|c7ZZKVf$r7c9R!ggVA@QQOS$mJ=Ja>R}-!zhhfJI$kcn(cfXr!-ptkUhqrSl zxqz##&rWpvHd;FA=Ivw`Zxip`@@u!K%-&An6935U2;Jhu0L3(HZZDv4?jTD#EvCOP zdjBmM8OgBkHs0@OO(I@tTtrVxu<6OM$5E6BHbS7t8`pp;bn*&<%h0zcc~=wN(;)Wj zJ$3Aei~G&+Ox_t!-Mx}KCmpK{7%H6a|OGcd+5VTh6k4_l^%9&21c(_*9ebqwH_8M??g zRii_hJ#dMUM1@^qM|e$>TVwCS4%}edScxw1Lfwh)p)-LRQTJC)C{>!5WO&_kflEY+ zK8o><#Wh#`V-7z<-zmVa5?0T1R?~78FW+w;?0w5oe4(19l4i8 z;?5vkeFgn+t)cb_2Py8_sIBN?9jDcs)&tXri(?UvT*N?L507WbbA=G2%dqTYcrqOi zD@kKl_c_sv=K9a{ieXh|6_&Ox3pBKhvN2akV|fh`EvA5L>vVMCl-&%+EsXOEgK>a< zp`_)4c5F9Llk{DT@Or~tMm--kKQ3v*N;tGoIJCBSg`rqzh4CB-Oay^Qlo0hsjIL>r ztpv!C;Wz6oD`;79k|f56i4LD=A*v%B7kpa{2O+TuR5iANyd60LD`;3r0~QO78RcJP zfRv(8n{z^XHQpB8QZ+53Fc*2OLGr%>YlTxmOH;S}gb0*Rc;Ducg$V-iKegXa{)jDT z0P`OdSxoj$U)vNmDG_U_pc=m4^8o#kDf#}BL+sVG*H3sEnyNo@5^6=9@k^6S^I zS-W9fyI(NH7VjEwdnGmRKV$7nh^xjEdIjz3ICrEXiJ@&_u*-NGTwLcy3y6G46(jM& zZkU3fe072|b-z(a4SJtX4650CpQ7+DBnyGRKEY_$*fm>X+%6#|r4mk6H3yZbTjVn? zgju4wq9qXe+kzp>@`y6?P84-`Ul*te;%bMQM^OA-i)?9?k^hGoKR)?+AfxMRT%7-5 zybsj$27T&()D}|InyL<}bTBR#Wfh^fuR0I|%-b|Tb!tNWv za{JN!JCP&0c=2}O+h+Z6aDKsD=4NCxLy~_py=c(qyP*o^zZ5qF$U}Wo5Wn~D#J3ybw~kVxA- zrA{l-`=`A58FziZ*LC^MZHpk64I`6Kb(MNGe0=M0`S)b3z#AU%E#AQrUn| z9sGFd_)d}W25w!tC1#0Ci;>h$hu*GsyiyGMcH%wZF}-Er^|IE#@r_uCt6@~3VZy?M zx6nh*k~fo>GDMkns5@A=rfj|9jkXl$-|A0vTmhadiRUV`BFOw9p+CeJ zFV>l>Z(E6y#MKwGIgKLgZjVlx+j04$%agX%YKy?z(p|wUQLVPjgi`F8HktzD+8ixh zG9J3(JREINC@Khr5ESjmt8>8Djd)tR_~OcGkE@IE`Ksrt&PK`p(MGoFz9CXE+k2fu_jYUYY?IgSiHFF!Dxed?UhW~|J3t+dh0AIa6ij8moU9SUfcTjp;?QrS>(xrc6rxFLEyrLE@LZ^n+9= zWC2vFJ{r|U{ZYE8sfAXtf4-q{S~td7y{T&^T!f3$45VKw&4f?SH%>L|;aF75L$;=S z^%o}__HgQCbFTY`_8^sVOufotclwt$%2Q6tBPa+|+1-0ldv$PLKdhbC_kKV5rs=zP z&3%1bJTs0ETJ#=!olWxDR`QE}viE=F(Rc6rzo%&|0kI9%kTe8 zNc_QpDLq&|v>VSo@BQ+L?=7#d(5;o(u5Y&Mo9+6(tzF-5KkuEUo*Bx_*rpHPV;JJ& z#p;z4>~SBbG`iZcHVi7Ay}yJHwX^|HUR}_hgipk! 
zE)|id9_ES55A0g|wgMk4pZRIK80ZF{6#tcQbsd;%G-(B*kr*&a&PB)j$n1~yjSyVD zfS!)kFA(6kylxecvmnmEFUW z&ceOU_FuF8*WYdbm9q2txd7n3MrYWBsU9^E;p4t5moVFB7Uq;s)c%WsT30{|>rJta zQS1NXM~`6rf4cT~9Tb4_Q(XVy>umjhh);pP^xS*Tp#LDNrL1#$4OrfKZIpw3SkQ(8 zl2o*=4f`n29<4Knn=}GpFu?zIV3lcmz8mmR#N-i;>E$LAu*&>@SQZo4o2>lA63Os& zsa$;wD{kNfnER;7{t20s&DcMe%1iJEc8BG)5N4yvHV9@NVKxv3psI{L4PhQP*}8yP zXRN$bR#0JO2zNr=bk?9ZJyzXjKn_R&Y0J99YyRjDJN4)_TK=hS^JsaDfP8NtggGTr z+8VTd-P*AJX_cP=_*_wz1%L_QHA}%e;-_D@H>G{=@4WW+k9Z51S!FH&D%gf2$ z=^>*hH(O{26KkVGYm40j{jE@F*gkBHUAwosBfkkz4Fp==mLqXawgR+bfxZFdTP@fs z;q6cNLYTCg4lJ^cB0Zpv7GFj)!F!C!%y-D_!bMvHZM2ZR=Q-8qu~oq(th1{-^rsGb zX%{eF;+UBzu`$#t^)|TMyYk*U;ee95(H1kQHS9HLJq0DI$L0m5c^u<3(-^V}Xf2>~ zbsQV{k2Fo~P1lZW5Salx0Z;a;=WZhAcj z@d*grWrNz*3&OUNV&ovRvAy2Fzk&Lr>ps4HdATXxO$Hcm#_*VZ1=mASeP0Mg5M4g<5F3-SFNI zj}cI1m?+|n$cqh#2;Y>(#9mV+r`&9x3-7e^N3MS)oOF$Sj;An{w;JYgq%tsgTCa_G z6`8$W7^m9_y(WV$Z0(v(wH&4aN*UTcodUdXqK``)$)X?&MndOUMS3~`W|v03%1=c8 zd+n7*W7>6O%m4D)qm9V>U*#u{p3d%nJjmx;vi}lu=5Y7v4ZI#Gi6tMax59T2>y8$w@MbB)}->5BG8kbQzkuf=bvjhL0LDPS;2aQVxf~xKKo0}@7 zQXs_*h~+!L+zO!ST=jr5#kG=vz#jM7D*yRGZReNb{F}8$k6u4o>&~Zuo$l>QpmHkE z%hSERBMG*i3iij|!NKuw67Eqd-1CE1dlG6R6$&TFfr5LS3b*q|O||VwD%5X3@1HAV zPgB8;j!zG32Lf)b^5|bN$i2gp^FNC7n2cu)Y>n>N;~;Vz^6Q@OdooI3*_x-H^Oi<- z7605U_nYnaupUuP{0vKSBBvBSbb`QcJF>9&85Sm{NGZGPTnyV`gXw2jl8;78(G$lf zZ-!pO!J^vF5CQQ}O8NZ(Jpp~?@T%2Mz2UFYDkonRfX7`B7FIuFrN76}d1DL~4fUAA zlvO5ebh64Mk4jdFtii}Cmp%xYrP2U{KzzTcFN?_+pK04FitTv4mfIfsj z@Y-DHX<(nOqn|O$@?+DbTPehrFf+&Kb11`@hBH2! z%c1+>og&q&uCib8-UsNn9k=V^_6F3}!;^hBu!Hx(-Hjj&e7k2Z)9fZUW zyEyD6cQUPGdPVpquCKLhZKk+ll{mEnQ4PkhjS7=!r4WM< zb!!{Pgk0Jb0IW?DWms7L(Ryj4BZpA z8cthdu)3E1v*C40h-XubZV^PcMI&15bxUegi-fTx2e*jD*k?T;DQWCt7scxZ0FD!O z39ym~V0t~q4Fn|dEBh>NGYJOM(ekHH45nCsCB~s$+t!xwl2AGPuO(maCCV(|-cXB% zDYuI&qIKcZ9{i)&as|JIP80ltEPRfWvgdt-j)lN`9lH;6E()(>v;9FdTktvbgg6=g zV#@^8_2hXNqWQG4ScZ=b{Nlsw3_cdiYbDI3gpSi1NYI{+n}x{dOssqcz!&W5)|Ls) zV&GvT%w~$g<$zj1QU2`7XO;x4=DmV0g#Q*KgK&`*S}-?-W05AsqznpyiYc>#MWzJ| zu4!Z6lrwD}kLMtEpjG4`*1W22w)!OvO{>wt@X(N0q51WOtQKi%N35TO z4kpzbVFe0qx))w2KulGp`XOgkF_D^Z{7bl_vapg-39bHIB}4L{>e*knI8*^j}S-n`{nVkpt@hu5pAbgMHA zE{TdnirY8jZH5A81kn{%Nc0P zlcQbFK)XIU+D92^AElsG?tbwRfT^jE1{4OJxWr~If)D*M?SF^%duIi9Hq)(Rg#GW) zqcYw9DnEwwjVEhp|HF`AGyC5IdJQ&7!CE|r} zr+H|@l5ri)cBSUBS_26sUKdXX0{!mYux$^Vm%iPEZAy$X8K7Z5a}+vt0=GG|JL#ai zXmJ_UsXe#t^v*m#qe?UbHQXQ_;Ad|bIG0|h>GbOmPV)fpe)2L9 zk7_*6e>FgBS}G8K)phT~kl#*B2R!ke#u(&9aAA>iVUIUP$+O1g&_8vW&(Q}U3U&aC zpzR-v-E1`Xgf7uFUF=Gn+Gl-aWsPbzl_!3>ZD90T2X9$P>=`%hk~7humzwE2pz*1G zsdNsn?wEQ0>W=m!tWQtnWs+~)MKij6HzBnLMB-=zHLJLb9(>vFxjZF$x64L~CFZDR zMl(7B60Dx_yjw!92>zdtBf>&LjtUrp-_4tCnBwu~ z9(S6nJVX)^CYB}%1%qsNI!mff5vYEkUAhsy?z?pp?Bd9gAvW+KuUKQ0ey^9 z%I^OLQ{3xu+Me6J%kfK1Rw0a13kGQHDn+@8Dh0L!N_QkyJRqf$}y)Qn1^{ZEiaMa=2W`Ms|jS{o?pNr;|FfIneoGt||qtTF`y&HE*J@ zZF|^uR$t=3N%M|ppLFaGC$O_Wbs7U3BTm-BRUih;YbA`yVEw=uHa*$My(_18+-#EC zv@5UpyK13KEqB~=UUX>ZDlnXq3TVf=kh2*5&0BWdy2hyF{G%rd<%JU+854Z0rkKE& z-Nx$Ahdbu)ZsT;=lWMC>b(PIGSk-**MuXIq;%flY0=t7j@B1Ewm{7U@w-ZjOpzp#W z<%0Y$lb}Cm@Kev>U#6Xt#wzV`iS|zY_A#o$Em&*5A7wneKL* zq*B-ItLu2mN&DSD(OQ%T0syv8<8X}glLeQHS8(sVixjK z2byU#UrVeto5AoRo~7+XjC9S=VWgXVl&L&8Z#?}YF#@HZK{^NE0WCmHy>No|ke3os zF6cXtjT=a9kH^MUw#Nrh0OQ6i|_jjt7&-PV6~F0ZPiyH%gJLb2}+D+y})o+(9CN%so&f zGu#641dieS5*pSN-$6F{=rgyolWyfpFu*k$EZqW^k{O!d2!J-=^o?KJHFiYwGrA?B z#2nk0RJingOj?mtO*5^4sp;i{8i`HndbyxLi^~W2v)yZsfDH9V1t(W_m7p!9DlKX8 zqs)|rDaEyxov-V^?8vl0| z|KWi?-!sq^T)sPD(xwJb%^g$=Zi_vCwSQpV;4om8s_B+wGz7y$7l+Cb zT=J4iZftasB^}?<@zLHm=*S)&V*9@EJv$&GxlRDT3X-_;jI~ez8rBc?pP$xF_xF%` z@SB9GT@c^ucKz)5)#(o9{!kRFcnQ85ttz3v$lQ4A+nc#J)Wm(!DxCG$%$|pK@VaPh 
z{pY>f?%wHH^@k!qCoIh_)BLqQDe~Wau=ua#@>==PV|D)jq`bl9|C5>ge~8cbOdE3% zU2;HIMVLJtcoaLcyi$h$vzG65nWiDEfcSXUkS>oPGWx^8Gmu1)+d1Wb{cFR*e;?Do zPw3yL^zT3E-+x(b9T_ zFR<{64qo!WzjVE$=i#Xpql+F6XvbGhbEP2raEE&|->2T9vE!;gguZ>;eB2em$w3{|pP>Gp!k-a15OWfSe~stkjSdY){M6y>ZoS^Xmuhx)v9Z^5hf)FCHcU>riJqr~4=8NkIEYXXmwp1F@VFgiOLllnVSL*7LiRCz!0R zQtRkIUMlcw2z5~uKsO5~r+en_fDc*LBbe{68@GPQn*3VC0?LEUGwEGy)wvI84M=G7qV!CDw&w3^vrEWfp zYMC)ak1`QSMRRu_NQPLBk7(4ARX5ZYr8>_2(^oE00KO^Ag&9&OWoD8P^hv31uRvj| zeXh3_YC8wVJ21|N8`Ktk6JR&EpF($o=8gDS?{3taPCtl&cQ;n;>pHz`TNk^IdaGl% zp*(MN92mq_MsB;&s9*o{37V|y*T4}MS2=(i{UK=LNpNb#Td^Bl5U3?k^(Ud9I1W3< zM=$nYz5?do-8If-VDs}>9Kjm1M}=phF8sg3jSPy|FwVm zU&X|N%!@CV{qRGd5u>4damYf8CDu|%f9Oke^TD70{ox0#;?+O@oc~`%x+ha902c{P zU^jTR1Y75fDDh&=9P6@!?fmb`B4CpZm89(27}j)SjS|J=;n*Y@u%>*ZSbhC=$xa&%!EDCWcxdr4ZOioF9JZz$%VWuUSYD9t1n=0gxs}867jCP;3|Ph;Z@F ztdkk_Qos3_ut?49&&vEyY?ZAnzFA%UF2FDF-)e;g)|(=xmR4W?clGV!4?kFM%GOT; z8F2S+zm(Wd&-Y&LAFzJnfn1}!U@IzTL0$%1?TIvZv z*=ub~OCLs?<6I9^IH>ZP{a3Iupv^fSN2fcyodBiNF_3TYB6e0|Gq zf1*LrheQ(cryqX!?x>RePq6x?x5|KPv;jygrvPqJb9e9Te0Tq}r1YD4GgZ@! z<_o&{feM5MiBh44lEwDnue9$p{2^L=ey!I|6~kS@kNOYAUrVKe`WelHGV&p0zO2Dsq^gZ`|-E3*e0K6_wP20x>jnUw(qRi>F(L|ucJ#9yVg%dag6_Yqp z7N@fg6VB=gV03;*vU5!H93|3c`{=@{lYVqjbMB7`KSm8w5{wCt0%6P|Bp33Q3B^M~ z2`MOaCW#1(gi=XjBLbEpJu;z$9La>?;zVz5OsYi7Ckqz^m9ejvb9|kz%gKNlk_BZ{ zNH$Ij(h3P#fcAQ5hiHUWhA^n#C-2vk%kMG_uG6E+A%sM;Xjo%6F8rgF64 zV_i>*l!|4P3Q{fGKiWBXwY$ep3|FY%F6=U%6=0)`TYf@u_VeDsK`CM!5CB}}JAKl= zLFvD_z?c&;wuj6Ic9Ep^3m2wU#FHiTVx%35o|GozUNeT4rSwRBu{Lp6p zOng;o@`&M?{yWSKj}@Lpl0q1DsPN28(IC#@cM>im<5@w76oe38pP~H$RtKKOzfzh^ zz+dM&%(;R>?mPtUd7FfoKXsvYVG=y#Zll}i=% z^t7{DeGDjt&0?DOavcp>NL$cqUu#`%cTfrWHK=$V#j%b|t`mA7_ML!FFVNT0a$@KM^c7n^3)KI-*BG_n>RK|w0Jx`NfaV2LPf}^B?M&qMo z!zBdFNGmWhO3D}oC$2UzR?1jKC#^a&V#*{K#%M@J+?2E;$)QtHq0^$LWFKT20aWrG z_U44k>{~RFO5W4foEXD+DpBxwNR?!W^r$My@JWGHqL5?6R>>}!6<%e+k}`-$-r^I~ zI9^posFl2+VYHPoON`s5w3sWV55Ra~SB$bbBCi-2P6njn1OD%%ZsbC8D^*0w+u{O2YjHNub(gm;}4i*$i)G&HYfs zvvT2PgYj4XaD#WjIm0SUpAAkTSiGL?jf$=Ip;8daTEjg?{IV$4Ulk*CLSM2&Jb0Uj1i2& zhjYbjEOv0nuU`M~Oz@@Y%vt)WGODo?qvoC97YGZPMBQX5H$T-?&idJT?fli5d7ZN` zH!*=_gJLz(L{FT*lhEuGm*E*q2 zTIPPhb^DeELl3`Dx)Vhwq9kWhbtT(eCg!kyWL`?d9?&kv1Ag&i{A!2wOPzYoRtXioSfVhbRPv zvuMkUh!Xf>EQ*c@y_U`St`Ub?DNsZuM||rk7He@=uf#_}iHwBJDgbPPfm*NR8>Fb!y&W<>5VK48rW*COw&_7TG^hBO9O$j>lQ_Q-0D zdG&KMUT~Olu%=+GxxP z)`d~sh!+w47>z5ry1ewuDxB4XbgGVX*_co8(S1yX8m}2SZZJP=i$-6Ie%SV;+_nCK zjrh{Eh}{bRf{ie)nt#D|nD^JeL_-oT#ot>qq$xd*Bn3E4XeN}>u#F?3jS{gMvI!WA z5wMxoI7ymptm-5oPs0&VYj09H9pN}MF^j}2$)-wDgoMr1HcHBjKph^arHEiC7I8G)j$WvYFOGBFz}6)54+L zu>#|cLpF(YP(hol(oxj;#v!>-a&@`-lpf$^Gqd?*BD-sSkpM7UWx4lS%_uQbtyCSt zZyND2WTY1((TsH};;CR%zmhn92v787UQHMxRfN7$2sFAv9FBo)g zxKUHwcMQ)&7Pa^~GA(Mc(J~BUjFz5YXVG%Uvj%1sjM(ooVG}HF8homCM1vV;uRo$# zj=gy{hS7WJ?D8pl-!Te}TUTsvIYuF4FE~aybMKa?V8UK2QY^N^h~y{f+7XaUZBv6C zrB$VZMs=c8z=%ep!=Lc7l_6o)6RnvESHK#)0DWjwnLOI}xI+bA<#UJ@q=cJfAw+Q1nlPI&K4( ztHea;GVL+Z1?3)UNMC!VUs>uhHT|m0744q!-(wHtI(g6daY?=BDroh+uR6kodg2;P z%O9@k6Qhcl%J}j>F}A2R$J%{4F+d10l|Sn)Osl-9C?LDT7Dpids9R_tLhq$i}K-?@C`;#v3* zhj?<~&#=~oKP$nXZ&o}=L?@y*so2Bds3o?1c#de74|n0e6Zr21Blm8K8FAkC=y4*d z`G+mGu#k=vle<@?Na^s`i5DySqIPg*c9-H#KCkWka&}NV`#E6{;-$H(fAyyywtiwf zMlF1Q_7iUU2cFld_B_V1jE9yJwW_GgBd3a>k@!o5r7b03PwS>Z_mHfD8!q166F*$M zyC;tL9~6|4F-r_b;D0j~xd#?WptJJUHSeW}Ybl!R?sFDrVJ#-Wryw}MA^Q{^$tnC3Hl9XfG-mS`z^{u(BL>(8?)yIwJ}r4Ow( z0zajUz$)SRX)cJqz%imh5ijRN58h&yAT{$a-qqHk7ees{#lyjALHwz!JnhPJE3ZIu+ z!)Kn#6^2?KUXJI#Zo|34Uwy)7rX)y7z5?aO-hkq_&TfU9cK5y8gqH8`LQ{EP3mCwA zlOZJv1^kz;cqCEUyof@i+l@XFH)qgPQW}p|2ke?!fWe=t|LcEj{!d+tNE;`!!oTg9 
zQOF7Urn*5(9|dHu*G^xam45hPQI4nV+otH0CfR8=3n-k=E9At|5{&*l~X={F#zgFk{PGJtVNS(h#_(0i*n^vu z!FYvejl30v4SiJigwl2JjjPHJGYN&buG=!b!4V=U3i8QPD#AFlpt^b88 z#b9A=fu-J<6faDQLY@zNKl~rluK3EUY$1Wd3RCX15hzR#p3+n_Bni;0u87IstSY9^ zX1&6M<(jKTSwUGSAz4Ch7WA86dguOEi3udq>=ZG~y^9*UmfK5&W6Kz%hc)qzEk5$i zmY=SzLABRTgEcO@UXu-%{Eye{GYF!Js#|r=>|SKYgELmFYk!%mODZM79`X0#vx=DMSY>rN;E4VwM+wy`%!a+?pMRQGfJo!&dq=-k2^JHo zVZr(T$#xm;Rc#))d{f>b4S$DgxJomwl*S*lHt&@t)Nt9Mm0b?c);6KM-j>-V>D%<&`8xZf>upM%w3Kg^Knbx2P~wL9&WEAvJ1pos4WJi8vp<#h z9nxu6=DCo9rIgx)5?+3jX`-Dhej}dgVh@Wd0k_FQS%>QlQQ2?lcZRbsoG0CS9dVO0 z=^9buHJaqhBk9*7k{snzowfx(sZd8c9YQLzm^T`WEe@n9=s`@Fxd$xZTR}kRE0@!r z{=zDQPxEXt{FlfPZfvRmOVqd>fyFu$~4{bnmH4L^*+{I?DrXuyF98yKKUXN^z&O%@Xd zfA2|>Kn_!a#0e%QiQ(D4852n6Z!=#`yae5?P#~_5+@QDsM_-J(9Bp0K)-`WKPL4Rf zDHK*vHzxI5+YT^8n+BkhpO26J5N1bV@WB!Snix)PiMXg?F^Q5O#mSl(k~={*8InjM z%aTn@?D9}LQE3teB$T9NB0(gjk?gA8do)vPf+@Nc(FIRL2Pn@=^q4q>1i(XhX z(GVnw;)`jrMMsc0+`6=hNFKdxKQJYY$qvTWODm?vU)$iuW)B_Uq|7LNJ|xZ;EE}IP zI9bGmfhodg3E3V)<`?$paD-E4ivgB0VG7@~a|cjffj$IsC}6K2tvvph^=ReEqZ0F4 z?B&rb==FOCb)B9!V9s?PEoRsX3c!5U6;bIr zy9s_}ph`hU@kAaqstX5jH5>=@vX#Qh(ed8z_zvb~;UBZl*Z$c9WtzgxuVQ#QeDY=b zb*!zetv`Ld_K*KB{`mj>fBsxsd-V9plMPIlpMR9spRPTAyuSWqWBng%yKl=8CchhmM)8L%Te!K3&zQ?ZzZucQt_8_15_!}<)O{EST;3pet<6nMU ze)<^3|H)Gr|BdyHC;wPmUxT!Nu(hewKH~E?AAhv)tuEfjr%*uL=%&s3mp6gi2;?J@ z7W??|<4PO!_u<6~te~rc_Viu=V{*CXpFJXDwDY13nr$V7N z99(+-COg4s_3Sw!XIr9{XZ_0+Sf}j9%JBU*M?ppg@dzmz8Ct&Mu)u2#KH9!hVK?58 zHDKZQ9n|1__hLA37%Yc&uepj=2(Rh3ZU_-14q-z1^lT;u$`5ee#UHr#ig)(S7^D;vvpaCz-TegCoDx+ zrlIjZ^ZHn@1TDT{9eWU#T}dWD*v}@K{}K14*LS!L1v>fB1)Tv_{bAs=h8>G=05tn; z|NQ6USLdvD^auN`c6wSnI{%{r&@dX1>s&b;my3@s0&3`(Z}$c_(0)R3xOckqGnA=4 z-#@^o=YhB{_Ro*@&d%72<5N~+C$-b_{he0_wNrNT>h$FJY;Ofv#i3_PIlHBknG8lA zP_pTOiq{E5*Z+XQ4WN0QCcCt+(6g}NxL43P8+K~_n>^#!@p^5-g&hUxkd_rZFauQ! 
zHfbv|ruN74V#GGRLTNCyy+>af+ZO5QoM!2{+zjNDiE`^1wKGK{DsC_c6Vfyfw2i%^6dbp2j|^nTbD&NX4@uefS|&`DoG>>b%@K6RvovyHEa5 zrp&G1<$Fk^Z$E@5A8hn;)(N({r_g6z)}*K zx&O`F|7PxgiBRC9!?V4^^PTPH?#brXS!y`2t=RNJf>YVoEIPPYbnq-V_)i)f9F6=e z-uGeReQm>+7L8rFurOn{zkN(`go`ciG2LcjHhtnJ6jhXen)mqJd z-E3zTkf*{~ML=rHdcBFS*($_7zOyLM|Hx0S{b$?nI$rM=r_+(laQqB#Irg7tE44)a zH~2cU|2)KpIOM&kVh>^eu}sdH*#`m5%sw--&&=$TH2Z8H9c=C$Hh2+YrAs!$lZCf{GfPU>_^P`9GapDAz? zfdRAP$dNX2NG>`*X9j!r*4Lwon7unQ&h$}oEf8ni57r;8ttmi8_}Ux=vEZiS0GA5f zA?#7g7vH9uvI>e$wl&J5Z=(h7;w3$vIy^!D6XngxmH*`k98oSte{c`^pX)E`HIV;b z)K;D~>d%*#QT}hdcs`T=AL8?4%m0bV`M}b@y(IqN!t^|-9yqO6zW=d$Jn*ZH#SY%U zGSeJpn!`+U_<=MBr3D-v@0@I&9i3R!21}vh3c~o;6WOo7+3a*UyGr18fP8r0&+x{D z1#pFOIZU%YY-1n^aUM$?X>rkd5>&)xha`bOxI-Bn4A3w?Y2fv)fIZ95D@^|B@e-aI z+C?;^7_-;(BR&>>g~!5f`?QZ|NCy<}{0gMOwc%~wMS>H+moN2WqK%4zf$uCTm6ZF^ zl`2^vhFhtSu7JNweb%rf9GoJEv{<08QU$|1V`MD0Y=~Z#S3wfxQKF?!@!}m?P_TBL z&Q*9)Dta#P6_^qqd;U$bDP4U~T3@U|qX;z|*C8W#I*zIX{7If2CTdx5$*M)VrlqCq zwy2h+B@sKXyeI#NBx)0SNSN3b_mRK@QIeCOp`D%hkk@Hm;pNyJB!GMzh=02SA7zG} z9hqP_zyo*YDwl{<4hA`4Yb#Uf$aL_5OWqz9jE1&u)C0?l)hO*J|hVe433z#dT!q ztIX`TnAxMc8E$(ZN(8Q*4AUF~4HN4Y5|U|3^DS$Q%F%d;W?;rms~P*wdq6qa#^LcG z1aO=_ye3CHC19SS01bOT@0CuU)o5TA$l%4+t0#6hO!C+opU=nH9-+`e(G55r{beRs zk+ZfU?d=tE^+E5giU3KQzD4|Re6@tyJ@12#3^KGP=!o`cfZN^cgRMJ3H;4u5fIk@0 zjF9xwXNPE>M?3KJ4AA4+e^F-{*9M$z|9w__-gv?G-oV0~3aT^0Ndj0a`_F)p^T zwv>NIfb#%a`{-P`s+Rj13W7e^xy5qBf!|T`Tcng~_w}K!3cADq78a?Ia*(w5s@QpF z=z;X>0$?&!A|J>0oz9JO8wg1^wBrWi1~Y+)Uw{1-d4S({ejm9pXvmtNC=N=DWkPx=-^*)jywmMg|nMimm)C`y3Yz@b#Vu;WARkq1~-yRn+BG%A7?R!0c zP^bzPf9-TGWtGAt+w!}3HW6rb^kawv7p77>c&UL)2+kJEa3#@i(5c!%B`IT{cSq$o zaISm`yyYH%qgffJR>`E8c-+TKas&e3*xJxu>lIa=)b6NqI&^z2ud@Ry^;RsL?x+W} zx*nen6WHwa&}U~CjYQA33#q2BhF+)bT3>BSG_qP*nR+X+S#t(Fq(vDpc=O=Nhr*NJ z6_0QzABmEbR2=plVm=6@92z{bjB>=w7#*)b8&Ec)R#w5Uqmz{@t_xY_;uAz%hc03Iw-(L*an}q`>+SDnFyU%yVW z*G}hDH@UN#L@!peL1>F5`nsAN=Qvvu3^kSS znXi*vPoI`*LZ6azfpR_oMh5=`M9YAcm6qSZXC6%R&j|oAu5?QK!J+BnuzuLm{Rr4} zNaTB|PWnN6A9$c6w`e}$yEPGhJ;N2X2C!prH$=$bV*Ub(?2|(M8RhBKxc7lhZYrT& zbD+o&4)iDdwgI~q+9^G3&@%bgg?HL=I?iCtd&BX8>MvEV#3{J}PXa)2b&O^c3udxr zug7gS$uh8A)t+jf{Y28*71(k+Pz}dMH%8SpR4F#>jKx+nB8;`B##3yy6Wxn|SVhLw z_}tughC|S}*CH%+4EU4OFYNUbt!>zpjTiu4hY{afGkaap5!1M%6-o+2vlW$GQtNlc zYmV=x4ipWQ#ZC|&C2X`*t~@?3f#6<2NEBYqCB7zD8guP|d%0ohSjTtT*s&cJ zib}W$#A7IZRe>6yQ@CA!aEtjO;Sn$Z{vA)_s6(8-!WX6S6KFWWTe2y>2C8vN%t{D7 zBtxuZca%}mv`~oilC6+Ruh|`~*H*iuwZk_2D>H1)32_dzId+^WErV!ebuf+ zrspvvuLr1= z&ZXxtXBxrtUA*(_>3*;foUig@XyAQtj@NI=Z%@Ur=&-#Hc0W`$!>?sdsOb;dE}i+J z{Sh1aN-6lW#ul2EE5hV$lyTwCqHsm5wDGv-0(}^#As0tcSIRSTrTr(7k_{Fzla(Jr zRxZ}C-eT0@NgAG@UF2xi@h#f!ccvHQN%W>PFDv5>EzU=_kFtY*+wgR8#6m#iSSY52 zghWT&U%uqJO&$P|rkjK}pP>u8;5xSEinpVanqKLxkzF*q5tPUPLR-{rmcj03J_cg-^hnqs>pY6JVc@62qjt-=O|?9*U)-vG1j>1 zDFICt`wfcymMHdHB*#g@oS&D<@ISBeTe(8&q5Q#`!`DbHh^vz4rF2d5{^BgrTy?_EVC6#Y7We#eD3L%U{IAJS2sFJ4Zs&_TIh+(pzem~w0vY+bCO70bsv4HsM#~0zHdRH62hrac?$0b;S;(_ zud$_4I5ZXfPnDZX673n&EE*}Cl|YdYK#BU8*#gpH%qBnyPH6-Po@fL$XZSB{Bb@qQ zL*3&Z6=#|2q4q75SWDwZTD$3;f+bGH7`WJ#+L{ucq{6m}vdqtYXW$KQNw~r*w0;sd z=$+NhV5WkuF_P&KLKPSiJxIj z_dq35lluezlMIRqcDyqAwqZD> zEcD$e0%$5$qaB0u$-nq*HF~S*%X+a;bB5BGkszR2pbf!aU`E|OfWT)U@X`1Axkxmp zywD&CNpo{(%bVkV(QX!?zO^pcpiI9?oMuV}6S3DRb5sNrIikMrjy|laa+v(Ap!y{iGFJKauqfE>;M z-OHP>u$nji0H^I^wJe7>fEv&r$VxX8)A0aZ2G_w$y&K^}Q(M5`r$N?)S+Acbdv zKFpZgo9+q4*JXKNV4g8h9eK>a?}eOQ9x4~R1HZdRi?>95s)S&$U;I~*h6-qdyuMyT zLM0z7jYO=`dA-e=L{97s3j*_dCr21*EH*~Mu z0V)hQr;YMhA>BkW#}iya#8uz+dU#15EqG-cVfzSk;6k<(#2V+H(t1kgat134c>kKb zkSW-w<-Je=0-B0qQh3#YRk9_E(HxH(mM zCDWpf@mOZDatSF+Dc*uQy}mA+sZ{A@h5l8Gh^S#X{AJeaj4&TjuztRH;a+(?I__ON 
zUvztI`Wo2*VV1oQh$Bcl%Q^j2D_Fg*hp!O_t;pB-sOXj}d=gr(c$%a0vOhpZ2U|nS z?emQkLEq{1D4zw$dKVP?G`Q8rPlrmh&~0K=kk7==3lf`4>$I*_B$45j-nA){nC(pG zbV_a^qU;4?a+jXmV=wA?z4)5NIj3w1BiT!)`cV2p08rWB+%FMCumUVXW=Wv)If0-0_nfMt?0r!|dvQeHSjKv-N zVl-cy*jSgJ5_lf-r}~(atW>G%Cprb4Xl!Gy6!hc~=GrIJ;ydnVQ8{dFXfIAky}`hj z#YNy#UhCS$yeF8|E)Udh4Akx_8-RUhm{)Z>R&|@IvS4O*A?*w_)}gdwMZ1^V%x-2g zXbk6-R&!4R{;`hHSKcC(I@*p3Qf$t?={vq;zBq!$t$v9i3$)92&Zip-KsM~L(CFyHBRos|=VOS;Y=nL^ooPHB_nStL@58xW71@MU#Aisa4# zUsCnpZ~3Lb+W4r+c@fndFkd^10W!q?cb&X=Lpnx zEhVZIhz#Y4^g6F+_+4Qq7b))^Ho;a8VnRSBRI0*9MyrEt)RRf^ilQ-)SX8jKyaF`^ z9fc1QXtUQo^atHctT=hV<6b@d)+ozd=1D5cb6F)3i?%=H$M9txxJ@roH;r1o`Z}zl zP6>+XaNwtiRYR1tdJVJQSF?WwW=almbH|rFkj7l9Q(dTXfr_gYebI{SrqUK|1;CW3 z=F681KwS;>pS6&o?)ufr0v%9&>s1`bWwwOxvV`N*r1kst;Z^bn{p9NZ2mXaO3MRh$ z6+gN6zrKjy|GNC*MQx`4Kg8$9*8gLZb9VRZ%uX<~6U^)c_Pm@2N3ZtIPcyH5O%%D$ z8($6P^`qSOY9MXB=~Z$4{L5Zd$>R6DX2CXZN6mt5zx5Ss*e`xHUS3q>msY_GVHrR9 z=ZCl!Hv4i`bJ~_J@J`l?wY5i;7Jh5Nw{m!0t1#|seI)SiRlGc4;D3H(-rxG|uA-gj z=Gco$tI(~nlFr}ny|D%5|97f?lPCgt^8btH@$qy3|^&6AxsMN|S?;rmw(;+*YA(GpV7x9STr%A=UdpcO8zHRQBlk)Oxa z9b~0c^}%?rX%O*O9bdf*P5$2y7uf+vt8H^-y8CNg#vf7r21kiOk#13r^|%Z z@d{-C{2tzSe5?##u08njA*SPuqiclFOE433okQd+9rP)va^%YYM;sR`1CD8ap&m4j z9*`&h*O%k={};>knf?DkKD=e#$LAsB|GddL(*sDNIl@_&CbdG=5h0v zz5P=V#DeZa8FPEi}M$@kef*@gVM;v&8(I);Fs z1wAIeWU zNs9PM0Y6>5PYU~Wr5L*zJa(u_eF(SewD5>-;I5H_R0QCBL9zj7?3CbH)<=U z{J&gVe(_=^|3Ad%PbdG+4g+RNz)T64DFHc$0d~PY^GKk0l?PS`(oYHOhR%Cb57P95 zVkZG}b468zxp5VQRLF7lAQf_&axj;n8qD3%tS>c#`eMzh7t~`qfqkG$h7xAewzAYJ zoBoumB+!4*Yv`_a8u0yk!Tjfb#+3j4-tcnZw8lFD$o2oJC+z>v8!I#U{~jdEF@ND;FbF1hXKuYn46Z(PA?V5bf$k9LadLy@uC6M0V$VDjI zLnyC5>1POIogz$hjxg3q!bE2Yd8Y{ve4dauWlFIDu(^^R|M#42OP zA)sa!wvZQyAVgkR0Sydi#>;W?nbvvBWb*%bCjhzff8%-F{{Nye^Z$8}&!15KpPc~A zlz^EMFjE31Qv$M%0LH2F;Oam|tiaTDf_v2iik=k|R|V$AR|GPkCr%8;R|4iTRe(7y z9AJ!NgP9g!(E`%dfK^#KsRLMJ7K*C|X4FF1a7ErXQqbMMkQQ)vHDEmXALbe??~a-x z1&;x8<^SbcO#ZLepDoYi|A+YeiRAyR$vM*iW*Wdu1DI(585#i2f-#N((uzNr+@H?w zBFR5i=KDVjNbgj-7BH3`Fp(xOmM$=nHjt+eJn)e~_PmL!0(M0pTMe))0keaEhdv0n zll))s7$9H%kNN*BHyY1o{yz`$`SZ&Evtxjn5-?K&W=gW6S>;lXE8Q&xHM%us;*_S=f)u;6<(j%of2}&dnB) zUcSW3o5D+>jLXIQffv7Tyaii+;*O{vnQ z)~=+|AoMOee(U3U35~4frG^5ot}4G+udQCJaiP@O9ng=^a;WXHGuCCm{p&#X+6mAC?g;D-!C!_p--1O0Uedp-| zvuBFa2Y8Kh(z@7;j7<& zhgZMHUQuS&ZC|)f>smBEHw3+f9T|FrTR66Wbm)#cLl0$+Ky2SYe_<=e8v;pdytoxV zuYj~CUf)>eU;CzsB~{KW7z~^n3&HQ8!T$bxS$hsm2{qf2lXDz6IBO|Yt^$Gg6Cx1G zCqNS$s}X>HU)Ei~Q6vCZUz@7Yte1bn?7k=KrTSCU&&x}tdX>JcB1KQc)e5qb_Xy>K zcnTv@`ygJcUR|w3{W{SQk1e7F*=1f#SU_JyFmzedLrzSd9#8A5kydY=BCw(ja@Xau zt!5I6@Cl-`I7O*xF7CbFr|E7F<1SYD#%KXAR>ZABzgbZ;Hr>l7gud_fK0QcNR?eh0 zfY1Hgl49^Qc>|B_PkU{*H}pU!>3g6Fn4~Gbk4?tTWO{KlE7jL=pckU~8~xMoF9637 zOR(Q7t|I@!QjLm16J---BMuYSgsNHy91#eWw-zXYz$8Emb(4Hbtv>EZY^gah4q_B; zXoa7qGO+udHcLy}FhfNN)HYXagKXY=G8A~9DeRr$rctem-$ubOdf@i_(bY98$Tu#T z3xXkP$pFh849mI!z1v3v0_bPd4s{&W%qzZ~_dzt4>B#!IigmwYN-*wJjWV4rG zp2ik~0mn80=>@-iYAzN{T#e>Mh%L;iCH}Gc4HxhfR+h1N4S}KP^4A0rSeA7VePt zAP7Z9%hnOE*w8Dc$-y?cTC0b16#w3r&Nn@I&?vSY6Y1`5Qh?Fh^#`KQ9TFI$3rZ5~ zZGkIF7c1+nm364mx*qslN8oYYM-TSM7VmFC2*fx${)EyMbeqhv2$`Td{l4oA&^&s9 z#;>j$1dCC%GrxEHBoK$rz!4p{H5?5Xwd#c3WrV6ac-h_wi$Kj46gFyri)DOw z+BeRi9W4DnzQ6QOw+CAO?b0u9@5&um3cm(^Qz6s#qYHQFJP96&z{MFv2Q6ym!mOjk zPX$LkKJ-Gvz^L<4&=MUuxL7W6nMKnY7EsF{^!ri69rpvioZPgdan{>2c0 zaJx{f?_4=((G3qS2qIVYd*adSM|iEhIxp}+K#U~Kj@!i(XFNB$^gA8@CU|+*h48Ic zLGB^|B+`}vunb^1u117d-$h{Qz25&qR0tFqf`2vQl$bg_@(4ooSPGCLHj-$pQ%*lT zaieL&2YQlq$iv21+x^-TltgMvJfRPq&(=~+kBT+Dh0JolbGhI3a+VrV)y>PHJU~Sk zkWa88dM&hHI=uk)3_6~K>U4gHg}Tmg;C)t;AOe%gCYj|u8G}t4H58Ymn#{(Y+!LGB zpbHzDe3HT^@w0C0G(aMSD$(!U&PO#Z{m0>jm-k2mXnsMqZ`mzaOVmkWo__s8Cm7q> 
z`GvUS!9`q(Z~LPQ;NDg-UcS4F4clUy2C-F|U$=``rJ-&Zyg)rWJcGGGI%Bk^zH9I5cBuKjYTyiEY_z!`{BD1v_hsOwWW}NLgV; zFJD=qg=o`Lyk1;Q0fEI-Pc%&jO2g~q>EXZyX}%|Vz}Ool1fW!{sUVMf&9sq!>jwGQ z7f=)noh&O(IB6fu(`ox46gdblge<_lFJvR4PX${%n7J!y&6C@EBrP^{SJGx6tCMzG z=wcO2E!N#_ci_R`7>@WLLRV~EKes-8quZjG1Zl^PG5ge;&^i^?vdUc<+bR>> z5WM)2F1~Clw~=$Q<((^+5nD+SD`|R+@C-9+SVaR>KjCJiIbR{=OGxYO${RxtmG0kn z9a`YP)WmFPzsb-Z?cC?!y8di%zv1BS9HkF#fn6wfd~Ffsnc*rGEw4aG-_)3t=zv4N zY`TyOnBn;AL%G6hM)9k2>ln*T^rPKCA9Qxu_o;W2P;M^QI|iF@B12o-w^%PxQ*9R z=y%mpe{tXj-oM>a*q;!I-{Y^^WK!Ulijnq!k@nWY%Bwv#EdVF`0N8WbT6IG>o&L20 z&*JBtb}(%P=642Djo|bxK;$3d9_w^s*F@?!`S7IK3Xk-}Pol9{!vn;{8X23U;kBh5 z`Z>Lxe=E9fIPSoM*CmTEln{HUX)3GAV^V4FQ{z#`OIbf@*7|W1*H48F-BWlHm9k|QKpYRfo^`TQ2UdJ28kB#(r+Y&A3jxUzf-d~PyC5mttm)&MotgXR- z(sS+r!6P!~#oJ2O7K*t?{WHl_qR4l*l*TUz0^NZ2t>Zr@kN?fT_e*_p^8dVemWcmd zdGTTv|N9^xT2J>9i}y|Ae`mRVr1m)r|DA>Z&cc7?rN2L)>}@yyYipZQ_Uvu$H!*F` zS?rqMtU^;u-IK<+qBK2LAz71I$IH&c@;FMT8JgF6H}|tIpV6IK_%>}L`m}SdEArn)f6yeo>A+VUOSyjVTAjV%#WIO ztTBCB@SD4!u0tFDWHJrfWQVd@(58#q_=lP~(8fG+)JOg<=Z7(Trg|c1?eSC(rtB?D z^K@jK%xGerObkBrSUXB-6~UR4y`R8Cv6%;^jV>0w3!N~5{?@WlM7hXZOoQIB*>IeWtC_H^6Z-<83J zjd8Q9=c7-q{pS=(-#)!|y56ABZI8)MUjFAsqY=0N)K;F&>^~3jq2+ro%Gg8Lf2@;p zX8J)uGt?PH4T1?BWH1@x4p%1cY(I~J2)&YKUWbjeDc(knMERC48~^;OXbQ!Q8P zvZ`fh!vk@AA0w^yDBfXuWsod8opNn)WuYWdTdb9}-&OkEFB7dau#*8&@m%V}{c9wW z&m{71_sN(4i5-l0;d|_pC;!(QFXHn5^OeR-{(p$i4=(?wOwO4KFjE0$D!@zyNKpY; zqR+bGrBLB}RRpY@Ev^g1-%nX7kaNI_N~&4ExJiF}bAEAC{`hA6^xId2b|1SKH`*KAf^hO!kE2wI+v$?uLmTL9`XYe@A+Ar`;pAAOZ@1%(;cmb-`nVTLBHZv zDZa0XZcEBO+a0af!bnHwYQ5Qxyf#!!6h2?CmH+sIztzh|KrjcS`v94s_simoh=BK3 zHPI2#>mW7FzDKb~rLuBeiU@&@OXbj>9IH8ShkCeZcZ3)!TJS9=f5q-%5-5II>W-cs zw#)yk)L1x-lG&^9#Q;`W>$*fmqedx(>975l^K(HTwt?S%?U$h83gERmKX);3osX+} z$+uC-x7j7%MJ3;5mpqF~o@JN(wHx8t&Bik-`8K=cyQt*5?2>0u$+Ogw(&z9c-h9Dx ztd=OFMnon}3I`gW^K8n>sEm%euq}1H0GO@Zj)KfOcV3{s_A{60z0LdNl*}aDUAy`A zn|AZ-4<-s6*<9j4PMgki?#q_u=(`ZbK&yVsW5;>ajQYjK@d!WQI~3;&DiTAiER>7_{7= zET9WnxR(egg^YoP)|3WMj`tTDbQk0U3ZQ5is#seUGN9%(>zYu0WDgnNVs`t|xwf>_ESU`g`Lw z;`FnFwhh3j6l*s=$F2UU>5O>{Gk_osu!GS6r$-c_86$1VFoI{hndDH%!jLR0Hga&i-(sSEQX24B+FCyrpCe`#W zF9UaodOs(nZc}z6)wM;k0L%e109qvZ4seX1bl6^KAXt?$h^z?R(b{1f{tXiv;}xR_ zo3wAF7uJ2>9eogjbaegM(`O&nNi(O%>Q+%;_;422rQ-HrK2>e|P_;Z8iDZtMP0zZoZ zfAK-%3nkm~UQcHU7JidprjngfEY;Vq$gLxjW~<`|?(7Ws2l^B||5cuKt=0r>WS{>w zmX}xX{P#s|1!$`qH?+`X=Wx=A^zzOdZ5ylq8z0{km_IJ*5UD7 zvvt8wUaKd{LWz??V#!v#$__BGnDH(eY^^GmlK=}=CA$po%D_Rea0m=@p~isWqr$+T z@cf*Xn-$Q6@zE#%zchs96RlM;D1A|8;mr>2vGO)Wm7vuyKvT&*ns+oS$gP$p{nv~+ zM&=>wMAJd>fX63Wlq_F+^BQ}JZ?)@I1w9q(0-rCGs(w-OPBTz(6z@I7@uAaA6YN$V0uy+CFB zq?4Xkl$G`eW8^-HrV0F7)D7P2UeEZ=)WO6@DVN4bd$9Alf7`^1oABPI$}>t8Uf~+K z^C_!SZRhGi`xRX-Glh<&FBHo4HBraNXc+4Z+Bb2&Mh61ev7Mu|dw!(xP%C@1lW`}| z*NJ<$J>lq0-bw9pp!{B%*I-RJl9+vLoKD*3Conqe3?1I~U6h>+NH1SvJh3xq;#-QK z#?1RqwXFOo$8&7;g4fen*JRxra~Q|#ArsJ8QFlOfulMQxP-Uvp?H>`r%hRARr?0zb zA|q{*bk3DF!#z#rgDhBQ5?WB!In_={N>%tyJO-g&rq+kGOnZM@mo3D3_%@!jPCd5~ zcQf5ZqKQ-u-<3zgIPgy29+_WFGBEUc@iSLL*zk5XUY^{zT(k;$ujym_*@ zzrVBJ+&wyJ?jIc;M+b*N5WtAQziV=&s~bK#rUa!jK2=AFQhkCI z@iwR7qB;?+KqdO`RfXqR1>|Mwt$yk0#AJ`ifJfJO%QR>5Z!7V}Sn0MBfS6H+{=tW_ z9pc=wL9jS%W51+PVV}<{Zz_ss-I<~wirqg=*&8MDLUV(T2=2BKTIdU>y%>9-v{dj>n_}GC|gD z3)MjwH2Di~=1%^djsfaHPJ1v@wsd|^wpWf6v+(i;ZH^8h&NcM%qu^W%=+4& z5}EIuoLZ7wiGK<$C?a!L-;pgL&^w^_C88)vw;w_J2aOuvVceYfNzjdVt6>SKjV}e1 zg0D}p!osuYHB0eAy$00*OO#a+ofg{wMxzHpWx+RY@|u^v>31LzwKX=G2&J%B@fB2D zla$lb)cm03r0SPcJXpw1Y9DId+0>6zAog>&Y@IdkFs!Cg30a{Xkb1Wgnvp7m1? 
zGd8>`ZTUKd47Ec6&GfnC+q+$y8Zf*LsZc($R+wSZ0%X)a@CPuGM+0CStLEDj$5F%Y zv}hI0!_QhJihN%GyL3N^#So_?LpA6XzamqLq^ulPtk-8Ui3wnm%;{NC3A5Y2ixb4s zmJ)`w^r?6HgivB$bj}4{o!%OX3ny@=+Ef&r9(fY-eRRg>i}DF0#XzinwO7|ui0WXX z>TLG30gwY_$G~hqy|x{Jc=M}Yr!-#1kR@*WToojDd))-rQ_7m-c`n|4@oc?w zaAZ%wzZ-6xZ0wDlY;1F5b7R})#MZ{y*tTtZW81bSnVawXdoTXDRb730W=@^1nmVVd z`}C)uN32n%ZN%k796McEmE&pmzfzzmD6{2P|eKJLY0>vxMUR5)z8+C9rO^Csv^&>g)Q z#wKL^)@us!H?!8WAt_eHP}z^=qaaFc3jtRIVHgEQs`NY(GjZ;C)P;~+y(+ilV9o$W zpeXTm@wM!Fj=x8vYxpP>2NL4tYhMYbrAb8ss&1~E<1R`~$(oB~j?^?cx>)uF6gRk8 zJ^H^>1bLWU)7IWTleNEUmnp`oRFJbGz*Cg5O$DYzx*a?7SPqYQ{@jdDuq&T$N*XS@ zipT4{n`^c#nxp(}z9OrW?S@Wo8(8bq@6Z0ewX9;NN$P2 z=5^Qon&M!xg)Nc0E_2~~d6{51@NC_Xu9jQ zNyag&^*XEdT{-dwa<&kG{VY(GjY0xAW1a>wZ4>AK?F$P4 z5*h^mroD~;GC)6|V?EAYvMRoa|LE4!3;+X}Hp+#3X;0k8P%(jSce_v0*qGp+S=Juh@^ju)RyR75mw(rMaVgY(_;f7hnxv zR=<@`Q+LNLuTC6D%PHK3`mAL3;v3yCuUqDye3RBgP8s^@bTk@7kZ z8n{nos$!Om&>uKY=mjQA5}PC5_NoqedZZOCFFyzAEEw9`|4HRu`X24;hBOpEw^-Y!o>PfOY18m+{vkYgs5B=Z!HQs*xw$$b+ZLPf_@lSLR_PBxXlwh_ z@E@=$^zqlWEcAz-V}>H;NrCur(OczaIO@wOI}{H(&ONtr-jltT*4} zieg1QHfwdZlKDQJSsyY2^>Dwk!gpYW+^6O3w(H{#oy5+jI9`D!^ob#bAzIFuDMyk=%bc9-moZ!k@D{rChG^Vo5*rPZ+ z=3%FrOrvOk&oHo?7WiP}kSXX_Jk}GVjYJ}G7zhG@I6YPT1Eh-nX@2Y2Idm<_fPG>3 zb)QS6M+v*AzzC9~`vm97j5hAQGVX^#g0W75Ib`bdnyQ}JXYp4IUgQv4l69q8jv=d> z5bM71MP&D;TCNnmbrf|-hB}fiE`5}KNkXsMs4Fi%z5f$5^lwYh2QD$c6@W+iC>1B_~$~@%r+?ge2 z&O0QMhG@Fv`*m;kMDYw5n{2^cjbC}K62zYy{8G;!c^&UfiDpZp5|9Kw4~-{ zN0U&*mXB(tOND|F!I^YgaNKhgwj!tjdz?5yY{khV&38B&(XOYJ67{)}Kk_tEv=D6% z^@?igb)n`PaHh5IQS65QolPB6oNffS^<`ekyEG4GFvZlLWQeclyRgfeneskPveZFk zts0|V{;E+<=o?$ku%vlaP82IflH(ICWES~i)#uzfYf_;mW+MKsO;{dHHrnpt@%J5V za~fLv>tW(N#w0BGTbt~=UCpM}RLF>$M&T{`$uvm`j@L26r7EWLM2Mm8iuC}p+%sxJwD6_d6jnz*$#MD^?K{lPW=l8Si1uBi z^u*i>ywg`45NwXFs(Q&4G&B2s@0lKc}>-1+4EBFqVTI)c;f z>S5y!sl}r_Hg1zG1P)cR?uYJNMBeLEv)nJ8{SbEt{s}d3Vs*}ojeDux3tZ4=L^W2z z^ugkchRpX_95lZi_c=N{ySraIpb(CYHLW3VNu=}5J340SqUe-#c`(F8zQ2PXs`prHGz{^`=BYy&ny$i_P7$mcDky{K44Fu#hmV=#t zmwvEtQT1o*xdGXh}+nXtvr3Uz< zxemm4XXiRSc&5BUcBSg5>&zu-3F$5r;*cdt6@;=xPDokF^Y>gA$=yaeVJAkYI&@k` zOiB1Dkw6N2$n;?89$D!EQqGXG=U}wRM%^VW4Mmebjxv%go%Bi9aJrRhDC(r4umn{G z`A5mN)B<{T=H>a+W-^$tCTma3Gs}#58XB!)Z$t|4fAbP2ykyubRF{F$9 z=kf95VQ4hSuCkKPJN9<_QS0+K?Db|>V=i~NPGFl9)+YtR3+oSm)M&Q2$+|#dvA^Ex z&jjed(wGWRS=W{7M10x8hrfxE*N6hZV!(tq4)>aM9gZJU(rR{y=}jdu&%`q@(n|4N5`K_ z(O+tmJF6mqP-F-uB$EiX2wB;lQRUP7WtT4T2aR^JkHOQAquvc3fyK9rGQM%P+eAWd zJfY>muy@?=fmX?MYQy$V02UR$ZJPWXVT`GZuHUsjUcc=ZsG`67_~FkyAz}20c3&r7 z!m;?#k)qEsO7WOeef&&$%#glAO|3slLrG3v@F|b3I+`CvhN?+DSVuK2)>P2!%;ocA-k`GA-+3xXBh?qd%4ai-c(7EW2P?)i2RwWT zZ=L(LqaPqT*$lvLq5OM>NoRFcz-RfU5*<{1*saionLS1Bue))lzokjbGQkIQ6|#it z@<1pgjvv*=bVPjT^_yQBz}{YlZ=K{ehm-#f1X z%*CPHv!g0TmI2_dPNp&;WVepzO5&1VxQ$1TDDGO#1Sntm6nBkiznkzlC*!w%bIFx; z$xiZTnUjie6c@&4H-QSdf}Fdz!bDHc@5)0cjQ%-B90mTv2C|N~1)@==-BeU>tS?`q z7U$Rfv}JP%Ln>iAXC20hitVmK{0ko4+NL-?m$~`iu=|)&_OedPbpp=&IhQd5Fy~JCmTpU-D6vI3?8eJKodl7qv zD}Aag@aj>}_2XH)i0>Cp-NMlc)so!=?`9#mMqZw-aMCQVtC}bKV^yC0Z)wYN+cRzS z;Z9~jWdRsEFNf1^KczK#UoVL_7p>k6&V`!A$(Da`0|%B8h=P7DWJGW5UEk&&F~qb0 ziUFRnTP`ewLVF*fD+cbu4}Qv})brWVP8p_ppa22r8Bo!jbHc@S+v|1?r@G$kD+t~P z&@B79eLnFOb^W56lTbP`j@Y8O1Bv9O;p9Q)aBJpQV65)`>AXT-flnJ^7x(l#c$1Q* z)Tz%LHD<}Hi0!I_XCE|23}NxK%*E0`iwVS6w! zgd4S*RdL?+-`k4h-Is!P_hzo**Jx00#%p}jByAie^nBY@HZ8n_6O*{3M@=TM9;Lr% z0QJ~TExG@EiK(p2O!NBbg9I#sdp_0}!Qtp|1P+WR&%9|b_4U48-H2L@r8LQawFVBs z(IpcpKuZtz8m-FKdLk`fy(q?qSKTkRWe!FlIOCrpHf0sA!u%SX0@>|5wtNzT+{ZQ%~ZEuDEfMU ziCRdFpX~aXz1;!EPgj1!7n_`f4en3O-kf7;S#n%v=AOnJxA9qR7k5;2}Fm;Yx5Ad2Dp)n%dVK!^>^HcmTjlWt+lVu zCjZ-y{r~pULhwKRU=uZnc&`SNxw+Ysxp6e1qC2*3JFqsOtfrfe3OM>@qESf3`+x0? 
zYb97q$zAAlu`a}iHc$m37ZuyTUv!-A&IP_}`Kll{swhtYGzT8pT%F!0uGJZ5w5wQ$ z_+(OE%_3hvkk)flm=#dXeyB3{^f13W(;m@$QQ>Kio~b5*xS01CT1Uc|_ui~UKpITv z4jPKQH0-qrO0|h=+riHyE6>P?*MP-4Z~O+qqTks@Mt+J-z)#spfOHez&@f-#Z$Io3 z;!B8Hqp;0+zVmFwDh?yg*SB0=+O@N~YL1f9mA17;*8t*Xs}SmtwGMjJyYus&CsQ~5 z-zas9_b<1Qmi@SN?(~mb&0D$s?Y2XuHvMf;--w7m@+lIF@G7F1wx2Lr6U4qI5bn8m z3mD4~sy_>-1c;N-BW)~}6ydGK%;oKrABe{0pT0j3(&Hk&W5zLGI$MQ!4FV$jbJg@tHBJPjP2th&O zEQB%(l(C%oQ{YdjV|Vxb`vtaA(u-H#0%wsTmKEnr`%KpXWVyj?qWIa6q`WYb85(pR z4cxyY^tm2@iXFC`%YR#aOM18wg#2qh_vg}GN?3;xm{j~W27C&J%%|4|klKz0paTDpU=Om|5ZMxuWjsQG6h z^ZhjdInxoO9VP=h6)+wKDa3yWa)JQ1rmt<$pfFI6$8G9wg$`(?){wpS+4C#B|8Tmo47 zV}}9L<9OiZehLg-&m*&zf@v7FA|st^ch$YpKOb(cUrD_9s#w*^bw>*=iP5NxX{_5@ z1V{Pt&(Uab!|d|p`y0uCw_sy;s#g3}WR@;i# zGRE#See{EpnwyPI=|6+JGU#!KA7rcD^|aa|n$Wc3$ee1UpV(Q-{jAUBKh62xi5&jI zDf^qPgRJNnQ_g8Y$I?}G(XWCU*_>uiJz+2L3(kyq&j(LE5e)u({3Yp7E@o>P=}%_G zT|^6w*a`;`=D^zlpx~!s98}Kh3?qZ(;esd8OPDmqA;T` zZa?m3e%)B$;^>|eQdYu=AiiV}=x}CX18+l81AJnLI|;WLut$#Fy0Fb5I`2_v+}_m; zLFreji!*;w75-_fuBX;)v`n%lV6SIuo1JZ#ETc8z@5&-jiH=`MI$z!fDAzk{LttPn zmpmH(KQ6m{E+P=?7u)Ekq*iB?mg;mKCp?+d=Ct>{T?h<|uG)ehPw*^Yz8!xX; z*Cg(ev0AFxJf63fAA7tJS-tGj`LDha`BVrZJocDB;++8)@mN7|Ft0vwZ`;9ZIhmQP zkM_l%Qwu%mZ>fqPk=rMec{ahBle*3xp7$ z?q?+#*u%2^VvGjTNAi|nF5rchQ-$b=j9Gv zLc5`~8`O%e>L_!i#|26#bEAwKrC}CgAw!;Azv7_~fYxf>8k}2;G45;u@p#Q%!yTGk z-@2?d&vCDf9<3DMQhAmuGCa<%&s@aJj9&&v2hn=F5YY|sQ7%U83?OI_R5t|EI;J!`$3ZunQuIoJ!Q!|bEX z>*49_d3&E;1b}g0xLm8bdVI;))Nw=9=+kS1Pk{RkAZCNE_+D3D#!)etlIwP?*>~_Q z@9FI8;FjlR*I+=_=2_~?r_R)tT&l~jKD)xTZ8o1*4jaD&P|M9;vRBLfv!=f|x8PTf zT>PZqDto@nTcyuk5Ji4el*HODX;fwKc0w9a5GY|^5fM>@x!|oUCre+IyY+5mWF)T2 zHBtBlC~Mpb3syLHm3&Fs5I6&m(4NHR5ENQiyLVT%(%EQfMXgQRERS zDZdg1Ehh@8RV1%CEGH-C39_)`+wQtAjsK0n1CTqI^C-BB%`x0*)H zS%gN+9nai(?|F>BMY(4FMtzkI1EqTAg$*Gg=^6p$Pt33?$#9LC^WFRQ+fGyg`;(b- zlFWASk{|tL_zq|P(+qHUu#-$PEl}M2U22DArHQ~EEi$omWM$_3|JK7B>JRpIcD7kW zI0ZK@n>?^*jy8x3`zzl^M{ZFS6|(a_616S)A$jM~+(<(}lIJPiv<@wvcHBm6QMP!TE#$Kvd!k^v#@B__AgHUaaJ|CJz{z9q&3m}Wl`GT3+1(GWrS&2k!-isIR-zapI8Jt-=0O_k3RiV^_g z_MzphM%GjEXvQwhKN6jiWP0Rvz2_03Xx5$C7Swf<=ff<3=Y3fXgpR1h&HmdxlNP2f zuGNZHlQ`_Ucz!){JC~P$(dwJJcjpl&p0fGAD)HYkj6CFjZ@pdlLE~{<>Q=jNR%aCwUieH(%KAm3mF7Y= zg`)oz6CoY>u+~gu$2Q>w$gJXhJ>K&+p-X16id_6ZCM@lr(#zxr9A&c{sgke5z@x7UIC(6Mb&I8$S{+i_jv2ekf ziHPrjt$1{jCj!d5HcXP)@tG=|03JU(am%*iP_EDX>i?>|(6s)+c*eRRi_K=S%!mDER8Gh>ph?w!1)U`#rm`9Xvo(LUzK>FSGlIf{fyP;b6o(Lk`0v=5gU(@)-JG9cJU z+6Q0JX~zT3EBIGzzRYU<*_P!MJoAoiT{e>a?A-H zz4*dxH_Qc#X4Dt}R>L>NmO_3$$(BNWZ_W~PJC6H)B)dP(OQNEs;v^z1hNJL{OBjE3 zb(aCI<@!ejMU3@)!=jXbqTWyO=JvslFD~vqd^0#PIDu+WRH=jHf1vpjXqO)K5S61) z?!FI&D}$XTZn~<@smI!;?r3KN>Bb-PY&=sP2UkuxnaPtYsfg0s__i(P_b>R0?i|hx zjdFHK_xCagDy_}N60bJn3KF}5_ZuAi$|d5*AruV6I+~KcjC(`;Oh;pJkQxY2fXMZK zD662;x|&lrz|}g^2g>H-g)2Z|Z77NHU{PpmhM<*d_aTgG;32IKqlU@MVcUZgIE#@s zvkSi;*$lW+W3I+|%aMu!aj83k*2dh$!aX~=ueG;_o)`q6uR*-L@v0d+Z_gX=+d1VEI);-vq%o@k-q%t$2{(~LN(^H2J5vbPWXzewGF+hA$7O9r7vUDz?WSA4qWg(! 
z?rg)*nhIgG9{CF)yg?f@Ef52Y5wXJz_ZOBHydP(;e(yakF`B^hPnp@Q`c};vy&Lpn z=k1Qx->*9T*<`wz7Z3Z57C^h)D!*4N)|!Utc^XqU zc^zTro<_ROdK#%@mz}<@1d>H_K!F7hjD^vcVeTh^6nD>tNdSHAHm@GB|Ie1#tmFM- z8H6{%`f)8RKu@?ia;$v>Qw>2!S6|E0 zKYV-%LqVpgw1qMdcc)$fRWY&k1>RH3Vj?jG&N((Fodq8v-z#v^Q^?KhNG>% zTfh5@%iC#7I6BEJv1GOO6i%F21KI@gZ?2-Z{ovxI!3x5KZ|$@)j11sm!K6W`vBtzX z!?iz8CEkxb-Gf8jhUQu_`(}~&3_+4W2Ceb{2DdhX>TcN3&!ew#Iw)}%^s=P95cf_)4Vn<`o63D3x8IH}gE|-m zG3xbpcyPx{#sSyGYzv3Zu^e2z1^k~>rSM{?!h>oziX1x+Ql=atV zp>e$ibnyXU&9DG1Zk(rO>p^D$+gF_BJ5LR$Ej31u?VaIoFBOAvRs`^mWAoXIg0;Tr zWOdnsbRRXsUFvz9rs;$bj9-y8&22CeCl}8M3F#FWf%i%XME@2NB7EhFy?%U&GJN?N z4wTsA)Rc!DixR}Z$4AqZE=QZ(^{RFC5!--pE_(3y;2+$Zem6gOo*quW_Ns=IJ1rcX z%nhjBCSz0tP0a&16x{p-itI`ucxD$#9mVdIB!V~c@42Rdq7V+!E?Nt!Vat*EjCW34n^01UYpL&Nl$l%x}DrKqyRP-9~DWJFJ3c zYC8B{;w0)tbJVeRJt6_1`y~%?+)|%fXGP@`Nq=Rt|CuJk6ky2wF5Rp5k-CqmR9lK4 zKr66>h(4R73PMHo_`1)Qjm0_uxNF}Bq`l;Sz1H4LdTU!8$#IIYap2g$C%c~p-MZ!X zS!~=Jbn;6y+6JMb*!=)VZu!2GcrMpu7NLxFM1&Ul&4}Jz;mkE7N%1jx0ZF*&xX8;4 zf#?*VD5P<%9@2aC*%1K_bMzRw%j}V(3!J)^7&(^H<5pwQwx8XXPOnYce@>B;+cBl9;_K^V z*~p)}>j;R~mSPU4MV+3ILJQ*}O{m#2xas~$9NFamVQ#@>9-9r%j5*A8F~h~LOrDG> z+?tAN_GWGuVO>2v${?h6`|^Y{D<{R!zK)ERJ>}4`fqu#j4J%eG;xVc#UkbjEyw6B~ z#fS@(Kr&TI-cOZJ*mW$)OU?qI-KU()+Y(W*#sUSWs~UmTr=BbQ(8M;4q3mA57HhiJHc=sdn9l|_KYUrE!+tN34U`PC! zfvdzv!q?^dnvkGe0=Rj!;(#*Rd)K=r%R#sw@zI0|)YCpuj1=mHEQG?iH&lWRZ(Q=d zJheLD?aiH3;-Nr!9tc>6ArX~JeH=)+(nPcKFz2#P63Bm#nRFh2pHle&zMpKxanb)D z&rWvZTo(?dkIFj`#;(LgruX;DvwuQPjnbnufphfCh9+T)oHIt=6ej`x?^ZdFZ;zbn zwu6#xwn$|ShLak`z5mYW*yhjgGwTfNt`Wm}Lzbh9U2vMA{S%KMiXtnf8SZ}Ou9cs> z3!yRk^G9lpQD9a1!}Ea!0=*p_QGHcQrGLOLNJ79kL*E{JwByMe@d*#jYaUoN?nCQh zKh<&Auyn;?{)a4e;E~yYX3Rtw+z`J`Q!+nMGVA*x$+$JEl*a{jkK_WYo01#3ekWP( z>GBpntteQg5yr57M?(2)LPVcwWESj;tmCjN)~%@ojjAvCFffibz;hUnjPk4`#zXk{ z{%Ih&0Oh4OK45xMvo1~>RQgZ&^XJL{k_L`^FhT<3w-yhG?IS~HMn*$lXi}V}YNgL$ z&M#sY(+^-i`=(r(189#RXr%6N{^mIpKTQe@%Nv0Q=3aK#A%yieUCSYWr|X-rG@?W?LY25h)ZUMpN(`=j#k4CR$XgeuPjUI5&jT-)rH(~hpu*$ z-Ca(urc!zOfR3=J9VQ}_LpcMC&6sB1VJT*anFiAW?6w2u1f^5Nj1koEfOheBA@}r)j zRxOU_7G@Ag_K7hOlEE*d`mi4O3Ul;l2}IF1F~|(RJeKTl$T2%I)0`}oSTrR zcwKYx>^w-yt?F@t(4f!FA+mtfjL!PRaPOymKPa54_7`;bUv+8bpKbXAfGWCszd8|Z zsvaL{RxMKZSvyDCq)Sa0Z}b5uidY<~Rh?k*g%kzfdTD~4ph5GDc(GnQg+S##z1so! zH%c1A{GX5m@SEzEyk^sNdxRi9Wf{D0(>Nx1jgt>g>7w5 zzn8D5+=UXP4bwW9(>kCMS;#O#>j;DDwLXd+6ixwUv7gq9QohCo^06v1oj%|cUN)mE)YH4LV+b*qQm>U z`yW%FK)|%Z+~SqZ{mt=5*3r>YI<7#rHj`U*^j^JVIZ&K4O>RyeqCP(6_EpdR%~nex z;qe}m7bfrj>9vXl+_Ibrfz z<r)$qJr&IJa_6g;EuWePDfluFJvyE9L(*D;hGIjA7kI? 
zbL!JB!V2(y-uVr(_F4xQFl(@z^KP$|;OKe6atcvByfnD7W>@Ya9<~HHp8BP?& zpzMO+epsBbxXe&A6Cqbw91p)F7(Ifr-JF{FST8)A+D;A`YfgH7oY>4uPU`7qLl~EnAQ;OBqoC2x!KB87&baQf<&;E2&YQ&eom=ws8d`}W2K&e>rQ8)xtaz< zC1WqxXv&4){|PQF+2N!;ZvM8=Qc@;|6#nwET?d+LLPF~=rIy4*Ro>L$7|sKrkTasp znyV5Mw)`uHY7K0$B z+~mGE;V)AaT{v<9JC_8k_8^%v~W3>)v=DZJKwA8Pv7j5I9zamS;10hxqq`bS;2<7<(vk9%*LGc8b8YoZzAKE=*1rM zrej8BFU|&Zte1lokVnS969CF9kw*4n3i>P2dCdQe1sb8ZqDcKfPl6K%os1LP%#kpA zYN3w`$^U;YaB~hZ<_3~!n{dDn{QOCpu|JgR`*~f1oQ-(nbSe@0(gT^AI;st}jO`Yf z5r7pVP*+$B4Q1;aq$YZVzyqA`ftEr_H<1?t^D+?8nS8vo0xp7UPLSd{Mc*jo-%y;1 zPG+xLnpGUzWpJL}UrP#|tb`%P#jP+ixQ*&|uzpt2Hn(R#{uLS<^ufUPHWUl67e)h4 zGGqNo@-$^i!$+R$Sm}gZtc*04bV~~Ow4v#-(Il(QD0=17rv{t*^m?AGFf);Pk0nBe zQJC@ECY}Hqx@zq4bb)!KLn<{;6b{s9y9)l$o#4rve!;DcX$okUL;!jO0Aq4k;P&3= z%8zH`ooeDwm>}Z^9wZbCQs~NV1zit~-=~9Sf_(gBKYCscCOKgbDxO%2e2SB1lJ1Cv zXtq5YoLBhPw(PR)x&#C*?XEAct}-tOvM+7B*R~_djy+B9FxtJThb(2vo|@7gRqjn3lPSm35Q3!EnM$&A5ED1HohBDZi717nT#+&PsY;q# z!$m8AwwAJn0v3RN^QD#A0V2O`LgP!Tsh7x$GhU@(JCAo7*s40XL8?~Nd#Uaj?J)y~ zAnTL-9LpjDJR{0;eGUC$uWe6Snx&|g9=COgn)S{fSD$2>w&Sa#$F&X$ouu?==N4_` zEIgIe2b3w)UmM}85KdPTDs!vR>eLBVf9GGVM6Ivo?cJ)^{;=A-pa;V|i(>b)asjMd zF3X^K+tGiv=XDJ~3k}GaAe}stZmY5#Zs_gEX$sM9W(e;fhYngitfK(lq~i5MI}?M4 zz$*6?U;ni^YV5u7z{+ow`hX~(w&)o%mdz04QVRY_QV9VcW1oH-G5URIthYhyCc*>-kOrq*i_{}l7RFG4-TmQC*)q^k{o4EmOJ93( z_Ko@6D%ms|Ym|0zZ7;IEt@jf_IclEjmJC^AGbKQU{6#oa(H#b1+Rg*|}U} z_5a|A^pUe%;%PIgmP&a^>FCi3T(_wx1AAUsa!5 zZ*J~Rch|XSOmqIk%5b5c!*58iG|y_+q;&|>UZeY|XE^KcApeJ~&F1#qEBUh)KWUge zs{%I%7`K<`D3CTnwGFKGQLc~mHcECpa>TZKO<`e_4bt7hugOF!ILQ7awn2l#Y15gi zJQMs^lKb)epWM4NldU-`p1GeU37c!=^WElksG;@a8Fvk2{`CC?V|_9!mi_lQ7Dl zVd~$nRdcdJ5u2A!!_R_^=mSxBVt7LWgIL~a6pl0IwUN&l-rgwY-A60=I?-o%nIp{i z8q}={K58%qddpkY#HP!mlaU8tX&i2`Log%3%FsYtuIDu2kE}7(?l@$aZb8H!Rgql* zD4-Ess51=JHn%r$dwR?@m~faaF7QwfLlDFJ2rvGU?a z9K0G-cFP}}7fCnv5z_Q-KzKmg(?KfJ3|U!;&=#bDGz^HAzbf)gxq#kSO`90Ia*pEi ze~m<#l2%{d$?GQ3OgGzS_~(gV*4BvWfSM}){D4di_YzXH%GYsRseP7wmOJD>&(x;rqJ=X7RV~7wN-8 zst#boLC|+FdfZaN?$2dxk9ReHE}EOB!3ffI6a1U3HUA#{0n(eJ`yM=h%^ls3ireHX z^kMsp@9mPNCpjuP1~R7)f%!W2og9=VkJYmn9T-3XoD{snAtq!&u%)wpy1(cTSVI3~ z*;;=DhA2d=hFT^J|B1z>lrY4KWk!&pFcRiYK02<>RQm2AUlS~KjuohTbUq@f;+ef{ zF?p_iq^?@+we(h`SZiE%8Mg_yt9@+HNrag`=HFm@EDG-}h3(ZiVXa9Cmp{miBP7o; zTcK~}5jX;fek74L*l+Ic?G45sA1C=HSayUUc$$|$MS&Gl-y&_r8vOoTCw#df-W}mL zoX>-Dgs&}llx^O#xK${K35>nRMW56pw0FjyUqN@kV`bXAgks7J;rpPqM~4$U`Mecw zQS!n!%jwNRa%Y*?a;wC5!+^?8vPgz8L~FC%aTh`gBa9u61L*RA6F<%?2=3aAV#fF~ zaw_grqhHLruFutQSQyG&I?JJ==YM=USiT3rK2|C2@4d;auLwW0>RASCi6GNpiSkXh z5lAZcYKPUQbTi>stB#c*rey5 z2%q>H^q_XIIVM`cEx5^QM70_izkqhe@zOC*G;uZZ=pi)(4u9m))mVi0RfGv|yohrG zES83wR0H!dt5Kz8Rd&@9p@t@B9F^LuzdiUSQ6T1F=RijVJlm%#ggr*HQYLcm^f92O zNmmmX+>GdtpmR8_mLX;+X>=fV_^OHWlFeX!??&R4_^a*7tdgp?yCUvgF@ApU*wy^z z7!%&}#*7I3Gx=`pqZvTwdHDyw6Z8_GfDRb)9FGZ}$$b^SIH+kv7ehHIL?4YDDaa?? 
zQqSec?j$ujf0}f)5zfFVg3;Xd~Z%+ z1wv||Up(J+zK;}fJ{-z0oSiv$0p#|{Cf}F!4>E0v!6QCu0~e-w z2cA{&tUqUAawvkB4>LW@nClwJW*pDrfwPy#3!Wz`%EOd)kXZ$`B$jxKE8pe6gDH>x zj>MGioR9YPS>vO2@P`j6MP20W7K^DZf+L(duKQln&S!v5{7Lz|VJn|f;P&0}<3Zy` z(;XOv!tX=tAEowA-@QR=qoP>{u`RHB$JqN!0WED4f|>PPUrw>}^~w20DcTuwKXy~J z-+IOcz9>F@^_>{jFKR!m_J6>`?xkQFR=ScE$dn!LLL(OuS#^ma4QBx~n#n-=4+B1A zD3QVd9M{e>{F<-TZg6X%HAlOYuv)eh!rCA>EWTm@(MZ&J(YoEtwQ@|-ym1xh+N-w* z*tQ>_^Ff~oY84MEojM$OOy#OXgtu|T>1@X@lV>B=X1)|%ZP>|};MIBxi~GUc8-eKU z9P_~$&@Z3;u1zKSGN(ToZx4aSi`xKi}3KA$3${f52rKl7mUxWJN&*FCkd$q5{Mzq4xDMvKRVnR5XJVj~@ z_z)4pgDiLuqJ{?AG*+j`#)xvHu0ne@M;B4G za;1(e1%*>6kEv&1FEqQ}B37#H4VW#SqdgCQ?9|Iq2O6oxR}LJz|A|3IAXKDT7;-NK zt{sAa8Na2pU(jyyLagec56OOqKN2^DcyPj7i<2@vio4%|=aK37Flh=k+s~lqjVF*P zEPOi%k>_XOJ<2b@mUmsABc@*59w9DN0|j>1%*x3tUbyDBY-*p*VJLB(HS7G|+Z+ak z&v-5;5~8p!#-2M1Cvmv*me;(9CT~#@h({I!$_jkyjo{c+MU zt^Uo8!7{>@SDS3lc3z!Za+ZDQ7#an60qFnuBKN@26Ml+r$9E`7M?)1TOiQAE3pc6} zwyyJDy*<;SaXawG+;%g^_wjED*rb&jU<7s`fTY3-UfDlix>O$DJ|869%{bqOz84IE zZuKs6wlDrqIZo~g$oTNIe>mtb8{CuX{d$HcQ!gldii5GBGQobO#PQB?1 zdten(QR_UY2^GrOe#*Huzqrrg!v+nRkA8s0-@HgaTOKQU1VNtRC=6Vzb?bgp_c^aa zpjVPv;CBl@SPb5I+{(`~%eX54Z$;cDELA6y^W;<|EzKIA4$YC0$_rt6;X^UHg9g`ZFhPGrh)%>LXwp z>X>sJiF5pZlYFLA_%khyVc*CzTl+9h3U-n);}_C2W5x_)236lz^;H1-sZ%A6V?LD= zhcv`@TQo$EKVM7TM}Kwk3f%MV&;LU#E~NK$=A1lUp7<)iI`yVY_VDiyo&Kb|*6`hZ zmwNum>>shs$XI76Ps`e6IeoZ*KhN*{C)ssK=RSd1nHZehbB6EXAFki7PM;g_{ODU! znxl}@GffQh5%$czuhSby$M0$Abrklw`|#`Ev$rO51bO=!PyK!W&&^hRE*pD6G!6P? zI?b%(cLtY})8;U!-ck**!?D8#Q*`pN4rb-#&oDC(#iGW z-8;2yyL^>rIB55%v)*Rzmb$bM*884UO_gzl19)5?QW$&{#eX8aDOr4Ouku!02it2H zZ%D@c9OuB~Z9RRp#UCKM@tomj{>vf%ML7grEL*6ZEUx*D*n7KGJmEg*SB@F8+4Q}1 zfb(x7MW#R)hh#k{$u*n@Y*!>Mg`bVbY_?G@*k9`9t|kmZ#F;$s_+qOjsz-Q%t4w+7#dn!E4E+i@)2O;q zI2WU)75r8)mp9nvD)pk@DcFV~SrngBS2uJ%c{Kja>F|SHsOhtX1o<6`ObZ7!lF8o* z)T>gW&uc@62|#P=V;3m)k{b}N;uVF2s3^GAS zN4<4j|F$R(ye2Ao+)a{nR)ckXCq85dmH8Ig4-t#%JDMEG$gCkbV*A8ePSp6qrQ~6{ zGT?Xhmc;FdYG6ZT929N+B|7<%xRZh|j$_&# z`oU>3-G++;J~h?r%zCqdm^p(;(0!G;{G(EJ)DY)$ym+^urIp0q#}vKp`wY5vBzwbg zi!t4_24iYjnjWbc*?QDe?+g0>#nv}QXA*SX#}P3MR^w#meV z-FDSd36Dj4(@!r-jq86qT9smmdkps(4*S4Q?TIR?HSM}cXfGYbEcV-sVB8iuHX+ao z@cb#k?rcbXPp3I8pA*S&e}i2XPb`s?Jb)S>js_;7a9<+3E2g&j>Tuww>6wOAXYOQ_#+ph~7syie1S-z6F_jsde^auit zu-TrN_o$a0;P(ZQ-kZ`?l%CMPcz;|?J+8(VE?zuP9^C~F^8m3(->z$Kvp(*R+I-)X zXj%FH`PZeNXK9)C9lXt=4F*mmleh+avkC%!ZuzaA3J!*bS!iU-Co26bhzJRbhcjCe z#YrwS+%6Zq4zl1trZ}bKt)%4bcq6s+o1&~7Zr;mOd?4(CW9$r6Hft1Y82e4aj$y=QII6Hb{Wz>@flZGgum4owxhB05TpD!};#StU&G4EGN# z(u&W|3p4A{t9pX+HcVJC%(vuS?(J?Q$;aoCgiG=>Q&tFFm;jA6H3&d8+rMMo`sOJ@ z#dp@Qa%j?)`O81`{3GCO$RG3G-Yi><3jyNv%YhBX`b9h_W!#}s{nK2N9a)lzt%+SW zz$=?Cs7~j8lPBJ^PJgQdCWiX~?kx$t{+B(%dnmy*udshupWE47crGdXd{dXz;oqJr zTre-}Sp084xoGoI0NsrjwW|*Q$p`s?s(UXW`SkNPxOQXA`(}uON3fGMXE;q&Of){F z$E{$JS_oe>jXE;t9$}lH0S`wR={Qkr;~L-SIovi{@+GY7{bO6|cXjRefCqdr1IzB@ z6qhI*s()qjtQQ@obl*L=Vdqw8&i+-ETV(zh67t)5g4W((3gN^6ckN$={A6M~_?h1}Lb;jzkmgadhy7pHcSoJo+=p>ZlkMhQCQoV&w>+PwE1 zTW92`DYxmoU5a0EsT|pOH+Vd(C%ky8?pES=H&XqN#%R+Z5IyV`ziS~6tnE9 z;B&;G`|eBexGB%Dwl?V9q3t9h+%CY1OOWjo83A6Bm-f>k9_rLG)bP~Ki3WHe9MN=# z^5y}5fBNKP>2CF2BI?ZCPn^*k+GZp|u1x-wD?&Cl8+hn%G8Y_5gbWEnI+qd)0VThi zQnlWFy##;OggZ^zx7GR+0V(f4VUv4rZ_^h%t#y2uiC-87C*Ph%uAUC5sRb{@x)7W7 zwV_2z!fp0Tnp>@#=c?Q*!C!5Yg#eqY<#T5(np1%vzIPx8W%uEAK#x9LL1|4CHzF~ za0+`%Iymv{BQ|+cD8_Om`9rQ#5A)z+%Kfs8%wkT->Kx=p{2yJTwewZwlFC_U$z1Zp zV_hMbTSOxpD9vrU8?Y6p{3MY#x=`M^!~9Z3eC^GS9-&M@!M-pt^b@?dPEj^eX!bk8 zp2H*eg9el?GgLON^UiWa@}S@X%S{8wnEp~)1Ny8vHOe<7N>wTaHl>;Xi>Hhl|Fq3b zi*>g3>b^@&>>Wg$TbcFQrt%55z8whwnvMt>IC0i3N&erem9=#>=cj@zO>vg9C#V0R 
zNF8>i2w%?10fBQ8+w~=|0AG*k?&B>W4B3%mlc;|*`X8?*&WX&Rp7m>OTKMwv1DXKrAK5^2Kna6u z(s25k;3fa8eIuw_g%jvOoaT!=ny=$00^CyYw(XnU+%IR~5uG9$4rsJzmpyI)DLy2w zOuzG%Z5F9U56_tEf+JeZytGxCsg11-uU_?SvA5!YW=f6M#x#%HfZ z`_L0!oSyFMefTa1ba{@9fMwHv&7+g0b!rY<=nuaiMS zNEu?}=CbgW-u?(854UI1)|0yr{ZGaD7s;zJQ7x7SIJ!+SHox>}1w7uR1grOJrpP08 zk{+AL>WccO62*b>UqZg+)A74iaQ6O)w+l$Dze%JB`MJ5nY2s2sv8`_;jj-k#1rmkr z@OISuAM(&8+WjVOQX@l2-k5W>e-iQ_u<-K(UiR4|06ZHybw?Z&bk;F&*|@po2j-vh5Y3uB$J! zFj(>yZlj>Zc|oKcS5?fBd{pj@_2n6y4ynyGFh{z{LA;%In&xi{1TjB}vfG>*>WJ}H z?2u|(H>p4=2#W~9tM3I;P?XRN3JBX8y8-H-q)!It?iET;S_D@V7Ymy+J(n00MKIni z{R~Tvm{!0@=z}fNQPW*bDXCD!6}f;LItN7H({2zv@yxox8u#WTM02C3J^0w0fx8c@t@rN4&{ z2JQ__8^y#-3vf3Dxwv?`_;`-CjpSZ2poi~ZU3Kf%lj|1+yev0|(FF~& zPbC4j^TLhOW$^aK#rqwfXyrrmHM>Rnd#OS$0UnO?|4PDBs*&XFk+9jI+1Bexwteel zjyh$NshvKqqPNt0;#VP|&%SONS!K?4yaExmxLgg?ndmAZ?m#lk!bmPawv~%uGniWw z$J1gm0tltZLD2fnlMy55=O^w}yU4jz0oi8=izlX&f6uG8Z7;?7(7#3v= z3z}qy`j1AX_lS%VWz{=Fx57&>y z45KzNsZy>ZnYESM7ALLM(=!@S{?X`89sC~qF0&gNn%cL&VPf_Mg1MclU2@4|5~g`t z*DS$BN96@l0edAuT}Q&wfr9N0=3lQ2GX{o8HP*&hPOwEQl(`GLkJEzERil3RA|y?P zb`_FG8|_Pe?I$*?3JUa8BY+Va8(=_8&d7)XNlgaOn)%V)W?ez7b(sGNyH2-4aFJ%n z{w|V-AXj5^S^^N@G3@nmslc*0v3(#UfT_*^HsT(I%MYK+Twu7azS~-m`+i8)TQzd? z0sKEJ@{z2y6-&==Oy<)&)&4i?txq(E0GJo~yn#wcg;UM)a>>&T=M<@l$LF&7rM~{a zoB=hfFp^^xoQ-cSB@SZk6|XT5-q2RAgb;n98bq1J;9VE0(jkL4|NHBOYaf<05-AUV zB|j=*Eu`UT^0vClm4cVsm`PDsy`qp>gozAGN!})M5AlVRhU-|C(FPWlYIQY?y{8K` znm<#{S{B3}0gonoEjdWW0Uo9=wYcOf?ZNII*3bc47;jliAeFjm@D=$~w6HP7f@%Hx zoWX!XG;-`Zv2!tjW~j2PO=HyK2zItxHK z>o4@X9*x9f_=u|!$WUkhQ1->u;->YXIjziQ=WQZ#(y9q8%$E}~nV^X#CWnM9^+5@I zg0bk3VPqV>hAogr1-1m8pRp{r2K0sOo+R-2z+=ay;Kpv4wJ30YjRh6{ji`C!rHQ6b ztvAzBxF)ms)On+V65dQm#3~PMr819`fpK@_VZe98TBHzJ;?4!zr=9>7FEcZ+y4mwV zzQe4Aa_o*@C1_T>PcEgPHif{3I|eE$0rjd*L$L{T{y3l1if%zlTalO{r{BHdri{YY4RA3|nbq+D15{Ts z;y{h(gIm=6xk_d!&fHmIR*He!Q_J>kXjO3@wEi#fqGe%oBP&=n^76X~^{Vt0nak2R zMi8wcs^;k!hxEL(yo>-E?KH+=Cx2kz%CUZ_^rkZ^f>$7w`NoY)Sz6L^xrJiW)hfG7 za}x&Qg=A6+ibNCe2HxizJxQ(#F!1>%y;N!@W+4u#SZ!0VO+@a}k=>cvdfH|(4JjTXns^AQMdZXRT{IUR1CcXxK?jAlL@7}<-XFhM$xl~A~yFo37 zmjbA(I(5wbwu*!Rtw5WDJ+8U8*h5nsA3Vs;sY zBM@5zl>v~&4SjPJ#g^^&>+vY+o1P1tFy3efXQ>1l-TdtlJ!m(t|ISo`29{>fF2HOS z;+CRCzqg#;wAJd*6>>8S;2{!qs9t6P#`%GP{n{QkMkyBp$_agcvC^oTbn6{GtqQ!w1de2$? zGl7*6czLVMt$g9Gm6^w*i|RP>u+Fiy>{EFdJ=Vu80i@KwYoAvQ(YEuc*e>Yvq9F6~ z#`H1VTq$|KekbLQU&bipf3|KjZ0l|foR8vn42EajqwFU#G?Y7jdQfX(k}IzFt4!@Q zfX?5*^i3eYt2P~*XS4oZfI}qqMQ@-rl?L+YGs2wo<-531kGi;PbUYhD|E70Uk`+5* z)ZXrnj>}oes)s{Jz`{!WZE{ziJ&n;kR%*Y_vU9Z%9ybrKh|@zs*!kzFunR@hvot|_ zE&FQbj@cBuM_2#4Lk~NGeDij$IGl_DzZ(QtCKpj<$#!ltSEbNXJ7(72=lJo45Q?0^ zrB>*O$~2SCsSLB9U2Kf_q5E&3r;yzNCUqPVE?BqkNoT&C$cfAZrt&jgF&(l*dT)_N zFbL5S3|aY>19f*2(D^Vq)d0BOtfcs*{i*p6b1kgqAQ<7En>mW#ry+Mh)dxF3F@$DD} zsCEAPR6hANkThCka~QU~d0D-$BeJ!c9es&$QI;Btl=|=-jU&?0!pYK!nzRM4|Nr_$|f~3RXjP$OJ)(fVZV+f&Vdp?T4g}7pd zfZ5eX1_Rbs3i-5nQWjsNBXybje?fd>)N}PgQq*!3;gXwG?`4lx(2cpV1)FbwmWl^? 
z!UVot7+j+*;)D9Os2CccGbc8*mm!7Ef`7q{E;p0DG5QM zrEhPs8wG5_KP(6k?)fPa)BnH{P{_ME_U0$6__B56Bh+@oWF-K+QR9?s%29*TV22;D zeo*R+sJ?T!`XZC20@j8WviBG?CQ?fZqm30Q(Qr9n*)=~W;o@h;0u9(I(?^7<_((+` zK)-+P(%m`I{B=H^$HRFIULT(2$bIpS>0k#c?@bX9{X0^`l?;thz|6~(Km(x>5a1yV zAL4(;Zl@Y1jS3^HTKRyolA_f&VS+SvrwryV`D`bwd6()NY+UWFwG5|GUFV<5x?-97 z*r#*G%sc&J_(%N@jv!z=sP6IcKU5~IOsJyop!tBxsW+sCzo+Hy6d+TImnY@!yD!$c z6qZG?oPS_gr~i|bfH zOF_~LF4qpIT7J0j0`y&|DAk~}evE#;@9$ z$yi0G(=WS(^MP99Ri8uLsvCP_U6jo-Jn-W75^jfp%>WKPe{(xNhe&iIyP5j;5fda@}A3~C4r<+z~-7?Uo5ZQvx$J8 z*9J`=FqBJ=7Fzs~R9w)E?0<-CmhoD^?OKLIN7v2T z*?fb^SF;7bq3)orm+fp`o&9fpjHRDN8*Enc*|^%UQULW38gq$k;rV11ak4xChh=02 z`5x-v$MOYb0uTt5EVW3aE76-JM0?t%Nwp89B__XLh;mGnxp)IFHA7EOd7Z=_Mj8El z5w#rIU!xDb2|f_%Q57+KOE!W0bdr5Eao%a_Te&;sJ#`I^^pOnjDWWGhmb#yEsnQH^ z+fo&Y=Ywr}iAes3S`8GHTt$lDcC?#c6>65|AIv&x2G4e*qFF5b z=Lm-_ozNoYn@UiO9n2Ik{C^}I^lS=wX+$r>nEL_`pY7=dk@K^Ap1%~mdSAxPXJ|3{ zwK{5^|yb4j8tv9 zc6rR!u-CO|kB;WYOD^eNcK{2BEZ5_5ey;sF3@oF8=Q+9Pp#O-+?*`L8Pe~(KAG#mA zq~bd-csRnfdolioc!;I4I{ZYJwJs-)&vaT_Z`njFLJyB;8J`UO`{15Hr*>Q`wI(od z^a2aSplX@Mzf0Zt6Lw^K!L#qd5y&qqU6EBs$M)M9& zXn6a&(5b61tdD=f*C;N$!|h1;@uW@zhlx--O0Z=5!zT*~GaC{r8`BgymK(9d-QP%i z3|cpIjG`p+x~a@&7~sUa6c8I4oh4EwYY{pGsl4kVJ<=q)?@fZvV?5=UEU~z(OJ+mS z1ZDD;D=T;CSMwI5C94x3g|fVq)+lkp_S;$L8_4g>F92113Qmekktb02P~dun&LuOT z@8bQP?|U@VPYPjrh~3iSI}qX|rw^3L^CX-1(Z77gZ^7~U>Fo2Mz~k=XY|qfrQTZiu z2b9SSo1idWg)hGk^-mc%i*-p`RYWg|UGyDaN_iMAS|z)WlGg0CdovwO8$xWn2eMT+ z`vvY$QDmiSLq?=5Gn@-cuwj|BKyi3!9tKsKo4+UaVv6ern@kmS9@f& zx@V1(?k3>s<#X1rn!V!@f%L?}VRN!cG~%j*`2iU8d20cSc4J3+iCGde1Zc5IJ%xQOzd zD;fFqJOAh#5u?4b`yIo-@g?wbc7?X#HDTqXNyfYv;2Sy1*$N;%`Fsy1`1k)ilxhA9 z-~J65m2<0h%DYTxR{s9*c~lQkRP{`=RY)U!zkM9i+A&n&0qge?{x@()n(yLIpIfnK z!;cn4*SxH9)D&W-w)}Ya&{Rw6l;ItfF@KHx)RvSjDl6yW#?X}ICN)04{Z-k%E6!D! zvFk;N(2x|xeW8zz^&c+!RwJ5K&qh9b0r~#jE(CT zmBGz;X1PKCpD+&eIF=^_uKkeFP?)>zZCSr-NM12vqa$4|pHMklXQ3o&#otB3IPobn zHGuK$s3f`}n^Tq|@ZR#4(FZoCix+WRYxqz0Kfv4%kevLCub5THFIOosmz}eAqsBM* zOrVDgn3`J|CJzf&kZ>AcK+u+b=y%caiVRfw_s!{Ckor_XX#$%sSFAl8FRU-FApBtw z`N>2)9)WYk1rA9YAaZMGZ&+lgD=qL^Z1{C?J7ie1l;MBLFWBl0Rto_y)%JlXtFg;% zJgaG|g7ZyQYj zXyh{q*xUr*FZ}irmMz*b`5p;Lsu&Bla1yUxZs-_TN(`oDBWCC@Q7emVGe1)(%1!Eg zf?2|2{pX@|WAWsXygji_KCe+b5@w}9Ts#o8&PG>QmEIOwG@Tl7V)rOvtu|q-(zwAU z%0+T8lSu;OY1Yo$iWz5^+QVPH)6lVVKNlTw5*^`^1IO-2UMn-b$=5y+J$e$2JvQ+> z;^cS4#D-$=$PQLT4Gc4A&+^svK6}D6`(ZAVIzOAH{G8sO?2>nTgV+?ezUerZ|$qLn&Q zC_~Yo=3uO;G}CNSZ4P!D*gX+n7kb~^lERG)v=z}+h1w})@*BY#YEsn{JLDQ_TEtX4 z)Ea7HeFA>;+KP`3u#-di7vVv~!2k_3(LM$r@2qpyH9j;n*VOj7#LuxB_cEsUHmAqU zOw_Oub?(dya}WjX!i#bdMeSlMVSpVC$8Ek5PcvvTp&-m?-kaID=#$A}DYqUmfqSy< zwAA9ibyH#czJ{-|`e3Xt@<_v(4$+V`t_m+ZQ7U!tathRcKhEeF|N6#3Fu7q+&TUo0 zc*ukOD&pOuv;V37b`P5E;Bl4i%(%Y z?oethwYssLSLCT5UDEDAWD}{U4)P0~0m<`ilh?9X;|z>K@XkK;5#~QC7e`c52^Yn+ne;}M1|?^d%g9E3bWrQdaE*`if6~+QNu3| z>&x+n)tEPAjhFakOR}S^h49gImHW&Dq@(vZ9)?`-^vQn~Oe#L&uKc|uYQG97Q&9gz zf`qLmkAPrZro64`kMnu>+CMSP>b|Wht@9yx+m^?MuS*M?Ss3Tqv1-r9I-7K2+aL_D zT!oQG=l=Vw^|vpV&ItR>1vgP205xokm6@tg$}3^*RP+%jVJGKfLV@O5E@xvx@jrGW zYbP&eOQ~Izv_9hgDSbKh&u}4FUz{#!f4W>JuDzF>AFMq z-5UBf@r0?{%34lM&ec^~TG~j#5K7FwY~i}uH4o)<>gIX*r{0@Q{_j>j|G7(}bKF-1 z7F2uar5{eog}i|#UjI5e3O#4M-1{O&!_aOId}=BtN4#xjgflM6k`e^$h#`2?EF9ze)$oW2~>;GPj3f6drzRA1L%fErY{@ zu>0Yd291${sFR~U?2#45 z;br^W?uo2yycIhO6vObozUUdFi%GTL?fKY5YfDfP(lV{J1enw{HF2Z!RVW_(k`qQM zx@V}CmtOtGcOv>h4x%2aSUQZhG_d}K{d7@lU1BKYw97`GbB zX}`wI1Kt@$xuHTbLT($X+*ZKs4VXURq0^JkGk=1-;-i-dIoO97SBBbzcP<7*!Ca|H zr1*O{VyAxWxtTHj;uVwY78jTK`NLpCH0wH+7qVlP6dKERHFdG6(HgsgPz3Uu9LtS4 z;_VphudKD!L(hpr>N#$zhJH=WB8Y4L_q70yu7f<%hE~-cyKV3l_eBh{mJ=vYo0}0P z1j3ahw-#?NO^QrH(NVI4>G|-3O9Y{SYSd=j{0&$`s}ZY)KqB~j<}2@boL-E>JotA5 
zTCJY`kPL&(VG>$S-v$w@X?^~ug8S6&$ytGg`CIZwZ_v7X>gG$@&peHz{=OF9yzO&B zFiwM}e2uGvs+6h5H&)iq8Uj&RtZiPO5q_;WgKO+kyXkaAyWh4wKCVI7u#fdAfy~2! zWC~d}xM?!rh6HjkfQ0-$t!2x!xHubk*Pi!`Xz<-XfyTJ1!z9O#p`(Fv*ntef*UL%@!)nWJ0|&cwK#bBc1fOq6FJb{p?B9gODnWENi0 zB0^NvHcm0k1=ot~jFXBJcArXK#!C>#OD4@0!qe5#e)2P!!|j0pu70s9>w^Q#%9eRl+NueWu91t52wQVCEz#||RC2doO6CtL!S zr{6vt5g!W2`BT{aSKb5TErBd>=K_1(nUIZo-7_8W35HjQF`@x?M}B~Qv_wOTBbZ6X z8;XMOQBItPaW-+zmBXb)!ZubbR(D#G7u>@5SrvgQg>PBuk2eFrn=rszdx17W7tY_2 zxkTT+DM`-PQR>4;O4TXqmWJdwrH#aCSk3QG3~`338j@hYw35)da0Z7FhY?i-ZX%W2 z=~a^=A|)i96M(nB*Es09s50tB29*PRtG8T3har}ZZTXqE#urU-Cc5$+QhPhqo0|2o z19OMnjbSVOe4H?L{C0Z0Pw%&Ec8VZbavE~lSf|GxaZPZA&?E;P8S%I=O8E*RwDscS zsn+sMza20(gi8#&klp;WPynS;coLJaOmXFauyt4|3H6ft14?u3L$aI3Mb>zR$UW8V zU}>j$nIH*T9}O5A?@%3K+p`<&RWTgU8$D ziJuzj$&|EvG%zOwuM_7pq>X1YDha~AbR)j&8y3v9`WnX-xOeWr%O@D!^ zy?kDhI$5H`?%gcO0xo{OXozymWNf+Thw<}i!C}zY)v&d#Z8ou`yv}09FU(K`z&+m>=rtZ<8h2j+@?YMY=BY7-qbg z2NuxI^K}fvK*_mG6s#HahTIwe*s8&)-?R z`W&8}b~kL^I`g`rGCV}8`SCfe>bM56HKp9n5hKn_?uAs)096|<}6TJ zoS{m+?#Ag-CEiV%D`xDWH{_E58yY;L-RG}q>?1mgynXM}QavoUlPKmd`715!m9Y4g zo65H?D#xNU86u*5$tqF%N~Sgv$|+H=&%LVoKrzTT_*Fp<3&{Sf!;w{Eard>g+026^ zk(Q+AaBV%5(X)$tIF*4(vxGUfT5SV9YGDbwXY&q1g0t4ynBVV~ctY6)BH z?7un;MXR9VGvM;-<%vX?8FuY3(DCzpHY* zl-r#lO5Mkm@Nq~x%UTSVa=~~tYp6G)$>8ZmJMtx>)nS#kqo-Iw-?w3u*2+sq&vuWR zn{C-|P36{a!^$;$60SP#8XxlQ#2hZ;bI1GPlJF_^WrBs<}@vZS~rK%l5 z0cnB?%p!m@a!>2I?sN;p#7L!IdGa{}7(p_;M58K@bBH1;@ojtF%Fx}-pE}IJjzI6R z3)G?*`z%U*M)Dvl`|~O|Wz*A?2W=>8kKgD^_C?3gofR|E5Mp*6O1%n!?aQ9Oa!m<^ zSj;-6pytI+@5zcJ*Okh{DIi3-cl|AnN6|+=a?slp79?1ou;lIMjLRJ9`1^x<2u}&#ygWRWK8?;9Fj*=Fr+TFr z%9SwjIn8;tw(i;X9IterAUu^iht>vs!qWuk3qE7<^wG~c-a6U~uR2_>^wSUHgwcJ1 zu%19iT49Vrj(FpywA}Cbca+4n(*^H?dV#y15hQaR8FR6uxfQY#=bH4ZP@_Atm821= z^BJPhwgOH3Q5S4$qjjZ0z66HF~(IpiFBg=Yzn zE&o^7mo)}onR7sqJlRp_9TLa-rb_8c&B2op_+SgB=fgccp3l5qE>S+p^5%IMOC`lU zot4kLSS~T^Xnulw(o@pze4<#CDy@EG3D*k9^7SdEKsgW8VPyPudCaeDEkQ zN+a|1TGYi4;Yz{cCR5T1Yl9BWrW+iUtqA>|Q0aCerV#M+t67Rv!(>JRVn27@y4z(=Vv<|g>74y⪼%?fq=UCyTJ#?2lfZ-UIVz9>&L}Xc zcIpHY5TBH>pfJ%bDv=)(niLmbED6MqDR_u@I~6>bE7aT(^6RF6(XP#5&QR-z_q4_K z_X2G4G%WZZHR^Pjv^7KTrnH6-skjW;^vDcZTI4iZk)6#zdb&ta5YrBrca$=bMwSg4 zU2LdP7I9<|$e1upO!UCwvz>$F8`nQ%Rc#pTw>mqcFKb@dI-?k<0G-!cE;spDcv!(A z!EQFkzjH3))^El)@lHN|SoUJeeaBq7nUYID!!mO+DAXLai%+E|Up+Zatyaa{`%}v! 
zjyeh+-PyRRLJ8RA(dI^4uF{JoILoBuf3XLpazkxwP%7+3~NW>L)BrHBDZMS_a z)J^X5t8Geo5&%y~ggIEdI-iukxKEi2PX2Fe$HK(`y?8in!?dI)nm1dO8PRdMnD%C z(VDU9E3RPyaCp8NAzaB2tTJ3PzD7*MH1vXQuB~%P0_&rzp|WKZy!~~&&R`zc*rl%< zN=!|yUmh7@tdLqnB(9Wh0D8N@bZbStXVe{({amwrdiSeD$Z=Iixxe>85Oi_yM<-IZ zlyyQ}n5z$y4kc+mu;vs6y@>8t8AKmai1OVTNKJ!rnTKaM)`q%#62q?1)g)mVx z$K?xz1DM9}ZKhs(0aA0^AWHZGw|@+}1HTR!kg<-xu)+GAEYdqLeS;D$R`Yvd3U)*# z3xt~qxR!|6{J^4drW?Y+kJvROni4EdP-*XtI~Qq6wJz9rQ25kN!F+~E80TG9Q(|Zm z0jUJZYD+p{J5b%}fee_71|LPy(<@k+^d2d*;;zAi08f(VmgOh0@B}!9W)IU(c8j)& zJKM-Wr|9f>7lPy2N|SJVXD)=%B;f-H2FScr!^99PwQ&1}yWVQjQeuERe3x+d8Sp&O z8$>ErbSb*1v3Pbmya~|Ykl#LPj6|p;rE31ynpSw`eh~Bn7wra^Cp(kM#8{qFxvbU+ z7pm|;%#W; z4mdfN#?}zo6oTI01t`FXNN&S2?iEM;bm2w4|XZFElV|(;iAgw35x3p|}vkCd2Y=WW1{Orefw>rIplB~L(Fpxef}u()X$Hqwd_BHeiyFpu&FvH& zKI?BZx&q!~cQnStC|kpTw>xS|(f(3r6!g54M`du2S)o=XyK3b`^>e46(G<#U1a4-b zDa+x0J#L)vM^_HbWTj|h4iCj~VW>*LneyAr$v9VWBcGdEx~il(N4RU5zmaj%(8H+7 z%9GdM#$S`$bYN9~$G+uRC$K=$vRs9d(?yN`{L#ek@|@jhU-~ljxXj*{+mpnNce!w4 z!e6U4JAL6GA4$ozE>_y{FHnA+u^9035#OL1D*PMG-UZ6n3w1rNulc~Gh+*U>$XUFI zWCA;QzQZ|9)1l{o#ikAN0}(NkvYfM#FEvan>XJtX7c2l&8WJThb=*2|A(pR zr`Hx!Xswpv%F>$}vnP6n)y56zGYhPi-JRG7AAQ!nSfa_lZL5cWw1J6Py+A*$ju^`q z`uHY+BgGD1IKU~^{mCVX4J<8+?{cTZq&*DBl_PH_3Hm)PAQl*gHTSad51rc62c3)m z2*rmoJkSH0Hj85-b4U1h&d!Zq^VmAt@er6o&YnMcp3VsJ6CJudn9h{D8Afe6^aH*I z3H5e|#4CMlau33ozL>vePHKW9)2SP5aK6~!P2P;n&R#hB(j$_z(900T4{xJ){|s7p zr;8u{(*B5}ld4OPhFmy0~Iq)Y=tr5au{dpwncuT|G1V+t;+`lLn5j1kf>DTT~78Ra!$S-x$(Ld2p+Y zo-DmHq?C#r^+B4YGLE~uTON>6@N%_UkXWBGy{8P#dyb1E3YBeW$eQ$7_|)R4#s+mn z7-uK-x*gy-K>X_fv9$0z^ME+_46z(9Fa!>x%QPImR@iW+iWVgx+X-p7j zJ+XcGVH-LEEn09+;Rq5P@mwshK6k8*npo|VFe;I0*W+X6NDO0}(1G$nR3?VFgiyFJ z(q630lkDL;yV*u+i!cgLT7uEn$?IZ<0Bg6CJE;wlY}3Bdg}M5NSFv9OWPh1`_?BBnjb^aL1wFRIf93lK5-HW|)~Y%=311G;uRwASO%CVd9UUqW}0w z79&1>sj|vE{}Sp-B%BZV@e;A|t0WrYx!pXp|9c)PVkiZ5H#-5 z{)0-dJaM7NUeXx!<0ac}cEuRUC#ZvDfK)JZ*t4mlBuAMiRpWh}jsW*ff><(D$!A_0 zsr1j3@-RCbZT2Zk_8aL8f6rOvq{0S!NRVJ7+i`fmF%yLuCbVLF2$hL&D-vAOIND4_ zqWMk`lgixn`oB|(^uMD$%gAJOJO2d9?-MzFyU&~6hpY*<#%H_b#or}f*H+yG=CjLL{c9a`B@?8DU(v%Qf z`XKRZYj>@^x>>xxzH}FEit$AitzNUP@N_`Dq2Q}Bbmim%h;l4z14>obZ{~da1 z&b$WZqA#J5Nasy&xRJ5k>dj~+#G1g*TPt2Y8aHzr-bf6HOg4zfU~y>N@T+m<1(;U_LO+-fSlS)P1+@S0nsqPa7XI?_9M~ zt!kEoCh!e#kO0^PoX*Oa+nc(&;x8Wmr?fJwMR{GTPNSwzt=e+ArKPVv&YC3Y85_*M zYjn-9(__2OvBT5)cq{wHz2+3ebrq!U3n`O-jMDxS@l&_!bUpW5*y)h{gO(GuIT*CN zBah=-!@SAl@2jK0=2}Iq<|-#ZrBZ{FCOgnL?c9y|<1sWr5PG>(+Ydu3yfD^eJNu*G z77R|S+VbT__UiF;mUh(uV4-Aa{d%n~{Gm=Wb*T>|8&YlSL+=?=181sWaN2*n^U{5p zEkKKhrrlyZl{`JYENz3 zwr$(CZQE1Zw(b6I|Id@$ zo_E7iJ98P+2vgy(0lDWb>qle{lvqFd&cRSs+wIc#N%7_8?lG*>fsdJd-YC$0#I|Z# z3p|-MX=R417=XK??~DLdiARco2?FbK32M}N^OkLW46>FVui1*(XRDGN4HZUy`~oG} ze@%MNd+b_ofXvim+IAS~tojcu?EQ?XR;bR_OHJ~F0|=SxZOyV-ZR=-qS|ezX2LKnZ z%}}P+ZtG|_d@a0Oo_sw#d@W4AY{=n=f;enxrVifo-`xVSI#og9*&)87hW)@1A#3xd zzatY@C{u6paW7vF?;lV11Mqu!O+PNJ0H61VyXQ~ezqvcP_&h!Bn?S6NNemcFmz-EQ z%**@|Vm2UUpoLd<26rL+bSCvVo~vO4H|@GL`KL|bcZYcnRzQ@)_4LK^orEW?kJ+%z zsF*Z82$-aIX7JWfxX)NsIDd1{QJJ{@@dp`Uc$WkA!y6QC*B_m zXs(i6)2LLtJU7sF`WqMA57ex;?4UKQx1VGbSu2YWBf}VjVQjH;(|`e|K#SvR#5j5p zuk-J>BZY%WGHr-d6n-{A!-^Q=i|#M+&&-vewMAj;au;X!`}YEu+vlzCH|RR?tKI#j z@jK(IrGcG1Fq-o8U<{I_NU%jh*bD}UUe(e^VE)TyK)Ul5ft}XIH7jRtr*-Q>sk~@P zgaluUuM3MZ-1cg&*`xm0hD|3zBEBX#whKrZQ_bx`kemIjYZeqT^Yms8UuvVI7%~e0!rJe4^*PpsonY8Xu?XJEa{109|$I?Dm$cDVKm&fLnHzfmD*~8Ex zs2fE}(vhTg`d+DwrvRxUk_~g2eSJyR{A5vLlnws_yJ@3uGUe(UwB$c8e zPN757D?y~Z43-3I+TWYupVkc*7m!#uRZC&4GFyQVDVi9?tuhtR2fvB;wEXz+RN`vT$Du7}UeAd@Z9dhLf)0R(S4x%n~w~MCP)AJVI!ph#2T@2u6fO{ZKMV zQ1M=(^hT$Xm$ilB32L7; 
zH>bs@enk0pnead5s0*X~{a3xWR{%o@Ubu{4&odW>DqzKWkq|OWz7*=|`3@I2Jt1PL>u`>tAQll%qC@-u=N0Ua=87!TBX95$7(2Vi!7 zLbEQ|a5x-PSZJyEdt-r_tUQ?});u`2exij|YTiU4dTO-`_>cbM$ZV|G&w3Cb1i*gR zE?)#TI<{N)`BwSR12D?~f8!BgC;IaUh;D#5N4p2TNq`=IcUo~)%~sZOi}>yR zNUMJ#reKiF?Yo)N2)r@(ec*k*T6tUgvPZ8wbiOyA@eKQe8T&8&h$?-y51m09O2L zaRxsRgs{8DU#_y-=i}Lm*g+X8U~9q!>GHPP&lgw$lpQDzrGr6_Pp*AA%2#T(4pDIE zn+@tu#qU>F>^L==#nkt@pK5d%i@Ac?9$WpMPM6Nx&IC-F6C?P41pe>d^iUn+#Ci#< z>4x$graOrzl6?S|QM}TR0{6@LP?^iI06QU-z_0kts2?SZ^BU;72O+|D_Yb775YTP& zTomHI!>xULk5es$6JJTp(mhonxFfl9SI^{L#=Fu#JPGIL{;q{lr*RpXX>9mkaQ9Cm zUTK78X@){=O0qGIUOuw+>@$!aE}+)dmVA|aeHx@=R#juzBK~Py_e*mrTN^J&yqr#KG;GZk4D}^x4WSU6P*NOnx)*W zK$KlKD6qI|1F!h)r;qU>PV5ENXdpL8ev6bRtd01e`zRq`_l(`aTzy_z;=U{XF@FZY zHtA>$+!7~NvEn2)#ajbvc{ZwQ@&OzkzC!iRkAUi$sjvSi5Yp>_gMYuHN&VXWeQfe_ zaZd@W{d@kGB*A)M`c_rf20$Dc)Kes5)2 zY3=LakLeMrgNqu33hhp`d9N8l@a7i>+?A99TbY!Yv3BjD1&I~|hyU_xA&v&gD!|{k z72E8W+U};aPSekaUu$)8K$6jbSGB`@h;mL#pc>gA88&k;Hjo1Eu`uGF&DW z1n`P9QkKrI=@WGth?Zd27NR{NCe;z!-cwl=D6*;M6#Yw*BIj!l*iby;A)Iq zMi2p7$3UW91k3w_WE)5VPr32v3@^?8$4u~JDB8uTj?_9jz9+0c#}0e_Ey zxrV+Qb=KT4)_vJ&EzJPsJf7^WKP03R?mEEVVNfTV@AORH|xwblI!COEmk;?o;MqqenWWaiThYIZFD6FbF! zF4H}tDbVRTn(k9;;m|IVh0cPPbtW6J_H18Q@@kCRiFa4p{QZm@MTvhBHY6Wrn)u4! z0m`QPp1Svm)c{+y<#&(EZ`I*}S6#<1-{)LgMS$3Gmh!*S&(>kCni&@yA0)W%HE?pP zqr?yMqt@$B(N(We<{f)4N^S&ftO^%L@=8A&ACbQ2*x7$$`E=pFlgsSb#L4UZc{p(AE4;q5$xnmp;OgB-%ZwPR zl^gX~Q2Kq!$Z>9cgKmCs#{!D!&uj386rli%dgZ~KU!OZJH6ay-Y)Z+}bi#Hjn;R}4F6{hm$_kOWMjCxnGpw#Z|+XP zSOLH0;$a>Wrt;2N_^RLF7QnL9qz}{B@{3lO6y0R_Oi>!QiisAza>xySkcQbvs9rKk z14f0|deF=IGAfMx!i>~xgkJq@Rt&1rUhl$*n#h7p--BmG6}$=B1&1M?(h#pk%$<=2nEwq z??n(?*QgFyUpU`j>+H!JD$y{o&6HN#y|V1;=t2^82Ps}V0EYCOMR=%6PzBeAfk9z_ zpua(d$iQjpI@NI4)plKXfhVK#3S^Wd;sm$iEBd?FB;PWTqiiHFdE+?1kDF)H=5hd$ zY23@AW<>+{{d4!rC+{h!5By{eWLb-d4<{hASBR9|;lymFs3$7dz&WQECWE`kt2WXq zhdN{>2e4UZpaJvWc-Z8J9OUGUuuAVyikz(5W-__!+S=j;YA4gL&~8pqcm6GClm0#E zux6xi8=KY)lk~?L>aWV6Hn>@IXWVx#4cZ^;p-#lE3l|9ML%A9oko3tsEjxoMDsuav zLH{^%Ffk&{Mv#-NPLeH2MBg3E--E#qM1lf>H-OO^cdV9cL?vTd{th!RwYB#FKfeF~ z;8v1=ER3)hSl3seqS%zhkyH|tc5&Nx2x{WE3oh;G-)5c2Z!yH-&rK6+xB)0hX)?+< zC4A;5ZLwvr7AF9=9kqiw(f~I&`%k>!M|J~_R*CT73aqk_ggCA)KGA>fNT8pT4q{iA7Z>>yQMVNFw!l2Rq!p!(k{u_}G;TofLyFV* zFW}UCqG68}VPW3^`?x@dC-32oe z=DlC$=^+}jF_}OQNFpx=+EuB4kECxyA7z#=XJ30~ON>uecEc?`0h^t-Kn@j(l6iaM z2)*Ft_W(O^7~SxMnz%=BklTd1BGg4vUS|W=j<*N{X_R5aJUiHDlLKYngpY{3r)Nq4 zQ>|tcPA~u5QxDOF2z@7gT{1ZjP&P4 zQstc&{@#db18rtPZia4U>LcgXGo5wVGWiA6oMD?{24J^@J8(^uMu8Z)IYn?D`$J7F zk0!sm)x7zQP&>rmfVa>5cjH2DbqO39L7AEm8)yJ0fr6)jCV9n{uW}@(3;aLW;+bQe($J3k_um0H56vd5hgdIK~T?!dH~fV;tb}A3OHA7Oj**7 z0;M#sxsWK?d-)e~qGZ`tQD9wzfSQniTDqE=dtDt%eH|wTh-kv7IDFr^#Hl{LddN4u zVV}mkidIRqBC%u`4vSwR&;Se5hdC}vFyak@hs{NfNhehyQv$FOXPnfWEE%K<#@x1H zZiC9)Jgo}ZH`yD3@TYd0x6M;~BJru*t2@4y0Y9081ZDV}c+i$aD5D>=K$4VKz17@N z8*pm$n=l3jAwyl=qLBmCBr#fk<=$+5tVnJO16SGO5I2Y-l6sdSMYvH;*&gdE!4DM; zLeChqE<{fcey|TGK)^{+&^y^#iD5(xtbO{1<%Q4JkLDkP=VPA8Wbx{$ZkiA?03CFA z9woyoKY*&F+idoslNT{xq@Jo%pJ}4wCnVK@-=GGeq@HnxbPS!rB9Qg_rOGGP*j`fc z(OJ&8Db$PXsAg=7Prd$fSN@$6__;2cuiOp%%)-l6B@_c zK?2J2Hx2QUIV0s5?aFm;-3P}2USQ+h0Qh;+w5)E#*%zzd4bQQs+VQ_@VE@g!#f2Ij z(V!p1W0Hz$IA;~F*;BX81%dQoR-JsQntzGbD5U}#Dd)Su!Ga1}Vh;2kk_4T4nN1-i z8oM0HAiV7g=?(H)jvO!OX07mUQ5eioRL)X6u)&hQS}|AvNga=;Bzt!YzL#X8oe@8C zSLN&(=@O_j$GUj|X~^F_@1vkA-{;P4hU?Gm#c7#GsHXt9ED)9fUPj`vo=FmvdUBOM zeGT-^r~+gxuWjo7`<-wZI(w`Fm>M621KSP!T5hvDYVuFYSr3!enk|nRlS8LG_9FZ| z#DW%})80*)(O~G6;jw2MLLdeNj5h>9D;lk(u}O50Uzh$X?etWUk@9V(S4FL&8U<$F)*InvruT0#CHCGUxxHbEs&XP?tcnb&Vo-{dv_hf~r;-VfKkO*<$Trlz^#PwFo5lxhW6>e6TC5rwuHrn 
zX5uE>Po2z^u=P6VOPAGWTp^*9dAQazP9cn8qg%p)>~SGp<+%mEOZ_m|Vqf9~vy2Q=pW&okN*)sl6z{kj@Xqoh z*x*7{H3t2v<>N@!^uC;535*;r(O@wm4(9V7Yl>*t^F^7k!O1cYh~m9a4fojYMAG{L zc{kYk=Ak(hQQ2`cEw8)m*0h-zr|2!zwI9x8mHh7S;;9fC_)iG$1c!#}2GOo<=YS20 zhZ?~y)uJo%LxwGtrr4I@*ebe4GRpdKg-Q8h^%#)UDkh3E2V>F8MqsOciVea+C|ClA zT;MHGrGdCLuG`u0eGK8oJ>*V5)B-r=LR3x$>no8pEm|4I@{J(a=38(%>QI(9ZFZYK z5pvSok4(FOQguJg$*sH?Rj|@>TWNquxYmyBM~MC?U_!is(JTEVpv7(WWC$b{ zLsm(u`W03HT5S)K#?uaNG=DPajuUTZs}c3y^XUVpUj-g89^jg{v{{egg;y>*1Iho= zg<9lre(>9C&@V=E3$H#ED5Y<={m8`rO+Lnf81u)yX^uh#TW>QRQ>w!O%HIp$vQIPn z1JGgYy~ZyA^gz423P-Ex%b+OWUYI~IfeVH)ugl%YIRvLNawQJNo%gvG@v?o$6_{pKsE~l z|E|0YH*O*)#Q-*h=as`W*Ar+l^iJ7vSRJ=TM8|j8&ij*?VQY(1PFbPJat-=#NE5*@ zY`95GJUp}5&o7NFTulr1`yJgo2#|G?iu{J(r%m2s*Vgfgd;t#P)4W8pXp=Be&%j7{ zi~g~&AGGn((CiCkLHVLv9V^!^PN1&4uSW!B#y*-^_7Im@i%k!`sk?mNCpQj}QXP21 zdYvM^MZuV~Mo8b_h;Fde2;7V{3bm4#m~vun^E)Ez&jY}W-uzq+x49Pi>rbn12K7VX zt_YdIF|w+fU>$!4;o@JL6&n6DTm=guSP=4mIn;IAH4W$t%0iC6PVS_>v|`4n&ZY#n z4IZwq7oLS0?j>P$KA*wMD#_#>N+G~Nn5Ihp77Rvh)#l=NZ=FO?-+eYWy$kpvbZUdM zCQWbi{e)c?X`PJ&@0j;dy9m=whE4}>BAd+{)4OhK+Zk)qyOe5N@O||bS<52+HBf(`_f*}(R5crdboXI(5(_&PzZOq zAn!p|O&J%ugu%`mr`TY4EYR!K|Mn@|bMbGF<~DyHLs4i>=mj=2SYV`e*_^5VP!pk} z{3kQltzBH}d0C^X#z>tlO+RMUohMn3BuhCwFVHCES;l0WZlANO~J)`k~ zWrJJhSYc;KHLkQaGC{WCmQW@{ubASiy=w+FH>##oRDn?*B9as$?SL|-7^PR`edrGU z-XnZGe@i8F{J`^$E2#Q*T!ZO6K-dP7vR(0~s~``Ar)Tl~m%!QxU?4SD}7Z^ce?13g2YnuwvUEJxJ8u#shZh z5?RtG=;%u9?Kx$R3XnEsFq5%-W9LnvuW8NyIOY2$KG7t*7+i)c$SF6_N_`|DT*i3h zJfO5aEn6R$Mq9Mt-qYyk&8|bBm>`WAtA&o;NuU7=7u16iIi8<+RS@A(Zjiu``x`waAul>ij47z(Y2X zNH`{{UI^7h9)6=pm*Ic_YA z6KlW`B74-Wt600W?hx4Uf;dx9lKupK1c;ZSt((!M&+5K8=qV5xiVQHn#By!Ftrf3K z>t8u)>SfZm#+pkk#b>*@UD{OMZa>TM{T9;%c(QhDqL%8!brmT3 z$fbNWZAO(+16h)ZjjU@)$?iol{V!ph)eqhl&odX0URRl%E%56w=t+{KLG6`9I-G@9 zIuOoU^*UGA3z6|MqoAAQqjeB`Mi`$hx`=&WX*5;I+ZrG2KmS6Ar+>5WiEP)4kqDgR zudWB^bmg&^e{AAoG$Uoh=?d;8#ln#~Q0k~;v4e-Os4}mg$4b(ZRz^t{&XQxg2${{- zdFu4J3RRf7UA3PS#SV;A#$9`2ID!PWNX77#wZ1YpM2d+HqxsTNRkj-r%l1aSw=GI& zHA6^{P-^>w zZWrD8({V8{aFL=X7eO3Z_IW!`0EVT4^lFgnnP~t1FN-5-?+CZUs%MP%{%$XrLJV7n z86$w^{MAGf6068;1^6gcZ;YfFSbBSsN$0txOTLK+-)*Vh?dS#ZQ)Z!);Bjn;kr@K# zOaHKD%h1WOkQu4%!l}~&5r%jwk_&nrG*7^@&hflYxA-O_C=UWS!XZAs{ucnXH?`wo z{ia{##MMUvU9+IUy4>6GNciTkc;hKcY(sjN!jkKY=T;MOPFg(jx(ngrAq+plGMT-{ zO`WL|+A~{WpqKL;QpQi)#T?XSPumcm6^7>5VJDehmvvcZSy)!5%$^KsEN7YdI(++8 z>0u`Vj)6OJ*S7bH`ZBKyf04$dYrLx8jFpM<2gkn~{UP3$8dz8+_uN{C@8@obTtf<<1QR>Zt!z44^}0?}o4JPPf{0`SWWA@-6Ofri9Tj z_K5FNnj-;>0CdCkApWCUDr*)8Hs+}H##Rt;rqB@G@1T6NEBr-`_{`6)|e zMn^R>>D5rF2XMBhkQ&&`^148y2W{`vffO;-;BR5JUT*p0k#Um-7Vz&eo&xR(Hev)f zoxNJZ%30^E)+SqE73S3U8Zd)UMHPdl;OyX_)TRZ4g21Zx2evHbOA=PlvSH@PMCAy! z5k)X*S1%1z{lv67XKs#NAbC3i!4%+As1XObBwv5%^v75C@GuG9we-Iw({g9UUT;>i)ic8**Oo%e&tm#2aE z^lYAGEVlp7C)CrV149SRk)8Z~l4I9>G&A`HTAh>&awaA*I6F3V2%IQOoZU)e2J}g& zCGMc1|G*CvAX;+C&xA|{huPFmpg1Iiq**Yj7vzr`Eylm$v%Wh)a9;s`9)+4nw8T}@ zfIb8<{JOs9j-Djed3Rq*HIG@+KI?h(ET3W2owfmLzk<|=>0s2^lEGjN_o3% zuzBrY&X}Il14PurL^bFdOpLqxf}3D)qWeg8yMb{BsFFY zSbefuCvsJ)A*^%qPz!uUM-m%c${)0(&7a@si_I}#<5&w+$X^jXM$QaX1l}PPUs#e5 zfXM~*{O3v%(IwZB45hiD?%Q>p$zz5K=P0J|Wui@tRjfB84s+g@UeHx4&O}zUjouuo z*9o2x_RWERt8)41WoCI3C<;JSxG3S_76441|%;Jh!n(44)F-`A?eKKUUe_S^1d<=0h4Qsx9vR)cv=JvJg_J_vZMNQpF;ZYD^&QA&QpkB-s%K+W9 z*{EtMSUr9c={a-ZUEFb+U>V=%ARM>xQRbLOgeBR|`rNopm24bL^E0%Dc;eR>Bn=gR z-6jpe{@8;~xOIenGXiVj5eCvof6TQiLAXtxopHe+>d8=Bw^x@O53&10k#AZTTfq(n ztEa?q+8tK7rW{|CU#~nydYpN(^{CBSVrTdNP`^~JpyAe(Rj;A3ZGO_Fghyq$d`G=? 
zY{6oo_g)ML$T5fykamG%(#HoDkqC|o3Ddw5SL&6oB=FAu-u7YR)tmi2AOkayrZ5$S zr8o1R|H0bbJ3ZTdb5i`l0(!zc<_j88QK+J_BBDs6v&PZO|F^euhN&WyW`LD{un-Ul zZbD4d?BifFu<3omTOmFQv@8?cLM9eDo-$g{2t;D)Na1$(UTvT6Q|c6U ze*G1ut7D)ZLJXgX7K(T(evP6QX)>4Zob(SGJIBYb_P1YCtp1Kszy2CV$@c<8{ESg* zyj|m0;|J^KB9Wi?jzvQ{St3~dULKtS-(aMu;9Tsqu6#7)#0=1)^M@B3Jjdox!`K`e z`N3y2GBY=Tpv(dV1Pf69RU-K>V-QTw~!g4gzG(uz492cEneUzA!&A5#!(qd4Zz*RQW(Ad%wOpe04YhfN84+rS;vm;zG2tvH`P_+iV2)}p?GT1VlGuzw0{F4HH z`6m$l4!-W};2X?t2S>YmJV1OvI1w@h(i?<`nSsK@I}INS`1MzMhY0ZrZxo`VS6>tp zViWHW`rB(-4S%6v`SAj$T!O@3{5po!kbmRbi^AT{Pe%=^6MqvShbrVygq*36GYENo zvPZ?G7x*S7ivsBFML`1a_>kdzAzoFOOI3C7dDNF|7*O?A;rl62|kkBz$zC-9gq2U|UIO6q1fiJ)q#NQwU z7S4m?hThqcpce%pC}EtEnZ!%u8wQ4Ohua4_0{RYPv3U+(>+w7!gn&UA zMInMhnDdi;Jsv*8I3dgF@gfLeblHxB=pio(AeTdHV+?djSk9QpI4NP{yUyLncs(qHGI4|<0MTl+ zh7CRw$#)gXXBGAFLh!|0L4YygCy(noAm@9RjkZ6O@mdgv`D5e{RciX+;iwc8q43JX zMxGW*A1?}=`6lU--d=FAQxiA27~1T*%|M0nPZk@!I|%jN#uR zR+dGzd8qh!Azpdd!1sq*xcG!PU-k`ugOF=ihS1xK0xC2b=o#3(p+!IZQBaDW z7yOMvLe8-%=vpGKd8`0I;;4;3>iWdyt^ z^xGW~!QUW6b&l+pWR(nuNQ!|JfkYY*&LAZ9K+<|})f_l&y@F;4K&Y`U$9V8DKnY!j zsxPU6VVdeiC}k7{>E9|=P*M1$VthT3AEpSY$q0aWIbUhy9SzfTa7SPKa?Y0OvT?nWbx| z@XCeMkR*Z>hUq$kKc!}`P$-|Czk2oN*S*tq?Uex{ zR`%ili{A^0sYg$M{xtq)B}h8Ju%VW5te`$ zTlnDz2wN!1Z^kUvYDd*asQUfE!UbQ5m-s5A%4V-r0=gVH0qz|uGz`;x!ktdC`!01* z7DHXRM-CuZWrJ^SUJRV(`wiA{H`terFgd|gH$;dNG+XrA_C1GQNYMLxSX9wmyr5FP zC<38fdLx#!v-tN7r6Ovy+Qm2;%6Kwdg)$yIe{*yWeXhdf%X(55V`oJM5bvZi&RnAa z`c-t`|AxJ_V1R^ucI*C<%^RYMHfscXvwr!goJ008X z*tTukX2<54{QmFES~Ks=#oW~1*WY!{-nBo^Q&m&_MvxNT6#HRVfpW6CinrG|2XMcw z7duf!i9&?U{#G}j4^_<%f+2PLxcORH8N)lb*coW%X~eMJppD#L-ZQffQwNnUzuIw; z8tSC-%D}3bmfh;cTKb>peHf&_GawJ2gbFC;(l(9$yEmsrNZt8Em2u3q>l;ud$07_Z_kCA=XQ_AsAGM7S?)Z;; zaPB~ZH!JXN-(=*og)Jl>p$G5zL`@K{13Zd8SANhxu=JB{5G;azB()Rp(#>6awmP5L zfq^qeAu~raeyPTPg0?7oB|SOvI>_u*2S*9|Jv6b)Pxhd`uvURjHfH*N0EFTCUz#R< zer?ZhQ$&-etWVq1sVBD@$Gq$*JPJFiPa#<6Xx1x!FNW!Madj{SpD~}1AhUKU)9|&g zPn}ci#6BC6Q*4RnY>RmC-5vu}o}+jo_vJIikN7kA2^ucGh(lql#(!c|~WiXw=*NzEb(2`@f6v`i|XXu-u3`5GU<5eFg=M z5^RKkslI5zabfEl;$VG{R6QU=w2py-0LwIkx9@vykvpN552CZrlPAf`$8|kYKrZlW zC)%&X2#G^p=#Z0Bq_HSS_r)CnsK=DB!nks$xkF)$GyWPT3phKIe1)=QO5pzBelnKQ zx4q8ISSqT@OWoiB@^0<$e^TWi9h8B$i=mKTPW7T{zM_quW&K>*DtQ2k{*woU1pf}?~hvnhu zT6PpW^kbu4p3T=EviJe^eKB|PMWuN#Ln-G`@#xTngf|077+0ZDo*+vXNCI~?3`7!x znzo0hWRi;Y+jdcp$6k31ekZuf*In@58Yffq=*cTh{+at12-*b-y*;F7HEEVh zS#xpqo{CUE>v6`P#<2zeJ_$5>Qq^;x{JzTG9lu04lT{7klw>xyu*vP@hm`XaddjNY zXfH)ijTTAK`0kfV_46u>pp-W{<~PYX_5JH!SWo>t8PjyI@}jtjZW2SgSQK_129pgO zA08q2g+mtG>={WP(%iTKSau0SFN6uRZgu*mwHN}Y+=|Vg;C!)6|HIP%}NlQKR>4tJ;3=|s!f582!Y`pZmllPU#`<1L5?=(*_8|=~ru#~@AsUo@0!eCayT~dH z)YE#C#z`9k3Z@h^Ft+sSq3V>_0(IpNk^HYDfTkJyI_F{oP;e{=(4G04qzV9h-omQ& zxQn)P)3W1onu1yJuKwV&b!l;}x%yH9v?Z+D1b}2;f%;VsP91DSBT+e%s22-(mS2}B zr`moy-xHQveR>eTZV*k6S;`Fnp*7_vYD4%1`>$PytONH}DZ z_416ti_GM0my~+B6ZatTaj!Ong2Jfjr%rvcW06%}+V$$I>2RqsFx8vG>WYN&4T;3q zFJ#I^@^$Voev?P_GVo!_n)hpb^lKEU>ze2P!DuNPa9_eNwUOO^jX1=f5HmufEB8AS zD?D?+!%!Vt6J?!vd{tPpNkhgI^Ba-JFePIeV4Cov+717r`3;*+5=H-o$Mb6kHd}r+ zbOja@{M@kr6*%sMt;NDUEve2f(K8^X8G`+5^#$dJGDapc14@$Smd+o62G+rKrdWgr zZk2q0JMqw)8SJD24CLb#7dd1TYgVd$=RlBa58JdhN05*)a9&XkxGc|PDJrC;I3l{c84!O8+aI`PstH`2Lc;-lJD*;oOaF$9Rhgfc<>?Bj!9bat(o{(`nGAB zlIt`#{5$LjoTFS2;=*FYpKK)@pjSdIFKsc2Dbvq9*uLFmIEzoHqE1hG#1=QJbeU3FHHt?;=k%heYlNC#>EO;;$}gNA*_sQu*w5r@o9b+vjI@JX|(~M<46PWHJgNU~G9doJ$6)3Izcs1E=tRx;p_T3d66u`qOE3KS6qLVz`;^EDGa|!qMn|FmjEX zY$cTAP*w56{JkjShaeeHl>e2bwLUL16otUdS4Y{*RGh|M@IX{pasZJZjqUuB)>T1v z_zA$4zQbuN2V^JCGlV?3j>{I>$ly@eky;C_3r=R`NHzZ`Q$OYD|B}wP=7qQhd10&6 z^Ki@Q(xtEY8qlM!p~D*M`M1YK5(`<&ytgEQ=*dDl-ff4tc?@Z%51K%-`v^^-WAG=F 
zHt#nD0Pn&alRmzLASf4`)>|RMK%?;S8Z36W1-B$x;&viT;&;!!X3*ov)uwSE#W^s@ z?L_O)w`i`Nwnd9&lw<=a@%TFK^ypXK)O1o&(UiPC{__o3s_G0RK)NHW@v)xyN|!&~ z3wx~Lhq9xIqyK61^gWa5u861er*4wrod&9Hk7zIN7L~0ulU<*t9EqIcd(EDGp;}or zCfvr~y<`Cm^O4?Z#8qk08Fn##hBtLn!042k=Yy}b(4SFs%5lo`0Fr&7!@mMXbZI!y zF?(R5Z;(VS1O#p11w=BAwv+(w8h@}Su&nMI@RHhl!HATNKa!J zPv`F4@cOJ28^)2L2sydbN^%wyfbB6-YpQ12W!??=@$=XQT@v8a7h$0B=nyJx84`A1 z(pk75E6SuDK!kp<8AZy44VvIIstH3e@nA+yH5Qg)!&3|A2Cn$HljjUhsn;@9ov~sO zyzI1TO{tT}n%tCMyo2Ld2mp){f^0)$lBtW%or=b%m}S^XQ`#^wj3kbsigl zX6Dy7F7|q%h@bqlF)MN$|NKa6*mbFokvL7aC$j2QcJDEEJXd#qcXh>1pMFux2gI{1 z++iTBls@YlP;nRqY^pk>{53H6@y3bFuC3b%UfreY4;QN?az`w&4Yfh4kJM@A4qg)} z zl8BHPQuhzr6Rk*ErnNc2)C;HCwG;uX=gGtS1Z|HecJE-?aI7lRLqH?iq=e^ImoT<% z?e_G<)q4T5s$z~Xf5~jsfW@Zb!ED?jzi~J{&$abc4OMX}APoI%od7q$GQ%u(UnzK$ySzgV^@#Yenm> zq$lw3``4u>u*O@LR67gh@`=9UQBWfvfP)wGXC)1M7E>lognINptA$tj^wSN0pN~>} zW?>^?JKK4!Erkwhkx&ShaP-;YI!xb#ze#>fGQ^ET((ui-DQZWzR5kl_``w_v&kH*)u3)l6B za6JtdEa*#CCcK;Gm^-#i$3bn(sb<`=!7)W~#`T%fvz)Ou>E%x_>hAWs#=%iXrVG|O zwzVOn3cZFku`<;f>RHUj_48GBW?pfMX8FYumzkclV%vZ49m$*g3N}w zJzdzUvJSVA-CiYCjXy-KSAtWolEz2s1J!yvukx``d37n8h3X}29%T4b!3))RVFea{ zvb4r(QE*qtS9@gW|E68fRaWJ1I8nYsvAsFrV-;?$xEdS=Tx@Oh6oDNm~8*M7+{TrLBs!6A+ocEB3A1UVNo*vq{5kjtMr9TzPcl9{H}+(p-`~ zR2x{r6SC-+?UdAIro|NU#nk1sYC)}>>ONOhAiZDqvkUM|WXdGfol=c_##|ANT@ghe z<9X%+g(5>*#_EL@tl#;p`&t926GU}{N)u+yv-MFzCN(=eJq`3KW$~;fYREre^Kw8xdv2w?>Lg@mv{Y5 zpIh3Fr_7~IZOu7&g4^|OPT#U|?wBSEOt{R4xmdi`?Ja`q)nwR-qx4^o5r-<8Y^08;ScXonZ;3aPkC;2l7|tIM(%hu-bKNY#KRW8*v@j zOtrUQW&1Xfvb=T6$1aej#I;xR?vg?zDO# zJ^Jn0Wegvou%EQXP;6-eVyIn>H*VY_)Ri;Czm0gV3K6YVhfhVG@?FhMY*bl!So3&V z+%#LAX77Deopc?Xj+6$VK&kggS2EB(YZ#`cI5p-4(6Z4n%7QE!c`T>lr>jP0L7AJg z1vZ&yc)#rUF0(zPwRHaZlHRL{`>7Q$6oseeY7A|6&L|=RsTHX%ujW^PgJEd)hf?+6 zm6Up*Rxev&1}?m5mRZ!{2y!JkU9l9q4q*#wOWAt*iDn(0K$bsmkx}8_lhKl1Whi(Y zkXCpRs)v-LDVBrdM9jW@n!XU(bWZ zT@K`I`&+mdu8iwUvr{?NWd+BI-mrpIMymbdW4k4lC;THAI0t!Kj5%Te^XMFuy zQZ8B^h}P8G&4cR}V2;hIU61qtB^$1%fAZ>(Bm>-?aN>=?Hlquk@TL{gR1qdWTC}nT zUqQ@Np5_)0&^k2>wX77y1qsz-2Z>7Dkp;0GL#H8!yq`ReS|&css%Rbb&*FZ8kxp3{ zu(nrhJu!|_oxoiZTe0sF2<jGtkbi(+umqZXrSra z&lQU}xE$iK?JgVWK^hWEPP?UOj0knX5m9k8k#eg_bR~lH(1$ z(Sv2-pS9X1X*l?t;G(UtM`02r?M*7`^@8L5X@m#|!EX`%j1R<%=1+c0@w^2UI@KRs zPa0$mVV9sX=XO2sC?~yN?|_ovgWkUt;jI-{Tj@D1n(^Xk?auHnbw-D`Q#{3p4XGNn01S7k3sdwL z_6G8Yn{%wT!`Mb0;V`JIzfZ7`q6C@3jwFCo3Hj$1P41j8k=X^2ryZZ(8? 
z?rx{pj=u&JnP23A{Rl#gtRVftvnaFwB4;I}&?I2Yt1^R2 zG%p)!YiZhJDY$d#rCSaONQ;Sdgn6wD2b4 z3u{IFw1^CkHJgj1R9f*X<|j@{rz1w}%$;B;^ny8jO7SF^BM;NbZIpp6`R>Rm3le3H z$Ug1Kqc6(kurNl*kaTp-Nq(j*<0PMIOX|)2<{({ymlKY+X5I`W!)3H45TkUXicdrm zpxXkLg2?^dJa%3)Jln|r#C5H1qt)I@=q@gnXYoXY?L6^E`ei@qYr5JBodNMV@^-== zFrZR^WOX@d)qLG3qOx}t648)YS_ik3j?t9J3SK5X{0%LsR6sG}zlDH7@w{G`O$9+| zzP=x3SU!x1SA~8klk6bhX>FWWTwB3|%0n5GW8aZK)|uF`V@f*GeDWd-_43|=!iu|b z!wD6^!$c@6ylKX6LO$^6(I`w@wBr+SrKX`o0fjfP1qXj6PuLk>k7FI2Vdqf6gZ3`lf^DdLgh2PR$TaS>F|%@M%^L-3$d+HgK3Zx z92B(3u~={L#DtkEHiy#CYmnn3J&Lu5u3+;>yy(oOZ35}GFuqtO_g6Nvk6hkDL+fS} z0?%nn9>yL6bI17po1+4mt66~=wc_(b>ncOpCyv^sp)FK*fr+H%W7_lr7yD5nB-I)v zWI95*<8R|QtEk)n7vgGrVl#VU{A(QZ%a8O1H#ur+Xz;5IMvv)w;E)JM^z!a>s(LT%(*p4sug){c1wq*ps z>GCo}Ho(N(0J~hp9cKjFoQolTjb*d=sGi8APAU5PJpE`j?;pn(zSQPDAGZVxD;@>G z;?^K{5NkZ1kgj_6A&@KpE!5X5Q#pneDDrh~?2c)EKR%MSkR4`P4;P*m+^;Ui>)DuY zt(5A^=JIhT?)^8Bi%jc6q&mvnMb2}0Ywlm7KW+>i+Q8z$<St5Wwq zL9?sX_GRw-GvAlo)b~Lz5498a)50SoeR&4aF)E`byXL%({Ar zy@D{7P~Mkxl4W_bg~mjV>|gcVi^=l@eme2lywieUFsC_Z4L&(|H}B7OFCLCu!qwE{pO>aB(bH~$)Hs+!S4Hq#{{M6xgGqt2y-uM7re_`t(C}JpK*xiZ9p=9d`}tok=ff z)**Wvdu^1tAMsDgo4}5&TVw^O%=4M5&D*8eRX>N5F96(@yu8fnxr}yj<8sS-wxquf zd3%OecQc{(X-kRw%Z|kSzKx8Tnd14UlP!5#Ih$BM6@TP7y$Zw@_S3wW;6sXckL9YB ze+$RAnC8vK{-xm|ELS&**6*c(7P7~cXpH7->tmnf*u*JV6Q;BzaxnlQTq^nVchPuM zWkW>6&YTnt#Vj=0?0ezthP5XCbz(UXoAqVvo=4j+W(inXU0E&uAh2|joU8-9M@GVG zzB*`=|BMySYCEP_`8dqT)wxf!_H~+)uYP^qW);{;;Xm$XWeO>rdMZVzXx{Ks*|-cb z>4@H-vXA*RP+!;%)_!H~XBXY+|D=v3PJw`J^K&lH+fSqg>T+Qbm=nH4m`8oI+~Jz= zD}RCe|Dp$edx0R11cb8Mc2nVf|0lE{e{dc=4i6aa+C1lX*lp%;N(w1*3?BW++OybY zGWe5X=kR*sSmK8eYP@j6LIz^Fh2B&sQUbI(Y%(gl@wIbnfb}M#Ie#V26~?lP%!7l< zCx4rE%BLcx(}_uJoQLS?$K@0LuChdsktGQcPU`dln0*l6Rx^C(WH;kgHB|=Ig!Yj% zV3WBkOgSk=vs}t6Ej2nFP@AFIi7xGNnwSlZvsl-e+K zw0%oT-D*_^f#B&#X$uQE*GM&kRapnMKt#*oL&olUNh3pB+w6T9DKjE4fA(MgW6FO7 z6_wS|oWqw`h_KEdQMX`0`DA;He`Bw{)K;h~@p#!tfaG}SJ0D6FRfY>!SIH=h4|H{; zTszEAuI=en$|%Fqk9`P?s^GY@T*i+FL>%%TC(y5?ga75#{oIQSyI8A zs0jZHyBnKd*uVEizEK3)0B5+~2h&BSfSJ$ZNLq)$uAgsx>6eiD67_Y;$}xr zWwh;-V^o){wo(^q#fa7>8*ynp)e%#$9qW$OqC!9*IV1|#uaI^aA>WOXR+e-)x;L-+ zz#j^>p_e1Y)Vl*%u{s;ER2XAL?o`VV93j5XcX*1QTlUaX^-3~Kxf89!gf49u?sn)K z@?LmsD zN6%JwCWheCm!GZYtLMM3*psCuZy=BeKMN`M^sDFlKZzk|2N|agdZ-4nQ-%Qo1*mj2 zT!yP+IxRk`fi!@NtjdpYUMr(@>t;%~mpR5sWh$PC=|XNb`<0xTA-YY4C-r~jYb$?0oSCox z?z4bvIEz_3KQkF=S1#&r4_MuQOC{H%!Atvyd2aCDq^}Y6 z>V10MO9Ut?M_Tqj5}r?*;d)dwTX)( z8^s`6kXo}N9N%MNHX`<+^2bYY5`HrL_A30Vj@`ncl1#yna$u579k`xb&gLUf!Gv}W zK}Tbij6z56GD2Q@kSm6dmcsrZKAzTDORrojMjPKBEq~x*gw;6^tX2|^nvct%aE-@g zgTL&?gO}!c^MtmgOp&=l33Waok`~x6W5m&#sE$w868CRHt9a!E^OIaiHPD4&9B=W?d`sR*hed|^f^x|wMgeNi^X6YxIzHiuNePrycykC?~J08=W zOe)$&q3b)#Mo8NM3*D%8ulpiVC)ofHT+>gywu-85rf~?hEv(L(xL;Ilh*Lq^U|e_+ zGmRPLk%CguuK@MGML@4$F*)3-)T5jf4O*Phmm6U(--*hN9vdlHx;L!?kpa?vd|#Y* z`ovL=)UL(T@;1sdTe-jo|59>Ci`DaA{g)j)HiUXE)tjLZu3-R*@VnG(x1J5;*81g$ zLrZ@GH`D2VxtxO^m}VTNVI_(c&0aDb;}BlxeYfCl7|h4&uegYS^@d>Txg9zg-NFb0 z5oxVeR?`A@BqnaSzxwn2_>U$(yhm}awQdTQPP4M!ysOS!%&W zPxb~?&RkgOC3A9wn|U`DnDj3ZDjlKn*3)>_idH`yQ`;QX*PfW&p7`nt$Nc;woyAp- zS{a%(Ub}cd(H6LN3r%-2O`kXX;#aPk^L@7FHfdb}ABFIBo^wh+OGu|TUsyQ4CZh$M z4`#FheqYJneB6}`&E<}ALg(w-J3AZpAa!g{u`x06W@ZLDfChvhBF^BUL;1ynW!FSr zyP{MZ+2g16XuEq7#KJcdGw%owFCX;+rrWrDeP7p65_YkVV>%Rq01Uf=K3KkTssH@W z9Sh^kJX|o!<-ubLBjAK!c;#^x36_>74qn}z-Cv0Uo3~vtzqx+G*e{wCP#tX-!Qyv^ zd0wGF2f;hVZZz`fmYjGDjMV|4a+1%*JuV|!4%KSoR8O%w!ZgCd9l+Pib)>OvyN}rvpl2F(Kha;#`rZi3)q_i}1HD5$5UA;8#I&IYag;^$jC=y_d*L_&h_Dd@pwm+Z*IQpeBLW^u zk<_=|0BM~$W<6P8qRgViY0C`$R5-hxu|XWcR3=;agKf;X?KMLP_1D}RThh=bBzn~R z!Jd$uhBx|KmW3t@VTYsqx8i{06% 
zsH7!p7vMzxtW&p6#?H$nrJ1~J8;#zWipz9#tiq6ExA*e#V=M$Uyp>}&Gid`EAA8ae z1T;A3*7!vzH)`N$vObbJi@Q*BBMq7$RvfWK7+Ksy)8Iqo%Ed0E`LzB}d{>!*xKu8U zX}p;1R)`uVRuy>s#gtWiRAfXf^jT?n>uHofToWlta7*mNc`PPjmMT3Nr<_95K+O7h znmElx+d2C+QUl;v4@JBKa~~y>wHkOj+ebbT)LVm8q|72eJXStg ziZaDa8iI9UoqpO^OuT;XAl*5HZ20g#7XwoV zx%94kK2Inu>UT1_j*nn$WjHbkUfG{CqZ;y2Y9M{gjyt=*Osc3duChPf9fS1h^dCT^#xV)_MU;_ebs9(wv z^?fM7Np3J8_2>3zu1yHT%w0lTk@g?UB7>J2JZaWo#Ml-i+NNz;XT(IkQ}R<#q)`0t z`eVYxTl#CnwvYWHk;7GzM(k+^ZIr3CRYhE|V%TVpFe`4VQ;B);s5AYVKFxBG z^ibbO4*t-9Imk>kv{+P8NLo8@r|7F5RG4|w9bhSe7Q{yfwPxY0U-kxUo*RPT_24{2 z)#1=wSo={Xp{c0W6N(mrFZ0u4_^3wLWZZ@n{W1I7#Fq`vMeOV& zRLwB@2-()XYxn?}wnX$Zo@&*Jur;*CnhPMmfUs?hsVW?yud^uCLM3{JdYl5=h-9he zM!`Taub+oE_nUfEUEO$a9~$d`a2oFdyT)R0SWNWY&_(NM+9OVeR$pYvX_eiD?lb_L zfXr`ctfNb%D7cUMw0S*U?D^>}y%Ej*B!+QCx)X7Kvt+d&!dHB{gp+mXcm^-QIM|8s z%9Mhqu7)DUqf%;C)bhJc6#~k6QLQ}N5c;+9Jgd-+{<*4}Xyjr3MRahC&99{;zO$10 zIHTu!X&AOk=@7YzITx*Ih+lNe?Y;9&^vujf)3478X2?(RFjR7iUcBe&S|0i|odk3r zg^&-%kWorXW}x!8$oTX+oyV}+UQ(ExM$+Q#bwJh zhefscS@md{ep)`K!FbG;W6BTZYv~H_PS;k9_wRaTMQk2Mx;@Z*Rm@})uFYuYHUnzy zndi+R?Hk~roqZ8Q8Sc1d(4^0}{-Q`BOyS^;hww$eE+O+gttHEf3#%x4A zG9e@8sQ)RIyV8c%&~k(4Eh93M)}gy|%E7Zk2r>rHexn{oOc6#3CSyA5G=17Zn(YwX zJ6(ghq!dqvKwzJn!;dE%XS0L5j9jj}*xFyyF_Zfw(+J!FybK)QEXVi{Ujfx;fiw5x zRz+$Rm6c!eMa@c@RY&jNge0#2RIhpC?i1(@6xadIJOl4Pff6l1d(aDvjmwsb7yUmM zY2FJ0I}bA)pZBuB|L?6%Q?~uvN#@sUv0wLD?*D9ecJETYH-(AzZ+^y^U+al&K)(`Q z;0^Wx@cLEjsI3{O-**p8Zu(!ZrFp!q2{-L4Aj8hg%=7*Ir?=>D2DC|zmR#Vj_EX!6 zK+5mW_u|ekIywu;eQH)@Sjl28inZQV;wTU`G6OcWxK!B!RedzlvY%1~ZM~bxzv(79 z2Apa_+E)5UE}M|k=|3h^fw!F$$~)c%UDB7q%5N*rw`q^0=YH!1+|kRXf01Y&Xz2q0 ze}S61n$_YT0&40NZNL#|&zzjE5z^_8yAz%4#0uwbdsjJ!j)~kb#5h8P;3>5hY>+N{ zjJ$pl1EN4o7sg**k}r%32Kk|E8P>cRU3zHtcEJRWA&3824a7o&C&su3Yusp0TRdia z_JzzxsV9Yhm~QTU82g4f`-a_vZ|%{d8{UCw9=1c%sV0Gzfxb?SAy?D9o4^?~`OEdA zKa@Rqf@$;wsrC#t#rbg6cX}(>>lU@a+lOr#oh<(mhh4)W%lT$j+ormSrTQJC{T1;; zu49Gqtn|{M*SZ7}K!#a@y`#}yAaXVAwZ!ykOsIXt;29orfD~_)aH)#GxJyAzEsd5s zr%q-nboVu!VHGTo%gOa$>9i(&qzqfE40o0cvy&!O zI`ypiSoUn_{coiiHJP%C*0glgmlkzxfM4@Y-~W>x9MmD}L*1tvGZ~ok{b@g*zcr6! 
zAgSXqm;pV={}#k&{Zi+*FM~-@7fAc49WVcPUD>d%PCq2=u?t=^_GK&qD86mu1~M|{ z$94xa+XRN%?K-_)gXU1s0IBvTA$xT|{$D{*LwJL*>#`$f0avDHi@jm0SqIGfL%#ik z!-MhF{=Y~3H5|mz-3~afyvi5O;hjD`@aVs=CH3`=2+^wp;oV7WKo8FEgzeBQoJX&a z4FcLEF=Z!{$GyrGA+4gg=lDnk6RVNP%7skPx|FCI?GIJn2@5PRJlN@0DH`f-}8-0Z_SaG(-D<$KT zq;&onndF}2N$@Cs^e=cKJ)*-g9nAz<2e~9p>0RAU3oiWEueo3HbHC)6;z=uE*TGBB z*ui|vK#+d>-~qCEOTc-8`T)BTkauTCw?$-bhpvWIk1wvwX)F!=m_%?2<$;oQTla!C z&%EdxeDV>O;zHYA+dO+Y(Y#sT^_q})nT|#U79lBh|7GapXK0m|p81`*-AC{ZLwvcn zgU&IC6Wn{>(+ydOpnimnvcmTh!0){fl~D}ybl2=RqyRMd&s%&aAp=rJ`KsbgezlFC(*qPTpiTpo_BQ|14g zwJ3PNp4*>fR%?p!!}OHV5RSC&;nrx5xIz~FKVgJM5}~1Ksj_HDc1uUp87JuBe_Ug$Ahp859x!w z_93k*T2p=zY%?fr4eXia&Yr^ul#P{PtWIB$Ir9aK$`$L=5iv_Z{0iixb(BR{&L(j+ zcg6;bd~9ZL)dqAz(>(J*?*FMJ>8$?$(vl`E|Lpvz&ZasJgf^B2$a&mZzw-Q^wIJ*(?;7BKS8l zy(vdP9jOyPJa=$T{N@pCwf`HBxcj=sET*3yjLj#bv&mpq%nSqIh0HNn8pe9nL-`&= zyVC(CtX$;ee7cuixEnnC4=X&ZjF$h9Pe4YKq;cAQw`6|HYLJyW+t^!`->o5>M-0=^ zsstEi1^D};9`fYOjF~v$^C|W}gxkNcU|afXi$+w4kn0i)Lj8Zi2vfNm&eC{pCMm46 z!pq+}GO3HoKPsMThuiu!xo;DQOqYDPjZapSY%vQDi_lG&ONZ~Gg~}cMpyneqdLlTx zBLH3YSv~jW(>q*cA9&H-o9E`6iB`acTj(n6&CgKz=1=)1HO+`uzo6R9uaj>zi7+?k zGxqy>`6pqwoS6A|XvWvVc=2kz0Xp1k=wm4C?+mB3^7Qq!?F$}WRp;m862*ECV;{t_ zP`FV?x}Uyl)cP?Pt~MayVsw}(z<9TGIjdyz`&3cZX$=>0H9MW0AKLI&GUInQY!J~r z5e!s1OWJ6J`p!)0&$)RBq@5Gq=kTCD$hMIxjO^+K=gz%D4Fu`bU#=v)4a9P#27={f z6X>DzO75Xkf4FPFn`8^!q~x%p)kOaA!x+{Lf3lym-sZtyMAF?!n~%0gWe5C@j^qci zR+vq1Yz=}De3k)~{SPiYtNaHSWNE+QLJQo1i0dBxadb-!QzqqrSz29}G*3$y|A3IFzcd|FfqH_Z1u5s0)HiiT73_$zl3;t1tKns!_{ij97iK^MVEMz$leWq`(yN+&eV zxIN^w7)#6Y=nA}9bjd%**}3`200&pTtxrE@WY`}O23j*9#Ww!;{4r_=YMp-`w z>fW?uu-~g2wz^yd5%-JsjZ@V96OY$GQbOn_O?xZ;!wU=Fcp?59FMJZNKS^ z)&o{aD`muHUMPwGK?{Gr(L&Ati56;GruZFdI3S7}B-ETCRIZmnRbTttT)yXIZ^?cH zj-l-|O$d9kbh++m#*tMVm|C3dS1`7OC!^@YGkbt?uSXfdzjXvQtc&>TmM7B;(Y{1A zrg_``Ib16(Wu$2l<-lY!s#&IH5fqS8OWSac$Kfa@;t}8L3pLB+=;{^#%!k){Fl08= z>)am(|E3j;!fu~Q?Hsh!q79?X@Ji4m;l|E}HWChlyvrzLXuJp#LkyHJ8L^xc*laBI zDL)Q;qEex-3drzwND?Rch6|^Za*KC2rIHE))>_`h8X)Hc#&P2dVSn{pOx@ZBak)&} zm}!I)WBD($2KM1E{XYJ%Y3p=C^ey5!?Io<&`m}QnYR0L%38BN8herzw;T0uc?htU= z%KhpqHE)t9`Q>y7jm9ZDO*VTqYnegK%|=C`FZ0V@x!B%XQ~7Pak*t@xqc_+=#wDNJ zJ`{n^2Q^1G?e65S*S(JA7arf)8}Q528Z))$V@BiC&(yXJa|Kv)^ac#`XkEVK&-uLs zgx|-l0q%S`qqDBrPwCw|1wWN$PQ`wFLO%R10l%~0*dui4wvN00dDsE9Gg~bL0PC+u zvh`YmPsuZdCdHz~6Oj^QXbV_~X|ohUZ<{IGJXu`W5qf!QLCzVR`fCpUyl(5}xP9A| zhF1D5)`o$MOv%(Wj?@%Em$6+5jUTW6!{}&Ni}y-%)F9?09vf~4*L$~`uBeaVvG1Cc zF&1j?y+Opy0$Qq`bU+Ij*T|TLb2rpak7lZypd%+2c|3dB=sL7%z5TiLW9Xhv@FE#{ zUOTuzDj1z#IggcG;G<&p<+XiU48MFluxK4Q!4^;jy07J?L*4*Bcka)}?@PbzU5ipK zfcX2K8E-vtoFmpC&LzHW_zWutJ|te9n{UrF8o=Z74vGBsAk{tN9nk(F{lf3!^V1bP z->8YWi;Ml}iqePJ5hL@=N(0u$xE2M*oH$?mm9|jWnf7EerWnvK4lQS=L<17dKSB-& z$NZB4bLl9cmoHn*uuIUjBL=KGA{Qgiy&5p~Vs`iQ(4OI|r4Z3X%amEAysVm9DSJ%w zKYjfn>3XS(`=e<+d$nUbpVdvbD|pq zq?eO@cTHUFSjJp7C>Rdb=L(@F>}a!GjcnOec`)X2sNpdd&r)m~-}i9p5wRZAw;4YM zY^VCs$U7x$QR%_`&R_ZPZ9*Njy9&ekOCV6F(~XOZ$OC6iAmfhVe*B2Dn)}-8s5PHqhvE3? 
z&2L7-n+)oKps#W}eQa1p=#r5r&);KmC~Y74?KlE+$%w*yWvD(}FyYE~{Aguv40<{> zw4!m+5~avHxPNCJ;tB(#brN)g4rUPBUVGtQ+l#JWw;%2EayB zT*+qFlA|sc+rF^)!7QSt=-^0ZyDEPWl6^Hm(^8~+nnR#RI} z8T&H6^eobyO0F6Meq)=oK0A!{=o<1o`?4ni}MmPR9#lHsCh!1OJwe2({B$^Q#Q&ulmaf0`r@ znE|hL(Rk^0XV&7_?tW+X8#vX|tbcXQ=JwPgFOLKL8=B!+PL3|1pMG*`D!$2oYQ0fx z#m%7p-#a75V8Lvj7!p}_H-0U{ZIcfrtPFXV05Pbp0II{$IZR;w0#>GXfxp@<8oox5 zAF<#iH|C4@3=Yx>iFu|Qnu6jloC9=Mk-+!w{2B_sB85hpW3)_s=A`S6!MEl0A|?*n zc3rT`0?N*S1)t2W0u9fB@f=aNo+s~~&lbx$q|7l}euT647NNHyUgGYicrnt}$V}U2 z;vbFWW8MFl8_S>|EGq79>TRfCzDIbOw&V4{((Q8D>WwBf;{L*ymSpF^)((7hjl0Os zwNyx)#PGG-L?vQav8-5wWS0fD;s`5>e;(Mu44LJ9Icf(J?ncTAc`Sy-O!11`)nkL4 zNw(6Z+!7|ujS!6~e6TDtMvRieN5!JTp$*pDfdD4SZNT{!82NDqEN7nMF~<6d4Wr=b z==QkSd<#?3l9T!@EZO{^uH7ojKQ_#~!I}q6RyOb= z)f$J$%?i8dzOcnFJT+eo#Ur+MZ`h(@*mH-#_d&oU3U@&srL5MgeGE2ZKar5nKqQu% zTl1wPM`v_)QHHu*0qZqdy%8MrMO48k>I(;)XsHfKvW*==pLk%r=h|BElDMB%TaO-k z-WV2o5TEOzi@OQFD^=?3n^Rgtt1^iB6|VDc5a0F!Om*z%3Nghm#`}8ChNYVIPc27q z+n6{XH21nLJ=0e38G8StB8fur_Z*FuloIj3s&sm*S{fQH#%+ETYUhuB=KDZYyx zl@ePM{_Yn-Y*N`r^L(H%BCm9cXhVySi+7*vU^z&-aFL=?CZ64Z!$awMT5`q2DnDjQ z7e(CQ5+0q9KE+^DL)1{J!*7~7d&FLhWojUYMJP}Qvpb#ae9Htz9m#XqY<*15FxUHZ zwOz5!{rk7KTkVVAbzEe1LyM}K5pbk=rw$xJBiJ2I0GYu4wxKb4=h}r=wfDOx)r()$ z0oQJLGxiA0Z0(xbLCq}ul7)m)5&4xg(fWloTh2j@OqgkXLNbI&;9u+|^xc3RS!k@A zx%(OJP`ct60d<;>4`(q`bXkk)Wjx8&C|K(xxl7MNTx>RYO9?ol<7`P)QK^LDSsNGT zITgx8GtKTUPKvP0;pQKV)1@Jcon`$*m2wH3f+EdXfGOcq4?Pwfx-%y5!SD7 z3`{q2bPxdKVI4{L<8I$&#%hQI2q7YKN#yeW06>16WJctu{U#~1wOew zICX5db1#<4UZ+i<`i1Oc{M~?DM0C$oH?l^HGN{Lu(a(ZQ7&jd;273qDkVl6V-(kDU z+svt-rdQABOs#-^UUr9mUhve~LmYmd6>`EF(s~hOXHwsLd^0f*)~@UNg0d%gE7U_x z#Aqyavwsz}MK$ZAh`)1tcOT8(cQe5v5FX3aJ^o-KCL-T$>uNTcA3gXT1G}%5zXF60ICmEC z->8N8kr@VM>ZbG0)I=BNY9b9Xln1c=)Q&vt{vplf2rmNbH$qg5C1*X&xiwGVD+v2V z4@Q@8WZ4j{yxKH?@Q=>Ve_eo(h-*;QD1G*Rq(t@_%eXgB*9CTN9GZd#Ai=2aISs8k z)ZdB;CdW~*D0C&oLJ)vb#etBCPt+U`iacgd9o4E^br8h0_M0HMYn0>RuS)TcltQVq zZ$J3Wl^fi`U2@sC%iXd*@NF>T`bS7R}pV04u1X&lx${&3h` zm*g@o!HyF-=QJ}|7xp0U2xs=kM^+MxkI0v@LpIHyyvOGxEc%m55ys#Z!2+yh5&LP6 z6J^=d`yNM7sjj45Os##q{m$?!MeFlX?&Hnp{tyaMpsg9D+MxjQ?CvhO2r*?$UMdoi z4!cZ-t_l#Z!O!LT*)nlRnx;Bl!LG%%TB%m;&7@Jx!*bTL_c}7UsfCBA`SjxMw|>hREPoO^sde&eRgz7ww|;sN`f33VUs2U_HEZ`UDIs%i+tk zFJedwsN;G)llyxeXi*fZ06DJwr?cpFLf;TlK2xE{maDEKCr9c1Fh&!80~w_@9WEph zIHv};3mU6v0-ogd`9anFXDOs)yFPmpn75?wLVJJ&|HY=;@=^zbalQW;*ojdFbqb9o z%MfX6vv$>7Ve4?xX|wkY1Y{??RTbp;_c$@H|AVP_49+B6qjqCk6Wg|JPi)(^ZQHhO zCllMYCblQ<$=>^%?|fBVcXxGlJwF=N-Os(&wFutuZ#ifX0y6&u;^$A2t4wX{{baeG z_b@mJA$BVKt9|Iqz-m}+jvvwONBIueijZs55b?i8lWU>q$Ce{O( zSa8I4`yy?ah2pHmRQR=PgIl2}m)Ipy)!ZD|1yk#l(~qS?PX(5MhHpcs83fL$R# zCA1)7E%1rvQk^W)*O_wVM`$m+_M*=>E6*v7Hd0TY`wmh{s|R-oqwbCG_>!iZ0%%-%S*IP-pv-7iQt2Qt%`e&ud=S0 z#3>$XWf7eM3BV3_@o{GxzNg+HaBr7KEu)>F<@6D^x_$RS2lN_zdwALxL(NYO*lXue zEXt24ZazEYq!e+p4vr18`?bQw3A{i5y{B!C_DCvA4l@0r()?Gz&$Qq)I0J!qb;F5n zgUAE+-*GwDc)BI7pACXV<-s#QQhVD1#%gY#?EYF?k#3`NK zzBvfF-kg!g8TI&Nnx9D3okW`NSjk`5Jh!C8G5nT`JNtVlS0T*qz0+MOx~agWg#aF` z1j>;N>M=>bFJPCk2ZD|a({xCAQRVA+Iz`6M?3!OBGTxn zo9yF{=I&<1rvliaE&7_thO*ue4T{nj6ZcV$@|D#`)&DN%nGcsN(>j8+rxeN8B}7hM zpjkHm>$g_@YsLx*pOn}ztcqHWBiG*qdO16kh7 z?5&Ul|C`RE-q31^*CBlHzm`*2MGL1Y-v*?C2$MtvDSZ9sEBqjhF@h|7b%F-!pSDr zSTHTM-|>zibbY2w-=AHn+DxWu`9nc`U+-!$4so{q^Qs@A$j&rWzjD*Z$eC zQsN+U3%13g4VEn)aDWgJu)-k~%piN=eiT*+{KiTB_S*x7InqD?=N*K(3B!|o2Fd}P zH19L{UnX?F{_9sdT#zFFCU@VHvfC`XUr|f+q9Jb{{Ts{XN?dP1bd3C{8}=>Tnm_#* ztUY@~wFm9%>}u%AEAflO3WhOBb>VB&&j-P6?)v#r@5jcPGKB$peIXJYCq%vvn998E zb+GCrJbkeCkwnc=*3qf5(x~l@iEt)g&lX2V++X@VV~0Z@yu*h<1GTj;|C<-`6wg*9 zClWmCnl&zvZ)KEmhACfi)Sg)#3)e?+z}~-BU7dTmdlZSUsC08DP`AFm;lFtqqC1gT 
zGuRqSb2tG}o+`Geb4E-DH<0Ta^$Lh(*+@axtV2IT551KeL3m@ENkNcG-A{Ur${QhGx#c^*=Z|?fi{G5RJb>Gc;F0vLDkBHxiAD`o93~gS8YCi{{O09v}WwaaR&ci;6ZD`)RNSnd0L*2kwC08FHEUWs%+h|QO;UI=l2CCz`A zORDWy1srn8s@W|V-9s|u_$p%C>A!w!SMN5V*Y~a6^Boh2E&Yx- zrDplM6q=O|!zPe>2JBx*Et9d0W~S*ho z;6;jW^@9|Oni@}BRtSHosjCnHuzSa=?Vvn>UnBB*aTHh&PhQo8NtdeHa0&{Vv>P zObQ+mnap0a4mt`>$QOcSPv-nz7Z4{y3lRo?qTDp@q&`b_bn!2%dr}=7RIhSoAg1lA z6zdnOjXE{4W@-b&b?l{Wg(`o+s3!Q*O4SyZV`HhmB%)x~FsW8PyTgScb;(@>zj0Hz z1Dn&{Gfcgmn;24<*p1pMjjx7ZUkvBU#G5OHR=&e_t(IG|n7HlSG{X6)0|x&vI$B7{ zwT(8G4u&dW{9T`M0?A(o*>0=zWv*4K&N|XwVXw$0y3Ld+ndiDn+<73Aitryj+pil*{bZFQL zjQ_h`nbdN<{(()LrLL@;w5DQ*tWnRUuQ~mj6XE@>3<%Z=`RN1CNdl+npXXIaIza;|pXP zgq4z?VdtqC&2uEQcUDG#EBiETNl?9-(82jOGO#DAF_YAnNH$`LHfAai0rGW1N9Xwz z%?{f^6JhEeR=h}c6tm~BS=&Fo<$=lFhVotsttkT!$XSQwu?09(!lqUbj;8*@71qnh zzaJs)G4{F}>8fJ}Nzymp57ec3%BqAJT@ZsdPpM5`EXtZ7LvZ@c&B{#t8CDKXrl#9U zAhu=!Q;Ofr0Ys19S;29vTTxq`NBj9K593T~V_0hBPkxN4?9BuA|L(d5hCL7*i1eVN zXKUim6&x%J3%r%)5&9SZsw1lA>v_DaP|j%o#@m{?qaHl4nNx;Q)L1CyRumzF5s{)4 zTe*>Q5tC8T6%L}r-TG&sdM#RO>_G1?Ha=KYW^9ts)B`(RXO?0sK^I_2$C;w0{amrL z%FA^xa=3UD;)oHg!E63e`LH+V_It$tN3xO_vXXiIaU1HE{wqySiM7W|h9!+N59iw( zdl6Uj0qF)v0V**-0-LLs zaSOU8nc{!C#N*}Dh!ib1VASBy#_J>-0iN)e>I0$Zyzi)o0!;%$W+c%uKdusSm>wDC zq6UcN4!R3JUa7SNg64(2;Allq2xynVO{}ezOG`c{4ipuv;o#IEkW|d^ERIV9N%JQL zeBm94N>AEQ-`?aH8pMl&AH#E(s?FS($>nf@AR2{P5#Sy<^d~08AHbppXjjwp#%}~v z(K4L9{KRnb^8L#voxP9hzZQ@XO8&Qc#l|2}OOtNPF7WXBYyK=3AtG`IjLg7}A&mya zSs{bIvZ{1lUr@(iG(JybZs}#FA4@K$tM|uyqhjsfdtvLW@fi@c^C%G6d?##NAZk4v^x-T+}@dQ z1A>l*Z?p&EW#J})5Z$<&TztlkAM@uCd6j@=TqNPK+5?~Yw60T-KzFJtBvXeicr=0F zw_VHoyhTe1$?1HbDTXJ7ilfd=a}`fpL&jstpnOg44%7hLqsofp92;1sVc#06iH~nR z*R6cQCM7DfvZ~eH$|+a2b5CnM;$-kIJeW5g zj5~Mgeca9-J3TPrF>t)=&HkU1)!>JsIcwJJB}NciHK4PcySJRQtW?rM4fJm5@hIqu zZ7Bsh`JgIR(O3Q(K8|^{&7R(QRAXEE zMLU?;0wWnRkcq{MD)Q9vgpwnGN+DyFz!Dn8aFm6{hO+FI@Lf^VFc57fSZ?ZTS}Av6CZZiZ39 zUu&dS!)${>HSp_jn*$a6lJ5#jE#eW&0${!I>;pU{CE<(-Y9L+ku{m^iWXz1Aq5kEY1Qy~Ml ze}RcQl2|SGFFVd?q0yPI!=e|~PMl}9Z)F~VfA5d{eP4^~C`Ld|fhs5OrD=L={T;Tp zaEdB+8yb2|hTfL0wZ7Pk)|RfgzV}hp&`;KS^%Kx>W$#XRWv@y1slDFx|Lxj=&0c8& zz-YhdOVNGS|J8cY|2d|=>1S;?^-WW03SiTn+U;%7r1QpTyV6ve%9C4bGXFV5Q*A0Q zl+HVCxyk%z;Qx+NZ8%j&-gT=T(xRlsQ{#>o}B#G9n}}^Mlk1l4agpOf35*~5E1FD6||$dtKEPJ z5%O=@l=0xOYG^sq=YXU~T9kJHrwehAGT1(L-Fy8Uq@6*4k+F#7G5C2c?Z5hBj%m2q zxfX=_QqL3G*ZwNv*_~S!cZaeQ6T*g4Hn{||j;#Ofvid#r+HKRl;`A|$3VBP@-tmxE zZReJI(-3KPn9u5qaG@BYw0W%oHpISAFQ(}A2Ma+wE`TwGUhcPEJ{0sl>>|@ynSH8< zxL6@E+dO4#C)S3gFyl086HbJx-zfy%`67qW9`dPkJ8L>scWG$r(66q|heo(+ zRAHoB1&sx2oW&!@n9;t|uT|wW;TiOM$>m`}xaQzHaN@aV`#T`<8;2#c%4s0Yn$xaA z4kWVRa;xDkuw2V8Sm(=Y0^LHb=iYIK0u61aBXUKjG%Sgu=Yydanuy&hJ?R~ zSGA1BJ%vSWWnH6w=?6^RKAK@~eKlcmHVTF^@tt6+ur!&b}pNuL+yEE<^VHp>j@Wu?`LI{qE$yBB>26U8`J=vAwt8 ze?$U;00JkU_h6Ghx*$c11F~<~z!}uPR?`oR@gqGU7_NeCwzxb&*adj{M(u!IenLJi z%M=ftLKZnV*KU0K!tn-d;13-+P-C3n7jknG4OM9%netE|dD_e=)7*etuQyt znE}2UcB`zyXc;cFcpc5`!N^m)W9glYZ5C&|SZFM`V`1-wFSHNO4j+y!yx2g}Ite0( z{(8Pvs7au%H{4MeqiQ&(S1%HSQf-^Lo0QyM^ADHv;MGxx?Fjg1j5j(P9a;ITTtcb1 zW1#UBDb4Z}-jE?Wg|ri9JZ*8s;(~!5@y$$GKUZ|Zb@|(&3!Cp>jKZQ#II6o~=|wr3 z^n&B#N7KUDQPLa<_Fi8fLgT@<6>ga<-PFM@LqnMrS&(Wg4qe(bTc>PCmICX;_$X_s zoC{P6e=>5^fgtF(ULNX&-{czWhzSly6zul-P;43FmmcSuTkmijf~Z6^shBe%*!?cT z6K?}F#^t)7S^+|W+~P-7>qhGqTD0>(t|x2@tksKFMT&LxLLthL&vc}nSoZnQq??$j zDsgbj&lHDSIf-q;%-$n81@;AfMrrio;(zcdW15YnWq%|Cwa1D^YS#s?+=@eO@qR05 zc?wnhD7hFw%Ee7@%3{Y#V!^YTVBT+rhWRGW03YzjayX2^Ejit{KrzpNz9!rSz#4Tm zP3Jd&D6hO65RY>9h8LhCl5c(P4F9jas4zcK>tDSsBqw4xZ}9$K=A7RNU2U(guP-=) zHnIPTi;YV%%N^|)7TlL=Q)VK&HtsT?p7gTpkvGXaxc*=b)ZZl1%)lod&{Q&J4@W_3 z*`_uT`eXu*5#?5xiCgAMF|;du=nayNW>Yza2Gmla>Xh{K>R?1j4+5||T|@@QQe7C& 
z7VAULuhdk>;SR%S==rU<`9O8f8OA_DmB#6LOjQIM`~9tQt3jjFzd$51dhT7|WGDa8 zwD0sB+x6WC|Lz+oI#`J$_0O53Xt(%mv2Cduk7d+8WB)VdGbYgE0XYJy0L{QelVQkI zPcGXtY`A8aOmajW_>%Ae4R0dbT2_X9{?51k5W&^X6cF)S8f+;GjL{S{=M~a!4%(3U z8P?}nNXeJZNBXvV*6no>_$59)Dw(z(s6a$S1kJj;Ljd?Iz?;n8N&;yNrzlPSqkQxa zOa`QeD}$%JWnL*^r5xA#;+Vj*3gjjP)E@%E?9=ThU&O_ZhANKF28%tUj6Q0O8Dykn zZAg_9*t5lFgNm-~H}FT$KI2@=_`@>$XK#}8E@H2tAL9Z>M*GfxOk5`cdVJ~i(HcDW zjf5h<3eH^F>X^8plwBee_K%;iOSc~d;ixlY>mo&jj^6R%>{t17@y&JER`^w2miSe> zp6}5_ujPZ0vDu7&!4V0YqVp|ZFUwk1MsT_`ahA_OFnMIt&AtImfjJk082T`D{nquN z58gVHVb%IHRM;0$rAn}#27>6b(1RZSsB#h`dRBNG$WH<)w4#NgEQtxlmOm3_!f3N6 z07I2Wuh_QCx6%o3xs~O%@|eVdP3vB2sWw$kc9I?`d15#HXiH-&WsuVbsbVe=BFnc3 z7*W;E8Iv9e;DDKJmTcS0uC1uR>owpRQzKk=k{1w0YXDK7&g4|7T{ z-BD2lI2l7BOM}fWOKZ^Xz}+D7>R}!M!G^>meZC zlw>et^N*^tPa}+Y+RuV&Imowg3;%~Z!KlE5c8Vu{@+i9{46umYew@EIQnnopG(y$d|D8%J) zdbHwRFmNvlu`|ts?-203ggm$rP|r|nW6b6GNI2vC(!1jRI>HV&@jx(DN8OU-lxL5w z1}W~lC*f1QwrIxf$5&RFKin~LrmTDxX4te%*i;M*76xlF0{xrFH7zy|(=CL!Vn*Jz zDSg;PnsXgHl=DF#LstnDf8h;%oXQgr>QX*J=YU)^#RPgg^J1T71#X#|uH%_kH>*OA zakSclQO}C)bMvco3lkq=pCG^(QbE(KP1U@pZ`E%BfoY35vh1rXs4}=P0ev&fxD(Vs z2a2@u9XeYL=A*8<9c3Bd4fHKKowy@B9&WX8#LqfFw4#U{3W!O_91(> zV&q*qJtihO_2_JOyaYH*XoLJU3SZb__o?dS(r^lNO1Vn-G974M+#-?wh#hPsem>7| zLx3!Fm?XNn3VuMATEDeAns}+9AM}7uujI78WkBrQmQC7$(UI1{Ay5!0$8*Wn!T?v# zCEKx_&PY4qSy7#B{?qdfBP>jPyG*`$lfGt+(?G#EfPF*ax{Yq( zg0B3af(HIg_ttTT2c|wIZ{ z8f77MSBtthVFJqz9unSBXghLU_utu&JfH$lZ_L3UtaknIl7OYL-^*5Fe9DQZpfh`PhO1mpt#pK_ZPW(UOd1DQ*QnjP8-?A4HS>Fz630R~D3S!3Z0oL&3(`;G z>}4`l!6BMRZ@Vgs%MZu#Nc->6N z@7ee>;b^#nX@_I}V0$QUP4f1if^f+z7;scrfF^^4$^56LTJPSRefX*qSo9a^Dcl$;zLuBvxgMS2>C zoRSYN)=?2r^|b}ui?OY4fk0AU{qhm;@8>6J{= zocv9nd+5qHhlPvy^-{r>;_+PL>LCEV%TZml{~t~Ix0=Qcre8q(fqiJ|-Ltd_z6>cS zL=9eN%Jz0iP)WUq6s1q#5TfFnBihlu0#3T*X*-fcB8CDSou%PYYU~Hm$Y07FKPgz5 zoZ}cr(h*=~p;C}kYKp4W!$Z+pRJ>BDj^ur2QJT7%qyX$Vg_fvj5wAW`ozU0I3TX*%5Y z=-ar_dJ&^sUGF6sRupa11;!I zR`5g_a(eS{4OU7FN4{=V#nj)&cs8UpW~3u2v-6nJ8L4vbj;->%T~?^8Xgl<0TPSQY z$OV(6f~u&L#~NZklCRZ>>P>Q39e zr!x&ee06K9^fKV+!pZOZQ3xL;;P%hq>Gu`D<#779|LISo5> zF`$J*X3=s64d~S+e42Py7SI%pS6PcS);)6SR`95BUF40YS-UBhf!7@!oaA&JHwJxV z+8O?_jhkLz6u#yrZP^tkZqHa3bXPK(Cz6H9rMy;YH&maW-#*kuW}s>+xdx%8!-~bbf_nzI<@k-eX@c{VN`F$n`@|sa>X-XM@ha{fi!H|%K=)tH~AtwCk`N{mLEDr5~Cfp@FJDy8Oy!n z|3Ry+Lf>*~9EZ4*2XG$znDmL74*|^ zR`Y?amuht?O$W_MMo+AjN#?~y!8AL`xEL#vyjayTgFYpjgm&`%>a-+h8IqxvV`F^& z3?_<=xRdQ=CzwjrkZ&D3X#5-j{b+>NqFwrrj9iL@``66Jd>?;nh4QBp{k;5jyfY2g z_rC9O2R~UwKHIzIYY{8QTtmXi-(f|DF6M3>qqv+VolJD8M?!}$>IvTf&dvS`f~#Ml zLs-Pyo_Ow61k7G+civZ5!GJ26s;#2|tXAK~<=Wpn@8pK9ztcC@Wyy4G0l~N+%?J^S zl;7nlruLDB2G^i%Q2vxTy(~h6x>2n1A^v_Hkm14lm|k1vIgxz zp41@-`X3O#KDD8IKgqeU;S=tAM#aF~<>v(~Ur8<i#dCX22B)hpI>KoM+h>k>y`(PY~ywLj$a*;D3QiFn;bVJ zhne=0`E@$+z6>64(+;LJDMjX@);VwANbty*7nC2y(WIQgz*s`-V-U)=XAs)-BxPIK zI`^Y>L~KoCwM{~_Dd(wlcI`*}RRx8GRY#eoyqMC;z!Po@XfUC!tv`#iyqlDQ0%;oM z5hd|%@?3xD);8{)$E8ufbpE?E%QOfqQf$y4QmT9$flA(`ZVsESGYyH{y4^wQn-YTj zNc#{_vtqJZYElwwF}T%ehbzp*;7+G8e+A@QZMCq-;Kc$Lq_jL-hWc3OcKJ$X$u1Ni zeBV`xofywRR-t6wnw1l%(#Tx7(vmtwfxlwA8s67KjkQSEvJFm&?!G=37E8X|>b_t% zBm?_h`0DirY?Z)e)uEu@ssWjzb*ev-?LZ%E>8a!)as^Q;V}y%cNC)?gNWovSlN1;z z;h;f>wL3~&nIT%G9G0>-sEM_|U~NyR{L}I>^SQ9o*v5!+52SD(eJIIG`9TL9H`fO8 zu>Xg&vqgWG@o(>p#@bpD&_72+xd-qK2v>Xa-dO7F|F_5KD1Qk}{(75qOL)dJ{*Lf~ z=a$f4uY0gfcK5FfOq|IKBu02Q@jDpRO{w$1CoK4&1z!H9)SWqBG|?CR07m5F4Gn=p zQY&SbJZYAfY>>h(xNY%3Tk39`mLsHmZdn-yw7gjG-JF{8#@e#rE)(Vg4#Kj4>enyw z<=GB@iz~UXgETddziSm|%hgA%Z6HYi+7J%@7jc*5)Q(wbRYSdB7_4)FWD_)oKkh~3 zIZhEN<5x3Je7&esL$$o}CgzLR)yL7@K=9y%{nOG~=HKW%(8&jif#xROyQH4a;)e3` z-wWBbM)+`Vr@E7r(!6v02Vt(N*Xr>*0qo7D-2@e@EyL4is(lMJ1eWn}27aQqSdj7Y 
zRNPHT&`g-YhK&5Z1aS`$lZ2~s&)NbD#V47ajA&fv3fvF1-3miuZI?%etviWpkab~l zp?2b-^d-4mC4MT&b|3Flz#xp>1S5*Vc`gCrXH7JXJ7+ZOippD$I4|5V?c9RC@W-WiDOBMrACRd)HQ@sVndoP9i6ERr zbvtM$yt#N2W|H_kO!oz(k--f|;iRv$2;{X8IIod&m829)E&-w0DHw~KZo{r z=qJz_Ks_yA;##1pdPU0)aF2voXY+gXnP1DpwjwqjfCPUs^By?Jk2 z{Q3#{cCPVQYt|kO`T}#Re7fCIm$h5^Xm8)wvGCVLVlWlFmq9KP^U@bhi~#_4=MA#$ zbrOtAb3sYi$Z13Q3EB_$;ZO+Jkt1GOO!J3nc0uBLrXosI`pE9N!^zR$Y<-!Fhy zThHR9G_e1?-R-(%=fvGxVA8pkE3FWE+ z1i3Q}h@Lm)rXOywS-S@oT)c$h0k7VrV1API&2{LG()fVzsx zXe_|{K%q*Ykuw3A#{V@ay5GPpC#isc??=6j?hzh^%0?ae0Fce95D~TI(l><4W|Q^@ zXt?)_`>sxE;PkN~&G61G`k1~kIwuA~`3cGoY05yslMVp46kK$B(V?hBi_j?}wW?Ym zs0_Y@&&5i5u#xqoq4e2=;H*mX3ZJwWm^T#|>&-7XiVo1Y%(vKnp6Ltu9aoF3xAi=& z#A)ASRK&c*V?xq(h_Z#Q+d-R%~0=TvgqdHb)xk|^Amv9FS%2C zAY!GNAaGegb@(*oc_f1u6s{I zL^n2CMV`%YL1%+nkn%WswYhgZn)&Cx1K0h%vbX&rz~cFU{4!Vn!D6OkpX5(|6k<|635F)Ourgl~Rt;?pQz9*F+c@ZV8 zyP&Kdvz+RVBDzrWkO&NW4S_hHrvDIZr_TzBqf_I!OQNdHo3XivKp}t*f5#rfg%#@P zRIo42yRNf~InJdm-qP@fw6K_F{yk6b(ffyIOKc2QuNkMW7q7>A%S%0ocgdJTHY7*p zrH7l_s9~s^h^hP#zyyP8k#sanM?Ak$0EG$CvTa+Z=*G7L~8J$P)Z;yXC$K>A! zq&gW#P@?MVMIyFB7;S{~IM@nb+{lw-&GI>J$}8eUBIhFW>s6uBKdVfL9+=wmpD%1SI5G@&-VLd{)y2 z_*A0zF?PtOm~oF%cAJt_Wfz1!O4pgC1hR#tH%}SW;_mrY>>Uy{g5+D;Yy_esM)rq z!ICLw1gk*Qe*H0i&dwiX&eHzeHG4O8z^=e#3>hRX9mYK1s)N5FORc? zbxH-6Ucl@`Xpm=rUW2|zORJV%GSvh_=F@Ob6LQ#deIZ}fB46=owx{Z{E(KQGrH917 z2|mJK$qu-2UZ@A>`Z91q=;M9wbocl+Wn3zv#%slH*_d#{ox}`lW}K^S>xSiKia*va zQ|~jjF9Hk*d%Ih=g;HkwivVG+BHtVV4&&U;Ka|22JMZ3|0x)&hzvPzrMbiGXa4r-! zNju3e2zp?zY>~_rRf=C323COm;At>E4dVwCML6$;t-o9vNW8-QQ)VjHETV3O`Evxn zt)VWjw5I}(j{DDA&;R|Nq5;aA@<+Vmb$$O$GW$hcq0=>0+O5;&ZSYs#G5eML6sx?r zskHYAR*G0ypZ?jGK`FmzZem2>$)5J+bR~gwr<-Lq5t*WK{e*{B+BA|ssVN8Fmkvv{ zzw}JLs49*!UBfx@nMS$>>mlC}JN@G$4P~G6-M&*PdfcBTwe{=kKV(zYZyC~A?JF?7 z5W~Qnv&Jr?(l_b;5`yp9!LArD|@pYMT0X{(M#>}E&iuLlQLfr&$?DW-d+ju+e~&`-8#Rsef4$0A`Yr#T)0>LjBk>BLUeWpX zjySWN^q&~X#0fZU(xu-zY1-j12Fi+7WnuAKPKTi~=*0k0lWX_uiN`Bj>ugDofsKE} z8Ut8+MSgMvz`y*LzwJHg+5z4fXHt&vsv|Jm=e|5Nr}u9kcq0PlK2v;PD(DN+IX zp@;gsM{hsW`J7LJF>*q$3H+zAdMC#K^>zRE8E^!y%e1^|FH2dGyo`tpDFya8Mr=9NzXE=Mw1 z09=px|EB;lG66?+kMT!0lmo5Re|6nZNduyEsTdg4ow*3pLGdr7P5|bo|Exg(`mJY7 zpZQd<769Mf>t5RH z-uCsaI|D2qyWu&H56X#vK|6N(NZ)`k{`Nc2{LgKD?YRv2r3aM!^FZD6^1U;N=QQ&J zv3Aa9L1gkr?F zSt!@HEcq~*V0cpwWhC0_6N((okuJ3+P3m9pFgI!#md;(Kh(D4);ZS9M6&4#)vU4tQ)N_{uzV0wz3kVUDb_C1Gbb#8LKezsY zfQL^3f@4pwhkU}>dWz?hvY^hZjH$T2@GDF^vRUaGZ7s^XXPs$dHQP=#9rIZOLFcHs zphO-UJ(tK)$kH~@OMWD=sn_F*OSmt5hV_pw?@cSW>|nWj*ZGju?hV-}RFI9d8MU^y z(?r5kU~@*3$Oz_n7*TfZU18B#co$6t3pOvpqG!Cbf7xx9KY^23I_J7E;x@&&!%g!DlllDS$CPHI91;s}Z zXs?(TK|^Qg(~)V?#@3?7;d~5kmdVf3!O@nExEzMiH{3hP!)sbp`%_+F4A{q&EDqV& zIUjjICDb2M`}D&`oBY6jbGO&rYYk~y#005?$i(7kk1&4tQlzrAwxxt(BhcamT9bk! 
z6+<5%k?x+{A)S|ShlWs~97}O&53MYuNYO^u(k@+}|N!dzEVIVrCx6VCCQ>Ff7l{z`X zb#=HS^ap?qW47^=Mxy_Xw&N|T&mJgwhX^Da?6E;Wi$Ors4bhyJ7D@-o3|j``5zFmd z!q*lCZg)N@!1{x^A2>8AVF__AMr^@tR`u;sSvgKN1^oH$hc%erZ;}<^z6+_4_(ZF*}K5!^jS7vO=nwB7HoFJvqX17h(tryxS%Y zM5j;%4Jjrfs)Wc%I{L6O>99deS*diagyN!1f~c}|UnU+o(qn-q_lB~g$)E{t&8Ute zp;rJGD47X2AjQ-Wc8c4Xas+%qL%2;)AE(4x4#)0!xv?Q3rHoJlpA^mmUx+n+U9z?u zp*Yk7I}a42XBHCCOz4@>nbAt=H=aMNrR9@PjLkafdr7;R8vo$(yGgqSZGKSB2oPvO zJPRlO+qEJrm<-TjWOBT(iJGs6ckrr>Q~`HibJ?H2_@7zh^_Kz5t50D!=-4a>hG4MR z80)_-4`;@kQ!u*>!q~TLqy4u=<7vOoiD|kZm40jSLRz#jFHEf-FhoP2q&k+ybbw=( zi>=1mNzvv2*|hn$PeW}pD=K^Ag_PHsqGgXod-mSEWTi{pHzm+z$l1{b&bYyk|A486 z!Oi52p8EbsiI7%QVaXgrk{*Ej3OLk+wAJKKtb?^s-O&L@r{n>9)`Wa|XwoFIpER4H zq+K~8f%887Bp#KDVR&qTIBrF|dKcPhi(!G4*f4AgHo8rS548loU4=5fxObCXN0}13 z)yDp#09D!A6e-3Vj;SHI^374TA}*yJ#su=ujF^m6EWs=jsSJf7OZeF=6nr#|EQ?8nGWS)g(~#dZf){ z(e#ya;8vz^W0RP8iQ-F2%fv0n9RbCxIInX1-_YQ{Oq@)6<>Qq7yTVBg+o z?@hin(3j<`8sF0`!z9-{(4wGx3;7KTUJWiWulQ!@UL!P_b=-!6!wZ+=Ui`P>?+~#W zoAB=R8^vGp`ZO$ET6bOUw|7AXL3;1}$Kd{@tVqdK?ZRYTFs8-SoFggbxelbs0^J+ZZ0VyrfCye=cIo{%FE?+QV)pDv)C-_;VPPV=gD0`3{WKWuw>+!}A$m+WVb>`R55m7CN0^ zAuXB}?rZ6^j8|^eHC!YiChH%6{Jxf4Yu(b)rb1!Y(<D5r6&vPpA(u?Gj!MXI?Z-%xlEj?H zvg}jeBQ0OrU;2AJaTrrQ(i*jbbCaZMml170aHw~JFca1J8t+_P4>MaM^6k%5Anj}6WFJ!{dY}#2b zK8fKFEuv8vdsCuxS~Q9yWz4WosI>Y76n5i>ub|M*^UwbbVvF!9SHN;8HYoG3z}Tj= z?~d$l!~XBnQJ`4Zw)-Gl(c;R@bhB=-KC`h#RjYPYY%gB7`-11AD#Tu7t&NFyrAMuG zUNeK<7;k)#a@0R<7(W6or5wQ&t6Lwg<;R20c4k8)>373F_-fbkYqyYLP2YzkF-(RK zz`1_|bGB;h)u>Y$uNuf==qJuSeg8F>%4=k~=R8tQ%_PAq%?iM@iE2b@oVmF^h%Wl1 zQ5-H2slgRhe!&u&(m7=R0lypz9mW9Yn+USWgOCx1XjIVAGVB9A@?g|+V5dbK+l6cP zJnn%p{38SkE2%IepVw(yP|lE8HUabA>;K20_by!Bpkcs8EM3t<%V#V=E`Pk)=o4Ko>xWibOF~T zY(!;DTx19rFi8L-0oPb52-qmGcK5p65iZ0schhY$N^Gt+Xkn{GRTYMA+)OvgO&_PN zBPKAXf#9($OePKZLrycuwdB0Xw&Z-FND+DKWn0kJmc^BZliJ)M4Ghgy=~ji&b+x4E zImXCU^f2*f+1A33#m7!3^1s6kR}Ui;oL|05^edk+v*G>DA0By{R!{2N^0KFWLpOr5 z*##dhLItfw5lH~UJ|PBsh7(pKL%ksmS^8ycKVPkG6qdM&vhD_P)7m&1m7+dlSFT4@ zA0-GTa!}LLMR<;XsVG#~goa2L3c(;<e%W z{%}dKhgK@uW>t$AooWWoh?1U3Nr`AFogiVBoOw`a0$2JBWi-Pq4Q~9rbMBHcV|YU$ z;uI5q{TR)DpF2&G90Hn#l8KFeA3=_ zZU}$DQ_%7iWfsN%L)be8$JTfI+OeIi*tWg0Vr#{AvSQn|vtrw}ZQHhO_M7{D&e{9j zr%u(Wnl*a#m;N-ndiMDL#&wb18@Ie!1MfO>=Vlc18Efp*Vmx6S1-VnbK+l(BVt+V| zf9M><`*8`O0gO|D-7}$pj0;==2yOb;bF3m~h;u#-g)dwuT~^K8SRsi!9uhy1g}2?k zOICYwr6}sf?|X&wzu;?>w}{q6$luZW%dW#=YfG?27L67_dQZPYlr@$hv~t*#KxUd} zktjLwli(5%QefJVqDoXCQhCcVXc7VC$Xav|CY~yWqe)b&cP&Ok6sL7YEbL&lfd@!t z(~QKe^8GIrN$|Nt>Ij~W|wn}@cnoWar8Hl)LtWrPmL zq0SK;BVUGHH8*u>mF z9oW%^mT^@gl5opZAyXJrUg)L`H7okUMPs1AbBvUMxc-cD%QXM2m1NwYH74@f7Nsjq zj%;qWv@Wz#x~vkEMhPfWoc|{9M6?+Y%@M6uqF7*=BZw48)xj0|%~Q$dbIIVs_oRzj zP%Tu{wiOkafJOBy^aJ^c|DKW|&}N%Q?@ZHDro~r>s1mvsyxl>zFeNT$E;4&_weNDr zTj=U_mK(FQB2J;wX7KsbhOd=U(8(h-r?T0^x71Ctr99s9NuiRqu}OM6JWh7%mS|CcC>Z`tA-gnIz6n(1z-s?g-1j0;C*O zwvZMGCuC)1Dg27Lr9lRez>oa22|Kho^E)(UC30ka%?y-*E?(WgzoPiEl z8?OeYsp98)DMzlw{rKy-?PZm1wmlbr0kB!XYh$Hj_2+}=fA0j8Ym`@3ihg!it5~!G zx$hIGGrs=q-l!DQV0x%Hv5wkdeZ8CgVD zh{RH)(-b2%wQgoXz@{OTZs&~Zg4~Z?HCk1)ds&n>wTuax2&zJlUMZEhyj}xyH9<9* zvucrj19~B==OB>yldTmRG|MW9aY(+|_@>OTTj)4ldmJYi zOkSbBrW7nfvZ5QK*`}f?L`FZLC%?f+1CFY!e7!Jk3)w&s3#uwsN6<3VzuPYxXMur2 z15Sv0Ab?^t^~COCZPno^nB5@67c6AZ`0-!;{=$JD4qK3ib64PW!3wW}S$QZ|Wdqk3 zr1*>c)w640W&0Ji1^?;&yiX-}a)5`75T%w)T*q-Rx{!*gyA9yip~h_wyaHpb5O>`8 zmC*zJ0{IoSo}W0iC(jWm&DjkGCdEN1bjMk-yu!*FuU}Y^PVCa@ZK9eQr^!Ly?ntux zBy3k4Jo7`^2~mtEsv{~tX9$E>o1pqf#mrL#y62(23H-)RJz{xtM2C)-l}IqrB3E*c z(n2{{^JXWHo%>YwOvTOe+1pd1MR}lzom7tzVm3tW?-7Ka!_MEp^ZHm04ETJ5K@K$4 zFs*m_91EGd4<7dfjGZv;GHqa!9t7BN40rJ(yCm4eRS9Ne6ai 
z%hh1CyuA>QmKcQuiTi7@taM!Um4ht0F)d^k(EG~ji2Mt6=E_ysJ0`ofo&4>4*bK8~ zhP>7}`d*ZnHUDUGc_<&aHbdD=Wq-cJk7E^-t z-qG^sA_tk7k1CoqC21%;wvE)5__to8yI$_fRH3gF<++XR!VKU@%E2?>%tGp^_Er?w zk9BPXmzVrj;$Co@tDR*GkzmN*El~m&QerL{DH0mA2#OID5pYbPJIiO2mCooXjrj9^ zLzr(jl;mGbRq7r^C*@}h)5>_E^{u4~pgHDdC#D=KODKPWsi1R)A5xeD(ZTijiA;>` zHK14*8-DBNCvt3%l|IW~rmIBrf)^{AX^d!OXVDczbreBu_pkd+EYc}}o{R;RZ1SgK zk@>I2wp;{Sl~$YVj(d3DcTbc@0REQhmhC6jCNQfgK(~^&TKEC5IQH+LJv8Ke;M=>$ z@cED9YmZO$<2W3kb({Uk)3OJsddn4-_|m@;8SNSzd{fe{e+7&;GK}uffo`=}BwdMN z_1F&93yzD5-%yl?EIp%8cEFH&Baa*`y9~@<#ZGOQ%f#|XTnbVr6gihrW9<=x=x@Cu z6GlIjz>cLcOSXcNj)cbJ1%SejIQk0B{fEOc8)0O)qy3Guecof9BC0C+hs0ur58SYa zk1)|wM7$D61a_3B(PMwesxQJEyJN-^7J&R0R?uFb#xDh1TygX^qNk#_8&@m`F zX^)s{5WJ`*2Q3AyGUc{d7#fZ!2$szBjnt?R*5swvj>j!PTr&q=eDwsh8hMNk*DNmb zDrrPCIz(|xUS8DfC7u0RS6*vtuESYlCdf-(n$nbiK(IN}HGd1d;pfqM!WB|pDtSih zbrkEIbfk@*MI?A$y2is#RFnmW;Cn)61V!c0MbhHfB*(R=zx0u-q;n!=>;DRp_H+C*Xpz$q&4)C5?4qQ7EF~7@sys8WvTIe- z1$_}ZuTi3-U*tr^V%WB8eiiGuPSiEQpFEb^D#OgvYW?`p^0!R=%e>=5GA69?UNpTE z`D0O<7F_a;$(#%^FRS}e^g4&*!rTrxhO0!+xAqs@(Jojq!Tl!+{y8E;d0)VyshXRc zq^y6-nV3bi;u_fSCJB&^M!@!;-BoU>MW*V%f9)#X!V%=XR>YI*HNmzx>j&OOScFv0 z9T(~ur;dQ0^xKt`&(C+|8Z`E$&w`jjSUQ>iT`P<8iN9eHb#Ya4X>sG+z$~+d&Q&IV zsU6YXMi_Ljyu)5BJdX zRvWs%Dc@T@pNQq!r=tdI2QM^s9Go{Pw=GI9RLrt53_d?Ep9%=4ZnVp4az zUJ^aGAbp$n`7ac2DS7XLo=QXcJ(Y^a_DxfL&EfcjBu$l+sHlb*7ZZcjcC#q!Dm^-+ z-Hf%#x~}k1Mk}faGId$;x(YHX;U}Hs*{M!y3jbY5pBxMkCcdz@ZC-IHJj@7qX8*Q&bk~eem&4cll1~>au4aID?D6TTkKwno z2$n}DJKe(ELB^`8^z(|f(dTfvJpUbEVQV+Tf1$CnySsTC$iU))01uC3k|*lD7M1SH zx)6#!=MrJf#Nf|!z#AXR1^Quq&y?AbadVx1#)LUjuPoE0xe4Xe&fX(uyku&0xX9se zdGveiuCWTD3ADQ4cRwowDe9f_2@rcrQDC-;FeSes4;o~lZljXLP-ySF#ig5`;j+iG z#v1y8YS!}J#oAC6BiQ?DR$-c4B@$c~n6(4mt)*q%pZzSSwRRmqVlr^`wmWcQ2b}mz zR7!C$8zqcCs_&^J70p;zy6yj1DaDsWXw;fn`ij@F^Zfr@Wz_2D=KrlKV;y9&)O*(o z*NNAh&+-`P-SOYphxGrKAud5@4trO2rNaU>W85OCe5emu{%HOx2Ez%GmA`%g0I{gP z#@jzMousF=GXl6TJyZ$jXVhrhue9;QgYbEdtiT@ciFq1YKlPT;*`x(19fE$dniSuC zT_UR7`3^y9^k4mD()Qr)@U5eV#f!h)+6*nz-^Bk@1QX6V35 zYN*4f4?p&pLfsVdt+v7;2Z;Y;HUsX3DPN~ADr|U)mHkv2;pvRM0IEHHb|+!`EC7n5 zjDsqAgw-2^;Jn_)M9^nWe97SINB+=|CjDR3ap7<3I7zB1f4c*z_bjb{52r|CRYpQ= zGUyg|%A8b|qH9=bYay_N)tH}+r7Z;9Z@wr9{%Iq)NCf-=FCJ}ua|u1}`TuD#`;OZU zFofWypKYoB7ic^mG{E6jIYo0|EVLRNs{;tnO0*XIQ@dCFlBkD%QICG){}m1&-AoWP zt%Q%COvlfX%>UmCGlc(Am@x$Wqc9`>9}2UGBQ^;mSw`V@L#TgN>iT4z|06KF%nP{i z`h^{#^tm7-{;e-#FZd5(JWEXu5^7$wL!2PmZ8mQ85QXI+;c%=1O6J#Ffdc|8Lj&NJ zW~deTgpk2DL~Eyj-7=g0u9Cm6ix4L0zF$;eiNq4XYrUz6Y^$5&gP-oDpb(LH_YtqU=&N z1bE64{sa(^bO`jBafq@KUoQh-h3UjDyDexEH@@Bl@T+XwV~)EX0CwNLC*z&hCIJP| zgLR14rr!MxcBp_a)5hc+-%UNU;cjaLej|K)Smr<}ko~tZukC9{Gs5EIEm{tWE0g}- zw}Zujgl z_bFFH?=!dfzk(fQ3tx8k2)6mW6F4`5n4V~0r>JUUwy|>PzE!wE3nv%MEN{XWxFVlO zaezQOUz*N!5G4Q!e4n#U!UDgwa_I(}HHL z(-4U*rInp5(GOx^o*;I@sS0HU2$>@NUbTRzvkul^dfDOh>QDgp;h3cX_JL=M>{#)# zmKoO+N*`TXPG3$Qo@;}rYc1f+aym<^%ZVRJxIE(dxwZY(ecxGM{|jz#B5#kJfw@ub z97MZJ$8u?BdXHGUi5M+F?zzr8KuVk4`dm<(G>lGv3kJKB1&qj2SEB%S5d=q(LK-d3 z`A=@V6S0G1`-lZ5t7(fkB&cjyN2pytTiiJ#JE0ZhK<5IH>>-1nH&n`B5}`v>mZiH) zSqlE?^IH5pQ##p>CT0PKi8$y(u%WY$J6MUzWKbVrQW6~w2}rz1b7r9CrGxehg0}oP zyWn)wjTSS=eCY90sBbmuCz#L)85|M!Ab$v-B`zm>(hB;IIQ5lAn5*56Vt;QxpHg?4 z5=5dLN%?og3}jl*8h1#~6v3O@9snMBCrJ!Uo?=5=RP@if#IroGLI_@6O?rT>t0RNxk`EG%Xdh|~RKVFOz2fwdVsE1a(ozHb(^7MgQ zF*8td?jSsjYYAaDE~=!y{F-m(z_`xe2U>!^dD&F&_+;W@zwIc_(Uryo(>^h3*T=r3 zbk?Hm#WJC%wTNq!$^6R`l9S8NaF6X=&@%WB)3*LBuaaXy6luIQhKAWFCth>X7t7tE z5XQq-bMn6~aH|@8RYa9HkfXHTYRXMph5K-;NKKR1YlH`gR3tG22QL2gA7usKk&-A{ zM48>@Flr181x{YPj>?o@72$kS#=CfY}u&Q~+p+)cR|wMY?Iq zPQOnTx|RvC&q_|F1?*_ij>7ATDlLTsb|X)rh><D?Md|x^8I&jNjX(90o+3Wd1zqW>tN`Q*(leoA{lcM-VTp8brvOHaX^9^^agKm+JS@g-VU*g 
z6glPqHPe5~Q5EbyUhta9^;*Bv7nKVz3#o3}uPQSkc za#l4De%eXcd2+-5AVscb-fV~rwLV~|BjWaZ$d=K50>{Jj9dR2R6!9N)1#-Ri9@gl) zC_F1Bze_<{oFfhhhx+h1--!ZpD`6;iyzBBGxt8*B8kTGK15NI37&1bX$lPpwc(cw8bzMk}qh#@y0K^vHDQ!JJOCIFd zo0vx5A2e2pO}rskx$hO0xcJYFZ2F=n@fB)Dl4gftX_q5Ra}X{!U6f2-Gzggb;9%t8a4I*v9F)Jc;vReGvG&1J zrj88;4)?q`H8Tuu)lyvh5*+k}VX;dqnG(&Sm|=m7`*+jT_NCt`77ebHz&^knZDiPN<6lM{QaHp?3 zTR&r~rr%i|?J{@~b0LgWB(^a)&+$OBeu6foQ7_mR%W@p&2YkZN@D{uk$Q>-iPnot` z$Z*Hq8q+i;4+w9-<6Wj?FE4P8!q95{6o zq2Z9&kwHVugw7cCGij&MW*zDQdFU7?=)3HT$_{lD$8U ziY6JEbdo=xZT@!aUz!40^D@9WVqXxGU&FrUYSdB5XTc^ZleUV=M{yV+ZuSash$A54C&PeF&COL^2Od@iM@ft~0p()trxU~`*AP|L! z?J-~{YQ>59ekYP;q3FMx$K1Y)A*}Z}BVDdgRY@%)Zyt3eKpV^)g&J}FITq5_>~#z( z$d{){fWKOZXg^)$=Czt~c5>WOhkNx2Vo;%y2Yi6~54~hg&NVA}IUgk@FOUgJtLG8 zH$7C^rP|#}{*!o$bM{4-)x zvqUJbo~cOU5XIKMf8w{TcLW|cn#cBwqt})8CS0fe`JQ989hKTYgUsB6Yb_Av{|qt< zIxr_o(6hwL^Kc$-AgWJg@$K;WUJUu`agD<6gM@(R^|03EJjLZTZ{Kq6e7kpRWpL{` z0JUXOjeDNLV7pSoWWTR1=W`h&iW|xe-cR+ur|w!3tamT!xr!xzB4(v-BM?mRPf;#- zz+xUldE^hluO z1@iolUU0pE*{emsV|xNcKkhv9QhRLM*;!D?GC3LGd9yMxjW`(3ma+gZh*lfm98Otu zO(Rj(`6!t3e#pmyxMR}2QOJ}+Oq_5w5FCWW^53>NQI>3S-}@6sBn$x~ctt`&he{Wx zl3F90A#A7=@g$Bt>^<@>8A}9`nhJ4R?6lL{gc{DkVffpY=DvF3+mld(l$=`B)OX1XVcTKr!K{ zLs7N(9yKUPI@-aUt~#zqC3)cx5uJhYxgqZC51iLcRQsYN{R2nAr5lIYK2}`TWW`+d z+(1DO0(Z9!31+-E#C_$L&BHLV;-SO3)#y--y|g{=0zlqEIIkWPs{n>BttT;aAr^UH@%hk)mp}jT!h1x$lIb_nfG&$rUOGiN784N5x)1lsn%S7U$h#+2i+za^ zsW=MnYyv?ccuQ?$ds6xxH6;+UW)(zgD{8e(x%W-s)Xdup$uyte6KQq}t{3t|7NT9# z8@_h7+s|rt;LpXK=bbW7ug%&pK$AZd*}p#gqHHnO0`gLy?rTFE_SY&1xVb}HOeN8> z0arG)<;l&9j5C?_Bx;a}+`Scld;SBFRxY;VM3eC=m(81d1=%p=I|TnaZyMf5+dSUY zYSQr`Msg+lZx+1~y&Z7)YVHjf|Bj+-eZ>!?O8K^erTkZUsg&|KO`sKhYIP(+DCxU6(&qL(gdi{XgTFxbb@l3gx3kZ!Lr0~)w5+{j zuOm|)E>cMm?&*k1Nc}sgh|KOFaNmx`I)B)$e)%O+15nkB81qS7JpNaLiJAzw+w9hQ z8vpaZ%X^klw^-LZ$GcUxXEy^ze-TE17rW}GW7hZGw&?;G8uy`@Hhju1ry2i`MKkH& z4*;1U>(Tm8`OqQlxwd=0=57AG`|Q%1`G^LeGuNDf&m43dSW-zy7Un8?pxjt zA6cK1I_}$a*JUr!y=d_b`7qs94%zo84o7-@{Q43~ecGzRJjZFK%@kpTwaG8a*-=f@vQ z5tYoJhl{;y@;=2TfzaOMXL+U;oevs?!gLb$wh3 z)UpgEpK1k3llTP-(wB7o$=enPjd`t&j5$%GNz&iG@E3_*T2&C^SupyPd4z~~zWcN; zg$mYfk6&_lZZ-#0F$t+^1|wd0f>r&E2Bu{@ZUf6d^im!jPQ#oQdyhz1y5dgw|RPp*?1fi>VD}$iU11_z&H?%N4`;ozZGZnk23}tTZBQN(>iOGD*#R{jll}h8PhzpouficTN1tMJwF8vy z)Mi50V^i0553fd7mUfo@FkQz*=h~hb3L*F+4|)S0>-W6nWw->M40WZh?8R#`rVqrd zpR{Z|_YhXI3jg{fhSdvBP+ACkMza_+@=Ve_`$t%DPpG^g7%N?Yn&y9cVUw+^0g&~3 z=bz}VOg2*U^Ih8^IR#5F52bOx)7{I%^!q;|wkxS)c{j&krE*pMRoa3RN;ZAbadtVw z4iL?btX@d!J`W2f_8CFa0)pg+$QqGw+kY#etz~eP8fh{jXyStDu=cBS?UXJI(uOWJ z9`hLs(E?g32$eoDmq2%p?Wj#947I4KSdlfF6=3sgYb`|%g!Oj&vatc`(bPzf=Fqmc zDvCA8#>4!b_u7Cm4O1Ncq$SpqQU5-Pc0&R>lHHBHp9z zUlSCLS#eoNIQ%a}4$>`>DhrPw1+E4mh#VP1I=Jy~ysi5y-y+c1gfF zk_ri{=UCz2`;LvZNkorrk0|SNBQ--Dw9hNY^)=BJrftg^D~-VoDM zmde5q4x#$O>7mAlRbjfQ)i7t?`8D$X^J{KgxtXXx z|clSRU09DH0wrxl^(?4Apvlm+rd>Nc3Zb!=w zo=vNF0eTO4h<$dio%dZI9(>EU2i#d&!}ke}JS@!cI~5Kl2aVC~`TRdC$CU`fURx^m z!sKvS(|M3-yEQ+A86rakP_c)oHIPQK;33#Q+mvrLY3wC}y$nD**taU_+Zy4WQh#E= zlm?;`FXUwIn(iA07q`r9Bx~|36+xewLxbfy9kks}^q{b5Zam9ML99aKZE`nUIQ|L8UuIxo#!98AD z%H}E;T`2km;m0oKoG3VaYQo!1%X?WukN0w#9V_&rM?O~2z*(`_)-YO5J|rbjixnBX zBDP2@;dz(){dq5b4{>qeIPXp4Ia0K|=ofoX?4Q8NlT6LY6_l1`%hgVK7gNZ5*!ETH zbvd)?+G>C!M3>j*o{ALwvau6PF6%OI4eT;7JH`k9Jm*UQ#@X7kcC3UI#GXYnmzwVr z(clCKplEgHsiO=}t8 zdMw)J+@r~VZbF`6s%5AUF-+mY%F?cpMu?1M@+SbkhO8bsZ&i=>kVI!MV$2FU6oZj8 zQyhzlFTFJB`%k)vMqVNSt&>#k!Gas0-(JZz+l%;{;zWN?)?N^1z4bAP^-HT{` znlsiI`O$;VPoefJP65d{XHfDsG}i|5Y{<%Nd9GX{N|$&XNFee7*U*bJI9KF}*8SRA zwDvLy-uB@pZ%bYj%F?qf8uqx;Dnnkq(jHSgbGh7#_iA;b#E2l$ex+d@3VN=_#J{&D zSb3Eq$0YYq`KcxeS?vuX!(;>70xOYAe=bh9Vj-ke>Kl&^ssFWOVV-D_?q@L$3>|#j 
zULGBkbY}3^GhyneRIe7Gx=5z8ce?AKbh%=-oLk|67LYjWVLIK~n_T?Pd0x*Ti>jFQ zt(^nu$5W1k>f`f%y$_05CD#z1AxtV&Gp?o@u6>?l`}BGmnCshJvh5NJwXJ}SQ0zAS znsgL*pdPFG?WKd3m|U+%bU&>l7ko-P{KBI zS2^^fMVu|svJw)vd9wn%-WW|F&{_OsdlXLT!58-T&nqz+UKV6TT*Sdkp+qcQ^j!Ad zY^P-!$UaVu;3xlQMk_EMYb*7wybUmRNR^vncB`Hm%pN2nyGuF;K)~sC>iLXSgK38d z3fLT~&&tFZ1HMo7CL87gByn=c1)aG;emD6g`ndK(xDV`Che!9#68vD!`l!H4`){vF zK$20e7TVk^BCnRRYEWS-!znr3p`RsggdA2osWJW0YNnOmpR)E@yKLhi(=`4-teR4X z8KX3hae?H*qLBmk$@jzahytEf7wQymm*Oy37cOa@Q*{F*rY%=L9Jp31;Nzs3OzQhz zOp~zMPCfLS78E;X8%_K0+6#mBAkcEd2vn}#o;KTzh2f8%o?agT4HgJ}Oq_Ag{6)vu zcu!ph_h!SmCMv`g6eh0hR8r~39e;w_R~E2J84dC|al|bLjX`JG_1U4ektTzgn!Tae z^QMr)_9PCI1ml{eo<_PgpDFSpAG(mb{2iH7lzU%NfOyz~pAuwt9W-?5DyXGL24&=% zVr}46V`5!oiOHxc(1qyFEt(lH60f?Q`h#)#g+P{Z&H8$cWGD}Y{mgsC7)3652hje& zJ!x-|x2v-xK*uu+rSemKHwm0Vsf4574a+WEqs4GMZyA>PGxD@tJL-fSvVyxK%fi)i zX09_uN-psNR(fvN9a^DQ9nS$R8H;j7!xUI26Ge!$nprlBg#U|h3PdU+o^g@^^UO;} zG6Yt-@H`Mj3c2zeD{hQ&mr}wDXR)+;2UEQ<_2aLyR>j6g)pXwd$yCmOA_Xf)d@r!d z)ZWv{NIgS`zAE$7%~}UjX3HU5Y^hl8kRihP4Es215 z5;4x3Bt+cA`_DzDOW^HDWJk)P?G!c7ayYCSs&A-b_;5&byFuiJx77&|?P{&* zRLg6e`B5pe{)d2wQ^o;2BbN~_b^~bWl6dPH_FnXkB;H9?jt_<^@F%atRydDU+<`vl zD%_4xR+P4LXKoF>z=Yjw9VWQ4`O!M~qMwGM0SuH7`cS_p3@Oos8%9AskQenhL3?Ma z_Ydn+f+Gn0g*yado*fHl=~A#C`vUW1aD>yJj`Th9E7ieD7rt`f`=FudLit~({wDTz zM+AEKB}qpd#b2ZdwJlELE{ReJC& zt9EoN5r~9mC}MNE!{Ftt9oMW8jlEb$U180(9;P-aD8dttS>s@)5SD9p zV9<$2YC`I$lAtH;=y~s|dmK%8Ap@hOKjiQW3ku0`)pfQh6jl5-AGrg_T?pB+IfJ%g@J5m@Tr zr4zW5Qhx`AZmk^|0bXXfc(1VlXeq!QT+78oZX zIOi%aUE)7paIUWf8oFT}bYUFiI1ZXkLwy6$a&9SS)uq6IQp%iODRzvXto5iK=<#61 zSCZtEeq)w0;yRZAsL<=6*OB1;87V9=^sPNof5FUE=BhQh$86QqJ|@^v6|1a-bn86~gH z^zNCjlAkXb)VyA*zX%~4t>Q;J!O~hFOcAoUluGJDlTdZ!mTsDJ&25|s0uMIj__c4z zz9I57s03ELidN zKLR3b;_FuwBf)LBM>XV{|JY){^vc74oGjR+{iK?>B(taRKz=g1qC~!+gkg`4LjESW znoO%57D*nR$cgZ>kJDN40Ha*|GWW|LFg?MkpwLa@t99hTWrU9!IUK)Cp@p&LfYiZm zcVZ3RPW!vOmL9vPp^+BhzBv%y2O_lgr)>+W_-YPa<`BD`I2yl4H^3tN2BKO2AUZE& zV=GdvmD0en7&^7JpkP0>Rh-~@vMDCUdV5%`y`AN3PqY5qV^)VZO`bsv*-%PmWn4mn z;^a@tR%UMPh=Y#xZ>Fo!TQTY{8i!8`2KA|z<4C|Jrcdw9`3K)I`Hy|h7&yF1ikEU= z5WgTE;))8UT0e#~VtSFl8Pd=aAUlltkb_S^3_fF^MDr%hOk&h}=0Ufs!=+YaTQ}(jPL; zVMR)72K?0b06}?mPWc6~2i25&fv41%d%bAs2aM5@Bt(x6My`FTmd>l~>`h_A5sx|ymHk5X zpOp9KK1OXJ&zFK_qv4dz`QY3L)Ei;=xn?17%^P}*jvBoxn787Bu}MH2Xo4s?)rX2! 
zG|3UWpW?)6Nldc!+{GG*K9emEt|8+r8y0t+Q|%b|Zr@2s_c=R5s_k=1HzqsTIA!8xnBsQYzNn*IpBKM;R=m(IGic?y5B)I^`<-h(Zr4`VCbt zjvQ7w-HmNais!OvnHP*{)L%)>1?a|nFp!5-OQk<`Ee|AaCUCtfmQk~G;IJ#N^LX+wdIer6XWBj57Nz%?e+3y zCJScd*b2|8<#+>3o<8Cr-{2!?OvDXY!LsLd4>RwIlg9xIi7~~W{Etw?3aibjTV?DV+Nn50Z4bz$01jke_Yj!9!^_`r*ts3Li-n zf^yd2%jlx_M|=B1Q^(aahL|^;wH-d2p%WiWP)sQdi&kfz6>-yy4_?iJi%Z|isux4n znV{K#Bgr3!xu-}5)&0hf{K916Brd#sD46E=n@K+}ym{6;;OtX`6(in;{te;vbL$Y(EdEzYRYX@nh9m&b% z0ib&tJ04L>e?VPFmoj7gB4+^=OIg*z*FM#iM{n8x%=x}3zVQ=sUkB!yT9xtTPWYV~ zCQFhTq0Tv-$~#9tNtrw>yfVE9C%C*f5R%(>naTadU!YW4PU5Y>=6rQI#F7z`pQkI+|KA^%6#~id{({$jp~%klU`)xe1J>Y@X*> z)gtcKqI!SfSgZsKN5qOy1A&N&D`^%aSQ!vKx9$FF9+4KBJw+SsZ&c|k0w%JV?sR?8>{NkmBR`t!>NKLpd}zj}{~fXy(*h*+Cct~Nfd0FDkc0J@3D*ky z5h+@BMA?=A3(ZTc*ydk1<*i&Af&W!BI;M6zT)5WxS^EGcB@b$G?8KDdAiQ!F0!DT5 zj?6xE10b=1_{smxe)-m_eA-x5rdVS3=w1d|m{f6sB3CjGm~iy@7H!!)+sju>yNZzifpR@f(32CWR;nq|QK6 z`JLWGQ<(e0uf+5uki5SN4ReYOe-)+w5D(rMiBO=uov?tquAHCu+@u0uz(E-LQlRhQ zN5Mg|IvzObUq0ZfeRiOzgsKzFav&rqE&et>bc;@@pVY5`jkKNEx{ET_{+tG6g8px5|De?=2bsK_j1o?zx?vrZ$9OOruUg8u6 zoKl3Jd`x1d^s3PYNZ7vYiYv{lNutWyUYou-6~u4agts!D+GejTYg6tLuRqRG&Cze$ zrhK$5V4=6g9z|!rDau3}#&`iwT!q~E?Y~D4#v|MWDm#-ky=Q=i7|pifjCk@NNk>k& zpu}GKAN0^xp`Na^PfDptAq*ndl%(sE66kz9i0=-8A5C&k^;ZZBb9hs2{Ui>;ex?{7 zDW@Um@GYgp6T3&^rx8r??~@LvT??2kFKAfBHMm8s_7^mn9@?BjvNmjb<`lH%kX*4& z=Z11aM}r=~fD&kTE$r)gH9SDDr4-3JwKqs_81X5peMwEarfUMCYm~S~`yv>5WAV3= z3G*tVOB%6=%%ngjXI32YTLV!<&ui*Q2rJgC8i|1>0ASX3dFK z#+zrQnWOQ20A{c&t1uET6OXy40T^qfUO>aHS-~CGdqT=sb3bx+k3W9_9ztV-nyww? z%r7K4|L@T-Me4+8K80us`YpT0w&5k8O0wb8 z_V@RfruB1Jh^M9Em&7}wL`LhSi}g}my3>I%UNhtQi^aO%pG|$QT9Rl77=`7hudgj{ z(9p_SA4kVX*89#?RWy6hh3R8k)01Q1%gwp=l&BX$nct$H^;6>OC=%`m#RS2~rGDeN=)2t*!>!p6X zv8VJUr>xvmmNE$q6sFoR0|oHL{CIU{G@g=jrmU?d5d_u3w-!V;F|msxY8#?k9=L8e zE2!L;IWd51fWdWBk8ss8q6K*6EACC)o+&Px+Dw8mhlt=T&FywvwXjSlq7_O1znIM$c9>j29A7_j2HZqM<}H| zM@k{@cO79Ack<@uGwoAQR@wsJqErKB2_tn-ilRy_TA7DvVVV2&eL!Io{q%d|frQWX zC{k{fffc!Xux8ERfkumdb=38AeJVr?z5ntl_c`r9%PQTYpMtAV%a+x9y6l&O8=p42 zmONhu0^`21M2a?RLGlYCswkmvB{yG#!QVFIEL+tx3vWq+dSzB6GF&IsJO@%sEF8y?wMHZ{mT)swYUzXwCDh+;~02Z+TO#NK;>Ry zgQej(P2Xz&rdC_(p)Iq#CV4!7VI|v)Q@df2j!&VCAJqqIp2-3?EtOOy%s!+qSzD=yu&n3RgrU)r9{3^c(T@*}ICh@Maq_5d zCJTZWTNHM{X?G`9MT$uAX03 zkTFJgUKB#6P>(+NU}=x@*aM>3aPly}lEO8RXed4-L~%M>gzjOFAePn@0W(v7q<>#Z zvh(3#;QUL-pI%`W^$X+pPNYowAwC+_%-OhIz~yP!md3)}A42f{{QZJ4@vP}#W5Eg- z4>^AsX(HqF#0tT-KiC>`=QOYN<~Rogr#Q>Vy|owBCp?HM8PSApm9;y$Dg^(QV3lN! zuU!j*o-%s%K)5n#O3wyF=<~MfhPwjoE=0g)jGE{yX>$qH!HztvFDMG;NL>3b;b|_h z){TL>9<+8&E)1(9Ftj&n)n1E`V3tTIi6EmQ@AM3*b%2n^np?Za;%>?}LLl1JR`S|F~L1>URdG z{q?Ck-*fFy+FpG(V>>QPW?DmpPA)=bGFA@PP5->=dmryMs5eM)a9!kSNvNrZ0>yuk z6|aN&Ro?uG>CrAUvPc0xWNImrM0b8ftTzlsAXIYy+Yhxo+IsPW$g2WFx(M+PzQ6zk z;9j^krE_}Xr(Z}ZS|(plo58HCu*vO#&UL9*KX*UVo6qhPF3lSeO>escA!Cn(zMPFySzp47S8#jeI)wBHSV~-`knB$rj&8= zcoT*tuAX`+hJC|aFz-*<@aUDWyo~a>{Y{3G8YA|^L?l_DC-siRmadvNBfGY z-|!|#ETxxIE;+S@W#br~zc`P*UFw+AKkI=tM z&l(M)l|iOaDnI@*Led9i9k6(2YlcOEVv`mqbdv7H0RCD3gT2*?z}b6xdXA^l=grm$#*#cLV(qClFHblLLT?_jd>>AEP~!!7U z;8aPDfotlbIBdVC{T;Z&NEFA46wru1x>>Lx6mu7?xbDESj5n)j*B6JkeIY%E18Dv~ z0G2>$ziP>4!k{e6(X%-IdD1<@*QA3Ab&O)1R9JmG1u$cN=Gr+IEk9;LnCKE97g@oa0S)HqptvM1&U@ptg`4W(LVAEld!h6NFBa#phV5-=el0*WdShV`FTFH{tY*Z8u&V z&2%y{@!%`qNoU)fcv|D$&>Cqibc8GcO)8cv3r&RP(YW0iPmO$a?hw>rAPnl?9tQAN zo;&Eg2Ie&=uMx@XV7U&I>%iY=9+vAgD6bJ7UNa5o_7R3#CVdGXkrT;eP%o7`bvz-! 
z>BR*{Pz``;=(sBg@!>Qvfl#p|1e?EPdb#*6ya*hu$ycfh{2|W$21f>q6ZQsZ8nY^KPNx67kK- zJF!NyGQL&YqpmmgATj%dLG&D$sOasksPJ2_;3y`kLW#H|DiJ{?5>aKDj%gH<;}|?I zIHS_oBTM76UHx)Rg|gS7chY8ew#1=pgZ%hy)VFHJW7E5xgoY=a)`4N-k)z|3&^RJo z=p0D4pz{nZN9)Fi)1O6tY3D!F!Tp2)ofrS}DaHS^&VTAppFf=c{1HD^wSI};7e4>7 zg>(Mt&qH9(hm)U&lb?r^pNGJn|IPa1kd>`=3G=fMDTow;Q0{2^C5!gLj=!kJl)&)kb8*W`R^)> z4-q^cB6$A03Fbot&wqmmo)7UmAL4hWF618Kcm6y5oc15T4hHuS_EZ1T?LSsm;lHoy zjTiOR7pp6)b+rG0l=_4H#~<+%U(Wo4ROk0^|G~ofVDRx^@bO^q@uxBP2vd&z$;CFb z+|*9SQ~VkpQTLzK8*Cy*h9Y+`3b`J^DD=0%tiQWNkIUofq+ZsKjqvd)EG9H`W?)_? z?*x4>xZ1kD0eZ6(?G2~TSKCuZ=}cs%6d(1wnQ1@mL5uCB)$0V!0C~_G&KALJ!Y6_= z{8p#40NoMjY`_ap?<$hafV!K0aJGNiZnTc~4woRS#P3@>JFT-HjO1KsFBF;!No%1@ z#zJZh9<>)*O|ldU@9FIa8-Go?F}^I$XnIGxpBDBxYy|Fh1Mh(FL>dXI9FcOS&qb2B+D+vOW)#KMeo(ftxd_}%C6KXM1R7Xf-|mmI z+sbMx<)jiVidG{QNN%WSOTRrUKQF&1|JAU3YWb6%M|MvE{8^{JNrp?STEX-fB~kZ`lCoyQP(Cz5KMgVrr&TY1W$H!-kAp76Eu#lK0oG%N%)}8o5%~ zt>ZtaQTUpOfP}9FCs}*;L=+81(NHKF8bu>3il>a?sY3Boqj(xYp|94B@oL@p+XnoJ z-kuxb?YYsnPvMWuLJP6dQZOzpbw+qW2H6V}mXXx(l?;T)LWmX+auz62>y*v6hV}Ne zbWbcgQM?_#osyvil!AW%3#Ax{qdFD@R<;>ZoTbkB6c(SO$%#K0UHeOxLJMs}(Ci|1 z+l`4Z)Xy;7%{so+Kidi@Qv#uzXv0N#HtsmV8{H0Ul(0~_POUp7odun_ zh62m7d**~|X=i@8ve0i(u57+ATF3P-tVTpzJO;%WV(nvmm~a@m`=?W>v(MS`|U2VHeEB?-Hr|$={pYF2A?k+19ITuo0`wCNbcWvR_jY{us zqWJD+mfzj@z_QB%yjw9o3%2$=3Eo8!-gU_EVe@Cvg3VtRIo{QFV%$tPqViKdrVPJB z`{Ksyzp;sr_IJKW6Wg0OyX6v^%4&frm0zOl^qDZVtxPoc)@Bl`2)Nz2t>R6U+#vQu zr7eCn)n^RJc~&HG}46V-9d zm7R=|^DmQ|upJf>9f$7bj>8Hgxu3Dc#C6XD45PloC}U}>?ljbdT>Z}R#z1tBgN|&O zJB>UGOVC2{^KT(NHmi3VZ5ETEMI`87D?gF8SxAH&*W{LVlZ>%3(nfMFl=YXS0?}#j z&DYyTA&g2%x|hb;UaWwZQ~-H4pmRx=*R;wqmvVIxURD((zoqH)(h)sV&Bkv@h@P36 zF9ei#ZXWk8MBU0+m6z=v`~AS@-If>(W*d#D>N4^EU8oFBziW4fx9ZY7-znd8bP#q?})d2@NB+il*n%bdOZ8v`53j(qdSNl$liEH zjUoH-bY)`}_?;2?z%jm74?b4!TPk3<#*r7Gj+#rvzwBV;71?GX*1Cx`7rleIAFLP9KTD=4N)U<3<0Nla0E*ac}Xji z!phAS{L140{$Jm_L63ZE_`f{9<%#sL%m*k$uBQnWi+p$vC8$6I)Ik3Xes@Lx!a%Q? 
zK^at2-OJ&Ik$V@l`cSs<+QMd8va2jG5?ZL+iQCePPr~xvyS}dRL0?t~WtWd50GJdE z*g+Kv?#FQql%9;@Ml*$-GhX~Ajf20bva_Mt=b~LM0fX!%3pJ`NBTzd>om1ih(2y3p zhfs$hF!F5qD4Z!O9}Tc|Jw1K2x)&=6jAbJ4eO(ziQ7Qa3@=ym6L!=i0Y)94YwAFlf z4l-#gW$db?R>s;s{{7A0*Dx&V8U;r!*I*Z^l`??3>+2=YZ>I-%0iaTw4#uBr-T)f9 zQh;AAuM1GcAG>@8;MDy@c~LOcs$t4gsqj{BqgsV$whpA*C^oBlZN|NjV;F?@cFm<; z$|84Oe)qaJ7luZLxloe!tZ>N_^`8HwM>kzY2cUDX=YE~P87`b8od09^Xbz=)d< z_}cJkA!j!5f)8Tl#S45<+OVZu9|62RL*WL zDFvxskVwbs0lCA!B@KW%*{=z;W3+Sz;jGO{gXsA{cmja?t5QTETEC*dL9JTbs9ne{~rDWN~Exm zoN${_sJb;iP!N6pVk*1~jJTmOuI0tLDXdz9N4rWX+@+WX(cI}T@kf@h%Bi@qX=g>6 zUC%tmEMP6%;=8XKkxPSXp|C*PR`MF#-8qH7T|5L1J;ZOcjAe=mBc6D?K{&>2M$j3{o-GsB7lW+HX12$>* z207WRkz4;c&NU3A4L1O5Zl3@q2$ogv8axh4Tlz5Hc7mXR@TN@&WP4V~g|hARI-rpl zEMwP9otv2ubgN|62_<6|!1Jb_<$8420z!;1U_Nc=y z1_T{V3=_mah9b-Bf*6Ux@B2Wl9200;JMds=^8Y!55kbh9b`;5s%1?Y6FRFQN;kBs4 z4At@|_!haAzYP@XaD;I=*f4Yv$-0R+&?RJIIXh@-} zYiq*xiNdLzk2loTB2HO2Rpnbf+DyS}olaUT7`u=hD2x;)(Cxg%hO)~k!d>27Y$&@N zh??;>?p}mQEFUcvjE!O(7-a%yk-#dUCy{ucSWENAvWO(nLez3zh5iRur#?GaipyK9 zUKBf12;-DV3Cx)z*P7cMjRixf9iv??N9`YJcirCD&UX8%b`57PdadFlfyN9;hS|b( zvmj=i^EUJeWXxq8Hq!=$Dvw_s9saU-o$R^hrE+_N8!1_zaWPKrpq1G0U=SIU&|rY9 zM50_+ic1md(1ljylb%E=yM@-<3au>`QaLS@qBV5HqslT#^9Co!R!8nqTsvvhE%1mz z1D24To<=ABIMkW-KGyNEaF~j&Ld@@qRoH`*wT6i;Xx`d>r9@;rvc?|~8Fb9TFzT4Y zjIhTRV>B@fk+etT&}y*A@M4t3HKn!cbaZC~PwL`kZ*}!t?6X*2(hqbqvGma+0vW0d z%x;^@2y{(o0i8ub_G|99oZDx?D3O8t4GzVhJz_mB8J#?OvQzkGf9 zw*Xe8o#`9@!{8kTe;d9TPeH;r>hC`1MdhF3!?1t;Pf^($i^|X9`P#~gsGawQwR11H z$~n>WQ_%ztH2>=6_|yID_%9W?mAT(K=Jx+*&&dDx>ht=u#`6aB{}(II9{hj*AwN>x zTW2-sIAjA2m+_9r5T_$Nvg!12GTT6hvwb56nxeb0_0#KT^@dO`7dG;hn}faWicGFt ziptSb0n>kXqx0lRy)NX+0C+M01*#sA diff --git a/lib/Old/Tensor_peek.h b/lib/Old/Tensor_peek.h deleted file mode 100644 index eecb3cd5..00000000 --- a/lib/Old/Tensor_peek.h +++ /dev/null @@ -1,154 +0,0 @@ - /************************************************************************************* - - Grid physics library, www.github.com/paboyle/Grid - - Source file: ./lib/Old/Tensor_peek.h - - Copyright (C) 2015 - -Author: Peter Boyle - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License along - with this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
- - See the full license in the file "LICENSE" in the top level distribution directory - *************************************************************************************/ - /* END LEGAL */ -#ifndef GRID_MATH_PEEK_H -#define GRID_MATH_PEEK_H -namespace Grid { - -////////////////////////////////////////////////////////////////////////////// -// Peek on a specific index; returns a scalar in that index, tensor inherits rest -////////////////////////////////////////////////////////////////////////////// -// If we hit the right index, return scalar with no further recursion - -//template inline ComplexF peekIndex(const ComplexF arg) { return arg;} -//template inline ComplexD peekIndex(const ComplexD arg) { return arg;} -//template inline RealF peekIndex(const RealF arg) { return arg;} -//template inline RealD peekIndex(const RealD arg) { return arg;} -#if 0 -// Scalar peek, no indices -template::TensorLevel == Level >::type * =nullptr> inline - auto peekIndex(const iScalar &arg) -> iScalar -{ - return arg; -} -// Vector peek, one index -template::TensorLevel == Level >::type * =nullptr> inline - auto peekIndex(const iVector &arg,int i) -> iScalar // Index matches -{ - iScalar ret; // return scalar - ret._internal = arg._internal[i]; - return ret; -} -// Matrix peek, two indices -template::TensorLevel == Level >::type * =nullptr> inline - auto peekIndex(const iMatrix &arg,int i,int j) -> iScalar -{ - iScalar ret; // return scalar - ret._internal = arg._internal[i][j]; - return ret; -} - -///////////// -// No match peek for scalar,vector,matrix must forward on either 0,1,2 args. Must have 9 routines with notvalue -///////////// -// scalar -template::TensorLevel != Level >::type * =nullptr> inline - auto peekIndex(const iScalar &arg) -> iScalar(arg._internal))> -{ - iScalar(arg._internal))> ret; - ret._internal= peekIndex(arg._internal); - return ret; -} -template::TensorLevel != Level >::type * =nullptr> inline - auto peekIndex(const iScalar &arg,int i) -> iScalar(arg._internal,i))> -{ - iScalar(arg._internal,i))> ret; - ret._internal=peekIndex(arg._internal,i); - return ret; -} -template::TensorLevel != Level >::type * =nullptr> inline - auto peekIndex(const iScalar &arg,int i,int j) -> iScalar(arg._internal,i,j))> -{ - iScalar(arg._internal,i,j))> ret; - ret._internal=peekIndex(arg._internal,i,j); - return ret; -} -// vector -template::TensorLevel != Level >::type * =nullptr> inline -auto peekIndex(const iVector &arg) -> iVector(arg._internal[0])),N> -{ - iVector(arg._internal[0])),N> ret; - for(int ii=0;ii(arg._internal[ii]); - } - return ret; -} -template::TensorLevel != Level >::type * =nullptr> inline - auto peekIndex(const iVector &arg,int i) -> iVector(arg._internal[0],i)),N> -{ - iVector(arg._internal[0],i)),N> ret; - for(int ii=0;ii(arg._internal[ii],i); - } - return ret; -} -template::TensorLevel != Level >::type * =nullptr> inline - auto peekIndex(const iVector &arg,int i,int j) -> iVector(arg._internal[0],i,j)),N> -{ - iVector(arg._internal[0],i,j)),N> ret; - for(int ii=0;ii(arg._internal[ii],i,j); - } - return ret; -} - -// matrix -template::TensorLevel != Level >::type * =nullptr> inline -auto peekIndex(const iMatrix &arg) -> iMatrix(arg._internal[0][0])),N> -{ - iMatrix(arg._internal[0][0])),N> ret; - for(int ii=0;ii(arg._internal[ii][jj]);// Could avoid this because peeking a scalar is dumb - }} - return ret; -} -template::TensorLevel != Level >::type * =nullptr> inline - auto peekIndex(const iMatrix &arg,int i) -> iMatrix(arg._internal[0][0],i)),N> -{ - 
iMatrix(arg._internal[0][0],i)),N> ret; - for(int ii=0;ii(arg._internal[ii][jj],i); - }} - return ret; -} -template::TensorLevel != Level >::type * =nullptr> inline - auto peekIndex(const iMatrix &arg,int i,int j) -> iMatrix(arg._internal[0][0],i,j)),N> -{ - iMatrix(arg._internal[0][0],i,j)),N> ret; - for(int ii=0;ii(arg._internal[ii][jj],i,j); - }} - return ret; -} -#endif - - -} -#endif diff --git a/lib/Old/Tensor_poke.h b/lib/Old/Tensor_poke.h deleted file mode 100644 index 83d09cf1..00000000 --- a/lib/Old/Tensor_poke.h +++ /dev/null @@ -1,127 +0,0 @@ - /************************************************************************************* - - Grid physics library, www.github.com/paboyle/Grid - - Source file: ./lib/Old/Tensor_poke.h - - Copyright (C) 2015 - -Author: Peter Boyle - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License along - with this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. - - See the full license in the file "LICENSE" in the top level distribution directory - *************************************************************************************/ - /* END LEGAL */ -#ifndef GRID_MATH_POKE_H -#define GRID_MATH_POKE_H -namespace Grid { - -////////////////////////////////////////////////////////////////////////////// -// Poke a specific index; -////////////////////////////////////////////////////////////////////////////// -#if 0 -// Scalar poke -template::TensorLevel == Level >::type * =nullptr> inline - void pokeIndex(iScalar &ret, const iScalar &arg) -{ - ret._internal = arg._internal; -} -// Vector poke, one index -template::TensorLevel == Level >::type * =nullptr> inline - void pokeIndex(iVector &ret, const iScalar &arg,int i) -{ - ret._internal[i] = arg._internal; -} -//Matrix poke, two indices -template::TensorLevel == Level >::type * =nullptr> inline - void pokeIndex(iMatrix &ret, const iScalar &arg,int i,int j) -{ - ret._internal[i][j] = arg._internal; -} - -///////////// -// No match poke for scalar,vector,matrix must forward on either 0,1,2 args. 
Must have 9 routines with notvalue -///////////// -// scalar -template::TensorLevel != Level >::type * =nullptr> inline -void pokeIndex(iScalar &ret, const iScalar(ret._internal))> &arg) -{ - pokeIndex(ret._internal,arg._internal); -} -template::TensorLevel != Level >::type * =nullptr> inline - void pokeIndex(iScalar &ret, const iScalar(ret._internal,0))> &arg, int i) - -{ - pokeIndex(ret._internal,arg._internal,i); -} -template::TensorLevel != Level >::type * =nullptr> inline - void pokeIndex(iScalar &ret, const iScalar(ret._internal,0,0))> &arg,int i,int j) -{ - pokeIndex(ret._internal,arg._internal,i,j); -} - -// Vector -template::TensorLevel != Level >::type * =nullptr> inline - void pokeIndex(iVector &ret, iVector(ret._internal)),N> &arg) -{ - for(int ii=0;ii(ret._internal[ii],arg._internal[ii]); - } -} -template::TensorLevel != Level >::type * =nullptr> inline - void pokeIndex(iVector &ret, const iVector(ret._internal,0)),N> &arg,int i) -{ - for(int ii=0;ii(ret._internal[ii],arg._internal[ii],i); - } -} -template::TensorLevel != Level >::type * =nullptr> inline - void pokeIndex(iVector &ret, const iVector(ret._internal,0,0)),N> &arg,int i,int j) -{ - for(int ii=0;ii(ret._internal[ii],arg._internal[ii],i,j); - } -} - -// Matrix -template::TensorLevel != Level >::type * =nullptr> inline - void pokeIndex(iMatrix &ret, const iMatrix(ret._internal)),N> &arg) -{ - for(int ii=0;ii(ret._internal[ii][jj],arg._internal[ii][jj]); - }} -} -template::TensorLevel != Level >::type * =nullptr> inline - void pokeIndex(iMatrix &ret, const iMatrix(ret._internal,0)),N> &arg,int i) -{ - for(int ii=0;ii(ret._internal[ii][jj],arg._internal[ii][jj],i); - }} -} -template::TensorLevel != Level >::type * =nullptr> inline - void pokeIndex(iMatrix &ret, const iMatrix(ret._internal,0,0)),N> &arg, int i,int j) -{ - for(int ii=0;ii(ret._internal[ii][jj],arg._internal[ii][jj],i,j); - }} -} -#endif - -} -#endif diff --git a/lib/Algorithms.h b/lib/algorithms/Algorithms.h similarity index 98% rename from lib/Algorithms.h rename to lib/algorithms/Algorithms.h index 67eb11c3..1b82f0ce 100644 --- a/lib/Algorithms.h +++ b/lib/algorithms/Algorithms.h @@ -42,15 +42,14 @@ Author: Peter Boyle #include #include #include - #include #include // Lanczos support #include #include - #include +#include // Eigen/lanczos // EigCg diff --git a/lib/FFT.h b/lib/algorithms/FFT.h similarity index 100% rename from lib/FFT.h rename to lib/algorithms/FFT.h diff --git a/lib/algorithms/approx/MultiShiftFunction.cc b/lib/algorithms/approx/MultiShiftFunction.cc index fc642ad7..34befdde 100644 --- a/lib/algorithms/approx/MultiShiftFunction.cc +++ b/lib/algorithms/approx/MultiShiftFunction.cc @@ -25,7 +25,7 @@ Author: Azusa Yamaguchi See the full license in the file "LICENSE" in the top level distribution directory *************************************************************************************/ /* END LEGAL */ -#include +#include namespace Grid { double MultiShiftFunction::approx(double x) diff --git a/lib/algorithms/approx/Remez.cc b/lib/algorithms/approx/Remez.cc index 38d60088..ca00a330 100644 --- a/lib/algorithms/approx/Remez.cc +++ b/lib/algorithms/approx/Remez.cc @@ -20,7 +20,7 @@ #include #include -#include +#include // Constructor AlgRemez::AlgRemez(double lower, double upper, long precision) diff --git a/lib/algorithms/approx/Remez.h b/lib/algorithms/approx/Remez.h index 31938779..804bce7c 100644 --- a/lib/algorithms/approx/Remez.h +++ b/lib/algorithms/approx/Remez.h @@ -16,7 +16,7 @@ #define INCLUDED_ALG_REMEZ_H #include -#include 
+#include #ifdef HAVE_LIBGMP #include "bigfloat.h" diff --git a/lib/AlignedAllocator.cc b/lib/allocator/AlignedAllocator.cc similarity index 97% rename from lib/AlignedAllocator.cc rename to lib/allocator/AlignedAllocator.cc index f6d234d5..4249a72e 100644 --- a/lib/AlignedAllocator.cc +++ b/lib/allocator/AlignedAllocator.cc @@ -1,7 +1,7 @@ -#include +#include namespace Grid { diff --git a/lib/AlignedAllocator.h b/lib/allocator/AlignedAllocator.h similarity index 100% rename from lib/AlignedAllocator.h rename to lib/allocator/AlignedAllocator.h diff --git a/lib/Cartesian.h b/lib/cartesian/Cartesian.h similarity index 100% rename from lib/Cartesian.h rename to lib/cartesian/Cartesian.h diff --git a/lib/Communicator.h b/lib/communicator/Communicator.h similarity index 100% rename from lib/Communicator.h rename to lib/communicator/Communicator.h diff --git a/lib/communicator/Communicator_base.cc b/lib/communicator/Communicator_base.cc index 1cc1abd0..98d2abf4 100644 --- a/lib/communicator/Communicator_base.cc +++ b/lib/communicator/Communicator_base.cc @@ -25,7 +25,8 @@ Author: Peter Boyle See the full license in the file "LICENSE" in the top level distribution directory *************************************************************************************/ /* END LEGAL */ -#include "Grid.h" +#include + namespace Grid { /////////////////////////////////////////////////////////////// diff --git a/lib/communicator/Communicator_mpi.cc b/lib/communicator/Communicator_mpi.cc index 4dfdd025..1a48d856 100644 --- a/lib/communicator/Communicator_mpi.cc +++ b/lib/communicator/Communicator_mpi.cc @@ -25,7 +25,7 @@ Author: Peter Boyle See the full license in the file "LICENSE" in the top level distribution directory *************************************************************************************/ /* END LEGAL */ -#include "Grid.h" +#include #include namespace Grid { diff --git a/lib/communicator/Communicator_mpi3.cc b/lib/communicator/Communicator_mpi3.cc index 557e4ebf..2f3f1d67 100644 --- a/lib/communicator/Communicator_mpi3.cc +++ b/lib/communicator/Communicator_mpi3.cc @@ -25,8 +25,8 @@ Author: Peter Boyle See the full license in the file "LICENSE" in the top level distribution directory *************************************************************************************/ /* END LEGAL */ -#include "Grid.h" -//#include +#include + #include #include diff --git a/lib/communicator/Communicator_none.cc b/lib/communicator/Communicator_none.cc index 5e91b305..5cc839d5 100644 --- a/lib/communicator/Communicator_none.cc +++ b/lib/communicator/Communicator_none.cc @@ -25,7 +25,8 @@ Author: Peter Boyle See the full license in the file "LICENSE" in the top level distribution directory *************************************************************************************/ /* END LEGAL */ -#include "Grid.h" +#include + namespace Grid { /////////////////////////////////////////////////////////////////////////////////////////////////// diff --git a/lib/communicator/Communicator_shmem.cc b/lib/communicator/Communicator_shmem.cc index 56e03224..b7a263a4 100644 --- a/lib/communicator/Communicator_shmem.cc +++ b/lib/communicator/Communicator_shmem.cc @@ -25,7 +25,7 @@ Author: Peter Boyle See the full license in the file "LICENSE" in the top level distribution directory *************************************************************************************/ /* END LEGAL */ -#include "Grid.h" +#include #include namespace Grid { diff --git a/lib/Cshift.h b/lib/cshift/Cshift.h similarity index 100% rename from 
lib/Cshift.h rename to lib/cshift/Cshift.h diff --git a/lib/Lattice.h b/lib/lattice/Lattice.h similarity index 100% rename from lib/Lattice.h rename to lib/lattice/Lattice.h diff --git a/lib/Log.cc b/lib/log/Log.cc similarity index 99% rename from lib/Log.cc rename to lib/log/Log.cc index 7521657b..a6ef2857 100644 --- a/lib/Log.cc +++ b/lib/log/Log.cc @@ -29,7 +29,7 @@ See the full license in the file "LICENSE" in the top level distribution directory *************************************************************************************/ /* END LEGAL */ -#include +#include #include diff --git a/lib/Log.h b/lib/log/Log.h similarity index 100% rename from lib/Log.h rename to lib/log/Log.h diff --git a/lib/PerfCount.cc b/lib/perfmon/PerfCount.cc similarity index 98% rename from lib/PerfCount.cc rename to lib/perfmon/PerfCount.cc index b61851a9..4778295a 100644 --- a/lib/PerfCount.cc +++ b/lib/perfmon/PerfCount.cc @@ -26,8 +26,8 @@ Author: paboyle *************************************************************************************/ /* END LEGAL */ -#include -#include +#include +#include namespace Grid { diff --git a/lib/PerfCount.h b/lib/perfmon/PerfCount.h similarity index 100% rename from lib/PerfCount.h rename to lib/perfmon/PerfCount.h diff --git a/lib/Stat.cc b/lib/perfmon/Stat.cc similarity index 98% rename from lib/Stat.cc rename to lib/perfmon/Stat.cc index 7f2e4086..e34b73bd 100644 --- a/lib/Stat.cc +++ b/lib/perfmon/Stat.cc @@ -1,6 +1,6 @@ -#include -#include -#include +#include +#include +#include namespace Grid { diff --git a/lib/Stat.h b/lib/perfmon/Stat.h similarity index 100% rename from lib/Stat.h rename to lib/perfmon/Stat.h diff --git a/lib/Timer.h b/lib/perfmon/Timer.h similarity index 100% rename from lib/Timer.h rename to lib/perfmon/Timer.h diff --git a/lib/qcd/QCD.h b/lib/qcd/QCD.h index f434bdd9..6e6144da 100644 --- a/lib/qcd/QCD.h +++ b/lib/qcd/QCD.h @@ -29,8 +29,8 @@ Author: paboyle See the full license in the file "LICENSE" in the top level distribution directory *************************************************************************************/ /* END LEGAL */ -#ifndef GRID_QCD_H -#define GRID_QCD_H +#ifndef GRID_QCD_BASE_H +#define GRID_QCD_BASE_H namespace Grid{ namespace QCD { @@ -62,7 +62,6 @@ namespace QCD { #define SpinIndex 1 #define LorentzIndex 0 - // Also should make these a named enum type static const int DaggerNo=0; static const int DaggerYes=1; @@ -494,26 +493,5 @@ namespace QCD { } // Grid -#include -#include -#include -#include -#include - -// Include representations -#include -#include -#include -#include - -#include - -#include - -#include -#include -#include - - #endif diff --git a/lib/qcd/action/Action.h b/lib/qcd/action/Action.h new file mode 100644 index 00000000..37b13a9f --- /dev/null +++ b/lib/qcd/action/Action.h @@ -0,0 +1,55 @@ + /************************************************************************************* + + Grid physics library, www.github.com/paboyle/Grid + + Source file: ./lib/qcd/action/Actions.h + + Copyright (C) 2015 + +Author: Azusa Yamaguchi +Author: Peter Boyle +Author: Peter Boyle +Author: Peter Boyle +Author: neo +Author: paboyle + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. 
+ + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License along + with this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + + See the full license in the file "LICENSE" in the top level distribution directory + *************************************************************************************/ + /* END LEGAL */ +#ifndef GRID_QCD_ACTIONS_H +#define GRID_QCD_ACTIONS_H + +// * Linear operators (Hermitian and non-hermitian) .. my LinearOperator +// * System solvers (Hermitian and non-hermitian) .. my OperatorFunction +// * MultiShift System solvers (Hermitian and non-hermitian) .. my OperatorFunction + +//////////////////////////////////////////// +// Abstract base interface +//////////////////////////////////////////// +#include + +//////////////////////////////////////////////////////////////////////// +// Fermion actions; prevent coupling fermion.cc files to other headers +//////////////////////////////////////////////////////////////////////// +#include + +//////////////////////////////////////// +// Pseudo fermion combinations for HMC +//////////////////////////////////////// +#include + +#endif diff --git a/lib/qcd/action/ActionBase.h b/lib/qcd/action/ActionBase.h index 56d6b8e0..2f9fed4b 100644 --- a/lib/qcd/action/ActionBase.h +++ b/lib/qcd/action/ActionBase.h @@ -150,4 +150,5 @@ using ActionSet = std::vector >; } } + #endif diff --git a/lib/qcd/action/ActionCore.h b/lib/qcd/action/ActionCore.h new file mode 100644 index 00000000..839645a3 --- /dev/null +++ b/lib/qcd/action/ActionCore.h @@ -0,0 +1,45 @@ +/************************************************************************************* + +Grid physics library, www.github.com/paboyle/Grid + +Source file: ./lib/qcd/action/ActionCore.h + +Copyright (C) 2015 + +Author: Peter Boyle +Author: neo + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 2 of the License, or +(at your option) any later version. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License along +with this program; if not, write to the Free Software Foundation, Inc., +51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
+ +See the full license in the file "LICENSE" in the top level distribution +directory +*************************************************************************************/ +/* END LEGAL */ +#ifndef QCD_ACTION_CORE +#define QCD_ACTION_CORE + +#include +#include + +//////////////////////////////////////////// +// Gauge Actions +//////////////////////////////////////////// +#include +//////////////////////////////////////////// +// Fermion prereqs +//////////////////////////////////////////// +#include + +#endif diff --git a/lib/qcd/action/fermion/CayleyFermion5D.cc b/lib/qcd/action/fermion/CayleyFermion5D.cc index 781380e5..3525c12c 100644 --- a/lib/qcd/action/fermion/CayleyFermion5D.cc +++ b/lib/qcd/action/fermion/CayleyFermion5D.cc @@ -30,8 +30,8 @@ Author: paboyle /* END LEGAL */ #include -#include - +#include +#include namespace Grid { namespace QCD { diff --git a/lib/qcd/action/fermion/CayleyFermion5D.h b/lib/qcd/action/fermion/CayleyFermion5D.h index 86255be6..1c7431bd 100644 --- a/lib/qcd/action/fermion/CayleyFermion5D.h +++ b/lib/qcd/action/fermion/CayleyFermion5D.h @@ -29,6 +29,8 @@ Author: Peter Boyle #ifndef GRID_QCD_CAYLEY_FERMION_H #define GRID_QCD_CAYLEY_FERMION_H +#include + namespace Grid { namespace QCD { diff --git a/lib/qcd/action/fermion/CayleyFermion5Dcache.cc b/lib/qcd/action/fermion/CayleyFermion5Dcache.cc index 319f8d3c..2c3e98ae 100644 --- a/lib/qcd/action/fermion/CayleyFermion5Dcache.cc +++ b/lib/qcd/action/fermion/CayleyFermion5Dcache.cc @@ -29,7 +29,8 @@ Author: paboyle *************************************************************************************/ /* END LEGAL */ -#include +#include +#include namespace Grid { diff --git a/lib/qcd/action/fermion/CayleyFermion5Ddense.cc b/lib/qcd/action/fermion/CayleyFermion5Ddense.cc index 5fa75b50..519af37a 100644 --- a/lib/qcd/action/fermion/CayleyFermion5Ddense.cc +++ b/lib/qcd/action/fermion/CayleyFermion5Ddense.cc @@ -30,7 +30,8 @@ Author: paboyle /* END LEGAL */ #include -#include +#include +#include namespace Grid { diff --git a/lib/qcd/action/fermion/CayleyFermion5Dssp.cc b/lib/qcd/action/fermion/CayleyFermion5Dssp.cc index ad7daddb..415b9150 100644 --- a/lib/qcd/action/fermion/CayleyFermion5Dssp.cc +++ b/lib/qcd/action/fermion/CayleyFermion5Dssp.cc @@ -29,7 +29,8 @@ Author: paboyle *************************************************************************************/ /* END LEGAL */ -#include +#include +#include namespace Grid { diff --git a/lib/qcd/action/fermion/CayleyFermion5Dvec.cc b/lib/qcd/action/fermion/CayleyFermion5Dvec.cc index 6d7b252e..e8915410 100644 --- a/lib/qcd/action/fermion/CayleyFermion5Dvec.cc +++ b/lib/qcd/action/fermion/CayleyFermion5Dvec.cc @@ -30,7 +30,8 @@ Author: paboyle /* END LEGAL */ -#include +#include +#include namespace Grid { diff --git a/lib/qcd/action/fermion/ContinuedFractionFermion5D.cc b/lib/qcd/action/fermion/ContinuedFractionFermion5D.cc index e58ab4da..5d39ef9b 100644 --- a/lib/qcd/action/fermion/ContinuedFractionFermion5D.cc +++ b/lib/qcd/action/fermion/ContinuedFractionFermion5D.cc @@ -26,7 +26,8 @@ Author: Peter Boyle See the full license in the file "LICENSE" in the top level distribution directory *************************************************************************************/ /* END LEGAL */ -#include +#include +#include namespace Grid { namespace QCD { diff --git a/lib/qcd/action/fermion/ContinuedFractionFermion5D.h b/lib/qcd/action/fermion/ContinuedFractionFermion5D.h index 15d44945..e1e50aa5 100644 --- a/lib/qcd/action/fermion/ContinuedFractionFermion5D.h 
+++ b/lib/qcd/action/fermion/ContinuedFractionFermion5D.h @@ -29,6 +29,8 @@ Author: Peter Boyle #ifndef GRID_QCD_CONTINUED_FRACTION_H #define GRID_QCD_CONTINUED_FRACTION_H +#include + namespace Grid { namespace QCD { diff --git a/lib/qcd/action/fermion/DomainWallFermion.h b/lib/qcd/action/fermion/DomainWallFermion.h index c0b6b6aa..ad4bf87e 100644 --- a/lib/qcd/action/fermion/DomainWallFermion.h +++ b/lib/qcd/action/fermion/DomainWallFermion.h @@ -29,7 +29,7 @@ Author: Peter Boyle #ifndef GRID_QCD_DOMAIN_WALL_FERMION_H #define GRID_QCD_DOMAIN_WALL_FERMION_H -#include +#include namespace Grid { diff --git a/lib/qcd/action/Actions.h b/lib/qcd/action/fermion/Fermion.h similarity index 64% rename from lib/qcd/action/Actions.h rename to lib/qcd/action/fermion/Fermion.h index 4a30f8c3..c806342d 100644 --- a/lib/qcd/action/Actions.h +++ b/lib/qcd/action/fermion/Fermion.h @@ -2,16 +2,11 @@ Grid physics library, www.github.com/paboyle/Grid - Source file: ./lib/qcd/action/Actions.h + Source file: ./lib/qcd/action/fermion/Fermion_base_aggregate.h Copyright (C) 2015 -Author: Azusa Yamaguchi Author: Peter Boyle -Author: Peter Boyle -Author: Peter Boyle -Author: neo -Author: paboyle This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -30,67 +25,8 @@ Author: paboyle See the full license in the file "LICENSE" in the top level distribution directory *************************************************************************************/ /* END LEGAL */ -#ifndef GRID_QCD_ACTIONS_H -#define GRID_QCD_ACTIONS_H - -// * Linear operators (Hermitian and non-hermitian) .. my LinearOperator -// * System solvers (Hermitian and non-hermitian) .. my OperatorFunction -// * MultiShift System solvers (Hermitian and non-hermitian) .. 
my OperatorFunction - -//////////////////////////////////////////// -// Abstract base interface -//////////////////////////////////////////// -#include -#include - -//////////////////////////////////////////// -// Utility functions -//////////////////////////////////////////// -#include -#include - -#include //used by all wilson type fermions -#include -#include -#include //used by all wilson type fermions - -//////////////////////////////////////////// -// Gauge Actions -//////////////////////////////////////////// -#include -#include - -namespace Grid { -namespace QCD { - -typedef WilsonGaugeAction WilsonGaugeActionR; -typedef WilsonGaugeAction WilsonGaugeActionF; -typedef WilsonGaugeAction WilsonGaugeActionD; -typedef PlaqPlusRectangleAction PlaqPlusRectangleActionR; -typedef PlaqPlusRectangleAction PlaqPlusRectangleActionF; -typedef PlaqPlusRectangleAction PlaqPlusRectangleActionD; -typedef IwasakiGaugeAction IwasakiGaugeActionR; -typedef IwasakiGaugeAction IwasakiGaugeActionF; -typedef IwasakiGaugeAction IwasakiGaugeActionD; -typedef SymanzikGaugeAction SymanzikGaugeActionR; -typedef SymanzikGaugeAction SymanzikGaugeActionF; -typedef SymanzikGaugeAction SymanzikGaugeActionD; - - -typedef WilsonGaugeAction ConjugateWilsonGaugeActionR; -typedef WilsonGaugeAction ConjugateWilsonGaugeActionF; -typedef WilsonGaugeAction ConjugateWilsonGaugeActionD; -typedef PlaqPlusRectangleAction ConjugatePlaqPlusRectangleActionR; -typedef PlaqPlusRectangleAction ConjugatePlaqPlusRectangleActionF; -typedef PlaqPlusRectangleAction ConjugatePlaqPlusRectangleActionD; -typedef IwasakiGaugeAction ConjugateIwasakiGaugeActionR; -typedef IwasakiGaugeAction ConjugateIwasakiGaugeActionF; -typedef IwasakiGaugeAction ConjugateIwasakiGaugeActionD; -typedef SymanzikGaugeAction ConjugateSymanzikGaugeActionR; -typedef SymanzikGaugeAction ConjugateSymanzikGaugeActionF; -typedef SymanzikGaugeAction ConjugateSymanzikGaugeActionD; - -}} +#ifndef GRID_QCD_FERMION_ACTIONS_H +#define GRID_QCD_FERMION_ACTIONS_H //////////////////////////////////////////////////////////////////////////////////////////////////// // Explicit explicit template instantiation is still required in the .cc files @@ -107,36 +43,6 @@ typedef SymanzikGaugeAction ConjugateSymanzikGaugeAction // for EVERY .cc file. 
This define centralises the list and restores global push of impl cases //////////////////////////////////////////////////////////////////////////////////////////////////// - -#define FermOp4dVecTemplateInstantiate(A) \ - template class A; \ - template class A; \ - template class A; \ - template class A; \ - template class A; \ - template class A; - -#define AdjointFermOpTemplateInstantiate(A) \ - template class A; \ - template class A; - -#define TwoIndexFermOpTemplateInstantiate(A) \ - template class A; \ - template class A; - -#define FermOp5dVecTemplateInstantiate(A) \ - template class A; \ - template class A; \ - template class A; \ - template class A; - -#define FermOpTemplateInstantiate(A) \ - FermOp4dVecTemplateInstantiate(A) \ - FermOp5dVecTemplateInstantiate(A) - - -#define GparityFermOpTemplateInstantiate(A) - //////////////////////////////////////////// // Fermion operators / actions //////////////////////////////////////////// @@ -144,9 +50,7 @@ typedef SymanzikGaugeAction ConjugateSymanzikGaugeAction #include // 4d wilson like #include // 4d wilson like #include // 5d base used by all 5d overlap types - //#include - #include // Cayley types #include #include @@ -157,14 +61,16 @@ typedef SymanzikGaugeAction ConjugateSymanzikGaugeAction #include #include #include - #include // Continued fraction #include #include - #include // Partial fraction #include #include +/////////////////////////////////////////////////////////////////////////////// +// G5 herm -- this has to live in QCD since dirac matrix is not in the broader sector of code +/////////////////////////////////////////////////////////////////////////////// +#include //////////////////////////////////////////////////////////////////////////////////////////////////// // More maintainable to maintain the following typedef list centrally, as more "impl" targets @@ -271,24 +177,5 @@ typedef MobiusFermion GparityMobiusFermionD; }} -/////////////////////////////////////////////////////////////////////////////// -// G5 herm -- this has to live in QCD since dirac matrix is not in the broader sector of code -/////////////////////////////////////////////////////////////////////////////// -#include - -//////////////////////////////////////// -// Pseudo fermion combinations for HMC -//////////////////////////////////////// -#include - -#include -#include -#include -#include - -#include -#include -#include -#include #endif diff --git a/lib/qcd/action/fermion/FermionCore.h b/lib/qcd/action/fermion/FermionCore.h new file mode 100644 index 00000000..23b31eae --- /dev/null +++ b/lib/qcd/action/fermion/FermionCore.h @@ -0,0 +1,71 @@ + /************************************************************************************* + + Grid physics library, www.github.com/paboyle/Grid + + Source file: ./lib/qcd/action/fermion/Fermion_base_aggregate.h + + Copyright (C) 2015 + +Author: Peter Boyle + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. 
+ + You should have received a copy of the GNU General Public License along + with this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + + See the full license in the file "LICENSE" in the top level distribution directory + *************************************************************************************/ + /* END LEGAL */ +#ifndef GRID_QCD_FERMION_CORE_H +#define GRID_QCD_FERMION_CORE_H + +#include +#include +#include + +//////////////////////////////////////////// +// Fermion prereqs +//////////////////////////////////////////// +#include //used by all wilson type fermions +#include +#include +#include //used by all wilson type fermions + +#define FermOp4dVecTemplateInstantiate(A) \ + template class A; \ + template class A; \ + template class A; \ + template class A; \ + template class A; \ + template class A; + +#define AdjointFermOpTemplateInstantiate(A) \ + template class A; \ + template class A; + +#define TwoIndexFermOpTemplateInstantiate(A) \ + template class A; \ + template class A; + +#define FermOp5dVecTemplateInstantiate(A) \ + template class A; \ + template class A; \ + template class A; \ + template class A; + +#define FermOpTemplateInstantiate(A) \ + FermOp4dVecTemplateInstantiate(A) \ + FermOp5dVecTemplateInstantiate(A) + +#define GparityFermOpTemplateInstantiate(A) + +#endif diff --git a/lib/qcd/action/fermion/MobiusFermion.h b/lib/qcd/action/fermion/MobiusFermion.h index ade9ca4d..b61c26d5 100644 --- a/lib/qcd/action/fermion/MobiusFermion.h +++ b/lib/qcd/action/fermion/MobiusFermion.h @@ -29,7 +29,7 @@ Author: Peter Boyle #ifndef GRID_QCD_MOBIUS_FERMION_H #define GRID_QCD_MOBIUS_FERMION_H -#include +#include namespace Grid { diff --git a/lib/qcd/action/fermion/MobiusZolotarevFermion.h b/lib/qcd/action/fermion/MobiusZolotarevFermion.h index 609d5cea..078d4f3e 100644 --- a/lib/qcd/action/fermion/MobiusZolotarevFermion.h +++ b/lib/qcd/action/fermion/MobiusZolotarevFermion.h @@ -29,7 +29,7 @@ Author: Peter Boyle #ifndef GRID_QCD_MOBIUS_ZOLOTAREV_FERMION_H #define GRID_QCD_MOBIUS_ZOLOTAREV_FERMION_H -#include +#include namespace Grid { diff --git a/lib/qcd/action/fermion/OverlapWilsonCayleyTanhFermion.h b/lib/qcd/action/fermion/OverlapWilsonCayleyTanhFermion.h index 9cab0e22..f516c5d0 100644 --- a/lib/qcd/action/fermion/OverlapWilsonCayleyTanhFermion.h +++ b/lib/qcd/action/fermion/OverlapWilsonCayleyTanhFermion.h @@ -29,7 +29,7 @@ Author: Peter Boyle #ifndef OVERLAP_WILSON_CAYLEY_TANH_FERMION_H #define OVERLAP_WILSON_CAYLEY_TANH_FERMION_H -#include +#include namespace Grid { diff --git a/lib/qcd/action/fermion/OverlapWilsonCayleyZolotarevFermion.h b/lib/qcd/action/fermion/OverlapWilsonCayleyZolotarevFermion.h index 048244cc..4f1adbbf 100644 --- a/lib/qcd/action/fermion/OverlapWilsonCayleyZolotarevFermion.h +++ b/lib/qcd/action/fermion/OverlapWilsonCayleyZolotarevFermion.h @@ -29,7 +29,7 @@ Author: Peter Boyle #ifndef OVERLAP_WILSON_CAYLEY_ZOLOTAREV_FERMION_H #define OVERLAP_WILSON_CAYLEY_ZOLOTAREV_FERMION_H -#include +#include namespace Grid { diff --git a/lib/qcd/action/fermion/OverlapWilsonContfracTanhFermion.h b/lib/qcd/action/fermion/OverlapWilsonContfracTanhFermion.h index bbac735a..38d0fda2 100644 --- a/lib/qcd/action/fermion/OverlapWilsonContfracTanhFermion.h +++ b/lib/qcd/action/fermion/OverlapWilsonContfracTanhFermion.h @@ -29,7 +29,7 @@ Author: Peter Boyle #ifndef OVERLAP_WILSON_CONTFRAC_TANH_FERMION_H #define OVERLAP_WILSON_CONTFRAC_TANH_FERMION_H -#include +#include namespace Grid { 
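The Action.h header introduced earlier frames the design in three layers: LinearOperator wrappers around an action, OperatorFunction solvers that consume them, and multishift variants of the same. A minimal sketch of how these compose, using Grid's usual names (Ddwf, src and sol are assumed to be constructed elsewhere; this snippet is an illustration, not part of the patch):

  // Wrap the Dirac operator as a Hermitian LinearOperator and hand it
  // to a solver, which is an OperatorFunction over the fermion field.
  MdagMLinearOperator<DomainWallFermionR, LatticeFermion> HermOp(Ddwf);
  ConjugateGradient<LatticeFermion> CG(1.0e-8, 10000); // tolerance, max iterations
  CG(HermOp, src, sol);                                // solves (Mdag M) sol = src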
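On the FermOpTemplateInstantiate macros collected in FermionCore.h above: each fermion .cc file closes with a single macro call that expands to the whole list of explicit instantiations, so adding an impl target means editing one define rather than every translation unit. A sketch of the usage pattern (this is how e.g. WilsonFermion.cc ends in the tree):

  namespace Grid {
  namespace QCD {
    // Expands to: template class WilsonFermion<WilsonImplF>;
    //             template class WilsonFermion<WilsonImplD>; ... etc.
    FermOpTemplateInstantiate(WilsonFermion);
  }}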
diff --git a/lib/qcd/action/fermion/OverlapWilsonContfracZolotarevFermion.h b/lib/qcd/action/fermion/OverlapWilsonContfracZolotarevFermion.h index 9da30f65..6773b4d2 100644 --- a/lib/qcd/action/fermion/OverlapWilsonContfracZolotarevFermion.h +++ b/lib/qcd/action/fermion/OverlapWilsonContfracZolotarevFermion.h @@ -29,7 +29,7 @@ Author: Peter Boyle #ifndef OVERLAP_WILSON_CONTFRAC_ZOLOTAREV_FERMION_H #define OVERLAP_WILSON_CONTFRAC_ZOLOTAREV_FERMION_H -#include +#include namespace Grid { diff --git a/lib/qcd/action/fermion/OverlapWilsonPartialFractionTanhFermion.h b/lib/qcd/action/fermion/OverlapWilsonPartialFractionTanhFermion.h index 3b867174..84c4f597 100644 --- a/lib/qcd/action/fermion/OverlapWilsonPartialFractionTanhFermion.h +++ b/lib/qcd/action/fermion/OverlapWilsonPartialFractionTanhFermion.h @@ -29,7 +29,7 @@ Author: Peter Boyle #ifndef OVERLAP_WILSON_PARTFRAC_TANH_FERMION_H #define OVERLAP_WILSON_PARTFRAC_TANH_FERMION_H -#include +#include namespace Grid { diff --git a/lib/qcd/action/fermion/OverlapWilsonPartialFractionZolotarevFermion.h b/lib/qcd/action/fermion/OverlapWilsonPartialFractionZolotarevFermion.h index e1d0763b..dc275852 100644 --- a/lib/qcd/action/fermion/OverlapWilsonPartialFractionZolotarevFermion.h +++ b/lib/qcd/action/fermion/OverlapWilsonPartialFractionZolotarevFermion.h @@ -29,7 +29,7 @@ Author: Peter Boyle #ifndef OVERLAP_WILSON_PARTFRAC_ZOLOTAREV_FERMION_H #define OVERLAP_WILSON_PARTFRAC_ZOLOTAREV_FERMION_H -#include +#include namespace Grid { diff --git a/lib/qcd/action/fermion/PartialFractionFermion5D.cc b/lib/qcd/action/fermion/PartialFractionFermion5D.cc index 4fcb8784..ec004816 100644 --- a/lib/qcd/action/fermion/PartialFractionFermion5D.cc +++ b/lib/qcd/action/fermion/PartialFractionFermion5D.cc @@ -26,7 +26,8 @@ Author: Peter Boyle See the full license in the file "LICENSE" in the top level distribution directory *************************************************************************************/ /* END LEGAL */ -#include +#include +#include namespace Grid { namespace QCD { diff --git a/lib/qcd/action/fermion/PartialFractionFermion5D.h b/lib/qcd/action/fermion/PartialFractionFermion5D.h index 126f3299..0ec72de4 100644 --- a/lib/qcd/action/fermion/PartialFractionFermion5D.h +++ b/lib/qcd/action/fermion/PartialFractionFermion5D.h @@ -29,6 +29,8 @@ Author: Peter Boyle #ifndef GRID_QCD_PARTIAL_FRACTION_H #define GRID_QCD_PARTIAL_FRACTION_H +#include + namespace Grid { namespace QCD { diff --git a/lib/qcd/action/fermion/ScaledShamirFermion.h b/lib/qcd/action/fermion/ScaledShamirFermion.h index f850ee4d..b779b9c0 100644 --- a/lib/qcd/action/fermion/ScaledShamirFermion.h +++ b/lib/qcd/action/fermion/ScaledShamirFermion.h @@ -29,7 +29,7 @@ Author: Peter Boyle #ifndef GRID_QCD_SCALED_SHAMIR_FERMION_H #define GRID_QCD_SCALED_SHAMIR_FERMION_H -#include +#include namespace Grid { diff --git a/lib/qcd/action/fermion/ShamirZolotarevFermion.h b/lib/qcd/action/fermion/ShamirZolotarevFermion.h index 732afa0a..f9397911 100644 --- a/lib/qcd/action/fermion/ShamirZolotarevFermion.h +++ b/lib/qcd/action/fermion/ShamirZolotarevFermion.h @@ -29,7 +29,7 @@ Author: Peter Boyle #ifndef GRID_QCD_SHAMIR_ZOLOTAREV_FERMION_H #define GRID_QCD_SHAMIR_ZOLOTAREV_FERMION_H -#include +#include namespace Grid { diff --git a/lib/qcd/action/fermion/WilsonFermion.cc b/lib/qcd/action/fermion/WilsonFermion.cc index ac5f8945..0320bb27 100644 --- a/lib/qcd/action/fermion/WilsonFermion.cc +++ b/lib/qcd/action/fermion/WilsonFermion.cc @@ -29,15 +29,14 @@ See the full license in the file "LICENSE" in 
the top level distribution directory *************************************************************************************/ /* END LEGAL */ -#include +#include +#include namespace Grid { namespace QCD { -const std::vector WilsonFermionStatic::directions({0, 1, 2, 3, 0, 1, 2, - 3}); -const std::vector WilsonFermionStatic::displacements({1, 1, 1, 1, -1, -1, - -1, -1}); +const std::vector WilsonFermionStatic::directions({0, 1, 2, 3, 0, 1, 2, 3}); +const std::vector WilsonFermionStatic::displacements({1, 1, 1, 1, -1, -1, -1, -1}); int WilsonFermionStatic::HandOptDslash; ///////////////////////////////// @@ -52,10 +51,8 @@ WilsonFermion::WilsonFermion(GaugeField &_Umu, GridCartesian &Fgrid, _grid(&Fgrid), _cbgrid(&Hgrid), Stencil(&Fgrid, npoint, Even, directions, displacements), - StencilEven(&Hgrid, npoint, Even, directions, - displacements), // source is Even - StencilOdd(&Hgrid, npoint, Odd, directions, - displacements), // source is Odd + StencilEven(&Hgrid, npoint, Even, directions,displacements), // source is Even + StencilOdd(&Hgrid, npoint, Odd, directions,displacements), // source is Odd mass(_mass), Lebesgue(_grid), LebesgueEvenOdd(_cbgrid), @@ -113,86 +110,85 @@ void WilsonFermion::MeooeDag(const FermionField &in, FermionField &out) { } } - template - void WilsonFermion::Mooee(const FermionField &in, FermionField &out) { - out.checkerboard = in.checkerboard; - typename FermionField::scalar_type scal(4.0 + mass); - out = scal * in; - } +template +void WilsonFermion::Mooee(const FermionField &in, FermionField &out) { + out.checkerboard = in.checkerboard; + typename FermionField::scalar_type scal(4.0 + mass); + out = scal * in; +} - template - void WilsonFermion::MooeeDag(const FermionField &in, FermionField &out) { - out.checkerboard = in.checkerboard; - Mooee(in, out); - } +template +void WilsonFermion::MooeeDag(const FermionField &in, FermionField &out) { + out.checkerboard = in.checkerboard; + Mooee(in, out); +} - template - void WilsonFermion::MooeeInv(const FermionField &in, FermionField &out) { - out.checkerboard = in.checkerboard; - out = (1.0/(4.0+mass))*in; +template +void WilsonFermion::MooeeInv(const FermionField &in, FermionField &out) { + out.checkerboard = in.checkerboard; + out = (1.0/(4.0+mass))*in; +} + +template +void WilsonFermion::MooeeInvDag(const FermionField &in, FermionField &out) { + out.checkerboard = in.checkerboard; + MooeeInv(in,out); +} + +template +void WilsonFermion::MomentumSpacePropagator(FermionField &out, const FermionField &in,RealD _m) +{ + typedef typename FermionField::vector_type vector_type; + typedef typename FermionField::scalar_type ScalComplex; + typedef Lattice > LatComplex; + + // what type LatticeComplex + conformable(_grid,out._grid); + + Gamma::GammaMatrix Gmu [] = { + Gamma::GammaX, + Gamma::GammaY, + Gamma::GammaZ, + Gamma::GammaT + }; + + std::vector latt_size = _grid->_fdimensions; + + FermionField num (_grid); num = zero; + LatComplex wilson(_grid); wilson= zero; + LatComplex one (_grid); one = ScalComplex(1.0,0.0); + + LatComplex denom(_grid); denom= zero; + LatComplex kmu(_grid); + ScalComplex ci(0.0,1.0); + // momphase = n * 2pi / L + for(int mu=0;mu - void WilsonFermion::MooeeInvDag(const FermionField &in, FermionField &out) { - out.checkerboard = in.checkerboard; - MooeeInv(in,out); - } - - template - void WilsonFermion::MomentumSpacePropagator(FermionField &out, const FermionField &in,RealD _m) { - - // what type LatticeComplex - conformable(_grid,out._grid); - - typedef typename FermionField::vector_type vector_type; - 
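For orientation on the MomentumSpacePropagator code being reorganised above: the free Wilson operator in momentum space is D(k) = i sum_mu gamma_mu sin(k_mu) + W(k), with W(k) = m + sum_mu (1 - cos k_mu), so beyond the gamma structure the routine only needs the denominator sum_mu sin^2(k_mu) + W(k)^2. A minimal scalar sketch of that denominator, in plain C++ with no Grid types (the function and parameter names are illustrative, not part of Grid):

#include <cmath>
#include <vector>

// Free Wilson propagator:
//   D(k)      = i gamma_mu sin(k_mu) + W(k),
//   W(k)      = m + sum_mu (1 - cos k_mu),
//   D^{-1}(k) = (-i gamma_mu sin(k_mu) + W(k)) / denom(k),
//   denom(k)  = sum_mu sin^2(k_mu) + W(k)^2.
double wilsonDenom(const std::vector<int> &n,    // momentum integers n_mu
                   const std::vector<int> &latt, // lattice extents L_mu
                   double mass) {
  double sinsq = 0.0;
  double W = mass;
  for (size_t mu = 0; mu < n.size(); ++mu) {
    double kmu = 2.0 * M_PI * n[mu] / latt[mu]; // momphase = n * 2pi / L
    sinsq += std::sin(kmu) * std::sin(kmu);
    W += 1.0 - std::cos(kmu);                   // Wilson term lifts the doublers
  }
  return sinsq + W * W;
}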
typedef typename FermionField::scalar_type ScalComplex; - - typedef Lattice > LatComplex; - - Gamma::GammaMatrix Gmu [] = { - Gamma::GammaX, - Gamma::GammaY, - Gamma::GammaZ, - Gamma::GammaT - }; - - std::vector latt_size = _grid->_fdimensions; - - FermionField num (_grid); num = zero; - LatComplex wilson(_grid); wilson= zero; - LatComplex one (_grid); one = ScalComplex(1.0,0.0); - - LatComplex denom(_grid); denom= zero; - LatComplex kmu(_grid); - ScalComplex ci(0.0,1.0); - // momphase = n * 2pi / L - for(int mu=0;mu::DerivInternal(StencilImpl &st, DoubledGaugeField &U, // Call the single hop //////////////////////// parallel_for (int sss = 0; sss < B._grid->oSites(); sss++) { - Kernels::DhopDir(st, U, st.CommBuf(), sss, sss, B, Btilde, mu, - gamma); + Kernels::DhopDir(st, U, st.CommBuf(), sss, sss, B, Btilde, mu, gamma); } ////////////////////////////////////////////////// @@ -275,8 +270,7 @@ void WilsonFermion::DhopDerivEO(GaugeField &mat, const FermionField &U, } template -void WilsonFermion::Dhop(const FermionField &in, FermionField &out, - int dag) { +void WilsonFermion::Dhop(const FermionField &in, FermionField &out, int dag) { conformable(in._grid, _grid); // verifies full grid conformable(in._grid, out._grid); @@ -286,8 +280,7 @@ void WilsonFermion::Dhop(const FermionField &in, FermionField &out, } template -void WilsonFermion::DhopOE(const FermionField &in, FermionField &out, - int dag) { +void WilsonFermion::DhopOE(const FermionField &in, FermionField &out, int dag) { conformable(in._grid, _cbgrid); // verifies half grid conformable(in._grid, out._grid); // drops the cb check @@ -298,8 +291,7 @@ void WilsonFermion::DhopOE(const FermionField &in, FermionField &out, } template -void WilsonFermion::DhopEO(const FermionField &in, FermionField &out, - int dag) { +void WilsonFermion::DhopEO(const FermionField &in, FermionField &out,int dag) { conformable(in._grid, _cbgrid); // verifies half grid conformable(in._grid, out._grid); // drops the cb check @@ -310,14 +302,12 @@ void WilsonFermion::DhopEO(const FermionField &in, FermionField &out, } template -void WilsonFermion::Mdir(const FermionField &in, FermionField &out, - int dir, int disp) { +void WilsonFermion::Mdir(const FermionField &in, FermionField &out, int dir, int disp) { DhopDir(in, out, dir, disp); } template -void WilsonFermion::DhopDir(const FermionField &in, FermionField &out, - int dir, int disp) { +void WilsonFermion::DhopDir(const FermionField &in, FermionField &out, int dir, int disp) { int skip = (disp == 1) ? 
0 : 1; int dirdisp = dir + skip * 4; int gamma = dir + (1 - skip) * 4; @@ -326,8 +316,7 @@ void WilsonFermion::DhopDir(const FermionField &in, FermionField &out, }; template -void WilsonFermion::DhopDirDisp(const FermionField &in, FermionField &out, - int dirdisp, int gamma, int dag) { +void WilsonFermion::DhopDirDisp(const FermionField &in, FermionField &out,int dirdisp, int gamma, int dag) { Compressor compressor(dag); Stencil.HaloExchange(in, compressor); diff --git a/lib/qcd/action/fermion/WilsonFermion5D.cc b/lib/qcd/action/fermion/WilsonFermion5D.cc index 39e61bc6..62f339ed 100644 --- a/lib/qcd/action/fermion/WilsonFermion5D.cc +++ b/lib/qcd/action/fermion/WilsonFermion5D.cc @@ -29,8 +29,9 @@ Author: paboyle See the full license in the file "LICENSE" in the top level distribution directory *************************************************************************************/ /* END LEGAL */ -#include -#include +#include +#include +#include namespace Grid { namespace QCD { diff --git a/lib/qcd/action/fermion/WilsonFermion5D.h b/lib/qcd/action/fermion/WilsonFermion5D.h index 76a70d4d..e87e927e 100644 --- a/lib/qcd/action/fermion/WilsonFermion5D.h +++ b/lib/qcd/action/fermion/WilsonFermion5D.h @@ -31,7 +31,7 @@ Author: paboyle #ifndef GRID_QCD_WILSON_FERMION_5D_H #define GRID_QCD_WILSON_FERMION_5D_H -#include +#include namespace Grid { namespace QCD { diff --git a/lib/qcd/action/fermion/WilsonKernels.cc b/lib/qcd/action/fermion/WilsonKernels.cc index 3a70bb5b..e44d58b6 100644 --- a/lib/qcd/action/fermion/WilsonKernels.cc +++ b/lib/qcd/action/fermion/WilsonKernels.cc @@ -28,7 +28,7 @@ See the full license in the file "LICENSE" in the top level distribution directory *************************************************************************************/ /* END LEGAL */ -#include +#include namespace Grid { namespace QCD { diff --git a/lib/qcd/action/fermion/WilsonKernelsAsm.cc b/lib/qcd/action/fermion/WilsonKernelsAsm.cc index f627a939..2d11523a 100644 --- a/lib/qcd/action/fermion/WilsonKernelsAsm.cc +++ b/lib/qcd/action/fermion/WilsonKernelsAsm.cc @@ -30,7 +30,7 @@ Author: Guido Cossu *************************************************************************************/ /* END LEGAL */ -#include +#include namespace Grid { diff --git a/lib/qcd/action/fermion/WilsonKernelsHand.cc b/lib/qcd/action/fermion/WilsonKernelsHand.cc index 90496bdf..0a60c107 100644 --- a/lib/qcd/action/fermion/WilsonKernelsHand.cc +++ b/lib/qcd/action/fermion/WilsonKernelsHand.cc @@ -26,7 +26,7 @@ Author: paboyle See the full license in the file "LICENSE" in the top level distribution directory *************************************************************************************/ /* END LEGAL */ -#include +#include #define REGISTER diff --git a/lib/qcd/action/fermion/WilsonTMFermion.cc b/lib/qcd/action/fermion/WilsonTMFermion.cc index f74f9f00..d4604b10 100644 --- a/lib/qcd/action/fermion/WilsonTMFermion.cc +++ b/lib/qcd/action/fermion/WilsonTMFermion.cc @@ -25,7 +25,8 @@ Author: paboyle See the full license in the file "LICENSE" in the top level distribution directory *************************************************************************************/ /* END LEGAL */ -#include +#include +#include namespace Grid { namespace QCD { diff --git a/lib/qcd/action/fermion/WilsonTMFermion.h b/lib/qcd/action/fermion/WilsonTMFermion.h index 5901cb2f..f75c287b 100644 --- a/lib/qcd/action/fermion/WilsonTMFermion.h +++ b/lib/qcd/action/fermion/WilsonTMFermion.h @@ -28,7 +28,8 @@ Author: paboyle #ifndef 
GRID_QCD_WILSON_TM_FERMION_H #define GRID_QCD_WILSON_TM_FERMION_H -#include +#include +#include namespace Grid { diff --git a/lib/qcd/action/fermion/ZMobiusFermion.h b/lib/qcd/action/fermion/ZMobiusFermion.h index d0e00657..32ff7670 100644 --- a/lib/qcd/action/fermion/ZMobiusFermion.h +++ b/lib/qcd/action/fermion/ZMobiusFermion.h @@ -29,7 +29,7 @@ Author: Peter Boyle #ifndef GRID_QCD_ZMOBIUS_FERMION_H #define GRID_QCD_ZMOBIUS_FERMION_H -#include +#include namespace Grid { diff --git a/lib/qcd/action/gauge/Gauge.h b/lib/qcd/action/gauge/Gauge.h new file mode 100644 index 00000000..da670911 --- /dev/null +++ b/lib/qcd/action/gauge/Gauge.h @@ -0,0 +1,70 @@ +/************************************************************************************* + +Grid physics library, www.github.com/paboyle/Grid + +Source file: ./lib/qcd/action/gauge/Gauge_aggregate.h + +Copyright (C) 2015 + +Author: paboyle + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 2 of the License, or +(at your option) any later version. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License along +with this program; if not, write to the Free Software Foundation, Inc., +51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + +See the full license in the file "LICENSE" in the top level distribution +directory +*************************************************************************************/ +/* END LEGAL */ +#ifndef GRID_QCD_GAUGE_AGGREGATE_H +#define GRID_QCD_GAUGE_AGGREGATE_H + +#include +#include +#include +#include + +namespace Grid { +namespace QCD { + +typedef WilsonGaugeAction WilsonGaugeActionR; +typedef WilsonGaugeAction WilsonGaugeActionF; +typedef WilsonGaugeAction WilsonGaugeActionD; +typedef PlaqPlusRectangleAction PlaqPlusRectangleActionR; +typedef PlaqPlusRectangleAction PlaqPlusRectangleActionF; +typedef PlaqPlusRectangleAction PlaqPlusRectangleActionD; +typedef IwasakiGaugeAction IwasakiGaugeActionR; +typedef IwasakiGaugeAction IwasakiGaugeActionF; +typedef IwasakiGaugeAction IwasakiGaugeActionD; +typedef SymanzikGaugeAction SymanzikGaugeActionR; +typedef SymanzikGaugeAction SymanzikGaugeActionF; +typedef SymanzikGaugeAction SymanzikGaugeActionD; + + +typedef WilsonGaugeAction ConjugateWilsonGaugeActionR; +typedef WilsonGaugeAction ConjugateWilsonGaugeActionF; +typedef WilsonGaugeAction ConjugateWilsonGaugeActionD; +typedef PlaqPlusRectangleAction ConjugatePlaqPlusRectangleActionR; +typedef PlaqPlusRectangleAction ConjugatePlaqPlusRectangleActionF; +typedef PlaqPlusRectangleAction ConjugatePlaqPlusRectangleActionD; +typedef IwasakiGaugeAction ConjugateIwasakiGaugeActionR; +typedef IwasakiGaugeAction ConjugateIwasakiGaugeActionF; +typedef IwasakiGaugeAction ConjugateIwasakiGaugeActionD; +typedef SymanzikGaugeAction ConjugateSymanzikGaugeActionR; +typedef SymanzikGaugeAction ConjugateSymanzikGaugeActionF; +typedef SymanzikGaugeAction ConjugateSymanzikGaugeActionD; + +}} + + +#endif diff --git a/lib/qcd/action/pseudofermion/PseudoFermion.h b/lib/qcd/action/pseudofermion/PseudoFermion.h new file mode 100644 index 00000000..bccca3d4 --- /dev/null +++ b/lib/qcd/action/pseudofermion/PseudoFermion.h @@ -0,0 +1,42 @@ 
+/************************************************************************************* + +Grid physics library, www.github.com/paboyle/Grid + +Source file: ./lib/qcd/action/pseudofermion/PseudoFermion_aggregate.h + +Copyright (C) 2015 + +Author: Peter Boyle + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 2 of the License, or +(at your option) any later version. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License along +with this program; if not, write to the Free Software Foundation, Inc., +51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + +See the full license in the file "LICENSE" in the top level distribution +directory +*************************************************************************************/ +/* END LEGAL */ +#ifndef QCD_PSEUDOFERMION_AGGREGATE_H +#define QCD_PSEUDOFERMION_AGGREGATE_H + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#endif diff --git a/lib/qcd/hmc/HMC.h b/lib/qcd/hmc/HMC.h index 05838349..a5dc611e 100644 --- a/lib/qcd/hmc/HMC.h +++ b/lib/qcd/hmc/HMC.h @@ -42,6 +42,9 @@ directory #include +#include +#include + namespace Grid { namespace QCD { @@ -230,7 +233,12 @@ class HybridMonteCarlo { } }; + } // QCD } // Grid +#include +#include +#include + #endif diff --git a/lib/qcd/hmc/HMC_aggregate.h b/lib/qcd/hmc/HMC_aggregate.h new file mode 100644 index 00000000..7d3ec377 --- /dev/null +++ b/lib/qcd/hmc/HMC_aggregate.h @@ -0,0 +1,42 @@ +/************************************************************************************* + +Grid physics library, www.github.com/paboyle/Grid + +Source file: ./lib/qcd/hmc/HMC.h + +Copyright (C) 2015 + +Author: Peter Boyle + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 2 of the License, or +(at your option) any later version. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License along +with this program; if not, write to the Free Software Foundation, Inc., +51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + +See the full license in the file "LICENSE" in the top level distribution +directory +*************************************************************************************/ +/* END LEGAL */ +//-------------------------------------------------------------------- +//-------------------------------------------------------------------- +#ifndef HMC_AGGREGATE_INCLUDED +#define HMC_AGGREGATE_INCLUDED + +#include + +#include +// annoying location; should move this ? 
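Gauge.h, PseudoFermion.h and the HMC_aggregate.h being added here all follow one umbrella pattern: an include guard around an ordered list of submodule headers, where the ordering encodes dependencies, hence remarks like "annoying location; should move this ?". Schematically (the file names below are hypothetical, not Grid headers):

#ifndef MODULE_AGGREGATE_H
#define MODULE_AGGREGATE_H

// Order matters: each header may rely on declarations from the ones
// included before it, so moving a line can break the build even though
// every file compiles cleanly in isolation.
#include <Module/CoreTypes.h>       // fundamental typedefs
#include <Module/Algorithms.h>      // uses CoreTypes
#include <Module/Implementations.h> // uses both of the above

#endif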
+#include +#include +#include + +#endif diff --git a/lib/qcd/representations/Representations.h b/lib/qcd/representations/Representations.h new file mode 100644 index 00000000..22311be0 --- /dev/null +++ b/lib/qcd/representations/Representations.h @@ -0,0 +1,9 @@ +#ifndef REPRESENTATIONS_H +#define REPRESENTATIONS_H + +#include +#include +#include +#include + +#endif diff --git a/lib/qcd/representations/adjoint.h b/lib/qcd/representations/adjoint.h index facc72f1..078d12a1 100644 --- a/lib/qcd/representations/adjoint.h +++ b/lib/qcd/representations/adjoint.h @@ -112,4 +112,4 @@ typedef AdjointRep AdjointRepresentation; } } -#endif \ No newline at end of file +#endif diff --git a/lib/qcd/spin/Dirac.cc b/lib/qcd/spin/Dirac.cc index 8279a4a7..70e052de 100644 --- a/lib/qcd/spin/Dirac.cc +++ b/lib/qcd/spin/Dirac.cc @@ -25,7 +25,8 @@ Author: Peter Boyle See the full license in the file "LICENSE" in the top level distribution directory *************************************************************************************/ /* END LEGAL */ -#include +#include +#include namespace Grid { diff --git a/lib/qcd/spin/Spin.h b/lib/qcd/spin/Spin.h new file mode 100644 index 00000000..107515ed --- /dev/null +++ b/lib/qcd/spin/Spin.h @@ -0,0 +1,5 @@ +#ifndef QCD_SPIN_H +#define QCD_SPIN_H +#include +#include +#endif diff --git a/lib/qcd/utils/SUnAdjoint.h b/lib/qcd/utils/SUnAdjoint.h index 7c1145e3..9d9b77bd 100644 --- a/lib/qcd/utils/SUnAdjoint.h +++ b/lib/qcd/utils/SUnAdjoint.h @@ -179,4 +179,4 @@ typedef SU_Adjoint AdjointMatrices; } } -#endif \ No newline at end of file +#endif diff --git a/lib/qcd/utils/SpaceTimeGrid.cc b/lib/qcd/utils/SpaceTimeGrid.cc index 695c7f69..3ada4a3b 100644 --- a/lib/qcd/utils/SpaceTimeGrid.cc +++ b/lib/qcd/utils/SpaceTimeGrid.cc @@ -25,7 +25,8 @@ Author: Peter Boyle See the full license in the file "LICENSE" in the top level distribution directory *************************************************************************************/ /* END LEGAL */ -#include +#include +#include namespace Grid { namespace QCD { diff --git a/lib/qcd/utils/Utils.h b/lib/qcd/utils/Utils.h new file mode 100644 index 00000000..1bde6f9b --- /dev/null +++ b/lib/qcd/utils/Utils.h @@ -0,0 +1,9 @@ +#ifndef QCD_UTILS_H +#define QCD_UTILS_H +#include +#include +#include +#include +#include +#include +#endif diff --git a/lib/serialisation/BinaryIO.cc b/lib/serialisation/BinaryIO.cc index ae0b0744..0a4f2665 100644 --- a/lib/serialisation/BinaryIO.cc +++ b/lib/serialisation/BinaryIO.cc @@ -26,7 +26,8 @@ Author: paboyle See the full license in the file "LICENSE" in the top level distribution directory *************************************************************************************/ /* END LEGAL */ -#include + +#include using namespace Grid; using namespace std; diff --git a/lib/serialisation/TextIO.cc b/lib/serialisation/TextIO.cc index 39b987d0..c0018bee 100644 --- a/lib/serialisation/TextIO.cc +++ b/lib/serialisation/TextIO.cc @@ -26,7 +26,7 @@ Author: paboyle See the full license in the file "LICENSE" in the top level distribution directory *************************************************************************************/ /* END LEGAL */ -#include +#include using namespace Grid; using namespace std; diff --git a/lib/serialisation/XmlIO.cc b/lib/serialisation/XmlIO.cc index d8953a00..db1535d6 100644 --- a/lib/serialisation/XmlIO.cc +++ b/lib/serialisation/XmlIO.cc @@ -26,7 +26,7 @@ Author: paboyle See the full license in the file "LICENSE" in the top level distribution directory 
*************************************************************************************/ /* END LEGAL */ -#include +#include using namespace Grid; using namespace std; diff --git a/lib/Simd.h b/lib/simd/Simd.h similarity index 99% rename from lib/Simd.h rename to lib/simd/Simd.h index adc2849d..3f2b10dc 100644 --- a/lib/Simd.h +++ b/lib/simd/Simd.h @@ -172,8 +172,8 @@ namespace Grid { }; -#include "simd/Grid_vector_types.h" -#include "simd/Grid_vector_unops.h" +#include +#include namespace Grid { // Default precision diff --git a/lib/stencil/Lebesgue.cc b/lib/stencil/Lebesgue.cc index c83975a9..4551878c 100644 --- a/lib/stencil/Lebesgue.cc +++ b/lib/stencil/Lebesgue.cc @@ -26,7 +26,7 @@ Author: paboyle See the full license in the file "LICENSE" in the top level distribution directory *************************************************************************************/ /* END LEGAL */ -#include +#include #include namespace Grid { diff --git a/lib/Stencil.cc b/lib/stencil/Stencil.cc similarity index 98% rename from lib/Stencil.cc rename to lib/stencil/Stencil.cc index c492efa0..e04a5360 100644 --- a/lib/Stencil.cc +++ b/lib/stencil/Stencil.cc @@ -25,7 +25,7 @@ See the full license in the file "LICENSE" in the top level distribution directory *************************************************************************************/ /* END LEGAL */ -#include "Grid.h" +#include namespace Grid { diff --git a/lib/Stencil.h b/lib/stencil/Stencil.h similarity index 100% rename from lib/Stencil.h rename to lib/stencil/Stencil.h diff --git a/lib/Tensors.h b/lib/tensors/Tensors.h similarity index 100% rename from lib/Tensors.h rename to lib/tensors/Tensors.h diff --git a/lib/Threads.h b/lib/threads/Threads.h similarity index 100% rename from lib/Threads.h rename to lib/threads/Threads.h diff --git a/lib/Init.cc b/lib/util/Init.cc similarity index 99% rename from lib/Init.cc rename to lib/util/Init.cc index 077d53ba..639e8506 100644 --- a/lib/Init.cc +++ b/lib/util/Init.cc @@ -41,12 +41,13 @@ Author: paboyle #include #include #include -#include #include #include #include #include +#include + #include #ifdef __APPLE__ diff --git a/lib/Init.h b/lib/util/Init.h similarity index 100% rename from lib/Init.h rename to lib/util/Init.h diff --git a/lib/Lexicographic.h b/lib/util/Lexicographic.h similarity index 100% rename from lib/Lexicographic.h rename to lib/util/Lexicographic.h diff --git a/lib/util/Util.h b/lib/util/Util.h new file mode 100644 index 00000000..0a6802a0 --- /dev/null +++ b/lib/util/Util.h @@ -0,0 +1,5 @@ +#ifndef GRID_UTIL_H +#define GRID_UTIL_H +#include +#include +#endif From 96d44d5c554935ae8e2e86d84ab5c0bd84116d16 Mon Sep 17 00:00:00 2001 From: paboyle Date: Fri, 24 Feb 2017 19:12:11 -0500 Subject: [PATCH 068/101] Header fix --- lib/log/Log.cc | 1 + 1 file changed, 1 insertion(+) diff --git a/lib/log/Log.cc b/lib/log/Log.cc index a6ef2857..320381cb 100644 --- a/lib/log/Log.cc +++ b/lib/log/Log.cc @@ -32,6 +32,7 @@ directory #include #include +#include namespace Grid { From 06a132e3f981298e35fddeeaea2cd40a7c1ad1d3 Mon Sep 17 00:00:00 2001 From: Christopher Kelly Date: Tue, 28 Feb 2017 13:31:54 -0800 Subject: [PATCH 069/101] Fixes to SHMEM comms --- lib/communicator/Communicator_mpi.cc | 2 ++ lib/communicator/Communicator_shmem.cc | 39 +++++++++++++++-------- lib/qcd/action/fermion/WilsonCompressor.h | 2 +- lib/util/Init.cc | 3 ++ 4 files changed, 32 insertions(+), 14 deletions(-) diff --git a/lib/communicator/Communicator_mpi.cc b/lib/communicator/Communicator_mpi.cc index 1a48d856..470a06c7 
100644 --- a/lib/communicator/Communicator_mpi.cc +++ b/lib/communicator/Communicator_mpi.cc @@ -26,6 +26,8 @@ Author: Peter Boyle *************************************************************************************/ /* END LEGAL */ #include +#include +#include #include namespace Grid { diff --git a/lib/communicator/Communicator_shmem.cc b/lib/communicator/Communicator_shmem.cc index b7a263a4..3c76c808 100644 --- a/lib/communicator/Communicator_shmem.cc +++ b/lib/communicator/Communicator_shmem.cc @@ -27,6 +27,7 @@ Author: Peter Boyle /* END LEGAL */ #include #include +#include namespace Grid { @@ -51,7 +52,7 @@ typedef struct HandShake_t { } HandShake; std::array make_psync_init(void) { - array ret; + std::array ret; ret.fill(SHMEM_SYNC_VALUE); return ret; } @@ -109,7 +110,7 @@ void CartesianCommunicator::GlobalSum(uint32_t &u){ source = u; dest = 0; - shmem_longlong_sum_to_all(&dest,&source,1,0,0,_Nprocessors,llwrk,psync); + shmem_longlong_sum_to_all(&dest,&source,1,0,0,_Nprocessors,llwrk,psync.data()); shmem_barrier_all(); // necessary? u = dest; } @@ -125,7 +126,7 @@ void CartesianCommunicator::GlobalSum(uint64_t &u){ source = u; dest = 0; - shmem_longlong_sum_to_all(&dest,&source,1,0,0,_Nprocessors,llwrk,psync); + shmem_longlong_sum_to_all(&dest,&source,1,0,0,_Nprocessors,llwrk,psync.data()); shmem_barrier_all(); // necessary? u = dest; } @@ -137,7 +138,8 @@ void CartesianCommunicator::GlobalSum(float &f){ source = f; dest =0.0; - shmem_float_sum_to_all(&dest,&source,1,0,0,_Nprocessors,llwrk,psync); + shmem_float_sum_to_all(&dest,&source,1,0,0,_Nprocessors,llwrk,psync.data()); + shmem_barrier_all(); f = dest; } void CartesianCommunicator::GlobalSumVector(float *f,int N) @@ -148,14 +150,16 @@ void CartesianCommunicator::GlobalSumVector(float *f,int N) static std::array psync = psync_init; if ( shmem_addr_accessible(f,_processor) ){ - shmem_float_sum_to_all(f,f,N,0,0,_Nprocessors,llwrk,psync); + shmem_float_sum_to_all(f,f,N,0,0,_Nprocessors,llwrk,psync.data()); + shmem_barrier_all(); return; } for(int i=0;i &lis SHMEM_VET(recv); // shmem_putmem_nb(recv,xmit,bytes,dest,NULL); shmem_putmem(recv,xmit,bytes,dest); + + if ( CommunicatorPolicy == CommunicatorPolicySequential ) shmem_barrier_all(); } void CartesianCommunicator::SendToRecvFromComplete(std::vector &list) { // shmem_quiet(); // I'm done - shmem_barrier_all();// He's done too + if( CommunicatorPolicy == CommunicatorPolicyConcurrent ) shmem_barrier_all();// He's done too } void CartesianCommunicator::Barrier(void) { @@ -301,13 +310,13 @@ void CartesianCommunicator::Broadcast(int root,void* data, int bytes) int words = bytes/4; if ( shmem_addr_accessible(data,_processor) ){ - shmem_broadcast32(data,data,words,root,0,0,shmem_n_pes(),psync); + shmem_broadcast32(data,data,words,root,0,0,shmem_n_pes(),psync.data()); return; } for(int w=0;wHaloGatherDir(source,XpCompress,Xp,face_idx); this->HaloGatherDir(source,YpCompress,Yp,face_idx); this->HaloGatherDir(source,ZpCompress,Zp,face_idx); diff --git a/lib/util/Init.cc b/lib/util/Init.cc index 639e8506..972cb1a8 100644 --- a/lib/util/Init.cc +++ b/lib/util/Init.cc @@ -390,6 +390,9 @@ void Grid_finalize(void) MPI_Finalize(); Grid_unquiesce_nodes(); #endif +#if defined (GRID_COMMS_SHMEM) + shmem_finalize(); +#endif } void * Grid_backtrace_buffer[_NBACKTRACE]; From af230a1fb8fddb3e809b23da4ada10707418eace Mon Sep 17 00:00:00 2001 From: paboyle Date: Tue, 28 Feb 2017 17:05:22 -0500 Subject: [PATCH 070/101] Average the time across the whole machine for outliers --- 
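Summing the per-rank timer over all ranks and dividing by the rank count, as the hunk below does with GlobalSum on DhopComputeTime, reports a machine-wide mean so that a single slow node cannot skew the quoted mflops figure. The same reduction written in plain MPI (a hypothetical standalone helper, not the Grid code itself):

#include <mpi.h>

// Machine-wide mean of a per-rank timing, so one slow rank (an
// "outlier") does not dominate the reported performance figure.
double clusterMeanTime(double localTime, MPI_Comm comm) {
  int nranks = 0;
  MPI_Comm_size(comm, &nranks);
  double sum = localTime;
  MPI_Allreduce(MPI_IN_PLACE, &sum, 1, MPI_DOUBLE, MPI_SUM, comm);
  return sum / nranks;
}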
lib/qcd/action/fermion/WilsonFermion5D.cc | 3 +++ 1 file changed, 3 insertions(+) diff --git a/lib/qcd/action/fermion/WilsonFermion5D.cc b/lib/qcd/action/fermion/WilsonFermion5D.cc index 6027322b..7a7a51c2 100644 --- a/lib/qcd/action/fermion/WilsonFermion5D.cc +++ b/lib/qcd/action/fermion/WilsonFermion5D.cc @@ -194,6 +194,9 @@ void WilsonFermion5D::Report(void) std::cout << GridLogMessage << "WilsonFermion5D ComputeTime1/Calls : " << DhopComputeTime / DhopCalls << " us" << std::endl; std::cout << GridLogMessage << "WilsonFermion5D ComputeTime2/Calls : " << DhopComputeTime2/ DhopCalls << " us" << std::endl; + // Average the compute time + _FourDimGrid->GlobalSum(DhopComputeTime); + DhopComputeTime/=NP; RealD mflops = 1344*volume*DhopCalls/DhopComputeTime/2; // 2 for red black counting std::cout << GridLogMessage << "Average mflops/s per call : " << mflops << std::endl; std::cout << GridLogMessage << "Average mflops/s per call per rank : " << mflops/NP << std::endl; From 3901b17ade73ba05c55c55bd4ab746d1fb71662c Mon Sep 17 00:00:00 2001 From: paboyle Date: Tue, 28 Feb 2017 17:06:45 -0500 Subject: [PATCH 071/101] timeings from BNL --- scripts/scatter | 444 ++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 444 insertions(+) create mode 100644 scripts/scatter diff --git a/scripts/scatter b/scripts/scatter new file mode 100644 index 00000000..09cb2743 --- /dev/null +++ b/scripts/scatter @@ -0,0 +1,444 @@ +11.7987 665962 +15.6078 804738 +16.8195 817117 +11.7002 667457 +12.531 663347 +15.6475 802544 +16.129 819802 +11.7684 665000 +11.6168 667651 +15.1805 800892 +17.6561 822562 +11.4019 664136 +11.9189 663385 +16.0362 798230 +16.0763 818004 +11.5681 662081 +19.7502 662637 +15.5355 806539 +15.8243 821544 +11.16 668250 +13.0345 663874 +15.5772 801823 +16.2306 818114 +11.3338 663786 +11.5693 668819 +15.8559 798630 +16.077 819714 +11.6406 659723 +11.6879 664649 +15.4282 805797 +16.1491 819745 +11.2908 671340 +11.3591 669315 +15.9951 804795 +17.0154 818908 +11.179 668178 +11.6526 664287 +15.688 802454 +16.4822 815271 +11.7621 661223 +11.6672 664459 +15.4835 798722 +16.7267 820133 +11.624 668664 +11.6175 661994 +15.3587 802845 +16.044 821846 +11.3323 665252 +11.7211 662942 +15.4859 802990 +16.3677 824283 +11.4744 662839 +11.8487 664919 +15.6107 797731 +15.8069 820478 +11.2979 658007 +11.7564 663817 +15.541 797783 +17.0808 819743 +12.3383 661733 +36.0835 663562 +15.7731 798800 +15.8485 822127 +11.1223 666914 +12.4756 660221 +16.3073 799764 +16.9154 822327 +12.3991 657394 +11.7097 665528 +16.6175 800274 +15.9862 818149 +11.2491 664158 +12.0396 664001 +16.1483 799878 +16.1754 821353 +32.8124 663041 +11.9903 663036 +16.0966 803422 +16.3731 822092 +11.9109 660189 +11.9452 664600 +9.66713 808509 +17.1584 817437 +11.5856 664571 +11.8809 666913 +15.8264 797351 +16.5976 816830 +11.4465 666261 +12.2163 665461 +16.2477 800076 +16.083 821536 +11.5841 664139 +11.7192 666777 +15.5412 802674 +15.9575 816875 +11.0946 670884 +11.7391 668389 +15.3244 801328 +16.9828 816831 +11.5186 667912 +11.8553 668159 +15.9975 799798 +16.5647 818386 +11.3678 667215 +11.8375 664926 +15.6273 800393 +17.2328 816855 +11.4451 670339 +11.5145 670155 +15.442 801500 +16.8771 823647 +11.5946 664410 +11.7574 667053 +15.7901 801231 +16.1462 821653 +11.8786 656763 +11.5349 663420 +15.8349 802817 +17.0659 812767 +11.5311 662683 +11.6272 667676 +15.9587 803022 +15.809 811142 +11.3299 667866 +11.5119 666844 +15.7014 801714 +16.1993 819118 +11.2353 669254 +11.8388 664464 +15.8903 797444 +16.8866 816635 +11.7656 658494 +11.6656 666166 
+15.256 805288 +16.284 817737 +11.2608 659269 +11.3609 669138 +15.0724 801659 +15.7254 819883 +11.0887 667137 +11.5796 669283 +15.7326 802443 +16.2654 817360 +11.9706 661339 +12.5527 663043 +15.2276 794816 +16.8155 823258 +11.4149 664258 +12.0766 663720 +15.8403 799492 +17.5981 818338 +11.5562 665220 +22.1544 661378 +12.0874 804829 +15.4826 816649 +10.9756 664841 +35.9173 665455 +15.288 801766 +16.1119 822671 +11.4758 663216 +12.3041 666616 +15.2521 800945 +16.1784 819014 +11.3541 663399 +11.863 664907 +15.2269 795055 +16.8058 821320 +11.5113 663369 +13.7629 666975 +15.2298 801234 +15.7366 822774 +11.2174 670346 +12.0583 663598 +15.8147 801926 +15.7665 821852 +11.2676 665752 +11.7262 670282 +15.6965 806129 +15.8385 820080 +11.663 660842 +11.4393 667578 +15.6585 798745 +16.5194 818426 +11.4044 663854 +11.9518 662532 +15.5785 798254 +16.1043 821756 +11.3142 667176 +12.1981 666031 +15.3541 799680 +16.9157 817076 +17.4885 667121 +11.83 660487 +12.1585 801534 +16.3931 820708 +11.1599 665780 +11.9377 663366 +16.555 802357 +16.3592 819296 +14.948 662876 +11.4625 668747 +16.4882 802712 +16.0941 820000 +31.9435 665040 +11.6125 669814 +15.3871 800464 +16.6008 820317 +11.4717 659916 +12.9048 664130 +16.7163 800849 +17.8285 813151 +11.5916 658383 +12.0202 663072 +15.5845 806755 +16.307 817906 +11.3942 669369 +11.8465 668763 +15.2451 802236 +16.5219 819934 +11.6291 662012 +11.6539 660430 +15.4529 803424 +15.8335 821817 +12.0544 659237 +11.6396 665152 +15.3411 802963 +16.6358 821266 +11.4613 665441 +11.902 663221 +15.747 803433 +15.8396 817497 +10.7179 664747 +11.6572 668304 +15.6532 804598 +17.1196 821115 +20.0255 661072 +11.4765 665925 +15.219 805882 +16.1594 815428 +11.3692 662945 +11.8594 663928 +16.1808 799668 +16.6602 821609 +11.5332 660173 +11.9081 662718 +15.9127 801288 +16.325 815644 +11.1836 665425 +11.4505 666855 +15.321 801245 +15.8323 814424 +11.2948 663168 +11.6377 661825 +15.8977 804792 +16.8616 820948 +11.919 659638 +11.6931 662183 +15.4429 803952 +16.2615 817444 +11.2996 664382 +12.692 663975 +15.4407 801448 +15.7445 818021 +11.1849 667460 +11.7541 664391 +16.0659 801714 +16.5602 819929 +11.5849 667401 +13.179 663303 +15.7583 802808 +16.2668 817484 +11.9216 663598 +11.5344 668783 +15.8009 799397 +16.5506 818528 +12.7165 658400 +11.4921 667373 +15.7904 796592 +17.3128 822103 +11.4334 660329 +11.5123 663333 +15.793 796578 +16.0427 818224 +11.4749 659239 +11.681 666986 +15.2297 801274 +15.4179 814059 +11.2333 673591 +11.6406 664406 +15.7716 801475 +16.0821 818385 +11.4388 661914 +11.7308 663324 +15.5508 803090 +15.8351 815836 +10.9782 663492 +11.6368 664583 +15.009 799445 +15.9809 818041 +11.5179 663563 +11.6583 665515 +15.5631 800525 +15.9809 824339 +11.5055 667720 +11.8791 664897 +15.834 805944 +16.4676 818236 +11.4656 658528 +11.6217 668202 +15.6001 801829 +16.1034 820178 +11.4374 665518 +11.8762 670965 +15.7434 796395 +16.3487 816459 +12.0269 662854 +11.6678 668820 +15.7425 803427 +16.7735 819775 +11.4964 666028 +12.5349 665748 +16.0529 803255 +16.7488 816778 +11.4022 663786 +11.8303 667042 +15.4075 797604 +19.2712 818582 +11.4384 663740 +12.1416 661861 +15.8256 801284 +15.963 825871 +11.2444 663654 +11.7056 668604 +15.4614 802378 +16.4954 816582 +11.2539 668951 +11.7545 665935 +15.38 800086 +16.1278 821042 +11.5973 660659 +11.6693 666349 +15.5384 798376 +16.4959 817743 +11.4558 666194 +11.5678 663109 +15.4735 804128 +16.1625 820650 +11.8488 659917 +12.4055 665791 +15.4519 800796 +16.4947 816381 +11.4139 661665 +11.6561 672431 +15.4001 805832 +16.2371 816502 +11.2281 659837 +11.7132 
666310 +15.5137 799183 +16.0413 822949 +11.2602 670131 +11.9453 666615 +15.7025 801522 +17.0962 811704 +11.2628 662112 +11.7406 667851 +15.6135 801576 +16.5934 822863 +11.5247 663162 +11.7487 667251 +16.0055 805087 +16.4157 823424 +11.5227 659356 +11.7859 663296 +16.378 807641 +15.4783 819400 +10.9612 663134 +11.5343 668661 +15.3816 799786 +17.3117 822440 +11.8563 663912 +11.643 668021 +15.4162 801892 +16.4866 818794 +12.668 663039 +11.2972 670117 +16.0342 797551 +16.5605 816882 +11.446 662352 +11.7654 665324 +15.5969 797676 +16.6003 821891 +11.6407 663619 +11.7488 666635 +15.179 801874 +16.4199 817806 +17.372 657731 +11.84 666318 +15.5772 799644 +16.5417 817404 +11.5194 660614 +11.6988 669740 +15.4915 796250 +15.5265 819705 +11.1317 660988 +11.7593 667587 +15.5395 801921 +16.646 816722 +11.3554 661571 +11.6774 664913 +17.2897 801999 +16.3433 816831 +11.782 653740 +11.4549 664438 +15.7027 803217 +16.7364 812992 +11.8037 660087 +11.7091 659665 +15.6669 801636 +18.1318 820841 +11.5725 669543 +12.0148 663589 +15.462 800805 +16.417 818946 +12.7225 660913 +11.7885 667164 +15.527 803806 +16.681 817339 +12.0822 660985 +11.8257 662455 +15.9106 800697 +16.6026 820530 +11.5612 661736 +12.0533 664089 +10.5293 801898 +16.7891 823599 +10.7908 665845 +11.7947 665325 +31.6308 798813 +16.8129 818671 +11.7356 662020 +11.9572 665635 +15.2262 802465 +15.776 821759 +11.3732 669772 From 447c5e6cd73a154b47ea34a7872cf01f2ec37f81 Mon Sep 17 00:00:00 2001 From: paboyle Date: Mon, 13 Mar 2017 01:30:43 +0000 Subject: [PATCH 072/101] Z mobius hermiticity correction --- .travis.yml | 2 +- lib/qcd/action/fermion/CayleyFermion5D.cc | 13 ++++++++++++- tests/debug/Test_cayley_cg.cc | 6 ++++++ tests/debug/Test_cayley_even_odd.cc | 6 ++++++ tests/debug/Test_zmm.cc | 1 - 5 files changed, 25 insertions(+), 3 deletions(-) diff --git a/.travis.yml b/.travis.yml index ae3efda8..9b9d5434 100644 --- a/.travis.yml +++ b/.travis.yml @@ -102,5 +102,5 @@ script: - ../configure --enable-precision=single --enable-simd=SSE4 --enable-comms=mpi-auto - make -j4 - if [[ "$TRAVIS_OS_NAME" == "linux" ]]; then mpirun.openmpi -n 2 ./benchmarks/Benchmark_dwf --threads 1 --mpi 2.1.1.1; fi - - if [[ "$TRAVIS_OS_NAME" == "osx" ]]; then mpirun -n 2 ./benchmarks/Benchmark_dwf --threads 1 --mpi 2.1.1.1; fi + - if [[ "$TRAVIS_OS_NAME" == "osx" ]]; then ./benchmarks/Benchmark_dwf --threads 1 ; fi diff --git a/lib/qcd/action/fermion/CayleyFermion5D.cc b/lib/qcd/action/fermion/CayleyFermion5D.cc index 3525c12c..7c578fe8 100644 --- a/lib/qcd/action/fermion/CayleyFermion5D.cc +++ b/lib/qcd/action/fermion/CayleyFermion5D.cc @@ -190,7 +190,12 @@ void CayleyFermion5D::MooeeDag (const FermionField &psi, FermionField & lower[s]=-cee[s-1]; } } - + // Conjugate the terms ? + for (int s=0;s::MeooeDag5D (const FermionField &psi, FermionField std::vector lower=cs; upper[Ls-1]=-mass*upper[Ls-1]; lower[0] =-mass*lower[0]; + // Conjugate the terms ? 
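The "Conjugate the terms ?" comment marks the heart of this fix: for an operator assembled as M = sum_s c_s K_s from fixed kernels K_s, the adjoint is Mdag = sum_s conj(c_s) Kdag_s, so the daggered routines must conjugate their coefficient tables (the lower and upper diagonals here). With the real coefficients of plain Mobius this is a no-op, which is why the defect only surfaced for ZMobius. A standalone illustration of the coefficient step (not the Grid member function itself):

#include <complex>
#include <vector>

// Adjoint of a coefficient-weighted operator sum: conjugate each c_s.
// For real coefficients (plain Mobius) this changes nothing; for the
// complex ZMobius coefficients it restores the intended hermiticity.
std::vector<std::complex<double> >
daggerCoeffs(const std::vector<std::complex<double> > &c) {
  std::vector<std::complex<double> > cdag(c.size());
  for (size_t s = 0; s < c.size(); ++s) cdag[s] = std::conj(c[s]);
  return cdag;
}

The patch applies the same conjugation in MeooeDag5D below, since both routines share the tridiagonal coefficient structure.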
+ for (int s=0;s gamma(Ls,ComplexD(1.0,0.0)); + std::cout<(Dmob,FGrid,FrbGrid,UGrid,UrbGrid,mass,M5,&RNG4,&RNG5); + std::cout<(ZDmob,FGrid,FrbGrid,UGrid,UrbGrid,mass,M5,&RNG4,&RNG5); + std::cout<(Dzolo,FGrid,FrbGrid,UGrid,UrbGrid,mass,M5,&RNG4,&RNG5); diff --git a/tests/debug/Test_cayley_even_odd.cc b/tests/debug/Test_cayley_even_odd.cc index 7d8d2a12..36a97997 100644 --- a/tests/debug/Test_cayley_even_odd.cc +++ b/tests/debug/Test_cayley_even_odd.cc @@ -81,10 +81,16 @@ int main (int argc, char ** argv) RealD b=1.5;// Scale factor b+c=2, b-c=1 RealD c=0.5; + std::vector gamma(Ls,ComplexD(1.0,0.1)); + std::cout<(Dmob,FGrid,FrbGrid,UGrid,UrbGrid,mass,M5,&RNG4,&RNG5); + std::cout<(ZDmob,FGrid,FrbGrid,UGrid,UrbGrid,mass,M5,&RNG4,&RNG5); + std::cout<(Dzolo,FGrid,FrbGrid,UGrid,UrbGrid,mass,M5,&RNG4,&RNG5); diff --git a/tests/debug/Test_zmm.cc b/tests/debug/Test_zmm.cc index 40263cb9..f389cdd1 100644 --- a/tests/debug/Test_zmm.cc +++ b/tests/debug/Test_zmm.cc @@ -26,7 +26,6 @@ See the full license in the file "LICENSE" in the top level distribution directo *************************************************************************************/ /* END LEGAL */ #include -#include #ifdef TEST_ZMM From b64e004555a29bb68368cc22845b6971c4ae7e71 Mon Sep 17 00:00:00 2001 From: paboyle Date: Mon, 13 Mar 2017 01:59:01 +0000 Subject: [PATCH 073/101] MPI run fail on macos --- .travis.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.travis.yml b/.travis.yml index 9b9d5434..bc6dd0ef 100644 --- a/.travis.yml +++ b/.travis.yml @@ -102,5 +102,5 @@ script: - ../configure --enable-precision=single --enable-simd=SSE4 --enable-comms=mpi-auto - make -j4 - if [[ "$TRAVIS_OS_NAME" == "linux" ]]; then mpirun.openmpi -n 2 ./benchmarks/Benchmark_dwf --threads 1 --mpi 2.1.1.1; fi - - if [[ "$TRAVIS_OS_NAME" == "osx" ]]; then ./benchmarks/Benchmark_dwf --threads 1 ; fi + From 33edde245d45e323275d78cd9e47e7b1502f348b Mon Sep 17 00:00:00 2001 From: Chulwoo Jung Date: Sun, 12 Mar 2017 23:02:42 -0400 Subject: [PATCH 074/101] Changing Dminus(Dag) to use full vectors to work correctly --- lib/qcd/action/fermion/CayleyFermion5D.cc | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/lib/qcd/action/fermion/CayleyFermion5D.cc b/lib/qcd/action/fermion/CayleyFermion5D.cc index 14b4edb6..4aa48aa1 100644 --- a/lib/qcd/action/fermion/CayleyFermion5D.cc +++ b/lib/qcd/action/fermion/CayleyFermion5D.cc @@ -57,10 +57,11 @@ void CayleyFermion5D::Dminus(const FermionField &psi, FermionField &chi) { int Ls=this->Ls; - this->DW(psi,this->tmp(),DaggerNo); + FermionField tmp_f(this->FermionGrid()); + this->DW(psi,tmp_f,DaggerNo); for(int s=0;stmp(),s,s);// chi = (1-c[s] D_W) psi + axpby_ssp(chi,Coeff_t(1.0),psi,-cs[s],tmp_f,s,s);// chi = (1-c[s] D_W) psi } } @@ -112,10 +113,11 @@ void CayleyFermion5D::DminusDag(const FermionField &psi, FermionField &chi { int Ls=this->Ls; - this->DW(psi,this->tmp(),DaggerYes); + FermionField tmp_f(this->FermionGrid()); + this->DW(psi,tmp_f,DaggerYes); for(int s=0;stmp(),s,s);// chi = (1-c[s] D_W) psi + axpby_ssp(chi,Coeff_t(1.0),psi,-cs[s],tmp_f,s,s);// chi = (1-c[s] D_W) psi } } template From 0b61f75c9e106ca5a73d94584792461d7220e7d1 Mon Sep 17 00:00:00 2001 From: Chulwoo Jung Date: Mon, 13 Mar 2017 00:12:43 -0400 Subject: [PATCH 075/101] Adding ZMobius CG test --- tests/solver/Test_zmobius_cg_prec.cc | 111 +++++++++++++++++++++++++++ 1 file changed, 111 insertions(+) create mode 100644 tests/solver/Test_zmobius_cg_prec.cc diff --git a/tests/solver/Test_zmobius_cg_prec.cc 
b/tests/solver/Test_zmobius_cg_prec.cc new file mode 100644 index 00000000..c66b6246 --- /dev/null +++ b/tests/solver/Test_zmobius_cg_prec.cc @@ -0,0 +1,111 @@ +/************************************************************************************* + +Grid physics library, www.github.com/paboyle/Grid + +Source file: ./tests/Test_dwf_cg_prec.cc + +Copyright (C) 2015 + +Author: Peter Boyle + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 2 of the License, or +(at your option) any later version. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License along +with this program; if not, write to the Free Software Foundation, Inc., +51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + +See the full license in the file "LICENSE" in the top level distribution +directory +*************************************************************************************/ +/* END LEGAL */ +#include + +using namespace std; +using namespace Grid; +using namespace Grid::QCD; + +template +struct scal { + d internal; +}; + +Gamma::Algebra Gmu[] = {Gamma::Algebra::GammaX, Gamma::Algebra::GammaY, Gamma::Algebra::GammaZ, + Gamma::Algebra::GammaT}; + +int main(int argc, char** argv) { + Grid_init(&argc, &argv); + + const int Ls = 16; + + GridCartesian* UGrid = SpaceTimeGrid::makeFourDimGrid( + GridDefaultLatt(), GridDefaultSimd(Nd, vComplex::Nsimd()), + GridDefaultMpi()); + GridRedBlackCartesian* UrbGrid = + SpaceTimeGrid::makeFourDimRedBlackGrid(UGrid); + GridCartesian* FGrid = SpaceTimeGrid::makeFiveDimGrid(Ls, UGrid); + GridRedBlackCartesian* FrbGrid = + SpaceTimeGrid::makeFiveDimRedBlackGrid(Ls, UGrid); + + std::vector seeds4({1, 2, 3, 4}); + std::vector seeds5({5, 6, 7, 8}); + GridParallelRNG RNG5(FGrid); + RNG5.SeedFixedIntegers(seeds5); + GridParallelRNG RNG4(UGrid); + RNG4.SeedFixedIntegers(seeds4); + + LatticeFermion src(FGrid); + random(RNG5, src); + LatticeFermion result(FGrid); + result = zero; + LatticeGaugeField Umu(UGrid); + + SU3::HotConfiguration(RNG4, Umu); + + std::cout << GridLogMessage << "Lattice dimensions: " << GridDefaultLatt() + << " Ls: " << Ls << std::endl; + + std::vector U(4, UGrid); + for (int mu = 0; mu < Nd; mu++) { + U[mu] = PeekIndex(Umu, mu); + } + + RealD mass = 0.01; + RealD M5 = 1.8; + std::vector < std::complex > omegas; + for(int i=0;i temp (0.25+0.00*i, 0.0+0.00*i); + omegas.push_back(temp); + } +// DomainWallFermionR Ddwf(Umu, *FGrid, *FrbGrid, *UGrid, *UrbGrid, mass, M5); + ZMobiusFermionR Ddwf(Umu, *FGrid, *FrbGrid, *UGrid, *UrbGrid, mass, M5, omegas,1.,0.); + + LatticeFermion src_o(FrbGrid); + LatticeFermion result_o(FrbGrid); + pickCheckerboard(Odd, src_o, src); + result_o = zero; + + GridStopWatch CGTimer; + + SchurDiagMooeeOperator HermOpEO(Ddwf); + ConjugateGradient CG(1.0e-8, 10000, 0);// switch off the assert + + CGTimer.Start(); + CG(HermOpEO, src_o, result_o); + CGTimer.Stop(); + + std::cout << GridLogMessage << "Total CG time : " << CGTimer.Elapsed() + << std::endl; + + std::cout << GridLogMessage << "######## Dhop calls summary" << std::endl; + Ddwf.Report(); + + Grid_finalize(); +} From 8dc57a1e25352a58c36b11de450908f24dab43c3 Mon Sep 17 00:00:00 2001 From: paboyle Date: Mon, 
13 Mar 2017 11:11:46 +0000 Subject: [PATCH 076/101] Layout change --- lib/qcd/action/fermion/CayleyFermion5D.cc | 24 +++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/lib/qcd/action/fermion/CayleyFermion5D.cc b/lib/qcd/action/fermion/CayleyFermion5D.cc index 4ff28124..90614295 100644 --- a/lib/qcd/action/fermion/CayleyFermion5D.cc +++ b/lib/qcd/action/fermion/CayleyFermion5D.cc @@ -64,6 +64,18 @@ void CayleyFermion5D::Dminus(const FermionField &psi, FermionField &chi) axpby_ssp(chi,Coeff_t(1.0),psi,-cs[s],tmp_f,s,s);// chi = (1-c[s] D_W) psi } } +template +void CayleyFermion5D::DminusDag(const FermionField &psi, FermionField &chi) +{ + int Ls=this->Ls; + + FermionField tmp_f(this->FermionGrid()); + this->DW(psi,tmp_f,DaggerYes); + + for(int s=0;s void CayleyFermion5D::CayleyReport(void) @@ -108,18 +120,6 @@ template void CayleyFermion5D::CayleyZeroCounters(void) } -template -void CayleyFermion5D::DminusDag(const FermionField &psi, FermionField &chi) -{ - int Ls=this->Ls; - - FermionField tmp_f(this->FermionGrid()); - this->DW(psi,tmp_f,DaggerYes); - - for(int s=0;s void CayleyFermion5D::M5D (const FermionField &psi, FermionField &chi) { From e7c36771ed79a8fb4a207c8e39990aa6946f32af Mon Sep 17 00:00:00 2001 From: paboyle Date: Wed, 15 Mar 2017 14:23:33 -0400 Subject: [PATCH 077/101] ZMobius prep for asm --- lib/qcd/action/fermion/WilsonKernelsAsm.cc | 1 - .../action/fermion/WilsonKernelsAsmAvx512.h | 112 +++++++++++++++++- 2 files changed, 107 insertions(+), 6 deletions(-) diff --git a/lib/qcd/action/fermion/WilsonKernelsAsm.cc b/lib/qcd/action/fermion/WilsonKernelsAsm.cc index d43d9c92..365be69a 100644 --- a/lib/qcd/action/fermion/WilsonKernelsAsm.cc +++ b/lib/qcd/action/fermion/WilsonKernelsAsm.cc @@ -101,7 +101,6 @@ template void WilsonKernels::AsmDhopSiteExt(StencilImpl &st,LebesgueOrder & l template void WilsonKernels::AsmDhopSiteDagExt(StencilImpl &st,LebesgueOrder & lo,DoubledGaugeField &U, SiteHalfSpinor *buf,\ int ss,int ssU,int Ls,int Ns,const FermionField &in, FermionField &out);\ - INSTANTIATE_ASM(WilsonImplF); INSTANTIATE_ASM(WilsonImplD); INSTANTIATE_ASM(ZWilsonImplF); diff --git a/lib/qcd/action/fermion/WilsonKernelsAsmAvx512.h b/lib/qcd/action/fermion/WilsonKernelsAsmAvx512.h index 6d602a2b..1839e9bc 100644 --- a/lib/qcd/action/fermion/WilsonKernelsAsmAvx512.h +++ b/lib/qcd/action/fermion/WilsonKernelsAsmAvx512.h @@ -49,14 +49,10 @@ static Vector signsF; } static int signInitF = setupSigns(signsF); + #define MAYBEPERM(A,perm) if (perm) { A ; } #define MULT_2SPIN(ptr,pf) MULT_ADDSUB_2SPIN(ptr,pf) #define COMPLEX_SIGNS(isigns) vComplexF *isigns = &signsF[0]; - - -#define INTERIOR_AND_EXTERIOR -#undef INTERIOR -#undef EXTERIOR ///////////////////////////////////////////////////////////////// // XYZT vectorised, undag Kernel, single @@ -70,6 +66,11 @@ WilsonKernels::AsmDhopSite(StencilImpl &st,LebesgueOrder & lo,Doubl int ss,int ssU,int Ls,int Ns,const FermionField &in, FermionField &out) #include +template<> void +WilsonKernels::AsmDhopSite(StencilImpl &st,LebesgueOrder & lo,DoubledGaugeField &U, SiteHalfSpinor *buf, + int ss,int ssU,int Ls,int Ns,const FermionField &in, FermionField &out) +#include + #undef INTERIOR_AND_EXTERIOR #define INTERIOR #undef EXTERIOR @@ -78,6 +79,12 @@ WilsonKernels::AsmDhopSiteInt(StencilImpl &st,LebesgueOrder & lo,Do int ss,int ssU,int Ls,int Ns,const FermionField &in, FermionField &out) #include +template<> void +WilsonKernels::AsmDhopSiteInt(StencilImpl &st,LebesgueOrder & lo,DoubledGaugeField &U, 
SiteHalfSpinor *buf, + int ss,int ssU,int Ls,int Ns,const FermionField &in, FermionField &out) +#include + + #undef INTERIOR_AND_EXTERIOR #undef INTERIOR #define EXTERIOR @@ -85,6 +92,11 @@ template<> void WilsonKernels::AsmDhopSiteExt(StencilImpl &st,LebesgueOrder & lo,DoubledGaugeField &U, SiteHalfSpinor *buf, int ss,int ssU,int Ls,int Ns,const FermionField &in, FermionField &out) #include + +template<> void +WilsonKernels::AsmDhopSiteExt(StencilImpl &st,LebesgueOrder & lo,DoubledGaugeField &U, SiteHalfSpinor *buf, + int ss,int ssU,int Ls,int Ns,const FermionField &in, FermionField &out) +#include ///////////////////////////////////////////////////////////////// // XYZT vectorised, dag Kernel, single @@ -98,6 +110,11 @@ WilsonKernels::AsmDhopSiteDag(StencilImpl &st,LebesgueOrder & lo,Do int ss,int ssU,int Ls,int Ns,const FermionField &in, FermionField &out) #include +template<> void +WilsonKernels::AsmDhopSiteDag(StencilImpl &st,LebesgueOrder & lo,DoubledGaugeField &U, SiteHalfSpinor *buf, + int ss,int ssU,int Ls,int Ns,const FermionField &in, FermionField &out) +#include + #undef INTERIOR_AND_EXTERIOR #define INTERIOR #undef EXTERIOR @@ -106,6 +123,11 @@ WilsonKernels::AsmDhopSiteDagInt(StencilImpl &st,LebesgueOrder & lo int ss,int ssU,int Ls,int Ns,const FermionField &in, FermionField &out) #include +template<> void +WilsonKernels::AsmDhopSiteDagInt(StencilImpl &st,LebesgueOrder & lo,DoubledGaugeField &U, SiteHalfSpinor *buf, + int ss,int ssU,int Ls,int Ns,const FermionField &in, FermionField &out) +#include + #undef INTERIOR_AND_EXTERIOR #undef INTERIOR #define EXTERIOR @@ -114,6 +136,11 @@ WilsonKernels::AsmDhopSiteDagExt(StencilImpl &st,LebesgueOrder & lo int ss,int ssU,int Ls,int Ns,const FermionField &in, FermionField &out) #include +template<> void +WilsonKernels::AsmDhopSiteDagExt(StencilImpl &st,LebesgueOrder & lo,DoubledGaugeField &U, SiteHalfSpinor *buf, + int ss,int ssU,int Ls,int Ns,const FermionField &in, FermionField &out) +#include + #undef MAYBEPERM #undef MULT_2SPIN #define MAYBEPERM(A,B) @@ -130,6 +157,10 @@ template<> void WilsonKernels::AsmDhopSite(StencilImpl &st,LebesgueOrder & lo,DoubledGaugeField &U, SiteHalfSpinor *buf, int ss,int ssU,int Ls,int Ns,const FermionField &in, FermionField &out) #include +template<> void +WilsonKernels::AsmDhopSite(StencilImpl &st,LebesgueOrder & lo,DoubledGaugeField &U, SiteHalfSpinor *buf, + int ss,int ssU,int Ls,int Ns,const FermionField &in, FermionField &out) +#include #undef INTERIOR_AND_EXTERIOR #define INTERIOR @@ -138,6 +169,10 @@ template<> void WilsonKernels::AsmDhopSiteInt(StencilImpl &st,LebesgueOrder & lo,DoubledGaugeField &U, SiteHalfSpinor *buf, int ss,int ssU,int Ls,int Ns,const FermionField &in, FermionField &out) #include +template<> void +WilsonKernels::AsmDhopSiteInt(StencilImpl &st,LebesgueOrder & lo,DoubledGaugeField &U, SiteHalfSpinor *buf, + int ss,int ssU,int Ls,int Ns,const FermionField &in, FermionField &out) +#include #undef INTERIOR_AND_EXTERIOR #undef INTERIOR @@ -149,6 +184,11 @@ WilsonKernels::AsmDhopSiteExt(StencilImpl &st,LebesgueOrde int ss,int ssU,int Ls,int Ns,const FermionField &in, FermionField &out) #include +template<> void +WilsonKernels::AsmDhopSiteExt(StencilImpl &st,LebesgueOrder & lo,DoubledGaugeField &U, SiteHalfSpinor *buf, + int ss,int ssU,int Ls,int Ns,const FermionField &in, FermionField &out) +#include + ///////////////////////////////////////////////////////////////// // Ls vectorised, dag Kernel, single ///////////////////////////////////////////////////////////////// @@ -160,6 
+200,10 @@ template<> void WilsonKernels::AsmDhopSiteDag(StencilImpl &st,LebesgueOrder & lo,DoubledGaugeField &U,SiteHalfSpinor *buf, int ss,int ssU,int Ls,int Ns,const FermionField &in, FermionField &out) #include +template<> void +WilsonKernels::AsmDhopSiteDag(StencilImpl &st,LebesgueOrder & lo,DoubledGaugeField &U,SiteHalfSpinor *buf, + int ss,int ssU,int Ls,int Ns,const FermionField &in, FermionField &out) +#include #undef INTERIOR_AND_EXTERIOR #define INTERIOR @@ -168,6 +212,10 @@ template<> void WilsonKernels::AsmDhopSiteDagInt(StencilImpl &st,LebesgueOrder & lo,DoubledGaugeField &U,SiteHalfSpinor *buf, int ss,int ssU,int Ls,int Ns,const FermionField &in, FermionField &out) #include +template<> void +WilsonKernels::AsmDhopSiteDagInt(StencilImpl &st,LebesgueOrder & lo,DoubledGaugeField &U,SiteHalfSpinor *buf, + int ss,int ssU,int Ls,int Ns,const FermionField &in, FermionField &out) +#include #undef INTERIOR_AND_EXTERIOR #undef INTERIOR @@ -176,11 +224,17 @@ template<> void WilsonKernels::AsmDhopSiteDagExt(StencilImpl &st,LebesgueOrder & lo,DoubledGaugeField &U,SiteHalfSpinor *buf, int ss,int ssU,int Ls,int Ns,const FermionField &in, FermionField &out) #include +template<> void +WilsonKernels::AsmDhopSiteDagExt(StencilImpl &st,LebesgueOrder & lo,DoubledGaugeField &U,SiteHalfSpinor *buf, + int ss,int ssU,int Ls,int Ns,const FermionField &in, FermionField &out) +#include #undef COMPLEX_SIGNS #undef MAYBEPERM #undef MULT_2SPIN + + /////////////////////////////////////////////////////////// // If we are AVX512 specialise the double precision routine /////////////////////////////////////////////////////////// @@ -210,6 +264,10 @@ template<> void WilsonKernels::AsmDhopSite(StencilImpl &st,LebesgueOrder & lo,DoubledGaugeField &U, SiteHalfSpinor *buf, int ss,int ssU,int Ls,int Ns,const FermionField &in, FermionField &out) #include +template<> void +WilsonKernels::AsmDhopSite(StencilImpl &st,LebesgueOrder & lo,DoubledGaugeField &U, SiteHalfSpinor *buf, + int ss,int ssU,int Ls,int Ns,const FermionField &in, FermionField &out) +#include #undef INTERIOR_AND_EXTERIOR #define INTERIOR @@ -218,6 +276,10 @@ template<> void WilsonKernels::AsmDhopSiteInt(StencilImpl &st,LebesgueOrder & lo,DoubledGaugeField &U, SiteHalfSpinor *buf, int ss,int ssU,int Ls,int Ns,const FermionField &in, FermionField &out) #include +template<> void +WilsonKernels::AsmDhopSiteInt(StencilImpl &st,LebesgueOrder & lo,DoubledGaugeField &U, SiteHalfSpinor *buf, + int ss,int ssU,int Ls,int Ns,const FermionField &in, FermionField &out) +#include #undef INTERIOR_AND_EXTERIOR #undef INTERIOR @@ -226,6 +288,10 @@ template<> void WilsonKernels::AsmDhopSiteExt(StencilImpl &st,LebesgueOrder & lo,DoubledGaugeField &U, SiteHalfSpinor *buf, int ss,int ssU,int Ls,int Ns,const FermionField &in, FermionField &out) #include +template<> void +WilsonKernels::AsmDhopSiteExt(StencilImpl &st,LebesgueOrder & lo,DoubledGaugeField &U, SiteHalfSpinor *buf, + int ss,int ssU,int Ls,int Ns,const FermionField &in, FermionField &out) +#include ///////////////////////////////////////////////////////////////// // XYZT vectorised, dag Kernel, single @@ -238,6 +304,10 @@ template<> void WilsonKernels::AsmDhopSiteDag(StencilImpl &st,LebesgueOrder & lo,DoubledGaugeField &U, SiteHalfSpinor *buf, int ss,int ssU,int Ls,int Ns,const FermionField &in, FermionField &out) #include +template<> void +WilsonKernels::AsmDhopSiteDag(StencilImpl &st,LebesgueOrder & lo,DoubledGaugeField &U, SiteHalfSpinor *buf, + int ss,int ssU,int Ls,int Ns,const FermionField &in, FermionField 
&out) +#include #undef INTERIOR_AND_EXTERIOR #define INTERIOR @@ -246,6 +316,10 @@ template<> void WilsonKernels::AsmDhopSiteDagInt(StencilImpl &st,LebesgueOrder & lo,DoubledGaugeField &U, SiteHalfSpinor *buf, int ss,int ssU,int Ls,int Ns,const FermionField &in, FermionField &out) #include +template<> void +WilsonKernels::AsmDhopSiteDagInt(StencilImpl &st,LebesgueOrder & lo,DoubledGaugeField &U, SiteHalfSpinor *buf, + int ss,int ssU,int Ls,int Ns,const FermionField &in, FermionField &out) +#include #undef INTERIOR_AND_EXTERIOR #undef INTERIOR @@ -254,6 +328,10 @@ template<> void WilsonKernels::AsmDhopSiteDagExt(StencilImpl &st,LebesgueOrder & lo,DoubledGaugeField &U, SiteHalfSpinor *buf, int ss,int ssU,int Ls,int Ns,const FermionField &in, FermionField &out) #include +template<> void +WilsonKernels::AsmDhopSiteDagExt(StencilImpl &st,LebesgueOrder & lo,DoubledGaugeField &U, SiteHalfSpinor *buf, + int ss,int ssU,int Ls,int Ns,const FermionField &in, FermionField &out) +#include #undef MAYBEPERM #undef MULT_2SPIN @@ -271,6 +349,10 @@ template<> void WilsonKernels::AsmDhopSite(StencilImpl &st,LebesgueOrder & lo,DoubledGaugeField &U, SiteHalfSpinor *buf, int ss,int ssU,int Ls,int Ns,const FermionField &in, FermionField &out) #include +template<> void +WilsonKernels::AsmDhopSite(StencilImpl &st,LebesgueOrder & lo,DoubledGaugeField &U, SiteHalfSpinor *buf, + int ss,int ssU,int Ls,int Ns,const FermionField &in, FermionField &out) +#include #undef INTERIOR_AND_EXTERIOR #define INTERIOR @@ -279,6 +361,10 @@ template<> void WilsonKernels::AsmDhopSiteInt(StencilImpl &st,LebesgueOrder & lo,DoubledGaugeField &U, SiteHalfSpinor *buf, int ss,int ssU,int Ls,int Ns,const FermionField &in, FermionField &out) #include +template<> void +WilsonKernels::AsmDhopSiteInt(StencilImpl &st,LebesgueOrder & lo,DoubledGaugeField &U, SiteHalfSpinor *buf, + int ss,int ssU,int Ls,int Ns,const FermionField &in, FermionField &out) +#include #undef INTERIOR_AND_EXTERIOR #undef INTERIOR @@ -289,6 +375,10 @@ template<> void WilsonKernels::AsmDhopSiteExt(StencilImpl &st,LebesgueOrder & lo,DoubledGaugeField &U, SiteHalfSpinor *buf, int ss,int ssU,int Ls,int Ns,const FermionField &in, FermionField &out) #include +template<> void +WilsonKernels::AsmDhopSiteExt(StencilImpl &st,LebesgueOrder & lo,DoubledGaugeField &U, SiteHalfSpinor *buf, + int ss,int ssU,int Ls,int Ns,const FermionField &in, FermionField &out) +#include ///////////////////////////////////////////////////////////////// // Ls vectorised, dag Kernel, single @@ -301,6 +391,10 @@ template<> void WilsonKernels::AsmDhopSiteDag(StencilImpl &st,LebesgueOrder & lo,DoubledGaugeField &U,SiteHalfSpinor *buf, int ss,int ssU,int Ls,int Ns,const FermionField &in, FermionField &out) #include +template<> void +WilsonKernels::AsmDhopSiteDag(StencilImpl &st,LebesgueOrder & lo,DoubledGaugeField &U,SiteHalfSpinor *buf, + int ss,int ssU,int Ls,int Ns,const FermionField &in, FermionField &out) +#include #undef INTERIOR_AND_EXTERIOR #define INTERIOR @@ -309,6 +403,10 @@ template<> void WilsonKernels::AsmDhopSiteDagInt(StencilImpl &st,LebesgueOrder & lo,DoubledGaugeField &U,SiteHalfSpinor *buf, int ss,int ssU,int Ls,int Ns,const FermionField &in, FermionField &out) #include +template<> void +WilsonKernels::AsmDhopSiteDagInt(StencilImpl &st,LebesgueOrder & lo,DoubledGaugeField &U,SiteHalfSpinor *buf, + int ss,int ssU,int Ls,int Ns,const FermionField &in, FermionField &out) +#include #undef INTERIOR_AND_EXTERIOR #undef INTERIOR @@ -317,6 +415,10 @@ template<> void 
From 8c8473998df2ae7beb22d9bd188e6f6cadd7d5d9 Mon Sep 17 00:00:00 2001
From: paboyle
Date: Tue, 21 Mar 2017 22:29:51 -0400
Subject: [PATCH 078/101] Average the comm time over the whole cluster.

---
 lib/stencil/Stencil.h | 1 +
 1 file changed, 1 insertion(+)

diff --git a/lib/stencil/Stencil.h b/lib/stencil/Stencil.h
index d833c53b..479cd979 100644
--- a/lib/stencil/Stencil.h
+++ b/lib/stencil/Stencil.h
@@ -388,6 +388,7 @@ class CartesianStencil { // Stencil runs along coordinate axes only; NO diagonal
     RealD NP = _grid->_Nprocessors;
     RealD NN = _grid->NodeCount();
+    _grid->GlobalSum(commtime);    commtime/=NP;
     if ( calls > 0. ) {
       std::cout << GridLogMessage << " Stencil calls "<

Date: Tue, 21 Mar 2017 22:30:29 -0400
Subject: [PATCH 079/101] Save some code for static huge TLBs. It is ifdef'ed
 out but an interesting root-only experiment. No gain from it.

---
 lib/communicator/Communicator_mpi3.cc | 48 ++++++++++++++++++++++++++-
 1 file changed, 47 insertions(+), 1 deletion(-)

diff --git a/lib/communicator/Communicator_mpi3.cc b/lib/communicator/Communicator_mpi3.cc
index 2f3f1d67..eac003ce 100644
--- a/lib/communicator/Communicator_mpi3.cc
+++ b/lib/communicator/Communicator_mpi3.cc
@@ -33,8 +33,14 @@ Author: Peter Boyle
 #include
 #include
 #include
+#include
+#include
+#include
 #include
 //#include
+#ifndef SHM_HUGETLB
+#define SHM_HUGETLB 04000
+#endif
 namespace Grid {
@@ -189,8 +195,9 @@ void CartesianCommunicator::Init(int *argc, char ***argv) {
   ShmCommBuf = 0;
   ShmCommBufs.resize(ShmSize);
-  char shm_name [NAME_MAX];
+#if 1
+  char shm_name [NAME_MAX];
   if ( ShmRank == 0 ) {
     for(int r=0;r shmids(ShmSize);
+
+  if ( ShmRank == 0 ) {
+    for(int r=0;r
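Two asides on the patches above. The one-line Stencil.h change makes the reported communication time a cluster-wide average rather than a per-rank value: GlobalSum adds commtime across all ranks before the division by the processor count NP. The same reduction expressed in raw MPI rather than Grid's wrapper (a sketch, not the library's code):

    #include <mpi.h>

    // Cluster-wide average of a per-rank timing, mirroring
    //   _grid->GlobalSum(commtime); commtime /= NP;
    double ClusterAverage(double local, MPI_Comm comm) {
      double sum = 0.0;
      int np = 0;
      MPI_Allreduce(&local, &sum, 1, MPI_DOUBLE, MPI_SUM, comm);
      MPI_Comm_size(comm, &np);
      return sum / np;
    }

The Communicator_mpi3.cc experiment keeps the existing shm_open path live under "#if 1" and parks the System V alternative, shmget with SHM_HUGETLB, under the #else branch; most of that parked code was lost from this dump. The sketch below is therefore illustrative only: it shows the general technique, assuming hugepages are configured and the process is privileged (the root-only caveat in the commit message), with an invented key.

    #include <cstdio>
    #include <cstring>
    #include <sys/ipc.h>
    #include <sys/shm.h>

    #ifndef SHM_HUGETLB
    #define SHM_HUGETLB 04000
    #endif

    int main() {
      const size_t size = 2 * 1024 * 1024;  // one 2 MB huge page
      const key_t  key  = 0x4545;           // illustrative key, not Grid's
      // SHM_HUGETLB asks the kernel to back the segment with huge pages,
      // cutting TLB pressure on large communication buffers.
      int id = shmget(key, size, IPC_CREAT | SHM_HUGETLB | 0666);
      if (id < 0) { perror("shmget"); return 1; }
      void *buf = shmat(id, nullptr, 0);    // map the segment
      if (buf == (void *)-1) { perror("shmat"); return 1; }
      std::memset(buf, 0, size);            // touch pages to fault them in
      shmdt(buf);
      shmctl(id, IPC_RMID, nullptr);        // mark the segment for removal
      return 0;
    }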
Date: Sat, 25 Mar 2017 09:25:46 -0400
Subject: [PATCH 080/101] Added a BNL log

---
 scripts/loop.log | 15432 +++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 15432 insertions(+)
 create mode 100644 scripts/loop.log

diff --git a/scripts/loop.log b/scripts/loop.log
new file mode 100644
index 00000000..21b00a0d
--- /dev/null
+++ b/scripts/loop.log
@@ -0,0 +1,15432 @@
+Grid : Message : Stencil 12.6786 GB/s per node
+Grid : Message : Stencil 17.2339 GB/s per node
+Grid : Message : Stencil 17.5982 GB/s per node
+Grid : Message : Stencil 12.79 GB/s per node
+Grid : Message : Average mflops/s per call per node : 671730
+Grid : Message : Average mflops/s per call per node : 802660
+Grid : Message : Average mflops/s per call per node : 827085
+Grid : Message : Average mflops/s per call per node : 665055
+Grid : Message : Average mflops/s per call per node (full): 314188
+Grid : Message : Average mflops/s per call per node (full): 437622
+Grid : Message : Average mflops/s per call per node (full): 446214
+Grid : Message : Average mflops/s per call per node (full): 304778
+Grid : Message : Stencil 12.3348 GB/s per node
+Grid : Message : Stencil 16.7072 GB/s per node
+Grid : Message : Stencil 17.5215 GB/s per node
+Grid : Message : Stencil 13.0792 GB/s per node
+Grid : Message : Average mflops/s per call per node : 673845
+Grid : Message : Average mflops/s per call per node : 803432
+Grid : Message : Average mflops/s per call per node : 827130
+Grid : Message : Average mflops/s per call per node : 661863
+Grid : Message : Average mflops/s per call per node (full): 312916
+Grid : Message : Average mflops/s per call per node (full): 437409
+Grid : Message : Average mflops/s per call per node (full): 446117
+Grid : Message : Average mflops/s per call per node (full): 305031
+Grid : Message : Stencil 13.251 GB/s per node
+Grid : Message : Stencil 17.0389 GB/s per node
+Grid : Message : Stencil 17.9444 GB/s per node
+Grid : Message : Stencil 12.8909 GB/s per node
+Grid : Message : Average mflops/s per call per node : 670180
+Grid : Message : Average mflops/s per call per node : 803789
+Grid : Message : Average mflops/s per call per node : 820549
+Grid : Message : Average mflops/s per call per node : 665372
+Grid : Message : Average mflops/s per call per node (full): 316277
+Grid : Message : Average mflops/s per call per node (full): 435976
+Grid : Message : Average mflops/s per call per node (full): 446442
+Grid : Message : Average mflops/s per call per node (full): 305106
+Grid : Message : Stencil 13.6097 GB/s per node
+Grid : Message : Stencil 17.7981 GB/s per node
+Grid : Message : Stencil 17.5185 GB/s per node
+Grid : Message : Stencil 14.4014 GB/s per node
+Grid : Message : Average mflops/s per call per node : 666791
+Grid : Message : Average mflops/s per call per node : 799898
+Grid : Message : Average mflops/s per call per node : 826498
+Grid : Message : Average mflops/s per call per node : 663479
+Grid : Message : Average mflops/s per call per node (full): 316681
+Grid : Message : Average mflops/s per call per node (full): 437896
+Grid : Message : Average mflops/s per call per node (full): 445597
+Grid : Message : Average mflops/s per call per node (full): 307770
+Grid : Message : Stencil 14.2804 GB/s per node
+Grid : Message : Stencil 16.6792 GB/s per node
+Grid : Message : Stencil 18.2657 GB/s per node
+Grid : Message : Stencil 12.4275 GB/s per node
+Grid : Message : Average mflops/s per call per node : 661518
+Grid : Message : Average mflops/s per call per node : 804204
+Grid : Message : Average mflops/s per call per node : 821252
+Grid : Message : Average mflops/s per call per node : 669218
+Grid : Message : Average mflops/s per call per node (full): 315153
+Grid : Message : Average mflops/s per call per node (full): 436294
+Grid : Message : Average mflops/s per call per node (full): 448032
+Grid : Message : Average mflops/s per call per node (full): 305047
+Grid : Message : Stencil 13.1218 GB/s per node
+Grid : Message : Stencil 16.6241 GB/s per node
+Grid : Message : Stencil 17.8377 GB/s per node
+Grid : Message : Stencil 13.1379 GB/s per node
+Grid : Message : Average mflops/s per call per node : 667798
+Grid : Message : Average mflops/s per call per node : 804417
+Grid : Message : Average mflops/s per call per node : 816011
+Grid : Message : Average mflops/s per call per node : 658982
+Grid : Message : Average mflops/s per call per node (full): 314849
+Grid : Message : Average mflops/s per call per node (full): 436163
+Grid : Message : Average mflops/s per call per node (full): 438805
+Grid : Message : Average mflops/s per call per node (full): 304163
+Grid : Message : Stencil 13.5449 GB/s per node
+Grid : Message : Stencil 17.0727 GB/s per node
+Grid : Message : Stencil 18.1857 GB/s per node
+Grid : Message : Stencil 13.3749 GB/s per node
+Grid : Message : Average mflops/s per call per node : 667016
+Grid : Message : Average mflops/s per call per node : 805999
+Grid : Message : Average mflops/s per call
per node : 825220 +Grid : Message : Average mflops/s per call per node : 664181 +Grid : Message : Average mflops/s per call per node (full): 315059 +Grid : Message : Average mflops/s per call per node (full): 438031 +Grid : Message : Average mflops/s per call per node (full): 447100 +Grid : Message : Average mflops/s per call per node (full): 306033 +Grid : Message : Stencil 12.7492 GB/s per node +Grid : Message : Stencil 16.8398 GB/s per node +Grid : Message : Stencil 18.5169 GB/s per node +Grid : Message : Stencil 14.1175 GB/s per node +Grid : Message : Average mflops/s per call per node : 667433 +Grid : Message : Average mflops/s per call per node : 805205 +Grid : Message : Average mflops/s per call per node : 818690 +Grid : Message : Average mflops/s per call per node : 662018 +Grid : Message : Average mflops/s per call per node (full): 312571 +Grid : Message : Average mflops/s per call per node (full): 436413 +Grid : Message : Average mflops/s per call per node (full): 447176 +Grid : Message : Average mflops/s per call per node (full): 307166 +Grid : Message : Stencil 14.073 GB/s per node +Grid : Message : Stencil 14.0583 GB/s per node +Grid : Message : Stencil 17.3888 GB/s per node +Grid : Message : Stencil 12.4105 GB/s per node +Grid : Message : Average mflops/s per call per node : 662617 +Grid : Message : Average mflops/s per call per node : 809873 +Grid : Message : Average mflops/s per call per node : 823381 +Grid : Message : Average mflops/s per call per node : 664561 +Grid : Message : Average mflops/s per call per node (full): 315727 +Grid : Message : Average mflops/s per call per node (full): 400767 +Grid : Message : Average mflops/s per call per node (full): 444114 +Grid : Message : Average mflops/s per call per node (full): 304126 +Grid : Message : Stencil 14.5548 GB/s per node +Grid : Message : Stencil 16.9469 GB/s per node +Grid : Message : Stencil 17.4579 GB/s per node +Grid : Message : Stencil 14.3184 GB/s per node +Grid : Message : Average mflops/s per call per node : 662519 +Grid : Message : Average mflops/s per call per node : 799987 +Grid : Message : Average mflops/s per call per node : 822293 +Grid : Message : Average mflops/s per call per node : 662139 +Grid : Message : Average mflops/s per call per node (full): 316617 +Grid : Message : Average mflops/s per call per node (full): 433846 +Grid : Message : Average mflops/s per call per node (full): 444509 +Grid : Message : Average mflops/s per call per node (full): 305318 +Grid : Message : Stencil 13.7009 GB/s per node +Grid : Message : Stencil 16.6146 GB/s per node +Grid : Message : Stencil 17.494 GB/s per node +Grid : Message : Stencil 12.1012 GB/s per node +Grid : Message : Average mflops/s per call per node : 664936 +Grid : Message : Average mflops/s per call per node : 808221 +Grid : Message : Average mflops/s per call per node : 822622 +Grid : Message : Average mflops/s per call per node : 670400 +Grid : Message : Average mflops/s per call per node (full): 316165 +Grid : Message : Average mflops/s per call per node (full): 436412 +Grid : Message : Average mflops/s per call per node (full): 444939 +Grid : Message : Average mflops/s per call per node (full): 303228 +Grid : Message : Stencil 12.6849 GB/s per node +Grid : Message : Stencil 16.6473 GB/s per node +Grid : Message : Stencil 17.4004 GB/s per node +Grid : Message : Stencil 13.1606 GB/s per node +Grid : Message : Average mflops/s per call per node : 671546 +Grid : Message : Average mflops/s per call per node : 807297 +Grid : Message : Average mflops/s per 
call per node : 821422 +Grid : Message : Average mflops/s per call per node : 667455 +Grid : Message : Average mflops/s per call per node (full): 314965 +Grid : Message : Average mflops/s per call per node (full): 436228 +Grid : Message : Average mflops/s per call per node (full): 444879 +Grid : Message : Average mflops/s per call per node (full): 305982 +Grid : Message : Stencil 13.809 GB/s per node +Grid : Message : Stencil 16.3086 GB/s per node +Grid : Message : Stencil 17.3784 GB/s per node +Grid : Message : Stencil 12.5934 GB/s per node +Grid : Message : Average mflops/s per call per node : 667096 +Grid : Message : Average mflops/s per call per node : 803680 +Grid : Message : Average mflops/s per call per node : 823904 +Grid : Message : Average mflops/s per call per node : 667368 +Grid : Message : Average mflops/s per call per node (full): 317077 +Grid : Message : Average mflops/s per call per node (full): 432108 +Grid : Message : Average mflops/s per call per node (full): 445114 +Grid : Message : Average mflops/s per call per node (full): 304958 +Grid : Message : Stencil 14.3258 GB/s per node +Grid : Message : Stencil 16.9233 GB/s per node +Grid : Message : Stencil 17.6914 GB/s per node +Grid : Message : Stencil 12.1857 GB/s per node +Grid : Message : Average mflops/s per call per node : 665305 +Grid : Message : Average mflops/s per call per node : 804694 +Grid : Message : Average mflops/s per call per node : 824508 +Grid : Message : Average mflops/s per call per node : 667104 +Grid : Message : Average mflops/s per call per node (full): 316651 +Grid : Message : Average mflops/s per call per node (full): 428082 +Grid : Message : Average mflops/s per call per node (full): 446379 +Grid : Message : Average mflops/s per call per node (full): 303779 +Grid : Message : Stencil 13.2527 GB/s per node +Grid : Message : Stencil 16.6723 GB/s per node +Grid : Message : Stencil 17.581 GB/s per node +Grid : Message : Stencil 13.4152 GB/s per node +Grid : Message : Average mflops/s per call per node : 669149 +Grid : Message : Average mflops/s per call per node : 799407 +Grid : Message : Average mflops/s per call per node : 820946 +Grid : Message : Average mflops/s per call per node : 658257 +Grid : Message : Average mflops/s per call per node (full): 315691 +Grid : Message : Average mflops/s per call per node (full): 435436 +Grid : Message : Average mflops/s per call per node (full): 445192 +Grid : Message : Average mflops/s per call per node (full): 305074 +Grid : Message : Stencil 13.0576 GB/s per node +Grid : Message : Stencil 16.4259 GB/s per node +Grid : Message : Stencil 17.3695 GB/s per node +Grid : Message : Stencil 13.224 GB/s per node +Grid : Message : Average mflops/s per call per node : 671756 +Grid : Message : Average mflops/s per call per node : 805129 +Grid : Message : Average mflops/s per call per node : 826323 +Grid : Message : Average mflops/s per call per node : 663117 +Grid : Message : Average mflops/s per call per node (full): 316939 +Grid : Message : Average mflops/s per call per node (full): 434386 +Grid : Message : Average mflops/s per call per node (full): 445016 +Grid : Message : Average mflops/s per call per node (full): 306607 +Grid : Message : Stencil 12.735 GB/s per node +Grid : Message : Stencil 18.7133 GB/s per node +Grid : Message : Stencil 17.5949 GB/s per node +Grid : Message : Stencil 13.6578 GB/s per node +Grid : Message : Average mflops/s per call per node : 673814 +Grid : Message : Average mflops/s per call per node : 803124 +Grid : Message : Average mflops/s 
per call per node : 822958 +Grid : Message : Average mflops/s per call per node : 662935 +Grid : Message : Average mflops/s per call per node (full): 315459 +Grid : Message : Average mflops/s per call per node (full): 440286 +Grid : Message : Average mflops/s per call per node (full): 446144 +Grid : Message : Average mflops/s per call per node (full): 306944 +Grid : Message : Stencil 14.5299 GB/s per node +Grid : Message : Stencil 16.827 GB/s per node +Grid : Message : Stencil 18.227 GB/s per node +Grid : Message : Stencil 12.5667 GB/s per node +Grid : Message : Average mflops/s per call per node : 664966 +Grid : Message : Average mflops/s per call per node : 805649 +Grid : Message : Average mflops/s per call per node : 820323 +Grid : Message : Average mflops/s per call per node : 662468 +Grid : Message : Average mflops/s per call per node (full): 316234 +Grid : Message : Average mflops/s per call per node (full): 436979 +Grid : Message : Average mflops/s per call per node (full): 445870 +Grid : Message : Average mflops/s per call per node (full): 304734 +Grid : Message : Stencil 14.0947 GB/s per node +Grid : Message : Stencil 17.2047 GB/s per node +Grid : Message : Stencil 16.3778 GB/s per node +Grid : Message : Stencil 11.9717 GB/s per node +Grid : Message : Average mflops/s per call per node : 667253 +Grid : Message : Average mflops/s per call per node : 801164 +Grid : Message : Average mflops/s per call per node : 823547 +Grid : Message : Average mflops/s per call per node : 671607 +Grid : Message : Average mflops/s per call per node (full): 317280 +Grid : Message : Average mflops/s per call per node (full): 438057 +Grid : Message : Average mflops/s per call per node (full): 429727 +Grid : Message : Average mflops/s per call per node (full): 302300 +Grid : Message : Stencil 14.4044 GB/s per node +Grid : Message : Stencil 16.4973 GB/s per node +Grid : Message : Stencil 16.8518 GB/s per node +Grid : Message : Stencil 13.1009 GB/s per node +Grid : Message : Average mflops/s per call per node : 664077 +Grid : Message : Average mflops/s per call per node : 802258 +Grid : Message : Average mflops/s per call per node : 823349 +Grid : Message : Average mflops/s per call per node : 667635 +Grid : Message : Average mflops/s per call per node (full): 316900 +Grid : Message : Average mflops/s per call per node (full): 433661 +Grid : Message : Average mflops/s per call per node (full): 436108 +Grid : Message : Average mflops/s per call per node (full): 305754 +Grid : Message : Stencil 13.228 GB/s per node +Grid : Message : Stencil 16.8887 GB/s per node +Grid : Message : Stencil 17.6471 GB/s per node +Grid : Message : Stencil 12.7062 GB/s per node +Grid : Message : Average mflops/s per call per node : 669067 +Grid : Message : Average mflops/s per call per node : 804099 +Grid : Message : Average mflops/s per call per node : 821604 +Grid : Message : Average mflops/s per call per node : 658039 +Grid : Message : Average mflops/s per call per node (full): 316770 +Grid : Message : Average mflops/s per call per node (full): 437420 +Grid : Message : Average mflops/s per call per node (full): 445975 +Grid : Message : Average mflops/s per call per node (full): 298715 +Grid : Message : Stencil 13.5671 GB/s per node +Grid : Message : Stencil 16.5942 GB/s per node +Grid : Message : Stencil 18.1271 GB/s per node +Grid : Message : Stencil 11.9482 GB/s per node +Grid : Message : Average mflops/s per call per node : 667462 +Grid : Message : Average mflops/s per call per node : 807110 +Grid : Message : Average 
mflops/s per call per node : 823079 +Grid : Message : Average mflops/s per call per node : 667472 +Grid : Message : Average mflops/s per call per node (full): 314605 +Grid : Message : Average mflops/s per call per node (full): 436264 +Grid : Message : Average mflops/s per call per node (full): 446541 +Grid : Message : Average mflops/s per call per node (full): 301224 +Grid : Message : Stencil 12.3664 GB/s per node +Grid : Message : Stencil 13.5017 GB/s per node +Grid : Message : Stencil 17.7799 GB/s per node +Grid : Message : Stencil 12.3154 GB/s per node +Grid : Message : Average mflops/s per call per node : 672086 +Grid : Message : Average mflops/s per call per node : 808002 +Grid : Message : Average mflops/s per call per node : 822256 +Grid : Message : Average mflops/s per call per node : 666533 +Grid : Message : Average mflops/s per call per node (full): 313568 +Grid : Message : Average mflops/s per call per node (full): 391027 +Grid : Message : Average mflops/s per call per node (full): 441218 +Grid : Message : Average mflops/s per call per node (full): 303786 +Grid : Message : Stencil 13.5781 GB/s per node +Grid : Message : Stencil 16.4869 GB/s per node +Grid : Message : Stencil 17.8246 GB/s per node +Grid : Message : Stencil 13.8167 GB/s per node +Grid : Message : Average mflops/s per call per node : 667731 +Grid : Message : Average mflops/s per call per node : 803031 +Grid : Message : Average mflops/s per call per node : 826184 +Grid : Message : Average mflops/s per call per node : 662380 +Grid : Message : Average mflops/s per call per node (full): 316580 +Grid : Message : Average mflops/s per call per node (full): 434474 +Grid : Message : Average mflops/s per call per node (full): 447463 +Grid : Message : Average mflops/s per call per node (full): 305866 +Grid : Message : Stencil 14.1182 GB/s per node +Grid : Message : Stencil 16.975 GB/s per node +Grid : Message : Stencil 18.8744 GB/s per node +Grid : Message : Stencil 12.1288 GB/s per node +Grid : Message : Average mflops/s per call per node : 661970 +Grid : Message : Average mflops/s per call per node : 803461 +Grid : Message : Average mflops/s per call per node : 820977 +Grid : Message : Average mflops/s per call per node : 668043 +Grid : Message : Average mflops/s per call per node (full): 316254 +Grid : Message : Average mflops/s per call per node (full): 437180 +Grid : Message : Average mflops/s per call per node (full): 448645 +Grid : Message : Average mflops/s per call per node (full): 302267 +Grid : Message : Stencil 13.4813 GB/s per node +Grid : Message : Stencil 16.4433 GB/s per node +Grid : Message : Stencil 17.8945 GB/s per node +Grid : Message : Stencil 11.9791 GB/s per node +Grid : Message : Average mflops/s per call per node : 667678 +Grid : Message : Average mflops/s per call per node : 802481 +Grid : Message : Average mflops/s per call per node : 823512 +Grid : Message : Average mflops/s per call per node : 662641 +Grid : Message : Average mflops/s per call per node (full): 316494 +Grid : Message : Average mflops/s per call per node (full): 433789 +Grid : Message : Average mflops/s per call per node (full): 447496 +Grid : Message : Average mflops/s per call per node (full): 300465 +Grid : Message : Stencil 13.123 GB/s per node +Grid : Message : Stencil 11.7805 GB/s per node +Grid : Message : Stencil 17.9281 GB/s per node +Grid : Message : Stencil 12.2311 GB/s per node +Grid : Message : Average mflops/s per call per node : 665079 +Grid : Message : Average mflops/s per call per node : 806976 +Grid : Message : 
Average mflops/s per call per node : 824353 +Grid : Message : Average mflops/s per call per node : 669184 +Grid : Message : Average mflops/s per call per node (full): 315687 +Grid : Message : Average mflops/s per call per node (full): 356593 +Grid : Message : Average mflops/s per call per node (full): 441731 +Grid : Message : Average mflops/s per call per node (full): 304029 +Grid : Message : Stencil 14.1549 GB/s per node +Grid : Message : Stencil 18.8328 GB/s per node +Grid : Message : Stencil 16.9621 GB/s per node +Grid : Message : Stencil 12.2749 GB/s per node +Grid : Message : Average mflops/s per call per node : 664087 +Grid : Message : Average mflops/s per call per node : 798788 +Grid : Message : Average mflops/s per call per node : 817414 +Grid : Message : Average mflops/s per call per node : 667448 +Grid : Message : Average mflops/s per call per node (full): 316819 +Grid : Message : Average mflops/s per call per node (full): 440185 +Grid : Message : Average mflops/s per call per node (full): 439012 +Grid : Message : Average mflops/s per call per node (full): 304156 +Grid : Message : Stencil 13.5542 GB/s per node +Grid : Message : Stencil 18.0781 GB/s per node +Grid : Message : Stencil 18.4257 GB/s per node +Grid : Message : Stencil 12.1906 GB/s per node +Grid : Message : Average mflops/s per call per node : 665956 +Grid : Message : Average mflops/s per call per node : 801445 +Grid : Message : Average mflops/s per call per node : 816661 +Grid : Message : Average mflops/s per call per node : 663424 +Grid : Message : Average mflops/s per call per node (full): 316319 +Grid : Message : Average mflops/s per call per node (full): 438901 +Grid : Message : Average mflops/s per call per node (full): 445947 +Grid : Message : Average mflops/s per call per node (full): 301634 +Grid : Message : Stencil 12.6518 GB/s per node +Grid : Message : Stencil 17.1647 GB/s per node +Grid : Message : Stencil 17.1054 GB/s per node +Grid : Message : Stencil 14.0803 GB/s per node +Grid : Message : Average mflops/s per call per node : 669876 +Grid : Message : Average mflops/s per call per node : 800051 +Grid : Message : Average mflops/s per call per node : 817636 +Grid : Message : Average mflops/s per call per node : 661101 +Grid : Message : Average mflops/s per call per node (full): 314390 +Grid : Message : Average mflops/s per call per node (full): 436913 +Grid : Message : Average mflops/s per call per node (full): 439852 +Grid : Message : Average mflops/s per call per node (full): 305943 +Grid : Message : Stencil 12.5875 GB/s per node +Grid : Message : Stencil 16.6244 GB/s per node +Grid : Message : Stencil 17.697 GB/s per node +Grid : Message : Stencil 13.9751 GB/s per node +Grid : Message : Average mflops/s per call per node : 668687 +Grid : Message : Average mflops/s per call per node : 804376 +Grid : Message : Average mflops/s per call per node : 820248 +Grid : Message : Average mflops/s per call per node : 657872 +Grid : Message : Average mflops/s per call per node (full): 312425 +Grid : Message : Average mflops/s per call per node (full): 432575 +Grid : Message : Average mflops/s per call per node (full): 445039 +Grid : Message : Average mflops/s per call per node (full): 306199 +Grid : Message : Stencil 12.9612 GB/s per node +Grid : Message : Stencil 16.9251 GB/s per node +Grid : Message : Stencil 18.2427 GB/s per node +Grid : Message : Stencil 13.9809 GB/s per node +Grid : Message : Average mflops/s per call per node : 670116 +Grid : Message : Average mflops/s per call per node : 802462 +Grid : 
Message : Average mflops/s per call per node : 815357 +Grid : Message : Average mflops/s per call per node : 663631 +Grid : Message : Average mflops/s per call per node (full): 315610 +Grid : Message : Average mflops/s per call per node (full): 437106 +Grid : Message : Average mflops/s per call per node (full): 447389 +Grid : Message : Average mflops/s per call per node (full): 307173 +Grid : Message : Stencil 14.9913 GB/s per node +Grid : Message : Stencil 16.6469 GB/s per node +Grid : Message : Stencil 19.1252 GB/s per node +Grid : Message : Stencil 12.2903 GB/s per node +Grid : Message : Average mflops/s per call per node : 662529 +Grid : Message : Average mflops/s per call per node : 808307 +Grid : Message : Average mflops/s per call per node : 819406 +Grid : Message : Average mflops/s per call per node : 671267 +Grid : Message : Average mflops/s per call per node (full): 316715 +Grid : Message : Average mflops/s per call per node (full): 436353 +Grid : Message : Average mflops/s per call per node (full): 448492 +Grid : Message : Average mflops/s per call per node (full): 304655 +Grid : Message : Stencil 13.9691 GB/s per node +Grid : Message : Stencil 17.0655 GB/s per node +Grid : Message : Stencil 17.2496 GB/s per node +Grid : Message : Stencil 12.9983 GB/s per node +Grid : Message : Average mflops/s per call per node : 665233 +Grid : Message : Average mflops/s per call per node : 802284 +Grid : Message : Average mflops/s per call per node : 826984 +Grid : Message : Average mflops/s per call per node : 668970 +Grid : Message : Average mflops/s per call per node (full): 315863 +Grid : Message : Average mflops/s per call per node (full): 437702 +Grid : Message : Average mflops/s per call per node (full): 442069 +Grid : Message : Average mflops/s per call per node (full): 306745 +Grid : Message : Stencil 15.661 GB/s per node +Grid : Message : Stencil 16.8648 GB/s per node +Grid : Message : Stencil 17.2765 GB/s per node +Grid : Message : Stencil 12.083 GB/s per node +Grid : Message : Average mflops/s per call per node : 659457 +Grid : Message : Average mflops/s per call per node : 806816 +Grid : Message : Average mflops/s per call per node : 824213 +Grid : Message : Average mflops/s per call per node : 667384 +Grid : Message : Average mflops/s per call per node (full): 316720 +Grid : Message : Average mflops/s per call per node (full): 437656 +Grid : Message : Average mflops/s per call per node (full): 444228 +Grid : Message : Average mflops/s per call per node (full): 302337 +Grid : Message : Stencil 13.4679 GB/s per node +Grid : Message : Stencil 16.4826 GB/s per node +Grid : Message : Stencil 17.4329 GB/s per node +Grid : Message : Stencil 12.1631 GB/s per node +Grid : Message : Average mflops/s per call per node : 663334 +Grid : Message : Average mflops/s per call per node : 801140 +Grid : Message : Average mflops/s per call per node : 827083 +Grid : Message : Average mflops/s per call per node : 668463 +Grid : Message : Average mflops/s per call per node (full): 313267 +Grid : Message : Average mflops/s per call per node (full): 432812 +Grid : Message : Average mflops/s per call per node (full): 443554 +Grid : Message : Average mflops/s per call per node (full): 303006 +Grid : Message : Stencil 14.1353 GB/s per node +Grid : Message : Stencil 16.8278 GB/s per node +Grid : Message : Stencil 17.6039 GB/s per node +Grid : Message : Stencil 12.1976 GB/s per node +Grid : Message : Average mflops/s per call per node : 664989 +Grid : Message : Average mflops/s per call per node : 800166 
+Grid : Message : Average mflops/s per call per node : 826481 +Grid : Message : Average mflops/s per call per node : 666778 +Grid : Message : Average mflops/s per call per node (full): 316360 +Grid : Message : Average mflops/s per call per node (full): 436968 +Grid : Message : Average mflops/s per call per node (full): 446051 +Grid : Message : Average mflops/s per call per node (full): 303706 +Grid : Message : Stencil 14.9322 GB/s per node +Grid : Message : Stencil 17.0553 GB/s per node +Grid : Message : Stencil 17.8388 GB/s per node +Grid : Message : Stencil 11.9921 GB/s per node +Grid : Message : Average mflops/s per call per node : 663931 +Grid : Message : Average mflops/s per call per node : 797226 +Grid : Message : Average mflops/s per call per node : 817099 +Grid : Message : Average mflops/s per call per node : 664999 +Grid : Message : Average mflops/s per call per node (full): 317371 +Grid : Message : Average mflops/s per call per node (full): 437367 +Grid : Message : Average mflops/s per call per node (full): 442754 +Grid : Message : Average mflops/s per call per node (full): 301680 +Grid : Message : Stencil 13.8221 GB/s per node +Grid : Message : Stencil 16.6444 GB/s per node +Grid : Message : Stencil 18.01 GB/s per node +Grid : Message : Stencil 12.6676 GB/s per node +Grid : Message : Average mflops/s per call per node : 664187 +Grid : Message : Average mflops/s per call per node : 805007 +Grid : Message : Average mflops/s per call per node : 816560 +Grid : Message : Average mflops/s per call per node : 662956 +Grid : Message : Average mflops/s per call per node (full): 313418 +Grid : Message : Average mflops/s per call per node (full): 436533 +Grid : Message : Average mflops/s per call per node (full): 441327 +Grid : Message : Average mflops/s per call per node (full): 304681 +Grid : Message : Stencil 12.9318 GB/s per node +Grid : Message : Stencil 16.4364 GB/s per node +Grid : Message : Stencil 17.768 GB/s per node +Grid : Message : Stencil 13.492 GB/s per node +Grid : Message : Average mflops/s per call per node : 669173 +Grid : Message : Average mflops/s per call per node : 806886 +Grid : Message : Average mflops/s per call per node : 829390 +Grid : Message : Average mflops/s per call per node : 663708 +Grid : Message : Average mflops/s per call per node (full): 314081 +Grid : Message : Average mflops/s per call per node (full): 434090 +Grid : Message : Average mflops/s per call per node (full): 443265 +Grid : Message : Average mflops/s per call per node (full): 303994 +Grid : Message : Stencil 12.0068 GB/s per node +Grid : Message : Stencil 18.4427 GB/s per node +Grid : Message : Stencil 17.3472 GB/s per node +Grid : Message : Stencil 12.1353 GB/s per node +Grid : Message : Average mflops/s per call per node : 669657 +Grid : Message : Average mflops/s per call per node : 806409 +Grid : Message : Average mflops/s per call per node : 821373 +Grid : Message : Average mflops/s per call per node : 664660 +Grid : Message : Average mflops/s per call per node (full): 308790 +Grid : Message : Average mflops/s per call per node (full): 443314 +Grid : Message : Average mflops/s per call per node (full): 443511 +Grid : Message : Average mflops/s per call per node (full): 302802 +Grid : Message : Stencil 11.8595 GB/s per node +Grid : Message : Stencil 13.2191 GB/s per node +Grid : Message : Stencil 17.0265 GB/s per node +Grid : Message : Stencil 11.9838 GB/s per node +Grid : Message : Average mflops/s per call per node : 670429 +Grid : Message : Average mflops/s per call per node : 
805409 +Grid : Message : Average mflops/s per call per node : 821689 +Grid : Message : Average mflops/s per call per node : 663832 +Grid : Message : Average mflops/s per call per node (full): 306883 +Grid : Message : Average mflops/s per call per node (full): 385481 +Grid : Message : Average mflops/s per call per node (full): 440786 +Grid : Message : Average mflops/s per call per node (full): 301769 +Grid : Message : Stencil 12.2979 GB/s per node +Grid : Message : Stencil 17.2923 GB/s per node +Grid : Message : Stencil 17.2221 GB/s per node +Grid : Message : Stencil 13.9567 GB/s per node +Grid : Message : Average mflops/s per call per node : 671006 +Grid : Message : Average mflops/s per call per node : 803697 +Grid : Message : Average mflops/s per call per node : 826999 +Grid : Message : Average mflops/s per call per node : 663643 +Grid : Message : Average mflops/s per call per node (full): 311482 +Grid : Message : Average mflops/s per call per node (full): 437719 +Grid : Message : Average mflops/s per call per node (full): 443215 +Grid : Message : Average mflops/s per call per node (full): 307982 +Grid : Message : Stencil 12.3281 GB/s per node +Grid : Message : Stencil 17.6035 GB/s per node +Grid : Message : Stencil 17.9606 GB/s per node +Grid : Message : Stencil 13.341 GB/s per node +Grid : Message : Average mflops/s per call per node : 668979 +Grid : Message : Average mflops/s per call per node : 801647 +Grid : Message : Average mflops/s per call per node : 820156 +Grid : Message : Average mflops/s per call per node : 662391 +Grid : Message : Average mflops/s per call per node (full): 310984 +Grid : Message : Average mflops/s per call per node (full): 438264 +Grid : Message : Average mflops/s per call per node (full): 440264 +Grid : Message : Average mflops/s per call per node (full): 306365 +Grid : Message : Stencil 12.1279 GB/s per node +Grid : Message : Stencil 16.409 GB/s per node +Grid : Message : Stencil 17.7244 GB/s per node +Grid : Message : Stencil 12.4881 GB/s per node +Grid : Message : Average mflops/s per call per node : 666087 +Grid : Message : Average mflops/s per call per node : 800875 +Grid : Message : Average mflops/s per call per node : 815854 +Grid : Message : Average mflops/s per call per node : 662393 +Grid : Message : Average mflops/s per call per node (full): 310224 +Grid : Message : Average mflops/s per call per node (full): 433186 +Grid : Message : Average mflops/s per call per node (full): 444792 +Grid : Message : Average mflops/s per call per node (full): 303496 +Grid : Message : Stencil 13.0349 GB/s per node +Grid : Message : Stencil 14.3654 GB/s per node +Grid : Message : Stencil 17.6391 GB/s per node +Grid : Message : Stencil 13.7551 GB/s per node +Grid : Message : Average mflops/s per call per node : 668233 +Grid : Message : Average mflops/s per call per node : 804659 +Grid : Message : Average mflops/s per call per node : 814795 +Grid : Message : Average mflops/s per call per node : 664072 +Grid : Message : Average mflops/s per call per node (full): 314520 +Grid : Message : Average mflops/s per call per node (full): 406269 +Grid : Message : Average mflops/s per call per node (full): 444639 +Grid : Message : Average mflops/s per call per node (full): 306410 +Grid : Message : Stencil 14.403 GB/s per node +Grid : Message : Stencil 16.4274 GB/s per node +Grid : Message : Stencil 17.764 GB/s per node +Grid : Message : Stencil 13.506 GB/s per node +Grid : Message : Average mflops/s per call per node : 662911 +Grid : Message : Average mflops/s per call per node : 
804622 +Grid : Message : Average mflops/s per call per node : 820148 +Grid : Message : Average mflops/s per call per node : 661708 +Grid : Message : Average mflops/s per call per node (full): 316671 +Grid : Message : Average mflops/s per call per node (full): 433131 +Grid : Message : Average mflops/s per call per node (full): 445038 +Grid : Message : Average mflops/s per call per node (full): 304646 +Grid : Message : Stencil 13.1869 GB/s per node +Grid : Message : Stencil 16.7596 GB/s per node +Grid : Message : Stencil 17.5764 GB/s per node +Grid : Message : Stencil 12.3926 GB/s per node +Grid : Message : Average mflops/s per call per node : 666711 +Grid : Message : Average mflops/s per call per node : 806839 +Grid : Message : Average mflops/s per call per node : 826096 +Grid : Message : Average mflops/s per call per node : 667397 +Grid : Message : Average mflops/s per call per node (full): 315643 +Grid : Message : Average mflops/s per call per node (full): 436755 +Grid : Message : Average mflops/s per call per node (full): 445928 +Grid : Message : Average mflops/s per call per node (full): 304457 +Grid : Message : Stencil 13.263 GB/s per node +Grid : Message : Stencil 17.5252 GB/s per node +Grid : Message : Stencil 17.8626 GB/s per node +Grid : Message : Stencil 13.3588 GB/s per node +Grid : Message : Average mflops/s per call per node : 668043 +Grid : Message : Average mflops/s per call per node : 799596 +Grid : Message : Average mflops/s per call per node : 822349 +Grid : Message : Average mflops/s per call per node : 663654 +Grid : Message : Average mflops/s per call per node (full): 316238 +Grid : Message : Average mflops/s per call per node (full): 438539 +Grid : Message : Average mflops/s per call per node (full): 445313 +Grid : Message : Average mflops/s per call per node (full): 305676 +Grid : Message : Stencil 13.1748 GB/s per node +Grid : Message : Stencil 16.4387 GB/s per node +Grid : Message : Stencil 16.8156 GB/s per node +Grid : Message : Stencil 11.9933 GB/s per node +Grid : Message : Average mflops/s per call per node : 665377 +Grid : Message : Average mflops/s per call per node : 801811 +Grid : Message : Average mflops/s per call per node : 824709 +Grid : Message : Average mflops/s per call per node : 666862 +Grid : Message : Average mflops/s per call per node (full): 314939 +Grid : Message : Average mflops/s per call per node (full): 433577 +Grid : Message : Average mflops/s per call per node (full): 438749 +Grid : Message : Average mflops/s per call per node (full): 302165 +Grid : Message : Stencil 13.6378 GB/s per node +Grid : Message : Stencil 16.6342 GB/s per node +Grid : Message : Stencil 17.8761 GB/s per node +Grid : Message : Stencil 13.0271 GB/s per node +Grid : Message : Average mflops/s per call per node : 665072 +Grid : Message : Average mflops/s per call per node : 803115 +Grid : Message : Average mflops/s per call per node : 811562 +Grid : Message : Average mflops/s per call per node : 665020 +Grid : Message : Average mflops/s per call per node (full): 315482 +Grid : Message : Average mflops/s per call per node (full): 435792 +Grid : Message : Average mflops/s per call per node (full): 443680 +Grid : Message : Average mflops/s per call per node (full): 306156 +Grid : Message : Stencil 14.0895 GB/s per node +Grid : Message : Stencil 16.4099 GB/s per node +Grid : Message : Stencil 16.8037 GB/s per node +Grid : Message : Stencil 12.5877 GB/s per node +Grid : Message : Average mflops/s per call per node : 665096 +Grid : Message : Average mflops/s per call per 
node : 804712 +Grid : Message : Average mflops/s per call per node : 825464 +Grid : Message : Average mflops/s per call per node : 665110 +Grid : Message : Average mflops/s per call per node (full): 316653 +Grid : Message : Average mflops/s per call per node (full): 433047 +Grid : Message : Average mflops/s per call per node (full): 438423 +Grid : Message : Average mflops/s per call per node (full): 303523 +Grid : Message : Stencil 14.4215 GB/s per node +Grid : Message : Stencil 18.1001 GB/s per node +Grid : Message : Stencil 18.8503 GB/s per node +Grid : Message : Stencil 12.8565 GB/s per node +Grid : Message : Average mflops/s per call per node : 663644 +Grid : Message : Average mflops/s per call per node : 803815 +Grid : Message : Average mflops/s per call per node : 818369 +Grid : Message : Average mflops/s per call per node : 660859 +Grid : Message : Average mflops/s per call per node (full): 316221 +Grid : Message : Average mflops/s per call per node (full): 440122 +Grid : Message : Average mflops/s per call per node (full): 447962 +Grid : Message : Average mflops/s per call per node (full): 303555 +Grid : Message : Stencil 13.2125 GB/s per node +Grid : Message : Stencil 17.2994 GB/s per node +Grid : Message : Stencil 17.782 GB/s per node +Grid : Message : Stencil 12.6348 GB/s per node +Grid : Message : Average mflops/s per call per node : 666511 +Grid : Message : Average mflops/s per call per node : 803513 +Grid : Message : Average mflops/s per call per node : 820944 +Grid : Message : Average mflops/s per call per node : 666189 +Grid : Message : Average mflops/s per call per node (full): 314097 +Grid : Message : Average mflops/s per call per node (full): 437299 +Grid : Message : Average mflops/s per call per node (full): 443860 +Grid : Message : Average mflops/s per call per node (full): 304287 +Grid : Message : Stencil 12.5 GB/s per node +Grid : Message : Stencil 18.2757 GB/s per node +Grid : Message : Stencil 18.1259 GB/s per node +Grid : Message : Stencil 12.4919 GB/s per node +Grid : Message : Average mflops/s per call per node : 667520 +Grid : Message : Average mflops/s per call per node : 799292 +Grid : Message : Average mflops/s per call per node : 818353 +Grid : Message : Average mflops/s per call per node : 665097 +Grid : Message : Average mflops/s per call per node (full): 313158 +Grid : Message : Average mflops/s per call per node (full): 438549 +Grid : Message : Average mflops/s per call per node (full): 447002 +Grid : Message : Average mflops/s per call per node (full): 304029 +Grid : Message : Stencil 15.4003 GB/s per node +Grid : Message : Stencil 16.7543 GB/s per node +Grid : Message : Stencil 18.0461 GB/s per node +Grid : Message : Stencil 12.4582 GB/s per node +Grid : Message : Average mflops/s per call per node : 659704 +Grid : Message : Average mflops/s per call per node : 802566 +Grid : Message : Average mflops/s per call per node : 827087 +Grid : Message : Average mflops/s per call per node : 667201 +Grid : Message : Average mflops/s per call per node (full): 316473 +Grid : Message : Average mflops/s per call per node (full): 435987 +Grid : Message : Average mflops/s per call per node (full): 448107 +Grid : Message : Average mflops/s per call per node (full): 304209 +Grid : Message : Stencil 15.2555 GB/s per node +Grid : Message : Stencil 17.2014 GB/s per node +Grid : Message : Stencil 17.4156 GB/s per node +Grid : Message : Stencil 12.7329 GB/s per node +Grid : Message : Average mflops/s per call per node : 662821 +Grid : Message : Average mflops/s per call 
per node : 801309 +Grid : Message : Average mflops/s per call per node : 819615 +Grid : Message : Average mflops/s per call per node : 670265 +Grid : Message : Average mflops/s per call per node (full): 317148 +Grid : Message : Average mflops/s per call per node (full): 436550 +Grid : Message : Average mflops/s per call per node (full): 440511 +Grid : Message : Average mflops/s per call per node (full): 306119 +Grid : Message : Stencil 12.5579 GB/s per node +Grid : Message : Stencil 17.8074 GB/s per node +Grid : Message : Stencil 18.2134 GB/s per node +Grid : Message : Stencil 13.6269 GB/s per node +Grid : Message : Average mflops/s per call per node : 666563 +Grid : Message : Average mflops/s per call per node : 802986 +Grid : Message : Average mflops/s per call per node : 822776 +Grid : Message : Average mflops/s per call per node : 661145 +Grid : Message : Average mflops/s per call per node (full): 313962 +Grid : Message : Average mflops/s per call per node (full): 439128 +Grid : Message : Average mflops/s per call per node (full): 447571 +Grid : Message : Average mflops/s per call per node (full): 306357 +Grid : Message : Stencil 13.181 GB/s per node +Grid : Message : Stencil 17.6128 GB/s per node +Grid : Message : Stencil 17.5374 GB/s per node +Grid : Message : Stencil 13.5804 GB/s per node +Grid : Message : Average mflops/s per call per node : 664015 +Grid : Message : Average mflops/s per call per node : 803507 +Grid : Message : Average mflops/s per call per node : 818642 +Grid : Message : Average mflops/s per call per node : 663962 +Grid : Message : Average mflops/s per call per node (full): 314661 +Grid : Message : Average mflops/s per call per node (full): 439069 +Grid : Message : Average mflops/s per call per node (full): 444028 +Grid : Message : Average mflops/s per call per node (full): 307019 +Grid : Message : Stencil 13.6153 GB/s per node +Grid : Message : Stencil 17.0466 GB/s per node +Grid : Message : Stencil 17.4903 GB/s per node +Grid : Message : Stencil 12.5444 GB/s per node +Grid : Message : Average mflops/s per call per node : 666360 +Grid : Message : Average mflops/s per call per node : 800808 +Grid : Message : Average mflops/s per call per node : 824012 +Grid : Message : Average mflops/s per call per node : 663114 +Grid : Message : Average mflops/s per call per node (full): 315889 +Grid : Message : Average mflops/s per call per node (full): 437328 +Grid : Message : Average mflops/s per call per node (full): 444113 +Grid : Message : Average mflops/s per call per node (full): 303517 +Grid : Message : Stencil 14.3039 GB/s per node +Grid : Message : Stencil 17.4336 GB/s per node +Grid : Message : Stencil 17.3709 GB/s per node +Grid : Message : Stencil 13.4663 GB/s per node +Grid : Message : Average mflops/s per call per node : 661343 +Grid : Message : Average mflops/s per call per node : 801882 +Grid : Message : Average mflops/s per call per node : 825649 +Grid : Message : Average mflops/s per call per node : 661617 +Grid : Message : Average mflops/s per call per node (full): 316563 +Grid : Message : Average mflops/s per call per node (full): 439155 +Grid : Message : Average mflops/s per call per node (full): 443762 +Grid : Message : Average mflops/s per call per node (full): 306622 +Grid : Message : Stencil 13.4482 GB/s per node +Grid : Message : Stencil 16.7404 GB/s per node +Grid : Message : Stencil 18.4052 GB/s per node +Grid : Message : Stencil 12.698 GB/s per node +Grid : Message : Average mflops/s per call per node : 667886 +Grid : Message : Average mflops/s per 
call per node : 798467 +Grid : Message : Average mflops/s per call per node : 819316 +Grid : Message : Average mflops/s per call per node : 663127 +Grid : Message : Average mflops/s per call per node (full): 316365 +Grid : Message : Average mflops/s per call per node (full): 435445 +Grid : Message : Average mflops/s per call per node (full): 447972 +Grid : Message : Average mflops/s per call per node (full): 304094 +Grid : Message : Stencil 13.7252 GB/s per node +Grid : Message : Stencil 16.6097 GB/s per node +Grid : Message : Stencil 17.8379 GB/s per node +Grid : Message : Stencil 13.1634 GB/s per node +Grid : Message : Average mflops/s per call per node : 665025 +Grid : Message : Average mflops/s per call per node : 804014 +Grid : Message : Average mflops/s per call per node : 813861 +Grid : Message : Average mflops/s per call per node : 660909 +Grid : Message : Average mflops/s per call per node (full): 316061 +Grid : Message : Average mflops/s per call per node (full): 436117 +Grid : Message : Average mflops/s per call per node (full): 444560 +Grid : Message : Average mflops/s per call per node (full): 304968 +Grid : Message : Stencil 13.8481 GB/s per node +Grid : Message : Stencil 14.2236 GB/s per node +Grid : Message : Stencil 19.1255 GB/s per node +Grid : Message : Stencil 12.4868 GB/s per node +Grid : Message : Average mflops/s per call per node : 665570 +Grid : Message : Average mflops/s per call per node : 802607 +Grid : Message : Average mflops/s per call per node : 823274 +Grid : Message : Average mflops/s per call per node : 666035 +Grid : Message : Average mflops/s per call per node (full): 316665 +Grid : Message : Average mflops/s per call per node (full): 404232 +Grid : Message : Average mflops/s per call per node (full): 450107 +Grid : Message : Average mflops/s per call per node (full): 303665 +Grid : Message : Stencil 12.5744 GB/s per node +Grid : Message : Stencil 17.3745 GB/s per node +Grid : Message : Stencil 18.5195 GB/s per node +Grid : Message : Stencil 12.7849 GB/s per node +Grid : Message : Average mflops/s per call per node : 670859 +Grid : Message : Average mflops/s per call per node : 806203 +Grid : Message : Average mflops/s per call per node : 820805 +Grid : Message : Average mflops/s per call per node : 663512 +Grid : Message : Average mflops/s per call per node (full): 313422 +Grid : Message : Average mflops/s per call per node (full): 439284 +Grid : Message : Average mflops/s per call per node (full): 447298 +Grid : Message : Average mflops/s per call per node (full): 304256 +Grid : Message : Stencil 13.6758 GB/s per node +Grid : Message : Stencil 17.4313 GB/s per node +Grid : Message : Stencil 17.4837 GB/s per node +Grid : Message : Stencil 12.4561 GB/s per node +Grid : Message : Average mflops/s per call per node : 664223 +Grid : Message : Average mflops/s per call per node : 803113 +Grid : Message : Average mflops/s per call per node : 823434 +Grid : Message : Average mflops/s per call per node : 665009 +Grid : Message : Average mflops/s per call per node (full): 316348 +Grid : Message : Average mflops/s per call per node (full): 438938 +Grid : Message : Average mflops/s per call per node (full): 444205 +Grid : Message : Average mflops/s per call per node (full): 302728 +Grid : Message : Stencil 14.1599 GB/s per node +Grid : Message : Stencil 18.1794 GB/s per node +Grid : Message : Stencil 17.5413 GB/s per node +Grid : Message : Stencil 13.6046 GB/s per node +Grid : Message : Average mflops/s per call per node : 660446 +Grid : Message : Average 
mflops/s per call per node : 803789 +Grid : Message : Average mflops/s per call per node : 820256 +Grid : Message : Average mflops/s per call per node : 661423 +Grid : Message : Average mflops/s per call per node (full): 316034 +Grid : Message : Average mflops/s per call per node (full): 436357 +Grid : Message : Average mflops/s per call per node (full): 439043 +Grid : Message : Average mflops/s per call per node (full): 305437 +Grid : Message : Stencil 15.3867 GB/s per node +Grid : Message : Stencil 16.7997 GB/s per node +Grid : Message : Stencil 17.7 GB/s per node +Grid : Message : Stencil 12.2701 GB/s per node +Grid : Message : Average mflops/s per call per node : 660885 +Grid : Message : Average mflops/s per call per node : 802864 +Grid : Message : Average mflops/s per call per node : 822424 +Grid : Message : Average mflops/s per call per node : 670941 +Grid : Message : Average mflops/s per call per node (full): 316719 +Grid : Message : Average mflops/s per call per node (full): 430612 +Grid : Message : Average mflops/s per call per node (full): 445515 +Grid : Message : Average mflops/s per call per node (full): 302982 +Grid : Message : Stencil 12.8568 GB/s per node +Grid : Message : Stencil 16.7052 GB/s per node +Grid : Message : Stencil 16.6873 GB/s per node +Grid : Message : Stencil 13.3301 GB/s per node +Grid : Message : Average mflops/s per call per node : 663559 +Grid : Message : Average mflops/s per call per node : 804200 +Grid : Message : Average mflops/s per call per node : 820259 +Grid : Message : Average mflops/s per call per node : 659533 +Grid : Message : Average mflops/s per call per node (full): 313045 +Grid : Message : Average mflops/s per call per node (full): 437324 +Grid : Message : Average mflops/s per call per node (full): 437247 +Grid : Message : Average mflops/s per call per node (full): 305163 +Grid : Message : Stencil 13.1161 GB/s per node +Grid : Message : Stencil 16.5145 GB/s per node +Grid : Message : Stencil 17.5881 GB/s per node +Grid : Message : Stencil 12.1828 GB/s per node +Grid : Message : Average mflops/s per call per node : 661494 +Grid : Message : Average mflops/s per call per node : 806467 +Grid : Message : Average mflops/s per call per node : 820002 +Grid : Message : Average mflops/s per call per node : 670374 +Grid : Message : Average mflops/s per call per node (full): 312961 +Grid : Message : Average mflops/s per call per node (full): 435157 +Grid : Message : Average mflops/s per call per node (full): 434648 +Grid : Message : Average mflops/s per call per node (full): 302641 +Grid : Message : Stencil 13.4692 GB/s per node +Grid : Message : Stencil 16.3541 GB/s per node +Grid : Message : Stencil 17.6659 GB/s per node +Grid : Message : Stencil 12.8814 GB/s per node +Grid : Message : Average mflops/s per call per node : 663625 +Grid : Message : Average mflops/s per call per node : 804987 +Grid : Message : Average mflops/s per call per node : 822099 +Grid : Message : Average mflops/s per call per node : 662334 +Grid : Message : Average mflops/s per call per node (full): 312334 +Grid : Message : Average mflops/s per call per node (full): 432101 +Grid : Message : Average mflops/s per call per node (full): 446640 +Grid : Message : Average mflops/s per call per node (full): 305115 +Grid : Message : Stencil 12.8942 GB/s per node +Grid : Message : Stencil 16.7898 GB/s per node +Grid : Message : Stencil 17.5971 GB/s per node +Grid : Message : Stencil 12.2456 GB/s per node +Grid : Message : Average mflops/s per call per node : 668954 +Grid : Message : 
+Grid : Message : Average mflops/s per call per node : 798117
+Grid : Message : Average mflops/s per call per node : 824941
+Grid : Message : Average mflops/s per call per node : 669293
+Grid : Message : Average mflops/s per call per node (full): 316080
+Grid : Message : Average mflops/s per call per node (full): 433248
+Grid : Message : Average mflops/s per call per node (full): 446401
+Grid : Message : Average mflops/s per call per node (full): 303309
+Grid : Message : Stencil 12.6616 GB/s per node
+Grid : Message : Stencil 16.7577 GB/s per node
+Grid : Message : Stencil 17.7365 GB/s per node
+Grid : Message : Stencil 12.4545 GB/s per node
+Grid : Message : Average mflops/s per call per node : 670534
+Grid : Message : Average mflops/s per call per node : 804258
+Grid : Message : Average mflops/s per call per node : 819481
+Grid : Message : Average mflops/s per call per node : 664434
+Grid : Message : Average mflops/s per call per node (full): 314697
+Grid : Message : Average mflops/s per call per node (full): 436280
+Grid : Message : Average mflops/s per call per node (full): 445594
+Grid : Message : Average mflops/s per call per node (full): 303729
[... many further repetitions of the same 12-line benchmark report elided: each iteration logs four "Stencil ... GB/s per node" lines, four "Average mflops/s per call per node" lines, and four "Average mflops/s per call per node (full)" lines. Across the elided iterations the stencil bandwidth stays within roughly 9.8-19.4 GB/s per node, the per-call mflops/s within roughly 655,000-830,000, and the (full) mflops/s within roughly 297,000-450,000 ...]
Message : Stencil 12.3613 GB/s per node +Grid : Message : Average mflops/s per call per node : 664840 +Grid : Message : Average mflops/s per call per node : 804370 +Grid : Message : Average mflops/s per call per node : 815424 +Grid : Message : Average mflops/s per call per node : 664303 +Grid : Message : Average mflops/s per call per node (full): 311918 +Grid : Message : Average mflops/s per call per node (full): 438836 +Grid : Message : Average mflops/s per call per node (full): 443442 +Grid : Message : Average mflops/s per call per node (full): 303934 +Grid : Message : Stencil 13.7473 GB/s per node +Grid : Message : Stencil 11.9221 GB/s per node +Grid : Message : Stencil 17.4339 GB/s per node +Grid : Message : Stencil 13.3121 GB/s per node +Grid : Message : Average mflops/s per call per node : 662807 +Grid : Message : Average mflops/s per call per node : 806827 +Grid : Message : Average mflops/s per call per node : 819802 +Grid : Message : Average mflops/s per call per node : 662039 +Grid : Message : Average mflops/s per call per node (full): 315933 +Grid : Message : Average mflops/s per call per node (full): 359834 +Grid : Message : Average mflops/s per call per node (full): 443439 +Grid : Message : Average mflops/s per call per node (full): 305833 +Grid : Message : Stencil 12.6701 GB/s per node +Grid : Message : Stencil 16.6586 GB/s per node +Grid : Message : Stencil 16.9694 GB/s per node +Grid : Message : Stencil 12.4529 GB/s per node +Grid : Message : Average mflops/s per call per node : 666476 +Grid : Message : Average mflops/s per call per node : 804596 +Grid : Message : Average mflops/s per call per node : 824614 +Grid : Message : Average mflops/s per call per node : 666724 +Grid : Message : Average mflops/s per call per node (full): 314720 +Grid : Message : Average mflops/s per call per node (full): 431010 +Grid : Message : Average mflops/s per call per node (full): 430781 +Grid : Message : Average mflops/s per call per node (full): 304274 +Grid : Message : Stencil 13.8334 GB/s per node +Grid : Message : Stencil 17.5333 GB/s per node +Grid : Message : Stencil 17.2497 GB/s per node +Grid : Message : Stencil 14.9046 GB/s per node +Grid : Message : Average mflops/s per call per node : 664783 +Grid : Message : Average mflops/s per call per node : 805252 +Grid : Message : Average mflops/s per call per node : 820404 +Grid : Message : Average mflops/s per call per node : 655310 +Grid : Message : Average mflops/s per call per node (full): 315375 +Grid : Message : Average mflops/s per call per node (full): 440397 +Grid : Message : Average mflops/s per call per node (full): 442864 +Grid : Message : Average mflops/s per call per node (full): 305982 +Grid : Message : Stencil 12.6778 GB/s per node +Grid : Message : Stencil 16.9202 GB/s per node +Grid : Message : Stencil 17.2772 GB/s per node +Grid : Message : Stencil 13.485 GB/s per node +Grid : Message : Average mflops/s per call per node : 666072 +Grid : Message : Average mflops/s per call per node : 806807 +Grid : Message : Average mflops/s per call per node : 825291 +Grid : Message : Average mflops/s per call per node : 664838 +Grid : Message : Average mflops/s per call per node (full): 314502 +Grid : Message : Average mflops/s per call per node (full): 437343 +Grid : Message : Average mflops/s per call per node (full): 443630 +Grid : Message : Average mflops/s per call per node (full): 306984 +Grid : Message : Stencil 13.5258 GB/s per node +Grid : Message : Stencil 17.4691 GB/s per node +Grid : Message : Stencil 17.4799 GB/s per node 
+Grid : Message : Stencil 12.612 GB/s per node +Grid : Message : Average mflops/s per call per node : 663070 +Grid : Message : Average mflops/s per call per node : 796856 +Grid : Message : Average mflops/s per call per node : 825192 +Grid : Message : Average mflops/s per call per node : 667299 +Grid : Message : Average mflops/s per call per node (full): 316114 +Grid : Message : Average mflops/s per call per node (full): 438528 +Grid : Message : Average mflops/s per call per node (full): 444614 +Grid : Message : Average mflops/s per call per node (full): 305545 +Grid : Message : Stencil 12.8092 GB/s per node +Grid : Message : Stencil 10.023 GB/s per node +Grid : Message : Stencil 17.95 GB/s per node +Grid : Message : Stencil 12.5901 GB/s per node +Grid : Message : Average mflops/s per call per node : 666850 +Grid : Message : Average mflops/s per call per node : 808491 +Grid : Message : Average mflops/s per call per node : 821557 +Grid : Message : Average mflops/s per call per node : 666166 +Grid : Message : Average mflops/s per call per node (full): 315060 +Grid : Message : Average mflops/s per call per node (full): 316803 +Grid : Message : Average mflops/s per call per node (full): 444537 +Grid : Message : Average mflops/s per call per node (full): 304435 +Grid : Message : Stencil 13.6691 GB/s per node +Grid : Message : Stencil 17.0066 GB/s per node +Grid : Message : Stencil 17.4481 GB/s per node +Grid : Message : Stencil 13.2227 GB/s per node +Grid : Message : Average mflops/s per call per node : 665356 +Grid : Message : Average mflops/s per call per node : 799926 +Grid : Message : Average mflops/s per call per node : 820032 +Grid : Message : Average mflops/s per call per node : 664401 +Grid : Message : Average mflops/s per call per node (full): 316276 +Grid : Message : Average mflops/s per call per node (full): 434571 +Grid : Message : Average mflops/s per call per node (full): 444560 +Grid : Message : Average mflops/s per call per node (full): 306388 +Grid : Message : Stencil 13.0078 GB/s per node +Grid : Message : Stencil 14.0913 GB/s per node +Grid : Message : Stencil 18.2835 GB/s per node +Grid : Message : Stencil 13.4152 GB/s per node +Grid : Message : Average mflops/s per call per node : 664674 +Grid : Message : Average mflops/s per call per node : 808509 +Grid : Message : Average mflops/s per call per node : 819343 +Grid : Message : Average mflops/s per call per node : 665803 +Grid : Message : Average mflops/s per call per node (full): 314690 +Grid : Message : Average mflops/s per call per node (full): 401452 +Grid : Message : Average mflops/s per call per node (full): 447251 +Grid : Message : Average mflops/s per call per node (full): 306926 +Grid : Message : Stencil 14.6221 GB/s per node +Grid : Message : Stencil 16.6677 GB/s per node +Grid : Message : Stencil 17.582 GB/s per node +Grid : Message : Stencil 12.2818 GB/s per node +Grid : Message : Average mflops/s per call per node : 659245 +Grid : Message : Average mflops/s per call per node : 804855 +Grid : Message : Average mflops/s per call per node : 819504 +Grid : Message : Average mflops/s per call per node : 667461 +Grid : Message : Average mflops/s per call per node (full): 315871 +Grid : Message : Average mflops/s per call per node (full): 434870 +Grid : Message : Average mflops/s per call per node (full): 443764 +Grid : Message : Average mflops/s per call per node (full): 304046 +Grid : Message : Stencil 13.0725 GB/s per node +Grid : Message : Stencil 16.8008 GB/s per node +Grid : Message : Stencil 17.7133 GB/s per node 
+Grid : Message : Stencil 12.5526 GB/s per node +Grid : Message : Average mflops/s per call per node : 662510 +Grid : Message : Average mflops/s per call per node : 801213 +Grid : Message : Average mflops/s per call per node : 818551 +Grid : Message : Average mflops/s per call per node : 666014 +Grid : Message : Average mflops/s per call per node (full): 313216 +Grid : Message : Average mflops/s per call per node (full): 436557 +Grid : Message : Average mflops/s per call per node (full): 445124 +Grid : Message : Average mflops/s per call per node (full): 305612 +Grid : Message : Stencil 13.0189 GB/s per node +Grid : Message : Stencil 16.4422 GB/s per node +Grid : Message : Stencil 18.3631 GB/s per node +Grid : Message : Stencil 12.9549 GB/s per node +Grid : Message : Average mflops/s per call per node : 668479 +Grid : Message : Average mflops/s per call per node : 804731 +Grid : Message : Average mflops/s per call per node : 823586 +Grid : Message : Average mflops/s per call per node : 663128 +Grid : Message : Average mflops/s per call per node (full): 315350 +Grid : Message : Average mflops/s per call per node (full): 433633 +Grid : Message : Average mflops/s per call per node (full): 448125 +Grid : Message : Average mflops/s per call per node (full): 305041 +Grid : Message : Stencil 13.7261 GB/s per node +Grid : Message : Stencil 16.5987 GB/s per node +Grid : Message : Stencil 18.1536 GB/s per node +Grid : Message : Stencil 13.5897 GB/s per node +Grid : Message : Average mflops/s per call per node : 662310 +Grid : Message : Average mflops/s per call per node : 797417 +Grid : Message : Average mflops/s per call per node : 821599 +Grid : Message : Average mflops/s per call per node : 659934 +Grid : Message : Average mflops/s per call per node (full): 315396 +Grid : Message : Average mflops/s per call per node (full): 433694 +Grid : Message : Average mflops/s per call per node (full): 446757 +Grid : Message : Average mflops/s per call per node (full): 305987 +Grid : Message : Stencil 15.2071 GB/s per node +Grid : Message : Stencil 17.2064 GB/s per node +Grid : Message : Stencil 17.2153 GB/s per node +Grid : Message : Stencil 14.5624 GB/s per node +Grid : Message : Average mflops/s per call per node : 661738 +Grid : Message : Average mflops/s per call per node : 804050 +Grid : Message : Average mflops/s per call per node : 823835 +Grid : Message : Average mflops/s per call per node : 657306 +Grid : Message : Average mflops/s per call per node (full): 316739 +Grid : Message : Average mflops/s per call per node (full): 438758 +Grid : Message : Average mflops/s per call per node (full): 443199 +Grid : Message : Average mflops/s per call per node (full): 306799 +Grid : Message : Stencil 14.368 GB/s per node +Grid : Message : Stencil 12.6867 GB/s per node +Grid : Message : Stencil 17.667 GB/s per node +Grid : Message : Stencil 12.7552 GB/s per node +Grid : Message : Average mflops/s per call per node : 664257 +Grid : Message : Average mflops/s per call per node : 806847 +Grid : Message : Average mflops/s per call per node : 825078 +Grid : Message : Average mflops/s per call per node : 668175 +Grid : Message : Average mflops/s per call per node (full): 316318 +Grid : Message : Average mflops/s per call per node (full): 375326 +Grid : Message : Average mflops/s per call per node (full): 446654 +Grid : Message : Average mflops/s per call per node (full): 305203 +Grid : Message : Stencil 14.4694 GB/s per node +Grid : Message : Stencil 16.4098 GB/s per node +Grid : Message : Stencil 17.5862 GB/s per 
node +Grid : Message : Stencil 11.8687 GB/s per node +Grid : Message : Average mflops/s per call per node : 664554 +Grid : Message : Average mflops/s per call per node : 802905 +Grid : Message : Average mflops/s per call per node : 820834 +Grid : Message : Average mflops/s per call per node : 666175 +Grid : Message : Average mflops/s per call per node (full): 314881 +Grid : Message : Average mflops/s per call per node (full): 431615 +Grid : Message : Average mflops/s per call per node (full): 432655 +Grid : Message : Average mflops/s per call per node (full): 298226 +Grid : Message : Stencil 13.5055 GB/s per node +Grid : Message : Stencil 16.7847 GB/s per node +Grid : Message : Stencil 18.1815 GB/s per node +Grid : Message : Stencil 13.5363 GB/s per node +Grid : Message : Average mflops/s per call per node : 662343 +Grid : Message : Average mflops/s per call per node : 802398 +Grid : Message : Average mflops/s per call per node : 822336 +Grid : Message : Average mflops/s per call per node : 667545 +Grid : Message : Average mflops/s per call per node (full): 315990 +Grid : Message : Average mflops/s per call per node (full): 437402 +Grid : Message : Average mflops/s per call per node (full): 443588 +Grid : Message : Average mflops/s per call per node (full): 306124 +Grid : Message : Stencil 14.5623 GB/s per node +Grid : Message : Stencil 16.7777 GB/s per node +Grid : Message : Stencil 17.9493 GB/s per node +Grid : Message : Stencil 13.6665 GB/s per node +Grid : Message : Average mflops/s per call per node : 661588 +Grid : Message : Average mflops/s per call per node : 804256 +Grid : Message : Average mflops/s per call per node : 823914 +Grid : Message : Average mflops/s per call per node : 665377 +Grid : Message : Average mflops/s per call per node (full): 315257 +Grid : Message : Average mflops/s per call per node (full): 435848 +Grid : Message : Average mflops/s per call per node (full): 445922 +Grid : Message : Average mflops/s per call per node (full): 306689 +Grid : Message : Stencil 12.7961 GB/s per node +Grid : Message : Stencil 16.6998 GB/s per node +Grid : Message : Stencil 17.8133 GB/s per node +Grid : Message : Stencil 12.2565 GB/s per node +Grid : Message : Average mflops/s per call per node : 663028 +Grid : Message : Average mflops/s per call per node : 804059 +Grid : Message : Average mflops/s per call per node : 823390 +Grid : Message : Average mflops/s per call per node : 669287 +Grid : Message : Average mflops/s per call per node (full): 312929 +Grid : Message : Average mflops/s per call per node (full): 431848 +Grid : Message : Average mflops/s per call per node (full): 446219 +Grid : Message : Average mflops/s per call per node (full): 304734 +Grid : Message : Stencil 12.7893 GB/s per node +Grid : Message : Stencil 16.221 GB/s per node +Grid : Message : Stencil 17.2351 GB/s per node +Grid : Message : Stencil 13.5079 GB/s per node +Grid : Message : Average mflops/s per call per node : 665857 +Grid : Message : Average mflops/s per call per node : 802147 +Grid : Message : Average mflops/s per call per node : 820083 +Grid : Message : Average mflops/s per call per node : 665001 +Grid : Message : Average mflops/s per call per node (full): 310159 +Grid : Message : Average mflops/s per call per node (full): 428779 +Grid : Message : Average mflops/s per call per node (full): 442738 +Grid : Message : Average mflops/s per call per node (full): 306802 +Grid : Message : Stencil 13.1754 GB/s per node +Grid : Message : Stencil 16.4533 GB/s per node +Grid : Message : Stencil 17.429 GB/s 
per node +Grid : Message : Stencil 13.1193 GB/s per node +Grid : Message : Average mflops/s per call per node : 668829 +Grid : Message : Average mflops/s per call per node : 800926 +Grid : Message : Average mflops/s per call per node : 820687 +Grid : Message : Average mflops/s per call per node : 661271 +Grid : Message : Average mflops/s per call per node (full): 316558 +Grid : Message : Average mflops/s per call per node (full): 433815 +Grid : Message : Average mflops/s per call per node (full): 444707 +Grid : Message : Average mflops/s per call per node (full): 305770 +Grid : Message : Stencil 12.9593 GB/s per node +Grid : Message : Stencil 17.1329 GB/s per node +Grid : Message : Stencil 17.0942 GB/s per node +Grid : Message : Stencil 12.9989 GB/s per node +Grid : Message : Average mflops/s per call per node : 667452 +Grid : Message : Average mflops/s per call per node : 806554 +Grid : Message : Average mflops/s per call per node : 822115 +Grid : Message : Average mflops/s per call per node : 661052 +Grid : Message : Average mflops/s per call per node (full): 315427 +Grid : Message : Average mflops/s per call per node (full): 440584 +Grid : Message : Average mflops/s per call per node (full): 441843 +Grid : Message : Average mflops/s per call per node (full): 304562 +Grid : Message : Stencil 13.518 GB/s per node +Grid : Message : Stencil 17.0027 GB/s per node +Grid : Message : Stencil 18.5458 GB/s per node +Grid : Message : Stencil 12.7962 GB/s per node +Grid : Message : Average mflops/s per call per node : 667844 +Grid : Message : Average mflops/s per call per node : 810592 +Grid : Message : Average mflops/s per call per node : 808363 +Grid : Message : Average mflops/s per call per node : 663379 +Grid : Message : Average mflops/s per call per node (full): 315060 +Grid : Message : Average mflops/s per call per node (full): 439231 +Grid : Message : Average mflops/s per call per node (full): 445731 +Grid : Message : Average mflops/s per call per node (full): 304239 +Grid : Message : Stencil 12.5581 GB/s per node +Grid : Message : Stencil 16.7383 GB/s per node +Grid : Message : Stencil 17.4816 GB/s per node +Grid : Message : Stencil 12.9929 GB/s per node +Grid : Message : Average mflops/s per call per node : 665838 +Grid : Message : Average mflops/s per call per node : 806940 +Grid : Message : Average mflops/s per call per node : 820026 +Grid : Message : Average mflops/s per call per node : 667036 +Grid : Message : Average mflops/s per call per node (full): 313606 +Grid : Message : Average mflops/s per call per node (full): 436480 +Grid : Message : Average mflops/s per call per node (full): 444863 +Grid : Message : Average mflops/s per call per node (full): 306720 +Grid : Message : Stencil 15.0397 GB/s per node +Grid : Message : Stencil 17.1911 GB/s per node +Grid : Message : Stencil 17.6592 GB/s per node +Grid : Message : Stencil 12.6569 GB/s per node +Grid : Message : Average mflops/s per call per node : 662483 +Grid : Message : Average mflops/s per call per node : 809326 +Grid : Message : Average mflops/s per call per node : 821916 +Grid : Message : Average mflops/s per call per node : 667193 +Grid : Message : Average mflops/s per call per node (full): 316709 +Grid : Message : Average mflops/s per call per node (full): 440519 +Grid : Message : Average mflops/s per call per node (full): 444642 +Grid : Message : Average mflops/s per call per node (full): 304478 +Grid : Message : Stencil 14.1893 GB/s per node +Grid : Message : Stencil 16.7491 GB/s per node +Grid : Message : Stencil 17.7025 
GB/s per node +Grid : Message : Stencil 13.0426 GB/s per node +Grid : Message : Average mflops/s per call per node : 663481 +Grid : Message : Average mflops/s per call per node : 803216 +Grid : Message : Average mflops/s per call per node : 824558 +Grid : Message : Average mflops/s per call per node : 664887 +Grid : Message : Average mflops/s per call per node (full): 316694 +Grid : Message : Average mflops/s per call per node (full): 436122 +Grid : Message : Average mflops/s per call per node (full): 446367 +Grid : Message : Average mflops/s per call per node (full): 305147 +Grid : Message : Stencil 12.402 GB/s per node +Grid : Message : Stencil 16.8793 GB/s per node +Grid : Message : Stencil 17.5857 GB/s per node +Grid : Message : Stencil 13.9042 GB/s per node +Grid : Message : Average mflops/s per call per node : 668553 +Grid : Message : Average mflops/s per call per node : 799272 +Grid : Message : Average mflops/s per call per node : 824044 +Grid : Message : Average mflops/s per call per node : 654693 +Grid : Message : Average mflops/s per call per node (full): 311949 +Grid : Message : Average mflops/s per call per node (full): 436881 +Grid : Message : Average mflops/s per call per node (full): 446205 +Grid : Message : Average mflops/s per call per node (full): 305577 +Grid : Message : Stencil 13.1478 GB/s per node +Grid : Message : Stencil 16.31 GB/s per node +Grid : Message : Stencil 18.0335 GB/s per node +Grid : Message : Stencil 12.8276 GB/s per node +Grid : Message : Average mflops/s per call per node : 668461 +Grid : Message : Average mflops/s per call per node : 802629 +Grid : Message : Average mflops/s per call per node : 821788 +Grid : Message : Average mflops/s per call per node : 663470 +Grid : Message : Average mflops/s per call per node (full): 315510 +Grid : Message : Average mflops/s per call per node (full): 432585 +Grid : Message : Average mflops/s per call per node (full): 446386 +Grid : Message : Average mflops/s per call per node (full): 305276 +Grid : Message : Stencil 12.7427 GB/s per node +Grid : Message : Stencil 16.4928 GB/s per node +Grid : Message : Stencil 17.973 GB/s per node +Grid : Message : Stencil 13.3166 GB/s per node +Grid : Message : Average mflops/s per call per node : 673103 +Grid : Message : Average mflops/s per call per node : 804558 +Grid : Message : Average mflops/s per call per node : 823988 +Grid : Message : Average mflops/s per call per node : 663175 +Grid : Message : Average mflops/s per call per node (full): 315899 +Grid : Message : Average mflops/s per call per node (full): 433461 +Grid : Message : Average mflops/s per call per node (full): 442470 +Grid : Message : Average mflops/s per call per node (full): 306588 +Grid : Message : Stencil 13.2121 GB/s per node +Grid : Message : Stencil 17.1273 GB/s per node +Grid : Message : Stencil 17.6244 GB/s per node +Grid : Message : Stencil 13.2802 GB/s per node +Grid : Message : Average mflops/s per call per node : 668004 +Grid : Message : Average mflops/s per call per node : 799380 +Grid : Message : Average mflops/s per call per node : 824882 +Grid : Message : Average mflops/s per call per node : 662423 +Grid : Message : Average mflops/s per call per node (full): 315807 +Grid : Message : Average mflops/s per call per node (full): 437571 +Grid : Message : Average mflops/s per call per node (full): 445182 +Grid : Message : Average mflops/s per call per node (full): 305479 +Grid : Message : Stencil 13.1344 GB/s per node +Grid : Message : Stencil 12.7333 GB/s per node +Grid : Message : Stencil 
18.0397 GB/s per node +Grid : Message : Stencil 12.4452 GB/s per node +Grid : Message : Average mflops/s per call per node : 667610 +Grid : Message : Average mflops/s per call per node : 809721 +Grid : Message : Average mflops/s per call per node : 820494 +Grid : Message : Average mflops/s per call per node : 667970 +Grid : Message : Average mflops/s per call per node (full): 315073 +Grid : Message : Average mflops/s per call per node (full): 376912 +Grid : Message : Average mflops/s per call per node (full): 447594 +Grid : Message : Average mflops/s per call per node (full): 302941 +Grid : Message : Stencil 12.9916 GB/s per node +Grid : Message : Stencil 16.4847 GB/s per node +Grid : Message : Stencil 18.4717 GB/s per node +Grid : Message : Stencil 13.8467 GB/s per node +Grid : Message : Average mflops/s per call per node : 666602 +Grid : Message : Average mflops/s per call per node : 806090 +Grid : Message : Average mflops/s per call per node : 817726 +Grid : Message : Average mflops/s per call per node : 661847 +Grid : Message : Average mflops/s per call per node (full): 315127 +Grid : Message : Average mflops/s per call per node (full): 432934 +Grid : Message : Average mflops/s per call per node (full): 448390 +Grid : Message : Average mflops/s per call per node (full): 306390 +Grid : Message : Stencil 12.3533 GB/s per node +Grid : Message : Stencil 17.2274 GB/s per node +Grid : Message : Stencil 17.7197 GB/s per node +Grid : Message : Stencil 13.6455 GB/s per node +Grid : Message : Average mflops/s per call per node : 665051 +Grid : Message : Average mflops/s per call per node : 806937 +Grid : Message : Average mflops/s per call per node : 822513 +Grid : Message : Average mflops/s per call per node : 660128 +Grid : Message : Average mflops/s per call per node (full): 307102 +Grid : Message : Average mflops/s per call per node (full): 438441 +Grid : Message : Average mflops/s per call per node (full): 446803 +Grid : Message : Average mflops/s per call per node (full): 305302 +Grid : Message : Stencil 12.3064 GB/s per node +Grid : Message : Stencil 16.2822 GB/s per node +Grid : Message : Stencil 17.2384 GB/s per node +Grid : Message : Stencil 14.1965 GB/s per node +Grid : Message : Average mflops/s per call per node : 665604 +Grid : Message : Average mflops/s per call per node : 801811 +Grid : Message : Average mflops/s per call per node : 826139 +Grid : Message : Average mflops/s per call per node : 658406 +Grid : Message : Average mflops/s per call per node (full): 311986 +Grid : Message : Average mflops/s per call per node (full): 431562 +Grid : Message : Average mflops/s per call per node (full): 443145 +Grid : Message : Average mflops/s per call per node (full): 304433 +Grid : Message : Stencil 12.532 GB/s per node +Grid : Message : Stencil 17.5079 GB/s per node +Grid : Message : Stencil 18.6678 GB/s per node +Grid : Message : Stencil 12.3361 GB/s per node +Grid : Message : Average mflops/s per call per node : 667572 +Grid : Message : Average mflops/s per call per node : 801710 +Grid : Message : Average mflops/s per call per node : 818537 +Grid : Message : Average mflops/s per call per node : 668800 +Grid : Message : Average mflops/s per call per node (full): 313979 +Grid : Message : Average mflops/s per call per node (full): 439231 +Grid : Message : Average mflops/s per call per node (full): 446644 +Grid : Message : Average mflops/s per call per node (full): 304258 +Grid : Message : Stencil 13.1495 GB/s per node +Grid : Message : Stencil 16.1774 GB/s per node +Grid : Message : 
Stencil 17.0706 GB/s per node +Grid : Message : Stencil 12.2607 GB/s per node +Grid : Message : Average mflops/s per call per node : 669214 +Grid : Message : Average mflops/s per call per node : 802115 +Grid : Message : Average mflops/s per call per node : 824918 +Grid : Message : Average mflops/s per call per node : 668359 +Grid : Message : Average mflops/s per call per node (full): 315314 +Grid : Message : Average mflops/s per call per node (full): 430106 +Grid : Message : Average mflops/s per call per node (full): 441147 +Grid : Message : Average mflops/s per call per node (full): 303811 +Grid : Message : Stencil 13.1248 GB/s per node +Grid : Message : Stencil 9.80877 GB/s per node +Grid : Message : Stencil 17.9584 GB/s per node +Grid : Message : Stencil 12.2776 GB/s per node +Grid : Message : Average mflops/s per call per node : 667706 +Grid : Message : Average mflops/s per call per node : 804502 +Grid : Message : Average mflops/s per call per node : 828082 +Grid : Message : Average mflops/s per call per node : 670349 +Grid : Message : Average mflops/s per call per node (full): 315359 +Grid : Message : Average mflops/s per call per node (full): 310713 +Grid : Message : Average mflops/s per call per node (full): 446836 +Grid : Message : Average mflops/s per call per node (full): 304280 +Grid : Message : Stencil 13.5257 GB/s per node +Grid : Message : Stencil 17.2223 GB/s per node +Grid : Message : Stencil 17.3992 GB/s per node +Grid : Message : Stencil 14.4036 GB/s per node +Grid : Message : Average mflops/s per call per node : 668025 +Grid : Message : Average mflops/s per call per node : 803956 +Grid : Message : Average mflops/s per call per node : 821781 +Grid : Message : Average mflops/s per call per node : 656816 +Grid : Message : Average mflops/s per call per node (full): 315395 +Grid : Message : Average mflops/s per call per node (full): 438030 +Grid : Message : Average mflops/s per call per node (full): 443848 +Grid : Message : Average mflops/s per call per node (full): 306141 +Grid : Message : Stencil 13.7108 GB/s per node +Grid : Message : Stencil 17.0664 GB/s per node +Grid : Message : Stencil 17.4254 GB/s per node +Grid : Message : Stencil 12.3972 GB/s per node +Grid : Message : Average mflops/s per call per node : 666651 +Grid : Message : Average mflops/s per call per node : 804934 +Grid : Message : Average mflops/s per call per node : 829044 +Grid : Message : Average mflops/s per call per node : 667967 +Grid : Message : Average mflops/s per call per node (full): 316229 +Grid : Message : Average mflops/s per call per node (full): 437367 +Grid : Message : Average mflops/s per call per node (full): 445282 +Grid : Message : Average mflops/s per call per node (full): 304927 +Grid : Message : Stencil 13.6529 GB/s per node +Grid : Message : Stencil 17.3621 GB/s per node +Grid : Message : Stencil 17.7085 GB/s per node +Grid : Message : Stencil 12.1508 GB/s per node +Grid : Message : Average mflops/s per call per node : 665599 +Grid : Message : Average mflops/s per call per node : 802139 +Grid : Message : Average mflops/s per call per node : 819221 +Grid : Message : Average mflops/s per call per node : 666278 +Grid : Message : Average mflops/s per call per node (full): 315715 +Grid : Message : Average mflops/s per call per node (full): 437760 +Grid : Message : Average mflops/s per call per node (full): 444470 +Grid : Message : Average mflops/s per call per node (full): 303334 +Grid : Message : Stencil 13.7906 GB/s per node +Grid : Message : Stencil 6.55631 GB/s per node +Grid : 
Message : Stencil 18.0631 GB/s per node +Grid : Message : Stencil 12.4737 GB/s per node +Grid : Message : Average mflops/s per call per node : 663705 +Grid : Message : Average mflops/s per call per node : 807329 +Grid : Message : Average mflops/s per call per node : 822044 +Grid : Message : Average mflops/s per call per node : 667803 +Grid : Message : Average mflops/s per call per node (full): 316153 +Grid : Message : Average mflops/s per call per node (full): 226870 +Grid : Message : Average mflops/s per call per node (full): 445791 +Grid : Message : Average mflops/s per call per node (full): 304399 +Grid : Message : Stencil 13.9422 GB/s per node +Grid : Message : Stencil 18.0489 GB/s per node +Grid : Message : Stencil 18.202 GB/s per node +Grid : Message : Stencil 12.7549 GB/s per node +Grid : Message : Average mflops/s per call per node : 664665 +Grid : Message : Average mflops/s per call per node : 805682 +Grid : Message : Average mflops/s per call per node : 823134 +Grid : Message : Average mflops/s per call per node : 660925 +Grid : Message : Average mflops/s per call per node (full): 316494 +Grid : Message : Average mflops/s per call per node (full): 441297 +Grid : Message : Average mflops/s per call per node (full): 447213 +Grid : Message : Average mflops/s per call per node (full): 303280 +Grid : Message : Stencil 14.8899 GB/s per node +Grid : Message : Stencil 17.1059 GB/s per node +Grid : Message : Stencil 17.6417 GB/s per node +Grid : Message : Stencil 12.4758 GB/s per node +Grid : Message : Average mflops/s per call per node : 663410 +Grid : Message : Average mflops/s per call per node : 805254 +Grid : Message : Average mflops/s per call per node : 825254 +Grid : Message : Average mflops/s per call per node : 667450 +Grid : Message : Average mflops/s per call per node (full): 317238 +Grid : Message : Average mflops/s per call per node (full): 437415 +Grid : Message : Average mflops/s per call per node (full): 437589 +Grid : Message : Average mflops/s per call per node (full): 303595 +Grid : Message : Stencil 12.6778 GB/s per node +Grid : Message : Stencil 17.786 GB/s per node +Grid : Message : Stencil 17.91 GB/s per node +Grid : Message : Stencil 13.2347 GB/s per node +Grid : Message : Average mflops/s per call per node : 671875 +Grid : Message : Average mflops/s per call per node : 802320 +Grid : Message : Average mflops/s per call per node : 826003 +Grid : Message : Average mflops/s per call per node : 664528 +Grid : Message : Average mflops/s per call per node (full): 315102 +Grid : Message : Average mflops/s per call per node (full): 438671 +Grid : Message : Average mflops/s per call per node (full): 448007 +Grid : Message : Average mflops/s per call per node (full): 304178 +Grid : Message : Stencil 12.8066 GB/s per node +Grid : Message : Stencil 9.47955 GB/s per node +Grid : Message : Stencil 17.1296 GB/s per node +Grid : Message : Stencil 13.3126 GB/s per node +Grid : Message : Average mflops/s per call per node : 667534 +Grid : Message : Average mflops/s per call per node : 810356 +Grid : Message : Average mflops/s per call per node : 820584 +Grid : Message : Average mflops/s per call per node : 665002 +Grid : Message : Average mflops/s per call per node (full): 315031 +Grid : Message : Average mflops/s per call per node (full): 303250 +Grid : Message : Average mflops/s per call per node (full): 440421 +Grid : Message : Average mflops/s per call per node (full): 306009 +Grid : Message : Stencil 12.8412 GB/s per node +Grid : Message : Stencil 17.5009 GB/s per node +Grid 
: Message : Stencil 19.7497 GB/s per node +Grid : Message : Stencil 12.4201 GB/s per node +Grid : Message : Average mflops/s per call per node : 666739 +Grid : Message : Average mflops/s per call per node : 798613 +Grid : Message : Average mflops/s per call per node : 818810 +Grid : Message : Average mflops/s per call per node : 663304 +Grid : Message : Average mflops/s per call per node (full): 312930 +Grid : Message : Average mflops/s per call per node (full): 438826 +Grid : Message : Average mflops/s per call per node (full): 450335 +Grid : Message : Average mflops/s per call per node (full): 303353 +Grid : Message : Stencil 12.7016 GB/s per node +Grid : Message : Stencil 16.2536 GB/s per node +Grid : Message : Stencil 18.2265 GB/s per node +Grid : Message : Stencil 12.3248 GB/s per node +Grid : Message : Average mflops/s per call per node : 670398 +Grid : Message : Average mflops/s per call per node : 804082 +Grid : Message : Average mflops/s per call per node : 821654 +Grid : Message : Average mflops/s per call per node : 665580 +Grid : Message : Average mflops/s per call per node (full): 314434 +Grid : Message : Average mflops/s per call per node (full): 429458 +Grid : Message : Average mflops/s per call per node (full): 446120 +Grid : Message : Average mflops/s per call per node (full): 303792 +Grid : Message : Stencil 12.8835 GB/s per node +Grid : Message : Stencil 16.4583 GB/s per node +Grid : Message : Stencil 17.5232 GB/s per node +Grid : Message : Stencil 12.9388 GB/s per node +Grid : Message : Average mflops/s per call per node : 668522 +Grid : Message : Average mflops/s per call per node : 808236 +Grid : Message : Average mflops/s per call per node : 821330 +Grid : Message : Average mflops/s per call per node : 660228 +Grid : Message : Average mflops/s per call per node (full): 313520 +Grid : Message : Average mflops/s per call per node (full): 430168 +Grid : Message : Average mflops/s per call per node (full): 439663 +Grid : Message : Average mflops/s per call per node (full): 304068 +Grid : Message : Stencil 13.021 GB/s per node +Grid : Message : Stencil 16.8159 GB/s per node +Grid : Message : Stencil 17.3261 GB/s per node +Grid : Message : Stencil 12.7807 GB/s per node +Grid : Message : Average mflops/s per call per node : 667768 +Grid : Message : Average mflops/s per call per node : 807405 +Grid : Message : Average mflops/s per call per node : 825368 +Grid : Message : Average mflops/s per call per node : 667833 +Grid : Message : Average mflops/s per call per node (full): 313354 +Grid : Message : Average mflops/s per call per node (full): 433510 +Grid : Message : Average mflops/s per call per node (full): 443765 +Grid : Message : Average mflops/s per call per node (full): 306532 +Grid : Message : Stencil 13.04 GB/s per node +Grid : Message : Stencil 17.9266 GB/s per node +Grid : Message : Stencil 16.9641 GB/s per node +Grid : Message : Stencil 12.1914 GB/s per node +Grid : Message : Average mflops/s per call per node : 667822 +Grid : Message : Average mflops/s per call per node : 796507 +Grid : Message : Average mflops/s per call per node : 826307 +Grid : Message : Average mflops/s per call per node : 667710 +Grid : Message : Average mflops/s per call per node (full): 314874 +Grid : Message : Average mflops/s per call per node (full): 438934 +Grid : Message : Average mflops/s per call per node (full): 440721 +Grid : Message : Average mflops/s per call per node (full): 301811 +Grid : Message : Stencil 14.0713 GB/s per node +Grid : Message : Stencil 9.84629 GB/s per node 
+Grid : Message : Stencil 17.975 GB/s per node +Grid : Message : Stencil 12.6292 GB/s per node +Grid : Message : Average mflops/s per call per node : 662677 +Grid : Message : Average mflops/s per call per node : 807628 +Grid : Message : Average mflops/s per call per node : 819486 +Grid : Message : Average mflops/s per call per node : 665414 +Grid : Message : Average mflops/s per call per node (full): 314320 +Grid : Message : Average mflops/s per call per node (full): 311792 +Grid : Message : Average mflops/s per call per node (full): 446145 +Grid : Message : Average mflops/s per call per node (full): 305001 +Grid : Message : Stencil 15.2395 GB/s per node +Grid : Message : Stencil 16.6914 GB/s per node +Grid : Message : Stencil 16.7894 GB/s per node +Grid : Message : Stencil 13.1997 GB/s per node +Grid : Message : Average mflops/s per call per node : 661209 +Grid : Message : Average mflops/s per call per node : 809394 +Grid : Message : Average mflops/s per call per node : 825684 +Grid : Message : Average mflops/s per call per node : 662842 +Grid : Message : Average mflops/s per call per node (full): 316996 +Grid : Message : Average mflops/s per call per node (full): 437153 +Grid : Message : Average mflops/s per call per node (full): 438272 +Grid : Message : Average mflops/s per call per node (full): 304307 +Grid : Message : Stencil 13.3791 GB/s per node +Grid : Message : Stencil 16.8369 GB/s per node +Grid : Message : Stencil 17.224 GB/s per node +Grid : Message : Stencil 14.0024 GB/s per node +Grid : Message : Average mflops/s per call per node : 665689 +Grid : Message : Average mflops/s per call per node : 802725 +Grid : Message : Average mflops/s per call per node : 827751 +Grid : Message : Average mflops/s per call per node : 654313 +Grid : Message : Average mflops/s per call per node (full): 316236 +Grid : Message : Average mflops/s per call per node (full): 436545 +Grid : Message : Average mflops/s per call per node (full): 443947 +Grid : Message : Average mflops/s per call per node (full): 304434 +Grid : Message : Stencil 12.5735 GB/s per node +Grid : Message : Stencil 11.2593 GB/s per node +Grid : Message : Stencil 17.4278 GB/s per node +Grid : Message : Stencil 12.9417 GB/s per node +Grid : Message : Average mflops/s per call per node : 672094 +Grid : Message : Average mflops/s per call per node : 807479 +Grid : Message : Average mflops/s per call per node : 827536 +Grid : Message : Average mflops/s per call per node : 663187 +Grid : Message : Average mflops/s per call per node (full): 314938 +Grid : Message : Average mflops/s per call per node (full): 345570 +Grid : Message : Average mflops/s per call per node (full): 444522 +Grid : Message : Average mflops/s per call per node (full): 304011 +Grid : Message : Stencil 13.6533 GB/s per node +Grid : Message : Stencil 17.3081 GB/s per node +Grid : Message : Stencil 17.4874 GB/s per node +Grid : Message : Stencil 12.6095 GB/s per node +Grid : Message : Average mflops/s per call per node : 668633 +Grid : Message : Average mflops/s per call per node : 802535 +Grid : Message : Average mflops/s per call per node : 823084 +Grid : Message : Average mflops/s per call per node : 668702 +Grid : Message : Average mflops/s per call per node (full): 317040 +Grid : Message : Average mflops/s per call per node (full): 438029 +Grid : Message : Average mflops/s per call per node (full): 445463 +Grid : Message : Average mflops/s per call per node (full): 306130 +Grid : Message : Stencil 12.867 GB/s per node +Grid : Message : Stencil 17.2766 GB/s per 
node +Grid : Message : Stencil 16.1951 GB/s per node +Grid : Message : Stencil 11.682 GB/s per node +Grid : Message : Average mflops/s per call per node : 667313 +Grid : Message : Average mflops/s per call per node : 802375 +Grid : Message : Average mflops/s per call per node : 827107 +Grid : Message : Average mflops/s per call per node : 664678 +Grid : Message : Average mflops/s per call per node (full): 313388 +Grid : Message : Average mflops/s per call per node (full): 438214 +Grid : Message : Average mflops/s per call per node (full): 424986 +Grid : Message : Average mflops/s per call per node (full): 298506 +Grid : Message : Stencil 13.0763 GB/s per node +Grid : Message : Stencil 16.5815 GB/s per node +Grid : Message : Stencil 17.1128 GB/s per node +Grid : Message : Stencil 12.3072 GB/s per node +Grid : Message : Average mflops/s per call per node : 666096 +Grid : Message : Average mflops/s per call per node : 805076 +Grid : Message : Average mflops/s per call per node : 820869 +Grid : Message : Average mflops/s per call per node : 664399 +Grid : Message : Average mflops/s per call per node (full): 314237 +Grid : Message : Average mflops/s per call per node (full): 436072 +Grid : Message : Average mflops/s per call per node (full): 440936 +Grid : Message : Average mflops/s per call per node (full): 303585 +Grid : Message : Stencil 13.4101 GB/s per node +Grid : Message : Stencil 16.7424 GB/s per node +Grid : Message : Stencil 17.8502 GB/s per node +Grid : Message : Stencil 14.3448 GB/s per node +Grid : Message : Average mflops/s per call per node : 666799 +Grid : Message : Average mflops/s per call per node : 809391 +Grid : Message : Average mflops/s per call per node : 823629 +Grid : Message : Average mflops/s per call per node : 664725 +Grid : Message : Average mflops/s per call per node (full): 315379 +Grid : Message : Average mflops/s per call per node (full): 437879 +Grid : Message : Average mflops/s per call per node (full): 447103 +Grid : Message : Average mflops/s per call per node (full): 307257 +Grid : Message : Stencil 13.568 GB/s per node +Grid : Message : Stencil 17.6779 GB/s per node +Grid : Message : Stencil 16.8741 GB/s per node +Grid : Message : Stencil 12.4839 GB/s per node +Grid : Message : Average mflops/s per call per node : 668025 +Grid : Message : Average mflops/s per call per node : 804674 +Grid : Message : Average mflops/s per call per node : 821256 +Grid : Message : Average mflops/s per call per node : 664966 +Grid : Message : Average mflops/s per call per node (full): 316063 +Grid : Message : Average mflops/s per call per node (full): 440264 +Grid : Message : Average mflops/s per call per node (full): 439220 +Grid : Message : Average mflops/s per call per node (full): 303367 +Grid : Message : Stencil 13.8637 GB/s per node +Grid : Message : Stencil 16.318 GB/s per node +Grid : Message : Stencil 17.3829 GB/s per node +Grid : Message : Stencil 12.7476 GB/s per node +Grid : Message : Average mflops/s per call per node : 667436 +Grid : Message : Average mflops/s per call per node : 803547 +Grid : Message : Average mflops/s per call per node : 825521 +Grid : Message : Average mflops/s per call per node : 662615 +Grid : Message : Average mflops/s per call per node (full): 316257 +Grid : Message : Average mflops/s per call per node (full): 429325 +Grid : Message : Average mflops/s per call per node (full): 441772 +Grid : Message : Average mflops/s per call per node (full): 304181 +Grid : Message : Stencil 14.1388 GB/s per node +Grid : Message : Stencil 16.9508 GB/s 
per node +Grid : Message : Stencil 17.6921 GB/s per node +Grid : Message : Stencil 12.6858 GB/s per node +Grid : Message : Average mflops/s per call per node : 666942 +Grid : Message : Average mflops/s per call per node : 802603 +Grid : Message : Average mflops/s per call per node : 824945 +Grid : Message : Average mflops/s per call per node : 668276 +Grid : Message : Average mflops/s per call per node (full): 317268 +Grid : Message : Average mflops/s per call per node (full): 435966 +Grid : Message : Average mflops/s per call per node (full): 445270 +Grid : Message : Average mflops/s per call per node (full): 305709 +Grid : Message : Stencil 13.2752 GB/s per node +Grid : Message : Stencil 14.3609 GB/s per node +Grid : Message : Stencil 19.2179 GB/s per node +Grid : Message : Stencil 13.5641 GB/s per node +Grid : Message : Average mflops/s per call per node : 666144 +Grid : Message : Average mflops/s per call per node : 803392 +Grid : Message : Average mflops/s per call per node : 822152 +Grid : Message : Average mflops/s per call per node : 658488 +Grid : Message : Average mflops/s per call per node (full): 315994 +Grid : Message : Average mflops/s per call per node (full): 402828 +Grid : Message : Average mflops/s per call per node (full): 449873 +Grid : Message : Average mflops/s per call per node (full): 304869 +Grid : Message : Stencil 13.6586 GB/s per node +Grid : Message : Stencil 16.6687 GB/s per node +Grid : Message : Stencil 17.8118 GB/s per node +Grid : Message : Stencil 12.9281 GB/s per node +Grid : Message : Average mflops/s per call per node : 662645 +Grid : Message : Average mflops/s per call per node : 805011 +Grid : Message : Average mflops/s per call per node : 823772 +Grid : Message : Average mflops/s per call per node : 659767 +Grid : Message : Average mflops/s per call per node (full): 315917 +Grid : Message : Average mflops/s per call per node (full): 434045 +Grid : Message : Average mflops/s per call per node (full): 447067 +Grid : Message : Average mflops/s per call per node (full): 303836 +Grid : Message : Stencil 12.783 GB/s per node +Grid : Message : Stencil 16.5007 GB/s per node +Grid : Message : Stencil 18.1978 GB/s per node +Grid : Message : Stencil 12.5762 GB/s per node +Grid : Message : Average mflops/s per call per node : 668439 +Grid : Message : Average mflops/s per call per node : 808408 +Grid : Message : Average mflops/s per call per node : 818862 +Grid : Message : Average mflops/s per call per node : 666769 +Grid : Message : Average mflops/s per call per node (full): 313955 +Grid : Message : Average mflops/s per call per node (full): 435371 +Grid : Message : Average mflops/s per call per node (full): 444736 +Grid : Message : Average mflops/s per call per node (full): 302601 +Grid : Message : Stencil 12.3588 GB/s per node +Grid : Message : Stencil 16.6906 GB/s per node +Grid : Message : Stencil 17.5897 GB/s per node +Grid : Message : Stencil 12.731 GB/s per node +Grid : Message : Average mflops/s per call per node : 672717 +Grid : Message : Average mflops/s per call per node : 809225 +Grid : Message : Average mflops/s per call per node : 824966 +Grid : Message : Average mflops/s per call per node : 664892 +Grid : Message : Average mflops/s per call per node (full): 312967 +Grid : Message : Average mflops/s per call per node (full): 437334 +Grid : Message : Average mflops/s per call per node (full): 446624 +Grid : Message : Average mflops/s per call per node (full): 305067 +Grid : Message : Stencil 13.5086 GB/s per node +Grid : Message : Stencil 16.7682 
GB/s per node +Grid : Message : Stencil 17.36 GB/s per node +Grid : Message : Stencil 12.4457 GB/s per node +Grid : Message : Average mflops/s per call per node : 665369 +Grid : Message : Average mflops/s per call per node : 803451 +Grid : Message : Average mflops/s per call per node : 819366 +Grid : Message : Average mflops/s per call per node : 664067 +Grid : Message : Average mflops/s per call per node (full): 314363 +Grid : Message : Average mflops/s per call per node (full): 436058 +Grid : Message : Average mflops/s per call per node (full): 443030 +Grid : Message : Average mflops/s per call per node (full): 303937 +Grid : Message : Stencil 13.3165 GB/s per node +Grid : Message : Stencil 12.5025 GB/s per node +Grid : Message : Stencil 17.7703 GB/s per node +Grid : Message : Stencil 14.2332 GB/s per node +Grid : Message : Average mflops/s per call per node : 665657 +Grid : Message : Average mflops/s per call per node : 812801 +Grid : Message : Average mflops/s per call per node : 823140 +Grid : Message : Average mflops/s per call per node : 663945 +Grid : Message : Average mflops/s per call per node (full): 315523 +Grid : Message : Average mflops/s per call per node (full): 372095 +Grid : Message : Average mflops/s per call per node (full): 446653 +Grid : Message : Average mflops/s per call per node (full): 307241 +Grid : Message : Stencil 14.9576 GB/s per node +Grid : Message : Stencil 8.89052 GB/s per node +Grid : Message : Stencil 17.2728 GB/s per node +Grid : Message : Stencil 12.6808 GB/s per node +Grid : Message : Average mflops/s per call per node : 662859 +Grid : Message : Average mflops/s per call per node : 812123 +Grid : Message : Average mflops/s per call per node : 830044 +Grid : Message : Average mflops/s per call per node : 665640 +Grid : Message : Average mflops/s per call per node (full): 317215 +Grid : Message : Average mflops/s per call per node (full): 289792 +Grid : Message : Average mflops/s per call per node (full): 444953 +Grid : Message : Average mflops/s per call per node (full): 305590 +Grid : Message : Stencil 13.2073 GB/s per node +Grid : Message : Stencil 16.5902 GB/s per node +Grid : Message : Stencil 17.9064 GB/s per node +Grid : Message : Stencil 12.3071 GB/s per node +Grid : Message : Average mflops/s per call per node : 665944 +Grid : Message : Average mflops/s per call per node : 805445 +Grid : Message : Average mflops/s per call per node : 821582 +Grid : Message : Average mflops/s per call per node : 659905 +Grid : Message : Average mflops/s per call per node (full): 315108 +Grid : Message : Average mflops/s per call per node (full): 434920 +Grid : Message : Average mflops/s per call per node (full): 440533 +Grid : Message : Average mflops/s per call per node (full): 300261 +Grid : Message : Stencil 13.6207 GB/s per node +Grid : Message : Stencil 16.963 GB/s per node +Grid : Message : Stencil 18.8015 GB/s per node +Grid : Message : Stencil 12.6053 GB/s per node +Grid : Message : Average mflops/s per call per node : 662003 +Grid : Message : Average mflops/s per call per node : 802034 +Grid : Message : Average mflops/s per call per node : 823994 +Grid : Message : Average mflops/s per call per node : 664597 +Grid : Message : Average mflops/s per call per node (full): 315494 +Grid : Message : Average mflops/s per call per node (full): 436003 +Grid : Message : Average mflops/s per call per node (full): 449112 +Grid : Message : Average mflops/s per call per node (full): 304923 +Grid : Message : Stencil 13.6174 GB/s per node +Grid : Message : Stencil 
[Benchmark log elided: many repeated measurement iterations, each reporting four "Grid : Message : Stencil <N> GB/s per node" bandwidth samples (roughly 8.2 to 19.2 GB/s per node), four "Grid : Message : Average mflops/s per call per node" figures (roughly 654,000 to 829,000), and four "Grid : Message : Average mflops/s per call per node (full)" figures (roughly 272,000 to 451,000).]
: Average mflops/s per call per node (full): 305255 +Grid : Message : Stencil 12.9962 GB/s per node +Grid : Message : Stencil 17.5956 GB/s per node +Grid : Message : Stencil 17.5064 GB/s per node +Grid : Message : Stencil 12.5874 GB/s per node +Grid : Message : Average mflops/s per call per node : 668479 +Grid : Message : Average mflops/s per call per node : 799041 +Grid : Message : Average mflops/s per call per node : 818390 +Grid : Message : Average mflops/s per call per node : 664712 +Grid : Message : Average mflops/s per call per node (full): 315237 +Grid : Message : Average mflops/s per call per node (full): 440281 +Grid : Message : Average mflops/s per call per node (full): 443676 +Grid : Message : Average mflops/s per call per node (full): 304852 +Grid : Message : Stencil 12.6041 GB/s per node +Grid : Message : Stencil 17.3707 GB/s per node +Grid : Message : Stencil 17.659 GB/s per node +Grid : Message : Stencil 12.3764 GB/s per node +Grid : Message : Average mflops/s per call per node : 669297 +Grid : Message : Average mflops/s per call per node : 803887 +Grid : Message : Average mflops/s per call per node : 823498 +Grid : Message : Average mflops/s per call per node : 665741 +Grid : Message : Average mflops/s per call per node (full): 315614 +Grid : Message : Average mflops/s per call per node (full): 436539 +Grid : Message : Average mflops/s per call per node (full): 440952 +Grid : Message : Average mflops/s per call per node (full): 304036 +Grid : Message : Stencil 13.5348 GB/s per node +Grid : Message : Stencil 17.038 GB/s per node +Grid : Message : Stencil 18.1659 GB/s per node +Grid : Message : Stencil 13.1206 GB/s per node +Grid : Message : Average mflops/s per call per node : 664512 +Grid : Message : Average mflops/s per call per node : 805318 +Grid : Message : Average mflops/s per call per node : 825232 +Grid : Message : Average mflops/s per call per node : 666321 +Grid : Message : Average mflops/s per call per node (full): 314166 +Grid : Message : Average mflops/s per call per node (full): 437732 +Grid : Message : Average mflops/s per call per node (full): 448411 +Grid : Message : Average mflops/s per call per node (full): 306062 +Grid : Message : Stencil 13.7061 GB/s per node +Grid : Message : Stencil 16.3278 GB/s per node +Grid : Message : Stencil 18.5878 GB/s per node +Grid : Message : Stencil 12.4536 GB/s per node +Grid : Message : Average mflops/s per call per node : 661594 +Grid : Message : Average mflops/s per call per node : 808149 +Grid : Message : Average mflops/s per call per node : 821405 +Grid : Message : Average mflops/s per call per node : 665914 +Grid : Message : Average mflops/s per call per node (full): 315010 +Grid : Message : Average mflops/s per call per node (full): 432917 +Grid : Message : Average mflops/s per call per node (full): 448662 +Grid : Message : Average mflops/s per call per node (full): 302919 +Grid : Message : Stencil 12.7146 GB/s per node +Grid : Message : Stencil 16.0773 GB/s per node +Grid : Message : Stencil 17.1469 GB/s per node +Grid : Message : Stencil 12.8855 GB/s per node +Grid : Message : Average mflops/s per call per node : 666923 +Grid : Message : Average mflops/s per call per node : 805953 +Grid : Message : Average mflops/s per call per node : 828904 +Grid : Message : Average mflops/s per call per node : 665301 +Grid : Message : Average mflops/s per call per node (full): 314385 +Grid : Message : Average mflops/s per call per node (full): 428018 +Grid : Message : Average mflops/s per call per node (full): 440554 +Grid : 
Message : Average mflops/s per call per node (full): 305729 +Grid : Message : Stencil 12.9741 GB/s per node +Grid : Message : Stencil 18.8804 GB/s per node +Grid : Message : Stencil 17.7919 GB/s per node +Grid : Message : Stencil 13.4678 GB/s per node +Grid : Message : Average mflops/s per call per node : 666032 +Grid : Message : Average mflops/s per call per node : 803449 +Grid : Message : Average mflops/s per call per node : 822860 +Grid : Message : Average mflops/s per call per node : 662973 +Grid : Message : Average mflops/s per call per node (full): 313551 +Grid : Message : Average mflops/s per call per node (full): 441640 +Grid : Message : Average mflops/s per call per node (full): 446690 +Grid : Message : Average mflops/s per call per node (full): 306235 +Grid : Message : Stencil 12.521 GB/s per node +Grid : Message : Stencil 16.3717 GB/s per node +Grid : Message : Stencil 16.7679 GB/s per node +Grid : Message : Stencil 12.8942 GB/s per node +Grid : Message : Average mflops/s per call per node : 665824 +Grid : Message : Average mflops/s per call per node : 806997 +Grid : Message : Average mflops/s per call per node : 825090 +Grid : Message : Average mflops/s per call per node : 664015 +Grid : Message : Average mflops/s per call per node (full): 312669 +Grid : Message : Average mflops/s per call per node (full): 432103 +Grid : Message : Average mflops/s per call per node (full): 428071 +Grid : Message : Average mflops/s per call per node (full): 305300 +Grid : Message : Stencil 13.3708 GB/s per node +Grid : Message : Stencil 16.5066 GB/s per node +Grid : Message : Stencil 16.9276 GB/s per node +Grid : Message : Stencil 12.0497 GB/s per node +Grid : Message : Average mflops/s per call per node : 666178 +Grid : Message : Average mflops/s per call per node : 804912 +Grid : Message : Average mflops/s per call per node : 822589 +Grid : Message : Average mflops/s per call per node : 668246 +Grid : Message : Average mflops/s per call per node (full): 316301 +Grid : Message : Average mflops/s per call per node (full): 435095 +Grid : Message : Average mflops/s per call per node (full): 436080 +Grid : Message : Average mflops/s per call per node (full): 302118 +Grid : Message : Stencil 12.7027 GB/s per node +Grid : Message : Stencil 16.5099 GB/s per node +Grid : Message : Stencil 17.1585 GB/s per node +Grid : Message : Stencil 13.2181 GB/s per node +Grid : Message : Average mflops/s per call per node : 668274 +Grid : Message : Average mflops/s per call per node : 802058 +Grid : Message : Average mflops/s per call per node : 823686 +Grid : Message : Average mflops/s per call per node : 665332 +Grid : Message : Average mflops/s per call per node (full): 314731 +Grid : Message : Average mflops/s per call per node (full): 433988 +Grid : Message : Average mflops/s per call per node (full): 441863 +Grid : Message : Average mflops/s per call per node (full): 306171 +Grid : Message : Stencil 13.5096 GB/s per node +Grid : Message : Stencil 16.6291 GB/s per node +Grid : Message : Stencil 17.6593 GB/s per node +Grid : Message : Stencil 12.738 GB/s per node +Grid : Message : Average mflops/s per call per node : 667716 +Grid : Message : Average mflops/s per call per node : 804795 +Grid : Message : Average mflops/s per call per node : 823643 +Grid : Message : Average mflops/s per call per node : 661819 +Grid : Message : Average mflops/s per call per node (full): 315837 +Grid : Message : Average mflops/s per call per node (full): 436896 +Grid : Message : Average mflops/s per call per node (full): 443696 
+Grid : Message : Average mflops/s per call per node (full): 303158 +Grid : Message : Stencil 13.764 GB/s per node +Grid : Message : Stencil 16.4805 GB/s per node +Grid : Message : Stencil 16.806 GB/s per node +Grid : Message : Stencil 12.2637 GB/s per node +Grid : Message : Average mflops/s per call per node : 666394 +Grid : Message : Average mflops/s per call per node : 808315 +Grid : Message : Average mflops/s per call per node : 830523 +Grid : Message : Average mflops/s per call per node : 667099 +Grid : Message : Average mflops/s per call per node (full): 316598 +Grid : Message : Average mflops/s per call per node (full): 431115 +Grid : Message : Average mflops/s per call per node (full): 438804 +Grid : Message : Average mflops/s per call per node (full): 303771 +Grid : Message : Stencil 14.0315 GB/s per node +Grid : Message : Stencil 16.6177 GB/s per node +Grid : Message : Stencil 17.5158 GB/s per node +Grid : Message : Stencil 12.8021 GB/s per node +Grid : Message : Average mflops/s per call per node : 664946 +Grid : Message : Average mflops/s per call per node : 801175 +Grid : Message : Average mflops/s per call per node : 824969 +Grid : Message : Average mflops/s per call per node : 664651 +Grid : Message : Average mflops/s per call per node (full): 316485 +Grid : Message : Average mflops/s per call per node (full): 434346 +Grid : Message : Average mflops/s per call per node (full): 445188 +Grid : Message : Average mflops/s per call per node (full): 304439 +Grid : Message : Stencil 13.3968 GB/s per node +Grid : Message : Stencil 16.7046 GB/s per node +Grid : Message : Stencil 19.0282 GB/s per node +Grid : Message : Stencil 13.6061 GB/s per node +Grid : Message : Average mflops/s per call per node : 665767 +Grid : Message : Average mflops/s per call per node : 806549 +Grid : Message : Average mflops/s per call per node : 824649 +Grid : Message : Average mflops/s per call per node : 659771 +Grid : Message : Average mflops/s per call per node (full): 316021 +Grid : Message : Average mflops/s per call per node (full): 437159 +Grid : Message : Average mflops/s per call per node (full): 448624 +Grid : Message : Average mflops/s per call per node (full): 305509 +Grid : Message : Stencil 13.3675 GB/s per node +Grid : Message : Stencil 16.382 GB/s per node +Grid : Message : Stencil 18.511 GB/s per node +Grid : Message : Stencil 13.2205 GB/s per node +Grid : Message : Average mflops/s per call per node : 665497 +Grid : Message : Average mflops/s per call per node : 803878 +Grid : Message : Average mflops/s per call per node : 827619 +Grid : Message : Average mflops/s per call per node : 665752 +Grid : Message : Average mflops/s per call per node (full): 316256 +Grid : Message : Average mflops/s per call per node (full): 433503 +Grid : Message : Average mflops/s per call per node (full): 448486 +Grid : Message : Average mflops/s per call per node (full): 306068 +Grid : Message : Stencil 13.0375 GB/s per node +Grid : Message : Stencil 16.6764 GB/s per node +Grid : Message : Stencil 17.3085 GB/s per node +Grid : Message : Stencil 12.0552 GB/s per node +Grid : Message : Average mflops/s per call per node : 668003 +Grid : Message : Average mflops/s per call per node : 806652 +Grid : Message : Average mflops/s per call per node : 825741 +Grid : Message : Average mflops/s per call per node : 665785 +Grid : Message : Average mflops/s per call per node (full): 313382 +Grid : Message : Average mflops/s per call per node (full): 436713 +Grid : Message : Average mflops/s per call per node (full): 
445120 +Grid : Message : Average mflops/s per call per node (full): 302148 +Grid : Message : Stencil 14.8512 GB/s per node +Grid : Message : Stencil 16.5633 GB/s per node +Grid : Message : Stencil 17.388 GB/s per node +Grid : Message : Stencil 12.0063 GB/s per node +Grid : Message : Average mflops/s per call per node : 663881 +Grid : Message : Average mflops/s per call per node : 804976 +Grid : Message : Average mflops/s per call per node : 820549 +Grid : Message : Average mflops/s per call per node : 668148 +Grid : Message : Average mflops/s per call per node (full): 316076 +Grid : Message : Average mflops/s per call per node (full): 435694 +Grid : Message : Average mflops/s per call per node (full): 443735 +Grid : Message : Average mflops/s per call per node (full): 302773 +Grid : Message : Stencil 13.3267 GB/s per node +Grid : Message : Stencil 17.0588 GB/s per node +Grid : Message : Stencil 16.5925 GB/s per node +Grid : Message : Stencil 13.05 GB/s per node +Grid : Message : Average mflops/s per call per node : 665763 +Grid : Message : Average mflops/s per call per node : 803088 +Grid : Message : Average mflops/s per call per node : 826037 +Grid : Message : Average mflops/s per call per node : 662986 +Grid : Message : Average mflops/s per call per node (full): 315659 +Grid : Message : Average mflops/s per call per node (full): 435380 +Grid : Message : Average mflops/s per call per node (full): 433958 +Grid : Message : Average mflops/s per call per node (full): 306295 +Grid : Message : Stencil 12.9447 GB/s per node +Grid : Message : Stencil 17.6472 GB/s per node +Grid : Message : Stencil 18.4279 GB/s per node +Grid : Message : Stencil 13.0117 GB/s per node +Grid : Message : Average mflops/s per call per node : 667222 +Grid : Message : Average mflops/s per call per node : 801881 +Grid : Message : Average mflops/s per call per node : 821229 +Grid : Message : Average mflops/s per call per node : 660378 +Grid : Message : Average mflops/s per call per node (full): 313981 +Grid : Message : Average mflops/s per call per node (full): 438879 +Grid : Message : Average mflops/s per call per node (full): 447844 +Grid : Message : Average mflops/s per call per node (full): 303894 +Grid : Message : Stencil 13.1517 GB/s per node +Grid : Message : Stencil 16.6235 GB/s per node +Grid : Message : Stencil 17.8076 GB/s per node +Grid : Message : Stencil 12.154 GB/s per node +Grid : Message : Average mflops/s per call per node : 669391 +Grid : Message : Average mflops/s per call per node : 800786 +Grid : Message : Average mflops/s per call per node : 818034 +Grid : Message : Average mflops/s per call per node : 664339 +Grid : Message : Average mflops/s per call per node (full): 315474 +Grid : Message : Average mflops/s per call per node (full): 434236 +Grid : Message : Average mflops/s per call per node (full): 444109 +Grid : Message : Average mflops/s per call per node (full): 302887 +Grid : Message : Stencil 13.2738 GB/s per node +Grid : Message : Stencil 16.4548 GB/s per node +Grid : Message : Stencil 18.0116 GB/s per node +Grid : Message : Stencil 14.688 GB/s per node +Grid : Message : Average mflops/s per call per node : 667392 +Grid : Message : Average mflops/s per call per node : 802106 +Grid : Message : Average mflops/s per call per node : 819500 +Grid : Message : Average mflops/s per call per node : 658955 +Grid : Message : Average mflops/s per call per node (full): 316091 +Grid : Message : Average mflops/s per call per node (full): 433778 +Grid : Message : Average mflops/s per call per node (full): 
446855 +Grid : Message : Average mflops/s per call per node (full): 305394 +Grid : Message : Stencil 12.7989 GB/s per node +Grid : Message : Stencil 17.0567 GB/s per node +Grid : Message : Stencil 17.9343 GB/s per node +Grid : Message : Stencil 12.7083 GB/s per node +Grid : Message : Average mflops/s per call per node : 671386 +Grid : Message : Average mflops/s per call per node : 802837 +Grid : Message : Average mflops/s per call per node : 820421 +Grid : Message : Average mflops/s per call per node : 663040 +Grid : Message : Average mflops/s per call per node (full): 315605 +Grid : Message : Average mflops/s per call per node (full): 437290 +Grid : Message : Average mflops/s per call per node (full): 447574 +Grid : Message : Average mflops/s per call per node (full): 304870 +Grid : Message : Stencil 12.2852 GB/s per node +Grid : Message : Stencil 17.3791 GB/s per node +Grid : Message : Stencil 17.1474 GB/s per node +Grid : Message : Stencil 13.436 GB/s per node +Grid : Message : Average mflops/s per call per node : 670569 +Grid : Message : Average mflops/s per call per node : 805222 +Grid : Message : Average mflops/s per call per node : 825606 +Grid : Message : Average mflops/s per call per node : 663347 +Grid : Message : Average mflops/s per call per node (full): 312029 +Grid : Message : Average mflops/s per call per node (full): 440050 +Grid : Message : Average mflops/s per call per node (full): 441858 +Grid : Message : Average mflops/s per call per node (full): 305846 +Grid : Message : Stencil 12.2379 GB/s per node +Grid : Message : Stencil 14.0604 GB/s per node +Grid : Message : Stencil 18.6758 GB/s per node +Grid : Message : Stencil 13.2438 GB/s per node +Grid : Message : Average mflops/s per call per node : 671841 +Grid : Message : Average mflops/s per call per node : 807685 +Grid : Message : Average mflops/s per call per node : 820105 +Grid : Message : Average mflops/s per call per node : 656744 +Grid : Message : Average mflops/s per call per node (full): 311636 +Grid : Message : Average mflops/s per call per node (full): 401570 +Grid : Message : Average mflops/s per call per node (full): 447034 +Grid : Message : Average mflops/s per call per node (full): 304702 +Grid : Message : Stencil 13.5392 GB/s per node +Grid : Message : Stencil 17.0725 GB/s per node +Grid : Message : Stencil 17.1784 GB/s per node +Grid : Message : Stencil 12.2392 GB/s per node +Grid : Message : Average mflops/s per call per node : 664917 +Grid : Message : Average mflops/s per call per node : 801687 +Grid : Message : Average mflops/s per call per node : 815332 +Grid : Message : Average mflops/s per call per node : 656729 +Grid : Message : Average mflops/s per call per node (full): 315726 +Grid : Message : Average mflops/s per call per node (full): 434728 +Grid : Message : Average mflops/s per call per node (full): 431152 +Grid : Message : Average mflops/s per call per node (full): 302133 +Grid : Message : Stencil 13.5374 GB/s per node +Grid : Message : Stencil 16.5367 GB/s per node +Grid : Message : Stencil 17.4212 GB/s per node +Grid : Message : Stencil 12.8501 GB/s per node +Grid : Message : Average mflops/s per call per node : 668077 +Grid : Message : Average mflops/s per call per node : 803680 +Grid : Message : Average mflops/s per call per node : 826720 +Grid : Message : Average mflops/s per call per node : 665432 +Grid : Message : Average mflops/s per call per node (full): 316753 +Grid : Message : Average mflops/s per call per node (full): 434204 +Grid : Message : Average mflops/s per call per node 
(full): 444228 +Grid : Message : Average mflops/s per call per node (full): 305541 +Grid : Message : Stencil 12.8187 GB/s per node +Grid : Message : Stencil 16.7074 GB/s per node +Grid : Message : Stencil 18.4389 GB/s per node +Grid : Message : Stencil 12.8444 GB/s per node +Grid : Message : Average mflops/s per call per node : 670256 +Grid : Message : Average mflops/s per call per node : 804534 +Grid : Message : Average mflops/s per call per node : 820132 +Grid : Message : Average mflops/s per call per node : 666422 +Grid : Message : Average mflops/s per call per node (full): 315529 +Grid : Message : Average mflops/s per call per node (full): 435032 +Grid : Message : Average mflops/s per call per node (full): 447477 +Grid : Message : Average mflops/s per call per node (full): 304948 +Grid : Message : Stencil 13.379 GB/s per node +Grid : Message : Stencil 17.7912 GB/s per node +Grid : Message : Stencil 17.7083 GB/s per node +Grid : Message : Stencil 14.0831 GB/s per node +Grid : Message : Average mflops/s per call per node : 670713 +Grid : Message : Average mflops/s per call per node : 801535 +Grid : Message : Average mflops/s per call per node : 820180 +Grid : Message : Average mflops/s per call per node : 660857 +Grid : Message : Average mflops/s per call per node (full): 316493 +Grid : Message : Average mflops/s per call per node (full): 439429 +Grid : Message : Average mflops/s per call per node (full): 442506 +Grid : Message : Average mflops/s per call per node (full): 306858 +Grid : Message : Stencil 13.1634 GB/s per node +Grid : Message : Stencil 16.8688 GB/s per node +Grid : Message : Stencil 17.749 GB/s per node +Grid : Message : Stencil 13.1366 GB/s per node +Grid : Message : Average mflops/s per call per node : 669289 +Grid : Message : Average mflops/s per call per node : 802523 +Grid : Message : Average mflops/s per call per node : 820049 +Grid : Message : Average mflops/s per call per node : 663095 +Grid : Message : Average mflops/s per call per node (full): 314878 +Grid : Message : Average mflops/s per call per node (full): 436156 +Grid : Message : Average mflops/s per call per node (full): 444995 +Grid : Message : Average mflops/s per call per node (full): 304995 +Grid : Message : Stencil 12.3941 GB/s per node +Grid : Message : Stencil 16.754 GB/s per node +Grid : Message : Stencil 18.4191 GB/s per node +Grid : Message : Stencil 14.0827 GB/s per node +Grid : Message : Average mflops/s per call per node : 669824 +Grid : Message : Average mflops/s per call per node : 803334 +Grid : Message : Average mflops/s per call per node : 820453 +Grid : Message : Average mflops/s per call per node : 661164 +Grid : Message : Average mflops/s per call per node (full): 313443 +Grid : Message : Average mflops/s per call per node (full): 437198 +Grid : Message : Average mflops/s per call per node (full): 448787 +Grid : Message : Average mflops/s per call per node (full): 306816 +Grid : Message : Stencil 13.5058 GB/s per node +Grid : Message : Stencil 16.1241 GB/s per node +Grid : Message : Stencil 17.2608 GB/s per node +Grid : Message : Stencil 12.0562 GB/s per node +Grid : Message : Average mflops/s per call per node : 664161 +Grid : Message : Average mflops/s per call per node : 805610 +Grid : Message : Average mflops/s per call per node : 825873 +Grid : Message : Average mflops/s per call per node : 668450 +Grid : Message : Average mflops/s per call per node (full): 315219 +Grid : Message : Average mflops/s per call per node (full): 430622 +Grid : Message : Average mflops/s per call per 
node (full): 443767 +Grid : Message : Average mflops/s per call per node (full): 302160 +Grid : Message : Stencil 12.4599 GB/s per node +Grid : Message : Stencil 17.2007 GB/s per node +Grid : Message : Stencil 18.89 GB/s per node +Grid : Message : Stencil 12.934 GB/s per node +Grid : Message : Average mflops/s per call per node : 668564 +Grid : Message : Average mflops/s per call per node : 807248 +Grid : Message : Average mflops/s per call per node : 818786 +Grid : Message : Average mflops/s per call per node : 657532 +Grid : Message : Average mflops/s per call per node (full): 313528 +Grid : Message : Average mflops/s per call per node (full): 440095 +Grid : Message : Average mflops/s per call per node (full): 448704 +Grid : Message : Average mflops/s per call per node (full): 304130 +Grid : Message : Stencil 12.78 GB/s per node +Grid : Message : Stencil 16.6666 GB/s per node +Grid : Message : Stencil 18.0844 GB/s per node +Grid : Message : Stencil 13.0648 GB/s per node +Grid : Message : Average mflops/s per call per node : 667875 +Grid : Message : Average mflops/s per call per node : 803602 +Grid : Message : Average mflops/s per call per node : 823689 +Grid : Message : Average mflops/s per call per node : 667685 +Grid : Message : Average mflops/s per call per node (full): 315202 +Grid : Message : Average mflops/s per call per node (full): 435594 +Grid : Message : Average mflops/s per call per node (full): 445154 +Grid : Message : Average mflops/s per call per node (full): 305952 +Grid : Message : Stencil 13.243 GB/s per node +Grid : Message : Stencil 18.4039 GB/s per node +Grid : Message : Stencil 18.6476 GB/s per node +Grid : Message : Stencil 12.592 GB/s per node +Grid : Message : Average mflops/s per call per node : 668126 +Grid : Message : Average mflops/s per call per node : 809386 +Grid : Message : Average mflops/s per call per node : 821717 +Grid : Message : Average mflops/s per call per node : 667399 +Grid : Message : Average mflops/s per call per node (full): 315372 +Grid : Message : Average mflops/s per call per node (full): 443816 +Grid : Message : Average mflops/s per call per node (full): 444890 +Grid : Message : Average mflops/s per call per node (full): 302195 +Grid : Message : Stencil 12.46 GB/s per node +Grid : Message : Stencil 18.2155 GB/s per node +Grid : Message : Stencil 19.4584 GB/s per node +Grid : Message : Stencil 12.0904 GB/s per node +Grid : Message : Average mflops/s per call per node : 669848 +Grid : Message : Average mflops/s per call per node : 803086 +Grid : Message : Average mflops/s per call per node : 822755 +Grid : Message : Average mflops/s per call per node : 664231 +Grid : Message : Average mflops/s per call per node (full): 313075 +Grid : Message : Average mflops/s per call per node (full): 440188 +Grid : Message : Average mflops/s per call per node (full): 451379 +Grid : Message : Average mflops/s per call per node (full): 301640 +Grid : Message : Stencil 12.8262 GB/s per node +Grid : Message : Stencil 17.1159 GB/s per node +Grid : Message : Stencil 19.3861 GB/s per node +Grid : Message : Stencil 12.3982 GB/s per node +Grid : Message : Average mflops/s per call per node : 668377 +Grid : Message : Average mflops/s per call per node : 798894 +Grid : Message : Average mflops/s per call per node : 824796 +Grid : Message : Average mflops/s per call per node : 664084 +Grid : Message : Average mflops/s per call per node (full): 315225 +Grid : Message : Average mflops/s per call per node (full): 432619 +Grid : Message : Average mflops/s per call per 
node (full): 450634 +Grid : Message : Average mflops/s per call per node (full): 304496 +Grid : Message : Stencil 13.0438 GB/s per node +Grid : Message : Stencil 16.5429 GB/s per node +Grid : Message : Stencil 18.9669 GB/s per node +Grid : Message : Stencil 13.4612 GB/s per node +Grid : Message : Average mflops/s per call per node : 666991 +Grid : Message : Average mflops/s per call per node : 806302 +Grid : Message : Average mflops/s per call per node : 821472 +Grid : Message : Average mflops/s per call per node : 659024 +Grid : Message : Average mflops/s per call per node (full): 315137 +Grid : Message : Average mflops/s per call per node (full): 434857 +Grid : Message : Average mflops/s per call per node (full): 449237 +Grid : Message : Average mflops/s per call per node (full): 303623 +Grid : Message : Stencil 12.9249 GB/s per node +Grid : Message : Stencil 16.6397 GB/s per node +Grid : Message : Stencil 18.3127 GB/s per node +Grid : Message : Stencil 12.2587 GB/s per node +Grid : Message : Average mflops/s per call per node : 667634 +Grid : Message : Average mflops/s per call per node : 805112 +Grid : Message : Average mflops/s per call per node : 816695 +Grid : Message : Average mflops/s per call per node : 671686 +Grid : Message : Average mflops/s per call per node (full): 315028 +Grid : Message : Average mflops/s per call per node (full): 426131 +Grid : Message : Average mflops/s per call per node (full): 445520 +Grid : Message : Average mflops/s per call per node (full): 305326 +Grid : Message : Stencil 12.682 GB/s per node +Grid : Message : Stencil 16.5346 GB/s per node +Grid : Message : Stencil 17.7569 GB/s per node +Grid : Message : Stencil 12.9644 GB/s per node +Grid : Message : Average mflops/s per call per node : 671225 +Grid : Message : Average mflops/s per call per node : 807645 +Grid : Message : Average mflops/s per call per node : 820011 +Grid : Message : Average mflops/s per call per node : 665084 +Grid : Message : Average mflops/s per call per node (full): 315592 +Grid : Message : Average mflops/s per call per node (full): 434958 +Grid : Message : Average mflops/s per call per node (full): 445033 +Grid : Message : Average mflops/s per call per node (full): 306041 +Grid : Message : Stencil 12.6496 GB/s per node +Grid : Message : Stencil 18.4247 GB/s per node +Grid : Message : Stencil 17.1874 GB/s per node +Grid : Message : Stencil 12.4295 GB/s per node +Grid : Message : Average mflops/s per call per node : 672262 +Grid : Message : Average mflops/s per call per node : 804437 +Grid : Message : Average mflops/s per call per node : 821138 +Grid : Message : Average mflops/s per call per node : 668501 +Grid : Message : Average mflops/s per call per node (full): 314993 +Grid : Message : Average mflops/s per call per node (full): 443670 +Grid : Message : Average mflops/s per call per node (full): 443111 +Grid : Message : Average mflops/s per call per node (full): 305376 +Grid : Message : Stencil 12.462 GB/s per node +Grid : Message : Stencil 10.9507 GB/s per node +Grid : Message : Stencil 17.384 GB/s per node +Grid : Message : Stencil 15.5983 GB/s per node +Grid : Message : Average mflops/s per call per node : 669546 +Grid : Message : Average mflops/s per call per node : 814575 +Grid : Message : Average mflops/s per call per node : 817613 +Grid : Message : Average mflops/s per call per node : 658649 +Grid : Message : Average mflops/s per call per node (full): 312235 +Grid : Message : Average mflops/s per call per node (full): 338314 +Grid : Message : Average mflops/s per call 
per node (full): 443080 +Grid : Message : Average mflops/s per call per node (full): 307255 +Grid : Message : Stencil 12.7405 GB/s per node +Grid : Message : Stencil 8.93222 GB/s per node +Grid : Message : Stencil 19.2037 GB/s per node +Grid : Message : Stencil 12.2945 GB/s per node +Grid : Message : Average mflops/s per call per node : 667278 +Grid : Message : Average mflops/s per call per node : 807521 +Grid : Message : Average mflops/s per call per node : 819983 +Grid : Message : Average mflops/s per call per node : 665896 +Grid : Message : Average mflops/s per call per node (full): 313870 +Grid : Message : Average mflops/s per call per node (full): 290570 +Grid : Message : Average mflops/s per call per node (full): 448551 +Grid : Message : Average mflops/s per call per node (full): 303654 +Grid : Message : Stencil 13.1072 GB/s per node +Grid : Message : Stencil 17.1792 GB/s per node +Grid : Message : Stencil 17.8879 GB/s per node +Grid : Message : Stencil 12.8571 GB/s per node +Grid : Message : Average mflops/s per call per node : 669444 +Grid : Message : Average mflops/s per call per node : 809635 +Grid : Message : Average mflops/s per call per node : 815615 +Grid : Message : Average mflops/s per call per node : 662025 +Grid : Message : Average mflops/s per call per node (full): 316555 +Grid : Message : Average mflops/s per call per node (full): 439307 +Grid : Message : Average mflops/s per call per node (full): 444712 +Grid : Message : Average mflops/s per call per node (full): 304171 +Grid : Message : Stencil 13.9813 GB/s per node +Grid : Message : Stencil 17.1032 GB/s per node +Grid : Message : Stencil 18.3618 GB/s per node +Grid : Message : Stencil 12.8147 GB/s per node +Grid : Message : Average mflops/s per call per node : 663352 +Grid : Message : Average mflops/s per call per node : 802854 +Grid : Message : Average mflops/s per call per node : 819638 +Grid : Message : Average mflops/s per call per node : 666209 +Grid : Message : Average mflops/s per call per node (full): 315948 +Grid : Message : Average mflops/s per call per node (full): 435988 +Grid : Message : Average mflops/s per call per node (full): 445941 +Grid : Message : Average mflops/s per call per node (full): 305988 +Grid : Message : Stencil 12.4969 GB/s per node +Grid : Message : Stencil 16.6988 GB/s per node +Grid : Message : Stencil 17.843 GB/s per node +Grid : Message : Stencil 12.6231 GB/s per node +Grid : Message : Average mflops/s per call per node : 670303 +Grid : Message : Average mflops/s per call per node : 803093 +Grid : Message : Average mflops/s per call per node : 820375 +Grid : Message : Average mflops/s per call per node : 666858 +Grid : Message : Average mflops/s per call per node (full): 314096 +Grid : Message : Average mflops/s per call per node (full): 436713 +Grid : Message : Average mflops/s per call per node (full): 445877 +Grid : Message : Average mflops/s per call per node (full): 304598 +Grid : Message : Stencil 13.397 GB/s per node +Grid : Message : Stencil 7.74249 GB/s per node +Grid : Message : Stencil 17.764 GB/s per node +Grid : Message : Stencil 13.4336 GB/s per node +Grid : Message : Average mflops/s per call per node : 667487 +Grid : Message : Average mflops/s per call per node : 810620 +Grid : Message : Average mflops/s per call per node : 826152 +Grid : Message : Average mflops/s per call per node : 658813 +Grid : Message : Average mflops/s per call per node (full): 316835 +Grid : Message : Average mflops/s per call per node (full): 259695 +Grid : Message : Average mflops/s per 
call per node (full): 444902 +Grid : Message : Average mflops/s per call per node (full): 305398 +Grid : Message : Stencil 12.8336 GB/s per node +Grid : Message : Stencil 16.7161 GB/s per node +Grid : Message : Stencil 18.6455 GB/s per node +Grid : Message : Stencil 12.5245 GB/s per node +Grid : Message : Average mflops/s per call per node : 670738 +Grid : Message : Average mflops/s per call per node : 804355 +Grid : Message : Average mflops/s per call per node : 819528 +Grid : Message : Average mflops/s per call per node : 666568 +Grid : Message : Average mflops/s per call per node (full): 315705 +Grid : Message : Average mflops/s per call per node (full): 429704 +Grid : Message : Average mflops/s per call per node (full): 447242 +Grid : Message : Average mflops/s per call per node (full): 304879 +Grid : Message : Stencil 12.9128 GB/s per node +Grid : Message : Stencil 16.2871 GB/s per node +Grid : Message : Stencil 18.1038 GB/s per node +Grid : Message : Stencil 12.6756 GB/s per node +Grid : Message : Average mflops/s per call per node : 670906 +Grid : Message : Average mflops/s per call per node : 804414 +Grid : Message : Average mflops/s per call per node : 823795 +Grid : Message : Average mflops/s per call per node : 667857 +Grid : Message : Average mflops/s per call per node (full): 315623 +Grid : Message : Average mflops/s per call per node (full): 432420 +Grid : Message : Average mflops/s per call per node (full): 445608 +Grid : Message : Average mflops/s per call per node (full): 305259 +Grid : Message : Stencil 13.1449 GB/s per node +Grid : Message : Stencil 17.4201 GB/s per node +Grid : Message : Stencil 18.0655 GB/s per node +Grid : Message : Stencil 13.6378 GB/s per node +Grid : Message : Average mflops/s per call per node : 667533 +Grid : Message : Average mflops/s per call per node : 806456 +Grid : Message : Average mflops/s per call per node : 823065 +Grid : Message : Average mflops/s per call per node : 662906 +Grid : Message : Average mflops/s per call per node (full): 315087 +Grid : Message : Average mflops/s per call per node (full): 439271 +Grid : Message : Average mflops/s per call per node (full): 446828 +Grid : Message : Average mflops/s per call per node (full): 304904 +Grid : Message : Stencil 12.5603 GB/s per node +Grid : Message : Stencil 16.4946 GB/s per node +Grid : Message : Stencil 17.2015 GB/s per node +Grid : Message : Stencil 12.6719 GB/s per node +Grid : Message : Average mflops/s per call per node : 667440 +Grid : Message : Average mflops/s per call per node : 804005 +Grid : Message : Average mflops/s per call per node : 819082 +Grid : Message : Average mflops/s per call per node : 659045 +Grid : Message : Average mflops/s per call per node (full): 313637 +Grid : Message : Average mflops/s per call per node (full): 434386 +Grid : Message : Average mflops/s per call per node (full): 441996 +Grid : Message : Average mflops/s per call per node (full): 302145 +Grid : Message : Stencil 12.5421 GB/s per node +Grid : Message : Stencil 16.6179 GB/s per node +Grid : Message : Stencil 17.576 GB/s per node +Grid : Message : Stencil 12.7209 GB/s per node +Grid : Message : Average mflops/s per call per node : 673301 +Grid : Message : Average mflops/s per call per node : 802954 +Grid : Message : Average mflops/s per call per node : 822063 +Grid : Message : Average mflops/s per call per node : 665198 +Grid : Message : Average mflops/s per call per node (full): 314161 +Grid : Message : Average mflops/s per call per node (full): 433576 +Grid : Message : Average mflops/s 
per call per node (full): 445628 +Grid : Message : Average mflops/s per call per node (full): 305214 +Grid : Message : Stencil 13.2862 GB/s per node +Grid : Message : Stencil 10.5698 GB/s per node +Grid : Message : Stencil 17.4027 GB/s per node +Grid : Message : Stencil 12.7517 GB/s per node +Grid : Message : Average mflops/s per call per node : 669822 +Grid : Message : Average mflops/s per call per node : 804922 +Grid : Message : Average mflops/s per call per node : 822865 +Grid : Message : Average mflops/s per call per node : 663862 +Grid : Message : Average mflops/s per call per node (full): 316809 +Grid : Message : Average mflops/s per call per node (full): 329286 +Grid : Message : Average mflops/s per call per node (full): 442725 +Grid : Message : Average mflops/s per call per node (full): 303149 +Grid : Message : Stencil 12.852 GB/s per node +Grid : Message : Stencil 16.5856 GB/s per node +Grid : Message : Stencil 16.8703 GB/s per node +Grid : Message : Stencil 12.3381 GB/s per node +Grid : Message : Average mflops/s per call per node : 672030 +Grid : Message : Average mflops/s per call per node : 806056 +Grid : Message : Average mflops/s per call per node : 824011 +Grid : Message : Average mflops/s per call per node : 663725 +Grid : Message : Average mflops/s per call per node (full): 313016 +Grid : Message : Average mflops/s per call per node (full): 433934 +Grid : Message : Average mflops/s per call per node (full): 437887 +Grid : Message : Average mflops/s per call per node (full): 304202 +Grid : Message : Stencil 13.1136 GB/s per node +Grid : Message : Stencil 17.4085 GB/s per node +Grid : Message : Stencil 17.6594 GB/s per node +Grid : Message : Stencil 12.5101 GB/s per node +Grid : Message : Average mflops/s per call per node : 667205 +Grid : Message : Average mflops/s per call per node : 801624 +Grid : Message : Average mflops/s per call per node : 821358 +Grid : Message : Average mflops/s per call per node : 666993 +Grid : Message : Average mflops/s per call per node (full): 315372 +Grid : Message : Average mflops/s per call per node (full): 438220 +Grid : Message : Average mflops/s per call per node (full): 446266 +Grid : Message : Average mflops/s per call per node (full): 305044 +Grid : Message : Stencil 12.615 GB/s per node +Grid : Message : Stencil 16.9959 GB/s per node +Grid : Message : Stencil 17.4396 GB/s per node +Grid : Message : Stencil 12.6798 GB/s per node +Grid : Message : Average mflops/s per call per node : 668788 +Grid : Message : Average mflops/s per call per node : 800461 +Grid : Message : Average mflops/s per call per node : 826309 +Grid : Message : Average mflops/s per call per node : 664766 +Grid : Message : Average mflops/s per call per node (full): 314281 +Grid : Message : Average mflops/s per call per node (full): 435637 +Grid : Message : Average mflops/s per call per node (full): 445094 +Grid : Message : Average mflops/s per call per node (full): 303595 +Grid : Message : Stencil 12.6878 GB/s per node +Grid : Message : Stencil 16.6081 GB/s per node +Grid : Message : Stencil 16.9322 GB/s per node +Grid : Message : Stencil 13.615 GB/s per node +Grid : Message : Average mflops/s per call per node : 667055 +Grid : Message : Average mflops/s per call per node : 802579 +Grid : Message : Average mflops/s per call per node : 829844 +Grid : Message : Average mflops/s per call per node : 662433 +Grid : Message : Average mflops/s per call per node (full): 314107 +Grid : Message : Average mflops/s per call per node (full): 434352 +Grid : Message : Average 
mflops/s per call per node (full): 429138 +Grid : Message : Average mflops/s per call per node (full): 305501 +Grid : Message : Stencil 13.0521 GB/s per node +Grid : Message : Stencil 14.0309 GB/s per node +Grid : Message : Stencil 16.8677 GB/s per node +Grid : Message : Stencil 12.5594 GB/s per node +Grid : Message : Average mflops/s per call per node : 668824 +Grid : Message : Average mflops/s per call per node : 810876 +Grid : Message : Average mflops/s per call per node : 821318 +Grid : Message : Average mflops/s per call per node : 664964 +Grid : Message : Average mflops/s per call per node (full): 316377 +Grid : Message : Average mflops/s per call per node (full): 399792 +Grid : Message : Average mflops/s per call per node (full): 438584 +Grid : Message : Average mflops/s per call per node (full): 302806 +Grid : Message : Stencil 15.8763 GB/s per node +Grid : Message : Stencil 17.0735 GB/s per node +Grid : Message : Stencil 18.3697 GB/s per node +Grid : Message : Stencil 12.4011 GB/s per node +Grid : Message : Average mflops/s per call per node : 658812 +Grid : Message : Average mflops/s per call per node : 807395 +Grid : Message : Average mflops/s per call per node : 825888 +Grid : Message : Average mflops/s per call per node : 663160 +Grid : Message : Average mflops/s per call per node (full): 316458 +Grid : Message : Average mflops/s per call per node (full): 439902 +Grid : Message : Average mflops/s per call per node (full): 450768 +Grid : Message : Average mflops/s per call per node (full): 304482 +Grid : Message : Stencil 13.3067 GB/s per node +Grid : Message : Stencil 10.7314 GB/s per node +Grid : Message : Stencil 17.4575 GB/s per node +Grid : Message : Stencil 12.6334 GB/s per node +Grid : Message : Average mflops/s per call per node : 663778 +Grid : Message : Average mflops/s per call per node : 804718 +Grid : Message : Average mflops/s per call per node : 827613 +Grid : Message : Average mflops/s per call per node : 668973 +Grid : Message : Average mflops/s per call per node (full): 315624 +Grid : Message : Average mflops/s per call per node (full): 332576 +Grid : Message : Average mflops/s per call per node (full): 446449 +Grid : Message : Average mflops/s per call per node (full): 306130 +Grid : Message : Stencil 12.8944 GB/s per node +Grid : Message : Stencil 16.6851 GB/s per node +Grid : Message : Stencil 17.6033 GB/s per node +Grid : Message : Stencil 12.1455 GB/s per node +Grid : Message : Average mflops/s per call per node : 667064 +Grid : Message : Average mflops/s per call per node : 804489 +Grid : Message : Average mflops/s per call per node : 814207 +Grid : Message : Average mflops/s per call per node : 662144 +Grid : Message : Average mflops/s per call per node (full): 312704 +Grid : Message : Average mflops/s per call per node (full): 436769 +Grid : Message : Average mflops/s per call per node (full): 444143 +Grid : Message : Average mflops/s per call per node (full): 300723 +Grid : Message : Stencil 13.4574 GB/s per node +Grid : Message : Stencil 16.9887 GB/s per node +Grid : Message : Stencil 17.1818 GB/s per node +Grid : Message : Stencil 12.4573 GB/s per node +Grid : Message : Average mflops/s per call per node : 666017 +Grid : Message : Average mflops/s per call per node : 801217 +Grid : Message : Average mflops/s per call per node : 828869 +Grid : Message : Average mflops/s per call per node : 669250 +Grid : Message : Average mflops/s per call per node (full): 316097 +Grid : Message : Average mflops/s per call per node (full): 438066 +Grid : Message : 
Average mflops/s per call per node (full): 443785 +Grid : Message : Average mflops/s per call per node (full): 305111 +Grid : Message : Stencil 14.3726 GB/s per node +Grid : Message : Stencil 16.5001 GB/s per node +Grid : Message : Stencil 17.4919 GB/s per node +Grid : Message : Stencil 11.8296 GB/s per node +Grid : Message : Average mflops/s per call per node : 661241 +Grid : Message : Average mflops/s per call per node : 805278 +Grid : Message : Average mflops/s per call per node : 819257 +Grid : Message : Average mflops/s per call per node : 662070 +Grid : Message : Average mflops/s per call per node (full): 316571 +Grid : Message : Average mflops/s per call per node (full): 434901 +Grid : Message : Average mflops/s per call per node (full): 443915 +Grid : Message : Average mflops/s per call per node (full): 299770 +Grid : Message : Stencil 13.9432 GB/s per node +Grid : Message : Stencil 17.1709 GB/s per node +Grid : Message : Stencil 18.5397 GB/s per node +Grid : Message : Stencil 12.0828 GB/s per node +Grid : Message : Average mflops/s per call per node : 662025 +Grid : Message : Average mflops/s per call per node : 798735 +Grid : Message : Average mflops/s per call per node : 827157 +Grid : Message : Average mflops/s per call per node : 669224 +Grid : Message : Average mflops/s per call per node (full): 315170 +Grid : Message : Average mflops/s per call per node (full): 436317 +Grid : Message : Average mflops/s per call per node (full): 450815 +Grid : Message : Average mflops/s per call per node (full): 303414 +Grid : Message : Stencil 14.8256 GB/s per node +Grid : Message : Stencil 16.8343 GB/s per node +Grid : Message : Stencil 17.9506 GB/s per node +Grid : Message : Stencil 12.1935 GB/s per node +Grid : Message : Average mflops/s per call per node : 658702 +Grid : Message : Average mflops/s per call per node : 805008 +Grid : Message : Average mflops/s per call per node : 821807 +Grid : Message : Average mflops/s per call per node : 669907 +Grid : Message : Average mflops/s per call per node (full): 315640 +Grid : Message : Average mflops/s per call per node (full): 436405 +Grid : Message : Average mflops/s per call per node (full): 447011 +Grid : Message : Average mflops/s per call per node (full): 303721 +Grid : Message : Stencil 12.8896 GB/s per node +Grid : Message : Stencil 17.0187 GB/s per node +Grid : Message : Stencil 18.0889 GB/s per node +Grid : Message : Stencil 12.6104 GB/s per node +Grid : Message : Average mflops/s per call per node : 668724 +Grid : Message : Average mflops/s per call per node : 804606 +Grid : Message : Average mflops/s per call per node : 816875 +Grid : Message : Average mflops/s per call per node : 669186 +Grid : Message : Average mflops/s per call per node (full): 315034 +Grid : Message : Average mflops/s per call per node (full): 440288 +Grid : Message : Average mflops/s per call per node (full): 445646 +Grid : Message : Average mflops/s per call per node (full): 304050 +Grid : Message : Stencil 13.7508 GB/s per node +Grid : Message : Stencil 16.7089 GB/s per node +Grid : Message : Stencil 18.2857 GB/s per node +Grid : Message : Stencil 13.2618 GB/s per node +Grid : Message : Average mflops/s per call per node : 663381 +Grid : Message : Average mflops/s per call per node : 811779 +Grid : Message : Average mflops/s per call per node : 819120 +Grid : Message : Average mflops/s per call per node : 663294 +Grid : Message : Average mflops/s per call per node (full): 316059 +Grid : Message : Average mflops/s per call per node (full): 437388 +Grid : 
Message : Average mflops/s per call per node (full): 447847
+Grid : Message : Average mflops/s per call per node (full): 305901
+Grid : Message : Stencil 12.8254 GB/s per node
+Grid : Message : Stencil 17.0104 GB/s per node
+Grid : Message : Stencil 19.176 GB/s per node
+Grid : Message : Stencil 12.5891 GB/s per node
+Grid : Message : Average mflops/s per call per node : 663632
+Grid : Message : Average mflops/s per call per node : 805038
+Grid : Message : Average mflops/s per call per node : 820317
+Grid : Message : Average mflops/s per call per node : 661826
+Grid : Message : Average mflops/s per call per node (full): 313980
+Grid : Message : Average mflops/s per call per node (full): 438791
+Grid : Message : Average mflops/s per call per node (full): 448800
+Grid : Message : Average mflops/s per call per node (full): 304529
[... the same four-case measurement block repeats for the remainder of the log; across all repetitions the Stencil bandwidth lies between 7.58732 and 19.5679 GB/s per node, the average mflops/s per call per node between 656212 and 831600, and the average mflops/s per call per node (full) between 255634 and 449685 ...]
per call per node (full): 315541 +Grid : Message : Average mflops/s per call per node (full): 367835 +Grid : Message : Average mflops/s per call per node (full): 447424 +Grid : Message : Average mflops/s per call per node (full): 304845 +Grid : Message : Stencil 12.2834 GB/s per node +Grid : Message : Stencil 17.2565 GB/s per node +Grid : Message : Stencil 18.1191 GB/s per node +Grid : Message : Stencil 11.8336 GB/s per node +Grid : Message : Average mflops/s per call per node : 670755 +Grid : Message : Average mflops/s per call per node : 802889 +Grid : Message : Average mflops/s per call per node : 820477 +Grid : Message : Average mflops/s per call per node : 667293 +Grid : Message : Average mflops/s per call per node (full): 312060 +Grid : Message : Average mflops/s per call per node (full): 436713 +Grid : Message : Average mflops/s per call per node (full): 445364 +Grid : Message : Average mflops/s per call per node (full): 300346 +Grid : Message : Stencil 12.2895 GB/s per node +Grid : Message : Stencil 17.4713 GB/s per node +Grid : Message : Stencil 17.4528 GB/s per node +Grid : Message : Stencil 12.3124 GB/s per node +Grid : Message : Average mflops/s per call per node : 668955 +Grid : Message : Average mflops/s per call per node : 808029 +Grid : Message : Average mflops/s per call per node : 820266 +Grid : Message : Average mflops/s per call per node : 666742 +Grid : Message : Average mflops/s per call per node (full): 310810 +Grid : Message : Average mflops/s per call per node (full): 439407 +Grid : Message : Average mflops/s per call per node (full): 444057 +Grid : Message : Average mflops/s per call per node (full): 303354 +Grid : Message : Stencil 13.0029 GB/s per node +Grid : Message : Stencil 16.9907 GB/s per node +Grid : Message : Stencil 18.4251 GB/s per node +Grid : Message : Stencil 14.6304 GB/s per node +Grid : Message : Average mflops/s per call per node : 667661 +Grid : Message : Average mflops/s per call per node : 804566 +Grid : Message : Average mflops/s per call per node : 822622 +Grid : Message : Average mflops/s per call per node : 659795 +Grid : Message : Average mflops/s per call per node (full): 315440 +Grid : Message : Average mflops/s per call per node (full): 437435 +Grid : Message : Average mflops/s per call per node (full): 448746 +Grid : Message : Average mflops/s per call per node (full): 305813 +Grid : Message : Stencil 12.4992 GB/s per node +Grid : Message : Stencil 16.8081 GB/s per node +Grid : Message : Stencil 16.9932 GB/s per node +Grid : Message : Stencil 11.8852 GB/s per node +Grid : Message : Average mflops/s per call per node : 668265 +Grid : Message : Average mflops/s per call per node : 805726 +Grid : Message : Average mflops/s per call per node : 825066 +Grid : Message : Average mflops/s per call per node : 667803 +Grid : Message : Average mflops/s per call per node (full): 311253 +Grid : Message : Average mflops/s per call per node (full): 437038 +Grid : Message : Average mflops/s per call per node (full): 440641 +Grid : Message : Average mflops/s per call per node (full): 300243 +Grid : Message : Stencil 12.858 GB/s per node +Grid : Message : Stencil 16.6191 GB/s per node +Grid : Message : Stencil 17.4867 GB/s per node +Grid : Message : Stencil 12.0766 GB/s per node +Grid : Message : Average mflops/s per call per node : 664701 +Grid : Message : Average mflops/s per call per node : 802362 +Grid : Message : Average mflops/s per call per node : 825296 +Grid : Message : Average mflops/s per call per node : 671195 +Grid : Message : Average 
mflops/s per call per node (full): 313291 +Grid : Message : Average mflops/s per call per node (full): 435415 +Grid : Message : Average mflops/s per call per node (full): 444173 +Grid : Message : Average mflops/s per call per node (full): 302506 +Grid : Message : Stencil 12.819 GB/s per node +Grid : Message : Stencil 16.7382 GB/s per node +Grid : Message : Stencil 17.8396 GB/s per node +Grid : Message : Stencil 12.7021 GB/s per node +Grid : Message : Average mflops/s per call per node : 661640 +Grid : Message : Average mflops/s per call per node : 800746 +Grid : Message : Average mflops/s per call per node : 825440 +Grid : Message : Average mflops/s per call per node : 664101 +Grid : Message : Average mflops/s per call per node (full): 312336 +Grid : Message : Average mflops/s per call per node (full): 435376 +Grid : Message : Average mflops/s per call per node (full): 447927 +Grid : Message : Average mflops/s per call per node (full): 303009 +Grid : Message : Stencil 13.2615 GB/s per node +Grid : Message : Stencil 16.7029 GB/s per node +Grid : Message : Stencil 17.164 GB/s per node +Grid : Message : Stencil 15.1159 GB/s per node +Grid : Message : Average mflops/s per call per node : 663828 +Grid : Message : Average mflops/s per call per node : 803900 +Grid : Message : Average mflops/s per call per node : 823424 +Grid : Message : Average mflops/s per call per node : 661500 +Grid : Message : Average mflops/s per call per node (full): 315033 +Grid : Message : Average mflops/s per call per node (full): 434653 +Grid : Message : Average mflops/s per call per node (full): 427996 +Grid : Message : Average mflops/s per call per node (full): 308046 +Grid : Message : Stencil 12.6615 GB/s per node +Grid : Message : Stencil 16.4168 GB/s per node +Grid : Message : Stencil 17.8024 GB/s per node +Grid : Message : Stencil 11.8658 GB/s per node +Grid : Message : Average mflops/s per call per node : 667893 +Grid : Message : Average mflops/s per call per node : 805218 +Grid : Message : Average mflops/s per call per node : 820360 +Grid : Message : Average mflops/s per call per node : 660027 +Grid : Message : Average mflops/s per call per node (full): 314693 +Grid : Message : Average mflops/s per call per node (full): 433820 +Grid : Message : Average mflops/s per call per node (full): 444477 +Grid : Message : Average mflops/s per call per node (full): 299996 +Grid : Message : Stencil 12.4537 GB/s per node +Grid : Message : Stencil 16.6316 GB/s per node +Grid : Message : Stencil 17.255 GB/s per node +Grid : Message : Stencil 13.0949 GB/s per node +Grid : Message : Average mflops/s per call per node : 667214 +Grid : Message : Average mflops/s per call per node : 807821 +Grid : Message : Average mflops/s per call per node : 821258 +Grid : Message : Average mflops/s per call per node : 665148 +Grid : Message : Average mflops/s per call per node (full): 313047 +Grid : Message : Average mflops/s per call per node (full): 435113 +Grid : Message : Average mflops/s per call per node (full): 441888 +Grid : Message : Average mflops/s per call per node (full): 305770 +Grid : Message : Stencil 13.026 GB/s per node +Grid : Message : Stencil 16.8508 GB/s per node +Grid : Message : Stencil 18.4139 GB/s per node +Grid : Message : Stencil 16.757 GB/s per node +Grid : Message : Average mflops/s per call per node : 664610 +Grid : Message : Average mflops/s per call per node : 796445 +Grid : Message : Average mflops/s per call per node : 819592 +Grid : Message : Average mflops/s per call per node : 657022 +Grid : Message : Average 
mflops/s per call per node (full): 313254 +Grid : Message : Average mflops/s per call per node (full): 435785 +Grid : Message : Average mflops/s per call per node (full): 447783 +Grid : Message : Average mflops/s per call per node (full): 307976 +Grid : Message : Stencil 13.6629 GB/s per node +Grid : Message : Stencil 14.7722 GB/s per node +Grid : Message : Stencil 17.0123 GB/s per node +Grid : Message : Stencil 14.6703 GB/s per node +Grid : Message : Average mflops/s per call per node : 666158 +Grid : Message : Average mflops/s per call per node : 808269 +Grid : Message : Average mflops/s per call per node : 823241 +Grid : Message : Average mflops/s per call per node : 659966 +Grid : Message : Average mflops/s per call per node (full): 315972 +Grid : Message : Average mflops/s per call per node (full): 408625 +Grid : Message : Average mflops/s per call per node (full): 441349 +Grid : Message : Average mflops/s per call per node (full): 307814 +Grid : Message : Stencil 13.8222 GB/s per node +Grid : Message : Stencil 16.5421 GB/s per node +Grid : Message : Stencil 17.9573 GB/s per node +Grid : Message : Stencil 13.5068 GB/s per node +Grid : Message : Average mflops/s per call per node : 662776 +Grid : Message : Average mflops/s per call per node : 802749 +Grid : Message : Average mflops/s per call per node : 819045 +Grid : Message : Average mflops/s per call per node : 667423 +Grid : Message : Average mflops/s per call per node (full): 315685 +Grid : Message : Average mflops/s per call per node (full): 434846 +Grid : Message : Average mflops/s per call per node (full): 445561 +Grid : Message : Average mflops/s per call per node (full): 306973 +Grid : Message : Stencil 12.6947 GB/s per node +Grid : Message : Stencil 16.6596 GB/s per node +Grid : Message : Stencil 17.3481 GB/s per node +Grid : Message : Stencil 12.2762 GB/s per node +Grid : Message : Average mflops/s per call per node : 664797 +Grid : Message : Average mflops/s per call per node : 803254 +Grid : Message : Average mflops/s per call per node : 817960 +Grid : Message : Average mflops/s per call per node : 663602 +Grid : Message : Average mflops/s per call per node (full): 313921 +Grid : Message : Average mflops/s per call per node (full): 434428 +Grid : Message : Average mflops/s per call per node (full): 443691 +Grid : Message : Average mflops/s per call per node (full): 303453 +Grid : Message : Stencil 13.0304 GB/s per node +Grid : Message : Stencil 16.5727 GB/s per node +Grid : Message : Stencil 17.5342 GB/s per node +Grid : Message : Stencil 13.6637 GB/s per node +Grid : Message : Average mflops/s per call per node : 661446 +Grid : Message : Average mflops/s per call per node : 800983 +Grid : Message : Average mflops/s per call per node : 825268 +Grid : Message : Average mflops/s per call per node : 662414 +Grid : Message : Average mflops/s per call per node (full): 312931 +Grid : Message : Average mflops/s per call per node (full): 434956 +Grid : Message : Average mflops/s per call per node (full): 446353 +Grid : Message : Average mflops/s per call per node (full): 306809 +Grid : Message : Stencil 13.4499 GB/s per node +Grid : Message : Stencil 17.8274 GB/s per node +Grid : Message : Stencil 17.3187 GB/s per node +Grid : Message : Stencil 12.2928 GB/s per node +Grid : Message : Average mflops/s per call per node : 663668 +Grid : Message : Average mflops/s per call per node : 802349 +Grid : Message : Average mflops/s per call per node : 824225 +Grid : Message : Average mflops/s per call per node : 664527 +Grid : Message : 
Average mflops/s per call per node (full): 313580 +Grid : Message : Average mflops/s per call per node (full): 437042 +Grid : Message : Average mflops/s per call per node (full): 444399 +Grid : Message : Average mflops/s per call per node (full): 303315 +Grid : Message : Stencil 13.4204 GB/s per node +Grid : Message : Stencil 14.6526 GB/s per node +Grid : Message : Stencil 17.0765 GB/s per node +Grid : Message : Stencil 13.3068 GB/s per node +Grid : Message : Average mflops/s per call per node : 665269 +Grid : Message : Average mflops/s per call per node : 801119 +Grid : Message : Average mflops/s per call per node : 821479 +Grid : Message : Average mflops/s per call per node : 660903 +Grid : Message : Average mflops/s per call per node (full): 315610 +Grid : Message : Average mflops/s per call per node (full): 410340 +Grid : Message : Average mflops/s per call per node (full): 440724 +Grid : Message : Average mflops/s per call per node (full): 304392 +Grid : Message : Stencil 12.9226 GB/s per node +Grid : Message : Stencil 16.9886 GB/s per node +Grid : Message : Stencil 18.0785 GB/s per node +Grid : Message : Stencil 12.6001 GB/s per node +Grid : Message : Average mflops/s per call per node : 667969 +Grid : Message : Average mflops/s per call per node : 805671 +Grid : Message : Average mflops/s per call per node : 825970 +Grid : Message : Average mflops/s per call per node : 668389 +Grid : Message : Average mflops/s per call per node (full): 315630 +Grid : Message : Average mflops/s per call per node (full): 437228 +Grid : Message : Average mflops/s per call per node (full): 447175 +Grid : Message : Average mflops/s per call per node (full): 305637 +Grid : Message : Stencil 13.0071 GB/s per node +Grid : Message : Stencil 16.4769 GB/s per node +Grid : Message : Stencil 17.5628 GB/s per node +Grid : Message : Stencil 12.6108 GB/s per node +Grid : Message : Average mflops/s per call per node : 664811 +Grid : Message : Average mflops/s per call per node : 807014 +Grid : Message : Average mflops/s per call per node : 825491 +Grid : Message : Average mflops/s per call per node : 664695 +Grid : Message : Average mflops/s per call per node (full): 314531 +Grid : Message : Average mflops/s per call per node (full): 434962 +Grid : Message : Average mflops/s per call per node (full): 445453 +Grid : Message : Average mflops/s per call per node (full): 305524 +Grid : Message : Stencil 13.0581 GB/s per node +Grid : Message : Stencil 16.6896 GB/s per node +Grid : Message : Stencil 17.6596 GB/s per node +Grid : Message : Stencil 12.3528 GB/s per node +Grid : Message : Average mflops/s per call per node : 666024 +Grid : Message : Average mflops/s per call per node : 803409 +Grid : Message : Average mflops/s per call per node : 820641 +Grid : Message : Average mflops/s per call per node : 668749 +Grid : Message : Average mflops/s per call per node (full): 314320 +Grid : Message : Average mflops/s per call per node (full): 435101 +Grid : Message : Average mflops/s per call per node (full): 444807 +Grid : Message : Average mflops/s per call per node (full): 305022 +Grid : Message : Stencil 13.858 GB/s per node +Grid : Message : Stencil 16.4654 GB/s per node +Grid : Message : Stencil 16.3535 GB/s per node +Grid : Message : Stencil 12.7462 GB/s per node +Grid : Message : Average mflops/s per call per node : 663278 +Grid : Message : Average mflops/s per call per node : 799008 +Grid : Message : Average mflops/s per call per node : 828356 +Grid : Message : Average mflops/s per call per node : 665600 +Grid : 
Message : Average mflops/s per call per node (full): 315862 +Grid : Message : Average mflops/s per call per node (full): 424663 +Grid : Message : Average mflops/s per call per node (full): 429391 +Grid : Message : Average mflops/s per call per node (full): 305352 +Grid : Message : Stencil 13.395 GB/s per node +Grid : Message : Stencil 16.9958 GB/s per node +Grid : Message : Stencil 17.0761 GB/s per node +Grid : Message : Stencil 11.9704 GB/s per node +Grid : Message : Average mflops/s per call per node : 663609 +Grid : Message : Average mflops/s per call per node : 798959 +Grid : Message : Average mflops/s per call per node : 825384 +Grid : Message : Average mflops/s per call per node : 668680 +Grid : Message : Average mflops/s per call per node (full): 315709 +Grid : Message : Average mflops/s per call per node (full): 434231 +Grid : Message : Average mflops/s per call per node (full): 442366 +Grid : Message : Average mflops/s per call per node (full): 301876 +Grid : Message : Stencil 14.9144 GB/s per node +Grid : Message : Stencil 17.035 GB/s per node +Grid : Message : Stencil 16.9675 GB/s per node +Grid : Message : Stencil 12.5384 GB/s per node +Grid : Message : Average mflops/s per call per node : 660331 +Grid : Message : Average mflops/s per call per node : 799565 +Grid : Message : Average mflops/s per call per node : 820775 +Grid : Message : Average mflops/s per call per node : 664950 +Grid : Message : Average mflops/s per call per node (full): 316087 +Grid : Message : Average mflops/s per call per node (full): 435532 +Grid : Message : Average mflops/s per call per node (full): 439632 +Grid : Message : Average mflops/s per call per node (full): 304705 +Grid : Message : Stencil 13.2079 GB/s per node +Grid : Message : Stencil 9.38049 GB/s per node +Grid : Message : Stencil 17.5985 GB/s per node +Grid : Message : Stencil 12.9394 GB/s per node +Grid : Message : Average mflops/s per call per node : 666286 +Grid : Message : Average mflops/s per call per node : 807472 +Grid : Message : Average mflops/s per call per node : 822532 +Grid : Message : Average mflops/s per call per node : 666927 +Grid : Message : Average mflops/s per call per node (full): 315426 +Grid : Message : Average mflops/s per call per node (full): 301528 +Grid : Message : Average mflops/s per call per node (full): 444270 +Grid : Message : Average mflops/s per call per node (full): 305483 +Grid : Message : Stencil 12.9366 GB/s per node +Grid : Message : Stencil 8.86962 GB/s per node +Grid : Message : Stencil 18.1196 GB/s per node +Grid : Message : Stencil 14.2661 GB/s per node +Grid : Message : Average mflops/s per call per node : 664986 +Grid : Message : Average mflops/s per call per node : 810537 +Grid : Message : Average mflops/s per call per node : 825000 +Grid : Message : Average mflops/s per call per node : 662504 +Grid : Message : Average mflops/s per call per node (full): 311648 +Grid : Message : Average mflops/s per call per node (full): 288921 +Grid : Message : Average mflops/s per call per node (full): 448842 +Grid : Message : Average mflops/s per call per node (full): 307355 +Grid : Message : Stencil 12.8223 GB/s per node +Grid : Message : Stencil 16.8345 GB/s per node +Grid : Message : Stencil 17.715 GB/s per node +Grid : Message : Stencil 13.3817 GB/s per node +Grid : Message : Average mflops/s per call per node : 666438 +Grid : Message : Average mflops/s per call per node : 806506 +Grid : Message : Average mflops/s per call per node : 823873 +Grid : Message : Average mflops/s per call per node : 664154 +Grid 
: Message : Average mflops/s per call per node (full): 314939 +Grid : Message : Average mflops/s per call per node (full): 438382 +Grid : Message : Average mflops/s per call per node (full): 445696 +Grid : Message : Average mflops/s per call per node (full): 304392 +Grid : Message : Stencil 13.5753 GB/s per node +Grid : Message : Stencil 16.8199 GB/s per node +Grid : Message : Stencil 17.6639 GB/s per node +Grid : Message : Stencil 12.508 GB/s per node +Grid : Message : Average mflops/s per call per node : 661288 +Grid : Message : Average mflops/s per call per node : 801734 +Grid : Message : Average mflops/s per call per node : 826895 +Grid : Message : Average mflops/s per call per node : 666866 +Grid : Message : Average mflops/s per call per node (full): 315113 +Grid : Message : Average mflops/s per call per node (full): 436627 +Grid : Message : Average mflops/s per call per node (full): 445885 +Grid : Message : Average mflops/s per call per node (full): 304389 +Grid : Message : Stencil 13.2609 GB/s per node +Grid : Message : Stencil 16.5876 GB/s per node +Grid : Message : Stencil 17.518 GB/s per node +Grid : Message : Stencil 12.622 GB/s per node +Grid : Message : Average mflops/s per call per node : 666661 +Grid : Message : Average mflops/s per call per node : 806564 +Grid : Message : Average mflops/s per call per node : 829801 +Grid : Message : Average mflops/s per call per node : 666031 +Grid : Message : Average mflops/s per call per node (full): 315103 +Grid : Message : Average mflops/s per call per node (full): 434902 +Grid : Message : Average mflops/s per call per node (full): 446852 +Grid : Message : Average mflops/s per call per node (full): 303927 +Grid : Message : Stencil 12.7534 GB/s per node +Grid : Message : Stencil 18.1301 GB/s per node +Grid : Message : Stencil 17.676 GB/s per node +Grid : Message : Stencil 12.6733 GB/s per node +Grid : Message : Average mflops/s per call per node : 666771 +Grid : Message : Average mflops/s per call per node : 795816 +Grid : Message : Average mflops/s per call per node : 819261 +Grid : Message : Average mflops/s per call per node : 661509 +Grid : Message : Average mflops/s per call per node (full): 313882 +Grid : Message : Average mflops/s per call per node (full): 436371 +Grid : Message : Average mflops/s per call per node (full): 445274 +Grid : Message : Average mflops/s per call per node (full): 304463 +Grid : Message : Stencil 13.021 GB/s per node +Grid : Message : Stencil 17.343 GB/s per node +Grid : Message : Stencil 17.7043 GB/s per node +Grid : Message : Stencil 12.9421 GB/s per node +Grid : Message : Average mflops/s per call per node : 667115 +Grid : Message : Average mflops/s per call per node : 801682 +Grid : Message : Average mflops/s per call per node : 821272 +Grid : Message : Average mflops/s per call per node : 667456 +Grid : Message : Average mflops/s per call per node (full): 314729 +Grid : Message : Average mflops/s per call per node (full): 437239 +Grid : Message : Average mflops/s per call per node (full): 440060 +Grid : Message : Average mflops/s per call per node (full): 305517 +Grid : Message : Stencil 14.6619 GB/s per node +Grid : Message : Stencil 14.2718 GB/s per node +Grid : Message : Stencil 17.7885 GB/s per node +Grid : Message : Stencil 12.7771 GB/s per node +Grid : Message : Average mflops/s per call per node : 663197 +Grid : Message : Average mflops/s per call per node : 804402 +Grid : Message : Average mflops/s per call per node : 818351 +Grid : Message : Average mflops/s per call per node : 659551 +Grid 
: Message : Average mflops/s per call per node (full): 316195 +Grid : Message : Average mflops/s per call per node (full): 404190 +Grid : Message : Average mflops/s per call per node (full): 445743 +Grid : Message : Average mflops/s per call per node (full): 303828 +Grid : Message : Stencil 14.1316 GB/s per node +Grid : Message : Stencil 16.6678 GB/s per node +Grid : Message : Stencil 18.9153 GB/s per node +Grid : Message : Stencil 12.2411 GB/s per node +Grid : Message : Average mflops/s per call per node : 664796 +Grid : Message : Average mflops/s per call per node : 801994 +Grid : Message : Average mflops/s per call per node : 817793 +Grid : Message : Average mflops/s per call per node : 671268 +Grid : Message : Average mflops/s per call per node (full): 316474 +Grid : Message : Average mflops/s per call per node (full): 434130 +Grid : Message : Average mflops/s per call per node (full): 446851 +Grid : Message : Average mflops/s per call per node (full): 304576 +Grid : Message : Stencil 15.3967 GB/s per node +Grid : Message : Stencil 17.3259 GB/s per node +Grid : Message : Stencil 17.8009 GB/s per node +Grid : Message : Stencil 12.0176 GB/s per node +Grid : Message : Average mflops/s per call per node : 662867 +Grid : Message : Average mflops/s per call per node : 802190 +Grid : Message : Average mflops/s per call per node : 818363 +Grid : Message : Average mflops/s per call per node : 672577 +Grid : Message : Average mflops/s per call per node (full): 317209 +Grid : Message : Average mflops/s per call per node (full): 437671 +Grid : Message : Average mflops/s per call per node (full): 443132 +Grid : Message : Average mflops/s per call per node (full): 302658 +Grid : Message : Stencil 13.7304 GB/s per node +Grid : Message : Stencil 16.5138 GB/s per node +Grid : Message : Stencil 17.2066 GB/s per node +Grid : Message : Stencil 12.6344 GB/s per node +Grid : Message : Average mflops/s per call per node : 667165 +Grid : Message : Average mflops/s per call per node : 801862 +Grid : Message : Average mflops/s per call per node : 821386 +Grid : Message : Average mflops/s per call per node : 668298 +Grid : Message : Average mflops/s per call per node (full): 313850 +Grid : Message : Average mflops/s per call per node (full): 434568 +Grid : Message : Average mflops/s per call per node (full): 441871 +Grid : Message : Average mflops/s per call per node (full): 305778 +Grid : Message : Stencil 14.1902 GB/s per node +Grid : Message : Stencil 12.5243 GB/s per node +Grid : Message : Stencil 17.6716 GB/s per node +Grid : Message : Stencil 12.2327 GB/s per node +Grid : Message : Average mflops/s per call per node : 667550 +Grid : Message : Average mflops/s per call per node : 808486 +Grid : Message : Average mflops/s per call per node : 821750 +Grid : Message : Average mflops/s per call per node : 664300 +Grid : Message : Average mflops/s per call per node (full): 317058 +Grid : Message : Average mflops/s per call per node (full): 371507 +Grid : Message : Average mflops/s per call per node (full): 445703 +Grid : Message : Average mflops/s per call per node (full): 302001 +Grid : Message : Stencil 14.7126 GB/s per node +Grid : Message : Stencil 17.3296 GB/s per node +Grid : Message : Stencil 16.9344 GB/s per node +Grid : Message : Stencil 13.7377 GB/s per node +Grid : Message : Average mflops/s per call per node : 663247 +Grid : Message : Average mflops/s per call per node : 801530 +Grid : Message : Average mflops/s per call per node : 823572 +Grid : Message : Average mflops/s per call per node : 660202 
+Grid : Message : Average mflops/s per call per node (full): 317715 +Grid : Message : Average mflops/s per call per node (full): 438797 +Grid : Message : Average mflops/s per call per node (full): 440266 +Grid : Message : Average mflops/s per call per node (full): 305603 +Grid : Message : Stencil 14.2874 GB/s per node +Grid : Message : Stencil 16.3823 GB/s per node +Grid : Message : Stencil 18.2821 GB/s per node +Grid : Message : Stencil 12.2695 GB/s per node +Grid : Message : Average mflops/s per call per node : 664440 +Grid : Message : Average mflops/s per call per node : 802782 +Grid : Message : Average mflops/s per call per node : 817892 +Grid : Message : Average mflops/s per call per node : 664293 +Grid : Message : Average mflops/s per call per node (full): 314310 +Grid : Message : Average mflops/s per call per node (full): 431204 +Grid : Message : Average mflops/s per call per node (full): 445353 +Grid : Message : Average mflops/s per call per node (full): 304266 +Grid : Message : Stencil 12.2287 GB/s per node +Grid : Message : Stencil 9.84358 GB/s per node +Grid : Message : Stencil 18.2287 GB/s per node +Grid : Message : Stencil 13.4763 GB/s per node +Grid : Message : Average mflops/s per call per node : 669877 +Grid : Message : Average mflops/s per call per node : 812425 +Grid : Message : Average mflops/s per call per node : 815365 +Grid : Message : Average mflops/s per call per node : 662986 +Grid : Message : Average mflops/s per call per node (full): 310980 +Grid : Message : Average mflops/s per call per node (full): 312378 +Grid : Message : Average mflops/s per call per node (full): 445350 +Grid : Message : Average mflops/s per call per node (full): 304896 +Grid : Message : Stencil 12.4854 GB/s per node +Grid : Message : Stencil 16.9744 GB/s per node +Grid : Message : Stencil 17.2508 GB/s per node +Grid : Message : Stencil 14.1054 GB/s per node +Grid : Message : Average mflops/s per call per node : 666189 +Grid : Message : Average mflops/s per call per node : 800186 +Grid : Message : Average mflops/s per call per node : 824927 +Grid : Message : Average mflops/s per call per node : 660589 +Grid : Message : Average mflops/s per call per node (full): 313277 +Grid : Message : Average mflops/s per call per node (full): 437682 +Grid : Message : Average mflops/s per call per node (full): 443906 +Grid : Message : Average mflops/s per call per node (full): 304831 +Grid : Message : Stencil 13.8368 GB/s per node +Grid : Message : Stencil 16.3028 GB/s per node +Grid : Message : Stencil 17.5656 GB/s per node +Grid : Message : Stencil 12.244 GB/s per node +Grid : Message : Average mflops/s per call per node : 664243 +Grid : Message : Average mflops/s per call per node : 800247 +Grid : Message : Average mflops/s per call per node : 825207 +Grid : Message : Average mflops/s per call per node : 669564 +Grid : Message : Average mflops/s per call per node (full): 316090 +Grid : Message : Average mflops/s per call per node (full): 431846 +Grid : Message : Average mflops/s per call per node (full): 445461 +Grid : Message : Average mflops/s per call per node (full): 303726 +Grid : Message : Stencil 13.2986 GB/s per node +Grid : Message : Stencil 16.8133 GB/s per node +Grid : Message : Stencil 17.0501 GB/s per node +Grid : Message : Stencil 12.7636 GB/s per node +Grid : Message : Average mflops/s per call per node : 663854 +Grid : Message : Average mflops/s per call per node : 802586 +Grid : Message : Average mflops/s per call per node : 815724 +Grid : Message : Average mflops/s per call per node : 
670749 +Grid : Message : Average mflops/s per call per node (full): 315614 +Grid : Message : Average mflops/s per call per node (full): 437869 +Grid : Message : Average mflops/s per call per node (full): 440744 +Grid : Message : Average mflops/s per call per node (full): 307138 +Grid : Message : Stencil 12.9122 GB/s per node +Grid : Message : Stencil 15.6169 GB/s per node +Grid : Message : Stencil 17.9642 GB/s per node +Grid : Message : Stencil 13.2988 GB/s per node +Grid : Message : Average mflops/s per call per node : 666449 +Grid : Message : Average mflops/s per call per node : 810116 +Grid : Message : Average mflops/s per call per node : 822794 +Grid : Message : Average mflops/s per call per node : 666926 +Grid : Message : Average mflops/s per call per node (full): 313636 +Grid : Message : Average mflops/s per call per node (full): 425292 +Grid : Message : Average mflops/s per call per node (full): 447099 +Grid : Message : Average mflops/s per call per node (full): 306732 +Grid : Message : Stencil 13.0659 GB/s per node +Grid : Message : Stencil 17.3714 GB/s per node +Grid : Message : Stencil 18.1176 GB/s per node +Grid : Message : Stencil 12.6298 GB/s per node +Grid : Message : Average mflops/s per call per node : 665254 +Grid : Message : Average mflops/s per call per node : 800461 +Grid : Message : Average mflops/s per call per node : 825509 +Grid : Message : Average mflops/s per call per node : 664509 +Grid : Message : Average mflops/s per call per node (full): 314606 +Grid : Message : Average mflops/s per call per node (full): 437387 +Grid : Message : Average mflops/s per call per node (full): 442048 +Grid : Message : Average mflops/s per call per node (full): 304503 +Grid : Message : Stencil 13.1864 GB/s per node +Grid : Message : Stencil 16.961 GB/s per node +Grid : Message : Stencil 18.1721 GB/s per node +Grid : Message : Stencil 13.3021 GB/s per node +Grid : Message : Average mflops/s per call per node : 668091 +Grid : Message : Average mflops/s per call per node : 797834 +Grid : Message : Average mflops/s per call per node : 822533 +Grid : Message : Average mflops/s per call per node : 665343 +Grid : Message : Average mflops/s per call per node (full): 315989 +Grid : Message : Average mflops/s per call per node (full): 437285 +Grid : Message : Average mflops/s per call per node (full): 448604 +Grid : Message : Average mflops/s per call per node (full): 305937 +Grid : Message : Stencil 12.7171 GB/s per node +Grid : Message : Stencil 17.0547 GB/s per node +Grid : Message : Stencil 17.2289 GB/s per node +Grid : Message : Stencil 13.0249 GB/s per node +Grid : Message : Average mflops/s per call per node : 669958 +Grid : Message : Average mflops/s per call per node : 802599 +Grid : Message : Average mflops/s per call per node : 823252 +Grid : Message : Average mflops/s per call per node : 663794 +Grid : Message : Average mflops/s per call per node (full): 315310 +Grid : Message : Average mflops/s per call per node (full): 434842 +Grid : Message : Average mflops/s per call per node (full): 443271 +Grid : Message : Average mflops/s per call per node (full): 305779 +Grid : Message : Stencil 12.7706 GB/s per node +Grid : Message : Stencil 17.9404 GB/s per node +Grid : Message : Stencil 17.5594 GB/s per node +Grid : Message : Stencil 11.8661 GB/s per node +Grid : Message : Average mflops/s per call per node : 670656 +Grid : Message : Average mflops/s per call per node : 798841 +Grid : Message : Average mflops/s per call per node : 816038 +Grid : Message : Average mflops/s per call per 
node : 667840 +Grid : Message : Average mflops/s per call per node (full): 315570 +Grid : Message : Average mflops/s per call per node (full): 438820 +Grid : Message : Average mflops/s per call per node (full): 444007 +Grid : Message : Average mflops/s per call per node (full): 301392 +Grid : Message : Stencil 12.6387 GB/s per node +Grid : Message : Stencil 17.87 GB/s per node +Grid : Message : Stencil 17.3612 GB/s per node +Grid : Message : Stencil 12.1397 GB/s per node +Grid : Message : Average mflops/s per call per node : 670097 +Grid : Message : Average mflops/s per call per node : 801686 +Grid : Message : Average mflops/s per call per node : 822358 +Grid : Message : Average mflops/s per call per node : 673319 +Grid : Message : Average mflops/s per call per node (full): 314702 +Grid : Message : Average mflops/s per call per node (full): 433081 +Grid : Message : Average mflops/s per call per node (full): 443404 +Grid : Message : Average mflops/s per call per node (full): 303616 +Grid : Message : Stencil 12.6213 GB/s per node +Grid : Message : Stencil 17.3179 GB/s per node +Grid : Message : Stencil 17.3871 GB/s per node +Grid : Message : Stencil 14.4538 GB/s per node +Grid : Message : Average mflops/s per call per node : 669104 +Grid : Message : Average mflops/s per call per node : 808904 +Grid : Message : Average mflops/s per call per node : 816456 +Grid : Message : Average mflops/s per call per node : 664116 +Grid : Message : Average mflops/s per call per node (full): 314805 +Grid : Message : Average mflops/s per call per node (full): 438129 +Grid : Message : Average mflops/s per call per node (full): 443576 +Grid : Message : Average mflops/s per call per node (full): 307045 +Grid : Message : Stencil 13.1342 GB/s per node +Grid : Message : Stencil 17.2667 GB/s per node +Grid : Message : Stencil 18.9676 GB/s per node +Grid : Message : Stencil 13.1063 GB/s per node +Grid : Message : Average mflops/s per call per node : 666448 +Grid : Message : Average mflops/s per call per node : 803193 +Grid : Message : Average mflops/s per call per node : 820740 +Grid : Message : Average mflops/s per call per node : 661598 +Grid : Message : Average mflops/s per call per node (full): 315157 +Grid : Message : Average mflops/s per call per node (full): 437486 +Grid : Message : Average mflops/s per call per node (full): 447975 +Grid : Message : Average mflops/s per call per node (full): 305551 +Grid : Message : Stencil 12.8885 GB/s per node +Grid : Message : Stencil 16.7165 GB/s per node +Grid : Message : Stencil 17.4135 GB/s per node +Grid : Message : Stencil 12.4162 GB/s per node +Grid : Message : Average mflops/s per call per node : 669610 +Grid : Message : Average mflops/s per call per node : 798878 +Grid : Message : Average mflops/s per call per node : 828761 +Grid : Message : Average mflops/s per call per node : 665508 +Grid : Message : Average mflops/s per call per node (full): 315714 +Grid : Message : Average mflops/s per call per node (full): 433076 +Grid : Message : Average mflops/s per call per node (full): 445197 +Grid : Message : Average mflops/s per call per node (full): 304990 +Grid : Message : Stencil 13.4829 GB/s per node +Grid : Message : Stencil 17.3411 GB/s per node +Grid : Message : Stencil 19.5368 GB/s per node +Grid : Message : Stencil 12.2651 GB/s per node +Grid : Message : Average mflops/s per call per node : 663229 +Grid : Message : Average mflops/s per call per node : 801443 +Grid : Message : Average mflops/s per call per node : 822028 +Grid : Message : Average mflops/s per call 
per node : 666187 +Grid : Message : Average mflops/s per call per node (full): 313415 +Grid : Message : Average mflops/s per call per node (full): 437696 +Grid : Message : Average mflops/s per call per node (full): 451046 +Grid : Message : Average mflops/s per call per node (full): 303781 +Grid : Message : Stencil 12.9546 GB/s per node +Grid : Message : Stencil 16.7871 GB/s per node +Grid : Message : Stencil 17.4806 GB/s per node +Grid : Message : Stencil 12.5693 GB/s per node +Grid : Message : Average mflops/s per call per node : 667984 +Grid : Message : Average mflops/s per call per node : 804416 +Grid : Message : Average mflops/s per call per node : 820681 +Grid : Message : Average mflops/s per call per node : 662491 +Grid : Message : Average mflops/s per call per node (full): 315209 +Grid : Message : Average mflops/s per call per node (full): 437613 +Grid : Message : Average mflops/s per call per node (full): 444061 +Grid : Message : Average mflops/s per call per node (full): 304878 +Grid : Message : Stencil 13.2857 GB/s per node +Grid : Message : Stencil 16.9461 GB/s per node +Grid : Message : Stencil 17.5121 GB/s per node +Grid : Message : Stencil 13.1988 GB/s per node +Grid : Message : Average mflops/s per call per node : 666158 +Grid : Message : Average mflops/s per call per node : 806146 +Grid : Message : Average mflops/s per call per node : 822367 +Grid : Message : Average mflops/s per call per node : 667978 +Grid : Message : Average mflops/s per call per node (full): 315279 +Grid : Message : Average mflops/s per call per node (full): 437540 +Grid : Message : Average mflops/s per call per node (full): 445660 +Grid : Message : Average mflops/s per call per node (full): 306304 +Grid : Message : Stencil 13.1804 GB/s per node +Grid : Message : Stencil 17.6715 GB/s per node +Grid : Message : Stencil 17.9868 GB/s per node +Grid : Message : Stencil 13.724 GB/s per node +Grid : Message : Average mflops/s per call per node : 667593 +Grid : Message : Average mflops/s per call per node : 801475 +Grid : Message : Average mflops/s per call per node : 823711 +Grid : Message : Average mflops/s per call per node : 668033 +Grid : Message : Average mflops/s per call per node (full): 315247 +Grid : Message : Average mflops/s per call per node (full): 435588 +Grid : Message : Average mflops/s per call per node (full): 446829 +Grid : Message : Average mflops/s per call per node (full): 307368 +Grid : Message : Stencil 12.851 GB/s per node +Grid : Message : Stencil 17.4314 GB/s per node +Grid : Message : Stencil 17.2006 GB/s per node +Grid : Message : Stencil 13.0596 GB/s per node +Grid : Message : Average mflops/s per call per node : 667473 +Grid : Message : Average mflops/s per call per node : 802011 +Grid : Message : Average mflops/s per call per node : 827037 +Grid : Message : Average mflops/s per call per node : 663835 +Grid : Message : Average mflops/s per call per node (full): 313637 +Grid : Message : Average mflops/s per call per node (full): 440010 +Grid : Message : Average mflops/s per call per node (full): 443524 +Grid : Message : Average mflops/s per call per node (full): 304671 +Grid : Message : Stencil 13.5352 GB/s per node +Grid : Message : Stencil 13.4914 GB/s per node +Grid : Message : Stencil 18.1952 GB/s per node +Grid : Message : Stencil 13.6803 GB/s per node +Grid : Message : Average mflops/s per call per node : 666383 +Grid : Message : Average mflops/s per call per node : 810543 +Grid : Message : Average mflops/s per call per node : 819120 +Grid : Message : Average mflops/s per 
call per node : 661499 +Grid : Message : Average mflops/s per call per node (full): 315285 +Grid : Message : Average mflops/s per call per node (full): 390981 +Grid : Message : Average mflops/s per call per node (full): 437005 +Grid : Message : Average mflops/s per call per node (full): 305682 +Grid : Message : Stencil 13.9432 GB/s per node +Grid : Message : Stencil 16.9757 GB/s per node +Grid : Message : Stencil 18.1128 GB/s per node +Grid : Message : Stencil 14.5547 GB/s per node +Grid : Message : Average mflops/s per call per node : 664137 +Grid : Message : Average mflops/s per call per node : 806558 +Grid : Message : Average mflops/s per call per node : 819594 +Grid : Message : Average mflops/s per call per node : 660482 +Grid : Message : Average mflops/s per call per node (full): 315526 +Grid : Message : Average mflops/s per call per node (full): 437547 +Grid : Message : Average mflops/s per call per node (full): 445861 +Grid : Message : Average mflops/s per call per node (full): 306961 +Grid : Message : Stencil 13.4324 GB/s per node +Grid : Message : Stencil 18.4747 GB/s per node +Grid : Message : Stencil 17.1373 GB/s per node +Grid : Message : Stencil 13.291 GB/s per node +Grid : Message : Average mflops/s per call per node : 666766 +Grid : Message : Average mflops/s per call per node : 798827 +Grid : Message : Average mflops/s per call per node : 824186 +Grid : Message : Average mflops/s per call per node : 665952 +Grid : Message : Average mflops/s per call per node (full): 315836 +Grid : Message : Average mflops/s per call per node (full): 440620 +Grid : Message : Average mflops/s per call per node (full): 442485 +Grid : Message : Average mflops/s per call per node (full): 306693 +Grid : Message : Stencil 14.2648 GB/s per node +Grid : Message : Stencil 17.4152 GB/s per node +Grid : Message : Stencil 17.9912 GB/s per node +Grid : Message : Stencil 12.5767 GB/s per node +Grid : Message : Average mflops/s per call per node : 665482 +Grid : Message : Average mflops/s per call per node : 806993 +Grid : Message : Average mflops/s per call per node : 816942 +Grid : Message : Average mflops/s per call per node : 661803 +Grid : Message : Average mflops/s per call per node (full): 317197 +Grid : Message : Average mflops/s per call per node (full): 438764 +Grid : Message : Average mflops/s per call per node (full): 445909 +Grid : Message : Average mflops/s per call per node (full): 304387 +Grid : Message : Stencil 14.4825 GB/s per node +Grid : Message : Stencil 16.3628 GB/s per node +Grid : Message : Stencil 17.3344 GB/s per node +Grid : Message : Stencil 12.5575 GB/s per node +Grid : Message : Average mflops/s per call per node : 661238 +Grid : Message : Average mflops/s per call per node : 803942 +Grid : Message : Average mflops/s per call per node : 818555 +Grid : Message : Average mflops/s per call per node : 667193 +Grid : Message : Average mflops/s per call per node (full): 316596 +Grid : Message : Average mflops/s per call per node (full): 426580 +Grid : Message : Average mflops/s per call per node (full): 442295 +Grid : Message : Average mflops/s per call per node (full): 305003 +Grid : Message : Stencil 13.2119 GB/s per node +Grid : Message : Stencil 14.9454 GB/s per node +Grid : Message : Stencil 18.2205 GB/s per node +Grid : Message : Stencil 13.2841 GB/s per node +Grid : Message : Average mflops/s per call per node : 666447 +Grid : Message : Average mflops/s per call per node : 805976 +Grid : Message : Average mflops/s per call per node : 825025 +Grid : Message : Average mflops/s 
per call per node : 662542 +Grid : Message : Average mflops/s per call per node (full): 315221 +Grid : Message : Average mflops/s per call per node (full): 414701 +Grid : Message : Average mflops/s per call per node (full): 449093 +Grid : Message : Average mflops/s per call per node (full): 305750 +Grid : Message : Stencil 13.3864 GB/s per node +Grid : Message : Stencil 16.3266 GB/s per node +Grid : Message : Stencil 18.0105 GB/s per node +Grid : Message : Stencil 13.2748 GB/s per node +Grid : Message : Average mflops/s per call per node : 666430 +Grid : Message : Average mflops/s per call per node : 801581 +Grid : Message : Average mflops/s per call per node : 822402 +Grid : Message : Average mflops/s per call per node : 664154 +Grid : Message : Average mflops/s per call per node (full): 316332 +Grid : Message : Average mflops/s per call per node (full): 432812 +Grid : Message : Average mflops/s per call per node (full): 447225 +Grid : Message : Average mflops/s per call per node (full): 306327 +Grid : Message : Stencil 14.0436 GB/s per node +Grid : Message : Stencil 17.5313 GB/s per node +Grid : Message : Stencil 17.9904 GB/s per node +Grid : Message : Stencil 14.0196 GB/s per node +Grid : Message : Average mflops/s per call per node : 661958 +Grid : Message : Average mflops/s per call per node : 802925 +Grid : Message : Average mflops/s per call per node : 822580 +Grid : Message : Average mflops/s per call per node : 655474 +Grid : Message : Average mflops/s per call per node (full): 315807 +Grid : Message : Average mflops/s per call per node (full): 440084 +Grid : Message : Average mflops/s per call per node (full): 446506 +Grid : Message : Average mflops/s per call per node (full): 305478 +Grid : Message : Stencil 13.8095 GB/s per node +Grid : Message : Stencil 17.219 GB/s per node +Grid : Message : Stencil 17.8972 GB/s per node +Grid : Message : Stencil 12.5523 GB/s per node +Grid : Message : Average mflops/s per call per node : 662486 +Grid : Message : Average mflops/s per call per node : 805245 +Grid : Message : Average mflops/s per call per node : 822277 +Grid : Message : Average mflops/s per call per node : 667800 +Grid : Message : Average mflops/s per call per node (full): 315251 +Grid : Message : Average mflops/s per call per node (full): 437134 +Grid : Message : Average mflops/s per call per node (full): 445001 +Grid : Message : Average mflops/s per call per node (full): 303441 +Grid : Message : Stencil 13.4219 GB/s per node +Grid : Message : Stencil 16.4628 GB/s per node +Grid : Message : Stencil 17.7018 GB/s per node +Grid : Message : Stencil 12.3819 GB/s per node +Grid : Message : Average mflops/s per call per node : 665299 +Grid : Message : Average mflops/s per call per node : 805631 +Grid : Message : Average mflops/s per call per node : 819042 +Grid : Message : Average mflops/s per call per node : 670935 +Grid : Message : Average mflops/s per call per node (full): 315640 +Grid : Message : Average mflops/s per call per node (full): 434757 +Grid : Message : Average mflops/s per call per node (full): 444690 +Grid : Message : Average mflops/s per call per node (full): 305656 +Grid : Message : Stencil 12.4988 GB/s per node +Grid : Message : Stencil 16.9083 GB/s per node +Grid : Message : Stencil 17.7146 GB/s per node +Grid : Message : Stencil 14.3923 GB/s per node +Grid : Message : Average mflops/s per call per node : 665591 +Grid : Message : Average mflops/s per call per node : 803923 +Grid : Message : Average mflops/s per call per node : 828013 +Grid : Message : Average 
mflops/s per call per node : 663066 +Grid : Message : Average mflops/s per call per node (full): 313228 +Grid : Message : Average mflops/s per call per node (full): 435404 +Grid : Message : Average mflops/s per call per node (full): 442913 +Grid : Message : Average mflops/s per call per node (full): 306970 +Grid : Message : Stencil 13.3269 GB/s per node +Grid : Message : Stencil 16.7782 GB/s per node +Grid : Message : Stencil 18.0916 GB/s per node +Grid : Message : Stencil 13.0793 GB/s per node +Grid : Message : Average mflops/s per call per node : 666367 +Grid : Message : Average mflops/s per call per node : 806076 +Grid : Message : Average mflops/s per call per node : 819398 +Grid : Message : Average mflops/s per call per node : 663443 +Grid : Message : Average mflops/s per call per node (full): 315773 +Grid : Message : Average mflops/s per call per node (full): 436873 +Grid : Message : Average mflops/s per call per node (full): 445575 +Grid : Message : Average mflops/s per call per node (full): 305899 +Grid : Message : Stencil 12.4975 GB/s per node +Grid : Message : Stencil 16.6148 GB/s per node +Grid : Message : Stencil 16.7767 GB/s per node +Grid : Message : Stencil 14.3518 GB/s per node +Grid : Message : Average mflops/s per call per node : 666796 +Grid : Message : Average mflops/s per call per node : 806592 +Grid : Message : Average mflops/s per call per node : 823710 +Grid : Message : Average mflops/s per call per node : 664034 +Grid : Message : Average mflops/s per call per node (full): 312829 +Grid : Message : Average mflops/s per call per node (full): 435589 +Grid : Message : Average mflops/s per call per node (full): 431787 +Grid : Message : Average mflops/s per call per node (full): 307315 +Grid : Message : Stencil 12.7171 GB/s per node +Grid : Message : Stencil 17.2914 GB/s per node +Grid : Message : Stencil 17.1318 GB/s per node +Grid : Message : Stencil 14.4639 GB/s per node +Grid : Message : Average mflops/s per call per node : 664303 +Grid : Message : Average mflops/s per call per node : 805861 +Grid : Message : Average mflops/s per call per node : 827627 +Grid : Message : Average mflops/s per call per node : 657617 +Grid : Message : Average mflops/s per call per node (full): 311895 +Grid : Message : Average mflops/s per call per node (full): 440178 +Grid : Message : Average mflops/s per call per node (full): 438531 +Grid : Message : Average mflops/s per call per node (full): 305532 +Grid : Message : Stencil 13.8382 GB/s per node +Grid : Message : Stencil 16.6842 GB/s per node +Grid : Message : Stencil 17.5183 GB/s per node +Grid : Message : Stencil 12.7917 GB/s per node +Grid : Message : Average mflops/s per call per node : 663577 +Grid : Message : Average mflops/s per call per node : 805710 +Grid : Message : Average mflops/s per call per node : 820054 +Grid : Message : Average mflops/s per call per node : 661076 +Grid : Message : Average mflops/s per call per node (full): 315325 +Grid : Message : Average mflops/s per call per node (full): 437400 +Grid : Message : Average mflops/s per call per node (full): 443599 +Grid : Message : Average mflops/s per call per node (full): 304844 +Grid : Message : Stencil 13.2596 GB/s per node +Grid : Message : Stencil 16.6761 GB/s per node +Grid : Message : Stencil 17.0363 GB/s per node +Grid : Message : Stencil 14.1222 GB/s per node +Grid : Message : Average mflops/s per call per node : 665035 +Grid : Message : Average mflops/s per call per node : 806591 +Grid : Message : Average mflops/s per call per node : 819211 +Grid : Message : 
Average mflops/s per call per node : 660628
+Grid : Message : Average mflops/s per call per node (full): 314882
+Grid : Message : Average mflops/s per call per node (full): 436507
+Grid : Message : Average mflops/s per call per node (full): 440248
+Grid : Message : Average mflops/s per call per node (full): 306543
+Grid : Message : Stencil 13.9848 GB/s per node
+Grid : Message : Stencil 18.3976 GB/s per node
+Grid : Message : Stencil 19.3124 GB/s per node
+Grid : Message : Stencil 12.9166 GB/s per node
+Grid : Message : Average mflops/s per call per node : 664656
+Grid : Message : Average mflops/s per call per node : 799203
+Grid : Message : Average mflops/s per call per node : 819769
+Grid : Message : Average mflops/s per call per node : 669625
+Grid : Message : Average mflops/s per call per node (full): 316861
+Grid : Message : Average mflops/s per call per node (full): 439961
+Grid : Message : Average mflops/s per call per node (full): 448614
+Grid : Message : Average mflops/s per call per node (full): 307114
[... the benchmark log repeats this four-volume cycle (four Stencil lines, four naive mflops/s lines, four full mflops/s lines) for many further iterations; across the full run the reported figures span: Stencil 9.4-19.3 GB/s per node, Average mflops/s per call per node 657k-829k, Average mflops/s per call per node (full) 299k-449k ...]
per node : 805087 +Grid : Message : Average mflops/s per call per node : 815001 +Grid : Message : Average mflops/s per call per node : 663820 +Grid : Message : Average mflops/s per call per node (full): 315198 +Grid : Message : Average mflops/s per call per node (full): 430787 +Grid : Message : Average mflops/s per call per node (full): 444664 +Grid : Message : Average mflops/s per call per node (full): 304994 +Grid : Message : Stencil 12.5859 GB/s per node +Grid : Message : Stencil 16.4739 GB/s per node +Grid : Message : Stencil 17.3418 GB/s per node +Grid : Message : Stencil 12.8162 GB/s per node +Grid : Message : Average mflops/s per call per node : 671092 +Grid : Message : Average mflops/s per call per node : 803434 +Grid : Message : Average mflops/s per call per node : 819660 +Grid : Message : Average mflops/s per call per node : 667983 +Grid : Message : Average mflops/s per call per node (full): 314765 +Grid : Message : Average mflops/s per call per node (full): 434197 +Grid : Message : Average mflops/s per call per node (full): 442487 +Grid : Message : Average mflops/s per call per node (full): 306098 +Grid : Message : Stencil 13.4637 GB/s per node +Grid : Message : Stencil 7.73841 GB/s per node +Grid : Message : Stencil 17.9445 GB/s per node +Grid : Message : Stencil 13.2531 GB/s per node +Grid : Message : Average mflops/s per call per node : 666729 +Grid : Message : Average mflops/s per call per node : 809090 +Grid : Message : Average mflops/s per call per node : 823783 +Grid : Message : Average mflops/s per call per node : 661230 +Grid : Message : Average mflops/s per call per node (full): 315758 +Grid : Message : Average mflops/s per call per node (full): 259334 +Grid : Message : Average mflops/s per call per node (full): 447738 +Grid : Message : Average mflops/s per call per node (full): 304430 +Grid : Message : Stencil 12.637 GB/s per node +Grid : Message : Stencil 16.3219 GB/s per node +Grid : Message : Stencil 17.178 GB/s per node +Grid : Message : Stencil 12.8779 GB/s per node +Grid : Message : Average mflops/s per call per node : 665199 +Grid : Message : Average mflops/s per call per node : 808316 +Grid : Message : Average mflops/s per call per node : 825699 +Grid : Message : Average mflops/s per call per node : 667714 +Grid : Message : Average mflops/s per call per node (full): 313642 +Grid : Message : Average mflops/s per call per node (full): 432798 +Grid : Message : Average mflops/s per call per node (full): 442997 +Grid : Message : Average mflops/s per call per node (full): 305697 +Grid : Message : Stencil 13.8312 GB/s per node +Grid : Message : Stencil 17.2629 GB/s per node +Grid : Message : Stencil 17.0892 GB/s per node +Grid : Message : Stencil 12.7407 GB/s per node +Grid : Message : Average mflops/s per call per node : 660395 +Grid : Message : Average mflops/s per call per node : 798639 +Grid : Message : Average mflops/s per call per node : 825638 +Grid : Message : Average mflops/s per call per node : 661845 +Grid : Message : Average mflops/s per call per node (full): 315821 +Grid : Message : Average mflops/s per call per node (full): 436751 +Grid : Message : Average mflops/s per call per node (full): 436490 +Grid : Message : Average mflops/s per call per node (full): 303363 +Grid : Message : Stencil 14.028 GB/s per node +Grid : Message : Stencil 16.6012 GB/s per node +Grid : Message : Stencil 17.5076 GB/s per node +Grid : Message : Stencil 12.9486 GB/s per node +Grid : Message : Average mflops/s per call per node : 662658 +Grid : Message : Average mflops/s per 
call per node : 803401 +Grid : Message : Average mflops/s per call per node : 820992 +Grid : Message : Average mflops/s per call per node : 662085 +Grid : Message : Average mflops/s per call per node (full): 316026 +Grid : Message : Average mflops/s per call per node (full): 436247 +Grid : Message : Average mflops/s per call per node (full): 443812 +Grid : Message : Average mflops/s per call per node (full): 304274 +Grid : Message : Stencil 13.5494 GB/s per node +Grid : Message : Stencil 10.9216 GB/s per node +Grid : Message : Stencil 17.3544 GB/s per node +Grid : Message : Stencil 12.5985 GB/s per node +Grid : Message : Average mflops/s per call per node : 665674 +Grid : Message : Average mflops/s per call per node : 805542 +Grid : Message : Average mflops/s per call per node : 830375 +Grid : Message : Average mflops/s per call per node : 670696 +Grid : Message : Average mflops/s per call per node (full): 315974 +Grid : Message : Average mflops/s per call per node (full): 337951 +Grid : Message : Average mflops/s per call per node (full): 444669 +Grid : Message : Average mflops/s per call per node (full): 306025 +Grid : Message : Stencil 14.5241 GB/s per node +Grid : Message : Stencil 18.0398 GB/s per node +Grid : Message : Stencil 17.3894 GB/s per node +Grid : Message : Stencil 13.0366 GB/s per node +Grid : Message : Average mflops/s per call per node : 662707 +Grid : Message : Average mflops/s per call per node : 802766 +Grid : Message : Average mflops/s per call per node : 816841 +Grid : Message : Average mflops/s per call per node : 665898 +Grid : Message : Average mflops/s per call per node (full): 317317 +Grid : Message : Average mflops/s per call per node (full): 439686 +Grid : Message : Average mflops/s per call per node (full): 441888 +Grid : Message : Average mflops/s per call per node (full): 305500 +Grid : Message : Stencil 12.7563 GB/s per node +Grid : Message : Stencil 16.6327 GB/s per node +Grid : Message : Stencil 17.4013 GB/s per node +Grid : Message : Stencil 14.5206 GB/s per node +Grid : Message : Average mflops/s per call per node : 670543 +Grid : Message : Average mflops/s per call per node : 807913 +Grid : Message : Average mflops/s per call per node : 824241 +Grid : Message : Average mflops/s per call per node : 664469 +Grid : Message : Average mflops/s per call per node (full): 313226 +Grid : Message : Average mflops/s per call per node (full): 435507 +Grid : Message : Average mflops/s per call per node (full): 444819 +Grid : Message : Average mflops/s per call per node (full): 308239 +Grid : Message : Stencil 13.8565 GB/s per node +Grid : Message : Stencil 16.9393 GB/s per node +Grid : Message : Stencil 17.3037 GB/s per node +Grid : Message : Stencil 12.6889 GB/s per node +Grid : Message : Average mflops/s per call per node : 668007 +Grid : Message : Average mflops/s per call per node : 809567 +Grid : Message : Average mflops/s per call per node : 819754 +Grid : Message : Average mflops/s per call per node : 661690 +Grid : Message : Average mflops/s per call per node (full): 314114 +Grid : Message : Average mflops/s per call per node (full): 438418 +Grid : Message : Average mflops/s per call per node (full): 442341 +Grid : Message : Average mflops/s per call per node (full): 303115 +Grid : Message : Stencil 14.6176 GB/s per node +Grid : Message : Stencil 16.4156 GB/s per node +Grid : Message : Stencil 17.9656 GB/s per node +Grid : Message : Stencil 12.1492 GB/s per node +Grid : Message : Average mflops/s per call per node : 663949 +Grid : Message : Average 
mflops/s per call per node : 811066 +Grid : Message : Average mflops/s per call per node : 818656 +Grid : Message : Average mflops/s per call per node : 660849 +Grid : Message : Average mflops/s per call per node (full): 316079 +Grid : Message : Average mflops/s per call per node (full): 432736 +Grid : Message : Average mflops/s per call per node (full): 445519 +Grid : Message : Average mflops/s per call per node (full): 301745 +Grid : Message : Stencil 13.1905 GB/s per node +Grid : Message : Stencil 17.9314 GB/s per node +Grid : Message : Stencil 18.1212 GB/s per node +Grid : Message : Stencil 13.4665 GB/s per node +Grid : Message : Average mflops/s per call per node : 667469 +Grid : Message : Average mflops/s per call per node : 799710 +Grid : Message : Average mflops/s per call per node : 823445 +Grid : Message : Average mflops/s per call per node : 662119 +Grid : Message : Average mflops/s per call per node (full): 315512 +Grid : Message : Average mflops/s per call per node (full): 440524 +Grid : Message : Average mflops/s per call per node (full): 446878 +Grid : Message : Average mflops/s per call per node (full): 306150 +Grid : Message : Stencil 12.7929 GB/s per node +Grid : Message : Stencil 9.08547 GB/s per node +Grid : Message : Stencil 18.3256 GB/s per node +Grid : Message : Stencil 14.0092 GB/s per node +Grid : Message : Average mflops/s per call per node : 669543 +Grid : Message : Average mflops/s per call per node : 807014 +Grid : Message : Average mflops/s per call per node : 818948 +Grid : Message : Average mflops/s per call per node : 658341 +Grid : Message : Average mflops/s per call per node (full): 314512 +Grid : Message : Average mflops/s per call per node (full): 294179 +Grid : Message : Average mflops/s per call per node (full): 446968 +Grid : Message : Average mflops/s per call per node (full): 306159 +Grid : Message : Stencil 13.6148 GB/s per node +Grid : Message : Stencil 16.5713 GB/s per node +Grid : Message : Stencil 17.8983 GB/s per node +Grid : Message : Stencil 12.3309 GB/s per node +Grid : Message : Average mflops/s per call per node : 666637 +Grid : Message : Average mflops/s per call per node : 806221 +Grid : Message : Average mflops/s per call per node : 819564 +Grid : Message : Average mflops/s per call per node : 668249 +Grid : Message : Average mflops/s per call per node (full): 313595 +Grid : Message : Average mflops/s per call per node (full): 436378 +Grid : Message : Average mflops/s per call per node (full): 440557 +Grid : Message : Average mflops/s per call per node (full): 301593 +Grid : Message : Stencil 12.452 GB/s per node +Grid : Message : Stencil 17.205 GB/s per node +Grid : Message : Stencil 17.6185 GB/s per node +Grid : Message : Stencil 12.9366 GB/s per node +Grid : Message : Average mflops/s per call per node : 673064 +Grid : Message : Average mflops/s per call per node : 800476 +Grid : Message : Average mflops/s per call per node : 821564 +Grid : Message : Average mflops/s per call per node : 661854 +Grid : Message : Average mflops/s per call per node (full): 313548 +Grid : Message : Average mflops/s per call per node (full): 434248 +Grid : Message : Average mflops/s per call per node (full): 445327 +Grid : Message : Average mflops/s per call per node (full): 304364 +Grid : Message : Stencil 13.0521 GB/s per node +Grid : Message : Stencil 16.7585 GB/s per node +Grid : Message : Stencil 17.2553 GB/s per node +Grid : Message : Stencil 13.3428 GB/s per node +Grid : Message : Average mflops/s per call per node : 663655 +Grid : Message : 
Average mflops/s per call per node : 806512 +Grid : Message : Average mflops/s per call per node : 819044 +Grid : Message : Average mflops/s per call per node : 661744 +Grid : Message : Average mflops/s per call per node (full): 314477 +Grid : Message : Average mflops/s per call per node (full): 436994 +Grid : Message : Average mflops/s per call per node (full): 443491 +Grid : Message : Average mflops/s per call per node (full): 305860 +Grid : Message : Stencil 14.1847 GB/s per node +Grid : Message : Stencil 12.4488 GB/s per node +Grid : Message : Stencil 17.0334 GB/s per node +Grid : Message : Stencil 12.6692 GB/s per node +Grid : Message : Average mflops/s per call per node : 666023 +Grid : Message : Average mflops/s per call per node : 808708 +Grid : Message : Average mflops/s per call per node : 819004 +Grid : Message : Average mflops/s per call per node : 666128 +Grid : Message : Average mflops/s per call per node (full): 317309 +Grid : Message : Average mflops/s per call per node (full): 370478 +Grid : Message : Average mflops/s per call per node (full): 438950 +Grid : Message : Average mflops/s per call per node (full): 304976 +Grid : Message : Stencil 13.2247 GB/s per node +Grid : Message : Stencil 9.3349 GB/s per node +Grid : Message : Stencil 17.4661 GB/s per node +Grid : Message : Stencil 12.2981 GB/s per node +Grid : Message : Average mflops/s per call per node : 664947 +Grid : Message : Average mflops/s per call per node : 812408 +Grid : Message : Average mflops/s per call per node : 816737 +Grid : Message : Average mflops/s per call per node : 668435 +Grid : Message : Average mflops/s per call per node (full): 315500 +Grid : Message : Average mflops/s per call per node (full): 300518 +Grid : Message : Average mflops/s per call per node (full): 443404 +Grid : Message : Average mflops/s per call per node (full): 303863 +Grid : Message : Stencil 13.7203 GB/s per node +Grid : Message : Stencil 17.2069 GB/s per node +Grid : Message : Stencil 17.7459 GB/s per node +Grid : Message : Stencil 12.5509 GB/s per node +Grid : Message : Average mflops/s per call per node : 663900 +Grid : Message : Average mflops/s per call per node : 799419 +Grid : Message : Average mflops/s per call per node : 821869 +Grid : Message : Average mflops/s per call per node : 666586 +Grid : Message : Average mflops/s per call per node (full): 315915 +Grid : Message : Average mflops/s per call per node (full): 437670 +Grid : Message : Average mflops/s per call per node (full): 445017 +Grid : Message : Average mflops/s per call per node (full): 302862 +Grid : Message : Stencil 13.4093 GB/s per node +Grid : Message : Stencil 16.9177 GB/s per node +Grid : Message : Stencil 17.0182 GB/s per node +Grid : Message : Stencil 12.9735 GB/s per node +Grid : Message : Average mflops/s per call per node : 667387 +Grid : Message : Average mflops/s per call per node : 805937 +Grid : Message : Average mflops/s per call per node : 820998 +Grid : Message : Average mflops/s per call per node : 663590 +Grid : Message : Average mflops/s per call per node (full): 316559 +Grid : Message : Average mflops/s per call per node (full): 437874 +Grid : Message : Average mflops/s per call per node (full): 441596 +Grid : Message : Average mflops/s per call per node (full): 305372 +Grid : Message : Stencil 12.5414 GB/s per node +Grid : Message : Stencil 13.4095 GB/s per node +Grid : Message : Stencil 17.9259 GB/s per node +Grid : Message : Stencil 14.453 GB/s per node +Grid : Message : Average mflops/s per call per node : 671866 +Grid : 
Message : Average mflops/s per call per node : 808558 +Grid : Message : Average mflops/s per call per node : 816009 +Grid : Message : Average mflops/s per call per node : 662280 +Grid : Message : Average mflops/s per call per node (full): 313778 +Grid : Message : Average mflops/s per call per node (full): 390067 +Grid : Message : Average mflops/s per call per node (full): 445621 +Grid : Message : Average mflops/s per call per node (full): 307685 +Grid : Message : Stencil 13.0586 GB/s per node +Grid : Message : Stencil 17.8407 GB/s per node +Grid : Message : Stencil 17.6383 GB/s per node +Grid : Message : Stencil 12.424 GB/s per node +Grid : Message : Average mflops/s per call per node : 665049 +Grid : Message : Average mflops/s per call per node : 802252 +Grid : Message : Average mflops/s per call per node : 818440 +Grid : Message : Average mflops/s per call per node : 664308 +Grid : Message : Average mflops/s per call per node (full): 314421 +Grid : Message : Average mflops/s per call per node (full): 439421 +Grid : Message : Average mflops/s per call per node (full): 444839 +Grid : Message : Average mflops/s per call per node (full): 301055 +Grid : Message : Stencil 13.8105 GB/s per node +Grid : Message : Stencil 16.9644 GB/s per node +Grid : Message : Stencil 17.9834 GB/s per node +Grid : Message : Stencil 12.6671 GB/s per node +Grid : Message : Average mflops/s per call per node : 664235 +Grid : Message : Average mflops/s per call per node : 807501 +Grid : Message : Average mflops/s per call per node : 817287 +Grid : Message : Average mflops/s per call per node : 662850 +Grid : Message : Average mflops/s per call per node (full): 315857 +Grid : Message : Average mflops/s per call per node (full): 438824 +Grid : Message : Average mflops/s per call per node (full): 446259 +Grid : Message : Average mflops/s per call per node (full): 303882 +Grid : Message : Stencil 14.374 GB/s per node +Grid : Message : Stencil 16.8686 GB/s per node +Grid : Message : Stencil 18.0065 GB/s per node +Grid : Message : Stencil 12.4734 GB/s per node +Grid : Message : Average mflops/s per call per node : 664446 +Grid : Message : Average mflops/s per call per node : 804749 +Grid : Message : Average mflops/s per call per node : 815605 +Grid : Message : Average mflops/s per call per node : 669340 +Grid : Message : Average mflops/s per call per node (full): 316594 +Grid : Message : Average mflops/s per call per node (full): 436341 +Grid : Message : Average mflops/s per call per node (full): 446244 +Grid : Message : Average mflops/s per call per node (full): 304947 +Grid : Message : Stencil 14.0086 GB/s per node +Grid : Message : Stencil 14.369 GB/s per node +Grid : Message : Stencil 17.0426 GB/s per node +Grid : Message : Stencil 14.3542 GB/s per node +Grid : Message : Average mflops/s per call per node : 666933 +Grid : Message : Average mflops/s per call per node : 807542 +Grid : Message : Average mflops/s per call per node : 821032 +Grid : Message : Average mflops/s per call per node : 660200 +Grid : Message : Average mflops/s per call per node (full): 317049 +Grid : Message : Average mflops/s per call per node (full): 405986 +Grid : Message : Average mflops/s per call per node (full): 440205 +Grid : Message : Average mflops/s per call per node (full): 306721 +Grid : Message : Stencil 13.74 GB/s per node +Grid : Message : Stencil 16.4136 GB/s per node +Grid : Message : Stencil 17.747 GB/s per node +Grid : Message : Stencil 13.0566 GB/s per node +Grid : Message : Average mflops/s per call per node : 667117 +Grid : 
Message : Average mflops/s per call per node : 799704 +Grid : Message : Average mflops/s per call per node : 825934 +Grid : Message : Average mflops/s per call per node : 666386 +Grid : Message : Average mflops/s per call per node (full): 316527 +Grid : Message : Average mflops/s per call per node (full): 431300 +Grid : Message : Average mflops/s per call per node (full): 445056 +Grid : Message : Average mflops/s per call per node (full): 305967 +Grid : Message : Stencil 15.0414 GB/s per node +Grid : Message : Stencil 16.3805 GB/s per node +Grid : Message : Stencil 17.7157 GB/s per node +Grid : Message : Stencil 11.8524 GB/s per node +Grid : Message : Average mflops/s per call per node : 662854 +Grid : Message : Average mflops/s per call per node : 803063 +Grid : Message : Average mflops/s per call per node : 819147 +Grid : Message : Average mflops/s per call per node : 668262 +Grid : Message : Average mflops/s per call per node (full): 316698 +Grid : Message : Average mflops/s per call per node (full): 425852 +Grid : Message : Average mflops/s per call per node (full): 446307 +Grid : Message : Average mflops/s per call per node (full): 299278 +Grid : Message : Stencil 14.2677 GB/s per node +Grid : Message : Stencil 17.8884 GB/s per node +Grid : Message : Stencil 18.4766 GB/s per node +Grid : Message : Stencil 12.2529 GB/s per node +Grid : Message : Average mflops/s per call per node : 661618 +Grid : Message : Average mflops/s per call per node : 803210 +Grid : Message : Average mflops/s per call per node : 819503 +Grid : Message : Average mflops/s per call per node : 665029 +Grid : Message : Average mflops/s per call per node (full): 314492 +Grid : Message : Average mflops/s per call per node (full): 439913 +Grid : Message : Average mflops/s per call per node (full): 447901 +Grid : Message : Average mflops/s per call per node (full): 303560 +Grid : Message : Stencil 15.4062 GB/s per node +Grid : Message : Stencil 14.2115 GB/s per node +Grid : Message : Stencil 17.3118 GB/s per node +Grid : Message : Stencil 12.8169 GB/s per node +Grid : Message : Average mflops/s per call per node : 660690 +Grid : Message : Average mflops/s per call per node : 806604 +Grid : Message : Average mflops/s per call per node : 825682 +Grid : Message : Average mflops/s per call per node : 663405 +Grid : Message : Average mflops/s per call per node (full): 317080 +Grid : Message : Average mflops/s per call per node (full): 403412 +Grid : Message : Average mflops/s per call per node (full): 444843 +Grid : Message : Average mflops/s per call per node (full): 305059 +Grid : Message : Stencil 13.2666 GB/s per node +Grid : Message : Stencil 18.5442 GB/s per node +Grid : Message : Stencil 18.4985 GB/s per node +Grid : Message : Stencil 14.6172 GB/s per node +Grid : Message : Average mflops/s per call per node : 666265 +Grid : Message : Average mflops/s per call per node : 799181 +Grid : Message : Average mflops/s per call per node : 815491 +Grid : Message : Average mflops/s per call per node : 657783 +Grid : Message : Average mflops/s per call per node (full): 315400 +Grid : Message : Average mflops/s per call per node (full): 440243 +Grid : Message : Average mflops/s per call per node (full): 444845 +Grid : Message : Average mflops/s per call per node (full): 307117 +Grid : Message : Stencil 13.0131 GB/s per node +Grid : Message : Stencil 17.2194 GB/s per node +Grid : Message : Stencil 17.1459 GB/s per node +Grid : Message : Stencil 12.3687 GB/s per node +Grid : Message : Average mflops/s per call per node : 670289 
+Grid : Message : Average mflops/s per call per node : 808556 +Grid : Message : Average mflops/s per call per node : 823513 +Grid : Message : Average mflops/s per call per node : 665368 +Grid : Message : Average mflops/s per call per node (full): 314349 +Grid : Message : Average mflops/s per call per node (full): 440568 +Grid : Message : Average mflops/s per call per node (full): 441278 +Grid : Message : Average mflops/s per call per node (full): 303793 +Grid : Message : Stencil 12.5975 GB/s per node +Grid : Message : Stencil 17.4383 GB/s per node +Grid : Message : Stencil 17.8292 GB/s per node +Grid : Message : Stencil 12.6179 GB/s per node +Grid : Message : Average mflops/s per call per node : 671699 +Grid : Message : Average mflops/s per call per node : 808745 +Grid : Message : Average mflops/s per call per node : 825012 +Grid : Message : Average mflops/s per call per node : 667837 +Grid : Message : Average mflops/s per call per node (full): 314785 +Grid : Message : Average mflops/s per call per node (full): 441635 +Grid : Message : Average mflops/s per call per node (full): 445869 +Grid : Message : Average mflops/s per call per node (full): 305078 +Grid : Message : Stencil 13.4828 GB/s per node +Grid : Message : Stencil 17.391 GB/s per node +Grid : Message : Stencil 17.5985 GB/s per node +Grid : Message : Stencil 13.0579 GB/s per node +Grid : Message : Average mflops/s per call per node : 670305 +Grid : Message : Average mflops/s per call per node : 800124 +Grid : Message : Average mflops/s per call per node : 821717 +Grid : Message : Average mflops/s per call per node : 665973 +Grid : Message : Average mflops/s per call per node (full): 316858 +Grid : Message : Average mflops/s per call per node (full): 436514 +Grid : Message : Average mflops/s per call per node (full): 445506 +Grid : Message : Average mflops/s per call per node (full): 305669 +Grid : Message : Stencil 13.8375 GB/s per node +Grid : Message : Stencil 16.8669 GB/s per node +Grid : Message : Stencil 17.1294 GB/s per node +Grid : Message : Stencil 13.6601 GB/s per node +Grid : Message : Average mflops/s per call per node : 670173 +Grid : Message : Average mflops/s per call per node : 802703 +Grid : Message : Average mflops/s per call per node : 826415 +Grid : Message : Average mflops/s per call per node : 665591 +Grid : Message : Average mflops/s per call per node (full): 316417 +Grid : Message : Average mflops/s per call per node (full): 436306 +Grid : Message : Average mflops/s per call per node (full): 432316 +Grid : Message : Average mflops/s per call per node (full): 306859 +Grid : Message : Stencil 13.1054 GB/s per node +Grid : Message : Stencil 16.5859 GB/s per node +Grid : Message : Stencil 18.0068 GB/s per node +Grid : Message : Stencil 13.3095 GB/s per node +Grid : Message : Average mflops/s per call per node : 669786 +Grid : Message : Average mflops/s per call per node : 804012 +Grid : Message : Average mflops/s per call per node : 815813 +Grid : Message : Average mflops/s per call per node : 664266 +Grid : Message : Average mflops/s per call per node (full): 315101 +Grid : Message : Average mflops/s per call per node (full): 434858 +Grid : Message : Average mflops/s per call per node (full): 444859 +Grid : Message : Average mflops/s per call per node (full): 306065 +Grid : Message : Stencil 12.876 GB/s per node +Grid : Message : Stencil 17.3527 GB/s per node +Grid : Message : Stencil 16.3377 GB/s per node +Grid : Message : Stencil 12.7555 GB/s per node +Grid : Message : Average mflops/s per call per node : 
670670 +Grid : Message : Average mflops/s per call per node : 803765 +Grid : Message : Average mflops/s per call per node : 823899 +Grid : Message : Average mflops/s per call per node : 660135 +Grid : Message : Average mflops/s per call per node (full): 315159 +Grid : Message : Average mflops/s per call per node (full): 438142 +Grid : Message : Average mflops/s per call per node (full): 422732 +Grid : Message : Average mflops/s per call per node (full): 303690 +Grid : Message : Stencil 14.5704 GB/s per node +Grid : Message : Stencil 12.0734 GB/s per node +Grid : Message : Stencil 18.0296 GB/s per node +Grid : Message : Stencil 12.6776 GB/s per node +Grid : Message : Average mflops/s per call per node : 666129 +Grid : Message : Average mflops/s per call per node : 803061 +Grid : Message : Average mflops/s per call per node : 814283 +Grid : Message : Average mflops/s per call per node : 657234 +Grid : Message : Average mflops/s per call per node (full): 316864 +Grid : Message : Average mflops/s per call per node (full): 362979 +Grid : Message : Average mflops/s per call per node (full): 444805 +Grid : Message : Average mflops/s per call per node (full): 302650 +Grid : Message : Stencil 13.5439 GB/s per node +Grid : Message : Stencil 16.9154 GB/s per node +Grid : Message : Stencil 20.0573 GB/s per node +Grid : Message : Stencil 13.6491 GB/s per node +Grid : Message : Average mflops/s per call per node : 666737 +Grid : Message : Average mflops/s per call per node : 796338 +Grid : Message : Average mflops/s per call per node : 816756 +Grid : Message : Average mflops/s per call per node : 663693 +Grid : Message : Average mflops/s per call per node (full): 316049 +Grid : Message : Average mflops/s per call per node (full): 434567 +Grid : Message : Average mflops/s per call per node (full): 449696 +Grid : Message : Average mflops/s per call per node (full): 306704 +Grid : Message : Stencil 12.5183 GB/s per node +Grid : Message : Stencil 16.5827 GB/s per node +Grid : Message : Stencil 18.559 GB/s per node +Grid : Message : Stencil 14.6143 GB/s per node +Grid : Message : Average mflops/s per call per node : 669246 +Grid : Message : Average mflops/s per call per node : 804736 +Grid : Message : Average mflops/s per call per node : 813387 +Grid : Message : Average mflops/s per call per node : 663006 +Grid : Message : Average mflops/s per call per node (full): 313893 +Grid : Message : Average mflops/s per call per node (full): 435101 +Grid : Message : Average mflops/s per call per node (full): 445623 +Grid : Message : Average mflops/s per call per node (full): 307351 +Grid : Message : Stencil 12.4063 GB/s per node +Grid : Message : Stencil 16.7085 GB/s per node +Grid : Message : Stencil 17.7427 GB/s per node +Grid : Message : Stencil 14.278 GB/s per node +Grid : Message : Average mflops/s per call per node : 667017 +Grid : Message : Average mflops/s per call per node : 806776 +Grid : Message : Average mflops/s per call per node : 818848 +Grid : Message : Average mflops/s per call per node : 663672 +Grid : Message : Average mflops/s per call per node (full): 309893 +Grid : Message : Average mflops/s per call per node (full): 437230 +Grid : Message : Average mflops/s per call per node (full): 443248 +Grid : Message : Average mflops/s per call per node (full): 307578 +Grid : Message : Stencil 12.4697 GB/s per node +Grid : Message : Stencil 17.7348 GB/s per node +Grid : Message : Stencil 17.8787 GB/s per node +Grid : Message : Stencil 12.386 GB/s per node +Grid : Message : Average mflops/s per call per node 
: 668605 +Grid : Message : Average mflops/s per call per node : 803182 +Grid : Message : Average mflops/s per call per node : 821161 +Grid : Message : Average mflops/s per call per node : 666723 +Grid : Message : Average mflops/s per call per node (full): 312663 +Grid : Message : Average mflops/s per call per node (full): 438756 +Grid : Message : Average mflops/s per call per node (full): 445401 +Grid : Message : Average mflops/s per call per node (full): 303558 +Grid : Message : Stencil 13.4515 GB/s per node +Grid : Message : Stencil 11.3553 GB/s per node +Grid : Message : Stencil 18.5098 GB/s per node +Grid : Message : Stencil 13.0622 GB/s per node +Grid : Message : Average mflops/s per call per node : 663087 +Grid : Message : Average mflops/s per call per node : 812456 +Grid : Message : Average mflops/s per call per node : 814407 +Grid : Message : Average mflops/s per call per node : 665146 +Grid : Message : Average mflops/s per call per node (full): 316157 +Grid : Message : Average mflops/s per call per node (full): 347578 +Grid : Message : Average mflops/s per call per node (full): 444978 +Grid : Message : Average mflops/s per call per node (full): 305370 +Grid : Message : Stencil 13.2173 GB/s per node +Grid : Message : Stencil 16.2429 GB/s per node +Grid : Message : Stencil 18.5115 GB/s per node +Grid : Message : Stencil 12.5288 GB/s per node +Grid : Message : Average mflops/s per call per node : 668092 +Grid : Message : Average mflops/s per call per node : 805973 +Grid : Message : Average mflops/s per call per node : 824247 +Grid : Message : Average mflops/s per call per node : 669384 +Grid : Message : Average mflops/s per call per node (full): 315328 +Grid : Message : Average mflops/s per call per node (full): 430969 +Grid : Message : Average mflops/s per call per node (full): 443273 +Grid : Message : Average mflops/s per call per node (full): 304709 +Grid : Message : Stencil 13.1158 GB/s per node +Grid : Message : Stencil 16.8212 GB/s per node +Grid : Message : Stencil 17.6751 GB/s per node +Grid : Message : Stencil 12.5535 GB/s per node +Grid : Message : Average mflops/s per call per node : 664131 +Grid : Message : Average mflops/s per call per node : 804083 +Grid : Message : Average mflops/s per call per node : 824802 +Grid : Message : Average mflops/s per call per node : 664059 +Grid : Message : Average mflops/s per call per node (full): 315125 +Grid : Message : Average mflops/s per call per node (full): 431866 +Grid : Message : Average mflops/s per call per node (full): 445963 +Grid : Message : Average mflops/s per call per node (full): 304673 +Grid : Message : Stencil 14.1054 GB/s per node +Grid : Message : Stencil 17.0876 GB/s per node +Grid : Message : Stencil 16.244 GB/s per node +Grid : Message : Stencil 12.5805 GB/s per node +Grid : Message : Average mflops/s per call per node : 663063 +Grid : Message : Average mflops/s per call per node : 800649 +Grid : Message : Average mflops/s per call per node : 832698 +Grid : Message : Average mflops/s per call per node : 668734 +Grid : Message : Average mflops/s per call per node (full): 316797 +Grid : Message : Average mflops/s per call per node (full): 438163 +Grid : Message : Average mflops/s per call per node (full): 421935 +Grid : Message : Average mflops/s per call per node (full): 303610 +Grid : Message : Stencil 12.8483 GB/s per node +Grid : Message : Stencil 17.087 GB/s per node +Grid : Message : Stencil 17.8021 GB/s per node +Grid : Message : Stencil 14.3434 GB/s per node +Grid : Message : Average mflops/s per call per 
node : 666039 +Grid : Message : Average mflops/s per call per node : 800967 +Grid : Message : Average mflops/s per call per node : 826722 +Grid : Message : Average mflops/s per call per node : 660834 +Grid : Message : Average mflops/s per call per node (full): 315401 +Grid : Message : Average mflops/s per call per node (full): 436993 +Grid : Message : Average mflops/s per call per node (full): 446784 +Grid : Message : Average mflops/s per call per node (full): 306784 +Grid : Message : Stencil 12.7988 GB/s per node +Grid : Message : Stencil 16.4221 GB/s per node +Grid : Message : Stencil 17.3057 GB/s per node +Grid : Message : Stencil 14.4481 GB/s per node +Grid : Message : Average mflops/s per call per node : 668754 +Grid : Message : Average mflops/s per call per node : 799936 +Grid : Message : Average mflops/s per call per node : 824533 +Grid : Message : Average mflops/s per call per node : 661878 +Grid : Message : Average mflops/s per call per node (full): 315340 +Grid : Message : Average mflops/s per call per node (full): 433837 +Grid : Message : Average mflops/s per call per node (full): 445578 +Grid : Message : Average mflops/s per call per node (full): 306850 +Grid : Message : Stencil 14.4045 GB/s per node +Grid : Message : Stencil 16.6417 GB/s per node +Grid : Message : Stencil 17.9008 GB/s per node +Grid : Message : Stencil 13.8014 GB/s per node +Grid : Message : Average mflops/s per call per node : 662777 +Grid : Message : Average mflops/s per call per node : 802338 +Grid : Message : Average mflops/s per call per node : 820027 +Grid : Message : Average mflops/s per call per node : 664195 +Grid : Message : Average mflops/s per call per node (full): 316459 +Grid : Message : Average mflops/s per call per node (full): 437409 +Grid : Message : Average mflops/s per call per node (full): 445966 +Grid : Message : Average mflops/s per call per node (full): 306817 +Grid : Message : Stencil 13.1521 GB/s per node +Grid : Message : Stencil 17.1275 GB/s per node +Grid : Message : Stencil 17.9507 GB/s per node +Grid : Message : Stencil 12.6391 GB/s per node +Grid : Message : Average mflops/s per call per node : 668265 +Grid : Message : Average mflops/s per call per node : 808937 +Grid : Message : Average mflops/s per call per node : 820979 +Grid : Message : Average mflops/s per call per node : 662241 +Grid : Message : Average mflops/s per call per node (full): 316102 +Grid : Message : Average mflops/s per call per node (full): 435257 +Grid : Message : Average mflops/s per call per node (full): 445397 +Grid : Message : Average mflops/s per call per node (full): 303644 +Grid : Message : Stencil 12.8526 GB/s per node +Grid : Message : Stencil 16.7457 GB/s per node +Grid : Message : Stencil 17.2491 GB/s per node +Grid : Message : Stencil 13.7785 GB/s per node +Grid : Message : Average mflops/s per call per node : 666915 +Grid : Message : Average mflops/s per call per node : 807411 +Grid : Message : Average mflops/s per call per node : 822023 +Grid : Message : Average mflops/s per call per node : 656054 +Grid : Message : Average mflops/s per call per node (full): 314974 +Grid : Message : Average mflops/s per call per node (full): 437865 +Grid : Message : Average mflops/s per call per node (full): 443123 +Grid : Message : Average mflops/s per call per node (full): 305622 +Grid : Message : Stencil 13.0399 GB/s per node +Grid : Message : Stencil 17.3983 GB/s per node +Grid : Message : Stencil 17.8769 GB/s per node +Grid : Message : Stencil 12.7115 GB/s per node +Grid : Message : Average mflops/s per 
call per node : 669243 +Grid : Message : Average mflops/s per call per node : 804714 +Grid : Message : Average mflops/s per call per node : 821109 +Grid : Message : Average mflops/s per call per node : 665715 +Grid : Message : Average mflops/s per call per node (full): 315930 +Grid : Message : Average mflops/s per call per node (full): 439006 +Grid : Message : Average mflops/s per call per node (full): 445645 +Grid : Message : Average mflops/s per call per node (full): 304465 +Grid : Message : Stencil 13.0748 GB/s per node +Grid : Message : Stencil 17.0022 GB/s per node +Grid : Message : Stencil 17.9339 GB/s per node +Grid : Message : Stencil 13.2873 GB/s per node +Grid : Message : Average mflops/s per call per node : 670384 +Grid : Message : Average mflops/s per call per node : 803062 +Grid : Message : Average mflops/s per call per node : 821719 +Grid : Message : Average mflops/s per call per node : 664236 +Grid : Message : Average mflops/s per call per node (full): 315475 +Grid : Message : Average mflops/s per call per node (full): 437462 +Grid : Message : Average mflops/s per call per node (full): 446076 +Grid : Message : Average mflops/s per call per node (full): 304854 +Grid : Message : Stencil 13.3451 GB/s per node +Grid : Message : Stencil 16.987 GB/s per node +Grid : Message : Stencil 17.4433 GB/s per node +Grid : Message : Stencil 13.2438 GB/s per node +Grid : Message : Average mflops/s per call per node : 668057 +Grid : Message : Average mflops/s per call per node : 807339 +Grid : Message : Average mflops/s per call per node : 823540 +Grid : Message : Average mflops/s per call per node : 662882 +Grid : Message : Average mflops/s per call per node (full): 313029 +Grid : Message : Average mflops/s per call per node (full): 437000 +Grid : Message : Average mflops/s per call per node (full): 444704 +Grid : Message : Average mflops/s per call per node (full): 305305 +Grid : Message : Stencil 13.985 GB/s per node +Grid : Message : Stencil 10.8433 GB/s per node +Grid : Message : Stencil 18.4694 GB/s per node +Grid : Message : Stencil 12.2394 GB/s per node +Grid : Message : Average mflops/s per call per node : 660780 +Grid : Message : Average mflops/s per call per node : 811878 +Grid : Message : Average mflops/s per call per node : 819535 +Grid : Message : Average mflops/s per call per node : 663962 +Grid : Message : Average mflops/s per call per node (full): 315398 +Grid : Message : Average mflops/s per call per node (full): 335978 +Grid : Message : Average mflops/s per call per node (full): 447079 +Grid : Message : Average mflops/s per call per node (full): 299551 +Grid : Message : Stencil 12.7344 GB/s per node +Grid : Message : Stencil 16.7795 GB/s per node +Grid : Message : Stencil 17.9974 GB/s per node +Grid : Message : Stencil 12.3923 GB/s per node +Grid : Message : Average mflops/s per call per node : 666265 +Grid : Message : Average mflops/s per call per node : 806199 +Grid : Message : Average mflops/s per call per node : 821853 +Grid : Message : Average mflops/s per call per node : 665136 +Grid : Message : Average mflops/s per call per node (full): 314563 +Grid : Message : Average mflops/s per call per node (full): 437706 +Grid : Message : Average mflops/s per call per node (full): 445625 +Grid : Message : Average mflops/s per call per node (full): 304797 +Grid : Message : Stencil 13.5227 GB/s per node +Grid : Message : Stencil 9.9904 GB/s per node +Grid : Message : Stencil 18.1087 GB/s per node +Grid : Message : Stencil 13.3208 GB/s per node +Grid : Message : Average mflops/s 
per call per node : 665052 +Grid : Message : Average mflops/s per call per node : 806549 +Grid : Message : Average mflops/s per call per node : 820800 +Grid : Message : Average mflops/s per call per node : 668015 +Grid : Message : Average mflops/s per call per node (full): 314606 +Grid : Message : Average mflops/s per call per node (full): 316705 +Grid : Message : Average mflops/s per call per node (full): 446817 +Grid : Message : Average mflops/s per call per node (full): 306683 +Grid : Message : Stencil 14.0079 GB/s per node +Grid : Message : Stencil 10.2811 GB/s per node +Grid : Message : Stencil 17.4514 GB/s per node +Grid : Message : Stencil 12.1589 GB/s per node +Grid : Message : Average mflops/s per call per node : 664551 +Grid : Message : Average mflops/s per call per node : 807886 +Grid : Message : Average mflops/s per call per node : 823665 +Grid : Message : Average mflops/s per call per node : 668830 +Grid : Message : Average mflops/s per call per node (full): 315811 +Grid : Message : Average mflops/s per call per node (full): 322821 +Grid : Message : Average mflops/s per call per node (full): 444862 +Grid : Message : Average mflops/s per call per node (full): 303960 +Grid : Message : Stencil 12.9543 GB/s per node +Grid : Message : Stencil 17.2676 GB/s per node +Grid : Message : Stencil 17.5014 GB/s per node +Grid : Message : Stencil 12.4183 GB/s per node +Grid : Message : Average mflops/s per call per node : 666039 +Grid : Message : Average mflops/s per call per node : 801726 +Grid : Message : Average mflops/s per call per node : 818973 +Grid : Message : Average mflops/s per call per node : 668015 +Grid : Message : Average mflops/s per call per node (full): 312869 +Grid : Message : Average mflops/s per call per node (full): 434229 +Grid : Message : Average mflops/s per call per node (full): 444635 +Grid : Message : Average mflops/s per call per node (full): 304935 +Grid : Message : Stencil 12.5383 GB/s per node +Grid : Message : Stencil 14.5256 GB/s per node +Grid : Message : Stencil 18.1822 GB/s per node +Grid : Message : Stencil 12.5818 GB/s per node +Grid : Message : Average mflops/s per call per node : 669210 +Grid : Message : Average mflops/s per call per node : 807821 +Grid : Message : Average mflops/s per call per node : 822054 +Grid : Message : Average mflops/s per call per node : 664072 +Grid : Message : Average mflops/s per call per node (full): 313947 +Grid : Message : Average mflops/s per call per node (full): 408152 +Grid : Message : Average mflops/s per call per node (full): 446600 +Grid : Message : Average mflops/s per call per node (full): 304352 +Grid : Message : Stencil 13.144 GB/s per node +Grid : Message : Stencil 17.0589 GB/s per node +Grid : Message : Stencil 17.9078 GB/s per node +Grid : Message : Stencil 12.6028 GB/s per node +Grid : Message : Average mflops/s per call per node : 663790 +Grid : Message : Average mflops/s per call per node : 801031 +Grid : Message : Average mflops/s per call per node : 826611 +Grid : Message : Average mflops/s per call per node : 668085 +Grid : Message : Average mflops/s per call per node (full): 314625 +Grid : Message : Average mflops/s per call per node (full): 435639 +Grid : Message : Average mflops/s per call per node (full): 447655 +Grid : Message : Average mflops/s per call per node (full): 304941 +Grid : Message : Stencil 13.2036 GB/s per node +Grid : Message : Stencil 16.4167 GB/s per node +Grid : Message : Stencil 18.1195 GB/s per node +Grid : Message : Stencil 12.1919 GB/s per node +Grid : Message : Average 
mflops/s per call per node : 668091 +Grid : Message : Average mflops/s per call per node : 804708 +Grid : Message : Average mflops/s per call per node : 826018 +Grid : Message : Average mflops/s per call per node : 665139 +Grid : Message : Average mflops/s per call per node (full): 316459 +Grid : Message : Average mflops/s per call per node (full): 432613 +Grid : Message : Average mflops/s per call per node (full): 447761 +Grid : Message : Average mflops/s per call per node (full): 303228 +Grid : Message : Stencil 12.9496 GB/s per node +Grid : Message : Stencil 8.04132 GB/s per node +Grid : Message : Stencil 18.2398 GB/s per node +Grid : Message : Stencil 12.7744 GB/s per node +Grid : Message : Average mflops/s per call per node : 666903 +Grid : Message : Average mflops/s per call per node : 812769 +Grid : Message : Average mflops/s per call per node : 812983 +Grid : Message : Average mflops/s per call per node : 666973 +Grid : Message : Average mflops/s per call per node (full): 312722 +Grid : Message : Average mflops/s per call per node (full): 267572 +Grid : Message : Average mflops/s per call per node (full): 445639 +Grid : Message : Average mflops/s per call per node (full): 305841 +Grid : Message : Stencil 12.3602 GB/s per node +Grid : Message : Stencil 16.9159 GB/s per node +Grid : Message : Stencil 18.6838 GB/s per node +Grid : Message : Stencil 12.9963 GB/s per node +Grid : Message : Average mflops/s per call per node : 669015 +Grid : Message : Average mflops/s per call per node : 807804 +Grid : Message : Average mflops/s per call per node : 824645 +Grid : Message : Average mflops/s per call per node : 665724 +Grid : Message : Average mflops/s per call per node (full): 312875 +Grid : Message : Average mflops/s per call per node (full): 432958 +Grid : Message : Average mflops/s per call per node (full): 450242 +Grid : Message : Average mflops/s per call per node (full): 305161 +Grid : Message : Stencil 13.4647 GB/s per node +Grid : Message : Stencil 16.3383 GB/s per node +Grid : Message : Stencil 17.0735 GB/s per node +Grid : Message : Stencil 12.7368 GB/s per node +Grid : Message : Average mflops/s per call per node : 668122 +Grid : Message : Average mflops/s per call per node : 803149 +Grid : Message : Average mflops/s per call per node : 825487 +Grid : Message : Average mflops/s per call per node : 667634 +Grid : Message : Average mflops/s per call per node (full): 315650 +Grid : Message : Average mflops/s per call per node (full): 432558 +Grid : Message : Average mflops/s per call per node (full): 441983 +Grid : Message : Average mflops/s per call per node (full): 305838 +Grid : Message : Stencil 13.4652 GB/s per node +Grid : Message : Stencil 16.738 GB/s per node +Grid : Message : Stencil 17.9649 GB/s per node +Grid : Message : Stencil 12.6534 GB/s per node +Grid : Message : Average mflops/s per call per node : 664658 +Grid : Message : Average mflops/s per call per node : 800925 +Grid : Message : Average mflops/s per call per node : 821484 +Grid : Message : Average mflops/s per call per node : 665831 +Grid : Message : Average mflops/s per call per node (full): 315984 +Grid : Message : Average mflops/s per call per node (full): 435073 +Grid : Message : Average mflops/s per call per node (full): 445590 +Grid : Message : Average mflops/s per call per node (full): 305339 +Grid : Message : Stencil 13.2929 GB/s per node +Grid : Message : Stencil 17.0294 GB/s per node +Grid : Message : Stencil 17.992 GB/s per node +Grid : Message : Stencil 12.0384 GB/s per node +Grid : Message : 
Average mflops/s per call per node : 665322 +Grid : Message : Average mflops/s per call per node : 806397 +Grid : Message : Average mflops/s per call per node : 817346 +Grid : Message : Average mflops/s per call per node : 671984 +Grid : Message : Average mflops/s per call per node (full): 316333 +Grid : Message : Average mflops/s per call per node (full): 438125 +Grid : Message : Average mflops/s per call per node (full): 444428 +Grid : Message : Average mflops/s per call per node (full): 302906 +Grid : Message : Stencil 12.8307 GB/s per node +Grid : Message : Stencil 17.5075 GB/s per node +Grid : Message : Stencil 17.4408 GB/s per node +Grid : Message : Stencil 13.0046 GB/s per node +Grid : Message : Average mflops/s per call per node : 672324 +Grid : Message : Average mflops/s per call per node : 798334 +Grid : Message : Average mflops/s per call per node : 817300 +Grid : Message : Average mflops/s per call per node : 664807 +Grid : Message : Average mflops/s per call per node (full): 316035 +Grid : Message : Average mflops/s per call per node (full): 435968 +Grid : Message : Average mflops/s per call per node (full): 439149 +Grid : Message : Average mflops/s per call per node (full): 306540 +Grid : Message : Stencil 12.664 GB/s per node +Grid : Message : Stencil 16.3831 GB/s per node +Grid : Message : Stencil 18.4677 GB/s per node +Grid : Message : Stencil 12.9777 GB/s per node +Grid : Message : Average mflops/s per call per node : 672242 +Grid : Message : Average mflops/s per call per node : 806043 +Grid : Message : Average mflops/s per call per node : 823586 +Grid : Message : Average mflops/s per call per node : 668304 +Grid : Message : Average mflops/s per call per node (full): 315131 +Grid : Message : Average mflops/s per call per node (full): 433011 +Grid : Message : Average mflops/s per call per node (full): 447910 +Grid : Message : Average mflops/s per call per node (full): 304572 +Grid : Message : Stencil 12.647 GB/s per node +Grid : Message : Stencil 17.7233 GB/s per node +Grid : Message : Stencil 19.045 GB/s per node +Grid : Message : Stencil 14.4043 GB/s per node +Grid : Message : Average mflops/s per call per node : 670068 +Grid : Message : Average mflops/s per call per node : 802508 +Grid : Message : Average mflops/s per call per node : 822255 +Grid : Message : Average mflops/s per call per node : 660874 +Grid : Message : Average mflops/s per call per node (full): 314382 +Grid : Message : Average mflops/s per call per node (full): 440019 +Grid : Message : Average mflops/s per call per node (full): 449501 +Grid : Message : Average mflops/s per call per node (full): 307181 +Grid : Message : Stencil 12.933 GB/s per node +Grid : Message : Stencil 16.7577 GB/s per node +Grid : Message : Stencil 17.9727 GB/s per node +Grid : Message : Stencil 13.1741 GB/s per node +Grid : Message : Average mflops/s per call per node : 667285 +Grid : Message : Average mflops/s per call per node : 807065 +Grid : Message : Average mflops/s per call per node : 816317 +Grid : Message : Average mflops/s per call per node : 660677 +Grid : Message : Average mflops/s per call per node (full): 313925 +Grid : Message : Average mflops/s per call per node (full): 437370 +Grid : Message : Average mflops/s per call per node (full): 445682 +Grid : Message : Average mflops/s per call per node (full): 304500 +Grid : Message : Stencil 12.8929 GB/s per node +Grid : Message : Stencil 16.5216 GB/s per node +Grid : Message : Stencil 17.6462 GB/s per node +Grid : Message : Stencil 11.6618 GB/s per node +Grid : Message 
+[Repeated benchmark output. Each iteration reports four "Stencil <bandwidth> GB/s per node"
+messages, then four "Average mflops/s per call per node" messages, then four
+"Average mflops/s per call per node (full)" messages. Observed ranges over this section
+of the run:
+  Stencil bandwidth per node                :  ~9.5 - 19.8 GB/s
+  Average mflops/s per call per node        :  ~655,000 - 831,000
+  Average mflops/s per call per node (full) :  ~288,000 - 450,000]
Stencil 9.16624 GB/s per node +Grid : Message : Stencil 17.3436 GB/s per node +Grid : Message : Stencil 12.7577 GB/s per node +Grid : Message : Average mflops/s per call per node : 663975 +Grid : Message : Average mflops/s per call per node : 815086 +Grid : Message : Average mflops/s per call per node : 819869 +Grid : Message : Average mflops/s per call per node : 661803 +Grid : Message : Average mflops/s per call per node (full): 316413 +Grid : Message : Average mflops/s per call per node (full): 296466 +Grid : Message : Average mflops/s per call per node (full): 443320 +Grid : Message : Average mflops/s per call per node (full): 305294 +Grid : Message : Stencil 13.1198 GB/s per node +Grid : Message : Stencil 16.541 GB/s per node +Grid : Message : Stencil 17.5719 GB/s per node +Grid : Message : Stencil 12.426 GB/s per node +Grid : Message : Average mflops/s per call per node : 666706 +Grid : Message : Average mflops/s per call per node : 806562 +Grid : Message : Average mflops/s per call per node : 827599 +Grid : Message : Average mflops/s per call per node : 671380 +Grid : Message : Average mflops/s per call per node (full): 314551 +Grid : Message : Average mflops/s per call per node (full): 435958 +Grid : Message : Average mflops/s per call per node (full): 447131 +Grid : Message : Average mflops/s per call per node (full): 305332 +Grid : Message : Stencil 14.0187 GB/s per node +Grid : Message : Stencil 16.7282 GB/s per node +Grid : Message : Stencil 17.5016 GB/s per node +Grid : Message : Stencil 12.3897 GB/s per node +Grid : Message : Average mflops/s per call per node : 665303 +Grid : Message : Average mflops/s per call per node : 803576 +Grid : Message : Average mflops/s per call per node : 820190 +Grid : Message : Average mflops/s per call per node : 662707 +Grid : Message : Average mflops/s per call per node (full): 316797 +Grid : Message : Average mflops/s per call per node (full): 436659 +Grid : Message : Average mflops/s per call per node (full): 444002 +Grid : Message : Average mflops/s per call per node (full): 303495 +Grid : Message : Stencil 12.9872 GB/s per node +Grid : Message : Stencil 10.7783 GB/s per node +Grid : Message : Stencil 17.6598 GB/s per node +Grid : Message : Stencil 12.1232 GB/s per node +Grid : Message : Average mflops/s per call per node : 666356 +Grid : Message : Average mflops/s per call per node : 808219 +Grid : Message : Average mflops/s per call per node : 821321 +Grid : Message : Average mflops/s per call per node : 667910 +Grid : Message : Average mflops/s per call per node (full): 315669 +Grid : Message : Average mflops/s per call per node (full): 332681 +Grid : Message : Average mflops/s per call per node (full): 442435 +Grid : Message : Average mflops/s per call per node (full): 302498 +Grid : Message : Stencil 12.3811 GB/s per node +Grid : Message : Stencil 17.0083 GB/s per node +Grid : Message : Stencil 17.8352 GB/s per node +Grid : Message : Stencil 13.3962 GB/s per node +Grid : Message : Average mflops/s per call per node : 669156 +Grid : Message : Average mflops/s per call per node : 805285 +Grid : Message : Average mflops/s per call per node : 818262 +Grid : Message : Average mflops/s per call per node : 657968 +Grid : Message : Average mflops/s per call per node (full): 312932 +Grid : Message : Average mflops/s per call per node (full): 438176 +Grid : Message : Average mflops/s per call per node (full): 437432 +Grid : Message : Average mflops/s per call per node (full): 304967 +Grid : Message : Stencil 12.6208 GB/s per node +Grid : 
Message : Stencil 16.5345 GB/s per node +Grid : Message : Stencil 17.303 GB/s per node +Grid : Message : Stencil 13.6195 GB/s per node +Grid : Message : Average mflops/s per call per node : 667075 +Grid : Message : Average mflops/s per call per node : 802960 +Grid : Message : Average mflops/s per call per node : 826809 +Grid : Message : Average mflops/s per call per node : 658842 +Grid : Message : Average mflops/s per call per node (full): 311157 +Grid : Message : Average mflops/s per call per node (full): 433094 +Grid : Message : Average mflops/s per call per node (full): 441505 +Grid : Message : Average mflops/s per call per node (full): 304660 +Grid : Message : Stencil 13.5426 GB/s per node +Grid : Message : Stencil 16.4742 GB/s per node +Grid : Message : Stencil 18.4648 GB/s per node +Grid : Message : Stencil 14.1341 GB/s per node +Grid : Message : Average mflops/s per call per node : 665545 +Grid : Message : Average mflops/s per call per node : 804158 +Grid : Message : Average mflops/s per call per node : 819437 +Grid : Message : Average mflops/s per call per node : 664279 +Grid : Message : Average mflops/s per call per node (full): 315607 +Grid : Message : Average mflops/s per call per node (full): 432159 +Grid : Message : Average mflops/s per call per node (full): 446260 +Grid : Message : Average mflops/s per call per node (full): 305848 +Grid : Message : Stencil 13.2587 GB/s per node +Grid : Message : Stencil 16.5012 GB/s per node +Grid : Message : Stencil 17.3578 GB/s per node +Grid : Message : Stencil 11.7879 GB/s per node +Grid : Message : Average mflops/s per call per node : 668955 +Grid : Message : Average mflops/s per call per node : 804300 +Grid : Message : Average mflops/s per call per node : 821209 +Grid : Message : Average mflops/s per call per node : 669854 +Grid : Message : Average mflops/s per call per node (full): 315721 +Grid : Message : Average mflops/s per call per node (full): 431274 +Grid : Message : Average mflops/s per call per node (full): 444321 +Grid : Message : Average mflops/s per call per node (full): 300279 +Grid : Message : Stencil 12.8008 GB/s per node +Grid : Message : Stencil 16.3146 GB/s per node +Grid : Message : Stencil 17.1493 GB/s per node +Grid : Message : Stencil 12.0482 GB/s per node +Grid : Message : Average mflops/s per call per node : 669211 +Grid : Message : Average mflops/s per call per node : 807401 +Grid : Message : Average mflops/s per call per node : 822916 +Grid : Message : Average mflops/s per call per node : 665423 +Grid : Message : Average mflops/s per call per node (full): 313918 +Grid : Message : Average mflops/s per call per node (full): 428195 +Grid : Message : Average mflops/s per call per node (full): 442564 +Grid : Message : Average mflops/s per call per node (full): 301389 +Grid : Message : Stencil 12.6949 GB/s per node +Grid : Message : Stencil 16.4159 GB/s per node +Grid : Message : Stencil 18.1188 GB/s per node +Grid : Message : Stencil 13.1025 GB/s per node +Grid : Message : Average mflops/s per call per node : 666446 +Grid : Message : Average mflops/s per call per node : 804369 +Grid : Message : Average mflops/s per call per node : 824741 +Grid : Message : Average mflops/s per call per node : 663836 +Grid : Message : Average mflops/s per call per node (full): 312030 +Grid : Message : Average mflops/s per call per node (full): 433563 +Grid : Message : Average mflops/s per call per node (full): 448410 +Grid : Message : Average mflops/s per call per node (full): 305915 +Grid : Message : Stencil 15.1128 GB/s per node 
+Grid : Message : Stencil 17.6454 GB/s per node +Grid : Message : Stencil 17.5161 GB/s per node +Grid : Message : Stencil 12.8753 GB/s per node +Grid : Message : Average mflops/s per call per node : 663917 +Grid : Message : Average mflops/s per call per node : 801411 +Grid : Message : Average mflops/s per call per node : 828285 +Grid : Message : Average mflops/s per call per node : 661367 +Grid : Message : Average mflops/s per call per node (full): 316998 +Grid : Message : Average mflops/s per call per node (full): 437959 +Grid : Message : Average mflops/s per call per node (full): 444458 +Grid : Message : Average mflops/s per call per node (full): 304657 +Grid : Message : Stencil 13.4841 GB/s per node +Grid : Message : Stencil 16.5592 GB/s per node +Grid : Message : Stencil 17.373 GB/s per node +Grid : Message : Stencil 13.1126 GB/s per node +Grid : Message : Average mflops/s per call per node : 667237 +Grid : Message : Average mflops/s per call per node : 800749 +Grid : Message : Average mflops/s per call per node : 822447 +Grid : Message : Average mflops/s per call per node : 664609 +Grid : Message : Average mflops/s per call per node (full): 315971 +Grid : Message : Average mflops/s per call per node (full): 434444 +Grid : Message : Average mflops/s per call per node (full): 444853 +Grid : Message : Average mflops/s per call per node (full): 305671 +Grid : Message : Stencil 12.8365 GB/s per node +Grid : Message : Stencil 16.8574 GB/s per node +Grid : Message : Stencil 18.38 GB/s per node +Grid : Message : Stencil 13.3748 GB/s per node +Grid : Message : Average mflops/s per call per node : 667470 +Grid : Message : Average mflops/s per call per node : 808901 +Grid : Message : Average mflops/s per call per node : 818028 +Grid : Message : Average mflops/s per call per node : 668664 +Grid : Message : Average mflops/s per call per node (full): 312274 +Grid : Message : Average mflops/s per call per node (full): 437422 +Grid : Message : Average mflops/s per call per node (full): 445399 +Grid : Message : Average mflops/s per call per node (full): 307343 +Grid : Message : Stencil 13.3121 GB/s per node +Grid : Message : Stencil 17.5213 GB/s per node +Grid : Message : Stencil 18.3939 GB/s per node +Grid : Message : Stencil 13.0664 GB/s per node +Grid : Message : Average mflops/s per call per node : 667802 +Grid : Message : Average mflops/s per call per node : 801730 +Grid : Message : Average mflops/s per call per node : 827544 +Grid : Message : Average mflops/s per call per node : 665724 +Grid : Message : Average mflops/s per call per node (full): 315980 +Grid : Message : Average mflops/s per call per node (full): 438621 +Grid : Message : Average mflops/s per call per node (full): 448983 +Grid : Message : Average mflops/s per call per node (full): 306193 +Grid : Message : Stencil 13.4233 GB/s per node +Grid : Message : Stencil 17.1774 GB/s per node +Grid : Message : Stencil 17.7923 GB/s per node +Grid : Message : Stencil 11.9634 GB/s per node +Grid : Message : Average mflops/s per call per node : 667088 +Grid : Message : Average mflops/s per call per node : 802362 +Grid : Message : Average mflops/s per call per node : 820102 +Grid : Message : Average mflops/s per call per node : 667356 +Grid : Message : Average mflops/s per call per node (full): 315180 +Grid : Message : Average mflops/s per call per node (full): 436717 +Grid : Message : Average mflops/s per call per node (full): 444688 +Grid : Message : Average mflops/s per call per node (full): 301726 +Grid : Message : Stencil 12.8978 GB/s per 
node +Grid : Message : Stencil 16.6616 GB/s per node +Grid : Message : Stencil 17.9218 GB/s per node +Grid : Message : Stencil 13.2227 GB/s per node +Grid : Message : Average mflops/s per call per node : 667878 +Grid : Message : Average mflops/s per call per node : 802312 +Grid : Message : Average mflops/s per call per node : 815521 +Grid : Message : Average mflops/s per call per node : 660750 +Grid : Message : Average mflops/s per call per node (full): 314861 +Grid : Message : Average mflops/s per call per node (full): 434376 +Grid : Message : Average mflops/s per call per node (full): 445104 +Grid : Message : Average mflops/s per call per node (full): 305488 +Grid : Message : Stencil 12.7428 GB/s per node +Grid : Message : Stencil 16.777 GB/s per node +Grid : Message : Stencil 16.8466 GB/s per node +Grid : Message : Stencil 12.3905 GB/s per node +Grid : Message : Average mflops/s per call per node : 668370 +Grid : Message : Average mflops/s per call per node : 804917 +Grid : Message : Average mflops/s per call per node : 825243 +Grid : Message : Average mflops/s per call per node : 666776 +Grid : Message : Average mflops/s per call per node (full): 314673 +Grid : Message : Average mflops/s per call per node (full): 435603 +Grid : Message : Average mflops/s per call per node (full): 438303 +Grid : Message : Average mflops/s per call per node (full): 303690 +Grid : Message : Stencil 13.3228 GB/s per node +Grid : Message : Stencil 17.4223 GB/s per node +Grid : Message : Stencil 16.754 GB/s per node +Grid : Message : Stencil 12.6843 GB/s per node +Grid : Message : Average mflops/s per call per node : 667883 +Grid : Message : Average mflops/s per call per node : 805010 +Grid : Message : Average mflops/s per call per node : 820299 +Grid : Message : Average mflops/s per call per node : 663059 +Grid : Message : Average mflops/s per call per node (full): 315375 +Grid : Message : Average mflops/s per call per node (full): 440281 +Grid : Message : Average mflops/s per call per node (full): 430543 +Grid : Message : Average mflops/s per call per node (full): 301193 +Grid : Message : Stencil 13.3313 GB/s per node +Grid : Message : Stencil 9.28908 GB/s per node +Grid : Message : Stencil 18.1805 GB/s per node +Grid : Message : Stencil 13.6462 GB/s per node +Grid : Message : Average mflops/s per call per node : 667383 +Grid : Message : Average mflops/s per call per node : 815148 +Grid : Message : Average mflops/s per call per node : 817603 +Grid : Message : Average mflops/s per call per node : 660300 +Grid : Message : Average mflops/s per call per node (full): 315710 +Grid : Message : Average mflops/s per call per node (full): 299406 +Grid : Message : Average mflops/s per call per node (full): 446043 +Grid : Message : Average mflops/s per call per node (full): 306193 +Grid : Message : Stencil 12.3255 GB/s per node +Grid : Message : Stencil 10.828 GB/s per node +Grid : Message : Stencil 17.9218 GB/s per node +Grid : Message : Stencil 12.2895 GB/s per node +Grid : Message : Average mflops/s per call per node : 668942 +Grid : Message : Average mflops/s per call per node : 808118 +Grid : Message : Average mflops/s per call per node : 815742 +Grid : Message : Average mflops/s per call per node : 666199 +Grid : Message : Average mflops/s per call per node (full): 312475 +Grid : Message : Average mflops/s per call per node (full): 335871 +Grid : Message : Average mflops/s per call per node (full): 444799 +Grid : Message : Average mflops/s per call per node (full): 303522 +Grid : Message : Stencil 12.3314 GB/s 
per node +Grid : Message : Stencil 16.9833 GB/s per node +Grid : Message : Stencil 17.2071 GB/s per node +Grid : Message : Stencil 13.1734 GB/s per node +Grid : Message : Average mflops/s per call per node : 669271 +Grid : Message : Average mflops/s per call per node : 802289 +Grid : Message : Average mflops/s per call per node : 831405 +Grid : Message : Average mflops/s per call per node : 661655 +Grid : Message : Average mflops/s per call per node (full): 312489 +Grid : Message : Average mflops/s per call per node (full): 434880 +Grid : Message : Average mflops/s per call per node (full): 443932 +Grid : Message : Average mflops/s per call per node (full): 305986 +Grid : Message : Stencil 12.5033 GB/s per node +Grid : Message : Stencil 18.164 GB/s per node +Grid : Message : Stencil 17.9494 GB/s per node +Grid : Message : Stencil 11.9873 GB/s per node +Grid : Message : Average mflops/s per call per node : 668282 +Grid : Message : Average mflops/s per call per node : 798868 +Grid : Message : Average mflops/s per call per node : 821773 +Grid : Message : Average mflops/s per call per node : 668449 +Grid : Message : Average mflops/s per call per node (full): 312460 +Grid : Message : Average mflops/s per call per node (full): 440317 +Grid : Message : Average mflops/s per call per node (full): 445596 +Grid : Message : Average mflops/s per call per node (full): 302268 +Grid : Message : Stencil 13.6313 GB/s per node +Grid : Message : Stencil 9.02077 GB/s per node +Grid : Message : Stencil 17.3999 GB/s per node +Grid : Message : Stencil 13.8185 GB/s per node +Grid : Message : Average mflops/s per call per node : 667175 +Grid : Message : Average mflops/s per call per node : 811629 +Grid : Message : Average mflops/s per call per node : 821079 +Grid : Message : Average mflops/s per call per node : 660880 +Grid : Message : Average mflops/s per call per node (full): 315879 +Grid : Message : Average mflops/s per call per node (full): 293111 +Grid : Message : Average mflops/s per call per node (full): 444471 +Grid : Message : Average mflops/s per call per node (full): 305386 +Grid : Message : Stencil 13.1158 GB/s per node +Grid : Message : Stencil 17.6661 GB/s per node +Grid : Message : Stencil 17.4208 GB/s per node +Grid : Message : Stencil 12.3309 GB/s per node +Grid : Message : Average mflops/s per call per node : 667019 +Grid : Message : Average mflops/s per call per node : 804662 +Grid : Message : Average mflops/s per call per node : 812437 +Grid : Message : Average mflops/s per call per node : 666289 +Grid : Message : Average mflops/s per call per node (full): 314758 +Grid : Message : Average mflops/s per call per node (full): 441791 +Grid : Message : Average mflops/s per call per node (full): 437368 +Grid : Message : Average mflops/s per call per node (full): 303335 +Grid : Message : Stencil 13.6911 GB/s per node +Grid : Message : Stencil 14.1241 GB/s per node +Grid : Message : Stencil 16.9226 GB/s per node +Grid : Message : Stencil 12.4308 GB/s per node +Grid : Message : Average mflops/s per call per node : 665603 +Grid : Message : Average mflops/s per call per node : 807566 +Grid : Message : Average mflops/s per call per node : 824624 +Grid : Message : Average mflops/s per call per node : 668689 +Grid : Message : Average mflops/s per call per node (full): 315008 +Grid : Message : Average mflops/s per call per node (full): 401068 +Grid : Message : Average mflops/s per call per node (full): 439576 +Grid : Message : Average mflops/s per call per node (full): 305056 +Grid : Message : Stencil 13.4017 
GB/s per node +Grid : Message : Stencil 17.9305 GB/s per node +Grid : Message : Stencil 18.0716 GB/s per node +Grid : Message : Stencil 13.3354 GB/s per node +Grid : Message : Average mflops/s per call per node : 663292 +Grid : Message : Average mflops/s per call per node : 801563 +Grid : Message : Average mflops/s per call per node : 818955 +Grid : Message : Average mflops/s per call per node : 669092 +Grid : Message : Average mflops/s per call per node (full): 315241 +Grid : Message : Average mflops/s per call per node (full): 438778 +Grid : Message : Average mflops/s per call per node (full): 445765 +Grid : Message : Average mflops/s per call per node (full): 306511 +Grid : Message : Stencil 13.4992 GB/s per node +Grid : Message : Stencil 16.8372 GB/s per node +Grid : Message : Stencil 18.0878 GB/s per node +Grid : Message : Stencil 13.7108 GB/s per node +Grid : Message : Average mflops/s per call per node : 664701 +Grid : Message : Average mflops/s per call per node : 805242 +Grid : Message : Average mflops/s per call per node : 816353 +Grid : Message : Average mflops/s per call per node : 660762 +Grid : Message : Average mflops/s per call per node (full): 316150 +Grid : Message : Average mflops/s per call per node (full): 436528 +Grid : Message : Average mflops/s per call per node (full): 444918 +Grid : Message : Average mflops/s per call per node (full): 304280 +Grid : Message : Stencil 13.2318 GB/s per node +Grid : Message : Stencil 9.11562 GB/s per node +Grid : Message : Stencil 17.3461 GB/s per node +Grid : Message : Stencil 12.4613 GB/s per node +Grid : Message : Average mflops/s per call per node : 664415 +Grid : Message : Average mflops/s per call per node : 812585 +Grid : Message : Average mflops/s per call per node : 823017 +Grid : Message : Average mflops/s per call per node : 664295 +Grid : Message : Average mflops/s per call per node (full): 314799 +Grid : Message : Average mflops/s per call per node (full): 294282 +Grid : Message : Average mflops/s per call per node (full): 443996 +Grid : Message : Average mflops/s per call per node (full): 302613 +Grid : Message : Stencil 12.8501 GB/s per node +Grid : Message : Stencil 18.1365 GB/s per node +Grid : Message : Stencil 17.5319 GB/s per node +Grid : Message : Stencil 13.6167 GB/s per node +Grid : Message : Average mflops/s per call per node : 667116 +Grid : Message : Average mflops/s per call per node : 805284 +Grid : Message : Average mflops/s per call per node : 824300 +Grid : Message : Average mflops/s per call per node : 663720 +Grid : Message : Average mflops/s per call per node (full): 314912 +Grid : Message : Average mflops/s per call per node (full): 441021 +Grid : Message : Average mflops/s per call per node (full): 446060 +Grid : Message : Average mflops/s per call per node (full): 306658 +Grid : Message : Stencil 12.9248 GB/s per node +Grid : Message : Stencil 17.9967 GB/s per node +Grid : Message : Stencil 16.997 GB/s per node +Grid : Message : Stencil 13.6847 GB/s per node +Grid : Message : Average mflops/s per call per node : 664023 +Grid : Message : Average mflops/s per call per node : 804162 +Grid : Message : Average mflops/s per call per node : 824007 +Grid : Message : Average mflops/s per call per node : 664748 +Grid : Message : Average mflops/s per call per node (full): 313748 +Grid : Message : Average mflops/s per call per node (full): 440774 +Grid : Message : Average mflops/s per call per node (full): 440280 +Grid : Message : Average mflops/s per call per node (full): 306476 +Grid : Message : Stencil 
13.4693 GB/s per node +Grid : Message : Stencil 10.9112 GB/s per node +Grid : Message : Stencil 18.7818 GB/s per node +Grid : Message : Stencil 13.5111 GB/s per node +Grid : Message : Average mflops/s per call per node : 665246 +Grid : Message : Average mflops/s per call per node : 814175 +Grid : Message : Average mflops/s per call per node : 821192 +Grid : Message : Average mflops/s per call per node : 665535 +Grid : Message : Average mflops/s per call per node (full): 316162 +Grid : Message : Average mflops/s per call per node (full): 337525 +Grid : Message : Average mflops/s per call per node (full): 447987 +Grid : Message : Average mflops/s per call per node (full): 306323 +Grid : Message : Stencil 13.0777 GB/s per node +Grid : Message : Stencil 16.9577 GB/s per node +Grid : Message : Stencil 18.3557 GB/s per node +Grid : Message : Stencil 14.4107 GB/s per node +Grid : Message : Average mflops/s per call per node : 665813 +Grid : Message : Average mflops/s per call per node : 805514 +Grid : Message : Average mflops/s per call per node : 819381 +Grid : Message : Average mflops/s per call per node : 658705 +Grid : Message : Average mflops/s per call per node (full): 314850 +Grid : Message : Average mflops/s per call per node (full): 439586 +Grid : Message : Average mflops/s per call per node (full): 447271 +Grid : Message : Average mflops/s per call per node (full): 306939 +Grid : Message : Stencil 13.3862 GB/s per node +Grid : Message : Stencil 17.1971 GB/s per node +Grid : Message : Stencil 17.6139 GB/s per node +Grid : Message : Stencil 12.9573 GB/s per node +Grid : Message : Average mflops/s per call per node : 666671 +Grid : Message : Average mflops/s per call per node : 803421 +Grid : Message : Average mflops/s per call per node : 825401 +Grid : Message : Average mflops/s per call per node : 659545 +Grid : Message : Average mflops/s per call per node (full): 315837 +Grid : Message : Average mflops/s per call per node (full): 438396 +Grid : Message : Average mflops/s per call per node (full): 445524 +Grid : Message : Average mflops/s per call per node (full): 304195 +Grid : Message : Stencil 13.5591 GB/s per node +Grid : Message : Stencil 17.7183 GB/s per node +Grid : Message : Stencil 17.2019 GB/s per node +Grid : Message : Stencil 12.3189 GB/s per node +Grid : Message : Average mflops/s per call per node : 665892 +Grid : Message : Average mflops/s per call per node : 806264 +Grid : Message : Average mflops/s per call per node : 824604 +Grid : Message : Average mflops/s per call per node : 667157 +Grid : Message : Average mflops/s per call per node (full): 315081 +Grid : Message : Average mflops/s per call per node (full): 440782 +Grid : Message : Average mflops/s per call per node (full): 440963 +Grid : Message : Average mflops/s per call per node (full): 304250 +Grid : Message : Stencil 13.248 GB/s per node +Grid : Message : Stencil 14.8018 GB/s per node +Grid : Message : Stencil 17.461 GB/s per node +Grid : Message : Stencil 13.2692 GB/s per node +Grid : Message : Average mflops/s per call per node : 665956 +Grid : Message : Average mflops/s per call per node : 807889 +Grid : Message : Average mflops/s per call per node : 825939 +Grid : Message : Average mflops/s per call per node : 665659 +Grid : Message : Average mflops/s per call per node (full): 313670 +Grid : Message : Average mflops/s per call per node (full): 412631 +Grid : Message : Average mflops/s per call per node (full): 445575 +Grid : Message : Average mflops/s per call per node (full): 306363 +Grid : Message : 
Stencil 13.7145 GB/s per node +Grid : Message : Stencil 17.3324 GB/s per node +Grid : Message : Stencil 18.1848 GB/s per node +Grid : Message : Stencil 12.9419 GB/s per node +Grid : Message : Average mflops/s per call per node : 665029 +Grid : Message : Average mflops/s per call per node : 808743 +Grid : Message : Average mflops/s per call per node : 821243 +Grid : Message : Average mflops/s per call per node : 662212 +Grid : Message : Average mflops/s per call per node (full): 314960 +Grid : Message : Average mflops/s per call per node (full): 439231 +Grid : Message : Average mflops/s per call per node (full): 447120 +Grid : Message : Average mflops/s per call per node (full): 305205 +Grid : Message : Stencil 13.5834 GB/s per node +Grid : Message : Stencil 13.9187 GB/s per node +Grid : Message : Stencil 17.0826 GB/s per node +Grid : Message : Stencil 13.5903 GB/s per node +Grid : Message : Average mflops/s per call per node : 668611 +Grid : Message : Average mflops/s per call per node : 810477 +Grid : Message : Average mflops/s per call per node : 820414 +Grid : Message : Average mflops/s per call per node : 662241 +Grid : Message : Average mflops/s per call per node (full): 316736 +Grid : Message : Average mflops/s per call per node (full): 397739 +Grid : Message : Average mflops/s per call per node (full): 434490 +Grid : Message : Average mflops/s per call per node (full): 306342 +Grid : Message : Stencil 12.8785 GB/s per node +Grid : Message : Stencil 18.0336 GB/s per node +Grid : Message : Stencil 17.5106 GB/s per node +Grid : Message : Stencil 12.2151 GB/s per node +Grid : Message : Average mflops/s per call per node : 668606 +Grid : Message : Average mflops/s per call per node : 805111 +Grid : Message : Average mflops/s per call per node : 822796 +Grid : Message : Average mflops/s per call per node : 664684 +Grid : Message : Average mflops/s per call per node (full): 313610 +Grid : Message : Average mflops/s per call per node (full): 440398 +Grid : Message : Average mflops/s per call per node (full): 444645 +Grid : Message : Average mflops/s per call per node (full): 303118 +Grid : Message : Stencil 13.3419 GB/s per node +Grid : Message : Stencil 17.7552 GB/s per node +Grid : Message : Stencil 18.0255 GB/s per node +Grid : Message : Stencil 12.1451 GB/s per node +Grid : Message : Average mflops/s per call per node : 666457 +Grid : Message : Average mflops/s per call per node : 803015 +Grid : Message : Average mflops/s per call per node : 819714 +Grid : Message : Average mflops/s per call per node : 666522 +Grid : Message : Average mflops/s per call per node (full): 314487 +Grid : Message : Average mflops/s per call per node (full): 440563 +Grid : Message : Average mflops/s per call per node (full): 446709 +Grid : Message : Average mflops/s per call per node (full): 302842 +Grid : Message : Stencil 13.0752 GB/s per node +Grid : Message : Stencil 16.7879 GB/s per node +Grid : Message : Stencil 17.1563 GB/s per node +Grid : Message : Stencil 12.2405 GB/s per node +Grid : Message : Average mflops/s per call per node : 661678 +Grid : Message : Average mflops/s per call per node : 806525 +Grid : Message : Average mflops/s per call per node : 822295 +Grid : Message : Average mflops/s per call per node : 662354 +Grid : Message : Average mflops/s per call per node (full): 314978 +Grid : Message : Average mflops/s per call per node (full): 437910 +Grid : Message : Average mflops/s per call per node (full): 432688 +Grid : Message : Average mflops/s per call per node (full): 300123 +Grid : 
Message : Stencil 12.8164 GB/s per node +Grid : Message : Stencil 10.8645 GB/s per node +Grid : Message : Stencil 17.5877 GB/s per node +Grid : Message : Stencil 12.2955 GB/s per node +Grid : Message : Average mflops/s per call per node : 663617 +Grid : Message : Average mflops/s per call per node : 815350 +Grid : Message : Average mflops/s per call per node : 822622 +Grid : Message : Average mflops/s per call per node : 670769 +Grid : Message : Average mflops/s per call per node (full): 311572 +Grid : Message : Average mflops/s per call per node (full): 336451 +Grid : Message : Average mflops/s per call per node (full): 445744 +Grid : Message : Average mflops/s per call per node (full): 305249 +Grid : Message : Stencil 13.4844 GB/s per node +Grid : Message : Stencil 9.41537 GB/s per node +Grid : Message : Stencil 17.3923 GB/s per node +Grid : Message : Stencil 12.6071 GB/s per node +Grid : Message : Average mflops/s per call per node : 664692 +Grid : Message : Average mflops/s per call per node : 804293 +Grid : Message : Average mflops/s per call per node : 820679 +Grid : Message : Average mflops/s per call per node : 666786 +Grid : Message : Average mflops/s per call per node (full): 315823 +Grid : Message : Average mflops/s per call per node (full): 302137 +Grid : Message : Average mflops/s per call per node (full): 444841 +Grid : Message : Average mflops/s per call per node (full): 304938 +Grid : Message : Stencil 13.9875 GB/s per node +Grid : Message : Stencil 17.01 GB/s per node +Grid : Message : Stencil 18.6205 GB/s per node +Grid : Message : Stencil 13.0562 GB/s per node +Grid : Message : Average mflops/s per call per node : 664500 +Grid : Message : Average mflops/s per call per node : 802832 +Grid : Message : Average mflops/s per call per node : 818612 +Grid : Message : Average mflops/s per call per node : 665705 +Grid : Message : Average mflops/s per call per node (full): 316672 +Grid : Message : Average mflops/s per call per node (full): 438180 +Grid : Message : Average mflops/s per call per node (full): 447127 +Grid : Message : Average mflops/s per call per node (full): 306157 +Grid : Message : Stencil 14.0333 GB/s per node +Grid : Message : Stencil 16.5672 GB/s per node +Grid : Message : Stencil 17.9764 GB/s per node +Grid : Message : Stencil 12.9231 GB/s per node +Grid : Message : Average mflops/s per call per node : 663626 +Grid : Message : Average mflops/s per call per node : 804403 +Grid : Message : Average mflops/s per call per node : 819891 +Grid : Message : Average mflops/s per call per node : 666901 +Grid : Message : Average mflops/s per call per node (full): 315999 +Grid : Message : Average mflops/s per call per node (full): 435532 +Grid : Message : Average mflops/s per call per node (full): 446810 +Grid : Message : Average mflops/s per call per node (full): 306073 +Grid : Message : Stencil 13.809 GB/s per node +Grid : Message : Stencil 18.2537 GB/s per node +Grid : Message : Stencil 17.7281 GB/s per node +Grid : Message : Stencil 14.1155 GB/s per node +Grid : Message : Average mflops/s per call per node : 665162 +Grid : Message : Average mflops/s per call per node : 799502 +Grid : Message : Average mflops/s per call per node : 826679 +Grid : Message : Average mflops/s per call per node : 660384 +Grid : Message : Average mflops/s per call per node (full): 316186 +Grid : Message : Average mflops/s per call per node (full): 439300 +Grid : Message : Average mflops/s per call per node (full): 447229 +Grid : Message : Average mflops/s per call per node (full): 307210 +Grid 
: Message : Stencil 13.7922 GB/s per node +Grid : Message : Stencil 17.225 GB/s per node +Grid : Message : Stencil 18.2881 GB/s per node +Grid : Message : Stencil 13.7297 GB/s per node +Grid : Message : Average mflops/s per call per node : 665724 +Grid : Message : Average mflops/s per call per node : 799898 +Grid : Message : Average mflops/s per call per node : 825497 +Grid : Message : Average mflops/s per call per node : 660536 +Grid : Message : Average mflops/s per call per node (full): 315508 +Grid : Message : Average mflops/s per call per node (full): 437931 +Grid : Message : Average mflops/s per call per node (full): 448941 +Grid : Message : Average mflops/s per call per node (full): 306707 +Grid : Message : Stencil 13.9262 GB/s per node +Grid : Message : Stencil 16.7104 GB/s per node +Grid : Message : Stencil 18.1467 GB/s per node +Grid : Message : Stencil 13.2761 GB/s per node +Grid : Message : Average mflops/s per call per node : 665930 +Grid : Message : Average mflops/s per call per node : 805745 +Grid : Message : Average mflops/s per call per node : 819049 +Grid : Message : Average mflops/s per call per node : 658184 +Grid : Message : Average mflops/s per call per node (full): 316284 +Grid : Message : Average mflops/s per call per node (full): 436808 +Grid : Message : Average mflops/s per call per node (full): 445900 +Grid : Message : Average mflops/s per call per node (full): 304988 +Grid : Message : Stencil 13.6979 GB/s per node +Grid : Message : Stencil 16.5942 GB/s per node +Grid : Message : Stencil 17.6454 GB/s per node +Grid : Message : Stencil 13.6655 GB/s per node +Grid : Message : Average mflops/s per call per node : 667274 +Grid : Message : Average mflops/s per call per node : 804315 +Grid : Message : Average mflops/s per call per node : 818993 +Grid : Message : Average mflops/s per call per node : 661050 +Grid : Message : Average mflops/s per call per node (full): 315545 +Grid : Message : Average mflops/s per call per node (full): 435802 +Grid : Message : Average mflops/s per call per node (full): 444969 +Grid : Message : Average mflops/s per call per node (full): 305752 +Grid : Message : Stencil 14.8011 GB/s per node +Grid : Message : Stencil 17.4577 GB/s per node +Grid : Message : Stencil 16.0438 GB/s per node +Grid : Message : Stencil 11.8119 GB/s per node +Grid : Message : Average mflops/s per call per node : 666647 +Grid : Message : Average mflops/s per call per node : 803512 +Grid : Message : Average mflops/s per call per node : 825357 +Grid : Message : Average mflops/s per call per node : 669817 +Grid : Message : Average mflops/s per call per node (full): 318122 +Grid : Message : Average mflops/s per call per node (full): 438539 +Grid : Message : Average mflops/s per call per node (full): 420134 +Grid : Message : Average mflops/s per call per node (full): 299258 +Grid : Message : Stencil 14.2815 GB/s per node +Grid : Message : Stencil 16.5677 GB/s per node +Grid : Message : Stencil 16.7552 GB/s per node +Grid : Message : Stencil 12.4322 GB/s per node +Grid : Message : Average mflops/s per call per node : 665525 +Grid : Message : Average mflops/s per call per node : 802985 +Grid : Message : Average mflops/s per call per node : 820117 +Grid : Message : Average mflops/s per call per node : 668353 +Grid : Message : Average mflops/s per call per node (full): 317822 +Grid : Message : Average mflops/s per call per node (full): 435373 +Grid : Message : Average mflops/s per call per node (full): 437842 +Grid : Message : Average mflops/s per call per node (full): 304972 
+Grid : Message : Stencil 12.445 GB/s per node +Grid : Message : Stencil 13.4312 GB/s per node +Grid : Message : Stencil 17.1194 GB/s per node +Grid : Message : Stencil 13.338 GB/s per node +Grid : Message : Average mflops/s per call per node : 668202 +Grid : Message : Average mflops/s per call per node : 807277 +Grid : Message : Average mflops/s per call per node : 817844 +Grid : Message : Average mflops/s per call per node : 661735 +Grid : Message : Average mflops/s per call per node (full): 313315 +Grid : Message : Average mflops/s per call per node (full): 390731 +Grid : Message : Average mflops/s per call per node (full): 440923 +Grid : Message : Average mflops/s per call per node (full): 306331 +Grid : Message : Stencil 12.7171 GB/s per node +Grid : Message : Stencil 16.314 GB/s per node +Grid : Message : Stencil 18.5352 GB/s per node +Grid : Message : Stencil 13.2695 GB/s per node +Grid : Message : Average mflops/s per call per node : 666693 +Grid : Message : Average mflops/s per call per node : 800267 +Grid : Message : Average mflops/s per call per node : 826845 +Grid : Message : Average mflops/s per call per node : 660822 +Grid : Message : Average mflops/s per call per node (full): 314168 +Grid : Message : Average mflops/s per call per node (full): 431113 +Grid : Message : Average mflops/s per call per node (full): 448873 +Grid : Message : Average mflops/s per call per node (full): 305170 +Grid : Message : Stencil 12.9387 GB/s per node +Grid : Message : Stencil 16.4085 GB/s per node +Grid : Message : Stencil 16.8739 GB/s per node +Grid : Message : Stencil 13.6264 GB/s per node +Grid : Message : Average mflops/s per call per node : 670156 +Grid : Message : Average mflops/s per call per node : 805849 +Grid : Message : Average mflops/s per call per node : 818217 +Grid : Message : Average mflops/s per call per node : 660247 +Grid : Message : Average mflops/s per call per node (full): 315820 +Grid : Message : Average mflops/s per call per node (full): 429584 +Grid : Message : Average mflops/s per call per node (full): 440164 +Grid : Message : Average mflops/s per call per node (full): 305487 +Grid : Message : Stencil 12.917 GB/s per node +Grid : Message : Stencil 16.6841 GB/s per node +Grid : Message : Stencil 17.6266 GB/s per node +Grid : Message : Stencil 12.6883 GB/s per node +Grid : Message : Average mflops/s per call per node : 671234 +Grid : Message : Average mflops/s per call per node : 806459 +Grid : Message : Average mflops/s per call per node : 823954 +Grid : Message : Average mflops/s per call per node : 662970 +Grid : Message : Average mflops/s per call per node (full): 314276 +Grid : Message : Average mflops/s per call per node (full): 433660 +Grid : Message : Average mflops/s per call per node (full): 445980 +Grid : Message : Average mflops/s per call per node (full): 303927 +Grid : Message : Stencil 12.3094 GB/s per node +Grid : Message : Stencil 16.6002 GB/s per node +Grid : Message : Stencil 17.6975 GB/s per node +Grid : Message : Stencil 12.2183 GB/s per node +Grid : Message : Average mflops/s per call per node : 671314 +Grid : Message : Average mflops/s per call per node : 805711 +Grid : Message : Average mflops/s per call per node : 819593 +Grid : Message : Average mflops/s per call per node : 664132 +Grid : Message : Average mflops/s per call per node (full): 312627 +Grid : Message : Average mflops/s per call per node (full): 435765 +Grid : Message : Average mflops/s per call per node (full): 445720 +Grid : Message : Average mflops/s per call per node (full): 
303954 +Grid : Message : Stencil 12.8645 GB/s per node +Grid : Message : Stencil 15.0431 GB/s per node +Grid : Message : Stencil 18.2157 GB/s per node +Grid : Message : Stencil 12.3631 GB/s per node +Grid : Message : Average mflops/s per call per node : 666465 +Grid : Message : Average mflops/s per call per node : 800239 +Grid : Message : Average mflops/s per call per node : 819023 +Grid : Message : Average mflops/s per call per node : 665368 +Grid : Message : Average mflops/s per call per node (full): 314038 +Grid : Message : Average mflops/s per call per node (full): 414968 +Grid : Message : Average mflops/s per call per node (full): 447939 +Grid : Message : Average mflops/s per call per node (full): 301079 +Grid : Message : Stencil 14.8432 GB/s per node +Grid : Message : Stencil 9.11744 GB/s per node +Grid : Message : Stencil 17.9773 GB/s per node +Grid : Message : Stencil 13.6707 GB/s per node +Grid : Message : Average mflops/s per call per node : 662854 +Grid : Message : Average mflops/s per call per node : 809082 +Grid : Message : Average mflops/s per call per node : 820687 +Grid : Message : Average mflops/s per call per node : 662361 +Grid : Message : Average mflops/s per call per node (full): 315454 +Grid : Message : Average mflops/s per call per node (full): 295354 +Grid : Message : Average mflops/s per call per node (full): 446626 +Grid : Message : Average mflops/s per call per node (full): 305813 +Grid : Message : Stencil 12.7809 GB/s per node +Grid : Message : Stencil 16.2911 GB/s per node +Grid : Message : Stencil 17.3266 GB/s per node +Grid : Message : Stencil 13.7354 GB/s per node +Grid : Message : Average mflops/s per call per node : 668695 +Grid : Message : Average mflops/s per call per node : 803473 +Grid : Message : Average mflops/s per call per node : 820087 +Grid : Message : Average mflops/s per call per node : 662076 +Grid : Message : Average mflops/s per call per node (full): 315302 +Grid : Message : Average mflops/s per call per node (full): 432491 +Grid : Message : Average mflops/s per call per node (full): 443546 +Grid : Message : Average mflops/s per call per node (full): 305753 +Grid : Message : Stencil 15.392 GB/s per node +Grid : Message : Stencil 16.6395 GB/s per node +Grid : Message : Stencil 18.4007 GB/s per node +Grid : Message : Stencil 11.7254 GB/s per node +Grid : Message : Average mflops/s per call per node : 661087 +Grid : Message : Average mflops/s per call per node : 800899 +Grid : Message : Average mflops/s per call per node : 823878 +Grid : Message : Average mflops/s per call per node : 665867 +Grid : Message : Average mflops/s per call per node (full): 316630 +Grid : Message : Average mflops/s per call per node (full): 435665 +Grid : Message : Average mflops/s per call per node (full): 447474 +Grid : Message : Average mflops/s per call per node (full): 299133 +Grid : Message : Stencil 12.7109 GB/s per node +Grid : Message : Stencil 16.6862 GB/s per node +Grid : Message : Stencil 17.2417 GB/s per node +Grid : Message : Stencil 12.1287 GB/s per node +Grid : Message : Average mflops/s per call per node : 668518 +Grid : Message : Average mflops/s per call per node : 805298 +Grid : Message : Average mflops/s per call per node : 817691 +Grid : Message : Average mflops/s per call per node : 669558 +Grid : Message : Average mflops/s per call per node (full): 315247 +Grid : Message : Average mflops/s per call per node (full): 436881 +Grid : Message : Average mflops/s per call per node (full): 442923 +Grid : Message : Average mflops/s per call per node 
(full): 303777 +Grid : Message : Stencil 14.6304 GB/s per node +Grid : Message : Stencil 17.0628 GB/s per node +Grid : Message : Stencil 17.9849 GB/s per node +Grid : Message : Stencil 12.5003 GB/s per node +Grid : Message : Average mflops/s per call per node : 661927 +Grid : Message : Average mflops/s per call per node : 800619 +Grid : Message : Average mflops/s per call per node : 824018 +Grid : Message : Average mflops/s per call per node : 669003 +Grid : Message : Average mflops/s per call per node (full): 316799 +Grid : Message : Average mflops/s per call per node (full): 435870 +Grid : Message : Average mflops/s per call per node (full): 448908 +Grid : Message : Average mflops/s per call per node (full): 305761 +Grid : Message : Stencil 14.2657 GB/s per node +Grid : Message : Stencil 17.7772 GB/s per node +Grid : Message : Stencil 16.4253 GB/s per node +Grid : Message : Stencil 12.0975 GB/s per node +Grid : Message : Average mflops/s per call per node : 662242 +Grid : Message : Average mflops/s per call per node : 803620 +Grid : Message : Average mflops/s per call per node : 822478 +Grid : Message : Average mflops/s per call per node : 666255 +Grid : Message : Average mflops/s per call per node (full): 316413 +Grid : Message : Average mflops/s per call per node (full): 440755 +Grid : Message : Average mflops/s per call per node (full): 425590 +Grid : Message : Average mflops/s per call per node (full): 301935 +Grid : Message : Stencil 15.2164 GB/s per node +Grid : Message : Stencil 9.49935 GB/s per node +Grid : Message : Stencil 17.2102 GB/s per node +Grid : Message : Stencil 13.7943 GB/s per node +Grid : Message : Average mflops/s per call per node : 659534 +Grid : Message : Average mflops/s per call per node : 809661 +Grid : Message : Average mflops/s per call per node : 823187 +Grid : Message : Average mflops/s per call per node : 661130 +Grid : Message : Average mflops/s per call per node (full): 316258 +Grid : Message : Average mflops/s per call per node (full): 304426 +Grid : Message : Average mflops/s per call per node (full): 442628 +Grid : Message : Average mflops/s per call per node (full): 306161 +Grid : Message : Stencil 14.6314 GB/s per node +Grid : Message : Stencil 16.992 GB/s per node +Grid : Message : Stencil 17.8637 GB/s per node +Grid : Message : Stencil 12.3308 GB/s per node +Grid : Message : Average mflops/s per call per node : 658515 +Grid : Message : Average mflops/s per call per node : 801185 +Grid : Message : Average mflops/s per call per node : 823355 +Grid : Message : Average mflops/s per call per node : 670100 +Grid : Message : Average mflops/s per call per node (full): 315176 +Grid : Message : Average mflops/s per call per node (full): 437692 +Grid : Message : Average mflops/s per call per node (full): 444062 +Grid : Message : Average mflops/s per call per node (full): 304858 +Grid : Message : Stencil 12.3942 GB/s per node +Grid : Message : Stencil 17.204 GB/s per node +Grid : Message : Stencil 17.2984 GB/s per node +Grid : Message : Stencil 12.1147 GB/s per node +Grid : Message : Average mflops/s per call per node : 663799 +Grid : Message : Average mflops/s per call per node : 800854 +Grid : Message : Average mflops/s per call per node : 823783 +Grid : Message : Average mflops/s per call per node : 667952 +Grid : Message : Average mflops/s per call per node (full): 312919 +Grid : Message : Average mflops/s per call per node (full): 439336 +Grid : Message : Average mflops/s per call per node (full): 442674 +Grid : Message : Average mflops/s per call per 
node (full): 302111 +Grid : Message : Stencil 13.5046 GB/s per node +Grid : Message : Stencil 8.65121 GB/s per node +Grid : Message : Stencil 16.8146 GB/s per node +Grid : Message : Stencil 12.6679 GB/s per node +Grid : Message : Average mflops/s per call per node : 664548 +Grid : Message : Average mflops/s per call per node : 810316 +Grid : Message : Average mflops/s per call per node : 824111 +Grid : Message : Average mflops/s per call per node : 668561 +Grid : Message : Average mflops/s per call per node (full): 315068 +Grid : Message : Average mflops/s per call per node (full): 283443 +Grid : Message : Average mflops/s per call per node (full): 438671 +Grid : Message : Average mflops/s per call per node (full): 305982 +Grid : Message : Stencil 13.1484 GB/s per node +Grid : Message : Stencil 17.4256 GB/s per node +Grid : Message : Stencil 18.1358 GB/s per node +Grid : Message : Stencil 12.7422 GB/s per node +Grid : Message : Average mflops/s per call per node : 663668 +Grid : Message : Average mflops/s per call per node : 800415 +Grid : Message : Average mflops/s per call per node : 808348 +Grid : Message : Average mflops/s per call per node : 666082 +Grid : Message : Average mflops/s per call per node (full): 313980 +Grid : Message : Average mflops/s per call per node (full): 437498 +Grid : Message : Average mflops/s per call per node (full): 443415 +Grid : Message : Average mflops/s per call per node (full): 305528 +Grid : Message : Stencil 13.3168 GB/s per node +Grid : Message : Stencil 16.8823 GB/s per node +Grid : Message : Stencil 17.3926 GB/s per node +Grid : Message : Stencil 14.2876 GB/s per node +Grid : Message : Average mflops/s per call per node : 667078 +Grid : Message : Average mflops/s per call per node : 801243 +Grid : Message : Average mflops/s per call per node : 823023 +Grid : Message : Average mflops/s per call per node : 661535 +Grid : Message : Average mflops/s per call per node (full): 315190 +Grid : Message : Average mflops/s per call per node (full): 432799 +Grid : Message : Average mflops/s per call per node (full): 442973 +Grid : Message : Average mflops/s per call per node (full): 303404 +Grid : Message : Stencil 12.6679 GB/s per node +Grid : Message : Stencil 16.6478 GB/s per node +Grid : Message : Stencil 17.3866 GB/s per node +Grid : Message : Stencil 13.3003 GB/s per node +Grid : Message : Average mflops/s per call per node : 670601 +Grid : Message : Average mflops/s per call per node : 802308 +Grid : Message : Average mflops/s per call per node : 818386 +Grid : Message : Average mflops/s per call per node : 661566 +Grid : Message : Average mflops/s per call per node (full): 315534 +Grid : Message : Average mflops/s per call per node (full): 435309 +Grid : Message : Average mflops/s per call per node (full): 443217 +Grid : Message : Average mflops/s per call per node (full): 305060 +Grid : Message : Stencil 13.1465 GB/s per node +Grid : Message : Stencil 11.5329 GB/s per node +Grid : Message : Stencil 17.4103 GB/s per node +Grid : Message : Stencil 13.828 GB/s per node +Grid : Message : Average mflops/s per call per node : 664492 +Grid : Message : Average mflops/s per call per node : 807782 +Grid : Message : Average mflops/s per call per node : 829479 +Grid : Message : Average mflops/s per call per node : 657773 +Grid : Message : Average mflops/s per call per node (full): 315037 +Grid : Message : Average mflops/s per call per node (full): 351401 +Grid : Message : Average mflops/s per call per node (full): 445392 +Grid : Message : Average mflops/s per call 
per node (full): 305751
+[Benchmark log condensed: this span of the patch consists of several hundred
+repeated measurement blocks of the form "Grid : Message : Stencil <bw> GB/s
+per node" followed by "Grid : Message : Average mflops/s per call per node :
+<n>" and "Grid : Message : Average mflops/s per call per node (full): <n>".
+Across the span the reported stencil bandwidth ranges from roughly 8.0 to
+19.2 GB/s per node, the average mflops/s per call per node from about
+654,000 to 831,000, and the corresponding (full) figures from about 267,000
+to 451,000.]
+Grid : Message
: Average mflops/s per call per node (full): 445247 +Grid : Message : Average mflops/s per call per node (full): 305656 +Grid : Message : Stencil 12.8344 GB/s per node +Grid : Message : Stencil 17.1858 GB/s per node +Grid : Message : Stencil 17.413 GB/s per node +Grid : Message : Stencil 12.3467 GB/s per node +Grid : Message : Average mflops/s per call per node : 666922 +Grid : Message : Average mflops/s per call per node : 802121 +Grid : Message : Average mflops/s per call per node : 826953 +Grid : Message : Average mflops/s per call per node : 665500 +Grid : Message : Average mflops/s per call per node (full): 313807 +Grid : Message : Average mflops/s per call per node (full): 436324 +Grid : Message : Average mflops/s per call per node (full): 445232 +Grid : Message : Average mflops/s per call per node (full): 302145 +Grid : Message : Stencil 15.1025 GB/s per node +Grid : Message : Stencil 9.66352 GB/s per node +Grid : Message : Stencil 17.8292 GB/s per node +Grid : Message : Stencil 14.5742 GB/s per node +Grid : Message : Average mflops/s per call per node : 662633 +Grid : Message : Average mflops/s per call per node : 812434 +Grid : Message : Average mflops/s per call per node : 826027 +Grid : Message : Average mflops/s per call per node : 655861 +Grid : Message : Average mflops/s per call per node (full): 317147 +Grid : Message : Average mflops/s per call per node (full): 308756 +Grid : Message : Average mflops/s per call per node (full): 447746 +Grid : Message : Average mflops/s per call per node (full): 306200 +Grid : Message : Stencil 13.9373 GB/s per node +Grid : Message : Stencil 15.1548 GB/s per node +Grid : Message : Stencil 17.3736 GB/s per node +Grid : Message : Stencil 13.8492 GB/s per node +Grid : Message : Average mflops/s per call per node : 664275 +Grid : Message : Average mflops/s per call per node : 805750 +Grid : Message : Average mflops/s per call per node : 823774 +Grid : Message : Average mflops/s per call per node : 661882 +Grid : Message : Average mflops/s per call per node (full): 315808 +Grid : Message : Average mflops/s per call per node (full): 418754 +Grid : Message : Average mflops/s per call per node (full): 443329 +Grid : Message : Average mflops/s per call per node (full): 305700 +Grid : Message : Stencil 13.141 GB/s per node +Grid : Message : Stencil 16.7009 GB/s per node +Grid : Message : Stencil 17.8167 GB/s per node +Grid : Message : Stencil 13.6201 GB/s per node +Grid : Message : Average mflops/s per call per node : 667151 +Grid : Message : Average mflops/s per call per node : 801398 +Grid : Message : Average mflops/s per call per node : 824131 +Grid : Message : Average mflops/s per call per node : 661978 +Grid : Message : Average mflops/s per call per node (full): 314659 +Grid : Message : Average mflops/s per call per node (full): 435937 +Grid : Message : Average mflops/s per call per node (full): 445330 +Grid : Message : Average mflops/s per call per node (full): 306688 +Grid : Message : Stencil 13.9323 GB/s per node +Grid : Message : Stencil 16.4841 GB/s per node +Grid : Message : Stencil 16.7951 GB/s per node +Grid : Message : Stencil 13.1655 GB/s per node +Grid : Message : Average mflops/s per call per node : 664108 +Grid : Message : Average mflops/s per call per node : 801113 +Grid : Message : Average mflops/s per call per node : 826510 +Grid : Message : Average mflops/s per call per node : 661766 +Grid : Message : Average mflops/s per call per node (full): 316038 +Grid : Message : Average mflops/s per call per node (full): 434101 +Grid : 
Message : Average mflops/s per call per node (full): 438453 +Grid : Message : Average mflops/s per call per node (full): 305318 +Grid : Message : Stencil 14.1698 GB/s per node +Grid : Message : Stencil 16.633 GB/s per node +Grid : Message : Stencil 17.4947 GB/s per node +Grid : Message : Stencil 13.9853 GB/s per node +Grid : Message : Average mflops/s per call per node : 662381 +Grid : Message : Average mflops/s per call per node : 804158 +Grid : Message : Average mflops/s per call per node : 825228 +Grid : Message : Average mflops/s per call per node : 659796 +Grid : Message : Average mflops/s per call per node (full): 315789 +Grid : Message : Average mflops/s per call per node (full): 431645 +Grid : Message : Average mflops/s per call per node (full): 444926 +Grid : Message : Average mflops/s per call per node (full): 306277 +Grid : Message : Stencil 14.2747 GB/s per node +Grid : Message : Stencil 16.6401 GB/s per node +Grid : Message : Stencil 18.2928 GB/s per node +Grid : Message : Stencil 13.065 GB/s per node +Grid : Message : Average mflops/s per call per node : 665096 +Grid : Message : Average mflops/s per call per node : 804366 +Grid : Message : Average mflops/s per call per node : 820500 +Grid : Message : Average mflops/s per call per node : 666332 +Grid : Message : Average mflops/s per call per node (full): 317067 +Grid : Message : Average mflops/s per call per node (full): 436069 +Grid : Message : Average mflops/s per call per node (full): 446419 +Grid : Message : Average mflops/s per call per node (full): 306089 +Grid : Message : Stencil 14.6541 GB/s per node +Grid : Message : Stencil 16.9482 GB/s per node +Grid : Message : Stencil 17.7746 GB/s per node +Grid : Message : Stencil 12.6571 GB/s per node +Grid : Message : Average mflops/s per call per node : 664825 +Grid : Message : Average mflops/s per call per node : 805429 +Grid : Message : Average mflops/s per call per node : 816785 +Grid : Message : Average mflops/s per call per node : 664280 +Grid : Message : Average mflops/s per call per node (full): 316962 +Grid : Message : Average mflops/s per call per node (full): 437622 +Grid : Message : Average mflops/s per call per node (full): 445335 +Grid : Message : Average mflops/s per call per node (full): 303974 +Grid : Message : Stencil 14.2193 GB/s per node +Grid : Message : Stencil 16.7243 GB/s per node +Grid : Message : Stencil 17.3418 GB/s per node +Grid : Message : Stencil 12.8809 GB/s per node +Grid : Message : Average mflops/s per call per node : 666393 +Grid : Message : Average mflops/s per call per node : 810603 +Grid : Message : Average mflops/s per call per node : 818960 +Grid : Message : Average mflops/s per call per node : 665596 +Grid : Message : Average mflops/s per call per node (full): 316810 +Grid : Message : Average mflops/s per call per node (full): 437826 +Grid : Message : Average mflops/s per call per node (full): 443884 +Grid : Message : Average mflops/s per call per node (full): 305181 +Grid : Message : Stencil 13.7367 GB/s per node +Grid : Message : Stencil 13.7684 GB/s per node +Grid : Message : Stencil 17.1511 GB/s per node +Grid : Message : Stencil 12.483 GB/s per node +Grid : Message : Average mflops/s per call per node : 665979 +Grid : Message : Average mflops/s per call per node : 805947 +Grid : Message : Average mflops/s per call per node : 821545 +Grid : Message : Average mflops/s per call per node : 663268 +Grid : Message : Average mflops/s per call per node (full): 316166 +Grid : Message : Average mflops/s per call per node (full): 395981 +Grid 
: Message : Average mflops/s per call per node (full): 442434 +Grid : Message : Average mflops/s per call per node (full): 303984 +Grid : Message : Stencil 13.9408 GB/s per node +Grid : Message : Stencil 17.3043 GB/s per node +Grid : Message : Stencil 17.7234 GB/s per node +Grid : Message : Stencil 14.3152 GB/s per node +Grid : Message : Average mflops/s per call per node : 667601 +Grid : Message : Average mflops/s per call per node : 801981 +Grid : Message : Average mflops/s per call per node : 822114 +Grid : Message : Average mflops/s per call per node : 662198 +Grid : Message : Average mflops/s per call per node (full): 316300 +Grid : Message : Average mflops/s per call per node (full): 438069 +Grid : Message : Average mflops/s per call per node (full): 445210 +Grid : Message : Average mflops/s per call per node (full): 306345 +Grid : Message : Stencil 12.5711 GB/s per node +Grid : Message : Stencil 15.0578 GB/s per node +Grid : Message : Stencil 17.8285 GB/s per node +Grid : Message : Stencil 12.6284 GB/s per node +Grid : Message : Average mflops/s per call per node : 671125 +Grid : Message : Average mflops/s per call per node : 806146 +Grid : Message : Average mflops/s per call per node : 821990 +Grid : Message : Average mflops/s per call per node : 661048 +Grid : Message : Average mflops/s per call per node (full): 312972 +Grid : Message : Average mflops/s per call per node (full): 416531 +Grid : Message : Average mflops/s per call per node (full): 446886 +Grid : Message : Average mflops/s per call per node (full): 302882 +Grid : Message : Stencil 12.5281 GB/s per node +Grid : Message : Stencil 15.8613 GB/s per node +Grid : Message : Stencil 17.6163 GB/s per node +Grid : Message : Stencil 13.0894 GB/s per node +Grid : Message : Average mflops/s per call per node : 670975 +Grid : Message : Average mflops/s per call per node : 801534 +Grid : Message : Average mflops/s per call per node : 823667 +Grid : Message : Average mflops/s per call per node : 663701 +Grid : Message : Average mflops/s per call per node (full): 314256 +Grid : Message : Average mflops/s per call per node (full): 425895 +Grid : Message : Average mflops/s per call per node (full): 444898 +Grid : Message : Average mflops/s per call per node (full): 305136 +Grid : Message : Stencil 13.8193 GB/s per node +Grid : Message : Stencil 16.2551 GB/s per node +Grid : Message : Stencil 17.1763 GB/s per node +Grid : Message : Stencil 13.4658 GB/s per node +Grid : Message : Average mflops/s per call per node : 666654 +Grid : Message : Average mflops/s per call per node : 798824 +Grid : Message : Average mflops/s per call per node : 822433 +Grid : Message : Average mflops/s per call per node : 662772 +Grid : Message : Average mflops/s per call per node (full): 316261 +Grid : Message : Average mflops/s per call per node (full): 431431 +Grid : Message : Average mflops/s per call per node (full): 442904 +Grid : Message : Average mflops/s per call per node (full): 306560 +Grid : Message : Stencil 14.0584 GB/s per node +Grid : Message : Stencil 16.9526 GB/s per node +Grid : Message : Stencil 16.855 GB/s per node +Grid : Message : Stencil 12.106 GB/s per node +Grid : Message : Average mflops/s per call per node : 667419 +Grid : Message : Average mflops/s per call per node : 801288 +Grid : Message : Average mflops/s per call per node : 828394 +Grid : Message : Average mflops/s per call per node : 664532 +Grid : Message : Average mflops/s per call per node (full): 315332 +Grid : Message : Average mflops/s per call per node (full): 430937 
+Grid : Message : Average mflops/s per call per node (full): 439427 +Grid : Message : Average mflops/s per call per node (full): 302734 +Grid : Message : Stencil 12.7819 GB/s per node +Grid : Message : Stencil 11.0133 GB/s per node +Grid : Message : Stencil 17.3597 GB/s per node +Grid : Message : Stencil 12.6736 GB/s per node +Grid : Message : Average mflops/s per call per node : 671730 +Grid : Message : Average mflops/s per call per node : 814118 +Grid : Message : Average mflops/s per call per node : 824538 +Grid : Message : Average mflops/s per call per node : 668701 +Grid : Message : Average mflops/s per call per node (full): 315767 +Grid : Message : Average mflops/s per call per node (full): 340190 +Grid : Message : Average mflops/s per call per node (full): 444378 +Grid : Message : Average mflops/s per call per node (full): 305611 +Grid : Message : Stencil 13.3355 GB/s per node +Grid : Message : Stencil 17.0643 GB/s per node +Grid : Message : Stencil 17.77 GB/s per node +Grid : Message : Stencil 12.2757 GB/s per node +Grid : Message : Average mflops/s per call per node : 668068 +Grid : Message : Average mflops/s per call per node : 804577 +Grid : Message : Average mflops/s per call per node : 824356 +Grid : Message : Average mflops/s per call per node : 668566 +Grid : Message : Average mflops/s per call per node (full): 315740 +Grid : Message : Average mflops/s per call per node (full): 439670 +Grid : Message : Average mflops/s per call per node (full): 446852 +Grid : Message : Average mflops/s per call per node (full): 304567 +Grid : Message : Stencil 13.6138 GB/s per node +Grid : Message : Stencil 14.3006 GB/s per node +Grid : Message : Stencil 18.8566 GB/s per node +Grid : Message : Stencil 12.6303 GB/s per node +Grid : Message : Average mflops/s per call per node : 665029 +Grid : Message : Average mflops/s per call per node : 806340 +Grid : Message : Average mflops/s per call per node : 824355 +Grid : Message : Average mflops/s per call per node : 668042 +Grid : Message : Average mflops/s per call per node (full): 316228 +Grid : Message : Average mflops/s per call per node (full): 404877 +Grid : Message : Average mflops/s per call per node (full): 450779 +Grid : Message : Average mflops/s per call per node (full): 305727 +Grid : Message : Stencil 12.594 GB/s per node +Grid : Message : Stencil 16.5838 GB/s per node +Grid : Message : Stencil 17.822 GB/s per node +Grid : Message : Stencil 12.2811 GB/s per node +Grid : Message : Average mflops/s per call per node : 665494 +Grid : Message : Average mflops/s per call per node : 803374 +Grid : Message : Average mflops/s per call per node : 825090 +Grid : Message : Average mflops/s per call per node : 667290 +Grid : Message : Average mflops/s per call per node (full): 311572 +Grid : Message : Average mflops/s per call per node (full): 435649 +Grid : Message : Average mflops/s per call per node (full): 444993 +Grid : Message : Average mflops/s per call per node (full): 304229 +Grid : Message : Stencil 12.9542 GB/s per node +Grid : Message : Stencil 14.1127 GB/s per node +Grid : Message : Stencil 18.8473 GB/s per node +Grid : Message : Stencil 14.0909 GB/s per node +Grid : Message : Average mflops/s per call per node : 663959 +Grid : Message : Average mflops/s per call per node : 807391 +Grid : Message : Average mflops/s per call per node : 822112 +Grid : Message : Average mflops/s per call per node : 662313 +Grid : Message : Average mflops/s per call per node (full): 313998 +Grid : Message : Average mflops/s per call per node (full): 
400634 +Grid : Message : Average mflops/s per call per node (full): 450565 +Grid : Message : Average mflops/s per call per node (full): 305393 +Grid : Message : Stencil 13.4408 GB/s per node +Grid : Message : Stencil 17.5697 GB/s per node +Grid : Message : Stencil 17.3413 GB/s per node +Grid : Message : Stencil 13.1107 GB/s per node +Grid : Message : Average mflops/s per call per node : 665884 +Grid : Message : Average mflops/s per call per node : 805244 +Grid : Message : Average mflops/s per call per node : 822770 +Grid : Message : Average mflops/s per call per node : 663204 +Grid : Message : Average mflops/s per call per node (full): 316076 +Grid : Message : Average mflops/s per call per node (full): 440380 +Grid : Message : Average mflops/s per call per node (full): 444103 +Grid : Message : Average mflops/s per call per node (full): 304950 +Grid : Message : Stencil 13.5491 GB/s per node +Grid : Message : Stencil 16.4245 GB/s per node +Grid : Message : Stencil 17.5451 GB/s per node +Grid : Message : Stencil 12.533 GB/s per node +Grid : Message : Average mflops/s per call per node : 666679 +Grid : Message : Average mflops/s per call per node : 801902 +Grid : Message : Average mflops/s per call per node : 822733 +Grid : Message : Average mflops/s per call per node : 667394 +Grid : Message : Average mflops/s per call per node (full): 317129 +Grid : Message : Average mflops/s per call per node (full): 434095 +Grid : Message : Average mflops/s per call per node (full): 443780 +Grid : Message : Average mflops/s per call per node (full): 305218 +Grid : Message : Stencil 13.6534 GB/s per node +Grid : Message : Stencil 13.8195 GB/s per node +Grid : Message : Stencil 17.2644 GB/s per node +Grid : Message : Stencil 12.3142 GB/s per node +Grid : Message : Average mflops/s per call per node : 667011 +Grid : Message : Average mflops/s per call per node : 810067 +Grid : Message : Average mflops/s per call per node : 821524 +Grid : Message : Average mflops/s per call per node : 671276 +Grid : Message : Average mflops/s per call per node (full): 317031 +Grid : Message : Average mflops/s per call per node (full): 396478 +Grid : Message : Average mflops/s per call per node (full): 443169 +Grid : Message : Average mflops/s per call per node (full): 303807 +Grid : Message : Stencil 13.3901 GB/s per node +Grid : Message : Stencil 18.0472 GB/s per node +Grid : Message : Stencil 17.2049 GB/s per node +Grid : Message : Stencil 13.2758 GB/s per node +Grid : Message : Average mflops/s per call per node : 665144 +Grid : Message : Average mflops/s per call per node : 798251 +Grid : Message : Average mflops/s per call per node : 820132 +Grid : Message : Average mflops/s per call per node : 667648 +Grid : Message : Average mflops/s per call per node (full): 316090 +Grid : Message : Average mflops/s per call per node (full): 437012 +Grid : Message : Average mflops/s per call per node (full): 438289 +Grid : Message : Average mflops/s per call per node (full): 306613 +Grid : Message : Stencil 13.1945 GB/s per node +Grid : Message : Stencil 17.1718 GB/s per node +Grid : Message : Stencil 17.3392 GB/s per node +Grid : Message : Stencil 11.9572 GB/s per node +Grid : Message : Average mflops/s per call per node : 667084 +Grid : Message : Average mflops/s per call per node : 796989 +Grid : Message : Average mflops/s per call per node : 824578 +Grid : Message : Average mflops/s per call per node : 666011 +Grid : Message : Average mflops/s per call per node (full): 316372 +Grid : Message : Average mflops/s per call per node 
(full): 435100 +Grid : Message : Average mflops/s per call per node (full): 443137 +Grid : Message : Average mflops/s per call per node (full): 301185 +Grid : Message : Stencil 12.8059 GB/s per node +Grid : Message : Stencil 16.412 GB/s per node +Grid : Message : Stencil 17.7064 GB/s per node +Grid : Message : Stencil 12.3333 GB/s per node +Grid : Message : Average mflops/s per call per node : 669057 +Grid : Message : Average mflops/s per call per node : 803586 +Grid : Message : Average mflops/s per call per node : 823735 +Grid : Message : Average mflops/s per call per node : 668583 +Grid : Message : Average mflops/s per call per node (full): 315115 +Grid : Message : Average mflops/s per call per node (full): 434082 +Grid : Message : Average mflops/s per call per node (full): 445782 +Grid : Message : Average mflops/s per call per node (full): 303832 +Grid : Message : Stencil 12.9454 GB/s per node +Grid : Message : Stencil 9.35683 GB/s per node +Grid : Message : Stencil 17.5506 GB/s per node +Grid : Message : Stencil 12.7689 GB/s per node +Grid : Message : Average mflops/s per call per node : 663821 +Grid : Message : Average mflops/s per call per node : 811170 +Grid : Message : Average mflops/s per call per node : 818730 +Grid : Message : Average mflops/s per call per node : 662393 +Grid : Message : Average mflops/s per call per node (full): 313741 +Grid : Message : Average mflops/s per call per node (full): 301128 +Grid : Message : Average mflops/s per call per node (full): 444151 +Grid : Message : Average mflops/s per call per node (full): 305199 +Grid : Message : Stencil 13.8853 GB/s per node +Grid : Message : Stencil 17.2209 GB/s per node +Grid : Message : Stencil 17.2958 GB/s per node +Grid : Message : Stencil 13.6379 GB/s per node +Grid : Message : Average mflops/s per call per node : 667527 +Grid : Message : Average mflops/s per call per node : 799978 +Grid : Message : Average mflops/s per call per node : 827915 +Grid : Message : Average mflops/s per call per node : 666053 +Grid : Message : Average mflops/s per call per node (full): 313304 +Grid : Message : Average mflops/s per call per node (full): 437343 +Grid : Message : Average mflops/s per call per node (full): 443923 +Grid : Message : Average mflops/s per call per node (full): 307158 +Grid : Message : Stencil 13.6277 GB/s per node +Grid : Message : Stencil 16.4862 GB/s per node +Grid : Message : Stencil 18.3482 GB/s per node +Grid : Message : Stencil 12.4 GB/s per node +Grid : Message : Average mflops/s per call per node : 664650 +Grid : Message : Average mflops/s per call per node : 803574 +Grid : Message : Average mflops/s per call per node : 818372 +Grid : Message : Average mflops/s per call per node : 661592 +Grid : Message : Average mflops/s per call per node (full): 316026 +Grid : Message : Average mflops/s per call per node (full): 434754 +Grid : Message : Average mflops/s per call per node (full): 445993 +Grid : Message : Average mflops/s per call per node (full): 302797 +Grid : Message : Stencil 12.6584 GB/s per node +Grid : Message : Stencil 14.6628 GB/s per node +Grid : Message : Stencil 17.4541 GB/s per node +Grid : Message : Stencil 12.0402 GB/s per node +Grid : Message : Average mflops/s per call per node : 666614 +Grid : Message : Average mflops/s per call per node : 808074 +Grid : Message : Average mflops/s per call per node : 820717 +Grid : Message : Average mflops/s per call per node : 670324 +Grid : Message : Average mflops/s per call per node (full): 312049 +Grid : Message : Average mflops/s per call per 
node (full): 410942 +Grid : Message : Average mflops/s per call per node (full): 445355 +Grid : Message : Average mflops/s per call per node (full): 303320 +Grid : Message : Stencil 12.9024 GB/s per node +Grid : Message : Stencil 16.444 GB/s per node +Grid : Message : Stencil 17.3399 GB/s per node +Grid : Message : Stencil 14.0399 GB/s per node +Grid : Message : Average mflops/s per call per node : 665405 +Grid : Message : Average mflops/s per call per node : 808103 +Grid : Message : Average mflops/s per call per node : 821609 +Grid : Message : Average mflops/s per call per node : 665294 +Grid : Message : Average mflops/s per call per node (full): 313606 +Grid : Message : Average mflops/s per call per node (full): 434873 +Grid : Message : Average mflops/s per call per node (full): 444264 +Grid : Message : Average mflops/s per call per node (full): 306771 +Grid : Message : Stencil 13.9308 GB/s per node +Grid : Message : Stencil 17.4158 GB/s per node +Grid : Message : Stencil 18.4423 GB/s per node +Grid : Message : Stencil 12.855 GB/s per node +Grid : Message : Average mflops/s per call per node : 665272 +Grid : Message : Average mflops/s per call per node : 797460 +Grid : Message : Average mflops/s per call per node : 823126 +Grid : Message : Average mflops/s per call per node : 662335 +Grid : Message : Average mflops/s per call per node (full): 314975 +Grid : Message : Average mflops/s per call per node (full): 435467 +Grid : Message : Average mflops/s per call per node (full): 449253 +Grid : Message : Average mflops/s per call per node (full): 305424 +Grid : Message : Stencil 12.9925 GB/s per node +Grid : Message : Stencil 16.3539 GB/s per node +Grid : Message : Stencil 17.2369 GB/s per node +Grid : Message : Stencil 13.4937 GB/s per node +Grid : Message : Average mflops/s per call per node : 669940 +Grid : Message : Average mflops/s per call per node : 803238 +Grid : Message : Average mflops/s per call per node : 825965 +Grid : Message : Average mflops/s per call per node : 663766 +Grid : Message : Average mflops/s per call per node (full): 314761 +Grid : Message : Average mflops/s per call per node (full): 434155 +Grid : Message : Average mflops/s per call per node (full): 444234 +Grid : Message : Average mflops/s per call per node (full): 299831 +Grid : Message : Stencil 14.106 GB/s per node +Grid : Message : Stencil 16.6361 GB/s per node +Grid : Message : Stencil 17.0601 GB/s per node +Grid : Message : Stencil 12.1837 GB/s per node +Grid : Message : Average mflops/s per call per node : 660382 +Grid : Message : Average mflops/s per call per node : 804401 +Grid : Message : Average mflops/s per call per node : 823592 +Grid : Message : Average mflops/s per call per node : 670380 +Grid : Message : Average mflops/s per call per node (full): 315078 +Grid : Message : Average mflops/s per call per node (full): 432738 +Grid : Message : Average mflops/s per call per node (full): 441811 +Grid : Message : Average mflops/s per call per node (full): 303707 +Grid : Message : Stencil 12.9924 GB/s per node +Grid : Message : Stencil 17.4822 GB/s per node +Grid : Message : Stencil 16.9255 GB/s per node +Grid : Message : Stencil 12.131 GB/s per node +Grid : Message : Average mflops/s per call per node : 666019 +Grid : Message : Average mflops/s per call per node : 804316 +Grid : Message : Average mflops/s per call per node : 828336 +Grid : Message : Average mflops/s per call per node : 669150 +Grid : Message : Average mflops/s per call per node (full): 314690 +Grid : Message : Average mflops/s per call 
per node (full): 438882 +Grid : Message : Average mflops/s per call per node (full): 440236 +Grid : Message : Average mflops/s per call per node (full): 303263 +Grid : Message : Stencil 12.9286 GB/s per node +Grid : Message : Stencil 16.5674 GB/s per node +Grid : Message : Stencil 17.4028 GB/s per node +Grid : Message : Stencil 12.2927 GB/s per node +Grid : Message : Average mflops/s per call per node : 663144 +Grid : Message : Average mflops/s per call per node : 802347 +Grid : Message : Average mflops/s per call per node : 822576 +Grid : Message : Average mflops/s per call per node : 664923 +Grid : Message : Average mflops/s per call per node (full): 313191 +Grid : Message : Average mflops/s per call per node (full): 434915 +Grid : Message : Average mflops/s per call per node (full): 443819 +Grid : Message : Average mflops/s per call per node (full): 303684 +Grid : Message : Stencil 12.9843 GB/s per node +Grid : Message : Stencil 16.5282 GB/s per node +Grid : Message : Stencil 16.3675 GB/s per node +Grid : Message : Stencil 12.5887 GB/s per node +Grid : Message : Average mflops/s per call per node : 663888 +Grid : Message : Average mflops/s per call per node : 806737 +Grid : Message : Average mflops/s per call per node : 820169 +Grid : Message : Average mflops/s per call per node : 668303 +Grid : Message : Average mflops/s per call per node (full): 314496 +Grid : Message : Average mflops/s per call per node (full): 434932 +Grid : Message : Average mflops/s per call per node (full): 424715 +Grid : Message : Average mflops/s per call per node (full): 305269 +Grid : Message : Stencil 14.4789 GB/s per node +Grid : Message : Stencil 9.60314 GB/s per node +Grid : Message : Stencil 17.4542 GB/s per node +Grid : Message : Stencil 12.7538 GB/s per node +Grid : Message : Average mflops/s per call per node : 661383 +Grid : Message : Average mflops/s per call per node : 814741 +Grid : Message : Average mflops/s per call per node : 824469 +Grid : Message : Average mflops/s per call per node : 665327 +Grid : Message : Average mflops/s per call per node (full): 316536 +Grid : Message : Average mflops/s per call per node (full): 306930 +Grid : Message : Average mflops/s per call per node (full): 444745 +Grid : Message : Average mflops/s per call per node (full): 304862 +Grid : Message : Stencil 14.1053 GB/s per node +Grid : Message : Stencil 17.8723 GB/s per node +Grid : Message : Stencil 17.2534 GB/s per node +Grid : Message : Stencil 12.2174 GB/s per node +Grid : Message : Average mflops/s per call per node : 661647 +Grid : Message : Average mflops/s per call per node : 803667 +Grid : Message : Average mflops/s per call per node : 821296 +Grid : Message : Average mflops/s per call per node : 667759 +Grid : Message : Average mflops/s per call per node (full): 314634 +Grid : Message : Average mflops/s per call per node (full): 438959 +Grid : Message : Average mflops/s per call per node (full): 442505 +Grid : Message : Average mflops/s per call per node (full): 304194 +Grid : Message : Stencil 13.0328 GB/s per node +Grid : Message : Stencil 17.0942 GB/s per node +Grid : Message : Stencil 18.1395 GB/s per node +Grid : Message : Stencil 12.7188 GB/s per node +Grid : Message : Average mflops/s per call per node : 666217 +Grid : Message : Average mflops/s per call per node : 804184 +Grid : Message : Average mflops/s per call per node : 821901 +Grid : Message : Average mflops/s per call per node : 665614 +Grid : Message : Average mflops/s per call per node (full): 315688 +Grid : Message : Average mflops/s per 
call per node (full): 438573 +Grid : Message : Average mflops/s per call per node (full): 443813 +Grid : Message : Average mflops/s per call per node (full): 305679 +Grid : Message : Stencil 12.8203 GB/s per node +Grid : Message : Stencil 16.6636 GB/s per node +Grid : Message : Stencil 17.7953 GB/s per node +Grid : Message : Stencil 14.0385 GB/s per node +Grid : Message : Average mflops/s per call per node : 671915 +Grid : Message : Average mflops/s per call per node : 806958 +Grid : Message : Average mflops/s per call per node : 827955 +Grid : Message : Average mflops/s per call per node : 661803 +Grid : Message : Average mflops/s per call per node (full): 315523 +Grid : Message : Average mflops/s per call per node (full): 435924 +Grid : Message : Average mflops/s per call per node (full): 447476 +Grid : Message : Average mflops/s per call per node (full): 306720 +Grid : Message : Stencil 13.0128 GB/s per node +Grid : Message : Stencil 16.5301 GB/s per node +Grid : Message : Stencil 17.1159 GB/s per node +Grid : Message : Stencil 12.2497 GB/s per node +Grid : Message : Average mflops/s per call per node : 668199 +Grid : Message : Average mflops/s per call per node : 804835 +Grid : Message : Average mflops/s per call per node : 824454 +Grid : Message : Average mflops/s per call per node : 672602 +Grid : Message : Average mflops/s per call per node (full): 315815 +Grid : Message : Average mflops/s per call per node (full): 435739 +Grid : Message : Average mflops/s per call per node (full): 441843 +Grid : Message : Average mflops/s per call per node (full): 303662 +Grid : Message : Stencil 12.9599 GB/s per node +Grid : Message : Stencil 16.5674 GB/s per node +Grid : Message : Stencil 18.6981 GB/s per node +Grid : Message : Stencil 12.4395 GB/s per node +Grid : Message : Average mflops/s per call per node : 668982 +Grid : Message : Average mflops/s per call per node : 805101 +Grid : Message : Average mflops/s per call per node : 819369 +Grid : Message : Average mflops/s per call per node : 662323 +Grid : Message : Average mflops/s per call per node (full): 316156 +Grid : Message : Average mflops/s per call per node (full): 434381 +Grid : Message : Average mflops/s per call per node (full): 447002 +Grid : Message : Average mflops/s per call per node (full): 301518 +Grid : Message : Stencil 13.6958 GB/s per node +Grid : Message : Stencil 17.0557 GB/s per node +Grid : Message : Stencil 17.5598 GB/s per node +Grid : Message : Stencil 12.6534 GB/s per node +Grid : Message : Average mflops/s per call per node : 667324 +Grid : Message : Average mflops/s per call per node : 800973 +Grid : Message : Average mflops/s per call per node : 824928 +Grid : Message : Average mflops/s per call per node : 662691 +Grid : Message : Average mflops/s per call per node (full): 316434 +Grid : Message : Average mflops/s per call per node (full): 435862 +Grid : Message : Average mflops/s per call per node (full): 445696 +Grid : Message : Average mflops/s per call per node (full): 303382 +Grid : Message : Stencil 13.1224 GB/s per node +Grid : Message : Stencil 17.4906 GB/s per node +Grid : Message : Stencil 17.5314 GB/s per node +Grid : Message : Stencil 12.9159 GB/s per node +Grid : Message : Average mflops/s per call per node : 669689 +Grid : Message : Average mflops/s per call per node : 801373 +Grid : Message : Average mflops/s per call per node : 823561 +Grid : Message : Average mflops/s per call per node : 663627 +Grid : Message : Average mflops/s per call per node (full): 314853 +Grid : Message : Average 
mflops/s per call per node (full): 438798 +Grid : Message : Average mflops/s per call per node (full): 446137 +Grid : Message : Average mflops/s per call per node (full): 305165 +Grid : Message : Stencil 13.3894 GB/s per node +Grid : Message : Stencil 17.0121 GB/s per node +Grid : Message : Stencil 18.0152 GB/s per node +Grid : Message : Stencil 13.0537 GB/s per node +Grid : Message : Average mflops/s per call per node : 668239 +Grid : Message : Average mflops/s per call per node : 803511 +Grid : Message : Average mflops/s per call per node : 820475 +Grid : Message : Average mflops/s per call per node : 668274 +Grid : Message : Average mflops/s per call per node (full): 315111 +Grid : Message : Average mflops/s per call per node (full): 434183 +Grid : Message : Average mflops/s per call per node (full): 441978 +Grid : Message : Average mflops/s per call per node (full): 305313 +Grid : Message : Stencil 13.515 GB/s per node +Grid : Message : Stencil 10.2688 GB/s per node +Grid : Message : Stencil 17.7445 GB/s per node +Grid : Message : Stencil 13.5992 GB/s per node +Grid : Message : Average mflops/s per call per node : 667074 +Grid : Message : Average mflops/s per call per node : 810297 +Grid : Message : Average mflops/s per call per node : 827854 +Grid : Message : Average mflops/s per call per node : 666083 +Grid : Message : Average mflops/s per call per node (full): 314808 +Grid : Message : Average mflops/s per call per node (full): 323184 +Grid : Message : Average mflops/s per call per node (full): 447110 +Grid : Message : Average mflops/s per call per node (full): 307365 +Grid : Message : Stencil 12.6776 GB/s per node +Grid : Message : Stencil 17.9313 GB/s per node +Grid : Message : Stencil 18.4433 GB/s per node +Grid : Message : Stencil 11.8563 GB/s per node +Grid : Message : Average mflops/s per call per node : 668535 +Grid : Message : Average mflops/s per call per node : 806107 +Grid : Message : Average mflops/s per call per node : 824265 +Grid : Message : Average mflops/s per call per node : 668328 +Grid : Message : Average mflops/s per call per node (full): 314950 +Grid : Message : Average mflops/s per call per node (full): 440526 +Grid : Message : Average mflops/s per call per node (full): 449722 +Grid : Message : Average mflops/s per call per node (full): 300696 +Grid : Message : Stencil 15.4819 GB/s per node +Grid : Message : Stencil 16.8911 GB/s per node +Grid : Message : Stencil 20.2369 GB/s per node +Grid : Message : Stencil 12.9396 GB/s per node +Grid : Message : Average mflops/s per call per node : 659746 +Grid : Message : Average mflops/s per call per node : 805149 +Grid : Message : Average mflops/s per call per node : 825933 +Grid : Message : Average mflops/s per call per node : 668331 +Grid : Message : Average mflops/s per call per node (full): 316586 +Grid : Message : Average mflops/s per call per node (full): 437437 +Grid : Message : Average mflops/s per call per node (full): 452041 +Grid : Message : Average mflops/s per call per node (full): 306154 +Grid : Message : Stencil 13.272 GB/s per node +Grid : Message : Stencil 17.3632 GB/s per node +Grid : Message : Stencil 19.4385 GB/s per node +Grid : Message : Stencil 12.092 GB/s per node +Grid : Message : Average mflops/s per call per node : 663519 +Grid : Message : Average mflops/s per call per node : 805282 +Grid : Message : Average mflops/s per call per node : 817363 +Grid : Message : Average mflops/s per call per node : 671292 +Grid : Message : Average mflops/s per call per node (full): 314996 +Grid : Message : 
Average mflops/s per call per node (full): 437586 +Grid : Message : Average mflops/s per call per node (full): 450000 +Grid : Message : Average mflops/s per call per node (full): 302839 +Grid : Message : Stencil 13.126 GB/s per node +Grid : Message : Stencil 17.6082 GB/s per node +Grid : Message : Stencil 17.4959 GB/s per node +Grid : Message : Stencil 12.5887 GB/s per node +Grid : Message : Average mflops/s per call per node : 666639 +Grid : Message : Average mflops/s per call per node : 804402 +Grid : Message : Average mflops/s per call per node : 824793 +Grid : Message : Average mflops/s per call per node : 663605 +Grid : Message : Average mflops/s per call per node (full): 314758 +Grid : Message : Average mflops/s per call per node (full): 439559 +Grid : Message : Average mflops/s per call per node (full): 445120 +Grid : Message : Average mflops/s per call per node (full): 300752 +Grid : Message : Stencil 14.3865 GB/s per node +Grid : Message : Stencil 13.7631 GB/s per node +Grid : Message : Stencil 17.914 GB/s per node +Grid : Message : Stencil 13.5841 GB/s per node +Grid : Message : Average mflops/s per call per node : 661551 +Grid : Message : Average mflops/s per call per node : 813095 +Grid : Message : Average mflops/s per call per node : 817403 +Grid : Message : Average mflops/s per call per node : 664747 +Grid : Message : Average mflops/s per call per node (full): 315777 +Grid : Message : Average mflops/s per call per node (full): 395402 +Grid : Message : Average mflops/s per call per node (full): 446025 +Grid : Message : Average mflops/s per call per node (full): 307213 +Grid : Message : Stencil 13.154 GB/s per node +Grid : Message : Stencil 16.589 GB/s per node +Grid : Message : Stencil 17.3772 GB/s per node +Grid : Message : Stencil 12.6145 GB/s per node +Grid : Message : Average mflops/s per call per node : 668811 +Grid : Message : Average mflops/s per call per node : 808048 +Grid : Message : Average mflops/s per call per node : 823148 +Grid : Message : Average mflops/s per call per node : 663637 +Grid : Message : Average mflops/s per call per node (full): 316526 +Grid : Message : Average mflops/s per call per node (full): 436480 +Grid : Message : Average mflops/s per call per node (full): 445049 +Grid : Message : Average mflops/s per call per node (full): 304377 +Grid : Message : Stencil 13.6708 GB/s per node +Grid : Message : Stencil 16.7315 GB/s per node +Grid : Message : Stencil 19.2646 GB/s per node +Grid : Message : Stencil 12.3974 GB/s per node +Grid : Message : Average mflops/s per call per node : 664753 +Grid : Message : Average mflops/s per call per node : 804731 +Grid : Message : Average mflops/s per call per node : 825383 +Grid : Message : Average mflops/s per call per node : 670540 +Grid : Message : Average mflops/s per call per node (full): 314606 +Grid : Message : Average mflops/s per call per node (full): 436933 +Grid : Message : Average mflops/s per call per node (full): 450415 +Grid : Message : Average mflops/s per call per node (full): 305052 +Grid : Message : Stencil 14.1461 GB/s per node +Grid : Message : Stencil 17.0066 GB/s per node +Grid : Message : Stencil 17.0841 GB/s per node +Grid : Message : Stencil 12.017 GB/s per node +Grid : Message : Average mflops/s per call per node : 664273 +Grid : Message : Average mflops/s per call per node : 803468 +Grid : Message : Average mflops/s per call per node : 825833 +Grid : Message : Average mflops/s per call per node : 666602 +Grid : Message : Average mflops/s per call per node (full): 316771 +Grid : Message : 
Average mflops/s per call per node (full): 436762 +Grid : Message : Average mflops/s per call per node (full): 442465 +Grid : Message : Average mflops/s per call per node (full): 302696 +Grid : Message : Stencil 13.5987 GB/s per node +Grid : Message : Stencil 16.8984 GB/s per node +Grid : Message : Stencil 17.6919 GB/s per node +Grid : Message : Stencil 12.3808 GB/s per node +Grid : Message : Average mflops/s per call per node : 667584 +Grid : Message : Average mflops/s per call per node : 801722 +Grid : Message : Average mflops/s per call per node : 823101 +Grid : Message : Average mflops/s per call per node : 669154 +Grid : Message : Average mflops/s per call per node (full): 316780 +Grid : Message : Average mflops/s per call per node (full): 438047 +Grid : Message : Average mflops/s per call per node (full): 445333 +Grid : Message : Average mflops/s per call per node (full): 304774 +Grid : Message : Stencil 13.6975 GB/s per node +Grid : Message : Stencil 18.6487 GB/s per node +Grid : Message : Stencil 17.5858 GB/s per node +Grid : Message : Stencil 14.6524 GB/s per node +Grid : Message : Average mflops/s per call per node : 667923 +Grid : Message : Average mflops/s per call per node : 807162 +Grid : Message : Average mflops/s per call per node : 825609 +Grid : Message : Average mflops/s per call per node : 659829 +Grid : Message : Average mflops/s per call per node (full): 316104 +Grid : Message : Average mflops/s per call per node (full): 441285 +Grid : Message : Average mflops/s per call per node (full): 446761 +Grid : Message : Average mflops/s per call per node (full): 306605 +Grid : Message : Stencil 16.0286 GB/s per node +Grid : Message : Stencil 12.0197 GB/s per node +Grid : Message : Stencil 17.7871 GB/s per node +Grid : Message : Stencil 13.0547 GB/s per node +Grid : Message : Average mflops/s per call per node : 664227 +Grid : Message : Average mflops/s per call per node : 809418 +Grid : Message : Average mflops/s per call per node : 822235 +Grid : Message : Average mflops/s per call per node : 668032 +Grid : Message : Average mflops/s per call per node (full): 318100 +Grid : Message : Average mflops/s per call per node (full): 361966 +Grid : Message : Average mflops/s per call per node (full): 446497 +Grid : Message : Average mflops/s per call per node (full): 306591 +Grid : Message : Stencil 13.76 GB/s per node +Grid : Message : Stencil 17.5266 GB/s per node +Grid : Message : Stencil 15.9418 GB/s per node +Grid : Message : Stencil 12.6726 GB/s per node +Grid : Message : Average mflops/s per call per node : 668512 +Grid : Message : Average mflops/s per call per node : 804067 +Grid : Message : Average mflops/s per call per node : 823981 +Grid : Message : Average mflops/s per call per node : 662906 +Grid : Message : Average mflops/s per call per node (full): 315789 +Grid : Message : Average mflops/s per call per node (full): 438450 +Grid : Message : Average mflops/s per call per node (full): 418927 +Grid : Message : Average mflops/s per call per node (full): 302969 +Grid : Message : Stencil 13.7171 GB/s per node +Grid : Message : Stencil 16.5623 GB/s per node +Grid : Message : Stencil 17.1249 GB/s per node +Grid : Message : Stencil 13.0189 GB/s per node +Grid : Message : Average mflops/s per call per node : 668511 +Grid : Message : Average mflops/s per call per node : 806847 +Grid : Message : Average mflops/s per call per node : 822286 +Grid : Message : Average mflops/s per call per node : 665505 +Grid : Message : Average mflops/s per call per node (full): 316746 +Grid : 
Message : Average mflops/s per call per node (full): 435621 +Grid : Message : Average mflops/s per call per node (full): 442049 +Grid : Message : Average mflops/s per call per node (full): 305990 +Grid : Message : Stencil 13.4105 GB/s per node +Grid : Message : Stencil 11.3204 GB/s per node +Grid : Message : Stencil 17.3826 GB/s per node +Grid : Message : Stencil 12.9017 GB/s per node +Grid : Message : Average mflops/s per call per node : 668608 +Grid : Message : Average mflops/s per call per node : 809832 +Grid : Message : Average mflops/s per call per node : 824581 +Grid : Message : Average mflops/s per call per node : 668790 +Grid : Message : Average mflops/s per call per node (full): 315761 +Grid : Message : Average mflops/s per call per node (full): 346191 +Grid : Message : Average mflops/s per call per node (full): 444567 +Grid : Message : Average mflops/s per call per node (full): 305959 +Grid : Message : Stencil 12.7964 GB/s per node +Grid : Message : Stencil 16.375 GB/s per node +Grid : Message : Stencil 18.0128 GB/s per node +Grid : Message : Stencil 12.5739 GB/s per node +Grid : Message : Average mflops/s per call per node : 666980 +Grid : Message : Average mflops/s per call per node : 805908 +Grid : Message : Average mflops/s per call per node : 823029 +Grid : Message : Average mflops/s per call per node : 669658 +Grid : Message : Average mflops/s per call per node (full): 314223 +Grid : Message : Average mflops/s per call per node (full): 432713 +Grid : Message : Average mflops/s per call per node (full): 446420 +Grid : Message : Average mflops/s per call per node (full): 305992 +Grid : Message : Stencil 12.5729 GB/s per node +Grid : Message : Stencil 15.7072 GB/s per node +Grid : Message : Stencil 17.8107 GB/s per node +Grid : Message : Stencil 12.6056 GB/s per node +Grid : Message : Average mflops/s per call per node : 667120 +Grid : Message : Average mflops/s per call per node : 808876 +Grid : Message : Average mflops/s per call per node : 821364 +Grid : Message : Average mflops/s per call per node : 667564 +Grid : Message : Average mflops/s per call per node (full): 313410 +Grid : Message : Average mflops/s per call per node (full): 416363 +Grid : Message : Average mflops/s per call per node (full): 446932 +Grid : Message : Average mflops/s per call per node (full): 304678 +Grid : Message : Stencil 12.945 GB/s per node +Grid : Message : Stencil 16.4332 GB/s per node +Grid : Message : Stencil 18.5724 GB/s per node +Grid : Message : Stencil 13.4346 GB/s per node +Grid : Message : Average mflops/s per call per node : 664294 +Grid : Message : Average mflops/s per call per node : 808725 +Grid : Message : Average mflops/s per call per node : 823405 +Grid : Message : Average mflops/s per call per node : 665357 +Grid : Message : Average mflops/s per call per node (full): 313774 +Grid : Message : Average mflops/s per call per node (full): 434302 +Grid : Message : Average mflops/s per call per node (full): 448348 +Grid : Message : Average mflops/s per call per node (full): 306846 +Grid : Message : Stencil 12.766 GB/s per node +Grid : Message : Stencil 10.3715 GB/s per node +Grid : Message : Stencil 17.2152 GB/s per node +Grid : Message : Stencil 13.2096 GB/s per node +Grid : Message : Average mflops/s per call per node : 668238 +Grid : Message : Average mflops/s per call per node : 809538 +Grid : Message : Average mflops/s per call per node : 821750 +Grid : Message : Average mflops/s per call per node : 665493 +Grid : Message : Average mflops/s per call per node (full): 314801 +Grid 
: Message : Average mflops/s per call per node (full): 325253 +Grid : Message : Average mflops/s per call per node (full): 443233 +Grid : Message : Average mflops/s per call per node (full): 305861 +Grid : Message : Stencil 14.2297 GB/s per node +Grid : Message : Stencil 8.79049 GB/s per node +Grid : Message : Stencil 17.2774 GB/s per node +Grid : Message : Stencil 12.9415 GB/s per node +Grid : Message : Average mflops/s per call per node : 664583 +Grid : Message : Average mflops/s per call per node : 805974 +Grid : Message : Average mflops/s per call per node : 819344 +Grid : Message : Average mflops/s per call per node : 664789 +Grid : Message : Average mflops/s per call per node (full): 316928 +Grid : Message : Average mflops/s per call per node (full): 286856 +Grid : Message : Average mflops/s per call per node (full): 443311 +Grid : Message : Average mflops/s per call per node (full): 305485 +Grid : Message : Stencil 12.3881 GB/s per node +Grid : Message : Stencil 10.9088 GB/s per node +Grid : Message : Stencil 17.466 GB/s per node +Grid : Message : Stencil 12.9535 GB/s per node +Grid : Message : Average mflops/s per call per node : 666538 +Grid : Message : Average mflops/s per call per node : 813628 +Grid : Message : Average mflops/s per call per node : 826126 +Grid : Message : Average mflops/s per call per node : 661329 +Grid : Message : Average mflops/s per call per node (full): 311884 +Grid : Message : Average mflops/s per call per node (full): 337706 +Grid : Message : Average mflops/s per call per node (full): 444964 +Grid : Message : Average mflops/s per call per node (full): 305091 +Grid : Message : Stencil 13.237 GB/s per node +Grid : Message : Stencil 12.3816 GB/s per node +Grid : Message : Stencil 17.6251 GB/s per node +Grid : Message : Stencil 14.167 GB/s per node +Grid : Message : Average mflops/s per call per node : 667943 +Grid : Message : Average mflops/s per call per node : 810737 +Grid : Message : Average mflops/s per call per node : 822289 +Grid : Message : Average mflops/s per call per node : 659425 +Grid : Message : Average mflops/s per call per node (full): 315870 +Grid : Message : Average mflops/s per call per node (full): 369389 +Grid : Message : Average mflops/s per call per node (full): 446280 +Grid : Message : Average mflops/s per call per node (full): 305260 +Grid : Message : Stencil 14.7861 GB/s per node +Grid : Message : Stencil 16.559 GB/s per node +Grid : Message : Stencil 17.8089 GB/s per node +Grid : Message : Stencil 13.5257 GB/s per node +Grid : Message : Average mflops/s per call per node : 663141 +Grid : Message : Average mflops/s per call per node : 805069 +Grid : Message : Average mflops/s per call per node : 815702 +Grid : Message : Average mflops/s per call per node : 664549 +Grid : Message : Average mflops/s per call per node (full): 316511 +Grid : Message : Average mflops/s per call per node (full): 435230 +Grid : Message : Average mflops/s per call per node (full): 444902 +Grid : Message : Average mflops/s per call per node (full): 306236 +Grid : Message : Stencil 13.1419 GB/s per node +Grid : Message : Stencil 18.032 GB/s per node +Grid : Message : Stencil 17.8433 GB/s per node +Grid : Message : Stencil 12.9064 GB/s per node +Grid : Message : Average mflops/s per call per node : 666758 +Grid : Message : Average mflops/s per call per node : 807885 +Grid : Message : Average mflops/s per call per node : 821856 +Grid : Message : Average mflops/s per call per node : 660427 +Grid : Message : Average mflops/s per call per node (full): 314625 +Grid 
: Message : Average mflops/s per call per node (full): 440284
+Grid : Message : Average mflops/s per call per node (full): 446483
+Grid : Message : Average mflops/s per call per node (full): 304466
+Grid : Message : Stencil 12.881 GB/s per node
+Grid : Message : Stencil 17.0216 GB/s per node
+Grid : Message : Stencil 18.7172 GB/s per node
+Grid : Message : Stencil 13.37 GB/s per node
+Grid : Message : Average mflops/s per call per node : 667531
+Grid : Message : Average mflops/s per call per node : 801594
+Grid : Message : Average mflops/s per call per node : 822271
+Grid : Message : Average mflops/s per call per node : 663941
+Grid : Message : Average mflops/s per call per node (full): 314968
+Grid : Message : Average mflops/s per call per node (full): 435815
+Grid : Message : Average mflops/s per call per node (full): 449114
+Grid : Message : Average mflops/s per call per node (full): 306456
[... roughly a hundred further measurement blocks of the same shape elided.
Each block reports four "Stencil ... GB/s per node" bandwidths followed by the
four matching "Average mflops/s per call per node" rates and the four matching
"(full)" rates. Across the elided blocks the Stencil figures stay near 12-16,
16-18, 17-19 and 12-15 GB/s per node (with isolated dips to ~8-10 GB/s), the
plain mflops/s figures near 660k, 800k, 820k and 660k, and the "(full)"
figures near 310-318k, 428-442k, 435-450k and 300-308k (with occasional dips
as low as ~270k on the same blocks as the bandwidth dips). ...]
+Grid : Message : Stencil 14.0166 GB/s per node
+Grid : Message : Stencil 17.7287 GB/s per node
+Grid : Message : Stencil 17.3918 GB/s per node
+Grid : Message : Stencil 13.5967 GB/s per node
+Grid : Message : Average mflops/s per call per node : 662621
+Grid : Message : Average mflops/s per call per node : 803027
+Grid : Message : Average mflops/s per call per node : 828930
+Grid :
Message : Average mflops/s per call per node : 666495 +Grid : Message : Average mflops/s per call per node (full): 315597 +Grid : Message : Average mflops/s per call per node (full): 439267 +Grid : Message : Average mflops/s per call per node (full): 444967 +Grid : Message : Average mflops/s per call per node (full): 306897 +Grid : Message : Stencil 12.7254 GB/s per node +Grid : Message : Stencil 16.7791 GB/s per node +Grid : Message : Stencil 17.8821 GB/s per node +Grid : Message : Stencil 12.1478 GB/s per node +Grid : Message : Average mflops/s per call per node : 667639 +Grid : Message : Average mflops/s per call per node : 808509 +Grid : Message : Average mflops/s per call per node : 822811 +Grid : Message : Average mflops/s per call per node : 669762 +Grid : Message : Average mflops/s per call per node (full): 314173 +Grid : Message : Average mflops/s per call per node (full): 437440 +Grid : Message : Average mflops/s per call per node (full): 445486 +Grid : Message : Average mflops/s per call per node (full): 303879 +Grid : Message : Stencil 12.7592 GB/s per node +Grid : Message : Stencil 17.3833 GB/s per node +Grid : Message : Stencil 18.0241 GB/s per node +Grid : Message : Stencil 11.543 GB/s per node +Grid : Message : Average mflops/s per call per node : 666434 +Grid : Message : Average mflops/s per call per node : 800539 +Grid : Message : Average mflops/s per call per node : 824422 +Grid : Message : Average mflops/s per call per node : 664539 +Grid : Message : Average mflops/s per call per node (full): 314702 +Grid : Message : Average mflops/s per call per node (full): 436754 +Grid : Message : Average mflops/s per call per node (full): 438523 +Grid : Message : Average mflops/s per call per node (full): 295096 +Grid : Message : Stencil 13.6429 GB/s per node +Grid : Message : Stencil 17.1495 GB/s per node +Grid : Message : Stencil 17.9739 GB/s per node +Grid : Message : Stencil 12.6514 GB/s per node +Grid : Message : Average mflops/s per call per node : 666162 +Grid : Message : Average mflops/s per call per node : 803120 +Grid : Message : Average mflops/s per call per node : 820330 +Grid : Message : Average mflops/s per call per node : 665553 +Grid : Message : Average mflops/s per call per node (full): 315273 +Grid : Message : Average mflops/s per call per node (full): 436051 +Grid : Message : Average mflops/s per call per node (full): 445863 +Grid : Message : Average mflops/s per call per node (full): 305580 +Grid : Message : Stencil 13.2379 GB/s per node +Grid : Message : Stencil 17.1006 GB/s per node +Grid : Message : Stencil 17.7767 GB/s per node +Grid : Message : Stencil 13.0215 GB/s per node +Grid : Message : Average mflops/s per call per node : 664785 +Grid : Message : Average mflops/s per call per node : 800435 +Grid : Message : Average mflops/s per call per node : 823378 +Grid : Message : Average mflops/s per call per node : 668449 +Grid : Message : Average mflops/s per call per node (full): 314326 +Grid : Message : Average mflops/s per call per node (full): 438018 +Grid : Message : Average mflops/s per call per node (full): 446735 +Grid : Message : Average mflops/s per call per node (full): 305534 +Grid : Message : Stencil 13.5944 GB/s per node +Grid : Message : Stencil 17.0677 GB/s per node +Grid : Message : Stencil 17.4291 GB/s per node +Grid : Message : Stencil 12.6877 GB/s per node +Grid : Message : Average mflops/s per call per node : 666724 +Grid : Message : Average mflops/s per call per node : 802857 +Grid : Message : Average mflops/s per call per node : 822555 
+Grid : Message : Average mflops/s per call per node : 666741 +Grid : Message : Average mflops/s per call per node (full): 315873 +Grid : Message : Average mflops/s per call per node (full): 438425 +Grid : Message : Average mflops/s per call per node (full): 445125 +Grid : Message : Average mflops/s per call per node (full): 305291 +Grid : Message : Stencil 13.3329 GB/s per node +Grid : Message : Stencil 13.2059 GB/s per node +Grid : Message : Stencil 17.6162 GB/s per node +Grid : Message : Stencil 13.8388 GB/s per node +Grid : Message : Average mflops/s per call per node : 665180 +Grid : Message : Average mflops/s per call per node : 810879 +Grid : Message : Average mflops/s per call per node : 821220 +Grid : Message : Average mflops/s per call per node : 658602 +Grid : Message : Average mflops/s per call per node (full): 315310 +Grid : Message : Average mflops/s per call per node (full): 385291 +Grid : Message : Average mflops/s per call per node (full): 445718 +Grid : Message : Average mflops/s per call per node (full): 306487 +Grid : Message : Stencil 13.7408 GB/s per node +Grid : Message : Stencil 14.8181 GB/s per node +Grid : Message : Stencil 17.5145 GB/s per node +Grid : Message : Stencil 12.4369 GB/s per node +Grid : Message : Average mflops/s per call per node : 663746 +Grid : Message : Average mflops/s per call per node : 802457 +Grid : Message : Average mflops/s per call per node : 818522 +Grid : Message : Average mflops/s per call per node : 663289 +Grid : Message : Average mflops/s per call per node (full): 315225 +Grid : Message : Average mflops/s per call per node (full): 412019 +Grid : Message : Average mflops/s per call per node (full): 445551 +Grid : Message : Average mflops/s per call per node (full): 303850 +Grid : Message : Stencil 13.3968 GB/s per node +Grid : Message : Stencil 16.7506 GB/s per node +Grid : Message : Stencil 16.9505 GB/s per node +Grid : Message : Stencil 13.0396 GB/s per node +Grid : Message : Average mflops/s per call per node : 663215 +Grid : Message : Average mflops/s per call per node : 803452 +Grid : Message : Average mflops/s per call per node : 816860 +Grid : Message : Average mflops/s per call per node : 662333 +Grid : Message : Average mflops/s per call per node (full): 314571 +Grid : Message : Average mflops/s per call per node (full): 436462 +Grid : Message : Average mflops/s per call per node (full): 438271 +Grid : Message : Average mflops/s per call per node (full): 304220 +Grid : Message : Stencil 13.2516 GB/s per node +Grid : Message : Stencil 18.4683 GB/s per node +Grid : Message : Stencil 20.1626 GB/s per node +Grid : Message : Stencil 12.3178 GB/s per node +Grid : Message : Average mflops/s per call per node : 665033 +Grid : Message : Average mflops/s per call per node : 805690 +Grid : Message : Average mflops/s per call per node : 821338 +Grid : Message : Average mflops/s per call per node : 667382 +Grid : Message : Average mflops/s per call per node (full): 313801 +Grid : Message : Average mflops/s per call per node (full): 442080 +Grid : Message : Average mflops/s per call per node (full): 451370 +Grid : Message : Average mflops/s per call per node (full): 304378 +Grid : Message : Stencil 12.4761 GB/s per node +Grid : Message : Stencil 17.063 GB/s per node +Grid : Message : Stencil 17.2998 GB/s per node +Grid : Message : Stencil 13.0949 GB/s per node +Grid : Message : Average mflops/s per call per node : 669469 +Grid : Message : Average mflops/s per call per node : 807207 +Grid : Message : Average mflops/s per call per node : 
818619 +Grid : Message : Average mflops/s per call per node : 662204 +Grid : Message : Average mflops/s per call per node (full): 313152 +Grid : Message : Average mflops/s per call per node (full): 438548 +Grid : Message : Average mflops/s per call per node (full): 438583 +Grid : Message : Average mflops/s per call per node (full): 304705 +Grid : Message : Stencil 13.9202 GB/s per node +Grid : Message : Stencil 16.7782 GB/s per node +Grid : Message : Stencil 18.2511 GB/s per node +Grid : Message : Stencil 12.3493 GB/s per node +Grid : Message : Average mflops/s per call per node : 665295 +Grid : Message : Average mflops/s per call per node : 800954 +Grid : Message : Average mflops/s per call per node : 820727 +Grid : Message : Average mflops/s per call per node : 667125 +Grid : Message : Average mflops/s per call per node (full): 315384 +Grid : Message : Average mflops/s per call per node (full): 433980 +Grid : Message : Average mflops/s per call per node (full): 447066 +Grid : Message : Average mflops/s per call per node (full): 304322 +Grid : Message : Stencil 12.2009 GB/s per node +Grid : Message : Stencil 17.8894 GB/s per node +Grid : Message : Stencil 17.733 GB/s per node +Grid : Message : Stencil 12.1817 GB/s per node +Grid : Message : Average mflops/s per call per node : 669787 +Grid : Message : Average mflops/s per call per node : 799609 +Grid : Message : Average mflops/s per call per node : 818548 +Grid : Message : Average mflops/s per call per node : 667138 +Grid : Message : Average mflops/s per call per node (full): 311653 +Grid : Message : Average mflops/s per call per node (full): 438106 +Grid : Message : Average mflops/s per call per node (full): 444209 +Grid : Message : Average mflops/s per call per node (full): 304070 +Grid : Message : Stencil 14.1701 GB/s per node +Grid : Message : Stencil 17.9506 GB/s per node +Grid : Message : Stencil 18.1714 GB/s per node +Grid : Message : Stencil 12.1593 GB/s per node +Grid : Message : Average mflops/s per call per node : 664334 +Grid : Message : Average mflops/s per call per node : 804515 +Grid : Message : Average mflops/s per call per node : 814830 +Grid : Message : Average mflops/s per call per node : 662621 +Grid : Message : Average mflops/s per call per node (full): 316632 +Grid : Message : Average mflops/s per call per node (full): 441857 +Grid : Message : Average mflops/s per call per node (full): 444490 +Grid : Message : Average mflops/s per call per node (full): 302082 +Grid : Message : Stencil 14.012 GB/s per node +Grid : Message : Stencil 16.5864 GB/s per node +Grid : Message : Stencil 17.2479 GB/s per node +Grid : Message : Stencil 12.3873 GB/s per node +Grid : Message : Average mflops/s per call per node : 666742 +Grid : Message : Average mflops/s per call per node : 799540 +Grid : Message : Average mflops/s per call per node : 815256 +Grid : Message : Average mflops/s per call per node : 666099 +Grid : Message : Average mflops/s per call per node (full): 317252 +Grid : Message : Average mflops/s per call per node (full): 435178 +Grid : Message : Average mflops/s per call per node (full): 442294 +Grid : Message : Average mflops/s per call per node (full): 304068 +Grid : Message : Stencil 13.1105 GB/s per node +Grid : Message : Stencil 17.0605 GB/s per node +Grid : Message : Stencil 17.4726 GB/s per node +Grid : Message : Stencil 13.9293 GB/s per node +Grid : Message : Average mflops/s per call per node : 665709 +Grid : Message : Average mflops/s per call per node : 802462 +Grid : Message : Average mflops/s per call per 
node : 821252 +Grid : Message : Average mflops/s per call per node : 660691 +Grid : Message : Average mflops/s per call per node (full): 314770 +Grid : Message : Average mflops/s per call per node (full): 438294 +Grid : Message : Average mflops/s per call per node (full): 445239 +Grid : Message : Average mflops/s per call per node (full): 304619 +Grid : Message : Stencil 12.7894 GB/s per node +Grid : Message : Stencil 13.614 GB/s per node +Grid : Message : Stencil 18.0553 GB/s per node +Grid : Message : Stencil 14.1673 GB/s per node +Grid : Message : Average mflops/s per call per node : 670212 +Grid : Message : Average mflops/s per call per node : 810108 +Grid : Message : Average mflops/s per call per node : 822900 +Grid : Message : Average mflops/s per call per node : 663050 +Grid : Message : Average mflops/s per call per node (full): 315724 +Grid : Message : Average mflops/s per call per node (full): 392709 +Grid : Message : Average mflops/s per call per node (full): 447040 +Grid : Message : Average mflops/s per call per node (full): 307350 +Grid : Message : Stencil 12.6603 GB/s per node +Grid : Message : Stencil 17.4165 GB/s per node +Grid : Message : Stencil 18.1449 GB/s per node +Grid : Message : Stencil 11.9735 GB/s per node +Grid : Message : Average mflops/s per call per node : 668180 +Grid : Message : Average mflops/s per call per node : 803492 +Grid : Message : Average mflops/s per call per node : 821395 +Grid : Message : Average mflops/s per call per node : 666526 +Grid : Message : Average mflops/s per call per node (full): 314481 +Grid : Message : Average mflops/s per call per node (full): 438394 +Grid : Message : Average mflops/s per call per node (full): 446746 +Grid : Message : Average mflops/s per call per node (full): 301260 +Grid : Message : Stencil 14.5401 GB/s per node +Grid : Message : Stencil 18.8236 GB/s per node +Grid : Message : Stencil 17.4676 GB/s per node +Grid : Message : Stencil 12.6769 GB/s per node +Grid : Message : Average mflops/s per call per node : 662498 +Grid : Message : Average mflops/s per call per node : 806168 +Grid : Message : Average mflops/s per call per node : 820787 +Grid : Message : Average mflops/s per call per node : 667998 +Grid : Message : Average mflops/s per call per node (full): 316850 +Grid : Message : Average mflops/s per call per node (full): 442363 +Grid : Message : Average mflops/s per call per node (full): 444850 +Grid : Message : Average mflops/s per call per node (full): 304213 +Grid : Message : Stencil 14.4069 GB/s per node +Grid : Message : Stencil 17.4845 GB/s per node +Grid : Message : Stencil 18.0741 GB/s per node +Grid : Message : Stencil 11.8651 GB/s per node +Grid : Message : Average mflops/s per call per node : 667022 +Grid : Message : Average mflops/s per call per node : 806637 +Grid : Message : Average mflops/s per call per node : 822004 +Grid : Message : Average mflops/s per call per node : 669972 +Grid : Message : Average mflops/s per call per node (full): 317121 +Grid : Message : Average mflops/s per call per node (full): 439636 +Grid : Message : Average mflops/s per call per node (full): 439085 +Grid : Message : Average mflops/s per call per node (full): 300811 +Grid : Message : Stencil 13.3774 GB/s per node +Grid : Message : Stencil 11.3078 GB/s per node +Grid : Message : Stencil 17.8454 GB/s per node +Grid : Message : Stencil 12.793 GB/s per node +Grid : Message : Average mflops/s per call per node : 668798 +Grid : Message : Average mflops/s per call per node : 810858 +Grid : Message : Average mflops/s per call 
per node : 818351 +Grid : Message : Average mflops/s per call per node : 666968 +Grid : Message : Average mflops/s per call per node (full): 316429 +Grid : Message : Average mflops/s per call per node (full): 346497 +Grid : Message : Average mflops/s per call per node (full): 445644 +Grid : Message : Average mflops/s per call per node (full): 305367 +Grid : Message : Stencil 13.1528 GB/s per node +Grid : Message : Stencil 17.1553 GB/s per node +Grid : Message : Stencil 18.4991 GB/s per node +Grid : Message : Stencil 12.9203 GB/s per node +Grid : Message : Average mflops/s per call per node : 666186 +Grid : Message : Average mflops/s per call per node : 807320 +Grid : Message : Average mflops/s per call per node : 825001 +Grid : Message : Average mflops/s per call per node : 663941 +Grid : Message : Average mflops/s per call per node (full): 315095 +Grid : Message : Average mflops/s per call per node (full): 436414 +Grid : Message : Average mflops/s per call per node (full): 446638 +Grid : Message : Average mflops/s per call per node (full): 305184 +Grid : Message : Stencil 14.3512 GB/s per node +Grid : Message : Stencil 8.08582 GB/s per node +Grid : Message : Stencil 17.2098 GB/s per node +Grid : Message : Stencil 12.9761 GB/s per node +Grid : Message : Average mflops/s per call per node : 664921 +Grid : Message : Average mflops/s per call per node : 811951 +Grid : Message : Average mflops/s per call per node : 820638 +Grid : Message : Average mflops/s per call per node : 662247 +Grid : Message : Average mflops/s per call per node (full): 317454 +Grid : Message : Average mflops/s per call per node (full): 269204 +Grid : Message : Average mflops/s per call per node (full): 442623 +Grid : Message : Average mflops/s per call per node (full): 302247 +Grid : Message : Stencil 13.6523 GB/s per node +Grid : Message : Stencil 12.1863 GB/s per node +Grid : Message : Stencil 18.2226 GB/s per node +Grid : Message : Stencil 14.5407 GB/s per node +Grid : Message : Average mflops/s per call per node : 668120 +Grid : Message : Average mflops/s per call per node : 810323 +Grid : Message : Average mflops/s per call per node : 829405 +Grid : Message : Average mflops/s per call per node : 660045 +Grid : Message : Average mflops/s per call per node (full): 316597 +Grid : Message : Average mflops/s per call per node (full): 365500 +Grid : Message : Average mflops/s per call per node (full): 449915 +Grid : Message : Average mflops/s per call per node (full): 306907 +Grid : Message : Stencil 13.3278 GB/s per node +Grid : Message : Stencil 16.5323 GB/s per node +Grid : Message : Stencil 17.9966 GB/s per node +Grid : Message : Stencil 12.5225 GB/s per node +Grid : Message : Average mflops/s per call per node : 668482 +Grid : Message : Average mflops/s per call per node : 806322 +Grid : Message : Average mflops/s per call per node : 817287 +Grid : Message : Average mflops/s per call per node : 667975 +Grid : Message : Average mflops/s per call per node (full): 315330 +Grid : Message : Average mflops/s per call per node (full): 435216 +Grid : Message : Average mflops/s per call per node (full): 444432 +Grid : Message : Average mflops/s per call per node (full): 304925 +Grid : Message : Stencil 12.8598 GB/s per node +Grid : Message : Stencil 17.2892 GB/s per node +Grid : Message : Stencil 17.7201 GB/s per node +Grid : Message : Stencil 12.3424 GB/s per node +Grid : Message : Average mflops/s per call per node : 670194 +Grid : Message : Average mflops/s per call per node : 805940 +Grid : Message : Average mflops/s per 
call per node : 816860 +Grid : Message : Average mflops/s per call per node : 670715 +Grid : Message : Average mflops/s per call per node (full): 316157 +Grid : Message : Average mflops/s per call per node (full): 440925 +Grid : Message : Average mflops/s per call per node (full): 445275 +Grid : Message : Average mflops/s per call per node (full): 304670 +Grid : Message : Stencil 12.7423 GB/s per node +Grid : Message : Stencil 17.3792 GB/s per node +Grid : Message : Stencil 17.3747 GB/s per node +Grid : Message : Stencil 12.4431 GB/s per node +Grid : Message : Average mflops/s per call per node : 670648 +Grid : Message : Average mflops/s per call per node : 801905 +Grid : Message : Average mflops/s per call per node : 824759 +Grid : Message : Average mflops/s per call per node : 664315 +Grid : Message : Average mflops/s per call per node (full): 315426 +Grid : Message : Average mflops/s per call per node (full): 435399 +Grid : Message : Average mflops/s per call per node (full): 444680 +Grid : Message : Average mflops/s per call per node (full): 303482 +Grid : Message : Stencil 12.3573 GB/s per node +Grid : Message : Stencil 17.5181 GB/s per node +Grid : Message : Stencil 17.2293 GB/s per node +Grid : Message : Stencil 12.7231 GB/s per node +Grid : Message : Average mflops/s per call per node : 670000 +Grid : Message : Average mflops/s per call per node : 800657 +Grid : Message : Average mflops/s per call per node : 822182 +Grid : Message : Average mflops/s per call per node : 661835 +Grid : Message : Average mflops/s per call per node (full): 313166 +Grid : Message : Average mflops/s per call per node (full): 439074 +Grid : Message : Average mflops/s per call per node (full): 442154 +Grid : Message : Average mflops/s per call per node (full): 301674 +Grid : Message : Stencil 13.6493 GB/s per node +Grid : Message : Stencil 10.9089 GB/s per node +Grid : Message : Stencil 16.1128 GB/s per node +Grid : Message : Stencil 12.0041 GB/s per node +Grid : Message : Average mflops/s per call per node : 668159 +Grid : Message : Average mflops/s per call per node : 814123 +Grid : Message : Average mflops/s per call per node : 826384 +Grid : Message : Average mflops/s per call per node : 668706 +Grid : Message : Average mflops/s per call per node (full): 315568 +Grid : Message : Average mflops/s per call per node (full): 337505 +Grid : Message : Average mflops/s per call per node (full): 417961 +Grid : Message : Average mflops/s per call per node (full): 302061 +Grid : Message : Stencil 13.8407 GB/s per node +Grid : Message : Stencil 12.1634 GB/s per node +Grid : Message : Stencil 17.1616 GB/s per node +Grid : Message : Stencil 13.0852 GB/s per node +Grid : Message : Average mflops/s per call per node : 666780 +Grid : Message : Average mflops/s per call per node : 804701 +Grid : Message : Average mflops/s per call per node : 820652 +Grid : Message : Average mflops/s per call per node : 663281 +Grid : Message : Average mflops/s per call per node (full): 317003 +Grid : Message : Average mflops/s per call per node (full): 364820 +Grid : Message : Average mflops/s per call per node (full): 443175 +Grid : Message : Average mflops/s per call per node (full): 305023 +Grid : Message : Stencil 14.5455 GB/s per node +Grid : Message : Stencil 16.8268 GB/s per node +Grid : Message : Stencil 17.682 GB/s per node +Grid : Message : Stencil 12.4407 GB/s per node +Grid : Message : Average mflops/s per call per node : 665431 +Grid : Message : Average mflops/s per call per node : 805665 +Grid : Message : Average mflops/s 
per call per node : 828909 +Grid : Message : Average mflops/s per call per node : 668099 +Grid : Message : Average mflops/s per call per node (full): 317273 +Grid : Message : Average mflops/s per call per node (full): 437080 +Grid : Message : Average mflops/s per call per node (full): 446707 +Grid : Message : Average mflops/s per call per node (full): 305324 +Grid : Message : Stencil 12.7316 GB/s per node +Grid : Message : Stencil 17.028 GB/s per node +Grid : Message : Stencil 17.9791 GB/s per node +Grid : Message : Stencil 13.2229 GB/s per node +Grid : Message : Average mflops/s per call per node : 668528 +Grid : Message : Average mflops/s per call per node : 803356 +Grid : Message : Average mflops/s per call per node : 820340 +Grid : Message : Average mflops/s per call per node : 659327 +Grid : Message : Average mflops/s per call per node (full): 314400 +Grid : Message : Average mflops/s per call per node (full): 439496 +Grid : Message : Average mflops/s per call per node (full): 445632 +Grid : Message : Average mflops/s per call per node (full): 305293 +Grid : Message : Stencil 12.5028 GB/s per node +Grid : Message : Stencil 15.9845 GB/s per node +Grid : Message : Stencil 18.9131 GB/s per node +Grid : Message : Stencil 12.9499 GB/s per node +Grid : Message : Average mflops/s per call per node : 668710 +Grid : Message : Average mflops/s per call per node : 807360 +Grid : Message : Average mflops/s per call per node : 821502 +Grid : Message : Average mflops/s per call per node : 668162 +Grid : Message : Average mflops/s per call per node (full): 313671 +Grid : Message : Average mflops/s per call per node (full): 426907 +Grid : Message : Average mflops/s per call per node (full): 449433 +Grid : Message : Average mflops/s per call per node (full): 306237 +Grid : Message : Stencil 12.4208 GB/s per node +Grid : Message : Stencil 17.8274 GB/s per node +Grid : Message : Stencil 18.3711 GB/s per node +Grid : Message : Stencil 12.2259 GB/s per node +Grid : Message : Average mflops/s per call per node : 666707 +Grid : Message : Average mflops/s per call per node : 800849 +Grid : Message : Average mflops/s per call per node : 818819 +Grid : Message : Average mflops/s per call per node : 668127 +Grid : Message : Average mflops/s per call per node (full): 312720 +Grid : Message : Average mflops/s per call per node (full): 439674 +Grid : Message : Average mflops/s per call per node (full): 446184 +Grid : Message : Average mflops/s per call per node (full): 303664 +Grid : Message : Stencil 12.5386 GB/s per node +Grid : Message : Stencil 16.7961 GB/s per node +Grid : Message : Stencil 17.123 GB/s per node +Grid : Message : Stencil 12.187 GB/s per node +Grid : Message : Average mflops/s per call per node : 669033 +Grid : Message : Average mflops/s per call per node : 806477 +Grid : Message : Average mflops/s per call per node : 824230 +Grid : Message : Average mflops/s per call per node : 665084 +Grid : Message : Average mflops/s per call per node (full): 313818 +Grid : Message : Average mflops/s per call per node (full): 437820 +Grid : Message : Average mflops/s per call per node (full): 442023 +Grid : Message : Average mflops/s per call per node (full): 303308 +Grid : Message : Stencil 13.9051 GB/s per node +Grid : Message : Stencil 14.1793 GB/s per node +Grid : Message : Stencil 17.3172 GB/s per node +Grid : Message : Stencil 13.5939 GB/s per node +Grid : Message : Average mflops/s per call per node : 662731 +Grid : Message : Average mflops/s per call per node : 806139 +Grid : Message : Average 
mflops/s per call per node : 826644 +Grid : Message : Average mflops/s per call per node : 662587 +Grid : Message : Average mflops/s per call per node (full): 316606 +Grid : Message : Average mflops/s per call per node (full): 403966 +Grid : Message : Average mflops/s per call per node (full): 444894 +Grid : Message : Average mflops/s per call per node (full): 306687 +Grid : Message : Stencil 12.713 GB/s per node +Grid : Message : Stencil 17.2275 GB/s per node +Grid : Message : Stencil 17.3628 GB/s per node +Grid : Message : Stencil 12.5522 GB/s per node +Grid : Message : Average mflops/s per call per node : 667285 +Grid : Message : Average mflops/s per call per node : 799222 +Grid : Message : Average mflops/s per call per node : 825492 +Grid : Message : Average mflops/s per call per node : 669083 +Grid : Message : Average mflops/s per call per node (full): 313531 +Grid : Message : Average mflops/s per call per node (full): 437657 +Grid : Message : Average mflops/s per call per node (full): 444462 +Grid : Message : Average mflops/s per call per node (full): 305101 +Grid : Message : Stencil 13.1261 GB/s per node +Grid : Message : Stencil 17.3873 GB/s per node +Grid : Message : Stencil 18.1324 GB/s per node +Grid : Message : Stencil 13.4224 GB/s per node +Grid : Message : Average mflops/s per call per node : 662768 +Grid : Message : Average mflops/s per call per node : 805359 +Grid : Message : Average mflops/s per call per node : 818840 +Grid : Message : Average mflops/s per call per node : 664498 +Grid : Message : Average mflops/s per call per node (full): 314186 +Grid : Message : Average mflops/s per call per node (full): 437617 +Grid : Message : Average mflops/s per call per node (full): 446608 +Grid : Message : Average mflops/s per call per node (full): 306661 +Grid : Message : Stencil 13.5709 GB/s per node +Grid : Message : Stencil 16.8654 GB/s per node +Grid : Message : Stencil 17.8845 GB/s per node +Grid : Message : Stencil 13.5503 GB/s per node +Grid : Message : Average mflops/s per call per node : 659042 +Grid : Message : Average mflops/s per call per node : 808434 +Grid : Message : Average mflops/s per call per node : 819807 +Grid : Message : Average mflops/s per call per node : 661740 +Grid : Message : Average mflops/s per call per node (full): 314042 +Grid : Message : Average mflops/s per call per node (full): 439197 +Grid : Message : Average mflops/s per call per node (full): 445970 +Grid : Message : Average mflops/s per call per node (full): 305878 +Grid : Message : Stencil 14.4503 GB/s per node +Grid : Message : Stencil 16.376 GB/s per node +Grid : Message : Stencil 17.6563 GB/s per node +Grid : Message : Stencil 12.3184 GB/s per node +Grid : Message : Average mflops/s per call per node : 661059 +Grid : Message : Average mflops/s per call per node : 810627 +Grid : Message : Average mflops/s per call per node : 824265 +Grid : Message : Average mflops/s per call per node : 666180 +Grid : Message : Average mflops/s per call per node (full): 316759 +Grid : Message : Average mflops/s per call per node (full): 433890 +Grid : Message : Average mflops/s per call per node (full): 446667 +Grid : Message : Average mflops/s per call per node (full): 303785 +Grid : Message : Stencil 12.8266 GB/s per node +Grid : Message : Stencil 16.7177 GB/s per node +Grid : Message : Stencil 17.8434 GB/s per node +Grid : Message : Stencil 12.9931 GB/s per node +Grid : Message : Average mflops/s per call per node : 666756 +Grid : Message : Average mflops/s per call per node : 801430 +Grid : Message : 
Average mflops/s per call per node : 823620 +Grid : Message : Average mflops/s per call per node : 664019 +Grid : Message : Average mflops/s per call per node (full): 314420 +Grid : Message : Average mflops/s per call per node (full): 436518 +Grid : Message : Average mflops/s per call per node (full): 445382 +Grid : Message : Average mflops/s per call per node (full): 305582 +Grid : Message : Stencil 14.6546 GB/s per node +Grid : Message : Stencil 17.191 GB/s per node +Grid : Message : Stencil 17.9299 GB/s per node +Grid : Message : Stencil 12.0428 GB/s per node +Grid : Message : Average mflops/s per call per node : 664627 +Grid : Message : Average mflops/s per call per node : 796301 +Grid : Message : Average mflops/s per call per node : 822949 +Grid : Message : Average mflops/s per call per node : 668229 +Grid : Message : Average mflops/s per call per node (full): 316538 +Grid : Message : Average mflops/s per call per node (full): 435682 +Grid : Message : Average mflops/s per call per node (full): 436898 +Grid : Message : Average mflops/s per call per node (full): 301776 +Grid : Message : Stencil 13.7608 GB/s per node +Grid : Message : Stencil 12.5123 GB/s per node +Grid : Message : Stencil 17.7201 GB/s per node +Grid : Message : Stencil 12.7085 GB/s per node +Grid : Message : Average mflops/s per call per node : 666215 +Grid : Message : Average mflops/s per call per node : 810734 +Grid : Message : Average mflops/s per call per node : 820886 +Grid : Message : Average mflops/s per call per node : 664598 +Grid : Message : Average mflops/s per call per node (full): 315574 +Grid : Message : Average mflops/s per call per node (full): 371951 +Grid : Message : Average mflops/s per call per node (full): 445993 +Grid : Message : Average mflops/s per call per node (full): 304257 +Grid : Message : Stencil 13.5357 GB/s per node +Grid : Message : Stencil 17.5152 GB/s per node +Grid : Message : Stencil 17.4573 GB/s per node +Grid : Message : Stencil 13.2572 GB/s per node +Grid : Message : Average mflops/s per call per node : 663986 +Grid : Message : Average mflops/s per call per node : 803379 +Grid : Message : Average mflops/s per call per node : 820745 +Grid : Message : Average mflops/s per call per node : 664000 +Grid : Message : Average mflops/s per call per node (full): 315566 +Grid : Message : Average mflops/s per call per node (full): 438003 +Grid : Message : Average mflops/s per call per node (full): 440111 +Grid : Message : Average mflops/s per call per node (full): 306596 +Grid : Message : Stencil 14.4558 GB/s per node +Grid : Message : Stencil 10.5721 GB/s per node +Grid : Message : Stencil 17.7932 GB/s per node +Grid : Message : Stencil 14.146 GB/s per node +Grid : Message : Average mflops/s per call per node : 665281 +Grid : Message : Average mflops/s per call per node : 810654 +Grid : Message : Average mflops/s per call per node : 818240 +Grid : Message : Average mflops/s per call per node : 661650 +Grid : Message : Average mflops/s per call per node (full): 317179 +Grid : Message : Average mflops/s per call per node (full): 329822 +Grid : Message : Average mflops/s per call per node (full): 446330 +Grid : Message : Average mflops/s per call per node (full): 306634 +Grid : Message : Stencil 13.8995 GB/s per node +Grid : Message : Stencil 17.8563 GB/s per node +Grid : Message : Stencil 17.7542 GB/s per node +Grid : Message : Stencil 13.4172 GB/s per node +Grid : Message : Average mflops/s per call per node : 664829 +Grid : Message : Average mflops/s per call per node : 802048 +Grid : 
Message : Average mflops/s per call per node : 819825 +Grid : Message : Average mflops/s per call per node : 657813 +Grid : Message : Average mflops/s per call per node (full): 316146 +Grid : Message : Average mflops/s per call per node (full): 439566 +Grid : Message : Average mflops/s per call per node (full): 443082 +Grid : Message : Average mflops/s per call per node (full): 304548 +Grid : Message : Stencil 13.312 GB/s per node +Grid : Message : Stencil 16.6707 GB/s per node +Grid : Message : Stencil 18.6503 GB/s per node +Grid : Message : Stencil 13.0266 GB/s per node +Grid : Message : Average mflops/s per call per node : 667661 +Grid : Message : Average mflops/s per call per node : 800789 +Grid : Message : Average mflops/s per call per node : 823530 +Grid : Message : Average mflops/s per call per node : 662616 +Grid : Message : Average mflops/s per call per node (full): 315744 +Grid : Message : Average mflops/s per call per node (full): 435038 +Grid : Message : Average mflops/s per call per node (full): 447909 +Grid : Message : Average mflops/s per call per node (full): 303186 +Grid : Message : Stencil 12.6566 GB/s per node +Grid : Message : Stencil 17.8213 GB/s per node +Grid : Message : Stencil 17.9463 GB/s per node +Grid : Message : Stencil 13.4799 GB/s per node +Grid : Message : Average mflops/s per call per node : 666857 +Grid : Message : Average mflops/s per call per node : 803446 +Grid : Message : Average mflops/s per call per node : 814357 +Grid : Message : Average mflops/s per call per node : 658835 +Grid : Message : Average mflops/s per call per node (full): 314154 +Grid : Message : Average mflops/s per call per node (full): 440308 +Grid : Message : Average mflops/s per call per node (full): 444496 +Grid : Message : Average mflops/s per call per node (full): 305207 +Grid : Message : Stencil 12.8388 GB/s per node +Grid : Message : Stencil 17.0231 GB/s per node +Grid : Message : Stencil 18.4924 GB/s per node +Grid : Message : Stencil 12.3361 GB/s per node +Grid : Message : Average mflops/s per call per node : 669083 +Grid : Message : Average mflops/s per call per node : 804015 +Grid : Message : Average mflops/s per call per node : 821390 +Grid : Message : Average mflops/s per call per node : 667814 +Grid : Message : Average mflops/s per call per node (full): 315517 +Grid : Message : Average mflops/s per call per node (full): 434609 +Grid : Message : Average mflops/s per call per node (full): 447579 +Grid : Message : Average mflops/s per call per node (full): 303505 +Grid : Message : Stencil 13.7879 GB/s per node +Grid : Message : Stencil 16.5433 GB/s per node +Grid : Message : Stencil 17.908 GB/s per node +Grid : Message : Stencil 13.8103 GB/s per node +Grid : Message : Average mflops/s per call per node : 662509 +Grid : Message : Average mflops/s per call per node : 806916 +Grid : Message : Average mflops/s per call per node : 818770 +Grid : Message : Average mflops/s per call per node : 662643 +Grid : Message : Average mflops/s per call per node (full): 315036 +Grid : Message : Average mflops/s per call per node (full): 435654 +Grid : Message : Average mflops/s per call per node (full): 445632 +Grid : Message : Average mflops/s per call per node (full): 306895 +Grid : Message : Stencil 13.4876 GB/s per node +Grid : Message : Stencil 16.8819 GB/s per node +Grid : Message : Stencil 17.6484 GB/s per node +Grid : Message : Stencil 12.5259 GB/s per node +Grid : Message : Average mflops/s per call per node : 663824 +Grid : Message : Average mflops/s per call per node : 808188 
+Grid : Message : Average mflops/s per call per node : 823638 +Grid : Message : Average mflops/s per call per node : 662030 +Grid : Message : Average mflops/s per call per node (full): 314759 +Grid : Message : Average mflops/s per call per node (full): 438549 +Grid : Message : Average mflops/s per call per node (full): 445451 +Grid : Message : Average mflops/s per call per node (full): 303669 +Grid : Message : Stencil 12.3675 GB/s per node +Grid : Message : Stencil 16.4276 GB/s per node +Grid : Message : Stencil 17.5417 GB/s per node +Grid : Message : Stencil 12.1676 GB/s per node +Grid : Message : Average mflops/s per call per node : 669071 +Grid : Message : Average mflops/s per call per node : 806133 +Grid : Message : Average mflops/s per call per node : 820619 +Grid : Message : Average mflops/s per call per node : 667351 +Grid : Message : Average mflops/s per call per node (full): 312679 +Grid : Message : Average mflops/s per call per node (full): 434983 +Grid : Message : Average mflops/s per call per node (full): 444984 +Grid : Message : Average mflops/s per call per node (full): 303637 +Grid : Message : Stencil 13.0709 GB/s per node +Grid : Message : Stencil 17.5349 GB/s per node +Grid : Message : Stencil 17.7934 GB/s per node +Grid : Message : Stencil 12.1751 GB/s per node +Grid : Message : Average mflops/s per call per node : 668344 +Grid : Message : Average mflops/s per call per node : 807623 +Grid : Message : Average mflops/s per call per node : 824665 +Grid : Message : Average mflops/s per call per node : 666928 +Grid : Message : Average mflops/s per call per node (full): 315271 +Grid : Message : Average mflops/s per call per node (full): 438487 +Grid : Message : Average mflops/s per call per node (full): 445571 +Grid : Message : Average mflops/s per call per node (full): 303691 +Grid : Message : Stencil 12.9941 GB/s per node +Grid : Message : Stencil 14.9089 GB/s per node +Grid : Message : Stencil 17.9194 GB/s per node +Grid : Message : Stencil 12.0044 GB/s per node +Grid : Message : Average mflops/s per call per node : 664444 +Grid : Message : Average mflops/s per call per node : 808597 +Grid : Message : Average mflops/s per call per node : 823230 +Grid : Message : Average mflops/s per call per node : 665374 +Grid : Message : Average mflops/s per call per node (full): 314230 +Grid : Message : Average mflops/s per call per node (full): 414717 +Grid : Message : Average mflops/s per call per node (full): 447203 +Grid : Message : Average mflops/s per call per node (full): 302432 +Grid : Message : Stencil 12.563 GB/s per node +Grid : Message : Stencil 17.4419 GB/s per node +Grid : Message : Stencil 16.3727 GB/s per node +Grid : Message : Stencil 12.8744 GB/s per node +Grid : Message : Average mflops/s per call per node : 667188 +Grid : Message : Average mflops/s per call per node : 803660 +Grid : Message : Average mflops/s per call per node : 828617 +Grid : Message : Average mflops/s per call per node : 665791 +Grid : Message : Average mflops/s per call per node (full): 313854 +Grid : Message : Average mflops/s per call per node (full): 439680 +Grid : Message : Average mflops/s per call per node (full): 432089 +Grid : Message : Average mflops/s per call per node (full): 305079 +Grid : Message : Stencil 13.1201 GB/s per node +Grid : Message : Stencil 16.4825 GB/s per node +Grid : Message : Stencil 17.2141 GB/s per node +Grid : Message : Stencil 12.8028 GB/s per node +Grid : Message : Average mflops/s per call per node : 662617 +Grid : Message : Average mflops/s per call per node : 
805707 +Grid : Message : Average mflops/s per call per node : 819975 +Grid : Message : Average mflops/s per call per node : 663049 +Grid : Message : Average mflops/s per call per node (full): 314151 +Grid : Message : Average mflops/s per call per node (full): 432487 +Grid : Message : Average mflops/s per call per node (full): 441567 +Grid : Message : Average mflops/s per call per node (full): 300225 +Grid : Message : Stencil 13.494 GB/s per node +Grid : Message : Stencil 17.4044 GB/s per node +Grid : Message : Stencil 17.391 GB/s per node +Grid : Message : Stencil 13.3663 GB/s per node +Grid : Message : Average mflops/s per call per node : 659710 +Grid : Message : Average mflops/s per call per node : 804423 +Grid : Message : Average mflops/s per call per node : 819191 +Grid : Message : Average mflops/s per call per node : 659182 +Grid : Message : Average mflops/s per call per node (full): 313886 +Grid : Message : Average mflops/s per call per node (full): 439049 +Grid : Message : Average mflops/s per call per node (full): 443820 +Grid : Message : Average mflops/s per call per node (full): 304886 +Grid : Message : Stencil 13.3257 GB/s per node +Grid : Message : Stencil 16.5307 GB/s per node +Grid : Message : Stencil 17.5747 GB/s per node +Grid : Message : Stencil 14.698 GB/s per node +Grid : Message : Average mflops/s per call per node : 660623 +Grid : Message : Average mflops/s per call per node : 807067 +Grid : Message : Average mflops/s per call per node : 818778 +Grid : Message : Average mflops/s per call per node : 662805 +Grid : Message : Average mflops/s per call per node (full): 314423 +Grid : Message : Average mflops/s per call per node (full): 432619 +Grid : Message : Average mflops/s per call per node (full): 444622 +Grid : Message : Average mflops/s per call per node (full): 306808 +Grid : Message : Stencil 13.282 GB/s per node +Grid : Message : Stencil 16.7499 GB/s per node +Grid : Message : Stencil 18.5887 GB/s per node +Grid : Message : Stencil 13.7764 GB/s per node +Grid : Message : Average mflops/s per call per node : 663404 +Grid : Message : Average mflops/s per call per node : 804737 +Grid : Message : Average mflops/s per call per node : 820664 +Grid : Message : Average mflops/s per call per node : 666191 +Grid : Message : Average mflops/s per call per node (full): 314406 +Grid : Message : Average mflops/s per call per node (full): 435830 +Grid : Message : Average mflops/s per call per node (full): 448424 +Grid : Message : Average mflops/s per call per node (full): 306801 +Grid : Message : Stencil 13.2265 GB/s per node +Grid : Message : Stencil 17.4913 GB/s per node +Grid : Message : Stencil 17.7804 GB/s per node +Grid : Message : Stencil 12.8921 GB/s per node +Grid : Message : Average mflops/s per call per node : 661827 +Grid : Message : Average mflops/s per call per node : 804946 +Grid : Message : Average mflops/s per call per node : 823488 +Grid : Message : Average mflops/s per call per node : 667613 +Grid : Message : Average mflops/s per call per node (full): 314051 +Grid : Message : Average mflops/s per call per node (full): 438066 +Grid : Message : Average mflops/s per call per node (full): 446464 +Grid : Message : Average mflops/s per call per node (full): 305594 +Grid : Message : Stencil 13.2206 GB/s per node +Grid : Message : Stencil 17.0128 GB/s per node +Grid : Message : Stencil 18.401 GB/s per node +Grid : Message : Stencil 13.7508 GB/s per node +Grid : Message : Average mflops/s per call per node : 663232 +Grid : Message : Average mflops/s per call per node : 
798296 +Grid : Message : Average mflops/s per call per node : 822587 +Grid : Message : Average mflops/s per call per node : 660829 +Grid : Message : Average mflops/s per call per node (full): 314733 +Grid : Message : Average mflops/s per call per node (full): 437104 +Grid : Message : Average mflops/s per call per node (full): 447399 +Grid : Message : Average mflops/s per call per node (full): 306421 +Grid : Message : Stencil 12.5284 GB/s per node +Grid : Message : Stencil 16.9445 GB/s per node +Grid : Message : Stencil 17.8376 GB/s per node +Grid : Message : Stencil 12.8427 GB/s per node +Grid : Message : Average mflops/s per call per node : 668494 +Grid : Message : Average mflops/s per call per node : 807032 +Grid : Message : Average mflops/s per call per node : 823168 +Grid : Message : Average mflops/s per call per node : 663439 +Grid : Message : Average mflops/s per call per node (full): 313697 +Grid : Message : Average mflops/s per call per node (full): 437154 +Grid : Message : Average mflops/s per call per node (full): 446625 +Grid : Message : Average mflops/s per call per node (full): 303679 +Grid : Message : Stencil 13.193 GB/s per node +Grid : Message : Stencil 16.6136 GB/s per node +Grid : Message : Stencil 17.4497 GB/s per node +Grid : Message : Stencil 12.2637 GB/s per node +Grid : Message : Average mflops/s per call per node : 666149 +Grid : Message : Average mflops/s per call per node : 805048 +Grid : Message : Average mflops/s per call per node : 825446 +Grid : Message : Average mflops/s per call per node : 663333 +Grid : Message : Average mflops/s per call per node (full): 315673 +Grid : Message : Average mflops/s per call per node (full): 436055 +Grid : Message : Average mflops/s per call per node (full): 444547 +Grid : Message : Average mflops/s per call per node (full): 303758 +Grid : Message : Stencil 13.4605 GB/s per node +Grid : Message : Stencil 16.4581 GB/s per node +Grid : Message : Stencil 18.7226 GB/s per node +Grid : Message : Stencil 12.3411 GB/s per node +Grid : Message : Average mflops/s per call per node : 667912 +Grid : Message : Average mflops/s per call per node : 807641 +Grid : Message : Average mflops/s per call per node : 827192 +Grid : Message : Average mflops/s per call per node : 663584 +Grid : Message : Average mflops/s per call per node (full): 313717 +Grid : Message : Average mflops/s per call per node (full): 434972 +Grid : Message : Average mflops/s per call per node (full): 450158 +Grid : Message : Average mflops/s per call per node (full): 303868 +Grid : Message : Stencil 12.5401 GB/s per node +Grid : Message : Stencil 11.6789 GB/s per node +Grid : Message : Stencil 17.4623 GB/s per node +Grid : Message : Stencil 14.3254 GB/s per node +Grid : Message : Average mflops/s per call per node : 671632 +Grid : Message : Average mflops/s per call per node : 813861 +Grid : Message : Average mflops/s per call per node : 829044 +Grid : Message : Average mflops/s per call per node : 657813 +Grid : Message : Average mflops/s per call per node (full): 314374 +Grid : Message : Average mflops/s per call per node (full): 354116 +Grid : Message : Average mflops/s per call per node (full): 446129 +Grid : Message : Average mflops/s per call per node (full): 306680 +Grid : Message : Stencil 12.7752 GB/s per node +Grid : Message : Stencil 16.5636 GB/s per node +Grid : Message : Stencil 17.2492 GB/s per node +Grid : Message : Stencil 12.8813 GB/s per node +Grid : Message : Average mflops/s per call per node : 667719 +Grid : Message : Average mflops/s per call per 
node : 803560
[ ... repeated benchmark output elided: many further blocks of four "Grid : Message : Stencil <n> GB/s per node" lines (roughly 9-19 GB/s), four "Grid : Message : Average mflops/s per call per node" lines (roughly 660000-830000), and four "(full)" lines (roughly 300000-450000) ... ]
From d45cd7e677a8d31976ecf86f54f0937f23e58d30 Mon Sep 17 00:00:00 2001
From: paboyle
Date: Sun, 26 Mar 2017 09:24:26 -0400
Subject: [PATCH 081/101] Adding a simple read of NERSC test

---
 tests/IO/Test_nersc_read.cc | 112 ++++++++++++++++++++++++++++++++++++
 1 file changed, 112 insertions(+)
 create mode 100644 tests/IO/Test_nersc_read.cc

diff --git a/tests/IO/Test_nersc_read.cc b/tests/IO/Test_nersc_read.cc
new file mode 100644
index 00000000..9e9280a1
--- /dev/null
+++ b/tests/IO/Test_nersc_read.cc
@@ -0,0 +1,112 @@
+ /*************************************************************************************
+
+ Grid physics library, www.github.com/paboyle/Grid
+
+ Source file: ./tests/IO/Test_nersc_read.cc
+
+ Copyright (C) 2015
+
+Author: Azusa Yamaguchi
+Author: Peter Boyle
+Author: paboyle
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License along
+ with this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ + See the full license in the file "LICENSE" in the top level distribution directory + *************************************************************************************/ + /* END LEGAL */ +#include + +using namespace std; +using namespace Grid; +using namespace Grid::QCD; + + +int main (int argc, char ** argv) +{ + Grid_init(&argc,&argv); + + + std::vector simd_layout = GridDefaultSimd(4,vComplex::Nsimd()); + std::vector mpi_layout = GridDefaultMpi(); + std::vector latt_size = GridDefaultLatt(); + int orthodir=3; + int orthosz =latt_size[orthodir]; + + GridCartesian Fine(latt_size,simd_layout,mpi_layout); + + LatticeGaugeField Umu(&Fine); + std::vector U(4,&Fine); + + NerscField header; + std::string file("./ckpoint_lat"); + NerscIO::readConfiguration(Umu,header,file); + + for(int mu=0;mu(Umu,mu); + } + + // Painful ; fix syntactical niceness + LatticeComplex LinkTrace(&Fine); + LinkTrace=zero; + for(int mu=0;mu Plaq_T(orthosz); + sliceSum(Plaq,Plaq_T,Nd-1); + int Nt = Plaq_T.size(); + + TComplex Plaq_T_sum; + Plaq_T_sum=zero; + for(int t=0;t Date: Tue, 28 Mar 2017 02:28:04 -0400 Subject: [PATCH 082/101] IO improvements to fail on IO error --- lib/parallelIO/BinaryIO.h | 217 +++++++++++++++++++------------------- lib/parallelIO/NerscIO.h | 44 ++++---- 2 files changed, 126 insertions(+), 135 deletions(-) diff --git a/lib/parallelIO/BinaryIO.h b/lib/parallelIO/BinaryIO.h index e2af0545..154567fc 100644 --- a/lib/parallelIO/BinaryIO.h +++ b/lib/parallelIO/BinaryIO.h @@ -35,37 +35,27 @@ Author: paboyle #endif #include #include -// 64bit endian swap is a portability pain -#ifndef __has_builtin // Optional of course. -#define __has_builtin(x) 0 // Compatibility with non-clang compilers. -#endif -#if HAVE_DECL_BE64TOH -#undef Grid_ntohll -#define Grid_ntohll be64toh -#endif -#if HAVE_DECL_NTOHLL -#undef Grid_ntohll -#define Grid_ntohll ntohll -#endif - -#ifndef Grid_ntohll +inline uint32_t byte_reverse32(uint32_t f) { + f = ((f&0xFF)<<24) | ((f&0xFF00)<<8) | ((f&0xFF0000)>>8) | ((f&0xFF000000UL)>>24) ; + return f; +} +inline uint64_t byte_reverse64(uint64_t f) { + uint64_t g; + g = ((f&0xFF)<<24) | ((f&0xFF00)<<8) | ((f&0xFF0000)>>8) | ((f&0xFF000000UL)>>24) ; + g = g << 32; + f = f >> 32; + g|= ((f&0xFF)<<24) | ((f&0xFF00)<<8) | ((f&0xFF0000)>>8) | ((f&0xFF000000UL)>>24) ; + return g; +} #if BYTE_ORDER == BIG_ENDIAN - -#define Grid_ntohll(A) (A) - -#else - -#if __has_builtin(__builtin_bswap64) -#define Grid_ntohll(A) __builtin_bswap64(A) +inline uint64_t Grid_ntohll(uint64_t A) { return A; } #else -#error -#endif - -#endif - +inline uint64_t Grid_ntohll(uint64_t A) { + return byte_reverse64(A); +} #endif namespace Grid { @@ -195,7 +185,7 @@ class BinaryIO { std::vector site({x,y,z,t}); if (grid->IsBoss()) { - fin.read((char *)&file_object, sizeof(file_object)); + fin.read((char *)&file_object, sizeof(file_object));assert( fin.fail()==0); bytes += sizeof(file_object); if (ieee32big) be32toh_v((void *)&file_object, sizeof(file_object)); if (ieee32) le32toh_v((void *)&file_object, sizeof(file_object)); @@ -211,11 +201,13 @@ class BinaryIO { std::cout<Broadcast(0,(void *)&csum,sizeof(csum)); return csum; } template - static inline uint32_t writeObjectSerial(Lattice &Umu,std::string file,munger munge,int offset,const std::string & format) + static inline uint32_t writeObjectSerial(Lattice &Umu,std::string file,munger munge,int offset, + const std::string & format) { typedef typename vobj::scalar_object sobj; @@ -231,7 +223,7 @@ class BinaryIO { ////////////////////////////////////////////////// 
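The patch above replaces the macro-based Grid_ntohll with plain inline byte-reversal functions. The same bit manipulation can be checked in isolation; a minimal standalone sketch using only the standard library (the test constants are illustrative, not taken from Grid's test suite):

    #include <cstdint>
    #include <cassert>

    // 32-bit byte reversal, as introduced in BinaryIO.h above
    inline uint32_t byte_reverse32(uint32_t f) {
      return ((f&0xFF)<<24) | ((f&0xFF00)<<8) | ((f&0xFF0000)>>8) | ((f&0xFF000000UL)>>24);
    }
    // 64-bit reversal assembled from two reversed 32-bit halves, as in the patch
    inline uint64_t byte_reverse64(uint64_t f) {
      uint64_t g = byte_reverse32((uint32_t)(f & 0xFFFFFFFFULL)); // low half ends up high
      g = g << 32;
      g |= byte_reverse32((uint32_t)(f >> 32));                   // high half ends up low
      return g;
    }

    int main(void) {
      assert(byte_reverse32(0x11223344u)           == 0x44332211u);
      assert(byte_reverse64(0x1122334455667788ULL) == 0x8877665544332211ULL);
      return 0;
    }

On big-endian hosts Grid_ntohll is now the identity, and on little-endian hosts it falls through to byte_reverse64, so no compiler builtin is required.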
std::cout<< GridLogMessage<< "Serial write I/O "<< file<IsBoss() ) { fout.open(file,std::ios::binary|std::ios::out|std::ios::in); @@ -255,23 +247,24 @@ class BinaryIO { if ( grid->IsBoss() ) { - if(ieee32big) htobe32_v((void *)&file_object,sizeof(file_object)); - if(ieee32) htole32_v((void *)&file_object,sizeof(file_object)); - if(ieee64big) htobe64_v((void *)&file_object,sizeof(file_object)); - if(ieee64) htole64_v((void *)&file_object,sizeof(file_object)); + if(ieee32big) htobe32_v((void *)&file_object,sizeof(file_object)); + if(ieee32) htole32_v((void *)&file_object,sizeof(file_object)); + if(ieee64big) htobe64_v((void *)&file_object,sizeof(file_object)); + if(ieee64) htole64_v((void *)&file_object,sizeof(file_object)); - // NB could gather an xstrip as an optimisation. - fout.write((char *)&file_object,sizeof(file_object)); - bytes+=sizeof(file_object); + // NB could gather an xstrip as an optimisation. + fout.write((char *)&file_object,sizeof(file_object));assert( fout.fail()==0); + bytes+=sizeof(file_object); } }}}} timer.Stop(); std::cout<Broadcast(0,(void *)&csum,sizeof(csum)); return csum; } - + static inline uint32_t writeRNGSerial(GridSerialRNG &serial,GridParallelRNG ¶llel,std::string file,int offset) { typedef typename GridSerialRNG::RngStateType RngStateType; @@ -305,23 +298,23 @@ class BinaryIO { int l_idx=parallel.generator_idx(o_idx,i_idx); if( rank == grid->ThisRank() ){ - // std::cout << "rank" << rank<<" Getting state for index "<Broadcast(rank,(void *)&saved[0],bytes); if ( grid->IsBoss() ) { - Uint32Checksum((uint32_t *)&saved[0],bytes,csum); - fout.write((char *)&saved[0],bytes); + Uint32Checksum((uint32_t *)&saved[0],bytes,csum); + fout.write((char *)&saved[0],bytes);assert( fout.fail()==0); } - + } if ( grid->IsBoss() ) { serial.GetState(saved,0); Uint32Checksum((uint32_t *)&saved[0],bytes,csum); - fout.write((char *)&saved[0],bytes); + fout.write((char *)&saved[0],bytes);assert( fout.fail()==0); } grid->Broadcast(0,(void *)&csum,sizeof(csum)); return csum; @@ -355,20 +348,20 @@ class BinaryIO { int l_idx=parallel.generator_idx(o_idx,i_idx); if ( grid->IsBoss() ) { - fin.read((char *)&saved[0],bytes); - Uint32Checksum((uint32_t *)&saved[0],bytes,csum); + fin.read((char *)&saved[0],bytes);assert( fin.fail()==0); + Uint32Checksum((uint32_t *)&saved[0],bytes,csum); } grid->Broadcast(0,(void *)&saved[0],bytes); if( rank == grid->ThisRank() ){ - parallel.SetState(saved,l_idx); + parallel.SetState(saved,l_idx); } } if ( grid->IsBoss() ) { - fin.read((char *)&saved[0],bytes); + fin.read((char *)&saved[0],bytes);assert( fin.fail()==0); serial.SetState(saved,0); Uint32Checksum((uint32_t *)&saved[0],bytes,csum); } @@ -380,7 +373,8 @@ class BinaryIO { template - static inline uint32_t readObjectParallel(Lattice &Umu,std::string file,munger munge,int offset,const std::string &format) + static inline uint32_t readObjectParallel(Lattice &Umu,std::string file,munger munge,int offset, + const std::string &format) { typedef typename vobj::scalar_object sobj; @@ -415,15 +409,15 @@ class BinaryIO { if ( d == 0 ) parallel[d] = 0; if (parallel[d]) { - range[d] = grid->_ldimensions[d]; - start[d] = grid->_processor_coor[d]*range[d]; - ioproc[d]= grid->_processor_coor[d]; + range[d] = grid->_ldimensions[d]; + start[d] = grid->_processor_coor[d]*range[d]; + ioproc[d]= grid->_processor_coor[d]; } else { - range[d] = grid->_gdimensions[d]; - start[d] = 0; - ioproc[d]= 0; - - if ( grid->_processor_coor[d] != 0 ) IOnode = 0; + range[d] = grid->_gdimensions[d]; + start[d] = 0; + ioproc[d]= 0; 
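The readObjectParallel decomposition above follows one rule per dimension: a dimension flagged parallel is read in local slabs, one per processor coordinate, while an unflagged dimension is read in full by the ranks whose coordinate there is zero (everyone else drops out of the I/O node set). A standalone sketch of that rule, with hypothetical fixed sizes in place of Grid's grid objects:

    #include <cassert>
    #include <vector>

    struct IODecomp {
      std::vector<int> range, start; // extent and offset read by this rank
      bool IOnode;                   // does this rank touch the file at all?
    };

    IODecomp decompose(const std::vector<int> &gdims, const std::vector<int> &ldims,
                       const std::vector<int> &pcoor, const std::vector<int> &parallel) {
      IODecomp d{std::vector<int>(gdims.size()), std::vector<int>(gdims.size()), true};
      for (size_t i = 0; i < gdims.size(); i++) {
        if (parallel[i]) {             // split dimension: each rank reads its own slab
          d.range[i] = ldims[i];
          d.start[i] = pcoor[i] * ldims[i];
        } else {                       // serial dimension: only coordinate-0 ranks do I/O
          d.range[i] = gdims[i];
          d.start[i] = 0;
          if (pcoor[i] != 0) d.IOnode = false;
        }
      }
      return d;
    }

    int main(void) {
      // 8^3 x 16 global lattice, two ranks split along t; the second rank
      // owns the slab starting at t=8 and remains an I/O node.
      IODecomp d = decompose({8,8,8,16}, {8,8,8,8}, {0,0,0,1}, {0,0,0,1});
      assert(d.range[3] == 8 && d.start[3] == 8 && d.IOnode);
      return 0;
    }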
+ + if ( grid->_processor_coor[d] != 0 ) IOnode = 0; } slice_vol = slice_vol * range[d]; } @@ -434,9 +428,9 @@ class BinaryIO { std::cout<< std::dec ; std::cout<< GridLogMessage<< "Parallel read I/O to "<< file << " with " <_ndimension;d++){ - std::cout<< range[d]; - if( d< grid->_ndimension-1 ) - std::cout<< " x "; + std::cout<< range[d]; + if( d< grid->_ndimension-1 ) + std::cout<< " x "; } std::cout << std::endl; } @@ -472,8 +466,8 @@ class BinaryIO { Lexicographic::CoorFromIndex(tsite,tlex,range); for(int d=0;d_ldimensions[d]; // local site - gsite[d] = tsite[d]+start[d]; // global site + lsite[d] = tsite[d]%grid->_ldimensions[d]; // local site + gsite[d] = tsite[d]+start[d]; // global site } ///////////////////////// @@ -488,28 +482,28 @@ class BinaryIO { //////////////////////////////// if (myrank == iorank) { - fin.seekg(offset+g_idx*sizeof(fileObj)); - fin.read((char *)&fileObj,sizeof(fileObj)); - bytes+=sizeof(fileObj); + fin.seekg(offset+g_idx*sizeof(fileObj)); + fin.read((char *)&fileObj,sizeof(fileObj));assert( fin.fail()==0); + bytes+=sizeof(fileObj); - if(ieee32big) be32toh_v((void *)&fileObj,sizeof(fileObj)); - if(ieee32) le32toh_v((void *)&fileObj,sizeof(fileObj)); - if(ieee64big) be64toh_v((void *)&fileObj,sizeof(fileObj)); - if(ieee64) le64toh_v((void *)&fileObj,sizeof(fileObj)); - - munge(fileObj,siteObj,csum); - + if(ieee32big) be32toh_v((void *)&fileObj,sizeof(fileObj)); + if(ieee32) le32toh_v((void *)&fileObj,sizeof(fileObj)); + if(ieee64big) be64toh_v((void *)&fileObj,sizeof(fileObj)); + if(ieee64) le64toh_v((void *)&fileObj,sizeof(fileObj)); + + munge(fileObj,siteObj,csum); + } - + // Possibly do transport through pt2pt if ( rank != iorank ) { - if ( (myrank == rank) || (myrank==iorank) ) { - grid->SendRecvPacket((void *)&siteObj,(void *)&siteObj,iorank,rank,sizeof(siteObj)); - } + if ( (myrank == rank) || (myrank==iorank) ) { + grid->SendRecvPacket((void *)&siteObj,(void *)&siteObj,iorank,rank,sizeof(siteObj)); + } } // Poke at destination if ( myrank == rank ) { - pokeLocalSite(siteObj,Umu,lsite); + pokeLocalSite(siteObj,Umu,lsite); } grid->Barrier(); // necessary? 
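The site loop above relies on Lexicographic::CoorFromIndex to unpack a flat index into coordinates with dimension 0 running fastest. A standalone equivalent of the mapping and its inverse (a sketch; Grid's own helper is the authoritative version):

    #include <cassert>
    #include <vector>

    void CoorFromIndex(std::vector<int> &coor, int index, const std::vector<int> &dims) {
      coor.resize(dims.size());
      for (size_t d = 0; d < dims.size(); d++) {
        coor[d] = index % dims[d];  // dimension 0 varies fastest
        index   = index / dims[d];
      }
    }

    int IndexFromCoor(const std::vector<int> &coor, const std::vector<int> &dims) {
      int index = 0;
      for (int d = (int)dims.size() - 1; d >= 0; d--)
        index = index * dims[d] + coor[d];
      return index;
    }

    int main(void) {
      std::vector<int> dims{4, 4, 4, 8}, coor;
      for (int i = 0; i < 4 * 4 * 4 * 8; i++) { // round-trip every site
        CoorFromIndex(coor, i, dims);
        assert(IndexFromCoor(coor, dims) == i);
      }
      return 0;
    }

Sites are stored in the file in exactly this lexicographic order, which is why the seek target can be formed directly as offset plus the global index times the site record size.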
} @@ -520,7 +514,7 @@ class BinaryIO { timer.Stop(); std::cout< - static inline uint32_t writeObjectParallel(Lattice &Umu,std::string file,munger munge,int offset,const std::string & format) + static inline uint32_t writeObjectParallel(Lattice &Umu,std::string file,munger munge,int offset, + const std::string & format) { typedef typename vobj::scalar_object sobj; GridBase *grid = Umu._grid; @@ -558,15 +553,15 @@ class BinaryIO { if ( d!= grid->_ndimension-1 ) parallel[d] = 0; if (parallel[d]) { - range[d] = grid->_ldimensions[d]; - start[d] = grid->_processor_coor[d]*range[d]; - ioproc[d]= grid->_processor_coor[d]; + range[d] = grid->_ldimensions[d]; + start[d] = grid->_processor_coor[d]*range[d]; + ioproc[d]= grid->_processor_coor[d]; } else { - range[d] = grid->_gdimensions[d]; - start[d] = 0; - ioproc[d]= 0; + range[d] = grid->_gdimensions[d]; + start[d] = 0; + ioproc[d]= 0; - if ( grid->_processor_coor[d] != 0 ) IOnode = 0; + if ( grid->_processor_coor[d] != 0 ) IOnode = 0; } slice_vol = slice_vol * range[d]; @@ -577,13 +572,13 @@ class BinaryIO { grid->GlobalSum(tmp); std::cout<< GridLogMessage<< "Parallel write I/O from "<< file << " with " <_ndimension;d++){ - std::cout<< range[d]; - if( d< grid->_ndimension-1 ) - std::cout<< " x "; + std::cout<< range[d]; + if( d< grid->_ndimension-1 ) + std::cout<< " x "; } std::cout << std::endl; } - + GridStopWatch timer; timer.Start(); uint64_t bytes=0; @@ -619,8 +614,8 @@ class BinaryIO { Lexicographic::CoorFromIndex(tsite,tlex,range); for(int d=0;d_ldimensions[d]; // local site - gsite[d] = tsite[d]+start[d]; // global site + lsite[d] = tsite[d]%grid->_ldimensions[d]; // local site + gsite[d] = tsite[d]+start[d]; // global site } @@ -640,36 +635,36 @@ class BinaryIO { // Pair of nodes may need to do pt2pt send if ( rank != iorank ) { // comms is necessary - if ( (myrank == rank) || (myrank==iorank) ) { // and we have to do it - // Send to IOrank - grid->SendRecvPacket((void *)&siteObj,(void *)&siteObj,rank,iorank,sizeof(siteObj)); - } + if ( (myrank == rank) || (myrank==iorank) ) { // and we have to do it + // Send to IOrank + grid->SendRecvPacket((void *)&siteObj,(void *)&siteObj,rank,iorank,sizeof(siteObj)); + } } grid->Barrier(); // necessary? 
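In both the read and write loops, the point-to-point step pairs exactly two ranks per site: the rank owning the site in the lattice decomposition and the rank doing the file I/O for that slab; every other rank only participates in the barrier. An MPI analogue of the same pattern (a sketch, not Grid's implementation; SendRecvPacket presumably wraps a similar exchange):

    #include <cstdio>
    #include <mpi.h>

    // Move one site's worth of data from its owner to the I/O rank.
    void site_transport(void *buf, int nbytes, int owner, int iorank, int myrank) {
      if (owner == iorank) return; // datum is already where the file is written
      if (myrank == owner) {
        MPI_Send(buf, nbytes, MPI_BYTE, iorank, 0, MPI_COMM_WORLD);
      } else if (myrank == iorank) {
        MPI_Recv(buf, nbytes, MPI_BYTE, owner, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
      } // all other ranks do nothing
    }

    int main(int argc, char **argv) { // run with e.g. mpirun -np 2
      MPI_Init(&argc, &argv);
      int myrank;
      MPI_Comm_rank(MPI_COMM_WORLD, &myrank);
      double site = (myrank == 1) ? 3.14 : 0.0; // rank 1 "owns" the site
      site_transport(&site, sizeof(site), /*owner*/ 1, /*iorank*/ 0, myrank);
      if (myrank == 0) std::printf("I/O rank received %f\n", site);
      MPI_Finalize();
      return 0;
    }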
if (myrank == iorank) { - munge(siteObj,fileObj,csum); - - if(ieee32big) htobe32_v((void *)&fileObj,sizeof(fileObj)); - if(ieee32) htole32_v((void *)&fileObj,sizeof(fileObj)); - if(ieee64big) htobe64_v((void *)&fileObj,sizeof(fileObj)); - if(ieee64) htole64_v((void *)&fileObj,sizeof(fileObj)); - - fout.seekp(offset+g_idx*sizeof(fileObj)); - fout.write((char *)&fileObj,sizeof(fileObj)); - bytes+=sizeof(fileObj); + munge(siteObj,fileObj,csum); + + if(ieee32big) htobe32_v((void *)&fileObj,sizeof(fileObj)); + if(ieee32) htole32_v((void *)&fileObj,sizeof(fileObj)); + if(ieee64big) htobe64_v((void *)&fileObj,sizeof(fileObj)); + if(ieee64) htole64_v((void *)&fileObj,sizeof(fileObj)); + + fout.seekp(offset+g_idx*sizeof(fileObj)); + fout.write((char *)&fileObj,sizeof(fileObj));assert( fout.fail()==0); + bytes+=sizeof(fileObj); } } - + grid->GlobalSum(csum); grid->GlobalSum(bytes); - + timer.Stop(); std::cout<0) { std::string key=line.substr(0,eq); @@ -345,24 +347,24 @@ static inline void readConfiguration(Lattice > &Umu, // munger is a function of if ( header.data_type == std::string("4D_SU3_GAUGE") ) { if ( ieee32 || ieee32big ) { - // csum=BinaryIO::readObjectSerial, LorentzColour2x3F> - csum=BinaryIO::readObjectParallel, LorentzColour2x3F> + csum=BinaryIO::readObjectSerial, LorentzColour2x3F> + // csum=BinaryIO::readObjectParallel, LorentzColour2x3F> (Umu,file,Nersc3x2munger(), offset,format); } if ( ieee64 || ieee64big ) { - //csum=BinaryIO::readObjectSerial, LorentzColour2x3D> - csum=BinaryIO::readObjectParallel, LorentzColour2x3D> + csum=BinaryIO::readObjectSerial, LorentzColour2x3D> + // csum=BinaryIO::readObjectParallel, LorentzColour2x3D> (Umu,file,Nersc3x2munger(),offset,format); } } else if ( header.data_type == std::string("4D_SU3_GAUGE_3x3") ) { if ( ieee32 || ieee32big ) { - //csum=BinaryIO::readObjectSerial,LorentzColourMatrixF> - csum=BinaryIO::readObjectParallel,LorentzColourMatrixF> + csum=BinaryIO::readObjectSerial,LorentzColourMatrixF> + //csum=BinaryIO::readObjectParallel,LorentzColourMatrixF> (Umu,file,NerscSimpleMunger(),offset,format); } if ( ieee64 || ieee64big ) { - // csum=BinaryIO::readObjectSerial,LorentzColourMatrixD> - csum=BinaryIO::readObjectParallel,LorentzColourMatrixD> + csum=BinaryIO::readObjectSerial,LorentzColourMatrixD> + // csum=BinaryIO::readObjectParallel,LorentzColourMatrixD> (Umu,file,NerscSimpleMunger(),offset,format); } } else { @@ -371,12 +373,17 @@ static inline void readConfiguration(Lattice > &Umu, NerscStatistics(Umu,clone); + std::cout< @@ -416,19 +423,8 @@ static inline void writeConfiguration(Lattice > &Umu Nersc3x2unmunger munge; BinaryIO::Uint32Checksum(Umu, munge,header.checksum); offset = writeHeader(header,file); - csum=BinaryIO::writeObjectSerial(Umu,file,munge,offset,header.floating_point); - - std::string file1 = file+"para"; - int offset1 = writeHeader(header,file1); - int csum1=BinaryIO::writeObjectParallel(Umu,file1,munge,offset,header.floating_point); - //int csum1=BinaryIO::writeObjectSerial(Umu,file1,munge,offset,header.floating_point); - - - std::cout << GridLogMessage << " TESTING PARALLEL WRITE offsets " << offset1 << " "<< offset << std::endl; - std::cout << GridLogMessage << " TESTING PARALLEL WRITE csums " << csum1 << " "<(Umu,file,munge,offset,header.floating_point); + csum=BinaryIO::writeObjectParallel(Umu,file,munge,offset,header.floating_point); } else { header.floating_point = std::string("IEEE64BIG"); From 98f931827994f61b36d897dfaadb1816e558fece Mon Sep 17 00:00:00 2001 From: paboyle Date: Tue, 28 Mar 2017 23:16:04 
+0900 Subject: [PATCH 083/101] Build on AVX2 and MPI passing with clang++ --- lib/qcd/action/fermion/StaggeredKernels.cc | 4 +++- tests/core/Test_staggered5D.cc | 2 +- tests/core/Test_staggered5Dvec.cc | 9 +++++++++ 3 files changed, 13 insertions(+), 2 deletions(-) diff --git a/lib/qcd/action/fermion/StaggeredKernels.cc b/lib/qcd/action/fermion/StaggeredKernels.cc index 01a55efe..b6ec14c7 100644 --- a/lib/qcd/action/fermion/StaggeredKernels.cc +++ b/lib/qcd/action/fermion/StaggeredKernels.cc @@ -31,7 +31,7 @@ directory namespace Grid { namespace QCD { -int StaggeredKernelsStatic::Opt; +int StaggeredKernelsStatic::Opt= StaggeredKernelsStatic::OptGeneric; template StaggeredKernels::StaggeredKernels(const ImplParams &p) : Base(p){}; @@ -215,6 +215,7 @@ void StaggeredKernels::DhopSiteDag(StencilImpl &st, LebesgueOrder &lo, Dou } break; default: + std::cout<<"Oops Opt = "<::DhopSite(StencilImpl &st, LebesgueOrder &lo, Double } break; default: + std::cout<<"Oops Opt = "< Date: Tue, 28 Mar 2017 12:20:02 -0400 Subject: [PATCH 084/101] Shorten loop --- tests/core/Test_staggered5D.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/core/Test_staggered5D.cc b/tests/core/Test_staggered5D.cc index a7b00399..be31c438 100644 --- a/tests/core/Test_staggered5D.cc +++ b/tests/core/Test_staggered5D.cc @@ -153,7 +153,7 @@ int main (int argc, char ** argv) std::cout< Date: Tue, 28 Mar 2017 13:25:05 -0400 Subject: [PATCH 085/101] Better init --- .../fermion/ImprovedStaggeredFermion5D.cc | 33 +++++++++---------- 1 file changed, 15 insertions(+), 18 deletions(-) diff --git a/lib/qcd/action/fermion/ImprovedStaggeredFermion5D.cc b/lib/qcd/action/fermion/ImprovedStaggeredFermion5D.cc index 9d891e51..61a3c559 100644 --- a/lib/qcd/action/fermion/ImprovedStaggeredFermion5D.cc +++ b/lib/qcd/action/fermion/ImprovedStaggeredFermion5D.cc @@ -54,22 +54,22 @@ ImprovedStaggeredFermion5D::ImprovedStaggeredFermion5D(GaugeField &_Uthin, _FiveDimRedBlackGrid(&FiveDimRedBlackGrid), _FourDimGrid (&FourDimGrid), _FourDimRedBlackGrid(&FourDimRedBlackGrid), - Stencil (_FiveDimGrid,npoint,Even,directions,displacements), - StencilEven(_FiveDimRedBlackGrid,npoint,Even,directions,displacements), // source is Even - StencilOdd (_FiveDimRedBlackGrid,npoint,Odd ,directions,displacements), // source is Odd + Stencil (&FiveDimGrid,npoint,Even,directions,displacements), + StencilEven(&FiveDimRedBlackGrid,npoint,Even,directions,displacements), // source is Even + StencilOdd (&FiveDimRedBlackGrid,npoint,Odd ,directions,displacements), // source is Odd mass(_mass), c1(_c1), c2(_c2), u0(_u0), - Umu(_FourDimGrid), - UmuEven(_FourDimRedBlackGrid), - UmuOdd (_FourDimRedBlackGrid), - UUUmu(_FourDimGrid), - UUUmuEven(_FourDimRedBlackGrid), - UUUmuOdd(_FourDimRedBlackGrid), - _tmp(&FiveDimRedBlackGrid), - Lebesgue(_FourDimGrid), - LebesgueEvenOdd(_FourDimRedBlackGrid) + Umu(&FourDimGrid), + UmuEven(&FourDimRedBlackGrid), + UmuOdd (&FourDimRedBlackGrid), + UUUmu(&FourDimGrid), + UUUmuEven(&FourDimRedBlackGrid), + UUUmuOdd(&FourDimRedBlackGrid), + Lebesgue(&FourDimGrid), + LebesgueEvenOdd(&FourDimRedBlackGrid), + _tmp(&FiveDimRedBlackGrid) { // some assertions @@ -173,8 +173,7 @@ void ImprovedStaggeredFermion5D::DhopDir(const FermionField &in, FermionFi Compressor compressor; Stencil.HaloExchange(in,compressor); - PARALLEL_FOR_LOOP - for(int ss=0;ssoSites();ss++){ + parallel_for(int ss=0;ssoSites();ss++){ for(int s=0;s::DhopInternal(StencilImpl & st, LebesgueOr // Dhop takes the 4d grid from U, and makes a 5d index for fermion if 
(dag == DaggerYes) { - PARALLEL_FOR_LOOP - for (int ss = 0; ss < U._grid->oSites(); ss++) { + parallel_for (int ss = 0; ss < U._grid->oSites(); ss++) { int sU=ss; Kernels::DhopSiteDag(st, lo, U, UUU, st.CommBuf(), LLs, sU,in, out); } } else { - PARALLEL_FOR_LOOP - for (int ss = 0; ss < U._grid->oSites(); ss++) { + parallel_for (int ss = 0; ss < U._grid->oSites(); ss++) { int sU=ss; Kernels::DhopSite(st,lo,U,UUU,st.CommBuf(),LLs,sU,in,out); } From 81ead488501abd4f82239c37e8ce873df57dabb9 Mon Sep 17 00:00:00 2001 From: paboyle Date: Wed, 29 Mar 2017 04:39:52 -0400 Subject: [PATCH 086/101] Log any errors to a file --- tests/Test_cshift.cc | 18 ++++++++++++++---- 1 file changed, 14 insertions(+), 4 deletions(-) diff --git a/tests/Test_cshift.cc b/tests/Test_cshift.cc index e8e90076..e1dd0db8 100644 --- a/tests/Test_cshift.cc +++ b/tests/Test_cshift.cc @@ -61,7 +61,15 @@ int main (int argc, char ** argv) U=lex; } - + std::stringstream ss; + ss<<"error"; + for(int d=0;d 0){ - std::cerr<<"FAIL shift "<< shift<<" in dir "<< dir<<" ["< Date: Wed, 29 Mar 2017 04:43:55 -0400 Subject: [PATCH 087/101] Bug fix in MPI3 --- lib/communicator/Communicator_mpi3.cc | 20 +++++++++++++++++--- 1 file changed, 17 insertions(+), 3 deletions(-) diff --git a/lib/communicator/Communicator_mpi3.cc b/lib/communicator/Communicator_mpi3.cc index eac003ce..deb1ea99 100644 --- a/lib/communicator/Communicator_mpi3.cc +++ b/lib/communicator/Communicator_mpi3.cc @@ -67,7 +67,7 @@ std::vector CartesianCommunicator::ShmCommBufs; int CartesianCommunicator::NodeCount(void) { return GroupSize;}; -#undef FORCE_COMMS +#define FORCE_COMMS void *CartesianCommunicator::ShmBufferSelf(void) { return ShmCommBufs[ShmRank]; @@ -303,7 +303,7 @@ void CartesianCommunicator::Init(int *argc, char ***argv) { std::cout< Date: Wed, 29 Mar 2017 04:44:17 -0400 Subject: [PATCH 088/101] Verbose header print by default --- lib/parallelIO/NerscIO.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/parallelIO/NerscIO.h b/lib/parallelIO/NerscIO.h index 071fa01e..bbfed9d9 100644 --- a/lib/parallelIO/NerscIO.h +++ b/lib/parallelIO/NerscIO.h @@ -263,13 +263,13 @@ static inline int readHeader(std::string file,GridBase *grid, NerscField &field getline(fin,line); // read one line and insist is removeWhitespace(line); - std::cout << "* " << line << std::endl; + std::cout << GridLogMessage << "* " << line << std::endl; assert(line==std::string("BEGIN_HEADER")); do { getline(fin,line); // read one line - std::cout << "* "<0) { std::string key=line.substr(0,eq); From 417ec56ccaa7814dfaf8fd43e203e85e61bc0100 Mon Sep 17 00:00:00 2001 From: paboyle Date: Wed, 29 Mar 2017 05:45:33 -0400 Subject: [PATCH 089/101] Release candidate --- lib/communicator/Communicator_mpi3.cc | 6 ++-- lib/parallelIO/NerscIO.h | 46 ++++++++++++++++++++------- 2 files changed, 37 insertions(+), 15 deletions(-) diff --git a/lib/communicator/Communicator_mpi3.cc b/lib/communicator/Communicator_mpi3.cc index deb1ea99..7685768c 100644 --- a/lib/communicator/Communicator_mpi3.cc +++ b/lib/communicator/Communicator_mpi3.cc @@ -67,7 +67,7 @@ std::vector CartesianCommunicator::ShmCommBufs; int CartesianCommunicator::NodeCount(void) { return GroupSize;}; -#define FORCE_COMMS +#undef FORCE_COMMS void *CartesianCommunicator::ShmBufferSelf(void) { return ShmCommBufs[ShmRank]; @@ -484,13 +484,13 @@ CartesianCommunicator::CartesianCommunicator(const std::vector &processors) assert(coor[j] == _processor_coor[j]); } } - + /* std::cout << GridLogMessage<< " Lexicographic "< static inline 
void readConfiguration(Lattice > &Umu,NerscField& header,std::string file) @@ -347,25 +349,41 @@ static inline void readConfiguration(Lattice > &Umu, // munger is a function of if ( header.data_type == std::string("4D_SU3_GAUGE") ) { if ( ieee32 || ieee32big ) { - csum=BinaryIO::readObjectSerial, LorentzColour2x3F> - // csum=BinaryIO::readObjectParallel, LorentzColour2x3F> +#ifdef PARALLEL_READ + csum=BinaryIO::readObjectParallel, LorentzColour2x3F> (Umu,file,Nersc3x2munger(), offset,format); +#else + csum=BinaryIO::readObjectSerial, LorentzColour2x3F> + (Umu,file,Nersc3x2munger(), offset,format); +#endif } if ( ieee64 || ieee64big ) { - csum=BinaryIO::readObjectSerial, LorentzColour2x3D> - // csum=BinaryIO::readObjectParallel, LorentzColour2x3D> +#ifdef PARALLEL_READ + csum=BinaryIO::readObjectParallel, LorentzColour2x3D> (Umu,file,Nersc3x2munger(),offset,format); +#else + csum=BinaryIO::readObjectSerial, LorentzColour2x3D> + (Umu,file,Nersc3x2munger(),offset,format); +#endif } } else if ( header.data_type == std::string("4D_SU3_GAUGE_3x3") ) { if ( ieee32 || ieee32big ) { - csum=BinaryIO::readObjectSerial,LorentzColourMatrixF> - //csum=BinaryIO::readObjectParallel,LorentzColourMatrixF> +#ifdef PARALLEL_READ + csum=BinaryIO::readObjectParallel,LorentzColourMatrixF> (Umu,file,NerscSimpleMunger(),offset,format); +#else + csum=BinaryIO::readObjectSerial,LorentzColourMatrixF> + (Umu,file,NerscSimpleMunger(),offset,format); +#endif } if ( ieee64 || ieee64big ) { - csum=BinaryIO::readObjectSerial,LorentzColourMatrixD> - // csum=BinaryIO::readObjectParallel,LorentzColourMatrixD> +#ifdef PARALLEL_READ + csum=BinaryIO::readObjectParallel,LorentzColourMatrixD> (Umu,file,NerscSimpleMunger(),offset,format); +#else + csum=BinaryIO::readObjectSerial,LorentzColourMatrixD> + (Umu,file,NerscSimpleMunger(),offset,format); +#endif } } else { assert(0); @@ -423,8 +441,11 @@ static inline void writeConfiguration(Lattice > &Umu Nersc3x2unmunger munge; BinaryIO::Uint32Checksum(Umu, munge,header.checksum); offset = writeHeader(header,file); - // csum=BinaryIO::writeObjectSerial(Umu,file,munge,offset,header.floating_point); +#ifdef PARALLEL_WRITE csum=BinaryIO::writeObjectParallel(Umu,file,munge,offset,header.floating_point); +#else + csum=BinaryIO::writeObjectSerial(Umu,file,munge,offset,header.floating_point); +#endif } else { header.floating_point = std::string("IEEE64BIG"); @@ -432,8 +453,11 @@ static inline void writeConfiguration(Lattice > &Umu NerscSimpleUnmunger munge; BinaryIO::Uint32Checksum(Umu, munge,header.checksum); offset = writeHeader(header,file); - // csum=BinaryIO::writeObjectSerial(Umu,file,munge,offset,header.floating_point); +#ifdef PARALLEL_WRITE csum=BinaryIO::writeObjectParallel(Umu,file,munge,offset,header.floating_point); +#else + csum=BinaryIO::writeObjectSerial(Umu,file,munge,offset,header.floating_point); +#endif } std::cout< uint32_t csum=BinaryIO::readRNGSerial(serial,parallel,file,offset); - std::cerr<<" Csum "<< csum << " "<< header.checksum < Date: Wed, 29 Mar 2017 18:51:23 +0900 Subject: [PATCH 090/101] Small AVX512 asm ifdef patch --- lib/qcd/action/fermion/StaggeredKernelsAsm.cc | 3 +++ 1 file changed, 3 insertions(+) diff --git a/lib/qcd/action/fermion/StaggeredKernelsAsm.cc b/lib/qcd/action/fermion/StaggeredKernelsAsm.cc index 0c62b2a0..f95c7e28 100644 --- a/lib/qcd/action/fermion/StaggeredKernelsAsm.cc +++ b/lib/qcd/action/fermion/StaggeredKernelsAsm.cc @@ -27,8 +27,11 @@ Author: paboyle *************************************************************************************/ 
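Patch 089 turns the serial/parallel I/O choice in NerscIO.h into a compile-time switch on PARALLEL_READ and PARALLEL_WRITE rather than commented-out alternatives. A minimal illustration of the mechanism (the macro names come from the patch; the build invocation shown is just the generic -D flag, not a documented Grid configure option):

    // toggle_demo.cc : compile with and without -DPARALLEL_READ
    #include <cstdio>

    int main(void) {
    #ifdef PARALLEL_READ
      std::printf("readConfiguration would call BinaryIO::readObjectParallel\n");
    #else
      std::printf("readConfiguration would call BinaryIO::readObjectSerial\n");
    #endif
      return 0;
    }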
/* END LEGAL */ #include + +#ifdef AVX512 #include #include +#endif // Interleave operations from two directions // This looks just like a 2 spin multiply and reuse same sequence from the Wilson From cb9a297a0ae7b3bb5d84b5cd415149603eba5f9d Mon Sep 17 00:00:00 2001 From: paboyle Date: Thu, 30 Mar 2017 13:30:25 +0900 Subject: [PATCH 091/101] Chulwoo's Zmobius test --- tests/solver/Test_zmobius_cg_prec.cc | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/tests/solver/Test_zmobius_cg_prec.cc b/tests/solver/Test_zmobius_cg_prec.cc index c66b6246..4ae98d71 100644 --- a/tests/solver/Test_zmobius_cg_prec.cc +++ b/tests/solver/Test_zmobius_cg_prec.cc @@ -81,10 +81,12 @@ int main(int argc, char** argv) { RealD M5 = 1.8; std::vector < std::complex > omegas; for(int i=0;i temp (0.25+0.00*i, 0.0+0.00*i); - omegas.push_back(temp); + double imag = 0.; + if (i==0) imag=1.; + if (i==Ls-1) imag=-1.; + std::complex temp (0.25+0.01*i, imag*0.01); + omegas.push_back(temp); } -// DomainWallFermionR Ddwf(Umu, *FGrid, *FrbGrid, *UGrid, *UrbGrid, mass, M5); ZMobiusFermionR Ddwf(Umu, *FGrid, *FrbGrid, *UGrid, *UrbGrid, mass, M5, omegas,1.,0.); LatticeFermion src_o(FrbGrid); From e0c4eeb3ec4ebb3880cc7f3c38393e01babdad63 Mon Sep 17 00:00:00 2001 From: paboyle Date: Thu, 30 Mar 2017 13:30:45 +0900 Subject: [PATCH 092/101] Compiles again --- tests/qdpxx/Test_qdpxx_loops_staples.cc | 78 ++++++++++++------------- 1 file changed, 39 insertions(+), 39 deletions(-) diff --git a/tests/qdpxx/Test_qdpxx_loops_staples.cc b/tests/qdpxx/Test_qdpxx_loops_staples.cc index cf2e0796..3bed9601 100644 --- a/tests/qdpxx/Test_qdpxx_loops_staples.cc +++ b/tests/qdpxx/Test_qdpxx_loops_staples.cc @@ -282,8 +282,8 @@ double calc_grid_p(Grid::QCD::LatticeGaugeField & Umu) Grid::QCD::LatticeColourMatrix tmp(UGrid); tmp = Grid::zero; - Grid::QCD::PokeIndex(Umu,tmp,2); - Grid::QCD::PokeIndex(Umu,tmp,3); + Grid::QCD::PokeIndex(Umu,tmp,2); + Grid::QCD::PokeIndex(Umu,tmp,3); Grid::QCD::WilsonGaugeActionR Wilson(beta); // Just take beta = 1.0 @@ -311,7 +311,7 @@ double calc_grid_r_dir(Grid::QCD::LatticeGaugeField & Umu) std::vector U(4,UGrid); for(int mu=0;mu(Umu,mu); + U[mu] = Grid::PeekIndex(Umu,mu); } Grid::QCD::LatticeComplex rect(UGrid); @@ -322,7 +322,7 @@ double calc_grid_r_dir(Grid::QCD::LatticeGaugeField & Umu) for(int nu=0;nu::traceDirRectangle(rect,U,mu,nu); + Grid::QCD::ColourWilsonLoops::traceDirRectangle(rect,U,mu,nu); trect = Grid::sum(rect); crect = Grid::TensorRemove(trect); std::cout<< "mu/nu = "< + +using namespace std; +using namespace Grid; +using namespace Grid::QCD; + +template +struct scal { + d internal; +}; + + Gamma::Algebra Gmu [] = { + Gamma::Algebra::GammaX, + Gamma::Algebra::GammaY, + Gamma::Algebra::GammaZ, + Gamma::Algebra::GammaT + }; + + +int main (int argc, char ** argv) +{ + Grid_init(&argc,&argv); + + int threads = GridThread::GetThreads(); + std::cout< seeds4({1,2,3,4}); + std::vector seeds5({5,6,7,8}); + + GridParallelRNG RNG4(UGrid); RNG4.SeedFixedIntegers(seeds4); + GridParallelRNG RNG5(FGrid); RNG5.SeedFixedIntegers(seeds5); + + LatticeFermion src (FGrid); random(RNG5,src); + LatticeFermion phi (FGrid); random(RNG5,phi); + LatticeFermion chi (FGrid); random(RNG5,chi); + LatticeFermion result(FGrid); result=zero; + LatticeFermion ref(FGrid); ref=zero; + LatticeFermion tmp(FGrid); tmp=zero; + LatticeFermion err(FGrid); tmp=zero; + LatticeGaugeField Umu(UGrid); random(RNG4,Umu); + std::vector U(4,UGrid); + + // Only one non-zero (y) + Umu=zero; + for(int nn=0;nn0 ) + U[nn]=zero; + 
PokeIndex(Umu,U[nn],nn); + } + + RealD mass=0.1; + RealD M5 =1.8; + std::vector < std::complex > omegas; + for(int i=0;i temp (0.25+0.01*i, imag*0.1); + omegas.push_back(temp); + } + ZMobiusFermionR Ddwf(Umu, *FGrid, *FrbGrid, *UGrid, *UrbGrid, mass, M5, omegas,1.,0.); +// DomainWallFermionR Ddwf(Umu,*FGrid,*FrbGrid,*UGrid,*UrbGrid,mass,M5); + + LatticeFermion src_e (FrbGrid); + LatticeFermion src_o (FrbGrid); + LatticeFermion r_e (FrbGrid); + LatticeFermion r_o (FrbGrid); + LatticeFermion r_eo (FGrid); + LatticeFermion r_eeoo(FGrid); + + std::cout< * = < chi | Deo^dag| phi> "< * = < chi | Deo^dag| phi> "< HermOpEO(Ddwf); + HermOpEO.MpcDagMpc(chi_e,dchi_e,t1,t2); + HermOpEO.MpcDagMpc(chi_o,dchi_o,t1,t2); + + HermOpEO.MpcDagMpc(phi_e,dphi_e,t1,t2); + HermOpEO.MpcDagMpc(phi_o,dphi_o,t1,t2); + + pDce = innerProduct(phi_e,dchi_e); + pDco = innerProduct(phi_o,dchi_o); + cDpe = innerProduct(chi_e,dphi_e); + cDpo = innerProduct(chi_o,dphi_o); + + std::cout< Date: Thu, 30 Mar 2017 15:00:03 +0900 Subject: [PATCH 101/101] Pretty code --- lib/qcd/hmc/HmcRunner.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/qcd/hmc/HmcRunner.h b/lib/qcd/hmc/HmcRunner.h index 53b127cf..ed9777dc 100644 --- a/lib/qcd/hmc/HmcRunner.h +++ b/lib/qcd/hmc/HmcRunner.h @@ -114,8 +114,8 @@ class NerscHmcRunnerTemplate { */ ////////////// NoSmearing SmearingPolicy; - typedef MinimumNorm2, RepresentationsPolicy > - IntegratorType; // change here to change the algorithm + // change here to change the algorithm + typedef MinimumNorm2, RepresentationsPolicy > IntegratorType; IntegratorParameters MDpar(40, 1.0); IntegratorType MDynamics(UGrid, MDpar, TheAction, SmearingPolicy);
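The even-odd checks above verify, operator by operator, that < phi | Deo chi > equals the conjugate of < chi | Deo^dag phi >. That identity is generic for any linear operator, so the same test can be run in miniature on a random complex matrix; a standalone sketch:

    #include <cassert>
    #include <cmath>
    #include <complex>
    #include <random>
    #include <vector>

    typedef std::complex<double> cd;

    // <a|b> = sum_i conj(a_i) b_i
    cd inner(const std::vector<cd> &a, const std::vector<cd> &b) {
      cd s = 0;
      for (size_t i = 0; i < a.size(); i++) s += std::conj(a[i]) * b[i];
      return s;
    }

    int main(void) {
      const int N = 8;
      std::mt19937 rng(5);
      std::uniform_real_distribution<double> u(-1.0, 1.0);
      std::vector<cd> M(N * N), phi(N), chi(N), Mchi(N), Mdagphi(N);
      for (auto &m : M)   m = cd(u(rng), u(rng));
      for (auto &v : phi) v = cd(u(rng), u(rng));
      for (auto &v : chi) v = cd(u(rng), u(rng));
      for (int i = 0; i < N; i++) {
        Mchi[i] = Mdagphi[i] = 0;
        for (int j = 0; j < N; j++) {
          Mchi[i]    += M[i * N + j] * chi[j];            // (M chi)_i
          Mdagphi[i] += std::conj(M[j * N + i]) * phi[j]; // (M^dag phi)_i
        }
      }
      // <phi|M chi> == conj(<chi|M^dag phi>), the relation checked for Deo above
      assert(std::abs(inner(phi, Mchi) - std::conj(inner(chi, Mdagphi))) < 1e-12);
      return 0;
    }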