/*************************************************************************************

Grid physics library, www.github.com/paboyle/Grid

Source file: Hadrons/Modules/MContraction/A2AMesonFieldKernels.hpp

Copyright (C) 2015-2018

Author: Antonin Portelli
Author: Peter Boyle

This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.

This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.

See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/*  END LEGAL */
#ifndef Hadrons_MContraction_A2AMesonFieldKernels_hpp_
#define Hadrons_MContraction_A2AMesonFieldKernels_hpp_

#include <Hadrons/Global.hpp>
#include <Hadrons/Module.hpp>

BEGIN_HADRONS_NAMESPACE

BEGIN_MODULE_NAMESPACE(MContraction)

////////////////////////////////////////////////////////////////////////////////
// Cache blocked arithmetic routine
// Could move to Grid ???
////////////////////////////////////////////////////////////////////////////////
template <typename Field, typename MesonField>
void makeMesonFieldBlock(MesonField &mat,
                         const Field *lhs_wi,
                         const Field *rhs_vj,
                         const std::vector<Gamma::Algebra> &gamma,
                         const std::vector<LatticeComplex> &mom,
                         int orthogdim,
                         double &time)
{
    typedef typename Field::vector_object vobj;
    typedef typename vobj::scalar_object  sobj;
    typedef typename vobj::scalar_type    scalar_type;
    typedef typename vobj::vector_type    vector_type;

    typedef iSpinMatrix<vector_type> SpinMatrix_v;
    typedef iSpinMatrix<scalar_type> SpinMatrix_s;

    TimerArray tArray;

    int Lblock = mat.dimension(3);
    int Rblock = mat.dimension(4);

    GridBase *grid = lhs_wi[0]._grid;

    const int    Nd = grid->_ndimension;
    const int Nsimd = grid->Nsimd();

    int Nt     = grid->GlobalDimensions()[orthogdim];
    int Ngamma = gamma.size();
    int Nmom   = mom.size();

    int fd = grid->_fdimensions[orthogdim];
    int ld = grid->_ldimensions[orthogdim];
    int rd = grid->_rdimensions[orthogdim];

    // will locally sum vectors first
    // sum across these down to scalars
    // splitting the SIMD
    int MFrvol = rd*Lblock*Rblock*Nmom;
    int MFlvol = ld*Lblock*Rblock*Nmom;

    Vector<SpinMatrix_v> lvSum(MFrvol);
    parallel_for (int r = 0; r < MFrvol; r++)
    {
        lvSum[r] = zero;
    }

    Vector<SpinMatrix_s> lsSum(MFlvol);
    parallel_for (int r = 0; r < MFlvol; r++)
    {
        lsSum[r] = scalar_type(0.0);
    }

    int e1     = grid->_slice_nblock[orthogdim];
    int e2     = grid->_slice_block [orthogdim];
    int stride = grid->_slice_stride[orthogdim];

    tArray.startTimer("contraction: colour trace & mom.");
    // Nested parallelism would be ok
    // Wasting cores here. Test case r
    parallel_for(int r=0;r<rd;r++)
    {
        int so = r*grid->_ostride[orthogdim]; // base offset for start of plane

        for(int n=0;n<e1;n++)
        for(int b=0;b<e2;b++)
        {
            int ss = so + n*stride + b;

            for(int i=0;i<Lblock;i++)
            {
                auto left = conjugate(lhs_wi[i]._odata[ss]);

                for(int j=0;j<Rblock;j++)
                {
                    SpinMatrix_v vv;
                    auto right = rhs_vj[j]._odata[ss];

                    // colour trace of w_i^dag . v_j, keeping both spin indices open
                    for(int s1=0;s1<Ns;s1++)
                    for(int s2=0;s2<Ns;s2++)
                    {
                        vv()(s1,s2)() = left()(s2)(0)*right()(s1)(0)
                                      + left()(s2)(1)*right()(s1)(1)
                                      + left()(s2)(2)*right()(s1)(2);
                    }

                    // after the sitewise product, accumulate each momentum phase
                    int base = Nmom*i + Nmom*Lblock*j + Nmom*Lblock*Rblock*r;

                    for(int m=0;m<Nmom;m++)
                    {
                        int idx    = m + base;
                        auto phase = mom[m]._odata[ss];
                        mac(&lvSum[idx], &vv, &phase);
                    }
                }
            }
        }
    }
    tArray.stopTimer("contraction: colour trace & mom.");

    // Sum across simd lanes in the plane, breaking out orthog dir.
    tArray.startTimer("contraction: local space sum");
    parallel_for(int rt=0;rt<rd;rt++)
    {
        std::vector<int>          icoor(Nd);
        std::vector<SpinMatrix_s> extracted(Nsimd);

        for(int i=0;i<Lblock;i++)
        for(int j=0;j<Rblock;j++)
        for(int m=0;m<Nmom;m++)
        {
            int ij_rdx = m + Nmom*i + Nmom*Lblock*j + Nmom*Lblock*Rblock*rt;

            extract(lvSum[ij_rdx], extracted);
            for(int idx=0;idx<Nsimd;idx++)
            {
                grid->iCoorFromIindex(icoor,idx);

                int ldx    = rt + icoor[orthogdim]*rd;
                int ij_ldx = m + Nmom*i + Nmom*Lblock*j + Nmom*Lblock*Rblock*ldx;

                lsSum[ij_ldx] = lsSum[ij_ldx] + extracted[idx];
            }
        }
    }
    tArray.stopTimer("contraction: local space sum");
    time = tArray.getDTimer("contraction: colour trace & mom.")
         + tArray.getDTimer("contraction: local space sum");

    // ld loop and local only??
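    // What follows assembles the result tensor. Under the usual Grid domain
    // decomposition each rank owns local time slices lt = 0..ld-1, mapping to
    // global time t = lt + pc*ld (pc = processor coordinate along orthogdim).
    // A rank writes only the slices it owns and zeroes the others, so the
    // final GlobalSumVector both reduces over ranks and assembles the full Nt
    // extent of the tensor.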
    tArray.startTimer("contraction: spin trace");
    int pd = grid->_processors[orthogdim];
    int pc = grid->_processor_coor[orthogdim];
    parallel_for_nest2(int lt=0;lt<ld;lt++)
    {
        for(int pt=0;pt<pd;pt++)
        {
            int t = lt + pt*ld;
            if (pt == pc)
            {
                for(int i=0;i<Lblock;i++)
                for(int j=0;j<Rblock;j++)
                for(int m=0;m<Nmom;m++)
                {
                    int ij_dx = m + Nmom*i + Nmom*Lblock*j + Nmom*Lblock*Rblock*lt;

                    // spin trace against each gamma insertion
                    for(int mu=0;mu<Ngamma;mu++)
                    {
                        mat(m,mu,t,i,j) = TensorRemove(trace(lsSum[ij_dx]*Gamma(gamma[mu])));
                    }
                }
            }
            else
            {
                // another rank owns this time slice: zero it ahead of the global sum
                const scalar_type zz(0.0);

                for(int i=0;i<Lblock;i++)
                for(int j=0;j<Rblock;j++)
                for(int mu=0;mu<Ngamma;mu++)
                for(int m=0;m<Nmom;m++)
                {
                    mat(m,mu,t,i,j) = zz;
                }
            }
        }
    }
    tArray.stopTimer("contraction: spin trace");

    tArray.startTimer("contraction: global sum");
    grid->GlobalSumVector(&mat(0,0,0,0,0),Nmom*Ngamma*Nt*Lblock*Rblock);
    tArray.stopTimer("contraction: global sum");
}

template <typename Field, typename AslashField>
void makeAslashFieldBlock(AslashField &mat,
                          const Field *lhs_wi,
                          const Field *rhs_vj,
                          const std::vector<LatticeComplex> &emB0,
                          const std::vector<LatticeComplex> &emB1,
                          int orthogdim,
                          ModuleBase *caller = nullptr)
{
    typedef typename Field::vector_object vobj;
    typedef typename vobj::scalar_object  sobj;
    typedef typename vobj::scalar_type    scalar_type;
    typedef typename vobj::vector_type    vector_type;

    typedef iSpinMatrix<vector_type> SpinMatrix_v;
    typedef iSpinMatrix<scalar_type> SpinMatrix_s;

    int Lblock = mat.dimension(2);
    int Rblock = mat.dimension(3);

    GridBase *grid = lhs_wi[0]._grid;

    const int    Nd = grid->_ndimension;
    const int Nsimd = grid->Nsimd();

    int Nt  = grid->GlobalDimensions()[orthogdim];
    int Nem = emB0.size();
    assert(emB1.size() == Nem);

    int fd = grid->_fdimensions[orthogdim];
    int ld = grid->_ldimensions[orthogdim];
    int rd = grid->_rdimensions[orthogdim];

    // will locally sum vectors first
    // sum across these down to scalars
    // splitting the SIMD
    int MFrvol = rd*Lblock*Rblock*Nem;
    int MFlvol = ld*Lblock*Rblock*Nem;

    Vector<vector_type> lvSum(MFrvol);
    parallel_for (int r = 0; r < MFrvol; r++)
    {
        lvSum[r] = zero;
    }

    Vector<scalar_type> lsSum(MFlvol);
    parallel_for (int r = 0; r < MFlvol; r++)
    {
        lsSum[r] = scalar_type(0.0);
    }

    int e1     = grid->_slice_nblock[orthogdim];
    int e2     = grid->_slice_block [orthogdim];
    int stride = grid->_slice_stride[orthogdim];

    if (caller) caller->startTimer("contraction: colour trace & Aslash mul");
    // Nested parallelism would be ok
    // Wasting cores here. Test case r
    parallel_for(int r=0;r<rd;r++)
    {
        int so = r*grid->_ostride[orthogdim]; // base offset for start of plane

        for(int n=0;n<e1;n++)
        for(int b=0;b<e2;b++)
        {
            int ss = so + n*stride + b;

            for(int i=0;i<Lblock;i++)
            {
                auto left = conjugate(lhs_wi[i]._odata[ss]);

                for(int j=0;j<Rblock;j++)
                {
                    SpinMatrix_v vv;
                    auto right = rhs_vj[j]._odata[ss];

                    // colour trace of w_i^dag . v_j, keeping both spin indices open
                    for(int s1=0;s1<Ns;s1++)
                    for(int s2=0;s2<Ns;s2++)
                    {
                        vv()(s1,s2)() = left()(s2)(0)*right()(s1)(0)
                                      + left()(s2)(1)*right()(s1)(1)
                                      + left()(s2)(2)*right()(s1)(2);
                    }

                    // sitewise multiplication by the slashed photon field; emB0/emB1
                    // pack the four photon polarisations into two complex fields
                    int base = Nem*i + Nem*Lblock*j + Nem*Lblock*Rblock*r;

                    for(int m=0;m<Nem;m++)
                    {
                        int idx  = m + base;
                        auto b0  = emB0[m]._odata[ss];
                        auto b1  = emB1[m]._odata[ss];
                        auto cb0 = conjugate(b0);
                        auto cb1 = conjugate(b1);

                        lvSum[idx] = lvSum[idx]
                                   - vv()(3,0)()*b0()()()  - vv()(2,1)()*b0()()()
                                   - vv()(3,1)()*b1()()()  + vv()(2,0)()*b1()()()
                                   - vv()(0,3)()*cb0()()() - vv()(1,2)()*cb0()()()
                                   - vv()(0,2)()*cb1()()() + vv()(1,3)()*cb1()()();
                    }
                }
            }
        }
    }
    if (caller) caller->stopTimer("contraction: colour trace & Aslash mul");

    // Sum across simd lanes in the plane, breaking out orthog dir.
    if (caller) caller->startTimer("contraction: local space sum");
    parallel_for(int rt=0;rt<rd;rt++)
    {
        std::vector<int>         icoor(Nd);
        std::vector<scalar_type> extracted(Nsimd);

        for(int i=0;i<Lblock;i++)
        for(int j=0;j<Rblock;j++)
        for(int m=0;m<Nem;m++)
        {
            int ij_rdx = m + Nem*i + Nem*Lblock*j + Nem*Lblock*Rblock*rt;

            extract(lvSum[ij_rdx], extracted);
            for(int idx=0;idx<Nsimd;idx++)
            {
                grid->iCoorFromIindex(icoor,idx);

                int ldx    = rt + icoor[orthogdim]*rd;
                int ij_ldx = m + Nem*i + Nem*Lblock*j + Nem*Lblock*Rblock*ldx;

                lsSum[ij_ldx] = lsSum[ij_ldx] + extracted[idx];
            }
        }
    }
    if (caller) caller->stopTimer("contraction: local space sum");

    // ld loop and local only??
    if (caller) caller->startTimer("contraction: tensor store");
    int pd = grid->_processors[orthogdim];
    int pc = grid->_processor_coor[orthogdim];
    parallel_for_nest2(int lt=0;lt<ld;lt++)
    {
        for(int pt=0;pt<pd;pt++)
        {
            int t = lt + pt*ld;
            if (pt == pc)
            {
                for(int i=0;i<Lblock;i++)
                for(int j=0;j<Rblock;j++)
                for(int m=0;m<Nem;m++)
                {
                    int ij_dx = m + Nem*i + Nem*Lblock*j + Nem*Lblock*Rblock*lt;

                    mat(m,t,i,j) = lsSum[ij_dx];
                }
            }
            else
            {
                // another rank owns this time slice: zero it ahead of the global sum
                for(int i=0;i<Lblock;i++)
                for(int j=0;j<Rblock;j++)
                for(int m=0;m<Nem;m++)
                {
                    mat(m,t,i,j) = 0.;
                }
            }
        }
    }
    if (caller) caller->stopTimer("contraction: tensor store");

    if (caller) caller->startTimer("contraction: global sum");
    grid->GlobalSumVector(&mat(0,0,0,0),Nem*Nt*Lblock*Rblock);
    if (caller) caller->stopTimer("contraction: global sum");
}

END_MODULE_NAMESPACE

END_HADRONS_NAMESPACE

#endif //Hadrons_MContraction_A2AMesonFieldKernels_hpp_
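////////////////////////////////////////////////////////////////////////////////
// Illustrative usage sketch (not part of the original kernels): how a caller
// such as the A2AMesonField module might drive makeMesonFieldBlock for one
// cache block. The Eigen tensor type, the w/v containers and the phase fields
// below are assumptions for the example, not the module's actual code.
//
//   using MF = Eigen::Tensor<ComplexD, 5, Eigen::RowMajor>;
//
//   MF     mat(Nmom, Ngamma, Nt, Lblock, Rblock); // indexed as mat(m, mu, t, i, j)
//   double t_kernel = 0.;
//
//   // &w[ii] and &v[jj] point at the first field of the current block of
//   // all-to-all vectors; gamma lists the spin insertions and mom holds the
//   // precomputed exp(i p.x) LatticeComplex phases.
//   makeMesonFieldBlock(mat, &w[ii], &v[jj], gamma, mom, Tp, t_kernel);
//
//   // After the call (and its internal global sum), for every global time t:
//   //   mat(m, mu, t, i, j) = sum_{x in timeslice t} phase_m(x) *
//   //                         w_i^dag(x) Gamma_mu v_j(x)
////////////////////////////////////////////////////////////////////////////////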