    /*************************************************************************************

    Grid physics library, www.github.com/paboyle/Grid

    Source file: ./lib/tensors/Tensor_extract_merge.h

    Copyright (C) 2015

Author: Azusa Yamaguchi
Author: Peter Boyle
Author: neo
Author: paboyle

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License along
    with this program; if not, write to the Free Software Foundation, Inc.,
    51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.

    See the full license in the file "LICENSE" in the top level distribution directory
    *************************************************************************************/
    /*  END LEGAL */
#ifndef GRID_EXTRACT_H
#define GRID_EXTRACT_H
/////////////////////////////////////////////////////////////////
// Generic extract/merge/permute
/////////////////////////////////////////////////////////////////

namespace Grid {

////////////////////////////////////////////////////////////////////////////////////////////////
// Extract/merge a fundamental vector type, to pointer array with offset
////////////////////////////////////////////////////////////////////////////////////////////////

template<class vsimd,class scalar>
inline void extract(typename std::enable_if<!isGridTensor<vsimd>::value, const vsimd>::type * y,
                    std::vector<scalar *> &extracted,int offset){
  // FIXME: bounce off memory is painful
  static const int Nsimd=vsimd::Nsimd();
  int Nextr=extracted.size();
  int s=Nsimd/Nextr;

  scalar *buf = (scalar *)y;
  for(int i=0;i<Nextr;i++){
    extracted[i][offset] = buf[i*s]; // one SIMD lane per extracted pointer
  }
};

template<class vsimd,class scalar>
inline void merge(typename std::enable_if<!isGridTensor<vsimd>::value, vsimd>::type * y,
                  std::vector<scalar *> &extracted,int offset){
  static const int Nsimd=vsimd::Nsimd();

  int Nextr=extracted.size();
  int s=Nsimd/Nextr; // can have sparse occupation of simd vector if simd_layout does not fill it
                     // replicate n-fold. Used to allow Integer masks to
                     // predicate floating point assignments of various widths and maintain conformability.
  scalar *buf =(scalar *) y;
  for(int i=0;i<Nextr;i++){
    for(int ii=0;ii<s;ii++){
      buf[i*s+ii]=extracted[i][offset]; // replicate each value s-fold across adjacent lanes
    }
  }
};
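
////////////////////////////////////////////////////////////////////////////////////////////////
// Usage sketch (illustrative only, not part of the interface).  vRealD and RealD below stand
// in for any conformable SIMD/scalar pair defined elsewhere in Grid; with Nextr==Nsimd the
// splat factor s is 1, so extract copies lane i to ptrs[i][offset] and merge inverts it.
//
//   static const int Nsimd = vRealD::Nsimd();
//   vRealD packed;
//   std::vector<RealD>   lane(Nsimd);
//   std::vector<RealD *> ptrs(Nsimd);
//   for(int i=0;i<Nsimd;i++) ptrs[i] = &lane[i];
//
//   extract<vRealD,RealD>(&packed,ptrs,0); // lane i of packed -> ptrs[i][0]
//   merge  <vRealD,RealD>(&packed,ptrs,0); // ptrs[i][0] -> lane i of packed
////////////////////////////////////////////////////////////////////////////////////////////////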
////////////////////////////////////////////////////////////////////////////////////////////////
// Extract a fundamental vector type to a scalar array
////////////////////////////////////////////////////////////////////////////////////////////////
template<class vsimd,class scalar>
inline void extract(typename std::enable_if<!isGridTensor<vsimd>::value, const vsimd>::type &y,std::vector<scalar> &extracted){

  int Nextr=extracted.size();
  int Nsimd=vsimd::Nsimd();
  int s=Nsimd/Nextr;

  scalar *buf = (scalar *)&y;
  for(int i=0;i<Nextr;i++){
    extracted[i]=buf[i*s];
  }
};

////////////////////////////////////////////////////////////////////////////////////////////////
// Merge a fundamental vector type from a scalar array
////////////////////////////////////////////////////////////////////////////////////////////////
template<class vsimd,class scalar>
inline void merge(typename std::enable_if<!isGridTensor<vsimd>::value, vsimd>::type &y,std::vector<scalar> &extracted){
  int Nextr=extracted.size();
  static const int Nsimd=vsimd::Nsimd();
  int s=Nsimd/Nextr;

  scalar *buf = (scalar *)&y;
  for(int i=0;i<Nextr;i++){
    for(int ii=0;ii<s;ii++){
      buf[i*s+ii]=extracted[i]; // replicates value s-fold
    }
  }
};

////////////////////////////////////////////////////////////////////////
// Extract to a contiguous array of scalar objects
////////////////////////////////////////////////////////////////////////
template<class vobj> inline
void extract(const vobj &vec,std::vector<typename vobj::scalar_object> &extracted)
{
  typedef typename vobj::scalar_type scalar_type ;
  typedef typename vobj::vector_type vector_type ;

  static const int Nsimd=vobj::vector_type::Nsimd();
  static const int words=sizeof(vobj)/sizeof(vector_type);

  int Nextr=extracted.size();
  int s=Nsimd/Nextr;

  std::vector<scalar_type *> pointers(Nextr);
  for(int i=0;i<Nextr;i++)
    pointers[i] =(scalar_type *)& extracted[i];

  vector_type *vp = (vector_type *)&vec;
  for(int w=0;w<words;w++){
    extract<vector_type,scalar_type>(&vp[w],pointers,w);
  }
}

////////////////////////////////////////////////////////////////////////
// Extract to a bunch of scalar object pointers, with offset
////////////////////////////////////////////////////////////////////////
template<class vobj> inline
void extract(const vobj &vec,std::vector<typename vobj::scalar_object *> &extracted, int offset)
{
  typedef typename vobj::scalar_type scalar_type ;
  typedef typename vobj::vector_type vector_type ;

  static const int words=sizeof(vobj)/sizeof(vector_type);
  static const int Nsimd=vobj::vector_type::Nsimd();

  int Nextr=extracted.size();
  int s = Nsimd/Nextr;

  scalar_type * vp = (scalar_type *)&vec;
  for(int w=0;w<words;w++){
    for(int i=0;i<Nextr;i++){
      scalar_type * pointer = (scalar_type *)&extracted[i][offset];
      pointer[w] = vp[w*Nsimd+i*s];
    }
  }
}

////////////////////////////////////////////////////////////////////////
// Merge a contiguous array of scalar objects
////////////////////////////////////////////////////////////////////////
template<class vobj> inline
void merge(vobj &vec,std::vector<typename vobj::scalar_object> &extracted)
{
  typedef typename vobj::scalar_type scalar_type ;
  typedef typename vobj::vector_type vector_type ;

  static const int Nsimd=vobj::vector_type::Nsimd();
  static const int words=sizeof(vobj)/sizeof(vector_type);

  int Nextr = extracted.size();
  int splat=Nsimd/Nextr;

  std::vector<scalar_type *> pointers(Nextr);
  for(int i=0;i<Nextr;i++)
    pointers[i] =(scalar_type *)& extracted[i];

  vector_type *vp = (vector_type *)&vec;
  for(int w=0;w<words;w++){
    merge<vector_type,scalar_type>(&vp[w],pointers,w);
  }
}

////////////////////////////////////////////////////////////////////////
// Merge a bunch of different scalar object pointers, with offset
////////////////////////////////////////////////////////////////////////
template<class vobj> inline
void merge(vobj &vec,std::vector<typename vobj::scalar_object *> &extracted,int offset)
{
  typedef typename vobj::scalar_type scalar_type ;
  typedef typename vobj::vector_type vector_type ;

  const int Nsimd=vobj::vector_type::Nsimd();
  const int words=sizeof(vobj)/sizeof(vector_type);

  int Nextr=extracted.size();
  int s=Nsimd/Nextr;

  scalar_type *pointer;
  scalar_type *vp = (scalar_type *)&vec;
  //  assert( (((uint64_t)vp)&(sizeof(scalar_type)-1)) == 0);

  for(int w=0;w<words;w++){
    for(int i=0;i<Nextr;i++){
      for(int ii=0;ii<s;ii++){
        pointer=(scalar_type *)&extracted[i][offset];
        vp[w*Nsimd+i*s+ii] = pointer[w];
      }
    }
  }
}

////////////////////////////////////////////////////////////////////////
// Variants of the offset merge above; these assume the extracted array
// fully occupies the SIMD lanes (Nextr==Nsimd, splat factor 1).
////////////////////////////////////////////////////////////////////////
template<class vobj> inline
void merge1(vobj &vec,std::vector<typename vobj::scalar_object *> &extracted,int offset)
{
  typedef typename vobj::scalar_type scalar_type ;
  typedef typename vobj::vector_type vector_type ;

  static const int Nsimd=vobj::vector_type::Nsimd();
  static const int words=sizeof(vobj)/sizeof(vector_type);

  scalar_type *vp = (scalar_type *)&vec;
  //  assert( (((uint64_t)vp)&(sizeof(scalar_type)-1)) == 0);

  for(int w=0;w<words;w++){
    for(int i=0;i<Nsimd;i++){
      vp[w*Nsimd+i] = ((scalar_type *)&extracted[i][offset])[w];
    }
  }
}

template<class vobj> inline
void merge2(vobj &vec,std::vector<typename vobj::scalar_object *> &extracted,int offset)
{
  typedef typename vobj::scalar_type scalar_type ;
  typedef typename vobj::vector_type vector_type ;

  const int Nsimd=vobj::vector_type::Nsimd();
  const int words=sizeof(vobj)/sizeof(vector_type);

  scalar_type *pointer;
  scalar_type *vp = (scalar_type *)&vec;
  //  assert( (((uint64_t)vp)&(sizeof(scalar_type)-1)) == 0);

  for(int w=0;w<words;w++){
    for(int i=0;i<Nsimd;i++){
      pointer=(scalar_type *)&extracted[i][offset];
      vp[w*Nsimd+i] = pointer[w];
    }
  }
}

}
#endif
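
////////////////////////////////////////////////////////////////////////
// Usage sketch for the object-level overloads (illustrative only; vComplexD and the
// iVector tensor wrapper are assumed from elsewhere in Grid, and scalar_object is the
// matching single-lane type):
//
//   typedef iVector<vComplexD,3> vobj;               // SIMD-vectorised object
//   typedef vobj::scalar_object  sobj;               // one lane's worth: iVector<ComplexD,3>
//
//   vobj v;
//   std::vector<sobj> lanes(vobj::vector_type::Nsimd());
//
//   extract(v,lanes);   // lanes[i] receives SIMD lane i of every word of v
//   merge  (v,lanes);   // inverse: interleave lanes[] back into v
////////////////////////////////////////////////////////////////////////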