Handle case of simd_layout not filling the whole vector.
Useful if real and complex live on the same grid.
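The layout being handled, shown as a minimal standalone sketch (the merge_splat/extract_splat helpers and the plain std::vector<float> standing in for a SIMD register are illustrative assumptions, not Grid's actual API): when the number of extracted scalars Nextr is smaller than Nsimd, each value occupies a block of s = Nsimd/Nextr consecutive lanes; merge replicates the value across its block, and extract reads one lane per block (the real code additionally asserts that all lanes of a block agree).

#include <cassert>
#include <iostream>
#include <vector>

// Toy splat merge/extract, assuming Nextr divides Nsimd exactly.
// Each of the Nextr scalars occupies s = Nsimd/Nextr consecutive lanes.
template<class scalar>
void merge_splat(std::vector<scalar> &lanes,const std::vector<scalar> &extracted){
  int Nsimd=lanes.size();
  int Nextr=extracted.size();
  int s=Nsimd/Nextr;                     // s==1 when the simd_layout fills the whole vector
  for(int i=0;i<Nextr;i++){
    for(int ii=0;ii<s;ii++){
      lanes[i*s+ii]=extracted[i];        // replicate the value across its block of lanes
    }
  }
}

template<class scalar>
void extract_splat(const std::vector<scalar> &lanes,std::vector<scalar> &extracted){
  int Nsimd=lanes.size();
  int Nextr=extracted.size();
  int s=Nsimd/Nextr;
  for(int i=0;i<Nextr;i++){
    extracted[i]=lanes[i*s];             // read the first lane of each block
    for(int ii=1;ii<s;ii++){
      assert(lanes[i*s]==lanes[i*s+ii]); // all lanes of a block must agree
    }
  }
}

int main(void){
  const int Nsimd=8;                           // e.g. eight single-precision lanes
  std::vector<float> lanes(Nsimd);
  std::vector<float> vals={1.f,2.f,3.f,4.f};   // Nextr=4 < Nsimd: sparse occupation
  merge_splat(lanes,vals);

  std::vector<float> back(vals.size());
  extract_splat(lanes,back);
  for(int i=0;i<(int)back.size();i++) std::cout<<back[i]<<" ";
  std::cout<<std::endl;                        // prints: 1 2 3 4
  return 0;
}

Per the comments added in the first hunk below, this n-fold replication is what allows Integer masks produced on a coarser layout to predicate floating-point assignments of a different width while the grids remain conformable.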
@@ -31,18 +31,17 @@ inline void merge(typename std::enable_if<!isGridTensor<vsimd>::value, vsimd >::
 		  std::vector<scalar *> &extracted,int offset){
   int Nextr=extracted.size();
   int Nsimd=vsimd::Nsimd();
-  int s=Nsimd/Nextr;
+  int s=Nsimd/Nextr; // can have sparse occupation of simd vector if simd_layout does not fill it
+                     // replicate n-fold. Use to allow Integer masks to 
+                     // predicate floating point of various width assignments and maintain conformable.
   scalar *buf =(scalar *) y;
   for(int i=0;i<Nextr;i++){
     for(int ii=0;ii<s;ii++){
       buf[i*s+ii]=extracted[i][offset];
     }
   }
-
 };
 
-
 ////////////////////////////////////////////////////////////////////////////////////////////////
 // Extract a fundamental vector type to scalar array 
 ////////////////////////////////////////////////////////////////////////////////////////////////
@@ -55,8 +54,17 @@ inline void extract(typename std::enable_if<!isGridTensor<vsimd>::value, const v
 
   scalar *buf = (scalar *)&y;
   for(int i=0;i<Nextr;i++){
-    for(int ii=0;ii<s;ii++){
-      extracted[i]=buf[i*s+ii];
+    extracted[i]=buf[i*s];
+    for(int ii=1;ii<s;ii++){
+      if ( buf[i*s]!=buf[i*s+ii] ){
+        std::cout << " SIMD extract failure splat="<<s<<" ii "<<ii<<" " <<Nextr<<" "<< Nsimd<<" "<<std::endl;
+        for(int vv=0;vv<Nsimd;vv++) {
+          std::cout<< buf[vv]<<" ";
+        }
+        std::cout<<std::endl;
+        assert(0);
+      }
+      assert(buf[i*s]==buf[i*s+ii]);
     }
   }
 
@@ -74,21 +82,7 @@ inline void merge(typename std::enable_if<!isGridTensor<vsimd>::value, vsimd >::
 
   for(int i=0;i<Nextr;i++){
     for(int ii=0;ii<s;ii++){
-      buf[i*s+ii]=extracted[i];
-    }
-  }
-
-};
-template<class vsimd,class scalar>
-inline void AmergeA(typename std::enable_if<!isGridTensor<vsimd>::value, vsimd >::type  &y,std::vector<scalar> &extracted){
-  int Nextr=extracted.size();
-  int Nsimd=vsimd::Nsimd();
-  int s=Nsimd/Nextr;
-
-  scalar *buf = (scalar *)&y;
-  for(int i=0;i<Nextr;i++){
-    for(int ii=0;ii<s;ii++){
-      buf[i*s+ii]=extracted[i];
+      buf[i*s+ii]=extracted[i]; // replicates value
     }
   }
 };
@@ -102,12 +96,12 @@ template<class vobj> inline void extract(const vobj &vec,std::vector<typename vo
   typedef typename vobj::vector_type vector_type ;
 
   const int Nsimd=vobj::vector_type::Nsimd();
+  int Nextr=extracted.size();
   const int words=sizeof(vobj)/sizeof(vector_type);
+  int s=Nsimd/Nextr;
 
-  extracted.resize(Nsimd);
-
-  std::vector<scalar_type *> pointers(Nsimd);
-  for(int i=0;i<Nsimd;i++) 
+  std::vector<scalar_type *> pointers(Nextr);
+  for(int i=0;i<Nextr;i++) 
     pointers[i] =(scalar_type *)& extracted[i];
 
   vector_type *vp = (vector_type *)&vec;
@@ -127,11 +121,11 @@ void extract(const vobj &vec,std::vector<typename vobj::scalar_object *> &extrac
 
   const int words=sizeof(vobj)/sizeof(vector_type);
   const int Nsimd=vobj::vector_type::Nsimd();
-  assert(extracted.size()==Nsimd);
+  int Nextr=extracted.size();
+  int s = Nsimd/Nextr;
 
   std::vector<scalar_type *> pointers(Nsimd);
-  for(int i=0;i<Nsimd;i++) {
+  for(int i=0;i<Nextr;i++) {
     pointers[i] =(scalar_type *)& extracted[i][offset];
   }
 
@@ -153,10 +147,11 @@ void merge(vobj &vec,std::vector<typename vobj::scalar_object> &extracted)
   const int Nsimd=vobj::vector_type::Nsimd();
   const int words=sizeof(vobj)/sizeof(vector_type);
 
-  assert(extracted.size()==Nsimd);
+  int Nextr = extracted.size();
+  int splat=Nsimd/Nextr;
 
-  std::vector<scalar_type *> pointers(Nsimd);
-  for(int i=0;i<Nsimd;i++) 
+  std::vector<scalar_type *> pointers(Nextr);
+  for(int i=0;i<Nextr;i++) 
     pointers[i] =(scalar_type *)& extracted[i];
   
   vector_type *vp = (vector_type *)&vec;
@@ -177,14 +172,14 @@ void merge(vobj &vec,std::vector<typename vobj::scalar_object *> &extracted,int
   const int Nsimd=vobj::vector_type::Nsimd();
   const int words=sizeof(vobj)/sizeof(vector_type);
 
-  assert(extracted.size()==Nsimd);
+  int Nextr=extracted.size();
 
-  std::vector<scalar_type *> pointers(Nsimd);
-  for(int i=0;i<Nsimd;i++) 
+  std::vector<scalar_type *> pointers(Nextr);
+  for(int i=0;i<Nextr;i++) 
     pointers[i] =(scalar_type *)& extracted[i][offset];
   
   vector_type *vp = (vector_type *)&vec;
-  assert((void *)vp!=NULL);
+
   for(int w=0;w<words;w++){
     merge<vector_type,scalar_type>(&vp[w],pointers,w);
   }