1
0
mirror of https://github.com/paboyle/Grid.git synced 2024-11-10 07:55:35 +00:00

Making sure I understand row-major vs column-major ordering

This commit is contained in:
Michael Marshall 2019-02-16 16:18:28 +00:00
parent bfd2770657
commit b6803a070a
2 changed files with 159 additions and 66 deletions

View File

@ -145,7 +145,9 @@ namespace Grid {
//template <typename T> struct Traits<T, typename std::enable_if<is_tensor<T>::value, void>::type> : Traits<T> {};
}
// Helper to allow iteration through an Eigen::Tensor (using a lambda)
// Calls a lambda (passing index and sequence number) for every member of an Eigen::Tensor
// For efficiency, iteration proceeds in memory order,
// ... but parameters guaranteed to be the same regardless of memory order
template <typename ETensor, typename Lambda>
typename std::enable_if<EigenIO::is_tensor<ETensor>::value, void>::type
for_all( ETensor &ET, Lambda lambda )
@ -153,62 +155,107 @@ namespace Grid {
using Scalar = typename ETensor::Scalar; // This could be a Container - we'll check later
const std::size_t NumScalars = ET.size();
assert( NumScalars > 0 );
// Assemble a vector containing all the non-trivial dimensions (i.e. dimensions > 1)
unsigned int ScalarElementCount{1};
std::vector<size_t> NonTrivialDims;
NonTrivialDims.reserve(ET.NumDimensions + EigenIO::Traits<Scalar>::rank_non_trivial);
for(auto i = 0; i < ET.NumDimensions; i++ ) {
using Index = typename ETensor::Index;
Index ScalarElementCount{1};
const auto InnerRank = EigenIO::Traits<Scalar>::rank_non_trivial;
const auto rank{ETensor::NumIndices};
std::array<std::size_t, rank + InnerRank> Dims;
for(auto i = 0; i < rank; i++ ) {
auto dim = ET.dimension(i);
if( dim <= 1 ) {
assert( dim == 1 ); // Not expecting dimension to be <= 0
} else {
size_t s = static_cast<size_t>(dim);
assert( s == dim ); // check we didn't lose anything in the conversion
NonTrivialDims.push_back(s);
ScalarElementCount *= s;
}
assert( dim > 0 );
Dims[i] = static_cast<std::size_t>(dim);
assert( Dims[i] == dim ); // check we didn't lose anything in the conversion
ScalarElementCount *= Dims[i];
}
// Check that the number of containers is correct
// Check that the number of containers is correct ... and we didn't lose anything in conversions
assert( NumScalars == ScalarElementCount );
// If the Scalar is actually a container, add the inner Scalar's non-trivial dimensions
size_t InnerScalarCount{1};
for(auto i = 0; i < EigenIO::Traits<Scalar>::rank_non_trivial; i++ ) {
for(auto i = 0; i < InnerRank; i++ ) {
auto dim = EigenIO::Traits<Scalar>::DimensionNT(i);
assert( dim > 1 );
NonTrivialDims.push_back(dim);
Dims[rank + i] = static_cast<std::size_t>(dim);
assert( Dims[rank + i] == dim ); // check we didn't lose anything in the conversion
InnerScalarCount *= dim;
}
assert(EigenIO::Traits<Scalar>::count == InnerScalarCount);
assert(EigenIO::Traits<Scalar>::size == sizeof( Scalar ));
const unsigned int NonTrivialDimsSize = static_cast<unsigned int>(NonTrivialDims.size());
assert( NonTrivialDimsSize == NonTrivialDims.size() );
//const typename ETensor::Index TotalNumElements = NumScalars * InnerScalarCount;
using Index = typename ETensor::Index;
std::array<Index, ETensor::NumIndices> MyIndex;
std::array<std::size_t, rank + InnerRank> MyIndex;
for( auto &idx : MyIndex ) idx = 0;
std::vector<std::size_t> SubIndex(NonTrivialDimsSize);
for( auto &idx : SubIndex ) idx = 0;
Index n = 0;
Index Seq = 0;
Scalar * pScalar = ET.data();
for( std::size_t j = 0; j < NumScalars; j++ ) {
// if constexpr is C++ 17 ... but otherwise need two specialisations (Container vs Scalar)
if constexpr ( EigenIO::Traits<Scalar>::rank_non_trivial == 0 ) {
lambda(ET( MyIndex ), n++, &SubIndex[0] );
// Now increment SubIndex
for( auto i = NonTrivialDimsSize - 1; i != -1 && ++SubIndex[i] == NonTrivialDims[i]; i-- )
SubIndex[i] = 0;
}
else
{
for( typename Scalar::scalar_type &Source : ET( MyIndex ) ) {
lambda(Source, n++, &SubIndex[0] );
if constexpr ( InnerRank == 0 ) {
lambda( * pScalar, Seq++, &MyIndex[0] );
} else {
for( typename Scalar::scalar_type &Source : * pScalar ) {
lambda(Source, Seq++, &MyIndex[0] );
// Now increment SubIndex
for( auto i = NonTrivialDimsSize - 1; i != -1 && ++SubIndex[i] == NonTrivialDims[i]; i-- )
SubIndex[i] = 0;
for( auto i = rank + InnerRank - 1; i != rank - 1 && ++MyIndex[i] == Dims[i]; i-- )
MyIndex[i] = 0;
}
}
// Now increment MyIndex
for( auto i = ET.NumDimensions - 1; i != -1 && ++MyIndex[i] == ET.dimension(i); i-- )
MyIndex[i] = 0;
// Now increment the index to pass to the lambda (bearing in mind we're walking in memory order)
if( ETensor::Options & Eigen::RowMajor ) {
for( auto i = rank - 1; i != -1 && ++MyIndex[i] == Dims[i]; i-- )
MyIndex[i] = 0;
} else {
for( auto i = 0; i < rank && ++MyIndex[i] == Dims[i]; i++ )
MyIndex[i] = 0;
Seq = 0;
for( auto i = 0; i < rank + InnerRank ; i++ ) {
Seq *= Dims[i];
Seq += MyIndex[i];
}
}
pScalar++;
}
}
// Helper to dump a tensor in memory order
// Prints every element of a scalar-element Eigen::Tensor in the order the
// elements are laid out in memory, so row-major vs column-major storage can
// be inspected visually.
//   t     - the tensor to dump (taken by const reference: the original took it
//           by value, copying the entire tensor on every call)
//   pName - optional label printed before the data (may be nullptr)
// Rank <= 2 tensors are dumped on a single line; higher ranks are dumped one
// inner (last-two-dimensions) matrix at a time, prefixed by the outer indices.
template <typename T>
typename std::enable_if<EigenIO::is_tensor_of_scalar<T>::value, void>::type
DumpMemoryOrder(const T &t, const char * pName = nullptr)
{
const auto dims = t.dimensions();
const auto rank = t.rank();
std::cout << "Dumping rank " << rank << ((T::Options & Eigen::RowMajor) ? ", row" : ", column") << "-major tensor ";
if( pName )
std::cout << pName;
for( auto d : dims ) std::cout << "[" << d << "]";
std::cout << " in memory order:" << std::endl;
const typename T::Scalar * p = t.data();
const auto size = t.size();               // signed Index; cached once
const typename T::Scalar * pEnd = p + size;
if( rank <= 2 ) {
// Low-rank tensors fit on a single line.
// (was: unsigned int i < t.size() — signed/unsigned mismatch and a
//  re-evaluation of size() every iteration)
for( auto i = 0 ; i < size ; i++ )
std::cout << "[" << i << "]=" << *p++ << " ";
std::cout << std::endl;
} else {
// One inner matrix (the last two dimensions) per line
const auto innersize = dims[rank-2] * dims[rank-1];
using Index = typename T::Index;
std::vector<Index> idx(rank - 2);
for( auto &i : idx ) i = 0;
Index idxCounter = 0;
while( p < pEnd ) {
if( T::Options & Eigen::RowMajor ) {
if( pName )
std::cout << pName;
idxCounter = 0;
for(auto i = 0 ; i < rank - 2 ; i++)
std::cout << "[" << idx[i] << "]:";
}
for( auto i = 0 ; i < innersize ; i++ )
std::cout << " [" << idxCounter++ << "]=" << *p++;
if( T::Options & Eigen::RowMajor )
std::cout << std::endl;
// Now increment the outer indices, rightmost fastest.
// Signed ptrdiff_t guards against wrap-around if rank's type is unsigned
// (rank - 3 would otherwise underflow and i != -1 never terminate).
for( std::ptrdiff_t i = static_cast<std::ptrdiff_t>(rank) - 3; i != -1 && ++idx[i] == dims[i]; i-- )
idx[i] = 0;
}
if( ! ( T::Options & Eigen::RowMajor ) )
std::cout << std::endl;
}
}

View File

@ -338,11 +338,11 @@ void DebugShowTensor(MyTensor &x, const char * n)
// Initialise
assert( d.size() == 3 );
for( int i = 0 ; i < d[0] ; i++ )
for( int j = 0 ; j < d[1] ; j++ )
for( int k = 0 ; k < d[2] ; k++ ) {
x(i,j,k) = std::complex<double>(SizeCalculated, -SizeCalculated);
SizeCalculated--;
}
for( int j = 0 ; j < d[1] ; j++ )
for( int k = 0 ; k < d[2] ; k++ ) {
x(i,j,k) = std::complex<double>(SizeCalculated, -SizeCalculated);
SizeCalculated--;
}
// Show raw data
std::cout << "Data follow : " << std::endl;
Complex * p = x.data();
@ -496,7 +496,66 @@ public:
inline value_type * end(void) { return m_p + N; }
};
bool DebugFelixTensorTest( void )
template <int Options>
void EigenSliceExample()
{
std::cout << "Eigen example, Options = " << Options << std::endl;
using T2 = Eigen::Tensor<int, 2, Options>;
T2 a(4, 3);
a.setValues({{0, 100, 200}, {300, 400, 500},
{600, 700, 800}, {900, 1000, 1100}});
std::cout << "a\n" << a << std::endl;
DumpMemoryOrder( a, "a" );
Eigen::array<typename T2::Index, 2> offsets = {1, 0};
Eigen::array<typename T2::Index, 2> extents = {2, 2};
T2 slice = a.slice(offsets, extents);
std::cout << "slice\n" << slice << std::endl;
DumpMemoryOrder( slice, "slice" );
std::cout << "\n========================================" << std::endl;
}
// Demonstrate iteration (for_all), slicing and chipping of a rank-3 complex
// tensor for either storage order.
// Options selects row- vs column-major layout (Eigen::RowMajor or 0).
template <int Options>
void EigenSliceExample2()
{
using TestScalar = std::complex<float>;
using T3 = Eigen::Tensor<TestScalar, 3, Options>;
using T2 = Eigen::Tensor<TestScalar, 2, Options>;
// Initialise a(i,j,k) = (z, -z) with z increasing in (i,j,k) lexicographic
// order, so the memory dump reveals the storage order
T3 a(2,3,4);
std::cout << "Initialising:a";
float z = 0;
for( int i = 0 ; i < a.dimension(0) ; i++ )
for( int j = 0 ; j < a.dimension(1) ; j++ )
for( int k = 0 ; k < a.dimension(2) ; k++ ) {
TestScalar w{z, -z};
a(i,j,k) = w;
std::cout << " a(" << i << "," << j << "," << k << ")=" << w;
z++;
}
std::cout << std::endl;
//std::cout << "a initialised to:\n" << a << std::endl;
DumpMemoryOrder( a, "a" );
// Walk the tensor via for_all: prints each element's index triple and its
// sequence number, which should be index-order regardless of storage order
std::cout << "for_all(a):";
for_all( a, [&](TestScalar c, typename T3::Index n, const std::size_t * pDims ){
std::cout << " (" << pDims[0] << "," << pDims[1] << "," << pDims[2] << ")<" << n << ">=" << c;
} );
std::cout << std::endl;
// Slice out the 1x2x2 block starting at (0,1,1)
Eigen::array<typename T3::Index, 3> offsets = {0,1,1};
Eigen::array<typename T3::Index, 3> extents = {1,2,2};
T3 b;
b = a.slice( offsets, extents );//.reshape(NewExtents);
std::cout << "b = a.slice( offsets, extents ):\n" << b << std::endl;
DumpMemoryOrder( b, "b" );
T2 c(3,4);
c = a.chip(0,1);
// BUGFIX: the message previously claimed "a.chip(0,0)" but the call above is
// chip(0,1); the label now describes the code actually executed.
// NOTE(review): chip(0,1) on a 2x3x4 tensor yields 2x4, not the declared 3x4
// — confirm whether chip(0,0) (which yields 3x4) was intended.
std::cout << "c = a.chip(0,1):\n" << c << std::endl;
DumpMemoryOrder( c, "c" );
//T2 d = b.reshape(extents);
//std::cout << "b.reshape(extents) is:\n" << d << std::endl;
std::cout << "\n========================================" << std::endl;
}
void DebugFelixTensorTest( void )
{
unsigned int Nmom = 2;
unsigned int Nt = 2;
@ -509,25 +568,10 @@ bool DebugFelixTensorTest( void )
using BaryonTensorMap = Eigen::TensorMap<BaryonTensorSet>;
BaryonTensorMap BField4 (&Memory[0], Nmom,4,Nt,N_1,N_2,N_3);
using TestScalar = std::complex<float>;
//typedef Eigen::TensorFixedSize<TestScalar, Eigen::Sizes<9,4,2>, Eigen::StorageOptions::RowMajor> TestTensorFixed;
using T3 = Eigen::Tensor<TestScalar, 3, Eigen::StorageOptions::RowMajor>;
using T2 = Eigen::Tensor<TestScalar, 2, Eigen::StorageOptions::RowMajor>;
T3 a(4,3,2);
for_all( a, [&](TestScalar &c, float n, const std::size_t * pDims ){
c = std::complex<float>{n,-n};
} );
std::cout << "a initialised to:\n" << a << std::endl;
Eigen::array<int, 3> offsets = {0,0,0};
Eigen::array<int, 3> extents = {1,3,2};
T2 b(3,2);
auto c = a.slice( offsets, extents).reshape(extents);
std::cout << "c is:\n" << c << std::endl;
b = a.chip(0,0);
std::cout << "b is:\n" << b << std::endl;
//b = c;
return true;
EigenSliceExample<Eigen::RowMajor>();
EigenSliceExample<0>();
EigenSliceExample2<Eigen::RowMajor>();
EigenSliceExample2<0>();
}
bool DebugGridTensorTest( void )
@ -561,7 +605,9 @@ bool DebugGridTensorTest( void )
Start += Inc;
}
i = 0;
for( auto x : toc7 ) std::cout << "toc7[" << i++ << "] = " << x << std::endl;
std::cout << "toc7:";
for( auto x : toc7 ) std::cout << " [" << i++ << "]=" << x;
std::cout << std::endl;
t2 o2;
auto a2 = TensorRemove(o2);