EigenIO writing rationalised. All indices (trivial or not) written
Commit c14547ddbe (parent 63c97db414)
@@ -96,7 +96,6 @@ namespace Grid {
  template <typename T> struct Traits<T, typename std::enable_if<is_scalar<T>::value, void>::type> {
    using scalar_type = T; // Type of the underlying scalar
    using scalar_real = typename RealType<scalar_type>::type; // real type underlying scalar_type
    static constexpr unsigned int depth = 0; // How many levels of Grid Tensor there are (TensorLevel)
    static constexpr unsigned int rank = 0; // The rank of the grid tensor (i.e. how many indices used)
    static constexpr unsigned int rank_non_trivial = 0; // As per rank, but excludes those of dimension 1
    static constexpr unsigned int count = 1; // total number of elements (i.e. product of dimensions)
@@ -123,8 +122,7 @@ namespace Grid {
  template <typename T> struct Traits<iScalar<T>> {
    using scalar_type = typename Traits<T>::scalar_type;
    using scalar_real = typename RealType<scalar_type>::type;
    static constexpr unsigned int depth = 1 + Traits<T>::depth;
    static constexpr unsigned int rank = 0 + Traits<T>::rank;
    static constexpr unsigned int rank = 1 + Traits<T>::rank;
    static constexpr unsigned int rank_non_trivial = 0 + Traits<T>::rank_non_trivial;
    static constexpr unsigned int count = 1 * Traits<T>::count;
    static constexpr std::size_t scalar_size = Traits<T>::scalar_size;
@@ -137,7 +135,6 @@ namespace Grid {
  template <typename T, int N> struct Traits<iVector<T, N>> {
    using scalar_type = typename Traits<T>::scalar_type;
    using scalar_real = typename RealType<scalar_type>::type;
    static constexpr unsigned int depth = 1 + Traits<T>::depth;
    static constexpr unsigned int rank = 1 + Traits<T>::rank;
    static constexpr unsigned int rank_non_trivial = (N>1 ? 1 : 0) + Traits<T>::rank_non_trivial;
    static constexpr unsigned int count = N * Traits<T>::count;
@@ -152,7 +149,6 @@ namespace Grid {
  template <typename T, int N> struct Traits<iMatrix<T, N>> {
    using scalar_type = typename Traits<T>::scalar_type;
    using scalar_real = typename RealType<scalar_type>::type;
    static constexpr unsigned int depth = 1 + Traits<T>::depth;
    static constexpr unsigned int rank = 2 + Traits<T>::rank;
    static constexpr unsigned int rank_non_trivial = (N>1 ? 2 : 0) + Traits<T>::rank_non_trivial;
    static constexpr unsigned int count = N * N * Traits<T>::count;
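As a worked example of how these specialisations compose, the following standalone sketch (illustrative re-declarations, not Grid's actual headers) reproduces the arithmetic for an iMatrix of iVectors:

// Standalone sketch: the same recursion as the Traits specialisations above.
#include <complex>

template <typename T> struct Traits {                    // arithmetic/complex scalar
  static constexpr unsigned int depth = 0, rank = 0, rank_non_trivial = 0, count = 1;
};
template <typename T, int N> struct iVector {};
template <typename T, int N> struct iMatrix {};
template <typename T, int N> struct Traits<iVector<T, N>> {
  static constexpr unsigned int depth = 1 + Traits<T>::depth;
  static constexpr unsigned int rank = 1 + Traits<T>::rank;
  static constexpr unsigned int rank_non_trivial = (N > 1 ? 1 : 0) + Traits<T>::rank_non_trivial;
  static constexpr unsigned int count = N * Traits<T>::count;
};
template <typename T, int N> struct Traits<iMatrix<T, N>> {
  static constexpr unsigned int depth = 1 + Traits<T>::depth;
  static constexpr unsigned int rank = 2 + Traits<T>::rank;
  static constexpr unsigned int rank_non_trivial = (N > 1 ? 2 : 0) + Traits<T>::rank_non_trivial;
  static constexpr unsigned int count = N * N * Traits<T>::count;
};

using Nested = iMatrix<iVector<std::complex<double>, 4>, 3>;
static_assert(Traits<Nested>::depth == 2, "two container levels");
static_assert(Traits<Nested>::rank == 3, "two matrix indices + one vector index");
static_assert(Traits<Nested>::rank_non_trivial == 3, "all three indices have dimension > 1");
static_assert(Traits<Nested>::count == 3 * 3 * 4, "36 scalars per container");
int main() { return 0; }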
@@ -377,10 +373,7 @@ namespace Grid {
    template <typename U, int N>
    void write(const std::string &s, const iMatrix<U, N> &output);
    template <typename ETensor>
    typename std::enable_if<EigenIO::is_tensor_of_scalar<ETensor>::value, void>::type
    write(const std::string &s, const ETensor &output);
    template <typename ETensor>
    typename std::enable_if<EigenIO::is_tensor_of_container<ETensor>::value, void>::type
    typename std::enable_if<EigenIO::is_tensor<ETensor>::value, void>::type
    write(const std::string &s, const ETensor &output);

    void scientificFormat(const bool set);
@@ -502,128 +495,64 @@ namespace Grid {
    {
      upcast->writeDefault(s, tensorToVec(output));
    }

    // Eigen::Tensors of arithmetic/complex base type
    template <typename T>
    template <typename ETensor>
    typename std::enable_if<EigenIO::is_tensor_of_scalar<ETensor>::value, void>::type
    Writer<T>::write(const std::string &s, const ETensor &output)
    {
      const typename ETensor::Index NumElements{output.size()};
      assert( NumElements > 0 );
      if( NumElements == 1 )
        upcast->writeDefault(s, * output.data());
      else {
        // We're only interested in non-trivial dimensions (i.e. dimensions > 1)
        unsigned int TrivialDimCount{0};
        std::vector<size_t> NonTrivialDims;
        NonTrivialDims.reserve(output.NumDimensions); // Make sure we only do one malloc
        for(auto i = 0; i < output.NumDimensions; i++ ) {
          auto dim = output.dimension(i);
          if( dim <= 1 ) {
            TrivialDimCount++;
            assert( dim == 1 ); // Not expecting dimension to be <= 0
          } else {
            size_t s = static_cast<size_t>(dim);
            assert( s == dim ); // check we didn't lose anything in the conversion
            NonTrivialDims.push_back(s);
          }
        }
        // NB: NumElements > 1 implies this is not a scalar, so some dims should be left
        assert( output.NumDimensions > TrivialDimCount );
        // If the Tensor isn't in Row-Major order, then we'll need to copy its data
        const bool CopyData{ETensor::Layout != Eigen::StorageOptions::RowMajor};
        using Scalar = typename ETensor::Scalar;
        const Scalar * pWriteBuffer;
        Scalar * pCopyBuffer = nullptr;
        if( !CopyData )
          pWriteBuffer = output.data();
        else {
          // Regardless of the Eigen::Tensor storage order, the copy will be Row Major
          pCopyBuffer = static_cast<Scalar *>(malloc(sizeof(Scalar) * NumElements));
          pWriteBuffer = pCopyBuffer;
          std::array<typename ETensor::Index, ETensor::NumIndices> MyIndex;
          for( auto &idx : MyIndex ) idx = 0;
          for( typename ETensor::Index n = 0; n < NumElements; n++ ) {
            pCopyBuffer[n] = output( MyIndex );
            // Now increment the index
            for( int i = output.NumDimensions - 1; i >= 0 && ++MyIndex[i] == output.dimension(i); i-- )
              MyIndex[i] = 0;
          }
        }
        upcast->template writeMultiDim<typename ETensor::Scalar>(s, NonTrivialDims, pWriteBuffer, NumElements);
        if( pCopyBuffer ) free( pCopyBuffer );
      }
    }

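The innermost for loop in the copy branch above is a row-major "odometer": the last index is incremented first and the carry ripples leftwards whenever a dimension wraps. A self-contained sketch of the same pattern with illustrative dimensions:

#include <array>
#include <cstddef>

int main()
{
  const std::array<std::ptrdiff_t, 3> dims{2, 1, 3};   // one trivial dimension
  const std::size_t NumElements = 2 * 1 * 3;
  std::array<std::ptrdiff_t, 3> MyIndex{0, 0, 0};
  std::size_t visited = 0;
  for( std::size_t n = 0; n < NumElements; n++ ) {
    visited++;                                         // "copy" the element at MyIndex here
    // Now increment the index: carry from the last dimension leftwards
    for( int i = 2; i >= 0 && ++MyIndex[i] == dims[i]; i-- )
      MyIndex[i] = 0;
  }
  return visited == NumElements ? 0 : 1;               // every element visited exactly once
}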
    // Eigen::Tensors of Grid tensors (iScalar, iVector, iMatrix)
    template <typename T>
    template <typename ETensor/*, typename U, int N*/>
    typename std::enable_if<EigenIO::is_tensor_of_container<ETensor>::value, void>::type
    typename std::enable_if<EigenIO::is_tensor<ETensor>::value, void>::type
    Writer<T>::write(const std::string &s, const ETensor &output)
    {
      const typename ETensor::Index NumElements{output.size()};
      using Index = typename ETensor::Index;
      using Container = typename ETensor::Scalar; // NB: could be same as Scalar
      using Traits = EigenIO::Traits<Container>;
      using Scalar = typename Traits::scalar_type; // type of the underlying scalar
      constexpr unsigned int TensorRank{ETensor::NumIndices};
      constexpr unsigned int ContainerRank{Traits::rank}; // Only non-zero for containers
      constexpr unsigned int TotalRank{TensorRank + ContainerRank};
      const Index NumElements{output.size()};
      assert( NumElements > 0 );
      if( NumElements == 1 )
        upcast->writeDefault(s, tensorToVec(* output.data()));
      else {
        // We're only interested in non-trivial dimensions (i.e. dimensions > 1)
        unsigned int TrivialDimCount{0};
        std::vector<size_t> NonTrivialDims;
        NonTrivialDims.reserve(output.NumDimensions + EigenIO::Traits<typename ETensor::Scalar>::rank_non_trivial); // Make sure we only do one malloc
        for(auto i = 0; i < output.NumDimensions; i++ ) {
          auto dim = output.dimension(i);
          if( dim <= 1 ) {
            TrivialDimCount++;
            assert( dim == 1 ); // Not expecting dimension to be <= 0
          } else {
            size_t s = static_cast<size_t>(dim);
            assert( s == dim ); // check we didn't lose anything in the conversion
            NonTrivialDims.push_back(s);
          }
        }
        // NB: NumElements > 1 implies this is not a scalar, so some dims should be left
        assert( output.NumDimensions > TrivialDimCount );
        // Now add the extra dimensions, based on object zero
        typename TensorToVec<typename ETensor::Scalar>::type ttv = tensorToVec(* output.data());
        Flatten<typename TensorToVec<typename ETensor::Scalar>::type> f(ttv);
        const std::vector<size_t> & ExtraDims{f.getDim()};
        assert(ExtraDims.size() == EigenIO::Traits<typename ETensor::Scalar>::rank_non_trivial);
        size_t ExtraCount{1};
        for( auto i : ExtraDims ) {
          assert( i > 0 );
          ExtraCount *= i;
          NonTrivialDims.push_back(i);
        }
        assert(EigenIO::Traits<typename ETensor::Scalar>::count == ExtraCount);
        assert(EigenIO::Traits<typename ETensor::Scalar>::size == sizeof( typename ETensor::Scalar ));
        // If the Tensor isn't in Row-Major order, then we'll need to copy its data
        const bool CopyData{ETensor::Layout != Eigen::StorageOptions::RowMajor};
        using Scalar = typename ETensor::Scalar::scalar_type;
        const Scalar * pWriteBuffer;
        Scalar * pCopyBuffer = nullptr;
        const typename ETensor::Index TotalNumElements = NumElements * ExtraCount;
        if( !CopyData )
          pWriteBuffer = output.data()->begin();
        else {
          // Regardless of the Eigen::Tensor storage order, the copy will be Row Major
          pCopyBuffer = static_cast<Scalar *>(malloc(TotalNumElements * sizeof(Scalar)));
          pWriteBuffer = pCopyBuffer;
          Scalar * pCopy = pCopyBuffer;
          std::array<typename ETensor::Index, ETensor::NumIndices> MyIndex;
          for( auto &idx : MyIndex ) idx = 0;
          for( typename ETensor::Index n = 0; n < NumElements; n++ ) {
            // Copy the grid tensor
            for( const Scalar &Source : output( MyIndex ) )
              * pCopy ++ = Source;
            // Now increment the index
            for( int i = output.NumDimensions - 1; i >= 0 && ++MyIndex[i] == output.dimension(i); i-- )
              MyIndex[i] = 0;
          }
        }
        upcast->template writeMultiDim<Scalar>(s, NonTrivialDims, pWriteBuffer, TotalNumElements);
        if( pCopyBuffer ) free( pCopyBuffer );

      // Get the dimensionality of the tensor
      std::vector<std::size_t> TotalDims(TotalRank);
      for(auto i = 0; i < TensorRank; i++ ) {
        auto dim = output.dimension(i);
        TotalDims[i] = static_cast<size_t>(dim);
        assert( TotalDims[i] == dim ); // check we didn't lose anything in the conversion
      }
      for(auto i = 0; i < ContainerRank; i++ )
        TotalDims[TensorRank + i] = Traits::Dimension(i);

      // If the Tensor isn't in Row-Major order, then we'll need to copy its data
      const bool CopyData{NumElements > 1 && ETensor::Layout != Eigen::StorageOptions::RowMajor};
      const Scalar * pWriteBuffer;
      Scalar * pCopyBuffer = nullptr;
      const Index TotalNumElements = NumElements * Traits::count;
      if( !CopyData ) {
        if constexpr ( ContainerRank == 0 )
          pWriteBuffer = output.data();
        else
          pWriteBuffer = output.data()->begin();
      } else {
        // Regardless of the Eigen::Tensor storage order, the copy will be Row Major
        pCopyBuffer = static_cast<Scalar *>(malloc(TotalNumElements * sizeof(Scalar)));
        pWriteBuffer = pCopyBuffer;
        Scalar * pCopy = pCopyBuffer;
        std::array<Index, TensorRank> MyIndex;
        for( auto &idx : MyIndex ) idx = 0;
        for( auto n = 0; n < NumElements; n++ ) {
          if constexpr ( ContainerRank == 0 )
            * pCopy ++ = output( MyIndex );
          else {
            for( const Scalar &Source : output( MyIndex ) )
              * pCopy ++ = Source;
          }
          // Now increment the index
          for( int i = output.NumDimensions - 1; i >= 0 && ++MyIndex[i] == output.dimension(i); i-- )
            MyIndex[i] = 0;
        }
      }
      upcast->template writeMultiDim<Scalar>(s, TotalDims, pWriteBuffer, TotalNumElements);
      if( pCopyBuffer ) free( pCopyBuffer );
    }

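For concreteness, a worked example (an illustrative assumption, not taken from the commit; it assumes Traits::Dimension(0) returns the iVector extent N) of the dimensions the rationalised write() above hands to writeMultiDim:

// Suppose output is an Eigen::Tensor<iVector<ComplexD, 3>, 2> with dimensions (2, 5).
// Following the code above:
//   TensorRank       = 2                // Eigen tensor indices
//   ContainerRank    = 1                // the iVector adds one index (Traits::rank)
//   TotalRank        = 3
//   TotalDims        = { 2, 5, 3 }      // every index, trivial or not, is recorded
//   NumElements      = 10               // product of the Eigen tensor dimensions
//   TotalNumElements = 10 * 3 = 30      // scalars passed to writeMultiDim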
    template <typename T>
@@ -875,21 +804,21 @@ namespace Grid {
      return os;
    }

    template <typename T>
    static inline typename std::enable_if<!std::is_base_of<Eigen::TensorBase<T, Eigen::ReadOnlyAccessors>, T>::value, bool>::type
    CompareMember(const T &lhs, const T &rhs) {
    template <typename T1, typename T2>
    static inline typename std::enable_if<!EigenIO::is_tensor<T1>::value || !EigenIO::is_tensor<T2>::value, bool>::type
    CompareMember(const T1 &lhs, const T2 &rhs) {
      return lhs == rhs;
    }

    template <typename T>
    static inline typename std::enable_if<std::is_base_of<Eigen::TensorBase<T, Eigen::ReadOnlyAccessors>, T>::value, bool>::type
    CompareMember(const T &lhs, const T &rhs) {
    template <typename T1, typename T2>
    static inline typename std::enable_if<EigenIO::is_tensor<T1>::value && EigenIO::is_tensor<T2>::value, bool>::type
    CompareMember(const T1 &lhs, const T2 &rhs) {
      // First check whether dimensions match (Eigen tensor library will assert if they don't match)
      bool bReturnValue = true;
      for( auto i = 0 ; bReturnValue && i < T::NumIndices ; i++ )
      bool bReturnValue = (T1::NumIndices == T2::NumIndices);
      for( auto i = 0 ; bReturnValue && i < T1::NumIndices ; i++ )
        bReturnValue = ( lhs.dimension(i)) == rhs.dimension(i);
      if( bReturnValue ) {
        Eigen::Tensor<bool, 0, T::Options> bResult = (lhs == rhs).all();
        Eigen::Tensor<bool, 0, T1::Options> bResult = (lhs == rhs).all();
        bReturnValue = bResult(0);
      }
      return bReturnValue;
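The Eigen idiom used by the tensor overload above can be exercised on its own; a minimal sketch (plain Eigen, illustrative sizes) of the dimension check followed by the element-wise (lhs == rhs).all() reduction into a rank-0 boolean tensor:

#include <unsupported/Eigen/CXX11/Tensor>

int main()
{
  Eigen::Tensor<double, 2> lhs(2, 3), rhs(2, 3);
  lhs.setConstant(1.0);
  rhs.setConstant(1.0);
  // Dimensions must agree before the element-wise comparison is evaluated
  bool equal = true;
  for( int i = 0; equal && i < 2; i++ )
    equal = lhs.dimension(i) == rhs.dimension(i);
  if( equal ) {
    Eigen::Tensor<bool, 0> result = (lhs == rhs).all();   // reduce over all indices
    equal = result(0);
  }
  return equal ? 0 : 1;
}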
@@ -80,21 +80,21 @@ double d = 2*M_PI;
bool b = false;

template <typename W, typename R, typename O>
void ioTest(const std::string &filename, const O &object, const std::string &name)
void ioTest(const std::string &filename, const O &object, const std::string &name, const char * tag = "testobject" )
{
  std::cout << "IO test: " << name << " -> " << filename << " ...";
  // writer needs to be destroyed so that writing physically happens
  {
    W writer(filename);

    write(writer, "testobject", object);
    write(writer, tag, object);
  }

  std::cout << " done. reading...";
  R reader(filename);
  std::unique_ptr<O> buf( new O ); // In case object too big for stack

  read(reader, "testobject", *buf);
  read(reader, tag, *buf);
  bool good = Serializable::CompareMember(object, *buf);
  if (!good) {
    std::cout << " failure!" << std::endl;
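A hypothetical direct call (object and file name chosen purely for illustration, assuming HDF5 support is built in), showing the new optional tag parameter that names the dataset inside the file:

// The fourth argument is the dataset tag; it defaults to "testobject".
std::vector<double> v{1.0, 2.0, 3.0};
ioTest<Hdf5Writer, Hdf5Reader, std::vector<double>>("iotest_vector.h5", v, "vector<double>", "vec");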
@@ -107,10 +107,8 @@ void ioTest(const std::string &filename, const O &object, const std::string &nam

#ifdef HAVE_HDF5
typedef std::complex<double> TestScalar;
using TensorSingle = Eigen::TensorFixedSize<int, Eigen::Sizes<1>>;
using TensorSimple = Eigen::Tensor<iMatrix<TestScalar,1>, 6>;
typedef Eigen::Tensor<unsigned short, 5> TensorRank5UShort;
typedef Eigen::Tensor<int, 5, Eigen::StorageOptions::RowMajor> TensorRank5IntAlt;
typedef Eigen::TensorFixedSize<unsigned short, Eigen::Sizes<5,4,3,2,1>> TensorRank5UShort;
typedef Eigen::TensorFixedSize<unsigned short, Eigen::Sizes<5,4,3,2>, Eigen::StorageOptions::RowMajor> TensorRank5UShortAlt;
typedef Eigen::Tensor<TestScalar, 3, Eigen::StorageOptions::RowMajor> TensorRank3;
typedef Eigen::TensorFixedSize<TestScalar, Eigen::Sizes<9,4,2>, Eigen::StorageOptions::RowMajor> Tensor_9_4_2;
typedef std::vector<Tensor_9_4_2> aTensor_9_4_2;
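A small Eigen-only sketch (illustrative, not part of the test) showing that the new fixed-size TensorRank5UShort keeps a trailing trivial dimension, which the rationalised writer now records like any other index:

#include <unsupported/Eigen/CXX11/Tensor>
using TensorRank5UShort = Eigen::TensorFixedSize<unsigned short, Eigen::Sizes<5,4,3,2,1>>;

int main()
{
  TensorRank5UShort t;
  static_assert(TensorRank5UShort::NumIndices == 5, "five indices, one of them trivial");
  return t.dimension(4) == 1 ? 0 : 1;   // the trivial trailing index is still present
}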
@@ -143,8 +141,8 @@ public:
  , DistilParameterValues{2,3,1,4,5,1}
  , Perambulator(2,3,1,4,5,1)
  , Perambulator2(7,1,6,1,5,1)
  , tensorRank5UShort{5,4,3,2,1}
  , tensorRank3(7,3,2)
  , atensor_9_4_2(3)
  {
    Grid_complex<double> Flag{1,-3.1415927};
    SequentialInit(Perambulator, Flag);
@@ -162,7 +160,8 @@ public:
};

#define TensorWriteReadInnerNoInit( T ) \
ioTest<Hdf5Writer, Hdf5Reader, T>("iotest_"s + std::to_string(++TestNum) + "_" #T ".h5", t, #T);
filename = "iotest_"s + std::to_string(++TestNum) + "_" #T ".h5"; \
ioTest<Hdf5Writer, Hdf5Reader, T>(filename, t, #T, #T);
#define TensorWriteReadInner( T ) SequentialInit( t ); TensorWriteReadInnerNoInit( T )
#define TensorWriteRead( T ) { T t ; TensorWriteReadInner( T ) }
#define TensorWriteReadV(T, ... ) { T t( __VA_ARGS__ ); TensorWriteReadInner( T ) }
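For reference, a sketch of what a single invocation expands to (whitespace added), assuming the definitions above:

// TensorWriteReadV( TensorRank3, 6, 3, 2 ) becomes:
{
  TensorRank3 t( 6, 3, 2 );
  SequentialInit( t );
  filename = "iotest_"s + std::to_string(++TestNum) + "_TensorRank3.h5";
  ioTest<Hdf5Writer, Hdf5Reader, TensorRank3>(filename, t, "TensorRank3", "TensorRank3");
}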
@@ -170,12 +169,34 @@ public:

void EigenHdf5IOTest(void)
{
  unsigned int TestNum = 0;
  using namespace std::string_literals;
  unsigned int TestNum = 0;
  std::string filename;
  using TensorSingle = Eigen::TensorFixedSize<int, Eigen::Sizes<1>>;
  TensorWriteRead( TensorSingle )
  using TensorSimple = Eigen::Tensor<iMatrix<TestScalar,1>, 6>;
  TensorWriteReadV( TensorSimple, 1, 1, 1, 1, 1, 1 )
  TensorWriteReadV( TensorRank3, 6, 3, 2 )
  TensorWriteRead ( Tensor_9_4_2 )
  {
    TensorRank5UShort t;
    TensorWriteReadInner ( TensorRank5UShort );
    std::cout << " Testing alternate memory order read ... ";
    TensorRank5UShortAlt t2;
    Hdf5Reader reader(filename);
    read(reader, "TensorRank5UShort", t2);
    bool good = true;
    for_all( t2, [&](unsigned short c, unsigned short n,
                     const std::array<size_t, TensorRank5UShortAlt::NumIndices> &Dims ) {
      good = good && ( c == n );
    } );
    if (!good) {
      std::cout << " failure!" << std::endl;
      dump_tensor(t2,"t2");
      exit(EXIT_FAILURE);
    }
    std::cout << " done." << std::endl;
  }
  TensorWriteRead ( LSCTensor )
  TensorWriteReadLarge( PerambIOTestClass )
#ifdef DEBUG