1
0
mirror of https://github.com/paboyle/Grid.git synced 2024-11-10 07:55:35 +00:00

Fixed existing bug in Hdf5Reader::readDefault for std::vector<U>

This commit is contained in:
Michael Marshall 2019-02-16 18:45:46 +00:00
parent 00e9416e0a
commit 74a3a5b825
3 changed files with 36 additions and 23 deletions

View File

@@ -213,13 +213,42 @@ namespace Grid {
}
}
// Helper to dump a tensor (debug builds only)
#ifdef DEBUG
// Pretty-print an Eigen tensor to std::cout in memory order.
// Enabled via SFINAE only for types T satisfying EigenIO::is_tensor<T>.
// t     : tensor to dump. Taken by non-const reference — NOTE(review): the
//         for_all callback below binds scalar_type&, which presumably rules
//         out a const ref here; confirm against for_all's declaration.
// pName : optional label printed before the dimension list (may be nullptr)
template <typename T>
typename std::enable_if<EigenIO::is_tensor<T>::value, void>::type
dump_tensor_func(T &t, const char * pName = nullptr)
{
// Traits describes how the tensor's element type decomposes into scalars
using Traits = typename EigenIO::Traits<typename T::Scalar>;
const auto rank{T::NumIndices};
const auto &dims = t.dimensions();
// Header line: rank, storage order (row- vs column-major) and each extent
std::cout << "Dumping rank " << rank << ((T::Options & Eigen::RowMajor) ? ", row" : ", column") << "-major tensor ";
if( pName )
std::cout << pName;
for( auto i = 0 ; i < rank; i++ ) std::cout << "[" << dims[i] << "]";
std::cout << " in memory order:" << std::endl;
// Visit every underlying scalar. Dims carries the full coordinate: the
// tensor indices plus the non-trivial indices inside each element
// (T::NumIndices + Traits::rank_non_trivial entries in total).
for_all( t, [&](typename Traits::scalar_type &c, typename T::Index index, const std::array<size_t, T::NumIndices + Traits::rank_non_trivial> Dims ){
std::cout << " ";
for( auto dim : Dims )
std::cout << "[" << dim << "]";
std::cout << " = " << c << std::endl;
} );
std::cout << "========================================" << std::endl;
}
// Debug builds: forward the macro to the function (GNU-style variadic macro)
#define dump_tensor(args...) dump_tensor_func(args)
#else
// Release builds: dump_tensor compiles away to nothing
#define dump_tensor(args...)
#endif
// Helper to dump a tensor in memory order
// Kind of superfluous given the above
#ifdef DEBUG
template <typename T>
typename std::enable_if<EigenIO::is_tensor_of_scalar<T>::value, void>::type
DumpMemoryOrder(T t, const char * pName = nullptr)
DumpMemoryOrder(T &t, const char * pName = nullptr)
{
const auto dims = t.dimensions();
const auto rank = t.rank();
const auto &dims = t.dimensions();
std::cout << "Dumping rank " << rank << ((T::Options & Eigen::RowMajor) ? ", row" : ", column") << "-major tensor ";
if( pName )
std::cout << pName;
@@ -258,6 +287,7 @@ namespace Grid {
std::cout << std::endl;
}
}
#endif
// Abstract writer/reader classes ////////////////////////////////////////////
// static polymorphism implemented using CRTP idiom

View File

@@ -215,7 +215,7 @@ namespace Grid
// read the flat vector
std::vector<Element> buf(size);
if (size > dataSetThres_)
if (size * sizeof(Element) > dataSetThres_)
{
H5NS::DataSet dataSet;

View File

@@ -101,20 +101,6 @@ void ioTest(const std::string &filename, const O &object, const std::string &nam
}
#ifdef DEBUG
template <typename T>
//typename std::enable_if<EigenIO::is_tensor<T>::value, void>::type
void dump_tensor(T & t)
{
using Traits = typename EigenIO::Traits<typename T::Scalar>;
for_all( t, [&](typename Traits::scalar_type &c, typename T::Index index, const std::size_t * pDims ){
std::cout << " ";
for( int i = 0 ; i < t.NumDimensions + Traits::rank_non_trivial; i++ )
std::cout << "[" << pDims[i] << "]";
std::cout << " = " << c << std::endl;
} );
std::cout << "========================================" << std::endl;
}
//typedef int TestScalar;
typedef std::complex<double> TestScalar;
typedef Eigen::Tensor<iMatrix<TestScalar,1>, 6> TestTensorSingle;
@@ -159,8 +145,7 @@ bool EigenIOTest(void) {
Val += Inc;
}
ioTest<Hdf5Writer, Hdf5Reader, TestTensor>("iotest_tensor.h5", t, "eigen_tensor_instance_name");
std::cout << "t:";
dump_tensor(t);
dump_tensor(t, "t");
// Now serialise a fixed size tensor
using FixedTensor = Eigen::TensorFixedSize<TestScalar, Eigen::Sizes<8,4,3>>;
@@ -173,8 +158,7 @@ bool EigenIOTest(void) {
Val += Inc;
}
ioTest<Hdf5Writer, Hdf5Reader, FixedTensor>("iotest_tensor_fixed.h5", tf, "eigen_tensor_fixed_name");
std::cout << "tf:";
dump_tensor(tf);
dump_tensor(tf, "tf");
ETSerClass o;
ioTest<Hdf5Writer, Hdf5Reader, ETSerClass>("iotest_object.h5", o, "ETSerClass_object_instance_name");
@@ -192,8 +176,7 @@ bool EigenIOTest(void) {
Val += Inc;
}
ioTest<Hdf5Writer, Hdf5Reader, LSCTensor>("iotest_LSCTensor.h5", l, "LSCTensor_object_instance_name");
std::cout << "l:";
dump_tensor(l);
dump_tensor(l, "l");
// Tensor of spin colour
LCMTensor l2;