Mirror of https://github.com/paboyle/Grid.git
Commit 04b58de5de (parent c77069244d): "Read-back working."
@@ -42,6 +42,7 @@ namespace Grid {
       : std::integral_constant<bool, std::is_arithmetic<T>::value> {};

     // Eigen tensors can be composed of arithmetic scalar and complex types
+    // TODO Support Grid::comples from GPU port
     template<typename T> struct is_scalar : std::integral_constant<bool,
       std::is_arithmetic<T>::value || is_complex<T>::value> {};

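Aside (not part of the commit): the is_scalar trait above simply widens std::is_arithmetic to admit complex element types. A minimal standalone sketch of how such a trait behaves, using a simplified is_complex detector in place of Grid's:

    #include <complex>
    #include <type_traits>

    // Simplified stand-ins for the Grid::EigenIO traits shown in the hunk above.
    template<typename T> struct is_complex : std::false_type {};
    template<typename T> struct is_complex<std::complex<T>> : std::true_type {};

    template<typename T> struct is_scalar : std::integral_constant<bool,
      std::is_arithmetic<T>::value || is_complex<T>::value> {};

    static_assert(is_scalar<double>::value, "arithmetic types are scalars");
    static_assert(is_scalar<std::complex<float>>::value, "complex types are scalars");
    static_assert(!is_scalar<int*>::value, "pointers are not scalars");

    int main() { return 0; }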
@@ -202,10 +203,10 @@ namespace Grid {
       Scalar * pScalar = ET.data();
       for( std::size_t j = 0; j < NumScalars; j++ ) {
         // if constexpr is C++ 17 ... but otherwise need two specialisations (Container vs Scalar)
-        if constexpr ( InnerRank == 0 ) {
+        if constexpr ( EigenIO::is_scalar<Scalar>::value ) {
           lambda( * pScalar, Seq++, MyIndex );
         } else {
-          for( typename Scalar::scalar_type &Source : * pScalar ) {
+          for( typename EigenIO::Traits<Scalar>::scalar_type &Source : * pScalar ) {
             lambda(Source, Seq++, MyIndex );
             // Now increment SubIndex
             for( auto i = rank + InnerRank - 1; i != rank - 1 && ++MyIndex[i] == Dims[i]; i-- )
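Aside (not part of the commit): the comment in this hunk notes that without C++17 `if constexpr`, the scalar and container cases would need separate specialisations. A hedged sketch of the usual pre-C++17 alternative, dispatching through two std::enable_if overloads; the names (ApplyElement, the arithmetic test) are illustrative, not Grid's API:

    #include <iostream>
    #include <type_traits>
    #include <vector>

    // Illustrative only: decide "bare scalar or container?" by letting
    // overload resolution pick the right helper instead of if constexpr.
    template<typename Scalar, typename Lambda>
    typename std::enable_if<std::is_arithmetic<Scalar>::value>::type
    ApplyElement(Scalar &s, Lambda &&lambda) { lambda(s); }

    template<typename Container, typename Lambda>
    typename std::enable_if<!std::is_arithmetic<Container>::value>::type
    ApplyElement(Container &c, Lambda &&lambda) {
      for (auto &inner : c) lambda(inner);   // walk the container's own elements
    }

    int main() {
      double d = 1.5;
      std::vector<double> v{1, 2, 3};
      auto print = [](double &x) { std::cout << x << " "; };
      ApplyElement(d, print);   // scalar overload
      ApplyElement(v, print);   // container overload
      std::cout << std::endl;
    }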
@@ -244,7 +245,7 @@ namespace Grid {
       std::cout << pName;
     for( auto i = 0 ; i < rank; i++ ) std::cout << "[" << dims[i] << "]";
     std::cout << " in memory order:" << std::endl;
-    for_all( t, [&](typename Traits::scalar_type &c, typename T::Index index, const std::array<size_t, T::NumIndices + Traits::rank_non_trivial> Dims ){
+    for_all( t, [&](typename Traits::scalar_type &c, typename T::Index index, const std::array<size_t, T::NumIndices + Traits::rank_non_trivial> &Dims ){
       std::cout << " ";
       for( auto dim : Dims )
         std::cout << "[" << dim << "]";
@@ -253,6 +254,16 @@ namespace Grid {
       std::cout << "========================================" << std::endl;
     }

+    template <typename T>
+    typename std::enable_if<!EigenIO::is_tensor<T>::value, void>::type
+    dump_tensor_func(T &t, const char * pName = nullptr)
+    {
+      std::cout << "Dumping non-tensor object ";
+      if( pName )
+        std::cout << pName;
+      std::cout << "=" << t;
+    }
+
     // Helper to dump a tensor in memory order
     // Kind of superfluous given the above ... just keeping in case I need to fall back to this
 #define DumpMemoryOrder(args...) DumpMemoryOrder_func(args)
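Aside (not part of the commit): the overload added above handles anything that is not an Eigen tensor, so dump_tensor can be called uniformly on the members of a serialisable class. A self-contained sketch of the same shape, with a trivial stand-in for EigenIO::is_tensor:

    #include <iostream>
    #include <type_traits>

    // Simplified stand-in for EigenIO::is_tensor (everything here is a non-tensor).
    template<typename T> struct is_tensor : std::false_type {};

    // Same shape as the overload added above: chosen only when T is not a tensor.
    template <typename T>
    typename std::enable_if<!is_tensor<T>::value, void>::type
    dump_tensor_func(T &t, const char * pName = nullptr) {
      std::cout << "Dumping non-tensor object ";
      if (pName)
        std::cout << pName;
      std::cout << "=" << t << std::endl;
    }

    int main() {
      double pi = 3.14;
      dump_tensor_func(pi, "pi");  // prints: Dumping non-tensor object pi=3.14
    }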
@@ -379,12 +390,12 @@ namespace Grid {
     template <typename ETensor>
     typename std::enable_if<EigenIO::is_tensor_variable<ETensor>::value, void>::type
     Reshape(ETensor &t, const std::array<typename ETensor::Index, ETensor::NumDimensions> &dims );
-    template <typename ETensor>
+    /*template <typename ETensor>
     typename std::enable_if<EigenIO::is_tensor_fixed<ETensor>::value, std::size_t>::type
     DimSize(ETensor &t, std::size_t dim );
     template <typename ETensor>
     typename std::enable_if<EigenIO::is_tensor_variable<ETensor>::value, std::size_t>::type
-    DimSize(ETensor &t, std::size_t dim );
+    DimSize(ETensor &t, std::size_t dim );*/
   protected:
     template <typename U>
     void fromString(U &output, const std::string &s);
@@ -677,43 +688,52 @@ namespace Grid {
     Reader<T>::read(const std::string &s, ETensor &output)
     {
       // alias to element type
-      using Scalar = typename EigenIO::Traits<typename ETensor::Scalar>::scalar_type;
+      using Container = typename ETensor::Scalar;
+      using Traits = EigenIO::Traits<Container>;
+      using Scalar = typename Traits::scalar_type;

       // read the (flat) data and dimensionality
       std::vector<std::size_t> dimData;
       std::vector<Scalar> buf;
       upcast->readMultiDim( s, buf, dimData );
       // Make sure that the number of elements read matches dimensions read
-      const std::size_t NumElements{buf.size()};
-      std::size_t NumElements_check = 1;
+      std::size_t NumElements = 1;
       std::size_t RankRequired = 0;
       std::vector<typename ETensor::Index> dimNonTrivial;
       dimNonTrivial.reserve(dimData.size());
       for( auto d : dimData ) {
-        NumElements_check *= d;
+        NumElements *= d;
         if( d > 1 ) {
           RankRequired++;
           dimNonTrivial.push_back(d);
         }
       }
-      //if( RankRequired == 0 ) RankRequired++;
-      assert( NumElements_check == NumElements );
+      assert( NumElements == buf.size() && "Number of elements read back <> product of dimensions" );
+      // If our scalar object is a Container, make sure it's dimensions match what we read back
+      const auto InnerRank{Traits::rank_non_trivial};
+      if ( InnerRank > 0 ) {
+        assert( RankRequired >= InnerRank && "Tensor Container too complex for data" );
+        for( auto i = InnerRank - 1 ; i != -1 ; i-- ) {
+          auto d = dimNonTrivial[--RankRequired];
+          assert( d == Traits::DimensionNT(i) && "Tensor Container dimensions don't match data" );
+          NumElements /= d;
+          dimNonTrivial.pop_back();
+        }
+      }
       // Make sure our object has the right rank
-      using Container = typename ETensor::Scalar;
-      const auto InnerRank = EigenIO::Traits<Container>::rank_non_trivial;
-      assert( ETensor::NumDimensions + InnerRank >= RankRequired );
+      assert( ETensor::NumDimensions >= RankRequired );
       bool bShapeOK = true;
       std::size_t RankNonTrivial = 0;
-      // Make sure fixed dimension objects have allocated memory
+      const auto & dims{output.dimensions()};
       using ETDims = std::array<typename ETensor::Index, ETensor::NumDimensions>;
       ETDims dimsNew;
+      // Make sure fixed dimension objects have allocated memory
       /*if constexpr( EigenIO::is_tensor_fixed<ETensor>::value ) {
         for( auto &d : dimsNew ) d = 0;
         output( dimsNew ) = 0;
       }*/
-      //const auto & dims{output.dimensions()};
       for( auto i = 0, j = 0 ; bShapeOK && i < ETensor::NumDimensions ; i++ ) {
-        auto d = DimSize( output, i );
+        auto d = dims[i];
         if( d < 1 )
           bShapeOK = false;
         else if( d > 1 ) {
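Aside (not part of the commit): the read-back now validates the flattened data in two steps: the product of the stored dimensions must equal the buffer size, and the trailing non-trivial dimensions must match those of the inner Container (e.g. a SpinColourVector). A self-contained sketch of that check, with the container's dimensions passed explicitly instead of taken from EigenIO::Traits (ValidateReadBack is a hypothetical helper, not the commit's code):

    #include <cassert>
    #include <cstddef>
    #include <vector>

    // Hypothetical helper: check that flat data of size bufSize, described by dims,
    // is consistent with an inner container whose non-trivial dimensions are innerDims.
    // Returns the number of outer (tensor) elements left after the inner
    // dimensions have been stripped from the right.
    std::size_t ValidateReadBack(std::size_t bufSize,
                                 const std::vector<std::size_t> &dims,
                                 const std::vector<std::size_t> &innerDims) {
      std::size_t NumElements = 1;
      std::vector<std::size_t> dimNonTrivial;
      for (auto d : dims) {
        NumElements *= d;
        if (d > 1) dimNonTrivial.push_back(d);
      }
      assert(NumElements == bufSize && "Number of elements read back <> product of dimensions");
      // Strip the container's own dimensions from the right-hand end.
      std::size_t RankRequired = dimNonTrivial.size();
      assert(RankRequired >= innerDims.size() && "Tensor Container too complex for data");
      for (auto i = innerDims.size(); i-- > 0;) {
        auto d = dimNonTrivial[--RankRequired];
        assert(d == innerDims[i] && "Tensor Container dimensions don't match data");
        NumElements /= d;
        dimNonTrivial.pop_back();
      }
      return NumElements; // elements of the outer Eigen tensor
    }

    int main() {
      // e.g. a 3 x 5 tensor of containers, each container 4 x 3 (spin x colour)
      std::vector<std::size_t> dims{3, 5, 4, 3};
      assert(ValidateReadBack(3 * 5 * 4 * 3, dims, {4, 3}) == 15);
    }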
@@ -737,14 +757,14 @@ namespace Grid {
       std::size_t idx = 0;
       for( auto n = 0 ; n < NumElements ; n++ ) {
         Container & c = output( MyIndex );
-        if constexpr( InnerRank == 0 ) {
+        if constexpr ( EigenIO::is_scalar<Container>::value ) {
           c = buf[idx++];
         } else {
           for( Scalar & s : c )
             s = buf[idx++];
         }
         // Now increment the index
-        for( int i = output.NumDimensions - 1; i >= 0 && ++MyIndex[i] == output.dimension(i); i-- )
+        for( int i = ETensor::NumDimensions - 1; i >= 0 && ++MyIndex[i] == dims[i]; i-- )
           MyIndex[i] = 0;
       }
     }
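Aside (not part of the commit): the copy loop treats MyIndex as a row-major odometer: the rightmost index ticks fastest and carries into the index to its left whenever it reaches that dimension's extent. A small standalone sketch of the same increment using plain arrays instead of Eigen indices:

    #include <array>
    #include <cstdio>

    int main() {
      const std::array<int, 3> dims{2, 2, 3};
      std::array<int, 3> MyIndex{0, 0, 0};
      const int NumElements = dims[0] * dims[1] * dims[2];
      for (int n = 0; n < NumElements; n++) {
        std::printf("[%d][%d][%d]\n", MyIndex[0], MyIndex[1], MyIndex[2]);
        // Same increment as the read loop: bump the last index, carrying left
        // whenever an index reaches its dimension.
        for (int i = 3 - 1; i >= 0 && ++MyIndex[i] == dims[i]; i--)
          MyIndex[i] = 0;
      }
    }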
@@ -766,7 +786,7 @@ namespace Grid {
       t.resize( dims );
     }

-    template <typename T>
+    /*template <typename T>
     template <typename ETensor>
     typename std::enable_if<EigenIO::is_tensor_fixed<ETensor>::value, std::size_t>::type
     Reader<T>::DimSize(ETensor &t, std::size_t dim )
@@ -780,7 +800,7 @@ namespace Grid {
     Reader<T>::DimSize(ETensor &t, std::size_t dim )
     {
       return t.dimension(dim);
-    }
+    }*/

     template <typename T>
     template <typename U>
@@ -98,6 +98,8 @@ void ioTest(const std::string &filename, const O &object, const std::string &nam
   bool good = Serializable::CompareMember(object, buf);
   if (!good) {
     std::cout << " failure!" << std::endl;
+    if constexpr (EigenIO::is_tensor<O>::value)
+      dump_tensor(buf,"???");
     exit(EXIT_FAILURE);
   }
   std::cout << " done." << std::endl;
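Aside (not part of the commit): the new guard relies on C++17 `if constexpr` discarding the dump_tensor call at compile time when O is not an Eigen tensor; a plain `if` would still require the call to compile for every O. A minimal illustration with a simplified trait (not Grid's):

    #include <iostream>
    #include <type_traits>

    template<typename T> struct is_tensor : std::false_type {};
    struct FakeTensor { int rank = 3; };
    template<> struct is_tensor<FakeTensor> : std::true_type {};

    template<typename O>
    void reportFailure(const O &buf) {
      std::cout << "failure!" << std::endl;
      if constexpr (is_tensor<O>::value)
        std::cout << "rank = " << buf.rank << std::endl;  // only compiled when O is FakeTensor
    }

    int main() {
      reportFailure(42);            // int has no .rank, but this still compiles
      reportFailure(FakeTensor{});  // tensor branch taken
    }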
@@ -109,21 +111,28 @@ typedef std::complex<double> TestScalar;
 typedef Eigen::Tensor<TestScalar, 3, Eigen::StorageOptions::RowMajor> TestTensor;
 typedef Eigen::TensorFixedSize<TestScalar, Eigen::Sizes<9,4,2>, Eigen::StorageOptions::RowMajor> TestTensorFixed;
 typedef std::vector<TestTensorFixed> aTestTensorFixed;
-typedef Eigen::TensorFixedSize<SpinColourVector, Eigen::Sizes<11,3,2>> LSCTensor;
-typedef Eigen::TensorFixedSize<LorentzColourMatrix, Eigen::Sizes<5,7,2>> LCMTensor;
+typedef Eigen::TensorFixedSize<SpinColourVector, Eigen::Sizes<11,3,2>, Eigen::StorageOptions::RowMajor> LSCTensor;
+typedef Eigen::TensorFixedSize<LorentzColourMatrix, Eigen::Sizes<5,7,2>, Eigen::StorageOptions::RowMajor> LCMTensor;
 // From Test_serialisation.cc
-class ETSerClass: Serializable {
+class PerambIOTestClass: Serializable {
 public:
-  GRID_SERIALIZABLE_CLASS_MEMBERS(ETSerClass
-      , SpinColourVector, scv
-      , SpinColourMatrix, scm
-      , TestTensor, Critter
-      , TestTensorFixed, FixedCritter
-      , aTestTensorFixed, aFixedCritter
-      , LSCTensor, MyLSCTensor
-      , LCMTensor, MyLCMTensor
+  using PerambTensor = Eigen::Tensor<SpinColourVector, 6, Eigen::StorageOptions::RowMajor>;
+  GRID_SERIALIZABLE_CLASS_MEMBERS(PerambIOTestClass
+      //, SpinColourVector, scv
+      //, SpinColourMatrix, scm
+      , PerambTensor, Perambulator
+      , std::vector<std::string>, DistilParameterNames
+      , std::vector<int>, DistilParameterValues
+      //, TestTensor, Critter
+      //, TestTensorFixed, FixedCritter
+      //, aTestTensorFixed, aFixedCritter
+      //, LSCTensor, MyLSCTensor
+      //, LCMTensor, MyLCMTensor
      );
-  ETSerClass() : Critter(7,3,2), aFixedCritter(3) {}
+  PerambIOTestClass() : Perambulator(2,3,1,4,5,1),
+      DistilParameterNames {"alpha", "beta", "gamma", "delta", "epsilon", "what's f?"},
+      DistilParameterValues{2,3,1,4,5,1}//, Critter(7,3,2), aFixedCritter(3)
+  {}
 };

 bool EigenIOTest(void) {
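Aside (not part of the commit): PerambTensor is a rank-6, dynamically sized Eigen tensor, so the six constructor arguments in Perambulator(2,3,1,4,5,1) are its extents. A minimal sketch of the same shape using a plain std::complex element instead of SpinColourVector, so it builds without Grid (assumes Eigen's unsupported Tensor module is on the include path):

    #include <complex>
    #include <iostream>
    #include <unsupported/Eigen/CXX11/Tensor>

    int main() {
      // Same rank and extents as PerambIOTestClass::Perambulator, but with a
      // plain complex element so the example does not depend on Grid types.
      Eigen::Tensor<std::complex<double>, 6, Eigen::StorageOptions::RowMajor> peramb(2, 3, 1, 4, 5, 1);
      std::cout << "rank = " << peramb.NumDimensions
                << ", elements = " << peramb.size() << std::endl;  // 2*3*1*4*5*1 = 120
    }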
@@ -157,7 +166,7 @@ bool EigenIOTest(void) {
     Val += Inc;
   }
   ioTest<Hdf5Writer, Hdf5Reader, TestTensor>("iotest_tensor.h5", t, "eigen_tensor_instance_name");
-  dump_tensor(t, "t");
+  //dump_tensor(t, "t");

   // Now serialise a fixed size tensor
   using FixedTensor = Eigen::TensorFixedSize<TestScalar, Eigen::Sizes<8,4,3>>;
@@ -170,11 +179,26 @@ bool EigenIOTest(void) {
     Val += Inc;
   }
   ioTest<Hdf5Writer, Hdf5Reader, FixedTensor>("iotest_tensor_fixed.h5", tf, "eigen_tensor_fixed_name");
-  dump_tensor(tf, "tf");
+  //dump_tensor(tf, "tf");

-  ETSerClass o;
-  ioTest<Hdf5Writer, Hdf5Reader, ETSerClass>("iotest_object.h5", o, "ETSerClass_object_instance_name");
-
+  PerambIOTestClass o;
+  for_all( o.Perambulator, [&](TestScalar &c, float f, const std::array<size_t,PerambIOTestClass::PerambTensor::NumIndices + EigenIO::Traits<SpinColourVector>::rank_non_trivial> &Dims ){
+    c = TestScalar{f,-f};
+    //std::cout << " a(" << Dims[0] << "," << Dims[1] << "," << Dims[2] << ")=" << c;
+  } );
+  dump_tensor(o.Perambulator, "PerambIOTestClass" );
+  /*for_all( o.FixedCritter, [&](TestScalar &c, float f, const std::array<size_t,TestTensorFixed::NumIndices> &Dims ){
+    c = TestScalar{f,-f};
+    //std::cout << " a(" << Dims[0] << "," << Dims[1] << "," << Dims[2] << ")=" << c;
+  } );
+  for( auto &z : o.aFixedCritter )
+    for_all( z, [&](TestScalar &c, float f, const std::array<size_t,TestTensorFixed::NumIndices> &Dims ){
+      c = TestScalar{f,-f};
+      //std::cout << " a(" << Dims[0] << "," << Dims[1] << "," << Dims[2] << ")=" << c;
+    } );*/
+  ioTest<Hdf5Writer, Hdf5Reader, PerambIOTestClass>("iotest_object.h5", o, "PerambIOTestClass_object_instance_name");
+  //DumpMemoryOrder(o.Perambulator);
+
   // Tensor of spin colour
   LSCTensor l;
   Val = 0;
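Aside (not part of the commit): the for_all call above sets every underlying scalar to {seq, -seq}, where seq is the element's position in memory order. A rough equivalent that fills a tensor through Eigen's flat data() pointer directly; this is illustrative and does not use Grid's for_all API:

    #include <complex>
    #include <iostream>
    #include <unsupported/Eigen/CXX11/Tensor>

    int main() {
      using TestScalar = std::complex<double>;
      Eigen::Tensor<TestScalar, 3, Eigen::StorageOptions::RowMajor> t(2, 3, 4);
      // Fill in memory order: element k becomes {k, -k}, mimicking c = TestScalar{f,-f}.
      TestScalar *p = t.data();
      for (float f = 0; f < t.size(); f++)
        *p++ = TestScalar{f, -f};
      std::cout << "t(1,2,3) = " << t(1, 2, 3) << std::endl;  // last element: {23,-23}
    }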
@@ -188,7 +212,7 @@ bool EigenIOTest(void) {
     Val += Inc;
   }
   ioTest<Hdf5Writer, Hdf5Reader, LSCTensor>("iotest_LSCTensor.h5", l, "LSCTensor_object_instance_name");
-  dump_tensor(l, "l");
+  //dump_tensor(l, "l");

   // Tensor of spin colour
   LCMTensor l2;
@@ -204,7 +228,8 @@ bool EigenIOTest(void) {
     Val += Inc;
   }
   ioTest<Hdf5Writer, Hdf5Reader, LCMTensor>("iotest_LCMTensor.h5", l2, "LCMTensor_object_instance_name");
+  //dump_tensor(l2, "l2");

   std::cout << "Wow!" << std::endl;

   return true;
@@ -315,28 +315,30 @@ bool bNumber( int &ri, const char * & pstr, bool bGobbleWhiteSpace = true )

 typedef Grid::Hadrons::MDistil::NamedTensor<Complex,3,sizeof(Real)> MyTensor;

-void DebugShowTensor(MyTensor &x, const char * n)
+template<typename T>
+void DebugShowTensor(T &x, const char * n)
 {
   const MyTensor::Index s{x.size()};
   std::cout << n << ".size() = " << s << std::endl;
   std::cout << n << ".NumDimensions = " << x.NumDimensions << " (TensorBase)" << std::endl;
   std::cout << n << ".NumIndices = " << x.NumIndices << std::endl;
-  const MyTensor::Dimensions & d{x.dimensions()};
-  std::cout << n << ".dimensions().size() = " << d.size() << std::endl;
+  const auto d{x.dimensions()};
+  //std::cout << n << ".dimensions().size() = " << d.size() << std::endl;
   std::cout << "Dimensions are ";
-  for(auto i : d ) std::cout << "[" << i << "]";
+  for(auto i = 0; i < x.NumDimensions ; i++)
+    std::cout << "[" << d[i] << "]";
   std::cout << std::endl;
   MyTensor::Index SizeCalculated{1};
   std::cout << "Dimensions again";
-  for(int i=0 ; i < d.size() ; i++ ) {
-    std::cout << " : [" << i << ", " << x.IndexNames[i] << "]=" << d[i];
+  for(int i=0 ; i < x.NumDimensions ; i++ ) {
+    std::cout << " : [" << i << /*", " << x.IndexNames[i] << */"]=" << x.dimension(i);
     SizeCalculated *= d[i];
   }
   std::cout << std::endl;
   std::cout << "SizeCalculated = " << SizeCalculated << std::endl;\
   assert( SizeCalculated == s );
   // Initialise
-  assert( d.size() == 3 );
+  assert( x.NumDimensions == 3 );
   for( int i = 0 ; i < d[0] ; i++ )
     for( int j = 0 ; j < d[1] ; j++ )
       for( int k = 0 ; k < d[2] ; k++ ) {
@@ -345,7 +347,7 @@ void DebugShowTensor(MyTensor &x, const char * n)
       }
   // Show raw data
   std::cout << "Data follow : " << std::endl;
-  Complex * p = x.data();
+  typename T::Scalar * p = x.data();
   for( auto i = 0 ; i < s ; i++ ) {
     if( i ) std::cout << ", ";
     std::cout << n << ".data()[" << i << "]=" << * p++;
@@ -415,6 +417,10 @@ void DebugTestTypeEqualities(void)

 bool DebugEigenTest()
 {
+  {
+    Eigen::TensorFixedSize<std::complex<double>,Eigen::Sizes<3,4,5>> x;
+    DebugShowTensor(x, "fixed");
+  }
   const char pszTestFileName[] = "test_tensor.bin";
   std::array<std::string,3> as={"Alpha", "Beta", "Gamma"};
   MyTensor x(as, 2,1,4);
@@ -636,7 +642,7 @@ int main(int argc, char *argv[])
             << ", sizeof(std::size_t) = " << sizeof(std::size_t)
             << ", sizeof(std::streamsize) = " << sizeof(std::streamsize)
             << ", sizeof(Eigen::Index) = " << sizeof(Eigen::Index) << std::endl;
-  //if( DebugEigenTest() ) return 0;
+  if( DebugEigenTest() ) return 0;
   if(DebugGridTensorTest()) return 0;
 #endif
