Mirror of https://github.com/paboyle/Grid.git (synced 2025-11-04)

Commit: Eigen::Tensor serialisation. Tested on single and double precision builds
Changed files: .gitignore (vendored) — 1 line changed; Grid/util/EigenUtil.h — new file, 198 lines (plus serialisation and tensor-trait headers below)
@@ -114,3 +114,4 @@ gh-pages/
#####################
Grid/qcd/spin/gamma-gen/*.h
Grid/qcd/spin/gamma-gen/*.cc
Grid/util/Version.h
@@ -33,8 +33,62 @@ Author: Guido Cossu <guido.cossu@ed.ac.uk>
#include <type_traits>
#include <Grid/tensors/Tensors.h>
#include <Grid/serialisation/VectorUtils.h>
#include <Grid/Eigen/unsupported/CXX11/Tensor>

namespace Grid {
  namespace EigenIO {
    // EigenIO works for scalars that are not just Grid supported scalars
    template<typename T, typename V = void> struct is_complex : public std::false_type {};
    // Support all complex types (not just Grid complex types) - even if the definitions overlap (!)
    template<typename T> struct is_complex<             T , typename
        std::enable_if< ::Grid::is_complex<             T >::value>::type> : public std::true_type {};
    template<typename T> struct is_complex<std::complex<T>, typename
        std::enable_if<!::Grid::is_complex<std::complex<T>>::value>::type> : public std::true_type {};

    // Helpers to support I/O for Eigen tensors of arithmetic scalars, complex types, or Grid tensors
    template<typename T, typename V = void> struct is_scalar : public std::false_type {};
    template<typename T> struct is_scalar<T, typename std::enable_if<std::is_arithmetic<T>::value || is_complex<T>::value>::type> : public std::true_type {};

    // Is this an Eigen tensor
    template<typename T> struct is_tensor : std::integral_constant<bool,
      std::is_base_of<Eigen::TensorBase<T, Eigen::ReadOnlyAccessors>, T>::value> {};

    // Is this an Eigen tensor of a supported scalar
    template<typename T, typename V = void> struct is_tensor_of_scalar : public std::false_type {};
    template<typename T> struct is_tensor_of_scalar<T, typename std::enable_if<is_tensor<T>::value && is_scalar<typename T::Scalar>::value>::type> : public std::true_type {};

    // Is this an Eigen tensor of a supported container
    template<typename T, typename V = void> struct is_tensor_of_container : public std::false_type {};
    template<typename T> struct is_tensor_of_container<T, typename std::enable_if<is_tensor<T>::value && isGridTensor<typename T::Scalar>::value>::type> : public std::true_type {};

    // Traits are the default for scalars, or come from GridTypeMapper for GridTensors
    template<typename T, typename V = void> struct Traits {};
    template<typename T> struct Traits<T, typename std::enable_if<is_tensor_of_scalar<T>::value>::type> : public GridTypeMapper_Base {
      using scalar_type = typename T::Scalar;
      static constexpr bool is_complex = ::Grid::EigenIO::is_complex<scalar_type>::value;
    };
    template<typename T> struct Traits<T, typename std::enable_if<is_tensor_of_container<T>::value>::type> : public GridTypeMapper<typename T::Scalar> {
      using scalar_type = typename GridTypeMapper<typename T::Scalar>::scalar_type;
      static constexpr bool is_complex = ::Grid::EigenIO::is_complex<scalar_type>::value;
    };

    // Is this a fixed-size Eigen tensor
    template<typename T> struct is_tensor_fixed : public std::false_type {};
    template<typename Scalar_, typename Dimensions_, int Options_, typename IndexType>
    struct is_tensor_fixed<Eigen::TensorFixedSize<Scalar_, Dimensions_, Options_, IndexType>>
        : public std::true_type {};
    template<typename Scalar_, typename Dimensions_, int Options_, typename IndexType,
              int MapOptions_, template <class> class MapPointer_>
    struct is_tensor_fixed<Eigen::TensorMap<Eigen::TensorFixedSize<Scalar_, Dimensions_,
                                            Options_, IndexType>, MapOptions_, MapPointer_>>
        : public std::true_type {};

    // Is this a variable-size Eigen tensor
    template<typename T, typename V = void> struct is_tensor_variable : public std::false_type {};
    template<typename T> struct is_tensor_variable<T, typename std::enable_if<is_tensor<T>::value
        && !is_tensor_fixed<T>::value>::type> : public std::true_type {};
  }

  // Abstract writer/reader classes ////////////////////////////////////////////
  // static polymorphism implemented using CRTP idiom
  class Serializable;
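For orientation, a compile-time sketch of what these traits select (my own example, not part of the diff; the Grid::iVector payload is an assumption):

  using TS = Eigen::Tensor<std::complex<double>, 3>;             // tensor of a plain complex scalar
  using TC = Eigen::Tensor<Grid::iVector<Grid::ComplexD, 4>, 2>; // tensor of a Grid container (assumed payload)
  static_assert(Grid::EigenIO::is_tensor<TS>::value && Grid::EigenIO::is_tensor<TC>::value, "both are Eigen tensors");
  static_assert(Grid::EigenIO::is_tensor_of_scalar<TS>::value,    "scalar payload");
  static_assert(Grid::EigenIO::is_tensor_of_container<TC>::value, "Grid-tensor payload");
  // Traits<TC>::scalar_type is the underlying Grid scalar, and Traits<TC>::Rank / Dimension(i)
  // describe the inner (container) indices that get appended to the Eigen indices on disk.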
@@ -49,10 +103,10 @@ namespace Grid {
    void push(const std::string &s);
    void pop(void);
    template <typename U>
    typename std::enable_if<std::is_base_of<Serializable, U>::value, void>::type
    typename std::enable_if<std::is_base_of<Serializable, U>::value>::type
    write(const std::string& s, const U &output);
    template <typename U>
    typename std::enable_if<!std::is_base_of<Serializable, U>::value, void>::type
    typename std::enable_if<!std::is_base_of<Serializable, U>::value && !EigenIO::is_tensor<U>::value>::type
    write(const std::string& s, const U &output);
    template <typename U>
    void write(const std::string &s, const iScalar<U> &output);
@@ -60,6 +114,42 @@ namespace Grid {
    void write(const std::string &s, const iVector<U, N> &output);
    template <typename U, int N>
    void write(const std::string &s, const iMatrix<U, N> &output);
    template <typename ETensor>
    typename std::enable_if<EigenIO::is_tensor<ETensor>::value>::type
    write(const std::string &s, const ETensor &output);

    // Helper functions for Scalar vs Container specialisations
    template <typename ETensor>
    inline typename std::enable_if<EigenIO::is_tensor_of_scalar<ETensor>::value,
    const typename ETensor::Scalar *>::type
    getFirstScalar(const ETensor &output)
    {
      return output.data();
    }

    template <typename ETensor>
    inline typename std::enable_if<EigenIO::is_tensor_of_container<ETensor>::value,
    const typename EigenIO::Traits<ETensor>::scalar_type *>::type
    getFirstScalar(const ETensor &output)
    {
      return output.data()->begin();
    }

    template <typename S>
    inline typename std::enable_if<EigenIO::is_scalar<S>::value, void>::type
    copyScalars(S * &pCopy, const S &Source)
    {
      * pCopy ++ = Source;
    }

    template <typename S>
    inline typename std::enable_if<isGridTensor<S>::value, void>::type
    copyScalars(typename GridTypeMapper<S>::scalar_type * &pCopy, const S &Source)
    {
      for( const typename GridTypeMapper<S>::scalar_type &item : Source )
        * pCopy ++ = item;
    }

    void         scientificFormat(const bool set);
    bool         isScientific(void);
    void         setPrecision(const unsigned int prec);
@@ -83,7 +173,8 @@ namespace Grid {
    typename std::enable_if<std::is_base_of<Serializable, U>::value, void>::type
    read(const std::string& s, U &output);
    template <typename U>
    typename std::enable_if<!std::is_base_of<Serializable, U>::value, void>::type
    typename std::enable_if<!std::is_base_of<Serializable, U>::value
                         && !EigenIO::is_tensor<U>::value, void>::type
    read(const std::string& s, U &output);
    template <typename U>
    void read(const std::string &s, iScalar<U> &output);
@@ -91,6 +182,32 @@ namespace Grid {
    void read(const std::string &s, iVector<U, N> &output);
    template <typename U, int N>
    void read(const std::string &s, iMatrix<U, N> &output);
    template <typename ETensor>
    typename std::enable_if<EigenIO::is_tensor<ETensor>::value, void>::type
    read(const std::string &s, ETensor &output);
    template <typename ETensor>
    typename std::enable_if<EigenIO::is_tensor_fixed<ETensor>::value, void>::type
    Reshape(ETensor &t, const std::array<typename ETensor::Index, ETensor::NumDimensions> &dims );
    template <typename ETensor>
    typename std::enable_if<EigenIO::is_tensor_variable<ETensor>::value, void>::type
    Reshape(ETensor &t, const std::array<typename ETensor::Index, ETensor::NumDimensions> &dims );

    // Helper functions for Scalar vs Container specialisations
    template <typename S>
    inline typename std::enable_if<EigenIO::is_scalar<S>::value, void>::type
    copyScalars(S &Dest, const S * &pSource)
    {
      Dest = * pSource ++;
    }

    template <typename S>
    inline typename std::enable_if<isGridTensor<S>::value, void>::type
    copyScalars(S &Dest, const typename GridTypeMapper<S>::scalar_type * &pSource)
    {
      for( typename GridTypeMapper<S>::scalar_type &item : Dest )
        item = * pSource ++;
    }

  protected:
    template <typename U>
    void fromString(U &output, const std::string &s);
@@ -135,12 +252,14 @@ namespace Grid {

  template <typename T>
  template <typename U>
  typename std::enable_if<!std::is_base_of<Serializable, U>::value, void>::type
  typename std::enable_if<!std::is_base_of<Serializable, U>::value
                       && !EigenIO::is_tensor<U>::value, void>::type
  Writer<T>::write(const std::string &s, const U &output)
  {
    upcast->writeDefault(s, output);
  }


  template <typename T>
  template <typename U>
  void Writer<T>::write(const std::string &s, const iScalar<U> &output)
@@ -162,6 +281,58 @@ namespace Grid {
    upcast->writeDefault(s, tensorToVec(output));
  }

  // Eigen::Tensors of Grid tensors (iScalar, iVector, iMatrix)
  template <typename T>
  template <typename ETensor>
  typename std::enable_if<EigenIO::is_tensor<ETensor>::value, void>::type
  Writer<T>::write(const std::string &s, const ETensor &output)
  {
    using Index = typename ETensor::Index;
    using Container = typename ETensor::Scalar; // NB: could be same as scalar
    using Traits = EigenIO::Traits<ETensor>;
    using Scalar = typename Traits::scalar_type; // type of the underlying scalar
    constexpr unsigned int TensorRank{ETensor::NumIndices};
    constexpr unsigned int ContainerRank{Traits::Rank}; // Only non-zero for containers
    constexpr unsigned int TotalRank{TensorRank + ContainerRank};
    const Index NumElements{output.size()};
    assert( NumElements > 0 );

    // Get the dimensionality of the tensor
    std::vector<std::size_t>  TotalDims(TotalRank);
    for(auto i = 0; i < TensorRank; i++ ) {
      auto dim = output.dimension(i);
      TotalDims[i] = static_cast<size_t>(dim);
      assert( TotalDims[i] == dim ); // check we didn't lose anything in the conversion
    }
    for(auto i = 0; i < ContainerRank; i++ )
      TotalDims[TensorRank + i] = Traits::Dimension(i);

    // If the Tensor isn't in Row-Major order, then we'll need to copy its data
    const bool CopyData{NumElements > 1 && ETensor::Layout != Eigen::StorageOptions::RowMajor};
    const Scalar * pWriteBuffer;
    Scalar * pCopyBuffer = nullptr;
    const Index TotalNumElements = NumElements * Traits::count;
    if( !CopyData ) {
      pWriteBuffer = getFirstScalar( output );
    } else {
      // Regardless of the Eigen::Tensor storage order, the copy will be Row Major
      pCopyBuffer = new Scalar[TotalNumElements];
      pWriteBuffer = pCopyBuffer;
      Scalar * pCopy = pCopyBuffer;
      std::array<Index, TensorRank> MyIndex;
      for( auto &idx : MyIndex ) idx = 0;
      for( auto n = 0; n < NumElements; n++ ) {
        const Container & c = output( MyIndex );
        copyScalars( pCopy, c );
        // Now increment the index
        for( int i = output.NumDimensions - 1; i >= 0 && ++MyIndex[i] == output.dimension(i); i-- )
          MyIndex[i] = 0;
      }
    }
    upcast->template writeMultiDim<Scalar>(s, TotalDims, pWriteBuffer, TotalNumElements);
    if( pCopyBuffer ) delete [] pCopyBuffer;
  }

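As a usage sketch (mine, not from the commit; the file name and the choice of HDF5 are assumptions — the other writers work the same way):

  Eigen::Tensor<Grid::ComplexD, 3> t(2, 3, 4);
  Grid::SequentialInit(t);                   // from Grid/util/EigenUtil.h, added later in this commit
  {
    Grid::Hdf5Writer writer("tensor.h5");    // XmlWriter / TextWriter / BinaryWriter are analogous
    write(writer, "t", t);                   // resolves to Writer<T>::write for Eigen tensors
  }
  Eigen::Tensor<Grid::ComplexD, 3> u;        // variable-size tensors are resized on read
  {
    Grid::Hdf5Reader reader("tensor.h5");
    read(reader, "t", u);                    // u now matches t
  }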
  template <typename T>
  void Writer<T>::scientificFormat(const bool set)
  {
@@ -215,7 +386,8 @@ namespace Grid {

  template <typename T>
  template <typename U>
  typename std::enable_if<!std::is_base_of<Serializable, U>::value, void>::type
  typename std::enable_if<!std::is_base_of<Serializable, U>::value
                       && !EigenIO::is_tensor<U>::value, void>::type
  Reader<T>::read(const std::string &s, U &output)
  {
    upcast->readDefault(s, output);
@@ -251,6 +423,79 @@ namespace Grid {
    vecToTensor(output, v);
  }

  template <typename T>
  template <typename ETensor>
  typename std::enable_if<EigenIO::is_tensor<ETensor>::value, void>::type
  Reader<T>::read(const std::string &s, ETensor &output)
  {
    using Index = typename ETensor::Index;
    using Container = typename ETensor::Scalar; // NB: could be same as scalar
    using Traits = EigenIO::Traits<ETensor>;
    using Scalar = typename Traits::scalar_type; // type of the underlying scalar
    constexpr unsigned int TensorRank{ETensor::NumIndices};
    constexpr unsigned int ContainerRank{Traits::Rank}; // Only non-zero for containers
    constexpr unsigned int TotalRank{TensorRank + ContainerRank};
    using ETDims = std::array<Index, TensorRank>; // Dimensions of the tensor

    // read the (flat) data and dimensionality
    std::vector<std::size_t> dimData;
    std::vector<Scalar> buf;
    upcast->readMultiDim( s, buf, dimData );
    assert(dimData.size() == TotalRank && "EigenIO: Tensor rank mismatch" );
    // Make sure that the number of elements read matches dimensions read
    std::size_t NumContainers = 1;
    for( auto i = 0 ; i < TensorRank ; i++ )
      NumContainers *= dimData[i];
    // If our scalar object is a Container, make sure its dimensions match what we read back
    std::size_t ElementsPerContainer = 1;
    for( auto i = 0 ; i < ContainerRank ; i++ ) {
      assert( dimData[TensorRank+i] == Traits::Dimension(i) && "Tensor Container dimensions don't match data" );
      ElementsPerContainer *= dimData[TensorRank+i];
    }
    assert( NumContainers * ElementsPerContainer == buf.size() && "EigenIO: Number of elements != product of dimensions" );
    // Now see whether the tensor is the right shape, or can be made to be
    const auto & dims{output.dimensions()};
    bool bShapeOK = (output.data() != nullptr);
    for( auto i = 0; bShapeOK && i < TensorRank ; i++ )
      if( dims[i] != dimData[i] )
        bShapeOK = false;
    // Make the tensor the same size as the data read
    ETDims MyIndex;
    if( !bShapeOK ) {
      for( auto i = 0 ; i < TensorRank ; i++ )
        MyIndex[i] = dimData[i];
      Reshape(output, MyIndex);
    }
    // Copy the data into the tensor
    for( auto &d : MyIndex ) d = 0;
    const Scalar * pSource = &buf[0];
    for( std::size_t n = 0 ; n < NumContainers ; n++ ) {
      Container & c = output( MyIndex );
      copyScalars( c, pSource );
      // Now increment the index
      for( int i = TensorRank - 1; i != -1 && ++MyIndex[i] == dims[i]; i-- )
        MyIndex[i] = 0;
    }
    assert( pSource == &buf[NumContainers * ElementsPerContainer] );
  }

  template <typename T>
  template <typename ETensor>
  typename std::enable_if<EigenIO::is_tensor_fixed<ETensor>::value, void>::type
  Reader<T>::Reshape(ETensor &t, const std::array<typename ETensor::Index, ETensor::NumDimensions> &dims )
  {
    assert( 0 && "EigenIO: Fixed tensor dimensions can't be changed" );
  }

  template <typename T>
  template <typename ETensor>
  typename std::enable_if<EigenIO::is_tensor_variable<ETensor>::value, void>::type
  Reader<T>::Reshape(ETensor &t, const std::array<typename ETensor::Index, ETensor::NumDimensions> &dims )
  {
    //t.reshape( dims );
    t.resize( dims );
  }

  template <typename T>
  template <typename U>
  void Reader<T>::fromString(U &output, const std::string &s)
@@ -289,6 +534,48 @@ namespace Grid {
    {
      return os;
    }

    template <typename T1, typename T2>
    static inline typename std::enable_if<!EigenIO::is_tensor<T1>::value || !EigenIO::is_tensor<T2>::value, bool>::type
    CompareMember(const T1 &lhs, const T2 &rhs) {
      return lhs == rhs;
    }

    template <typename T1, typename T2>
    static inline typename std::enable_if<EigenIO::is_tensor<T1>::value && EigenIO::is_tensor<T2>::value, bool>::type
    CompareMember(const T1 &lhs, const T2 &rhs) {
      // First check whether dimensions match (Eigen tensor library will assert if they don't match)
      bool bReturnValue = (T1::NumIndices == T2::NumIndices);
      for( auto i = 0 ; bReturnValue && i < T1::NumIndices ; i++ )
          bReturnValue = ( lhs.dimension(i) == rhs.dimension(i) );
      if( bReturnValue ) {
        Eigen::Tensor<bool, 0, T1::Options> bResult = (lhs == rhs).all();
        bReturnValue = bResult(0);
      }
      return bReturnValue;
    }

    template <typename T>
    static inline typename std::enable_if<EigenIO::is_tensor<T>::value, bool>::type
    CompareMember(const std::vector<T> &lhs, const std::vector<T> &rhs) {
      const auto NumElements{lhs.size()};
      bool bResult = ( NumElements == rhs.size() );
      for( auto i = 0 ; i < NumElements && bResult ; i++ )
        bResult = CompareMember(lhs[i], rhs[i]);
      return bResult;
    }

    template <typename T>
    static inline typename std::enable_if<!EigenIO::is_tensor<T>::value, void>::type
    WriteMember(std::ostream &os, const T &object) {
      os << object;
    }

    template <typename T>
    static inline typename std::enable_if<EigenIO::is_tensor<T>::value, void>::type
    WriteMember(std::ostream &os, const T &object) {
      os << "Eigen::Tensor";
    }
  };

  // Generic writer interface //////////////////////////////////////////////////

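The point of routing the comparison and stream macros through CompareMember/WriteMember (see the macro hunk further down) is that serialisable classes can now carry Eigen tensor members. A hedged sketch using the existing GRID_SERIALIZABLE_CLASS_MEMBERS macro; MyPayload and its members are invented for illustration:

  class MyPayload: Grid::Serializable {
  public:
    GRID_SERIALIZABLE_CLASS_MEMBERS(MyPayload,
                                    double, coupling,
                                    Eigen::Tensor<Grid::ComplexD, 3>, correlator);
  };
  // operator== compares 'correlator' element-wise via CompareMember (after a dimension check),
  // while operator<< prints just "Eigen::Tensor" for it via WriteMember.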
@@ -51,6 +51,8 @@ namespace Grid {
    template <typename U>
    void writeDefault(const std::string &s, const std::vector<U> &x);
    void writeDefault(const std::string &s, const char *x);
    template <typename U>
    void writeMultiDim(const std::string &s, const std::vector<size_t> & Dimensions, const U * pDataRowMajor, size_t NumElements);
  private:
    std::ofstream file_;
  };
@@ -66,6 +68,8 @@ namespace Grid {
    void readDefault(const std::string &s, U &output);
    template <typename U>
    void readDefault(const std::string &s, std::vector<U> &output);
    template <typename U>
    void readMultiDim(const std::string &s, std::vector<U> &buf, std::vector<size_t> &dim);
  private:
    std::ifstream file_;
  };
@@ -92,6 +96,27 @@ namespace Grid {
    }
  }

  template <typename U>
  void BinaryWriter::writeMultiDim(const std::string &s, const std::vector<size_t> & Dimensions, const U * pDataRowMajor, size_t NumElements)
  {
    uint64_t rank = static_cast<uint64_t>( Dimensions.size() );
    uint64_t tmp = 1;
    for( auto i = 0 ; i < rank ; i++ )
      tmp *= Dimensions[i];
    assert( tmp == NumElements && "Dimensions don't match size of data being written" );
    // Total number of elements
    write("", tmp);
    // Number of dimensions
    write("", rank);
    // Followed by each dimension
    for( auto i = 0 ; i < rank ; i++ ) {
      tmp = Dimensions[i];
      write("", tmp);
    }
    for( auto i = 0; i < NumElements; ++i)
      write("", pDataRowMajor[i]);
  }

  // Reader template implementation ////////////////////////////////////////////
  template <typename U>
  void BinaryReader::readDefault(const std::string &s, U &output)
@@ -114,6 +139,30 @@ namespace Grid {
      read("", output[i]);
    }
  }

  template <typename U>
  void BinaryReader::readMultiDim(const std::string &s, std::vector<U> &buf, std::vector<size_t> &dim)
  {
    // Number of elements
    uint64_t NumElements;
    read("", NumElements);
    // Number of dimensions
    uint64_t rank;
    read("", rank);
    // Followed by each dimension
    uint64_t count = 1;
    dim.resize(rank);
    uint64_t tmp;
    for( auto i = 0 ; i < rank ; i++ ) {
      read("", tmp);
      dim[i] = tmp;
      count *= tmp;
    }
    assert( count == NumElements && "Dimensions don't match size of data being read" );
    buf.resize(count);
    for( auto i = 0; i < count; ++i)
      read("", buf[i]);
  }
}

#endif

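Spelled out, the flat record that BinaryWriter::writeMultiDim emits and BinaryReader::readMultiDim expects is (my own summary of the code above):

  // uint64_t NumElements;        // total number of scalars (product of all dimensions)
  // uint64_t rank;               // number of dimensions (Eigen indices + Grid-container indices)
  // uint64_t dim[rank];          // each dimension in turn
  // U        data[NumElements];  // the scalars, in row-major order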
@@ -3,6 +3,7 @@

#include <stack>
#include <string>
#include <list>
#include <vector>
#include <H5Cpp.h>
#include <Grid/tensors/Tensors.h>
@@ -38,6 +39,8 @@ namespace Grid
    template <typename U>
    typename std::enable_if<!element<std::vector<U>>::is_number, void>::type
    writeDefault(const std::string &s, const std::vector<U> &x);
    template <typename U>
    void writeMultiDim(const std::string &s, const std::vector<size_t> & Dimensions, const U * pDataRowMajor, size_t NumElements);
    H5NS::Group & getGroup(void);
  private:
    template <typename U>
@@ -66,6 +69,8 @@ namespace Grid
    template <typename U>
    typename std::enable_if<!element<std::vector<U>>::is_number, void>::type
    readDefault(const std::string &s, std::vector<U> &x);
    template <typename U>
    void readMultiDim(const std::string &s, std::vector<U> &buf, std::vector<size_t> &dim);
    H5NS::Group & getGroup(void);
  private:
    template <typename U>
@@ -101,6 +106,90 @@ namespace Grid
  template <>
  void Hdf5Writer::writeDefault(const std::string &s, const std::string &x);

  template <typename U>
  void Hdf5Writer::writeMultiDim(const std::string &s, const std::vector<size_t> & Dimensions, const U * pDataRowMajor, size_t NumElements)
  {
    // Hdf5 needs the dimensions as hsize_t
    const int rank = static_cast<int>(Dimensions.size());
    std::vector<hsize_t> dim(rank);
    for(int i = 0; i < rank; i++)
      dim[i] = Dimensions[i];
    // write the entire dataset to file
    H5NS::DataSpace dataSpace(rank, dim.data());

    size_t DataSize = NumElements * sizeof(U);
    if (DataSize > dataSetThres_)
    {
      // First few prime numbers from https://oeis.org/A000040
      static const unsigned short Primes[] = { 2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31,
        37, 41, 43, 47, 53, 59, 61, 67, 71, 73, 79, 83, 89, 97, 101, 103, 107, 109,
        113, 127, 131, 137, 139, 149, 151, 157, 163, 167, 173, 179, 181, 191, 193,
        197, 199, 211, 223, 227, 229, 233, 239, 241, 251, 257, 263, 269, 271 };
      constexpr int NumPrimes = sizeof( Primes ) / sizeof( Primes[0] );
      // Make sure (1) each dimension and (2) the total chunk size are < 4GB
      const hsize_t MaxElements = ( sizeof( U ) == 1 ) ? 0xffffffff : 0x100000000 / sizeof( U );
      hsize_t ElementsPerChunk = 1;
      bool bTooBig = false;
      for( int i = rank - 1 ; i != -1 ; i-- ) {
        auto &d = dim[i];
        if( bTooBig )
          d = 1; // Chunk size is already as big as can be - remaining dimensions = 1
        else {
          // If individual dimension too big, reduce by prime factors if possible
          for( int PrimeIdx = 0; d > MaxElements && PrimeIdx < NumPrimes; ) {
            if( d % Primes[PrimeIdx] )
              PrimeIdx++;
            else
              d /= Primes[PrimeIdx];
          }
          const char ErrorMsg[] = " dimension > 4GB without small prime factors. "
                                  "Hdf5IO chunk size will be inefficient. NB Serialisation is not intended for large datasets - please consider alternatives.";
          if( d > MaxElements ) {
            std::cout << GridLogWarning << "Individual" << ErrorMsg << std::endl;
            hsize_t quotient = d / MaxElements;
            if( d % MaxElements )
              quotient++;
            d /= quotient;
          }
          // Now make sure overall size is not too big
          hsize_t OverflowCheck = ElementsPerChunk;
          ElementsPerChunk *= d;
          assert( OverflowCheck == ElementsPerChunk / d && "Product of dimensions overflowed hsize_t" );
          // If product of dimensions too big, reduce by prime factors
          for( int PrimeIdx = 0; ElementsPerChunk > MaxElements && PrimeIdx < NumPrimes; ) {
            bTooBig = true;
            if( d % Primes[PrimeIdx] )
              PrimeIdx++;
            else {
              d /= Primes[PrimeIdx];
              ElementsPerChunk /= Primes[PrimeIdx];
            }
          }
          if( ElementsPerChunk > MaxElements ) {
            std::cout << GridLogMessage << "Product of" << ErrorMsg << std::endl;
            hsize_t quotient = ElementsPerChunk / MaxElements;
            if( ElementsPerChunk % MaxElements )
              quotient++;
            d /= quotient;
            ElementsPerChunk /= quotient;
          }
        }
      }
      H5NS::DataSet           dataSet;
      H5NS::DSetCreatPropList plist;
      plist.setChunk(rank, dim.data());
      plist.setFletcher32();
      dataSet = group_.createDataSet(s, Hdf5Type<U>::type(), dataSpace, plist);
      dataSet.write(pDataRowMajor, Hdf5Type<U>::type());
    }
    else
    {
      H5NS::Attribute attribute;
      attribute = group_.createAttribute(s, Hdf5Type<U>::type(), dataSpace);
      attribute.write(Hdf5Type<U>::type(), pDataRowMajor);
    }
  }

  template <typename U>
  typename std::enable_if<element<std::vector<U>>::is_number, void>::type
  Hdf5Writer::writeDefault(const std::string &s, const std::vector<U> &x)
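A worked example of the chunk-size reduction above (my own numbers, not from the commit): for U = double, MaxElements = 0x100000000 / 8 = 536,870,912. A single dimension of 1,200,000,000 elements is divided by the prime factor 2 twice, leaving a chunk extent of 300,000,000, which fits under the 4GB limit. The dataspace keeps the full extent; only the chunk shape passed to plist.setChunk() is reduced. A dimension with no small prime factors falls back to the quotient-based reduction and triggers the warning about inefficient chunking.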
@@ -110,34 +199,11 @@ namespace Grid

    // flatten the vector and get the dimensions
    Flatten<std::vector<U>> flat(x);
    std::vector<hsize_t> dim;
    std::vector<size_t> dim;
    const auto           &flatx = flat.getFlatVector();

    for (auto &d: flat.getDim())
    {
      dim.push_back(d);
    }

    // write to file
    H5NS::DataSpace dataSpace(dim.size(), dim.data());

    if (flatx.size() > dataSetThres_)
    {
      H5NS::DataSet           dataSet;
      H5NS::DSetCreatPropList plist;

      plist.setChunk(dim.size(), dim.data());
      plist.setFletcher32();
      dataSet = group_.createDataSet(s, Hdf5Type<Element>::type(), dataSpace, plist);
      dataSet.write(flatx.data(), Hdf5Type<Element>::type());
    }
    else
    {
      H5NS::Attribute attribute;

      attribute = group_.createAttribute(s, Hdf5Type<Element>::type(), dataSpace);
      attribute.write(Hdf5Type<Element>::type(), flatx.data());
    }
    writeMultiDim<Element>(s, dim, &flatx[0], flatx.size());
  }

  template <typename U>
@@ -175,8 +241,7 @@ namespace Grid
  void Hdf5Reader::readDefault(const std::string &s, std::string &x);

  template <typename U>
  typename std::enable_if<element<std::vector<U>>::is_number, void>::type
  Hdf5Reader::readDefault(const std::string &s, std::vector<U> &x)
  void Hdf5Reader::readMultiDim(const std::string &s, std::vector<U> &buf, std::vector<size_t> &dim)
  {
    // alias to element type
    typedef typename element<std::vector<U>>::type Element;
@@ -184,7 +249,6 @@ namespace Grid
    // read the dimensions
    H5NS::DataSpace       dataSpace;
    std::vector<hsize_t>  hdim;
    std::vector<size_t>   dim;
    hsize_t               size = 1;

    if (group_.attrExists(s))
@@ -204,9 +268,9 @@ namespace Grid
    }

    // read the flat vector
    std::vector<Element> buf(size);
    buf.resize(size);

    if (size > dataSetThres_)
    if (size * sizeof(Element) > dataSetThres_)
    {
      H5NS::DataSet dataSet;

@@ -220,6 +284,18 @@ namespace Grid
      attribute = group_.openAttribute(s);
      attribute.read(Hdf5Type<Element>::type(), buf.data());
    }
  }

  template <typename U>
  typename std::enable_if<element<std::vector<U>>::is_number, void>::type
  Hdf5Reader::readDefault(const std::string &s, std::vector<U> &x)
  {
    // alias to element type
    typedef typename element<std::vector<U>>::type Element;

    std::vector<size_t>   dim;
    std::vector<Element>  buf;
    readMultiDim( s, buf, dim );

    // reconstruct the multidimensional vector
    Reconstruct<std::vector<U>> r(buf, dim);

@@ -109,8 +109,8 @@ THE SOFTWARE.
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////

#define GRID_MACRO_MEMBER(A,B)        A B;
#define GRID_MACRO_COMP_MEMBER(A,B) result = (result and (lhs. B == rhs. B));
#define GRID_MACRO_OS_WRITE_MEMBER(A,B) os<< #A <<" " #B << " = " << obj. B << " ; " <<std::endl;
#define GRID_MACRO_COMP_MEMBER(A,B) result = (result and CompareMember(lhs. B, rhs. B));
#define GRID_MACRO_OS_WRITE_MEMBER(A,B) os<< #A <<" " #B << " = "; WriteMember( os, obj. B ); os << " ; " <<std::endl;
#define GRID_MACRO_READ_MEMBER(A,B) Grid::read(RD,#B,obj. B);
#define GRID_MACRO_WRITE_MEMBER(A,B) Grid::write(WR,#B,obj. B);

@@ -51,6 +51,8 @@ namespace Grid
    void writeDefault(const std::string &s, const U &x);
    template <typename U>
    void writeDefault(const std::string &s, const std::vector<U> &x);
    template <typename U>
    void writeMultiDim(const std::string &s, const std::vector<size_t> & Dimensions, const U * pDataRowMajor, size_t NumElements);
  private:
    void indent(void);
  private:
@@ -69,6 +71,8 @@ namespace Grid
    void readDefault(const std::string &s, U &output);
    template <typename U>
    void readDefault(const std::string &s, std::vector<U> &output);
    template <typename U>
    void readMultiDim(const std::string &s, std::vector<U> &buf, std::vector<size_t> &dim);
  private:
    void checkIndent(void);
  private:
@@ -96,6 +100,17 @@ namespace Grid
    }
  }

  template <typename U>
  void TextWriter::writeMultiDim(const std::string &s, const std::vector<size_t> & Dimensions, const U * pDataRowMajor, size_t NumElements)
  {
    uint64_t Rank = Dimensions.size();
    write(s, Rank);
    for( uint64_t d : Dimensions )
      write(s, d);
    while( NumElements-- )
      write(s, *pDataRowMajor++);
  }

  // Reader template implementation ////////////////////////////////////////////
  template <typename U>
  void TextReader::readDefault(const std::string &s, U &output)
@@ -121,6 +136,23 @@ namespace Grid
      read("", output[i]);
    }
  }

  template <typename U>
  void TextReader::readMultiDim(const std::string &s, std::vector<U> &buf, std::vector<size_t> &dim)
  {
    const char sz[] = "";
    uint64_t Rank;
    read(sz, Rank);
    dim.resize( Rank );
    size_t NumElements = 1;
    for( auto &d : dim ) {
      read(sz, d);
      NumElements *= d;
    }
    buf.resize( NumElements );
    for( auto &x : buf )
      read(s, x);
  }
}

#endif

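For the plain-text backend the layout is simply one value per write call (my own summary of the routines above): the rank first, then each dimension, then every scalar in row-major order, each on its own line at the current indentation level; TextReader::readMultiDim consumes the same sequence in reverse.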
@@ -53,6 +53,17 @@ namespace Grid {
    return os;
  }

  // std::vector<std::vector<...>> nested to specified Rank //////////////////////////////////
  template<typename T, unsigned int Rank>
  struct NestedStdVector {
    typedef typename std::vector<typename NestedStdVector<T, Rank - 1>::type> type;
  };

  template<typename T>
  struct NestedStdVector<T,0> {
    typedef T type;
  };

  // Grid scalar tensors to nested std::vectors //////////////////////////////////
  template <typename T>
  struct TensorToVec

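A quick illustration of the recursion (my own note, not part of the diff): NestedStdVector<double, 3>::type resolves to std::vector<std::vector<std::vector<double>>>, and Rank 0 terminates the recursion at the bare element type.

  static_assert(std::is_same<Grid::NestedStdVector<double, 2>::type,
                             std::vector<std::vector<double>>>::value, "two nested vectors");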
@@ -57,6 +57,8 @@ namespace Grid
    void writeDefault(const std::string &s, const U &x);
    template <typename U>
    void writeDefault(const std::string &s, const std::vector<U> &x);
    template <typename U>
    void writeMultiDim(const std::string &s, const std::vector<size_t> & Dimensions, const U * pDataRowMajor, size_t NumElements);
    std::string docString(void);
    std::string string(void);
  private:
@@ -79,6 +81,8 @@ namespace Grid
    void readDefault(const std::string &s, U &output);
    template <typename U>
    void readDefault(const std::string &s, std::vector<U> &output);
    template <typename U>
    void readMultiDim(const std::string &s, std::vector<U> &buf, std::vector<size_t> &dim);
    void readCurrentSubtree(std::string &s);
  private:
    void checkParse(const pugi::xml_parse_result &result, const std::string name);
@@ -122,9 +126,41 @@ namespace Grid
  void XmlWriter::writeDefault(const std::string &s, const std::vector<U> &x)
  {
    push(s);
    for (auto &x_i: x)
    for( auto &u : x )
    {
      write("elem", x_i);
      write("elem", u);
    }
    pop();
  }

  template <typename U>
  void XmlWriter::writeMultiDim(const std::string &s, const std::vector<size_t> & Dimensions, const U * pDataRowMajor, size_t NumElements)
  {
    push(s);
    size_t count = 1;
    const int Rank = static_cast<int>( Dimensions.size() );
    write("rank", Rank );
    std::vector<size_t> MyIndex( Rank );
    for( auto d : Dimensions ) {
      write("dim", d);
      count *= d;
    }
    assert( count == NumElements && "XmlIO : element count doesn't match dimensions" );
    static const char sName[] = "tensor";
    for( int i = 0 ; i < Rank ; i++ ) {
      MyIndex[i] = 0;
      push(sName);
    }
    while (NumElements--) {
      write("elem", *pDataRowMajor++);
      int i;
      for( i = Rank - 1 ; i != -1 && ++MyIndex[i] == Dimensions[i] ; i-- )
        MyIndex[i] = 0;
      int Rollover = Rank - 1 - i;
      for( i = 0 ; i < Rollover ; i++ )
        pop();
      for( i = 0 ; NumElements && i < Rollover ; i++ )
        push(sName);
    }
    pop();
  }
@@ -145,25 +181,66 @@ namespace Grid
  template <typename U>
  void XmlReader::readDefault(const std::string &s, std::vector<U> &output)
  {
    std::string    buf;
    unsigned int   i = 0;

    if (!push(s))
    {
      std::cout << GridLogWarning << "XML: cannot open node '" << s << "'";
      std::cout << std::endl;

      return;
    }
    while (node_.child("elem"))
    } else {
      for(unsigned int i = 0; node_.child("elem"); )
      {
        output.resize(i + 1);
      read("elem", output[i]);
        read("elem", output[i++]);
        node_.child("elem").set_name("elem-done");
      i++;
      }
      pop();
    }
  }

  template <typename U>
  void XmlReader::readMultiDim(const std::string &s, std::vector<U> &buf, std::vector<size_t> &dim)
  {
    if (!push(s))
    {
      std::cout << GridLogWarning << "XML: cannot open node '" << s << "'";
      std::cout << std::endl;
    } else {
      static const char sName[] = "tensor";
      static const char sNameDone[] = "tensor-done";
      int Rank;
      read("rank", Rank);
      dim.resize( Rank );
      size_t NumElements = 1;
      for( auto &d : dim )
      {
        read("dim", d);
        node_.child("dim").set_name("dim-done");
        NumElements *= d;
      }
      buf.resize( NumElements );
      std::vector<size_t> MyIndex( Rank );
      for( int i = 0 ; i < Rank ; i++ ) {
        MyIndex[i] = 0;
        push(sName);
      }

      for( auto &x : buf )
      {
        NumElements--;
        read("elem", x);
        node_.child("elem").set_name("elem-done");
        int i;
        for( i = Rank - 1 ; i != -1 && ++MyIndex[i] == dim[i] ; i-- )
          MyIndex[i] = 0;
        int Rollover = Rank - 1 - i;
        for( i = 0 ; i < Rollover ; i++ ) {
          node_.set_name(sNameDone);
          pop();
        }
        for( i = 0 ; NumElements && i < Rollover ; i++ )
          push(sName);
      }
      pop();
    }
  }
}
#endif

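To make the element layout concrete: a rank-2, 2x3 tensor written by XmlWriter::writeMultiDim under the name "t" nests along these lines (my own illustration of the code above; element values elided):

  <t>
    <rank>2</rank>
    <dim>2</dim><dim>3</dim>
    <tensor>
      <tensor><elem>..</elem><elem>..</elem><elem>..</elem></tensor>
      <tensor><elem>..</elem><elem>..</elem><elem>..</elem></tensor>
    </tensor>
  </t>

One <tensor> level is opened per dimension; the rollover logic closes and reopens the inner levels each time an index wraps.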
@@ -143,6 +143,7 @@ namespace Grid {
    typedef vRealD DoublePrecision;
  };
  template<> struct GridTypeMapper<vRealH> : public GridTypeMapper_Base {
    // Fixme this is incomplete until Grid supports fp16 or bfp16 arithmetic types
    typedef RealF  scalar_type;
    typedef vRealH vector_type;
    typedef vRealD vector_typeD;
@@ -153,6 +154,7 @@ namespace Grid {
    typedef vRealD DoublePrecision;
  };
  template<> struct GridTypeMapper<vComplexH> : public GridTypeMapper_Base {
    // Fixme this is incomplete until Grid supports fp16 or bfp16 arithmetic types
    typedef ComplexF  scalar_type;
    typedef vComplexH vector_type;
    typedef vComplexD vector_typeD;

Grid/util/EigenUtil.h — new file, 198 lines added

@@ -0,0 +1,198 @@
/*************************************************************************************

 Grid physics library, www.github.com/paboyle/Grid

 Source file: Grid/util/EigenUtil.h

 Copyright (C) 2019

 Author: Michael Marshall <michael.marshall@ed.ac.uk>

 This program is free software; you can redistribute it and/or modify
 it under the terms of the GNU General Public License as published by
 the Free Software Foundation; either version 2 of the License, or
 (at your option) any later version.

 This program is distributed in the hope that it will be useful,
 but WITHOUT ANY WARRANTY; without even the implied warranty of
 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 GNU General Public License for more details.

 You should have received a copy of the GNU General Public License along
 with this program; if not, write to the Free Software Foundation, Inc.,
 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.

 See the full license in the file "LICENSE" in the top level distribution directory
 *************************************************************************************/
/*  END LEGAL */
#ifndef GRID_UTIL_EIGENUTIL_H
#define GRID_UTIL_EIGENUTIL_H
#include <Grid/tensors/Tensor_traits.h>
#include <Grid/Eigen/unsupported/CXX11/Tensor>

namespace Grid {
  // for_all helper function to call the lambda for scalar
  template <typename ETensor, typename Lambda>
  typename std::enable_if<EigenIO::is_tensor_of_scalar<ETensor>::value, void>::type
  for_all_do_lambda( Lambda lambda, typename ETensor::Scalar &scalar, typename ETensor::Index &Seq,
      const std::array<typename ETensor::Index, ETensor::NumIndices> &MyIndex,
      const std::array<int, EigenIO::Traits<ETensor>::Rank> &DimGridTensor,
            std::array<int, EigenIO::Traits<ETensor>::Rank> &GridTensorIndex)
  {
    lambda( scalar, Seq++, MyIndex, GridTensorIndex );
  }

  // for_all helper function to call the lambda for container
  template <typename ETensor, typename Lambda>
  typename std::enable_if<EigenIO::is_tensor_of_container<ETensor>::value, void>::type
  for_all_do_lambda( Lambda lambda, typename ETensor::Scalar &container, typename ETensor::Index &Seq,
      const std::array<typename ETensor::Index, ETensor::NumIndices> &MyIndex,
      const std::array<int, EigenIO::Traits<ETensor>::Rank> &DimGridTensor,
            std::array<int, EigenIO::Traits<ETensor>::Rank> &GridTensorIndex)
  {
    using Traits = EigenIO::Traits<ETensor>;
    const auto InnerRank = Traits::Rank;
    for( typename Traits::scalar_type &Source : container ) {
      lambda(Source, Seq++, MyIndex, GridTensorIndex );
      // Now increment SubIndex
      for( auto i = InnerRank - 1; i != -1 && ++GridTensorIndex[i] == DimGridTensor[i]; i-- )
        GridTensorIndex[i] = 0;
    }
  }

  // Calls a lambda (passing index and sequence number) for every member of an Eigen::Tensor
  // For efficiency, iteration proceeds in memory order,
  // ... but parameters guaranteed to be the same regardless of memory order
  template <typename ETensor, typename Lambda>
  typename std::enable_if<EigenIO::is_tensor<ETensor>::value, void>::type
  for_all( ETensor &ET, Lambda lambda )
  {
    using Scalar = typename ETensor::Scalar; // This could be a Container - we'll check later
    using Index = typename ETensor::Index;
    using Traits = EigenIO::Traits<ETensor>;
    // Check that the number of elements in the container is the product of tensor dimensions
    const Index NumScalars = ET.size();
    assert( NumScalars > 0 && "EigenUtil: tensor has no elements" );
    Index ScalarElementCount{1};
    const auto rank{ETensor::NumIndices};
    std::array<Index, rank> DimTensor, MyIndex;
    for(int i = 0; i < rank; i++ ) {
      DimTensor[i] = ET.dimension(i);
      ScalarElementCount *= DimTensor[i];
      MyIndex[i] = 0;
    }
    assert( NumScalars == ScalarElementCount && "EigenUtil: tensor size not product of dimensions" );
    // Save the GridTensor dimensions
    const auto InnerRank{Traits::Rank};
    std::array<int, InnerRank> DimGridTensor, GridTensorIndex;
    for(int i = 0; i < InnerRank; i++ ) {
      DimGridTensor[i] = Traits::Dimension(i);
      GridTensorIndex[i] = 0;
    }
    // Now walk the tensor in memory order
    Index Seq = 0;
    Scalar * pScalar = ET.data();
    for( Index j = 0; j < NumScalars; j++ ) {
      for_all_do_lambda<ETensor, Lambda>( lambda, * pScalar, Seq, MyIndex, DimGridTensor, GridTensorIndex );
      // Now increment the index to pass to the lambda (bearing in mind we're walking in memory order)
      if( ETensor::Options & Eigen::RowMajor ) {
        for( auto i = rank - 1; i != -1 && ++MyIndex[i] == DimTensor[i]; i-- )
          MyIndex[i] = 0;
      } else {
        for( auto i = 0; i < rank && ++MyIndex[i] == DimTensor[i]; i++ )
          MyIndex[i] = 0;
        Seq = 0;
        for( auto i = 0; i < rank; i++ ) {
          Seq *= DimTensor[i];
          Seq += MyIndex[i];
        }
        Seq *= Traits::count;
      }
      pScalar++;
    }
  }

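A minimal usage sketch of for_all (my own, not from the commit; it assumes a C++14 generic lambda and a plain float payload): the lambda receives the scalar, a sequence number that is independent of storage order, the Eigen index, and the inner Grid-tensor index.

  using Ten = Eigen::Tensor<float, 2>;
  Ten m(2, 3);
  Grid::for_all( m, [](float &c, Ten::Index Seq, const auto &TensorIndex, const auto &InnerIndex) {
    c = 100.f * TensorIndex[0] + TensorIndex[1];  // or simply use Seq, the row-major position
  } );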
  // Sequential initialisation of tensors up to specified precision
 | 
			
		||||
  // Would have preferred to define template variables for this, but that's c++ 17
 | 
			
		||||
  template <typename ETensor>
 | 
			
		||||
  typename std::enable_if<EigenIO::is_tensor<ETensor>::value && !EigenIO::Traits<ETensor>::is_complex>::type
 | 
			
		||||
  SequentialInit( ETensor &ET, typename EigenIO::Traits<ETensor>::scalar_type Inc = 1, unsigned short Precision = 0 )
 | 
			
		||||
  {
 | 
			
		||||
    using Traits = EigenIO::Traits<ETensor>;
 | 
			
		||||
    using scalar_type = typename Traits::scalar_type;
 | 
			
		||||
    using Index = typename ETensor::Index;
 | 
			
		||||
    for_all( ET, [&](scalar_type &c, Index n, const std::array<Index, ETensor::NumIndices> &TensorIndex,
 | 
			
		||||
                     const std::array<int, Traits::Rank> &ScalarIndex ) {
 | 
			
		||||
      scalar_type x = Inc * static_cast<scalar_type>(n);
 | 
			
		||||
      if( Precision ) {
 | 
			
		||||
        std::stringstream s;
 | 
			
		||||
        s << std::setprecision(Precision) << x;
 | 
			
		||||
        s >> x;
 | 
			
		||||
      }
 | 
			
		||||
      c = x;
 | 
			
		||||
    } );
 | 
			
		||||
  }

  template <typename ETensor>
  typename std::enable_if<EigenIO::is_tensor<ETensor>::value && EigenIO::Traits<ETensor>::is_complex>::type
  SequentialInit( ETensor &ET, typename EigenIO::Traits<ETensor>::scalar_type Inc={1,-1}, unsigned short Precision = 0 )
  {
    using Traits = EigenIO::Traits<ETensor>;
    using scalar_type = typename Traits::scalar_type;
    using Index = typename ETensor::Index;
    for_all( ET, [&](scalar_type &c, Index n, const std::array<Index, ETensor::NumIndices> &TensorIndex,
                     const std::array<int, Traits::Rank> &ScalarIndex ) {
      auto re = Inc.real();
      auto im = Inc.imag();
      re *= n;
      im *= n;
      if( Precision ) {
        std::stringstream s;
        s << std::setprecision(Precision) << re;
        s >> re;
        s.clear();
        s << im;
        s >> im;
      }
      c = scalar_type(re,im);
    } );
  }
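  // Usage sketch (illustrative, not part of the original source): fill a complex tensor so
  // that element Seq holds Seq * (1,-1), truncated to 4 significant figures so text-based
  // formats can reproduce the stored values exactly on read-back, e.g.
  //
  //   Eigen::Tensor<ComplexD, 3> T(2, 3, 4);
  //   SequentialInit( T, ComplexD{1, -1}, 4 );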

  // Helper to dump a tensor
  template <typename T>
  typename std::enable_if<EigenIO::is_tensor<T>::value, void>::type
  dump_tensor(T &t, const char * pName = nullptr)
  {
    using Traits = EigenIO::Traits<T>;
    using scalar_type = typename Traits::scalar_type;
    using Index = typename T::Index;
    const auto rank{T::NumIndices};
    const auto &dims = t.dimensions();
    std::cout << "Dumping rank " << rank + Traits::Rank << ((T::Options & Eigen::RowMajor) ? ", row" : ", column") << "-major tensor ";
    if( pName )
      std::cout << pName;
    for( int i = 0 ; i < rank; i++ ) std::cout << "[" << dims[i] << "]";
    for( int i = 0 ; i < Traits::Rank; i++ ) std::cout << "(" << Traits::Dimension(i) << ")";
    std::cout << " in memory order:" << std::endl;
    for_all( t, [&](scalar_type &c, Index n, const std::array<Index, rank> &TensorIndex,
                    const std::array<int, Traits::Rank> &ScalarIndex ){
      std::cout << "  ";
      for( auto dim : TensorIndex )
        std::cout << "[" << dim << "]";
      for( auto dim : ScalarIndex )
        std::cout << "(" << dim << ")";
      std::cout << " = " << c << std::endl;
    } );
    std::cout << "========================================" << std::endl;
  }
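  // Usage sketch (illustrative, not part of the original source):
  //   dump_tensor( MyTensor, "MyTensor" );
  // prints one line per scalar in memory order, each prefixed by its Eigen index in
  // square brackets and its inner Grid-tensor index in parentheses.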

  template <typename T>
  typename std::enable_if<!EigenIO::is_tensor<T>::value, void>::type
  dump_tensor(T &t, const char * pName = nullptr)
  {
    std::cout << "Dumping non-tensor object ";
    if( pName ) std::cout << pName;
    std::cout << "=" << t;
  }
} // namespace Grid
#endif
tests/Test_serialisation.cc
@@ -4,11 +4,12 @@

    Source file: ./tests/Test_serialisation.cc

    Copyright (C) 2015-2016
    Copyright (C) 2015-2019

Author: Guido Cossu <guido.cossu@ed.ac.uk>
Author: Antonin Portelli <antonin.portelli@me.com>
Author: Peter Boyle <paboyle@ph.ed.ac.uk>
Author: Michael Marshall <michael.marshall@ed.ac.uk>

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
@@ -28,6 +29,8 @@ Author: Peter Boyle <paboyle@ph.ed.ac.uk>
    *************************************************************************************/
    /*  END LEGAL */
#include <Grid/Grid.h>
#include <Grid/util/EigenUtil.h>
#include <typeinfo>

using namespace Grid;
using namespace Grid::QCD;
@@ -80,25 +83,175 @@ double   d   = 2*M_PI;
bool     b   = false;

template <typename W, typename R, typename O>
void ioTest(const std::string &filename, const O &object, const std::string &name)
void ioTest(const std::string &filename, const O &object, const std::string &name,
            const char * tag = "testobject", unsigned short Precision = 0 )
{
  std::cout << "IO test: " << name << " -> " << filename << " ...";
  // writer needs to be destroyed so that writing physically happens
  {
    W writer(filename);

    write(writer, "testobject", object);
    if( Precision )
      writer.setPrecision(Precision);
    write(writer, tag , object);
  }

  std::cout << " done. reading ...";
  R    reader(filename);
  O    buf;
  bool good;
  std::unique_ptr<O> buf( new O ); // In case object too big for stack

  read(reader, "testobject", buf);
  good = (object == buf);
  std::cout << name << " IO test: " << (good ? "success" : "failure");
  std::cout << std::endl;
  if (!good) exit(EXIT_FAILURE);
  read(reader, tag, *buf);
  bool good = Serializable::CompareMember(object, *buf);
  if (!good) {
    std::cout << " failure!" << std::endl;
    if (EigenIO::is_tensor<O>::value)
      dump_tensor(*buf);
    exit(EXIT_FAILURE);
  }
  std::cout << " done." << std::endl;
}
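// Illustrative note (not part of the original source): the new tag and Precision parameters
// let callers choose the element name written to the file and set the writer's output
// precision; TensorIO::Test below passes the same Precision to SequentialInit and to ioTest
// so that truncated values round-trip exactly through text-based formats, e.g.
//   ioTest<XmlWriter, XmlReader, TensorIO>(filename, obj, "TensorIO", "TensorIO", 6);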

// Perform I/O tests on a range of tensor types
// Test coverage: scalars, complex and GridVectors in single, double and default precision
class TensorIO : public Serializable {
  using TestScalar = ComplexD;
  using SR3 = Eigen::Sizes<9,4,2>;
  using SR5 = Eigen::Sizes<5,4,3,2,1>;
  using ESO = Eigen::StorageOptions;
  using TensorRank3  = Eigen::Tensor<ComplexF, 3, ESO::RowMajor>;
  using TensorR5     = Eigen::TensorFixedSize<Real, SR5>;
  using TensorR5Alt  = Eigen::TensorFixedSize<Real, SR5, ESO::RowMajor>;
  using Tensor942    = Eigen::TensorFixedSize<TestScalar, SR3, ESO::RowMajor>;
  using aTensor942   = std::vector<Tensor942>;
  using Perambulator = Eigen::Tensor<SpinColourVector, 6, ESO::RowMajor>;
  using LSCTensor    = Eigen::TensorFixedSize<SpinColourMatrix, Eigen::Sizes<6,5>>;

  static const Real       FlagR;
  static const Complex    Flag;
  static const ComplexF   FlagF;
  static const TestScalar FlagTS;
  static const char * const pszFilePrefix;

  void Init(unsigned short Precision)
  {
    SequentialInit(Perambulator1, Flag, Precision);
    SequentialInit(Perambulator2, Flag, Precision);
    SequentialInit(tensorR5,      FlagR, Precision);
    SequentialInit(tensorRank3,   FlagF, Precision);
    SequentialInit(tensor_9_4_2,  FlagTS, Precision);
    for( auto &t : atensor_9_4_2 )
      SequentialInit(t, FlagTS, Precision);
    SequentialInit(MyLSCTensor, Flag, Precision);
  }

  // Perform an I/O test for a single Eigen tensor (of any type)
  template <typename W, typename R, typename T, typename... IndexTypes>
  static void TestOne(const char * MyTypeName, unsigned short Precision, std::string &filename,
                      const char * pszExtension, unsigned int &TestNum,
                      typename EigenIO::Traits<T>::scalar_type Flag, IndexTypes... otherDims)
  {
    using Traits = EigenIO::Traits<T>;
    using scalar_type = typename Traits::scalar_type;
    std::unique_ptr<T> pTensor{new T(otherDims...)};
    SequentialInit( * pTensor, Flag, Precision );
    filename = pszFilePrefix + std::to_string(++TestNum) + "_" + MyTypeName + pszExtension;
    ioTest<W, R, T>(filename, * pTensor, MyTypeName, MyTypeName);
  }

public:
  GRID_SERIALIZABLE_CLASS_MEMBERS(TensorIO
                                  , SpinColourVector,         spinColourVector
                                  , SpinColourMatrix,         spinColourMatrix
                                  , std::vector<std::string>, DistilParameterNames
                                  , std::vector<int>,         DistilParameterValues
                                  , Perambulator,             Perambulator1
                                  , Perambulator,             Perambulator2
                                  , TensorR5,                 tensorR5
                                  , TensorRank3,              tensorRank3
                                  , Tensor942,                tensor_9_4_2
                                  , aTensor942,               atensor_9_4_2
                                  , LSCTensor,                MyLSCTensor
                                  );
  TensorIO()
  : DistilParameterNames {"do", "androids", "dream", "of", "electric", "sheep?"}
  , DistilParameterValues{2,3,1,4,5,1}
  , Perambulator1(2,3,1,4,5,1)
  , Perambulator2(7,1,6,1,5,1)
  , tensorRank3(7,3,2)
  , atensor_9_4_2(3) {}

#define TEST_PARAMS( T ) #T, Precision, filename, pszExtension, TestNum
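  // Illustrative note (not part of the original source): TEST_PARAMS(T) stringises the type
  // name and forwards the shared test state, so e.g.
  //   TestOne<WTR_, RDR_, Tensor942>(TEST_PARAMS( Tensor942 ), FlagTS);
  // expands to
  //   TestOne<WTR_, RDR_, Tensor942>("Tensor942", Precision, filename, pszExtension, TestNum, FlagTS);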

  // Perform a series of I/O tests for Eigen tensors, including a serialisable object
  template <typename WTR_, typename RDR_>
  static void Test(const char * pszExtension, unsigned short Precision = 0)
  {
    // Perform a series of tests on progressively more complex tensors
    unsigned int TestNum = 0;
    std::string filename;
    // Rank 1 tensor containing a single integer
    using TensorSingle = Eigen::TensorFixedSize<Integer, Eigen::Sizes<1>>;
    TestOne<WTR_, RDR_, TensorSingle>( TEST_PARAMS( TensorSingle ), 7 ); // lucky!
    // Rather convoluted way of defining a single complex number
    using TensorSimple = Eigen::Tensor<iMatrix<TestScalar,1>, 6>;
    using I = typename TensorSimple::Index; // NB: Never specified, so same for all my test tensors
    // Try progressively more complicated tensors
    TestOne<WTR_, RDR_, TensorSimple, I,I,I,I,I,I>( TEST_PARAMS( TensorSimple ), FlagTS, 1,1,1,1,1,1 );
    TestOne<WTR_, RDR_, TensorRank3, I, I, I>( TEST_PARAMS( TensorRank3 ), FlagF, 6, 3, 2 );
    TestOne<WTR_, RDR_, Tensor942>(TEST_PARAMS( Tensor942 ), FlagTS);
    TestOne<WTR_, RDR_, LSCTensor>(TEST_PARAMS( LSCTensor ), Flag );

    // Now see whether we can write a tensor in one memory order and read back in the other
    {
      TestOne<WTR_, RDR_, TensorR5>(TEST_PARAMS( TensorR5 ), FlagR);
      std::cout << "    Testing alternate memory order read ... ";
      TensorR5Alt t2;
      RDR_ reader(filename);
      ::Grid::read(reader, "TensorR5", t2);
      bool good = true;
      TensorR5 cf;
      SequentialInit( cf, FlagR, Precision );
      for_all( t2, [&](typename EigenIO::Traits<TensorR5Alt>::scalar_type c, I n,
                       const std::array<I, TensorR5Alt::NumIndices> &TensorIndex,
                       const std::array<int, EigenIO::Traits<TensorR5Alt>::Rank> &GridTensorIndex ){
        Real &r = cf(TensorIndex);
        if( c != r ){
          good = false;
          std::cout << "\nError: " << n << ": " << c << " != " << r;
        }
      } );
      if (!good) {
        std::cout << std::endl;
        dump_tensor(t2,"t2");
        exit(EXIT_FAILURE);
      }
      std::cout << " done." << std::endl;
    }
    // Now test a serialisable object containing a number of tensors
    {
      static const char MyTypeName[] = "TensorIO";
      filename = pszFilePrefix + std::to_string(++TestNum) + "_" + MyTypeName + pszExtension;
      std::unique_ptr<TensorIO> pObj{new TensorIO()};
      pObj->Init(Precision);
      ioTest<WTR_, RDR_, TensorIO>(filename, * pObj, MyTypeName, MyTypeName, Precision);
    }
    // Stress test. Too large for the XML or text readers and writers!
#ifdef STRESS_TEST
    const std::type_info &tw = typeid( WTR_ );
    if( tw == typeid( Hdf5Writer ) || tw == typeid( BinaryWriter ) ) {
      using LCMTensor=Eigen::TensorFixedSize<iMatrix<iVector<iMatrix<iVector<LorentzColourMatrix,5>,2>,7>,3>,
      Eigen::Sizes<2,4,11,10,9>, Eigen::StorageOptions::RowMajor>;
      std::cout << "sizeof( LCMTensor ) = " << sizeof( LCMTensor ) / 1024 / 1024 << " MB" << std::endl;
      TestOne<WTR_, RDR_, LCMTensor>(TEST_PARAMS( LCMTensor ), Flag);
    }
#endif
  }
};

const Real                 TensorIO::FlagR {-1.001};
const Complex              TensorIO::Flag  {1,-3.1415927};
const ComplexF             TensorIO::FlagF {1,-3.1415927};
const TensorIO::TestScalar TensorIO::FlagTS{1,-3.1415927};
const char * const         TensorIO::pszFilePrefix = "tensor_";
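// Illustrative note (not part of the original source): with pszFilePrefix = "tensor_", the
// tests above write files named tensor_<TestNum>_<TypeName><extension>, e.g.
// tensor_1_TensorSingle.h5 for the first HDF5 test.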

template <typename T>
void tensorConvTestFn(GridSerialRNG &rng, const std::string label)
@@ -121,12 +274,12 @@ void tensorConvTestFn(GridSerialRNG &rng, const std::string label)
int main(int argc,char **argv)
{
  Grid_init(&argc,&argv);
  std::cout << std::boolalpha << "==== basic IO" << std::endl; // display true / false for boolean

  GridSerialRNG    rng;

  rng.SeedFixedIntegers(std::vector<int>({42,10,81,9}));

  std::cout << "==== basic IO" << std::endl;
  XmlWriter WR("bother.xml");

  // test basic type writing
@@ -146,7 +299,6 @@ int main(int argc,char **argv)
  // test serializable class writing
  myclass              obj(1234); // non-trivial constructor
  std::vector<myclass> vec;
  std::pair<myenum, myenum> pair;

  std::cout << "-- serialisable class writing to 'bother.xml'..." << std::endl;
  write(WR,"obj",obj);
@@ -154,15 +306,15 @@ int main(int argc,char **argv)
  vec.push_back(obj);
  vec.push_back(myclass(5678));
  vec.push_back(myclass(3838));
  pair = std::make_pair(myenum::red, myenum::blue);

  write(WR, "objvec", vec);
  std::cout << "-- serialisable class writing to std::cout:" << std::endl;
  std::cout << obj << std::endl;
  std::cout << "-- serialisable class comparison:" << std::endl;
  std::cout << "vec[0] == obj: " << ((vec[0] == obj) ? "true" : "false") << std::endl;
  std::cout << "vec[1] == obj: " << ((vec[1] == obj) ? "true" : "false") << std::endl;
  std::cout << "vec[0] == obj: " << (vec[0] == obj) << std::endl;
  std::cout << "vec[1] == obj: " << (vec[1] == obj) << std::endl;
  std::cout << "-- pair writing to std::cout:" << std::endl;
  std::pair<myenum, myenum> pair = std::make_pair(myenum::red, myenum::blue);
  std::cout << pair << std::endl;

  // read tests
@@ -184,7 +336,15 @@ int main(int argc,char **argv)
#ifdef HAVE_HDF5
  ioTest<Hdf5Writer, Hdf5Reader>("iotest.h5", obj, "HDF5   (object)           ");
  ioTest<Hdf5Writer, Hdf5Reader>("iotest.h5", vec, "HDF5   (vector of objects)");
  std::cout << "\n==== detailed Hdf5 tensor tests (Grid::EigenIO)" << std::endl;
  TensorIO::Test<Hdf5Writer, Hdf5Reader>(".h5");
#endif
  std::cout << "\n==== detailed binary tensor tests (Grid::EigenIO)" << std::endl;
  TensorIO::Test<BinaryWriter, BinaryReader>(".bin");
  std::cout << "\n==== detailed xml tensor tests (Grid::EigenIO)" << std::endl;
  TensorIO::Test<XmlWriter, XmlReader>(".xml", 6);
  std::cout << "\n==== detailed text tensor tests (Grid::EigenIO)" << std::endl;
  TensorIO::Test<TextWriter, TextReader>(".dat", 5);

  std::cout << "\n==== vector flattening/reconstruction" << std::endl;
  typedef std::vector<std::vector<std::vector<double>>> vec3d;
@@ -227,4 +387,6 @@ int main(int argc,char **argv)
  tensorConvTest(rng, ColourVector);
  tensorConvTest(rng, SpinMatrix);
  tensorConvTest(rng, SpinVector);

  Grid_finalize();
}