Mirror of https://github.com/paboyle/Grid.git (synced 2024-11-10 07:55:35 +00:00)

Commit 11467a994d ("Enough for tonight"), parent bf434b6bef
@@ -42,68 +42,79 @@ namespace Grid {
// which types are supported scalar types for Eigen::Tensor
template<typename T> struct is_eigen_tensor_scalar : std::integral_constant<bool,
std::is_arithmetic<T>::value || is_complex<T>::value> {};
// which types are grid tensors
template <typename T> struct is_grid_tensor : public std::false_type {
static constexpr unsigned int rank = 0;
static constexpr unsigned int dim = 1;
};
template <typename T> struct is_grid_tensor<iScalar<T>> : public std::true_type {};
template <typename T, int N> struct is_grid_tensor<iVector<T, N>> : public std::true_type {};
template <typename T, int N> struct is_grid_tensor<iMatrix<T, N>> : public std::true_type {};
std::is_arithmetic<T>::value || Grid::is_complex<T>::value> {};
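
A minimal usage sketch (not part of the commit) of how these traits discriminate types; it assumes Grid's usual Complex alias for std::complex<double>:

    // Sketch only: compile-time checks of the traits above
    static_assert(is_eigen_tensor_scalar<double>::value, "arithmetic types are supported Eigen scalars");
    static_assert(is_eigen_tensor_scalar<Complex>::value, "complex types are supported Eigen scalars");
    static_assert(!is_grid_tensor<double>::value, "a plain double is not a grid tensor");
    static_assert(is_grid_tensor<iVector<Complex, 3>>::value, "iVector is a grid tensor");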
// Rank and dimension of grid tensors, i.e. compositions of iScalar, iVector and iMatrix
template <typename T> struct grid_tensor_att {
static constexpr unsigned int depth = 0; // How many levels of Grid Tensor there are
static constexpr unsigned int rank = 0; // The rank of the grid tensor (i.e. how many indices used)
static constexpr unsigned int rank_non_trivial = 0; // As per rank, but excludes those of dimension 1
static constexpr unsigned int count = 1; // total number of elements (i.e. product of dimensions)
typedef T scalar_type; // Type of the underlying scalar
static constexpr size_t scalar_size = sizeof(T); // Size of the underlying scalar in bytes
static constexpr size_t size = scalar_size * count; // total size of elements in bytes
// e.g. iScalar<iVector<Complex,1>>
//      depth = 2
//      rank = 1
//      rank_non_trivial = 0
//      count = 1
// e.g. iVector<iMatrix<Complex,3>,4>
//      depth = 2
//      rank = 3
//      rank_non_trivial = 3
//      count = 36
// e.g. iScalar<iVector<iMatrix<Complex,4>,3>>
//      depth = 3
//      rank = 3
//      rank_non_trivial = 3
//      count = 48
};
template <typename T> struct grid_tensor_att<iScalar<T>> {
static constexpr unsigned int depth = 1 + grid_tensor_att<T>::depth;
static constexpr unsigned int rank = 0 + grid_tensor_att<T>::rank;
static constexpr unsigned int rank_non_trivial = 0 + grid_tensor_att<T>::rank_non_trivial;
static constexpr unsigned int count = 1 * grid_tensor_att<T>::count;
typedef typename grid_tensor_att<T>::scalar_type scalar_type;
static constexpr size_t scalar_size = grid_tensor_att<T>::scalar_size;
static constexpr size_t size = scalar_size * count;
};
template <typename T, int N> struct grid_tensor_att<iVector<T, N>> {
static constexpr unsigned int depth = 1 + grid_tensor_att<T>::depth;
static constexpr unsigned int rank = 1 + grid_tensor_att<T>::rank;
static constexpr unsigned int rank_non_trivial = (N>1 ? 1 : 0) + grid_tensor_att<T>::rank_non_trivial;
static constexpr unsigned int count = N * grid_tensor_att<T>::count;
typedef typename grid_tensor_att<T>::scalar_type scalar_type;
static constexpr size_t scalar_size = grid_tensor_att<T>::scalar_size;
static constexpr size_t size = scalar_size * count;
};
template <typename T, int N> struct grid_tensor_att<iMatrix<T, N>> {
static constexpr unsigned int depth = 1 + grid_tensor_att<T>::depth;
static constexpr unsigned int rank = 2 + grid_tensor_att<T>::rank;
static constexpr unsigned int rank_non_trivial = (N>1 ? 2 : 0) + grid_tensor_att<T>::rank_non_trivial;
static constexpr unsigned int count = N * N * grid_tensor_att<T>::count;
typedef typename grid_tensor_att<T>::scalar_type scalar_type;
static constexpr size_t scalar_size = grid_tensor_att<T>::scalar_size;
static constexpr size_t size = scalar_size * count;
};
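
A sketch (assumption: Complex is Grid's std::complex<double>) turning the worked examples in the comments above into compile-time checks:

    // Sketch only: mirrors the iVector<iMatrix<Complex,3>,4> example above
    static_assert(grid_tensor_att<iVector<iMatrix<Complex, 3>, 4>>::depth == 2, "two nesting levels");
    static_assert(grid_tensor_att<iVector<iMatrix<Complex, 3>, 4>>::rank == 3, "one vector index plus two matrix indices");
    static_assert(grid_tensor_att<iVector<iMatrix<Complex, 3>, 4>>::count == 36, "4 * 3 * 3 elements");
    // Dimension-1 indices are excluded from rank_non_trivial, per the iScalar<iVector<Complex,1>> example
    static_assert(grid_tensor_att<iScalar<iVector<Complex, 1>>>::rank_non_trivial == 0, "trivial dimension excluded");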
// Helper to allow iteration through an Eigen::Tensor (using a lambda)
template <typename ETensor, typename Lambda>
typename std::enable_if<std::is_base_of<Eigen::TensorBase<ETensor, Eigen::ReadOnlyAccessors>, ETensor>::value && is_grid_tensor<typename ETensor::Scalar>::value, void>::type
for_all( ETensor &ET, Lambda lambda )
{
using Scalar = typename ETensor::Scalar::scalar_type;
const std::size_t NumElements = ET.size();
assert( NumElements > 0 );
if( NumElements == 1 ) {
const auto MyRank{grid_tensor_att<typename ETensor::Scalar>::rank_non_trivial};
std::vector<std::size_t> SubIndex(MyRank);
for( auto &idx : SubIndex ) idx = 0;
typename ETensor::Index n = 0;
for( Scalar &Source : * ET.data() ) {
lambda(Source, n++, &SubIndex[0] );
// Now increment SubIndex
for( int i = static_cast<int>(MyRank) - 1; i >= 0 && ++SubIndex[i] == 11/*ReducedDims[i]*/; i-- ) // int, not auto: MyRank is unsigned, so i >= 0 would always hold
SubIndex[i] = 0;
}
}
else {
// We're only interested in non-trivial dimensions (i.e. dimensions > 1)
unsigned int TrivialDimCount{0};
std::vector<size_t> ReducedDims;
ReducedDims.reserve(ET.NumDimensions + grid_tensor_att<typename ETensor::Scalar>::rank_non_trivial); // Make sure we only do one malloc
for(auto i = 0; i < ET.NumDimensions; i++ ) {
auto dim = ET.dimension(i);
if( dim <= 1 ) {
TrivialDimCount++;
assert( dim == 1 ); // Not expecting dimension to be <= 0
} else {
size_t s = static_cast<size_t>(dim);
assert( s == dim ); // check we didn't lose anything in the conversion
ReducedDims.push_back(s);
}
}
// NB: NumElements > 1 implies this is not a scalar, so some dims should be left
assert( ET.NumDimensions > TrivialDimCount );
// Now add the extra dimensions, based on object zero
typename TensorToVec<typename ETensor::Scalar>::type ttv = tensorToVec(* ET.data());
Flatten<typename TensorToVec<typename ETensor::Scalar>::type> f(ttv);
const std::vector<size_t> & ExtraDims{f.getDim()};
assert(ExtraDims.size() == grid_tensor_att<typename ETensor::Scalar>::rank_non_trivial);
size_t ExtraCount{1};
for( auto i : ExtraDims ) {
assert( i > 0 );
ExtraCount *= i;
ReducedDims.push_back(i);
}
assert(grid_tensor_att<typename ETensor::Scalar>::count == ExtraCount);
assert(grid_tensor_att<typename ETensor::Scalar>::size == sizeof( typename ETensor::Scalar ));
const unsigned int ReducedDimsSize = static_cast<unsigned int>(ReducedDims.size());
assert( ReducedDimsSize == ReducedDims.size() );
const typename ETensor::Index TotalNumElements = NumElements * ExtraCount;
std::array<typename ETensor::Index, ETensor::NumIndices> MyIndex;
for( auto &idx : MyIndex ) idx = 0;
std::vector<std::size_t> SubIndex(ReducedDimsSize);
for( auto &idx : SubIndex ) idx = 0;
for( typename ETensor::Index n = 0; n < TotalNumElements; ) {
for( Scalar &Source : ET( MyIndex ) ) {
lambda(Source, n++, &SubIndex[0] );
// Now increment MyIndex
for( auto i = ET.NumDimensions - 1; i >= 0 && ++MyIndex[i] == ET.dimension(i); i-- )
MyIndex[i] = 0;
// Now increment SubIndex
for( int i = static_cast<int>(ReducedDimsSize) - 1; i >= 0 && ++SubIndex[i] == ReducedDims[i]; i-- ) // int, not auto: ReducedDimsSize is unsigned, so i >= 0 would always hold
SubIndex[i] = 0;
}
}
}
}
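
A usage sketch for for_all, modelled on the call in the test program further down (LSCTensor and SpinColourVector as defined there); the lambda receives each underlying scalar, its flat index, and the per-dimension sub-indices:

    // Sketch only: visit every underlying complex number of a tensor of Grid tensors
    Eigen::TensorFixedSize<SpinColourVector, Eigen::Sizes<11,3,2>> l;
    for_all( l, [](std::complex<double> &c, std::size_t index, const std::size_t * pSubIndex ){
      c = std::complex<double>( static_cast<double>(index), 0 ); // e.g. number the scalars
    } );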

// Static abstract writer
template <typename T>
@@ -128,24 +139,11 @@ namespace Grid {
template <typename U, int N>
void write(const std::string &s, const iMatrix<U, N> &output);
template <typename ETensor>
typename std::enable_if<std::is_base_of<Eigen::TensorBase<ETensor, Eigen::ReadOnlyAccessors>, ETensor>::value && (std::is_arithmetic<typename ETensor::Scalar>::value || Grid::is_complex<typename ETensor::Scalar>::value), void>::type
typename std::enable_if<std::is_base_of<Eigen::TensorBase<ETensor, Eigen::ReadOnlyAccessors>, ETensor>::value && is_eigen_tensor_scalar<typename ETensor::Scalar>::value, void>::type
write(const std::string &s, const ETensor &output);
template <typename ETensor/*, typename U, int N*/>
typename std::enable_if<std::is_base_of<Eigen::TensorBase<ETensor, Eigen::ReadOnlyAccessors>, ETensor>::value
&& is_grid_tensor<typename ETensor::Scalar>::value
//&& !(std::is_arithmetic<typename ETensor::Scalar>::value || Grid::is_complex<typename ETensor::Scalar>::value)
/*&& ( std::is_base_of<typename ETensor::Scalar, iScalar<U> >::value
|| std::is_base_of<typename ETensor::Scalar, iVector<U, N>>::value
|| std::is_base_of<typename ETensor::Scalar, iMatrix<U, N>>::value )*/, void>::type
template <typename ETensor>
typename std::enable_if<std::is_base_of<Eigen::TensorBase<ETensor, Eigen::ReadOnlyAccessors>, ETensor>::value && is_grid_tensor<typename ETensor::Scalar>::value, void>::type
write(const std::string &s, const ETensor &output);
/*template <typename ETensor, typename U, int N>
typename std::enable_if<std::is_base_of<Eigen::TensorBase<ETensor, Eigen::ReadOnlyAccessors>, ETensor>::value
&& std::is_base_of<typename ETensor::Scalar, iVector<U, N>>::value, void>::type
write(const std::string &s, const ETensor &output);
template <typename ETensor, typename U, int N>
typename std::enable_if<std::is_base_of<Eigen::TensorBase<ETensor, Eigen::ReadOnlyAccessors>, ETensor>::value
&& std::is_base_of<typename ETensor::Scalar, iMatrix<U, N>>::value, void>::type
write(const std::string &s, const ETensor &output);*/

void scientificFormat(const bool set);
bool isScientific(void);
@@ -254,18 +252,18 @@ namespace Grid {
// Eigen::Tensors of arithmetic/complex base type
template <typename T>
template <typename ETensor>
typename std::enable_if<std::is_base_of<Eigen::TensorBase<ETensor, Eigen::ReadOnlyAccessors>, ETensor>::value && (std::is_arithmetic<typename ETensor::Scalar>::value || Grid::is_complex<typename ETensor::Scalar>::value), void>::type
typename std::enable_if<std::is_base_of<Eigen::TensorBase<ETensor, Eigen::ReadOnlyAccessors>, ETensor>::value && is_eigen_tensor_scalar<typename ETensor::Scalar>::value, void>::type
Writer<T>::write(const std::string &s, const ETensor &output)
{
std::cout << "Eigen::Tensors of arithmetic/complex base type" << std::endl;
const typename ETensor::Index NumElements{output.size()};
assert( NumElements > 0 );
if( NumElements == 1 )
upcast->writeDefault(s, * output.data());
else {
// We're not interested in trivial dimensions, i.e. dimensions = 1
// We're only interested in non-trivial dimensions (i.e. dimensions > 1)
unsigned int TrivialDimCount{0};
std::vector<size_t> ReducedDims;
ReducedDims.reserve(output.NumDimensions); // Make sure we only do one malloc
for(auto i = 0; i < output.NumDimensions; i++ ) {
auto dim = output.dimension(i);
if( dim <= 1 ) {
@@ -277,43 +275,48 @@ namespace Grid {
ReducedDims.push_back(s);
}
}
assert( output.NumDimensions > TrivialDimCount > 0 ); // NB: NumElements > 1 implies this is not a scalar, so some dims should be left
// Create a single, flat vector to hold all the data
std::vector<typename ETensor::Scalar> flat(NumElements);
// Now copy all the data to my flat vector
// Regardless of the Eigen::Tensor storage order, the copy will be Row Major
std::array<typename ETensor::Index, ETensor::NumIndices> MyIndex;
for( int i = 0 ; i < output.NumDimensions ; i++ ) MyIndex[i] = 0;
for( typename ETensor::Index n = 0; n < NumElements; n++ ) {
flat[n] = output( MyIndex );
// Now increment the index
for( int i = output.NumDimensions - 1; i >= 0 && ++MyIndex[i] == output.dimension(i); i-- )
MyIndex[i] = 0;
// NB: NumElements > 1 implies this is not a scalar, so some dims should be left
assert( output.NumDimensions > TrivialDimCount );
// If the Tensor isn't in Row-Major order, then we'll need to copy its data
const bool CopyData{ETensor::Layout != Eigen::StorageOptions::RowMajor};
using Scalar = typename ETensor::Scalar;
const Scalar * pWriteBuffer;
Scalar * pCopyBuffer = nullptr;
if( !CopyData )
pWriteBuffer = output.data();
else {
// Regardless of the Eigen::Tensor storage order, the copy will be Row Major
pCopyBuffer = static_cast<Scalar *>(malloc(sizeof(Scalar) * NumElements));
pWriteBuffer = pCopyBuffer;
std::array<typename ETensor::Index, ETensor::NumIndices> MyIndex;
for( auto &idx : MyIndex ) idx = 0;
for( typename ETensor::Index n = 0; n < NumElements; n++ ) {
pCopyBuffer[n] = output( MyIndex );
// Now increment the index
for( int i = output.NumDimensions - 1; i >= 0 && ++MyIndex[i] == output.dimension(i); i-- )
MyIndex[i] = 0;
}
}
upcast->template writeMultiDim<typename ETensor::Scalar>(s, ReducedDims, flat);
upcast->template writeMultiDim<typename ETensor::Scalar>(s, ReducedDims, pWriteBuffer, NumElements);
if( pCopyBuffer ) free( pCopyBuffer );
}
}
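
The index-increment loop above is the generic row-major flattening idiom; a standalone sketch of the same idea for a small column-major tensor (assumes the unsupported Eigen/CXX11/Tensor header is available):

    // Sketch only: flatten a (column-major by default) rank-2 Eigen tensor in row-major order
    Eigen::Tensor<double, 2> t(2, 3);
    t.setRandom();
    std::vector<double> rowMajor(t.size());
    std::array<Eigen::Index, 2> idx{{0, 0}};
    for( Eigen::Index n = 0; n < t.size(); n++ ) {
      rowMajor[n] = t(idx);
      // bump the last index first, carrying into earlier ones
      for( int i = 1; i >= 0 && ++idx[i] == t.dimension(i); i-- )
        idx[i] = 0;
    }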

// Eigen::Tensors of iScalar<U>
// Eigen::Tensors of Grid tensors (iScalar, iVector, iMatrix)
template <typename T>
template <typename ETensor/*, typename U, int N*/>
typename std::enable_if<std::is_base_of<Eigen::TensorBase<ETensor, Eigen::ReadOnlyAccessors>, ETensor>::value
&& is_grid_tensor<typename ETensor::Scalar>::value
//&& !(std::is_arithmetic<typename ETensor::Scalar>::value || Grid::is_complex<typename ETensor::Scalar>::value)
/*&& ( std::is_base_of<typename ETensor::Scalar, iScalar<U> >::value
|| std::is_base_of<typename ETensor::Scalar, iVector<U, N>>::value
|| std::is_base_of<typename ETensor::Scalar, iMatrix<U, N>>::value )*/, void>::type
typename std::enable_if<std::is_base_of<Eigen::TensorBase<ETensor, Eigen::ReadOnlyAccessors>, ETensor>::value && is_grid_tensor<typename ETensor::Scalar>::value, void>::type
Writer<T>::write(const std::string &s, const ETensor &output)
{
std::cout << "Eigen::Tensors of iScalar<U>" << std::endl;
const typename ETensor::Index NumElements{output.size()};
assert( NumElements > 0 );
if( NumElements == 1 )
upcast->writeDefault(s, tensorToVec(* output.data()));
else {
// We're not interested in trivial dimensions, i.e. dimensions = 1
// We're only interested in non-trivial dimensions (i.e. dimensions > 1)
unsigned int TrivialDimCount{0};
std::vector<size_t> ReducedDims;
ReducedDims.reserve(output.NumDimensions + grid_tensor_att<typename ETensor::Scalar>::rank_non_trivial); // Make sure we only do one malloc
for(auto i = 0; i < output.NumDimensions; i++ ) {
auto dim = output.dimension(i);
if( dim <= 1 ) {
@@ -325,60 +328,50 @@ namespace Grid {
ReducedDims.push_back(s);
}
}
assert( output.NumDimensions > TrivialDimCount > 0 ); // NB: NumElements > 1 implies this is not a scalar, so some dims should be left
// NB: NumElements > 1 implies this is not a scalar, so some dims should be left
assert( output.NumDimensions > TrivialDimCount );
// Now add the extra dimensions, based on object zero
typename TensorToVec<typename ETensor::Scalar>::type ttv = tensorToVec(* output.data());
Flatten<typename TensorToVec<typename ETensor::Scalar>::type> f(ttv);
const std::vector<size_t> & ExtraDims{f.getDim()};
assert(ExtraDims.size() == grid_tensor_att<typename ETensor::Scalar>::rank_non_trivial);
size_t ExtraCount{1};
for( auto i : ExtraDims ) {
assert( i > 0 );
ExtraCount *= i;
ReducedDims.push_back(i);
}
typedef typename ETensor::Scalar::scalar_type Scalar;
assert( sizeof( typename ETensor::Scalar ) == ExtraCount * sizeof( Scalar ) );
// Create a single, flat vector to hold all the data
assert(grid_tensor_att<typename ETensor::Scalar>::count == ExtraCount);
assert(grid_tensor_att<typename ETensor::Scalar>::size == sizeof( typename ETensor::Scalar ));
// If the Tensor isn't in Row-Major order, then we'll need to copy its data
const bool CopyData{ETensor::Layout != Eigen::StorageOptions::RowMajor};
using Scalar = typename ETensor::Scalar::scalar_type;
const Scalar * pWriteBuffer;
Scalar * pCopyBuffer = nullptr;
const typename ETensor::Index TotalNumElements = NumElements * ExtraCount;
std::vector<Scalar> flat(TotalNumElements);
// Now copy all the data to my flat vector
// Regardless of the Eigen::Tensor storage order, the copy will be Row Major
std::array<typename ETensor::Index, ETensor::NumIndices> MyIndex;
for( int i = 0 ; i < output.NumDimensions ; i++ ) MyIndex[i] = 0;
for( typename ETensor::Index n = 0; n < TotalNumElements; ) {
const Scalar * p = reinterpret_cast<const Scalar *>( &output( MyIndex ));
for( auto j = 0; j < ExtraCount ; j++ )
flat[n++] = * p++;
// Now increment the index
for( int i = output.NumDimensions - 1; i >= 0 && ++MyIndex[i] == output.dimension(i); i-- )
MyIndex[i] = 0;
if( !CopyData )
pWriteBuffer = output.data()->begin();
else {
// Regardless of the Eigen::Tensor storage order, the copy will be Row Major
pCopyBuffer = static_cast<Scalar *>(malloc(TotalNumElements * sizeof(Scalar)));
pWriteBuffer = pCopyBuffer;
Scalar * pCopy = pCopyBuffer;
std::array<typename ETensor::Index, ETensor::NumIndices> MyIndex;
for( auto &idx : MyIndex ) idx = 0;
for( typename ETensor::Index n = 0; n < NumElements; n++ ) {
// Copy the grid tensor
for( const Scalar &Source : output( MyIndex ) )
* pCopy ++ = Source;
// Now increment the index
for( int i = output.NumDimensions - 1; i >= 0 && ++MyIndex[i] == output.dimension(i); i-- )
MyIndex[i] = 0;
}
}
upcast->template writeMultiDim<Scalar>(s, ReducedDims, flat);
upcast->template writeMultiDim<Scalar>(s, ReducedDims, pWriteBuffer, TotalNumElements);
if( pCopyBuffer ) free( pCopyBuffer );
}
}
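
A usage sketch (file name and instance name illustrative) of how this overload gets selected, in the style of the ioTest calls in the test program:

    // Sketch only: an Eigen::Tensor of Grid tensors dispatches to the overload above
    Eigen::TensorFixedSize<SpinColourVector, Eigen::Sizes<11,3,2>> l;
    Hdf5Writer writer("tensor_example.h5");
    write(writer, "LSCTensor_example", l);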

// Eigen::Tensors of iVector<U, N>
/*template <typename T>
template <typename ETensor, typename U, int N>
typename std::enable_if<std::is_base_of<Eigen::TensorBase<ETensor, Eigen::ReadOnlyAccessors>, ETensor>::value
&& std::is_base_of<typename ETensor::Scalar, iVector<U, N>>::value, void>::type
Writer<T>::write(const std::string &s, const ETensor &output)
{
//upcast->writeDefault(s, tensorToVec(output));
std::cout << "I really should add code to write Eigen::Tensor (iVector) ..." << std::endl;
}

// Eigen::Tensors of iMatrix<U, N>
template <typename T>
template <typename ETensor, typename U, int N>
typename std::enable_if<std::is_base_of<Eigen::TensorBase<ETensor, Eigen::ReadOnlyAccessors>, ETensor>::value
&& std::is_base_of<typename ETensor::Scalar, iMatrix<U, N>>::value, void>::type
Writer<T>::write(const std::string &s, const ETensor &output)
{
//upcast->writeDefault(s, tensorToVec(output));
std::cout << "I really should add code to write Eigen::Tensor (iMatrix) ..." << std::endl;
}*/

template <typename T>
void Writer<T>::scientificFormat(const bool set)
{
@@ -516,7 +509,7 @@ namespace Grid {
template <typename T>
static inline typename std::enable_if<std::is_base_of<Eigen::TensorBase<T, Eigen::ReadOnlyAccessors>, T>::value, bool>::type
CompareMember(const T &lhs, const T &rhs) {
Eigen::Tensor<bool, 0> bResult = (lhs == rhs).all();
Eigen::Tensor<bool, 0, T::Options> bResult = (lhs == rhs).all();
return bResult(0);
}

@@ -526,7 +519,7 @@ namespace Grid {
const auto NumElements{lhs.size()};
bool bResult = ( NumElements == rhs.size() );
for( auto i = 0 ; i < NumElements && bResult ; i++ ) {
Eigen::Tensor<bool, 0> b = (lhs[i] == rhs[i]).all();
Eigen::Tensor<bool, 0, T::Options> b = (lhs[i] == rhs[i]).all();
bResult = b(0);
}
return bResult;

@@ -39,7 +39,7 @@ namespace Grid
typename std::enable_if<!element<std::vector<U>>::is_number, void>::type
writeDefault(const std::string &s, const std::vector<U> &x);
template <typename U>
void writeMultiDim(const std::string &s, const std::vector<size_t> & Dimensions, const std::vector<U> & DataRowMajor);
void writeMultiDim(const std::string &s, const std::vector<size_t> & Dimensions, const U * pDataRowMajor, size_t NumElements);
H5NS::Group & getGroup(void);
private:
template <typename U>
@@ -104,31 +104,32 @@ namespace Grid
void Hdf5Writer::writeDefault(const std::string &s, const std::string &x);

template <typename U>
void Hdf5Writer::writeMultiDim(const std::string &s, const std::vector<size_t> & Dimensions, const std::vector<U> & DataRowMajor)
void Hdf5Writer::writeMultiDim(const std::string &s, const std::vector<size_t> & Dimensions, const U * pDataRowMajor, size_t NumElements)
{
// Hdf5 needs the dimensions as hsize_t
std::vector<hsize_t> dim;
for (auto &d: Dimensions)
dim.push_back(d);
int rank = static_cast<int>(Dimensions.size());
std::vector<hsize_t> dim(rank);
for(int i = 0; i < rank; i++)
dim[i] = Dimensions[i];
// write to file
H5NS::DataSpace dataSpace(dim.size(), dim.data());

if (DataRowMajor.size() > dataSetThres_)
H5NS::DataSpace dataSpace(rank, dim.data());

size_t DataSize = NumElements * sizeof(U);
if (DataSize > dataSetThres_)
{
H5NS::DataSet dataSet;
H5NS::DSetCreatPropList plist;

plist.setChunk(dim.size(), dim.data());
plist.setChunk(rank, dim.data());
plist.setFletcher32();
dataSet = group_.createDataSet(s, Hdf5Type<U>::type(), dataSpace, plist);
dataSet.write(&DataRowMajor[0], Hdf5Type<U>::type());
dataSet.write(pDataRowMajor, Hdf5Type<U>::type());
}
else
{
H5NS::Attribute attribute;

attribute = group_.createAttribute(s, Hdf5Type<U>::type(), dataSpace);
attribute.write(Hdf5Type<U>::type(), &DataRowMajor[0]);
attribute.write(Hdf5Type<U>::type(), pDataRowMajor);
}
}
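
A sketch (hypothetical data) of driving the new pointer-based interface directly; the old std::vector overload forwarded the same information via &DataRowMajor[0] and DataRowMajor.size():

    // Sketch only: write a 2 x 3 row-major block of doubles
    Hdf5Writer writer("multidim_example.h5");
    const std::vector<size_t> dims{2, 3};
    const double data[6] = {1, 2, 3, 4, 5, 6};
    writer.writeMultiDim<double>("block", dims, data, 6);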

@@ -145,7 +146,7 @@ namespace Grid
const auto &flatx = flat.getFlatVector();
for (auto &d: flat.getDim())
dim.push_back(d);
writeMultiDim<Element>(s, dim, flatx);
writeMultiDim<Element>(s, dim, &flatx[0], flatx.size());
}

template <typename U>

@@ -173,7 +173,37 @@ class iScalar {
return stream;
};

template <typename T = vtype>
typename std::enable_if<!is_grid_tensor<T>::value, const scalar_type *>::type
strong_inline begin() const { return &_internal; }

template <typename T = vtype>
typename std::enable_if<is_grid_tensor<T>::value, const scalar_type *>::type
strong_inline begin() const { return _internal.begin(); }

template <typename T = vtype>
typename std::enable_if<!is_grid_tensor<T>::value, const scalar_type *>::type
strong_inline end() const { return (&_internal) + grid_tensor_att<iScalar<T>>::count; }

template <typename T = vtype>
typename std::enable_if<is_grid_tensor<T>::value, const scalar_type *>::type
strong_inline end() const { return _internal.begin() + grid_tensor_att<iScalar<T>>::count; }

template <typename T = vtype>
typename std::enable_if<!is_grid_tensor<T>::value, scalar_type *>::type
strong_inline begin() { return &_internal; }

template <typename T = vtype>
typename std::enable_if<is_grid_tensor<T>::value, scalar_type *>::type
strong_inline begin() { return _internal.begin(); }

template <typename T = vtype>
typename std::enable_if<!is_grid_tensor<T>::value, scalar_type *>::type
strong_inline end() { return (&_internal) + grid_tensor_att<iScalar<T>>::count; }

template <typename T = vtype>
typename std::enable_if<is_grid_tensor<T>::value, scalar_type *>::type
strong_inline end() { return _internal.begin() + grid_tensor_att<iScalar<T>>::count; }
};
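
With these overloads, nested Grid tensors can be traversed with a range-for over their underlying scalars, which is what the for_all helper and the writer's row-major copy rely on; a sketch (assuming Complex scalars and the contiguous layout the commit assumes):

    // Sketch only: range-for visits all 36 underlying scalars contiguously
    iScalar<iVector<iMatrix<Complex, 3>, 4>> s;
    Complex val{1, -1};
    for( Complex &x : s )
      x = val;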
///////////////////////////////////////////////////////////
// Allows turning scalar<scalar<scalar<double>>> back to double.
@@ -303,6 +333,38 @@ class iVector {
// strong_inline vtype && operator ()(int i) {
//   return _internal[i];
// }

template <typename T = vtype>
typename std::enable_if<!is_grid_tensor<T>::value, const scalar_type *>::type
strong_inline begin() const { return _internal; }

template <typename T = vtype>
typename std::enable_if<is_grid_tensor<T>::value, const scalar_type *>::type
strong_inline begin() const { return _internal[0].begin(); }

template <typename T = vtype>
typename std::enable_if<!is_grid_tensor<T>::value, const scalar_type *>::type
strong_inline end() const { return _internal + grid_tensor_att<iVector<T,N>>::count; }

template <typename T = vtype>
typename std::enable_if<is_grid_tensor<T>::value, const scalar_type *>::type
strong_inline end() const { return _internal[0].begin() + grid_tensor_att<iVector<T,N>>::count; }

template <typename T = vtype>
typename std::enable_if<!is_grid_tensor<T>::value, scalar_type *>::type
strong_inline begin() { return _internal; }

template <typename T = vtype>
typename std::enable_if<is_grid_tensor<T>::value, scalar_type *>::type
strong_inline begin() { return _internal[0].begin(); }

template <typename T = vtype>
typename std::enable_if<!is_grid_tensor<T>::value, scalar_type *>::type
strong_inline end() { return _internal + grid_tensor_att<iVector<T,N>>::count; }

template <typename T = vtype>
typename std::enable_if<is_grid_tensor<T>::value, scalar_type *>::type
strong_inline end() { return _internal[0].begin() + grid_tensor_att<iVector<T,N>>::count; }
};

template <class vtype, int N>
@@ -458,6 +520,38 @@ class iMatrix {
// strong_inline vtype && operator ()(int i,int j) {
//   return _internal[i][j];
// }

template <typename T = vtype>
typename std::enable_if<!is_grid_tensor<T>::value, const scalar_type *>::type
strong_inline begin() const { return _internal[0]; }

template <typename T = vtype>
typename std::enable_if<is_grid_tensor<T>::value, const scalar_type *>::type
strong_inline begin() const { return _internal[0][0].begin(); }

template <typename T = vtype>
typename std::enable_if<!is_grid_tensor<T>::value, const scalar_type *>::type
strong_inline end() const { return _internal[0] + grid_tensor_att<iMatrix<T,N>>::count; }

template <typename T = vtype>
typename std::enable_if<is_grid_tensor<T>::value, const scalar_type *>::type
strong_inline end() const { return _internal[0][0].begin() + grid_tensor_att<iMatrix<T,N>>::count; }

template <typename T = vtype>
typename std::enable_if<!is_grid_tensor<T>::value, scalar_type *>::type
strong_inline begin() { return _internal[0]; }

template <typename T = vtype>
typename std::enable_if<is_grid_tensor<T>::value, scalar_type *>::type
strong_inline begin() { return _internal[0][0].begin(); }

template <typename T = vtype>
typename std::enable_if<!is_grid_tensor<T>::value, scalar_type *>::type
strong_inline end() { return _internal[0] + grid_tensor_att<iMatrix<T,N>>::count; }

template <typename T = vtype>
typename std::enable_if<is_grid_tensor<T>::value, scalar_type *>::type
strong_inline end() { return _internal[0][0].begin() + grid_tensor_att<iMatrix<T,N>>::count; }
};

template <class v>

@@ -288,6 +288,79 @@ namespace Grid {

enum { value = sizeof(real_scalar_type)/sizeof(float) };
};

////////////////////////////////////////////////////////////////////////////
// Review with Peter - obvious (but partial) overlap with the above
// What of this is good and should be kept ... vs what functions should I really be using?
////////////////////////////////////////////////////////////////////////////

// Need some forward references or the below won't work
template <class vtype>
class iScalar;
template <class vtype, int N>
class iVector;
template <class vtype, int N>
class iMatrix;

// which types are grid tensors
template <typename T> struct is_grid_tensor : public std::false_type {};
template <typename T> struct is_grid_tensor<iScalar<T>> : public std::true_type {};
template <typename T, int N> struct is_grid_tensor<iVector<T, N>> : public std::true_type {};
template <typename T, int N> struct is_grid_tensor<iMatrix<T, N>> : public std::true_type {};

// Rank and dimension of grid tensors, i.e. compositions of iScalar, iVector and iMatrix
// This defines the bottom level - i.e. it's a description of the underlying scalar
template <typename T> struct grid_tensor_att {
static constexpr unsigned int depth = 0; // How many levels of Grid Tensor there are (TensorLevel)
static constexpr unsigned int rank = 0; // The rank of the grid tensor (i.e. how many indices used)
static constexpr unsigned int rank_non_trivial = 0; // As per rank, but excludes those of dimension 1
static constexpr unsigned int count = 1; // total number of elements (i.e. product of dimensions)
using scalar_type = T; // Type of the underlying scalar
static constexpr std::size_t scalar_size = sizeof(T); // Size of the underlying scalar in bytes
static constexpr std::size_t size = scalar_size * count; // total size of elements in bytes
// e.g. iScalar<iVector<Complex,1>>
//      depth = 2
//      rank = 1
//      rank_non_trivial = 0
//      count = 1
// e.g. iVector<iMatrix<Complex,3>,4>
//      depth = 2
//      rank = 3
//      rank_non_trivial = 3
//      count = 36
// e.g. iScalar<iVector<iMatrix<Complex,4>,3>>
//      depth = 3
//      rank = 3
//      rank_non_trivial = 3
//      count = 48
};
template <typename T> struct grid_tensor_att<iScalar<T>> {
static constexpr unsigned int depth = 1 + grid_tensor_att<T>::depth;
static constexpr unsigned int rank = 0 + grid_tensor_att<T>::rank;
static constexpr unsigned int rank_non_trivial = 0 + grid_tensor_att<T>::rank_non_trivial;
static constexpr unsigned int count = 1 * grid_tensor_att<T>::count;
using scalar_type = typename grid_tensor_att<T>::scalar_type;
static constexpr std::size_t scalar_size = grid_tensor_att<T>::scalar_size;
static constexpr std::size_t size = scalar_size * count;
};
template <typename T, int N> struct grid_tensor_att<iVector<T, N>> {
static constexpr unsigned int depth = 1 + grid_tensor_att<T>::depth;
static constexpr unsigned int rank = 1 + grid_tensor_att<T>::rank;
static constexpr unsigned int rank_non_trivial = (N>1 ? 1 : 0) + grid_tensor_att<T>::rank_non_trivial;
static constexpr unsigned int count = N * grid_tensor_att<T>::count;
using scalar_type = typename grid_tensor_att<T>::scalar_type;
static constexpr std::size_t scalar_size = grid_tensor_att<T>::scalar_size;
static constexpr std::size_t size = scalar_size * count;
};
template <typename T, int N> struct grid_tensor_att<iMatrix<T, N>> {
static constexpr unsigned int depth = 1 + grid_tensor_att<T>::depth;
static constexpr unsigned int rank = 2 + grid_tensor_att<T>::rank;
static constexpr unsigned int rank_non_trivial = (N>1 ? 2 : 0) + grid_tensor_att<T>::rank_non_trivial;
static constexpr unsigned int count = N * N * grid_tensor_att<T>::count;
using scalar_type = typename grid_tensor_att<T>::scalar_type;
static constexpr std::size_t scalar_size = grid_tensor_att<T>::scalar_size;
static constexpr std::size_t size = scalar_size * count;
};
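
A sketch of the byte-size bookkeeping these attributes provide (assumption: Complex is std::complex<double>); the writer code above relies on the same identity when it asserts size == sizeof(ETensor::Scalar):

    // Sketch only: size is always scalar_size * count
    static_assert(grid_tensor_att<iMatrix<Complex, 3>>::scalar_size == sizeof(Complex), "underlying scalar size");
    static_assert(grid_tensor_att<iMatrix<Complex, 3>>::size == 9 * sizeof(Complex), "3 x 3 matrix of scalars");
    static_assert(grid_tensor_att<iMatrix<Complex, 3>>::size == sizeof(iMatrix<Complex, 3>), "matches the object size");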
}

#endif

@@ -103,8 +103,9 @@ void ioTest(const std::string &filename, const O &object, const std::string &nam
#ifdef DEBUG
//typedef int TestScalar;
typedef std::complex<double> TestScalar;
typedef Eigen::Tensor<TestScalar, 3> TestTensor;
typedef Eigen::TensorFixedSize<TestScalar, Eigen::Sizes<9,4,2>> TestTensorFixed;
typedef Eigen::Tensor<iMatrix<TestScalar,1>, 6> TestTensorSingle;
typedef Eigen::Tensor<TestScalar, 3, Eigen::StorageOptions::RowMajor> TestTensor;
typedef Eigen::TensorFixedSize<TestScalar, Eigen::Sizes<9,4,2>, Eigen::StorageOptions::RowMajor> TestTensorFixed;
typedef std::vector<TestTensorFixed> aTestTensorFixed;
typedef Eigen::TensorFixedSize<SpinColourVector, Eigen::Sizes<11,3,2>> LSCTensor;
typedef Eigen::TensorFixedSize<LorentzColourMatrix, Eigen::Sizes<5,7,2>> LCMTensor;
@@ -124,15 +125,18 @@ public:
};

bool EigenIOTest(void) {
constexpr TestScalar Inc{1,-1};
TestTensorSingle ts(1,1,1,1,1,1);
ts(0,0,0,0,0,0) = Inc * 3.1415927;
ioTest<Hdf5Writer, Hdf5Reader, TestTensorSingle>("iotest_single.h5", ts, "Singlet");

SpinColourVector scv, scv2;
scv2 = scv;
ioTest<Hdf5Writer, Hdf5Reader, SpinColourVector>("iotest_vector.h5", scv, "SpinColourVector");
SpinColourMatrix scm;
ioTest<Hdf5Writer, Hdf5Reader, SpinColourMatrix>("iotest_matrix.h5", scm, "SpinColourMatrix");

constexpr TestScalar Inc{1,-1};

TestTensor t(3,6,2);
TestTensor t(6,3,2);
TestScalar Val{Inc};
for( int i = 0 ; i < t.dimension(0) ; i++)
for( int j = 0 ; j < t.dimension(1) ; j++)
@@ -141,7 +145,7 @@ bool EigenIOTest(void) {
Val += Inc;
}
ioTest<Hdf5Writer, Hdf5Reader, TestTensor>("iotest_tensor.h5", t, "eigen_tensor_instance_name");

// Now serialise a fixed size tensor
using FixedTensor = Eigen::TensorFixedSize<TestScalar, Eigen::Sizes<8,4,3>>;
FixedTensor tf;
@@ -170,7 +174,15 @@ bool EigenIOTest(void) {
Val += Inc;
}
ioTest<Hdf5Writer, Hdf5Reader, LSCTensor>("iotest_LSCTensor.h5", l, "LSCTensor_object_instance_name");

std::cout << "t:";
for_all( l, [](std::complex<double> &c, LSCTensor::Index index, const std::size_t * pDims ){
std::cout << " ";
for( int i = 0 ; i < 5; i++ )
std::cout << "[" << pDims[i] << "]";
std::cout << " = " << c << std::endl;
} );
std::cout << std::endl;

// Tensor of spin colour
LCMTensor l2;
Val = 0;

@@ -477,6 +477,23 @@ void DebugGridTensorTest_print( int i )
<< std::endl;
}

// begin() and end() are the minimum necessary to support range-for loops
// should really turn this into an iterator ...
template<typename T, int N>
class TestObject {
public:
using value_type = T;
private:
value_type * m_p;
public:
TestObject() {
m_p = reinterpret_cast<value_type *>(std::malloc(N * sizeof(value_type)));
}
~TestObject() { std::free(m_p); }
inline value_type * begin(void) { return m_p; }
inline value_type * end(void) { return m_p + N; }
};
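
A usage sketch for the minimal range-for support (hypothetical values; note the malloc'd storage starts uninitialised):

    // Sketch only: begin()/end() are all a range-for needs
    TestObject<double, 4> buf;
    for( double &x : buf )
      x = 0.0; // initialise the raw storage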

bool DebugGridTensorTest( void )
{
typedef Complex t1;
@@ -485,7 +502,8 @@ bool DebugGridTensorTest( void )
typedef iMatrix<t1, Nc> t4;
typedef iVector<iMatrix<t1,1>,4> t5;
typedef iScalar<t5> t6;
typedef iMatrix<iVector<iScalar<iMatrix<t6, 1>>,2>,7> t7;
typedef iMatrix<t6, 3> t7;
typedef iMatrix<iVector<iScalar<t7>,4>,2> t8;
int i = 1;
DebugGridTensorTest_print<t1>( i++ );
DebugGridTensorTest_print<t2>( i++ );
@@ -494,6 +512,27 @@ bool DebugGridTensorTest( void )
DebugGridTensorTest_print<t5>( i++ );
DebugGridTensorTest_print<t6>( i++ );
DebugGridTensorTest_print<t7>( i++ );
DebugGridTensorTest_print<t8>( i++ );

//using TOC7 = TestObject<std::complex<double>, 7>;
using TOC7 = t7;
TOC7 toc7;
constexpr std::complex<double> Inc{1,-1};
std::complex<double> Start{Inc};
for( auto &x : toc7 ) {
x = Start;
Start += Inc;
}
i = 0;
for( auto x : toc7 ) std::cout << "toc7[" << i++ << "] = " << x << std::endl;

t2 o2;
auto a2 = TensorRemove(o2);
//t3 o3;
//t4 o4;
//auto a3 = TensorRemove(o3);
//auto a4 = TensorRemove(o4);

return true;
}
#endif
@@ -502,8 +541,12 @@ int main(int argc, char *argv[])
{
#ifdef DEBUG
// Debug only - test of Eigen::Tensor
std::cout << "sizeof(std::streamsize) = " << sizeof(std::streamsize) << std::endl;
std::cout << "sizeof(Eigen::Index) = " << sizeof(Eigen::Index) << std::endl;
std::cout << "sizeof(int) = " << sizeof(int)
<< ", sizeof(long) = " << sizeof(long)
<< ", sizeof(size_t) = " << sizeof(size_t)
<< ", sizeof(std::size_t) = " << sizeof(std::size_t)
<< ", sizeof(std::streamsize) = " << sizeof(std::streamsize)
<< ", sizeof(Eigen::Index) = " << sizeof(Eigen::Index) << std::endl;
//if( DebugEigenTest() ) return 0;
if(DebugGridTensorTest()) return 0;
#endif