Mirror of https://github.com/paboyle/Grid.git (synced 2024-11-09 23:45:36 +00:00)

Commit 5e6104e683: Merge branch 'feature/distil' of https://github.com/mmphys/Grid into feature/distil
@@ -61,15 +61,25 @@ namespace Grid {
   template<typename T, typename V = void> struct is_tensor_of_container : public std::false_type {};
   template<typename T> struct is_tensor_of_container<T, typename std::enable_if<is_tensor<T>::value && isGridTensor<typename T::Scalar>::value>::type> : public std::true_type {};
 
-  // Traits are the default for scalars, or come from GridTypeMapper for GridTensors
+  // These traits describe the scalars inside Eigen tensors
+  // I wish I could define these in reference to the scalar type (so there would be fewer traits defined)
+  // but I'm unable to find a syntax to make this work
   template<typename T, typename V = void> struct Traits {};
-  template<typename T> struct Traits<T, typename std::enable_if<is_tensor_of_scalar<T>::value>::type> : public GridTypeMapper_Base {
-    using scalar_type = typename T::Scalar;
+  // Traits are the default for scalars, or come from GridTypeMapper for GridTensors
+  template<typename T> struct Traits<T, typename std::enable_if<is_tensor_of_scalar<T>::value>::type>
+    : public GridTypeMapper_Base {
+    using scalar_type = typename T::Scalar; // ultimate base scalar
     static constexpr bool is_complex = ::Grid::EigenIO::is_complex<scalar_type>::value;
   };
-  template<typename T> struct Traits<T, typename std::enable_if<is_tensor_of_container<T>::value>::type> : public GridTypeMapper<typename T::Scalar> {
-    using scalar_type = typename GridTypeMapper<typename T::Scalar>::scalar_type;
-    static constexpr bool is_complex = ::Grid::EigenIO::is_complex<scalar_type>::value;
+  // Traits are the default for scalars, or come from GridTypeMapper for GridTensors
+  template<typename T> struct Traits<T, typename std::enable_if<is_tensor_of_container<T>::value>::type> {
+    using BaseTraits = GridTypeMapper<typename T::Scalar>;
+    using scalar_type = typename BaseTraits::scalar_type; // ultimate base scalar
+    static constexpr bool is_complex = ::Grid::EigenIO::is_complex<scalar_type>::value;
+    static constexpr int TensorLevel = BaseTraits::TensorLevel;
+    static constexpr int Rank = BaseTraits::Rank;
+    static constexpr std::size_t count = BaseTraits::count;
+    static constexpr int Dimension(int dim) { return BaseTraits::Dimension(dim); }
   };
 
   // Is this a fixed-size Eigen tensor
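Note: the hunk above dispatches `Traits` on whether the Eigen tensor's `Scalar` is a plain scalar or a nested Grid container, forwarding to `GridTypeMapper` in the latter case. A minimal standalone sketch of the same SFINAE dispatch pattern, using hypothetical stand-in types rather than the Grid/Eigen originals:

```cpp
#include <complex>
#include <iostream>
#include <type_traits>

struct ScalarTensor    { using Scalar = double; };                 // "tensor of scalar"
struct Container       { using scalar_type = std::complex<float>; };
struct ContainerTensor { using Scalar = Container; };              // "tensor of container"

template<typename T> struct is_container : std::false_type {};
template<>           struct is_container<Container> : std::true_type {};

template<typename T, typename V = void> struct Traits {};         // primary template: empty

// Chosen when Scalar is a plain arithmetic type
template<typename T>
struct Traits<T, typename std::enable_if<std::is_arithmetic<typename T::Scalar>::value>::type> {
  using scalar_type = typename T::Scalar;                          // ultimate base scalar
};

// Chosen when Scalar is itself a container: forward to the container's traits
template<typename T>
struct Traits<T, typename std::enable_if<is_container<typename T::Scalar>::value>::type> {
  using scalar_type = typename T::Scalar::scalar_type;             // ultimate base scalar
};

int main() {
  static_assert(std::is_same<Traits<ScalarTensor>::scalar_type, double>::value, "scalar");
  static_assert(std::is_same<Traits<ContainerTensor>::scalar_type, std::complex<float>>::value, "container");
  std::cout << "trait dispatch OK" << std::endl;
}
```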
@@ -310,15 +320,15 @@ namespace Grid {
       // If the Tensor isn't in Row-Major order, then we'll need to copy its data
       const bool CopyData{NumElements > 1 && ETensor::Layout != Eigen::StorageOptions::RowMajor};
       const Scalar * pWriteBuffer;
-      Scalar * pCopyBuffer = nullptr;
+      std::vector<Scalar> CopyBuffer;
       const Index TotalNumElements = NumElements * Traits::count;
       if( !CopyData ) {
         pWriteBuffer = getFirstScalar( output );
       } else {
         // Regardless of the Eigen::Tensor storage order, the copy will be Row Major
-        pCopyBuffer = new Scalar[TotalNumElements];
-        pWriteBuffer = pCopyBuffer;
-        Scalar * pCopy = pCopyBuffer;
+        CopyBuffer.resize( TotalNumElements );
+        Scalar * pCopy = &CopyBuffer[0];
+        pWriteBuffer = pCopy;
         std::array<Index, TensorRank> MyIndex;
         for( auto &idx : MyIndex ) idx = 0;
         for( auto n = 0; n < NumElements; n++ ) {
@@ -330,7 +340,6 @@ namespace Grid {
         }
       }
       upcast->template writeMultiDim<Scalar>(s, TotalDims, pWriteBuffer, TotalNumElements);
-      if( pCopyBuffer ) delete [] pCopyBuffer;
     }
 
     template <typename T>
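Note: replacing the raw `new[]` copy buffer with a `std::vector` makes the temporary exception-safe: the buffer is freed on every exit path, including an exception thrown by the writer, and the trailing `delete []` disappears. A sketch of the difference, with a hypothetical `writeData` sink standing in for `writeMultiDim`:

```cpp
#include <cstddef>
#include <vector>

void writeData(const double *, std::size_t) {}   // hypothetical sink; assume it may throw

void writeCopyRaw(const double *src, std::size_t n) {
  double *buf = new double[n];
  for (std::size_t i = 0; i < n; ++i) buf[i] = src[i];
  writeData(buf, n);                             // if this throws, buf leaks
  delete [] buf;
}

void writeCopyRAII(const double *src, std::size_t n) {
  std::vector<double> buf(src, src + n);         // vector owns the storage
  writeData(buf.data(), n);                      // freed automatically on any exit path
}
```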
@@ -51,7 +51,7 @@ namespace Grid
     std::vector<std::string> path_;
     H5NS::H5File file_;
     H5NS::Group group_;
-    unsigned int dataSetThres_{HDF5_DEF_DATASET_THRES};
+    const unsigned int dataSetThres_{HDF5_DEF_DATASET_THRES};
   };
 
   class Hdf5Reader: public Reader<Hdf5Reader>
@@ -117,15 +117,8 @@ namespace Grid
       // write the entire dataset to file
       H5NS::DataSpace dataSpace(rank, dim.data());
 
-      size_t DataSize = NumElements * sizeof(U);
-      if (DataSize > dataSetThres_)
+      if (NumElements > dataSetThres_)
       {
-        // First few prime numbers from https://oeis.org/A000040
-        static const unsigned short Primes[] = { 2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31,
-          37, 41, 43, 47, 53, 59, 61, 67, 71, 73, 79, 83, 89, 97, 101, 103, 107, 109,
-          113, 127, 131, 137, 139, 149, 151, 157, 163, 167, 173, 179, 181, 191, 193,
-          197, 199, 211, 223, 227, 229, 233, 239, 241, 251, 257, 263, 269, 271 };
-        constexpr int NumPrimes = sizeof( Primes ) / sizeof( Primes[0] );
         // Make sure 1) each dimension; and 2) chunk size is < 4GB
         const hsize_t MaxElements = ( sizeof( U ) == 1 ) ? 0xffffffff : 0x100000000 / sizeof( U );
         hsize_t ElementsPerChunk = 1;
@@ -136,13 +129,9 @@ namespace Grid
           d = 1; // Chunk size is already as big as can be - remaining dimensions = 1
         else {
           // If individual dimension too big, reduce by prime factors if possible
-          for( int PrimeIdx = 0; d > MaxElements && PrimeIdx < NumPrimes; ) {
-            if( d % Primes[PrimeIdx] )
-              PrimeIdx++;
-            else
-              d /= Primes[PrimeIdx];
-          }
-          const char ErrorMsg[] = " dimension > 4GB without small prime factors. "
+          while( d > MaxElements && ( d & 1 ) == 0 )
+            d >>= 1;
+          const char ErrorMsg[] = " dimension > 4GB and not divisible by 2^n. "
           "Hdf5IO chunk size will be inefficient. NB Serialisation is not intended for large datasets - please consider alternatives.";
           if( d > MaxElements ) {
             std::cout << GridLogWarning << "Individual" << ErrorMsg << std::endl;
@@ -156,17 +145,13 @@ namespace Grid
           ElementsPerChunk *= d;
           assert( OverflowCheck == ElementsPerChunk / d && "Product of dimensions overflowed hsize_t" );
           // If product of dimensions too big, reduce by prime factors
-          for( int PrimeIdx = 0; ElementsPerChunk > MaxElements && PrimeIdx < NumPrimes; ) {
+          while( ElementsPerChunk > MaxElements && ( ElementsPerChunk & 1 ) == 0 ) {
             bTooBig = true;
-            if( d % Primes[PrimeIdx] )
-              PrimeIdx++;
-            else {
-              d /= Primes[PrimeIdx];
-              ElementsPerChunk /= Primes[PrimeIdx];
-            }
+            d >>= 1;
+            ElementsPerChunk >>= 1;
           }
           if( ElementsPerChunk > MaxElements ) {
-            std::cout << GridLogMessage << "Product of" << ErrorMsg << std::endl;
+            std::cout << GridLogWarning << "Product of" << ErrorMsg << std::endl;
             hsize_t quotient = ElementsPerChunk / MaxElements;
             if( ElementsPerChunk % MaxElements )
               quotient++;
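Note: the new chunk-sizing logic drops the prime-factor table in favour of simple halving: a chunk dimension (or the running product of dimensions) is divided by 2 while it is even and still exceeds the 4GB HDF5 chunk limit, warning when an odd size cannot be reduced. A standalone sketch of that strategy (illustrative helper, not the Grid code):

```cpp
#include <cassert>
#include <cstdint>

// Shrink a chunk dimension by factors of 2 until it fits under maxElements.
// May return a value still above the limit if the dimension is odd.
uint64_t shrinkToFit(uint64_t d, uint64_t maxElements) {
  while (d > maxElements && (d & 1) == 0)
    d >>= 1;
  return d;
}

int main() {
  const uint64_t Max = uint64_t(0x100000000) / sizeof(double); // 4GB worth of doubles
  assert(shrinkToFit(uint64_t(1) << 33, Max) == Max);          // power of two: fits exactly
  assert(shrinkToFit((uint64_t(1) << 33) + 1, Max) > Max);     // odd: cannot be halved
}
```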
@@ -270,7 +255,7 @@ namespace Grid
     // read the flat vector
     buf.resize(size);
 
-    if (size * sizeof(Element) > dataSetThres_)
+    if (size > dataSetThres_)
     {
       H5NS::DataSet dataSet;
 
@@ -1,271 +0,0 @@
-/*************************************************************************************
-
-  Grid physics library, www.github.com/paboyle/Grid
-
-  Source file: Grid/util/EigenUtil.h
-
-  Copyright (C) 2019
-
-  Author: Michael Marshall <michael.marshall@ed.ac.uk>
-
-  This program is free software; you can redistribute it and/or modify
-  it under the terms of the GNU General Public License as published by
-  the Free Software Foundation; either version 2 of the License, or
-  (at your option) any later version.
-
-  This program is distributed in the hope that it will be useful,
-  but WITHOUT ANY WARRANTY; without even the implied warranty of
-  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-  GNU General Public License for more details.
-
-  You should have received a copy of the GNU General Public License along
-  with this program; if not, write to the Free Software Foundation, Inc.,
-  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
-
-  See the full license in the file "LICENSE" in the top level distribution directory
-*************************************************************************************/
-/* END LEGAL */
-#ifndef GRID_UTIL_EIGENUTIL_H
-#define GRID_UTIL_EIGENUTIL_H
-#include <Grid/tensors/Tensor_traits.h>
-#include <Grid/Eigen/unsupported/CXX11/Tensor>
-
-namespace Grid {
-  // Custom iterator for Eigen tensors
-  namespace EigenUtil {
-    template <typename ETensor, bool bConst> // Is the tensor constant
-    class TensorIterator_raw{
-    public:
-      using Index = typename ETensor::Index;
-      using Scalar = typename ETensor::Scalar;
-      using FullIndex = std::array<Index, ETensor::NumIndices>;
-      const ETensor * pET;
-      const Index end;  // same as pET->size()
-      Index position;   // position (memory order)
-      Index Seq;        // sequence (what our position would be if we were column major)
-      FullIndex indexPos;
-      FullIndex indexSize;
-
-      inline TensorIterator_raw( ETensor & eT, Index pos = 0 ) : pET{&eT}, position{pos}, Seq{pos}, end{pET->size()} {
-        for( int i = 0 ; i < ETensor::NumIndices ; i++ ) {
-          indexPos[i] = 0;
-          indexSize[i] = pET->dimension(i);
-        }
-      }
-      inline TensorIterator_raw<ETensor, bConst> & operator++() {
-        auto sz = pET->size();
-        if( position < sz ) {
-          position++;
-          if( ETensor::Options & Eigen::RowMajor ) {
-            for( int i = ETensor::NumIndices - 1; i != -1 && ++indexPos[i] == indexSize[i]; i-- )
-              indexPos[i] = 0;
-            Seq++;
-          } else {
-            for( int i = 0; i < ETensor::NumIndices && ++indexPos[i] == indexSize[i]; i++ )
-              indexPos[i] = 0;
-            Seq = 0;
-            for( int i = 0; i < ETensor::NumIndices; i++ ) {
-              Seq *= indexSize[i];
-              Seq += indexPos[i];
-            }
-          }
-        }
-        return * this;
-      }
-      inline typename std::conditional<bConst,const Scalar &,Scalar &>::type operator*() const {
-        assert( position >= 0 && position < pET->size() && "Attempt to access Eigen tensor iterator out of range" );
-        return ( ( typename std::conditional<bConst,const Scalar *,Scalar*>::type ) pET->data() )[position];
-      }
-      inline bool operator!=(const TensorIterator_raw<ETensor, bConst> &r)
-      { return pET == nullptr || pET != r.pET || position != r.position; }
-      // These functions aren't required for iterators, but they make using them easier
-      inline bool AtEnd() { return position == end; }
-      inline void DumpIndex(void) {
-        for( auto dim : indexPos )
-          std::cout << "[" << dim << "]";
-      }
-    };
-  }
-}
-
-// The only way I could get these iterators to work is to put the begin() and end() functions in the Eigen namespace
-// So if Eigen ever defines these, we'll have a conflict and have to change this
-namespace Eigen {
-  template <typename ETensor> using TensorIterator      = Grid::EigenUtil::TensorIterator_raw<      ETensor, false>;
-  template <typename ETensor> using TensorIteratorConst = Grid::EigenUtil::TensorIterator_raw<const ETensor, true>;
-  template <typename ETensor>
-  inline typename std::enable_if<Grid::EigenIO::is_tensor<ETensor>::value, TensorIterator<ETensor>>::type
-  begin( ETensor & ET ) { return TensorIterator<ETensor>(ET); }
-  template <typename ETensor>
-  inline typename std::enable_if<Grid::EigenIO::is_tensor<ETensor>::value, TensorIterator<ETensor>>::type
-  end( ETensor & ET ) { return TensorIterator<ETensor>(ET, ET.size()); }
-  template <typename ETensor>
-  inline typename std::enable_if<Grid::EigenIO::is_tensor<ETensor>::value, TensorIteratorConst<ETensor>>::type
-  begin( const ETensor & ET ) { return TensorIteratorConst<ETensor>(ET); }
-  template <typename ETensor>
-  inline typename std::enable_if<Grid::EigenIO::is_tensor<ETensor>::value, TensorIteratorConst<ETensor>>::type
-  end( const ETensor & ET ) { return TensorIteratorConst<ETensor>(ET, ET.size()); }
-}
-
-namespace Grid {
-  // for_all helper function to call the lambda for scalar
-  template <typename ETensor, typename Lambda>
-  typename std::enable_if<EigenIO::is_tensor_of_scalar<ETensor>::value, void>::type
-  for_all_do_lambda( Lambda lambda, typename ETensor::Scalar &scalar, typename ETensor::Index &Seq,
-                     const std::array<typename ETensor::Index, ETensor::NumIndices> &MyIndex,
-                     const std::array<int, EigenIO::Traits<ETensor>::Rank> &DimGridTensor,
-                     std::array<int, EigenIO::Traits<ETensor>::Rank> &GridTensorIndex)
-  {
-    lambda( scalar, Seq++, MyIndex, GridTensorIndex );
-  }
-
-  // for_all helper function to call the lambda for container
-  template <typename ETensor, typename Lambda>
-  typename std::enable_if<EigenIO::is_tensor_of_container<ETensor>::value, void>::type
-  for_all_do_lambda( Lambda lambda, typename ETensor::Scalar &container, typename ETensor::Index &Seq,
-                     const std::array<typename ETensor::Index, ETensor::NumIndices> &MyIndex,
-                     const std::array<int, EigenIO::Traits<ETensor>::Rank> &DimGridTensor,
-                     std::array<int, EigenIO::Traits<ETensor>::Rank> &GridTensorIndex)
-  {
-    using Traits = EigenIO::Traits<ETensor>;
-    const int InnerRank = Traits::Rank;
-    for( typename Traits::scalar_type &Source : container ) {
-      lambda(Source, Seq++, MyIndex, GridTensorIndex );
-      // Now increment SubIndex
-      for( int i = InnerRank - 1; i != -1 && ++GridTensorIndex[i] == DimGridTensor[i]; i-- )
-        GridTensorIndex[i] = 0;
-    }
-  }
-
-  // Calls a lambda (passing index and sequence number) for every member of an Eigen::Tensor
-  // For efficiency, iteration proceeds in memory order,
-  // ... but parameters guaranteed to be the same regardless of memory order
-  template <typename ETensor, typename Lambda>
-  typename std::enable_if<EigenIO::is_tensor<ETensor>::value, void>::type
-  for_all( ETensor &ET, Lambda lambda )
-  {
-    using Scalar = typename ETensor::Scalar; // This could be a Container - we'll check later
-    using Index = typename ETensor::Index;
-    using Traits = EigenIO::Traits<ETensor>;
-    // Check that the number of elements in the container is the product of tensor dimensions
-    const Index NumScalars = ET.size();
-    assert( NumScalars > 0 && "EigenUtil: tensor has no elements" );
-    Index ScalarElementCount{1};
-    const int rank{ETensor::NumIndices};
-    std::array<Index, rank> DimTensor, MyIndex;
-    for(int i = 0; i < rank; i++ ) {
-      DimTensor[i] = ET.dimension(i);
-      ScalarElementCount *= DimTensor[i];
-      MyIndex[i] = 0;
-    }
-    assert( NumScalars == ScalarElementCount && "EigenUtil: tensor size not product of dimensions" );
-    // Save the GridTensor dimensions
-    const int InnerRank{Traits::Rank};
-    std::array<int, InnerRank> DimGridTensor, GridTensorIndex;
-    for(int i = 0; i < InnerRank; i++ ) {
-      DimGridTensor[i] = Traits::Dimension(i);
-      GridTensorIndex[i] = 0;
-    }
-    // Now walk the tensor in memory order
-    Index Seq = 0;
-    Scalar * pScalar = ET.data();
-    for( Index j = 0; j < NumScalars; j++ ) {
-      for_all_do_lambda<ETensor, Lambda>( lambda, * pScalar, Seq, MyIndex, DimGridTensor, GridTensorIndex );
-      // Now increment the index to pass to the lambda (bearing in mind we're walking in memory order)
-      if( ETensor::Options & Eigen::RowMajor ) {
-        for( int i = rank - 1; i != -1 && ++MyIndex[i] == DimTensor[i]; i-- )
-          MyIndex[i] = 0;
-      } else {
-        for( int i = 0; i < rank && ++MyIndex[i] == DimTensor[i]; i++ )
-          MyIndex[i] = 0;
-        Seq = 0;
-        for( int i = 0; i < rank; i++ ) {
-          Seq *= DimTensor[i];
-          Seq += MyIndex[i];
-        }
-        Seq *= Traits::count;
-      }
-      pScalar++;
-    }
-  }
-
-  // Sequential initialisation of tensors up to specified precision
-  // Would have preferred to define template variables for this, but that's C++17
-  template <typename ETensor>
-  typename std::enable_if<EigenIO::is_tensor<ETensor>::value && !EigenIO::Traits<ETensor>::is_complex>::type
-  SequentialInit( ETensor &ET, typename EigenIO::Traits<ETensor>::scalar_type Inc = 1, unsigned short Precision = 0 )
-  {
-    using Traits = EigenIO::Traits<ETensor>;
-    using scalar_type = typename Traits::scalar_type;
-    using Index = typename ETensor::Index;
-    for_all( ET, [&](scalar_type &c, Index n, const std::array<Index, ETensor::NumIndices> &TensorIndex,
-                     const std::array<int, Traits::Rank> &ScalarIndex ) {
-      scalar_type x = Inc * static_cast<scalar_type>(n);
-      if( Precision ) {
-        std::stringstream s;
-        s << std::setprecision(Precision) << x;
-        s >> x;
-      }
-      c = x;
-    } );
-  }
-
-  template <typename ETensor>
-  typename std::enable_if<EigenIO::is_tensor<ETensor>::value && EigenIO::Traits<ETensor>::is_complex>::type
-  SequentialInit( ETensor &ET, typename EigenIO::Traits<ETensor>::scalar_type Inc={1,-1}, unsigned short Precision = 0 )
-  {
-    using Traits = EigenIO::Traits<ETensor>;
-    using scalar_type = typename Traits::scalar_type;
-    using Index = typename ETensor::Index;
-    for_all( ET, [&](scalar_type &c, Index n, const std::array<Index, ETensor::NumIndices> &TensorIndex,
-                     const std::array<int, Traits::Rank> &ScalarIndex ) {
-      auto re = Inc.real();
-      auto im = Inc.imag();
-      re *= n;
-      im *= n;
-      if( Precision ) {
-        std::stringstream s;
-        s << std::setprecision(Precision) << re;
-        s >> re;
-        s.clear();
-        s << im;
-        s >> im;
-      }
-      c = scalar_type(re,im);
-    } );
-  }
-
-  // Helper to dump a tensor
-  template <typename T>
-  typename std::enable_if<EigenIO::is_tensor<T>::value, void>::type
-  dump_tensor(T &t, const char * pName = nullptr)
-  {
-    using Traits = EigenIO::Traits<T>;
-    using scalar_type = typename Traits::scalar_type;
-    using Index = typename T::Index;
-    const int rank{T::NumIndices};
-    const auto &dims = t.dimensions();
-    std::cout << "Dumping rank " << rank + Traits::Rank << ((T::Options & Eigen::RowMajor) ? ", row" : ", column") << "-major tensor ";
-    if( pName )
-      std::cout << pName;
-    for( int i = 0 ; i < rank; i++ ) std::cout << "[" << dims[i] << "]";
-    for( int i = 0 ; i < Traits::Rank; i++ ) std::cout << "(" << Traits::Dimension(i) << ")";
-    std::cout << " in memory order:" << std::endl;
-    for( auto it = begin(t); !it.AtEnd(); ++it ) {
-      std::cout << " ";
-      it.DumpIndex();
-      std::cout << " = " << (const typename T::Scalar)(*it) << std::endl;
-    }
-    std::cout << "========================================" << std::endl;
-  }
-
-  template <typename T>
-  typename std::enable_if<!EigenIO::is_tensor<T>::value, void>::type
-  dump_tensor(T &t, const char * pName = nullptr)
-  {
-    std::cout << "Dumping non-tensor object ";
-    if( pName ) std::cout << pName;
-    std::cout << "=" << t;
-  }
-}
-#endif
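Note: the interesting part of the deleted iterator is the `Seq` bookkeeping: when a column-major tensor is walked in memory order, `Seq` recomputes what the flat position would be in row-major (lexicographic) order, so callers see the same sequence numbers regardless of storage order. A standalone sketch of that mixed-radix linearisation (hypothetical helper, not the Grid code):

```cpp
#include <array>
#include <cassert>

// Row-major linearisation of a multi-index: what "Seq" computes above for
// column-major tensors. seq = ((pos[0]*size[1] + pos[1])*size[2] + pos[2])...
template <std::size_t N>
long rowMajorSeq(const std::array<long, N> &pos, const std::array<long, N> &size) {
  long seq = 0;
  for (std::size_t i = 0; i < N; ++i) {
    seq *= size[i];
    seq += pos[i];
  }
  return seq;
}

int main() {
  const std::array<long, 3> size{{2, 3, 4}};
  const std::array<long, 3> origin{{0, 0, 0}};
  const std::array<long, 3> second{{1, 0, 0}};
  const std::array<long, 3> last{{1, 2, 3}};
  assert(rowMajorSeq<3>(origin, size) == 0);
  assert(rowMajorSeq<3>(second, size) == 12);  // stride of the first index is 3*4
  assert(rowMajorSeq<3>(last, size) == 23);    // last element
}
```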
@@ -7,6 +7,7 @@ Source file: Hadrons/Modules/MGauge/GaugeFix.hpp
 Copyright (C) 2015-2019
 
 Author: Antonin Portelli <antonin.portelli@me.com>
+Author: Nils Asmussen <n.asmussen@soton.ac.uk>
 Author: Peter Boyle <paboyle@ph.ed.ac.uk>
 
 This program is free software; you can redistribute it and/or modify
@@ -58,6 +59,7 @@ class TGaugeFix: public Module<GaugeFixPar>
 {
 public:
     GAUGE_TYPE_ALIASES(GImpl,);
+    typedef typename GImpl::GaugeLinkField GaugeMat;
 public:
     // constructor
     TGaugeFix(const std::string name);
@@ -94,7 +96,7 @@ std::vector<std::string> TGaugeFix<GImpl>::getInput(void)
 template <typename GImpl>
 std::vector<std::string> TGaugeFix<GImpl>::getOutput(void)
 {
-    std::vector<std::string> out = {getName()};
+    std::vector<std::string> out = {getName(), getName()+"_xform"};
     return out;
 }
 
@@ -103,6 +105,7 @@ template <typename GImpl>
 void TGaugeFix<GImpl>::setup(void)
 {
     envCreateLat(GaugeField, getName());
+    envCreateLat(GaugeMat, getName()+"_xform");
 }
 
 
@@ -116,6 +119,7 @@ void TGaugeFix<GImpl>::execute(void)
     LOG(Message) << par().gauge << std::endl;
     auto &U = envGet(GaugeField, par().gauge);
     auto &Umu = envGet(GaugeField, getName());
+    auto &xform = envGet(GaugeMat, getName()+"_xform");
     LOG(Message) << "Gauge Field fetched" << std::endl;
     //do we allow maxiter etc to be user set?
     Real alpha = par().alpha;
@@ -123,8 +127,8 @@ void TGaugeFix<GImpl>::execute(void)
     Real Omega_tol = par().Omega_tol;
     Real Phi_tol = par().Phi_tol;
     bool Fourier = par().Fourier;
-    FourierAcceleratedGaugeFixer<PeriodicGimplR>::SteepestDescentGaugeFix(U,alpha,maxiter,Omega_tol,Phi_tol,Fourier);
     Umu = U;
+    FourierAcceleratedGaugeFixer<PeriodicGimplR>::SteepestDescentGaugeFix(Umu,xform,alpha,maxiter,Omega_tol,Phi_tol,Fourier);
     LOG(Message) << "Gauge Fixed" << std::endl;
 }
 
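Note: the new `_xform` output stores the gauge-transformation field g(x) found by the fixer, so a downstream module can apply the same transformation to other quantities (e.g. propagators or eigenvectors computed on the unfixed configuration). Under the standard lattice convention (assumed here; the diff itself does not spell it out) the fixed links are related to the originals by

U^{(g)}_\mu(x) = g(x)\, U_\mu(x)\, g^\dagger(x+\hat{\mu})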
@@ -29,7 +29,6 @@ Author: Michael Marshall <michael.marshall@ed.ac.uk>
 *************************************************************************************/
 /* END LEGAL */
 #include <Grid/Grid.h>
-#include <Grid/util/EigenUtil.h>
 #include <typeinfo>
 
 using namespace Grid;
@@ -103,13 +102,22 @@ void ioTest(const std::string &filename, const O &object, const std::string &nam
   bool good = Serializable::CompareMember(object, *buf);
   if (!good) {
     std::cout << " failure!" << std::endl;
-    if (EigenIO::is_tensor<O>::value)
-      dump_tensor(*buf);
     exit(EXIT_FAILURE);
   }
   std::cout << " done." << std::endl;
 }
 
+// The only way I could get these iterators to work is to put the begin() and end() functions in the Eigen namespace
+// So if Eigen ever defines these, we'll have a conflict and have to change this
+namespace Eigen {
+  template <typename ET>
+  inline typename std::enable_if<EigenIO::is_tensor<ET>::value, typename EigenIO::Traits<ET>::scalar_type *>::type
+  begin( ET & et ) { return reinterpret_cast<typename Grid::EigenIO::Traits<ET>::scalar_type *>(et.data()); }
+  template <typename ET>
+  inline typename std::enable_if<EigenIO::is_tensor<ET>::value, typename EigenIO::Traits<ET>::scalar_type *>::type
+  end( ET & et ) { return begin(et) + et.size() * EigenIO::Traits<ET>::count; }
+}
+
 // Perform I/O tests on a range of tensor types
 // Test coverage: scalars, complex and GridVectors in single, double and default precision
 class TensorIO : public Serializable {
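Note: the pointer-based `begin`/`end` added above are what allow the test to write `for( auto &s : tensor ) s = Flag;` — range-based for finds free `begin`/`end` via argument-dependent lookup and then walks the underlying scalars as a flat array. A self-contained sketch of the same idiom (stand-in `Buffer` type, not Eigen):

```cpp
#include <iostream>

struct Buffer {                      // stand-in for a contiguous tensor
  float storage[6] = {0, 1, 2, 3, 4, 5};
  float *data() { return storage; }
  long size() const { return 6; }
};

// Free begin()/end() found by ADL; no iterator class needed for flat data
float *begin(Buffer &b) { return b.data(); }
float *end(Buffer &b)   { return b.data() + b.size(); }

int main() {
  Buffer b;
  for (auto &s : b) s = -1.0f;       // assign every underlying scalar
  for (auto s : b) std::cout << s << ' ';
  std::cout << std::endl;
}
```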
@@ -133,14 +141,14 @@ class TensorIO : public Serializable {
 
     void Init(unsigned short Precision)
     {
-      SequentialInit(Perambulator1, Flag, Precision);
-      SequentialInit(Perambulator2, Flag, Precision);
-      SequentialInit(tensorR5, FlagR, Precision);
-      SequentialInit(tensorRank3, FlagF, Precision);
-      SequentialInit(tensor_9_4_2, FlagTS, Precision);
+      for( auto &s : Perambulator1 ) s = Flag;
+      for( auto &s : Perambulator2 ) s = Flag;
+      for( auto &s : tensorR5 ) s = FlagR;
+      for( auto &s : tensorRank3 ) s = FlagF;
+      for( auto &s : tensor_9_4_2 ) s = FlagTS;
       for( auto &t : atensor_9_4_2 )
-        SequentialInit(t, FlagTS, Precision);
-      SequentialInit(MyLSCTensor, Flag, Precision);
+        for( auto &s : t ) s = FlagTS;
+      for( auto &s : MyLSCTensor ) s = Flag;
     }
 
     // Perform an I/O test for a single Eigen tensor (of any type)
@@ -152,7 +160,7 @@ class TensorIO : public Serializable {
       using Traits = EigenIO::Traits<T>;
       using scalar_type = typename Traits::scalar_type;
       std::unique_ptr<T> pTensor{new T(otherDims...)};
-      SequentialInit( * pTensor, Flag, Precision );
+      for( auto &s : * pTensor ) s = Flag;
       filename = pszFilePrefix + std::to_string(++TestNum) + "_" + MyTypeName + pszExtension;
      ioTest<W, R, T>(filename, * pTensor, MyTypeName, MyTypeName);
     }
@@ -191,41 +199,15 @@ public:
     // Rank 1 tensor containing a single integer
     using TensorSingle = Eigen::TensorFixedSize<Integer, Eigen::Sizes<1>>;
     TestOne<WTR_, RDR_, TensorSingle>( TEST_PARAMS( TensorSingle ), 7 ); // lucky!
-    // Rather convoluted way of defining a single complex number
-    using TensorSimple = Eigen::Tensor<iMatrix<TestScalar,1>, 6>;
+    // Rather convoluted way of defining four complex numbers
+    using TensorSimple = Eigen::Tensor<iMatrix<TestScalar,2>, 6>;
     using I = typename TensorSimple::Index; // NB: Never specified, so same for all my test tensors
     // Try progressively more complicated tensors
     TestOne<WTR_, RDR_, TensorSimple, I,I,I,I,I,I>( TEST_PARAMS( TensorSimple ), FlagTS, 1,1,1,1,1,1 );
     TestOne<WTR_, RDR_, TensorRank3, I, I, I>( TEST_PARAMS( TensorRank3 ), FlagF, 6, 3, 2 );
     TestOne<WTR_, RDR_, Tensor942>(TEST_PARAMS( Tensor942 ), FlagTS);
     TestOne<WTR_, RDR_, LSCTensor>(TEST_PARAMS( LSCTensor ), Flag );
-
-    // Now see whether we can write a tensor in one memory order and read back in the other
-    {
-      TestOne<WTR_, RDR_, TensorR5>(TEST_PARAMS( TensorR5 ), FlagR);
-      std::cout << "    Testing alternate memory order read ... ";
-      TensorR5Alt t2;
-      RDR_ reader(filename);
-      ::Grid::read(reader, "TensorR5", t2);
-      bool good = true;
-      TensorR5 cf;
-      SequentialInit( cf, FlagR, Precision );
-      for_all( t2, [&](typename EigenIO::Traits<TensorR5Alt>::scalar_type c, I n,
-                       const std::array<I, TensorR5Alt::NumIndices> &TensorIndex,
-                       const std::array<int, EigenIO::Traits<TensorR5Alt>::Rank> &GridTensorIndex ){
-        Real &r = cf(TensorIndex);
-        if( c != r ){
-          good = false;
-          std::cout << "\nError: " << n << ": " << c << " != " << r;
-        }
-      } );
-      if (!good) {
-        std::cout << std::endl;
-        dump_tensor(t2,"t2");
-        exit(EXIT_FAILURE);
-      }
-      std::cout << " done." << std::endl;
-    }
+    TestOne<WTR_, RDR_, TensorR5>(TEST_PARAMS( TensorR5 ), FlagR);
     // Now test a serialisable object containing a number of tensors
     {
       static const char MyTypeName[] = "TensorIO";
@@ -247,10 +229,10 @@ public:
     }
 };
 
-const Real TensorIO::FlagR {-1.001};
-const Complex TensorIO::Flag {1,-3.1415927};
-const ComplexF TensorIO::FlagF {1,-3.1415927};
-const TensorIO::TestScalar TensorIO::FlagTS{1,-3.1415927};
+const Real TensorIO::FlagR {1};
+const Complex TensorIO::Flag {1,-1};
+const ComplexF TensorIO::FlagF {1,-1};
+const TensorIO::TestScalar TensorIO::FlagTS{1,-1};
 const char * const TensorIO::pszFilePrefix = "tensor_";
 
 template <typename T>