Added Scalar_ length and Scalar_Unit_Size to Perambulator file for validation
commit 5b0870bb19 (parent 7f5354630a)
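
In brief: the NamedTensor/Perambulator template now takes the fundamental unit size as a non-type parameter (defaulting to sizeof(Scalar_)) instead of a separate Scalar_Unit type, and WriteBinary/ReadBinary gain two extra header fields, sizeof(Scalar_) and Scalar_Unit_Size, which ReadBinary validates on load. For orientation only, a hedged sketch of the resulting on-disk header layout; the field names below are invented here, and only the write order and widths come from the hunks that follow. All multi-byte fields are big-endian on disk.

    // Illustrative only -- not code from this commit.
    #include <cstdint>

    struct PerambulatorHeaderSketch {
        uint64_t TotalDataSize;    // NumElements * sizeof(Scalar_), in bytes (already present)
        uint32_t Scalar_Size;      // sizeof(Scalar_)              -- new in this commit
        uint16_t Scalar_Unit_Size; // size of the fundamental unit -- new in this commit
        uint16_t NumNonUnitDims;   // number of dimensions which aren't 1 (already present)
        // ...followed by per-dimension extents and names, the tensor data,
        // and a trailing CRC32/CRC32C checksum, all unchanged by this commit.
    };
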
@@ -238,7 +238,7 @@ inline GridCartesian * MakeLowerDimGrid( GridCartesian * gridHD )
                              Perambulator object
 ******************************************************************************/
 
-template<typename Scalar_, typename Scalar_Unit, int NumIndices_>
+template<typename Scalar_, int NumIndices_, uint16_t Scalar_Unit_Size = sizeof(Scalar_)>
 class NamedTensor : public Eigen::Tensor<Scalar_, NumIndices_, Eigen::RowMajor | Eigen::DontAlign>
 {
 public:
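
The default Scalar_Unit_Size = sizeof(Scalar_) lets callers that do not care about the unit size omit the third argument. A minimal self-contained sketch (hypothetical names, not Grid code) of how such a defaulted non-type parameter behaves:

    // Hypothetical stand-in for NamedTensor, only to show the defaulted
    // non-type template parameter introduced above.
    #include <complex>
    #include <cstdint>

    template<typename Scalar_, int NumIndices_, uint16_t Scalar_Unit_Size = sizeof(Scalar_)>
    struct NamedTensorLike {
        static constexpr uint16_t unit_size = Scalar_Unit_Size;
    };

    // Omitting the parameter picks up sizeof(Scalar_)...
    static_assert(NamedTensorLike<std::complex<double>, 3>::unit_size
                  == sizeof(std::complex<double>), "default is sizeof(Scalar_)");
    // ...while call sites such as Perambulator<SpinVector COMMA 6 COMMA sizeof(Real)>
    // pass the fundamental element size explicitly.
    static_assert(NamedTensorLike<std::complex<double>, 3, sizeof(double)>::unit_size
                  == sizeof(double), "explicit unit size");
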
@@ -269,22 +269,28 @@ public:
               Save NamedTensor binary format (NB: On-disk format is Big Endian)
 ******************************************************************************/
 
-template<typename Scalar_, typename Scalar_Unit, int NumIndices_>
-void NamedTensor<Scalar_, Scalar_Unit, NumIndices_>::WriteBinary(const std::string filename) {
+template<typename Scalar_, int NumIndices_, uint16_t Scalar_Unit_Size>
+void NamedTensor<Scalar_, NumIndices_, Scalar_Unit_Size>::WriteBinary(const std::string filename) {
   LOG(Message) << "Writing NamedTensor to \"" << filename << "\"" << std::endl;
   std::ofstream w(filename, std::ios::binary);
-  // Number of Scalar_Unit objects per Scalar_
-  constexpr unsigned int Scalar_Unit_Size{sizeof(Scalar_Unit)};
-  assert((Scalar_Unit_Size == 2 || Scalar_Unit_Size == 4 || Scalar_Unit_Size == 8 )
-         && "Scalar_Unit_Size should be 2, 4 or 8");
+  // Enforce assumption that the scalar is composed of fundamental elements of size Scalar_Unit_Size
+  assert((Scalar_Unit_Size == 1 || Scalar_Unit_Size == 2 || Scalar_Unit_Size == 4 || Scalar_Unit_Size == 8 )
+         && "Scalar_Unit_Size should be 1, 2, 4 or 8");
   assert((sizeof(Scalar_) % Scalar_Unit_Size) == 0 && "Scalar_ is not composed of Scalar_Unit_Size" );
   // Size of the data (in bytes)
+  const uint32_t Scalar_Size{sizeof(Scalar_)};
   const auto NumElements{this->size()};
-  const std::streamsize TotalDataSize{static_cast<std::streamsize>(NumElements * sizeof(Scalar_))};
+  const std::streamsize TotalDataSize{static_cast<std::streamsize>(NumElements * Scalar_Size)};
   uint64_t u64 = htobe64(static_cast<uint64_t>(TotalDataSize));
   w.write(reinterpret_cast<const char *>(&u64), sizeof(u64));
+  // Size of a Scalar_
+  uint32_t u32{htobe32(Scalar_Size)};
+  w.write(reinterpret_cast<const char *>(&u32), sizeof(u32));
+  // Scalar_Unit_Size
+  uint16_t u16{htobe16(Scalar_Unit_Size)};
+  w.write(reinterpret_cast<const char *>(&u16), sizeof(u16));
   // number of dimensions which aren't 1
-  uint16_t u16 = static_cast<uint16_t>(this->NumIndices);
+  u16 = static_cast<uint16_t>(this->NumIndices);
   for( auto dim : this->dimensions() )
     if( dim == 1 )
       u16--;
@@ -331,9 +337,9 @@ void NamedTensor<Scalar_, Scalar_Unit, NumIndices_>::WriteBinary(const std::stri
     * p = be16toh( * p );
   // checksum
 #ifdef USE_IPP
-  uint32_t u32 = htobe32(GridChecksum::crc32c(this->data(), TotalDataSize));
+  u32 = htobe32(GridChecksum::crc32c(this->data(), TotalDataSize));
 #else
-  uint32_t u32 = htobe32(GridChecksum::crc32(this->data(), TotalDataSize));
+  u32 = htobe32(GridChecksum::crc32(this->data(), TotalDataSize));
 #endif
   w.write(reinterpret_cast<const char *>(&u32), sizeof(u32));
 }
@@ -342,29 +348,36 @@ void NamedTensor<Scalar_, Scalar_Unit, NumIndices_>::WriteBinary(const std::stri
               Load NamedTensor binary format (NB: On-disk format is Big Endian)
 ******************************************************************************/
 
-template<typename Scalar_, typename Scalar_Unit, int NumIndices_>
-void NamedTensor<Scalar_, Scalar_Unit, NumIndices_>::ReadBinary(const std::string filename) {
+template<typename Scalar_, int NumIndices_, uint16_t Scalar_Unit_Size>
+void NamedTensor<Scalar_, NumIndices_, Scalar_Unit_Size>::ReadBinary(const std::string filename) {
   LOG(Message) << "Reading NamedTensor from \"" << filename << "\"" << std::endl;
   std::ifstream r(filename, std::ios::binary);
-  // Number of Scalar_Unit objects per Scalar_
-  constexpr unsigned int Scalar_Unit_Size{sizeof(Scalar_Unit)};
-  assert((Scalar_Unit_Size == 2 || Scalar_Unit_Size == 4 || Scalar_Unit_Size == 8 )
-         && "Scalar_Unit_Size should be 2, 4 or 8");
-  assert((sizeof(Scalar_) % Scalar_Unit_Size) == 0 && "Scalar_ is not composed of Scalar_Unit_Size" );
+  // Enforce assumption that the scalar is composed of fundamental elements of size Scalar_Unit_Size
+  assert((Scalar_Unit_Size == 1 || Scalar_Unit_Size == 2 || Scalar_Unit_Size == 4 || Scalar_Unit_Size == 8 )
+         && "NamedTensor error: Scalar_Unit_Size should be 1, 2, 4 or 8");
+  assert((sizeof(Scalar_) % Scalar_Unit_Size) == 0 && "NamedTensor error: Scalar_ is not composed of Scalar_Unit_Size" );
   // Size of the data in bytes
+  const uint32_t Scalar_Size{sizeof(Scalar_)};
   const auto NumElements{this->size()};
-  const std::streamsize TotalDataSize{static_cast<std::streamsize>(NumElements * sizeof(Scalar_))};
+  const std::streamsize TotalDataSize{static_cast<std::streamsize>(NumElements * Scalar_Size)};
   uint64_t u64;
   r.read(reinterpret_cast<char *>(&u64), sizeof(u64));
-  assert( TotalDataSize == be64toh( u64 ) && "Error: Size of the data in bytes" );
-  // number of dimensions which aren't 1
+  assert( TotalDataSize == be64toh( u64 ) && "NamedTensor error: Size of the data in bytes" );
+  // Size of a Scalar_
+  uint32_t u32;
+  r.read(reinterpret_cast<char *>(&u32), sizeof(u32));
+  assert( Scalar_Size == be32toh( u32 ) && "NamedTensor error: sizeof(Scalar_)");
+  // Scalar_Unit_Size
   uint16_t u16;
   r.read(reinterpret_cast<char *>(&u16), sizeof(u16));
+  assert( Scalar_Unit_Size == be16toh( u16 ) && "NamedTensor error: Scalar_Unit_size");
+  // number of dimensions which aren't 1
+  r.read(reinterpret_cast<char *>(&u16), sizeof(u16));
   u16 = be16toh( u16 );
   for( auto dim : this->dimensions() )
     if( dim == 1 )
       u16++;
-  assert( this->NumIndices == u16 && "Error: number of dimensions which aren't 1" );
+  assert( this->NumIndices == u16 && "NamedTensor error: number of dimensions which aren't 1" );
   // dimensions together with names
   int d = 0;
   for( auto dim : this->dimensions() ) {
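
To see why the two new asserts in ReadBinary catch a mismatched file, here is a small self-contained round-trip sketch; it is not Grid code (the file name and example types are made up), but it uses the same endian helpers as the hunks above (glibc <endian.h>):

    #include <cassert>
    #include <complex>
    #include <cstdint>
    #include <endian.h>   // htobe32/be32toh, htobe16/be16toh, as in the diff
    #include <fstream>

    int main() {
        const uint32_t Scalar_Size      = sizeof(std::complex<double>); // example Scalar_
        const uint16_t Scalar_Unit_Size = sizeof(double);               // example fundamental unit

        {   // write the two new header fields big-endian, as WriteBinary does
            std::ofstream w("header_sketch.bin", std::ios::binary);
            uint32_t u32 = htobe32(Scalar_Size);
            uint16_t u16 = htobe16(Scalar_Unit_Size);
            w.write(reinterpret_cast<const char *>(&u32), sizeof(u32));
            w.write(reinterpret_cast<const char *>(&u16), sizeof(u16));
        }
        {   // read them back and validate, as ReadBinary does
            std::ifstream r("header_sketch.bin", std::ios::binary);
            uint32_t u32; uint16_t u16;
            r.read(reinterpret_cast<char *>(&u32), sizeof(u32));
            r.read(reinterpret_cast<char *>(&u16), sizeof(u16));
            assert(Scalar_Size      == be32toh(u32) && "sizeof(Scalar_) mismatch");
            assert(Scalar_Unit_Size == be16toh(u16) && "Scalar_Unit_Size mismatch");
        }
        return 0;
    }
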
@@ -375,11 +388,11 @@ void NamedTensor<Scalar_, Scalar_Unit, NumIndices_>::ReadBinary(const std::strin
       // length of dimension name
       r.read(reinterpret_cast<char *>(&u16), sizeof(u16));
       size_t l = be16toh( u16 );
-      assert( l == IndexNames[d].size() && "length of dimension name" );
+      assert( l == IndexNames[d].size() && "NamedTensor error: length of dimension name" );
       // dimension name
       std::string s( l, '?' );
       r.read(&s[0], l);
-      assert( s == IndexNames[d] && "dimension name" );
+      assert( s == IndexNames[d] && "NamedTensor error: dimension name" );
     }
     d++;
   }
@@ -398,7 +411,6 @@ void NamedTensor<Scalar_, Scalar_Unit, NumIndices_>::ReadBinary(const std::strin
   for(uint16_t * p = reinterpret_cast<uint16_t *>(pStart) ; p < pEnd ; p++ )
     * p = be16toh( * p );
   // checksum
-  uint32_t u32;
   r.read(reinterpret_cast<char *>(&u32), sizeof(u32));
   u32 = be32toh( u32 );
 #ifdef USE_IPP
@@ -406,15 +418,15 @@ void NamedTensor<Scalar_, Scalar_Unit, NumIndices_>::ReadBinary(const std::strin
 #else
   u32 -= GridChecksum::crc32(this->data(), TotalDataSize);
 #endif
-  assert( u32 == 0 && "Perambulator checksum invalid");
+  assert( u32 == 0 && "NamedTensor error: Perambulator checksum invalid");
 }
 
 /******************************************************************************
               Save NamedTensor Hdf5 format
 ******************************************************************************/
 
-template<typename Scalar_, typename Scalar_Unit, int NumIndices_>
-void NamedTensor<Scalar_, Scalar_Unit, NumIndices_>::save(const std::string filename) const {
+template<typename Scalar_, int NumIndices_, uint16_t Scalar_Unit_Size>
+void NamedTensor<Scalar_, NumIndices_, Scalar_Unit_Size>::save(const std::string filename) const {
   LOG(Message) << "Writing NamedTensor to \"" << filename << "\"" << std::endl;
 #ifndef HAVE_HDF5
   LOG(Message) << "Error: I/O for NamedTensor requires HDF5" << std::endl;
@@ -428,8 +440,8 @@ void NamedTensor<Scalar_, Scalar_Unit, NumIndices_>::save(const std::string file
               Load NamedTensor Hdf5 format
 ******************************************************************************/
 
-template<typename Scalar_, typename Scalar_Unit, int NumIndices_>
-void NamedTensor<Scalar_, Scalar_Unit, NumIndices_>::load(const std::string filename) {
+template<typename Scalar_, int NumIndices_, uint16_t Scalar_Unit_Size>
+void NamedTensor<Scalar_, NumIndices_, Scalar_Unit_Size>::load(const std::string filename) {
   LOG(Message) << "Reading NamedTensor from \"" << filename << "\"" << std::endl;
 #ifndef HAVE_HDF5
   LOG(Message) << "Error: I/O for NamedTensor requires HDF5" << std::endl;
@@ -446,8 +458,8 @@ void NamedTensor<Scalar_, Scalar_Unit, NumIndices_>::load(const std::string file
                              Perambulator object
 ******************************************************************************/
 
-template<typename Scalar_, typename Scalar_Unit, int NumIndices_>
-using Perambulator = NamedTensor<Scalar_, Scalar_Unit, NumIndices_>;
+template<typename Scalar_, int NumIndices_, uint16_t Scalar_Unit_Size = sizeof(Scalar_)>
+using Perambulator = NamedTensor<Scalar_, NumIndices_, Scalar_Unit_Size>;
 
 /*************************************************************************************
 
@@ -134,7 +134,7 @@ void TDistilVectors<FImpl>::execute(void)
 
     //auto &noise = envGet(std::vector<std::vector<std::vector<SpinVector>>>, par().noise);
     auto &noise = envGet(std::vector<Complex>, par().noise);
-    auto &perambulator = envGet(Perambulator<SpinVector COMMA Real COMMA 6>, par().perambulator);
+    auto &perambulator = envGet(Perambulator<SpinVector COMMA 6 COMMA sizeof(Real)>, par().perambulator);
     auto &epack = envGet(Grid::Hadrons::EigenPack<LatticeColourVector>, par().eigenPack);
     auto &rho = envGet(std::vector<FermionField>, getName() + "_rho");
     auto &phi = envGet(std::vector<FermionField>, getName() + "_phi");
@@ -139,7 +139,7 @@ void TPerambLight<FImpl>::setup(void)
   //envCreate(std::complex<double>, getName() + "_debug_delete_me_3", 1, z);
   //envCreate(std::complex<double>, getName() + "_debug_delete_me_4", 1, {0.6 COMMA -3.1});
   //envCreate(std::array<std::string COMMA 3>, getName() + "_debug_delete_me_5", 1, {"One" COMMA "Two" COMMA "Three"});
-  envCreate(Perambulator<SpinVector COMMA Real COMMA 6>, getName() + "_perambulator_light", 1,
+  envCreate(Perambulator<SpinVector COMMA 6 COMMA sizeof(Real)>, getName() + "_perambulator_light", 1,
             sIndexNames,Distil.Nt,nvec,Distil.LI,Distil.nnoise,Distil.Nt_inv,Distil.SI);
   envCreate(std::vector<Complex>, getName() + "_noise", 1,
             nvec*Distil.Ns*Distil.Nt*Distil.nnoise);
@@ -195,7 +195,8 @@ void TPerambLight<FImpl>::execute(void)
 
   //auto &noise = envGet(std::vector<std::vector<std::vector<SpinVector>>>, par().noise);
   auto &noise = envGet(std::vector<Complex>, getName() + "_noise");
-  auto &perambulator = envGet(Perambulator<SpinVector COMMA Real COMMA 6>, getName() + "_perambulator_light");
+  auto &perambulator = envGet(Perambulator<SpinVector COMMA 6 COMMA sizeof(Real)>,
+                              getName() + "_perambulator_light");
   auto &epack = envGet(Grid::Hadrons::EigenPack<LatticeColourVector>, par().eigenPack);
   auto &unsmeared_sink = envGet(std::vector<FermionField>, getName() + "_unsmeared_sink");
 
@@ -254,7 +254,7 @@ bool bNumber( int &ri, const char * & pstr, bool bGobbleWhiteSpace = true )
 
 #ifdef DEBUG
 
-typedef Grid::Hadrons::MDistil::NamedTensor<Complex,Real,3> MyTensor;
+typedef Grid::Hadrons::MDistil::NamedTensor<Complex,3,sizeof(Real)> MyTensor;
 
 void DebugShowTensor(MyTensor &x, const char * n)
 {
@@ -305,7 +305,7 @@ bool DebugEigenTest()
   // Test initialisation of an array of strings
   for( auto a : as )
     std::cout << a << std::endl;
-  Grid::Hadrons::MDistil::Perambulator<Complex,Real,3> p{as,2,7,2};
+  Grid::Hadrons::MDistil::Perambulator<Complex,3,sizeof(Real)> p{as,2,7,2};
   DebugShowTensor(p, "p");
   std::cout << "p.IndexNames follow" << std::endl;
   for( auto a : p.IndexNames )