Mirror of https://github.com/paboyle/Grid.git
When saving eigenvectors, LapEvec now saves eigenvalues for every timeslice as well.
That is, nT x nVec eigenvalues are saved in FileName.evals.conf.h5. A new named tensor, "TimesliceEvals", can be used to simplify restoring these from disk. NB: the changes in BaseIO add support so that Eigen tensors can easily be used in MPI operations, e.g. GlobalSum. See LapEvec.hpp for an example of how this is done.
commit c69a3b6ef6
parent 2ed39ebb7a
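As a rough illustration of the "restoring these from disk" use case, the sketch below shows how the saved eigenvalues might be reloaded. It is not part of this commit: it assumes NamedTensor offers a filename-based read() counterpart to the Evals.write(...) call added below, that the Hadrons headers and namespaces are in scope, and the file name is a placeholder.

// Hypothetical restore sketch (not part of this commit); assumes NamedTensor
// provides a filename-based read() mirroring the Evals.write(...) call in the diff,
// and that <iostream> plus the Hadrons headers are included. File name is illustrative.
TimesliceEvals Evals;                        // default ctor: dimensions are set when loaded
Evals.read("FileName.evals.1500");           // the nT x nVec eigenvalues written by LapEvec
for (Eigen::Index t = 0; t < Evals.tensor.dimension(0); ++t)
  for (Eigen::Index v = 0; v < Evals.tensor.dimension(1); ++v)
    std::cout << "eval(t=" << t << ", v=" << v << ") = " << Evals.tensor(t, v) << "\n";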
@@ -97,6 +97,23 @@ namespace Grid {
     template<typename T, typename V = void> struct is_tensor_variable : public std::false_type {};
     template<typename T> struct is_tensor_variable<T, typename std::enable_if<is_tensor<T>::value
                                                     && !is_tensor_fixed<T>::value>::type> : public std::true_type {};
+
+    // Helper functions to get the ultimate scalar inside a tensor, and corresponding size
+    template <typename ET>
+    inline typename std::enable_if<is_tensor<ET>::value, const typename ET::Index>::type
+    getScalarCount(const ET &eigenTensor) { return eigenTensor.size() * Traits<ET>::count; }
+    template <typename ET>
+    inline typename std::enable_if<is_tensor_of_scalar<ET>::value, const typename ET::Scalar *>::type
+    getFirstScalar(const ET &eigenTensor) { return eigenTensor.data(); }
+    template <typename ET>
+    inline typename std::enable_if<is_tensor_of_scalar<ET>::value, typename ET::Scalar *>::type
+    getFirstScalar(ET &eigenTensor) { return eigenTensor.data(); }
+    template <typename ET>
+    inline typename std::enable_if<is_tensor_of_container<ET>::value, const typename Traits<ET>::scalar_type *>::type
+    getFirstScalar(const ET &eigenTensor) { return eigenTensor.data()->begin(); }
+    template <typename ET>
+    inline typename std::enable_if<is_tensor_of_container<ET>::value, typename Traits<ET>::scalar_type *>::type
+    getFirstScalar(ET &eigenTensor) { return eigenTensor.data()->begin(); }
   }
 
   // Abstract writer/reader classes ////////////////////////////////////////////
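These helpers expose any Eigen tensor (of plain scalars or of Grid containers) as a flat scalar buffer, which is what lets it be handed directly to an MPI reduction. A minimal sketch of the pattern, mirroring the GlobalSumVector call added to LapEvec.hpp further down in this diff; the function name, grid pointer and dimensions are placeholders.

// Sketch only (not part of the commit): sum a partially-filled Eigen tensor across
// MPI ranks using the new EigenIO helpers. 'sumEvalsExample', 'grid', nT and nVec
// are illustrative placeholders.
void sumEvalsExample(Grid::GridBase *grid, Eigen::Index nT, Eigen::Index nVec)
{
  Eigen::Tensor<Grid::RealD, 2> partial(nT, nVec);
  partial.setZero();                 // each rank fills only the (t, v) entries it owns
  // ... rank-local writes into partial(t, v) go here ...
  grid->GlobalSumVector(Grid::EigenIO::getFirstScalar(partial),
                        static_cast<int>(Grid::EigenIO::getScalarCount(partial)));
  // after the reduction every rank holds the complete nT x nVec result
}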
@@ -128,23 +145,6 @@ namespace Grid {
     typename std::enable_if<EigenIO::is_tensor<ETensor>::value>::type
     write(const std::string &s, const ETensor &output);
 
-    // Helper functions for Scalar vs Container specialisations
-    template <typename ETensor>
-    inline typename std::enable_if<EigenIO::is_tensor_of_scalar<ETensor>::value,
-                                   const typename ETensor::Scalar *>::type
-    getFirstScalar(const ETensor &output)
-    {
-      return output.data();
-    }
-
-    template <typename ETensor>
-    inline typename std::enable_if<EigenIO::is_tensor_of_container<ETensor>::value,
-                                   const typename EigenIO::Traits<ETensor>::scalar_type *>::type
-    getFirstScalar(const ETensor &output)
-    {
-      return output.data()->begin();
-    }
-
     template <typename S>
     inline typename std::enable_if<EigenIO::is_scalar<S>::value, void>::type
     copyScalars(S * &pCopy, const S &Source)
@@ -323,7 +323,7 @@ namespace Grid {
       std::vector<Scalar> CopyBuffer;
       const Index TotalNumElements = NumElements * Traits::count;
       if( !CopyData ) {
-        pWriteBuffer = getFirstScalar( output );
+        pWriteBuffer = EigenIO::getFirstScalar( output );
       } else {
         // Regardless of the Eigen::Tensor storage order, the copy will be Row Major
         CopyBuffer.resize( TotalNumElements );
@@ -263,6 +263,11 @@ void TLapEvec<GImpl>::execute(void)
   const int Ntlocal{gridHD->LocalDimensions()[Tdir]};
   const int Ntfirst{gridHD->LocalStarts()[Tdir]};
   uint32_t ConvergenceErrors{0};
+  const int NtFull{env().getDim(Tdir)};
+  TimesliceEvals Evals{ NtFull, LPar.Nvec };
+  for (int t = 0; t < NtFull; t++)
+    for (int v = 0; v < LPar.Nvec; v++)
+      Evals.tensor( t, v ) = 0;
   for (int t = 0; t < Ntlocal; t++ )
   {
     LOG(Message) << "------------------------------------------------------------" << std::endl;
@@ -302,6 +307,8 @@ void TLapEvec<GImpl>::execute(void)
       InsertSliceLocal(eig[t].evec[i],eig4d.evec[i],0,t,Tdir);
       if(t==0 && Ntfirst==0)
         eig4d.eval[i] = eig[t].eval[i]; // TODO: Discuss: is this needed? Is there a better way?
+      if(gridLD->IsBoss()) // Only do this on one node per timeslice, so a global sum will work
+        Evals.tensor(t + Ntfirst,i) = eig[t].eval[i];
     }
   }
   GridLogIRL.Active( PreviousIRLLogState );
@@ -322,9 +329,21 @@ void TLapEvec<GImpl>::execute(void)
     sOperatorXml.append( b->parString() );
     sOperatorXml.append( "</options></module>" );
     eig4d.record.operatorXml = sOperatorXml;
-    sEigenPackName.append(".");
-    sEigenPackName.append(std::to_string(vm().getTrajectory()));
+    sEigenPackName.append(1, '.');
+    std::size_t NameLen{ sEigenPackName.length() };
+    const std::string sTrajNum{std::to_string(vm().getTrajectory())};
+    sEigenPackName.append(sTrajNum);
     eig4d.write(sEigenPackName,false);
+    // Communicate eig[t].evec to boss-node, save into new object evecs
+    gridHD->GlobalSumVector(EigenIO::getFirstScalar(Evals.tensor),
+                            static_cast<int>(EigenIO::getScalarCount(Evals.tensor)));
+    if(gridHD->IsBoss())
+    {
+      sEigenPackName.resize(NameLen);
+      sEigenPackName.append("evals.");
+      sEigenPackName.append(sTrajNum);
+      Evals.write( sEigenPackName );
+    }
   }
 }
 
@@ -53,5 +53,8 @@ const std::array<std::string, 4> NoiseTensor::DefaultIndexNames__{"nNoise", "nT"
 const std::string PerambTensor::Name__{"Perambulator"};
 const std::array<std::string, 6> PerambTensor::DefaultIndexNames__{"nT", "nVec", "LI", "nNoise", "nT_inv", "SI"};
 
+const std::string TimesliceEvals::Name__{"TimesliceEigenValues"};
+const std::array<std::string, 2> TimesliceEvals::DefaultIndexNames__{"nT", "nVec"};
+
 END_MODULE_NAMESPACE
 END_HADRONS_NAMESPACE
@@ -185,6 +185,20 @@ class PerambTensor : public NamedTensor<SpinVector, 6>
     : NamedTensor{Name__, DefaultIndexNames__, nT, nVec, LI, nNoise, nT_inv, SI} {}
 };
 
+class TimesliceEvals : public NamedTensor<RealD, 2>
+{
+    public:
+    static const std::string Name__;
+    static const std::array<std::string, 2> DefaultIndexNames__;
+    // Default constructor (assumes tensor will be loaded from file)
+    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TimesliceEvals() : NamedTensor{Name__, DefaultIndexNames__} {}
+
+    // Construct a named tensor explicitly specifying size of each dimension
+    template<typename... IndexTypes>
+    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TimesliceEvals(Eigen::Index nT, Eigen::Index nVec)
+    : NamedTensor{Name__, DefaultIndexNames__, nT, nVec} {}
+};
+
 END_MODULE_NAMESPACE
 END_HADRONS_NAMESPACE
 #endif // Hadrons_NamedTensor_hpp_