
Merge pull request #260 from mmphys/feature/distil

Distillation: save eigenvalues of the Laplacian for all timeslices
Antonin Portelli 2020-03-13 14:00:21 +00:00 committed by GitHub
commit 05ebc458e2
6 changed files with 86 additions and 49 deletions

View File

@@ -97,6 +97,23 @@ namespace Grid {
template<typename T, typename V = void> struct is_tensor_variable : public std::false_type {};
template<typename T> struct is_tensor_variable<T, typename std::enable_if<is_tensor<T>::value
&& !is_tensor_fixed<T>::value>::type> : public std::true_type {};
// Helper functions to get the ultimate scalar inside a tensor, and corresponding size
template <typename ET>
inline typename std::enable_if<is_tensor<ET>::value, const typename ET::Index>::type
getScalarCount(const ET &eigenTensor) { return eigenTensor.size() * Traits<ET>::count; }
template <typename ET>
inline typename std::enable_if<is_tensor_of_scalar<ET>::value, const typename ET::Scalar *>::type
getFirstScalar(const ET &eigenTensor) { return eigenTensor.data(); }
template <typename ET>
inline typename std::enable_if<is_tensor_of_scalar<ET>::value, typename ET::Scalar *>::type
getFirstScalar(ET &eigenTensor) { return eigenTensor.data(); }
template <typename ET>
inline typename std::enable_if<is_tensor_of_container<ET>::value, const typename Traits<ET>::scalar_type *>::type
getFirstScalar(const ET &eigenTensor) { return eigenTensor.data()->begin(); }
template <typename ET>
inline typename std::enable_if<is_tensor_of_container<ET>::value, typename Traits<ET>::scalar_type *>::type
getFirstScalar(ET &eigenTensor) { return eigenTensor.data()->begin(); }
}
// Abstract writer/reader classes ////////////////////////////////////////////
@@ -128,23 +145,6 @@ namespace Grid {
typename std::enable_if<EigenIO::is_tensor<ETensor>::value>::type
write(const std::string &s, const ETensor &output);
// Helper functions for Scalar vs Container specialisations
template <typename ETensor>
inline typename std::enable_if<EigenIO::is_tensor_of_scalar<ETensor>::value,
const typename ETensor::Scalar *>::type
getFirstScalar(const ETensor &output)
{
return output.data();
}
template <typename ETensor>
inline typename std::enable_if<EigenIO::is_tensor_of_container<ETensor>::value,
const typename EigenIO::Traits<ETensor>::scalar_type *>::type
getFirstScalar(const ETensor &output)
{
return output.data()->begin();
}
template <typename S>
inline typename std::enable_if<EigenIO::is_scalar<S>::value, void>::type
copyScalars(S * &pCopy, const S &Source)
@@ -318,12 +318,12 @@ namespace Grid {
TotalDims[TensorRank + i] = Traits::Dimension(i);
// If the Tensor isn't in Row-Major order, then we'll need to copy its data
const bool CopyData{NumElements > 1 && ETensor::Layout != Eigen::StorageOptions::RowMajor};
const bool CopyData{NumElements > 1 && static_cast<int>( ETensor::Layout ) != static_cast<int>( Eigen::StorageOptions::RowMajor )};
const Scalar * pWriteBuffer;
std::vector<Scalar> CopyBuffer;
const Index TotalNumElements = NumElements * Traits::count;
if( !CopyData ) {
pWriteBuffer = getFirstScalar( output );
pWriteBuffer = EigenIO::getFirstScalar( output );
} else {
// Regardless of the Eigen::Tensor storage order, the copy will be Row Major
CopyBuffer.resize( TotalNumElements );
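A minimal usage sketch of the new `EigenIO` helpers added above (not part of this commit; the tensor, its sizes, and the `grid` communicator below are hypothetical): `getFirstScalar()` returns a pointer to the first underlying scalar whether the tensor holds plain scalars or Grid containers, and `getScalarCount()` gives the total number of scalars, so a supported tensor can be treated as one contiguous buffer, e.g. for a global sum over MPI ranks.

```c++
// Sketch only: assumes the Grid serialisation headers are included and that
// `grid` is a valid GridBase* (not shown). Each rank fills its own entries,
// then the global sum assembles the complete tensor on every rank.
Eigen::Tensor<Grid::ComplexD, 2, Eigen::RowMajor> partial(4, 3);
partial.setZero();
Grid::ComplexD *data  = Grid::EigenIO::getFirstScalar(partial);  // first underlying scalar
auto            count = Grid::EigenIO::getScalarCount(partial);  // total number of scalars
grid->GlobalSumVector(data, static_cast<int>(count));            // sum the whole buffer
```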

View File

@@ -263,6 +263,11 @@ void TLapEvec<GImpl>::execute(void)
const int Ntlocal{gridHD->LocalDimensions()[Tdir]};
const int Ntfirst{gridHD->LocalStarts()[Tdir]};
uint32_t ConvergenceErrors{0};
const int NtFull{env().getDim(Tdir)};
TimesliceEvals Evals{ NtFull, LPar.Nvec };
for (int t = 0; t < NtFull; t++)
for (int v = 0; v < LPar.Nvec; v++)
Evals.tensor( t, v ) = 0;
for (int t = 0; t < Ntlocal; t++ )
{
LOG(Message) << "------------------------------------------------------------" << std::endl;
@@ -302,6 +307,8 @@ void TLapEvec<GImpl>::execute(void)
InsertSliceLocal(eig[t].evec[i],eig4d.evec[i],0,t,Tdir);
if(t==0 && Ntfirst==0)
eig4d.eval[i] = eig[t].eval[i]; // TODO: Discuss: is this needed? Is there a better way?
if(gridLD->IsBoss()) // Only do this on one node per timeslice, so a global sum will work
Evals.tensor(t + Ntfirst,i) = eig[t].eval[i];
}
}
GridLogIRL.Active( PreviousIRLLogState );
@@ -322,9 +329,21 @@ void TLapEvec<GImpl>::execute(void)
sOperatorXml.append( b->parString() );
sOperatorXml.append( "</options></module>" );
eig4d.record.operatorXml = sOperatorXml;
sEigenPackName.append(".");
sEigenPackName.append(std::to_string(vm().getTrajectory()));
sEigenPackName.append(1, '.');
std::size_t NameLen{ sEigenPackName.length() };
const std::string sTrajNum{std::to_string(vm().getTrajectory())};
sEigenPackName.append(sTrajNum);
eig4d.write(sEigenPackName,false);
// Sum the per-timeslice eigenvalues over all nodes (only each timeslice's boss set them), then save the full table from the boss node
gridHD->GlobalSumVector(EigenIO::getFirstScalar(Evals.tensor),
static_cast<int>(EigenIO::getScalarCount(Evals.tensor)));
if(gridHD->IsBoss())
{
sEigenPackName.resize(NameLen);
sEigenPackName.append("evals.");
sEigenPackName.append(sTrajNum);
Evals.write( sEigenPackName );
}
}
}
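
Taken together, the hunks above implement a simple gather-by-global-sum: every rank zeroes the full nT x nVec table, the boss rank of each timeslice writes only its local eigenvalues, and one `GlobalSumVector` then assembles the complete table on every rank so the overall boss can write it to disk. A condensed sketch of the pattern (variable names as in the hunks above; control flow and file naming abbreviated):

```c++
TimesliceEvals Evals{ NtFull, LPar.Nvec };            // full table, all timeslices
for (int t = 0; t < NtFull; t++)                      // zero everywhere so that the
    for (int v = 0; v < LPar.Nvec; v++)               // global sum acts as a gather
        Evals.tensor(t, v) = 0;
for (int t = 0; t < Ntlocal; t++)                     // local timeslices only
    if (gridLD->IsBoss())                             // one contributing rank per timeslice
        for (int i = 0; i < LPar.Nvec; i++)
            Evals.tensor(t + Ntfirst, i) = eig[t].eval[i];
gridHD->GlobalSumVector(EigenIO::getFirstScalar(Evals.tensor),
                        static_cast<int>(EigenIO::getScalarCount(Evals.tensor)));
if (gridHD->IsBoss())                                 // write once, from the boss
    Evals.write(sEigenPackName);
```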

View File

@@ -54,5 +54,8 @@ const std::array<std::string, 4> NoiseTensor::DefaultIndexNames__{"nNoise", "nT"
const std::string PerambTensor::Name__{"Perambulator"};
const std::array<std::string, 6> PerambTensor::DefaultIndexNames__{"nT", "nVec", "LI", "nNoise", "nT_inv", "SI"};
const std::string TimesliceEvals::Name__{"TimesliceEigenValues"};
const std::array<std::string, 2> TimesliceEvals::DefaultIndexNames__{"nT", "nVec"};
END_MODULE_NAMESPACE
END_HADRONS_NAMESPACE

View File

@@ -143,7 +143,7 @@ void TWall<FImpl>::execute(void)
envGetTmp(LatticeComplex, coor);
p = strToVec<Real>(par().mom);
ph = Zero();
for(unsigned int mu = 0; mu < env().getNd(); mu++)
for(unsigned int mu = 0; mu < p.size(); mu++)
{
LatticeCoordinate(coor, mu);
ph = ph + (p[mu]/env().getDim(mu))*coor;

View File

@@ -159,9 +159,9 @@ using LapEvecs = Grid::Hadrons::EigenPack<LatticeColourVector>;
class NoiseTensor : public NamedTensor<Complex, 4>
{
public:
static const std::string Name__;
static const std::array<std::string, 4> DefaultIndexNames__;
public:
// Default constructor (assumes tensor will be loaded from file)
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE NoiseTensor() : NamedTensor{Name__, DefaultIndexNames__} {}
@@ -173,9 +173,9 @@ class NoiseTensor : public NamedTensor<Complex, 4>
class PerambTensor : public NamedTensor<SpinVector, 6>
{
public:
static const std::string Name__;
static const std::array<std::string, 6> DefaultIndexNames__;
public:
// Default constructor (assumes tensor will be loaded from file)
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PerambTensor() : NamedTensor{Name__, DefaultIndexNames__} {}
@@ -185,6 +185,20 @@ class PerambTensor : public NamedTensor<SpinVector, 6>
: NamedTensor{Name__, DefaultIndexNames__, nT, nVec, LI, nNoise, nT_inv, SI} {}
};
class TimesliceEvals : public NamedTensor<RealD, 2>
{
public:
static const std::string Name__;
static const std::array<std::string, 2> DefaultIndexNames__;
// Default constructor (assumes tensor will be loaded from file)
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TimesliceEvals() : NamedTensor{Name__, DefaultIndexNames__} {}
// Construct a named tensor explicitly specifying size of each dimension
template<typename... IndexTypes>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TimesliceEvals(Eigen::Index nT, Eigen::Index nVec)
: NamedTensor{Name__, DefaultIndexNames__, nT, nVec} {}
};
END_MODULE_NAMESPACE
END_HADRONS_NAMESPACE
#endif // Hadrons_NamedTensor_hpp_
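
A brief usage sketch for the new `TimesliceEvals` tensor (illustrative only; the sizes and file name below are hypothetical): construct it with the two dimensions matching `DefaultIndexNames__` (`nT`, `nVec`), fill the `RealD` entries, and write it with the same `NamedTensor` serialisation that `LapEvec` uses above.

```c++
// Illustrative sketch, assuming the Hadrons NamedTensor header is included.
const Eigen::Index Nt = 64, Nvec = 60;     // hypothetical dimensions
TimesliceEvals Evals(Nt, Nvec);            // indices "nT" x "nVec"
for (Eigen::Index t = 0; t < Nt; t++)
    for (Eigen::Index v = 0; v < Nvec; v++)
        Evals.tensor(t, v) = 0.0;          // RealD eigenvalues go here
Evals.write("MyStem.evals.600");           // same write() call LapEvec uses above
```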

View File

@@ -39,8 +39,8 @@ These are the environment variables we will define for Grid:
Variable | Typical Value | Use
--- | --- | ---
`Grid` | `/Users/user_id/src/Grid` | Path to grid source
`GridPre` | `/Users/user_id/bin` | Path to install directory containing grid pre-requisites built from source
`Grid` | `$HOME/src/Grid` | Path to grid source
`GridPre` | `$HOME/.local` | Path to install directory containing grid pre-requisites built from source
`GridPkg` | **MacPorts**=`/opt/local`, **Homebrew**=`/usr/local` | Path to package manager install directory
Choose either of the following ways to do this, and when you're done, log out and in again. To check these have been set:
@@ -54,7 +54,7 @@ Choose either of the following ways to do this, and when you're done, log out an
```apple script
do shell script "launchctl setenv Grid $HOME/src/Grid
launchctl setenv GridPre $HOME/bin
launchctl setenv GridPre $HOME/.local
launchctl setenv GridPkg /opt/local"
```
@@ -82,7 +82,7 @@ Make the file `environment.plist` in `~/Library/LaunchAgents` with the following
<string>sh</string>
<string>-c</string>
<string>launchctl setenv Grid $HOME/src/Grid
launchctl setenv GridPre $HOME/bin
launchctl setenv GridPre $HOME/.local
launchctl setenv GridPkg /opt/local</string>
</array>
@@ -94,16 +94,18 @@ launchctl setenv GridPkg /opt/local</string>
## 3. Install and build Open MPI -- ***optional***
Download the latest version of [Open MPI][OMPI] version 3.1 (I used 3.1.3).
NB: Grid does not have any dependencies on fortran, however many standard scientific packages do, so you may as well download GNU fortran (e.g. MacPorts ``gfortran`` package) and build Open MPI like so:
Download the latest version of [Open MPI][OMPI] version 3.1 (I used 3.1.5) and build it like so:
[OMPI]: https://www.open-mpi.org/software/ompi/v3.1/
../configure CC=clang CXX=clang++ F77=gfortran FC=gfortran CXXFLAGS=-g --prefix=$GridPre/openmpi
../configure CC=clang CXX=clang++ CXXFLAGS=-g --prefix=$GridPre/bin
make -j 4 all install
(If you don't want to bother with fortran bindings, just don't include the F77 and FC flags)
***Note the `/bin` at the end of the prefix - this is required. As a quirk of the OpenMPI installer, `--prefix` must point to the `bin` subdirectory, with other files installed in `$GridPre/include`, `$GridPre/lib`, `$GridPre/share`, etc.***
Grid itself has no dependencies on Fortran; however, many standard scientific packages do, so you may wish to install GNU Fortran (e.g. the MacPorts ``gfortran`` package) and add the following to your configure invocation:
F77=gfortran FC=gfortran
## 4. Install and build Grid pre-requisites
@@ -121,17 +123,17 @@ These are the `portname`s for mandatory Grid libraries:
* git-flow-avh
* gmp
* hdf5
* mpfr
and these are the `portname`s for optional Grid libraries:
* fftw-3-single
* hdf5
* lapack
* doxygen
* OpenBLAS
***Please update this list with any packages I've missed! ... and double-check whether OpenBLAS is really for Grid***
***Please update this list with any packages I've missed! ... and double-check whether OpenBLAS is really for Grid. NB: lapack doesn't seem to work. Should it be scalapack?***
### 2. [Homebrew][Homebrew]
@@ -149,7 +151,7 @@ There isn't currently a port for [C-LIME][C-LIME], so download the source and th
[C-LIME]: https://usqcd-software.github.io/c-lime/ "C-language API for Lattice QCD Interchange Message Encapsulation / Large Internet Message Encapsulation"
../configure --prefix=$GridPre/lime CC=clang
../configure CC=clang --prefix=$GridPre
make -j 4 all install
## 5. Install, Configure and Build Grid
@@ -182,19 +184,19 @@ Below are shown the `configure` script invocations for three recommended configu
This is the build for everyday development and debugging with Xcode. It uses the Xcode clang C++ compiler, without MPI, and defaults to double precision. Xcode builds the `Debug` configuration with debug symbols for full debugging:
../configure --with-hdf5=$GridPkg --with-gmp=$GridPkg --with-mpfr=$GridPkg --with-fftw=$GridPkg --with-lime=$GridPre/lime --enable-simd=GEN --enable-precision=double CXX=clang++ --prefix=$GridPre/GridDebug --enable-comms=none --enable-doxygen-doc
../configure CXX=clang++ --with-hdf5=$GridPkg --with-gmp=$GridPkg --with-mpfr=$GridPkg --with-fftw=$GridPkg --with-lime=$GridPre --enable-simd=GEN --enable-precision=double --prefix=$GridPre/GridDebug --enable-comms=none
#### 2. `Release`
Since Grid itself doesn't really distinguish debug and release configurations, the recommended `Release` build is the same as `Debug`, except that it uses single precision (handy for validation):
../configure --with-hdf5=$GridPkg --with-gmp=$GridPkg --with-mpfr=$GridPkg --with-fftw=$GridPkg --with-lime=$GridPre/lime --enable-simd=GEN --enable-precision=single CXX=clang++ --prefix=$GridPre/GridRelease --enable-comms=none --enable-doxygen-doc
../configure CXX=clang++ --with-hdf5=$GridPkg --with-gmp=$GridPkg --with-mpfr=$GridPkg --with-fftw=$GridPkg --with-lime=$GridPre --enable-simd=GEN --enable-precision=single --prefix=$GridPre/GridRelease --enable-comms=none
#### 3. `MPIDebug`
Debug configuration with MPI:
../configure --with-hdf5=$GridPkg --with-gmp=$GridPkg --with-mpfr=$GridPkg --with-fftw=$GridPkg --with-lime=$GridPre/lime --enable-simd=GEN --enable-precision=double CXX=clang++ --prefix=$GridPre/GridMPIDebug --enable-comms=mpi-auto MPICXX=$GridPre/openmpi/bin/mpicxx --enable-doxygen-doc
../configure CXX=clang++ --with-hdf5=$GridPkg --with-gmp=$GridPkg --with-mpfr=$GridPkg --with-fftw=$GridPkg --with-lime=$GridPre --enable-simd=GEN --enable-precision=double --prefix=$GridPre/GridMPIDebug --enable-comms=mpi-auto MPICXX=$GridPre/bin/mpicxx
### 5.3 Build Grid
@@ -250,9 +252,9 @@ Obtain a list of header locations required by Grid by running the following from
./grid-config --cxxflags
Output should look similar to:
Output should look similar to (but will likely include duplicates):
-I$GridPre/openmpi/include -I$GridPkg/include -I$GridPre/lime/include -I$GridPkg/include -I$GridPkg/include -I$GridPkg/include -O3 -g -std=c++11
-I$GridPre/include -I$GridPkg/include -O3 -g -std=c++11
The header locations follow the `-I` switches. You can ignore the other switches, and you can ignore duplicate entries, which just mean that your package manager has installed multiple packages in the same location.
@@ -265,13 +267,12 @@ Set HEADER_SEARCH_PATHS to:
followed by (***the order is important***) the locations reported by `grid-config --cxxflags`, ignoring duplicates, e.g.:
$GridPre/openmpi/include
$GridPre/include
$GridPkg/include
$GridPre/lime/include
**Note: the easiest way to set this value is to put it all on one line, space separated, and edit the text to the right of `HEADER_SEARCH_PATHS`**, i.e.:
$Grid/build$(CONFIGURATION)/Grid $Grid $GridPre/openmpi/include $GridPkg/include $GridPre/lime/include
$Grid/build$(CONFIGURATION)/Grid $Grid $GridPre/include $GridPkg/include
#### LIBRARY_SEARCH_PATHS
@@ -279,13 +280,13 @@ Obtain a list of library locations required by Grid by running the following fro
./grid-config --ldflags
Output should look similar to:
Output should look similar to (but will likely include duplicates):
-L$GridPre/openmpi/lib -L$GridPkg/lib -L$GridPre/lime/lib -L$GridPkg/lib -L$GridPkg/lib -L$GridPkg/lib
-L$GridPre/lib -L$GridPkg/lib
Paste the output ***with `$Grid/build$(CONFIGURATION)/Grid $Grid/build$(CONFIGURATION)/Hadrons ` prepended*** into `LIBRARY_SEARCH_PATHS`:
$Grid/build$(CONFIGURATION)/Grid $Grid/build$(CONFIGURATION)/Hadrons $GridPre/openmpi/lib $GridPkg/lib $GridPre/lime/lib
$Grid/build$(CONFIGURATION)/Grid $Grid/build$(CONFIGURATION)/Hadrons $GridPre/lib $GridPkg/lib
### 2. Linking
@@ -369,7 +370,7 @@ Instead:
3. From a terminal session, locate and run your executable using `mpirun` (*the hashed DerivedData path of the project build products will not be exactly the same as in this example*):
`$GridPre/openmpi/bin/mpirun -np 2 ~/Library/Developer/Xcode/DerivedData/HelloGrid-fiyyuveptaqelbbvllomcgjyvghr/Build/Products/Debug/HelloGrid --grid 4.4.4.8 --mpi 1.1.1.2`
`$GridPre/bin/mpirun -np 2 ~/Library/Developer/Xcode/DerivedData/HelloGrid-fiyyuveptaqelbbvllomcgjyvghr/Build/Products/Debug/HelloGrid --grid 4.4.4.8 --mpi 1.1.1.2`
The Xcode debugger will attach to the first process.
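
For reference, a minimal sketch of what the `HelloGrid` main might contain (an assumption based on standard Grid initialisation, not something defined by this commit): `Grid_init` consumes the `--grid` and `--mpi` options passed on the `mpirun` command line above.

```c++
#include <Grid/Grid.h>

int main(int argc, char *argv[])
{
    Grid::Grid_init(&argc, &argv);   // parses --grid 4.4.4.8 --mpi 1.1.1.2 from the command line
    std::cout << Grid::GridLogMessage << "Hello Grid" << std::endl;
    Grid::Grid_finalize();
    return 0;
}
```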