Mirror of https://github.com/paboyle/Grid.git (synced 2024-11-10 07:55:35 +00:00)

commit 8e61286741: Merge branch 'develop' into feature/qed-fvol
@@ -39,6 +39,7 @@ Author: Peter Boyle <paboyle@ph.ed.ac.uk>
 #include <Grid/algorithms/approx/MultiShiftFunction.h>
 #include <Grid/algorithms/approx/Forecast.h>
 
+#include <Grid/algorithms/iterative/Deflation.h>
 #include <Grid/algorithms/iterative/ConjugateGradient.h>
 #include <Grid/algorithms/iterative/ConjugateResidual.h>
 #include <Grid/algorithms/iterative/NormalEquations.h>
lib/algorithms/iterative/Deflation.h (new file, 101 lines)
@@ -0,0 +1,101 @@
/*************************************************************************************

    Grid physics library, www.github.com/paboyle/Grid

    Source file: ./lib/algorithms/iterative/ImplicitlyRestartedLanczos.h

    Copyright (C) 2015

Author: Peter Boyle <paboyle@ph.ed.ac.uk>

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License along
    with this program; if not, write to the Free Software Foundation, Inc.,
    51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.

    See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/*  END LEGAL */
#ifndef GRID_DEFLATION_H
#define GRID_DEFLATION_H

namespace Grid {

struct ZeroGuesser {
public:
  template<class Field>
  void operator()(const Field &src,Field &guess) { guess = Zero(); };
};
struct SourceGuesser {
public:
  template<class Field>
  void operator()(const Field &src,Field &guess) { guess = src; };
};

////////////////////////////////
// Fine grid deflation
////////////////////////////////
template<class Field>
struct DeflatedGuesser {
private:
  const std::vector<Field> &evec;
  const std::vector<RealD> &eval;

public:

  DeflatedGuesser(const std::vector<Field> & _evec,const std::vector<RealD> & _eval) : evec(_evec), eval(_eval) {};

  void operator()(const Field &src,Field &guess) {
    guess = zero;
    assert(evec.size()==eval.size());
    auto N = evec.size();
    for (int i=0;i<N;i++) {
      const Field& tmp = evec[i];
      axpy(guess,TensorRemove(innerProduct(tmp,src)) / eval[i],tmp,guess);
    }
  }
};

template<class FineField, class CoarseField>
class LocalCoherenceDeflatedGuesser {
private:
  const std::vector<FineField>   &subspace;
  const std::vector<CoarseField> &evec_coarse;
  const std::vector<RealD>       &eval_coarse;
public:

  LocalCoherenceDeflatedGuesser(const std::vector<FineField>   &_subspace,
                                const std::vector<CoarseField> &_evec_coarse,
                                const std::vector<RealD>       &_eval_coarse)
    : subspace(_subspace),
      evec_coarse(_evec_coarse),
      eval_coarse(_eval_coarse)
  {
  }

  void operator()(const FineField &src,FineField &guess) {
    int N = (int)evec_coarse.size();
    CoarseField src_coarse(evec_coarse[0]._grid);
    CoarseField guess_coarse(evec_coarse[0]._grid); guess_coarse = zero;
    blockProject(src,src_coarse,subspace);
    for (int i=0;i<N;i++) {
      const CoarseField & tmp = evec_coarse[i];
      axpy(guess_coarse,TensorRemove(innerProduct(tmp,src_coarse)) / eval_coarse[i],tmp,guess_coarse);
    }
    blockPromote(guess_coarse,guess,subspace);
  };
};

}
#endif
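For orientation, a minimal usage sketch of the fine-grid guesser; it is not part of the commit, and LatticeFermion, rbGrid, evecs and evals are placeholders for whatever the calling code actually has (e.g. eigenpairs from an earlier Lanczos run):

// Illustration only -- names are assumed, not taken from this diff.
std::vector<LatticeFermion> evecs;   // eigenvectors of the Hermitian operator
std::vector<RealD>          evals;   // matching eigenvalues
LatticeFermion src(rbGrid), guess(rbGrid);

DeflatedGuesser<LatticeFermion> Guesser(evecs, evals);
Guesser(src, guess);   // guess = sum_i evec[i] * <evec[i],src> / eval[i]

The same object can then be handed to the Guesser-templated operator() that this commit adds to the SchurRedBlack solvers further down, so the red-black solve starts from the deflated guess rather than from zero.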
@@ -149,19 +149,6 @@ void basisSortInPlace(std::vector<Field> & _v,std::vector<RealD>& sort_vals, boo
   basisReorderInPlace(_v,sort_vals,idx);
 }
 
-// PAB: faster to compute the inner products first then fuse loops.
-// If performance critical can improve.
-template<class Field>
-void basisDeflate(const std::vector<Field> &_v,const std::vector<RealD>& eval,const Field& src_orig,Field& result) {
-  result = zero;
-  assert(_v.size()==eval.size());
-  int N = (int)_v.size();
-  for (int i=0;i<N;i++) {
-    Field& tmp = _v[i];
-    axpy(result,TensorRemove(innerProduct(tmp,src_orig)) / eval[i],tmp,result);
-  }
-}
-
 /////////////////////////////////////////////////////////////
 // Implicitly restarted lanczos
 /////////////////////////////////////////////////////////////
@@ -245,12 +232,6 @@ class ImplicitlyRestartedLanczos {
 
 public:
 
-    static void Deflate(const std::vector<Field> &_v,
-                        const std::vector<RealD>& eval,
-                        const Field& src_orig,Field& result) {
-      basisDeflate(_v,eval,src_orig,result);
-    }
-
 //////////////////////////////////////////////////////////////////
 // PAB:
 //////////////////////////////////////////////////////////////////
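With basisDeflate and the static Deflate wrapper removed, the equivalent deflation can be written through the new guesser from Deflation.h. A rough before/after sketch, with evec, eval, src and guess as placeholder variables:

// Before (removed in this commit):
//   ImplicitlyRestartedLanczos<Field>::Deflate(evec, eval, src, guess);
// After (sketch using the new header):
DeflatedGuesser<Field> Guess(evec, eval);
Guess(src, guess);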
@@ -31,6 +31,7 @@ Author: paboyle <paboyle@ph.ed.ac.uk>
 
 namespace Grid {
 
 struct LanczosParams : Serializable {
 public:
   GRID_SERIALIZABLE_CLASS_MEMBERS(LanczosParams,
@@ -240,21 +241,6 @@ private:
   std::vector<CoarseField>       _evec_coarse;
 
 public:
-  static void Deflate(std::vector<FineField>   subspace,
-                      std::vector<CoarseField> evec_coarse,
-                      std::vector<RealD>       eval_coarse,
-                      const FineField& src_orig,FineField& result)
-  {
-    int N = (int)evec_coarse.size();
-    CoarseField src_coarse(evec_coarse[0]._grid);
-    CoarseField res_coarse(evec_coarse[0]._grid); res_coarse = zero;
-    blockProject(src_orig,src_coarse,subspace);
-    for (int i=0;i<N;i++) {
-      CoarseField & tmp = evec_coarse[i];
-      axpy(res_coarse,TensorRemove(innerProduct(tmp,src_coarse)) / eval_coarse[i],tmp,res_coarse);
-    }
-    blockPromote(res_coarse,result,subspace);
-  };
-
 
 LocalCoherenceLanczos(GridBase *FineGrid,
                       GridBase *CoarseGrid,
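Likewise for the coarse-grid path: the removed LocalCoherenceLanczos::Deflate maps onto the new LocalCoherenceDeflatedGuesser. A sketch with placeholder variables (subspace, evec_coarse, eval_coarse, src, result):

// Before (removed in this commit):
//   LocalCoherenceLanczos<...>::Deflate(subspace, evec_coarse, eval_coarse, src, result);
// After (sketch):
LocalCoherenceDeflatedGuesser<FineField,CoarseField> Guess(subspace, evec_coarse, eval_coarse);
Guess(src, result);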
@@ -107,7 +107,12 @@ namespace Grid {
     };
 
     template<class Matrix>
     void operator() (Matrix & _Matrix,const Field &in, Field &out){
+      ZeroGuesser guess;
+      (*this)(_Matrix,in,out,guess);
+    }
+    template<class Matrix, class Guesser>
+    void operator() (Matrix & _Matrix,const Field &in, Field &out, Guesser &guess){
 
      // FIXME CGdiagonalMee not implemented virtual function
      // FIXME use CBfactorise to control schur decomp
@@ -129,7 +134,6 @@ namespace Grid {
       pickCheckerboard(Odd ,src_o,in);
       pickCheckerboard(Even,sol_e,out);
       pickCheckerboard(Odd ,sol_o,out);
 
       std::cout << GridLogMessage << " SchurRedBlackStaggeredSolve checkerboards picked" <<std::endl;
 
     /////////////////////////////////////////////////////
@@ -146,6 +150,7 @@ namespace Grid {
       // Call the red-black solver
       //////////////////////////////////////////////////////////////
       std::cout<<GridLogMessage << "SchurRedBlackStaggeredSolver calling the Mpc solver" <<std::endl;
+      guess(src_o,sol_o);
       _HermitianRBSolver(_HermOpEO,src_o,sol_o);  assert(sol_o.checkerboard==Odd);
       std::cout<<GridLogMessage << "SchurRedBlackStaggeredSolver called the Mpc solver" <<std::endl;
 
@@ -189,7 +194,12 @@ namespace Grid {
       CBfactorise=cb;
     };
     template<class Matrix>
     void operator() (Matrix & _Matrix,const Field &in, Field &out){
+      ZeroGuesser guess;
+      (*this)(_Matrix,in,out,guess);
+    }
+    template<class Matrix, class Guesser>
+    void operator() (Matrix & _Matrix,const Field &in, Field &out,Guesser &guess){
 
      // FIXME CGdiagonalMee not implemented virtual function
      // FIXME use CBfactorise to control schur decomp
@@ -225,6 +235,7 @@ namespace Grid {
       // Call the red-black solver
       //////////////////////////////////////////////////////////////
       std::cout<<GridLogMessage << "SchurRedBlack solver calling the MpcDagMp solver" <<std::endl;
+      guess(src_o,sol_o);
       _HermitianRBSolver(_HermOpEO,src_o,sol_o);  assert(sol_o.checkerboard==Odd);
 
       ///////////////////////////////////////////////////
@@ -268,7 +279,12 @@ namespace Grid {
     };
 
     template<class Matrix>
     void operator() (Matrix & _Matrix,const Field &in, Field &out){
+      ZeroGuesser guess;
+      (*this)(_Matrix,in,out,guess);
+    }
+    template<class Matrix,class Guesser>
+    void operator() (Matrix & _Matrix,const Field &in, Field &out,Guesser &guess){
 
      // FIXME CGdiagonalMee not implemented virtual function
      // FIXME use CBfactorise to control schur decomp
@@ -305,6 +321,7 @@ namespace Grid {
       //////////////////////////////////////////////////////////////
       std::cout<<GridLogMessage << "SchurRedBlack solver calling the MpcDagMp solver" <<std::endl;
       // _HermitianRBSolver(_HermOpEO,src_o,sol_o);  assert(sol_o.checkerboard==Odd);
+      guess(src_o,tmp);
       _HermitianRBSolver(_HermOpEO,src_o,tmp);  assert(tmp.checkerboard==Odd);
       _Matrix.MooeeInv(tmp,sol_o); assert( sol_o.checkerboard ==Odd);
 
@@ -347,7 +364,12 @@ namespace Grid {
     };
 
     template<class Matrix>
     void operator() (Matrix & _Matrix,const Field &in, Field &out){
+      ZeroGuesser guess;
+      (*this)(_Matrix,in,out,guess);
+    }
+    template<class Matrix, class Guesser>
+    void operator() (Matrix & _Matrix,const Field &in, Field &out,Guesser &guess){
 
      // FIXME CGdiagonalMee not implemented virtual function
      // FIXME use CBfactorise to control schur decomp
@@ -385,6 +407,7 @@ namespace Grid {
       std::cout<<GridLogMessage << "SchurRedBlack solver calling the MpcDagMp solver" <<std::endl;
       // _HermitianRBSolver(_HermOpEO,src_o,sol_o);  assert(sol_o.checkerboard==Odd);
       // _HermitianRBSolver(_HermOpEO,src_o,tmp);  assert(tmp.checkerboard==Odd);
+      guess(src_o,tmp);
       _HermitianRBSolver(src_o,tmp);  assert(tmp.checkerboard==Odd);
       _Matrix.MooeeInv(tmp,sol_o); assert( sol_o.checkerboard ==Odd);
 
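The pattern across all of the SchurRedBlack variants above is the same: the original operator() now builds a ZeroGuesser and forwards to a new Guesser-templated overload, which applies guess(src_o, ...) to the odd-checkerboard system before the red-black solve. A hedged calling sketch; SchurSolver, Dirac, evecs and evals are placeholders, not names from this diff:

SchurSolver(Dirac, src, sol);                       // as before: implicit ZeroGuesser

DeflatedGuesser<LatticeFermion> Guesser(evecs, evals);
SchurSolver(Dirac, src, sol, Guesser);              // new: deflated initial guess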
@@ -44,7 +44,10 @@ void CartesianCommunicator::Init(int *argc, char ***argv)
   MPI_Initialized(&flag); // needed to coexist with other libs apparently
   if ( !flag ) {
     MPI_Init_thread(argc,argv,MPI_THREAD_MULTIPLE,&provided);
-    assert (provided == MPI_THREAD_MULTIPLE);
+    //If only 1 comms thread we require any threading mode other than SINGLE, but for multiple comms threads we need MULTIPLE
+    if( (nCommThreads == 1 && provided == MPI_THREAD_SINGLE) ||
+        (nCommThreads > 1 && provided != MPI_THREAD_MULTIPLE) )
+      assert(0);
   }
 
   Grid_quiesce_nodes();
@@ -86,10 +89,16 @@ void CartesianCommunicator::ProcessorCoorFromRank(int rank, std::vector<int> &c
 CartesianCommunicator::CartesianCommunicator(const std::vector<int> &processors)
 {
   MPI_Comm optimal_comm;
-  GlobalSharedMemory::OptimalCommunicator (processors,optimal_comm); // Remap using the shared memory optimising routine
+  ////////////////////////////////////////////////////
+  // Remap using the shared memory optimising routine
+  // The remap creates a comm which must be freed
+  ////////////////////////////////////////////////////
+  GlobalSharedMemory::OptimalCommunicator (processors,optimal_comm);
   InitFromMPICommunicator(processors,optimal_comm);
   SetCommunicator(optimal_comm);
+  ///////////////////////////////////////////////////
   // Free the temp communicator
+  ///////////////////////////////////////////////////
   MPI_Comm_free(&optimal_comm);
 }
 
@@ -199,8 +208,10 @@ CartesianCommunicator::CartesianCommunicator(const std::vector<int> &processors,
   // Take the right SHM buffers
   //////////////////////////////////////////////////////////////////////////////////////////////////////
   SetCommunicator(comm_split);
 
+  ///////////////////////////////////////////////
   // Free the temp communicator
+  ///////////////////////////////////////////////
   MPI_Comm_free(&comm_split);
 
   if(0){
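Both constructors now treat the remapped communicator as a temporary: the handle returned by OptimalCommunicator or MPI_Comm_split is freed immediately after SetCommunicator, which implies SetCommunicator keeps its own copy. The general MPI pattern, sketched outside Grid (color and key are placeholders):

MPI_Comm temp_comm, kept_comm;
int color = 0, key = 0;                                  // placeholder split parameters
MPI_Comm_split(MPI_COMM_WORLD, color, key, &temp_comm);  // build the remapped layout
MPI_Comm_dup(temp_comm, &kept_comm);                     // keep an independent handle
MPI_Comm_free(&temp_comm);                               // temporary handle no longer needed
// ... use kept_comm; release it during teardown with MPI_Comm_free(&kept_comm);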
@@ -133,6 +133,7 @@ class SharedMemory
 
  public:
   SharedMemory() {};
+  ~SharedMemory();
   ///////////////////////////////////////////////////////////////////////////////////////
   // set the buffers & sizes
   ///////////////////////////////////////////////////////////////////////////////////////
@@ -399,5 +399,9 @@ void *SharedMemory::ShmBufferTranslate(int rank,void * local_p)
   return (void *) remote;
 }
 }
+SharedMemory::~SharedMemory()
+{
+  MPI_Comm_free(&ShmComm);
+};
 
 }
@@ -122,5 +122,7 @@ void *SharedMemory::ShmBufferTranslate(int rank,void * local_p)
 {
   return NULL;
 }
+SharedMemory::~SharedMemory()
+{};
 
 }