mirror of https://github.com/paboyle/Grid.git
synced 2024-11-10 07:55:35 +00:00

Merge branch 'develop' into feature/scalar_adjointFT

This commit is contained in commit f6ba2b95ce

TODO (12 changed lines)
@@ -3,19 +3,19 @@ TODO:

 Large item work list:

-1)- BG/Q port and check
+1)- BG/Q port and check ; Andrew says ok.
 2)- Christoph's local basis expansion Lanczos
-3)- Precision conversion and sort out localConvert <-- partial
-  - Consistent linear solver flop count/rate -- PARTIAL, time but no flop/s yet
+3a)- RNG I/O in ILDG/SciDAC (minor)
+3b)- Precision conversion and sort out localConvert <-- partial/easy
+3c)- Consistent linear solver flop count/rate -- PARTIAL, time but no flop/s yet
 4)- Physical propagator interface
 5)- Conserved currents
 6)- Multigrid Wilson and DWF, compare to other Multigrid implementations
 7)- HDCR resume

 Recent DONE

--- MultiRHS with spread out extra dim -- Go through filesystem with SciDAC I/O ; <-- DONE ; bmark cori
+-- MultiRHS with spread out extra dim -- Go through filesystem with SciDAC I/O. <--- DONE
 -- Lanczos Remove DenseVector, DenseMatrix; Use Eigen instead. <-- DONE
 -- GaugeFix into central location <-- DONE
 -- Scidac and Ildg metadata handling <-- DONE
@@ -40,7 +40,7 @@ int main (int argc, char ** argv)
   std::vector<int> simd_layout = GridDefaultSimd(Nd,vComplex::Nsimd());
   std::vector<int> mpi_layout  = GridDefaultMpi();
   GridCartesian Grid(latt_size,simd_layout,mpi_layout);
-  GridRedBlackCartesian RBGrid(latt_size,simd_layout,mpi_layout);
+  GridRedBlackCartesian RBGrid(&Grid);

   int threads = GridThread::GetThreads();
   std::cout<<GridLogMessage << "Grid is setup to use "<<threads<<" threads"<<std::endl;
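This one-line change recurs across the test sources in this commit: a GridRedBlackCartesian is no longer built from the raw (lattice, SIMD, MPI) layout triple but from a pointer to the full grid it checkerboards, so it inherits layout and communicator from its parent. A minimal sketch of the pattern, with the same variable names the hunks use:

    GridCartesian          Grid(latt_size, simd_layout, mpi_layout);
    // Old: GridRedBlackCartesian RBGrid(latt_size, simd_layout, mpi_layout);
    GridRedBlackCartesian  RBGrid(&Grid);   // new: derive from the full grid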
@@ -58,7 +58,7 @@ int main (int argc, char ** argv)
   std::vector<int> simd_layout = GridDefaultSimd(Nd,vComplex::Nsimd());
   std::vector<int> mpi_layout  = GridDefaultMpi();
   GridCartesian Grid(latt_size,simd_layout,mpi_layout);
-  GridRedBlackCartesian RBGrid(latt_size,simd_layout,mpi_layout);
+  GridRedBlackCartesian RBGrid(&Grid);

   int threads = GridThread::GetThreads();
   std::cout<<GridLogMessage << "Grid is setup to use "<<threads<<" threads"<<std::endl;
@@ -93,7 +93,7 @@ int main (int argc, char ** argv)
   std::cout << latt_size.back() << "\t\t";

   GridCartesian Grid(latt_size,simd_layout,mpi_layout);
-  GridRedBlackCartesian RBGrid(latt_size,simd_layout,mpi_layout);
+  GridRedBlackCartesian RBGrid(&Grid);

   GridParallelRNG pRNG(&Grid); pRNG.SeedFixedIntegers(seeds);
   LatticeGaugeField Umu(&Grid); random(pRNG,Umu);
@@ -45,31 +45,16 @@ Author: Peter Boyle <paboyle@ph.ed.ac.uk>
 #include <Grid/algorithms/iterative/SchurRedBlack.h>
 #include <Grid/algorithms/iterative/ConjugateGradientMultiShift.h>
 #include <Grid/algorithms/iterative/ConjugateGradientMixedPrec.h>
+#include <Grid/algorithms/iterative/BlockConjugateGradient.h>
 #include <Grid/algorithms/iterative/ConjugateGradientReliableUpdate.h>

-// Lanczos support
-//#include <Grid/algorithms/iterative/MatrixUtils.h>
 #include <Grid/algorithms/iterative/ImplicitlyRestartedLanczos.h>
 #include <Grid/algorithms/CoarsenedMatrix.h>
 #include <Grid/algorithms/FFT.h>

-// Eigen/lanczos
 // EigCg
-// MCR
 // Pcg
-// Multishift CG
 // Hdcg
 // GCR
 // etc..
-
-// integrator/Leapfrog
-// integrator/Omelyan
-// integrator/ForceGradient
-
-// montecarlo/hmc
-// montecarlo/rhmc
-// montecarlo/metropolis
-// etc...
-

 #endif
@@ -52,8 +52,8 @@ class ConjugateGradient : public OperatorFunction<Field> {
         MaxIterations(maxit),
         ErrorOnNoConverge(err_on_no_conv){};

-  void operator()(LinearOperatorBase<Field> &Linop, const Field &src,
-                  Field &psi) {
+  void operator()(LinearOperatorBase<Field> &Linop, const Field &src, Field &psi) {
     psi.checkerboard = src.checkerboard;
     conformable(psi, src);
@@ -49,6 +49,8 @@ public:
   template<class object> friend class Lattice;

   GridBase(const std::vector<int> & processor_grid) : CartesianCommunicator(processor_grid) {};
+  GridBase(const std::vector<int> & processor_grid,
+           const CartesianCommunicator &parent) : CartesianCommunicator(processor_grid,parent) {};

   virtual ~GridBase() = default;
@@ -213,9 +215,6 @@ public:
     assert(lidx<lSites());
     Lexicographic::CoorFromIndex(lcoor,lidx,_ldimensions);
   }
-
-
-
   void GlobalCoorToGlobalIndex(const std::vector<int> & gcoor,int & gidx){
     gidx=0;
     int mult=1;
@@ -61,9 +61,29 @@ public:
   virtual int CheckerBoardShift(int source_cb,int dim,int shift, int osite){
     return shift;
   }
+  /////////////////////////////////////////////////////////////////////////
+  // Constructor takes a parent grid and possibly subdivides communicator.
+  /////////////////////////////////////////////////////////////////////////
   GridCartesian(const std::vector<int> &dimensions,
                 const std::vector<int> &simd_layout,
-                const std::vector<int> &processor_grid) : GridBase(processor_grid)
+                const std::vector<int> &processor_grid,
+                const GridCartesian &parent) : GridBase(processor_grid,parent)
+  {
+    Init(dimensions,simd_layout,processor_grid);
+  }
+  /////////////////////////////////////////////////////////////////////////
+  // Construct from comm world
+  /////////////////////////////////////////////////////////////////////////
+  GridCartesian(const std::vector<int> &dimensions,
+                const std::vector<int> &simd_layout,
+                const std::vector<int> &processor_grid) : GridBase(processor_grid)
+  {
+    Init(dimensions,simd_layout,processor_grid);
+  }
+
+  void Init(const std::vector<int> &dimensions,
+            const std::vector<int> &simd_layout,
+            const std::vector<int> &processor_grid)
   {
     ///////////////////////
     // Grid information
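The new four-argument constructor is the entry point for running a grid on a subset of a parent grid's ranks; the three-argument form keeps the old attach-to-world behaviour. A hedged sketch of the intended use, mirroring the 1^4 split in the new test at the bottom of this diff (names illustrative):

    std::vector<int> mpi_split(mpi_layout.size(), 1);               // one rank per child
    GridCartesian UGrid(latt_size, simd_layout, mpi_layout);        // world grid
    GridCartesian SGrid(latt_size, simd_layout, mpi_split, UGrid);  // child of UGrid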
@@ -112,24 +112,57 @@ public:
   }
 };

-  GridRedBlackCartesian(const GridBase *base) : GridRedBlackCartesian(base->_fdimensions,base->_simd_layout,base->_processors) {};
+  ////////////////////////////////////////////////////////////
+  // Create Redblack from original grid; require full grid pointer ?
+  ////////////////////////////////////////////////////////////
+  GridRedBlackCartesian(const GridBase *base) : GridBase(base->_processors,*base)
+  {
+    int dims = base->_ndimension;
+    std::vector<int> checker_dim_mask(dims,1);
+    int checker_dim = 0;
+    Init(base->_fdimensions,base->_simd_layout,base->_processors,checker_dim_mask,checker_dim);
+  };

-  GridRedBlackCartesian(const std::vector<int> &dimensions,
+  ////////////////////////////////////////////////////////////
+  // Create redblack from original grid, with non-trivial checker dim mask
+  ////////////////////////////////////////////////////////////
+  GridRedBlackCartesian(const GridBase *base,
+                        const std::vector<int> &checker_dim_mask,
+                        int checker_dim
+                        ) : GridBase(base->_processors,*base)
+  {
+    Init(base->_fdimensions,base->_simd_layout,base->_processors,checker_dim_mask,checker_dim) ;
+  }
+#if 0
+  ////////////////////////////////////////////////////////////
+  // Create redblack grid ;; deprecate these. Should not
+  // need direct creation of redblack without a full grid to base on
+  ////////////////////////////////////////////////////////////
+  GridRedBlackCartesian(const GridBase *base,
+                        const std::vector<int> &dimensions,
                         const std::vector<int> &simd_layout,
                         const std::vector<int> &processor_grid,
                         const std::vector<int> &checker_dim_mask,
                         int checker_dim
-                        ) : GridBase(processor_grid)
+                        ) : GridBase(processor_grid,*base)
   {
     Init(dimensions,simd_layout,processor_grid,checker_dim_mask,checker_dim);
   }
-  GridRedBlackCartesian(const std::vector<int> &dimensions,
+
+  ////////////////////////////////////////////////////////////
+  // Create redblack grid
+  ////////////////////////////////////////////////////////////
+  GridRedBlackCartesian(const GridBase *base,
+                        const std::vector<int> &dimensions,
                         const std::vector<int> &simd_layout,
-                        const std::vector<int> &processor_grid) : GridBase(processor_grid)
+                        const std::vector<int> &processor_grid) : GridBase(processor_grid,*base)
   {
     std::vector<int> checker_dim_mask(dimensions.size(),1);
-    Init(dimensions,simd_layout,processor_grid,checker_dim_mask,0);
+    int checker_dim = 0;
+    Init(dimensions,simd_layout,processor_grid,checker_dim_mask,checker_dim);
   }
+#endif

   void Init(const std::vector<int> &dimensions,
             const std::vector<int> &simd_layout,
             const std::vector<int> &processor_grid,
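After this rework every surviving public GridRedBlackCartesian constructor takes the full grid as the single source of truth for dimensions, SIMD layout and processor grid; only the checkerboard parameters remain free. A sketch, where the mask form matches the Test_cshift_red_black changes below (those tests pass a mask with mask[0]=0):

    GridCartesian         Grid(latt_size, simd_layout, mpi_layout);
    GridRedBlackCartesian RBGrid(&Grid);            // checkerboard every dimension
    std::vector<int> mask(Nd, 1); mask[0] = 0;      // leave dimension 0 unchecked
    GridRedBlackCartesian RBMasked(&Grid, mask, 1); // explicit mask, checker_dim = 1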
@@ -67,7 +67,7 @@ void CartesianCommunicator::ShmBufferFreeAll(void) {
 /////////////////////////////////
 // Grid information queries
 /////////////////////////////////
 int CartesianCommunicator::Dimensions(void)        { return _ndimension; };
 int CartesianCommunicator::IsBoss(void)            { return _processor==0; };
 int CartesianCommunicator::BossRank(void)          { return 0; };
 int CartesianCommunicator::ThisRank(void)          { return _processor; };
@@ -147,8 +147,13 @@ void *CartesianCommunicator::ShmBufferTranslate(int rank,void * local_p) {
 }
 void CartesianCommunicator::ShmInitGeneric(void){
 #if 1
-  int mmap_flag = MAP_SHARED | MAP_ANONYMOUS;
+  int mmap_flag =0;
+#ifdef MAP_ANONYMOUS
+  mmap_flag = mmap_flag| MAP_SHARED | MAP_ANONYMOUS;
+#endif
+#ifdef MAP_ANON
+  mmap_flag = mmap_flag| MAP_SHARED | MAP_ANON;
+#endif
 #ifdef MAP_HUGETLB
   if ( Hugepages ) mmap_flag |= MAP_HUGETLB;
 #endif
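The flag rewrite is a portability fix: MAP_ANONYMOUS is not in POSIX, and headers disagree on the spelling (glibc defines MAP_ANONYMOUS, while BSD-derived systems including macOS use MAP_ANON; where both exist they typically name the same bit, so accumulating under #ifdef guards is harmless). A standalone sketch of the same pattern:

    #include <sys/mman.h>

    static int anonymous_shared_flags(void) {
      int flags = 0;
    #ifdef MAP_ANONYMOUS
      flags |= MAP_SHARED | MAP_ANONYMOUS;  // Linux/glibc spelling
    #endif
    #ifdef MAP_ANON
      flags |= MAP_SHARED | MAP_ANON;       // BSD/macOS spelling
    #endif
      return flags;                         // use with mmap(..., flags, -1, 0)
    }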
@@ -83,6 +83,7 @@ class CartesianCommunicator {
   std::vector<MPI_Comm> communicator_halo;

   typedef MPI_Request CommsRequest_t;
+
 #else
   typedef int CommsRequest_t;
 #endif
@@ -147,12 +148,28 @@ class CartesianCommunicator {
   // Must call in Grid startup
   ////////////////////////////////////////////////
   static void Init(int *argc, char ***argv);

   ////////////////////////////////////////////////
-  // Constructor of any given grid
+  // Constructors to sub-divide a parent communicator
+  // and default to comm world
   ////////////////////////////////////////////////
+  CartesianCommunicator(const std::vector<int> &processors,const CartesianCommunicator &parent);
   CartesianCommunicator(const std::vector<int> &pdimensions_in);

   virtual ~CartesianCommunicator();

+ private:
+#if defined (GRID_COMMS_MPI)
+  //|| defined (GRID_COMMS_MPI3)
+  ////////////////////////////////////////////////
+  // Private initialise from an MPI communicator
+  // Can use after an MPI_Comm_split, but hidden from user so private
+  ////////////////////////////////////////////////
+  void InitFromMPICommunicator(const std::vector<int> &processors, MPI_Comm communicator_base);
+#endif
+ public:
+>>>>>>> develop

   ////////////////////////////////////////////////////////////////////////////////////////
   // Wraps MPI_Cart routines, or implements equivalent on other impls
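The split constructor is the only public route to a sub-communicator; InitFromMPICommunicator stays private because it accepts a raw MPI_Comm (for example the result of MPI_Comm_split) that callers should never supply directly. A hedged usage sketch:

    std::vector<int> procs = GridDefaultMpi();   // parent processor layout
    std::vector<int> child(procs.size(), 1);     // trivial 1^Nd child layout
    CartesianCommunicator parent(procs);         // attaches to comm world
    CartesianCommunicator sub(child, parent);    // sub-divides the parent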
@@ -53,24 +53,90 @@ void CartesianCommunicator::Init(int *argc, char ***argv) {
   ShmInitGeneric();
 }

 CartesianCommunicator::CartesianCommunicator(const std::vector<int> &processors)
+{
+  InitFromMPICommunicator(processors,communicator_world);
+  // std::cout << "Passed communicator world to a new communicator" <<communicator<<std::endl;
+}
+
+CartesianCommunicator::CartesianCommunicator(const std::vector<int> &processors,const CartesianCommunicator &parent)
 {
   _ndimension = processors.size();
-  std::vector<int> periodic(_ndimension,1);
+  assert(_ndimension = parent._ndimension);
+
+  //////////////////////////////////////////////////////////////////////////////////////////////////////
+  // split the communicator
+  //////////////////////////////////////////////////////////////////////////////////////////////////////
+  int Nparent;
+  MPI_Comm_size(parent.communicator,&Nparent);
+
+  int childsize=1;
+  for(int d=0;d<processors.size();d++) {
+    childsize *= processors[d];
+  }
+  int Nchild = Nparent/childsize;
+  assert (childsize * Nchild == Nparent);
+
+  int prank;  MPI_Comm_rank(parent.communicator,&prank);
+  int crank = prank % childsize;
+  int ccomm = prank / childsize;
+
+  MPI_Comm comm_split;
+  if ( Nchild > 1 ) {
+
+    std::cout << GridLogMessage<<"Child communicator of "<< std::hex << parent.communicator << std::dec<<std::endl;
+    std::cout << GridLogMessage<<" parent grid["<< parent._ndimension<<"] ";
+    for(int d=0;d<parent._processors.size();d++)  std::cout << parent._processors[d] << " ";
+    std::cout<<std::endl;
+
+    std::cout << GridLogMessage<<" child grid["<< _ndimension <<"] ";
+    for(int d=0;d<processors.size();d++)  std::cout << processors[d] << " ";
+    std::cout<<std::endl;
+
+    int ierr= MPI_Comm_split(parent.communicator, ccomm,crank,&comm_split);
+    assert(ierr==0);
+    //////////////////////////////////////////////////////////////////////////////////////////////////////
+    // Declare victory
+    //////////////////////////////////////////////////////////////////////////////////////////////////////
+    std::cout << GridLogMessage<<"Divided communicator "<< parent._Nprocessors<<" into "
+              <<Nchild <<" communicators with " << childsize << " ranks"<<std::endl;
+  } else {
+    comm_split=parent.communicator;
+    // std::cout << "Passed parental communicator to a new communicator" <<std::endl;
+  }
+
+  //////////////////////////////////////////////////////////////////////////////////////////////////////
+  // Set up from the new split communicator
+  //////////////////////////////////////////////////////////////////////////////////////////////////////
+  InitFromMPICommunicator(processors,comm_split);
+}
+
+//////////////////////////////////////////////////////////////////////////////////////////////////////
+// Take an MPI_Comm and self assemble
+//////////////////////////////////////////////////////////////////////////////////////////////////////
+void CartesianCommunicator::InitFromMPICommunicator(const std::vector<int> &processors, MPI_Comm communicator_base)
+{
+  // if ( communicator_base != communicator_world ) {
+  //   std::cout << "Cartesian communicator created with a non-world communicator"<<std::endl;
+  // }
+  _ndimension = processors.size();
+  _processor_coor.resize(_ndimension);
+
+  /////////////////////////////////
+  // Count the requested nodes
+  /////////////////////////////////
   _Nprocessors=1;
   _processors = processors;
-  _processor_coor.resize(_ndimension);
-
-  MPI_Cart_create(communicator_world, _ndimension,&_processors[0],&periodic[0],1,&communicator);
-  MPI_Comm_rank(communicator,&_processor);
-  MPI_Cart_coords(communicator,_processor,_ndimension,&_processor_coor[0]);

   for(int i=0;i<_ndimension;i++){
     _Nprocessors*=_processors[i];
   }

-  int Size;
+  std::vector<int> periodic(_ndimension,1);
+  MPI_Cart_create(communicator_base, _ndimension,&_processors[0],&periodic[0],1,&communicator);
+  MPI_Comm_rank(communicator,&_processor);
+  MPI_Cart_coords(communicator,_processor,_ndimension,&_processor_coor[0]);
+
+  int Size;
   MPI_Comm_size(communicator,&Size);

   assert(Size==_Nprocessors);
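The child placement above is pure blocking arithmetic: childsize consecutive parent ranks form one child communicator, with crank = prank % childsize the rank inside the child and ccomm = prank / childsize the colour handed to MPI_Comm_split. A worked example with hypothetical numbers: Nparent = 16 and a 1x1x2x2 child layout give childsize = 4 and Nchild = 16/4 = 4; parent rank 13 becomes rank 13 % 4 = 1 of child communicator 13 / 4 = 3, alongside parent ranks 12, 14 and 15.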
@@ -450,6 +450,15 @@ void CartesianCommunicator::ProcessorCoorFromRank(int rank, std::vector<int> &coor)
   assert(lr!=-1);
   Lexicographic::CoorFromIndex(coor,lr,_processors);
 }

+//////////////////////////////////
+// Try to subdivide communicator
+//////////////////////////////////
+CartesianCommunicator::CartesianCommunicator(const std::vector<int> &processors,const CartesianCommunicator &parent)
+  : CartesianCommunicator(processors)
+{
+  std::cout << "Attempts to split MPI3 communicators will fail until implemented" <<std::endl;
+}
 CartesianCommunicator::CartesianCommunicator(const std::vector<int> &processors)
 {
   int ierr;
@@ -38,6 +38,9 @@ void CartesianCommunicator::Init(int *argc, char *** arv)
   ShmInitGeneric();
 }

+CartesianCommunicator::CartesianCommunicator(const std::vector<int> &processors,const CartesianCommunicator &parent)
+  : CartesianCommunicator(processors) {}
+
 CartesianCommunicator::CartesianCommunicator(const std::vector<int> &processors)
 {
   _processors = processors;
@@ -75,6 +75,11 @@ void CartesianCommunicator::Init(int *argc, char ***argv) {
   ShmInitGeneric();
 }

+CartesianCommunicator::CartesianCommunicator(const std::vector<int> &processors,const CartesianCommunicator &parent)
+  : CartesianCommunicator(processors)
+{
+  std::cout << "Attempts to split SHMEM communicators will fail " <<std::endl;
+}
 CartesianCommunicator::CartesianCommunicator(const std::vector<int> &processors)
 {
   _ndimension = processors.size();
@@ -63,7 +63,7 @@ SOFTWARE.
     #error "unsupported Clang version - see https://github.com/nlohmann/json#supported-compilers"
 #endif
 #elif defined(__GNUC__)
-#if (__GNUC__ * 10000 + __GNUC_MINOR__ * 100 + __GNUC_PATCHLEVEL__) < 40900
+#if (__GNUC__ * 10000 + __GNUC_MINOR__ * 100 + __GNUC_PATCHLEVEL__) < 40805
     #error "unsupported GCC version - see https://github.com/nlohmann/json#supported-compilers"
 #endif
 #endif
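The encoded value packs the compiler version as major*10000 + minor*100 + patch, so lowering the floor from 40900 (GCC 4.9.0) to 40805 admits GCC 4.8.5 (4*10000 + 8*100 + 5 = 40805) while 4.8.4 (40804) and earlier still hit the #error.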
@@ -544,7 +544,6 @@ static void sliceInnerProductMatrix( Eigen::MatrixXcd &mat, const Lattice<vobj> &Left)
     for(int i=0;i<Nblock;i++){
     for(int j=0;j<Nblock;j++){
       auto tmp = innerProduct(Left[i],Right[j]);
-      // vector_typeD rtmp = TensorRemove(tmp);
       auto rtmp = TensorRemove(tmp);
       mat_thread(i,j) += Reduce(rtmp);
     }}
@@ -84,10 +84,6 @@ namespace QCD {
     stream << "GRID_";
     stream << ScidacWordMnemonic<stype>();

-    // std::cout << " Lorentz N/S/V/M : " << _LorentzN<<" "<<_LorentzScalar<<"/"<<_LorentzVector<<"/"<<_LorentzMatrix<<std::endl;
-    // std::cout << " Spin    N/S/V/M : " << _SpinN   <<" "<<_SpinScalar   <<"/"<<_SpinVector   <<"/"<<_SpinMatrix<<std::endl;
-    // std::cout << " Colour  N/S/V/M : " << _ColourN <<" "<<_ColourScalar <<"/"<<_ColourVector <<"/"<<_ColourMatrix<<std::endl;
-
     if ( _LorentzVector )   stream << "_LorentzVector"<<_LorentzN;
     if ( _LorentzMatrix )   stream << "_LorentzMatrix"<<_LorentzN;
@@ -182,7 +178,7 @@ class GridLimeReader : public BinaryIO {
   /////////////////////////////////////////////
   // Open the file
   /////////////////////////////////////////////
-  void open(std::string &_filename)
+  void open(const std::string &_filename)
   {
     filename= _filename;
     File = fopen(filename.c_str(), "r");
@@ -210,19 +206,33 @@ class GridLimeReader : public BinaryIO {

     while ( limeReaderNextRecord(LimeR) == LIME_SUCCESS ) {

-      std::cout << GridLogMessage << limeReaderType(LimeR) <<std::endl;
+      uint64_t file_bytes =limeReaderBytes(LimeR);

-      if ( strncmp(limeReaderType(LimeR), record_name.c_str(),strlen(record_name.c_str()) ) ) {
+      // std::cout << GridLogMessage << limeReaderType(LimeR) << " "<< file_bytes <<" bytes "<<std::endl;
+      // std::cout << GridLogMessage<< " readLimeObject seeking "<< record_name <<" found record :" <<limeReaderType(LimeR) <<std::endl;
+
+      if ( !strncmp(limeReaderType(LimeR), record_name.c_str(),strlen(record_name.c_str()) ) ) {
+
+        // std::cout << GridLogMessage<< " readLimeLatticeBinaryObject matches ! " <<std::endl;
+
+        uint64_t PayloadSize = sizeof(sobj) * field._grid->_gsites;
+
+        // std::cout << "R sizeof(sobj)= " <<sizeof(sobj)<<std::endl;
+        // std::cout << "R Gsites " <<field._grid->_gsites<<std::endl;
+        // std::cout << "R Payload expected " <<PayloadSize<<std::endl;
+        // std::cout << "R file size " <<file_bytes <<std::endl;
+
+        assert(PayloadSize == file_bytes);// Must match or user error

         off_t offset= ftell(File);
+        // std::cout << " ReadLatticeObject from offset "<<offset << std::endl;
         BinarySimpleMunger<sobj,sobj> munge;
-        BinaryIO::readLatticeObject< sobj, sobj >(field, filename, munge, offset, format,nersc_csum,scidac_csuma,scidac_csumb);
+        BinaryIO::readLatticeObject< vobj, sobj >(field, filename, munge, offset, format,nersc_csum,scidac_csuma,scidac_csumb);

         /////////////////////////////////////////////
         // Insist checksum is next record
         /////////////////////////////////////////////
-        readLimeObject(scidacChecksum_,std::string("scidacChecksum"),record_name);
+        readLimeObject(scidacChecksum_,std::string("scidacChecksum"),std::string(SCIDAC_CHECKSUM));

         /////////////////////////////////////////////
         // Verify checksums
@@ -242,11 +252,19 @@ class GridLimeReader : public BinaryIO {
     // should this be a do while; can we miss a first record??
     while ( limeReaderNextRecord(LimeR) == LIME_SUCCESS ) {

+      // std::cout << GridLogMessage<< " readLimeObject seeking "<< record_name <<" found record :" <<limeReaderType(LimeR) <<std::endl;
+
       uint64_t nbytes = limeReaderBytes(LimeR);//size of this record (configuration)

-      if ( strncmp(limeReaderType(LimeR), record_name.c_str(),strlen(record_name.c_str()) ) ) {
+      if ( !strncmp(limeReaderType(LimeR), record_name.c_str(),strlen(record_name.c_str()) ) ) {
+
+        // std::cout << GridLogMessage<< " readLimeObject matches ! " << record_name <<std::endl;

         std::vector<char> xmlc(nbytes+1,'\0');
         limeReaderReadData((void *)&xmlc[0], &nbytes, LimeR);
+
+        // std::cout << GridLogMessage<< " readLimeObject matches XML " << &xmlc[0] <<std::endl;
+
         XmlReader RD(&xmlc[0],"");
         read(RD,object_name,object);
         return;
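Both reader loops above had the record-name test inverted: strncmp returns 0 on a match, so the bare if (strncmp(...)) body fired for every record except the one being sought. The corrected form is the conventional match test:

    // strncmp == 0 means the record type string matches record_name
    if ( !strncmp(limeReaderType(LimeR), record_name.c_str(),
                  strlen(record_name.c_str())) ) {
      // found the requested record: process it
    }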
@@ -261,13 +279,14 @@ class GridLimeWriter : public BinaryIO {
 public:
   ///////////////////////////////////////////////////
   // FIXME: format for RNG? Now just binary out instead
+  // FIXME: collective calls or not ?
+  //      : must know if I am the I/O boss
   ///////////////////////////////////////////////////

   FILE *File;
   LimeWriter *LimeW;
   std::string filename;

-  void open(std::string &_filename) {
+  void open(const std::string &_filename) {
     filename= _filename;
     File = fopen(filename.c_str(), "w");
     LimeW = limeCreateWriter(File); assert(LimeW != NULL );
@@ -302,14 +321,18 @@ class GridLimeWriter : public BinaryIO {
       write(WR,object_name,object);
       xmlstring = WR.XmlString();
     }
+    // std::cout << "WriteLimeObject" << record_name <<std::endl;
     uint64_t nbytes = xmlstring.size();
+    // std::cout << " xmlstring "<< nbytes<< " " << xmlstring <<std::endl;
     int err;
-    LimeRecordHeader *h = limeCreateHeader(MB, ME,(char *)record_name.c_str(), nbytes); assert(h!= NULL);
+    LimeRecordHeader *h = limeCreateHeader(MB, ME,const_cast<char *>(record_name.c_str()), nbytes);
+    assert(h!= NULL);
+
     err=limeWriteRecordHeader(h, LimeW);                    assert(err>=0);
     err=limeWriteRecordData(&xmlstring[0], &nbytes, LimeW); assert(err>=0);
     err=limeWriterCloseRecord(LimeW);                       assert(err>=0);
     limeDestroyHeader(h);
+    // std::cout << " File offset is now"<<ftell(File) << std::endl;
   }
   ////////////////////////////////////////////
   // Write a generic lattice field and csum
@@ -326,6 +349,11 @@ class GridLimeWriter : public BinaryIO {
     uint64_t PayloadSize = sizeof(sobj) * field._grid->_gsites;
     createLimeRecordHeader(record_name, 0, 0, PayloadSize);

+    // std::cout << "W sizeof(sobj)" <<sizeof(sobj)<<std::endl;
+    // std::cout << "W Gsites " <<field._grid->_gsites<<std::endl;
+    // std::cout << "W Payload expected " <<PayloadSize<<std::endl;
+
     ////////////////////////////////////////////////////////////////////
     // NB: FILE and iostream are jointly writing disjoint sequences in the
     // the same file through different file handles (integer units).
@@ -340,6 +368,7 @@ class GridLimeWriter : public BinaryIO {
     //    v)  Continue writing scidac record.
     ////////////////////////////////////////////////////////////////////
     off_t offset = ftell(File);
+    // std::cout << " Writing to offset "<<offset << std::endl;
     std::string format = getFormatString<vobj>();
     BinarySimpleMunger<sobj,sobj> munge;
     BinaryIO::writeLatticeObject<vobj,sobj>(field, filename, munge, offset, format,nersc_csum,scidac_csuma,scidac_csumb);
@@ -354,7 +383,7 @@ class GridLimeWriter : public BinaryIO {
     checksum.suma= streama.str();
     checksum.sumb= streamb.str();
     std::cout << GridLogMessage<<" writing scidac checksums "<<std::hex<<scidac_csuma<<"/"<<scidac_csumb<<std::dec<<std::endl;
-    writeLimeObject(0,1,checksum,std::string("scidacChecksum"  ),std::string(SCIDAC_CHECKSUM));
+    writeLimeObject(0,1,checksum,std::string("scidacChecksum"),std::string(SCIDAC_CHECKSUM));
   }
 };
@@ -371,11 +400,9 @@ class ScidacWriter : public GridLimeWriter {
   ////////////////////////////////////////////////
   // Write generic lattice field in scidac format
   ////////////////////////////////////////////////
   template <class vobj, class userRecord>
   void writeScidacFieldRecord(Lattice<vobj> &field,userRecord _userRecord)
   {
-    typedef typename vobj::scalar_object sobj;
-    uint64_t nbytes;
     GridBase * grid = field._grid;

     ////////////////////////////////////////
@@ -397,6 +424,66 @@ class ScidacWriter : public GridLimeWriter {
   }
 };

+
+class ScidacReader : public GridLimeReader {
+ public:
+
+  template<class SerialisableUserFile>
+  void readScidacFileRecord(GridBase *grid,SerialisableUserFile &_userFile)
+  {
+    scidacFile _scidacFile(grid);
+    readLimeObject(_scidacFile,_scidacFile.SerialisableClassName(),std::string(SCIDAC_PRIVATE_FILE_XML));
+    readLimeObject(_userFile,_userFile.SerialisableClassName(),std::string(SCIDAC_FILE_XML));
+  }
+  ////////////////////////////////////////////////
+  // Write generic lattice field in scidac format
+  ////////////////////////////////////////////////
+  template <class vobj, class userRecord>
+  void readScidacFieldRecord(Lattice<vobj> &field,userRecord &_userRecord)
+  {
+    typedef typename vobj::scalar_object sobj;
+    GridBase * grid = field._grid;
+
+    ////////////////////////////////////////
+    // fill the Grid header
+    ////////////////////////////////////////
+    FieldMetaData header;
+    scidacRecord  _scidacRecord;
+    scidacFile    _scidacFile;
+
+    //////////////////////////////////////////////
+    // Fill the Lime file record by record
+    //////////////////////////////////////////////
+    readLimeObject(header ,std::string("FieldMetaData"),std::string(GRID_FORMAT)); // Open message
+    readLimeObject(_userRecord,_userRecord.SerialisableClassName(),std::string(SCIDAC_RECORD_XML));
+    readLimeObject(_scidacRecord,_scidacRecord.SerialisableClassName(),std::string(SCIDAC_PRIVATE_RECORD_XML));
+    readLimeLatticeBinaryObject(field,std::string(ILDG_BINARY_DATA));
+  }
+  void skipPastBinaryRecord(void) {
+    std::string rec_name(ILDG_BINARY_DATA);
+    while ( limeReaderNextRecord(LimeR) == LIME_SUCCESS ) {
+      if ( !strncmp(limeReaderType(LimeR), rec_name.c_str(),strlen(rec_name.c_str()) ) ) {
+        skipPastObjectRecord(std::string(SCIDAC_CHECKSUM));
+        return;
+      }
+    }
+  }
+  void skipPastObjectRecord(std::string rec_name) {
+    while ( limeReaderNextRecord(LimeR) == LIME_SUCCESS ) {
+      if ( !strncmp(limeReaderType(LimeR), rec_name.c_str(),strlen(rec_name.c_str()) ) ) {
+        return;
+      }
+    }
+  }
+  void skipScidacFieldRecord() {
+    skipPastObjectRecord(std::string(GRID_FORMAT));
+    skipPastObjectRecord(std::string(SCIDAC_RECORD_XML));
+    skipPastObjectRecord(std::string(SCIDAC_PRIVATE_RECORD_XML));
+    skipPastBinaryRecord();
+  }
+};
+
+
 class IldgWriter : public ScidacWriter {
  public:
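The new ScidacReader mirrors ScidacWriter record for record, so a field written with writeScidacFieldRecord can be round-tripped. A hedged sketch (Umu and fname are illustrative names; emptyUserRecord is the no-op user metadata struct added later in this diff):

    emptyUserRecord record;
    ScidacWriter WR;
    WR.open(fname);
    WR.writeScidacFieldRecord(Umu, record);

    ScidacReader RD;
    RD.open(fname);
    RD.readScidacFieldRecord(Umu, record);  // or RD.skipScidacFieldRecord() to seek past it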
@@ -425,8 +512,6 @@ class IldgWriter : public ScidacWriter {
   typedef iLorentzColourMatrix<vsimd> vobj;
   typedef typename vobj::scalar_object sobj;

-  uint64_t nbytes;
-
   ////////////////////////////////////////
   // fill the Grid header
   ////////////////////////////////////////
@@ -64,6 +64,11 @@ namespace Grid {
   // file compatability, so should be correct to assume the undocumented but defacto file structure.
   /////////////////////////////////////////////////////////////////////////////////

+  struct emptyUserRecord : Serializable {
+    GRID_SERIALIZABLE_CLASS_MEMBERS(emptyUserRecord,int,dummy);
+    emptyUserRecord() { dummy=0; };
+  };
+
   ////////////////////////
   // Scidac private file xml
   // <?xml version="1.0" encoding="UTF-8"?><scidacFile><version>1.1</version><spacetime>4</spacetime><dims>16 16 16 32 </dims><volfmt>0</volfmt></scidacFile>
@@ -85,6 +85,9 @@ namespace Grid {
       nd=4;
       dimension.resize(4);
       boundary.resize(4);
+      scidac_checksuma=0;
+      scidac_checksumb=0;
+      checksum=0;
     }
   };
@@ -104,6 +107,7 @@ namespace Grid {
     header.nd = nd;
     header.dimension.resize(nd);
     header.boundary.resize(nd);
+    header.data_start = 0;
     for(int d=0;d<nd;d++) {
       header.dimension[d] = grid->_fdimensions[d];
     }
@@ -60,7 +60,7 @@ GridCartesian *SpaceTimeGrid::makeFiveDimGrid(int Ls,const GridCartesian *FourDimGrid)
     simd5.push_back(FourDimGrid->_simd_layout[d]);
     mpi5.push_back(FourDimGrid->_processors[d]);
   }
-  return new GridCartesian(latt5,simd5,mpi5);
+  return new GridCartesian(latt5,simd5,mpi5,*FourDimGrid);
 }
@@ -68,18 +68,14 @@ GridRedBlackCartesian *SpaceTimeGrid::makeFiveDimRedBlackGrid(int Ls,const GridCartesian *FourDimGrid)
 {
   int N4=FourDimGrid->_ndimension;
   int cbd=1;
-  std::vector<int> latt5(1,Ls);
-  std::vector<int> simd5(1,1);
-  std::vector<int> mpi5(1,1);
   std::vector<int> cb5(1,0);

   for(int d=0;d<N4;d++){
-    latt5.push_back(FourDimGrid->_fdimensions[d]);
-    simd5.push_back(FourDimGrid->_simd_layout[d]);
-    mpi5.push_back(FourDimGrid->_processors[d]);
     cb5.push_back(  1);
   }
-  return new GridRedBlackCartesian(latt5,simd5,mpi5,cb5,cbd);
+  GridCartesian *tmp = makeFiveDimGrid(Ls,FourDimGrid);
+  GridRedBlackCartesian *ret = new GridRedBlackCartesian(tmp,cb5,cbd);
+  delete tmp;
+  return ret;
 }
@@ -97,26 +93,24 @@ GridCartesian *SpaceTimeGrid::makeFiveDimDWFGrid(int Ls,const GridCartesian *FourDimGrid)
     simd5.push_back(1);
     mpi5.push_back(FourDimGrid->_processors[d]);
   }
-  return new GridCartesian(latt5,simd5,mpi5);
+  return new GridCartesian(latt5,simd5,mpi5,*FourDimGrid);
 }
+///////////////////////////////////////////////////
+// Interface is inefficient and forces the deletion
+// Pass in the non-redblack grid
+///////////////////////////////////////////////////
 GridRedBlackCartesian *SpaceTimeGrid::makeFiveDimDWFRedBlackGrid(int Ls,const GridCartesian *FourDimGrid)
 {
   int N4=FourDimGrid->_ndimension;
-  int nsimd = FourDimGrid->Nsimd();
   int cbd=1;
-  std::vector<int> latt5(1,Ls);
-  std::vector<int> simd5(1,nsimd);
-  std::vector<int> mpi5(1,1);
   std::vector<int> cb5(1,0);

   for(int d=0;d<N4;d++){
-    latt5.push_back(FourDimGrid->_fdimensions[d]);
-    simd5.push_back(1);
-    mpi5.push_back(FourDimGrid->_processors[d]);
     cb5.push_back(1);
   }
-  return new GridRedBlackCartesian(latt5,simd5,mpi5,cb5,cbd);
+  GridCartesian *tmp = makeFiveDimDWFGrid(Ls,FourDimGrid);
+  GridRedBlackCartesian *ret = new GridRedBlackCartesian(tmp,cb5,cbd);
+  delete tmp;
+  return ret;
 }
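Both five-dimensional red-black builders now delegate: they construct the full 5d grid, hand it to the new GridRedBlackCartesian(base, cb, cbd) constructor, and delete the temporary. As the new comment says, the interface is deliberately inefficient, paying an allocation and deletion per call, but it funnels every red-black grid through a full parent grid, which is what carries the (possibly sub-divided) communicator.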
@@ -7,7 +7,7 @@ namespace Grid{
   class Lexicographic {
   public:

-    static inline void CoorFromIndex (std::vector<int>& coor,int index,std::vector<int> &dims){
+    static inline void CoorFromIndex (std::vector<int>& coor,int index,const std::vector<int> &dims){
       int nd= dims.size();
       coor.resize(nd);
       for(int d=0;d<nd;d++){
@@ -16,7 +16,7 @@ namespace Grid{
       }
     }

-    static inline void IndexFromCoor (std::vector<int>& coor,int &index,std::vector<int> &dims){
+    static inline void IndexFromCoor (const std::vector<int>& coor,int &index,const std::vector<int> &dims){
       int nd=dims.size();
       int stride=1;
       index=0;
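Lexicographic implements the usual mixed-radix packing, index = c0 + d0*(c1 + d1*(c2 + ...)); for example with dims = {4,4,4,8}, coor = {1,2,3,5} maps to 1 + 4*2 + 16*3 + 64*5 = 377, and CoorFromIndex inverts this by repeated division and modulus. The change here is const-correctness only, letting callers pass the grid's stored dimension and coordinate vectors without copies.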
@@ -48,7 +48,7 @@ int main(int argc, char ** argv) {
   double volume = latt_size[0]*latt_size[1]*latt_size[2]*latt_size[3];

   GridCartesian           Fine(latt_size,simd_layout,mpi_layout);
-  GridRedBlackCartesian rbFine(latt_size,simd_layout,mpi_layout);
+  GridRedBlackCartesian rbFine(&Fine);
   GridParallelRNG       fRNG(&Fine);

   //  fRNG.SeedFixedIntegers(std::vector<int>({45,12,81,9});
@@ -47,7 +47,7 @@ int main (int argc, char ** argv)
   mask[0]=0;

   GridCartesian         Fine  (latt_size,simd_layout,mpi_layout);
-  GridRedBlackCartesian RBFine(latt_size,simd_layout,mpi_layout,mask,1);
+  GridRedBlackCartesian RBFine(&Fine,mask,1);

   GridParallelRNG FineRNG(&Fine);  FineRNG.SeedFixedIntegers(std::vector<int>({45,12,81,9}));
@@ -47,7 +47,7 @@ int main (int argc, char ** argv)
   mask[0]=0;

   GridCartesian         Fine  (latt_size,simd_layout,mpi_layout);
-  GridRedBlackCartesian RBFine(latt_size,simd_layout,mpi_layout,mask,1);
+  GridRedBlackCartesian RBFine(&Fine,mask,1);

   GridParallelRNG FineRNG(&Fine);  FineRNG.SeedFixedIntegers(std::vector<int>({45,12,81,9}));
@@ -47,7 +47,7 @@ int main (int argc, char ** argv)
     vol = vol * latt_size[d];
   }
   GridCartesian         GRID(latt_size,simd_layout,mpi_layout);
-  GridRedBlackCartesian RBGRID(latt_size,simd_layout,mpi_layout);
+  GridRedBlackCartesian RBGRID(&GRID);

   LatticeComplexD     one(&GRID);
   LatticeComplexD      zz(&GRID);
@@ -40,7 +40,7 @@ int main (int argc, char ** argv)
   std::vector<int> simd_layout = GridDefaultSimd(Nd,vComplex::Nsimd());
   std::vector<int> mpi_layout  = GridDefaultMpi();
   GridCartesian Grid(latt_size,simd_layout,mpi_layout);
-  GridRedBlackCartesian RBGrid(latt_size,simd_layout,mpi_layout);
+  GridRedBlackCartesian RBGrid(&Grid);

   int threads = GridThread::GetThreads();
   std::cout<<GridLogMessage << "Grid is setup to use "<<threads<<" threads"<<std::endl;
@@ -84,7 +84,7 @@ int main(int argc, char **argv) {
   double volume = latt_size[0] * latt_size[1] * latt_size[2] * latt_size[3];

   GridCartesian Fine(latt_size, simd_layout, mpi_layout);
-  GridRedBlackCartesian rbFine(latt_size, simd_layout, mpi_layout);
+  GridRedBlackCartesian rbFine(&Fine);
   GridParallelRNG FineRNG(&Fine);
   GridSerialRNG SerialRNG;
   GridSerialRNG SerialRNG1;
@@ -40,7 +40,7 @@ int main (int argc, char ** argv)
   std::vector<int> simd_layout = GridDefaultSimd(Nd,vComplex::Nsimd());
   std::vector<int> mpi_layout  = GridDefaultMpi();
   GridCartesian Grid(latt_size,simd_layout,mpi_layout);
-  GridRedBlackCartesian RBGrid(latt_size,simd_layout,mpi_layout);
+  GridRedBlackCartesian RBGrid(&Grid);

   int threads = GridThread::GetThreads();
   std::cout<<GridLogMessage << "Grid is setup to use "<<threads<<" threads"<<std::endl;
@@ -51,7 +51,7 @@ int main (int argc, char ** argv)
   std::vector<int> simd_layout = GridDefaultSimd(Nd,vComplex::Nsimd());
   std::vector<int> mpi_layout  = GridDefaultMpi();
   GridCartesian Grid(latt_size,simd_layout,mpi_layout);
-  GridRedBlackCartesian RBGrid(latt_size,simd_layout,mpi_layout);
+  GridRedBlackCartesian RBGrid(&Grid);

   int threads = GridThread::GetThreads();
   std::cout<<GridLogMessage << "Grid is setup to use "<<threads<<" threads"<<std::endl;
@@ -52,7 +52,7 @@ int main (int argc, char ** argv)
   std::vector<int> simd_layout = GridDefaultSimd(Nd,vComplex::Nsimd());
   std::vector<int> mpi_layout  = GridDefaultMpi();
   GridCartesian Grid(latt_size,simd_layout,mpi_layout);
-  GridRedBlackCartesian RBGrid(latt_size,simd_layout,mpi_layout);
+  GridRedBlackCartesian RBGrid(&Grid);

   int threads = GridThread::GetThreads();
   std::cout<<GridLogMessage << "Grid is setup to use "<<threads<<" threads"<<std::endl;
@@ -42,7 +42,7 @@ int main (int argc, char ** argv)
   std::vector<int> mpi_layout  = GridDefaultMpi();

   GridCartesian Grid(latt_size,simd_layout,mpi_layout);
-  GridRedBlackCartesian RBGrid(latt_size,simd_layout,mpi_layout);
+  GridRedBlackCartesian RBGrid(&Grid);

   int threads = GridThread::GetThreads();
   std::cout<<GridLogMessage << "Grid is setup to use "<<threads<<" threads"<<std::endl;
@@ -42,7 +42,7 @@ int main (int argc, char ** argv)
   std::vector<int> mpi_layout  = GridDefaultMpi();

   GridCartesian Grid(latt_size,simd_layout,mpi_layout);
-  GridRedBlackCartesian RBGrid(latt_size,simd_layout,mpi_layout);
+  GridRedBlackCartesian RBGrid(&Grid);

   int threads = GridThread::GetThreads();
   std::cout<<GridLogMessage << "Grid is setup to use "<<threads<<" threads"<<std::endl;
@@ -42,7 +42,7 @@ int main (int argc, char ** argv)
   std::vector<int> mpi_layout  = GridDefaultMpi();

   GridCartesian Grid(latt_size,simd_layout,mpi_layout);
-  GridRedBlackCartesian RBGrid(latt_size,simd_layout,mpi_layout);
+  GridRedBlackCartesian RBGrid(&Grid);

   int threads = GridThread::GetThreads();
   std::cout<<GridLogMessage << "Grid is setup to use "<<threads<<" threads"<<std::endl;
@@ -42,7 +42,7 @@ int main (int argc, char ** argv)
   std::vector<int> mpi_layout  = GridDefaultMpi();

   GridCartesian Grid(latt_size,simd_layout,mpi_layout);
-  GridRedBlackCartesian RBGrid(latt_size,simd_layout,mpi_layout);
+  GridRedBlackCartesian RBGrid(&Grid);

   int threads = GridThread::GetThreads();
   std::cout<<GridLogMessage << "Grid is setup to use "<<threads<<" threads"<<std::endl;
@@ -71,7 +71,7 @@ int main(int argc, char **argv) {
   std::vector<int> simd_layout = GridDefaultSimd(Nd, vComplex::Nsimd());
   std::vector<int> mpi_layout = GridDefaultMpi();
   GridCartesian Grid(latt_size, simd_layout, mpi_layout);
-  GridRedBlackCartesian RBGrid(latt_size, simd_layout, mpi_layout);
+  GridRedBlackCartesian RBGrid(&Grid);

   std::vector<int> seeds({1, 2, 3, 4, 5});
   GridSerialRNG sRNG;
@@ -149,4 +149,4 @@ JSON
 }


 */
tests/solver/Test_dwf_mrhs_cg.cc (new file, 195 lines)
@@ -0,0 +1,195 @@
/*************************************************************************************

    Grid physics library, www.github.com/paboyle/Grid

    Source file: ./tests/Test_dwf_mrhs_cg.cc

    Copyright (C) 2015

Author: Peter Boyle <paboyle@ph.ed.ac.uk>

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License along
    with this program; if not, write to the Free Software Foundation, Inc.,
    51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.

    See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */
#include <Grid/Grid.h>
#include <Grid/algorithms/iterative/BlockConjugateGradient.h>

using namespace std;
using namespace Grid;
using namespace Grid::QCD;

int main (int argc, char ** argv)
{
  typedef typename DomainWallFermionR::FermionField FermionField;
  typedef typename DomainWallFermionR::ComplexField ComplexField;
  typename DomainWallFermionR::ImplParams params;

  const int Ls=8;

  Grid_init(&argc,&argv);

  std::vector<int> latt_size   = GridDefaultLatt();
  std::vector<int> simd_layout = GridDefaultSimd(Nd,vComplex::Nsimd());
  std::vector<int> mpi_layout  = GridDefaultMpi();
  std::vector<int> mpi_split (mpi_layout.size(),1);

  std::cout << "UGrid (world root)"<<std::endl;
  GridCartesian * UGrid = SpaceTimeGrid::makeFourDimGrid(GridDefaultLatt(), GridDefaultSimd(Nd,vComplex::Nsimd()),GridDefaultMpi());

  std::cout << "FGrid (child of UGrid)"<<std::endl;
  GridCartesian * FGrid = SpaceTimeGrid::makeFiveDimGrid(Ls,UGrid);

  int nrhs = UGrid->RankCount() ;
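  // nrhs: one right-hand side per MPI rank in the world communicator.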

  /////////////////////////////////////////////
  // Split into 1^4 mpi communicators
  /////////////////////////////////////////////
  std::cout << "SGrid (world root)"<<std::endl;
  GridCartesian * SGrid = new GridCartesian(GridDefaultLatt(),
                                            GridDefaultSimd(Nd,vComplex::Nsimd()),
                                            mpi_split,
                                            *UGrid);
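  // mpi_split is all ones, so each rank becomes its own 1^4 communicator
  // and can run an independent solve.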

  GridCartesian * SFGrid = SpaceTimeGrid::makeFiveDimGrid(Ls,SGrid);
  std::cout << "SFGrid"<<std::endl;
  GridRedBlackCartesian * SrbGrid = SpaceTimeGrid::makeFourDimRedBlackGrid(SGrid);
  std::cout << "SrbGrid"<<std::endl;
  GridRedBlackCartesian * SFrbGrid = SpaceTimeGrid::makeFiveDimRedBlackGrid(Ls,SGrid);
  std::cout << "SFrbGrid"<<std::endl;

  ///////////////////////////////////////////////
  // Set up the problem as a 4d spreadout job
  ///////////////////////////////////////////////
  std::vector<int> seeds({1,2,3,4});

  GridParallelRNG pRNG(UGrid );  pRNG.SeedFixedIntegers(seeds);
  GridParallelRNG pRNG5(FGrid);  pRNG5.SeedFixedIntegers(seeds);
  std::vector<FermionField> src(nrhs,FGrid);
  std::vector<FermionField> result(nrhs,FGrid);

  for(int s=0;s<nrhs;s++) random(pRNG5,src[s]);
  for(int s=0;s<nrhs;s++) result[s] = zero;

  LatticeGaugeField Umu(UGrid); SU3::HotConfiguration(pRNG,Umu);
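  // HotConfiguration: a random ("hot") SU(3) gauge field on the world grid.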

  ///////////////////////////////////////////////////////////////
  // Bounce these fields to disk
  ///////////////////////////////////////////////////////////////

  std::cout << GridLogMessage << "****************************************************************** "<<std::endl;
  std::cout << GridLogMessage << " Writing out in parallel view "<<std::endl;
  std::cout << GridLogMessage << "****************************************************************** "<<std::endl;
  emptyUserRecord record;
  std::string file("./scratch.scidac");
  std::string filef("./scratch.scidac.ferm");
  int me = UGrid->ThisRank();
  LatticeGaugeField s_Umu(SGrid);
  FermionField s_src(SFGrid);
  FermionField s_res(SFGrid);
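  // s_Umu/s_src/s_res live on the split grids; the SciDAC round trip below
  // moves data from the world view to the per-rank view via the filesystem.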

  {
    FGrid->Barrier();
    ScidacWriter _ScidacWriter;
    _ScidacWriter.open(file);
    std::cout << GridLogMessage << "****************************************************************** "<<std::endl;
    std::cout << GridLogMessage << " Writing out gauge field "<<std::endl;
    std::cout << GridLogMessage << "****************************************************************** "<<std::endl;
    _ScidacWriter.writeScidacFieldRecord(Umu,record);
    _ScidacWriter.close();
    FGrid->Barrier();
    std::cout << GridLogMessage << "****************************************************************** "<<std::endl;
    std::cout << GridLogMessage << " Reading in gauge field "<<std::endl;
    std::cout << GridLogMessage << "****************************************************************** "<<std::endl;
    ScidacReader _ScidacReader;
    _ScidacReader.open(file);
    _ScidacReader.readScidacFieldRecord(s_Umu,record);
    _ScidacReader.close();
    FGrid->Barrier();
    std::cout << GridLogMessage << "****************************************************************** "<<std::endl;
    std::cout << GridLogMessage << " Read in gauge field "<<std::endl;
    std::cout << GridLogMessage << "****************************************************************** "<<std::endl;
  }


  {
    for(int n=0;n<nrhs;n++){

      std::cout << GridLogMessage << "****************************************************************** "<<std::endl;
      std::cout << GridLogMessage << " Writing out record "<<n<<std::endl;
      std::cout << GridLogMessage << "****************************************************************** "<<std::endl;

      std::stringstream filefn;  filefn << filef << "."<< n;
      ScidacWriter _ScidacWriter;
      _ScidacWriter.open(filefn.str());
      _ScidacWriter.writeScidacFieldRecord(src[n],record);
      _ScidacWriter.close();
    }

    FGrid->Barrier();

    std::cout << GridLogMessage << "****************************************************************** "<<std::endl;
    std::cout << GridLogMessage << " Reading back in the single process view "<<std::endl;
    std::cout << GridLogMessage << "****************************************************************** "<<std::endl;

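    // Each rank reads back only its own record (n == me).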
    for(int n=0;n<nrhs;n++){
      if ( n==me ) {
        std::stringstream filefn;  filefn << filef << "."<< n;
        ScidacReader _ScidacReader;
        _ScidacReader.open(filefn.str());
        _ScidacReader.readScidacFieldRecord(s_src,record);
        _ScidacReader.close();
      }
    }
    FGrid->Barrier();
  }


  ///////////////////////////////////////////////////////////////
  // Set up N-solvers as trivially parallel
  ///////////////////////////////////////////////////////////////

  RealD mass=0.01;
  RealD M5=1.8;
  DomainWallFermionR Ddwf(s_Umu,*SFGrid,*SFrbGrid,*SGrid,*SrbGrid,mass,M5);

  std::cout << GridLogMessage << "****************************************************************** "<<std::endl;
  std::cout << GridLogMessage << " Calling DWF CG "<<std::endl;
  std::cout << GridLogMessage << "****************************************************************** "<<std::endl;

  MdagMLinearOperator<DomainWallFermionR,FermionField> HermOp(Ddwf);
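  // Rank-dependent tolerance, so each split solve converges in a different
  // number of iterations.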
  ConjugateGradient<FermionField> CG((1.0e-8/(me+1)),10000);
  s_res = zero;
  CG(HermOp,s_src,s_res);

  ///////////////////////////////////////
  // Share the information
  ///////////////////////////////////////
  std::vector<uint32_t> iterations(nrhs,0);
  iterations[me] = CG.IterationsToComplete;

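  // Only entry "me" is non-zero on each rank; the global sum over the world
  // communicator gathers every rank's iteration count.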
  for(int n=0;n<nrhs;n++){
    UGrid->GlobalSum(iterations[n]);
  }

  /////////////////////////////////////////////////////////////
  // Report how long they all took
  /////////////////////////////////////////////////////////////
  for(int r=0;r<nrhs;r++){
    std::cout << GridLogMessage<<" Rank "<<r<<" "<< iterations[r]<<" CG iterations"<<std::endl;
  }
  Grid_finalize();
}

@@ -40,7 +40,7 @@ int main (int argc, char ** argv)
std::vector<int> simd_layout = GridDefaultSimd(Nd,vComplex::Nsimd());
std::vector<int> mpi_layout = GridDefaultMpi();
GridCartesian Grid(latt_size,simd_layout,mpi_layout);
-GridRedBlackCartesian RBGrid(latt_size,simd_layout,mpi_layout);
+GridRedBlackCartesian RBGrid(&Grid);

std::vector<int> seeds({1,2,3,4,5});
GridParallelRNG pRNG(&Grid); pRNG.SeedFixedIntegers(seeds);

@@ -27,7 +27,6 @@ Author: Peter Boyle <paboyle@ph.ed.ac.uk>
*************************************************************************************/
/* END LEGAL */
#include <Grid/Grid.h>
-#include <Grid/algorithms/iterative/BlockConjugateGradient.h>

using namespace std;
using namespace Grid;

@@ -57,7 +57,7 @@ int main (int argc, char ** argv)
std::vector<int> simd_layout = GridDefaultSimd(Nd,vComplex::Nsimd());
std::vector<int> mpi_layout = GridDefaultMpi();
GridCartesian Grid(latt_size,simd_layout,mpi_layout);
-GridRedBlackCartesian RBGrid(latt_size,simd_layout,mpi_layout);
+GridRedBlackCartesian RBGrid(&Grid);

std::vector<int> seeds({1,2,3,4});
GridParallelRNG pRNG(&Grid); pRNG.SeedFixedIntegers(seeds);

@@ -57,7 +57,7 @@ int main (int argc, char ** argv)
std::vector<int> simd_layout = GridDefaultSimd(Nd,vComplex::Nsimd());
std::vector<int> mpi_layout = GridDefaultMpi();
GridCartesian Grid(latt_size,simd_layout,mpi_layout);
-GridRedBlackCartesian RBGrid(latt_size,simd_layout,mpi_layout);
+GridRedBlackCartesian RBGrid(&Grid);

std::vector<int> seeds({1,2,3,4});
GridParallelRNG pRNG(&Grid); pRNG.SeedFixedIntegers(seeds);

@@ -52,7 +52,7 @@ int main (int argc, char ** argv)
std::vector<int> simd_layout = GridDefaultSimd(Nd,vComplex::Nsimd());
std::vector<int> mpi_layout = GridDefaultMpi();
GridCartesian Grid(latt_size,simd_layout,mpi_layout);
-GridRedBlackCartesian RBGrid(latt_size,simd_layout,mpi_layout);
+GridRedBlackCartesian RBGrid(&Grid);

std::vector<int> seeds({1,2,3,4});
GridParallelRNG pRNG(&Grid); pRNG.SeedFixedIntegers(seeds);

@@ -52,7 +52,7 @@ int main (int argc, char ** argv)
std::vector<int> simd_layout = GridDefaultSimd(Nd,vComplex::Nsimd());
std::vector<int> mpi_layout = GridDefaultMpi();
GridCartesian Grid(latt_size,simd_layout,mpi_layout);
-GridRedBlackCartesian RBGrid(latt_size,simd_layout,mpi_layout);
+GridRedBlackCartesian RBGrid(&Grid);

std::vector<int> seeds({1,2,3,4});
GridParallelRNG pRNG(&Grid); pRNG.SeedFixedIntegers(seeds);

@@ -52,7 +52,7 @@ int main (int argc, char ** argv)
std::vector<int> simd_layout = GridDefaultSimd(Nd,vComplex::Nsimd());
std::vector<int> mpi_layout = GridDefaultMpi();
GridCartesian Grid(latt_size,simd_layout,mpi_layout);
-GridRedBlackCartesian RBGrid(latt_size,simd_layout,mpi_layout);
+GridRedBlackCartesian RBGrid(&Grid);

std::vector<int> seeds({1,2,3,4});
GridParallelRNG pRNG(&Grid); pRNG.SeedFixedIntegers(seeds);

@@ -52,7 +52,7 @@ int main (int argc, char ** argv)
std::vector<int> simd_layout = GridDefaultSimd(Nd,vComplex::Nsimd());
std::vector<int> mpi_layout = GridDefaultMpi();
GridCartesian Grid(latt_size,simd_layout,mpi_layout);
-GridRedBlackCartesian RBGrid(latt_size,simd_layout,mpi_layout);
+GridRedBlackCartesian RBGrid(&Grid);

std::vector<int> seeds({1,2,3,4});
GridParallelRNG pRNG(&Grid); pRNG.SeedFixedIntegers(seeds);