Mirror of https://github.com/paboyle/Grid.git (synced 2025-04-09 21:50:45 +01:00)

commit 57f899d79c
Merge branch 'develop' of github.com:paboyle/Grid into develop
@@ -141,7 +141,7 @@ class WilsonFermion : public WilsonKernels<Impl>, public WilsonFermionStatic {
                GridRedBlackCartesian &Hgrid, RealD _mass,
                const ImplParams &p = ImplParams(),
                const WilsonAnisotropyCoefficients &anis = WilsonAnisotropyCoefficients() );
 
   // DoubleStore impl dependent
   void ImportGauge(const GaugeField &_Umu);
 
Binary file not shown.
@@ -80,7 +80,8 @@ primary_domain = 'cpp'
 # a list of builtin themes.
 #
 html_theme = 'alabaster'
-
+html_use_smartypants = False
+smart_quotes = False
 # Theme options are theme-specific and customize the look and feel of a theme
 # further. For a list of options available for each theme, see the
 # documentation.
@@ -10,7 +10,7 @@ examples.
 
 
 MPI initialization
-------------------
+--------------------
 
 Grid supports threaded MPI sends and receives and, if running with
 more than one thread, requires the MPI_THREAD_MULTIPLE mode of message
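
The threaded-MPI requirement above can be satisfied with an initialization along the following lines. This is a minimal sketch, assuming the calling program (rather than Grid) owns the MPI initialization; the surrounding `main` is illustrative::

   #include <mpi.h>
   #include <cassert>

   int main(int argc, char **argv) {
     int provided;
     // Request full thread support, as Grid's threaded sends/receives require.
     MPI_Init_thread(&argc, &argv, MPI_THREAD_MULTIPLE, &provided);
     assert(MPI_THREAD_MULTIPLE == provided);

     // ... Grid_init(&argc, &argv) and the rest of the program ...

     MPI_Finalize();
     return 0;
   }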
@@ -21,39 +21,46 @@ appropriate initialization call is::
   assert(MPI_THREAD_MULTIPLE == provided);
 
 Grid Initialization
--------------------
+---------------------
 
 Grid itself is initialized with a call::
 
   Grid_init(&argc, &argv);
 
-.. todo:: CD: Where are the command-line arguments explained above?
+Command line options include::
+
+   --mpi n.n.n.n  : default MPI decomposition
+   --threads n    : default number of OMP threads
+   --grid n.n.n.n : default Grid size
 
 where `argc` and `argv` are constructed to simulate the command-line
-options described above. At a minimum one must provide the `--grid`
-and `--mpi` parameters. The latter specifies the grid of processors
-(MPI ranks).
+options described above. At a minimum one usually provides the
+`--grid` and `--mpi` parameters. The former specifies the lattice
+dimensions and the latter specifies the grid of processors (MPI
+ranks). If these parameters are not specified with the `Grid_init`
+call, they need to be supplied later when creating Grid fields.
 
-The following Grid procedures are useful for verifying that Grid is
-properly initialized.
+The following Grid procedures are useful for verifying that Grid
+"default" values are properly initialized.
 
 ============================================================= ===========================================================================================================
 Grid procedure                                                returns
 ============================================================= ===========================================================================================================
 std::vector<int> GridDefaultLatt();                           lattice size
 std::vector<int> GridDefaultSimd(int Nd,vComplex::Nsimd());   SIMD layout
 std::vector<int> GridDefaultMpi();                            MPI layout
 int Grid::GridThread::GetThreads();                           number of threads
+============================================================= ===========================================================================================================
 
+
 MPI coordination
 ----------------
 
 Grid wants to use its own numbering of MPI ranks and its own
 assignment of the lattice coordinates with each rank. Obviously, the
-calling program and Grid must agree on these conventions. It is
-convenient to use Grid's Cartesian communicator class to discover the
-processor assignments. For a four-dimensional processor grid one can
-define::
+calling program and Grid must agree on these conventions. One should
+use Grid's Cartesian communicator class to discover the processor
+assignments. For a four-dimensional processor grid one can define::
 
   static Grid::CartesianCommunicator *grid_cart = NULL;
  
   grid_cart = new Grid::CartesianCommunicator(processors);
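
The defaults listed in the table above can be checked with a small driver program. This is a sketch only, assuming a standard Grid installation; the four-dimensional case (Nd = 4) is assumed, and the printed values correspond to the `--grid`, `--mpi` and `--threads` options::

   #include <Grid/Grid.h>
   #include <iostream>

   using namespace Grid;

   int main(int argc, char **argv) {
     Grid_init(&argc, &argv);

     // Defaults established by Grid_init (see the table above).
     auto latt = GridDefaultLatt();                       // lattice size
     auto mpi  = GridDefaultMpi();                        // processor grid (MPI ranks)
     auto simd = GridDefaultSimd(4, vComplex::Nsimd());   // SIMD layout, assuming Nd = 4
     int  threads = GridThread::GetThreads();             // number of OMP threads

     for (int mu = 0; mu < (int)latt.size(); mu++)
       std::cout << GridLogMessage << "mu=" << mu
                 << " latt=" << latt[mu] << " mpi=" << mpi[mu]
                 << " simd=" << simd[mu] << std::endl;
     std::cout << GridLogMessage << "threads = " << threads << std::endl;

     Grid_finalize();
     return 0;
   }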
@@ -94,14 +101,38 @@ index number of the subcommunicator. Once this is done,::
 
 returns a rank that agrees with Grid's `peRank`.
 
+QMP coordination
+----------------
+
+If the calling program uses the SciDAC QMP message-passing package, a
+call to QMP_comm_split() instead can be used to reassign the ranks.
+In the example below, `peGrid` gives the processor-grid dimensions,
+usually set on the command line with `-qmp-geom`.
+
+**Example**::
+
+   int NDIM = QMP_get_allocated_number_of_dimensions();
+   Grid::Grid_init(argc,argv);
+   FgridBase::grid_initted=true;
+   std::vector<int> processors;
+   for(int i=0;i<NDIM;i++) processors.push_back(peGrid[i]);
+   Grid::CartesianCommunicator grid_cart(processors);
+   std::vector<int> pePos(NDIM);
+   for(int i=NDIM-1;i>=0;i--)
+     pePos[i] = grid_cart._processor_coor[i];
+   int peRank = grid_cart.RankFromProcessorCoor(pePos);
+   QMP_comm_split(QMP_comm_get_default(),0,peRank,&qmp_comm);
+   QMP_comm_set_default(qmp_comm);
+
 
 Mapping fields between Grid and user layouts
---------------------------------------------
+----------------------------------------------
 
-In order to map data between layouts, it is important to know
-how the lattice sites are distributed across the processor grid. A
-lattice site with coordinates `r[mu]` is assigned to the processor with
-processor coordinates `pePos[mu]` according to the rule::
+In order to map data between calling-program and Grid layouts, it is
+important to know how the lattice sites are distributed across the
+processor grid. A lattice site with coordinates `r[mu]` is assigned
+to the processor with processor coordinates `pePos[mu]` according to
+the rule::
 
   pePos[mu] = r[mu]/dim[mu]
 
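
The rule above is easy to encode. The helper below is purely illustrative (it is not a Grid function), and `dim[mu]` is assumed here to be the extent of the local subvolume in direction `mu`, i.e. the global lattice size divided by the processor-grid size::

   #include <vector>

   // Illustrative helper (not part of Grid): apply pePos[mu] = r[mu]/dim[mu].
   //   r   : global lattice coordinates of the site
   //   dim : assumed local subvolume extent per direction (latt[mu]/mpi[mu])
   std::vector<int> siteToProcessor(const std::vector<int> &r,
                                    const std::vector<int> &dim) {
     std::vector<int> pePos(r.size());
     for (int mu = 0; mu < (int)r.size(); mu++)
       pePos[mu] = r[mu] / dim[mu];   // integer division, as in the rule above
     return pePos;
   }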
@@ -116,7 +147,7 @@ defined on the appropriate grid, whether it be a full lattice (4D
 `GridCartesian`), one of the checkerboards (4D
 `GridRedBlackCartesian`), a five-dimensional full grid (5D
 `GridCartesian`), or a five-dimensional checkerboard (5D
-`GridRedBlackCartesian`). For example, an improved staggered fermion
+`GridRedBlackCartesian`). For example, an improved staggered-fermion
 color-vector field `cv` on a single checkerboard would be constructed
 using
 
@@ -131,12 +162,16 @@ using
 
   typename ImprovedStaggeredFermion::FermionField cv(RBGrid);
 
-To map data within an MPI rank, the external code must iterate over
-the sites belonging to that rank (full or checkerboard as
-appropriate). To import data into Grid, the external data on a single
-site with coordinates `r` is first copied into the appropriate Grid
-scalar object `s`. Then it is copied into the Grid lattice field `l`
-with `pokeLocalSite`::
+The example above assumes that the grid default values were set in the
+`Grid_init` call. If not, they can be set at this point and passed
+when `GridCartesian` is instantiated here. To map data within an MPI
+rank, the external code must iterate over the sites belonging to that
+rank (full or checkerboard as appropriate). Note that the site
+coordinates are specified relative to the origin of the lattice
+subvolume on that rank. To import data into Grid, the external data on
+a single site with coordinates `r` is first copied into the
+appropriate Grid scalar object `s`. Then it is copied into the Grid
+lattice field `l` with `pokeLocalSite`::
 
   pokeLocalSite(const sobj &s, Lattice<vobj> &l, Coordinate &r);
 
@@ -156,7 +191,7 @@ there to the lattice colour-vector field `cv`, as defined above.
   indexToCoords(idx,r);
   ColourVector cVec;
   for(int col=0; col<Nc; col++)
-    cVec._internal._internal._internal[col] =
+    cVec()()(col) =
       Complex(src[idx].c[col].real, src[idx].c[col].imag);
 
   pokeLocalSite(cVec, cv, r);
@@ -177,21 +212,21 @@ Grid 5D fermion field `cv5`.
 
 **Example**::
 
  GridCartesian * UGrid = SpaceTimeGrid::makeFourDimGrid(GridDefaultLatt(), GridDefaultSimd(Nd,vComplex::Nsimd()), GridDefaultMpi());
  GridRedBlackCartesian * FrbGrid = SpaceTimeGrid::makeFiveDimRedBlackGrid(Ls,UGrid); typename ImprovedStaggeredFermion5D::FermionField cv5(FrbGrid);
 
  std::vector<int> r(4);
  indexToCoords(idx,r);
  std::vector<int> r5(1,0);
  for( int d = 0; d < 4; d++ ) r5.push_back(r[d]);
 
  for( int j = 0; j < Ls; j++ ){
    r5[0] = j;
    ColourVector cVec;
    for(int col=0; col<Nc; col++){
-     cVec._internal._internal._internal[col] =
+     cVec()()(col) =
        Complex(src[j][idx].c[col].real, src[j][idx].c[col].imag);
    }
    pokeLocalSite(cVec, *(out->cv), r5);
  }
 
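
Exporting data from Grid back to the calling program's layout reverses these steps. The sketch below is illustrative only: it assumes Grid's `peekLocalSite` companion to `pokeLocalSite`, uses the 4D checkerboard field `cv` defined earlier, and writes into `dst`, a hypothetical external buffer laid out like `src`::

   std::vector<int> r(4);
   indexToCoords(idx,r);            // local site coordinates, as above

   ColourVector cVec;
   peekLocalSite(cVec, cv, r);      // copy the site out of the Grid field

   for(int col=0; col<Nc; col++){
     dst[idx].c[col].real = real(cVec()()(col));   // dst is hypothetical
     dst[idx].c[col].imag = imag(cVec()()(col));
   }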
File diff suppressed because it is too large
@@ -146,13 +146,7 @@ int main(int argc, char **argv) {
   std::cout << GridLogMessage << "Denominator report, Dw(m) term (includes CG) : " << std::endl;
   DenOp.Report();
 
-
-
-
   Grid_finalize();
 
-
-
-
 } // main
 