Merge branch 'feature/gen-simd' into feature/doxygen

Conflicts:
	Makefile.am
	configure.ac
@ -40,14 +40,6 @@ Author: Peter Boyle <paboyle@ph.ed.ac.uk>
#include <mm_malloc.h>
#endif

#ifdef GRID_COMMS_SHMEM
extern "C" {
#include <mpp/shmem.h>
extern void * shmem_align(size_t, size_t);
extern void shmem_free(void *);
}
#endif

namespace Grid {

////////////////////////////////////////////////////////////////////
@ -65,28 +57,85 @@ public:
  typedef _Tp value_type;

  template<typename _Tp1> struct rebind { typedef alignedAllocator<_Tp1> other; };

  alignedAllocator() throw() { }
  alignedAllocator(const alignedAllocator&) throw() { }
  template<typename _Tp1> alignedAllocator(const alignedAllocator<_Tp1>&) throw() { }
  ~alignedAllocator() throw() { }

  pointer address(reference __x) const { return &__x; }
  // const_pointer address(const_reference __x) const { return &__x; }

  size_type max_size() const throw() { return size_t(-1) / sizeof(_Tp); }

  pointer allocate(size_type __n, const void* _p= 0)
  {
#ifdef HAVE_MM_MALLOC_H
    _Tp * ptr = (_Tp *) _mm_malloc(__n*sizeof(_Tp),128);
#else
    _Tp * ptr = (_Tp *) memalign(128,__n*sizeof(_Tp));
#endif

    _Tp tmp;
#ifdef GRID_NUMA
#pragma omp parallel for schedule(static)
    for(int i=0;i<__n;i++){
      ptr[i]=tmp;
    }
#endif
    return ptr;
  }

  void deallocate(pointer __p, size_type) {
#ifdef HAVE_MM_MALLOC_H
    _mm_free((void *)__p);
#else
    free((void *)__p);
#endif
  }
  void construct(pointer __p, const _Tp& __val) { };
  void construct(pointer __p) { };
  void destroy(pointer __p) { };
};
template<typename _Tp> inline bool operator==(const alignedAllocator<_Tp>&, const alignedAllocator<_Tp>&){ return true; }
template<typename _Tp> inline bool operator!=(const alignedAllocator<_Tp>&, const alignedAllocator<_Tp>&){ return false; }

//////////////////////////////////////////////////////////////////////////////////////////
// MPI3 : comms must use shm region
// SHMEM: comms must use symmetric heap
//////////////////////////////////////////////////////////////////////////////////////////
#ifdef GRID_COMMS_SHMEM

_Tp *ptr = (_Tp *) shmem_align(__n*sizeof(_Tp),64);

extern "C" {
#include <mpp/shmem.h>
extern void * shmem_align(size_t, size_t);
extern void shmem_free(void *);
}
#define PARANOID_SYMMETRIC_HEAP
#endif

template<typename _Tp>
class commAllocator {
public:
  typedef std::size_t size_type;
  typedef std::ptrdiff_t difference_type;
  typedef _Tp* pointer;
  typedef const _Tp* const_pointer;
  typedef _Tp& reference;
  typedef const _Tp& const_reference;
  typedef _Tp value_type;

  template<typename _Tp1> struct rebind { typedef commAllocator<_Tp1> other; };
  commAllocator() throw() { }
  commAllocator(const commAllocator&) throw() { }
  template<typename _Tp1> commAllocator(const commAllocator<_Tp1>&) throw() { }
  ~commAllocator() throw() { }
  pointer address(reference __x) const { return &__x; }
  size_type max_size() const throw() { return size_t(-1) / sizeof(_Tp); }

#ifdef GRID_COMMS_SHMEM
  pointer allocate(size_type __n, const void* _p= 0)
  {
#ifdef CRAY
    _Tp *ptr = (_Tp *) shmem_align(__n*sizeof(_Tp),64);
#else
    _Tp *ptr = (_Tp *) shmem_align(64,__n*sizeof(_Tp));
#endif
#ifdef PARANOID_SYMMETRIC_HEAP
    static void * bcast;
    static long psync[_SHMEM_REDUCE_SYNC_SIZE];
@ -96,55 +145,47 @@ public:

    if ( bcast != ptr ) {
      std::printf("inconsistent alloc pe %d %lx %lx \n",shmem_my_pe(),bcast,ptr);std::fflush(stdout);
      BACKTRACEFILE();
      // BACKTRACEFILE();
      exit(0);
    }

    assert( bcast == (void *) ptr);

#endif
    return ptr;
  }
  void deallocate(pointer __p, size_type) {
    shmem_free((void *)__p);
  }
#else

  pointer allocate(size_type __n, const void* _p= 0)
  {
#ifdef HAVE_MM_MALLOC_H
    _Tp * ptr = (_Tp *) _mm_malloc(__n*sizeof(_Tp),128);
#else
    _Tp * ptr = (_Tp *) memalign(128,__n*sizeof(_Tp));
#endif

#endif
    _Tp tmp;
#undef FIRST_TOUCH_OPTIMISE
#ifdef FIRST_TOUCH_OPTIMISE
#pragma omp parallel for
    for(int i=0;i<__n;i++){
      ptr[i]=tmp;
    }
#endif
    return ptr;
  }

  void deallocate(pointer __p, size_type) {
#ifdef GRID_COMMS_SHMEM
    shmem_free((void *)__p);
#else
#ifdef HAVE_MM_MALLOC_H
    _mm_free((void *)__p);
#else
    free((void *)__p);
#endif
#endif
  }
#endif
  void construct(pointer __p, const _Tp& __val) { };
  void construct(pointer __p) { };

  void destroy(pointer __p) { };
};
template<typename _Tp> inline bool operator==(const commAllocator<_Tp>&, const commAllocator<_Tp>&){ return true; }
template<typename _Tp> inline bool operator!=(const commAllocator<_Tp>&, const commAllocator<_Tp>&){ return false; }

template<typename _Tp> inline bool
operator==(const alignedAllocator<_Tp>&, const alignedAllocator<_Tp>&){ return true; }

template<typename _Tp> inline bool
operator!=(const alignedAllocator<_Tp>&, const alignedAllocator<_Tp>&){ return false; }
////////////////////////////////////////////////////////////////////////////////
// Template typedefs
////////////////////////////////////////////////////////////////////////////////
template<class T> using Vector = std::vector<T,alignedAllocator<T> >;
template<class T> using commVector = std::vector<T,commAllocator<T> >;
template<class T> using Matrix = std::vector<std::vector<T,alignedAllocator<T> > >;

}; // namespace Grid
#endif
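As a usage sketch (not part of the commit; assumes Grid.h has been included so the allocators above are visible), the template typedefs behave exactly like std::vector, but with 128-byte aligned storage and, for commVector, storage the comms layer can address:

void allocator_demo(void) {
  Grid::Vector<double>     v(1024, 0.0);   // std::vector backed by alignedAllocator: 128-byte aligned
  Grid::commVector<double> sendbuf(1024);  // backed by commAllocator: symmetric heap / comms-capable memory
  double *p = &v[0];                       // pointer can be handed to vectorised kernels or MPI/SHMEM calls
  (void)p; (void)sendbuf;
}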
@ -38,6 +38,14 @@ Author: Peter Boyle <paboyle@ph.ed.ac.uk>
#include <Grid/cshift/Cshift_mpi.h>
#endif

#ifdef GRID_COMMS_MPI3
#include <Grid/cshift/Cshift_mpi.h>
#endif

#ifdef GRID_COMMS_MPI3L
#include <Grid/cshift/Cshift_mpi.h>
#endif

#ifdef GRID_COMMS_SHMEM
#include <Grid/cshift/Cshift_mpi.h> // uses same implementation of communicator
#endif
284  lib/FFT.h
@ -29,9 +29,15 @@ Author: Peter Boyle <paboyle@ph.ed.ac.uk>
#ifndef _GRID_FFT_H_
#define _GRID_FFT_H_

#ifdef HAVE_FFTW
#ifdef USE_MKL
#include <fftw/fftw3.h>
#else
#include <fftw3.h>
#endif
#endif

namespace Grid {

  template<class scalar> struct FFTW { };
@ -98,179 +104,199 @@ namespace Grid {
#define FFTW_BACKWARD (+1)
#endif

  class FFT {
  private:

    GridCartesian *vgrid;
    GridCartesian *sgrid;

    int Nd;
    double flops;
    double flops_call;
    uint64_t usec;

    std::vector<int> dimensions;
    std::vector<int> processors;
    std::vector<int> processor_coor;

  public:

    static const int forward=FFTW_FORWARD;
    static const int backward=FFTW_BACKWARD;

    double Flops(void) {return flops;}
    double MFlops(void) {return flops/usec;}
    double USec(void) {return (double)usec;}

    FFT ( GridCartesian * grid ) :
      vgrid(grid),
      Nd(grid->_ndimension),
      dimensions(grid->_fdimensions),
      processors(grid->_processors),
      processor_coor(grid->_processor_coor)
    {
      flops=0;
      usec =0;
      std::vector<int> layout(Nd,1);
      sgrid = new GridCartesian(dimensions,layout,processors);
    };

    ~FFT ( void) {
      delete sgrid;
    }

    template<class vobj>
    void FFT_dim(Lattice<vobj> &result,const Lattice<vobj> &source,int dim, int inverse){
    void FFT_dim_mask(Lattice<vobj> &result,const Lattice<vobj> &source,std::vector<int> mask,int sign){

      conformable(result._grid,vgrid);
      conformable(source._grid,vgrid);
      Lattice<vobj> tmp(vgrid);
      tmp = source;
      for(int d=0;d<Nd;d++){
        if( mask[d] ) {
          FFT_dim(result,tmp,d,sign);
          tmp=result;
        }
      }
    }

    template<class vobj>
    void FFT_all_dim(Lattice<vobj> &result,const Lattice<vobj> &source,int sign){
      std::vector<int> mask(Nd,1);
      FFT_dim_mask(result,source,mask,sign);
    }
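A usage sketch of the interface above (illustrative, not from the commit): it assumes a GridCartesian* named grid and some Lattice<vobj> field type, written here as LatticeField.

Grid::FFT theFFT(grid);
LatticeField src(grid), result(grid);
theFFT.FFT_all_dim(result, src, Grid::FFT::forward);       // transform every dimension
std::vector<int> mask(grid->_ndimension, 1);
mask.back() = 0;                                            // leave the last dimension untransformed
theFFT.FFT_dim_mask(result, src, mask, Grid::FFT::backward);
std::cout << theFFT.MFlops() << " MFlop/s" << std::endl;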
template<class vobj>
|
||||
void FFT_dim(Lattice<vobj> &result,const Lattice<vobj> &source,int dim, int sign){
|
||||
#ifndef HAVE_FFTW
|
||||
assert(0);
|
||||
#else
|
||||
conformable(result._grid,vgrid);
|
||||
conformable(source._grid,vgrid);
|
||||
|
||||
int L = vgrid->_ldimensions[dim];
|
||||
int G = vgrid->_fdimensions[dim];
|
||||
|
||||
|
||||
std::vector<int> layout(Nd,1);
|
||||
std::vector<int> pencil_gd(vgrid->_fdimensions);
|
||||
|
||||
pencil_gd[dim] = G*processors[dim];
|
||||
|
||||
|
||||
pencil_gd[dim] = G*processors[dim];
|
||||
|
||||
// Pencil global vol LxLxGxLxL per node
|
||||
GridCartesian pencil_g(pencil_gd,layout,processors);
|
||||
|
||||
|
||||
// Construct pencils
|
||||
typedef typename vobj::scalar_object sobj;
|
||||
typedef typename sobj::scalar_type scalar;
|
||||
|
||||
Lattice<sobj> pgbuf(&pencil_g);
|
||||
|
||||
|
||||
Lattice<vobj> ssource(vgrid); ssource =source;
|
||||
Lattice<sobj> pgsource(&pencil_g);
|
||||
Lattice<sobj> pgresult(&pencil_g); pgresult=zero;
|
||||
|
||||
#ifndef HAVE_FFTW
|
||||
assert(0);
|
||||
#else
|
||||
typedef typename FFTW<scalar>::FFTW_scalar FFTW_scalar;
|
||||
typedef typename FFTW<scalar>::FFTW_plan FFTW_plan;
|
||||
|
||||
{
|
||||
int Ncomp = sizeof(sobj)/sizeof(scalar);
|
||||
int Nlow = 1;
|
||||
for(int d=0;d<dim;d++){
|
||||
Nlow*=vgrid->_ldimensions[d];
|
||||
}
|
||||
|
||||
int rank = 1; /* 1d transforms */
|
||||
int n[] = {G}; /* 1d transforms of length G */
|
||||
int howmany = Ncomp;
|
||||
int odist,idist,istride,ostride;
|
||||
idist = odist = 1; /* Distance between consecutive FT's */
|
||||
istride = ostride = Ncomp*Nlow; /* distance between two elements in the same FT */
|
||||
int *inembed = n, *onembed = n;
|
||||
|
||||
|
||||
int sign = FFTW_FORWARD;
|
||||
if (inverse) sign = FFTW_BACKWARD;
|
||||
|
||||
FFTW_plan p;
|
||||
{
|
||||
FFTW_scalar *in = (FFTW_scalar *)&pgsource._odata[0];
|
||||
FFTW_scalar *out= (FFTW_scalar *)&pgresult._odata[0];
|
||||
p = FFTW<scalar>::fftw_plan_many_dft(rank,n,howmany,
|
||||
in,inembed,
|
||||
istride,idist,
|
||||
out,onembed,
|
||||
ostride, odist,
|
||||
sign,FFTW_ESTIMATE);
|
||||
}
|
||||
|
||||
double add,mul,fma;
|
||||
FFTW<scalar>::fftw_flops(p,&add,&mul,&fma);
|
||||
flops_call = add+mul+2.0*fma;
|
||||
|
||||
GridStopWatch timer;
|
||||
|
||||
// Barrel shift and collect global pencil
|
||||
for(int p=0;p<processors[dim];p++) {
|
||||
|
||||
for(int idx=0;idx<sgrid->lSites();idx++) {
|
||||
|
||||
std::vector<int> lcoor(Nd);
|
||||
sgrid->LocalIndexToLocalCoor(idx,lcoor);
|
||||
|
||||
sobj s;
|
||||
|
||||
peekLocalSite(s,ssource,lcoor);
|
||||
|
||||
lcoor[dim]+=p*L;
|
||||
|
||||
pokeLocalSite(s,pgsource,lcoor);
|
||||
}
|
||||
|
||||
ssource = Cshift(ssource,dim,L);
|
||||
}
|
||||
|
||||
// Loop over orthog coords
|
||||
int NN=pencil_g.lSites();
|
||||
|
||||
GridStopWatch Timer;
|
||||
Timer.Start();
|
||||
|
||||
PARALLEL_FOR_LOOP
|
||||
for(int idx=0;idx<NN;idx++) {
|
||||
|
||||
std::vector<int> lcoor(Nd);
|
||||
pencil_g.LocalIndexToLocalCoor(idx,lcoor);
|
||||
|
||||
if ( lcoor[dim] == 0 ) { // restricts loop to plane at lcoor[dim]==0
|
||||
FFTW_scalar *in = (FFTW_scalar *)&pgsource._odata[idx];
|
||||
FFTW_scalar *out= (FFTW_scalar *)&pgresult._odata[idx];
|
||||
FFTW<scalar>::fftw_execute_dft(p,in,out);
|
||||
}
|
||||
}
|
||||
|
||||
Timer.Stop();
|
||||
usec += Timer.useconds();
|
||||
flops+= flops_call*NN;
|
||||
|
||||
int pc = processor_coor[dim];
|
||||
for(int idx=0;idx<sgrid->lSites();idx++) {
|
||||
std::vector<int> lcoor(Nd);
|
||||
sgrid->LocalIndexToLocalCoor(idx,lcoor);
|
||||
std::vector<int> gcoor = lcoor;
|
||||
// extract the result
|
||||
sobj s;
|
||||
gcoor[dim] = lcoor[dim]+L*pc;
|
||||
peekLocalSite(s,pgresult,gcoor);
|
||||
pokeLocalSite(s,result,lcoor);
|
||||
}
|
||||
|
||||
FFTW<scalar>::fftw_destroy_plan(p);
|
||||
|
||||
int Ncomp = sizeof(sobj)/sizeof(scalar);
|
||||
int Nlow = 1;
|
||||
for(int d=0;d<dim;d++){
|
||||
Nlow*=vgrid->_ldimensions[d];
|
||||
}
|
||||
|
||||
int rank = 1; /* 1d transforms */
|
||||
int n[] = {G}; /* 1d transforms of length G */
|
||||
int howmany = Ncomp;
|
||||
int odist,idist,istride,ostride;
|
||||
idist = odist = 1; /* Distance between consecutive FT's */
|
||||
istride = ostride = Ncomp*Nlow; /* distance between two elements in the same FT */
|
||||
int *inembed = n, *onembed = n;
|
||||
|
||||
scalar div;
|
||||
if ( sign == backward ) div = 1.0/G;
|
||||
else if ( sign == forward ) div = 1.0;
|
||||
else assert(0);
|
||||
|
||||
FFTW_plan p;
|
||||
{
|
||||
FFTW_scalar *in = (FFTW_scalar *)&pgbuf._odata[0];
|
||||
FFTW_scalar *out= (FFTW_scalar *)&pgbuf._odata[0];
|
||||
p = FFTW<scalar>::fftw_plan_many_dft(rank,n,howmany,
|
||||
in,inembed,
|
||||
istride,idist,
|
||||
out,onembed,
|
||||
ostride, odist,
|
||||
sign,FFTW_ESTIMATE);
|
||||
}
|
||||
|
||||
// Barrel shift and collect global pencil
|
||||
std::vector<int> lcoor(Nd), gcoor(Nd);
|
||||
result = source;
|
||||
for(int p=0;p<processors[dim];p++) {
|
||||
PARALLEL_REGION
|
||||
{
|
||||
std::vector<int> cbuf(Nd);
|
||||
sobj s;
|
||||
|
||||
PARALLEL_FOR_LOOP_INTERN
|
||||
for(int idx=0;idx<sgrid->lSites();idx++) {
|
||||
sgrid->LocalIndexToLocalCoor(idx,cbuf);
|
||||
peekLocalSite(s,result,cbuf);
|
||||
cbuf[dim]+=p*L;
|
||||
pokeLocalSite(s,pgbuf,cbuf);
|
||||
}
|
||||
}
|
||||
result = Cshift(result,dim,L);
|
||||
}
|
||||
|
||||
// Loop over orthog coords
|
||||
int NN=pencil_g.lSites();
|
||||
GridStopWatch timer;
|
||||
timer.Start();
|
||||
PARALLEL_REGION
|
||||
{
|
||||
std::vector<int> cbuf(Nd);
|
||||
|
||||
PARALLEL_FOR_LOOP_INTERN
|
||||
for(int idx=0;idx<NN;idx++) {
|
||||
pencil_g.LocalIndexToLocalCoor(idx, cbuf);
|
||||
if ( cbuf[dim] == 0 ) { // restricts loop to plane at lcoor[dim]==0
|
||||
FFTW_scalar *in = (FFTW_scalar *)&pgbuf._odata[idx];
|
||||
FFTW_scalar *out= (FFTW_scalar *)&pgbuf._odata[idx];
|
||||
FFTW<scalar>::fftw_execute_dft(p,in,out);
|
||||
}
|
||||
}
|
||||
}
|
||||
timer.Stop();
|
||||
|
||||
// performance counting
|
||||
double add,mul,fma;
|
||||
FFTW<scalar>::fftw_flops(p,&add,&mul,&fma);
|
||||
flops_call = add+mul+2.0*fma;
|
||||
usec += timer.useconds();
|
||||
flops+= flops_call*NN;
|
||||
|
||||
// writing out result
|
||||
int pc = processor_coor[dim];
|
||||
PARALLEL_REGION
|
||||
{
|
||||
std::vector<int> clbuf(Nd), cgbuf(Nd);
|
||||
sobj s;
|
||||
|
||||
PARALLEL_FOR_LOOP_INTERN
|
||||
for(int idx=0;idx<sgrid->lSites();idx++) {
|
||||
sgrid->LocalIndexToLocalCoor(idx,clbuf);
|
||||
cgbuf = clbuf;
|
||||
cgbuf[dim] = clbuf[dim]+L*pc;
|
||||
peekLocalSite(s,pgbuf,cgbuf);
|
||||
s = s * div;
|
||||
pokeLocalSite(s,result,clbuf);
|
||||
}
|
||||
}
|
||||
|
||||
// destroying plan
|
||||
FFTW<scalar>::fftw_destroy_plan(p);
|
||||
#endif
|
||||
|
||||
|
||||
}
|
||||
|
||||
};
|
||||
|
||||
|
||||
}
|
||||
|
||||
#endif
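The per-dimension transform above batches Ncomp interleaved 1-d FFTs per pencil with fftw_plan_many_dft. A standalone sketch of the same strided layout, using plain FFTW only (not Grid), clarifies the stride/dist choice: consecutive elements of one transform sit Ncomp*Nlow apart, while successive transforms are adjacent (dist = 1).

#include <fftw3.h>

void plan_sketch(int G, int Ncomp, int Nlow) {
  fftw_complex *buf = (fftw_complex *) fftw_malloc(sizeof(fftw_complex) * (size_t)G * Ncomp * Nlow);
  int n[1] = { G };                        // 1d transforms of length G
  fftw_plan p = fftw_plan_many_dft(1, n, Ncomp,
                                   buf, n, /*istride*/ Ncomp*Nlow, /*idist*/ 1,
                                   buf, n, /*ostride*/ Ncomp*Nlow, /*odist*/ 1,
                                   FFTW_FORWARD, FFTW_ESTIMATE);
  fftw_execute(p);                         // all Ncomp transforms in one call
  fftw_destroy_plan(p);
  fftw_free(buf);
}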
|
||||
|
@ -77,11 +77,10 @@ Author: paboyle <paboyle@ph.ed.ac.uk>
#include <Grid/Stencil.h>
#include <Grid/Algorithms.h>
#include <Grid/parallelIO/BinaryIO.h>
#include <Grid/qcd/QCD.h>
#include <Grid/parallelIO/NerscIO.h>

#include <Grid/FFT.h>

#include <Grid/qcd/QCD.h>
#include <Grid/parallelIO/NerscIO.h>
#include <Grid/qcd/hmc/NerscCheckpointer.h>
#include <Grid/qcd/hmc/HmcRunner.h>
205
lib/Init.cc
205
lib/Init.cc
@ -44,9 +44,33 @@ Author: paboyle <paboyle@ph.ed.ac.uk>
|
||||
#include <Grid.h>
|
||||
#include <algorithm>
|
||||
#include <iterator>
|
||||
#include <cstdlib>
|
||||
#include <memory>
|
||||
|
||||
|
||||
#include <fenv.h>
|
||||
#ifdef __APPLE__
|
||||
static int
|
||||
feenableexcept (unsigned int excepts)
|
||||
{
|
||||
static fenv_t fenv;
|
||||
unsigned int new_excepts = excepts & FE_ALL_EXCEPT,
|
||||
old_excepts; // previous masks
|
||||
|
||||
if ( fegetenv (&fenv) ) return -1;
|
||||
old_excepts = fenv.__control & FE_ALL_EXCEPT;
|
||||
|
||||
// unmask
|
||||
fenv.__control &= ~new_excepts;
|
||||
fenv.__mxcsr &= ~(new_excepts << 7);
|
||||
|
||||
return ( fesetenv (&fenv) ? -1 : old_excepts );
|
||||
}
|
||||
#endif
|
||||
|
||||
namespace Grid {
|
||||
|
||||
|
||||
//////////////////////////////////////////////////////
|
||||
// Convenience functions to access standard command line arg
|
||||
// driven parallelism controls
|
||||
@ -123,6 +147,13 @@ void GridCmdOptionIntVector(std::string &str,std::vector<int> & vec)
|
||||
return;
|
||||
}
|
||||
|
||||
void GridCmdOptionInt(std::string &str,int & val)
|
||||
{
|
||||
std::stringstream ss(str);
|
||||
ss>>val;
|
||||
return;
|
||||
}
|
||||
|
||||
|
||||
void GridParseLayout(char **argv,int argc,
|
||||
std::vector<int> &latt,
|
||||
@ -153,14 +184,12 @@ void GridParseLayout(char **argv,int argc,
|
||||
assert(ompthreads.size()==1);
|
||||
GridThread::SetThreads(ompthreads[0]);
|
||||
}
|
||||
|
||||
if( GridCmdOptionExists(argv,argv+argc,"--cores") ){
|
||||
std::vector<int> cores(0);
|
||||
int cores;
|
||||
arg= GridCmdOptionPayload(argv,argv+argc,"--cores");
|
||||
GridCmdOptionIntVector(arg,cores);
|
||||
GridThread::SetCores(cores[0]);
|
||||
GridCmdOptionInt(arg,cores);
|
||||
GridThread::SetCores(cores);
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
std::string GridCmdVectorIntToString(const std::vector<int> & vec){
|
||||
@ -169,33 +198,40 @@ std::string GridCmdVectorIntToString(const std::vector<int> & vec){
|
||||
return oss.str();
|
||||
}
|
||||
/////////////////////////////////////////////////////////
|
||||
//
|
||||
// Reinit guard
|
||||
/////////////////////////////////////////////////////////
|
||||
static int Grid_is_initialised = 0;
|
||||
|
||||
|
||||
void Grid_init(int *argc,char ***argv)
|
||||
{
|
||||
CartesianCommunicator::Init(argc,argv);
|
||||
|
||||
// Parse command line args.
|
||||
|
||||
GridLogger::StopWatch.Start();
|
||||
|
||||
std::string arg;
|
||||
|
||||
////////////////////////////////////
|
||||
// Shared memory block size
|
||||
////////////////////////////////////
|
||||
if( GridCmdOptionExists(*argv,*argv+*argc,"--shm") ){
|
||||
int MB;
|
||||
arg= GridCmdOptionPayload(*argv,*argv+*argc,"--shm");
|
||||
GridCmdOptionInt(arg,MB);
|
||||
CartesianCommunicator::MAX_MPI_SHM_BYTES = MB*1024*1024;
|
||||
}
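A worked example of the option above (values illustrative, not from the commit): running with --shm 512 makes GridCmdOptionInt parse MB = 512, so MAX_MPI_SHM_BYTES becomes 512*1024*1024 = 536870912 bytes, and because this happens before CartesianCommunicator::Init the shared-memory window can be sized when MPI is brought up.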
|
||||
|
||||
CartesianCommunicator::Init(argc,argv);
|
||||
|
||||
////////////////////////////////////
|
||||
// Logging
|
||||
////////////////////////////////////
|
||||
|
||||
std::vector<std::string> logstreams;
|
||||
std::string defaultLog("Error,Warning,Message,Performance");
|
||||
|
||||
GridCmdOptionCSL(defaultLog,logstreams);
|
||||
GridLogConfigure(logstreams);
|
||||
|
||||
if( GridCmdOptionExists(*argv,*argv+*argc,"--help") ){
|
||||
std::cout<<GridLogMessage<<"--help : this message"<<std::endl;
|
||||
std::cout<<GridLogMessage<<"--debug-signals : catch sigsegv and print a blame report"<<std::endl;
|
||||
std::cout<<GridLogMessage<<"--debug-stdout : print stdout from EVERY node"<<std::endl;
|
||||
std::cout<<GridLogMessage<<"--decomposition : report on default omp,mpi and simd decomposition"<<std::endl;
|
||||
std::cout<<GridLogMessage<<"--mpi n.n.n.n : default MPI decomposition"<<std::endl;
|
||||
std::cout<<GridLogMessage<<"--threads n : default number of OMP threads"<<std::endl;
|
||||
std::cout<<GridLogMessage<<"--grid n.n.n.n : default Grid size"<<std::endl;
|
||||
std::cout<<GridLogMessage<<"--log list : comma separted list of streams from Error,Warning,Message,Performance,Iterative,Integrator,Debug,Colours"<<std::endl;
|
||||
exit(EXIT_SUCCESS);
|
||||
if( !GridCmdOptionExists(*argv,*argv+*argc,"--debug-stdout") ){
|
||||
Grid_quiesce_nodes();
|
||||
}
|
||||
|
||||
if( GridCmdOptionExists(*argv,*argv+*argc,"--log") ){
|
||||
@ -204,35 +240,39 @@ void Grid_init(int *argc,char ***argv)
|
||||
GridLogConfigure(logstreams);
|
||||
}
|
||||
|
||||
if( GridCmdOptionExists(*argv,*argv+*argc,"--debug-signals") ){
|
||||
Grid_debug_handler_init();
|
||||
}
|
||||
if( !GridCmdOptionExists(*argv,*argv+*argc,"--debug-stdout") ){
|
||||
Grid_quiesce_nodes();
|
||||
}
|
||||
if( GridCmdOptionExists(*argv,*argv+*argc,"--dslash-opt") ){
|
||||
QCD::WilsonKernelsStatic::HandOpt=1;
|
||||
}
|
||||
if( GridCmdOptionExists(*argv,*argv+*argc,"--lebesgue") ){
|
||||
LebesgueOrder::UseLebesgueOrder=1;
|
||||
////////////////////////////////////
|
||||
// Help message
|
||||
////////////////////////////////////
|
||||
|
||||
if( GridCmdOptionExists(*argv,*argv+*argc,"--help") ){
|
||||
std::cout<<GridLogMessage<<" --help : this message"<<std::endl;
|
||||
std::cout<<GridLogMessage<<std::endl;
|
||||
std::cout<<GridLogMessage<<"Geometry:"<<std::endl;
|
||||
std::cout<<GridLogMessage<<" --mpi n.n.n.n : default MPI decomposition"<<std::endl;
|
||||
std::cout<<GridLogMessage<<" --threads n : default number of OMP threads"<<std::endl;
|
||||
std::cout<<GridLogMessage<<" --grid n.n.n.n : default Grid size"<<std::endl;
|
||||
std::cout<<GridLogMessage<<" --shm M : allocate M megabytes of shared memory for comms"<<std::endl;
|
||||
std::cout<<GridLogMessage<<std::endl;
|
||||
std::cout<<GridLogMessage<<"Verbose and debug:"<<std::endl;
|
||||
std::cout<<GridLogMessage<<" --log list : comma separted list of streams from Error,Warning,Message,Performance,Iterative,Integrator,Debug,Colours"<<std::endl;
|
||||
std::cout<<GridLogMessage<<" --decomposition : report on default omp,mpi and simd decomposition"<<std::endl;
|
||||
std::cout<<GridLogMessage<<" --debug-signals : catch sigsegv and print a blame report"<<std::endl;
|
||||
std::cout<<GridLogMessage<<" --debug-stdout : print stdout from EVERY node"<<std::endl;
|
||||
std::cout<<GridLogMessage<<" --notimestamp : suppress millisecond resolution stamps"<<std::endl;
|
||||
std::cout<<GridLogMessage<<std::endl;
|
||||
std::cout<<GridLogMessage<<"Performance:"<<std::endl;
|
||||
std::cout<<GridLogMessage<<" --dslash-generic: Wilson kernel for generic Nc"<<std::endl;
|
||||
std::cout<<GridLogMessage<<" --dslash-unroll : Wilson kernel for Nc=3"<<std::endl;
|
||||
std::cout<<GridLogMessage<<" --dslash-asm : Wilson kernel for AVX512"<<std::endl;
|
||||
std::cout<<GridLogMessage<<" --lebesgue : Cache oblivious Lebesgue curve/Morton order/Z-graph stencil looping"<<std::endl;
|
||||
std::cout<<GridLogMessage<<" --cacheblocking n.m.o.p : Hypercuboidal cache blocking"<<std::endl;
|
||||
std::cout<<GridLogMessage<<std::endl;
|
||||
exit(EXIT_SUCCESS);
|
||||
}
|
||||
|
||||
if( GridCmdOptionExists(*argv,*argv+*argc,"--cacheblocking") ){
|
||||
arg= GridCmdOptionPayload(*argv,*argv+*argc,"--cacheblocking");
|
||||
GridCmdOptionIntVector(arg,LebesgueOrder::Block);
|
||||
}
|
||||
GridParseLayout(*argv,*argc,
|
||||
Grid_default_latt,
|
||||
Grid_default_mpi);
|
||||
if( GridCmdOptionExists(*argv,*argv+*argc,"--decomposition") ){
|
||||
std::cout<<GridLogMessage<<"Grid Decomposition\n";
|
||||
std::cout<<GridLogMessage<<"\tOpenMP threads : "<<GridThread::GetThreads()<<std::endl;
|
||||
std::cout<<GridLogMessage<<"\tMPI tasks : "<<GridCmdVectorIntToString(GridDefaultMpi())<<std::endl;
|
||||
std::cout<<GridLogMessage<<"\tvRealF : "<<sizeof(vRealF)*8 <<"bits ; " <<GridCmdVectorIntToString(GridDefaultSimd(4,vRealF::Nsimd()))<<std::endl;
|
||||
std::cout<<GridLogMessage<<"\tvRealD : "<<sizeof(vRealD)*8 <<"bits ; " <<GridCmdVectorIntToString(GridDefaultSimd(4,vRealD::Nsimd()))<<std::endl;
|
||||
std::cout<<GridLogMessage<<"\tvComplexF : "<<sizeof(vComplexF)*8 <<"bits ; " <<GridCmdVectorIntToString(GridDefaultSimd(4,vComplexF::Nsimd()))<<std::endl;
|
||||
std::cout<<GridLogMessage<<"\tvComplexD : "<<sizeof(vComplexD)*8 <<"bits ; " <<GridCmdVectorIntToString(GridDefaultSimd(4,vComplexD::Nsimd()))<<std::endl;
|
||||
}
|
||||
////////////////////////////////////
|
||||
// Banner
|
||||
////////////////////////////////////
|
||||
|
||||
std::string COL_RED = GridLogColours.colour["RED"];
|
||||
std::string COL_PURPLE = GridLogColours.colour["PURPLE"];
|
||||
@ -241,20 +281,19 @@ void Grid_init(int *argc,char ***argv)
|
||||
std::string COL_BLUE = GridLogColours.colour["BLUE"];
|
||||
std::string COL_YELLOW = GridLogColours.colour["YELLOW"];
|
||||
std::string COL_BACKGROUND = GridLogColours.colour["NORMAL"];
|
||||
|
||||
|
||||
std::cout <<std::endl;
|
||||
std::cout <<COL_RED << "__|__|__|__|__"<< "|__|__|_"<<COL_PURPLE<<"_|__|__|"<< "__|__|__|__|__"<<std::endl;
|
||||
std::cout <<COL_RED << "__|__|__|__|__"<< "|__|__|_"<<COL_PURPLE<<"_|__|__|"<< "__|__|__|__|__"<<std::endl;
|
||||
std::cout <<COL_RED << "__|__| | | "<< "| | | "<<COL_PURPLE<<" | | |"<< " | | | _|__"<<std::endl;
|
||||
std::cout <<COL_RED << "__|__ "<< " "<<COL_PURPLE<<" "<< " _|__"<<std::endl;
|
||||
std::cout <<COL_RED << "__|_ | | | "<< "| | | "<<COL_PURPLE<<" | | |"<< " | | | _|__"<<std::endl;
|
||||
std::cout <<COL_RED << "__|_ "<< " "<<COL_PURPLE<<" "<< " _|__"<<std::endl;
|
||||
std::cout <<COL_RED << "__|_ "<<COL_GREEN<<" GGGG "<<COL_RED<<" RRRR "<<COL_BLUE <<" III "<<COL_PURPLE<<"DDDD "<<COL_PURPLE<<" _|__"<<std::endl;
|
||||
std::cout <<COL_RED << "__|_ "<<COL_GREEN<<"G "<<COL_RED<<" R R "<<COL_BLUE <<" I "<<COL_PURPLE<<"D D "<<COL_PURPLE<<" _|__"<<std::endl;
|
||||
std::cout <<COL_RED << "__|_ "<<COL_GREEN<<"G "<<COL_RED<<" R R "<<COL_BLUE <<" I "<<COL_PURPLE<<"D D"<<COL_PURPLE<<" _|__"<<std::endl;
|
||||
std::cout <<COL_BLUE << "__|_ "<<COL_GREEN<<"G GG "<<COL_RED<<" RRRR "<<COL_BLUE <<" I "<<COL_PURPLE<<"D D"<<COL_GREEN <<" _|__"<<std::endl;
|
||||
std::cout <<COL_BLUE << "__|_ "<<COL_GREEN<<"G G "<<COL_RED<<" R R "<<COL_BLUE <<" I "<<COL_PURPLE<<"D D "<<COL_GREEN <<" _|__"<<std::endl;
|
||||
std::cout <<COL_BLUE << "__|_ "<<COL_GREEN<<" GGGG "<<COL_RED<<" R R "<<COL_BLUE <<" III "<<COL_PURPLE<<"DDDD "<<COL_GREEN <<" _|__"<<std::endl;
|
||||
std::cout <<COL_BLUE << "__|__ "<< " "<<COL_GREEN <<" "<< " _|__"<<std::endl;
|
||||
std::cout <<COL_BLUE << "__|_ "<< " "<<COL_GREEN <<" "<< " _|__"<<std::endl;
|
||||
std::cout <<COL_BLUE << "__|__|__|__|__"<< "|__|__|_"<<COL_GREEN <<"_|__|__|"<< "__|__|__|__|__"<<std::endl;
|
||||
std::cout <<COL_BLUE << "__|__|__|__|__"<< "|__|__|_"<<COL_GREEN <<"_|__|__|"<< "__|__|__|__|__"<<std::endl;
|
||||
std::cout <<COL_BLUE << " | | | | "<< "| | | "<<COL_GREEN <<" | | |"<< " | | | | "<<std::endl;
|
||||
@ -274,12 +313,63 @@ void Grid_init(int *argc,char ***argv)
|
||||
std::cout << "GNU General Public License for more details."<<std::endl;
|
||||
std::cout << COL_BACKGROUND <<std::endl;
|
||||
std::cout << std::endl;
|
||||
|
||||
////////////////////////////////////
|
||||
// Debug and performance options
|
||||
////////////////////////////////////
|
||||
|
||||
|
||||
if( GridCmdOptionExists(*argv,*argv+*argc,"--debug-signals") ){
|
||||
Grid_debug_handler_init();
|
||||
}
|
||||
if( GridCmdOptionExists(*argv,*argv+*argc,"--dslash-unroll") ){
|
||||
QCD::WilsonKernelsStatic::Opt=QCD::WilsonKernelsStatic::OptHandUnroll;
|
||||
}
|
||||
if( GridCmdOptionExists(*argv,*argv+*argc,"--dslash-asm") ){
|
||||
QCD::WilsonKernelsStatic::Opt=QCD::WilsonKernelsStatic::OptInlineAsm;
|
||||
}
|
||||
if( GridCmdOptionExists(*argv,*argv+*argc,"--dslash-generic") ){
|
||||
QCD::WilsonKernelsStatic::Opt=QCD::WilsonKernelsStatic::OptGeneric;
|
||||
}
|
||||
if( GridCmdOptionExists(*argv,*argv+*argc,"--lebesgue") ){
|
||||
LebesgueOrder::UseLebesgueOrder=1;
|
||||
}
|
||||
|
||||
|
||||
if( GridCmdOptionExists(*argv,*argv+*argc,"--cacheblocking") ){
|
||||
arg= GridCmdOptionPayload(*argv,*argv+*argc,"--cacheblocking");
|
||||
GridCmdOptionIntVector(arg,LebesgueOrder::Block);
|
||||
}
|
||||
if( GridCmdOptionExists(*argv,*argv+*argc,"--notimestamp") ){
|
||||
GridLogTimestamp(0);
|
||||
} else {
|
||||
GridLogTimestamp(1);
|
||||
}
|
||||
|
||||
GridParseLayout(*argv,*argc,
|
||||
Grid_default_latt,
|
||||
Grid_default_mpi);
|
||||
|
||||
std::cout << GridLogMessage << "Requesting "<< CartesianCommunicator::MAX_MPI_SHM_BYTES <<" byte stencil comms buffers "<<std::endl;
|
||||
|
||||
if( GridCmdOptionExists(*argv,*argv+*argc,"--decomposition") ){
|
||||
std::cout<<GridLogMessage<<"Grid Decomposition\n";
|
||||
std::cout<<GridLogMessage<<"\tOpenMP threads : "<<GridThread::GetThreads()<<std::endl;
|
||||
std::cout<<GridLogMessage<<"\tMPI tasks : "<<GridCmdVectorIntToString(GridDefaultMpi())<<std::endl;
|
||||
std::cout<<GridLogMessage<<"\tvRealF : "<<sizeof(vRealF)*8 <<"bits ; " <<GridCmdVectorIntToString(GridDefaultSimd(4,vRealF::Nsimd()))<<std::endl;
|
||||
std::cout<<GridLogMessage<<"\tvRealD : "<<sizeof(vRealD)*8 <<"bits ; " <<GridCmdVectorIntToString(GridDefaultSimd(4,vRealD::Nsimd()))<<std::endl;
|
||||
std::cout<<GridLogMessage<<"\tvComplexF : "<<sizeof(vComplexF)*8 <<"bits ; " <<GridCmdVectorIntToString(GridDefaultSimd(4,vComplexF::Nsimd()))<<std::endl;
|
||||
std::cout<<GridLogMessage<<"\tvComplexD : "<<sizeof(vComplexD)*8 <<"bits ; " <<GridCmdVectorIntToString(GridDefaultSimd(4,vComplexD::Nsimd()))<<std::endl;
|
||||
}
|
||||
|
||||
|
||||
Grid_is_initialised = 1;
|
||||
}
|
||||
|
||||
|
||||
void Grid_finalize(void)
|
||||
{
|
||||
#ifdef GRID_COMMS_MPI
|
||||
#if defined (GRID_COMMS_MPI) || defined (GRID_COMMS_MPI3)
|
||||
MPI_Finalize();
|
||||
Grid_unquiesce_nodes();
|
||||
#endif
|
||||
@ -326,10 +416,7 @@ void Grid_sa_signal_handler(int sig,siginfo_t *si,void * ptr)
|
||||
exit(0);
|
||||
return;
|
||||
};
|
||||
#ifdef GRID_FPE
|
||||
#define _GNU_SOURCE
|
||||
#include <fenv.h>
|
||||
#endif
|
||||
|
||||
void Grid_debug_handler_init(void)
|
||||
{
|
||||
struct sigaction sa,osa;
|
||||
@ -338,9 +425,9 @@ void Grid_debug_handler_init(void)
|
||||
sa.sa_flags = SA_SIGINFO;
|
||||
sigaction(SIGSEGV,&sa,NULL);
|
||||
sigaction(SIGTRAP,&sa,NULL);
|
||||
#ifdef GRID_FPE
|
||||
|
||||
feenableexcept( FE_INVALID|FE_OVERFLOW|FE_DIVBYZERO);
|
||||
|
||||
sigaction(SIGFPE,&sa,NULL);
|
||||
#endif
|
||||
}
|
||||
}
|
||||
|
@ -33,6 +33,7 @@ namespace Grid {
|
||||
|
||||
void Grid_init(int *argc,char ***argv);
|
||||
void Grid_finalize(void);
|
||||
|
||||
// internal, controlled with --handle
|
||||
void Grid_sa_signal_handler(int sig,siginfo_t *si,void * ptr);
|
||||
void Grid_debug_handler_init(void);
|
||||
@ -44,6 +45,7 @@ namespace Grid {
|
||||
const std::vector<int> &GridDefaultMpi(void);
|
||||
const int &GridThreads(void) ;
|
||||
void GridSetThreads(int t) ;
|
||||
void GridLogTimestamp(int);
|
||||
|
||||
// Common parsing chores
|
||||
std::string GridCmdOptionPayload(char ** begin, char ** end, const std::string & option);
|
||||
@ -52,6 +54,7 @@ namespace Grid {
|
||||
void GridCmdOptionCSL(std::string str,std::vector<std::string> & vec);
|
||||
void GridCmdOptionIntVector(std::string &str,std::vector<int> & vec);
|
||||
|
||||
|
||||
void GridParseLayout(char **argv,int argc,
|
||||
std::vector<int> &latt,
|
||||
std::vector<int> &simd,
|
||||
|
22  lib/Log.cc
@ -31,11 +31,31 @@ directory
/* END LEGAL */
#include <Grid.h>

#include <cxxabi.h>

namespace Grid {

std::string demangle(const char* name) {

  int status = -4; // some arbitrary value to eliminate the compiler warning

  // enable c++11 by passing the flag -std=c++11 to g++
  std::unique_ptr<char, void(*)(void*)> res {
    abi::__cxa_demangle(name, NULL, NULL, &status),
    std::free
  };

  return (status==0) ? res.get() : name ;
}
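A usage sketch for the demangle() helper above (illustrative, not from the commit): it maps mangled names, such as those produced by typeid or backtrace_symbols, back to readable C++.

#include <typeinfo>
#include <iostream>

template<class T> void print_type(const T &x) {
  // typeid(x).name() yields a mangled string such as "St6vectorIdSaIdEE";
  // demangle() turns it into "std::vector<double, std::allocator<double> >".
  std::cout << Grid::demangle(typeid(x).name()) << std::endl;
}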
GridStopWatch Logger::StopWatch;
int Logger::timestamp;
std::ostream Logger::devnull(0);

void GridLogTimestamp(int on){
  Logger::Timestamp(on);
}

Colours GridLogColours(0);
GridLogger GridLogError(1, "Error", GridLogColours, "RED");
GridLogger GridLogWarning(1, "Warning", GridLogColours, "YELLOW");
@ -73,7 +93,7 @@ void GridLogConfigure(std::vector<std::string> &logstreams) {
////////////////////////////////////////////////////////////
void Grid_quiesce_nodes(void) {
  int me = 0;
#ifdef GRID_COMMS_MPI
#if defined(GRID_COMMS_MPI) || defined(GRID_COMMS_MPI3) || defined(GRID_COMMS_MPI3L)
  MPI_Comm_rank(MPI_COMM_WORLD, &me);
#endif
#ifdef GRID_COMMS_SHMEM
62
lib/Log.h
62
lib/Log.h
@ -37,10 +37,11 @@
|
||||
#include <execinfo.h>
|
||||
#endif
|
||||
|
||||
namespace Grid {
|
||||
namespace Grid {
|
||||
|
||||
//////////////////////////////////////////////////////////////////////////////////////////////////
|
||||
// Dress the output; use std::chrono for time stamping via the StopWatch class
|
||||
int Rank(void); // used for early stage debug before library init
|
||||
//////////////////////////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
|
||||
class Colours{
|
||||
@ -55,7 +56,6 @@ public:
|
||||
|
||||
void Active(bool activate){
|
||||
is_active=activate;
|
||||
|
||||
if (is_active){
|
||||
colour["BLACK"] ="\033[30m";
|
||||
colour["RED"] ="\033[31m";
|
||||
@ -66,21 +66,18 @@ public:
|
||||
colour["CYAN"] ="\033[36m";
|
||||
colour["WHITE"] ="\033[37m";
|
||||
colour["NORMAL"] ="\033[0;39m";
|
||||
} else {
|
||||
colour["BLACK"] ="";
|
||||
colour["RED"] ="";
|
||||
colour["GREEN"] ="";
|
||||
colour["YELLOW"]="";
|
||||
colour["BLUE"] ="";
|
||||
colour["PURPLE"]="";
|
||||
colour["CYAN"] ="";
|
||||
colour["WHITE"] ="";
|
||||
colour["NORMAL"]="";
|
||||
}
|
||||
|
||||
|
||||
};
|
||||
|
||||
} else {
|
||||
colour["BLACK"] ="";
|
||||
colour["RED"] ="";
|
||||
colour["GREEN"] ="";
|
||||
colour["YELLOW"]="";
|
||||
colour["BLUE"] ="";
|
||||
colour["PURPLE"]="";
|
||||
colour["CYAN"] ="";
|
||||
colour["WHITE"] ="";
|
||||
colour["NORMAL"]="";
|
||||
}
|
||||
};
|
||||
};
|
||||
|
||||
|
||||
@ -88,6 +85,7 @@ class Logger {
|
||||
protected:
|
||||
Colours &Painter;
|
||||
int active;
|
||||
static int timestamp;
|
||||
std::string name, topName;
|
||||
std::string COLOUR;
|
||||
|
||||
@ -99,25 +97,28 @@ public:
|
||||
std::string evidence() {return Painter.colour["YELLOW"];}
|
||||
std::string colour() {return Painter.colour[COLOUR];}
|
||||
|
||||
Logger(std::string topNm, int on, std::string nm, Colours& col_class, std::string col)
|
||||
: active(on),
|
||||
name(nm),
|
||||
topName(topNm),
|
||||
Painter(col_class),
|
||||
COLOUR(col){} ;
|
||||
Logger(std::string topNm, int on, std::string nm, Colours& col_class, std::string col) : active(on),
|
||||
name(nm),
|
||||
topName(topNm),
|
||||
Painter(col_class),
|
||||
COLOUR(col) {} ;
|
||||
|
||||
void Active(int on) {active = on;};
|
||||
int isActive(void) {return active;};
|
||||
static void Timestamp(int on) {timestamp = on;};
|
||||
|
||||
friend std::ostream& operator<< (std::ostream& stream, Logger& log){
|
||||
|
||||
if ( log.active ) {
|
||||
StopWatch.Stop();
|
||||
GridTime now = StopWatch.Elapsed();
|
||||
StopWatch.Start();
|
||||
stream << log.background()<< log.topName << log.background()<< " : ";
|
||||
stream << log.colour() <<std::setw(14) << std::left << log.name << log.background() << " : ";
|
||||
stream << log.evidence()<< now << log.background() << " : " << log.colour();
|
||||
if ( log.timestamp ) {
|
||||
StopWatch.Stop();
|
||||
GridTime now = StopWatch.Elapsed();
|
||||
StopWatch.Start();
|
||||
stream << log.evidence()<< now << log.background() << " : " ;
|
||||
}
|
||||
stream << log.colour();
|
||||
return stream;
|
||||
} else {
|
||||
return devnull;
|
||||
@ -143,13 +144,14 @@ extern GridLogger GridLogIterative ;
|
||||
extern GridLogger GridLogIntegrator ;
|
||||
extern Colours GridLogColours;
|
||||
|
||||
std::string demangle(const char* name) ;
|
||||
|
||||
#define _NBACKTRACE (256)
|
||||
extern void * Grid_backtrace_buffer[_NBACKTRACE];
|
||||
|
||||
#define BACKTRACEFILE() {\
|
||||
char string[20]; \
|
||||
std::sprintf(string,"backtrace.%d",Rank()); \
|
||||
std::sprintf(string,"backtrace.%d",CartesianCommunicator::RankWorld()); \
|
||||
std::FILE * fp = std::fopen(string,"w"); \
|
||||
BACKTRACEFP(fp)\
|
||||
std::fclose(fp); \
|
||||
@ -161,7 +163,7 @@ std::fclose(fp); \
|
||||
int symbols = backtrace (Grid_backtrace_buffer,_NBACKTRACE);\
|
||||
char **strings = backtrace_symbols(Grid_backtrace_buffer,symbols);\
|
||||
for (int i = 0; i < symbols; i++){\
|
||||
std::fprintf (fp,"BackTrace Strings: %d %s\n",i, strings[i]); std::fflush(fp); \
|
||||
std::fprintf (fp,"BackTrace Strings: %d %s\n",i, demangle(strings[i]).c_str()); std::fflush(fp); \
|
||||
}\
|
||||
}
|
||||
#else
|
||||
|
@ -1,14 +1,27 @@
extra_sources=
if BUILD_COMMS_MPI
  extra_sources+=communicator/Communicator_mpi.cc
  extra_sources+=communicator/Communicator_base.cc
endif

if BUILD_COMMS_MPI3
  extra_sources+=communicator/Communicator_mpi3.cc
  extra_sources+=communicator/Communicator_base.cc
endif

if BUILD_COMMS_MPI3L
  extra_sources+=communicator/Communicator_mpi3_leader.cc
  extra_sources+=communicator/Communicator_base.cc
endif

if BUILD_COMMS_SHMEM
  extra_sources+=communicator/Communicator_shmem.cc
  extra_sources+=communicator/Communicator_base.cc
endif

if BUILD_COMMS_NONE
  extra_sources+=communicator/Communicator_none.cc
  extra_sources+=communicator/Communicator_base.cc
endif

#
@ -43,6 +43,9 @@ Author: paboyle <paboyle@ph.ed.ac.uk>
#else
#include <sys/syscall.h>
#endif
#ifdef __x86_64__
#include <x86intrin.h>
#endif

namespace Grid {

@ -86,7 +89,6 @@ inline uint64_t cyclecount(void){
  return tmp;
}
#elif defined __x86_64__
#include <x86intrin.h>
inline uint64_t cyclecount(void){
  return __rdtsc();
  // unsigned int dummy;
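An illustrative timing sketch for the cyclecount() helper above (do_work() stands for a hypothetical workload, not a Grid function):

uint64_t t0     = Grid::cyclecount();
do_work();
uint64_t cycles = Grid::cyclecount() - t0;   // elapsed time-stamp-counter ticks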
12  lib/Simd.h
@ -237,6 +237,18 @@ namespace Grid {
    stream<<">";
    return stream;
  }
  inline std::ostream& operator<< (std::ostream& stream, const vInteger &o){
    int nn=vInteger::Nsimd();
    std::vector<Integer,alignedAllocator<Integer> > buf(nn);
    vstore(o,&buf[0]);
    stream<<"<";
    for(int i=0;i<nn;i++){
      stream<<buf[i];
      if(i<nn-1) stream<<",";
    }
    stream<<">";
    return stream;
  }

}
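An illustrative use of the new stream operator (assuming Grid's vsplat fills every lane of a vInteger; the printed width depends on the SIMD target):

Grid::vInteger vi;
vsplat(vi, 7);                  // assumed: splat 7 into every SIMD lane
std::cout << vi << std::endl;   // prints e.g. "<7,7,7,7>" for a 4-wide Integer vector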
247
lib/Stat.cc
Normal file
247
lib/Stat.cc
Normal file
@ -0,0 +1,247 @@
|
||||
#include <Grid.h>
|
||||
#include <PerfCount.h>
|
||||
#include <Stat.h>
|
||||
|
||||
|
||||
namespace Grid {
|
||||
|
||||
|
||||
bool PmuStat::pmu_initialized=false;
|
||||
|
||||
|
||||
void PmuStat::init(const char *regname)
|
||||
{
|
||||
#ifdef __x86_64__
|
||||
name = regname;
|
||||
if (!pmu_initialized)
|
||||
{
|
||||
std::cout<<"initialising pmu"<<std::endl;
|
||||
pmu_initialized = true;
|
||||
pmu_init();
|
||||
}
|
||||
clear();
|
||||
#endif
|
||||
}
|
||||
void PmuStat::clear(void)
|
||||
{
|
||||
#ifdef __x86_64__
|
||||
count = 0;
|
||||
tregion = 0;
|
||||
pmc0 = 0;
|
||||
pmc1 = 0;
|
||||
inst = 0;
|
||||
cyc = 0;
|
||||
ref = 0;
|
||||
tcycles = 0;
|
||||
reads = 0;
|
||||
writes = 0;
|
||||
#endif
|
||||
}
|
||||
void PmuStat::print(void)
|
||||
{
|
||||
#ifdef __x86_64__
|
||||
std::cout <<"Reg "<<std::string(name)<<":\n";
|
||||
std::cout <<" region "<<tregion<<std::endl;
|
||||
std::cout <<" cycles "<<tcycles<<std::endl;
|
||||
std::cout <<" inst "<<inst <<std::endl;
|
||||
std::cout <<" cyc "<<cyc <<std::endl;
|
||||
std::cout <<" ref "<<ref <<std::endl;
|
||||
std::cout <<" pmc0 "<<pmc0 <<std::endl;
|
||||
std::cout <<" pmc1 "<<pmc1 <<std::endl;
|
||||
std::cout <<" count "<<count <<std::endl;
|
||||
std::cout <<" reads "<<reads <<std::endl;
|
||||
std::cout <<" writes "<<writes <<std::endl;
|
||||
#endif
|
||||
}
|
||||
void PmuStat::start(void)
|
||||
{
|
||||
#ifdef __x86_64__
|
||||
pmu_start();
|
||||
++count;
|
||||
xmemctrs(&mrstart, &mwstart);
|
||||
tstart = __rdtsc();
|
||||
#endif
|
||||
}
|
||||
void PmuStat::enter(int t)
|
||||
{
|
||||
#ifdef __x86_64__
|
||||
counters[0][t] = __rdpmc(0);
|
||||
counters[1][t] = __rdpmc(1);
|
||||
counters[2][t] = __rdpmc((1<<30)|0);
|
||||
counters[3][t] = __rdpmc((1<<30)|1);
|
||||
counters[4][t] = __rdpmc((1<<30)|2);
|
||||
counters[5][t] = __rdtsc();
|
||||
#endif
|
||||
}
|
||||
void PmuStat::exit(int t)
|
||||
{
|
||||
#ifdef __x86_64__
|
||||
counters[0][t] = __rdpmc(0) - counters[0][t];
|
||||
counters[1][t] = __rdpmc(1) - counters[1][t];
|
||||
counters[2][t] = __rdpmc((1<<30)|0) - counters[2][t];
|
||||
counters[3][t] = __rdpmc((1<<30)|1) - counters[3][t];
|
||||
counters[4][t] = __rdpmc((1<<30)|2) - counters[4][t];
|
||||
counters[5][t] = __rdtsc() - counters[5][t];
|
||||
#endif
|
||||
}
|
||||
void PmuStat::accum(int nthreads)
|
||||
{
|
||||
#ifdef __x86_64__
|
||||
tend = __rdtsc();
|
||||
xmemctrs(&mrend, &mwend);
|
||||
pmu_stop();
|
||||
for (int t = 0; t < nthreads; ++t) {
|
||||
pmc0 += counters[0][t];
|
||||
pmc1 += counters[1][t];
|
||||
inst += counters[2][t];
|
||||
cyc += counters[3][t];
|
||||
ref += counters[4][t];
|
||||
tcycles += counters[5][t];
|
||||
}
|
||||
uint64_t region = tend - tstart;
|
||||
tregion += region;
|
||||
uint64_t mreads = mrend - mrstart;
|
||||
reads += mreads;
|
||||
uint64_t mwrites = mwend - mwstart;
|
||||
writes += mwrites;
|
||||
#endif
|
||||
}
|
||||
|
||||
|
||||
void PmuStat::pmu_fini(void) {}
|
||||
void PmuStat::pmu_start(void) {};
|
||||
void PmuStat::pmu_stop(void) {};
|
||||
void PmuStat::pmu_init(void)
|
||||
{
|
||||
#ifdef _KNIGHTS_LANDING_
|
||||
KNLsetup();
|
||||
#endif
|
||||
}
|
||||
void PmuStat::xmemctrs(uint64_t *mr, uint64_t *mw)
|
||||
{
|
||||
#ifdef _KNIGHTS_LANDING_
|
||||
ctrs c;
|
||||
KNLreadctrs(c);
|
||||
uint64_t emr = 0, emw = 0;
|
||||
for (int i = 0; i < NEDC; ++i)
|
||||
{
|
||||
emr += c.edcrd[i];
|
||||
emw += c.edcwr[i];
|
||||
}
|
||||
*mr = emr;
|
||||
*mw = emw;
|
||||
#else
|
||||
*mr = *mw = 0;
|
||||
#endif
|
||||
}
|
||||
|
||||
#ifdef _KNIGHTS_LANDING_
|
||||
|
||||
struct knl_gbl_ PmuStat::gbl;
|
||||
|
||||
#define PMU_MEM
|
||||
|
||||
void PmuStat::KNLevsetup(const char *ename, int &fd, int event, int umask)
|
||||
{
|
||||
char fname[1024];
|
||||
snprintf(fname, sizeof(fname), "%s/type", ename);
|
||||
FILE *fp = fopen(fname, "r");
|
||||
if (fp == 0) {
|
||||
::printf("open %s", fname);
|
||||
::exit(0);
|
||||
}
|
||||
int type;
|
||||
int ret = fscanf(fp, "%d", &type);
|
||||
assert(ret == 1);
|
||||
fclose(fp);
|
||||
// std::cout << "Using PMU type "<<type<<" from " << std::string(ename) <<std::endl;
|
||||
|
||||
struct perf_event_attr hw = {};
|
||||
hw.size = sizeof(hw);
|
||||
hw.type = type;
|
||||
// see /sys/devices/uncore_*/format/*
|
||||
// All of the events we are interested in are configured the same way, but
|
||||
// that isn't always true. Proper code would parse the format files
|
||||
hw.config = event | (umask << 8);
|
||||
//hw.read_format = PERF_FORMAT_GROUP;
|
||||
// unfortunately the above only works within a single PMU; might
|
||||
// as well just read them one at a time
|
||||
int cpu = 0;
|
||||
fd = perf_event_open(&hw, -1, cpu, -1, 0);
|
||||
if (fd == -1) {
|
||||
::printf("CPU %d, box %s, event 0x%lx", cpu, ename, hw.config);
|
||||
::exit(0);
|
||||
} else {
|
||||
// std::cout << "event "<<std::string(ename)<<" set up for fd "<<fd<<" hw.config "<<hw.config <<std::endl;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
void PmuStat::KNLsetup(void){
|
||||
|
||||
int ret;
|
||||
char fname[1024];
|
||||
|
||||
// MC RPQ inserts and WPQ inserts (reads & writes)
|
||||
for (int mc = 0; mc < NMC; ++mc)
|
||||
{
|
||||
::snprintf(fname, sizeof(fname), "/sys/devices/uncore_imc_%d",mc);
|
||||
// RPQ Inserts
|
||||
KNLevsetup(fname, gbl.mc_rd[mc], 0x1, 0x1);
|
||||
// WPQ Inserts
|
||||
KNLevsetup(fname, gbl.mc_wr[mc], 0x2, 0x1);
|
||||
}
|
||||
// EDC RPQ inserts and WPQ inserts
|
||||
for (int edc=0; edc < NEDC; ++edc)
|
||||
{
|
||||
::snprintf(fname, sizeof(fname), "/sys/devices/uncore_edc_eclk_%d",edc);
|
||||
// RPQ inserts
|
||||
KNLevsetup(fname, gbl.edc_rd[edc], 0x1, 0x1);
|
||||
// WPQ inserts
|
||||
KNLevsetup(fname, gbl.edc_wr[edc], 0x2, 0x1);
|
||||
}
|
||||
// EDC HitE, HitM, MissE, MissM
|
||||
for (int edc=0; edc < NEDC; ++edc)
|
||||
{
|
||||
::snprintf(fname, sizeof(fname), "/sys/devices/uncore_edc_uclk_%d", edc);
|
||||
KNLevsetup(fname, gbl.edc_hite[edc], 0x2, 0x1);
|
||||
KNLevsetup(fname, gbl.edc_hitm[edc], 0x2, 0x2);
|
||||
KNLevsetup(fname, gbl.edc_misse[edc], 0x2, 0x4);
|
||||
KNLevsetup(fname, gbl.edc_missm[edc], 0x2, 0x8);
|
||||
}
|
||||
}
|
||||
|
||||
uint64_t PmuStat::KNLreadctr(int fd)
|
||||
{
|
||||
uint64_t data;
|
||||
size_t s = ::read(fd, &data, sizeof(data));
|
||||
if (s != sizeof(uint64_t)){
|
||||
::printf("read counter %lu", s);
|
||||
::exit(0);
|
||||
}
|
||||
return data;
|
||||
}
|
||||
|
||||
void PmuStat::KNLreadctrs(ctrs &c)
|
||||
{
|
||||
for (int i = 0; i < NMC; ++i)
|
||||
{
|
||||
c.mcrd[i] = KNLreadctr(gbl.mc_rd[i]);
|
||||
c.mcwr[i] = KNLreadctr(gbl.mc_wr[i]);
|
||||
}
|
||||
for (int i = 0; i < NEDC; ++i)
|
||||
{
|
||||
c.edcrd[i] = KNLreadctr(gbl.edc_rd[i]);
|
||||
c.edcwr[i] = KNLreadctr(gbl.edc_wr[i]);
|
||||
}
|
||||
for (int i = 0; i < NEDC; ++i)
|
||||
{
|
||||
c.edchite[i] = KNLreadctr(gbl.edc_hite[i]);
|
||||
c.edchitm[i] = KNLreadctr(gbl.edc_hitm[i]);
|
||||
c.edcmisse[i] = KNLreadctr(gbl.edc_misse[i]);
|
||||
c.edcmissm[i] = KNLreadctr(gbl.edc_missm[i]);
|
||||
}
|
||||
}
|
||||
|
||||
#endif
|
||||
}
|
104
lib/Stat.h
Normal file
104
lib/Stat.h
Normal file
@ -0,0 +1,104 @@
|
||||
#ifndef _GRID_STAT_H
|
||||
#define _GRID_STAT_H
|
||||
|
||||
#ifdef AVX512
|
||||
#define _KNIGHTS_LANDING_ROOTONLY
|
||||
#endif
|
||||
|
||||
namespace Grid {
|
||||
|
||||
///////////////////////////////////////////////////////////////////////////////
|
||||
// Extra KNL counters from MCDRAM
|
||||
///////////////////////////////////////////////////////////////////////////////
|
||||
#ifdef _KNIGHTS_LANDING_
|
||||
#define NMC 6
|
||||
#define NEDC 8
|
||||
struct ctrs
|
||||
{
|
||||
uint64_t mcrd[NMC];
|
||||
uint64_t mcwr[NMC];
|
||||
uint64_t edcrd[NEDC];
|
||||
uint64_t edcwr[NEDC];
|
||||
uint64_t edchite[NEDC];
|
||||
uint64_t edchitm[NEDC];
|
||||
uint64_t edcmisse[NEDC];
|
||||
uint64_t edcmissm[NEDC];
|
||||
};
|
||||
// Peter/Azusa:
|
||||
// Our modification of a code provided by Larry Meadows from Intel
|
||||
// Verified by email exchange non-NDA, ok for github. Should be as uses /sys/devices/ FS
|
||||
// so is already public and in the linux kernel for KNL.
|
||||
struct knl_gbl_
|
||||
{
|
||||
int mc_rd[NMC];
|
||||
int mc_wr[NMC];
|
||||
int edc_rd[NEDC];
|
||||
int edc_wr[NEDC];
|
||||
int edc_hite[NEDC];
|
||||
int edc_hitm[NEDC];
|
||||
int edc_misse[NEDC];
|
||||
int edc_missm[NEDC];
|
||||
};
|
||||
#endif
|
||||
///////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
class PmuStat
|
||||
{
|
||||
uint64_t counters[8][256];
|
||||
#ifdef _KNIGHTS_LANDING_
|
||||
static struct knl_gbl_ gbl;
|
||||
#endif
|
||||
const char *name;
|
||||
|
||||
uint64_t reads; // memory reads
|
||||
uint64_t writes; // memory writes
|
||||
uint64_t mrstart; // memory read counter at start of parallel region
|
||||
uint64_t mrend; // memory read counter at end of parallel region
|
||||
uint64_t mwstart; // memory write counter at start of parallel region
|
||||
uint64_t mwend; // memory write counter at end of parallel region
|
||||
|
||||
// cumulative counters
|
||||
uint64_t count; // number of invocations
|
||||
uint64_t tregion; // total time in parallel region (from thread 0)
|
||||
uint64_t tcycles; // total cycles inside parallel region
|
||||
uint64_t inst, ref, cyc; // fixed counters
|
||||
uint64_t pmc0, pmc1;// pmu
|
||||
// add memory counters here
|
||||
// temp variables
|
||||
uint64_t tstart; // tsc at start of parallel region
|
||||
uint64_t tend; // tsc at end of parallel region
|
||||
// map for ctrs values
|
||||
// 0 pmc0 start
|
||||
// 1 pmc0 end
|
||||
// 2 pmc1 start
|
||||
// 3 pmc1 end
|
||||
// 4 tsc start
|
||||
// 5 tsc end
|
||||
static bool pmu_initialized;
|
||||
public:
|
||||
static bool is_init(void){ return pmu_initialized;}
|
||||
static void pmu_init(void);
|
||||
static void pmu_fini(void);
|
||||
static void pmu_start(void);
|
||||
static void pmu_stop(void);
|
||||
void accum(int nthreads);
|
||||
static void xmemctrs(uint64_t *mr, uint64_t *mw);
|
||||
void start(void);
|
||||
void enter(int t);
|
||||
void exit(int t);
|
||||
void print(void);
|
||||
void init(const char *regname);
|
||||
void clear(void);
|
||||
#ifdef _KNIGHTS_LANDING_
|
||||
static void KNLsetup(void);
|
||||
static uint64_t KNLreadctr(int fd);
|
||||
static void KNLreadctrs(ctrs &c);
|
||||
static void KNLevsetup(const char *ename, int &fd, int event, int umask);
|
||||
#endif
|
||||
|
||||
};
|
||||
|
||||
}
|
||||
#endif
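A sketch of the call sequence the interface above suggests (illustrative; assumes OpenMP and that the PMU/rdpmc access configured in Stat.cc is available): init once, start/accum around a threaded region, enter/exit per thread.

#include <omp.h>

Grid::PmuStat stat;
stat.init("dslash");                    // name the region; initialise the PMU once
stat.start();
#pragma omp parallel
{
  int t = omp_get_thread_num();
  stat.enter(t);                        // snapshot this thread's counters
  // ... timed work ...
  stat.exit(t);                         // record this thread's counter deltas
}
stat.accum(omp_get_max_threads());      // fold per-thread deltas into the totals
stat.print();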
|
||||
|
||||
|
1694  lib/Stencil.h (file diff suppressed because it is too large)
@ -37,11 +37,20 @@ Author: paboyle <paboyle@ph.ed.ac.uk>

#ifdef GRID_OMP
#include <omp.h>
#define PARALLEL_FOR_LOOP _Pragma("omp parallel for ")
#define PARALLEL_NESTED_LOOP2 _Pragma("omp parallel for collapse(2)")
#ifdef GRID_NUMA
#define PARALLEL_FOR_LOOP _Pragma("omp parallel for schedule(static)")
#define PARALLEL_FOR_LOOP_INTERN _Pragma("omp for schedule(static)")
#else
#define PARALLEL_FOR_LOOP
#define PARALLEL_FOR_LOOP _Pragma("omp parallel for schedule(runtime)")
#define PARALLEL_FOR_LOOP_INTERN _Pragma("omp for schedule(runtime)")
#endif
#define PARALLEL_NESTED_LOOP2 _Pragma("omp parallel for collapse(2)")
#define PARALLEL_REGION _Pragma("omp parallel")
#else
#define PARALLEL_FOR_LOOP
#define PARALLEL_FOR_LOOP_INTERN
#define PARALLEL_NESTED_LOOP2
#define PARALLEL_REGION
#endif
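A sketch of the intended pattern for the new macros (illustrative; Nd, NN and grid are assumed from context): thread-private scratch is declared once inside PARALLEL_REGION, and the work-shared loop uses the _INTERN form so the thread team is not re-created for every loop, as in the FFT changes earlier in this commit.

PARALLEL_REGION
{
  std::vector<int> coor(Nd);          // per-thread scratch, allocated once
  PARALLEL_FOR_LOOP_INTERN
  for(int idx=0;idx<NN;idx++){
    grid->LocalIndexToLocalCoor(idx,coor);
    // ... per-site work ...
  }
}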

namespace Grid {
@ -123,6 +132,22 @@ class GridThread {
    ThreadBarrier();
  };

  static void bcopy(const void *src, void *dst, size_t len) {
#ifdef GRID_OMP
#pragma omp parallel
    {
      const char *c_src =(char *) src;
      char *c_dest=(char *) dst;
      int me,mywork,myoff;
      GridThread::GetWorkBarrier(len,me, mywork,myoff);
      bcopy(&c_src[myoff],&c_dest[myoff],mywork);
    }
#else
    bcopy(src,dst,len);
#endif
  }

};

}
@ -282,7 +282,7 @@ PARALLEL_FOR_LOOP
|
||||
} else if(SE->_is_local) {
|
||||
nbr = in._odata[SE->_offset];
|
||||
} else {
|
||||
nbr = Stencil.comm_buf[SE->_offset];
|
||||
nbr = Stencil.CommBuf()[SE->_offset];
|
||||
}
|
||||
res = res + A[point]._odata[ss]*nbr;
|
||||
}
|
||||
|
@ -154,7 +154,7 @@ class ConjugateGradient : public OperatorFunction<Field> {
|
||||
<< LinalgTimer.Elapsed();
|
||||
std::cout << std::endl;
|
||||
|
||||
if (ErrorOnNoConverge) assert(true_residual / Tolerance < 1000.0);
|
||||
if (ErrorOnNoConverge) assert(true_residual / Tolerance < 10000.0);
|
||||
|
||||
return;
|
||||
}
|
||||
|
@ -31,7 +31,11 @@ Author: paboyle <paboyle@ph.ed.ac.uk>
|
||||
|
||||
#include <string.h> //memset
|
||||
#ifdef USE_LAPACK
|
||||
#include <lapacke.h>
|
||||
void LAPACK_dstegr(char *jobz, char *range, int *n, double *d, double *e,
|
||||
double *vl, double *vu, int *il, int *iu, double *abstol,
|
||||
int *m, double *w, double *z, int *ldz, int *isuppz,
|
||||
double *work, int *lwork, int *iwork, int *liwork,
|
||||
int *info);
|
||||
#endif
|
||||
#include "DenseMatrix.h"
|
||||
#include "EigenSort.h"
|
||||
|
@ -77,15 +77,12 @@ public:
|
||||
// GridCartesian / GridRedBlackCartesian
|
||||
////////////////////////////////////////////////////////////////
|
||||
virtual int CheckerBoarded(int dim)=0;
|
||||
virtual int CheckerBoard(std::vector<int> site)=0;
|
||||
virtual int CheckerBoard(std::vector<int> &site)=0;
|
||||
virtual int CheckerBoardDestination(int source_cb,int shift,int dim)=0;
|
||||
virtual int CheckerBoardShift(int source_cb,int dim,int shift,int osite)=0;
|
||||
virtual int CheckerBoardShiftForCB(int source_cb,int dim,int shift,int cb)=0;
|
||||
int CheckerBoardFromOindex (int Oindex){
|
||||
std::vector<int> ocoor;
|
||||
oCoorFromOindex(ocoor,Oindex);
|
||||
return CheckerBoard(ocoor);
|
||||
}
|
||||
virtual int CheckerBoardFromOindex (int Oindex)=0;
|
||||
virtual int CheckerBoardFromOindexTable (int Oindex)=0;
|
||||
|
||||
//////////////////////////////////////////////////////////////////////////////////////////////
|
||||
// Local layout calculations
|
||||
|
@ -39,10 +39,17 @@ class GridCartesian: public GridBase {
|
||||
|
||||
public:
|
||||
|
||||
virtual int CheckerBoardFromOindexTable (int Oindex) {
|
||||
return 0;
|
||||
}
|
||||
virtual int CheckerBoardFromOindex (int Oindex)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
virtual int CheckerBoarded(int dim){
|
||||
return 0;
|
||||
}
|
||||
virtual int CheckerBoard(std::vector<int> site){
|
||||
virtual int CheckerBoard(std::vector<int> &site){
|
||||
return 0;
|
||||
}
|
||||
virtual int CheckerBoardDestination(int cb,int shift,int dim){
|
||||
|
@ -43,12 +43,13 @@ class GridRedBlackCartesian : public GridBase
|
||||
public:
|
||||
std::vector<int> _checker_dim_mask;
|
||||
int _checker_dim;
|
||||
std::vector<int> _checker_board;
|
||||
|
||||
virtual int CheckerBoarded(int dim){
|
||||
if( dim==_checker_dim) return 1;
|
||||
else return 0;
|
||||
}
|
||||
virtual int CheckerBoard(std::vector<int> site){
|
||||
virtual int CheckerBoard(std::vector<int> &site){
|
||||
int linear=0;
|
||||
assert(site.size()==_ndimension);
|
||||
for(int d=0;d<_ndimension;d++){
|
||||
@ -72,12 +73,20 @@ public:
|
||||
// or by looping over x,y,z and multiply rather than computing checkerboard.
|
||||
|
||||
if ( (source_cb+ocb)&1 ) {
|
||||
|
||||
return (shift)/2;
|
||||
} else {
|
||||
return (shift+1)/2;
|
||||
}
|
||||
}
|
||||
virtual int CheckerBoardFromOindexTable (int Oindex) {
|
||||
return _checker_board[Oindex];
|
||||
}
|
||||
virtual int CheckerBoardFromOindex (int Oindex)
|
||||
{
|
||||
std::vector<int> ocoor;
|
||||
oCoorFromOindex(ocoor,Oindex);
|
||||
return CheckerBoard(ocoor);
|
||||
}
|
||||
virtual int CheckerBoardShift(int source_cb,int dim,int shift,int osite){
|
||||
|
||||
if(dim != _checker_dim) return shift;
|
||||
@ -169,7 +178,7 @@ public:
|
||||
// all elements of a simd vector must have same checkerboard.
|
||||
// If Ls vectorised, this must still be the case; e.g. dwf rb5d
|
||||
if ( _simd_layout[d]>1 ) {
|
||||
if ( d != _checker_dim ) {
|
||||
if ( checker_dim_mask[d] ) {
|
||||
assert( (_rdimensions[d]&0x1) == 0 );
|
||||
}
|
||||
}
|
||||
@ -185,6 +194,8 @@ public:
|
||||
_ostride[d] = _ostride[d-1]*_rdimensions[d-1];
|
||||
_istride[d] = _istride[d-1]*_simd_layout[d-1];
|
||||
}
|
||||
|
||||
|
||||
}
|
||||
|
||||
////////////////////////////////////////////////////////////////////////////////////////////
|
||||
@ -205,6 +216,18 @@ public:
|
||||
_slice_nblock[d]=nblock;
|
||||
block = block*_rdimensions[d];
|
||||
}
|
||||
|
||||
////////////////////////////////////////////////
|
||||
// Create a checkerboard lookup table
|
||||
////////////////////////////////////////////////
|
||||
int rvol = 1;
|
||||
for(int d=0;d<_ndimension;d++){
|
||||
rvol=rvol * _rdimensions[d];
|
||||
}
|
||||
_checker_board.resize(rvol);
|
||||
for(int osite=0;osite<_osites;osite++){
|
||||
_checker_board[osite] = CheckerBoardFromOindex (osite);
|
||||
}
|
||||
|
||||
};
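The lookup table built above trades one parity computation per site for a single array read in CheckerBoardFromOindexTable. A self-contained sketch of constructing such a table, assuming (as the surrounding code suggests) that parity is the sum of coordinates in the masked dimensions, modulo 2; the names are illustrative, not Grid's:

#include <cstddef>
#include <vector>
// Sketch: parity table for an rdimensions-sized reduced local volume, counting only
// the dimensions flagged in checker_dim_mask.
std::vector<int> MakeCheckerBoardTable(const std::vector<int> &rdims,
                                       const std::vector<int> &checker_dim_mask) {
  size_t vol = 1;
  for (int d : rdims) vol *= d;
  std::vector<int> table(vol);
  for (size_t osite = 0; osite < vol; osite++) {
    size_t rem = osite;      // unpack the lexicographic index into coordinates
    int parity = 0;
    for (size_t d = 0; d < rdims.size(); d++) {
      int coor = rem % rdims[d];
      rem /= rdims[d];
      if (checker_dim_mask[d]) parity += coor;
    }
    table[osite] = parity & 0x1;
  }
  return table;
}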
|
||||
protected:
|
||||
|
124
lib/communicator/Communicator_base.cc
Normal file
@ -0,0 +1,124 @@
|
||||
/*************************************************************************************
|
||||
|
||||
Grid physics library, www.github.com/paboyle/Grid
|
||||
|
||||
Source file: ./lib/communicator/Communicator_base.cc
|
||||
|
||||
Copyright (C) 2015
|
||||
|
||||
Author: Peter Boyle <paboyle@ph.ed.ac.uk>
|
||||
|
||||
This program is free software; you can redistribute it and/or modify
|
||||
it under the terms of the GNU General Public License as published by
|
||||
the Free Software Foundation; either version 2 of the License, or
|
||||
(at your option) any later version.
|
||||
|
||||
This program is distributed in the hope that it will be useful,
|
||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
GNU General Public License for more details.
|
||||
|
||||
You should have received a copy of the GNU General Public License along
|
||||
with this program; if not, write to the Free Software Foundation, Inc.,
|
||||
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
|
||||
See the full license in the file "LICENSE" in the top level distribution directory
|
||||
*************************************************************************************/
|
||||
/* END LEGAL */
|
||||
#include "Grid.h"
|
||||
namespace Grid {
|
||||
|
||||
///////////////////////////////////////////////////////////////
|
||||
// Info that is set up once and independent of cartesian layout
|
||||
///////////////////////////////////////////////////////////////
|
||||
void * CartesianCommunicator::ShmCommBuf;
|
||||
uint64_t CartesianCommunicator::MAX_MPI_SHM_BYTES = 128*1024*1024;
|
||||
|
||||
/////////////////////////////////
|
||||
// Alloc, free shmem region
|
||||
/////////////////////////////////
|
||||
void *CartesianCommunicator::ShmBufferMalloc(size_t bytes){
|
||||
// bytes = (bytes+sizeof(vRealD))&(~(sizeof(vRealD)-1));// align up bytes
|
||||
void *ptr = (void *)heap_top;
|
||||
heap_top += bytes;
|
||||
heap_bytes+= bytes;
|
||||
if (heap_bytes >= MAX_MPI_SHM_BYTES) {
|
||||
std::cout<< " ShmBufferMalloc exceeded shared heap size -- try increasing with --shm <MB> flag" <<std::endl;
|
||||
std::cout<< " Parameter specified in units of MB (megabytes) " <<std::endl;
|
||||
std::cout<< " Current value is " << (MAX_MPI_SHM_BYTES/(1024*1024)) <<std::endl;
|
||||
assert(heap_bytes<MAX_MPI_SHM_BYTES);
|
||||
}
|
||||
return ptr;
|
||||
}
|
||||
void CartesianCommunicator::ShmBufferFreeAll(void) {
|
||||
heap_top =(size_t)ShmBufferSelf();
|
||||
heap_bytes=0;
|
||||
}
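ShmBufferMalloc and ShmBufferFreeAll together form a bump allocator over the fixed shared region: allocation only advances a cursor, and the single free resets it wholesale. A minimal sketch of the same pattern, with hypothetical names:

#include <cassert>
#include <cstddef>
#include <cstdint>
struct BumpHeap {
  uint8_t *base; size_t capacity; size_t used = 0;
  void *alloc(size_t bytes) {
    assert(used + bytes <= capacity);  // Grid instead prints a hint about the --shm <MB> flag
    void *p = base + used;
    used += bytes;
    return p;
  }
  void freeAll() { used = 0; }         // no per-allocation free, by design
};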
|
||||
|
||||
/////////////////////////////////
|
||||
// Grid information queries
|
||||
/////////////////////////////////
|
||||
int CartesianCommunicator::IsBoss(void) { return _processor==0; };
|
||||
int CartesianCommunicator::BossRank(void) { return 0; };
|
||||
int CartesianCommunicator::ThisRank(void) { return _processor; };
|
||||
const std::vector<int> & CartesianCommunicator::ThisProcessorCoor(void) { return _processor_coor; };
|
||||
const std::vector<int> & CartesianCommunicator::ProcessorGrid(void) { return _processors; };
|
||||
int CartesianCommunicator::ProcessorCount(void) { return _Nprocessors; };
|
||||
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
// very VERY rarely (Log, serial RNG) we need world without a grid
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
void CartesianCommunicator::GlobalSum(ComplexF &c)
|
||||
{
|
||||
GlobalSumVector((float *)&c,2);
|
||||
}
|
||||
void CartesianCommunicator::GlobalSumVector(ComplexF *c,int N)
|
||||
{
|
||||
GlobalSumVector((float *)c,2*N);
|
||||
}
|
||||
void CartesianCommunicator::GlobalSum(ComplexD &c)
|
||||
{
|
||||
GlobalSumVector((double *)&c,2);
|
||||
}
|
||||
void CartesianCommunicator::GlobalSumVector(ComplexD *c,int N)
|
||||
{
|
||||
GlobalSumVector((double *)c,2*N);
|
||||
}
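The ComplexF/ComplexD overloads work because an elementwise reduction over the {re,im} pair is exactly complex addition; a trivial single-process check of that identity:

#include <cassert>
#include <complex>
int main() {
  std::complex<double> a(1.0, 2.0), b(3.0, -4.0);
  double ra[2] = {a.real(), a.imag()};
  double rb[2] = {b.real(), b.imag()};
  double sum[2] = {ra[0] + rb[0], ra[1] + rb[1]};  // what an elementwise allreduce computes
  assert(std::complex<double>(sum[0], sum[1]) == a + b);
  return 0;
}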
|
||||
|
||||
#if !defined( GRID_COMMS_MPI3) && !defined (GRID_COMMS_MPI3L)
|
||||
|
||||
void CartesianCommunicator::StencilSendToRecvFromBegin(std::vector<CommsRequest_t> &list,
|
||||
void *xmit,
|
||||
int xmit_to_rank,
|
||||
void *recv,
|
||||
int recv_from_rank,
|
||||
int bytes)
|
||||
{
|
||||
SendToRecvFromBegin(list,xmit,xmit_to_rank,recv,recv_from_rank,bytes);
|
||||
}
|
||||
void CartesianCommunicator::StencilSendToRecvFromComplete(std::vector<CommsRequest_t> &waitall)
|
||||
{
|
||||
SendToRecvFromComplete(waitall);
|
||||
}
|
||||
void CartesianCommunicator::StencilBarrier(void){};
|
||||
|
||||
commVector<uint8_t> CartesianCommunicator::ShmBufStorageVector;
|
||||
|
||||
void *CartesianCommunicator::ShmBufferSelf(void) { return ShmCommBuf; }
|
||||
|
||||
void *CartesianCommunicator::ShmBuffer(int rank) {
|
||||
return NULL;
|
||||
}
|
||||
void *CartesianCommunicator::ShmBufferTranslate(int rank,void * local_p) {
|
||||
return NULL;
|
||||
}
|
||||
void CartesianCommunicator::ShmInitGeneric(void){
|
||||
ShmBufStorageVector.resize(MAX_MPI_SHM_BYTES);
|
||||
ShmCommBuf=(void *)&ShmBufStorageVector[0];
|
||||
}
|
||||
|
||||
#endif
|
||||
|
||||
}
|
||||
|
@ -1,3 +1,4 @@
|
||||
|
||||
/*************************************************************************************
|
||||
|
||||
Grid physics library, www.github.com/paboyle/Grid
|
||||
@ -34,123 +35,196 @@ Author: Peter Boyle <paboyle@ph.ed.ac.uk>
|
||||
#ifdef GRID_COMMS_MPI
|
||||
#include <mpi.h>
|
||||
#endif
|
||||
#ifdef GRID_COMMS_MPI3
|
||||
#include <mpi.h>
|
||||
#endif
|
||||
#ifdef GRID_COMMS_MPI3L
|
||||
#include <mpi.h>
|
||||
#endif
|
||||
#ifdef GRID_COMMS_SHMEM
|
||||
#include <mpp/shmem.h>
|
||||
#endif
|
||||
|
||||
namespace Grid {
|
||||
|
||||
class CartesianCommunicator {
|
||||
public:
|
||||
|
||||
// 65536 ranks per node adequate for now
|
||||
// 128MB shared memory for comms enough for 48^4 local vol comms
|
||||
// Give external control (command line override?) of this
|
||||
|
||||
static const int MAXLOG2RANKSPERNODE = 16;
|
||||
static uint64_t MAX_MPI_SHM_BYTES;
|
||||
|
||||
// Communicator should know nothing of the physics grid, only processor grid.
|
||||
int _Nprocessors; // How many in all
|
||||
std::vector<int> _processors; // Which dimensions get relayed out over processors lanes.
|
||||
int _processor; // linear processor rank
|
||||
std::vector<int> _processor_coor; // linear processor coordinate
|
||||
unsigned long _ndimension;
|
||||
|
||||
int _Nprocessors; // How many in all
|
||||
std::vector<int> _processors; // Which dimensions get relayed out over processors lanes.
|
||||
int _processor; // linear processor rank
|
||||
std::vector<int> _processor_coor; // linear processor coordinate
|
||||
unsigned long _ndimension;
|
||||
|
||||
#ifdef GRID_COMMS_MPI
|
||||
MPI_Comm communicator;
|
||||
typedef MPI_Request CommsRequest_t;
|
||||
#if defined (GRID_COMMS_MPI) || defined (GRID_COMMS_MPI3) || defined (GRID_COMMS_MPI3L)
|
||||
static MPI_Comm communicator_world;
|
||||
MPI_Comm communicator;
|
||||
typedef MPI_Request CommsRequest_t;
|
||||
#else
|
||||
typedef int CommsRequest_t;
|
||||
typedef int CommsRequest_t;
|
||||
#endif
|
||||
|
||||
static void Init(int *argc, char ***argv);
|
||||
////////////////////////////////////////////////////////////////////
|
||||
// Helper functionality for SHM Windows common to all other impls
|
||||
////////////////////////////////////////////////////////////////////
|
||||
// Longer term; drop this in favour of a master / slave model with
|
||||
// cartesian communicator on a subset of ranks, slave ranks controlled
|
||||
// by group leader with data xfer via shared memory
|
||||
////////////////////////////////////////////////////////////////////
|
||||
#ifdef GRID_COMMS_MPI3
|
||||
|
||||
// Constructor
|
||||
CartesianCommunicator(const std::vector<int> &pdimensions_in);
|
||||
static int ShmRank;
|
||||
static int ShmSize;
|
||||
static int GroupRank;
|
||||
static int GroupSize;
|
||||
static int WorldRank;
|
||||
static int WorldSize;
|
||||
|
||||
// Wraps MPI_Cart routines
|
||||
void ShiftedRanks(int dim,int shift,int & source, int & dest);
|
||||
int RankFromProcessorCoor(std::vector<int> &coor);
|
||||
void ProcessorCoorFromRank(int rank,std::vector<int> &coor);
|
||||
std::vector<int> WorldDims;
|
||||
std::vector<int> GroupDims;
|
||||
std::vector<int> ShmDims;
|
||||
|
||||
std::vector<int> GroupCoor;
|
||||
std::vector<int> ShmCoor;
|
||||
std::vector<int> WorldCoor;
|
||||
|
||||
/////////////////////////////////
|
||||
// Grid information queries
|
||||
/////////////////////////////////
|
||||
int IsBoss(void) { return _processor==0; };
|
||||
int BossRank(void) { return 0; };
|
||||
int ThisRank(void) { return _processor; };
|
||||
const std::vector<int> & ThisProcessorCoor(void) { return _processor_coor; };
|
||||
const std::vector<int> & ProcessorGrid(void) { return _processors; };
|
||||
int ProcessorCount(void) { return _Nprocessors; };
|
||||
static std::vector<int> GroupRanks;
|
||||
static std::vector<int> MyGroup;
|
||||
static int ShmSetup;
|
||||
static MPI_Win ShmWindow;
|
||||
static MPI_Comm ShmComm;
|
||||
|
||||
std::vector<int> LexicographicToWorldRank;
|
||||
|
||||
static std::vector<void *> ShmCommBufs;
|
||||
|
||||
////////////////////////////////////////////////////////////
|
||||
// Reduction
|
||||
////////////////////////////////////////////////////////////
|
||||
void GlobalSum(RealF &);
|
||||
void GlobalSumVector(RealF *,int N);
|
||||
#else
|
||||
static void ShmInitGeneric(void);
|
||||
static commVector<uint8_t> ShmBufStorageVector;
|
||||
#endif
|
||||
|
||||
void GlobalSum(RealD &);
|
||||
void GlobalSumVector(RealD *,int N);
|
||||
/////////////////////////////////
|
||||
// Grid information and queries
|
||||
// Implemented in Communicator_base.cc
|
||||
/////////////////////////////////
|
||||
static void * ShmCommBuf;
|
||||
size_t heap_top;
|
||||
size_t heap_bytes;
|
||||
|
||||
void GlobalSum(uint32_t &);
|
||||
void GlobalSum(uint64_t &);
|
||||
void *ShmBufferSelf(void);
|
||||
void *ShmBuffer(int rank);
|
||||
void *ShmBufferTranslate(int rank,void * local_p);
|
||||
void *ShmBufferMalloc(size_t bytes);
|
||||
void ShmBufferFreeAll(void) ;
|
||||
|
||||
////////////////////////////////////////////////
|
||||
// Must call in Grid startup
|
||||
////////////////////////////////////////////////
|
||||
static void Init(int *argc, char ***argv);
|
||||
|
||||
////////////////////////////////////////////////
|
||||
// Constructor of any given grid
|
||||
////////////////////////////////////////////////
|
||||
CartesianCommunicator(const std::vector<int> &pdimensions_in);
|
||||
|
||||
////////////////////////////////////////////////////////////////////////////////////////
|
||||
// Wraps MPI_Cart routines, or implements equivalent on other impls
|
||||
////////////////////////////////////////////////////////////////////////////////////////
|
||||
void ShiftedRanks(int dim,int shift,int & source, int & dest);
|
||||
int RankFromProcessorCoor(std::vector<int> &coor);
|
||||
void ProcessorCoorFromRank(int rank,std::vector<int> &coor);
|
||||
|
||||
int IsBoss(void) ;
|
||||
int BossRank(void) ;
|
||||
int ThisRank(void) ;
|
||||
const std::vector<int> & ThisProcessorCoor(void) ;
|
||||
const std::vector<int> & ProcessorGrid(void) ;
|
||||
int ProcessorCount(void) ;
|
||||
|
||||
void GlobalSum(ComplexF &c)
|
||||
{
|
||||
GlobalSumVector((float *)&c,2);
|
||||
}
|
||||
void GlobalSumVector(ComplexF *c,int N)
|
||||
{
|
||||
GlobalSumVector((float *)c,2*N);
|
||||
}
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
// very VERY rarely (Log, serial RNG) we need world without a grid
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
static int RankWorld(void) ;
|
||||
static void BroadcastWorld(int root,void* data, int bytes);
|
||||
|
||||
////////////////////////////////////////////////////////////
|
||||
// Reduction
|
||||
////////////////////////////////////////////////////////////
|
||||
void GlobalSum(RealF &);
|
||||
void GlobalSumVector(RealF *,int N);
|
||||
void GlobalSum(RealD &);
|
||||
void GlobalSumVector(RealD *,int N);
|
||||
void GlobalSum(uint32_t &);
|
||||
void GlobalSum(uint64_t &);
|
||||
void GlobalSum(ComplexF &c);
|
||||
void GlobalSumVector(ComplexF *c,int N);
|
||||
void GlobalSum(ComplexD &c);
|
||||
void GlobalSumVector(ComplexD *c,int N);
|
||||
|
||||
template<class obj> void GlobalSum(obj &o){
|
||||
typedef typename obj::scalar_type scalar_type;
|
||||
int words = sizeof(obj)/sizeof(scalar_type);
|
||||
scalar_type * ptr = (scalar_type *)& o;
|
||||
GlobalSumVector(ptr,words);
|
||||
}
|
||||
|
||||
////////////////////////////////////////////////////////////
|
||||
// Face exchange, buffer swap in translational invariant way
|
||||
////////////////////////////////////////////////////////////
|
||||
void SendToRecvFrom(void *xmit,
|
||||
int xmit_to_rank,
|
||||
void *recv,
|
||||
int recv_from_rank,
|
||||
int bytes);
|
||||
|
||||
void SendRecvPacket(void *xmit,
|
||||
void *recv,
|
||||
int xmit_to_rank,
|
||||
int recv_from_rank,
|
||||
int bytes);
|
||||
|
||||
void SendToRecvFromBegin(std::vector<CommsRequest_t> &list,
|
||||
void *xmit,
|
||||
int xmit_to_rank,
|
||||
void *recv,
|
||||
int recv_from_rank,
|
||||
int bytes);
|
||||
|
||||
void SendToRecvFromComplete(std::vector<CommsRequest_t> &waitall);
|
||||
|
||||
void GlobalSum(ComplexD &c)
|
||||
{
|
||||
GlobalSumVector((double *)&c,2);
|
||||
}
|
||||
void GlobalSumVector(ComplexD *c,int N)
|
||||
{
|
||||
GlobalSumVector((double *)c,2*N);
|
||||
}
|
||||
|
||||
template<class obj> void GlobalSum(obj &o){
|
||||
typedef typename obj::scalar_type scalar_type;
|
||||
int words = sizeof(obj)/sizeof(scalar_type);
|
||||
scalar_type * ptr = (scalar_type *)& o;
|
||||
GlobalSumVector(ptr,words);
|
||||
}
|
||||
////////////////////////////////////////////////////////////
|
||||
// Face exchange, buffer swap in translational invariant way
|
||||
////////////////////////////////////////////////////////////
|
||||
void SendToRecvFrom(void *xmit,
|
||||
int xmit_to_rank,
|
||||
void *recv,
|
||||
int recv_from_rank,
|
||||
int bytes);
|
||||
void StencilSendToRecvFromBegin(std::vector<CommsRequest_t> &list,
|
||||
void *xmit,
|
||||
int xmit_to_rank,
|
||||
void *recv,
|
||||
int recv_from_rank,
|
||||
int bytes);
|
||||
|
||||
void StencilSendToRecvFromComplete(std::vector<CommsRequest_t> &waitall);
|
||||
void StencilBarrier(void);
|
||||
|
||||
void SendRecvPacket(void *xmit,
|
||||
void *recv,
|
||||
int xmit_to_rank,
|
||||
int recv_from_rank,
|
||||
int bytes);
|
||||
|
||||
void SendToRecvFromBegin(std::vector<CommsRequest_t> &list,
|
||||
void *xmit,
|
||||
int xmit_to_rank,
|
||||
void *recv,
|
||||
int recv_from_rank,
|
||||
int bytes);
|
||||
void SendToRecvFromComplete(std::vector<CommsRequest_t> &waitall);
|
||||
|
||||
////////////////////////////////////////////////////////////
|
||||
// Barrier
|
||||
////////////////////////////////////////////////////////////
|
||||
void Barrier(void);
|
||||
|
||||
////////////////////////////////////////////////////////////
|
||||
// Broadcast a buffer and composite larger
|
||||
////////////////////////////////////////////////////////////
|
||||
void Broadcast(int root,void* data, int bytes);
|
||||
template<class obj> void Broadcast(int root,obj &data)
|
||||
////////////////////////////////////////////////////////////
|
||||
// Barrier
|
||||
////////////////////////////////////////////////////////////
|
||||
void Barrier(void);
|
||||
|
||||
////////////////////////////////////////////////////////////
|
||||
// Broadcast a buffer and composite larger
|
||||
////////////////////////////////////////////////////////////
|
||||
void Broadcast(int root,void* data, int bytes);
|
||||
|
||||
template<class obj> void Broadcast(int root,obj &data)
|
||||
{
|
||||
Broadcast(root,(void *)&data,sizeof(data));
|
||||
};
|
||||
|
||||
static void BroadcastWorld(int root,void* data, int bytes);
|
||||
|
||||
};
|
||||
}
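An illustrative usage sketch of the interface declared above, under the assumptions that this is an MPI build, exactly four ranks are launched, and Grid.h is on the include path; it is not taken from Grid's own tests:

#include <Grid.h>
#include <iostream>
using namespace Grid;
int main(int argc, char **argv) {
  CartesianCommunicator::Init(&argc, &argv);
  std::vector<int> mpi_layout({1, 1, 1, 4});   // must match the number of ranks launched
  CartesianCommunicator comm(mpi_layout);
  double x = comm.ThisRank();
  comm.GlobalSum(x);                           // every rank now holds 0+1+2+3 = 6
  if (comm.IsBoss()) std::cout << "sum of ranks = " << x << std::endl;
  comm.Barrier();
  return 0;
}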
|
||||
|
||||
|
@ -30,21 +30,23 @@ Author: Peter Boyle <paboyle@ph.ed.ac.uk>
|
||||
|
||||
namespace Grid {
|
||||
|
||||
// Should error check all MPI calls.
|
||||
|
||||
///////////////////////////////////////////////////////////////////////////////////////////////////
|
||||
// Info that is setup once and indept of cartesian layout
|
||||
///////////////////////////////////////////////////////////////////////////////////////////////////
|
||||
MPI_Comm CartesianCommunicator::communicator_world;
|
||||
|
||||
// Should error check all MPI calls.
|
||||
void CartesianCommunicator::Init(int *argc, char ***argv) {
|
||||
int flag;
|
||||
MPI_Initialized(&flag); // needed to coexist with other libs apparently
|
||||
if ( !flag ) {
|
||||
MPI_Init(argc,argv);
|
||||
}
|
||||
MPI_Comm_dup (MPI_COMM_WORLD,&communicator_world);
|
||||
ShmInitGeneric();
|
||||
}
|
||||
|
||||
int Rank(void) {
|
||||
int pe;
|
||||
MPI_Comm_rank(MPI_COMM_WORLD,&pe);
|
||||
return pe;
|
||||
}
|
||||
|
||||
CartesianCommunicator::CartesianCommunicator(const std::vector<int> &processors)
|
||||
{
|
||||
_ndimension = processors.size();
|
||||
@ -54,7 +56,7 @@ CartesianCommunicator::CartesianCommunicator(const std::vector<int> &processors)
|
||||
_processors = processors;
|
||||
_processor_coor.resize(_ndimension);
|
||||
|
||||
MPI_Cart_create(MPI_COMM_WORLD, _ndimension,&_processors[0],&periodic[0],1,&communicator);
|
||||
MPI_Cart_create(communicator_world, _ndimension,&_processors[0],&periodic[0],1,&communicator);
|
||||
MPI_Comm_rank(communicator,&_processor);
|
||||
MPI_Cart_coords(communicator,_processor,_ndimension,&_processor_coor[0]);
|
||||
|
||||
@ -67,7 +69,6 @@ CartesianCommunicator::CartesianCommunicator(const std::vector<int> &processors)
|
||||
|
||||
assert(Size==_Nprocessors);
|
||||
}
|
||||
|
||||
void CartesianCommunicator::GlobalSum(uint32_t &u){
|
||||
int ierr=MPI_Allreduce(MPI_IN_PLACE,&u,1,MPI_UINT32_T,MPI_SUM,communicator);
|
||||
assert(ierr==0);
|
||||
@ -168,7 +169,6 @@ void CartesianCommunicator::SendToRecvFromComplete(std::vector<CommsRequest_t> &
|
||||
int nreq=list.size();
|
||||
std::vector<MPI_Status> status(nreq);
|
||||
int ierr = MPI_Waitall(nreq,&list[0],&status[0]);
|
||||
|
||||
assert(ierr==0);
|
||||
}
|
||||
|
||||
@ -187,14 +187,22 @@ void CartesianCommunicator::Broadcast(int root,void* data, int bytes)
|
||||
communicator);
|
||||
assert(ierr==0);
|
||||
}
|
||||
|
||||
///////////////////////////////////////////////////////
|
||||
// Should only be used prior to Grid Init finished.
|
||||
// Check for this?
|
||||
///////////////////////////////////////////////////////
|
||||
int CartesianCommunicator::RankWorld(void){
|
||||
int r;
|
||||
MPI_Comm_rank(communicator_world,&r);
|
||||
return r;
|
||||
}
|
||||
void CartesianCommunicator::BroadcastWorld(int root,void* data, int bytes)
|
||||
{
|
||||
int ierr= MPI_Bcast(data,
|
||||
bytes,
|
||||
MPI_BYTE,
|
||||
root,
|
||||
MPI_COMM_WORLD);
|
||||
communicator_world);
|
||||
assert(ierr==0);
|
||||
}
|
||||
|
||||
|
580
lib/communicator/Communicator_mpi3.cc
Normal file
@ -0,0 +1,580 @@
|
||||
/*************************************************************************************
|
||||
|
||||
Grid physics library, www.github.com/paboyle/Grid
|
||||
|
||||
Source file: ./lib/communicator/Communicator_mpi3.cc
|
||||
|
||||
Copyright (C) 2015
|
||||
|
||||
Author: Peter Boyle <paboyle@ph.ed.ac.uk>
|
||||
|
||||
This program is free software; you can redistribute it and/or modify
|
||||
it under the terms of the GNU General Public License as published by
|
||||
the Free Software Foundation; either version 2 of the License, or
|
||||
(at your option) any later version.
|
||||
|
||||
This program is distributed in the hope that it will be useful,
|
||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
GNU General Public License for more details.
|
||||
|
||||
You should have received a copy of the GNU General Public License along
|
||||
with this program; if not, write to the Free Software Foundation, Inc.,
|
||||
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
|
||||
See the full license in the file "LICENSE" in the top level distribution directory
|
||||
*************************************************************************************/
|
||||
/* END LEGAL */
|
||||
#include "Grid.h"
|
||||
#include <mpi.h>
|
||||
|
||||
namespace Grid {
|
||||
|
||||
///////////////////////////////////////////////////////////////////////////////////////////////////
|
||||
// Info that is set up once and independent of cartesian layout
|
||||
///////////////////////////////////////////////////////////////////////////////////////////////////
|
||||
int CartesianCommunicator::ShmSetup = 0;
|
||||
|
||||
int CartesianCommunicator::ShmRank;
|
||||
int CartesianCommunicator::ShmSize;
|
||||
int CartesianCommunicator::GroupRank;
|
||||
int CartesianCommunicator::GroupSize;
|
||||
int CartesianCommunicator::WorldRank;
|
||||
int CartesianCommunicator::WorldSize;
|
||||
|
||||
MPI_Comm CartesianCommunicator::communicator_world;
|
||||
MPI_Comm CartesianCommunicator::ShmComm;
|
||||
MPI_Win CartesianCommunicator::ShmWindow;
|
||||
|
||||
std::vector<int> CartesianCommunicator::GroupRanks;
|
||||
std::vector<int> CartesianCommunicator::MyGroup;
|
||||
std::vector<void *> CartesianCommunicator::ShmCommBufs;
|
||||
|
||||
void *CartesianCommunicator::ShmBufferSelf(void)
|
||||
{
|
||||
return ShmCommBufs[ShmRank];
|
||||
}
|
||||
void *CartesianCommunicator::ShmBuffer(int rank)
|
||||
{
|
||||
int gpeer = GroupRanks[rank];
|
||||
if (gpeer == MPI_UNDEFINED){
|
||||
return NULL;
|
||||
} else {
|
||||
return ShmCommBufs[gpeer];
|
||||
}
|
||||
}
|
||||
void *CartesianCommunicator::ShmBufferTranslate(int rank,void * local_p)
|
||||
{
|
||||
int gpeer = GroupRanks[rank];
|
||||
if (gpeer == MPI_UNDEFINED){
|
||||
return NULL;
|
||||
} else {
|
||||
uint64_t offset = (uint64_t)local_p - (uint64_t)ShmCommBufs[ShmRank];
|
||||
uint64_t remote = (uint64_t)ShmCommBufs[gpeer]+offset;
|
||||
return (void *) remote;
|
||||
}
|
||||
}
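Translation relies on every rank's buffers sitting at identical offsets within equally sized shared segments, so only the byte offset needs to be carried across to the peer's base pointer. The same arithmetic in isolation, with stand-in buffers:

#include <cassert>
#include <cstdint>
// Sketch: same-offset translation between two equally sized shared segments.
void *Translate(void *local_p, void *my_base, void *peer_base) {
  uint64_t offset = (uint64_t)local_p - (uint64_t)my_base;
  return (void *)((uint64_t)peer_base + offset);
}
int main() {
  char mine[64], peer[64];
  void *p = &mine[24];
  assert(Translate(p, mine, peer) == (void *)&peer[24]);
  return 0;
}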
|
||||
|
||||
void CartesianCommunicator::Init(int *argc, char ***argv) {
|
||||
int flag;
|
||||
MPI_Initialized(&flag); // needed to coexist with other libs apparently
|
||||
if ( !flag ) {
|
||||
MPI_Init(argc,argv);
|
||||
}
|
||||
|
||||
MPI_Comm_dup (MPI_COMM_WORLD,&communicator_world);
|
||||
MPI_Comm_rank(communicator_world,&WorldRank);
|
||||
MPI_Comm_size(communicator_world,&WorldSize);
|
||||
|
||||
/////////////////////////////////////////////////////////////////////
|
||||
// Split into groups that can share memory
|
||||
/////////////////////////////////////////////////////////////////////
|
||||
MPI_Comm_split_type(communicator_world, MPI_COMM_TYPE_SHARED, 0, MPI_INFO_NULL,&ShmComm);
|
||||
MPI_Comm_rank(ShmComm ,&ShmRank);
|
||||
MPI_Comm_size(ShmComm ,&ShmSize);
|
||||
GroupSize = WorldSize/ShmSize;
|
||||
|
||||
/////////////////////////////////////////////////////////////////////
|
||||
// find world ranks in our SHM group (i.e. which ranks are on our node)
|
||||
/////////////////////////////////////////////////////////////////////
|
||||
MPI_Group WorldGroup, ShmGroup;
|
||||
MPI_Comm_group (communicator_world, &WorldGroup);
|
||||
MPI_Comm_group (ShmComm, &ShmGroup);
|
||||
|
||||
std::vector<int> world_ranks(WorldSize);
|
||||
GroupRanks.resize(WorldSize);
|
||||
for(int r=0;r<WorldSize;r++) world_ranks[r]=r;
|
||||
|
||||
MPI_Group_translate_ranks (WorldGroup,WorldSize,&world_ranks[0],ShmGroup, &GroupRanks[0]);
|
||||
|
||||
///////////////////////////////////////////////////////////////////
|
||||
// Identify who is in my group and nominate the leader
|
||||
///////////////////////////////////////////////////////////////////
|
||||
int g=0;
|
||||
MyGroup.resize(ShmSize);
|
||||
for(int rank=0;rank<WorldSize;rank++){
|
||||
if(GroupRanks[rank]!=MPI_UNDEFINED){
|
||||
assert(g<ShmSize);
|
||||
MyGroup[g++] = rank;
|
||||
}
|
||||
}
|
||||
|
||||
std::sort(MyGroup.begin(),MyGroup.end(),std::less<int>());
|
||||
int myleader = MyGroup[0];
|
||||
|
||||
std::vector<int> leaders_1hot(WorldSize,0);
|
||||
std::vector<int> leaders_group(GroupSize,0);
|
||||
leaders_1hot [ myleader ] = 1;
|
||||
|
||||
///////////////////////////////////////////////////////////////////
|
||||
// global sum leaders over comm world
|
||||
///////////////////////////////////////////////////////////////////
|
||||
int ierr=MPI_Allreduce(MPI_IN_PLACE,&leaders_1hot[0],WorldSize,MPI_INT,MPI_SUM,communicator_world);
|
||||
assert(ierr==0);
|
||||
|
||||
///////////////////////////////////////////////////////////////////
|
||||
// find the group leaders world rank
|
||||
///////////////////////////////////////////////////////////////////
|
||||
int group=0;
|
||||
for(int l=0;l<WorldSize;l++){
|
||||
if(leaders_1hot[l]){
|
||||
leaders_group[group++] = l;
|
||||
}
|
||||
}
|
||||
|
||||
///////////////////////////////////////////////////////////////////
|
||||
// Identify the rank of the group in which I (and my leader) live
|
||||
///////////////////////////////////////////////////////////////////
|
||||
GroupRank=-1;
|
||||
for(int g=0;g<GroupSize;g++){
|
||||
if (myleader == leaders_group[g]){
|
||||
GroupRank=g;
|
||||
}
|
||||
}
|
||||
assert(GroupRank!=-1);
|
||||
|
||||
//////////////////////////////////////////////////////////////////////////////////////////////////////////
|
||||
// allocate the shared window for our group
|
||||
//////////////////////////////////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
ShmCommBuf = 0;
|
||||
ierr = MPI_Win_allocate_shared(MAX_MPI_SHM_BYTES,1,MPI_INFO_NULL,ShmComm,&ShmCommBuf,&ShmWindow);
|
||||
assert(ierr==0);
|
||||
// KNL hack -- force to numa-domain 1 in flat
|
||||
#if 0
|
||||
//#include <numaif.h>
|
||||
for(uint64_t page=0;page<MAX_MPI_SHM_BYTES;page+=4096){
|
||||
void *pages = (void *) ( page + ShmCommBuf );
|
||||
int status;
|
||||
int flags=MPOL_MF_MOVE_ALL;
|
||||
int nodes=1; // numa domain == MCDRAM
|
||||
unsigned long count=1;
|
||||
ierr= move_pages(0,count, &pages,&nodes,&status,flags);
|
||||
if (ierr && (page==0)) perror("numa relocate command failed");
|
||||
}
|
||||
#endif
|
||||
MPI_Win_lock_all (MPI_MODE_NOCHECK, ShmWindow);
|
||||
|
||||
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
|
||||
// Plan: allocate a fixed SHM region. Scratch that is just used via some scheme during stencil comms, with no allocate/free.
|
||||
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
|
||||
ShmCommBufs.resize(ShmSize);
|
||||
for(int r=0;r<ShmSize;r++){
|
||||
MPI_Aint sz;
|
||||
int dsp_unit;
|
||||
MPI_Win_shared_query (ShmWindow, r, &sz, &dsp_unit, &ShmCommBufs[r]);
|
||||
}
|
||||
|
||||
//////////////////////////////////////////////////////////////////////////////////////////////////////////
|
||||
// Verbose for now
|
||||
//////////////////////////////////////////////////////////////////////////////////////////////////////////
|
||||
if (WorldRank == 0){
|
||||
std::cout<<GridLogMessage<< "Grid MPI-3 configuration: detected ";
|
||||
std::cout<< WorldSize << " Ranks " ;
|
||||
std::cout<< GroupSize << " Nodes " ;
|
||||
std::cout<< ShmSize << " with ranks-per-node "<<std::endl;
|
||||
|
||||
std::cout<<GridLogMessage <<"Grid MPI-3 configuration: allocated shared memory region of size ";
|
||||
std::cout<<std::hex << MAX_MPI_SHM_BYTES <<" ShmCommBuf address = "<<ShmCommBuf << std::dec<<std::endl;
|
||||
|
||||
for(int g=0;g<GroupSize;g++){
|
||||
std::cout<<GridLogMessage<<" Node "<<g<<" led by MPI rank "<<leaders_group[g]<<std::endl;
|
||||
}
|
||||
|
||||
std::cout<<GridLogMessage<<" Boss Node Shm Pointers are {";
|
||||
for(int g=0;g<ShmSize;g++){
|
||||
std::cout<<std::hex<<ShmCommBufs[g]<<std::dec;
|
||||
if(g!=ShmSize-1) std::cout<<",";
|
||||
else std::cout<<"}"<<std::endl;
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
for(int g=0;g<GroupSize;g++){
|
||||
if ( (ShmRank == 0) && (GroupRank==g) ) std::cout<<GridLogMessage<<"["<<g<<"] Node Group "<<g<<" is ranks {";
|
||||
for(int r=0;r<ShmSize;r++){
|
||||
if ( (ShmRank == 0) && (GroupRank==g) ) {
|
||||
std::cout<<MyGroup[r];
|
||||
if(r<ShmSize-1) std::cout<<",";
|
||||
else std::cout<<"}"<<std::endl;
|
||||
}
|
||||
MPI_Barrier(communicator_world);
|
||||
}
|
||||
}
|
||||
|
||||
assert(ShmSetup==0); ShmSetup=1;
|
||||
}
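The leader discovery in Init is a one-hot trick: each rank marks its node leader's world rank in a zeroed vector, and the allreduce turns the marks into a census whose nonzero slots list exactly one leader per node. A standalone sketch with a plain loop standing in for MPI_Allreduce:

#include <cassert>
#include <vector>
// Sketch: given each rank's node-leader world rank, summing the one-hot vectors
// yields the list of distinct leaders, in world-rank order.
std::vector<int> FindLeaders(const std::vector<int> &my_leader, int world_size) {
  std::vector<int> onehot(world_size, 0);
  for (int r = 0; r < world_size; r++) onehot[my_leader[r]] += 1;  // what the allreduce sums
  std::vector<int> leaders;
  for (int l = 0; l < world_size; l++)
    if (onehot[l]) leaders.push_back(l);
  return leaders;
}
int main() {
  // 4 ranks, 2 per node: ranks 0,1 led by 0; ranks 2,3 led by 2
  std::vector<int> leaders = FindLeaders({0, 0, 2, 2}, 4);
  assert(leaders.size() == 2 && leaders[0] == 0 && leaders[1] == 2);
  return 0;
}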
|
||||
|
||||
|
||||
////////////////////////////////////////////////////////////////////////////////////////////////////////////
|
||||
// Want to implement some magic ... Group sub-cubes into those on same node
|
||||
////////////////////////////////////////////////////////////////////////////////////////////////////////////
|
||||
void CartesianCommunicator::ShiftedRanks(int dim,int shift,int &source,int &dest)
|
||||
{
|
||||
std::vector<int> coor = _processor_coor;
|
||||
|
||||
assert(std::abs(shift) <_processors[dim]);
|
||||
|
||||
coor[dim] = (_processor_coor[dim] + shift + _processors[dim])%_processors[dim];
|
||||
Lexicographic::IndexFromCoor(coor,source,_processors);
|
||||
source = LexicographicToWorldRank[source];
|
||||
|
||||
coor[dim] = (_processor_coor[dim] - shift + _processors[dim])%_processors[dim];
|
||||
Lexicographic::IndexFromCoor(coor,dest,_processors);
|
||||
dest = LexicographicToWorldRank[dest];
|
||||
}
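Neighbour ranks come from wrapping one processor coordinate periodically and mapping the coordinate back to a rank lexicographically; the sketch below paraphrases Lexicographic::IndexFromCoor as a plain loop with dimension 0 running fastest, and omits the final LexicographicToWorldRank relabelling:

#include <cassert>
#include <vector>
int IndexFromCoor(const std::vector<int> &coor, const std::vector<int> &dims) {
  int idx = 0;
  for (int d = (int)dims.size() - 1; d >= 0; d--) idx = idx * dims[d] + coor[d];
  return idx;  // dimension 0 runs fastest
}
int main() {
  std::vector<int> procs({4, 2});
  std::vector<int> coor({3, 1});
  int dim = 0, shift = 1;
  std::vector<int> scoor = coor;
  scoor[dim] = (coor[dim] + shift + procs[dim]) % procs[dim];  // periodic wrap: 3+1 mod 4 = 0
  assert(IndexFromCoor(scoor, procs) == 4);                    // {0,1} -> 0 + 4*1
  scoor[dim] = (coor[dim] - shift + procs[dim]) % procs[dim];  // 3-1 mod 4 = 2
  assert(IndexFromCoor(scoor, procs) == 6);                    // {2,1} -> 2 + 4*1
  return 0;
}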
|
||||
int CartesianCommunicator::RankFromProcessorCoor(std::vector<int> &coor)
|
||||
{
|
||||
int rank;
|
||||
Lexicographic::IndexFromCoor(coor,rank,_processors);
|
||||
rank = LexicographicToWorldRank[rank];
|
||||
return rank;
|
||||
}
|
||||
void CartesianCommunicator::ProcessorCoorFromRank(int rank, std::vector<int> &coor)
|
||||
{
|
||||
Lexicographic::CoorFromIndex(coor,rank,_processors);
|
||||
rank = LexicographicToWorldRank[rank];
|
||||
}
|
||||
|
||||
CartesianCommunicator::CartesianCommunicator(const std::vector<int> &processors)
|
||||
{
|
||||
int ierr;
|
||||
|
||||
communicator=communicator_world;
|
||||
|
||||
_ndimension = processors.size();
|
||||
|
||||
////////////////////////////////////////////////////////////////
|
||||
// Assert power of two shm_size.
|
||||
////////////////////////////////////////////////////////////////
|
||||
int log2size = -1;
|
||||
for(int i=0;i<=MAXLOG2RANKSPERNODE;i++){
|
||||
if ( (0x1<<i) == ShmSize ) {
|
||||
log2size = i;
|
||||
break;
|
||||
}
|
||||
}
|
||||
assert(log2size != -1);
|
||||
|
||||
////////////////////////////////////////////////////////////////
|
||||
// Identify subblock of ranks on node spreading across dims
|
||||
// in a maximally symmetrical way
|
||||
////////////////////////////////////////////////////////////////
|
||||
int dim = 0;
|
||||
|
||||
std::vector<int> WorldDims = processors;
|
||||
|
||||
ShmDims.resize(_ndimension,1);
|
||||
GroupDims.resize(_ndimension);
|
||||
|
||||
ShmCoor.resize(_ndimension);
|
||||
GroupCoor.resize(_ndimension);
|
||||
WorldCoor.resize(_ndimension);
|
||||
|
||||
for(int l2=0;l2<log2size;l2++){
|
||||
while ( WorldDims[dim] / ShmDims[dim] <= 1 ) dim=(dim+1)%_ndimension;
|
||||
ShmDims[dim]*=2;
|
||||
dim=(dim+1)%_ndimension;
|
||||
}
|
||||
|
||||
////////////////////////////////////////////////////////////////
|
||||
// Establish torus of processes and nodes with sub-blockings
|
||||
////////////////////////////////////////////////////////////////
|
||||
for(int d=0;d<_ndimension;d++){
|
||||
GroupDims[d] = WorldDims[d]/ShmDims[d];
|
||||
}
|
||||
|
||||
////////////////////////////////////////////////////////////////
|
||||
// Check processor counts match
|
||||
////////////////////////////////////////////////////////////////
|
||||
_Nprocessors=1;
|
||||
_processors = processors;
|
||||
_processor_coor.resize(_ndimension);
|
||||
for(int i=0;i<_ndimension;i++){
|
||||
_Nprocessors*=_processors[i];
|
||||
}
|
||||
assert(WorldSize==_Nprocessors);
|
||||
|
||||
////////////////////////////////////////////////////////////////
|
||||
// Establish mapping between lexico physics coord and WorldRank
|
||||
//
|
||||
////////////////////////////////////////////////////////////////
|
||||
LexicographicToWorldRank.resize(WorldSize,0);
|
||||
Lexicographic::CoorFromIndex(GroupCoor,GroupRank,GroupDims);
|
||||
Lexicographic::CoorFromIndex(ShmCoor,ShmRank,ShmDims);
|
||||
for(int d=0;d<_ndimension;d++){
|
||||
WorldCoor[d] = GroupCoor[d]*ShmDims[d]+ShmCoor[d];
|
||||
}
|
||||
_processor_coor = WorldCoor;
|
||||
|
||||
int lexico;
|
||||
Lexicographic::IndexFromCoor(WorldCoor,lexico,WorldDims);
|
||||
LexicographicToWorldRank[lexico]=WorldRank;
|
||||
_processor = lexico;
|
||||
|
||||
///////////////////////////////////////////////////////////////////
|
||||
// global sum Lexico to World mapping
|
||||
///////////////////////////////////////////////////////////////////
|
||||
ierr=MPI_Allreduce(MPI_IN_PLACE,&LexicographicToWorldRank[0],WorldSize,MPI_INT,MPI_SUM,communicator);
|
||||
assert(ierr==0);
|
||||
|
||||
};
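The node-local sub-block is built by handing each factor of two in the ranks-per-node count to the next dimension that still has room to halve. A standalone sketch of that loop, mirroring the ShmDims construction above:

#include <cassert>
#include <vector>
std::vector<int> SpreadShmDims(std::vector<int> WorldDims, int log2size) {
  int nd = (int)WorldDims.size();
  std::vector<int> ShmDims(nd, 1);
  int dim = 0;
  for (int l2 = 0; l2 < log2size; l2++) {
    while (WorldDims[dim] / ShmDims[dim] <= 1) dim = (dim + 1) % nd;  // skip exhausted dims
    ShmDims[dim] *= 2;
    dim = (dim + 1) % nd;
  }
  return ShmDims;
}
int main() {
  // 8 ranks per node (log2 = 3) on a 4.4.4.4 process grid -> 2.2.2.1 sub-block
  std::vector<int> shm = SpreadShmDims({4, 4, 4, 4}, 3);
  assert(shm[0] == 2 && shm[1] == 2 && shm[2] == 2 && shm[3] == 1);
  return 0;
}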
|
||||
|
||||
void CartesianCommunicator::GlobalSum(uint32_t &u){
|
||||
int ierr=MPI_Allreduce(MPI_IN_PLACE,&u,1,MPI_UINT32_T,MPI_SUM,communicator);
|
||||
assert(ierr==0);
|
||||
}
|
||||
void CartesianCommunicator::GlobalSum(uint64_t &u){
|
||||
int ierr=MPI_Allreduce(MPI_IN_PLACE,&u,1,MPI_UINT64_T,MPI_SUM,communicator);
|
||||
assert(ierr==0);
|
||||
}
|
||||
void CartesianCommunicator::GlobalSum(float &f){
|
||||
int ierr=MPI_Allreduce(MPI_IN_PLACE,&f,1,MPI_FLOAT,MPI_SUM,communicator);
|
||||
assert(ierr==0);
|
||||
}
|
||||
void CartesianCommunicator::GlobalSumVector(float *f,int N)
|
||||
{
|
||||
int ierr=MPI_Allreduce(MPI_IN_PLACE,f,N,MPI_FLOAT,MPI_SUM,communicator);
|
||||
assert(ierr==0);
|
||||
}
|
||||
void CartesianCommunicator::GlobalSum(double &d)
|
||||
{
|
||||
int ierr = MPI_Allreduce(MPI_IN_PLACE,&d,1,MPI_DOUBLE,MPI_SUM,communicator);
|
||||
assert(ierr==0);
|
||||
}
|
||||
void CartesianCommunicator::GlobalSumVector(double *d,int N)
|
||||
{
|
||||
int ierr = MPI_Allreduce(MPI_IN_PLACE,d,N,MPI_DOUBLE,MPI_SUM,communicator);
|
||||
assert(ierr==0);
|
||||
}
|
||||
|
||||
|
||||
// Basic Halo comms primitive
|
||||
void CartesianCommunicator::SendToRecvFrom(void *xmit,
|
||||
int dest,
|
||||
void *recv,
|
||||
int from,
|
||||
int bytes)
|
||||
{
|
||||
std::vector<CommsRequest_t> reqs(0);
|
||||
SendToRecvFromBegin(reqs,xmit,dest,recv,from,bytes);
|
||||
SendToRecvFromComplete(reqs);
|
||||
}
|
||||
|
||||
void CartesianCommunicator::SendRecvPacket(void *xmit,
|
||||
void *recv,
|
||||
int sender,
|
||||
int receiver,
|
||||
int bytes)
|
||||
{
|
||||
MPI_Status stat;
|
||||
assert(sender != receiver);
|
||||
int tag = sender;
|
||||
if ( _processor == sender ) {
|
||||
MPI_Send(xmit, bytes, MPI_CHAR,receiver,tag,communicator);
|
||||
}
|
||||
if ( _processor == receiver ) {
|
||||
MPI_Recv(recv, bytes, MPI_CHAR,sender,tag,communicator,&stat);
|
||||
}
|
||||
}
|
||||
|
||||
// Basic Halo comms primitive
|
||||
void CartesianCommunicator::SendToRecvFromBegin(std::vector<CommsRequest_t> &list,
|
||||
void *xmit,
|
||||
int dest,
|
||||
void *recv,
|
||||
int from,
|
||||
int bytes)
|
||||
{
|
||||
#if 0
|
||||
this->StencilBarrier();
|
||||
|
||||
MPI_Request xrq;
|
||||
MPI_Request rrq;
|
||||
|
||||
static int sequence;
|
||||
|
||||
int ierr;
|
||||
int tag;
|
||||
int check;
|
||||
|
||||
assert(dest != _processor);
|
||||
assert(from != _processor);
|
||||
|
||||
int gdest = GroupRanks[dest];
|
||||
int gfrom = GroupRanks[from];
|
||||
int gme = GroupRanks[_processor];
|
||||
|
||||
sequence++;
|
||||
|
||||
char *from_ptr = (char *)ShmCommBufs[ShmRank];
|
||||
|
||||
int small = (bytes<MAX_MPI_SHM_BYTES);
|
||||
|
||||
typedef uint64_t T;
|
||||
int words = bytes/sizeof(T);
|
||||
|
||||
assert(((size_t)bytes &(sizeof(T)-1))==0);
|
||||
assert(gme == ShmRank);
|
||||
|
||||
if ( small && (gdest !=MPI_UNDEFINED) ) {
|
||||
|
||||
char *to_ptr = (char *)ShmCommBufs[gdest];
|
||||
|
||||
assert(gme != gdest);
|
||||
|
||||
T *ip = (T *)xmit;
|
||||
T *op = (T *)to_ptr;
|
||||
PARALLEL_FOR_LOOP
|
||||
for(int w=0;w<words;w++) {
|
||||
op[w]=ip[w];
|
||||
}
|
||||
|
||||
bcopy(&_processor,&to_ptr[bytes],sizeof(_processor));
|
||||
bcopy(& sequence,&to_ptr[bytes+4],sizeof(sequence));
|
||||
} else {
|
||||
ierr =MPI_Isend(xmit, bytes, MPI_CHAR,dest,_processor,communicator,&xrq);
|
||||
assert(ierr==0);
|
||||
list.push_back(xrq);
|
||||
}
|
||||
|
||||
this->StencilBarrier();
|
||||
|
||||
if (small && (gfrom !=MPI_UNDEFINED) ) {
|
||||
T *ip = (T *)from_ptr;
|
||||
T *op = (T *)recv;
|
||||
PARALLEL_FOR_LOOP
|
||||
for(int w=0;w<words;w++) {
|
||||
op[w]=ip[w];
|
||||
}
|
||||
bcopy(&from_ptr[bytes] ,&tag ,sizeof(tag));
|
||||
bcopy(&from_ptr[bytes+4],&check,sizeof(check));
|
||||
assert(check==sequence);
|
||||
assert(tag==from);
|
||||
} else {
|
||||
ierr=MPI_Irecv(recv, bytes, MPI_CHAR,from,from,communicator,&rrq);
|
||||
assert(ierr==0);
|
||||
list.push_back(rrq);
|
||||
}
|
||||
|
||||
this->StencilBarrier();
|
||||
|
||||
#else
|
||||
MPI_Request xrq;
|
||||
MPI_Request rrq;
|
||||
int rank = _processor;
|
||||
int ierr;
|
||||
ierr =MPI_Isend(xmit, bytes, MPI_CHAR,dest,_processor,communicator,&xrq);
|
||||
ierr|=MPI_Irecv(recv, bytes, MPI_CHAR,from,from,communicator,&rrq);
|
||||
|
||||
assert(ierr==0);
|
||||
|
||||
list.push_back(xrq);
|
||||
list.push_back(rrq);
|
||||
#endif
|
||||
}
|
||||
|
||||
void CartesianCommunicator::StencilSendToRecvFromBegin(std::vector<CommsRequest_t> &list,
|
||||
void *xmit,
|
||||
int dest,
|
||||
void *recv,
|
||||
int from,
|
||||
int bytes)
|
||||
{
|
||||
MPI_Request xrq;
|
||||
MPI_Request rrq;
|
||||
|
||||
int ierr;
|
||||
|
||||
assert(dest != _processor);
|
||||
assert(from != _processor);
|
||||
|
||||
int gdest = GroupRanks[dest];
|
||||
int gfrom = GroupRanks[from];
|
||||
int gme = GroupRanks[_processor];
|
||||
|
||||
assert(gme == ShmRank);
|
||||
|
||||
if ( gdest == MPI_UNDEFINED ) {
|
||||
ierr =MPI_Isend(xmit, bytes, MPI_CHAR,dest,_processor,communicator,&xrq);
|
||||
assert(ierr==0);
|
||||
list.push_back(xrq);
|
||||
}
|
||||
|
||||
if ( gfrom ==MPI_UNDEFINED) {
|
||||
ierr=MPI_Irecv(recv, bytes, MPI_CHAR,from,from,communicator,&rrq);
|
||||
assert(ierr==0);
|
||||
list.push_back(rrq);
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
|
||||
void CartesianCommunicator::StencilSendToRecvFromComplete(std::vector<CommsRequest_t> &list)
|
||||
{
|
||||
SendToRecvFromComplete(list);
|
||||
}
|
||||
|
||||
void CartesianCommunicator::StencilBarrier(void)
|
||||
{
|
||||
MPI_Win_sync (ShmWindow);
|
||||
MPI_Barrier (ShmComm);
|
||||
MPI_Win_sync (ShmWindow);
|
||||
}
|
||||
|
||||
void CartesianCommunicator::SendToRecvFromComplete(std::vector<CommsRequest_t> &list)
|
||||
{
|
||||
int nreq=list.size();
|
||||
std::vector<MPI_Status> status(nreq);
|
||||
int ierr = MPI_Waitall(nreq,&list[0],&status[0]);
|
||||
assert(ierr==0);
|
||||
}
|
||||
|
||||
void CartesianCommunicator::Barrier(void)
|
||||
{
|
||||
int ierr = MPI_Barrier(communicator);
|
||||
assert(ierr==0);
|
||||
}
|
||||
|
||||
void CartesianCommunicator::Broadcast(int root,void* data, int bytes)
|
||||
{
|
||||
int ierr=MPI_Bcast(data,
|
||||
bytes,
|
||||
MPI_BYTE,
|
||||
root,
|
||||
communicator);
|
||||
assert(ierr==0);
|
||||
}
|
||||
|
||||
void CartesianCommunicator::BroadcastWorld(int root,void* data, int bytes)
|
||||
{
|
||||
int ierr= MPI_Bcast(data,
|
||||
bytes,
|
||||
MPI_BYTE,
|
||||
root,
|
||||
communicator_world);
|
||||
assert(ierr==0);
|
||||
}
|
||||
|
||||
}
|
||||
|
874
lib/communicator/Communicator_mpi3_leader.cc
Normal file
@ -0,0 +1,874 @@
|
||||
/*************************************************************************************
|
||||
|
||||
Grid physics library, www.github.com/paboyle/Grid
|
||||
|
||||
Source file: ./lib/communicator/Communicator_mpi3_leader.cc
|
||||
|
||||
Copyright (C) 2015
|
||||
|
||||
Author: Peter Boyle <paboyle@ph.ed.ac.uk>
|
||||
|
||||
This program is free software; you can redistribute it and/or modify
|
||||
it under the terms of the GNU General Public License as published by
|
||||
the Free Software Foundation; either version 2 of the License, or
|
||||
(at your option) any later version.
|
||||
|
||||
This program is distributed in the hope that it will be useful,
|
||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
GNU General Public License for more details.
|
||||
|
||||
You should have received a copy of the GNU General Public License along
|
||||
with this program; if not, write to the Free Software Foundation, Inc.,
|
||||
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
|
||||
See the full license in the file "LICENSE" in the top level distribution directory
|
||||
*************************************************************************************/
|
||||
/* END LEGAL */
|
||||
#include "Grid.h"
|
||||
#include <mpi.h>
|
||||
|
||||
////////////////////////////////////////////////////////////////////////////////////////////////////////////////
|
||||
/// Workarounds:
|
||||
/// i) bloody mac os doesn't implement unnamed semaphores since it is "optional" posix.
|
||||
/// darwin dispatch semaphores don't seem to be multiprocess.
|
||||
///
|
||||
/// ii) openmpi under --mca shmem posix works with two squadrons per node;
|
||||
/// openmpi under default mca settings (I think --mca shmem mmap) on MacOS makes two squadrons map the SAME
|
||||
/// memory as each other, despite their living on different communicators. This appears to be a bug in OpenMPI.
|
||||
///
|
||||
////////////////////////////////////////////////////////////////////////////////////////////////////////////////
|
||||
#include <semaphore.h>
|
||||
#include <fcntl.h>
|
||||
#include <unistd.h>
|
||||
#include <limits.h>
|
||||
|
||||
typedef sem_t *Grid_semaphore;
|
||||
|
||||
#define SEM_INIT(S) S = sem_open(sem_name,0,0600,0); assert ( S != SEM_FAILED );
|
||||
#define SEM_INIT_EXCL(S) sem_unlink(sem_name); S = sem_open(sem_name,O_CREAT|O_EXCL,0600,0); assert ( S != SEM_FAILED );
|
||||
#define SEM_POST(S) assert ( sem_post(S) == 0 );
|
||||
#define SEM_WAIT(S) assert ( sem_wait(S) == 0 );
|
||||
|
||||
#include <sys/mman.h>
|
||||
|
||||
namespace Grid {
|
||||
|
||||
enum { COMMAND_ISEND, COMMAND_IRECV, COMMAND_WAITALL };
|
||||
|
||||
struct Descriptor {
|
||||
uint64_t buf;
|
||||
size_t bytes;
|
||||
int rank;
|
||||
int tag;
|
||||
int command;
|
||||
MPI_Request request;
|
||||
};
|
||||
|
||||
const int pool = 48;
|
||||
|
||||
class SlaveState {
|
||||
public:
|
||||
volatile int head;
|
||||
volatile int start;
|
||||
volatile int tail;
|
||||
volatile Descriptor Descrs[pool];
|
||||
};
|
||||
|
||||
class Slave {
|
||||
public:
|
||||
Grid_semaphore sem_head;
|
||||
Grid_semaphore sem_tail;
|
||||
SlaveState *state;
|
||||
MPI_Comm squadron;
|
||||
uint64_t base;
|
||||
int universe_rank;
|
||||
int vertical_rank;
|
||||
char sem_name [NAME_MAX];
|
||||
////////////////////////////////////////////////////////////
|
||||
// Descriptor circular pointers
|
||||
////////////////////////////////////////////////////////////
|
||||
Slave() {};
|
||||
|
||||
void Init(SlaveState * _state,MPI_Comm _squadron,int _universe_rank,int _vertical_rank);
|
||||
|
||||
void SemInit(void) {
|
||||
sprintf(sem_name,"/Grid_mpi3_sem_head_%d",universe_rank);
|
||||
// printf("SEM_NAME: %s \n",sem_name);
|
||||
SEM_INIT(sem_head);
|
||||
sprintf(sem_name,"/Grid_mpi3_sem_tail_%d",universe_rank);
|
||||
// printf("SEM_NAME: %s \n",sem_name);
|
||||
SEM_INIT(sem_tail);
|
||||
}
|
||||
void SemInitExcl(void) {
|
||||
sprintf(sem_name,"/Grid_mpi3_sem_head_%d",universe_rank);
|
||||
// printf("SEM_INIT_EXCL: %s \n",sem_name);
|
||||
SEM_INIT_EXCL(sem_head);
|
||||
sprintf(sem_name,"/Grid_mpi3_sem_tail_%d",universe_rank);
|
||||
// printf("SEM_INIT_EXCL: %s \n",sem_name);
|
||||
SEM_INIT_EXCL(sem_tail);
|
||||
}
|
||||
void WakeUpDMA(void) {
|
||||
SEM_POST(sem_head);
|
||||
};
|
||||
void WakeUpCompute(void) {
|
||||
SEM_POST(sem_tail);
|
||||
};
|
||||
void WaitForCommand(void) {
|
||||
SEM_WAIT(sem_head);
|
||||
};
|
||||
void WaitForComplete(void) {
|
||||
SEM_WAIT(sem_tail);
|
||||
};
|
||||
void EventLoop (void) {
|
||||
// std::cout<< " Entering event loop "<<std::endl;
|
||||
while(1){
|
||||
WaitForCommand();
|
||||
// std::cout << "Getting command "<<std::endl;
|
||||
Event();
|
||||
}
|
||||
}
|
||||
|
||||
int Event (void) ;
|
||||
|
||||
uint64_t QueueCommand(int command,void *buf, int bytes, int hashtag, MPI_Comm comm,int u_rank) ;
|
||||
|
||||
void WaitAll() {
|
||||
// std::cout << "Queueing WAIT command "<<std::endl;
|
||||
QueueCommand(COMMAND_WAITALL,0,0,0,squadron,0);
|
||||
// std::cout << "Waking up DMA "<<std::endl;
|
||||
WakeUpDMA();
|
||||
// std::cout << "Waiting from semaphore "<<std::endl;
|
||||
WaitForComplete();
|
||||
// std::cout << "Checking FIFO is empty "<<std::endl;
|
||||
assert ( state->tail == state->head );
|
||||
}
|
||||
};
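Each compute rank talks to its data-mover slave through a pair of named POSIX semaphores: the master posts sem_head after queueing a descriptor and the slave posts sem_tail once the queue has drained. A reduced sketch of that handshake, using two threads in one process as stand-ins for the two ranks (names are illustrative only, no shared memory involved):

#include <cassert>
#include <fcntl.h>
#include <semaphore.h>
#include <thread>
int main() {
  sem_unlink("/grid_sketch_head"); sem_unlink("/grid_sketch_tail");
  sem_t *head = sem_open("/grid_sketch_head", O_CREAT | O_EXCL, 0600, 0);
  sem_t *tail = sem_open("/grid_sketch_tail", O_CREAT | O_EXCL, 0600, 0);
  assert(head != SEM_FAILED && tail != SEM_FAILED);
  int work_done = 0;
  std::thread slave([&] {   // EventLoop(): wait for a command, execute it, signal back
    sem_wait(head);
    work_done = 1;          // stands in for issuing MPI_Isend/Irecv/Waitall
    sem_post(tail);
  });
  sem_post(head);           // WakeUpDMA(): a command has been queued
  sem_wait(tail);           // WaitForComplete(): block until the slave is done
  assert(work_done == 1);
  slave.join();
  sem_unlink("/grid_sketch_head"); sem_unlink("/grid_sketch_tail");
  return 0;
}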
|
||||
|
||||
////////////////////////////////////////////////////////////////////////
|
||||
// One instance of a data mover.
|
||||
// Master and Slave must agree on location in shared memory
|
||||
////////////////////////////////////////////////////////////////////////
|
||||
|
||||
class MPIoffloadEngine {
|
||||
public:
|
||||
|
||||
static std::vector<Slave> Slaves;
|
||||
|
||||
static int ShmSetup;
|
||||
|
||||
static int UniverseRank;
|
||||
static int UniverseSize;
|
||||
|
||||
static MPI_Comm communicator_universe;
|
||||
static MPI_Comm communicator_cached;
|
||||
|
||||
static MPI_Comm HorizontalComm;
|
||||
static int HorizontalRank;
|
||||
static int HorizontalSize;
|
||||
|
||||
static MPI_Comm VerticalComm;
|
||||
static MPI_Win VerticalWindow;
|
||||
static int VerticalSize;
|
||||
static int VerticalRank;
|
||||
|
||||
static std::vector<void *> VerticalShmBufs;
|
||||
static std::vector<std::vector<int> > UniverseRanks;
|
||||
static std::vector<int> UserCommunicatorToWorldRanks;
|
||||
|
||||
static MPI_Group WorldGroup, CachedGroup;
|
||||
|
||||
static void CommunicatorInit (MPI_Comm &communicator_world,
|
||||
MPI_Comm &ShmComm,
|
||||
void * &ShmCommBuf);
|
||||
|
||||
static void MapCommRankToWorldRank(int &hashtag, int & comm_world_peer,int tag, MPI_Comm comm,int commrank);
|
||||
|
||||
/////////////////////////////////////////////////////////
|
||||
// routines for master proc must handle any communicator
|
||||
/////////////////////////////////////////////////////////
|
||||
|
||||
static void QueueSend(int slave,void *buf, int bytes, int tag, MPI_Comm comm,int rank) {
|
||||
// std::cout<< " Queueing send "<< bytes<< " slave "<< slave << " to comm "<<rank <<std::endl;
|
||||
Slaves[slave].QueueCommand(COMMAND_ISEND,buf,bytes,tag,comm,rank);
|
||||
// std::cout << "Queued send command to rank "<< rank<< " via "<<slave <<std::endl;
|
||||
Slaves[slave].WakeUpDMA();
|
||||
// std::cout << "Waking up DMA "<< slave<<std::endl;
|
||||
};
|
||||
|
||||
static void QueueRecv(int slave, void *buf, int bytes, int tag, MPI_Comm comm,int rank) {
|
||||
// std::cout<< " Queueing recv "<< bytes<< " slave "<< slave << " from comm "<<rank <<std::endl;
|
||||
Slaves[slave].QueueCommand(COMMAND_IRECV,buf,bytes,tag,comm,rank);
|
||||
// std::cout << "Queued recv command from rank "<< rank<< " via "<<slave <<std::endl;
|
||||
Slaves[slave].WakeUpDMA();
|
||||
// std::cout << "Waking up DMA "<< slave<<std::endl;
|
||||
};
|
||||
|
||||
static void WaitAll() {
|
||||
for(int s=1;s<VerticalSize;s++) {
|
||||
// std::cout << "Waiting for slave "<< s<<std::endl;
|
||||
Slaves[s].WaitAll();
|
||||
}
|
||||
// std::cout << " Wait all Complete "<<std::endl;
|
||||
};
|
||||
|
||||
static void GetWork(int nwork, int me, int & mywork, int & myoff,int units){
|
||||
int basework = nwork/units;
|
||||
int backfill = units-(nwork%units);
|
||||
if ( me >= units ) {
|
||||
mywork = myoff = 0;
|
||||
} else {
|
||||
mywork = (nwork+me)/units;
|
||||
myoff = basework * me;
|
||||
if ( me > backfill )
|
||||
myoff+= (me-backfill);
|
||||
}
|
||||
return;
|
||||
};
|
||||
|
||||
static void QueueMultiplexedSend(void *buf, int bytes, int tag, MPI_Comm comm,int rank) {
|
||||
uint8_t * cbuf = (uint8_t *) buf;
|
||||
int mywork, myoff, procs;
|
||||
procs = VerticalSize-1;
|
||||
for(int s=0;s<procs;s++) {
|
||||
GetWork(bytes,s,mywork,myoff,procs);
|
||||
QueueSend(s+1,&cbuf[myoff],mywork,tag,comm,rank);
|
||||
}
|
||||
};
|
||||
|
||||
static void QueueMultiplexedRecv(void *buf, int bytes, int tag, MPI_Comm comm,int rank) {
|
||||
uint8_t * cbuf = (uint8_t *) buf;
|
||||
int mywork, myoff, procs;
|
||||
procs = VerticalSize-1;
|
||||
for(int s=0;s<procs;s++) {
|
||||
GetWork(bytes,s,mywork,myoff,procs);
|
||||
QueueRecv(s+1,&cbuf[myoff],mywork,tag,comm,rank);
|
||||
}
|
||||
};
|
||||
|
||||
};
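QueueMultiplexedSend and QueueMultiplexedRecv carve one message into contiguous chunks, one per slave, using GetWork to pick each chunk's size and offset. A self-contained sketch that reproduces that split and checks the chunks tile the buffer exactly once (3 slaves, 1000 bytes):

#include <cassert>
#include <vector>
static void GetWorkSketch(int nwork, int me, int &mywork, int &myoff, int units) {
  int basework = nwork / units;
  int backfill = units - (nwork % units);
  if (me >= units) { mywork = myoff = 0; return; }
  mywork = (nwork + me) / units;
  myoff  = basework * me;
  if (me > backfill) myoff += (me - backfill);
}
int main() {
  const int bytes = 1000, slaves = 3;
  std::vector<int> covered(bytes, 0);
  for (int s = 0; s < slaves; s++) {
    int mywork, myoff;
    GetWorkSketch(bytes, s, mywork, myoff, slaves);
    for (int b = myoff; b < myoff + mywork; b++) covered[b]++;  // chunk handed to slave s+1
  }
  for (int b = 0; b < bytes; b++) assert(covered[b] == 1);      // no gaps, no overlaps
  return 0;
}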
|
||||
|
||||
|
||||
///////////////////////////////////////////////////////////////////////////////////////////////////
|
||||
// Info that is set up once and independent of cartesian layout
|
||||
///////////////////////////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
std::vector<Slave> MPIoffloadEngine::Slaves;
|
||||
|
||||
int MPIoffloadEngine::UniverseRank;
|
||||
int MPIoffloadEngine::UniverseSize;
|
||||
|
||||
MPI_Comm MPIoffloadEngine::communicator_universe;
|
||||
MPI_Comm MPIoffloadEngine::communicator_cached;
|
||||
MPI_Group MPIoffloadEngine::WorldGroup;
|
||||
MPI_Group MPIoffloadEngine::CachedGroup;
|
||||
|
||||
MPI_Comm MPIoffloadEngine::HorizontalComm;
|
||||
int MPIoffloadEngine::HorizontalRank;
|
||||
int MPIoffloadEngine::HorizontalSize;
|
||||
|
||||
MPI_Comm MPIoffloadEngine::VerticalComm;
|
||||
int MPIoffloadEngine::VerticalSize;
|
||||
int MPIoffloadEngine::VerticalRank;
|
||||
MPI_Win MPIoffloadEngine::VerticalWindow;
|
||||
std::vector<void *> MPIoffloadEngine::VerticalShmBufs;
|
||||
std::vector<std::vector<int> > MPIoffloadEngine::UniverseRanks;
|
||||
std::vector<int> MPIoffloadEngine::UserCommunicatorToWorldRanks;
|
||||
|
||||
int MPIoffloadEngine::ShmSetup = 0;
|
||||
|
||||
void MPIoffloadEngine::CommunicatorInit (MPI_Comm &communicator_world,
|
||||
MPI_Comm &ShmComm,
|
||||
void * &ShmCommBuf)
|
||||
{
|
||||
int flag;
|
||||
assert(ShmSetup==0);
|
||||
|
||||
//////////////////////////////////////////////////////////////////////
|
||||
// Universe is all nodes prior to squadron grouping
|
||||
//////////////////////////////////////////////////////////////////////
|
||||
MPI_Comm_dup (MPI_COMM_WORLD,&communicator_universe);
|
||||
MPI_Comm_rank(communicator_universe,&UniverseRank);
|
||||
MPI_Comm_size(communicator_universe,&UniverseSize);
|
||||
|
||||
/////////////////////////////////////////////////////////////////////
|
||||
// Split into groups that can share memory (Verticals)
|
||||
/////////////////////////////////////////////////////////////////////
|
||||
#undef MPI_SHARED_MEM_DEBUG
|
||||
#ifdef MPI_SHARED_MEM_DEBUG
|
||||
MPI_Comm_split(communicator_universe,(UniverseRank/4),UniverseRank,&VerticalComm);
|
||||
#else
|
||||
MPI_Comm_split_type(communicator_universe, MPI_COMM_TYPE_SHARED, 0, MPI_INFO_NULL,&VerticalComm);
|
||||
#endif
|
||||
MPI_Comm_rank(VerticalComm ,&VerticalRank);
|
||||
MPI_Comm_size(VerticalComm ,&VerticalSize);
|
||||
|
||||
//////////////////////////////////////////////////////////////////////
|
||||
// Split into horizontal groups by rank in squadron
|
||||
//////////////////////////////////////////////////////////////////////
|
||||
MPI_Comm_split(communicator_universe,VerticalRank,UniverseRank,&HorizontalComm);
|
||||
MPI_Comm_rank(HorizontalComm,&HorizontalRank);
|
||||
MPI_Comm_size(HorizontalComm,&HorizontalSize);
|
||||
assert(HorizontalSize*VerticalSize==UniverseSize);
|
||||
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
// What is my place in the world
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
int WorldRank=0;
|
||||
if(VerticalRank==0) WorldRank = HorizontalRank;
|
||||
int ierr=MPI_Allreduce(MPI_IN_PLACE,&WorldRank,1,MPI_INT,MPI_SUM,VerticalComm);
|
||||
assert(ierr==0);
|
||||
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
// Where is the world in the universe?
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
UniverseRanks = std::vector<std::vector<int> >(HorizontalSize,std::vector<int>(VerticalSize,0));
|
||||
UniverseRanks[WorldRank][VerticalRank] = UniverseRank;
|
||||
for(int w=0;w<HorizontalSize;w++){
|
||||
ierr=MPI_Allreduce(MPI_IN_PLACE,&UniverseRanks[w][0],VerticalSize,MPI_INT,MPI_SUM,communicator_universe);
|
||||
assert(ierr==0);
|
||||
}
|
||||
|
||||
//////////////////////////////////////////////////////////////////////////////////////////////////////////
|
||||
// allocate the shared window for our group, pass back Shm info to CartesianCommunicator
|
||||
//////////////////////////////////////////////////////////////////////////////////////////////////////////
|
||||
VerticalShmBufs.resize(VerticalSize);
|
||||
|
||||
#undef MPI_SHARED_MEM
|
||||
#ifdef MPI_SHARED_MEM
|
||||
ierr = MPI_Win_allocate_shared(CartesianCommunicator::MAX_MPI_SHM_BYTES,1,MPI_INFO_NULL,VerticalComm,&ShmCommBuf,&VerticalWindow);
|
||||
ierr|= MPI_Win_lock_all (MPI_MODE_NOCHECK, VerticalWindow);
|
||||
assert(ierr==0);
|
||||
// std::cout<<"SHM "<<ShmCommBuf<<std::endl;
|
||||
|
||||
for(int r=0;r<VerticalSize;r++){
|
||||
MPI_Aint sz;
|
||||
int dsp_unit;
|
||||
MPI_Win_shared_query (VerticalWindow, r, &sz, &dsp_unit, &VerticalShmBufs[r]);
|
||||
// std::cout<<"SHM "<<r<<" " <<VerticalShmBufs[r]<<std::endl;
|
||||
}
|
||||
#else
|
||||
char shm_name [NAME_MAX];
|
||||
MPI_Barrier(VerticalComm);
|
||||
|
||||
if ( VerticalRank == 0 ) {
|
||||
for(int r=0;r<VerticalSize;r++){
|
||||
|
||||
size_t size = CartesianCommunicator::MAX_MPI_SHM_BYTES;
|
||||
if ( r>0 ) size = sizeof(SlaveState);
|
||||
|
||||
sprintf(shm_name,"/Grid_mpi3_shm_%d_%d",WorldRank,r);
|
||||
|
||||
shm_unlink(shm_name);
|
||||
|
||||
int fd=shm_open(shm_name,O_RDWR|O_CREAT,0600);
|
||||
if ( fd < 0 ) {
|
||||
perror("failed shm_open");
|
||||
assert(0);
|
||||
}
|
||||
|
||||
ftruncate(fd, size);
|
||||
|
||||
VerticalShmBufs[r] = mmap(NULL,size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
|
||||
|
||||
if ( VerticalShmBufs[r] == MAP_FAILED ) {
|
||||
perror("failed mmap");
|
||||
assert(0);
|
||||
}
|
||||
|
||||
uint64_t * check = (uint64_t *) VerticalShmBufs[r];
|
||||
check[0] = WorldRank;
|
||||
check[1] = r;
|
||||
|
||||
// std::cout<<"SHM "<<r<<" " <<VerticalShmBufs[r]<<std::endl;
|
||||
}
|
||||
}
|
||||
|
||||
MPI_Barrier(VerticalComm);
|
||||
|
||||
if ( VerticalRank != 0 ) {
|
||||
for(int r=0;r<VerticalSize;r++){
|
||||
|
||||
size_t size = CartesianCommunicator::MAX_MPI_SHM_BYTES ;
|
||||
if ( r>0 ) size = sizeof(SlaveState);
|
||||
|
||||
sprintf(shm_name,"/Grid_mpi3_shm_%d_%d",WorldRank,r);
|
||||
|
||||
int fd=shm_open(shm_name,O_RDWR|O_CREAT,0600);
|
||||
if ( fd<0 ) {
|
||||
perror("failed shm_open");
|
||||
assert(0);
|
||||
}
|
||||
VerticalShmBufs[r] = mmap(NULL,size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
|
||||
|
||||
uint64_t * check = (uint64_t *) VerticalShmBufs[r];
|
||||
assert(check[0]== WorldRank);
|
||||
assert(check[1]== r);
|
||||
std::cerr<<"SHM "<<r<<" " <<VerticalShmBufs[r]<<std::endl;
|
||||
}
|
||||
}
|
||||
#endif
|
||||
MPI_Barrier(VerticalComm);
|
||||
|
||||
//////////////////////////////////////////////////////////////////////
|
||||
// Map rank of leader on node in their new world, to the
|
||||
// rank in this vertical plane's horizontal communicator
|
||||
//////////////////////////////////////////////////////////////////////
|
||||
communicator_world = HorizontalComm;
|
||||
ShmComm = VerticalComm;
|
||||
ShmCommBuf = VerticalShmBufs[0];
|
||||
MPI_Comm_group (communicator_world, &WorldGroup);
|
||||
|
||||
///////////////////////////////////////////////////////////
|
||||
// Start the slave data movers
|
||||
///////////////////////////////////////////////////////////
|
||||
if ( VerticalRank != 0 ) {
|
||||
Slave indentured;
|
||||
indentured.Init( (SlaveState *) VerticalShmBufs[VerticalRank], VerticalComm, UniverseRank,VerticalRank);
|
||||
indentured.SemInitExcl();// init semaphore in shared memory
|
||||
MPI_Barrier(VerticalComm);
|
||||
MPI_Barrier(VerticalComm);
|
||||
indentured.EventLoop();
|
||||
assert(0);
|
||||
} else {
|
||||
Slaves.resize(VerticalSize);
|
||||
for(int i=1;i<VerticalSize;i++){
|
||||
Slaves[i].Init((SlaveState *)VerticalShmBufs[i],VerticalComm, UniverseRanks[HorizontalRank][i],i);
|
||||
}
|
||||
MPI_Barrier(VerticalComm);
|
||||
for(int i=1;i<VerticalSize;i++){
|
||||
Slaves[i].SemInit();// init semaphore in shared memory
|
||||
}
|
||||
MPI_Barrier(VerticalComm);
|
||||
}
|
||||
|
||||
///////////////////////////////////////////////////////////
|
||||
// Verbose for now
|
||||
///////////////////////////////////////////////////////////
|
||||
|
||||
ShmSetup=1;
|
||||
|
||||
if (UniverseRank == 0){
|
||||
|
||||
std::cout<<GridLogMessage << "Grid MPI-3 configuration: detected ";
|
||||
std::cout<<UniverseSize << " Ranks " ;
|
||||
std::cout<<HorizontalSize << " Nodes " ;
|
||||
std::cout<<VerticalSize << " with ranks-per-node "<<std::endl;
|
||||
|
||||
std::cout<<GridLogMessage << "Grid MPI-3 configuration: using one lead process per node " << std::endl;
|
||||
std::cout<<GridLogMessage << "Grid MPI-3 configuration: reduced communicator has size " << HorizontalSize << std::endl;
|
||||
|
||||
for(int g=0;g<HorizontalSize;g++){
|
||||
std::cout<<GridLogMessage<<" Node "<<g<<" led by MPI rank "<< UniverseRanks[g][0]<<std::endl;
|
||||
}
|
||||
|
||||
for(int g=0;g<HorizontalSize;g++){
|
||||
std::cout<<GridLogMessage<<" { ";
|
||||
for(int s=0;s<VerticalSize;s++){
|
||||
std::cout<< UniverseRanks[g][s];
|
||||
if ( s<VerticalSize-1 ) {
|
||||
std::cout<<",";
|
||||
}
|
||||
}
|
||||
std::cout<<" } "<<std::endl;
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
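// -------------------------------------------------------------------------------------------------
// Editor's note (illustrative, not Grid code): the MPI_SHARED_MEM branch above is the standard
// MPI-3 shared-window recipe. A minimal, self-contained sketch of that pattern is given below;
// MapNodeSharedWindow and node_comm are hypothetical names, and node_comm is assumed to contain
// only the ranks of one node (e.g. from MPI_Comm_split_type with MPI_COMM_TYPE_SHARED).
#include <mpi.h>
#include <vector>
#include <cassert>

std::vector<void *> MapNodeSharedWindow(MPI_Comm node_comm, MPI_Aint bytes, MPI_Win &win)
{
  int nranks;  MPI_Comm_size(node_comm,&nranks);
  void *my_base = NULL;

  int ierr = MPI_Win_allocate_shared(bytes,1,MPI_INFO_NULL,node_comm,&my_base,&win);
  ierr    |= MPI_Win_lock_all(MPI_MODE_NOCHECK,win);      // passive-target epoch: plain loads/stores allowed
  assert(ierr==0);

  std::vector<void *> bufs(nranks);
  for(int r=0;r<nranks;r++){
    MPI_Aint sz; int dsp_unit;
    MPI_Win_shared_query(win,r,&sz,&dsp_unit,&bufs[r]);   // rank r's slice, mapped into my address space
  }
  return bufs;
}
// -------------------------------------------------------------------------------------------------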
///////////////////////////////////////////////////////////////////////////////////////////////
// Map the communicator into communicator_world, and find the neighbour.
// Cache the mappings; cache size is 1.
///////////////////////////////////////////////////////////////////////////////////////////////
void MPIoffloadEngine::MapCommRankToWorldRank(int &hashtag, int & comm_world_peer,int tag, MPI_Comm comm,int rank) {

  if ( comm == HorizontalComm ) {
    comm_world_peer = rank;
    // std::cout << " MapCommRankToWorldRank horiz " <<rank<<"->"<<comm_world_peer<<std::endl;
  } else if ( comm == communicator_cached ) {
    comm_world_peer = UserCommunicatorToWorldRanks[rank];
    // std::cout << " MapCommRankToWorldRank cached " <<rank<<"->"<<comm_world_peer<<std::endl;
  } else {

    int size;
    MPI_Comm_size(comm,&size);

    UserCommunicatorToWorldRanks.resize(size);

    std::vector<int> cached_ranks(size);
    for(int r=0;r<size;r++) {
      cached_ranks[r]=r;
    }

    communicator_cached=comm;

    MPI_Comm_group(communicator_cached, &CachedGroup);

    MPI_Group_translate_ranks(CachedGroup,size,&cached_ranks[0],WorldGroup, &UserCommunicatorToWorldRanks[0]);

    comm_world_peer = UserCommunicatorToWorldRanks[rank];
    // std::cout << " MapCommRankToWorldRank cache miss " <<rank<<"->"<<comm_world_peer<<std::endl;

    assert(comm_world_peer != MPI_UNDEFINED);
  }

  assert( (tag & (~0xFFFFL)) ==0);

  uint64_t icomm = (uint64_t)comm;
  int comm_hash = ((icomm>>0 )&0xFFFF)^((icomm>>16)&0xFFFF)
                ^ ((icomm>>32)&0xFFFF)^((icomm>>48)&0xFFFF);

  // hashtag = (comm_hash<<15) | tag;
  hashtag = tag;

};

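// Editor's sketch (illustrative, not Grid code): the cache-miss branch above relies on
// MPI_Group_translate_ranks to map a rank in an arbitrary user communicator onto the matching
// rank in a reference "world" communicator. A standalone version, with hypothetical names:
#include <mpi.h>

int TranslateToWorldRank(MPI_Comm user_comm, int user_rank, MPI_Comm world_comm)
{
  MPI_Group user_group, world_group;
  MPI_Comm_group(user_comm ,&user_group );
  MPI_Comm_group(world_comm,&world_group);

  int world_rank = MPI_UNDEFINED;
  MPI_Group_translate_ranks(user_group,1,&user_rank,world_group,&world_rank);

  MPI_Group_free(&user_group);
  MPI_Group_free(&world_group);
  return world_rank;              // MPI_UNDEFINED if user_rank has no counterpart in world_comm
}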
void Slave::Init(SlaveState * _state,MPI_Comm _squadron,int _universe_rank,int _vertical_rank)
{
  squadron=_squadron;
  universe_rank=_universe_rank;
  vertical_rank=_vertical_rank;
  state   =_state;
  // std::cout << "state "<<_state<<" comm "<<_squadron<<" universe_rank"<<universe_rank <<std::endl;
  state->head = state->tail = state->start = 0;
  base = (uint64_t)MPIoffloadEngine::VerticalShmBufs[0];
  int rank; MPI_Comm_rank(_squadron,&rank);
}
#define PERI_PLUS(A) ( (A+1)%pool )
int Slave::Event (void) {

  static int tail_last;
  static int head_last;
  static int start_last;
  int ierr;

  ////////////////////////////////////////////////////
  // Try to advance the start pointers
  ////////////////////////////////////////////////////
  int s=state->start;
  if ( s != state->head ) {
    switch ( state->Descrs[s].command ) {
    case COMMAND_ISEND:
      /*
        std::cout<< " Send "<<s << " ptr "<< state<<" "<< state->Descrs[s].buf<< "["<<state->Descrs[s].bytes<<"]"
                 << " to " << state->Descrs[s].rank<< " tag" << state->Descrs[s].tag
                 << " Comm " << MPIoffloadEngine::communicator_universe<< " me " <<universe_rank<< std::endl;
      */
      ierr = MPI_Isend((void *)(state->Descrs[s].buf+base),
                       state->Descrs[s].bytes,
                       MPI_CHAR,
                       state->Descrs[s].rank,
                       state->Descrs[s].tag,
                       MPIoffloadEngine::communicator_universe,
                       (MPI_Request *)&state->Descrs[s].request);
      assert(ierr==0);
      state->start = PERI_PLUS(s);
      return 1;
      break;

    case COMMAND_IRECV:
      /*
        std::cout<< " Recv "<<s << " ptr "<< state<<" "<< state->Descrs[s].buf<< "["<<state->Descrs[s].bytes<<"]"
                 << " from " << state->Descrs[s].rank<< " tag" << state->Descrs[s].tag
                 << " Comm " << MPIoffloadEngine::communicator_universe<< " me "<< universe_rank<< std::endl;
      */
      ierr=MPI_Irecv((void *)(state->Descrs[s].buf+base),
                     state->Descrs[s].bytes,
                     MPI_CHAR,
                     state->Descrs[s].rank,
                     state->Descrs[s].tag,
                     MPIoffloadEngine::communicator_universe,
                     (MPI_Request *)&state->Descrs[s].request);

      // std::cout<< " Request is "<<state->Descrs[s].request<<std::endl;
      // std::cout<< " Request0 is "<<state->Descrs[0].request<<std::endl;
      assert(ierr==0);
      state->start = PERI_PLUS(s);
      return 1;
      break;

    case COMMAND_WAITALL:

      for(int t=state->tail;t!=s; t=PERI_PLUS(t) ){
        MPI_Wait((MPI_Request *)&state->Descrs[t].request,MPI_STATUS_IGNORE);
      };
      s=PERI_PLUS(s);
      state->start = s;
      state->tail  = s;

      WakeUpCompute();

      return 1;
      break;

    default:
      assert(0);
      break;
    }
  }
  return 0;
}
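// Editor's note (illustrative helpers, not Grid code): the SlaveState FIFO above is driven by
// three cursors. Slots in [tail,start) hold MPI requests already posted and awaiting WAITALL,
// slots in [start,head) hold descriptors queued by the compute process but not yet issued, and
// the remaining slots are free for the producer:
static inline int RingDistance (int from,int to   ,int pool){ return (to-from+pool)%pool; }
static inline int InFlightSlots(int tail,int start,int pool){ return RingDistance(tail ,start,pool); }
static inline int QueuedSlots  (int start,int head,int pool){ return RingDistance(start,head ,pool); }
static inline int FreeSlots    (int head,int tail ,int pool){ return pool-1-RingDistance(tail,head,pool); }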
//////////////////////////////////////////////////////////////////////////////
// External interaction with the queue
//////////////////////////////////////////////////////////////////////////////

uint64_t Slave::QueueCommand(int command,void *buf, int bytes, int tag, MPI_Comm comm,int commrank)
{
  /////////////////////////////////////////
  // Spin while the FIFO is full, until a slot frees up
  /////////////////////////////////////////
  int head =state->head;
  int next = PERI_PLUS(head);

  // Set up descriptor
  int worldrank;
  int hashtag;
  MPI_Comm    communicator;
  MPI_Request request;

  MPIoffloadEngine::MapCommRankToWorldRank(hashtag,worldrank,tag,comm,commrank);

  uint64_t relative= (uint64_t)buf - base;
  state->Descrs[head].buf    = relative;
  state->Descrs[head].bytes  = bytes;
  state->Descrs[head].rank   = MPIoffloadEngine::UniverseRanks[worldrank][vertical_rank];
  state->Descrs[head].tag    = hashtag;
  state->Descrs[head].command= command;

  /*
  if ( command == COMMAND_ISEND ) {
    std::cout << "QueueSend from "<< universe_rank <<" to commrank " << commrank
              << " to worldrank " << worldrank <<std::endl;
    std::cout << " via VerticalRank "<< vertical_rank <<" to universerank " << MPIoffloadEngine::UniverseRanks[worldrank][vertical_rank]<<std::endl;
    std::cout << " QueueCommand "<<buf<<"["<<bytes<<"]" << std::endl;
  }
  if ( command == COMMAND_IRECV ) {
    std::cout << "QueueRecv on "<< universe_rank <<" from commrank " << commrank
              << " from worldrank " << worldrank <<std::endl;
    std::cout << " via VerticalRank "<< vertical_rank <<" from universerank " << MPIoffloadEngine::UniverseRanks[worldrank][vertical_rank]<<std::endl;
    std::cout << " QueueSend "<<buf<<"["<<bytes<<"]" << std::endl;
  }
  */
  // Block until FIFO has space
  while( state->tail==next );

  // Msync on weak order architectures
  // Advance pointer
  state->head = next;

  return 0;
}

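// Editor's note (illustrative, not Grid code): the "Msync on weak order architectures" comment
// above refers to the release ordering a producer needs between filling Descrs[head] and
// publishing the new head. A sketch with C++11 atomics, using a hypothetical Descriptor type:
#include <atomic>
#include <cstdint>

struct Descriptor { uint64_t buf; int bytes; int rank; int tag; int command; uint64_t request; };

void PublishDescriptor(Descriptor *slot,const Descriptor &d,volatile int &head,int next)
{
  *slot = d;                                            // 1. fill the descriptor slot
  std::atomic_thread_fence(std::memory_order_release);  // 2. order the fill before the publish
  head  = next;                                         // 3. only then make the new head visible
}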
///////////////////////////////////////////////////////////////////////////////////////////////////
// Info that is set up once and independent of Cartesian layout
///////////////////////////////////////////////////////////////////////////////////////////////////

MPI_Comm CartesianCommunicator::communicator_world;

void CartesianCommunicator::Init(int *argc, char ***argv)
{
  int flag;
  MPI_Initialized(&flag); // needed to coexist with other libs apparently
  if ( !flag ) {
    MPI_Init(argc,argv);
  }
  communicator_world = MPI_COMM_WORLD;
  MPI_Comm ShmComm;
  MPIoffloadEngine::CommunicatorInit (communicator_world,ShmComm,ShmCommBuf);
}
void CartesianCommunicator::ShiftedRanks(int dim,int shift,int &source,int &dest)
{
  int ierr=MPI_Cart_shift(communicator,dim,shift,&source,&dest);
  assert(ierr==0);
}
int CartesianCommunicator::RankFromProcessorCoor(std::vector<int> &coor)
{
  int rank;
  int ierr=MPI_Cart_rank  (communicator, &coor[0], &rank);
  assert(ierr==0);
  return rank;
}
void CartesianCommunicator::ProcessorCoorFromRank(int rank, std::vector<int> &coor)
{
  coor.resize(_ndimension);
  int ierr=MPI_Cart_coords  (communicator, rank, _ndimension,&coor[0]);
  assert(ierr==0);
}

CartesianCommunicator::CartesianCommunicator(const std::vector<int> &processors)
{
  _ndimension = processors.size();
  std::vector<int> periodic(_ndimension,1);

  _Nprocessors=1;
  _processors = processors;

  for(int i=0;i<_ndimension;i++){
    _Nprocessors*=_processors[i];
  }

  int Size;
  MPI_Comm_size(communicator_world,&Size);
  assert(Size==_Nprocessors);

  _processor_coor.resize(_ndimension);
  MPI_Cart_create(communicator_world, _ndimension,&_processors[0],&periodic[0],1,&communicator);
  MPI_Comm_rank  (communicator,&_processor);
  MPI_Cart_coords(communicator,_processor,_ndimension,&_processor_coor[0]);
};

void CartesianCommunicator::GlobalSum(uint32_t &u){
  int ierr=MPI_Allreduce(MPI_IN_PLACE,&u,1,MPI_UINT32_T,MPI_SUM,communicator);
  assert(ierr==0);
}
void CartesianCommunicator::GlobalSum(uint64_t &u){
  int ierr=MPI_Allreduce(MPI_IN_PLACE,&u,1,MPI_UINT64_T,MPI_SUM,communicator);
  assert(ierr==0);
}
void CartesianCommunicator::GlobalSum(float &f){
  int ierr=MPI_Allreduce(MPI_IN_PLACE,&f,1,MPI_FLOAT,MPI_SUM,communicator);
  assert(ierr==0);
}
void CartesianCommunicator::GlobalSumVector(float *f,int N)
{
  int ierr=MPI_Allreduce(MPI_IN_PLACE,f,N,MPI_FLOAT,MPI_SUM,communicator);
  assert(ierr==0);
}
void CartesianCommunicator::GlobalSum(double &d)
{
  int ierr = MPI_Allreduce(MPI_IN_PLACE,&d,1,MPI_DOUBLE,MPI_SUM,communicator);
  assert(ierr==0);
}
void CartesianCommunicator::GlobalSumVector(double *d,int N)
{
  int ierr = MPI_Allreduce(MPI_IN_PLACE,d,N,MPI_DOUBLE,MPI_SUM,communicator);
  assert(ierr==0);
}

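// Editor's sketch (illustrative, not a class member): the same in-place MPI_Allreduce pattern
// used by the GlobalSum overloads above extends to complex sums by reducing the two real words.
#include <mpi.h>
#include <complex>
#include <cassert>

inline void GlobalSumComplex(std::complex<double> &c,MPI_Comm comm)
{
  int ierr = MPI_Allreduce(MPI_IN_PLACE,reinterpret_cast<double *>(&c),2,MPI_DOUBLE,MPI_SUM,comm);
  assert(ierr==0);
}
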
// Basic Halo comms primitive
void CartesianCommunicator::SendToRecvFrom(void *xmit,
                                           int dest,
                                           void *recv,
                                           int from,
                                           int bytes)
{
  std::vector<CommsRequest_t> reqs(0);
  SendToRecvFromBegin(reqs,xmit,dest,recv,from,bytes);
  SendToRecvFromComplete(reqs);
}

void CartesianCommunicator::SendRecvPacket(void *xmit,
                                           void *recv,
                                           int sender,
                                           int receiver,
                                           int bytes)
{
  MPI_Status stat;
  assert(sender != receiver);
  int tag = sender;
  if ( _processor == sender ) {
    MPI_Send(xmit, bytes, MPI_CHAR,receiver,tag,communicator);
  }
  if ( _processor == receiver ) {
    MPI_Recv(recv, bytes, MPI_CHAR,sender,tag,communicator,&stat);
  }
}

// Basic Halo comms primitive
void CartesianCommunicator::SendToRecvFromBegin(std::vector<CommsRequest_t> &list,
                                                void *xmit,
                                                int dest,
                                                void *recv,
                                                int from,
                                                int bytes)
{
  MPI_Request xrq;
  MPI_Request rrq;
  int rank = _processor;
  int ierr;
  ierr =MPI_Isend(xmit, bytes, MPI_CHAR,dest,_processor,communicator,&xrq);
  ierr|=MPI_Irecv(recv, bytes, MPI_CHAR,from,from,communicator,&rrq);

  assert(ierr==0);

  list.push_back(xrq);
  list.push_back(rrq);
}

void CartesianCommunicator::StencilSendToRecvFromBegin(std::vector<CommsRequest_t> &list,
|
||||
void *xmit,
|
||||
int dest,
|
||||
void *recv,
|
||||
int from,
|
||||
int bytes)
|
||||
{
|
||||
uint64_t xmit_i = (uint64_t) xmit;
|
||||
uint64_t recv_i = (uint64_t) recv;
|
||||
uint64_t shm = (uint64_t) ShmCommBuf;
|
||||
// assert xmit and recv lie in shared memory region
|
||||
assert( (xmit_i >= shm) && (xmit_i+bytes <= shm+MAX_MPI_SHM_BYTES) );
|
||||
assert( (recv_i >= shm) && (recv_i+bytes <= shm+MAX_MPI_SHM_BYTES) );
|
||||
assert(from!=_processor);
|
||||
assert(dest!=_processor);
|
||||
MPIoffloadEngine::QueueMultiplexedSend(xmit,bytes,_processor,communicator,dest);
|
||||
MPIoffloadEngine::QueueMultiplexedRecv(recv,bytes,from,communicator,from);
|
||||
}
|
||||
|
||||
|
||||
void CartesianCommunicator::StencilSendToRecvFromComplete(std::vector<CommsRequest_t> &list)
|
||||
{
|
||||
MPIoffloadEngine::WaitAll();
|
||||
}
|
||||
|
||||
void CartesianCommunicator::StencilBarrier(void)
|
||||
{
|
||||
}
|
||||
|
||||
void CartesianCommunicator::SendToRecvFromComplete(std::vector<CommsRequest_t> &list)
|
||||
{
|
||||
int nreq=list.size();
|
||||
std::vector<MPI_Status> status(nreq);
|
||||
int ierr = MPI_Waitall(nreq,&list[0],&status[0]);
|
||||
assert(ierr==0);
|
||||
}
|
||||
|
||||
void CartesianCommunicator::Barrier(void)
|
||||
{
|
||||
int ierr = MPI_Barrier(communicator);
|
||||
assert(ierr==0);
|
||||
}
|
||||
|
||||
void CartesianCommunicator::Broadcast(int root,void* data, int bytes)
|
||||
{
|
||||
int ierr=MPI_Bcast(data,
|
||||
bytes,
|
||||
MPI_BYTE,
|
||||
root,
|
||||
communicator);
|
||||
assert(ierr==0);
|
||||
}
|
||||
|
||||
void CartesianCommunicator::BroadcastWorld(int root,void* data, int bytes)
|
||||
{
|
||||
int ierr= MPI_Bcast(data,
|
||||
bytes,
|
||||
MPI_BYTE,
|
||||
root,
|
||||
communicator_world);
|
||||
assert(ierr==0);
|
||||
}
|
||||
|
||||
void *CartesianCommunicator::ShmBufferSelf(void) { return ShmCommBuf; }
|
||||
|
||||
void *CartesianCommunicator::ShmBuffer(int rank) {
|
||||
return NULL;
|
||||
}
|
||||
void *CartesianCommunicator::ShmBufferTranslate(int rank,void * local_p) {
|
||||
return NULL;
|
||||
}
|
||||
|
||||
|
||||
};
|
||||
|
@ -28,12 +28,15 @@ Author: Peter Boyle <paboyle@ph.ed.ac.uk>
|
||||
#include "Grid.h"
|
||||
namespace Grid {
|
||||
|
||||
///////////////////////////////////////////////////////////////////////////////////////////////////
|
||||
// Info that is set up once and independent of Cartesian layout
|
||||
///////////////////////////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
void CartesianCommunicator::Init(int *argc, char ***argv)
|
||||
{
|
||||
ShmInitGeneric();
|
||||
}
|
||||
|
||||
int Rank(void ){ return 0; };
|
||||
|
||||
CartesianCommunicator::CartesianCommunicator(const std::vector<int> &processors)
|
||||
{
|
||||
_processors = processors;
|
||||
@ -89,30 +92,17 @@ void CartesianCommunicator::SendToRecvFromComplete(std::vector<CommsRequest_t> &
|
||||
assert(0);
|
||||
}
|
||||
|
||||
void CartesianCommunicator::Barrier(void)
|
||||
{
|
||||
}
|
||||
|
||||
void CartesianCommunicator::Broadcast(int root,void* data, int bytes)
|
||||
{
|
||||
}
|
||||
void CartesianCommunicator::BroadcastWorld(int root,void* data, int bytes)
|
||||
{
|
||||
}
|
||||
|
||||
|
||||
int CartesianCommunicator::RankWorld(void){return 0;}
|
||||
void CartesianCommunicator::Barrier(void){}
|
||||
void CartesianCommunicator::Broadcast(int root,void* data, int bytes) {}
|
||||
void CartesianCommunicator::BroadcastWorld(int root,void* data, int bytes) { }
|
||||
int CartesianCommunicator::RankFromProcessorCoor(std::vector<int> &coor) { return 0;}
|
||||
void CartesianCommunicator::ProcessorCoorFromRank(int rank, std::vector<int> &coor){ coor = _processor_coor ;}
|
||||
void CartesianCommunicator::ShiftedRanks(int dim,int shift,int &source,int &dest)
|
||||
{
|
||||
source =0;
|
||||
dest=0;
|
||||
}
|
||||
int CartesianCommunicator::RankFromProcessorCoor(std::vector<int> &coor)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
void CartesianCommunicator::ProcessorCoorFromRank(int rank, std::vector<int> &coor)
|
||||
{
|
||||
}
|
||||
|
||||
|
||||
}
|
||||
|
@ -39,14 +39,24 @@ namespace Grid {
|
||||
BACKTRACEFILE(); \
|
||||
}\
|
||||
}
|
||||
int Rank(void) {
|
||||
return shmem_my_pe();
|
||||
}
|
||||
|
||||
|
||||
///////////////////////////////////////////////////////////////////////////////////////////////////
|
||||
// Info that is set up once and independent of Cartesian layout
|
||||
///////////////////////////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
typedef struct HandShake_t {
|
||||
uint64_t seq_local;
|
||||
uint64_t seq_remote;
|
||||
} HandShake;
|
||||
|
||||
std::array<long,_SHMEM_REDUCE_SYNC_SIZE> make_psync_init(void) {
|
||||
std::array<long,_SHMEM_REDUCE_SYNC_SIZE> ret;
|
||||
ret.fill(SHMEM_SYNC_VALUE);
|
||||
return ret;
|
||||
}
|
||||
static std::array<long,_SHMEM_REDUCE_SYNC_SIZE> psync_init = make_psync_init();
|
||||
|
||||
static Vector< HandShake > XConnections;
|
||||
static Vector< HandShake > RConnections;
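// Editor's sketch (illustrative, not Grid code): the psync_init change above exists because
// OpenSHMEM requires every entry of a pSync array to hold SHMEM_SYNC_VALUE before its first
// collective use, and both pWrk and pSync must live in symmetric (static or shmalloc'd) memory,
// which is why the reduction methods below keep them static. A conservative standalone example:
#include <mpp/shmem.h>

static float sumwrk [_SHMEM_REDUCE_MIN_WRKDATA_SIZE];
static long  sumsync[_SHMEM_REDUCE_SYNC_SIZE];

float sum_over_pes(float x)
{
  static float src, dst;
  static int initialised = 0;
  if ( !initialised ) {
    for(int i=0;i<_SHMEM_REDUCE_SYNC_SIZE;i++) sumsync[i]=SHMEM_SYNC_VALUE;
    shmem_barrier_all();                 // every PE must have initialised pSync before first use
    initialised = 1;
  }
  src = x;
  shmem_float_sum_to_all(&dst,&src,1,0,0,shmem_n_pes(),sumwrk,sumsync);
  return dst;
}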
|
||||
|
||||
@ -61,7 +71,9 @@ void CartesianCommunicator::Init(int *argc, char ***argv) {
|
||||
RConnections[pe].seq_remote= 0;
|
||||
}
|
||||
shmem_barrier_all();
|
||||
ShmInitGeneric();
|
||||
}
|
||||
|
||||
CartesianCommunicator::CartesianCommunicator(const std::vector<int> &processors)
|
||||
{
|
||||
_ndimension = processors.size();
|
||||
@ -89,7 +101,7 @@ void CartesianCommunicator::GlobalSum(uint32_t &u){
|
||||
static long long source ;
|
||||
static long long dest ;
|
||||
static long long llwrk[_SHMEM_REDUCE_MIN_WRKDATA_SIZE];
|
||||
static long psync[_SHMEM_REDUCE_SYNC_SIZE];
|
||||
static std::array<long,_SHMEM_REDUCE_SYNC_SIZE> psync = psync_init;
|
||||
|
||||
// int nreduce=1;
|
||||
// int pestart=0;
|
||||
@ -105,7 +117,7 @@ void CartesianCommunicator::GlobalSum(uint64_t &u){
|
||||
static long long source ;
|
||||
static long long dest ;
|
||||
static long long llwrk[_SHMEM_REDUCE_MIN_WRKDATA_SIZE];
|
||||
static long psync[_SHMEM_REDUCE_SYNC_SIZE];
|
||||
static std::array<long,_SHMEM_REDUCE_SYNC_SIZE> psync = psync_init;
|
||||
|
||||
// int nreduce=1;
|
||||
// int pestart=0;
|
||||
@ -121,7 +133,7 @@ void CartesianCommunicator::GlobalSum(float &f){
|
||||
static float source ;
|
||||
static float dest ;
|
||||
static float llwrk[_SHMEM_REDUCE_MIN_WRKDATA_SIZE];
|
||||
static long psync[_SHMEM_REDUCE_SYNC_SIZE];
|
||||
static std::array<long,_SHMEM_REDUCE_SYNC_SIZE> psync = psync_init;
|
||||
|
||||
source = f;
|
||||
dest =0.0;
|
||||
@ -133,7 +145,7 @@ void CartesianCommunicator::GlobalSumVector(float *f,int N)
|
||||
static float source ;
|
||||
static float dest = 0 ;
|
||||
static float llwrk[_SHMEM_REDUCE_MIN_WRKDATA_SIZE];
|
||||
static long psync[_SHMEM_REDUCE_SYNC_SIZE];
|
||||
static std::array<long,_SHMEM_REDUCE_SYNC_SIZE> psync = psync_init;
|
||||
|
||||
if ( shmem_addr_accessible(f,_processor) ){
|
||||
shmem_float_sum_to_all(f,f,N,0,0,_Nprocessors,llwrk,psync);
|
||||
@ -152,7 +164,7 @@ void CartesianCommunicator::GlobalSum(double &d)
|
||||
static double source;
|
||||
static double dest ;
|
||||
static double llwrk[_SHMEM_REDUCE_MIN_WRKDATA_SIZE];
|
||||
static long psync[_SHMEM_REDUCE_SYNC_SIZE];
|
||||
static std::array<long,_SHMEM_REDUCE_SYNC_SIZE> psync = psync_init;
|
||||
|
||||
source = d;
|
||||
dest = 0;
|
||||
@ -164,7 +176,8 @@ void CartesianCommunicator::GlobalSumVector(double *d,int N)
|
||||
static double source ;
|
||||
static double dest ;
|
||||
static double llwrk[_SHMEM_REDUCE_MIN_WRKDATA_SIZE];
|
||||
static long psync[_SHMEM_REDUCE_SYNC_SIZE];
|
||||
static std::array<long,_SHMEM_REDUCE_SYNC_SIZE> psync = psync_init;
|
||||
|
||||
|
||||
if ( shmem_addr_accessible(d,_processor) ){
|
||||
shmem_double_sum_to_all(d,d,N,0,0,_Nprocessors,llwrk,psync);
|
||||
@ -230,12 +243,9 @@ void CartesianCommunicator::SendRecvPacket(void *xmit,
|
||||
|
||||
if ( _processor == sender ) {
|
||||
|
||||
printf("Sender SHMEM pt2pt %d -> %d\n",sender,receiver);
|
||||
// Check he has posted a receive
|
||||
while(SendSeq->seq_remote == SendSeq->seq_local);
|
||||
|
||||
printf("Sender receive %d posted\n",sender,receiver);
|
||||
|
||||
// Advance our send count
|
||||
seq = ++(SendSeq->seq_local);
|
||||
|
||||
@ -244,26 +254,19 @@ void CartesianCommunicator::SendRecvPacket(void *xmit,
|
||||
shmem_putmem(recv,xmit,bytes,receiver);
|
||||
shmem_fence();
|
||||
|
||||
printf("Sender sent payload %d\n",seq);
|
||||
//Notify him we're done
|
||||
shmem_putmem((void *)&(RecvSeq->seq_remote),&seq,sizeof(seq),receiver);
|
||||
shmem_fence();
|
||||
printf("Sender ringing door bell %d\n",seq);
|
||||
}
|
||||
if ( _processor == receiver ) {
|
||||
|
||||
printf("Receiver SHMEM pt2pt %d->%d\n",sender,receiver);
|
||||
// Post a receive
|
||||
seq = ++(RecvSeq->seq_local);
|
||||
shmem_putmem((void *)&(SendSeq->seq_remote),&seq,sizeof(seq),sender);
|
||||
|
||||
printf("Receiver Opening letter box %d\n",seq);
|
||||
|
||||
|
||||
// Now wait until he has advanced our reception counter
|
||||
while(RecvSeq->seq_remote != RecvSeq->seq_local);
|
||||
|
||||
printf("Receiver Got the mail %d\n",seq);
|
||||
}
|
||||
}
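// Editor's sketch (illustrative, not Grid code): the SendRecvPacket handshake above is a
// put-plus-doorbell protocol; the payload is delivered with shmem_putmem, shmem_fence orders it
// before the flag update, and the receiver spins on the flag. Standard OpenSHMEM point-to-point
// synchronisation calls (shmem_long_p, shmem_long_wait_until) make the same idea explicit:
#include <mpp/shmem.h>

static long   doorbell = 0;                 // symmetric flag
static double letterbox[16];                // symmetric landing zone

void send_packet(int receiver,const double *payload,int n,long seq)
{
  shmem_putmem(letterbox,(void *)payload,n*sizeof(double),receiver); // deliver the payload
  shmem_fence();                                                     // payload ordered before doorbell
  shmem_long_p(&doorbell,seq,receiver);                              // ring the bell
}

void wait_packet(long seq)
{
  shmem_long_wait_until(&doorbell,SHMEM_CMP_EQ,seq);                 // spin until the bell rings
}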
|
||||
|
||||
@ -291,7 +294,7 @@ void CartesianCommunicator::Barrier(void)
|
||||
}
|
||||
void CartesianCommunicator::Broadcast(int root,void* data, int bytes)
|
||||
{
|
||||
static long psync[_SHMEM_REDUCE_SYNC_SIZE];
|
||||
static std::array<long,_SHMEM_REDUCE_SYNC_SIZE> psync = psync_init;
|
||||
static uint32_t word;
|
||||
uint32_t *array = (uint32_t *) data;
|
||||
assert( (bytes % 4)==0);
|
||||
@ -314,7 +317,7 @@ void CartesianCommunicator::Broadcast(int root,void* data, int bytes)
|
||||
}
|
||||
void CartesianCommunicator::BroadcastWorld(int root,void* data, int bytes)
|
||||
{
|
||||
static long psync[_SHMEM_REDUCE_SYNC_SIZE];
|
||||
static std::array<long,_SHMEM_REDUCE_SYNC_SIZE> psync = psync_init;
|
||||
static uint32_t word;
|
||||
uint32_t *array = (uint32_t *) data;
|
||||
assert( (bytes % 4)==0);
|
||||
|
@ -1,3 +1,4 @@
|
||||
|
||||
/*************************************************************************************
|
||||
|
||||
Grid physics library, www.github.com/paboyle/Grid
|
||||
@ -44,7 +45,7 @@ public:
|
||||
// Gather for when there is no need to SIMD split with compression
|
||||
///////////////////////////////////////////////////////////////////
|
||||
template<class vobj,class cobj,class compressor> void
|
||||
Gather_plane_simple (const Lattice<vobj> &rhs,std::vector<cobj,alignedAllocator<cobj> > &buffer,int dimension,int plane,int cbmask,compressor &compress, int off=0)
|
||||
Gather_plane_simple (const Lattice<vobj> &rhs,commVector<cobj> &buffer,int dimension,int plane,int cbmask,compressor &compress, int off=0)
|
||||
{
|
||||
int rd = rhs._grid->_rdimensions[dimension];
|
||||
|
||||
@ -56,6 +57,7 @@ Gather_plane_simple (const Lattice<vobj> &rhs,std::vector<cobj,alignedAllocator<
|
||||
|
||||
int e1=rhs._grid->_slice_nblock[dimension];
|
||||
int e2=rhs._grid->_slice_block[dimension];
|
||||
|
||||
int stride=rhs._grid->_slice_stride[dimension];
|
||||
if ( cbmask == 0x3 ) {
|
||||
PARALLEL_NESTED_LOOP2
|
||||
@ -68,15 +70,20 @@ PARALLEL_NESTED_LOOP2
|
||||
}
|
||||
} else {
|
||||
int bo=0;
|
||||
std::vector<std::pair<int,int> > table;
|
||||
for(int n=0;n<e1;n++){
|
||||
for(int b=0;b<e2;b++){
|
||||
int o = n*stride;
|
||||
int ocb=1<<rhs._grid->CheckerBoardFromOindex(o+b);// Could easily be a table lookup
|
||||
int ocb=1<<rhs._grid->CheckerBoardFromOindexTable(o+b);
|
||||
if ( ocb &cbmask ) {
|
||||
buffer[off+bo++]=compress(rhs._odata[so+o+b]);
|
||||
table.push_back(std::pair<int,int> (bo++,o+b));
|
||||
}
|
||||
}
|
||||
}
|
||||
PARALLEL_FOR_LOOP
|
||||
for(int i=0;i<table.size();i++){
|
||||
buffer[off+table[i].first]=compress(rhs._odata[so+table[i].second]);
|
||||
}
|
||||
}
|
||||
}
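// Editor's sketch (illustrative, not Grid code): the change above replaces a branch inside the
// copy loop with a two-pass gather -- pass 1 builds a (destination,source) index table serially,
// pass 2 copies through the table in a thread-parallel loop (a plain OpenMP pragma stands in for
// Grid's PARALLEL_FOR_LOOP macro here).
#include <vector>
#include <utility>

template<class obj>
void gather_by_table(std::vector<obj> &buffer,const std::vector<obj> &field,
                     const std::vector<int> &mask,int off)
{
  // pass 1: serial -- record the surviving sites
  std::vector<std::pair<int,int> > table;
  int bo=0;
  for(int s=0;s<(int)field.size();s++){
    if ( mask[s] ) table.push_back(std::pair<int,int>(bo++,s));
  }
  // pass 2: parallel -- the copy is now a flat loop with no branch
#pragma omp parallel for
  for(int i=0;i<(int)table.size();i++){
    buffer[off+table[i].first] = field[table[i].second];
  }
}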
|
||||
|
||||
@ -107,6 +114,7 @@ PARALLEL_NESTED_LOOP2
|
||||
int o = n*n1;
|
||||
int offset = b+n*n2;
|
||||
cobj temp =compress(rhs._odata[so+o+b]);
|
||||
|
||||
extract<cobj>(temp,pointers,offset);
|
||||
|
||||
}
|
||||
@ -114,6 +122,7 @@ PARALLEL_NESTED_LOOP2
|
||||
} else {
|
||||
|
||||
assert(0); //Fixme think this is buggy
|
||||
|
||||
for(int n=0;n<e1;n++){
|
||||
for(int b=0;b<e2;b++){
|
||||
int o=n*rhs._grid->_slice_stride[dimension];
|
||||
@ -132,7 +141,7 @@ PARALLEL_NESTED_LOOP2
|
||||
//////////////////////////////////////////////////////
|
||||
// Gather for when there is no need to SIMD split
|
||||
//////////////////////////////////////////////////////
|
||||
template<class vobj> void Gather_plane_simple (const Lattice<vobj> &rhs,std::vector<vobj,alignedAllocator<vobj> > &buffer, int dimension,int plane,int cbmask)
|
||||
template<class vobj> void Gather_plane_simple (const Lattice<vobj> &rhs,commVector<vobj> &buffer, int dimension,int plane,int cbmask)
|
||||
{
|
||||
SimpleCompressor<vobj> dontcompress;
|
||||
Gather_plane_simple (rhs,buffer,dimension,plane,cbmask,dontcompress);
|
||||
@ -150,7 +159,7 @@ template<class vobj> void Gather_plane_extract(const Lattice<vobj> &rhs,std::vec
|
||||
//////////////////////////////////////////////////////
|
||||
// Scatter for when there is no need to SIMD split
|
||||
//////////////////////////////////////////////////////
|
||||
template<class vobj> void Scatter_plane_simple (Lattice<vobj> &rhs,std::vector<vobj,alignedAllocator<vobj> > &buffer, int dimension,int plane,int cbmask)
|
||||
template<class vobj> void Scatter_plane_simple (Lattice<vobj> &rhs,commVector<vobj> &buffer, int dimension,int plane,int cbmask)
|
||||
{
|
||||
int rd = rhs._grid->_rdimensions[dimension];
|
||||
|
||||
|
@ -119,8 +119,8 @@ template<class vobj> void Cshift_comms(Lattice<vobj> &ret,const Lattice<vobj> &r
|
||||
assert(shift<fd);
|
||||
|
||||
int buffer_size = rhs._grid->_slice_nblock[dimension]*rhs._grid->_slice_block[dimension];
|
||||
std::vector<vobj,alignedAllocator<vobj> > send_buf(buffer_size);
|
||||
std::vector<vobj,alignedAllocator<vobj> > recv_buf(buffer_size);
|
||||
commVector<vobj> send_buf(buffer_size);
|
||||
commVector<vobj> recv_buf(buffer_size);
|
||||
|
||||
int cb= (cbmask==0x2)? Odd : Even;
|
||||
int sshift= rhs._grid->CheckerBoardShiftForCB(rhs.checkerboard,dimension,shift,cb);
|
||||
@ -191,8 +191,8 @@ template<class vobj> void Cshift_comms_simd(Lattice<vobj> &ret,const Lattice<vo
|
||||
int buffer_size = grid->_slice_nblock[dimension]*grid->_slice_block[dimension];
|
||||
int words = sizeof(vobj)/sizeof(vector_type);
|
||||
|
||||
std::vector<Vector<scalar_object> > send_buf_extract(Nsimd,Vector<scalar_object>(buffer_size) );
|
||||
std::vector<Vector<scalar_object> > recv_buf_extract(Nsimd,Vector<scalar_object>(buffer_size) );
|
||||
std::vector<commVector<scalar_object> > send_buf_extract(Nsimd,commVector<scalar_object>(buffer_size) );
|
||||
std::vector<commVector<scalar_object> > recv_buf_extract(Nsimd,commVector<scalar_object>(buffer_size) );
|
||||
|
||||
int bytes = buffer_size*sizeof(scalar_object);
|
||||
|
||||
|
lib/fftw/fftw3.h
@ -1,412 +0,0 @@
|
||||
/*
|
||||
* Copyright (c) 2003, 2007-14 Matteo Frigo
|
||||
* Copyright (c) 2003, 2007-14 Massachusetts Institute of Technology
|
||||
*
|
||||
* The following statement of license applies *only* to this header file,
|
||||
* and *not* to the other files distributed with FFTW or derived therefrom:
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that the following conditions
|
||||
* are met:
|
||||
*
|
||||
* 1. Redistributions of source code must retain the above copyright
|
||||
* notice, this list of conditions and the following disclaimer.
|
||||
*
|
||||
* 2. Redistributions in binary form must reproduce the above copyright
|
||||
* notice, this list of conditions and the following disclaimer in the
|
||||
* documentation and/or other materials provided with the distribution.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
|
||||
* OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
|
||||
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
||||
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
|
||||
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
||||
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
|
||||
* GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
|
||||
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
|
||||
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
|
||||
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
|
||||
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*/
|
||||
|
||||
/***************************** NOTE TO USERS *********************************
|
||||
*
|
||||
* THIS IS A HEADER FILE, NOT A MANUAL
|
||||
*
|
||||
* If you want to know how to use FFTW, please read the manual,
|
||||
* online at http://www.fftw.org/doc/ and also included with FFTW.
|
||||
* For a quick start, see the manual's tutorial section.
|
||||
*
|
||||
* (Reading header files to learn how to use a library is a habit
|
||||
* stemming from code lacking a proper manual. Arguably, it's a
|
||||
* *bad* habit in most cases, because header files can contain
|
||||
* interfaces that are not part of the public, stable API.)
|
||||
*
|
||||
****************************************************************************/
|
||||
|
||||
#ifndef FFTW3_H
|
||||
#define FFTW3_H
|
||||
|
||||
#include <stdio.h>
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C"
|
||||
{
|
||||
#endif /* __cplusplus */
|
||||
|
||||
/* If <complex.h> is included, use the C99 complex type. Otherwise
|
||||
define a type bit-compatible with C99 complex */
|
||||
#if !defined(FFTW_NO_Complex) && defined(_Complex_I) && defined(complex) && defined(I)
|
||||
# define FFTW_DEFINE_COMPLEX(R, C) typedef R _Complex C
|
||||
#else
|
||||
# define FFTW_DEFINE_COMPLEX(R, C) typedef R C[2]
|
||||
#endif
|
||||
|
||||
#define FFTW_CONCAT(prefix, name) prefix ## name
|
||||
#define FFTW_MANGLE_DOUBLE(name) FFTW_CONCAT(fftw_, name)
|
||||
#define FFTW_MANGLE_FLOAT(name) FFTW_CONCAT(fftwf_, name)
|
||||
#define FFTW_MANGLE_LONG_DOUBLE(name) FFTW_CONCAT(fftwl_, name)
|
||||
#define FFTW_MANGLE_QUAD(name) FFTW_CONCAT(fftwq_, name)
|
||||
|
||||
/* IMPORTANT: for Windows compilers, you should add a line
|
||||
#define FFTW_DLL
|
||||
here and in kernel/ifftw.h if you are compiling/using FFTW as a
|
||||
DLL, in order to do the proper importing/exporting, or
|
||||
alternatively compile with -DFFTW_DLL or the equivalent
|
||||
command-line flag. This is not necessary under MinGW/Cygwin, where
|
||||
libtool does the imports/exports automatically. */
|
||||
#if defined(FFTW_DLL) && (defined(_WIN32) || defined(__WIN32__))
|
||||
/* annoying Windows syntax for shared-library declarations */
|
||||
# if defined(COMPILING_FFTW) /* defined in api.h when compiling FFTW */
|
||||
# define FFTW_EXTERN extern __declspec(dllexport)
|
||||
# else /* user is calling FFTW; import symbol */
|
||||
# define FFTW_EXTERN extern __declspec(dllimport)
|
||||
# endif
|
||||
#else
|
||||
# define FFTW_EXTERN extern
|
||||
#endif
|
||||
|
||||
enum fftw_r2r_kind_do_not_use_me {
|
||||
FFTW_R2HC=0, FFTW_HC2R=1, FFTW_DHT=2,
|
||||
FFTW_REDFT00=3, FFTW_REDFT01=4, FFTW_REDFT10=5, FFTW_REDFT11=6,
|
||||
FFTW_RODFT00=7, FFTW_RODFT01=8, FFTW_RODFT10=9, FFTW_RODFT11=10
|
||||
};
|
||||
|
||||
struct fftw_iodim_do_not_use_me {
|
||||
int n; /* dimension size */
|
||||
int is; /* input stride */
|
||||
int os; /* output stride */
|
||||
};
|
||||
|
||||
#include <stddef.h> /* for ptrdiff_t */
|
||||
struct fftw_iodim64_do_not_use_me {
|
||||
ptrdiff_t n; /* dimension size */
|
||||
ptrdiff_t is; /* input stride */
|
||||
ptrdiff_t os; /* output stride */
|
||||
};
|
||||
|
||||
typedef void (*fftw_write_char_func_do_not_use_me)(char c, void *);
|
||||
typedef int (*fftw_read_char_func_do_not_use_me)(void *);
|
||||
|
||||
/*
|
||||
huge second-order macro that defines prototypes for all API
|
||||
functions. We expand this macro for each supported precision
|
||||
|
||||
X: name-mangling macro
|
||||
R: real data type
|
||||
C: complex data type
|
||||
*/
|
||||
|
||||
#define FFTW_DEFINE_API(X, R, C) \
|
||||
\
|
||||
FFTW_DEFINE_COMPLEX(R, C); \
|
||||
\
|
||||
typedef struct X(plan_s) *X(plan); \
|
||||
\
|
||||
typedef struct fftw_iodim_do_not_use_me X(iodim); \
|
||||
typedef struct fftw_iodim64_do_not_use_me X(iodim64); \
|
||||
\
|
||||
typedef enum fftw_r2r_kind_do_not_use_me X(r2r_kind); \
|
||||
\
|
||||
typedef fftw_write_char_func_do_not_use_me X(write_char_func); \
|
||||
typedef fftw_read_char_func_do_not_use_me X(read_char_func); \
|
||||
\
|
||||
FFTW_EXTERN void X(execute)(const X(plan) p); \
|
||||
\
|
||||
FFTW_EXTERN X(plan) X(plan_dft)(int rank, const int *n, \
|
||||
C *in, C *out, int sign, unsigned flags); \
|
||||
\
|
||||
FFTW_EXTERN X(plan) X(plan_dft_1d)(int n, C *in, C *out, int sign, \
|
||||
unsigned flags); \
|
||||
FFTW_EXTERN X(plan) X(plan_dft_2d)(int n0, int n1, \
|
||||
C *in, C *out, int sign, unsigned flags); \
|
||||
FFTW_EXTERN X(plan) X(plan_dft_3d)(int n0, int n1, int n2, \
|
||||
C *in, C *out, int sign, unsigned flags); \
|
||||
\
|
||||
FFTW_EXTERN X(plan) X(plan_many_dft)(int rank, const int *n, \
|
||||
int howmany, \
|
||||
C *in, const int *inembed, \
|
||||
int istride, int idist, \
|
||||
C *out, const int *onembed, \
|
||||
int ostride, int odist, \
|
||||
int sign, unsigned flags); \
|
||||
\
|
||||
FFTW_EXTERN X(plan) X(plan_guru_dft)(int rank, const X(iodim) *dims, \
|
||||
int howmany_rank, \
|
||||
const X(iodim) *howmany_dims, \
|
||||
C *in, C *out, \
|
||||
int sign, unsigned flags); \
|
||||
FFTW_EXTERN X(plan) X(plan_guru_split_dft)(int rank, const X(iodim) *dims, \
|
||||
int howmany_rank, \
|
||||
const X(iodim) *howmany_dims, \
|
||||
R *ri, R *ii, R *ro, R *io, \
|
||||
unsigned flags); \
|
||||
\
|
||||
FFTW_EXTERN X(plan) X(plan_guru64_dft)(int rank, \
|
||||
const X(iodim64) *dims, \
|
||||
int howmany_rank, \
|
||||
const X(iodim64) *howmany_dims, \
|
||||
C *in, C *out, \
|
||||
int sign, unsigned flags); \
|
||||
FFTW_EXTERN X(plan) X(plan_guru64_split_dft)(int rank, \
|
||||
const X(iodim64) *dims, \
|
||||
int howmany_rank, \
|
||||
const X(iodim64) *howmany_dims, \
|
||||
R *ri, R *ii, R *ro, R *io, \
|
||||
unsigned flags); \
|
||||
\
|
||||
FFTW_EXTERN void X(execute_dft)(const X(plan) p, C *in, C *out); \
|
||||
FFTW_EXTERN void X(execute_split_dft)(const X(plan) p, R *ri, R *ii, \
|
||||
R *ro, R *io); \
|
||||
\
|
||||
FFTW_EXTERN X(plan) X(plan_many_dft_r2c)(int rank, const int *n, \
|
||||
int howmany, \
|
||||
R *in, const int *inembed, \
|
||||
int istride, int idist, \
|
||||
C *out, const int *onembed, \
|
||||
int ostride, int odist, \
|
||||
unsigned flags); \
|
||||
\
|
||||
FFTW_EXTERN X(plan) X(plan_dft_r2c)(int rank, const int *n, \
|
||||
R *in, C *out, unsigned flags); \
|
||||
\
|
||||
FFTW_EXTERN X(plan) X(plan_dft_r2c_1d)(int n,R *in,C *out,unsigned flags); \
|
||||
FFTW_EXTERN X(plan) X(plan_dft_r2c_2d)(int n0, int n1, \
|
||||
R *in, C *out, unsigned flags); \
|
||||
FFTW_EXTERN X(plan) X(plan_dft_r2c_3d)(int n0, int n1, \
|
||||
int n2, \
|
||||
R *in, C *out, unsigned flags); \
|
||||
\
|
||||
\
|
||||
FFTW_EXTERN X(plan) X(plan_many_dft_c2r)(int rank, const int *n, \
|
||||
int howmany, \
|
||||
C *in, const int *inembed, \
|
||||
int istride, int idist, \
|
||||
R *out, const int *onembed, \
|
||||
int ostride, int odist, \
|
||||
unsigned flags); \
|
||||
\
|
||||
FFTW_EXTERN X(plan) X(plan_dft_c2r)(int rank, const int *n, \
|
||||
C *in, R *out, unsigned flags); \
|
||||
\
|
||||
FFTW_EXTERN X(plan) X(plan_dft_c2r_1d)(int n,C *in,R *out,unsigned flags); \
|
||||
FFTW_EXTERN X(plan) X(plan_dft_c2r_2d)(int n0, int n1, \
|
||||
C *in, R *out, unsigned flags); \
|
||||
FFTW_EXTERN X(plan) X(plan_dft_c2r_3d)(int n0, int n1, \
|
||||
int n2, \
|
||||
C *in, R *out, unsigned flags); \
|
||||
\
|
||||
FFTW_EXTERN X(plan) X(plan_guru_dft_r2c)(int rank, const X(iodim) *dims, \
|
||||
int howmany_rank, \
|
||||
const X(iodim) *howmany_dims, \
|
||||
R *in, C *out, \
|
||||
unsigned flags); \
|
||||
FFTW_EXTERN X(plan) X(plan_guru_dft_c2r)(int rank, const X(iodim) *dims, \
|
||||
int howmany_rank, \
|
||||
const X(iodim) *howmany_dims, \
|
||||
C *in, R *out, \
|
||||
unsigned flags); \
|
||||
\
|
||||
FFTW_EXTERN X(plan) X(plan_guru_split_dft_r2c)( \
|
||||
int rank, const X(iodim) *dims, \
|
||||
int howmany_rank, \
|
||||
const X(iodim) *howmany_dims, \
|
||||
R *in, R *ro, R *io, \
|
||||
unsigned flags); \
|
||||
FFTW_EXTERN X(plan) X(plan_guru_split_dft_c2r)( \
|
||||
int rank, const X(iodim) *dims, \
|
||||
int howmany_rank, \
|
||||
const X(iodim) *howmany_dims, \
|
||||
R *ri, R *ii, R *out, \
|
||||
unsigned flags); \
|
||||
\
|
||||
FFTW_EXTERN X(plan) X(plan_guru64_dft_r2c)(int rank, \
|
||||
const X(iodim64) *dims, \
|
||||
int howmany_rank, \
|
||||
const X(iodim64) *howmany_dims, \
|
||||
R *in, C *out, \
|
||||
unsigned flags); \
|
||||
FFTW_EXTERN X(plan) X(plan_guru64_dft_c2r)(int rank, \
|
||||
const X(iodim64) *dims, \
|
||||
int howmany_rank, \
|
||||
const X(iodim64) *howmany_dims, \
|
||||
C *in, R *out, \
|
||||
unsigned flags); \
|
||||
\
|
||||
FFTW_EXTERN X(plan) X(plan_guru64_split_dft_r2c)( \
|
||||
int rank, const X(iodim64) *dims, \
|
||||
int howmany_rank, \
|
||||
const X(iodim64) *howmany_dims, \
|
||||
R *in, R *ro, R *io, \
|
||||
unsigned flags); \
|
||||
FFTW_EXTERN X(plan) X(plan_guru64_split_dft_c2r)( \
|
||||
int rank, const X(iodim64) *dims, \
|
||||
int howmany_rank, \
|
||||
const X(iodim64) *howmany_dims, \
|
||||
R *ri, R *ii, R *out, \
|
||||
unsigned flags); \
|
||||
\
|
||||
FFTW_EXTERN void X(execute_dft_r2c)(const X(plan) p, R *in, C *out); \
|
||||
FFTW_EXTERN void X(execute_dft_c2r)(const X(plan) p, C *in, R *out); \
|
||||
\
|
||||
FFTW_EXTERN void X(execute_split_dft_r2c)(const X(plan) p, \
|
||||
R *in, R *ro, R *io); \
|
||||
FFTW_EXTERN void X(execute_split_dft_c2r)(const X(plan) p, \
|
||||
R *ri, R *ii, R *out); \
|
||||
\
|
||||
FFTW_EXTERN X(plan) X(plan_many_r2r)(int rank, const int *n, \
|
||||
int howmany, \
|
||||
R *in, const int *inembed, \
|
||||
int istride, int idist, \
|
||||
R *out, const int *onembed, \
|
||||
int ostride, int odist, \
|
||||
const X(r2r_kind) *kind, unsigned flags); \
|
||||
\
|
||||
FFTW_EXTERN X(plan) X(plan_r2r)(int rank, const int *n, R *in, R *out, \
|
||||
const X(r2r_kind) *kind, unsigned flags); \
|
||||
\
|
||||
FFTW_EXTERN X(plan) X(plan_r2r_1d)(int n, R *in, R *out, \
|
||||
X(r2r_kind) kind, unsigned flags); \
|
||||
FFTW_EXTERN X(plan) X(plan_r2r_2d)(int n0, int n1, R *in, R *out, \
|
||||
X(r2r_kind) kind0, X(r2r_kind) kind1, \
|
||||
unsigned flags); \
|
||||
FFTW_EXTERN X(plan) X(plan_r2r_3d)(int n0, int n1, int n2, \
|
||||
R *in, R *out, X(r2r_kind) kind0, \
|
||||
X(r2r_kind) kind1, X(r2r_kind) kind2, \
|
||||
unsigned flags); \
|
||||
\
|
||||
FFTW_EXTERN X(plan) X(plan_guru_r2r)(int rank, const X(iodim) *dims, \
|
||||
int howmany_rank, \
|
||||
const X(iodim) *howmany_dims, \
|
||||
R *in, R *out, \
|
||||
const X(r2r_kind) *kind, unsigned flags); \
|
||||
\
|
||||
FFTW_EXTERN X(plan) X(plan_guru64_r2r)(int rank, const X(iodim64) *dims, \
|
||||
int howmany_rank, \
|
||||
const X(iodim64) *howmany_dims, \
|
||||
R *in, R *out, \
|
||||
const X(r2r_kind) *kind, unsigned flags); \
|
||||
\
|
||||
FFTW_EXTERN void X(execute_r2r)(const X(plan) p, R *in, R *out); \
|
||||
\
|
||||
FFTW_EXTERN void X(destroy_plan)(X(plan) p); \
|
||||
FFTW_EXTERN void X(forget_wisdom)(void); \
|
||||
FFTW_EXTERN void X(cleanup)(void); \
|
||||
\
|
||||
FFTW_EXTERN void X(set_timelimit)(double t); \
|
||||
\
|
||||
FFTW_EXTERN void X(plan_with_nthreads)(int nthreads); \
|
||||
FFTW_EXTERN int X(init_threads)(void); \
|
||||
FFTW_EXTERN void X(cleanup_threads)(void); \
|
||||
\
|
||||
FFTW_EXTERN int X(export_wisdom_to_filename)(const char *filename); \
|
||||
FFTW_EXTERN void X(export_wisdom_to_file)(FILE *output_file); \
|
||||
FFTW_EXTERN char *X(export_wisdom_to_string)(void); \
|
||||
FFTW_EXTERN void X(export_wisdom)(X(write_char_func) write_char, \
|
||||
void *data); \
|
||||
FFTW_EXTERN int X(import_system_wisdom)(void); \
|
||||
FFTW_EXTERN int X(import_wisdom_from_filename)(const char *filename); \
|
||||
FFTW_EXTERN int X(import_wisdom_from_file)(FILE *input_file); \
|
||||
FFTW_EXTERN int X(import_wisdom_from_string)(const char *input_string); \
|
||||
FFTW_EXTERN int X(import_wisdom)(X(read_char_func) read_char, void *data); \
|
||||
\
|
||||
FFTW_EXTERN void X(fprint_plan)(const X(plan) p, FILE *output_file); \
|
||||
FFTW_EXTERN void X(print_plan)(const X(plan) p); \
|
||||
FFTW_EXTERN char *X(sprint_plan)(const X(plan) p); \
|
||||
\
|
||||
FFTW_EXTERN void *X(malloc)(size_t n); \
|
||||
FFTW_EXTERN R *X(alloc_real)(size_t n); \
|
||||
FFTW_EXTERN C *X(alloc_complex)(size_t n); \
|
||||
FFTW_EXTERN void X(free)(void *p); \
|
||||
\
|
||||
FFTW_EXTERN void X(flops)(const X(plan) p, \
|
||||
double *add, double *mul, double *fmas); \
|
||||
FFTW_EXTERN double X(estimate_cost)(const X(plan) p); \
|
||||
FFTW_EXTERN double X(cost)(const X(plan) p); \
|
||||
\
|
||||
FFTW_EXTERN int X(alignment_of)(R *p); \
|
||||
FFTW_EXTERN const char X(version)[]; \
|
||||
FFTW_EXTERN const char X(cc)[]; \
|
||||
FFTW_EXTERN const char X(codelet_optim)[];
|
||||
|
||||
|
||||
/* end of FFTW_DEFINE_API macro */
|
||||
|
||||
FFTW_DEFINE_API(FFTW_MANGLE_DOUBLE, double, fftw_complex)
|
||||
FFTW_DEFINE_API(FFTW_MANGLE_FLOAT, float, fftwf_complex)
|
||||
FFTW_DEFINE_API(FFTW_MANGLE_LONG_DOUBLE, long double, fftwl_complex)
|
||||
|
||||
/* __float128 (quad precision) is a gcc extension on i386, x86_64, and ia64
|
||||
for gcc >= 4.6 (compiled in FFTW with --enable-quad-precision) */
|
||||
#if (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6)) \
|
||||
&& !(defined(__ICC) || defined(__INTEL_COMPILER)) \
|
||||
&& (defined(__i386__) || defined(__x86_64__) || defined(__ia64__))
|
||||
# if !defined(FFTW_NO_Complex) && defined(_Complex_I) && defined(complex) && defined(I)
|
||||
/* note: __float128 is a typedef, which is not supported with the _Complex
|
||||
keyword in gcc, so instead we use this ugly __attribute__ version.
|
||||
However, we can't simply pass the __attribute__ version to
|
||||
FFTW_DEFINE_API because the __attribute__ confuses gcc in pointer
|
||||
types. Hence redefining FFTW_DEFINE_COMPLEX. Ugh. */
|
||||
# undef FFTW_DEFINE_COMPLEX
|
||||
# define FFTW_DEFINE_COMPLEX(R, C) typedef _Complex float __attribute__((mode(TC))) C
|
||||
# endif
|
||||
FFTW_DEFINE_API(FFTW_MANGLE_QUAD, __float128, fftwq_complex)
|
||||
#endif
|
||||
|
||||
#define FFTW_FORWARD (-1)
|
||||
#define FFTW_BACKWARD (+1)
|
||||
|
||||
#define FFTW_NO_TIMELIMIT (-1.0)
|
||||
|
||||
/* documented flags */
|
||||
#define FFTW_MEASURE (0U)
|
||||
#define FFTW_DESTROY_INPUT (1U << 0)
|
||||
#define FFTW_UNALIGNED (1U << 1)
|
||||
#define FFTW_CONSERVE_MEMORY (1U << 2)
|
||||
#define FFTW_EXHAUSTIVE (1U << 3) /* NO_EXHAUSTIVE is default */
|
||||
#define FFTW_PRESERVE_INPUT (1U << 4) /* cancels FFTW_DESTROY_INPUT */
|
||||
#define FFTW_PATIENT (1U << 5) /* IMPATIENT is default */
|
||||
#define FFTW_ESTIMATE (1U << 6)
|
||||
#define FFTW_WISDOM_ONLY (1U << 21)
|
||||
|
||||
/* undocumented beyond-guru flags */
|
||||
#define FFTW_ESTIMATE_PATIENT (1U << 7)
|
||||
#define FFTW_BELIEVE_PCOST (1U << 8)
|
||||
#define FFTW_NO_DFT_R2HC (1U << 9)
|
||||
#define FFTW_NO_NONTHREADED (1U << 10)
|
||||
#define FFTW_NO_BUFFERING (1U << 11)
|
||||
#define FFTW_NO_INDIRECT_OP (1U << 12)
|
||||
#define FFTW_ALLOW_LARGE_GENERIC (1U << 13) /* NO_LARGE_GENERIC is default */
|
||||
#define FFTW_NO_RANK_SPLITS (1U << 14)
|
||||
#define FFTW_NO_VRANK_SPLITS (1U << 15)
|
||||
#define FFTW_NO_VRECURSE (1U << 16)
|
||||
#define FFTW_NO_SIMD (1U << 17)
|
||||
#define FFTW_NO_SLOW (1U << 18)
|
||||
#define FFTW_NO_FIXED_RADIX_LARGE_N (1U << 19)
|
||||
#define FFTW_ALLOW_PRUNING (1U << 20)
|
||||
|
||||
#ifdef __cplusplus
|
||||
} /* extern "C" */
|
||||
#endif /* __cplusplus */
|
||||
|
||||
#endif /* FFTW3_H */
|
@ -261,6 +261,7 @@ GridUnopClass(UnaryExp, exp(a));
|
||||
GridBinOpClass(BinaryAdd, lhs + rhs);
|
||||
GridBinOpClass(BinarySub, lhs - rhs);
|
||||
GridBinOpClass(BinaryMul, lhs *rhs);
|
||||
GridBinOpClass(BinaryDiv, lhs /rhs);
|
||||
|
||||
GridBinOpClass(BinaryAnd, lhs &rhs);
|
||||
GridBinOpClass(BinaryOr, lhs | rhs);
|
||||
@ -385,6 +386,7 @@ GRID_DEF_UNOP(exp, UnaryExp);
|
||||
GRID_DEF_BINOP(operator+, BinaryAdd);
|
||||
GRID_DEF_BINOP(operator-, BinarySub);
|
||||
GRID_DEF_BINOP(operator*, BinaryMul);
|
||||
GRID_DEF_BINOP(operator/, BinaryDiv);
|
||||
|
||||
GRID_DEF_BINOP(operator&, BinaryAnd);
|
||||
GRID_DEF_BINOP(operator|, BinaryOr);
|
||||
|
@ -65,9 +65,6 @@ public:
|
||||
|
||||
class LatticeExpressionBase {};
|
||||
|
||||
template<class T> using Vector = std::vector<T,alignedAllocator<T> >; // Aligned allocator??
|
||||
template<class T> using Matrix = std::vector<std::vector<T,alignedAllocator<T> > >; // Aligned allocator??
|
||||
|
||||
template <typename Op, typename T1>
|
||||
class LatticeUnaryExpression : public std::pair<Op,std::tuple<T1> > , public LatticeExpressionBase {
|
||||
public:
|
||||
@ -303,17 +300,6 @@ PARALLEL_FOR_LOOP
|
||||
*this = (*this)+r;
|
||||
return *this;
|
||||
}
|
||||
|
||||
strong_inline friend Lattice<vobj> operator / (const Lattice<vobj> &lhs,const Lattice<vobj> &rhs){
|
||||
conformable(lhs,rhs);
|
||||
Lattice<vobj> ret(lhs._grid);
|
||||
PARALLEL_FOR_LOOP
|
||||
for(int ss=0;ss<lhs._grid->oSites();ss++){
|
||||
ret._odata[ss] = lhs._odata[ss]*pow(rhs._odata[ss],-1.0);
|
||||
}
|
||||
return ret;
|
||||
};
|
||||
|
||||
}; // class Lattice
|
||||
|
||||
template<class vobj> std::ostream& operator<< (std::ostream& stream, const Lattice<vobj> &o){
|
||||
|
@ -154,7 +154,7 @@ PARALLEL_FOR_LOOP
|
||||
template<class vobj,class sobj>
|
||||
void peekLocalSite(sobj &s,const Lattice<vobj> &l,std::vector<int> &site){
|
||||
|
||||
GridBase *grid=l._grid;
|
||||
GridBase *grid = l._grid;
|
||||
|
||||
typedef typename vobj::scalar_type scalar_type;
|
||||
typedef typename vobj::vector_type vector_type;
|
||||
@ -164,16 +164,18 @@ PARALLEL_FOR_LOOP
|
||||
assert( l.checkerboard== l._grid->CheckerBoard(site));
|
||||
assert( sizeof(sobj)*Nsimd == sizeof(vobj));
|
||||
|
||||
static const int words=sizeof(vobj)/sizeof(vector_type);
|
||||
int odx,idx;
|
||||
idx= grid->iIndex(site);
|
||||
odx= grid->oIndex(site);
|
||||
|
||||
std::vector<sobj> buf(Nsimd);
|
||||
|
||||
extract(l._odata[odx],buf);
|
||||
scalar_type * vp = (scalar_type *)&l._odata[odx];
|
||||
scalar_type * pt = (scalar_type *)&s;
|
||||
|
||||
for(int w=0;w<words;w++){
|
||||
pt[w] = vp[idx+w*Nsimd];
|
||||
}
|
||||
|
||||
s = buf[idx];
|
||||
|
||||
return;
|
||||
};
|
||||
|
||||
@ -190,18 +192,17 @@ PARALLEL_FOR_LOOP
|
||||
assert( l.checkerboard== l._grid->CheckerBoard(site));
|
||||
assert( sizeof(sobj)*Nsimd == sizeof(vobj));
|
||||
|
||||
static const int words=sizeof(vobj)/sizeof(vector_type);
|
||||
int odx,idx;
|
||||
idx= grid->iIndex(site);
|
||||
odx= grid->oIndex(site);
|
||||
|
||||
std::vector<sobj> buf(Nsimd);
|
||||
|
||||
// extract-modify-merge cycle is easiest way and this is not perf critical
|
||||
extract(l._odata[odx],buf);
|
||||
scalar_type * vp = (scalar_type *)&l._odata[odx];
|
||||
scalar_type * pt = (scalar_type *)&s;
|
||||
|
||||
buf[idx] = s;
|
||||
|
||||
merge(l._odata[odx],buf);
|
||||
for(int w=0;w<words;w++){
|
||||
vp[idx+w*Nsimd] = pt[w];
|
||||
}
|
||||
|
||||
return;
|
||||
};
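// Editor's sketch (illustrative, not Grid code): the rewrite above avoids the extract/merge cycle
// by indexing scalar words directly. Assuming the vobj convention used in this file (scalar_type
// and vector_type typedefs, Nsimd lanes per vector word), lane "idx" of word "w" lives at scalar
// offset idx + w*Nsimd:
template<class vobj,class sobj>
void peek_lane(sobj &s,const vobj &v,int idx,int Nsimd)
{
  typedef typename vobj::scalar_type scalar_type;
  typedef typename vobj::vector_type vector_type;
  const int words = sizeof(vobj)/sizeof(vector_type);

  const scalar_type *vp = (const scalar_type *)&v;
  scalar_type       *pt = (scalar_type *)&s;
  for(int w=0;w<words;w++) pt[w] = vp[idx+w*Nsimd];
}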
|
||||
|
@ -294,11 +294,12 @@ namespace Grid {
|
||||
int rank,o_idx,i_idx;
|
||||
_grid->GlobalIndexToGlobalCoor(gidx,gcoor);
|
||||
_grid->GlobalCoorToRankIndex(rank,o_idx,i_idx,gcoor);
|
||||
|
||||
|
||||
int l_idx=generator_idx(o_idx,i_idx);
|
||||
|
||||
std::vector<int> site_seeds(4);
|
||||
for(int i=0;i<4;i++){
|
||||
|
||||
const int num_rand_seed=16;
|
||||
std::vector<int> site_seeds(num_rand_seed);
|
||||
for(int i=0;i<site_seeds.size();i++){
|
||||
site_seeds[i]= ui(pseeder);
|
||||
}
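// Editor's sketch (illustrative, not Grid code): the change above reseeds each site engine from a
// fixed-length block of num_rand_seed words rather than four. With std::mt19937 standing in for
// Grid's engine, the same idea via std::seed_seq:
#include <random>
#include <vector>

std::mt19937 seed_site_engine(std::mt19937 &pseeder,int num_rand_seed=16)
{
  std::uniform_int_distribution<int> ui;
  std::vector<int> site_seeds(num_rand_seed);
  for(size_t i=0;i<site_seeds.size();i++) site_seeds[i]= ui(pseeder);

  std::seed_seq sseq(site_seeds.begin(),site_seeds.end());
  return std::mt19937(sseq);
}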
|
||||
|
||||
|
@ -457,7 +457,7 @@ class BinaryIO {
|
||||
// available (how short sighted is that?)
|
||||
//////////////////////////////////////////////////////////
|
||||
Umu = zero;
|
||||
static uint32_t csum=0;
|
||||
static uint32_t csum; csum=0;
|
||||
fobj fileObj;
|
||||
static sobj siteObj; // Static to place in symmetric region for SHMEM
|
||||
|
||||
|
@ -50,6 +50,30 @@ namespace QCD {
|
||||
mass(_mass)
|
||||
{ }
|
||||
|
||||
template<class Impl>
|
||||
void CayleyFermion5D<Impl>::Dminus(const FermionField &psi, FermionField &chi)
|
||||
{
|
||||
int Ls=this->Ls;
|
||||
FermionField tmp(psi._grid);
|
||||
|
||||
this->DW(psi,tmp,DaggerNo);
|
||||
|
||||
for(int s=0;s<Ls;s++){
|
||||
axpby_ssp(chi,Coeff_t(1.0),psi,-cs[s],tmp,s,s);// chi = (1-c[s] D_W) psi
|
||||
}
|
||||
}
|
||||
template<class Impl>
|
||||
void CayleyFermion5D<Impl>::DminusDag(const FermionField &psi, FermionField &chi)
|
||||
{
|
||||
int Ls=this->Ls;
|
||||
FermionField tmp(psi._grid);
|
||||
|
||||
this->DW(psi,tmp,DaggerYes);
|
||||
|
||||
for(int s=0;s<Ls;s++){
|
||||
axpby_ssp(chi,Coeff_t(1.0),psi,-cs[s],tmp,s,s);// chi = (1-c[s] D_W) psi
|
||||
}
|
||||
}
|
||||
template<class Impl>
|
||||
void CayleyFermion5D<Impl>::M5D (const FermionField &psi, FermionField &chi)
|
||||
{
|
||||
|
@ -56,6 +56,9 @@ namespace Grid {
|
||||
virtual void M5D (const FermionField &psi, FermionField &chi);
|
||||
virtual void M5Ddag(const FermionField &psi, FermionField &chi);
|
||||
|
||||
virtual void Dminus(const FermionField &psi, FermionField &chi);
|
||||
virtual void DminusDag(const FermionField &psi, FermionField &chi);
|
||||
|
||||
/////////////////////////////////////////////////////
|
||||
// Instantiate different versions depending on Impl
|
||||
/////////////////////////////////////////////////////
|
||||
@ -117,6 +120,7 @@ namespace Grid {
|
||||
GridRedBlackCartesian &FourDimRedBlackGrid,
|
||||
RealD _mass,RealD _M5,const ImplParams &p= ImplParams());
|
||||
|
||||
|
||||
protected:
|
||||
void SetCoefficientsZolotarev(RealD zolohi,Approx::zolotarev_data *zdata,RealD b,RealD c);
|
||||
void SetCoefficientsTanh(Approx::zolotarev_data *zdata,RealD b,RealD c);
|
||||
|
@ -42,6 +42,10 @@ namespace Grid {
|
||||
INHERIT_IMPL_TYPES(Impl);
|
||||
public:
|
||||
|
||||
void MomentumSpacePropagator(FermionField &out,const FermionField &in,RealD _m) {
|
||||
this->MomentumSpacePropagatorHt(out,in,_m);
|
||||
};
|
||||
|
||||
virtual void Instantiatable(void) {};
|
||||
// Constructors
|
||||
DomainWallFermion(GaugeField &_Umu,
|
||||
@ -51,6 +55,7 @@ namespace Grid {
|
||||
GridRedBlackCartesian &FourDimRedBlackGrid,
|
||||
RealD _mass,RealD _M5,const ImplParams &p= ImplParams()) :
|
||||
|
||||
|
||||
CayleyFermion5D<Impl>(_Umu,
|
||||
FiveDimGrid,
|
||||
FiveDimRedBlackGrid,
|
||||
|
@ -91,6 +91,20 @@ namespace Grid {
|
||||
virtual void Mdiag (const FermionField &in, FermionField &out) { Mooee(in,out);}; // Same as Mooee applied to both CB's
|
||||
virtual void Mdir (const FermionField &in, FermionField &out,int dir,int disp)=0; // case by case Wilson, Clover, Cayley, ContFrac, PartFrac
|
||||
|
||||
|
||||
virtual void MomentumSpacePropagator(FermionField &out,const FermionField &in,RealD _m) { assert(0);};
|
||||
|
||||
virtual void FreePropagator(const FermionField &in,FermionField &out,RealD mass) {
|
||||
FFT theFFT((GridCartesian *) in._grid);
|
||||
|
||||
FermionField in_k(in._grid);
|
||||
FermionField prop_k(in._grid);
|
||||
|
||||
theFFT.FFT_all_dim(in_k,in,FFT::forward);
|
||||
this->MomentumSpacePropagator(prop_k,in_k,mass);
|
||||
theFFT.FFT_all_dim(out,prop_k,FFT::backward);
|
||||
};
|
||||
|
||||
///////////////////////////////////////////////
|
||||
// Updates gauge field during HMC
|
||||
///////////////////////////////////////////////
|
||||
|
@@ -33,511 +33,500 @@ directory
#define GRID_QCD_FERMION_OPERATOR_IMPL_H

namespace Grid {
namespace QCD {

  //////////////////////////////////////////////
  // Template parameter class constructs to package
  // externally controlled Fermion implementations
  // in orthogonal directions
  //
  // Ultimately need Impl to always define types where XXX is opaque
  //
  //    typedef typename XXX               Simd;
  //    typedef typename XXX     GaugeLinkField;
  //    typedef typename XXX         GaugeField;
  //    typedef typename XXX      GaugeActField;
  //    typedef typename XXX       FermionField;
  //    typedef typename XXX  DoubledGaugeField;
  //    typedef typename XXX         SiteSpinor;
  //    typedef typename XXX     SiteHalfSpinor;
  //    typedef typename XXX         Compressor;
  //
  // and Methods:
  //    void ImportGauge(GridBase *GaugeGrid,DoubledGaugeField &Uds,const GaugeField &Umu)
  //    void DoubleStore(GridBase *GaugeGrid,DoubledGaugeField &Uds,const GaugeField &Umu)
  //    void multLink(SiteHalfSpinor &phi,const SiteDoubledGaugeField &U,const SiteHalfSpinor &chi,int mu,StencilEntry *SE,StencilImpl &St)
  //    void InsertForce4D(GaugeField &mat,const FermionField &Btilde,const FermionField &A,int mu)
  //    void InsertForce5D(GaugeField &mat,const FermionField &Btilde,const FermionField &A,int mu)
  //
  //
  // To acquire the typedefs from "Base" (either a base class or template param) use:
  //
  // INHERIT_GIMPL_TYPES(Base)
  // INHERIT_FIMPL_TYPES(Base)
  // INHERIT_IMPL_TYPES(Base)
  //
  // The Fermion operators will do the following:
  //
  // struct MyOpParams {
  //   RealD mass;
  // };
  //
  //
  // template<class Impl>
  // class MyOp : public Impl {
  // public:
  //
  //    INHERIT_ALL_IMPL_TYPES(Impl);
  //
  //    MyOp(MyOpParams Myparm, ImplParams &ImplParam) : Impl(ImplParam)
  //    {
  //
  //    };
  //
  // }
  //////////////////////////////////////////////

  ////////////////////////////////////////////////////////////////////////
  // Implementation dependent fermion types
  ////////////////////////////////////////////////////////////////////////
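  // A slightly expanded (hypothetical) sketch of the pattern described above: the
  // operator body only ever refers to the inherited typedefs, so the same code
  // compiles against WilsonImpl, GparityWilsonImpl, DomainWallVec5dImpl, etc.
  //
  // template<class Impl>
  // class MyOp : public Impl {
  // public:
  //   INHERIT_IMPL_TYPES(Impl);                       // FermionField, DoubledGaugeField, ...
  //   void M(const FermionField &in, FermionField &out) {
  //     // kernel written purely in terms of Impl types and Impl::multLink
  //   }
  // };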
#define INHERIT_FIMPL_TYPES(Impl)\
  typedef typename Impl::FermionField      FermionField;      \
  typedef typename Impl::DoubledGaugeField DoubledGaugeField; \
  typedef typename Impl::SiteSpinor        SiteSpinor;        \
  typedef typename Impl::SiteHalfSpinor    SiteHalfSpinor;    \
  typedef typename Impl::Compressor        Compressor;        \
  typedef typename Impl::StencilImpl       StencilImpl;       \
  typedef typename Impl::ImplParams        ImplParams;        \
  typedef typename Impl::Coeff_t           Coeff_t;

#define INHERIT_IMPL_TYPES(Base) \
  INHERIT_GIMPL_TYPES(Base)      \
  INHERIT_FIMPL_TYPES(Base)
  /////////////////////////////////////////////////////////////////////////////
  // Single flavour four spinors with colour index
  /////////////////////////////////////////////////////////////////////////////
  template <class S, class Representation = FundamentalRepresentation,class _Coeff_t = RealD >
  class WilsonImpl : public PeriodicGaugeImpl<GaugeImplTypes<S, Representation::Dimension > > {
  public:

    static const int Dimension = Representation::Dimension;
    typedef PeriodicGaugeImpl<GaugeImplTypes<S, Dimension > > Gimpl;

    //Necessary?
    constexpr bool is_fundamental() const{return Dimension == Nc ? 1 : 0;}

    const bool LsVectorised=false;
    typedef _Coeff_t Coeff_t;

    INHERIT_GIMPL_TYPES(Gimpl);

    template <typename vtype> using iImplSpinor            = iScalar<iVector<iVector<vtype, Dimension>, Ns> >;
    template <typename vtype> using iImplHalfSpinor        = iScalar<iVector<iVector<vtype, Dimension>, Nhs> >;
    template <typename vtype> using iImplDoubledGaugeField = iVector<iScalar<iMatrix<vtype, Dimension> >, Nds>;

    typedef iImplSpinor<Simd>            SiteSpinor;
    typedef iImplHalfSpinor<Simd>        SiteHalfSpinor;
    typedef iImplDoubledGaugeField<Simd> SiteDoubledGaugeField;

    typedef Lattice<SiteSpinor>            FermionField;
    typedef Lattice<SiteDoubledGaugeField> DoubledGaugeField;

    typedef WilsonCompressor<SiteHalfSpinor, SiteSpinor> Compressor;
    typedef WilsonImplParams ImplParams;
    typedef WilsonStencil<SiteSpinor, SiteHalfSpinor> StencilImpl;

    ImplParams Params;

    WilsonImpl(const ImplParams &p = ImplParams()) : Params(p){};

    bool overlapCommsCompute(void) { return Params.overlapCommsCompute; };

    inline void multLink(SiteHalfSpinor &phi,
                         const SiteDoubledGaugeField &U,
                         const SiteHalfSpinor &chi,
                         int mu,
                         StencilEntry *SE,
                         StencilImpl &St) {
      mult(&phi(), &U(mu), &chi());
    }

    template <class ref>
    inline void loadLinkElement(Simd &reg, ref &memory) {
      reg = memory;
    }

    inline void DoubleStore(GridBase *GaugeGrid,
                            DoubledGaugeField &Uds,
                            const GaugeField &Umu) {
      conformable(Uds._grid, GaugeGrid);
      conformable(Umu._grid, GaugeGrid);
      GaugeLinkField U(GaugeGrid);
      for (int mu = 0; mu < Nd; mu++) {
        U = PeekIndex<LorentzIndex>(Umu, mu);
        PokeIndex<LorentzIndex>(Uds, U, mu);
        U = adj(Cshift(U, mu, -1));
        PokeIndex<LorentzIndex>(Uds, U, mu + 4);
      }
    }

    inline void InsertForce4D(GaugeField &mat, FermionField &Btilde, FermionField &A,int mu){
      GaugeLinkField link(mat._grid);
      link = TraceIndex<SpinIndex>(outerProduct(Btilde,A));
      PokeIndex<LorentzIndex>(mat,link,mu);
    }

    inline void InsertForce5D(GaugeField &mat, FermionField &Btilde, FermionField &Atilde,int mu){

      int Ls=Btilde._grid->_fdimensions[0];
      GaugeLinkField tmp(mat._grid);
      tmp = zero;

      PARALLEL_FOR_LOOP
      for(int sss=0;sss<tmp._grid->oSites();sss++){
        int sU=sss;
        for(int s=0;s<Ls;s++){
          int sF = s+Ls*sU;
          tmp[sU] = tmp[sU]+ traceIndex<SpinIndex>(outerProduct(Btilde[sF],Atilde[sF])); // ordering here
        }
      }
      PokeIndex<LorentzIndex>(mat,tmp,mu);

    }
  };
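  // Note on the layout produced by WilsonImpl::DoubleStore above: slots 0..3 of the
  // doubled field hold the forward links U_mu(x), while slots 4..7 hold the adjoint
  // backward links, i.e. (doxygen LaTeX) \f$ U^{\dagger}_\mu(x-\hat\mu) \f$, so the
  // stencil can reach both neighbours of a site without an extra shift at apply time.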
  ////////////////////////////////////////////////////////////////////////////////////
  // Single flavour four spinors with colour index, 5d redblack
  ////////////////////////////////////////////////////////////////////////////////////
  template<class S,int Nrepresentation=Nc,class _Coeff_t = RealD>
  class DomainWallVec5dImpl : public PeriodicGaugeImpl< GaugeImplTypes< S,Nrepresentation> > {
  public:

    static const int Dimension = Nrepresentation;
    const bool LsVectorised=true;
    typedef _Coeff_t Coeff_t;
    typedef PeriodicGaugeImpl<GaugeImplTypes<S, Nrepresentation> > Gimpl;

    INHERIT_GIMPL_TYPES(Gimpl);

    template <typename vtype> using iImplSpinor            = iScalar<iVector<iVector<vtype, Nrepresentation>, Ns> >;
    template <typename vtype> using iImplHalfSpinor        = iScalar<iVector<iVector<vtype, Nrepresentation>, Nhs> >;
    template <typename vtype> using iImplDoubledGaugeField = iVector<iScalar<iMatrix<vtype, Nrepresentation> >, Nds>;
    template <typename vtype> using iImplGaugeField        = iVector<iScalar<iMatrix<vtype, Nrepresentation> >, Nd>;
    template <typename vtype> using iImplGaugeLink         = iScalar<iScalar<iMatrix<vtype, Nrepresentation> > >;

    typedef iImplSpinor<Simd>     SiteSpinor;
    typedef iImplHalfSpinor<Simd> SiteHalfSpinor;
    typedef Lattice<SiteSpinor>   FermionField;

    // Make the doubled gauge field a *scalar*
    typedef iImplDoubledGaugeField<typename Simd::scalar_type> SiteDoubledGaugeField; // This is a scalar
    typedef iImplGaugeField<typename Simd::scalar_type>        SiteScalarGaugeField;  // scalar
    typedef iImplGaugeLink<typename Simd::scalar_type>         SiteScalarGaugeLink;   // scalar

    typedef Lattice<SiteDoubledGaugeField> DoubledGaugeField;

    typedef WilsonCompressor<SiteHalfSpinor, SiteSpinor> Compressor;
    typedef WilsonImplParams ImplParams;
    typedef WilsonStencil<SiteSpinor, SiteHalfSpinor> StencilImpl;

    ImplParams Params;

    DomainWallVec5dImpl(const ImplParams &p = ImplParams()) : Params(p){};

    bool overlapCommsCompute(void) { return false; };

    template <class ref>
    inline void loadLinkElement(Simd &reg, ref &memory) {
      vsplat(reg, memory);
    }

    inline void multLink(SiteHalfSpinor &phi, const SiteDoubledGaugeField &U,
                         const SiteHalfSpinor &chi, int mu, StencilEntry *SE,
                         StencilImpl &St) {
      SiteGaugeLink UU;
      for (int i = 0; i < Nrepresentation; i++) {
        for (int j = 0; j < Nrepresentation; j++) {
          vsplat(UU()()(i, j), U(mu)()(i, j));
        }
      }
      mult(&phi(), &UU(), &chi());
    }

    inline void DoubleStore(GridBase *GaugeGrid, DoubledGaugeField &Uds,
                            const GaugeField &Umu) {
      SiteScalarGaugeField  ScalarUmu;
      SiteDoubledGaugeField ScalarUds;

      GaugeLinkField U(Umu._grid);
      GaugeField  Uadj(Umu._grid);
      for (int mu = 0; mu < Nd; mu++) {
        U = PeekIndex<LorentzIndex>(Umu, mu);
        U = adj(Cshift(U, mu, -1));
        PokeIndex<LorentzIndex>(Uadj, U, mu);
      }

      for (int lidx = 0; lidx < GaugeGrid->lSites(); lidx++) {
        std::vector<int> lcoor;
        GaugeGrid->LocalIndexToLocalCoor(lidx, lcoor);

        peekLocalSite(ScalarUmu, Umu, lcoor);
        for (int mu = 0; mu < 4; mu++) ScalarUds(mu) = ScalarUmu(mu);

        peekLocalSite(ScalarUmu, Uadj, lcoor);
        for (int mu = 0; mu < 4; mu++) ScalarUds(mu + 4) = ScalarUmu(mu);

        pokeLocalSite(ScalarUds, Uds, lcoor);
      }
    }

    inline void InsertForce4D(GaugeField &mat, FermionField &Btilde, FermionField &A, int mu)
    {
      assert(0);
    }

    inline void InsertForce5D(GaugeField &mat, FermionField &Btilde, FermionField &Atilde, int mu)
    {
      assert(0);
    }
  };
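  // In the Ls-vectorised layout above the SIMD lanes run over the fifth dimension, so a
  // single (scalar) gauge link applies to every lane; loadLinkElement/multLink broadcast
  // it with vsplat.  For illustration only (not code from this header):
  //
  //   vRealD r;
  //   vsplat(r, 2.0);   // every SIMD lane of r now holds 2.0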
  ////////////////////////////////////////////////////////////////////////////////////////
  // Flavour doubled spinors; is Gparity the only? what about C*?
  ////////////////////////////////////////////////////////////////////////////////////////
  template <class S, int Nrepresentation,class _Coeff_t = RealD>
  class GparityWilsonImpl : public ConjugateGaugeImpl<GaugeImplTypes<S, Nrepresentation> > {
  public:

    static const int Dimension = Nrepresentation;
    const bool LsVectorised=false;

    typedef _Coeff_t Coeff_t;
    typedef ConjugateGaugeImpl< GaugeImplTypes<S,Nrepresentation> > Gimpl;

    INHERIT_GIMPL_TYPES(Gimpl);

    template <typename vtype> using iImplSpinor            = iVector<iVector<iVector<vtype, Nrepresentation>, Ns>, Ngp>;
    template <typename vtype> using iImplHalfSpinor        = iVector<iVector<iVector<vtype, Nrepresentation>, Nhs>, Ngp>;
    template <typename vtype> using iImplDoubledGaugeField = iVector<iVector<iScalar<iMatrix<vtype, Nrepresentation> >, Nds>, Ngp>;

    typedef iImplSpinor<Simd>            SiteSpinor;
    typedef iImplHalfSpinor<Simd>        SiteHalfSpinor;
    typedef iImplDoubledGaugeField<Simd> SiteDoubledGaugeField;

    typedef Lattice<SiteSpinor>            FermionField;
    typedef Lattice<SiteDoubledGaugeField> DoubledGaugeField;

    typedef WilsonCompressor<SiteHalfSpinor, SiteSpinor> Compressor;
    typedef WilsonStencil<SiteSpinor, SiteHalfSpinor> StencilImpl;

    typedef GparityWilsonImplParams ImplParams;

    ImplParams Params;

    GparityWilsonImpl(const ImplParams &p = ImplParams()) : Params(p){};

    bool overlapCommsCompute(void) { return Params.overlapCommsCompute; };

    // provide the multiply by link that is differentiated between Gparity (with
    // flavour index) and non-Gparity
    inline void multLink(SiteHalfSpinor &phi, const SiteDoubledGaugeField &U,
                         const SiteHalfSpinor &chi, int mu, StencilEntry *SE,
                         StencilImpl &St) {

      typedef SiteHalfSpinor vobj;
      typedef typename SiteHalfSpinor::scalar_object sobj;

      vobj vtmp;
      sobj stmp;

      GridBase *grid = St._grid;

      const int Nsimd = grid->Nsimd();

      int direction = St._directions[mu];
      int distance  = St._distances[mu];
      int ptype     = St._permute_type[mu];
      int sl        = St._grid->_simd_layout[direction];

      // Fixme X.Y.Z.T hardcode in stencil
      int mmu = mu % Nd;

      // assert our assumptions
      assert((distance == 1) || (distance == -1));  // nearest neighbour stencil hard code
      assert((sl == 1) || (sl == 2));

      std::vector<int> icoor;

      if ( SE->_around_the_world && Params.twists[mmu] ) {

        if ( sl == 2 ) {

          std::vector<sobj> vals(Nsimd);

          extract(chi,vals);
          for(int s=0;s<Nsimd;s++){

            grid->iCoorFromIindex(icoor,s);

            assert((icoor[direction]==0)||(icoor[direction]==1));

            int permute_lane;
            if ( distance == 1) {
              permute_lane = icoor[direction]?1:0;
            } else {
              permute_lane = icoor[direction]?0:1;
            }

            if ( permute_lane ) {
              stmp(0) = vals[s](1);
              stmp(1) = vals[s](0);
              vals[s] = stmp;
            }
          }
          merge(vtmp,vals);

        } else {
          vtmp(0) = chi(1);
          vtmp(1) = chi(0);
        }
        mult(&phi(0),&U(0)(mu),&vtmp(0));
        mult(&phi(1),&U(1)(mu),&vtmp(1));

      } else {
        mult(&phi(0),&U(0)(mu),&chi(0));
        mult(&phi(1),&U(1)(mu),&chi(1));
      }

    }

    inline void DoubleStore(GridBase *GaugeGrid,DoubledGaugeField &Uds,const GaugeField &Umu)
    {
      conformable(Uds._grid,GaugeGrid);
      conformable(Umu._grid,GaugeGrid);

      GaugeLinkField Utmp (GaugeGrid);
      GaugeLinkField U    (GaugeGrid);
      GaugeLinkField Uconj(GaugeGrid);

      Lattice<iScalar<vInteger> > coor(GaugeGrid);

      for(int mu=0;mu<Nd;mu++){

        LatticeCoordinate(coor,mu);

        U     = PeekIndex<LorentzIndex>(Umu,mu);
        Uconj = conjugate(U);

        // This phase could come from a simple bc 1,1,-1,1 ..
        int neglink = GaugeGrid->GlobalDimensions()[mu]-1;
        if ( Params.twists[mu] ) {
          Uconj = where(coor==neglink,-Uconj,Uconj);
        }

        PARALLEL_FOR_LOOP
        for(auto ss=U.begin();ss<U.end();ss++){
          Uds[ss](0)(mu) = U[ss]();
          Uds[ss](1)(mu) = Uconj[ss]();
        }

        U     = adj(Cshift(U    ,mu,-1));      // correct except for spanning the boundary
        Uconj = adj(Cshift(Uconj,mu,-1));

        Utmp = U;
        if ( Params.twists[mu] ) {
          Utmp = where(coor==0,Uconj,Utmp);
        }

        PARALLEL_FOR_LOOP
        for(auto ss=U.begin();ss<U.end();ss++){
          Uds[ss](0)(mu+4) = Utmp[ss]();
        }

        Utmp = Uconj;
        if ( Params.twists[mu] ) {
          Utmp = where(coor==0,U,Utmp);
        }

        PARALLEL_FOR_LOOP
        for(auto ss=U.begin();ss<U.end();ss++){
          Uds[ss](1)(mu+4) = Utmp[ss]();
        }

      }
    }

    inline void InsertForce4D(GaugeField &mat, FermionField &Btilde, FermionField &A, int mu) {

      // DhopDir provides U or Uconj depending on coor/flavour.
      GaugeLinkField link(mat._grid);
      // use lorentz for flavour as hack.
      auto tmp = TraceIndex<SpinIndex>(outerProduct(Btilde, A));
      PARALLEL_FOR_LOOP
      for (auto ss = tmp.begin(); ss < tmp.end(); ss++) {
        link[ss]() = tmp[ss](0, 0) - conjugate(tmp[ss](1, 1));
      }
      PokeIndex<LorentzIndex>(mat, link, mu);
      return;
    }

    inline void InsertForce5D(GaugeField &mat, FermionField &Btilde, FermionField &Atilde, int mu) {

      int Ls = Btilde._grid->_fdimensions[0];

      GaugeLinkField tmp(mat._grid);
      tmp = zero;
      PARALLEL_FOR_LOOP
      for (int ss = 0; ss < tmp._grid->oSites(); ss++) {
        for (int s = 0; s < Ls; s++) {
          int sF = s + Ls * ss;
          auto ttmp = traceIndex<SpinIndex>(outerProduct(Btilde[sF], Atilde[sF]));
          tmp[ss]() = tmp[ss]() + ttmp(0, 0) + conjugate(ttmp(1, 1));
        }
      }
      PokeIndex<LorentzIndex>(mat, tmp, mu);
      return;
    }
  };
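  // Summary of the G-parity doubling implemented above: flavour slot 0 of Uds carries
  // U_mu and slot 1 carries its complex conjugate, with the sign flipped on the last
  // slice of every twisted direction; multLink swaps the two flavour components of chi
  // when a hop wraps a twisted boundary.  This realises the usual boundary condition
  // (standard convention, stated here as an assumption rather than taken from this file)
  // \f$ \psi(x + L\hat\mu) = C\,\bar\psi^{\,T}(x) \f$ for the flavour-doubled field.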

  typedef WilsonImpl<vComplex,  FundamentalRepresentation > WilsonImplR;  // Real.. whichever prec
  typedef WilsonImpl<vComplexF, FundamentalRepresentation > WilsonImplF;  // Float
  typedef WilsonImpl<vComplexD, FundamentalRepresentation > WilsonImplD;  // Double

  typedef WilsonImpl<vComplex,  FundamentalRepresentation, ComplexD > ZWilsonImplR; // Real.. whichever prec
  typedef WilsonImpl<vComplexF, FundamentalRepresentation, ComplexD > ZWilsonImplF; // Float
  typedef WilsonImpl<vComplexD, FundamentalRepresentation, ComplexD > ZWilsonImplD; // Double

  typedef WilsonImpl<vComplex,  AdjointRepresentation > WilsonAdjImplR;  // Real.. whichever prec
  typedef WilsonImpl<vComplexF, AdjointRepresentation > WilsonAdjImplF;  // Float
  typedef WilsonImpl<vComplexD, AdjointRepresentation > WilsonAdjImplD;  // Double

  typedef WilsonImpl<vComplex,  TwoIndexSymmetricRepresentation > WilsonTwoIndexSymmetricImplR;  // Real.. whichever prec
  typedef WilsonImpl<vComplexF, TwoIndexSymmetricRepresentation > WilsonTwoIndexSymmetricImplF;  // Float
  typedef WilsonImpl<vComplexD, TwoIndexSymmetricRepresentation > WilsonTwoIndexSymmetricImplD;  // Double

  typedef DomainWallVec5dImpl<vComplex ,Nc> DomainWallVec5dImplR; // Real.. whichever prec
  typedef DomainWallVec5dImpl<vComplexF,Nc> DomainWallVec5dImplF; // Float
  typedef DomainWallVec5dImpl<vComplexD,Nc> DomainWallVec5dImplD; // Double

  typedef DomainWallVec5dImpl<vComplex ,Nc,ComplexD> ZDomainWallVec5dImplR; // Real.. whichever prec
  typedef DomainWallVec5dImpl<vComplexF,Nc,ComplexD> ZDomainWallVec5dImplF; // Float
  typedef DomainWallVec5dImpl<vComplexD,Nc,ComplexD> ZDomainWallVec5dImplD; // Double

  typedef GparityWilsonImpl<vComplex , Nc> GparityWilsonImplR; // Real.. whichever prec
  typedef GparityWilsonImpl<vComplexF, Nc> GparityWilsonImplF; // Float
  typedef GparityWilsonImpl<vComplexD, Nc> GparityWilsonImplD; // Double

}}

#endif
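// Illustrative (hypothetical) use of the convenience typedefs above; the gauge field,
// grids and mass are assumed to be set up by the caller:
//
//   WilsonFermion<WilsonImplR> Dw(Umu, *FGrid, *FrbGrid, mass);
//   Dw.M(src, result);                 // full Dirac operator
//   Dw.Dhop(src, result, DaggerNo);    // hopping term only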
@@ -42,7 +42,11 @@ namespace Grid {
    INHERIT_IMPL_TYPES(Impl);
  public:

    void MomentumSpacePropagator(FermionField &out,const FermionField &in,RealD _m) {
      this->MomentumSpacePropagatorHw(out,in,_m);
    };

    // Constructors
    OverlapWilsonCayleyTanhFermion(GaugeField &_Umu,
                                   GridCartesian         &FiveDimGrid,
                                   GridRedBlackCartesian &FiveDimRedBlackGrid,
@@ -101,6 +101,7 @@ void WilsonFermion<Impl>::Meooe(const FermionField &in, FermionField &out) {
    DhopOE(in, out, DaggerNo);
  }
}

template <class Impl>
void WilsonFermion<Impl>::MeooeDag(const FermionField &in, FermionField &out) {
  if (in.checkerboard == Odd) {
@@ -109,32 +110,87 @@ void WilsonFermion<Impl>::MeooeDag(const FermionField &in, FermionField &out) {
    DhopOE(in, out, DaggerYes);
  }
}

template <class Impl>
void WilsonFermion<Impl>::Mooee(const FermionField &in, FermionField &out) {
  out.checkerboard = in.checkerboard;
  typename FermionField::scalar_type scal(4.0 + mass);
  out = scal * in;
}

template <class Impl>
void WilsonFermion<Impl>::MooeeDag(const FermionField &in, FermionField &out) {
  out.checkerboard = in.checkerboard;
  Mooee(in, out);
}

template<class Impl>
void WilsonFermion<Impl>::MooeeInv(const FermionField &in, FermionField &out) {
  out.checkerboard = in.checkerboard;
  out = (1.0/(4.0+mass))*in;
}

template<class Impl>
void WilsonFermion<Impl>::MooeeInvDag(const FermionField &in, FermionField &out) {
  out.checkerboard = in.checkerboard;
  MooeeInv(in,out);
}

template<class Impl>
void WilsonFermion<Impl>::MomentumSpacePropagator(FermionField &out, const FermionField &in,RealD _m) {

  // what type LatticeComplex
  conformable(_grid,out._grid);

  typedef typename FermionField::vector_type vector_type;
  typedef typename FermionField::scalar_type ScalComplex;

  typedef Lattice<iSinglet<vector_type> > LatComplex;

  Gamma::GammaMatrix Gmu [] = {
    Gamma::GammaX,
    Gamma::GammaY,
    Gamma::GammaZ,
    Gamma::GammaT
  };

  std::vector<int> latt_size   = _grid->_fdimensions;

  FermionField   num  (_grid); num  = zero;
  LatComplex    wilson(_grid); wilson= zero;
  LatComplex     one  (_grid); one = ScalComplex(1.0,0.0);

  LatComplex denom(_grid); denom= zero;
  LatComplex kmu(_grid);
  ScalComplex ci(0.0,1.0);
  // momphase = n * 2pi / L
  for(int mu=0;mu<Nd;mu++) {

    LatticeCoordinate(kmu,mu);

    RealD TwoPiL =  M_PI * 2.0/ latt_size[mu];

    kmu = TwoPiL * kmu;

    wilson = wilson + 2.0*sin(kmu*0.5)*sin(kmu*0.5); // Wilson term

    num = num - sin(kmu)*ci*(Gamma(Gmu[mu])*in);     // derivative term

    denom=denom + sin(kmu)*sin(kmu);
  }

  wilson = wilson + _m;        // 2 sin^2 k/2 + m

  num   = num + wilson*in;     // -i gmu sin k + 2 sin^2 k/2 + m

  denom = denom+wilson*wilson; // sin^2 k + (2 sin^2 k/2 + m)^2

  denom = one/denom;

  out = num*denom; // [ -i gmu sin k + 2 sin^2 k/2 + m] / [ sin^2 k + (2 sin^2 k/2 + m)^2 ]

}
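// The routine above evaluates, site by site in momentum space (LaTeX form of the
// inline comments, for the doxygen build):
//
//   \f[ \tilde S(k) = \frac{-i\sum_\mu \gamma_\mu \sin k_\mu + 2\sum_\mu \sin^2(k_\mu/2) + m}
//                          {\sum_\mu \sin^2 k_\mu + \bigl(2\sum_\mu \sin^2(k_\mu/2) + m\bigr)^2} \f]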

///////////////////////////////////
// Internal
@@ -166,7 +222,7 @@ void WilsonFermion<Impl>::DerivInternal(StencilImpl &st, DoubledGaugeField &U,
  ////////////////////////
  PARALLEL_FOR_LOOP
  for (int sss = 0; sss < B._grid->oSites(); sss++) {
    Kernels::DiracOptDhopDir(st, U, st.CommBuf(), sss, sss, B, Btilde, mu,
                             gamma);
  }
@@ -277,7 +333,7 @@ void WilsonFermion<Impl>::DhopDirDisp(const FermionField &in, FermionField &out,

  PARALLEL_FOR_LOOP
  for (int sss = 0; sss < in._grid->oSites(); sss++) {
    Kernels::DiracOptDhopDir(Stencil, Umu, Stencil.CommBuf(), sss, sss, in, out,
                             dirdisp, gamma);
  }
};
@@ -295,13 +351,13 @@ void WilsonFermion<Impl>::DhopInternal(StencilImpl &st, LebesgueOrder &lo,
  if (dag == DaggerYes) {
    PARALLEL_FOR_LOOP
    for (int sss = 0; sss < in._grid->oSites(); sss++) {
      Kernels::DiracOptDhopSiteDag(st, lo, U, st.CommBuf(), sss, sss, 1, 1, in,
                                   out);
    }
  } else {
    PARALLEL_FOR_LOOP
    for (int sss = 0; sss < in._grid->oSites(); sss++) {
      Kernels::DiracOptDhopSite(st, lo, U, st.CommBuf(), sss, sss, 1, 1, in,
                                out);
    }
  }
@@ -78,16 +78,15 @@ class WilsonFermion : public WilsonKernels<Impl>, public WilsonFermionStatic {
  virtual void MooeeInv(const FermionField &in, FermionField &out);
  virtual void MooeeInvDag(const FermionField &in, FermionField &out);

  virtual void MomentumSpacePropagator(FermionField &out,const FermionField &in,RealD _mass) ;

  ////////////////////////
  // Derivative interface
  ////////////////////////
  // Interface calls an internal routine
  void DhopDeriv(GaugeField &mat,const FermionField &U,const FermionField &V,int dag);
  void DhopDerivOE(GaugeField &mat,const FermionField &U,const FermionField &V,int dag);
  void DhopDerivEO(GaugeField &mat,const FermionField &U,const FermionField &V,int dag);

  ///////////////////////////////////////////////////////////////
  // non-hermitian hopping term; half cb or both
@@ -184,44 +184,37 @@ void WilsonFermion5D<Impl>::Report(void)

  if ( DhopCalls > 0 ) {
    std::cout << GridLogMessage << "#### Dhop calls report " << std::endl;
    std::cout << GridLogMessage << "WilsonFermion5D Number of Dhop Calls     : " << DhopCalls << std::endl;
    std::cout << GridLogMessage << "WilsonFermion5D Total Communication time : " << DhopCommTime<< " us" << std::endl;
    std::cout << GridLogMessage << "WilsonFermion5D CommTime/Calls           : " << DhopCommTime / DhopCalls << " us" << std::endl;
    std::cout << GridLogMessage << "WilsonFermion5D Total Compute time       : " << DhopComputeTime << " us" << std::endl;
    std::cout << GridLogMessage << "WilsonFermion5D ComputeTime/Calls        : " << DhopComputeTime / DhopCalls << " us" << std::endl;

    RealD mflops = 1344*volume*DhopCalls/DhopComputeTime/2; // 2 for red black counting
    std::cout << GridLogMessage << "Average mflops/s per call                : " << mflops << std::endl;
    std::cout << GridLogMessage << "Average mflops/s per call per rank       : " << mflops/NP << std::endl;

  }

  if ( DerivCalls > 0 ) {
    std::cout << GridLogMessage << "#### Deriv calls report "<< std::endl;
    std::cout << GridLogMessage << "WilsonFermion5D Number of Deriv Calls    : " <<DerivCalls <<std::endl;
    std::cout << GridLogMessage << "WilsonFermion5D Total Communication time : " <<DerivCommTime <<" us"<<std::endl;
    std::cout << GridLogMessage << "WilsonFermion5D CommTime/Calls           : " <<DerivCommTime/DerivCalls<<" us" <<std::endl;
    std::cout << GridLogMessage << "WilsonFermion5D Total Compute time       : " <<DerivComputeTime <<" us"<<std::endl;
    std::cout << GridLogMessage << "WilsonFermion5D ComputeTime/Calls        : " <<DerivComputeTime/DerivCalls<<" us" <<std::endl;
    std::cout << GridLogMessage << "WilsonFermion5D Total Dhop Compute time  : " <<DerivDhopComputeTime <<" us"<<std::endl;
    std::cout << GridLogMessage << "WilsonFermion5D Dhop ComputeTime/Calls   : " <<DerivDhopComputeTime/DerivCalls<<" us" <<std::endl;

    RealD mflops = 144*volume*DerivCalls/DerivDhopComputeTime;
    std::cout << GridLogMessage << "Average mflops/s per call                : " << mflops << std::endl;
    std::cout << GridLogMessage << "Average mflops/s per call per node       : " << mflops/NP << std::endl;
  }

  if (DerivCalls > 0 || DhopCalls > 0){
    std::cout << GridLogMessage << "WilsonFermion5D Stencil"    <<std::endl; Stencil.Report();
    std::cout << GridLogMessage << "WilsonFermion5D StencilEven"<<std::endl; StencilEven.Report();
    std::cout << GridLogMessage << "WilsonFermion5D StencilOdd" <<std::endl; StencilOdd.Report();
  }
}
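// Worked example of the flop-rate bookkeeping in Report() (numbers are illustrative
// assumptions, not measured output): with 1344 flops per site per Dhop call,
// a 16^3 x 32 volume, 1000 calls and 2.0e6 us of compute time,
//
//   RealD example_mflops = 1344.0*(16*16*16*32)*1000/2.0e6/2;   // ~ 44,000 Mflop/s
//
// the trailing /2 being the red-black factor noted in the code above.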
@@ -275,7 +268,7 @@ PARALLEL_FOR_LOOP
  for(int s=0;s<Ls;s++){
    int sU=ss;
    int sF = s+Ls*sU;
    Kernels::DiracOptDhopDir(Stencil,Umu,Stencil.CommBuf(),sF,sU,in,out,dirdisp,gamma);
  }
}
};
@@ -327,8 +320,7 @@ void WilsonFermion5D<Impl>::DerivInternal(StencilImpl & st,
      assert(sF < B._grid->oSites());
      assert(sU < U._grid->oSites());

      Kernels::DiracOptDhopDir(st, U, st.CommBuf(), sF, sU, B, Btilde, mu, gamma);

      ////////////////////////////
      // spin trace outer product
@@ -342,10 +334,10 @@ void WilsonFermion5D<Impl>::DerivInternal(StencilImpl & st,
}

template<class Impl>
void WilsonFermion5D<Impl>::DhopDeriv(GaugeField &mat,
                                      const FermionField &A,
                                      const FermionField &B,
                                      int dag)
{
  conformable(A._grid,FermionGrid());
  conformable(A._grid,B._grid);
@@ -358,9 +350,9 @@ void WilsonFermion5D<Impl>::DhopDeriv( GaugeField &mat,

template<class Impl>
void WilsonFermion5D<Impl>::DhopDerivEO(GaugeField &mat,
                                        const FermionField &A,
                                        const FermionField &B,
                                        int dag)
{
  conformable(A._grid,FermionRedBlackGrid());
  conformable(GaugeRedBlackGrid(),mat._grid);
@@ -376,9 +368,9 @@ void WilsonFermion5D<Impl>::DhopDerivEO(GaugeField &mat,

template<class Impl>
void WilsonFermion5D<Impl>::DhopDerivOE(GaugeField &mat,
                                        const FermionField &A,
                                        const FermionField &B,
                                        int dag)
{
  conformable(A._grid,FermionRedBlackGrid());
  conformable(GaugeRedBlackGrid(),mat._grid);
@@ -393,10 +385,9 @@ void WilsonFermion5D<Impl>::DhopDerivOE(GaugeField &mat,

template<class Impl>
void WilsonFermion5D<Impl>::DhopInternal(StencilImpl & st, LebesgueOrder &lo,
                                         DoubledGaugeField & U,
                                         const FermionField &in, FermionField &out,int dag)
{
  // assert((dag==DaggerNo) ||(dag==DaggerYes));
  Compressor compressor(dag);
@@ -413,16 +404,35 @@ void WilsonFermion5D<Impl>::DhopInternal(StencilImpl & st, LebesgueOrder &lo,
    for (int ss = 0; ss < U._grid->oSites(); ss++) {
      int sU = ss;
      int sF = LLs * sU;
      Kernels::DiracOptDhopSiteDag(st, lo, U, st.CommBuf(), sF, sU, LLs, 1, in, out);
    }
#ifdef AVX512
  } else if (stat.is_init() ) {

    int nthreads;
    stat.start();
#pragma omp parallel
    {
#pragma omp master
      nthreads = omp_get_num_threads();
      int mythread = omp_get_thread_num();
      stat.enter(mythread);
#pragma omp for nowait
      for(int ss=0;ss<U._grid->oSites();ss++) {
        int sU=ss;
        int sF=LLs*sU;
        Kernels::DiracOptDhopSite(st,lo,U,st.CommBuf(),sF,sU,LLs,1,in,out);
      }
      stat.exit(mythread);
    }
    stat.accum(nthreads);
#endif
  } else {
    PARALLEL_FOR_LOOP
    for (int ss = 0; ss < U._grid->oSites(); ss++) {
      int sU = ss;
      int sF = LLs * sU;
      Kernels::DiracOptDhopSite(st,lo,U,st.CommBuf(),sF,sU,LLs,1,in,out);
    }
  }
  DhopComputeTime+=usecond();
@@ -432,6 +442,7 @@ void WilsonFermion5D<Impl>::DhopInternal(StencilImpl & st, LebesgueOrder &lo,
template<class Impl>
void WilsonFermion5D<Impl>::DhopOE(const FermionField &in, FermionField &out,int dag)
{
  DhopCalls++;
  conformable(in._grid,FermionRedBlackGrid());    // verifies half grid
  conformable(in._grid,out._grid); // drops the cb check

@@ -443,6 +454,7 @@ void WilsonFermion5D<Impl>::DhopOE(const FermionField &in, FermionField &out,int
template<class Impl>
void WilsonFermion5D<Impl>::DhopEO(const FermionField &in, FermionField &out,int dag)
{
  DhopCalls++;
  conformable(in._grid,FermionRedBlackGrid());    // verifies half grid
  conformable(in._grid,out._grid); // drops the cb check

@@ -454,6 +466,7 @@ void WilsonFermion5D<Impl>::DhopEO(const FermionField &in, FermionField &out,int
template<class Impl>
void WilsonFermion5D<Impl>::Dhop(const FermionField &in, FermionField &out,int dag)
{
  DhopCalls+=2;
  conformable(in._grid,FermionGrid()); // verifies full grid
  conformable(in._grid,out._grid);
@@ -469,6 +482,148 @@ void WilsonFermion5D<Impl>::DW(const FermionField &in, FermionField &out,int dag
  axpy(out,4.0-M5,in,out);
}

template<class Impl>
void WilsonFermion5D<Impl>::MomentumSpacePropagatorHt(FermionField &out,const FermionField &in, RealD mass)
{
  // what type LatticeComplex
  GridBase *_grid = _FourDimGrid;
  conformable(_grid,out._grid);

  typedef typename FermionField::vector_type vector_type;
  typedef typename FermionField::scalar_type ScalComplex;
  typedef iSinglet<ScalComplex> Tcomplex;
  typedef Lattice<iSinglet<vector_type> > LatComplex;

  Gamma::GammaMatrix Gmu [] = {
    Gamma::GammaX,
    Gamma::GammaY,
    Gamma::GammaZ,
    Gamma::GammaT
  };

  std::vector<int> latt_size   = _grid->_fdimensions;

  FermionField   num  (_grid); num  = zero;

  LatComplex    sk(_grid);  sk = zero;
  LatComplex    sk2(_grid); sk2= zero;
  LatComplex    W(_grid);   W  = zero;
  LatComplex    a(_grid);   a  = zero;
  LatComplex    one (_grid); one = ScalComplex(1.0,0.0);
  LatComplex denom(_grid); denom= zero;
  LatComplex cosha(_grid);
  LatComplex kmu(_grid);
  LatComplex Wea(_grid);
  LatComplex Wema(_grid);

  ScalComplex ci(0.0,1.0);

  for(int mu=0;mu<Nd;mu++) {

    LatticeCoordinate(kmu,mu);

    RealD TwoPiL =  M_PI * 2.0/ latt_size[mu];

    kmu = TwoPiL * kmu;

    sk2 = sk2 + 2.0*sin(kmu*0.5)*sin(kmu*0.5);
    sk  = sk  +     sin(kmu)    *sin(kmu);

    num = num - sin(kmu)*ci*(Gamma(Gmu[mu])*in);

  }

  W = one - M5 + sk2;

  ////////////////////////////////////////////
  // Cosh alpha -> alpha
  ////////////////////////////////////////////
  cosha = (one + W*W + sk) / (W*2.0);

  // FIXME Need a Lattice acosh
  for(int idx=0;idx<_grid->lSites();idx++){
    std::vector<int> lcoor(Nd);
    Tcomplex cc;
    RealD sgn;
    _grid->LocalIndexToLocalCoor(idx,lcoor);
    peekLocalSite(cc,cosha,lcoor);
    assert((double)real(cc)>=1.0);
    assert(fabs((double)imag(cc))<=1.0e-15);
    cc = ScalComplex(::acosh(real(cc)),0.0);
    pokeLocalSite(cc,a,lcoor);
  }

  Wea = ( exp( a) * W  );
  Wema= ( exp(-a) * W  );

  num   = num + ( one - Wema ) * mass * in;
  denom = ( Wea - one ) + mass*mass * (one - Wema);
  out = num/denom;
}
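// Worked summary of the Ht free propagator computed above (doxygen LaTeX, directly
// transcribing the lattice expressions in the code):
//
//  \f[ W(k) = 1 - M_5 + 2\sum_\mu \sin^2(k_\mu/2), \qquad
//      \cosh\alpha = \frac{1 + W^2 + \sum_\mu \sin^2 k_\mu}{2W}, \f]
//  \f[ \tilde S(k) = \frac{-i\sum_\mu\gamma_\mu\sin k_\mu + (1 - W e^{-\alpha})\,m}
//                         {(W e^{\alpha} - 1) + m^2 (1 - W e^{-\alpha})} \f]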

template<class Impl>
void WilsonFermion5D<Impl>::MomentumSpacePropagatorHw(FermionField &out,const FermionField &in,RealD mass)
{
  Gamma::GammaMatrix Gmu [] = {
    Gamma::GammaX,
    Gamma::GammaY,
    Gamma::GammaZ,
    Gamma::GammaT
  };

  GridBase *_grid = _FourDimGrid;
  conformable(_grid,out._grid);

  typedef typename FermionField::vector_type vector_type;
  typedef typename FermionField::scalar_type ScalComplex;

  typedef Lattice<iSinglet<vector_type> > LatComplex;

  std::vector<int> latt_size   = _grid->_fdimensions;

  LatComplex    sk(_grid);  sk = zero;
  LatComplex    sk2(_grid); sk2= zero;

  LatComplex    w_k(_grid); w_k= zero;
  LatComplex    b_k(_grid); b_k= zero;

  LatComplex    one (_grid); one = ScalComplex(1.0,0.0);

  FermionField  num  (_grid); num = zero;
  LatComplex denom(_grid); denom= zero;
  LatComplex kmu(_grid);
  ScalComplex ci(0.0,1.0);

  for(int mu=0;mu<Nd;mu++) {

    LatticeCoordinate(kmu,mu);

    RealD TwoPiL =  M_PI * 2.0/ latt_size[mu];

    kmu = TwoPiL * kmu;

    sk2 = sk2 + 2.0*sin(kmu*0.5)*sin(kmu*0.5);
    sk  = sk  + sin(kmu)*sin(kmu);

    num = num - sin(kmu)*ci*(Gamma(Gmu[mu])*in);

  }
  num = num + mass * in ;

  b_k = sk2 - M5;

  w_k = sqrt(sk + b_k*b_k);

  denom = ( w_k + b_k + mass*mass) ;

  denom = one/denom;
  out = num*denom;

}
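// Worked summary of the Hw (overlap kernel) free propagator above, again transcribing
// the code directly:
//
//  \f[ b(k) = 2\sum_\mu \sin^2(k_\mu/2) - M_5, \qquad
//      w(k) = \sqrt{\sum_\mu \sin^2 k_\mu + b(k)^2}, \f]
//  \f[ \tilde S(k) = \frac{-i\sum_\mu\gamma_\mu\sin k_\mu + m}{\,w(k) + b(k) + m^2\,} \f]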

FermOpTemplateInstantiate(WilsonFermion5D);
GparityFermOpTemplateInstantiate(WilsonFermion5D);

@@ -31,9 +31,21 @@ Author: paboyle <paboyle@ph.ed.ac.uk>
#ifndef  GRID_QCD_WILSON_FERMION_5D_H
#define  GRID_QCD_WILSON_FERMION_5D_H

#include <Grid/Stat.h>

namespace Grid {
namespace QCD {

  ////////////////////////////////////////////////////////////////////////////////
  // This is the 4d red black case appropriate to support
  //
  // parity = (x+y+z+t)|2;
  // generalised five dim fermions like mobius, zolotarev etc..
  //
  // i.e. even even contains fifth dim hopping term.
  //
  // [DIFFERS from original CPS red black implementation parity = (x+y+z+t+s)|2 ]
  ////////////////////////////////////////////////////////////////////////////////
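  // For clarity, the two checkerboardings referred to above differ only in whether the
  // fifth coordinate enters the parity (an illustrative sketch, not code in this header):
  //
  //   int parity_4d = (x + y + z + t)     % 2;   // used here: s-hopping stays within a parity
  //   int parity_5d = (x + y + z + t + s) % 2;   // original CPS convention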
@@ -60,6 +72,7 @@ namespace Grid {
    public:
      INHERIT_IMPL_TYPES(Impl);
      typedef WilsonKernels<Impl> Kernels;
      PmuStat stat;

      void Report(void);
      void ZeroCounters(void);
@@ -99,6 +112,9 @@ namespace Grid {
      virtual void DhopDerivEO(GaugeField &mat,const FermionField &U,const FermionField &V,int dag);
      virtual void DhopDerivOE(GaugeField &mat,const FermionField &U,const FermionField &V,int dag);

      void MomentumSpacePropagatorHt(FermionField &out,const FermionField &in,RealD mass) ;
      void MomentumSpacePropagatorHw(FermionField &out,const FermionField &in,RealD mass) ;

      // Implement the non-hermitian hopping term; half cb or both
      // Implement s-diagonal DW
      void DW (const FermionField &in, FermionField &out,int dag);
@ -108,78 +124,78 @@ namespace Grid {
|
||||
|
||||
// add a DhopComm
|
||||
// -- suboptimal interface will presently trigger multiple comms.
|
||||
void DhopDir(const FermionField &in, FermionField &out,int dir,int disp);
|
||||
|
||||
///////////////////////////////////////////////////////////////
|
||||
// New methods added
|
||||
///////////////////////////////////////////////////////////////
|
||||
void DerivInternal(StencilImpl & st,
|
||||
DoubledGaugeField & U,
|
||||
GaugeField &mat,
|
||||
const FermionField &A,
|
||||
const FermionField &B,
|
||||
int dag);
|
||||
|
||||
void DhopInternal(StencilImpl & st,
|
||||
LebesgueOrder &lo,
|
||||
DoubledGaugeField &U,
|
||||
const FermionField &in,
|
||||
FermionField &out,
|
||||
int dag);
|
||||
|
||||
// Constructors
|
||||
WilsonFermion5D(GaugeField &_Umu,
|
||||
GridCartesian &FiveDimGrid,
|
||||
GridRedBlackCartesian &FiveDimRedBlackGrid,
|
||||
GridCartesian &FourDimGrid,
|
||||
GridRedBlackCartesian &FourDimRedBlackGrid,
|
||||
double _M5,const ImplParams &p= ImplParams());
|
||||
|
||||
// Constructors
|
||||
/*
|
||||
void DhopDir(const FermionField &in, FermionField &out,int dir,int disp);
|
||||
|
||||
///////////////////////////////////////////////////////////////
|
||||
// New methods added
|
||||
///////////////////////////////////////////////////////////////
|
||||
void DerivInternal(StencilImpl & st,
|
||||
DoubledGaugeField & U,
|
||||
GaugeField &mat,
|
||||
const FermionField &A,
|
||||
const FermionField &B,
|
||||
int dag);
|
||||
|
||||
void DhopInternal(StencilImpl & st,
|
||||
LebesgueOrder &lo,
|
||||
DoubledGaugeField &U,
|
||||
const FermionField &in,
|
||||
FermionField &out,
|
||||
int dag);
|
||||
|
||||
// Constructors
|
||||
WilsonFermion5D(GaugeField &_Umu,
|
||||
GridCartesian &FiveDimGrid,
|
||||
GridRedBlackCartesian &FiveDimRedBlackGrid,
|
||||
GridCartesian &FourDimGrid,
|
||||
GridRedBlackCartesian &FourDimRedBlackGrid,
|
||||
double _M5,const ImplParams &p= ImplParams());
|
||||
|
||||
// Constructors
|
||||
/*
|
||||
WilsonFermion5D(int simd,
|
||||
GaugeField &_Umu,
|
||||
GridCartesian &FiveDimGrid,
|
||||
GridRedBlackCartesian &FiveDimRedBlackGrid,
|
||||
GridCartesian &FourDimGrid,
|
||||
double _M5,const ImplParams &p= ImplParams());
|
||||
*/
|
||||
GaugeField &_Umu,
|
||||
GridCartesian &FiveDimGrid,
|
||||
GridRedBlackCartesian &FiveDimRedBlackGrid,
|
||||
GridCartesian &FourDimGrid,
|
||||
double _M5,const ImplParams &p= ImplParams());
|
||||
*/
|
||||
|
||||
// DoubleStore
|
||||
void ImportGauge(const GaugeField &_Umu);
|
||||
|
||||
///////////////////////////////////////////////////////////////
|
||||
// Data members required to support the functionality
|
||||
///////////////////////////////////////////////////////////////
|
||||
public:
|
||||
|
||||
// Add these to the support from Wilson
|
||||
GridBase *_FourDimGrid;
|
||||
GridBase *_FourDimRedBlackGrid;
|
||||
GridBase *_FiveDimGrid;
|
||||
GridBase *_FiveDimRedBlackGrid;
|
||||
|
||||
double M5;
|
||||
int Ls;
|
||||
|
||||
//Defines the stencils for even and odd
|
||||
StencilImpl Stencil;
|
||||
StencilImpl StencilEven;
|
||||
StencilImpl StencilOdd;
|
||||
|
||||
// Copy of the gauge field, with even and odd subsets
|
||||
DoubledGaugeField Umu;
|
||||
DoubledGaugeField UmuEven;
|
||||
DoubledGaugeField UmuOdd;
|
||||
|
||||
LebesgueOrder Lebesgue;
|
||||
LebesgueOrder LebesgueEvenOdd;
|
||||
|
||||
// Comms buffer
|
||||
std::vector<SiteHalfSpinor,alignedAllocator<SiteHalfSpinor> > comm_buf;
|
||||
|
||||
};
|
||||
|
||||
// DoubleStore
|
||||
void ImportGauge(const GaugeField &_Umu);
|
||||
|
||||
///////////////////////////////////////////////////////////////
|
||||
// Data members required to support the functionality
|
||||
///////////////////////////////////////////////////////////////
|
||||
public:
|
||||
|
||||
// Add these to the support from Wilson
|
||||
GridBase *_FourDimGrid;
|
||||
GridBase *_FourDimRedBlackGrid;
|
||||
GridBase *_FiveDimGrid;
|
||||
GridBase *_FiveDimRedBlackGrid;
|
||||
|
||||
double M5;
|
||||
int Ls;
|
||||
|
||||
//Defines the stencils for even and odd
|
||||
StencilImpl Stencil;
|
||||
StencilImpl StencilEven;
|
||||
StencilImpl StencilOdd;
|
||||
|
||||
// Copy of the gauge field, with even and odd subsets
|
||||
DoubledGaugeField Umu;
|
||||
DoubledGaugeField UmuEven;
|
||||
DoubledGaugeField UmuOdd;
|
||||
|
||||
LebesgueOrder Lebesgue;
|
||||
LebesgueOrder LebesgueEvenOdd;
|
||||
|
||||
// Comms buffer
|
||||
std::vector<SiteHalfSpinor,alignedAllocator<SiteHalfSpinor> > comm_buf;
|
||||
|
||||
};
|
||||
}
|
||||
}
|
||||
}}
|
||||
|
||||
#endif
|
||||
|
@ -32,8 +32,7 @@ directory
|
||||
namespace Grid {
|
||||
namespace QCD {
|
||||
|
||||
int WilsonKernelsStatic::HandOpt;
|
||||
int WilsonKernelsStatic::AsmOpt;
|
||||
int WilsonKernelsStatic::Opt;
|
||||
|
||||
template <class Impl>
|
||||
WilsonKernels<Impl>::WilsonKernels(const ImplParams &p) : Base(p){};
|
||||
@ -43,10 +42,9 @@ WilsonKernels<Impl>::WilsonKernels(const ImplParams &p) : Base(p){};
|
||||
////////////////////////////////////////////
|
||||
|
||||
template <class Impl>
|
||||
void WilsonKernels<Impl>::DiracOptGenericDhopSiteDag(
|
||||
StencilImpl &st, LebesgueOrder &lo, DoubledGaugeField &U,
|
||||
std::vector<SiteHalfSpinor, alignedAllocator<SiteHalfSpinor> > &buf, int sF,
|
||||
int sU, const FermionField &in, FermionField &out) {
|
||||
void WilsonKernels<Impl>::DiracOptGenericDhopSiteDag(StencilImpl &st, LebesgueOrder &lo, DoubledGaugeField &U,
|
||||
SiteHalfSpinor *buf, int sF,
|
||||
int sU, const FermionField &in, FermionField &out) {
|
||||
SiteHalfSpinor tmp;
|
||||
SiteHalfSpinor chi;
|
||||
SiteHalfSpinor *chi_p;
|
||||
@ -220,10 +218,9 @@ void WilsonKernels<Impl>::DiracOptGenericDhopSiteDag(
|
||||
|
||||
// Need controls to do interior, exterior, or both
|
||||
template <class Impl>
|
||||
void WilsonKernels<Impl>::DiracOptGenericDhopSite(
|
||||
StencilImpl &st, LebesgueOrder &lo, DoubledGaugeField &U,
|
||||
std::vector<SiteHalfSpinor, alignedAllocator<SiteHalfSpinor> > &buf, int sF,
|
||||
int sU, const FermionField &in, FermionField &out) {
|
||||
void WilsonKernels<Impl>::DiracOptGenericDhopSite(StencilImpl &st, LebesgueOrder &lo, DoubledGaugeField &U,
|
||||
SiteHalfSpinor *buf, int sF,
|
||||
int sU, const FermionField &in, FermionField &out) {
|
||||
SiteHalfSpinor tmp;
|
||||
SiteHalfSpinor chi;
|
||||
SiteHalfSpinor *chi_p;
|
||||
@ -396,10 +393,9 @@ void WilsonKernels<Impl>::DiracOptGenericDhopSite(
|
||||
};
|
||||
|
||||
template <class Impl>
|
||||
void WilsonKernels<Impl>::DiracOptDhopDir(
|
||||
StencilImpl &st, DoubledGaugeField &U,
|
||||
std::vector<SiteHalfSpinor, alignedAllocator<SiteHalfSpinor> > &buf, int sF,
|
||||
int sU, const FermionField &in, FermionField &out, int dir, int gamma) {
|
||||
void WilsonKernels<Impl>::DiracOptDhopDir( StencilImpl &st, DoubledGaugeField &U,SiteHalfSpinor *buf, int sF,
|
||||
int sU, const FermionField &in, FermionField &out, int dir, int gamma) {
|
||||
|
||||
SiteHalfSpinor tmp;
|
||||
SiteHalfSpinor chi;
|
||||
SiteSpinor result;
|
||||
|
@ -32,175 +32,152 @@ directory
|
||||
#define GRID_QCD_DHOP_H
|
||||
|
||||
namespace Grid {
|
||||
namespace QCD {
|
||||
|
||||
namespace QCD {
|
||||
|
||||
////////////////////////////////////////////////////////////////////////////////////////////////////////////////
|
||||
// Helper routines that implement Wilson stencil for a single site.
|
||||
// Common to both the WilsonFermion and WilsonFermion5D
|
||||
////////////////////////////////////////////////////////////////////////////////////////////////////////////////
|
||||
class WilsonKernelsStatic {
|
||||
public:
|
||||
// S-direction is INNERMOST and takes no part in the parity.
|
||||
static int AsmOpt; // these are a temporary hack
|
||||
static int HandOpt; // these are a temporary hack
|
||||
};
|
||||
|
||||
template<class Impl> class WilsonKernels : public FermionOperator<Impl> , public WilsonKernelsStatic {
|
||||
public:
|
||||
|
||||
INHERIT_IMPL_TYPES(Impl);
|
||||
typedef FermionOperator<Impl> Base;
|
||||
|
||||
public:
|
||||
|
||||
template <bool EnableBool = true>
|
||||
typename std::enable_if<Impl::Dimension == 3 && Nc == 3 &&EnableBool, void>::type
|
||||
DiracOptDhopSite(
|
||||
StencilImpl &st, LebesgueOrder &lo, DoubledGaugeField &U,
|
||||
std::vector<SiteHalfSpinor, alignedAllocator<SiteHalfSpinor> > &buf,
|
||||
int sF, int sU, int Ls, int Ns, const FermionField &in,
|
||||
FermionField &out) {
|
||||
  ////////////////////////////////////////////////////////////////////////////////////////////////////////////////
  // Helper routines that implement Wilson stencil for a single site.
  // Common to both the WilsonFermion and WilsonFermion5D
  ////////////////////////////////////////////////////////////////////////////////////////////////////////////////
  class WilsonKernelsStatic {
  public:
    enum { OptGeneric, OptHandUnroll, OptInlineAsm };
    // S-direction is INNERMOST and takes no part in the parity.
    static int Opt; // these are a temporary hack
  };
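With the AsmOpt/HandOpt pair replaced by a single selector, a calling application would pick the kernel once at start-up. A hedged usage sketch; the boolean flag names here are hypothetical, only the enum values and the static Opt member come from the class above:

// e.g. during initialisation, possibly driven by command-line flags:
if (useAsm)         WilsonKernelsStatic::Opt = WilsonKernelsStatic::OptInlineAsm;   // AVX512 builds only
else if (useUnroll) WilsonKernelsStatic::Opt = WilsonKernelsStatic::OptHandUnroll;  // hand-unrolled C++ kernel
else                WilsonKernelsStatic::Opt = WilsonKernelsStatic::OptGeneric;     // portable default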
|
||||
|
||||
template<class Impl> class WilsonKernels : public FermionOperator<Impl> , public WilsonKernelsStatic {
|
||||
public:
|
||||
|
||||
INHERIT_IMPL_TYPES(Impl);
|
||||
typedef FermionOperator<Impl> Base;
|
||||
|
||||
public:
|
||||
|
||||
template <bool EnableBool = true>
|
||||
typename std::enable_if<Impl::Dimension == 3 && Nc == 3 &&EnableBool, void>::type
|
||||
DiracOptDhopSite(StencilImpl &st, LebesgueOrder &lo, DoubledGaugeField &U, SiteHalfSpinor * buf,
|
||||
int sF, int sU, int Ls, int Ns, const FermionField &in, FermionField &out)
|
||||
{
|
||||
switch(Opt) {
|
||||
#ifdef AVX512
|
||||
if (AsmOpt) {
|
||||
WilsonKernels<Impl>::DiracOptAsmDhopSite(st, lo, U, buf, sF, sU, Ls, Ns,
|
||||
in, out);
|
||||
|
||||
} else {
|
||||
#else
|
||||
{
|
||||
case OptInlineAsm:
|
||||
WilsonKernels<Impl>::DiracOptAsmDhopSite(st,lo,U,buf,sF,sU,Ls,Ns,in,out);
|
||||
break;
|
||||
#endif
|
||||
for (int site = 0; site < Ns; site++) {
|
||||
for (int s = 0; s < Ls; s++) {
|
||||
if (HandOpt)
|
||||
WilsonKernels<Impl>::DiracOptHandDhopSite(st, lo, U, buf, sF, sU,
|
||||
in, out);
|
||||
else
|
||||
WilsonKernels<Impl>::DiracOptGenericDhopSite(st, lo, U, buf, sF, sU,
|
||||
in, out);
|
||||
sF++;
|
||||
}
|
||||
sU++;
|
||||
}
|
||||
}
|
||||
case OptHandUnroll:
|
||||
for (int site = 0; site < Ns; site++) {
|
||||
for (int s = 0; s < Ls; s++) {
|
||||
WilsonKernels<Impl>::DiracOptHandDhopSite(st,lo,U,buf,sF,sU,in,out);
|
||||
sF++;
|
||||
}
|
||||
|
||||
template <bool EnableBool = true>
|
||||
typename std::enable_if<(Impl::Dimension != 3 || (Impl::Dimension == 3 && Nc != 3)) && EnableBool, void>::type
|
||||
DiracOptDhopSite(
|
||||
StencilImpl &st, LebesgueOrder &lo, DoubledGaugeField &U,
|
||||
std::vector<SiteHalfSpinor, alignedAllocator<SiteHalfSpinor> > &buf,
|
||||
int sF, int sU, int Ls, int Ns, const FermionField &in,
|
||||
FermionField &out) {
|
||||
for (int site = 0; site < Ns; site++) {
|
||||
for (int s = 0; s < Ls; s++) {
|
||||
WilsonKernels<Impl>::DiracOptGenericDhopSite(st, lo, U, buf, sF, sU, in,
|
||||
out);
|
||||
sF++;
|
||||
}
|
||||
sU++;
|
||||
}
|
||||
}
|
||||
|
||||
template <bool EnableBool = true>
|
||||
typename std::enable_if<Impl::Dimension == 3 && Nc == 3 && EnableBool,
|
||||
void>::type
|
||||
DiracOptDhopSiteDag(
|
||||
StencilImpl &st, LebesgueOrder &lo, DoubledGaugeField &U,
|
||||
std::vector<SiteHalfSpinor, alignedAllocator<SiteHalfSpinor> > &buf,
|
||||
int sF, int sU, int Ls, int Ns, const FermionField &in,
|
||||
FermionField &out) {
|
||||
#ifdef AVX512
|
||||
if (AsmOpt) {
|
||||
WilsonKernels<Impl>::DiracOptAsmDhopSiteDag(st, lo, U, buf, sF, sU, Ls,
|
||||
Ns, in, out);
|
||||
} else {
|
||||
#else
|
||||
{
|
||||
#endif
|
||||
for (int site = 0; site < Ns; site++) {
|
||||
for (int s = 0; s < Ls; s++) {
|
||||
if (HandOpt)
|
||||
WilsonKernels<Impl>::DiracOptHandDhopSiteDag(st, lo, U, buf, sF, sU,
|
||||
in, out);
|
||||
else
|
||||
WilsonKernels<Impl>::DiracOptGenericDhopSiteDag(st, lo, U, buf, sF,
|
||||
sU, in, out);
|
||||
sF++;
|
||||
}
|
||||
sU++;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
template <bool EnableBool = true>
|
||||
typename std::enable_if<
|
||||
(Impl::Dimension != 3 || (Impl::Dimension == 3 && Nc != 3)) && EnableBool,
|
||||
void>::type
|
||||
DiracOptDhopSiteDag(
|
||||
StencilImpl &st, LebesgueOrder &lo, DoubledGaugeField &U,
|
||||
std::vector<SiteHalfSpinor, alignedAllocator<SiteHalfSpinor> > &buf,
|
||||
int sF, int sU, int Ls, int Ns, const FermionField &in,
|
||||
FermionField &out) {
|
||||
for (int site = 0; site < Ns; site++) {
|
||||
for (int s = 0; s < Ls; s++) {
|
||||
WilsonKernels<Impl>::DiracOptGenericDhopSiteDag(st, lo, U, buf, sF, sU,
|
||||
in, out);
|
||||
sF++;
|
||||
}
|
||||
sU++;
|
||||
}
|
||||
}
|
||||
|
||||
void DiracOptDhopDir(
|
||||
StencilImpl &st, DoubledGaugeField &U,
|
||||
std::vector<SiteHalfSpinor, alignedAllocator<SiteHalfSpinor> > &buf,
|
||||
int sF, int sU, const FermionField &in, FermionField &out, int dirdisp,
|
||||
int gamma);
|
||||
|
||||
private:
|
||||
// Specialised variants
|
||||
void DiracOptGenericDhopSite(
|
||||
StencilImpl &st, LebesgueOrder &lo, DoubledGaugeField &U,
|
||||
std::vector<SiteHalfSpinor, alignedAllocator<SiteHalfSpinor> > &buf,
|
||||
int sF, int sU, const FermionField &in, FermionField &out);
|
||||
|
||||
void DiracOptGenericDhopSiteDag(
|
||||
StencilImpl &st, LebesgueOrder &lo, DoubledGaugeField &U,
|
||||
std::vector<SiteHalfSpinor, alignedAllocator<SiteHalfSpinor> > &buf,
|
||||
int sF, int sU, const FermionField &in, FermionField &out);
|
||||
|
||||
void DiracOptAsmDhopSite(
|
||||
StencilImpl &st, LebesgueOrder &lo, DoubledGaugeField &U,
|
||||
std::vector<SiteHalfSpinor, alignedAllocator<SiteHalfSpinor> > &buf,
|
||||
int sF, int sU, int Ls, int Ns, const FermionField &in,
|
||||
FermionField &out);
|
||||
|
||||
void DiracOptAsmDhopSiteDag(
|
||||
StencilImpl &st, LebesgueOrder &lo, DoubledGaugeField &U,
|
||||
std::vector<SiteHalfSpinor, alignedAllocator<SiteHalfSpinor> > &buf,
|
||||
int sF, int sU, int Ls, int Ns, const FermionField &in,
|
||||
FermionField &out);
|
||||
|
||||
void DiracOptHandDhopSite(
|
||||
StencilImpl &st, LebesgueOrder &lo, DoubledGaugeField &U,
|
||||
std::vector<SiteHalfSpinor, alignedAllocator<SiteHalfSpinor> > &buf,
|
||||
int sF, int sU, const FermionField &in, FermionField &out);
|
||||
|
||||
void DiracOptHandDhopSiteDag(
|
||||
StencilImpl &st, LebesgueOrder &lo, DoubledGaugeField &U,
|
||||
std::vector<SiteHalfSpinor, alignedAllocator<SiteHalfSpinor> > &buf,
|
||||
int sF, int sU, const FermionField &in, FermionField &out);
|
||||
|
||||
public:
|
||||
WilsonKernels(const ImplParams &p = ImplParams());
|
||||
};
|
||||
|
||||
sU++;
|
||||
}
|
||||
break;
|
||||
case OptGeneric:
|
||||
for (int site = 0; site < Ns; site++) {
|
||||
for (int s = 0; s < Ls; s++) {
|
||||
WilsonKernels<Impl>::DiracOptGenericDhopSite(st,lo,U,buf,sF,sU,in,out);
|
||||
sF++;
|
||||
}
|
||||
sU++;
|
||||
}
|
||||
break;
|
||||
default:
|
||||
assert(0);
|
||||
}
|
||||
}
|
||||
|
||||
template <bool EnableBool = true>
|
||||
typename std::enable_if<(Impl::Dimension != 3 || (Impl::Dimension == 3 && Nc != 3)) && EnableBool, void>::type
|
||||
DiracOptDhopSite(StencilImpl &st, LebesgueOrder &lo, DoubledGaugeField &U, SiteHalfSpinor * buf,
|
||||
int sF, int sU, int Ls, int Ns, const FermionField &in, FermionField &out) {
|
||||
// no kernel choice
|
||||
for (int site = 0; site < Ns; site++) {
|
||||
for (int s = 0; s < Ls; s++) {
|
||||
WilsonKernels<Impl>::DiracOptGenericDhopSite(st, lo, U, buf, sF, sU, in, out);
|
||||
sF++;
|
||||
}
|
||||
sU++;
|
||||
}
|
||||
}
|
||||
|
||||
template <bool EnableBool = true>
|
||||
typename std::enable_if<Impl::Dimension == 3 && Nc == 3 && EnableBool,void>::type
|
||||
DiracOptDhopSiteDag(StencilImpl &st, LebesgueOrder &lo, DoubledGaugeField &U, SiteHalfSpinor * buf,
|
||||
int sF, int sU, int Ls, int Ns, const FermionField &in, FermionField &out) {
|
||||
|
||||
switch(Opt) {
|
||||
#ifdef AVX512
|
||||
case OptInlineAsm:
|
||||
WilsonKernels<Impl>::DiracOptAsmDhopSiteDag(st,lo,U,buf,sF,sU,Ls,Ns,in,out);
|
||||
break;
|
||||
#endif
|
||||
case OptHandUnroll:
|
||||
for (int site = 0; site < Ns; site++) {
|
||||
for (int s = 0; s < Ls; s++) {
|
||||
WilsonKernels<Impl>::DiracOptHandDhopSiteDag(st,lo,U,buf,sF,sU,in,out);
|
||||
sF++;
|
||||
}
|
||||
sU++;
|
||||
}
|
||||
break;
|
||||
case OptGeneric:
|
||||
for (int site = 0; site < Ns; site++) {
|
||||
for (int s = 0; s < Ls; s++) {
|
||||
WilsonKernels<Impl>::DiracOptGenericDhopSiteDag(st,lo,U,buf,sF,sU,in,out);
|
||||
sF++;
|
||||
}
|
||||
sU++;
|
||||
}
|
||||
break;
|
||||
default:
|
||||
assert(0);
|
||||
}
|
||||
}
|
||||
|
||||
template <bool EnableBool = true>
|
||||
typename std::enable_if<(Impl::Dimension != 3 || (Impl::Dimension == 3 && Nc != 3)) && EnableBool,void>::type
|
||||
DiracOptDhopSiteDag(StencilImpl &st, LebesgueOrder &lo, DoubledGaugeField &U,SiteHalfSpinor * buf,
|
||||
int sF, int sU, int Ls, int Ns, const FermionField &in, FermionField &out) {
|
||||
|
||||
for (int site = 0; site < Ns; site++) {
|
||||
for (int s = 0; s < Ls; s++) {
|
||||
WilsonKernels<Impl>::DiracOptGenericDhopSiteDag(st,lo,U,buf,sF,sU,in,out);
|
||||
sF++;
|
||||
}
|
||||
sU++;
|
||||
}
|
||||
}
|
||||
|
||||
void DiracOptDhopDir(StencilImpl &st, DoubledGaugeField &U,SiteHalfSpinor * buf,
|
||||
int sF, int sU, const FermionField &in, FermionField &out, int dirdisp, int gamma);
|
||||
|
||||
private:
|
||||
// Specialised variants
|
||||
void DiracOptGenericDhopSite(StencilImpl &st, LebesgueOrder &lo, DoubledGaugeField &U, SiteHalfSpinor * buf,
|
||||
int sF, int sU, const FermionField &in, FermionField &out);
|
||||
|
||||
void DiracOptGenericDhopSiteDag(StencilImpl &st, LebesgueOrder &lo, DoubledGaugeField &U, SiteHalfSpinor * buf,
|
||||
int sF, int sU, const FermionField &in, FermionField &out);
|
||||
|
||||
void DiracOptAsmDhopSite(StencilImpl &st, LebesgueOrder &lo, DoubledGaugeField &U, SiteHalfSpinor * buf,
|
||||
int sF, int sU, int Ls, int Ns, const FermionField &in,FermionField &out);
|
||||
|
||||
void DiracOptAsmDhopSiteDag(StencilImpl &st, LebesgueOrder &lo, DoubledGaugeField &U, SiteHalfSpinor * buf,
|
||||
int sF, int sU, int Ls, int Ns, const FermionField &in, FermionField &out);
|
||||
|
||||
void DiracOptHandDhopSite(StencilImpl &st, LebesgueOrder &lo, DoubledGaugeField &U, SiteHalfSpinor * buf,
|
||||
int sF, int sU, const FermionField &in, FermionField &out);
|
||||
|
||||
void DiracOptHandDhopSiteDag(StencilImpl &st, LebesgueOrder &lo, DoubledGaugeField &U, SiteHalfSpinor * buf,
|
||||
int sF, int sU, const FermionField &in, FermionField &out);
|
||||
|
||||
public:
|
||||
|
||||
WilsonKernels(const ImplParams &p = ImplParams());
|
||||
|
||||
};
|
||||
|
||||
}}
|
||||
|
||||
#endif
|
||||
|
@ -10,6 +10,7 @@
|
||||
|
||||
Author: Peter Boyle <paboyle@ph.ed.ac.uk>
|
||||
Author: paboyle <paboyle@ph.ed.ac.uk>
|
||||
Author: Guido Cossu <guido.cossu@ed.ac.uk>
|
||||
|
||||
This program is free software; you can redistribute it and/or modify
|
||||
it under the terms of the GNU General Public License as published by
|
||||
@ -33,48 +34,46 @@ Author: paboyle <paboyle@ph.ed.ac.uk>
|
||||
|
||||
|
||||
namespace Grid {
|
||||
namespace QCD {
|
||||
namespace QCD {
|
||||
|
||||
///////////////////////////////////////////////////////////
|
||||
// Default to no assembler implementation
|
||||
///////////////////////////////////////////////////////////
|
||||
template<class Impl>
|
||||
void WilsonKernels<Impl >::DiracOptAsmDhopSite(StencilImpl &st,LebesgueOrder & lo,DoubledGaugeField &U,
|
||||
std::vector<SiteHalfSpinor,alignedAllocator<SiteHalfSpinor> > &buf,
|
||||
int ss,int ssU,int Ls,int Ns,const FermionField &in, FermionField &out)
|
||||
{
|
||||
assert(0);
|
||||
}
|
||||
template<class Impl>
|
||||
void WilsonKernels<Impl >::DiracOptAsmDhopSiteDag(StencilImpl &st,LebesgueOrder & lo,DoubledGaugeField &U,
|
||||
std::vector<SiteHalfSpinor,alignedAllocator<SiteHalfSpinor> > &buf,
|
||||
int ss,int ssU,int Ls,int Ns,const FermionField &in, FermionField &out)
|
||||
{
|
||||
assert(0);
|
||||
}
|
||||
|
||||
///////////////////////////////////////////////////////////
|
||||
// Default to no assembler implementation
|
||||
///////////////////////////////////////////////////////////
|
||||
template<class Impl> void
|
||||
WilsonKernels<Impl >::DiracOptAsmDhopSite(StencilImpl &st,LebesgueOrder & lo,DoubledGaugeField &U,SiteHalfSpinor *buf,
|
||||
int ss,int ssU,int Ls,int Ns,const FermionField &in, FermionField &out)
|
||||
{
|
||||
assert(0);
|
||||
}
|
||||
|
||||
template<class Impl> void
|
||||
WilsonKernels<Impl >::DiracOptAsmDhopSiteDag(StencilImpl &st,LebesgueOrder & lo,DoubledGaugeField &U,SiteHalfSpinor *buf,
|
||||
int ss,int ssU,int Ls,int Ns,const FermionField &in, FermionField &out)
|
||||
{
|
||||
assert(0);
|
||||
}
|
||||
|
||||
#if defined(AVX512)
|
||||
|
||||
|
||||
#include <simd/Intel512wilson.h>
|
||||
|
||||
///////////////////////////////////////////////////////////
|
||||
// If we are AVX512 specialise the single precision routine
|
||||
///////////////////////////////////////////////////////////
|
||||
|
||||
#include <simd/Intel512wilson.h>
|
||||
|
||||
#include <simd/Intel512single.h>
|
||||
|
||||
static Vector<vComplexF> signs;
|
||||
|
||||
int setupSigns(void ){
|
||||
Vector<vComplexF> bother(2);
|
||||
signs = bother;
|
||||
vrsign(signs[0]);
|
||||
visign(signs[1]);
|
||||
return 1;
|
||||
}
|
||||
static int signInit = setupSigns();
|
||||
static Vector<vComplexF> signsF;
|
||||
|
||||
template<typename vtype>
|
||||
int setupSigns(Vector<vtype>& signs ){
|
||||
Vector<vtype> bother(2);
|
||||
signs = bother;
|
||||
vrsign(signs[0]);
|
||||
visign(signs[1]);
|
||||
return 1;
|
||||
}
|
||||
|
||||
static int signInitF = setupSigns(signsF);
|
||||
|
||||
#define label(A) ilabel(A)
|
||||
#define ilabel(A) ".globl\n" #A ":\n"
|
||||
@ -82,19 +81,19 @@ namespace Grid {
|
||||
#define MAYBEPERM(A,perm) if (perm) { A ; }
|
||||
#define MULT_2SPIN(ptr,pf) MULT_ADDSUB_2SPIN(ptr,pf)
|
||||
#define FX(A) WILSONASM_ ##A
|
||||
#define COMPLEX_TYPE vComplexF
|
||||
#define signs signsF
|
||||
|
||||
#undef KERNEL_DAG
|
||||
template<>
|
||||
void WilsonKernels<WilsonImplF>::DiracOptAsmDhopSite(StencilImpl &st,LebesgueOrder & lo,DoubledGaugeField &U,
|
||||
std::vector<SiteHalfSpinor,alignedAllocator<SiteHalfSpinor> > &buf,
|
||||
int ss,int ssU,int Ls,int Ns,const FermionField &in, FermionField &out)
|
||||
template<> void
|
||||
WilsonKernels<WilsonImplF>::DiracOptAsmDhopSite(StencilImpl &st,LebesgueOrder & lo,DoubledGaugeField &U, SiteHalfSpinor *buf,
|
||||
int ss,int ssU,int Ls,int Ns,const FermionField &in, FermionField &out)
|
||||
#include <qcd/action/fermion/WilsonKernelsAsmBody.h>
|
||||
|
||||
#define KERNEL_DAG
|
||||
template<>
|
||||
void WilsonKernels<WilsonImplF>::DiracOptAsmDhopSiteDag(StencilImpl &st,LebesgueOrder & lo,DoubledGaugeField &U,
|
||||
std::vector<SiteHalfSpinor,alignedAllocator<SiteHalfSpinor> > &buf,
|
||||
int ss,int ssU,int Ls,int Ns,const FermionField &in, FermionField &out)
|
||||
template<> void
|
||||
WilsonKernels<WilsonImplF>::DiracOptAsmDhopSiteDag(StencilImpl &st,LebesgueOrder & lo,DoubledGaugeField &U,SiteHalfSpinor *buf,
|
||||
int ss,int ssU,int Ls,int Ns,const FermionField &in, FermionField &out)
|
||||
#include <qcd/action/fermion/WilsonKernelsAsmBody.h>
|
||||
|
||||
#undef VMOVIDUP
|
||||
@ -104,36 +103,94 @@ namespace Grid {
|
||||
#undef FX
|
||||
#define FX(A) DWFASM_ ## A
|
||||
#define MAYBEPERM(A,B)
|
||||
#define VMOVIDUP(A,B,C) VBCASTIDUPf(A,B,C)
|
||||
#define VMOVRDUP(A,B,C) VBCASTRDUPf(A,B,C)
|
||||
//#define VMOVIDUP(A,B,C) VBCASTIDUPf(A,B,C)
|
||||
//#define VMOVRDUP(A,B,C) VBCASTRDUPf(A,B,C)
|
||||
#define MULT_2SPIN(ptr,pf) MULT_ADDSUB_2SPIN_LS(ptr,pf)
|
||||
|
||||
#undef KERNEL_DAG
|
||||
template<>
|
||||
void WilsonKernels<DomainWallVec5dImplF>::DiracOptAsmDhopSite(StencilImpl &st,LebesgueOrder & lo,DoubledGaugeField &U,
|
||||
std::vector<SiteHalfSpinor,alignedAllocator<SiteHalfSpinor> > &buf,
|
||||
int ss,int ssU,int Ls,int Ns,const FermionField &in, FermionField &out)
|
||||
template<> void
|
||||
WilsonKernels<DomainWallVec5dImplF>::DiracOptAsmDhopSite(StencilImpl &st,LebesgueOrder & lo,DoubledGaugeField &U, SiteHalfSpinor *buf,
|
||||
int ss,int ssU,int Ls,int Ns,const FermionField &in, FermionField &out)
|
||||
#include <qcd/action/fermion/WilsonKernelsAsmBody.h>
|
||||
|
||||
#define KERNEL_DAG
|
||||
template<>
|
||||
void WilsonKernels<DomainWallVec5dImplF>::DiracOptAsmDhopSiteDag(StencilImpl &st,LebesgueOrder & lo,DoubledGaugeField &U,
|
||||
std::vector<SiteHalfSpinor,alignedAllocator<SiteHalfSpinor> > &buf,
|
||||
int ss,int ssU,int Ls,int Ns,const FermionField &in, FermionField &out)
|
||||
template<> void
|
||||
WilsonKernels<DomainWallVec5dImplF>::DiracOptAsmDhopSiteDag(StencilImpl &st,LebesgueOrder & lo,DoubledGaugeField &U,SiteHalfSpinor *buf,
|
||||
int ss,int ssU,int Ls,int Ns,const FermionField &in, FermionField &out)
|
||||
#include <qcd/action/fermion/WilsonKernelsAsmBody.h>
|
||||
#undef COMPLEX_TYPE
|
||||
#undef signs
|
||||
#undef VMOVRDUP
|
||||
#undef MAYBEPERM
|
||||
#undef MULT_2SPIN
|
||||
#undef FX
|
||||
|
||||
///////////////////////////////////////////////////////////
|
||||
// If we are AVX512 specialise the double precision routine
|
||||
///////////////////////////////////////////////////////////
|
||||
|
||||
#include <simd/Intel512double.h>
|
||||
|
||||
static Vector<vComplexD> signsD;
|
||||
#define signs signsD
|
||||
static int signInitD = setupSigns(signsD);
|
||||
|
||||
#define MAYBEPERM(A,perm) if (perm) { A ; }
|
||||
#define MULT_2SPIN(ptr,pf) MULT_ADDSUB_2SPIN(ptr,pf)
|
||||
#define FX(A) WILSONASM_ ##A
|
||||
#define COMPLEX_TYPE vComplexD
|
||||
|
||||
#undef KERNEL_DAG
|
||||
template<> void
|
||||
WilsonKernels<WilsonImplD>::DiracOptAsmDhopSite(StencilImpl &st,LebesgueOrder & lo,DoubledGaugeField &U, SiteHalfSpinor *buf,
|
||||
int ss,int ssU,int Ls,int Ns,const FermionField &in, FermionField &out)
|
||||
#include <qcd/action/fermion/WilsonKernelsAsmBody.h>
|
||||
|
||||
#define KERNEL_DAG
|
||||
template<> void
|
||||
WilsonKernels<WilsonImplD>::DiracOptAsmDhopSiteDag(StencilImpl &st,LebesgueOrder & lo,DoubledGaugeField &U,SiteHalfSpinor *buf,
|
||||
int ss,int ssU,int Ls,int Ns,const FermionField &in, FermionField &out)
|
||||
#include <qcd/action/fermion/WilsonKernelsAsmBody.h>
|
||||
|
||||
#endif
|
||||
#undef VMOVIDUP
|
||||
#undef VMOVRDUP
|
||||
#undef MAYBEPERM
|
||||
#undef MULT_2SPIN
|
||||
#undef FX
|
||||
#define FX(A) DWFASM_ ## A
|
||||
#define MAYBEPERM(A,B)
|
||||
//#define VMOVIDUP(A,B,C) VBCASTIDUPd(A,B,C)
|
||||
//#define VMOVRDUP(A,B,C) VBCASTRDUPd(A,B,C)
|
||||
#define MULT_2SPIN(ptr,pf) MULT_ADDSUB_2SPIN_LS(ptr,pf)
|
||||
|
||||
#undef KERNEL_DAG
|
||||
template<> void
|
||||
WilsonKernels<DomainWallVec5dImplD>::DiracOptAsmDhopSite(StencilImpl &st,LebesgueOrder & lo,DoubledGaugeField &U, SiteHalfSpinor *buf,
|
||||
int ss,int ssU,int Ls,int Ns,const FermionField &in, FermionField &out)
|
||||
#include <qcd/action/fermion/WilsonKernelsAsmBody.h>
|
||||
|
||||
#define KERNEL_DAG
|
||||
template<> void
|
||||
WilsonKernels<DomainWallVec5dImplD>::DiracOptAsmDhopSiteDag(StencilImpl &st,LebesgueOrder & lo,DoubledGaugeField &U,SiteHalfSpinor *buf,
|
||||
int ss,int ssU,int Ls,int Ns,const FermionField &in, FermionField &out)
|
||||
#include <qcd/action/fermion/WilsonKernelsAsmBody.h>
|
||||
|
||||
#undef COMPLEX_TYPE
|
||||
#undef signs
|
||||
#undef VMOVRDUP
|
||||
#undef MAYBEPERM
|
||||
#undef MULT_2SPIN
|
||||
#undef FX
|
||||
|
||||
#endif //AVX512
|
||||
|
||||
#define INSTANTIATE_ASM(A)\
|
||||
template void WilsonKernels<A>::DiracOptAsmDhopSite(StencilImpl &st,LebesgueOrder & lo,DoubledGaugeField &U,\
|
||||
std::vector<SiteHalfSpinor,alignedAllocator<SiteHalfSpinor> > &buf,\
|
||||
template void WilsonKernels<A>::DiracOptAsmDhopSite(StencilImpl &st,LebesgueOrder & lo,DoubledGaugeField &U, SiteHalfSpinor *buf,\
|
||||
int ss,int ssU,int Ls,int Ns,const FermionField &in, FermionField &out);\
|
||||
template void WilsonKernels<A>::DiracOptAsmDhopSiteDag(StencilImpl &st,LebesgueOrder & lo,DoubledGaugeField &U,\
|
||||
std::vector<SiteHalfSpinor,alignedAllocator<SiteHalfSpinor> > &buf,\
|
||||
\
|
||||
template void WilsonKernels<A>::DiracOptAsmDhopSiteDag(StencilImpl &st,LebesgueOrder & lo,DoubledGaugeField &U, SiteHalfSpinor *buf,\
|
||||
int ss,int ssU,int Ls,int Ns,const FermionField &in, FermionField &out);\
|
||||
|
||||
|
||||
INSTANTIATE_ASM(WilsonImplF);
|
||||
INSTANTIATE_ASM(WilsonImplD);
|
||||
INSTANTIATE_ASM(ZWilsonImplF);
|
||||
@ -144,6 +201,6 @@ INSTANTIATE_ASM(DomainWallVec5dImplF);
|
||||
INSTANTIATE_ASM(DomainWallVec5dImplD);
|
||||
INSTANTIATE_ASM(ZDomainWallVec5dImplF);
|
||||
INSTANTIATE_ASM(ZDomainWallVec5dImplD);
|
||||
}
|
||||
}
|
||||
|
||||
}}
|
||||
|
||||
|
@ -5,7 +5,9 @@
|
||||
const uint64_t plocal =(uint64_t) & in._odata[0];
|
||||
|
||||
// vComplexF isigns[2] = { signs[0], signs[1] };
|
||||
vComplexF *isigns = &signs[0];
|
||||
//COMPLEX_TYPE is vComplexF or vComplexD depending
|
||||
//on the chosen precision
|
||||
COMPLEX_TYPE *isigns = &signs[0];
|
||||
|
||||
MASK_REGS;
|
||||
int nmax=U._grid->oSites();
|
||||
@ -134,7 +136,9 @@
|
||||
////////////////////////////////
|
||||
// Xm
|
||||
////////////////////////////////
|
||||
#ifndef STREAM_STORE
|
||||
basep= (uint64_t) &out._odata[ss];
|
||||
#endif
|
||||
// basep= st.GetPFInfo(nent,plocal); nent++;
|
||||
if ( local ) {
|
||||
LOAD64(%r10,isigns); // times i => shuffle and xor the real part sign bit
|
||||
@ -229,7 +233,9 @@
|
||||
LOAD_CHI(base);
|
||||
}
|
||||
base= (uint64_t) &out._odata[ss];
|
||||
#ifndef STREAM_STORE
|
||||
PREFETCH_CHIMU(base);
|
||||
#endif
|
||||
{
|
||||
MULT_2SPIN_DIR_PFTM(Tm,basep);
|
||||
}
|
||||
|
@ -311,10 +311,9 @@ namespace Grid {
|
||||
namespace QCD {
|
||||
|
||||
|
||||
template<class Impl>
|
||||
void WilsonKernels<Impl>::DiracOptHandDhopSite(StencilImpl &st,LebesgueOrder &lo,DoubledGaugeField &U,
|
||||
std::vector<SiteHalfSpinor,alignedAllocator<SiteHalfSpinor> > &buf,
|
||||
int ss,int sU,const FermionField &in, FermionField &out)
|
||||
template<class Impl> void
|
||||
WilsonKernels<Impl>::DiracOptHandDhopSite(StencilImpl &st,LebesgueOrder &lo,DoubledGaugeField &U,SiteHalfSpinor *buf,
|
||||
int ss,int sU,const FermionField &in, FermionField &out)
|
||||
{
|
||||
typedef typename Simd::scalar_type S;
|
||||
typedef typename Simd::vector_type V;
|
||||
@ -554,10 +553,9 @@ namespace QCD {
|
||||
}
|
||||
}
|
||||
|
||||
template<class Impl>
|
||||
void WilsonKernels<Impl>::DiracOptHandDhopSiteDag(StencilImpl &st,LebesgueOrder &lo,DoubledGaugeField &U,
|
||||
std::vector<SiteHalfSpinor,alignedAllocator<SiteHalfSpinor> > &buf,
|
||||
int ss,int sU,const FermionField &in, FermionField &out)
|
||||
template<class Impl>
|
||||
void WilsonKernels<Impl>::DiracOptHandDhopSiteDag(StencilImpl &st,LebesgueOrder &lo,DoubledGaugeField &U,SiteHalfSpinor *buf,
|
||||
int ss,int sU,const FermionField &in, FermionField &out)
|
||||
{
|
||||
// std::cout << "Hand op Dhop "<<std::endl;
|
||||
typedef typename Simd::scalar_type S;
|
||||
@ -798,38 +796,35 @@ namespace QCD {
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
////////////////////////////////////////////////
|
||||
// Specialise Gparity to simple implementation
|
||||
////////////////////////////////////////////////
|
||||
template<>
|
||||
void WilsonKernels<GparityWilsonImplF>::DiracOptHandDhopSite(StencilImpl &st,LebesgueOrder &lo,DoubledGaugeField &U,
|
||||
std::vector<SiteHalfSpinor,alignedAllocator<SiteHalfSpinor> > &buf,
|
||||
int sF,int sU,const FermionField &in, FermionField &out)
|
||||
template<> void
|
||||
WilsonKernels<GparityWilsonImplF>::DiracOptHandDhopSite(StencilImpl &st,LebesgueOrder &lo,DoubledGaugeField &U,
|
||||
SiteHalfSpinor *buf,
|
||||
int sF,int sU,const FermionField &in, FermionField &out)
|
||||
{
|
||||
assert(0);
|
||||
}
|
||||
|
||||
template<>
|
||||
void WilsonKernels<GparityWilsonImplF>::DiracOptHandDhopSiteDag(StencilImpl &st,LebesgueOrder &lo,DoubledGaugeField &U,
|
||||
std::vector<SiteHalfSpinor,alignedAllocator<SiteHalfSpinor> > &buf,
|
||||
int sF,int sU,const FermionField &in, FermionField &out)
|
||||
template<> void
|
||||
WilsonKernels<GparityWilsonImplF>::DiracOptHandDhopSiteDag(StencilImpl &st,LebesgueOrder &lo,DoubledGaugeField &U,
|
||||
SiteHalfSpinor *buf,
|
||||
int sF,int sU,const FermionField &in, FermionField &out)
|
||||
{
|
||||
assert(0);
|
||||
}
|
||||
|
||||
template<>
|
||||
void WilsonKernels<GparityWilsonImplD>::DiracOptHandDhopSite(StencilImpl &st,LebesgueOrder &lo,DoubledGaugeField &U,
|
||||
std::vector<SiteHalfSpinor,alignedAllocator<SiteHalfSpinor> > &buf,
|
||||
int sF,int sU,const FermionField &in, FermionField &out)
|
||||
template<> void
|
||||
WilsonKernels<GparityWilsonImplD>::DiracOptHandDhopSite(StencilImpl &st,LebesgueOrder &lo,DoubledGaugeField &U,SiteHalfSpinor *buf,
|
||||
int sF,int sU,const FermionField &in, FermionField &out)
|
||||
{
|
||||
assert(0);
|
||||
}
|
||||
|
||||
template<>
|
||||
void WilsonKernels<GparityWilsonImplD>::DiracOptHandDhopSiteDag(StencilImpl &st,LebesgueOrder &lo,DoubledGaugeField &U,
|
||||
std::vector<SiteHalfSpinor,alignedAllocator<SiteHalfSpinor> > &buf,
|
||||
int sF,int sU,const FermionField &in, FermionField &out)
|
||||
template<> void
|
||||
WilsonKernels<GparityWilsonImplD>::DiracOptHandDhopSiteDag(StencilImpl &st,LebesgueOrder &lo,DoubledGaugeField &U,SiteHalfSpinor *buf,
|
||||
int sF,int sU,const FermionField &in, FermionField &out)
|
||||
{
|
||||
assert(0);
|
||||
}
|
||||
@ -840,12 +835,10 @@ void WilsonKernels<GparityWilsonImplD>::DiracOptHandDhopSiteDag(StencilImpl &st,
|
||||
// Need Nc=3 though //
|
||||
|
||||
#define INSTANTIATE_THEM(A) \
|
||||
template void WilsonKernels<A>::DiracOptHandDhopSite(StencilImpl &st,LebesgueOrder &lo,DoubledGaugeField &U,\
|
||||
std::vector<SiteHalfSpinor,alignedAllocator<SiteHalfSpinor> > &buf,\
|
||||
int ss,int sU,const FermionField &in, FermionField &out);\
|
||||
template void WilsonKernels<A>::DiracOptHandDhopSiteDag(StencilImpl &st,LebesgueOrder &lo,DoubledGaugeField &U,\
|
||||
std::vector<SiteHalfSpinor,alignedAllocator<SiteHalfSpinor> > &buf,\
|
||||
int ss,int sU,const FermionField &in, FermionField &out);
|
||||
template void WilsonKernels<A>::DiracOptHandDhopSite(StencilImpl &st,LebesgueOrder &lo,DoubledGaugeField &U,SiteHalfSpinor *buf,\
|
||||
int ss,int sU,const FermionField &in, FermionField &out); \
|
||||
template void WilsonKernels<A>::DiracOptHandDhopSiteDag(StencilImpl &st,LebesgueOrder &lo,DoubledGaugeField &U,SiteHalfSpinor *buf,\
|
||||
int ss,int sU,const FermionField &in, FermionField &out);
|
||||
|
||||
INSTANTIATE_THEM(WilsonImplF);
|
||||
INSTANTIATE_THEM(WilsonImplD);
|
||||
|
@ -116,7 +116,7 @@ class NerscHmcRunnerTemplate {
|
||||
NoSmearing<Gimpl> SmearingPolicy;
|
||||
typedef MinimumNorm2<GaugeField, NoSmearing<Gimpl>, RepresentationsPolicy >
|
||||
IntegratorType; // change here to change the algorithm
|
||||
IntegratorParameters MDpar(20, 1.0);
|
||||
IntegratorParameters MDpar(40, 1.0);
|
||||
IntegratorType MDynamics(UGrid, MDpar, TheAction, SmearingPolicy);
|
||||
|
||||
// Checkpoint strategy
|
||||
|
@ -39,8 +39,8 @@ namespace QCD{
|
||||
//on the 5d (rb4d) checkerboarded lattices
|
||||
////////////////////////////////////////////////////////////////////////
|
||||
|
||||
template<class vobj>
|
||||
void axpibg5x(Lattice<vobj> &z,const Lattice<vobj> &x,RealD a,RealD b)
|
||||
template<class vobj,class Coeff>
|
||||
void axpibg5x(Lattice<vobj> &z,const Lattice<vobj> &x,Coeff a,Coeff b)
|
||||
{
|
||||
z.checkerboard = x.checkerboard;
|
||||
conformable(x,z);
|
||||
@ -57,8 +57,8 @@ PARALLEL_FOR_LOOP
|
||||
}
|
||||
}
|
||||
|
||||
template<class vobj>
|
||||
void axpby_ssp(Lattice<vobj> &z, RealD a,const Lattice<vobj> &x,RealD b,const Lattice<vobj> &y,int s,int sp)
|
||||
template<class vobj,class Coeff>
|
||||
void axpby_ssp(Lattice<vobj> &z, Coeff a,const Lattice<vobj> &x,Coeff b,const Lattice<vobj> &y,int s,int sp)
|
||||
{
|
||||
z.checkerboard = x.checkerboard;
|
||||
conformable(x,y);
|
||||
@ -72,8 +72,8 @@ PARALLEL_FOR_LOOP
|
||||
}
|
||||
}
|
||||
|
||||
template<class vobj>
|
||||
void ag5xpby_ssp(Lattice<vobj> &z,RealD a,const Lattice<vobj> &x,RealD b,const Lattice<vobj> &y,int s,int sp)
|
||||
template<class vobj,class Coeff>
|
||||
void ag5xpby_ssp(Lattice<vobj> &z,Coeff a,const Lattice<vobj> &x,Coeff b,const Lattice<vobj> &y,int s,int sp)
|
||||
{
|
||||
z.checkerboard = x.checkerboard;
|
||||
conformable(x,y);
|
||||
@ -90,8 +90,8 @@ PARALLEL_FOR_LOOP
|
||||
}
|
||||
}
|
||||
|
||||
template<class vobj>
|
||||
void axpbg5y_ssp(Lattice<vobj> &z,RealD a,const Lattice<vobj> &x,RealD b,const Lattice<vobj> &y,int s,int sp)
|
||||
template<class vobj,class Coeff>
|
||||
void axpbg5y_ssp(Lattice<vobj> &z,Coeff a,const Lattice<vobj> &x,Coeff b,const Lattice<vobj> &y,int s,int sp)
|
||||
{
|
||||
z.checkerboard = x.checkerboard;
|
||||
conformable(x,y);
|
||||
@ -108,8 +108,8 @@ PARALLEL_FOR_LOOP
|
||||
}
|
||||
}
|
||||
|
||||
template<class vobj>
|
||||
void ag5xpbg5y_ssp(Lattice<vobj> &z,RealD a,const Lattice<vobj> &x,RealD b,const Lattice<vobj> &y,int s,int sp)
|
||||
template<class vobj,class Coeff>
|
||||
void ag5xpbg5y_ssp(Lattice<vobj> &z,Coeff a,const Lattice<vobj> &x,Coeff b,const Lattice<vobj> &y,int s,int sp)
|
||||
{
|
||||
z.checkerboard = x.checkerboard;
|
||||
conformable(x,y);
|
||||
@ -127,8 +127,8 @@ PARALLEL_FOR_LOOP
|
||||
}
|
||||
}
|
||||
|
||||
template<class vobj>
|
||||
void axpby_ssp_pminus(Lattice<vobj> &z,RealD a,const Lattice<vobj> &x,RealD b,const Lattice<vobj> &y,int s,int sp)
|
||||
template<class vobj,class Coeff>
|
||||
void axpby_ssp_pminus(Lattice<vobj> &z,Coeff a,const Lattice<vobj> &x,Coeff b,const Lattice<vobj> &y,int s,int sp)
|
||||
{
|
||||
z.checkerboard = x.checkerboard;
|
||||
conformable(x,y);
|
||||
@ -144,8 +144,8 @@ PARALLEL_FOR_LOOP
|
||||
}
|
||||
}
|
||||
|
||||
template<class vobj>
|
||||
void axpby_ssp_pplus(Lattice<vobj> &z,RealD a,const Lattice<vobj> &x,RealD b,const Lattice<vobj> &y,int s,int sp)
|
||||
template<class vobj,class Coeff>
|
||||
void axpby_ssp_pplus(Lattice<vobj> &z,Coeff a,const Lattice<vobj> &x,Coeff b,const Lattice<vobj> &y,int s,int sp)
|
||||
{
|
||||
z.checkerboard = x.checkerboard;
|
||||
conformable(x,y);
|
||||
|
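Generalising the coefficient type from RealD to a template parameter lets these s-slice kernels take complex coefficients as well as real ones. A hedged usage sketch, assuming a five-dimensional grid FGrid and that axpby_ssp writes only the s-th slice of its output field:

// Sketch: chi_{s=2}(x) = a * psi_{s=2}(x) + b * psi_{s=3}(x), now with complex coefficients.
LatticeFermion psi(FGrid), chi(FGrid);     // FGrid: the 5d grid (assumption)
ComplexD a(1.0, 0.0), b(-0.5, 0.25);
axpby_ssp(chi, a, psi, b, psi, 2, 3);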
@ -674,6 +674,37 @@ class SU {
|
||||
out += la;
|
||||
}
|
||||
}
|
||||
  /*
    add GaugeTrans
  */

  template<typename GaugeField,typename GaugeMat>
  static void GaugeTransform( GaugeField &Umu, GaugeMat &g){
    GridBase *grid = Umu._grid;
    conformable(grid,g._grid);

    GaugeMat U(grid);
    GaugeMat ag(grid); ag = adj(g);

    for(int mu=0;mu<Nd;mu++){
      U = PeekIndex<LorentzIndex>(Umu,mu);
      U = g*U*Cshift(ag, mu, 1);
      PokeIndex<LorentzIndex>(Umu,U,mu);
    }
  }
  template<typename GaugeMat>
  static void GaugeTransform( std::vector<GaugeMat> &U, GaugeMat &g){
    GridBase *grid = g._grid;
    GaugeMat ag(grid); ag = adj(g);
    for(int mu=0;mu<Nd;mu++){
      U[mu] = g*U[mu]*Cshift(ag, mu, 1);
    }
  }
  template<typename GaugeField,typename GaugeMat>
  static void RandomGaugeTransform(GridParallelRNG &pRNG, GaugeField &Umu, GaugeMat &g){
    LieRandomize(pRNG,g,1.0);
    GaugeTransform(Umu,g);
  }
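A usage sketch of the new helpers: the average plaquette is gauge invariant, so it should be unchanged to rounding under a random gauge transformation. This assumes the usual SU3 and WilsonLoops aliases and an already-initialised parallel RNG; it is a check one might write, not code from this commit:

LatticeGaugeField   Umu(grid);
LatticeColourMatrix g(grid);

SU3::HotConfiguration(pRNG, Umu);
RealD plaq0 = WilsonLoops<PeriodicGimplR>::avgPlaquette(Umu);

SU3::RandomGaugeTransform(pRNG, Umu, g);   // U_mu(x) <- g(x) U_mu(x) g^dag(x+mu), g random
RealD plaq1 = WilsonLoops<PeriodicGimplR>::avgPlaquette(Umu);
// plaq0 and plaq1 should agree to machine precision.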
|
||||
|
||||
// Projects out the algebra components of a lattice matrix (of dimension ncol*ncol - 1)
|
||||
// inverse operation: FundamentalLieAlgebraMatrix
|
||||
@ -702,23 +733,33 @@ class SU {
|
||||
PokeIndex<LorentzIndex>(out, Umu, mu);
|
||||
}
|
||||
}
|
||||
static void TepidConfiguration(GridParallelRNG &pRNG,
|
||||
LatticeGaugeField &out) {
|
||||
LatticeMatrix Umu(out._grid);
|
||||
for (int mu = 0; mu < Nd; mu++) {
|
||||
LieRandomize(pRNG, Umu, 0.01);
|
||||
PokeIndex<LorentzIndex>(out, Umu, mu);
|
||||
template<typename GaugeField>
|
||||
static void TepidConfiguration(GridParallelRNG &pRNG,GaugeField &out){
|
||||
typedef typename GaugeField::vector_type vector_type;
|
||||
typedef iSUnMatrix<vector_type> vMatrixType;
|
||||
typedef Lattice<vMatrixType> LatticeMatrixType;
|
||||
|
||||
LatticeMatrixType Umu(out._grid);
|
||||
for(int mu=0;mu<Nd;mu++){
|
||||
LieRandomize(pRNG,Umu,0.01);
|
||||
PokeIndex<LorentzIndex>(out,Umu,mu);
|
||||
}
|
||||
}
|
||||
static void ColdConfiguration(GridParallelRNG &pRNG, LatticeGaugeField &out) {
|
||||
LatticeMatrix Umu(out._grid);
|
||||
Umu = 1.0;
|
||||
for (int mu = 0; mu < Nd; mu++) {
|
||||
PokeIndex<LorentzIndex>(out, Umu, mu);
|
||||
template<typename GaugeField>
|
||||
static void ColdConfiguration(GridParallelRNG &pRNG,GaugeField &out){
|
||||
typedef typename GaugeField::vector_type vector_type;
|
||||
typedef iSUnMatrix<vector_type> vMatrixType;
|
||||
typedef Lattice<vMatrixType> LatticeMatrixType;
|
||||
|
||||
LatticeMatrixType Umu(out._grid);
|
||||
Umu=1.0;
|
||||
for(int mu=0;mu<Nd;mu++){
|
||||
PokeIndex<LorentzIndex>(out,Umu,mu);
|
||||
}
|
||||
}
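Templating these start-up configurations over the gauge field type keeps the familiar call sites working; a brief usage sketch with the SU3 alias, parameter names as declared above:

LatticeGaugeField Umu(UGrid);
SU3::ColdConfiguration(pRNG, Umu);    // every link set to the unit matrix (free-field start)
SU3::TepidConfiguration(pRNG, Umu);   // small random algebra fluctuations (spread 0.01)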
|
||||
|
||||
static void taProj(const LatticeMatrix &in, LatticeMatrix &out) {
|
||||
template<typename LatticeMatrixType>
|
||||
static void taProj( const LatticeMatrixType &in, LatticeMatrixType &out){
|
||||
out = Ta(in);
|
||||
}
|
||||
template <typename LatticeMatrixType>
|
||||
|
@ -522,4 +522,4 @@ typedef WilsonLoops<PeriodicGimplR> SU3WilsonLoops;
|
||||
}
|
||||
}
|
||||
|
||||
#endif
|
||||
#endif
|
||||
|
@ -365,6 +365,18 @@ namespace Optimization {
|
||||
}
|
||||
};
|
||||
|
||||
  struct Div{
    // Real float
    inline __m256 operator()(__m256 a, __m256 b){
      return _mm256_div_ps(a,b);
    }
    // Real double
    inline __m256d operator()(__m256d a, __m256d b){
      return _mm256_div_pd(a,b);
    }
  };
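The new functor is wired up as DivSIMD further down, giving lane-wise division on the AVX vector types; a minimal direct check of what it computes:

__m256 a = _mm256_set1_ps(6.0f);
__m256 b = _mm256_set1_ps(2.0f);
__m256 c = Optimization::Div()(a, b);   // every lane of c is 3.0f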
|
||||
|
||||
|
||||
struct Conj{
|
||||
// Complex single
|
||||
inline __m256 operator()(__m256 in){
|
||||
@ -437,14 +449,13 @@ namespace Optimization {
|
||||
|
||||
};
|
||||
|
||||
#if defined (AVX2) || defined (AVXFMA4)
|
||||
#define _mm256_alignr_epi32(ret,a,b,n) ret=(__m256) _mm256_alignr_epi8((__m256i)a,(__m256i)b,(n*4)%16)
|
||||
#define _mm256_alignr_epi64(ret,a,b,n) ret=(__m256d) _mm256_alignr_epi8((__m256i)a,(__m256i)b,(n*8)%16)
|
||||
#if defined (AVX2)
|
||||
#define _mm256_alignr_epi32_grid(ret,a,b,n) ret=(__m256) _mm256_alignr_epi8((__m256i)a,(__m256i)b,(n*4)%16)
|
||||
#define _mm256_alignr_epi64_grid(ret,a,b,n) ret=(__m256d) _mm256_alignr_epi8((__m256i)a,(__m256i)b,(n*8)%16)
|
||||
#endif
|
||||
|
||||
#if defined (AVX1)
|
||||
|
||||
#define _mm256_alignr_epi32(ret,a,b,n) { \
|
||||
#if defined (AVX1) || defined (AVXFMA)
|
||||
#define _mm256_alignr_epi32_grid(ret,a,b,n) { \
|
||||
__m128 aa, bb; \
|
||||
\
|
||||
aa = _mm256_extractf128_ps(a,1); \
|
||||
@ -458,7 +469,7 @@ namespace Optimization {
|
||||
ret = _mm256_insertf128_ps(ret,aa,0); \
|
||||
}
|
||||
|
||||
#define _mm256_alignr_epi64(ret,a,b,n) { \
|
||||
#define _mm256_alignr_epi64_grid(ret,a,b,n) { \
|
||||
__m128d aa, bb; \
|
||||
\
|
||||
aa = _mm256_extractf128_pd(a,1); \
|
||||
@ -474,19 +485,6 @@ namespace Optimization {
|
||||
|
||||
#endif
|
||||
|
||||
inline std::ostream & operator << (std::ostream& stream, const __m256 a)
|
||||
{
|
||||
const float *p=(const float *)&a;
|
||||
stream<< "{"<<p[0]<<","<<p[1]<<","<<p[2]<<","<<p[3]<<","<<p[4]<<","<<p[5]<<","<<p[6]<<","<<p[7]<<"}";
|
||||
return stream;
|
||||
};
|
||||
inline std::ostream & operator<< (std::ostream& stream, const __m256d a)
|
||||
{
|
||||
const double *p=(const double *)&a;
|
||||
stream<< "{"<<p[0]<<","<<p[1]<<","<<p[2]<<","<<p[3]<<"}";
|
||||
return stream;
|
||||
};
|
||||
|
||||
struct Rotate{
|
||||
|
||||
static inline __m256 rotate(__m256 in,int n){
|
||||
@ -518,11 +516,10 @@ namespace Optimization {
|
||||
__m256 tmp = Permute::Permute0(in);
|
||||
__m256 ret;
|
||||
if ( n > 3 ) {
|
||||
_mm256_alignr_epi32(ret,in,tmp,n);
|
||||
_mm256_alignr_epi32_grid(ret,in,tmp,n);
|
||||
} else {
|
||||
_mm256_alignr_epi32(ret,tmp,in,n);
|
||||
_mm256_alignr_epi32_grid(ret,tmp,in,n);
|
||||
}
|
||||
// std::cout << " align epi32 n=" <<n<<" in "<<tmp<<in<<" -> "<< ret <<std::endl;
|
||||
return ret;
|
||||
};
|
||||
|
||||
@ -531,18 +528,15 @@ namespace Optimization {
|
||||
__m256d tmp = Permute::Permute0(in);
|
||||
__m256d ret;
|
||||
if ( n > 1 ) {
|
||||
_mm256_alignr_epi64(ret,in,tmp,n);
|
||||
_mm256_alignr_epi64_grid(ret,in,tmp,n);
|
||||
} else {
|
||||
_mm256_alignr_epi64(ret,tmp,in,n);
|
||||
_mm256_alignr_epi64_grid(ret,tmp,in,n);
|
||||
}
|
||||
// std::cout << " align epi64 n=" <<n<<" in "<<tmp<<in<<" -> "<< ret <<std::endl;
|
||||
return ret;
|
||||
};
|
||||
|
||||
};
|
||||
|
||||
|
||||
|
||||
//Complex float Reduce
|
||||
template<>
|
||||
inline Grid::ComplexF Reduce<Grid::ComplexF, __m256>::operator()(__m256 in){
|
||||
@ -631,6 +625,7 @@ namespace Optimization {
|
||||
// Arithmetic operations
|
||||
typedef Optimization::Sum SumSIMD;
|
||||
typedef Optimization::Sub SubSIMD;
|
||||
typedef Optimization::Div DivSIMD;
|
||||
typedef Optimization::Mult MultSIMD;
|
||||
typedef Optimization::MultComplex MultComplexSIMD;
|
||||
typedef Optimization::Conj ConjSIMD;
|
||||
|
@ -32,6 +32,16 @@ Author: paboyle <paboyle@ph.ed.ac.uk>
|
||||
|
||||
namespace Grid{
|
||||
namespace Optimization {
|
||||
|
||||
union u512f {
|
||||
__m512 v;
|
||||
float f[16];
|
||||
};
|
||||
|
||||
union u512d {
|
||||
__m512d v;
|
||||
double f[8];
|
||||
};
|
||||
|
||||
struct Vsplat{
|
||||
//Complex float
|
||||
@ -221,6 +231,17 @@ namespace Optimization {
|
||||
}
|
||||
};
|
||||
|
||||
struct Div{
|
||||
// Real float
|
||||
inline __m512 operator()(__m512 a, __m512 b){
|
||||
return _mm512_div_ps(a,b);
|
||||
}
|
||||
// Real double
|
||||
inline __m512d operator()(__m512d a, __m512d b){
|
||||
return _mm512_div_pd(a,b);
|
||||
}
|
||||
};
|
||||
|
||||
|
||||
struct Conj{
|
||||
// Complex single
|
||||
@ -350,7 +371,67 @@ namespace Optimization {
|
||||
|
||||
//////////////////////////////////////////////
|
||||
// Some Template specialization
|
||||
|
||||
// Hack for CLANG until mm512_reduce_add_ps etc... are implemented in GCC and Clang releases
|
||||
#ifndef __INTEL_COMPILER
|
||||
#warning "Slow reduction due to incomplete reduce intrinsics"
|
||||
//Complex float Reduce
|
||||
template<>
|
||||
inline Grid::ComplexF Reduce<Grid::ComplexF, __m512>::operator()(__m512 in){
|
||||
__m512 v1,v2;
|
||||
v1=Optimization::Permute::Permute0(in); // avx 512; quad complex single
|
||||
v1= _mm512_add_ps(v1,in);
|
||||
v2=Optimization::Permute::Permute1(v1);
|
||||
v1 = _mm512_add_ps(v1,v2);
|
||||
v2=Optimization::Permute::Permute2(v1);
|
||||
v1 = _mm512_add_ps(v1,v2);
|
||||
u512f conv; conv.v = v1;
|
||||
return Grid::ComplexF(conv.f[0],conv.f[1]);
|
||||
}
|
||||
|
||||
//Real float Reduce
|
||||
template<>
|
||||
inline Grid::RealF Reduce<Grid::RealF, __m512>::operator()(__m512 in){
|
||||
__m512 v1,v2;
|
||||
v1 = Optimization::Permute::Permute0(in); // avx 512; octo-double
|
||||
v1 = _mm512_add_ps(v1,in);
|
||||
v2 = Optimization::Permute::Permute1(v1);
|
||||
v1 = _mm512_add_ps(v1,v2);
|
||||
v2 = Optimization::Permute::Permute2(v1);
|
||||
v1 = _mm512_add_ps(v1,v2);
|
||||
v2 = Optimization::Permute::Permute3(v1);
|
||||
v1 = _mm512_add_ps(v1,v2);
|
||||
u512f conv; conv.v=v1;
|
||||
return conv.f[0];
|
||||
}
|
||||
|
||||
|
||||
//Complex double Reduce
|
||||
template<>
|
||||
inline Grid::ComplexD Reduce<Grid::ComplexD, __m512d>::operator()(__m512d in){
|
||||
  __m512d v1,v2;
  v1 = Optimization::Permute::Permute0(in); // avx 512; quad complex double
  v1 = _mm512_add_pd(v1,in);
  v2 = Optimization::Permute::Permute1(v1);
  v1 = _mm512_add_pd(v1,v2);
  u512d conv; conv.v = v1;
  return Grid::ComplexD(conv.f[0],conv.f[1]);
}
|
||||
|
||||
//Real double Reduce
|
||||
template<>
|
||||
inline Grid::RealD Reduce<Grid::RealD, __m512d>::operator()(__m512d in){
|
||||
__m512d v1,v2;
|
||||
v1 = Optimization::Permute::Permute0(in); // avx 512; quad double
|
||||
v1 = _mm512_add_pd(v1,in);
|
||||
v2 = Optimization::Permute::Permute1(v1);
|
||||
v1 = _mm512_add_pd(v1,v2);
|
||||
v2 = Optimization::Permute::Permute2(v1);
|
||||
v1 = _mm512_add_pd(v1,v2);
|
||||
u512d conv; conv.v = v1;
|
||||
return conv.f[0];
|
||||
}
|
||||
#else
|
||||
//Complex float Reduce
|
||||
template<>
|
||||
inline Grid::ComplexF Reduce<Grid::ComplexF, __m512>::operator()(__m512 in){
|
||||
@ -362,7 +443,6 @@ namespace Optimization {
|
||||
return _mm512_reduce_add_ps(in);
|
||||
}
|
||||
|
||||
|
||||
//Complex double Reduce
|
||||
template<>
|
||||
inline Grid::ComplexD Reduce<Grid::ComplexD, __m512d>::operator()(__m512d in){
|
||||
@ -382,6 +462,7 @@ namespace Optimization {
|
||||
printf("Reduce : Missing integer implementation -> FIX\n");
|
||||
assert(0);
|
||||
}
|
||||
#endif
|
||||
|
||||
|
||||
}
|
||||
@ -418,6 +499,7 @@ namespace Optimization {
|
||||
typedef Optimization::Sum SumSIMD;
|
||||
typedef Optimization::Sub SubSIMD;
|
||||
typedef Optimization::Mult MultSIMD;
|
||||
typedef Optimization::Div DivSIMD;
|
||||
typedef Optimization::MultComplex MultComplexSIMD;
|
||||
typedef Optimization::Conj ConjSIMD;
|
||||
typedef Optimization::TimesMinusI TimesMinusISIMD;
|
||||
|
@ -6,8 +6,7 @@
|
||||
|
||||
Copyright (C) 2015
|
||||
|
||||
Author: Peter Boyle <paboyle@ph.ed.ac.uk>
|
||||
Author: neo <cossu@post.kek.jp>
|
||||
Author: Antonin Portelli <antonin.portelli@me.com>
|
||||
|
||||
This program is free software; you can redistribute it and/or modify
|
||||
it under the terms of the GNU General Public License as published by
|
||||
@ -27,133 +26,352 @@ Author: neo <cossu@post.kek.jp>
|
||||
*************************************************************************************/
|
||||
/* END LEGAL */
|
||||
|
||||
static_assert(GEN_SIMD_WIDTH % 16u == 0, "SIMD vector size is not an integer multiple of 16 bytes");
|
||||
|
||||
//#define VECTOR_LOOPS
|
||||
|
||||
// playing with compiler pragmas
|
||||
#ifdef VECTOR_LOOPS
|
||||
#ifdef __clang__
|
||||
#define VECTOR_FOR(i, w, inc)\
|
||||
_Pragma("clang loop unroll(full) vectorize(enable) interleave(enable) vectorize_width(w)")\
|
||||
for (unsigned int i = 0; i < w; i += inc)
|
||||
#elif defined __INTEL_COMPILER
|
||||
#define VECTOR_FOR(i, w, inc)\
|
||||
_Pragma("simd vectorlength(w*8)")\
|
||||
for (unsigned int i = 0; i < w; i += inc)
|
||||
#else
|
||||
#define VECTOR_FOR(i, w, inc)\
|
||||
for (unsigned int i = 0; i < w; i += inc)
|
||||
#endif
|
||||
#else
|
||||
#define VECTOR_FOR(i, w, inc)\
|
||||
for (unsigned int i = 0; i < w; i += inc)
|
||||
#endif
|
||||
|
||||
namespace Grid {
|
||||
namespace Optimization {
|
||||
|
||||
template<class vtype>
|
||||
union uconv {
|
||||
float f;
|
||||
vtype v;
|
||||
// type traits giving the number of elements for each vector type
|
||||
template <typename T> struct W;
|
||||
template <> struct W<double> {
|
||||
constexpr static unsigned int c = GEN_SIMD_WIDTH/16u;
|
||||
constexpr static unsigned int r = GEN_SIMD_WIDTH/8u;
|
||||
};
|
||||
|
||||
union u128f {
|
||||
float v;
|
||||
float f[4];
|
||||
};
|
||||
union u128d {
|
||||
double v;
|
||||
double f[2];
|
||||
template <> struct W<float> {
|
||||
constexpr static unsigned int c = GEN_SIMD_WIDTH/8u;
|
||||
constexpr static unsigned int r = GEN_SIMD_WIDTH/4u;
|
||||
};
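Assuming GEN_SIMD_WIDTH == 32 bytes (an AVX-sized generic vector; the value is an assumption here, it is set at configure time), the traits evaluate as below. A compile-time sanity check one could add inside Grid::Optimization:

static_assert(W<float>::r  == 8 && W<float>::c  == 4, "8 reals / 4 complex per single-precision vector");
static_assert(W<double>::r == 4 && W<double>::c == 2, "4 reals / 2 complex per double-precision vector");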
|
||||
|
||||
// SIMD vector types
|
||||
template <typename T>
|
||||
struct vec {
|
||||
alignas(GEN_SIMD_WIDTH) T v[W<T>::r];
|
||||
};
|
||||
|
||||
typedef vec<float> vecf;
|
||||
typedef vec<double> vecd;
|
||||
|
||||
struct Vsplat{
|
||||
//Complex float
|
||||
inline u128f operator()(float a, float b){
|
||||
u128f out;
|
||||
out.f[0] = a;
|
||||
out.f[1] = b;
|
||||
out.f[2] = a;
|
||||
out.f[3] = b;
|
||||
// Complex
|
||||
template <typename T>
|
||||
inline vec<T> operator()(T a, T b){
|
||||
vec<T> out;
|
||||
|
||||
VECTOR_FOR(i, W<T>::r, 2)
|
||||
{
|
||||
out.v[i] = a;
|
||||
out.v[i+1] = b;
|
||||
}
|
||||
|
||||
return out;
|
||||
}
|
||||
// Real float
|
||||
inline u128f operator()(float a){
|
||||
u128f out;
|
||||
out.f[0] = a;
|
||||
out.f[1] = a;
|
||||
out.f[2] = a;
|
||||
out.f[3] = a;
|
||||
|
||||
// Real
|
||||
template <typename T>
|
||||
inline vec<T> operator()(T a){
|
||||
vec<T> out;
|
||||
|
||||
VECTOR_FOR(i, W<T>::r, 1)
|
||||
{
|
||||
out.v[i] = a;
|
||||
}
|
||||
|
||||
return out;
|
||||
}
|
||||
//Complex double
|
||||
inline u128d operator()(double a, double b){
|
||||
u128d out;
|
||||
out.f[0] = a;
|
||||
out.f[1] = b;
|
||||
return out;
|
||||
}
|
||||
//Real double
|
||||
inline u128d operator()(double a){
|
||||
u128d out;
|
||||
out.f[0] = a;
|
||||
out.f[1] = a;
|
||||
return out;
|
||||
}
|
||||
//Integer
|
||||
|
||||
// Integer
|
||||
inline int operator()(Integer a){
|
||||
return a;
|
||||
}
|
||||
};
|
||||
|
||||
struct Vstore{
|
||||
//Float
|
||||
inline void operator()(u128f a, float* F){
|
||||
memcpy(F,a.f,4*sizeof(float));
|
||||
}
|
||||
//Double
|
||||
inline void operator()(u128d a, double* D){
|
||||
memcpy(D,a.f,2*sizeof(double));
|
||||
// Real
|
||||
template <typename T>
|
||||
inline void operator()(vec<T> a, T *D){
|
||||
*((vec<T> *)D) = a;
|
||||
}
|
||||
//Integer
|
||||
inline void operator()(int a, Integer* I){
|
||||
I[0] = a;
|
||||
inline void operator()(int a, Integer *I){
|
||||
*I = a;
|
||||
}
|
||||
|
||||
};
|
||||
|
||||
struct Vstream{
|
||||
//Float
|
||||
inline void operator()(float * a, u128f b){
|
||||
memcpy(a,b.f,4*sizeof(float));
|
||||
// Real
|
||||
template <typename T>
|
||||
inline void operator()(T * a, vec<T> b){
|
||||
*((vec<T> *)a) = b;
|
||||
}
|
||||
//Double
|
||||
inline void operator()(double * a, u128d b){
|
||||
memcpy(a,b.f,2*sizeof(double));
|
||||
}
|
||||
|
||||
|
||||
};
|
||||
|
||||
struct Vset{
|
||||
// Complex float
|
||||
inline u128f operator()(Grid::ComplexF *a){
|
||||
u128f out;
|
||||
out.f[0] = a[0].real();
|
||||
out.f[1] = a[0].imag();
|
||||
out.f[2] = a[1].real();
|
||||
out.f[3] = a[1].imag();
|
||||
// Complex
|
||||
template <typename T>
|
||||
inline vec<T> operator()(std::complex<T> *a){
|
||||
vec<T> out;
|
||||
|
||||
VECTOR_FOR(i, W<T>::c, 1)
|
||||
{
|
||||
out.v[2*i] = a[i].real();
|
||||
out.v[2*i+1] = a[i].imag();
|
||||
}
|
||||
|
||||
return out;
|
||||
}
|
||||
// Complex double
|
||||
inline u128d operator()(Grid::ComplexD *a){
|
||||
u128d out;
|
||||
out.f[0] = a[0].real();
|
||||
out.f[1] = a[0].imag();
|
||||
return out;
|
||||
}
|
||||
// Real float
|
||||
inline u128f operator()(float *a){
|
||||
u128f out;
|
||||
out.f[0] = a[0];
|
||||
out.f[1] = a[1];
|
||||
out.f[2] = a[2];
|
||||
out.f[3] = a[3];
|
||||
return out;
|
||||
}
|
||||
// Real double
|
||||
inline u128d operator()(double *a){
|
||||
u128d out;
|
||||
out.f[0] = a[0];
|
||||
out.f[1] = a[1];
|
||||
|
||||
// Real
|
||||
template <typename T>
|
||||
inline vec<T> operator()(T *a){
|
||||
vec<T> out;
|
||||
|
||||
out = *((vec<T> *)a);
|
||||
|
||||
return out;
|
||||
}
|
||||
|
||||
// Integer
|
||||
inline int operator()(Integer *a){
|
||||
return a[0];
|
||||
return *a;
|
||||
}
|
||||
|
||||
|
||||
};
|
||||
|
||||
/////////////////////////////////////////////////////
|
||||
// Arithmetic operations
|
||||
/////////////////////////////////////////////////////
|
||||
struct Sum{
|
||||
// Complex/Real
|
||||
template <typename T>
|
||||
inline vec<T> operator()(vec<T> a, vec<T> b){
|
||||
vec<T> out;
|
||||
|
||||
VECTOR_FOR(i, W<T>::r, 1)
|
||||
{
|
||||
out.v[i] = a.v[i] + b.v[i];
|
||||
}
|
||||
|
||||
return out;
|
||||
}
|
||||
|
||||
// Integer
|
||||
inline int operator()(int a, int b){
|
||||
return a + b;
|
||||
}
|
||||
};

struct Sub{
// Complex/Real
template <typename T>
inline vec<T> operator()(vec<T> a, vec<T> b){
vec<T> out;

VECTOR_FOR(i, W<T>::r, 1)
{
out.v[i] = a.v[i] - b.v[i];
}

return out;
}

//Integer
inline int operator()(int a, int b){
return a-b;
}
};

struct Mult{
// Real
template <typename T>
inline vec<T> operator()(vec<T> a, vec<T> b){
vec<T> out;

VECTOR_FOR(i, W<T>::r, 1)
{
out.v[i] = a.v[i]*b.v[i];
}

return out;
}

// Integer
inline int operator()(int a, int b){
return a*b;
}
};

#define cmul(a, b, c, i)\
c[i] = a[i]*b[i] - a[i+1]*b[i+1];\
c[i+1] = a[i]*b[i+1] + a[i+1]*b[i];

struct MultComplex{
// Complex
template <typename T>
inline vec<T> operator()(vec<T> a, vec<T> b){
vec<T> out;

VECTOR_FOR(i, W<T>::c, 1)
{
cmul(a.v, b.v, out.v, 2*i);
}

return out;
}
};

#undef cmul
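// Editor's sketch (not part of the sources): MultComplex treats consecutive lane pairs
// as (re,im). With a lane holding 1+2i and the matching b lane holding 3+4i, cmul gives
// (1*3 - 2*4) + (1*4 + 2*3)i = -5 + 10i.
template <typename T>
inline vec<T> example_cmul(vec<T> a, vec<T> b){
  MultComplex mc;
  return mc(a, b);   // out[2i] + i*out[2i+1] = (a[2i]+i*a[2i+1]) * (b[2i]+i*b[2i+1])
}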

struct Div{
// Real
template <typename T>
inline vec<T> operator()(vec<T> a, vec<T> b){
vec<T> out;

VECTOR_FOR(i, W<T>::r, 1)
{
out.v[i] = a.v[i]/b.v[i];
}

return out;
}
};

#define conj(a, b, i)\
b[i] = a[i];\
b[i+1] = -a[i+1];

struct Conj{
// Complex
template <typename T>
inline vec<T> operator()(vec<T> a){
vec<T> out;

VECTOR_FOR(i, W<T>::c, 1)
{
conj(a.v, out.v, 2*i);
}

return out;
}
};

#undef conj

#define timesmi(a, b, i)\
b[i] = a[i+1];\
b[i+1] = -a[i];

struct TimesMinusI{
// Complex
template <typename T>
inline vec<T> operator()(vec<T> a, vec<T> b){
vec<T> out;

VECTOR_FOR(i, W<T>::c, 1)
{
timesmi(a.v, out.v, 2*i);
}

return out;
}
};

#undef timesmi

#define timesi(a, b, i)\
b[i] = -a[i+1];\
b[i+1] = a[i];

struct TimesI{
// Complex
template <typename T>
inline vec<T> operator()(vec<T> a, vec<T> b){
vec<T> out;

VECTOR_FOR(i, W<T>::c, 1)
{
timesi(a.v, out.v, 2*i);
}

return out;
}
};

#undef timesi
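// Editor's note (not part of the diff): the two functors above encode
//   (-i)*(x + iy) =  y - ix   and   (i)*(x + iy) = -y + ix,
// which is why they only swap and negate the lane pairs; the second operand is unused
// in this generic implementation. A minimal sketch:
template <typename T>
inline vec<T> example_times_i(vec<T> x){
  TimesI ti;
  return ti(x, x);   // second argument is ignored here
}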

//////////////////////////////////////////////
// Some Template specialization
#define perm(a, b, n, w)\
unsigned int _mask = w >> (n + 1);\
VECTOR_FOR(i, w, 1)\
{\
b[i] = a[i^_mask];\
}

#define DECL_PERMUTE_N(n)\
template <typename T>\
static inline vec<T> Permute##n(vec<T> in) {\
vec<T> out;\
perm(in.v, out.v, n, W<T>::r);\
return out;\
}

struct Permute{
DECL_PERMUTE_N(0);
DECL_PERMUTE_N(1);
DECL_PERMUTE_N(2);
DECL_PERMUTE_N(3);
};

#undef perm
#undef DECL_PERMUTE_N
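// Editor's note (not part of the sources): PermuteN exchanges lanes whose indices differ
// by the mask W<T>::r >> (n+1), i.e. a butterfly of that stride. For four real lanes,
// Permute0 maps {a0,a1,a2,a3} to {a2,a3,a0,a1} and Permute1 to {a1,a0,a3,a2}, mirroring
// the SSE specialisations further down in this diff. A minimal sketch:
template <typename T>
inline vec<T> example_swap_halves(vec<T> in){
  return Permute::Permute0(in);   // lane i <- lane i ^ (W<T>::r / 2)
}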

#define rot(a, b, n, w)\
VECTOR_FOR(i, w, 1)\
{\
b[i] = a[(i + n)%w];\
}

struct Rotate{
template <typename T>
static inline vec<T> rotate(vec<T> in, int n){
vec<T> out;

rot(in.v, out.v, n, W<T>::r);

return out;
}
};

#undef rot
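// Editor's note (not part of the diff): Rotate is a left rotation of the real lanes,
// out[i] = in[(i + n) % W<T>::r]; rotating by the full width is the identity.
template <typename T>
inline vec<T> example_rotate_one(vec<T> in){
  return Rotate::rotate(in, 1);   // {in1, in2, ..., in0}
}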

#define acc(v, a, off, step, n)\
for (unsigned int i = off; i < n; i += step)\
{\
a += v[i];\
}

template <typename Out_type, typename In_type>
struct Reduce{
//Need templated class to overload output type
@ -164,316 +382,67 @@ namespace Optimization {
return 0;
}
};

/////////////////////////////////////////////////////
// Arithmetic operations
/////////////////////////////////////////////////////
struct Sum{
//Complex/Real float
inline u128f operator()(u128f a, u128f b){
u128f out;
out.f[0] = a.f[0] + b.f[0];
out.f[1] = a.f[1] + b.f[1];
out.f[2] = a.f[2] + b.f[2];
out.f[3] = a.f[3] + b.f[3];
return out;
}
//Complex/Real double
inline u128d operator()(u128d a, u128d b){
u128d out;
out.f[0] = a.f[0] + b.f[0];
out.f[1] = a.f[1] + b.f[1];
return out;
}
//Integer
inline int operator()(int a, int b){
return a + b;
}
};

struct Sub{
//Complex/Real float
inline u128f operator()(u128f a, u128f b){
u128f out;
out.f[0] = a.f[0] - b.f[0];
out.f[1] = a.f[1] - b.f[1];
out.f[2] = a.f[2] - b.f[2];
out.f[3] = a.f[3] - b.f[3];
return out;
}
//Complex/Real double
inline u128d operator()(u128d a, u128d b){
u128d out;
out.f[0] = a.f[0] - b.f[0];
out.f[1] = a.f[1] - b.f[1];
return out;
}
//Integer
inline int operator()(int a, int b){
return a-b;
}
};

struct MultComplex{
// Complex float
inline u128f operator()(u128f a, u128f b){
u128f out;
out.f[0] = a.f[0]*b.f[0] - a.f[1]*b.f[1];
out.f[1] = a.f[0]*b.f[1] + a.f[1]*b.f[0];
out.f[2] = a.f[2]*b.f[2] - a.f[3]*b.f[3];
out.f[3] = a.f[2]*b.f[3] + a.f[3]*b.f[2];
return out;
}
// Complex double
inline u128d operator()(u128d a, u128d b){
u128d out;
out.f[0] = a.f[0]*b.f[0] - a.f[1]*b.f[1];
out.f[1] = a.f[0]*b.f[1] + a.f[1]*b.f[0];
return out;
}
};

struct Mult{
//CK: Appear unneeded
// inline float mac(float a, float b,double c){
// return 0;
// }
// inline double mac(double a, double b,double c){
// return 0;
// }

// Real float
inline u128f operator()(u128f a, u128f b){
u128f out;
out.f[0] = a.f[0]*b.f[0];
out.f[1] = a.f[1]*b.f[1];
out.f[2] = a.f[2]*b.f[2];
out.f[3] = a.f[3]*b.f[3];
return out;
}
// Real double
inline u128d operator()(u128d a, u128d b){
u128d out;
out.f[0] = a.f[0]*b.f[0];
out.f[1] = a.f[1]*b.f[1];
return out;
}
// Integer
inline int operator()(int a, int b){
return a*b;
}
};

struct Conj{
// Complex single
inline u128f operator()(u128f in){
u128f out;
out.f[0] = in.f[0];
out.f[1] = -in.f[1];
out.f[2] = in.f[2];
out.f[3] = -in.f[3];
return out;
}
// Complex double
inline u128d operator()(u128d in){
u128d out;
out.f[0] = in.f[0];
out.f[1] = -in.f[1];
return out;
}
// do not define for integer input
};

struct TimesMinusI{
//Complex single
inline u128f operator()(u128f in, u128f ret){ //note ret is ignored
u128f out;
out.f[0] = in.f[1];
out.f[1] = -in.f[0];
out.f[2] = in.f[3];
out.f[3] = -in.f[2];
return out;
}
//Complex double
inline u128d operator()(u128d in, u128d ret){
u128d out;
out.f[0] = in.f[1];
out.f[1] = -in.f[0];
return out;
}
};

struct TimesI{
//Complex single
inline u128f operator()(u128f in, u128f ret){ //note ret is ignored
u128f out;
out.f[0] = -in.f[1];
out.f[1] = in.f[0];
out.f[2] = -in.f[3];
out.f[3] = in.f[2];
return out;
}
//Complex double
inline u128d operator()(u128d in, u128d ret){
u128d out;
out.f[0] = -in.f[1];
out.f[1] = in.f[0];
return out;
}
};

//////////////////////////////////////////////
// Some Template specialization
struct Permute{
//We just have to mirror the permutes of Grid_sse4.h
static inline u128f Permute0(u128f in){ //AB CD -> CD AB
u128f out;
out.f[0] = in.f[2];
out.f[1] = in.f[3];
out.f[2] = in.f[0];
out.f[3] = in.f[1];
return out;
};
static inline u128f Permute1(u128f in){ //AB CD -> BA DC
u128f out;
out.f[0] = in.f[1];
out.f[1] = in.f[0];
out.f[2] = in.f[3];
out.f[3] = in.f[2];
return out;
};
static inline u128f Permute2(u128f in){
return in;
};
static inline u128f Permute3(u128f in){
return in;
};

static inline u128d Permute0(u128d in){ //AB -> BA
u128d out;
out.f[0] = in.f[1];
out.f[1] = in.f[0];
return out;
};
static inline u128d Permute1(u128d in){
return in;
};
static inline u128d Permute2(u128d in){
return in;
};
static inline u128d Permute3(u128d in){
return in;
};

};

template < typename vtype >
void permute(vtype &a, vtype b, int perm) {
};

struct Rotate{

static inline u128f rotate(u128f in,int n){
u128f out;
switch(n){
case 0:
out.f[0] = in.f[0];
out.f[1] = in.f[1];
out.f[2] = in.f[2];
out.f[3] = in.f[3];
break;
case 1:
out.f[0] = in.f[1];
out.f[1] = in.f[2];
out.f[2] = in.f[3];
out.f[3] = in.f[0];
break;
case 2:
out.f[0] = in.f[2];
out.f[1] = in.f[3];
out.f[2] = in.f[0];
out.f[3] = in.f[1];
break;
case 3:
out.f[0] = in.f[3];
out.f[1] = in.f[0];
out.f[2] = in.f[1];
out.f[3] = in.f[2];
break;
default: assert(0);
}
return out;
}
static inline u128d rotate(u128d in,int n){
u128d out;
switch(n){
case 0:
out.f[0] = in.f[0];
out.f[1] = in.f[1];
break;
case 1:
out.f[0] = in.f[1];
out.f[1] = in.f[0];
break;
default: assert(0);
}
return out;
}
};

//Complex float Reduce
template<>
inline Grid::ComplexF Reduce<Grid::ComplexF, u128f>::operator()(u128f in){ //2 complex
return Grid::ComplexF(in.f[0] + in.f[2], in.f[1] + in.f[3]);
template <>
inline Grid::ComplexF Reduce<Grid::ComplexF, vecf>::operator()(vecf in){
float a = 0.f, b = 0.f;

acc(in.v, a, 0, 2, W<float>::r);
acc(in.v, b, 1, 2, W<float>::r);

return Grid::ComplexF(a, b);
}

//Real float Reduce
template<>
inline Grid::RealF Reduce<Grid::RealF, u128f>::operator()(u128f in){ //4 floats
return in.f[0] + in.f[1] + in.f[2] + in.f[3];
inline Grid::RealF Reduce<Grid::RealF, vecf>::operator()(vecf in){
float a = 0.;

acc(in.v, a, 0, 1, W<float>::r);

return a;
}

//Complex double Reduce
template<>
inline Grid::ComplexD Reduce<Grid::ComplexD, u128d>::operator()(u128d in){ //1 complex
return Grid::ComplexD(in.f[0],in.f[1]);
inline Grid::ComplexD Reduce<Grid::ComplexD, vecd>::operator()(vecd in){
double a = 0., b = 0.;

acc(in.v, a, 0, 2, W<double>::r);
acc(in.v, b, 1, 2, W<double>::r);

return Grid::ComplexD(a, b);
}

//Real double Reduce
template<>
inline Grid::RealD Reduce<Grid::RealD, u128d>::operator()(u128d in){ //2 doubles
return in.f[0] + in.f[1];
inline Grid::RealD Reduce<Grid::RealD, vecd>::operator()(vecd in){
double a = 0.f;

acc(in.v, a, 0, 1, W<double>::r);

return a;
}

//Integer Reduce
template<>
inline Integer Reduce<Integer, int>::operator()(int in){
// FIXME unimplemented
printf("Reduce : Missing integer implementation -> FIX\n");
assert(0);
return in;
}
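// Editor's sketch (not part of the sources): the acc(v, a, off, step, n) helper used by
// the specialisations above is a strided accumulation, a += v[off] + v[off+step] + ...
// With step 2 it sums the real (off 0) and imaginary (off 1) halves of the interleaved
// complex lanes separately; with step 1 it reduces every lane, as in:
template <typename T>
inline T example_sum_lanes(vec<T> in){
  T a = 0;
  acc(in.v, a, 0, 1, W<T>::r);   // every real lane, stride 1
  return a;
}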
}

//////////////////////////////////////////////////////////////////////////////////////
// Here assign types

typedef Optimization::u128f SIMD_Ftype; // Single precision type
typedef Optimization::u128d SIMD_Dtype; // Double precision type
typedef Optimization::vecf SIMD_Ftype; // Single precision type
typedef Optimization::vecd SIMD_Dtype; // Double precision type
typedef int SIMD_Itype; // Integer type
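// Editor's note (not part of the diff): these typedefs are what Grid_vector_types.h feeds
// into Grid_simd, along the lines of
//   typedef Grid_simd<std::complex<float>, SIMD_Ftype> vComplexF;
//   typedef Grid_simd<double,              SIMD_Dtype> vRealD;
// so switching SIMD targets only changes which Grid_*.h header defines SIMD_Ftype/SIMD_Dtype.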

// prefetch utilities
inline void v_prefetch0(int size, const char *ptr){};
inline void prefetch_HINT_T0(const char *ptr){};

// Gpermute function
template < typename VectorSIMD >
inline void Gpermute(VectorSIMD &y,const VectorSIMD &b, int perm ) {
Optimization::permute(y.v,b.v,perm);
}

// Function name aliases
typedef Optimization::Vsplat VsplatSIMD;
typedef Optimization::Vstore VstoreSIMD;
@ -481,16 +450,13 @@ namespace Optimization {
typedef Optimization::Vstream VstreamSIMD;
template <typename S, typename T> using ReduceSIMD = Optimization::Reduce<S,T>;

// Arithmetic operations
typedef Optimization::Sum SumSIMD;
typedef Optimization::Sub SubSIMD;
typedef Optimization::Div DivSIMD;
typedef Optimization::Mult MultSIMD;
typedef Optimization::MultComplex MultComplexSIMD;
typedef Optimization::Conj ConjSIMD;
typedef Optimization::TimesMinusI TimesMinusISIMD;
typedef Optimization::TimesI TimesISIMD;

}
@ -236,6 +236,17 @@ namespace Optimization {
}
};

struct Div{
// Real float
inline __m512 operator()(__m512 a, __m512 b){
return _mm512_div_ps(a,b);
}
// Real double
inline __m512d operator()(__m512d a, __m512d b){
return _mm512_div_pd(a,b);
}
};

struct Conj{
// Complex single
@ -429,6 +440,7 @@ namespace Optimization {
// Arithmetic operations
typedef Optimization::Sum SumSIMD;
typedef Optimization::Sub SubSIMD;
typedef Optimization::Div DivSIMD;
typedef Optimization::Mult MultSIMD;
typedef Optimization::MultComplex MultComplexSIMD;
typedef Optimization::Conj ConjSIMD;

@ -224,6 +224,18 @@ namespace Optimization {
}
};

struct Div{
// Real float
inline __m128 operator()(__m128 a, __m128 b){
return _mm_div_ps(a,b);
}
// Real double
inline __m128d operator()(__m128d a, __m128d b){
return _mm_div_pd(a,b);
}
};

struct Conj{
// Complex single
inline __m128 operator()(__m128 in){
@ -372,6 +384,8 @@ namespace Optimization {
}
}

//////////////////////////////////////////////////////////////////////////////////////
// Here assign types

@ -398,6 +412,7 @@ namespace Optimization {
// Arithmetic operations
typedef Optimization::Sum SumSIMD;
typedef Optimization::Sub SubSIMD;
typedef Optimization::Div DivSIMD;
typedef Optimization::Mult MultSIMD;
typedef Optimization::MultComplex MultComplexSIMD;
typedef Optimization::Conj ConjSIMD;

@ -38,7 +38,7 @@ directory
#ifndef GRID_VECTOR_TYPES
#define GRID_VECTOR_TYPES

#ifdef GENERIC_VEC
#ifdef GEN
#include "Grid_generic.h"
#endif
#ifdef SSE4
@ -77,38 +77,24 @@ struct RealPart<std::complex<T> > {
//////////////////////////////////////
// demote a vector to real type
//////////////////////////////////////

// type alias used to simplify the syntax of std::enable_if
template <typename T>
using Invoke = typename T::type;
template <typename Condition, typename ReturnType>
using EnableIf = Invoke<std::enable_if<Condition::value, ReturnType> >;
template <typename Condition, typename ReturnType>
using NotEnableIf = Invoke<std::enable_if<!Condition::value, ReturnType> >;
template <typename T> using Invoke = typename T::type;
template <typename Condition, typename ReturnType> using EnableIf = Invoke<std::enable_if<Condition::value, ReturnType> >;
template <typename Condition, typename ReturnType> using NotEnableIf = Invoke<std::enable_if<!Condition::value, ReturnType> >;

////////////////////////////////////////////////////////
// Check for complexity with type traits
template <typename T>
struct is_complex : public std::false_type {};
template <>
struct is_complex<std::complex<double> > : public std::true_type {};
template <>
struct is_complex<std::complex<float> > : public std::true_type {};
template <typename T> struct is_complex : public std::false_type {};
template <> struct is_complex<std::complex<double> > : public std::true_type {};
template <> struct is_complex<std::complex<float> > : public std::true_type {};

template <typename T>
using IfReal = Invoke<std::enable_if<std::is_floating_point<T>::value, int> >;
template <typename T>
using IfComplex = Invoke<std::enable_if<is_complex<T>::value, int> >;
template <typename T>
using IfInteger = Invoke<std::enable_if<std::is_integral<T>::value, int> >;
template <typename T> using IfReal = Invoke<std::enable_if<std::is_floating_point<T>::value, int> >;
template <typename T> using IfComplex = Invoke<std::enable_if<is_complex<T>::value, int> >;
template <typename T> using IfInteger = Invoke<std::enable_if<std::is_integral<T>::value, int> >;

template <typename T>
using IfNotReal =
Invoke<std::enable_if<!std::is_floating_point<T>::value, int> >;
template <typename T>
using IfNotComplex = Invoke<std::enable_if<!is_complex<T>::value, int> >;
template <typename T>
using IfNotInteger = Invoke<std::enable_if<!std::is_integral<T>::value, int> >;
template <typename T> using IfNotReal = Invoke<std::enable_if<!std::is_floating_point<T>::value, int> >;
template <typename T> using IfNotComplex = Invoke<std::enable_if<!is_complex<T>::value, int> >;
template <typename T> using IfNotInteger = Invoke<std::enable_if<!std::is_integral<T>::value, int> >;

////////////////////////////////////////////////////////
// Define the operation templates functors
@ -285,6 +271,20 @@ class Grid_simd {
return a * b;
}

//////////////////////////////////
// Divides
//////////////////////////////////
friend inline Grid_simd operator/(const Scalar_type &a, Grid_simd b) {
Grid_simd va;
vsplat(va, a);
return va / b;
}
friend inline Grid_simd operator/(Grid_simd b, const Scalar_type &a) {
Grid_simd va;
vsplat(va, a);
return b / a;
}

///////////////////////
// Unary negation
///////////////////////
@ -428,7 +428,6 @@ inline void rotate(Grid_simd<S,V> &ret,Grid_simd<S,V> b,int nrot)
ret.v = Optimization::Rotate::rotate(b.v,2*nrot);
}

template <class S, class V>
inline void vbroadcast(Grid_simd<S,V> &ret,const Grid_simd<S,V> &src,int lane){
S* typepun =(S*) &src;
@ -512,7 +511,6 @@ template <class S, class V, IfInteger<S> = 0>
inline void vfalse(Grid_simd<S, V> &ret) {
vsplat(ret, 0);
}

template <class S, class V>
inline void zeroit(Grid_simd<S, V> &z) {
vzero(z);
@ -530,7 +528,6 @@ inline void vstream(Grid_simd<S, V> &out, const Grid_simd<S, V> &in) {
typedef typename S::value_type T;
binary<void>((T *)&out.v, in.v, VstreamSIMD());
}

template <class S, class V, IfInteger<S> = 0>
inline void vstream(Grid_simd<S, V> &out, const Grid_simd<S, V> &in) {
out = in;
@ -569,6 +566,34 @@ inline Grid_simd<S, V> operator*(Grid_simd<S, V> a, Grid_simd<S, V> b) {
return ret;
};

// Distinguish between complex types and others
template <class S, class V, IfComplex<S> = 0>
inline Grid_simd<S, V> operator/(Grid_simd<S, V> a, Grid_simd<S, V> b) {
typedef Grid_simd<S, V> simd;

simd ret;
simd den;
typename simd::conv_t conv;

ret = a * conjugate(b) ;
den = b * conjugate(b) ;

auto real_den = toReal(den);

ret.v=binary<V>(ret.v, real_den.v, DivSIMD());

return ret;
};
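// Editor's note (not part of the sources): the complex division above uses the identity
//   a / b = a * conj(b) / (b * conj(b)),
// where b*conj(b) is purely real, so the final binary<V>(..., DivSIMD()) step is a
// lane-wise real divide of both the real and imaginary parts of a*conj(b) by |b|^2.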

// Real/Integer types
template <class S, class V, IfNotComplex<S> = 0>
inline Grid_simd<S, V> operator/(Grid_simd<S, V> a, Grid_simd<S, V> b) {
Grid_simd<S, V> ret;
ret.v = binary<V>(a.v, b.v, DivSIMD());
return ret;
};

///////////////////////
// Conjugate
///////////////////////
@ -582,7 +607,6 @@ template <class S, class V, IfNotComplex<S> = 0>
inline Grid_simd<S, V> conjugate(const Grid_simd<S, V> &in) {
return in; // for real objects
}

// Suppress adj for integer types... // odd; why conjugate above but not adj??
template <class S, class V, IfNotInteger<S> = 0>
inline Grid_simd<S, V> adj(const Grid_simd<S, V> &in) {
@ -596,14 +620,12 @@ template <class S, class V, IfComplex<S> = 0>
inline void timesMinusI(Grid_simd<S, V> &ret, const Grid_simd<S, V> &in) {
ret.v = binary<V>(in.v, ret.v, TimesMinusISIMD());
}

template <class S, class V, IfComplex<S> = 0>
inline Grid_simd<S, V> timesMinusI(const Grid_simd<S, V> &in) {
Grid_simd<S, V> ret;
timesMinusI(ret, in);
return ret;
}

template <class S, class V, IfNotComplex<S> = 0>
inline Grid_simd<S, V> timesMinusI(const Grid_simd<S, V> &in) {
return in;
@ -616,14 +638,12 @@ template <class S, class V, IfComplex<S> = 0>
inline void timesI(Grid_simd<S, V> &ret, const Grid_simd<S, V> &in) {
ret.v = binary<V>(in.v, ret.v, TimesISIMD());
}

template <class S, class V, IfComplex<S> = 0>
inline Grid_simd<S, V> timesI(const Grid_simd<S, V> &in) {
Grid_simd<S, V> ret;
timesI(ret, in);
return ret;
}

template <class S, class V, IfNotComplex<S> = 0>
inline Grid_simd<S, V> timesI(const Grid_simd<S, V> &in) {
return in;
@ -53,7 +53,7 @@ Author: paboyle <paboyle@ph.ed.ac.uk>

#define ZMULMEM2SPd(O,P,tmp,B,C,Briir,Biirr,Criir,Ciirr)\
VSHUFMEMd(O,P,tmp) \
VMULMEMd(O,P,B,Biirr) \
VMULMEMd(O,P,B,Biirr) \
VMULMEMd(O,P,C,Ciirr) \
VMULd(tmp,B,Briir) \
VMULd(tmp,C,Criir)

@ -37,7 +37,7 @@ Author: paboyle <paboyle@ph.ed.ac.uk>
// Opcodes common
////////////////////////////////////////////////////////////////////////////////////////////////////
#define MASK_REGS \
__asm__ ("mov $0xAAAA, %%eax \n"\
__asm__ ("mov $0xAAAA, %%eax \n"\
"kmovw %%eax, %%k6 \n"\
"mov $0x5555, %%eax \n"\
"kmovw %%eax, %%k7 \n" : : : "%eax");
@ -138,9 +138,14 @@ Author: paboyle <paboyle@ph.ed.ac.uk>
#define ZLOADf(OFF,PTR,ri,ir) VLOADf(OFF,PTR,ir) VSHUFf(ir,ri)
#define ZLOADd(OFF,PTR,ri,ir) VLOADd(OFF,PTR,ir) VSHUFd(ir,ri)

#define STREAM_STORE
#ifdef STREAM_STORE
#define VSTOREf(OFF,PTR,SRC) "vmovntps " #SRC "," #OFF "*64(" #PTR ")" ";\n"
#define VSTOREd(OFF,PTR,SRC) "vmovntpd " #SRC "," #OFF "*64(" #PTR ")" ";\n"
#else
#define VSTOREf(OFF,PTR,SRC) "vmovaps " #SRC "," #OFF "*64(" #PTR ")" ";\n"
#define VSTOREd(OFF,PTR,SRC) "vmovapd " #SRC "," #OFF "*64(" #PTR ")" ";\n"
#endif

// Swaps Re/Im ; could unify this with IMCI
#define VSHUFd(A,DEST) "vpshufd $0x4e," #A "," #DEST ";\n"

@ -32,7 +32,7 @@ Author: paboyle <paboyle@ph.ed.ac.uk>
namespace Grid {

int LebesgueOrder::UseLebesgueOrder;
std::vector<int> LebesgueOrder::Block({2,2,2,2});
std::vector<int> LebesgueOrder::Block({8,2,2,2});

LebesgueOrder::IndexInteger LebesgueOrder::alignup(IndexInteger n){
n--; // 1000 0011 --> 1000 0010

@ -126,6 +126,36 @@ iVector<rtype,N> operator * (const iVector<mtype,N>& lhs,const iScalar<vtype>& r
mult(&ret,&lhs,&rhs);
return ret;
}

//////////////////////////////////////////////////////////////////
// Divide by scalar
//////////////////////////////////////////////////////////////////
template<class rtype,class vtype> strong_inline
iScalar<rtype> operator / (const iScalar<rtype>& lhs,const iScalar<vtype>& rhs)
{
iScalar<rtype> ret;
ret._internal = lhs._internal/rhs._internal;
return ret;
}
template<class rtype,class vtype,int N> strong_inline
iVector<rtype,N> operator / (const iVector<rtype,N>& lhs,const iScalar<vtype>& rhs)
{
iVector<rtype,N> ret;
for(int i=0;i<N;i++){
ret._internal[i] = lhs._internal[i]/rhs._internal;
}
return ret;
}
template<class rtype,class vtype,int N> strong_inline
iMatrix<rtype,N> operator / (const iMatrix<rtype,N>& lhs,const iScalar<vtype>& rhs)
{
iMatrix<rtype,N> ret;
for(int i=0;i<N;i++){
for(int j=0;j<N;j++){
ret._internal[i][j] = lhs._internal[i][j]/rhs._internal;
}}
return ret;
}
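// Editor's sketch (not part of the diff): with these operators a tensor object can be
// rescaled site-wise by a scalar, e.g. (assuming the usual Grid tensor/SIMD aliases)
//   iMatrix<vComplexF, 3> U;
//   iScalar<vComplexF> norm;
//   auto Uhat = U / norm;   // divides every colour entry by the scalar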

//////////////////////////////////////////////////////////////////
// Glue operators to mult routines. Must resolve return type cleverly from typeof(internal)

@ -250,8 +250,7 @@ void merge(vobj &vec,std::vector<typename vobj::scalar_object *> &extracted,int
}
}

template<class vobj> inline
void merge1(vobj &vec,std::vector<typename vobj::scalar_object *> &extracted,int offset)
template<class vobj> inline void merge1(vobj &vec,std::vector<typename vobj::scalar_object *> &extracted,int offset)
{
typedef typename vobj::scalar_type scalar_type ;
typedef typename vobj::vector_type vector_type ;
@ -269,8 +268,7 @@ void merge1(vobj &vec,std::vector<typename vobj::scalar_object *> &extracted,int
}}
}

template<class vobj> inline
void merge2(vobj &vec,std::vector<typename vobj::scalar_object *> &extracted,int offset)
template<class vobj> inline void merge2(vobj &vec,std::vector<typename vobj::scalar_object *> &extracted,int offset)
{
typedef typename vobj::scalar_type scalar_type ;
typedef typename vobj::vector_type vector_type ;