Mirror of https://github.com/paboyle/Grid.git (synced 2025-06-14 13:57:07 +01:00)

Merge branch 'develop' into release/v0.6.0

276  lib/FFT.h
@ -30,8 +30,10 @@ Author: Peter Boyle <paboyle@ph.ed.ac.uk>
#define _GRID_FFT_H_

#ifdef HAVE_FFTW
#include <Grid/fftw/fftw3.h>
#include <fftw3.h>
#endif

namespace Grid {

template<class scalar> struct FFTW { };

@ -98,174 +100,198 @@ namespace Grid {
#define FFTW_BACKWARD (+1)
#endif

class FFT {
private:

GridCartesian *vgrid;
GridCartesian *sgrid;

int Nd;
double flops;
double flops_call;
uint64_t usec;

std::vector<int> dimensions;
std::vector<int> processors;
std::vector<int> processor_coor;

public:

static const int forward=FFTW_FORWARD;
static const int backward=FFTW_BACKWARD;

double Flops(void) {return flops;}
double MFlops(void) {return flops/usec;}

FFT ( GridCartesian * grid ) :
vgrid(grid),
Nd(grid->_ndimension),
dimensions(grid->_fdimensions),
processors(grid->_processors),
processor_coor(grid->_processor_coor)
{
flops=0;
usec =0;
std::vector<int> layout(Nd,1);
sgrid = new GridCartesian(dimensions,layout,processors);
};

~FFT ( void) {
delete sgrid;
}

template<class vobj>
void FFT_dim(Lattice<vobj> &result,const Lattice<vobj> &source,int dim, int inverse){
void FFT_dim_mask(Lattice<vobj> &result,const Lattice<vobj> &source,std::vector<int> mask,int sign){

conformable(result._grid,vgrid);
conformable(source._grid,vgrid);
Lattice<vobj> tmp(vgrid);
tmp = source;
for(int d=0;d<Nd;d++){
if( mask[d] ) {
FFT_dim(result,tmp,d,sign);
tmp=result;
}
}
}

template<class vobj>
void FFT_all_dim(Lattice<vobj> &result,const Lattice<vobj> &source,int sign){
std::vector<int> mask(Nd,1);
FFT_dim_mask(result,source,mask,sign);
}

template<class vobj>
void FFT_dim(Lattice<vobj> &result,const Lattice<vobj> &source,int dim, int sign){
#ifndef HAVE_FFTW
assert(0);
#else
conformable(result._grid,vgrid);
conformable(source._grid,vgrid);

int L = vgrid->_ldimensions[dim];
int G = vgrid->_fdimensions[dim];

std::vector<int> layout(Nd,1);
std::vector<int> pencil_gd(vgrid->_fdimensions);

pencil_gd[dim] = G*processors[dim];

// Pencil global vol LxLxGxLxL per node
GridCartesian pencil_g(pencil_gd,layout,processors);

// Construct pencils
typedef typename vobj::scalar_object sobj;
typedef typename sobj::scalar_type scalar;

Lattice<sobj> pgbuf(&pencil_g);

Lattice<vobj> ssource(vgrid); ssource =source;
Lattice<sobj> pgsource(&pencil_g);
Lattice<sobj> pgresult(&pencil_g); pgresult=zero;

#ifndef HAVE_FFTW
assert(0);
#else
typedef typename FFTW<scalar>::FFTW_scalar FFTW_scalar;
typedef typename FFTW<scalar>::FFTW_plan FFTW_plan;

{
int Ncomp = sizeof(sobj)/sizeof(scalar);
int Nlow = 1;
for(int d=0;d<dim;d++){
Nlow*=vgrid->_ldimensions[d];
}

int rank = 1; /* 1d transforms */
int n[] = {G}; /* 1d transforms of length G */
int howmany = Ncomp;
int odist,idist,istride,ostride;
idist = odist = 1; /* Distance between consecutive FT's */
istride = ostride = Ncomp*Nlow; /* distance between two elements in the same FT */
int *inembed = n, *onembed = n;

int sign = FFTW_FORWARD;
if (inverse) sign = FFTW_BACKWARD;

FFTW_plan p;
{
FFTW_scalar *in = (FFTW_scalar *)&pgsource._odata[0];
FFTW_scalar *out= (FFTW_scalar *)&pgresult._odata[0];
p = FFTW<scalar>::fftw_plan_many_dft(rank,n,howmany,
in,inembed,
istride,idist,
out,onembed,
ostride, odist,
sign,FFTW_ESTIMATE);
}

std::vector<int> lcoor(Nd), gcoor(Nd);

// Barrel shift and collect global pencil
for(int p=0;p<processors[dim];p++) {
for(int idx=0;idx<sgrid->lSites();idx++) {
sgrid->LocalIndexToLocalCoor(idx,lcoor);
sobj s;
peekLocalSite(s,ssource,lcoor);
lcoor[dim]+=p*L;
pokeLocalSite(s,pgsource,lcoor);
}
ssource = Cshift(ssource,dim,L);
}

// Loop over orthog coords
int NN=pencil_g.lSites();
GridStopWatch timer;
timer.Start();

//PARALLEL_FOR_LOOP
for(int idx=0;idx<NN;idx++) {
pencil_g.LocalIndexToLocalCoor(idx,lcoor);
if ( lcoor[dim] == 0 ) { // restricts loop to plane at lcoor[dim]==0
FFTW_scalar *in = (FFTW_scalar *)&pgsource._odata[idx];
FFTW_scalar *out= (FFTW_scalar *)&pgresult._odata[idx];
FFTW<scalar>::fftw_execute_dft(p,in,out);
}
}

timer.Stop();

double add,mul,fma;
FFTW<scalar>::fftw_flops(p,&add,&mul,&fma);
flops_call = add+mul+2.0*fma;
usec += timer.useconds();
flops+= flops_call*NN;
int pc = processor_coor[dim];
for(int idx=0;idx<sgrid->lSites();idx++) {
sgrid->LocalIndexToLocalCoor(idx,lcoor);
gcoor = lcoor;
// extract the result
sobj s;
gcoor[dim] = lcoor[dim]+L*pc;
peekLocalSite(s,pgresult,gcoor);
pokeLocalSite(s,result,lcoor);
}

FFTW<scalar>::fftw_destroy_plan(p);

int Ncomp = sizeof(sobj)/sizeof(scalar);
int Nlow = 1;
for(int d=0;d<dim;d++){
Nlow*=vgrid->_ldimensions[d];
}

int rank = 1; /* 1d transforms */
int n[] = {G}; /* 1d transforms of length G */
int howmany = Ncomp;
int odist,idist,istride,ostride;
idist = odist = 1; /* Distance between consecutive FT's */
istride = ostride = Ncomp*Nlow; /* distance between two elements in the same FT */
int *inembed = n, *onembed = n;

scalar div;
if ( sign == backward ) div = 1.0/G;
else if ( sign == forward ) div = 1.0;
else assert(0);

FFTW_plan p;
{
FFTW_scalar *in = (FFTW_scalar *)&pgbuf._odata[0];
FFTW_scalar *out= (FFTW_scalar *)&pgbuf._odata[0];
p = FFTW<scalar>::fftw_plan_many_dft(rank,n,howmany,
in,inembed,
istride,idist,
out,onembed,
ostride, odist,
sign,FFTW_ESTIMATE);
}

// Barrel shift and collect global pencil
std::vector<int> lcoor(Nd), gcoor(Nd);
result = source;
for(int p=0;p<processors[dim];p++) {
PARALLEL_REGION
{
std::vector<int> cbuf(Nd);
sobj s;

PARALLEL_FOR_LOOP_INTERN
for(int idx=0;idx<sgrid->lSites();idx++) {
sgrid->LocalIndexToLocalCoor(idx,cbuf);
peekLocalSite(s,result,cbuf);
cbuf[dim]+=p*L;
pokeLocalSite(s,pgbuf,cbuf);
}
}
result = Cshift(result,dim,L);
}

// Loop over orthog coords
int NN=pencil_g.lSites();
GridStopWatch timer;
timer.Start();
PARALLEL_REGION
{
std::vector<int> cbuf(Nd);

PARALLEL_FOR_LOOP_INTERN
for(int idx=0;idx<NN;idx++) {
pencil_g.LocalIndexToLocalCoor(idx, cbuf);
if ( cbuf[dim] == 0 ) { // restricts loop to plane at lcoor[dim]==0
FFTW_scalar *in = (FFTW_scalar *)&pgbuf._odata[idx];
FFTW_scalar *out= (FFTW_scalar *)&pgbuf._odata[idx];
FFTW<scalar>::fftw_execute_dft(p,in,out);
}
}
}
timer.Stop();

// performance counting
double add,mul,fma;
FFTW<scalar>::fftw_flops(p,&add,&mul,&fma);
flops_call = add+mul+2.0*fma;
usec += timer.useconds();
flops+= flops_call*NN;

// writing out result
int pc = processor_coor[dim];
PARALLEL_REGION
{
std::vector<int> clbuf(Nd), cgbuf(Nd);
sobj s;

PARALLEL_FOR_LOOP_INTERN
for(int idx=0;idx<sgrid->lSites();idx++) {
sgrid->LocalIndexToLocalCoor(idx,clbuf);
cgbuf = clbuf;
cgbuf[dim] = clbuf[dim]+L*pc;
peekLocalSite(s,pgbuf,cgbuf);
s = s * div;
pokeLocalSite(s,result,clbuf);
}
}

// destroying plan
FFTW<scalar>::fftw_destroy_plan(p);
#endif

}

};

}

#endif
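For orientation, a minimal usage sketch of the interface added above (assuming a build configured with HAVE_FFTW; the lattice volume and the constant source field are illustrative choices, not taken from this commit):

#include <Grid.h>
using namespace Grid;

int main (int argc, char ** argv) {
  Grid_init(&argc,&argv);

  std::vector<int> latt_size  ({8,8,8,8});                          // illustrative 4d volume
  std::vector<int> simd_layout = GridDefaultSimd(4,vComplexD::Nsimd());
  std::vector<int> mpi_layout  = GridDefaultMpi();
  GridCartesian GRID(latt_size,simd_layout,mpi_layout);

  LatticeComplexD one(&GRID); one = ComplexD(1.0,0.0);              // constant source: its FT is a delta at k=0
  LatticeComplexD Fone(&GRID);

  FFT theFFT(&GRID);
  theFFT.FFT_all_dim(Fone,one,FFT::forward);                        // transform every dimension
  std::cout << GridLogMessage << "FFT rate " << theFFT.MFlops() << " Mflop/s per node" << std::endl;

  Grid_finalize();
}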
@ -77,11 +77,10 @@ Author: paboyle <paboyle@ph.ed.ac.uk>
#include <Grid/Stencil.h>
#include <Grid/Algorithms.h>
#include <Grid/parallelIO/BinaryIO.h>
#include <Grid/qcd/QCD.h>
#include <Grid/parallelIO/NerscIO.h>

#include <Grid/FFT.h>

#include <Grid/qcd/QCD.h>
#include <Grid/parallelIO/NerscIO.h>
#include <Grid/qcd/hmc/NerscCheckpointer.h>
#include <Grid/qcd/hmc/HmcRunner.h>
39  lib/Init.cc
@ -44,9 +44,33 @@ Author: paboyle <paboyle@ph.ed.ac.uk>
#include <Grid.h>
#include <algorithm>
#include <iterator>
#include <cstdlib>
#include <memory>

#include <fenv.h>
#ifdef __APPLE__
static int
feenableexcept (unsigned int excepts)
{
static fenv_t fenv;
unsigned int new_excepts = excepts & FE_ALL_EXCEPT,
old_excepts; // previous masks

if ( fegetenv (&fenv) ) return -1;
old_excepts = fenv.__control & FE_ALL_EXCEPT;

// unmask
fenv.__control &= ~new_excepts;
fenv.__mxcsr &= ~(new_excepts << 7);

return ( fesetenv (&fenv) ? -1 : old_excepts );
}
#endif

namespace Grid {

//////////////////////////////////////////////////////
// Convenience functions to access stadard command line arg
// driven parallelism controls
@ -234,7 +258,7 @@ void Grid_init(int *argc,char ***argv)
std::cout<<GridLogMessage<<" --decomposition : report on default omp,mpi and simd decomposition"<<std::endl;
std::cout<<GridLogMessage<<" --debug-signals : catch sigsegv and print a blame report"<<std::endl;
std::cout<<GridLogMessage<<" --debug-stdout : print stdout from EVERY node"<<std::endl;
std::cout<<GridLogMessage<<" --timestamp : tag with millisecond resolution stamps"<<std::endl;
std::cout<<GridLogMessage<<" --notimestamp : suppress millisecond resolution stamps"<<std::endl;
std::cout<<GridLogMessage<<std::endl;
std::cout<<GridLogMessage<<"Performance:"<<std::endl;
std::cout<<GridLogMessage<<" --dslash-generic: Wilson kernel for generic Nc"<<std::endl;
@ -316,7 +340,9 @@ void Grid_init(int *argc,char ***argv)
arg= GridCmdOptionPayload(*argv,*argv+*argc,"--cacheblocking");
GridCmdOptionIntVector(arg,LebesgueOrder::Block);
}
if( GridCmdOptionExists(*argv,*argv+*argc,"--timestamp") ){
if( GridCmdOptionExists(*argv,*argv+*argc,"--notimestamp") ){
GridLogTimestamp(0);
} else {
GridLogTimestamp(1);
}

@ -390,10 +416,7 @@ void Grid_sa_signal_handler(int sig,siginfo_t *si,void * ptr)
exit(0);
return;
};
#ifdef GRID_FPE
#define _GNU_SOURCE
#include <fenv.h>
#endif

void Grid_debug_handler_init(void)
{
struct sigaction sa,osa;
@ -402,9 +425,9 @@ void Grid_debug_handler_init(void)
sa.sa_flags = SA_SIGINFO;
sigaction(SIGSEGV,&sa,NULL);
sigaction(SIGTRAP,&sa,NULL);
#ifdef GRID_FPE

feenableexcept( FE_INVALID|FE_OVERFLOW|FE_DIVBYZERO);

sigaction(SIGFPE,&sa,NULL);
#endif
}
}

@ -54,6 +54,7 @@ namespace Grid {
void GridCmdOptionCSL(std::string str,std::vector<std::string> & vec);
void GridCmdOptionIntVector(std::string &str,std::vector<int> & vec);

void GridParseLayout(char **argv,int argc,
std::vector<int> &latt,
std::vector<int> &simd,
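The __APPLE__ block above back-fills feenableexcept(), which <fenv.h> provides on glibc but not on macOS; Grid_debug_handler_init() then unmasks FE_INVALID, FE_OVERFLOW and FE_DIVBYZERO (under GRID_FPE) so those conditions deliver SIGFPE instead of silently producing NaN/Inf. A stand-alone sketch of the same mechanism, not Grid code:

#define _GNU_SOURCE
#include <fenv.h>
#include <cstdio>

int main(void) {
  feenableexcept(FE_INVALID | FE_OVERFLOW | FE_DIVBYZERO); // same traps Grid enables
  volatile double x = 1.0, y = 0.0;                        // volatile keeps the division out of constant folding
  std::printf("%f\n", x / y);                              // raises SIGFPE instead of printing "inf"
  return 0;
}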
15  lib/Log.cc
@ -31,8 +31,23 @@ directory
/* END LEGAL */
#include <Grid.h>

#include <cxxabi.h>

namespace Grid {

std::string demangle(const char* name) {

int status = -4; // some arbitrary value to eliminate the compiler warning

// enable c++11 by passing the flag -std=c++11 to g++
std::unique_ptr<char, void(*)(void*)> res {
abi::__cxa_demangle(name, NULL, NULL, &status),
std::free
};

return (status==0) ? res.get() : name ;
}

GridStopWatch Logger::StopWatch;
int Logger::timestamp;
std::ostream Logger::devnull(0);

@ -144,6 +144,7 @@ extern GridLogger GridLogIterative ;
extern GridLogger GridLogIntegrator ;
extern Colours GridLogColours;

std::string demangle(const char* name) ;

#define _NBACKTRACE (256)
extern void * Grid_backtrace_buffer[_NBACKTRACE];
@ -162,7 +163,7 @@ std::fclose(fp); \
int symbols = backtrace (Grid_backtrace_buffer,_NBACKTRACE);\
char **strings = backtrace_symbols(Grid_backtrace_buffer,symbols);\
for (int i = 0; i < symbols; i++){\
std::fprintf (fp,"BackTrace Strings: %d %s\n",i, strings[i]); std::fflush(fp); \
std::fprintf (fp,"BackTrace Strings: %d %s\n",i, demangle(strings[i]).c_str()); std::fflush(fp); \
}\
}
#else
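demangle() is what lets the updated backtrace macro print readable symbol names; it can also be used directly. A small illustrative snippet (print_type is a hypothetical helper, not part of the commit):

#include <typeinfo>
#include <iostream>
// Grid::demangle(const char *) is declared in Log.h as shown above.

template<class T> void print_type(const T &obj) {
  // typeid(...).name() returns the mangled name; demangle() turns it into the human-readable form
  std::cout << Grid::demangle(typeid(obj).name()) << std::endl;
}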
12  lib/Simd.h
@ -237,6 +237,18 @@ namespace Grid {
stream<<">";
return stream;
}
inline std::ostream& operator<< (std::ostream& stream, const vInteger &o){
int nn=vInteger::Nsimd();
std::vector<Integer,alignedAllocator<Integer> > buf(nn);
vstore(o,&buf[0]);
stream<<"<";
for(int i=0;i<nn;i++){
stream<<buf[i];
if(i<nn-1) stream<<",";
}
stream<<">";
return stream;
}

}
@ -38,14 +38,19 @@ Author: paboyle <paboyle@ph.ed.ac.uk>
#ifdef GRID_OMP
#include <omp.h>
#ifdef GRID_NUMA
#define PARALLEL_FOR_LOOP _Pragma("omp parallel for schedule(static)")
#define PARALLEL_FOR_LOOP_INTERN _Pragma("omp for schedule(static)")
#else
#define PARALLEL_FOR_LOOP _Pragma("omp parallel for schedule(runtime)")
#define PARALLEL_FOR_LOOP_INTERN _Pragma("omp for schedule(runtime)")
#endif
#define PARALLEL_NESTED_LOOP2 _Pragma("omp parallel for collapse(2)")
#define PARALLEL_REGION _Pragma("omp parallel")
#else
#define PARALLEL_FOR_LOOP
#define PARALLEL_FOR_LOOP_INTERN
#define PARALLEL_NESTED_LOOP2
#define PARALLEL_REGION
#endif

namespace Grid {
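The new PARALLEL_REGION / PARALLEL_FOR_LOOP_INTERN pair lets per-thread scratch objects be constructed once per thread team instead of once per iteration; the rewritten pencil loops in lib/FFT.h above use exactly this shape. Schematic sketch (grid is assumed to be a GridBase pointer):

PARALLEL_REGION
{
  std::vector<int> coor(Nd);            // one scratch coordinate buffer per thread

  PARALLEL_FOR_LOOP_INTERN
  for(int idx=0;idx<grid->lSites();idx++){
    grid->LocalIndexToLocalCoor(idx,coor);
    // ... per-site work using coor ...
  }
}
// With GRID_OMP this expands to "omp parallel { ... omp for ... }"; without it the macros
// expand to nothing and the loop simply runs serially inside an ordinary scope.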
@ -39,6 +39,10 @@ Author: Peter Boyle <paboyle@ph.ed.ac.uk>
///
////////////////////////////////////////////////////////////////////////////////////////////////////////////////
#include <semaphore.h>
#include <fcntl.h>
#include <unistd.h>
#include <limits.h>

typedef sem_t *Grid_semaphore;

#define SEM_INIT(S) S = sem_open(sem_name,0,0600,0); assert ( S != SEM_FAILED );
@ -48,7 +52,6 @@ typedef sem_t *Grid_semaphore;

#include <sys/mman.h>

namespace Grid {

enum { COMMAND_ISEND, COMMAND_IRECV, COMMAND_WAITALL };
@ -91,18 +94,18 @@ public:

void SemInit(void) {
sprintf(sem_name,"/Grid_mpi3_sem_head_%d",universe_rank);
printf("SEM_NAME: %s \n",sem_name);
// printf("SEM_NAME: %s \n",sem_name);
SEM_INIT(sem_head);
sprintf(sem_name,"/Grid_mpi3_sem_tail_%d",universe_rank);
printf("SEM_NAME: %s \n",sem_name);
// printf("SEM_NAME: %s \n",sem_name);
SEM_INIT(sem_tail);
}
void SemInitExcl(void) {
sprintf(sem_name,"/Grid_mpi3_sem_head_%d",universe_rank);
printf("SEM_INIT_EXCL: %s \n",sem_name);
// printf("SEM_INIT_EXCL: %s \n",sem_name);
SEM_INIT_EXCL(sem_head);
sprintf(sem_name,"/Grid_mpi3_sem_tail_%d",universe_rank);
printf("SEM_INIT_EXCL: %s \n",sem_name);
// printf("SEM_INIT_EXCL: %s \n",sem_name);
SEM_INIT_EXCL(sem_tail);
}
void WakeUpDMA(void) {
@ -118,7 +121,7 @@ public:
SEM_WAIT(sem_tail);
};
void EventLoop (void) {
std::cout<< " Entering event loop "<<std::endl;
// std::cout<< " Entering event loop "<<std::endl;
while(1){
WaitForCommand();
// std::cout << "Getting command "<<std::endl;
@ -291,7 +294,7 @@ void MPIoffloadEngine::CommunicatorInit (MPI_Comm &communicator_world,
/////////////////////////////////////////////////////////////////////
// Split into groups that can share memory (Verticals)
/////////////////////////////////////////////////////////////////////
#define MPI_SHARED_MEM_DEBUG
#undef MPI_SHARED_MEM_DEBUG
#ifdef MPI_SHARED_MEM_DEBUG
MPI_Comm_split(communicator_universe,(UniverseRank/4),UniverseRank,&VerticalComm);
#else
@ -527,7 +530,7 @@ void Slave::Init(SlaveState * _state,MPI_Comm _squadron,int _universe_rank,int _
universe_rank=_universe_rank;
vertical_rank=_vertical_rank;
state =_state;
std::cout << "state "<<_state<<" comm "<<_squadron<<" universe_rank"<<universe_rank <<std::endl;
// std::cout << "state "<<_state<<" comm "<<_squadron<<" universe_rank"<<universe_rank <<std::endl;
state->head = state->tail = state->start = 0;
base = (uint64_t)MPIoffloadEngine::VerticalShmBufs[0];
int rank; MPI_Comm_rank(_squadron,&rank);
412  lib/fftw/fftw3.h
@ -1,412 +0,0 @@
(Whole-file deletion: the bundled copy of the public FFTW 3 header — the Frigo/MIT license text, the FFTW_DEFINE_API() prototype macro expanded for the double, float, long double and quad precisions, and the FFTW_FORWARD/FFTW_BACKWARD, planner-flag and wisdom declarations — is removed from the tree. Grid now picks up the system <fftw3.h> instead, as the include change in lib/FFT.h above shows.)
@ -261,6 +261,7 @@ GridUnopClass(UnaryExp, exp(a));
GridBinOpClass(BinaryAdd, lhs + rhs);
GridBinOpClass(BinarySub, lhs - rhs);
GridBinOpClass(BinaryMul, lhs *rhs);
GridBinOpClass(BinaryDiv, lhs /rhs);

GridBinOpClass(BinaryAnd, lhs &rhs);
GridBinOpClass(BinaryOr, lhs | rhs);
@ -385,6 +386,7 @@ GRID_DEF_UNOP(exp, UnaryExp);
GRID_DEF_BINOP(operator+, BinaryAdd);
GRID_DEF_BINOP(operator-, BinarySub);
GRID_DEF_BINOP(operator*, BinaryMul);
GRID_DEF_BINOP(operator/, BinaryDiv);

GRID_DEF_BINOP(operator&, BinaryAnd);
GRID_DEF_BINOP(operator|, BinaryOr);
@ -300,17 +300,6 @@ PARALLEL_FOR_LOOP
*this = (*this)+r;
return *this;
}

strong_inline friend Lattice<vobj> operator / (const Lattice<vobj> &lhs,const Lattice<vobj> &rhs){
conformable(lhs,rhs);
Lattice<vobj> ret(lhs._grid);
PARALLEL_FOR_LOOP
for(int ss=0;ss<lhs._grid->oSites();ss++){
ret._odata[ss] = lhs._odata[ss]*pow(rhs._odata[ss],-1.0);
}
return ret;
};

}; // class Lattice

template<class vobj> std::ostream& operator<< (std::ostream& stream, const Lattice<vobj> &o){
@ -294,7 +294,7 @@ namespace Grid {
int rank,o_idx,i_idx;
_grid->GlobalIndexToGlobalCoor(gidx,gcoor);
_grid->GlobalCoorToRankIndex(rank,o_idx,i_idx,gcoor);

int l_idx=generator_idx(o_idx,i_idx);

const int num_rand_seed=16;
@ -457,7 +457,7 @@ class BinaryIO {
// available (how short sighted is that?)
//////////////////////////////////////////////////////////
Umu = zero;
static uint32_t csum=0;
static uint32_t csum; csum=0;
fobj fileObj;
static sobj siteObj; // Static to place in symmetric region for SHMEM
@ -50,6 +50,30 @@ namespace QCD {
mass(_mass)
{ }

template<class Impl>
void CayleyFermion5D<Impl>::Dminus(const FermionField &psi, FermionField &chi)
{
int Ls=this->Ls;
FermionField tmp(psi._grid);

this->DW(psi,tmp,DaggerNo);

for(int s=0;s<Ls;s++){
axpby_ssp(chi,Coeff_t(1.0),psi,-cs[s],tmp,s,s);// chi = (1-c[s] D_W) psi
}
}
template<class Impl>
void CayleyFermion5D<Impl>::DminusDag(const FermionField &psi, FermionField &chi)
{
int Ls=this->Ls;
FermionField tmp(psi._grid);

this->DW(psi,tmp,DaggerYes);

for(int s=0;s<Ls;s++){
axpby_ssp(chi,Coeff_t(1.0),psi,-cs[s],tmp,s,s);// chi = (1-c[s] D_W) psi
}
}
template<class Impl>
void CayleyFermion5D<Impl>::M5D (const FermionField &psi, FermionField &chi)
{
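In symbols, and following the in-line comment above, the new routines apply the domain-wall D_- factor diagonally in the fifth dimension (a transcription of what the loops do, with c_s the stored coefficients cs[s]):

(D_- \psi)_s = (1 - c_s D_W)\,\psi_s , \qquad (D_-^\dagger \psi)_s = (1 - c_s D_W^\dagger)\,\psi_s .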
@ -56,6 +56,9 @@ namespace Grid {
virtual void M5D (const FermionField &psi, FermionField &chi);
virtual void M5Ddag(const FermionField &psi, FermionField &chi);

virtual void Dminus(const FermionField &psi, FermionField &chi);
virtual void DminusDag(const FermionField &psi, FermionField &chi);

/////////////////////////////////////////////////////
// Instantiate different versions depending on Impl
/////////////////////////////////////////////////////
@ -117,6 +120,7 @@ namespace Grid {
GridRedBlackCartesian &FourDimRedBlackGrid,
RealD _mass,RealD _M5,const ImplParams &p= ImplParams());

protected:
void SetCoefficientsZolotarev(RealD zolohi,Approx::zolotarev_data *zdata,RealD b,RealD c);
void SetCoefficientsTanh(Approx::zolotarev_data *zdata,RealD b,RealD c);
@ -42,6 +42,10 @@ namespace Grid {
INHERIT_IMPL_TYPES(Impl);
public:

void MomentumSpacePropagator(FermionField &out,const FermionField &in,RealD _m) {
this->MomentumSpacePropagatorHt(out,in,_m);
};

virtual void Instantiatable(void) {};
// Constructors
DomainWallFermion(GaugeField &_Umu,
@ -51,6 +55,7 @@ namespace Grid {
GridRedBlackCartesian &FourDimRedBlackGrid,
RealD _mass,RealD _M5,const ImplParams &p= ImplParams()) :

CayleyFermion5D<Impl>(_Umu,
FiveDimGrid,
FiveDimRedBlackGrid,
@ -91,6 +91,20 @@ namespace Grid {
virtual void Mdiag (const FermionField &in, FermionField &out) { Mooee(in,out);}; // Same as Mooee applied to both CB's
virtual void Mdir (const FermionField &in, FermionField &out,int dir,int disp)=0; // case by case Wilson, Clover, Cayley, ContFrac, PartFrac

virtual void MomentumSpacePropagator(FermionField &out,const FermionField &in,RealD _m) { assert(0);};

virtual void FreePropagator(const FermionField &in,FermionField &out,RealD mass) {
FFT theFFT((GridCartesian *) in._grid);

FermionField in_k(in._grid);
FermionField prop_k(in._grid);

theFFT.FFT_all_dim(in_k,in,FFT::forward);
this->MomentumSpacePropagator(prop_k,in_k,mass);
theFFT.FFT_all_dim(out,prop_k,FFT::backward);
};

///////////////////////////////////////////////
// Updates gauge field during HMC
///////////////////////////////////////////////
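FreePropagator therefore chains a forward FFT, the action-specific MomentumSpacePropagator, and a backward FFT. A hedged usage sketch (the WilsonFermionR construction follows the usual Grid test idiom; the grids, mass value and zeroed links are assumptions for illustration, not part of this hunk):

GridRedBlackCartesian RBGRID(&GRID);          // checkerboarded grid required by the constructor
LatticeGaugeField Umu(&GRID); Umu = zero;     // gauge links are not used by the free propagator
RealD mass = 0.1;

WilsonFermionR Dw(Umu,GRID,RBGRID,mass);
LatticeFermion src(&GRID), prop(&GRID);
// ... fill src, e.g. with a point source ...
Dw.FreePropagator(src,prop,mass);             // prop = FFT^{-1}[ G_free(k) * FFT(src) ]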
@ -42,7 +42,11 @@ namespace Grid {
INHERIT_IMPL_TYPES(Impl);
public:

// Constructors
void MomentumSpacePropagator(FermionField &out,const FermionField &in,RealD _m) {
this->MomentumSpacePropagatorHw(out,in,_m);
};

// Constructors
OverlapWilsonCayleyTanhFermion(GaugeField &_Umu,
GridCartesian &FiveDimGrid,
GridRedBlackCartesian &FiveDimRedBlackGrid,
@ -101,6 +101,7 @@ void WilsonFermion<Impl>::Meooe(const FermionField &in, FermionField &out) {
DhopOE(in, out, DaggerNo);
}
}

template <class Impl>
void WilsonFermion<Impl>::MeooeDag(const FermionField &in, FermionField &out) {
if (in.checkerboard == Odd) {
@ -109,32 +110,87 @@ void WilsonFermion<Impl>::MeooeDag(const FermionField &in, FermionField &out) {
DhopOE(in, out, DaggerYes);
}
}

template <class Impl>
void WilsonFermion<Impl>::Mooee(const FermionField &in, FermionField &out) {
out.checkerboard = in.checkerboard;
typename FermionField::scalar_type scal(4.0 + mass);
out = scal * in;
}

template <class Impl>
void WilsonFermion<Impl>::MooeeDag(const FermionField &in, FermionField &out) {
out.checkerboard = in.checkerboard;
Mooee(in, out);
}

template<class Impl>
void WilsonFermion<Impl>::MooeeInv(const FermionField &in, FermionField &out) {
out.checkerboard = in.checkerboard;
out = (1.0/(4.0+mass))*in;
}

template<class Impl>
void WilsonFermion<Impl>::MooeeInvDag(const FermionField &in, FermionField &out) {
out.checkerboard = in.checkerboard;
MooeeInv(in,out);
}

template <class Impl>
void WilsonFermion<Impl>::MooeeInv(const FermionField &in, FermionField &out) {
out.checkerboard = in.checkerboard;
out = (1.0 / (4.0 + mass)) * in;
}
template<class Impl>
void WilsonFermion<Impl>::MomentumSpacePropagator(FermionField &out, const FermionField &in,RealD _m) {

template <class Impl>
void WilsonFermion<Impl>::MooeeInvDag(const FermionField &in,
FermionField &out) {
out.checkerboard = in.checkerboard;
MooeeInv(in, out);
}
// what type LatticeComplex
conformable(_grid,out._grid);

typedef typename FermionField::vector_type vector_type;
typedef typename FermionField::scalar_type ScalComplex;

typedef Lattice<iSinglet<vector_type> > LatComplex;

Gamma::GammaMatrix Gmu [] = {
Gamma::GammaX,
Gamma::GammaY,
Gamma::GammaZ,
Gamma::GammaT
};

std::vector<int> latt_size = _grid->_fdimensions;

FermionField num (_grid); num = zero;
LatComplex wilson(_grid); wilson= zero;
LatComplex one (_grid); one = ScalComplex(1.0,0.0);

LatComplex denom(_grid); denom= zero;
LatComplex kmu(_grid);
ScalComplex ci(0.0,1.0);
// momphase = n * 2pi / L
for(int mu=0;mu<Nd;mu++) {

LatticeCoordinate(kmu,mu);

RealD TwoPiL = M_PI * 2.0/ latt_size[mu];

kmu = TwoPiL * kmu;

wilson = wilson + 2.0*sin(kmu*0.5)*sin(kmu*0.5); // Wilson term

num = num - sin(kmu)*ci*(Gamma(Gmu[mu])*in); // derivative term

denom=denom + sin(kmu)*sin(kmu);
}

wilson = wilson + _m; // 2 sin^2 k/2 + m

num = num + wilson*in; // -i gmu sin k + 2 sin^2 k/2 + m

denom= denom+wilson*wilson; // sin^2 k + (2 sin^2 k/2 + m)^2

denom= one/denom;

out = num*denom; // [ -i gmu sin k + 2 sin^2 k/2 + m] / [ sin^2 k + (2 sin^2 k/2 + m)^2 ]

}

///////////////////////////////////
// Internal
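Collecting the in-line comments above, the free Wilson momentum-space propagator implemented by this routine is

G(k) = \frac{ -i\sum_\mu \gamma_\mu \sin k_\mu + 2\sum_\mu \sin^2(k_\mu/2) + m }
            { \sum_\mu \sin^2 k_\mu + \bigl( 2\sum_\mu \sin^2(k_\mu/2) + m \bigr)^2 } ,
\qquad k_\mu = \frac{2\pi n_\mu}{L_\mu},

i.e. the inverse of i\gamma_\mu \sin k_\mu + m + 2\sum_\mu \sin^2(k_\mu/2), written with a rationalised denominator.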
@ -78,16 +78,15 @@ class WilsonFermion : public WilsonKernels<Impl>, public WilsonFermionStatic {
virtual void MooeeInv(const FermionField &in, FermionField &out);
virtual void MooeeInvDag(const FermionField &in, FermionField &out);

virtual void MomentumSpacePropagator(FermionField &out,const FermionField &in,RealD _mass) ;

////////////////////////
// Derivative interface
////////////////////////
// Interface calls an internal routine
void DhopDeriv(GaugeField &mat, const FermionField &U, const FermionField &V,
int dag);
void DhopDerivOE(GaugeField &mat, const FermionField &U,
const FermionField &V, int dag);
void DhopDerivEO(GaugeField &mat, const FermionField &U,
const FermionField &V, int dag);
void DhopDeriv(GaugeField &mat,const FermionField &U,const FermionField &V,int dag);
void DhopDerivOE(GaugeField &mat,const FermionField &U,const FermionField &V,int dag);
void DhopDerivEO(GaugeField &mat,const FermionField &U,const FermionField &V,int dag);

///////////////////////////////////////////////////////////////
// non-hermitian hopping term; half cb or both
@ -482,6 +482,148 @@ void WilsonFermion5D<Impl>::DW(const FermionField &in, FermionField &out,int dag
|
||||
axpy(out,4.0-M5,in,out);
|
||||
}
|
||||
|
||||
template<class Impl>
|
||||
void WilsonFermion5D<Impl>::MomentumSpacePropagatorHt(FermionField &out,const FermionField &in, RealD mass)
|
||||
{
|
||||
// what type LatticeComplex
|
||||
GridBase *_grid = _FourDimGrid;
|
||||
conformable(_grid,out._grid);
|
||||
|
||||
typedef typename FermionField::vector_type vector_type;
|
||||
typedef typename FermionField::scalar_type ScalComplex;
|
||||
typedef iSinglet<ScalComplex> Tcomplex;
|
||||
typedef Lattice<iSinglet<vector_type> > LatComplex;
|
||||
|
||||
Gamma::GammaMatrix Gmu [] = {
|
||||
Gamma::GammaX,
|
||||
Gamma::GammaY,
|
||||
Gamma::GammaZ,
|
||||
Gamma::GammaT
|
||||
};
|
||||
|
||||
std::vector<int> latt_size = _grid->_fdimensions;
|
||||
|
||||
|
||||
FermionField num (_grid); num = zero;
|
||||
|
||||
LatComplex sk(_grid); sk = zero;
|
||||
LatComplex sk2(_grid); sk2= zero;
|
||||
LatComplex W(_grid); W= zero;
|
||||
LatComplex a(_grid); a= zero;
|
||||
LatComplex one (_grid); one = ScalComplex(1.0,0.0);
|
||||
LatComplex denom(_grid); denom= zero;
|
||||
LatComplex cosha(_grid);
|
||||
LatComplex kmu(_grid);
|
||||
LatComplex Wea(_grid);
|
||||
LatComplex Wema(_grid);
|
||||
|
||||
ScalComplex ci(0.0,1.0);
|
||||
|
||||
for(int mu=0;mu<Nd;mu++) {
|
||||
|
||||
LatticeCoordinate(kmu,mu);
|
||||
|
||||
RealD TwoPiL = M_PI * 2.0/ latt_size[mu];
|
||||
|
||||
kmu = TwoPiL * kmu;
|
||||
|
||||
sk2 = sk2 + 2.0*sin(kmu*0.5)*sin(kmu*0.5);
|
||||
sk = sk + sin(kmu) *sin(kmu);
|
||||
|
||||
num = num - sin(kmu)*ci*(Gamma(Gmu[mu])*in);
|
||||
|
||||
}
|
||||
|
||||
W = one - M5 + sk2;
|
||||
|
||||
////////////////////////////////////////////
|
||||
// Cosh alpha -> alpha
|
||||
////////////////////////////////////////////
|
||||
cosha = (one + W*W + sk) / (W*2.0);
|
||||
|
||||
// FIXME Need a Lattice acosh
|
||||
for(int idx=0;idx<_grid->lSites();idx++){
|
||||
std::vector<int> lcoor(Nd);
|
||||
Tcomplex cc;
|
||||
RealD sgn;
|
||||
_grid->LocalIndexToLocalCoor(idx,lcoor);
|
||||
peekLocalSite(cc,cosha,lcoor);
|
||||
assert((double)real(cc)>=1.0);
|
||||
assert(fabs((double)imag(cc))<=1.0e-15);
|
||||
cc = ScalComplex(::acosh(real(cc)),0.0);
|
||||
pokeLocalSite(cc,a,lcoor);
|
||||
}
|
||||
|
||||
Wea = ( exp( a) * W );
|
||||
Wema= ( exp(-a) * W );
|
||||
|
||||
num = num + ( one - Wema ) * mass * in;
|
||||
denom= ( Wea - one ) + mass*mass * (one - Wema);
|
||||
out = num/denom;
|
||||
}
|
||||
|
||||
template<class Impl>
|
||||
void WilsonFermion5D<Impl>::MomentumSpacePropagatorHw(FermionField &out,const FermionField &in,RealD mass)
|
||||
{
|
||||
Gamma::GammaMatrix Gmu [] = {
|
||||
Gamma::GammaX,
|
||||
Gamma::GammaY,
|
||||
Gamma::GammaZ,
|
||||
Gamma::GammaT
|
||||
};
|
||||
|
||||
GridBase *_grid = _FourDimGrid;
|
||||
conformable(_grid,out._grid);
|
||||
|
||||
typedef typename FermionField::vector_type vector_type;
|
||||
typedef typename FermionField::scalar_type ScalComplex;
|
||||
|
||||
typedef Lattice<iSinglet<vector_type> > LatComplex;
|
||||
|
||||
|
||||
std::vector<int> latt_size = _grid->_fdimensions;
|
||||
|
||||
LatComplex sk(_grid); sk = zero;
|
||||
LatComplex sk2(_grid); sk2= zero;
|
||||
|
||||
LatComplex w_k(_grid); w_k= zero;
|
||||
LatComplex b_k(_grid); b_k= zero;
|
||||
|
||||
LatComplex one (_grid); one = ScalComplex(1.0,0.0);
|
||||
|
||||
FermionField num (_grid); num = zero;
|
||||
LatComplex denom(_grid); denom= zero;
|
||||
LatComplex kmu(_grid);
|
||||
ScalComplex ci(0.0,1.0);
|
||||
|
||||
for(int mu=0;mu<Nd;mu++) {
|
||||
|
||||
LatticeCoordinate(kmu,mu);
|
||||
|
||||
RealD TwoPiL = M_PI * 2.0/ latt_size[mu];
|
||||
|
||||
kmu = TwoPiL * kmu;
|
||||
|
||||
sk2 = sk2 + 2.0*sin(kmu*0.5)*sin(kmu*0.5);
|
||||
sk = sk + sin(kmu)*sin(kmu);
|
||||
|
||||
num = num - sin(kmu)*ci*(Gamma(Gmu[mu])*in);
|
||||
|
||||
}
|
||||
num = num + mass * in ;
|
||||
|
||||
b_k = sk2 - M5;
|
||||
|
||||
w_k = sqrt(sk + b_k*b_k);
|
||||
|
||||
denom= ( w_k + b_k + mass*mass) ;
|
||||
|
||||
denom= one/denom;
|
||||
out = num*denom;
|
||||
|
||||
}
|
||||
|
||||
|
||||
FermOpTemplateInstantiate(WilsonFermion5D);
|
||||
GparityFermOpTemplateInstantiate(WilsonFermion5D);
|
||||
|
||||
|
@ -47,68 +47,82 @@ namespace QCD {
|
||||
// [DIFFERS from original CPS red black implementation parity = (x+y+z+t+s)|2 ]
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
class WilsonFermion5DStatic {
|
||||
public:
|
||||
// S-direction is INNERMOST and takes no part in the parity.
|
||||
static const std::vector<int> directions;
|
||||
static const std::vector<int> displacements;
|
||||
const int npoint = 8;
|
||||
};
|
||||
|
||||
template<class Impl>
|
||||
class WilsonFermion5D : public WilsonKernels<Impl>, public WilsonFermion5DStatic
|
||||
{
|
||||
public:
|
||||
INHERIT_IMPL_TYPES(Impl);
|
||||
typedef WilsonKernels<Impl> Kernels;
|
||||
PmuStat stat;
|
||||
|
||||
void Report(void);
|
||||
void ZeroCounters(void);
|
||||
double DhopCalls;
|
||||
double DhopCommTime;
|
||||
double DhopComputeTime;
|
||||
|
||||
double DerivCalls;
|
||||
double DerivCommTime;
|
||||
double DerivComputeTime;
|
||||
double DerivDhopComputeTime;
|
||||
|
||||
///////////////////////////////////////////////////////////////
|
||||
// Implement the abstract base
|
||||
///////////////////////////////////////////////////////////////
|
||||
GridBase *GaugeGrid(void) { return _FourDimGrid ;}
|
||||
GridBase *GaugeRedBlackGrid(void) { return _FourDimRedBlackGrid ;}
|
||||
GridBase *FermionGrid(void) { return _FiveDimGrid;}
|
||||
GridBase *FermionRedBlackGrid(void) { return _FiveDimRedBlackGrid;}
|
||||
|
||||
// full checkerboard operations; leave unimplemented as abstract for now
|
||||
virtual RealD M (const FermionField &in, FermionField &out){assert(0); return 0.0;};
|
||||
virtual RealD Mdag (const FermionField &in, FermionField &out){assert(0); return 0.0;};
|
||||
|
||||
// half checkerboard operations; leave unimplemented as abstract for now
|
||||
virtual void Meooe (const FermionField &in, FermionField &out){assert(0);};
|
||||
virtual void Mooee (const FermionField &in, FermionField &out){assert(0);};
|
||||
virtual void MooeeInv (const FermionField &in, FermionField &out){assert(0);};
|
||||
|
||||
virtual void MeooeDag (const FermionField &in, FermionField &out){assert(0);};
|
||||
virtual void MooeeDag (const FermionField &in, FermionField &out){assert(0);};
|
||||
virtual void MooeeInvDag (const FermionField &in, FermionField &out){assert(0);};
|
||||
virtual void Mdir (const FermionField &in, FermionField &out,int dir,int disp){assert(0);}; // case by case Wilson, Clover, Cayley, ContFrac, PartFrac
|
||||
|
||||
// These can be overridden by fancy 5d chiral action
|
||||
virtual void DhopDeriv (GaugeField &mat,const FermionField &U,const FermionField &V,int dag);
|
||||
virtual void DhopDerivEO(GaugeField &mat,const FermionField &U,const FermionField &V,int dag);
|
||||
virtual void DhopDerivOE(GaugeField &mat,const FermionField &U,const FermionField &V,int dag);
|
||||
|
||||
// Implement hopping term non-hermitian hopping term; half cb or both
|
||||
// Implement s-diagonal DW
|
||||
void DW (const FermionField &in, FermionField &out,int dag);
|
||||
void Dhop (const FermionField &in, FermionField &out,int dag);
|
||||
void DhopOE(const FermionField &in, FermionField &out,int dag);
|
||||
void DhopEO(const FermionField &in, FermionField &out,int dag);
|
||||
|
||||
// add a DhopComm
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
// This is the 4d red black case appropriate to support
|
||||
//
|
||||
// parity = (x+y+z+t)|2;
|
||||
// generalised five dim fermions like mobius, zolotarev etc..
|
||||
//
|
||||
// i.e. even even contains fifth dim hopping term.
|
||||
//
|
||||
// [DIFFERS from original CPS red black implementation parity = (x+y+z+t+s)|2 ]
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
class WilsonFermion5DStatic {
|
||||
public:
|
||||
// S-direction is INNERMOST and takes no part in the parity.
|
||||
static const std::vector<int> directions;
|
||||
static const std::vector<int> displacements;
|
||||
const int npoint = 8;
|
||||
};

template<class Impl>
class WilsonFermion5D : public WilsonKernels<Impl>, public WilsonFermion5DStatic
{
public:
  INHERIT_IMPL_TYPES(Impl);
  typedef WilsonKernels<Impl> Kernels;
  PmuStat stat;

  void Report(void);
  void ZeroCounters(void);
  double DhopCalls;
  double DhopCommTime;
  double DhopComputeTime;

  double DerivCalls;
  double DerivCommTime;
  double DerivComputeTime;
  double DerivDhopComputeTime;

  ///////////////////////////////////////////////////////////////
  // Implement the abstract base
  ///////////////////////////////////////////////////////////////
  GridBase *GaugeGrid(void)           { return _FourDimGrid ;}
  GridBase *GaugeRedBlackGrid(void)   { return _FourDimRedBlackGrid ;}
  GridBase *FermionGrid(void)         { return _FiveDimGrid;}
  GridBase *FermionRedBlackGrid(void) { return _FiveDimRedBlackGrid;}

  // full checkerboard operations; leave unimplemented as abstract for now
  virtual RealD M    (const FermionField &in, FermionField &out){assert(0); return 0.0;};
  virtual RealD Mdag (const FermionField &in, FermionField &out){assert(0); return 0.0;};

  // half checkerboard operations; leave unimplemented as abstract for now
  virtual void Meooe       (const FermionField &in, FermionField &out){assert(0);};
  virtual void Mooee       (const FermionField &in, FermionField &out){assert(0);};
  virtual void MooeeInv    (const FermionField &in, FermionField &out){assert(0);};

  virtual void MeooeDag    (const FermionField &in, FermionField &out){assert(0);};
  virtual void MooeeDag    (const FermionField &in, FermionField &out){assert(0);};
  virtual void MooeeInvDag (const FermionField &in, FermionField &out){assert(0);};
  virtual void Mdir        (const FermionField &in, FermionField &out,int dir,int disp){assert(0);}; // case by case Wilson, Clover, Cayley, ContFrac, PartFrac

  // These can be overridden by fancy 5d chiral action
  virtual void DhopDeriv  (GaugeField &mat,const FermionField &U,const FermionField &V,int dag);
  virtual void DhopDerivEO(GaugeField &mat,const FermionField &U,const FermionField &V,int dag);
  virtual void DhopDerivOE(GaugeField &mat,const FermionField &U,const FermionField &V,int dag);

  void MomentumSpacePropagatorHt(FermionField &out,const FermionField &in,RealD mass) ;
  void MomentumSpacePropagatorHw(FermionField &out,const FermionField &in,RealD mass) ;

  // Implement the non-hermitian hopping term; half cb or both
  // Implement s-diagonal DW
  void DW    (const FermionField &in, FermionField &out,int dag);
  void Dhop  (const FermionField &in, FermionField &out,int dag);
  void DhopOE(const FermionField &in, FermionField &out,int dag);
  void DhopEO(const FermionField &in, FermionField &out,int dag);

  // add a DhopComm
  // -- suboptimal interface will presently trigger multiple comms.
  void DhopDir(const FermionField &in, FermionField &out,int dir,int disp);

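A hedged usage sketch of the interface declared above; Dw stands for some concrete 5d Wilson-type operator constructed elsewhere, FGrid for its five-dimensional grid, and DaggerNo/DaggerYes are Grid's dagger flags:

  FermionField src(FGrid), result(FGrid);   // assumed already initialised
  Dw.Dhop(src, result, DaggerNo);           // non-hermitian hopping term, full checkerboard
  Dw.Dhop(src, result, DaggerYes);          // daggered counterpart
  Dw.Report();                              // print the DhopCalls / timing counters above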
@ -61,14 +61,8 @@ public:
    switch(Opt) {
#ifdef AVX512
    case OptInlineAsm:
      for (int site = 0; site < Ns; site++) {
        for (int s = 0; s < Ls; s++) {
          WilsonKernels<Impl>::DiracOptAsmDhopSite(st,lo,U,buf,sF,sU,Ls,Ns,in,out);
          sF++;
        }
        sU++;
      }
      break;
      WilsonKernels<Impl>::DiracOptAsmDhopSite(st,lo,U,buf,sF,sU,Ls,Ns,in,out);
      break;
#endif
    case OptHandUnroll:
      for (int site = 0; site < Ns; site++) {
@ -115,13 +109,7 @@ public:
    switch(Opt) {
#ifdef AVX512
    case OptInlineAsm:
      for (int site = 0; site < Ns; site++) {
        for (int s = 0; s < Ls; s++) {
          WilsonKernels<Impl>::DiracOptAsmDhopSiteDag(st,lo,U,buf,sF,sU,Ls,Ns,in,out);
          sF++;
        }
        sU++;
      }
      WilsonKernels<Impl>::DiracOptAsmDhopSiteDag(st,lo,U,buf,sF,sU,Ls,Ns,in,out);
      break;
#endif
    case OptHandUnroll:

@ -39,8 +39,8 @@ namespace QCD{
//on the 5d (rb4d) checkerboarded lattices
////////////////////////////////////////////////////////////////////////

template<class vobj>
void axpibg5x(Lattice<vobj> &z,const Lattice<vobj> &x,RealD a,RealD b)
template<class vobj,class Coeff>
void axpibg5x(Lattice<vobj> &z,const Lattice<vobj> &x,Coeff a,Coeff b)
{
  z.checkerboard = x.checkerboard;
  conformable(x,z);
@ -57,8 +57,8 @@ PARALLEL_FOR_LOOP
  }
}

template<class vobj>
void axpby_ssp(Lattice<vobj> &z, RealD a,const Lattice<vobj> &x,RealD b,const Lattice<vobj> &y,int s,int sp)
template<class vobj,class Coeff>
void axpby_ssp(Lattice<vobj> &z, Coeff a,const Lattice<vobj> &x,Coeff b,const Lattice<vobj> &y,int s,int sp)
{
  z.checkerboard = x.checkerboard;
  conformable(x,y);
@ -72,8 +72,8 @@ PARALLEL_FOR_LOOP
  }
}

template<class vobj>
void ag5xpby_ssp(Lattice<vobj> &z,RealD a,const Lattice<vobj> &x,RealD b,const Lattice<vobj> &y,int s,int sp)
template<class vobj,class Coeff>
void ag5xpby_ssp(Lattice<vobj> &z,Coeff a,const Lattice<vobj> &x,Coeff b,const Lattice<vobj> &y,int s,int sp)
{
  z.checkerboard = x.checkerboard;
  conformable(x,y);
@ -90,8 +90,8 @@ PARALLEL_FOR_LOOP
  }
}

template<class vobj>
void axpbg5y_ssp(Lattice<vobj> &z,RealD a,const Lattice<vobj> &x,RealD b,const Lattice<vobj> &y,int s,int sp)
template<class vobj,class Coeff>
void axpbg5y_ssp(Lattice<vobj> &z,Coeff a,const Lattice<vobj> &x,Coeff b,const Lattice<vobj> &y,int s,int sp)
{
  z.checkerboard = x.checkerboard;
  conformable(x,y);
@ -108,8 +108,8 @@ PARALLEL_FOR_LOOP
  }
}

template<class vobj>
void ag5xpbg5y_ssp(Lattice<vobj> &z,RealD a,const Lattice<vobj> &x,RealD b,const Lattice<vobj> &y,int s,int sp)
template<class vobj,class Coeff>
void ag5xpbg5y_ssp(Lattice<vobj> &z,Coeff a,const Lattice<vobj> &x,Coeff b,const Lattice<vobj> &y,int s,int sp)
{
  z.checkerboard = x.checkerboard;
  conformable(x,y);
@ -127,8 +127,8 @@ PARALLEL_FOR_LOOP
  }
}

template<class vobj>
void axpby_ssp_pminus(Lattice<vobj> &z,RealD a,const Lattice<vobj> &x,RealD b,const Lattice<vobj> &y,int s,int sp)
template<class vobj,class Coeff>
void axpby_ssp_pminus(Lattice<vobj> &z,Coeff a,const Lattice<vobj> &x,Coeff b,const Lattice<vobj> &y,int s,int sp)
{
  z.checkerboard = x.checkerboard;
  conformable(x,y);
@ -144,8 +144,8 @@ PARALLEL_FOR_LOOP
  }
}

template<class vobj>
void axpby_ssp_pplus(Lattice<vobj> &z,RealD a,const Lattice<vobj> &x,RealD b,const Lattice<vobj> &y,int s,int sp)
template<class vobj,class Coeff>
void axpby_ssp_pplus(Lattice<vobj> &z,Coeff a,const Lattice<vobj> &x,Coeff b,const Lattice<vobj> &y,int s,int sp)
{
  z.checkerboard = x.checkerboard;
  conformable(x,y);

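The Coeff template parameter lets the same kernels take complex as well as real coefficients. A hedged usage sketch (field names, grid and coefficient values are illustrative only):

  LatticeFermion chi(FGrid), psi(FGrid);   // 5d fermion fields, assumed initialised
  ComplexD b(1.5, 0.1), c(0.5, -0.1);      // hypothetical complex coefficients
  int s = 2;
  axpby_ssp(chi, b, psi, c, psi, s, s+1);  // chi[s] = b*psi[s] + c*psi[s+1]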
@ -674,6 +674,37 @@ class SU {
      out += la;
    }
  }
  /*
    add GaugeTrans
  */

  template<typename GaugeField,typename GaugeMat>
  static void GaugeTransform( GaugeField &Umu, GaugeMat &g){
    GridBase *grid = Umu._grid;
    conformable(grid,g._grid);

    GaugeMat U(grid);
    GaugeMat ag(grid); ag = adj(g);

    for(int mu=0;mu<Nd;mu++){
      U = PeekIndex<LorentzIndex>(Umu,mu);
      U = g*U*Cshift(ag, mu, 1);
      PokeIndex<LorentzIndex>(Umu,U,mu);
    }
  }
  template<typename GaugeMat>
  static void GaugeTransform( std::vector<GaugeMat> &U, GaugeMat &g){
    GridBase *grid = g._grid;
    GaugeMat ag(grid); ag = adj(g);
    for(int mu=0;mu<Nd;mu++){
      U[mu] = g*U[mu]*Cshift(ag, mu, 1);
    }
  }
  template<typename GaugeField,typename GaugeMat>
  static void RandomGaugeTransform(GridParallelRNG &pRNG, GaugeField &Umu, GaugeMat &g){
    LieRandomize(pRNG,g,1.0);
    GaugeTransform(Umu,g);
  }
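A hedged usage sketch; grid and pRNG are assumed to exist already, and SU3 is the usual SU<3> alias:

  LatticeGaugeField   Umu(grid);            // links, assumed already configured
  LatticeColourMatrix g(grid);              // gauge transformation, filled by LieRandomize
  SU3::RandomGaugeTransform(pRNG, Umu, g);  // U_mu(x) -> g(x) U_mu(x) g^dag(x+mu)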

  // Projects the algebra components of a lattice matrix (of dimension ncol*ncol - 1)
  // inverse operation: FundamentalLieAlgebraMatrix
@ -702,23 +733,33 @@ class SU {
      PokeIndex<LorentzIndex>(out, Umu, mu);
    }
  }
  static void TepidConfiguration(GridParallelRNG &pRNG,
                                 LatticeGaugeField &out) {
    LatticeMatrix Umu(out._grid);
    for (int mu = 0; mu < Nd; mu++) {
      LieRandomize(pRNG, Umu, 0.01);
      PokeIndex<LorentzIndex>(out, Umu, mu);
  template<typename GaugeField>
  static void TepidConfiguration(GridParallelRNG &pRNG,GaugeField &out){
    typedef typename GaugeField::vector_type vector_type;
    typedef iSUnMatrix<vector_type> vMatrixType;
    typedef Lattice<vMatrixType> LatticeMatrixType;

    LatticeMatrixType Umu(out._grid);
    for(int mu=0;mu<Nd;mu++){
      LieRandomize(pRNG,Umu,0.01);
      PokeIndex<LorentzIndex>(out,Umu,mu);
    }
  }
  static void ColdConfiguration(GridParallelRNG &pRNG, LatticeGaugeField &out) {
    LatticeMatrix Umu(out._grid);
    Umu = 1.0;
    for (int mu = 0; mu < Nd; mu++) {
      PokeIndex<LorentzIndex>(out, Umu, mu);
  template<typename GaugeField>
  static void ColdConfiguration(GridParallelRNG &pRNG,GaugeField &out){
    typedef typename GaugeField::vector_type vector_type;
    typedef iSUnMatrix<vector_type> vMatrixType;
    typedef Lattice<vMatrixType> LatticeMatrixType;

    LatticeMatrixType Umu(out._grid);
    Umu=1.0;
    for(int mu=0;mu<Nd;mu++){
      PokeIndex<LorentzIndex>(out,Umu,mu);
    }
  }
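A hedged usage sketch of the templated initialisers above (UGrid and pRNG assumed set up elsewhere):

  LatticeGaugeField Umu(UGrid);
  SU3::ColdConfiguration(pRNG, Umu);    // every link set to the unit matrix
  SU3::TepidConfiguration(pRNG, Umu);   // small random fluctuation about the identity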

  static void taProj(const LatticeMatrix &in, LatticeMatrix &out) {
  template<typename LatticeMatrixType>
  static void taProj( const LatticeMatrixType &in, LatticeMatrixType &out){
    out = Ta(in);
  }
  template <typename LatticeMatrixType>

@ -522,4 +522,4 @@ typedef WilsonLoops<PeriodicGimplR> SU3WilsonLoops;
}
}

#endif
#endif

@ -365,6 +365,18 @@ namespace Optimization {
    }
  };

  struct Div{
    // Real float
    inline __m256 operator()(__m256 a, __m256 b){
      return _mm256_div_ps(a,b);
    }
    // Real double
    inline __m256d operator()(__m256d a, __m256d b){
      return _mm256_div_pd(a,b);
    }
  };
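A minimal sketch of the functor on its own (inside the Grid namespace); in the library it is reached through the DivSIMD typedef and the binary<> dispatcher shown further down rather than called directly:

  __m256d num = _mm256_set1_pd(6.0);
  __m256d den = _mm256_set1_pd(3.0);
  __m256d q   = Optimization::Div()(num, den);  // element-wise divide, every lane 2.0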

  struct Conj{
    // Complex single
    inline __m256 operator()(__m256 in){
@ -437,14 +449,13 @@ namespace Optimization {

  };

#if defined (AVX2) || defined (AVXFMA4)
#define _mm256_alignr_epi32(ret,a,b,n) ret=(__m256) _mm256_alignr_epi8((__m256i)a,(__m256i)b,(n*4)%16)
#define _mm256_alignr_epi64(ret,a,b,n) ret=(__m256d) _mm256_alignr_epi8((__m256i)a,(__m256i)b,(n*8)%16)
#if defined (AVX2)
#define _mm256_alignr_epi32_grid(ret,a,b,n) ret=(__m256) _mm256_alignr_epi8((__m256i)a,(__m256i)b,(n*4)%16)
#define _mm256_alignr_epi64_grid(ret,a,b,n) ret=(__m256d) _mm256_alignr_epi8((__m256i)a,(__m256i)b,(n*8)%16)
#endif

#if defined (AVX1) || defined (AVXFMA)

#define _mm256_alignr_epi32(ret,a,b,n) {      \
#if defined (AVX1) || defined (AVXFMA)
#define _mm256_alignr_epi32_grid(ret,a,b,n) { \
    __m128 aa, bb;                            \
                                              \
    aa = _mm256_extractf128_ps(a,1);          \
@ -458,7 +469,7 @@ namespace Optimization {
    ret = _mm256_insertf128_ps(ret,aa,0);     \
  }

#define _mm256_alignr_epi64(ret,a,b,n) {      \
#define _mm256_alignr_epi64_grid(ret,a,b,n) { \
    __m128d aa, bb;                           \
                                              \
    aa = _mm256_extractf128_pd(a,1);          \
@ -474,19 +485,6 @@ namespace Optimization {

#endif

  inline std::ostream & operator << (std::ostream& stream, const __m256 a)
  {
    const float *p=(const float *)&a;
    stream<< "{"<<p[0]<<","<<p[1]<<","<<p[2]<<","<<p[3]<<","<<p[4]<<","<<p[5]<<","<<p[6]<<","<<p[7]<<"}";
    return stream;
  };
  inline std::ostream & operator<< (std::ostream& stream, const __m256d a)
  {
    const double *p=(const double *)&a;
    stream<< "{"<<p[0]<<","<<p[1]<<","<<p[2]<<","<<p[3]<<"}";
    return stream;
  };

  struct Rotate{

    static inline __m256 rotate(__m256 in,int n){
@ -518,11 +516,10 @@ namespace Optimization {
      __m256 tmp = Permute::Permute0(in);
      __m256 ret;
      if ( n > 3 ) {
        _mm256_alignr_epi32(ret,in,tmp,n);
        _mm256_alignr_epi32_grid(ret,in,tmp,n);
      } else {
        _mm256_alignr_epi32(ret,tmp,in,n);
        _mm256_alignr_epi32_grid(ret,tmp,in,n);
      }
      // std::cout << " align epi32 n=" <<n<<" in "<<tmp<<in<<" -> "<< ret <<std::endl;
      return ret;
    };

@ -531,18 +528,15 @@ namespace Optimization {
      __m256d tmp = Permute::Permute0(in);
      __m256d ret;
      if ( n > 1 ) {
        _mm256_alignr_epi64(ret,in,tmp,n);
        _mm256_alignr_epi64_grid(ret,in,tmp,n);
      } else {
        _mm256_alignr_epi64(ret,tmp,in,n);
        _mm256_alignr_epi64_grid(ret,tmp,in,n);
      }
      // std::cout << " align epi64 n=" <<n<<" in "<<tmp<<in<<" -> "<< ret <<std::endl;
      return ret;
    };

  };


  //Complex float Reduce
  template<>
  inline Grid::ComplexF Reduce<Grid::ComplexF, __m256>::operator()(__m256 in){
@ -631,6 +625,7 @@ namespace Optimization {
  // Arithmetic operations
  typedef Optimization::Sum SumSIMD;
  typedef Optimization::Sub SubSIMD;
  typedef Optimization::Div DivSIMD;
  typedef Optimization::Mult MultSIMD;
  typedef Optimization::MultComplex MultComplexSIMD;
  typedef Optimization::Conj ConjSIMD;

@ -240,6 +240,17 @@ namespace Optimization {
    }
  };

  struct Div{
    // Real float
    inline __m512 operator()(__m512 a, __m512 b){
      return _mm512_div_ps(a,b);
    }
    // Real double
    inline __m512d operator()(__m512d a, __m512d b){
      return _mm512_div_pd(a,b);
    }
  };


  struct Conj{
    // Complex single
@ -497,6 +508,7 @@ namespace Optimization {
  typedef Optimization::Sum SumSIMD;
  typedef Optimization::Sub SubSIMD;
  typedef Optimization::Mult MultSIMD;
  typedef Optimization::Div DivSIMD;
  typedef Optimization::MultComplex MultComplexSIMD;
  typedef Optimization::Conj ConjSIMD;
  typedef Optimization::TimesMinusI TimesMinusISIMD;

@ -244,6 +244,17 @@ namespace Optimization {
    }
  };

  struct Div{
    // Real float
    inline __m512 operator()(__m512 a, __m512 b){
      return _mm512_div_ps(a,b);
    }
    // Real double
    inline __m512d operator()(__m512d a, __m512d b){
      return _mm512_div_pd(a,b);
    }
  };


  struct Conj{
    // Complex single
@ -437,6 +448,7 @@ namespace Optimization {
  // Arithmetic operations
  typedef Optimization::Sum SumSIMD;
  typedef Optimization::Sub SubSIMD;
  typedef Optimization::Div DivSIMD;
  typedef Optimization::Mult MultSIMD;
  typedef Optimization::MultComplex MultComplexSIMD;
  typedef Optimization::Conj ConjSIMD;

@ -224,6 +224,18 @@ namespace Optimization {
    }
  };

  struct Div{
    // Real float
    inline __m128 operator()(__m128 a, __m128 b){
      return _mm_div_ps(a,b);
    }
    // Real double
    inline __m128d operator()(__m128d a, __m128d b){
      return _mm_div_pd(a,b);
    }
  };


  struct Conj{
    // Complex single
    inline __m128 operator()(__m128 in){
@ -372,6 +384,8 @@ namespace Optimization {
    }
  }


  //////////////////////////////////////////////////////////////////////////////////////
  // Here assign types

@ -398,6 +412,7 @@ namespace Optimization {
  // Arithmetic operations
  typedef Optimization::Sum SumSIMD;
  typedef Optimization::Sub SubSIMD;
  typedef Optimization::Div DivSIMD;
  typedef Optimization::Mult MultSIMD;
  typedef Optimization::MultComplex MultComplexSIMD;
  typedef Optimization::Conj ConjSIMD;

@ -77,38 +77,24 @@ struct RealPart<std::complex<T> > {
//////////////////////////////////////
// demote a vector to real type
//////////////////////////////////////

// type alias used to simplify the syntax of std::enable_if
template <typename T>
using Invoke = typename T::type;
template <typename Condition, typename ReturnType>
using EnableIf = Invoke<std::enable_if<Condition::value, ReturnType> >;
template <typename Condition, typename ReturnType>
using NotEnableIf = Invoke<std::enable_if<!Condition::value, ReturnType> >;
template <typename T> using Invoke = typename T::type;
template <typename Condition, typename ReturnType> using EnableIf = Invoke<std::enable_if<Condition::value, ReturnType> >;
template <typename Condition, typename ReturnType> using NotEnableIf = Invoke<std::enable_if<!Condition::value, ReturnType> >;

////////////////////////////////////////////////////////
// Check for complexity with type traits
template <typename T>
struct is_complex : public std::false_type {};
template <>
struct is_complex<std::complex<double> > : public std::true_type {};
template <>
struct is_complex<std::complex<float> > : public std::true_type {};
template <typename T> struct is_complex : public std::false_type {};
template <> struct is_complex<std::complex<double> > : public std::true_type {};
template <> struct is_complex<std::complex<float> > : public std::true_type {};

template <typename T>
using IfReal = Invoke<std::enable_if<std::is_floating_point<T>::value, int> >;
template <typename T>
using IfComplex = Invoke<std::enable_if<is_complex<T>::value, int> >;
template <typename T>
using IfInteger = Invoke<std::enable_if<std::is_integral<T>::value, int> >;
template <typename T> using IfReal = Invoke<std::enable_if<std::is_floating_point<T>::value, int> >;
template <typename T> using IfComplex = Invoke<std::enable_if<is_complex<T>::value, int> >;
template <typename T> using IfInteger = Invoke<std::enable_if<std::is_integral<T>::value, int> >;

template <typename T>
using IfNotReal =
    Invoke<std::enable_if<!std::is_floating_point<T>::value, int> >;
template <typename T>
using IfNotComplex = Invoke<std::enable_if<!is_complex<T>::value, int> >;
template <typename T>
using IfNotInteger = Invoke<std::enable_if<!std::is_integral<T>::value, int> >;
template <typename T> using IfNotReal = Invoke<std::enable_if<!std::is_floating_point<T>::value, int> >;
template <typename T> using IfNotComplex = Invoke<std::enable_if<!is_complex<T>::value, int> >;
template <typename T> using IfNotInteger = Invoke<std::enable_if<!std::is_integral<T>::value, int> >;
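A small self-contained sketch of how these aliases gate overloads through SFINAE; the function name pick is purely illustrative:

  template <typename S, IfComplex<S> = 0>    int pick(const S &) { return 1; }  // complex types
  template <typename S, IfNotComplex<S> = 0> int pick(const S &) { return 0; }  // everything else
  // pick(std::complex<double>(1,2)) -> 1,  pick(3.0) -> 0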

////////////////////////////////////////////////////////
// Define the operation templates functors
@ -285,6 +271,20 @@ class Grid_simd {
    return a * b;
  }

  //////////////////////////////////
  // Divides
  //////////////////////////////////
  friend inline Grid_simd operator/(const Scalar_type &a, Grid_simd b) {
    Grid_simd va;
    vsplat(va, a);
    return va / b;
  }
  friend inline Grid_simd operator/(Grid_simd b, const Scalar_type &a) {
    Grid_simd va;
    vsplat(va, a);
    return b / va;
  }
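A hedged usage sketch with one of Grid's vector typedefs; the scalar is splatted across the lanes and the divide then proceeds element-wise:

  vRealD v;  vsplat(v, 6.0);
  vRealD a = v / 2.0;    // every lane 3.0
  vRealD b = 12.0 / v;   // every lane 2.0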

///////////////////////
// Unary negation
///////////////////////
@ -428,7 +428,6 @@ inline void rotate(Grid_simd<S,V> &ret,Grid_simd<S,V> b,int nrot)
  ret.v = Optimization::Rotate::rotate(b.v,2*nrot);
}

template <class S, class V>
inline void vbroadcast(Grid_simd<S,V> &ret,const Grid_simd<S,V> &src,int lane){
  S* typepun =(S*) &src;
@ -512,7 +511,6 @@ template <class S, class V, IfInteger<S> = 0>
inline void vfalse(Grid_simd<S, V> &ret) {
  vsplat(ret, 0);
}

template <class S, class V>
inline void zeroit(Grid_simd<S, V> &z) {
  vzero(z);
@ -530,7 +528,6 @@ inline void vstream(Grid_simd<S, V> &out, const Grid_simd<S, V> &in) {
  typedef typename S::value_type T;
  binary<void>((T *)&out.v, in.v, VstreamSIMD());
}

template <class S, class V, IfInteger<S> = 0>
inline void vstream(Grid_simd<S, V> &out, const Grid_simd<S, V> &in) {
  out = in;
@ -569,6 +566,34 @@ inline Grid_simd<S, V> operator*(Grid_simd<S, V> a, Grid_simd<S, V> b) {
  return ret;
};

// Distinguish between complex types and others
template <class S, class V, IfComplex<S> = 0>
inline Grid_simd<S, V> operator/(Grid_simd<S, V> a, Grid_simd<S, V> b) {
  typedef Grid_simd<S, V> simd;

  simd ret;
  simd den;
  typename simd::conv_t conv;

  ret = a * conjugate(b) ;
  den = b * conjugate(b) ;

  auto real_den = toReal(den);

  ret.v = binary<V>(ret.v, real_den.v, DivSIMD());

  return ret;
};
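The complex branch above rationalises the denominator so that the only hardware divide is by a real number, lane by lane:

  \frac{a}{b} \;=\; \frac{a\,\bar b}{b\,\bar b} \;=\; \frac{a\,\bar b}{|b|^{2}}, \qquad b\,\bar b \in \mathbb{R}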

// Real/Integer types
template <class S, class V, IfNotComplex<S> = 0>
inline Grid_simd<S, V> operator/(Grid_simd<S, V> a, Grid_simd<S, V> b) {
  Grid_simd<S, V> ret;
  ret.v = binary<V>(a.v, b.v, DivSIMD());
  return ret;
};

///////////////////////
// Conjugate
///////////////////////
@ -582,7 +607,6 @@ template <class S, class V, IfNotComplex<S> = 0>
inline Grid_simd<S, V> conjugate(const Grid_simd<S, V> &in) {
  return in;  // for real objects
}

// Suppress adj for integer types...  // odd; why conjugate above but not adj??
template <class S, class V, IfNotInteger<S> = 0>
inline Grid_simd<S, V> adj(const Grid_simd<S, V> &in) {
@ -596,14 +620,12 @@ template <class S, class V, IfComplex<S> = 0>
inline void timesMinusI(Grid_simd<S, V> &ret, const Grid_simd<S, V> &in) {
  ret.v = binary<V>(in.v, ret.v, TimesMinusISIMD());
}

template <class S, class V, IfComplex<S> = 0>
inline Grid_simd<S, V> timesMinusI(const Grid_simd<S, V> &in) {
  Grid_simd<S, V> ret;
  timesMinusI(ret, in);
  return ret;
}

template <class S, class V, IfNotComplex<S> = 0>
inline Grid_simd<S, V> timesMinusI(const Grid_simd<S, V> &in) {
  return in;
@ -616,14 +638,12 @@ template <class S, class V, IfComplex<S> = 0>
inline void timesI(Grid_simd<S, V> &ret, const Grid_simd<S, V> &in) {
  ret.v = binary<V>(in.v, ret.v, TimesISIMD());
}

template <class S, class V, IfComplex<S> = 0>
inline Grid_simd<S, V> timesI(const Grid_simd<S, V> &in) {
  Grid_simd<S, V> ret;
  timesI(ret, in);
  return ret;
}

template <class S, class V, IfNotComplex<S> = 0>
inline Grid_simd<S, V> timesI(const Grid_simd<S, V> &in) {
  return in;

@ -126,6 +126,36 @@ iVector<rtype,N> operator * (const iVector<mtype,N>& lhs,const iScalar<vtype>& rhs)
  mult(&ret,&lhs,&rhs);
  return ret;
}

//////////////////////////////////////////////////////////////////
// Divide by scalar
//////////////////////////////////////////////////////////////////
template<class rtype,class vtype> strong_inline
iScalar<rtype> operator / (const iScalar<rtype>& lhs,const iScalar<vtype>& rhs)
{
  iScalar<rtype> ret;
  ret._internal = lhs._internal/rhs._internal;
  return ret;
}
template<class rtype,class vtype,int N> strong_inline
iVector<rtype,N> operator / (const iVector<rtype,N>& lhs,const iScalar<vtype>& rhs)
{
  iVector<rtype,N> ret;
  for(int i=0;i<N;i++){
    ret._internal[i] = lhs._internal[i]/rhs._internal;
  }
  return ret;
}
template<class rtype,class vtype,int N> strong_inline
iMatrix<rtype,N> operator / (const iMatrix<rtype,N>& lhs,const iScalar<vtype>& rhs)
{
  iMatrix<rtype,N> ret;
  for(int i=0;i<N;i++){
    for(int j=0;j<N;j++){
      ret._internal[i][j] = lhs._internal[i][j]/rhs._internal;
  }}
  return ret;
}
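A hedged usage sketch; the tensor types are Grid's iScalar/iMatrix wrappers and the numeric values are illustrative:

  iScalar<ComplexD>   norm;  norm._internal = ComplexD(2.0, 0.0);
  iMatrix<ComplexD,3> M;                  // some 3x3 colour-like matrix, assumed filled
  iMatrix<ComplexD,3> Mhalf = M / norm;   // every element divided by norm._internal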

//////////////////////////////////////////////////////////////////
// Glue operators to mult routines. Must resolve return type cleverly from typeof(internal)