diff --git a/Makefile.am b/Makefile.am
index 90c5cd71..18b3ddc3 100644
--- a/Makefile.am
+++ b/Makefile.am
@@ -1,5 +1,10 @@
 # additional include paths necessary to compile the C++ library
 SUBDIRS = lib benchmarks tests
+.PHONY: tests
+
+tests: all
+	$(MAKE) -C tests tests
+
 AM_CXXFLAGS += -I$(top_builddir)/include
 ACLOCAL_AMFLAGS = -I m4
diff --git a/README b/README
deleted file mode 100644
index 17e92fa0..00000000
--- a/README
+++ /dev/null
@@ -1,44 +0,0 @@
-This library provides data parallel C++ container classes with internal memory layout
-that is transformed to map efficiently to SIMD architectures. CSHIFT facilities
-are provided, similar to HPF and cmfortran, and user control is given over the mapping of
-array indices to both MPI tasks and SIMD processing elements.
-
-* Identically shaped arrays then be processed with perfect data parallelisation.
-* Such identically shapped arrays are called conformable arrays.
-
-The transformation is based on the observation that Cartesian array processing involves
-identical processing to be performed on different regions of the Cartesian array.
-
-The library will (eventually) both geometrically decompose into MPI tasks and across SIMD lanes.
-
-Data parallel array operations can then be specified with a SINGLE data parallel paradigm, but
-optimally use MPI, OpenMP and SIMD parallelism under the hood. This is a significant simplification
-for most programmers.
-
-The layout transformations are parametrised by the SIMD vector length. This adapts according to the architecture.
-Presently SSE2 (128 bit) AVX, AVX2 (256 bit) and IMCI and AVX512 (512 bit) targets are supported.
-
-These are presented as
-
-  vRealF, vRealD, vComplexF, vComplexD
-
-internal vector data types. These may be useful in themselves for other programmers.
-The corresponding scalar types are named
-
-  RealF, RealD, ComplexF, ComplexD
-
-MPI parallelism is UNIMPLEMENTED and for now only OpenMP and SIMD parallelism is present in the library.
-
-   You can give `configure' initial values for configuration parameters
-by setting variables in the command line or in the environment. Here
-is are examples:
-
-    ./configure CXX=clang++ CXXFLAGS="-std=c++11 -O3 -msse4" --enable-simd=SSE4
-
-    ./configure CXX=clang++ CXXFLAGS="-std=c++11 -O3 -mavx" --enable-simd=AVX1
-
-    ./configure CXX=clang++ CXXFLAGS="-std=c++11 -O3 -mavx2" --enable-simd=AVX2
-
-    ./configure CXX=icpc CXXFLAGS="-std=c++11 -O3 -mmic" --enable-simd=AVX512 --host=none
-
-
diff --git a/README b/README
new file mode 120000
index 00000000..42061c01
--- /dev/null
+++ b/README
@@ -0,0 +1 @@
+README.md
\ No newline at end of file
diff --git a/README.md b/README.md
index 2a0acad6..f4a376f1 100644
--- a/README.md
+++ b/README.md
@@ -16,11 +16,27 @@
 **Data parallel C++ mathematical object library.**
 
-Please send all pull requests to the `develop` branch.
-
 License: GPL v2.
 
-Last update 2016/08/03.
+Last update Nov 2016.
+
+_Please do not send pull requests to the `master` branch, which is reserved for releases._
+
+### Bug report
+
+_To help us track and solve issues with Grid more efficiently, please report problems using the GitHub issue system rather than sending emails to Grid developers._
+
+When you file an issue, please go through the following checklist:
+
+1. Check that the code is pointing to the `HEAD` of `develop` or any commit in `master` which is tagged with a version number.
+2. Give a description of the target platform (CPU, network, compiler). Please give the full CPU part description, using for example `cat /proc/cpuinfo | grep 'model name' | uniq` (Linux) or `sysctl machdep.cpu.brand_string` (macOS) and the full output of the `--version` option of your compiler.
+3. Give the exact `configure` command used.
+4. Attach `config.log`.
+5. Attach `config.summary`.
+6. Attach the output of `make V=1`.
+7. Describe the issue and any previous attempt to solve it. If relevant, show how to reproduce the issue using a minimal working example.
+
 ### Description
 This library provides data parallel C++ container classes with internal memory layout
@@ -29,7 +45,7 @@ are provided, similar to HPF and cmfortran, and user control is given over the m
 array indices to both MPI tasks and SIMD processing elements.
 
 * Identically shaped arrays then be processed with perfect data parallelisation.
-* Such identically shapped arrays are called conformable arrays.
+* Such identically shaped arrays are called conformable arrays.
 
 The transformation is based on the observation that Cartesian array processing involves
 identical processing to be performed on different regions of the Cartesian array.
@@ -42,7 +58,7 @@ optimally use MPI, OpenMP and SIMD parallelism under the hood. This is a signifi
 for most programmers.
 
 The layout transformations are parametrised by the SIMD vector length. This adapts according to the architecture.
-Presently SSE4 (128 bit) AVX, AVX2 (256 bit) and IMCI and AVX512 (512 bit) targets are supported (ARM NEON and BG/Q QPX on the way).
+Presently SSE4 (128 bit), AVX, AVX2, QPX (256 bit), IMCI, and AVX512 (512 bit) targets are supported (ARM NEON on the way).
 
 These are presented as `vRealF`, `vRealD`, `vComplexF`, and `vComplexD` internal vector data types. These may be useful in themselves for other programmers.
 The corresponding scalar types are named `RealF`, `RealD`, `ComplexF` and `ComplexD`.
@@ -50,7 +66,7 @@ The corresponding scalar types are named `RealF`, `RealD`, `ComplexF` and `Compl
 MPI, OpenMP, and SIMD parallelism are present in the library.
 Please see https://arxiv.org/abs/1512.03487 for more detail.
 
-### Installation
+### Quick start
 First, start by cloning the repository:
 
 ``` bash
@@ -71,12 +87,10 @@ mkdir build; cd build
 ../configure --enable-precision=double --enable-simd=AVX --enable-comms=mpi-auto --prefix=<path>
 ```
 
-where `--enable-precision=` set the default precision (`single` or `double`),
-`--enable-simd=` set the SIMD type (see possible values below), `--enable-
-comms=` set the protocol used for communications (`none`, `mpi`, `mpi-auto` or
-`shmem`), and `` should be replaced by the prefix path where you want to
-install Grid. The `mpi-auto` communication option set `configure` to determine
-automatically how to link to MPI. Other options are available, use `configure
+where `--enable-precision=<single|double>` sets the default precision,
+`--enable-simd=<code>` sets the SIMD type, `--enable-comms=<comm>` sets the
+communication interface, and `<path>` should be replaced by the prefix path where you want to
+install Grid. Other options are detailed in the next section; you can also use `configure
 --help` to display them. Like with any other program using GNU autotool, the `CXX`, `CXXFLAGS`, `LDFLAGS`, ... environment variables can be modified to customise the build.
@@ -92,25 +106,88 @@ To minimise the build time, only the tests at the root of the `tests` directory
 ``` bash
 make -C tests/ tests
 ```
+If you want to build all the tests at once, just use `make tests`.
+
+### Build configuration options
+
+- `--prefix=<path>`: installation prefix for Grid.
+- `--with-gmp=<path>`: look for GMP in the UNIX prefix `<path>`
+- `--with-mpfr=<path>`: look for MPFR in the UNIX prefix `<path>`
+- `--with-fftw=<path>`: look for FFTW in the UNIX prefix `<path>`
+- `--enable-lapack[=<path>]`: enable LAPACK support in Lanczos eigensolver. A UNIX prefix containing the library can be specified (optional).
+- `--enable-mkl[=<path>]`: use Intel MKL for FFT (and LAPACK if enabled) routines. A UNIX prefix containing the library can be specified (optional).
+- `--enable-numa`: ???
+- `--enable-simd=<code>`: set up Grid for the SIMD target `<code>` (default: `GEN`). A list of possible SIMD targets is detailed in a section below.
+- `--enable-precision={single|double}`: set the default precision (default: `double`).
+- `--enable-comms=<comm>`: use `<comm>` for message passing (default: `none`). A list of possible communication interfaces is detailed in a section below.
+- `--enable-rng={ranlux48|mt19937}`: choose the RNG (default: `ranlux48`).
+- `--disable-timers`: disable system dependent high-resolution timers.
+- `--enable-chroma`: enable Chroma regression tests.
+
+### Possible communication interfaces
+
+The following options can be used with the `--enable-comms=<comm>` option to target different communication interfaces:
+
+| `<comm>`       | Description                                                    |
+| -------------- | -------------------------------------------------------------- |
+| `none`         | no communications                                              |
+| `mpi[-auto]`   | MPI communications                                             |
+| `mpi3[-auto]`  | MPI communications using MPI 3 shared memory                   |
+| `mpi3l[-auto]` | MPI communications using MPI 3 shared memory and leader model  |
+| `shmem`        | Cray SHMEM communications                                      |
+
+For the MPI interfaces the optional `-auto` suffix instructs the `configure` script to determine all the necessary compilation and linking flags. This is done by extracting the information from the MPI wrapper specified in the environment variable `MPICXX` (if not specified, `configure` will scan through a list of default names).
 
 ### Possible SIMD types
 
 The following options can be use with the `--enable-simd=` option to target different SIMD instruction sets:
 
-| String      | Description                            |
+| `<code>`    | Description                            |
 | ----------- | -------------------------------------- |
 | `GEN`       | generic portable vector code           |
 | `SSE4`      | SSE 4.2 (128 bit)                      |
 | `AVX`       | AVX (256 bit)                          |
-| `AVXFMA4`   | AVX (256 bit) + FMA                    |
+| `AVXFMA`    | AVX (256 bit) + FMA                    |
+| `AVXFMA4`   | AVX (256 bit) + FMA4                   |
 | `AVX2`      | AVX 2 (256 bit)                        |
 | `AVX512`    | AVX 512 bit                            |
-| `AVX512MIC` | AVX 512 bit for Intel MIC architecture |
-| `ICMI`      | Intel ICMI instructions (512 bit)      |
+| `QPX`       | QPX (256 bit)                          |
 
 Alternatively, some CPU codenames can be directly used:
 
-| String      | Description                            |
+| `<code>`    | Description                            |
 | ----------- | -------------------------------------- |
-| `KNC`       | [Intel Knights Corner](http://ark.intel.com/products/codename/57721/Knights-Corner) |
-| `KNL`       | [Intel Knights Landing](http://ark.intel.com/products/codename/48999/Knights-Landing) |
\ No newline at end of file
+| `KNC`       | [Intel Xeon Phi codename Knights Corner](http://ark.intel.com/products/codename/57721/Knights-Corner) |
+| `KNL`       | [Intel Xeon Phi codename Knights Landing](http://ark.intel.com/products/codename/48999/Knights-Landing) |
+| `BGQ`       | Blue Gene/Q                            |
+
+#### Notes:
+- We currently support AVX512 only for the Intel compiler. Support for GCC and clang will appear in future versions of Grid when the AVX512 support in GCC and clang is more advanced.
+- For BG/Q, only [bgclang](http://trac.alcf.anl.gov/projects/llvm-bgq) is supported. We do not presently plan to support more compilers for this platform.
+- BG/Q performances are currently rather poor. This is being investigated for future versions. + +### Build setup for Intel Knights Landing platform + +The following configuration is recommended for the Intel Knights Landing platform: + +``` bash +../configure --enable-precision=double\ + --enable-simd=KNL \ + --enable-comms=mpi-auto \ + --with-gmp= \ + --with-mpfr= \ + --enable-mkl \ + CXX=icpc MPICXX=mpiicpc +``` + +where `` is the UNIX prefix where GMP and MPFR are installed. If you are working on a Cray machine that does not use the `mpiicpc` wrapper, please use: + +``` bash +../configure --enable-precision=double\ + --enable-simd=KNL \ + --enable-comms=mpi \ + --with-gmp= \ + --with-mpfr= \ + --enable-mkl \ + CXX=CC CC=cc +``` \ No newline at end of file diff --git a/VERSION b/VERSION index c12f9497..e7abbba7 100644 --- a/VERSION +++ b/VERSION @@ -1,4 +1,6 @@ -Version : 0.5.0 +Version : 0.6.0 - AVX512, AVX2, AVX, SSE good - Clang 3.5 and above, ICPC v16 and above, GCC 4.9 and above +- MPI and MPI3 +- HiRep, Smearing, Generic gauge group diff --git a/benchmarks/Benchmark_comms.cc b/benchmarks/Benchmark_comms.cc index d0cd96c7..969a2a42 100644 --- a/benchmarks/Benchmark_comms.cc +++ b/benchmarks/Benchmark_comms.cc @@ -42,15 +42,14 @@ int main (int argc, char ** argv) int Nloop=10; int nmu=0; - for(int mu=0;mu<4;mu++) if (mpi_layout[mu]>1) nmu++; + for(int mu=0;mu1) nmu++; + std::cout< latt_size ({lat*mpi_layout[0], @@ -125,7 +124,7 @@ int main (int argc, char ** argv) std::cout< latt_size ({lat,lat,lat,lat}); @@ -194,128 +193,169 @@ int main (int argc, char ** argv) } } -#if 0 + Nloop=100; std::cout< latt_size ({lat,lat,lat,lat}); + std::vector latt_size ({lat*mpi_layout[0], + lat*mpi_layout[1], + lat*mpi_layout[2], + lat*mpi_layout[3]}); GridCartesian Grid(latt_size,simd_layout,mpi_layout); - std::vector > xbuf(8,std::vector(lat*lat*lat*Ls)); - std::vector > rbuf(8,std::vector(lat*lat*lat*Ls)); - + std::vector xbuf(8); + std::vector rbuf(8); + Grid.ShmBufferFreeAll(); + for(int d=0;d<8;d++){ + xbuf[d] = (HalfSpinColourVectorD *)Grid.ShmBufferMalloc(lat*lat*lat*Ls*sizeof(HalfSpinColourVectorD)); + rbuf[d] = (HalfSpinColourVectorD *)Grid.ShmBufferMalloc(lat*lat*lat*Ls*sizeof(HalfSpinColourVectorD)); + } int ncomm; int bytes=lat*lat*lat*Ls*sizeof(HalfSpinColourVectorD); + double start=usecond(); + for(int i=0;i empty; - std::vector > requests_fwd(Nd,empty); - std::vector > requests_bwd(Nd,empty); + std::vector requests; - for(int mu=0;mu<4;mu++){ ncomm=0; - if (mpi_layout[mu]>1 ) { - ncomm++; - - int comm_proc; - int xmit_to_rank; - int recv_from_rank; - - comm_proc=1; - Grid.ShiftedRanks(mu,comm_proc,xmit_to_rank,recv_from_rank); - Grid.SendToRecvFromInit(requests_fwd[mu], - (void *)&xbuf[mu][0], - xmit_to_rank, - (void *)&rbuf[mu][0], - recv_from_rank, - bytes); - - comm_proc = mpi_layout[mu]-1; - Grid.ShiftedRanks(mu,comm_proc,xmit_to_rank,recv_from_rank); - Grid.SendToRecvFromInit(requests_bwd[mu], - (void *)&xbuf[mu+4][0], - xmit_to_rank, - (void *)&rbuf[mu+4][0], - recv_from_rank, - bytes); - - } - } - - { - double start=usecond(); - for(int i=0;i1 ) { - for(int mu=0;mu<4;mu++){ + ncomm++; + int comm_proc=1; + int xmit_to_rank; + int recv_from_rank; - if (mpi_layout[mu]>1 ) { - - Grid.SendToRecvFromBegin(requests_fwd[mu]); - Grid.SendToRecvFromComplete(requests_fwd[mu]); - Grid.SendToRecvFromBegin(requests_bwd[mu]); - Grid.SendToRecvFromComplete(requests_bwd[mu]); - } - } - Grid.Barrier(); - } + Grid.ShiftedRanks(mu,comm_proc,xmit_to_rank,recv_from_rank); + 
Grid.StencilSendToRecvFromBegin(requests, + (void *)&xbuf[mu][0], + xmit_to_rank, + (void *)&rbuf[mu][0], + recv_from_rank, + bytes); - double stop=usecond(); - - double dbytes = bytes; - double xbytes = Nloop*dbytes*2.0*ncomm; - double rbytes = xbytes; - double bidibytes = xbytes+rbytes; - - double time = stop-start; - - std::cout<1 ) { - - Grid.SendToRecvFromBegin(requests_fwd[mu]); - Grid.SendToRecvFromBegin(requests_bwd[mu]); - Grid.SendToRecvFromComplete(requests_fwd[mu]); - Grid.SendToRecvFromComplete(requests_bwd[mu]); - } } - Grid.Barrier(); } - - double stop=usecond(); - - double dbytes = bytes; - double xbytes = Nloop*dbytes*2.0*ncomm; - double rbytes = xbytes; - double bidibytes = xbytes+rbytes; - - double time = stop-start; - - std::cout< latt_size ({lat*mpi_layout[0], + lat*mpi_layout[1], + lat*mpi_layout[2], + lat*mpi_layout[3]}); + + GridCartesian Grid(latt_size,simd_layout,mpi_layout); + + std::vector xbuf(8); + std::vector rbuf(8); + Grid.ShmBufferFreeAll(); + for(int d=0;d<8;d++){ + xbuf[d] = (HalfSpinColourVectorD *)Grid.ShmBufferMalloc(lat*lat*lat*Ls*sizeof(HalfSpinColourVectorD)); + rbuf[d] = (HalfSpinColourVectorD *)Grid.ShmBufferMalloc(lat*lat*lat*Ls*sizeof(HalfSpinColourVectorD)); + } + + int ncomm; + int bytes=lat*lat*lat*Ls*sizeof(HalfSpinColourVectorD); + + double start=usecond(); + for(int i=0;i requests; + + ncomm=0; + for(int mu=0;mu<4;mu++){ + + if (mpi_layout[mu]>1 ) { + + ncomm++; + int comm_proc=1; + int xmit_to_rank; + int recv_from_rank; + + Grid.ShiftedRanks(mu,comm_proc,xmit_to_rank,recv_from_rank); + Grid.StencilSendToRecvFromBegin(requests, + (void *)&xbuf[mu][0], + xmit_to_rank, + (void *)&rbuf[mu][0], + recv_from_rank, + bytes); + // Grid.StencilSendToRecvFromComplete(requests); + // requests.resize(0); + + comm_proc = mpi_layout[mu]-1; + + Grid.ShiftedRanks(mu,comm_proc,xmit_to_rank,recv_from_rank); + Grid.StencilSendToRecvFromBegin(requests, + (void *)&xbuf[mu+4][0], + xmit_to_rank, + (void *)&rbuf[mu+4][0], + recv_from_rank, + bytes); + Grid.StencilSendToRecvFromComplete(requests); + requests.resize(0); + + } + } + Grid.Barrier(); + + } + double stop=usecond(); + + double dbytes = bytes; + double xbytes = Nloop*dbytes*2.0*ncomm; + double rbytes = xbytes; + double bidibytes = xbytes+rbytes; + + double time = stop-start; // microseconds + + std::cout< WilsonFermion5DR; typedef WilsonFermion5D WilsonFermion5DF; typedef WilsonFermion5D WilsonFermion5DD; @@ -54,15 +53,11 @@ int main (int argc, char ** argv) { Grid_init(&argc,&argv); - if( GridCmdOptionExists(argv,argv+argc,"--asynch") ){ - overlapComms = true; - } - int threads = GridThread::GetThreads(); std::cout< latt4 = GridDefaultLatt(); - const int Ls=16; + const int Ls=8; GridCartesian * UGrid = SpaceTimeGrid::makeFourDimGrid(GridDefaultLatt(), GridDefaultSimd(Nd,vComplex::Nsimd()),GridDefaultMpi()); GridRedBlackCartesian * UrbGrid = SpaceTimeGrid::makeFourDimRedBlackGrid(UGrid); GridCartesian * FGrid = SpaceTimeGrid::makeFiveDimGrid(Ls,UGrid); @@ -126,17 +121,24 @@ int main (int argc, char ** argv) RealD NP = UGrid->_Nprocessors; - for(int doasm=1;doasm<2;doasm++){ - - QCD::WilsonKernelsStatic::AsmOpt=doasm; - DomainWallFermionR Dw(Umu,*FGrid,*FrbGrid,*UGrid,*UrbGrid,mass,M5); - - std::cout<Barrier(); Dw.ZeroCounters(); double t0=usecond(); for(int i=0;iBarrier(); double volume=Ls; for(int mu=0;mu::Dhop "< WilsonFermion5DR; LatticeFermion ssrc(sFGrid); LatticeFermion sref(sFGrid); @@ -180,6 +194,7 @@ int main (int argc, char ** argv) pokeSite(tmp,ssrc,site); }}}}} std::cout<Barrier(); double 
t0=usecond(); sDw.ZeroCounters(); for(int i=0;iBarrier(); double volume=Ls; for(int mu=0;mu::DhopEO "<Barrier(); sDw.ZeroCounters(); sDw.stat.init("DhopEO"); double t0=usecond(); @@ -255,6 +282,7 @@ int main (int argc, char ** argv) sDw.DhopEO(ssrc_o, sr_e, DaggerNo); } double t1=usecond(); + FGrid->Barrier(); sDw.stat.print(); double volume=Ls; for(int mu=0;mu1.0e-5) { + if(error>1.0e-4) { setCheckerboard(ssrc,ssrc_o); setCheckerboard(ssrc,ssrc_e); std::cout<< ssrc << std::endl; @@ -308,13 +336,13 @@ int main (int argc, char ** argv) ref = -0.5*ref; } Dw.Dhop(src,result,1); - std::cout << GridLogMessage << "Naive wilson implementation Dag" << std::endl; + std::cout << GridLogMessage << "Compare to naive wilson implementation Dag to verify correctness" << std::endl; std::cout<Barrier(); double t0=usecond(); for(int i=0;iBarrier(); double volume=Ls; for(int mu=0;mu -Author: paboyle - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License along - with this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. - - See the full license in the file "LICENSE" in the top level distribution directory - *************************************************************************************/ - /* END LEGAL */ -#include - -using namespace std; -using namespace Grid; -using namespace Grid::QCD; - -template -struct scal { - d internal; -}; - - Gamma::GammaMatrix Gmu [] = { - Gamma::GammaX, - Gamma::GammaY, - Gamma::GammaZ, - Gamma::GammaT - }; - -bool overlapComms = false; - - -int main (int argc, char ** argv) -{ - Grid_init(&argc,&argv); - - if( GridCmdOptionExists(argv,argv+argc,"--asynch") ){ - overlapComms = true; - } - - int threads = GridThread::GetThreads(); - std::cout< latt4 = GridDefaultLatt(); - const int Ls=16; - GridCartesian * UGrid = SpaceTimeGrid::makeFourDimGrid(GridDefaultLatt(), GridDefaultSimd(Nd,vComplex::Nsimd()),GridDefaultMpi()); - GridRedBlackCartesian * UrbGrid = SpaceTimeGrid::makeFourDimRedBlackGrid(UGrid); - GridCartesian * FGrid = SpaceTimeGrid::makeFiveDimGrid(Ls,UGrid); - GridRedBlackCartesian * FrbGrid = SpaceTimeGrid::makeFiveDimRedBlackGrid(Ls,UGrid); - - std::vector seeds4({1,2,3,4}); - std::vector seeds5({5,6,7,8}); - - GridParallelRNG RNG4(UGrid); RNG4.SeedFixedIntegers(seeds4); - GridParallelRNG RNG5(FGrid); RNG5.SeedFixedIntegers(seeds5); - - LatticeFermion src (FGrid); random(RNG5,src); - LatticeFermion result(FGrid); result=zero; - LatticeFermion ref(FGrid); ref=zero; - LatticeFermion tmp(FGrid); - LatticeFermion err(FGrid); - - ColourMatrix cm = Complex(1.0,0.0); - - LatticeGaugeField Umu(UGrid); - random(RNG4,Umu); - - LatticeGaugeField Umu5d(FGrid); - - // replicate across fifth dimension - for(int ss=0;ssoSites();ss++){ - for(int s=0;s U(4,FGrid); - for(int mu=0;mu(Umu5d,mu); - } - - if (1) - { - ref = zero; - for(int mu=0;mu_Nprocessors; - - - QCD::WilsonKernelsStatic::AsmOpt=1; - - DomainWallFermionR Dw(Umu,*FGrid,*FrbGrid,*UGrid,*UrbGrid,mass,M5,params); - - std::cout< seeds({1,2,3,4}); 
RealD mass = 0.1; + std::cout << GridLogMessage<< "*****************************************************************" < - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License along - with this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. - - See the full license in the file "LICENSE" in the top level distribution directory - *************************************************************************************/ - /* END LEGAL */ -#include - - -using namespace Grid; -using namespace Grid::QCD; - - -int bench(std::ofstream &os, std::vector &latt4,int Ls); - -int main(int argc,char **argv) -{ - Grid_init(&argc,&argv); - std::ofstream os("zmm.dat"); - - os << "#V Ls Lxy Lzt C++ Asm OMP L1 " < grid({L,L,m*L,m*L}); - std::cout << GridLogMessage <<"\t"; - for(int i=0;i<4;i++) { - std::cout << grid[i]<<"x"; - } - std::cout << Ls<<"\t\t"; - bench(os,grid,Ls); - } - } - } -} - -int bench(std::ofstream &os, std::vector &latt4,int Ls) -{ - - GridCartesian * UGrid = SpaceTimeGrid::makeFourDimGrid(latt4, GridDefaultSimd(Nd,vComplex::Nsimd()),GridDefaultMpi()); - GridRedBlackCartesian * UrbGrid = SpaceTimeGrid::makeFourDimRedBlackGrid(UGrid); - GridCartesian * FGrid = SpaceTimeGrid::makeFiveDimGrid(Ls,UGrid); - GridRedBlackCartesian * FrbGrid = SpaceTimeGrid::makeFiveDimRedBlackGrid(Ls,UGrid); - - std::vector simd_layout = GridDefaultSimd(Nd,vComplex::Nsimd()); - std::vector mpi_layout = GridDefaultMpi(); - int threads = GridThread::GetThreads(); - - std::vector seeds4({1,2,3,4}); - std::vector seeds5({5,6,7,8}); - - GridSerialRNG sRNG; sRNG.SeedFixedIntegers(seeds4); - - LatticeFermion src (FGrid); - LatticeFermion tmp (FGrid); - LatticeFermion srce(FrbGrid); - - LatticeFermion resulto(FrbGrid); resulto=zero; - LatticeFermion resulta(FrbGrid); resulta=zero; - LatticeFermion junk(FrbGrid); junk=zero; - LatticeFermion diff(FrbGrid); - LatticeGaugeField Umu(UGrid); - - double mfc, mfa, mfo, mfl1; - - GridParallelRNG RNG4(UGrid); RNG4.SeedFixedIntegers(seeds4); - GridParallelRNG RNG5(FGrid); RNG5.SeedFixedIntegers(seeds5); - random(RNG5,src); -#if 1 - random(RNG4,Umu); -#else - int mmu=2; - std::vector U(4,UGrid); - for(int mu=0;mu(Umu,mu); - if ( mu!=mmu ) U[mu] = zero; - if ( mu==mmu ) U[mu] = 1.0; - PokeIndex(Umu,U[mu],mu); - } -#endif - pickCheckerboard(Even,srce,src); - - RealD mass=0.1; - RealD M5 =1.8; - DomainWallFermionR Dw(Umu,*FGrid,*FrbGrid,*UGrid,*UrbGrid,mass,M5); - - int ncall=50; - double t0=usecond(); - for(int i=0;i]]) AC_CHECK_DECLS([be64toh],[], [], [[#include ]]) -############### Checks for typedefs, structures, and compiler characteristics -AC_TYPE_SIZE_T -AC_TYPE_UINT32_T -AC_TYPE_UINT64_T - -############### GMP and MPFR ################# +############### GMP and MPFR AC_ARG_WITH([gmp], [AS_HELP_STRING([--with-gmp=prefix], [try this for a non-standard install prefix of the GMP library])], @@ -54,10 +60,17 @@ AC_ARG_WITH([mpfr], [AM_CXXFLAGS="-I$with_mpfr/include $AM_CXXFLAGS"] [AM_LDFLAGS="-L$with_mpfr/lib 
$AM_LDFLAGS"]) -################## lapack #################### +############### FFTW3 +AC_ARG_WITH([fftw], + [AS_HELP_STRING([--with-fftw=prefix], + [try this for a non-standard install prefix of the FFTW3 library])], + [AM_CXXFLAGS="-I$with_fftw/include $AM_CXXFLAGS"] + [AM_LDFLAGS="-L$with_fftw/lib $AM_LDFLAGS"]) + +############### lapack AC_ARG_ENABLE([lapack], [AC_HELP_STRING([--enable-lapack=yes|no|prefix], [enable LAPACK])], - [ac_LAPACK=${enable_lapack}],[ac_LAPACK=no]) + [ac_LAPACK=${enable_lapack}], [ac_LAPACK=no]) case ${ac_LAPACK} in no) @@ -67,10 +80,26 @@ case ${ac_LAPACK} in *) AM_CXXFLAGS="-I$ac_LAPACK/include $AM_CXXFLAGS" AM_LDFLAGS="-L$ac_LAPACK/lib $AM_LDFLAGS" - AC_DEFINE([USE_LAPACK],[1],[use LAPACK]) + AC_DEFINE([USE_LAPACK],[1],[use LAPACK]);; esac -################## first-touch #################### +############### MKL +AC_ARG_ENABLE([mkl], + [AC_HELP_STRING([--enable-mkl=yes|no|prefix], [enable Intel MKL for LAPACK & FFTW])], + [ac_MKL=${enable_mkl}], [ac_MKL=no]) + +case ${ac_MKL} in + no) + ;; + yes) + AC_DEFINE([USE_MKL], [1], [Define to 1 if you use the Intel MKL]);; + *) + AM_CXXFLAGS="-I$ac_MKL/include $AM_CXXFLAGS" + AM_LDFLAGS="-L$ac_MKL/lib $AM_LDFLAGS" + AC_DEFINE([USE_MKL], [1], [Define to 1 if you use the Intel MKL]);; +esac + +############### first-touch AC_ARG_ENABLE([numa], [AC_HELP_STRING([--enable-numa=yes|no|prefix], [enable first touch numa opt])], [ac_NUMA=${enable_NUMA}],[ac_NUMA=no]) @@ -84,56 +113,44 @@ case ${ac_NUMA} in AC_DEFINE([GRID_NUMA],[1],[First touch numa locality]);; esac -################## FFTW3 #################### -AC_ARG_WITH([fftw], - [AS_HELP_STRING([--with-fftw=prefix], - [try this for a non-standard install prefix of the FFTW3 library])], - [AM_CXXFLAGS="-I$with_fftw/include $AM_CXXFLAGS"] - [AM_LDFLAGS="-L$with_fftw/lib $AM_LDFLAGS"]) - -################ Get compiler informations -AC_LANG([C++]) -AX_CXX_COMPILE_STDCXX_11([noext],[mandatory]) -AX_COMPILER_VENDOR -AC_DEFINE_UNQUOTED([CXX_COMP_VENDOR],["$ax_cv_cxx_compiler_vendor"], - [vendor of C++ compiler that will compile the code]) -AX_GXX_VERSION -AC_DEFINE_UNQUOTED([GXX_VERSION],["$GXX_VERSION"], - [version of g++ that will compile the code]) - ############### Checks for library functions CXXFLAGS_CPY=$CXXFLAGS LDFLAGS_CPY=$LDFLAGS CXXFLAGS="$AM_CXXFLAGS $CXXFLAGS" LDFLAGS="$AM_LDFLAGS $LDFLAGS" + AC_CHECK_FUNCS([gettimeofday]) -AC_CHECK_LIB([gmp],[__gmpf_init], - [AC_CHECK_LIB([mpfr],[mpfr_init], - [AC_DEFINE([HAVE_LIBMPFR], [1], [Define to 1 if you have the `MPFR' library (-lmpfr).])] - [have_mpfr=true] - [LIBS="$LIBS -lmpfr"], - [AC_MSG_ERROR([MPFR library not found])])] - [AC_DEFINE([HAVE_LIBGMP], [1], [Define to 1 if you have the `GMP' library (-lgmp).])] - [have_gmp=true] - [LIBS="$LIBS -lgmp"], - [AC_MSG_WARN([**** GMP library not found, Grid can still compile but RHMC will not work ****])]) + +if test "${ac_MKL}x" != "nox"; then + AC_SEARCH_LIBS([mkl_set_interface_layer], [mkl_rt], [], + [AC_MSG_ERROR("MKL enabled but library not found")]) +fi + +AC_SEARCH_LIBS([__gmpf_init], [gmp], + [AC_SEARCH_LIBS([mpfr_init], [mpfr], + [AC_DEFINE([HAVE_LIBMPFR], [1], + [Define to 1 if you have the `MPFR' library])] + [have_mpfr=true], [AC_MSG_ERROR([MPFR library not found])])] + [AC_DEFINE([HAVE_LIBGMP], [1], [Define to 1 if you have the `GMP' library])] + [have_gmp=true]) if test "${ac_LAPACK}x" != "nox"; then - AC_CHECK_LIB([lapack],[LAPACKE_sbdsdc],[], - [AC_MSG_ERROR("LAPACK enabled but library not found")]) -fi -AC_CHECK_LIB([fftw3],[fftw_execute], - 
[AC_DEFINE([HAVE_FFTW],[1],[Define to 1 if you have the `FFTW' library (-lfftw3).])] - [have_fftw=true] - [LIBS="$LIBS -lfftw3 -lfftw3f"], - [AC_MSG_WARN([**** FFTW library not found, Grid can still compile but FFT-based routines will not work ****])]) + AC_SEARCH_LIBS([LAPACKE_sbdsdc], [lapack], [], + [AC_MSG_ERROR("LAPACK enabled but library not found")]) +fi + +AC_SEARCH_LIBS([fftw_execute], [fftw3], + [AC_SEARCH_LIBS([fftwf_execute], [fftw3f], [], + [AC_MSG_ERROR("single precision FFTW library not found")])] + [AC_DEFINE([HAVE_FFTW], [1], [Define to 1 if you have the `FFTW' library])] + [have_fftw=true]) + CXXFLAGS=$CXXFLAGS_CPY LDFLAGS=$LDFLAGS_CPY ############### SIMD instruction selection -AC_ARG_ENABLE([simd],[AC_HELP_STRING([--enable-simd=SSE4|AVX|AVXFMA4|AVXFMA|AVX2|AVX512|AVX512MIC|IMCI|KNL|KNC],\ - [Select instructions to be SSE4.0, AVX 1.0, AVX 2.0+FMA, AVX 512, IMCI])],\ - [ac_SIMD=${enable_simd}],[ac_SIMD=GEN]) +AC_ARG_ENABLE([simd],[AC_HELP_STRING([--enable-simd=], + [select SIMD target (cf. README.md)])], [ac_SIMD=${enable_simd}], [ac_SIMD=GEN]) case ${ax_cv_cxx_compiler_vendor} in clang|gnu) @@ -153,12 +170,15 @@ case ${ax_cv_cxx_compiler_vendor} in AVX2) AC_DEFINE([AVX2],[1],[AVX2 intrinsics]) SIMD_FLAGS='-mavx2 -mfma';; - AVX512|AVX512MIC|KNL) + AVX512) AC_DEFINE([AVX512],[1],[AVX512 intrinsics]) SIMD_FLAGS='-mavx512f -mavx512pf -mavx512er -mavx512cd';; - IMCI|KNC) + KNC) AC_DEFINE([IMCI],[1],[IMCI intrinsics for Knights Corner]) SIMD_FLAGS='';; + KNL) + AC_DEFINE([AVX512],[1],[AVX512 intrinsics]) + SIMD_FLAGS='-march=knl';; GEN) AC_DEFINE([GENERIC_VEC],[1],[generic vector code]) SIMD_FLAGS='';; @@ -176,9 +196,6 @@ case ${ax_cv_cxx_compiler_vendor} in AVX) AC_DEFINE([AVX1],[1],[AVX intrinsics]) SIMD_FLAGS='-mavx -xavx';; - AVXFMA4) - AC_DEFINE([AVXFMA4],[1],[AVX intrinsics with FMA4]) - SIMD_FLAGS='-mavx -mfma';; AVXFMA) AC_DEFINE([AVXFMA],[1],[AVX intrinsics with FMA4]) SIMD_FLAGS='-mavx -mfma';; @@ -188,12 +205,12 @@ case ${ax_cv_cxx_compiler_vendor} in AVX512) AC_DEFINE([AVX512],[1],[AVX512 intrinsics]) SIMD_FLAGS='-xcore-avx512';; - AVX512MIC|KNL) - AC_DEFINE([AVX512],[1],[AVX512 intrinsics for Knights Landing]) - SIMD_FLAGS='-xmic-avx512';; - IMCI|KNC) + KNC) AC_DEFINE([IMCI],[1],[IMCI Intrinsics for Knights Corner]) SIMD_FLAGS='';; + KNL) + AC_DEFINE([AVX512],[1],[AVX512 intrinsics for Knights Landing]) + SIMD_FLAGS='-xmic-avx512';; GEN) AC_DEFINE([GENERIC_VEC],[1],[generic vector code]) SIMD_FLAGS='';; @@ -208,14 +225,18 @@ AM_CXXFLAGS="$SIMD_FLAGS $AM_CXXFLAGS" AM_CFLAGS="$SIMD_FLAGS $AM_CFLAGS" case ${ac_SIMD} in - AVX512|AVX512MIC|KNL) + AVX512|KNL) AC_DEFINE([TEST_ZMM],[1],[compile ZMM test]);; *) ;; esac -############### precision selection -AC_ARG_ENABLE([precision],[AC_HELP_STRING([--enable-precision=single|double],[Select default word size of Real])],[ac_PRECISION=${enable_precision}],[ac_PRECISION=double]) +############### Precision selection +AC_ARG_ENABLE([precision], + [AC_HELP_STRING([--enable-precision=single|double], + [Select default word size of Real])], + [ac_PRECISION=${enable_precision}],[ac_PRECISION=double]) + case ${ac_PRECISION} in single) AC_DEFINE([GRID_DEFAULT_PRECISION_SINGLE],[1],[GRID_DEFAULT_PRECISION is SINGLE] ) @@ -226,43 +247,56 @@ case ${ac_PRECISION} in esac ############### communication type selection -AC_ARG_ENABLE([comms],[AC_HELP_STRING([--enable-comms=none|mpi|mpi-auto|shmem],[Select communications])],[ac_COMMS=${enable_comms}],[ac_COMMS=none]) 
+AC_ARG_ENABLE([comms],[AC_HELP_STRING([--enable-comms=none|mpi|mpi-auto|mpi3|mpi3-auto|shmem], + [Select communications])],[ac_COMMS=${enable_comms}],[ac_COMMS=none]) case ${ac_COMMS} in none) - AC_DEFINE([GRID_COMMS_NONE],[1],[GRID_COMMS_NONE] ) + AC_DEFINE([GRID_COMMS_NONE],[1],[GRID_COMMS_NONE] ) + comms_type='none' ;; - mpi-auto) - AC_DEFINE([GRID_COMMS_MPI],[1],[GRID_COMMS_MPI] ) - LX_FIND_MPI - if test "x$have_CXX_mpi" = 'xno'; then AC_MSG_ERROR(["MPI not found"]); fi - AM_CXXFLAGS="$MPI_CXXFLAGS $AM_CXXFLAGS" - AM_CFLAGS="$MPI_CFLAGS $AM_CFLAGS" - AM_LDFLAGS="`echo $MPI_CXXLDFLAGS | sed -E 's/-l@<:@^ @:>@+//g'` $AM_LDFLAGS" - LIBS="`echo $MPI_CXXLDFLAGS | sed -E 's/-L@<:@^ @:>@+//g'` $LIBS" + mpi3l*) + AC_DEFINE([GRID_COMMS_MPI3L],[1],[GRID_COMMS_MPI3L] ) + comms_type='mpi3l' ;; - mpi) - AC_DEFINE([GRID_COMMS_MPI],[1],[GRID_COMMS_MPI] ) + mpi3*) + AC_DEFINE([GRID_COMMS_MPI3],[1],[GRID_COMMS_MPI3] ) + comms_type='mpi3' ;; - mpi3) - AC_DEFINE([GRID_COMMS_MPI3],[1],[GRID_COMMS_MPI3] ) + mpi*) + AC_DEFINE([GRID_COMMS_MPI],[1],[GRID_COMMS_MPI] ) + comms_type='mpi' ;; shmem) - AC_DEFINE([GRID_COMMS_SHMEM],[1],[GRID_COMMS_SHMEM] ) + AC_DEFINE([GRID_COMMS_SHMEM],[1],[GRID_COMMS_SHMEM] ) + comms_type='shmem' ;; *) - AC_MSG_ERROR([${ac_COMMS} unsupported --enable-comms option]); + AC_MSG_ERROR([${ac_COMMS} unsupported --enable-comms option]); ;; esac -AM_CONDITIONAL(BUILD_COMMS_SHMEM,[ test "X${ac_COMMS}X" == "XshmemX" ]) -AM_CONDITIONAL(BUILD_COMMS_MPI,[ test "X${ac_COMMS}X" == "XmpiX" || test "X${ac_COMMS}X" == "Xmpi-autoX" ]) -AM_CONDITIONAL(BUILD_COMMS_MPI3,[ test "X${ac_COMMS}X" == "Xmpi3X"] ) -AM_CONDITIONAL(BUILD_COMMS_NONE,[ test "X${ac_COMMS}X" == "XnoneX" ]) +case ${ac_COMMS} in + *-auto) + LX_FIND_MPI + if test "x$have_CXX_mpi" = 'xno'; then AC_MSG_ERROR(["MPI not found"]); fi + AM_CXXFLAGS="$MPI_CXXFLAGS $AM_CXXFLAGS" + AM_CFLAGS="$MPI_CFLAGS $AM_CFLAGS" + AM_LDFLAGS="`echo $MPI_CXXLDFLAGS | sed -E 's/-l@<:@^ @:>@+//g'` $AM_LDFLAGS" + LIBS="`echo $MPI_CXXLDFLAGS | sed -E 's/-L@<:@^ @:>@+//g'` $LIBS";; + *) + ;; +esac + +AM_CONDITIONAL(BUILD_COMMS_SHMEM, [ test "${comms_type}X" == "shmemX" ]) +AM_CONDITIONAL(BUILD_COMMS_MPI, [ test "${comms_type}X" == "mpiX" ]) +AM_CONDITIONAL(BUILD_COMMS_MPI3, [ test "${comms_type}X" == "mpi3X" ] ) +AM_CONDITIONAL(BUILD_COMMS_MPI3L, [ test "${comms_type}X" == "mpi3lX" ] ) +AM_CONDITIONAL(BUILD_COMMS_NONE, [ test "${comms_type}X" == "noneX" ]) ############### RNG selection AC_ARG_ENABLE([rng],[AC_HELP_STRING([--enable-rng=ranlux48|mt19937],\ - [Select Random Number Generator to be used])],\ - [ac_RNG=${enable_rng}],[ac_RNG=ranlux48]) + [Select Random Number Generator to be used])],\ + [ac_RNG=${enable_rng}],[ac_RNG=ranlux48]) case ${ac_RNG} in ranlux48) @@ -276,10 +310,11 @@ case ${ac_RNG} in ;; esac -############### timer option +############### Timer option AC_ARG_ENABLE([timers],[AC_HELP_STRING([--enable-timers],\ - [Enable system dependent high res timers])],\ - [ac_TIMERS=${enable_timers}],[ac_TIMERS=yes]) + [Enable system dependent high res timers])],\ + [ac_TIMERS=${enable_timers}],[ac_TIMERS=yes]) + case ${ac_TIMERS} in yes) AC_DEFINE([TIMERS_ON],[1],[TIMERS_ON] ) @@ -293,7 +328,9 @@ case ${ac_TIMERS} in esac ############### Chroma regression test -AC_ARG_ENABLE([chroma],[AC_HELP_STRING([--enable-chroma],[Expect chroma compiled under c++11 ])],ac_CHROMA=yes,ac_CHROMA=no) +AC_ARG_ENABLE([chroma],[AC_HELP_STRING([--enable-chroma], + [Expect chroma compiled under c++11 ])],ac_CHROMA=yes,ac_CHROMA=no) + case ${ac_CHROMA} in yes|no) ;; @@ 
-301,6 +338,7 @@ case ${ac_CHROMA} in AC_MSG_ERROR([${ac_CHROMA} unsupported --enable-chroma option]); ;; esac + AM_CONDITIONAL(BUILD_CHROMA_REGRESSION,[ test "X${ac_CHROMA}X" == "XyesX" ]) ############### Doxygen @@ -332,35 +370,36 @@ AC_CONFIG_FILES(tests/qdpxx/Makefile) AC_CONFIG_FILES(benchmarks/Makefile) AC_OUTPUT -echo " -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +echo "~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Summary of configuration for $PACKAGE v$VERSION ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ----- PLATFORM ---------------------------------------- -- architecture (build) : $build_cpu -- os (build) : $build_os -- architecture (target) : $target_cpu -- os (target) : $target_os -- compiler vendor : ${ax_cv_cxx_compiler_vendor} -- compiler version : ${ax_cv_gxx_version} +architecture (build) : $build_cpu +os (build) : $build_os +architecture (target) : $target_cpu +os (target) : $target_os +compiler vendor : ${ax_cv_cxx_compiler_vendor} +compiler version : ${ax_cv_gxx_version} ----- BUILD OPTIONS ----------------------------------- -- SIMD : ${ac_SIMD} -- Threading : ${ac_openmp} -- Communications type : ${ac_COMMS} -- Default precision : ${ac_PRECISION} -- RNG choice : ${ac_RNG} -- GMP : `if test "x$have_gmp" = xtrue; then echo yes; else echo no; fi` -- LAPACK : ${ac_LAPACK} -- FFTW : `if test "x$have_fftw" = xtrue; then echo yes; else echo no; fi` -- build DOXYGEN documentation : `if test "x$enable_doc" = xyes; then echo yes; else echo no; fi` -- graphs and diagrams : `if test "x$enable_dot" = xyes; then echo yes; else echo no; fi` +SIMD : ${ac_SIMD} +Threading : ${ac_openmp} +Communications type : ${comms_type} +Default precision : ${ac_PRECISION} +RNG choice : ${ac_RNG} +GMP : `if test "x$have_gmp" = xtrue; then echo yes; else echo no; fi` +LAPACK : ${ac_LAPACK} +FFTW : `if test "x$have_fftw" = xtrue; then echo yes; else echo no; fi` +build DOXYGEN documentation : `if test "x$enable_doc" = xyes; then echo yes; else echo no; fi` +graphs and diagrams : `if test "x$enable_dot" = xyes; then echo yes; else echo no; fi` ----- BUILD FLAGS ------------------------------------- -- CXXFLAGS: +CXXFLAGS: `echo ${AM_CXXFLAGS} ${CXXFLAGS} | tr ' ' '\n' | sed 's/^-/ -/g'` -- LDFLAGS: +LDFLAGS: `echo ${AM_LDFLAGS} ${LDFLAGS} | tr ' ' '\n' | sed 's/^-/ -/g'` -- LIBS: +LIBS: `echo ${LIBS} | tr ' ' '\n' | sed 's/^-/ -/g'` -------------------------------------------------------- -" +-------------------------------------------------------" > config.summary +echo "" +cat config.summary +echo "" diff --git a/lib/Cshift.h b/lib/Cshift.h index 4eeb6cea..cd162e35 100644 --- a/lib/Cshift.h +++ b/lib/Cshift.h @@ -42,6 +42,10 @@ Author: Peter Boyle #include #endif +#ifdef GRID_COMMS_MPI3L +#include +#endif + #ifdef GRID_COMMS_SHMEM #include // uses same implementation of communicator #endif diff --git a/lib/FFT.h b/lib/FFT.h index 9a59ed01..b5b31d82 100644 --- a/lib/FFT.h +++ b/lib/FFT.h @@ -29,9 +29,15 @@ Author: Peter Boyle #ifndef _GRID_FFT_H_ #define _GRID_FFT_H_ -#ifdef HAVE_FFTW +#ifdef HAVE_FFTW +#ifdef USE_MKL +#include +#else #include #endif +#endif + + namespace Grid { template struct FFTW { }; @@ -98,174 +104,199 @@ namespace Grid { #define FFTW_BACKWARD (+1) #endif - class FFT { + class FFT { private: - + GridCartesian *vgrid; GridCartesian *sgrid; - + int Nd; double flops; double flops_call; uint64_t usec; - + std::vector dimensions; std::vector processors; std::vector processor_coor; - + public: - + static const int forward=FFTW_FORWARD; static const 
int backward=FFTW_BACKWARD; - + double Flops(void) {return flops;} double MFlops(void) {return flops/usec;} + double USec(void) {return (double)usec;} - FFT ( GridCartesian * grid ) : - vgrid(grid), - Nd(grid->_ndimension), - dimensions(grid->_fdimensions), - processors(grid->_processors), - processor_coor(grid->_processor_coor) + FFT ( GridCartesian * grid ) : + vgrid(grid), + Nd(grid->_ndimension), + dimensions(grid->_fdimensions), + processors(grid->_processors), + processor_coor(grid->_processor_coor) { flops=0; usec =0; std::vector layout(Nd,1); sgrid = new GridCartesian(dimensions,layout,processors); }; - - ~FFT ( void) { - delete sgrid; + + ~FFT ( void) { + delete sgrid; } template - void FFT_dim(Lattice &result,const Lattice &source,int dim, int inverse){ + void FFT_dim_mask(Lattice &result,const Lattice &source,std::vector mask,int sign){ conformable(result._grid,vgrid); conformable(source._grid,vgrid); + Lattice tmp(vgrid); + tmp = source; + for(int d=0;d + void FFT_all_dim(Lattice &result,const Lattice &source,int sign){ + std::vector mask(Nd,1); + FFT_dim_mask(result,source,mask,sign); + } + + + template + void FFT_dim(Lattice &result,const Lattice &source,int dim, int sign){ +#ifndef HAVE_FFTW + assert(0); +#else + conformable(result._grid,vgrid); + conformable(source._grid,vgrid); int L = vgrid->_ldimensions[dim]; int G = vgrid->_fdimensions[dim]; - + std::vector layout(Nd,1); std::vector pencil_gd(vgrid->_fdimensions); - - pencil_gd[dim] = G*processors[dim]; - + + pencil_gd[dim] = G*processors[dim]; + // Pencil global vol LxLxGxLxL per node GridCartesian pencil_g(pencil_gd,layout,processors); - + // Construct pencils typedef typename vobj::scalar_object sobj; typedef typename sobj::scalar_type scalar; + + Lattice pgbuf(&pencil_g); + - Lattice ssource(vgrid); ssource =source; - Lattice pgsource(&pencil_g); - Lattice pgresult(&pencil_g); pgresult=zero; - -#ifndef HAVE_FFTW - assert(0); -#else typedef typename FFTW::FFTW_scalar FFTW_scalar; typedef typename FFTW::FFTW_plan FFTW_plan; - - { - int Ncomp = sizeof(sobj)/sizeof(scalar); - int Nlow = 1; - for(int d=0;d_ldimensions[d]; - } - - int rank = 1; /* 1d transforms */ - int n[] = {G}; /* 1d transforms of length G */ - int howmany = Ncomp; - int odist,idist,istride,ostride; - idist = odist = 1; /* Distance between consecutive FT's */ - istride = ostride = Ncomp*Nlow; /* distance between two elements in the same FT */ - int *inembed = n, *onembed = n; - - - int sign = FFTW_FORWARD; - if (inverse) sign = FFTW_BACKWARD; - - FFTW_plan p; - { - FFTW_scalar *in = (FFTW_scalar *)&pgsource._odata[0]; - FFTW_scalar *out= (FFTW_scalar *)&pgresult._odata[0]; - p = FFTW::fftw_plan_many_dft(rank,n,howmany, - in,inembed, - istride,idist, - out,onembed, - ostride, odist, - sign,FFTW_ESTIMATE); - } - - std::vector lcoor(Nd), gcoor(Nd); - - // Barrel shift and collect global pencil - for(int p=0;plSites();idx++) { - - - sgrid->LocalIndexToLocalCoor(idx,lcoor); - - sobj s; - - peekLocalSite(s,ssource,lcoor); - - lcoor[dim]+=p*L; - - pokeLocalSite(s,pgsource,lcoor); - } - - ssource = Cshift(ssource,dim,L); - } - - // Loop over orthog coords - int NN=pencil_g.lSites(); - GridStopWatch timer; - timer.Start(); - -//PARALLEL_FOR_LOOP - for(int idx=0;idx::fftw_execute_dft(p,in,out); - } - } - - timer.Stop(); - - double add,mul,fma; - FFTW::fftw_flops(p,&add,&mul,&fma); - flops_call = add+mul+2.0*fma; - usec += timer.useconds(); - flops+= flops_call*NN; - int pc = processor_coor[dim]; - for(int idx=0;idxlSites();idx++) { - 
sgrid->LocalIndexToLocalCoor(idx,lcoor); - gcoor = lcoor; - // extract the result - sobj s; - gcoor[dim] = lcoor[dim]+L*pc; - peekLocalSite(s,pgresult,gcoor); - pokeLocalSite(s,result,lcoor); - } - - FFTW::fftw_destroy_plan(p); + + int Ncomp = sizeof(sobj)/sizeof(scalar); + int Nlow = 1; + for(int d=0;d_ldimensions[d]; } + + int rank = 1; /* 1d transforms */ + int n[] = {G}; /* 1d transforms of length G */ + int howmany = Ncomp; + int odist,idist,istride,ostride; + idist = odist = 1; /* Distance between consecutive FT's */ + istride = ostride = Ncomp*Nlow; /* distance between two elements in the same FT */ + int *inembed = n, *onembed = n; + + scalar div; + if ( sign == backward ) div = 1.0/G; + else if ( sign == forward ) div = 1.0; + else assert(0); + + FFTW_plan p; + { + FFTW_scalar *in = (FFTW_scalar *)&pgbuf._odata[0]; + FFTW_scalar *out= (FFTW_scalar *)&pgbuf._odata[0]; + p = FFTW::fftw_plan_many_dft(rank,n,howmany, + in,inembed, + istride,idist, + out,onembed, + ostride, odist, + sign,FFTW_ESTIMATE); + } + + // Barrel shift and collect global pencil + std::vector lcoor(Nd), gcoor(Nd); + result = source; + for(int p=0;p cbuf(Nd); + sobj s; + + PARALLEL_FOR_LOOP_INTERN + for(int idx=0;idxlSites();idx++) { + sgrid->LocalIndexToLocalCoor(idx,cbuf); + peekLocalSite(s,result,cbuf); + cbuf[dim]+=p*L; + pokeLocalSite(s,pgbuf,cbuf); + } + } + result = Cshift(result,dim,L); + } + + // Loop over orthog coords + int NN=pencil_g.lSites(); + GridStopWatch timer; + timer.Start(); + PARALLEL_REGION + { + std::vector cbuf(Nd); + + PARALLEL_FOR_LOOP_INTERN + for(int idx=0;idx::fftw_execute_dft(p,in,out); + } + } + } + timer.Stop(); + + // performance counting + double add,mul,fma; + FFTW::fftw_flops(p,&add,&mul,&fma); + flops_call = add+mul+2.0*fma; + usec += timer.useconds(); + flops+= flops_call*NN; + + // writing out result + int pc = processor_coor[dim]; + PARALLEL_REGION + { + std::vector clbuf(Nd), cgbuf(Nd); + sobj s; + + PARALLEL_FOR_LOOP_INTERN + for(int idx=0;idxlSites();idx++) { + sgrid->LocalIndexToLocalCoor(idx,clbuf); + cgbuf = clbuf; + cgbuf[dim] = clbuf[dim]+L*pc; + peekLocalSite(s,pgbuf,cgbuf); + s = s * div; + pokeLocalSite(s,result,clbuf); + } + } + + // destroying plan + FFTW::fftw_destroy_plan(p); #endif - - } - }; - - } #endif diff --git a/lib/Grid.h b/lib/Grid.h index 486ee4d3..0c5983f3 100644 --- a/lib/Grid.h +++ b/lib/Grid.h @@ -77,11 +77,10 @@ Author: paboyle #include #include #include -#include -#include - #include +#include +#include #include #include diff --git a/lib/Init.cc b/lib/Init.cc index a2a8526a..d6d6b9f8 100644 --- a/lib/Init.cc +++ b/lib/Init.cc @@ -44,9 +44,33 @@ Author: paboyle #include #include #include +#include +#include + + +#include +#ifdef __APPLE__ +static int +feenableexcept (unsigned int excepts) +{ + static fenv_t fenv; + unsigned int new_excepts = excepts & FE_ALL_EXCEPT, + old_excepts; // previous masks + + if ( fegetenv (&fenv) ) return -1; + old_excepts = fenv.__control & FE_ALL_EXCEPT; + + // unmask + fenv.__control &= ~new_excepts; + fenv.__mxcsr &= ~(new_excepts << 7); + + return ( fesetenv (&fenv) ? 
-1 : old_excepts ); +} +#endif namespace Grid { + ////////////////////////////////////////////////////// // Convenience functions to access stadard command line arg // driven parallelism controls @@ -123,6 +147,13 @@ void GridCmdOptionIntVector(std::string &str,std::vector & vec) return; } +void GridCmdOptionInt(std::string &str,int & val) +{ + std::stringstream ss(str); + ss>>val; + return; +} + void GridParseLayout(char **argv,int argc, std::vector &latt, @@ -153,14 +184,12 @@ void GridParseLayout(char **argv,int argc, assert(ompthreads.size()==1); GridThread::SetThreads(ompthreads[0]); } - if( GridCmdOptionExists(argv,argv+argc,"--cores") ){ - std::vector cores(0); + int cores; arg= GridCmdOptionPayload(argv,argv+argc,"--cores"); - GridCmdOptionIntVector(arg,cores); - GridThread::SetCores(cores[0]); + GridCmdOptionInt(arg,cores); + GridThread::SetCores(cores); } - } std::string GridCmdVectorIntToString(const std::vector & vec){ @@ -169,7 +198,7 @@ std::string GridCmdVectorIntToString(const std::vector & vec){ return oss.str(); } ///////////////////////////////////////////////////////// -// +// Reinit guard ///////////////////////////////////////////////////////// static int Grid_is_initialised = 0; @@ -178,27 +207,31 @@ void Grid_init(int *argc,char ***argv) { GridLogger::StopWatch.Start(); + std::string arg; + + //////////////////////////////////// + // Shared memory block size + //////////////////////////////////// + if( GridCmdOptionExists(*argv,*argv+*argc,"--shm") ){ + int MB; + arg= GridCmdOptionPayload(*argv,*argv+*argc,"--shm"); + GridCmdOptionInt(arg,MB); + CartesianCommunicator::MAX_MPI_SHM_BYTES = MB*1024*1024; + } + CartesianCommunicator::Init(argc,argv); - // Parse command line args. + //////////////////////////////////// + // Logging + //////////////////////////////////// - std::string arg; std::vector logstreams; std::string defaultLog("Error,Warning,Message,Performance"); - GridCmdOptionCSL(defaultLog,logstreams); GridLogConfigure(logstreams); - if( GridCmdOptionExists(*argv,*argv+*argc,"--help") ){ - std::cout< -#endif + void Grid_debug_handler_init(void) { struct sigaction sa,osa; @@ -346,9 +425,9 @@ void Grid_debug_handler_init(void) sa.sa_flags = SA_SIGINFO; sigaction(SIGSEGV,&sa,NULL); sigaction(SIGTRAP,&sa,NULL); -#ifdef GRID_FPE + feenableexcept( FE_INVALID|FE_OVERFLOW|FE_DIVBYZERO); + sigaction(SIGFPE,&sa,NULL); -#endif } } diff --git a/lib/Init.h b/lib/Init.h index d68cee37..6b70d42d 100644 --- a/lib/Init.h +++ b/lib/Init.h @@ -54,6 +54,7 @@ namespace Grid { void GridCmdOptionCSL(std::string str,std::vector & vec); void GridCmdOptionIntVector(std::string &str,std::vector & vec); + void GridParseLayout(char **argv,int argc, std::vector &latt, std::vector &simd, diff --git a/lib/Log.cc b/lib/Log.cc index d4ac42ee..7521657b 100644 --- a/lib/Log.cc +++ b/lib/Log.cc @@ -31,8 +31,23 @@ directory /* END LEGAL */ #include +#include + namespace Grid { + std::string demangle(const char* name) { + + int status = -4; // some arbitrary value to eliminate the compiler warning + + // enable c++11 by passing the flag -std=c++11 to g++ + std::unique_ptr res { + abi::__cxa_demangle(name, NULL, NULL, &status), + std::free + }; + + return (status==0) ? 
res.get() : name ; + } + GridStopWatch Logger::StopWatch; int Logger::timestamp; std::ostream Logger::devnull(0); @@ -78,7 +93,7 @@ void GridLogConfigure(std::vector &logstreams) { //////////////////////////////////////////////////////////// void Grid_quiesce_nodes(void) { int me = 0; -#if defined(GRID_COMMS_MPI) || defined(GRID_COMMS_MPI3) +#if defined(GRID_COMMS_MPI) || defined(GRID_COMMS_MPI3) || defined(GRID_COMMS_MPI3L) MPI_Comm_rank(MPI_COMM_WORLD, &me); #endif #ifdef GRID_COMMS_SHMEM diff --git a/lib/Log.h b/lib/Log.h index dd3fe927..d7422ca9 100644 --- a/lib/Log.h +++ b/lib/Log.h @@ -144,6 +144,7 @@ extern GridLogger GridLogIterative ; extern GridLogger GridLogIntegrator ; extern Colours GridLogColours; + std::string demangle(const char* name) ; #define _NBACKTRACE (256) extern void * Grid_backtrace_buffer[_NBACKTRACE]; @@ -162,7 +163,7 @@ std::fclose(fp); \ int symbols = backtrace (Grid_backtrace_buffer,_NBACKTRACE);\ char **strings = backtrace_symbols(Grid_backtrace_buffer,symbols);\ for (int i = 0; i < symbols; i++){\ - std::fprintf (fp,"BackTrace Strings: %d %s\n",i, strings[i]); std::fflush(fp); \ + std::fprintf (fp,"BackTrace Strings: %d %s\n",i, demangle(strings[i]).c_str()); std::fflush(fp); \ }\ } #else diff --git a/lib/Makefile.am b/lib/Makefile.am index 3285a545..a779135f 100644 --- a/lib/Makefile.am +++ b/lib/Makefile.am @@ -9,6 +9,11 @@ if BUILD_COMMS_MPI3 extra_sources+=communicator/Communicator_base.cc endif +if BUILD_COMMS_MPI3L + extra_sources+=communicator/Communicator_mpi3_leader.cc + extra_sources+=communicator/Communicator_base.cc +endif + if BUILD_COMMS_SHMEM extra_sources+=communicator/Communicator_shmem.cc extra_sources+=communicator/Communicator_base.cc diff --git a/lib/PerfCount.h b/lib/PerfCount.h index 9ac58883..5ab07c02 100644 --- a/lib/PerfCount.h +++ b/lib/PerfCount.h @@ -43,6 +43,9 @@ Author: paboyle #else #include #endif +#ifdef __x86_64__ +#include +#endif namespace Grid { @@ -86,7 +89,6 @@ inline uint64_t cyclecount(void){ return tmp; } #elif defined __x86_64__ -#include inline uint64_t cyclecount(void){ return __rdtsc(); // unsigned int dummy; diff --git a/lib/Simd.h b/lib/Simd.h index 9568c555..adc2849d 100644 --- a/lib/Simd.h +++ b/lib/Simd.h @@ -237,6 +237,18 @@ namespace Grid { stream<<">"; return stream; } + inline std::ostream& operator<< (std::ostream& stream, const vInteger &o){ + int nn=vInteger::Nsimd(); + std::vector > buf(nn); + vstore(o,&buf[0]); + stream<<"<"; + for(int i=0;i"; + return stream; + } } diff --git a/lib/Threads.h b/lib/Threads.h index 08e5d545..2f270b73 100644 --- a/lib/Threads.h +++ b/lib/Threads.h @@ -38,14 +38,19 @@ Author: paboyle #ifdef GRID_OMP #include #ifdef GRID_NUMA -#define PARALLEL_FOR_LOOP _Pragma("omp parallel for schedule(static)") +#define PARALLEL_FOR_LOOP _Pragma("omp parallel for schedule(static)") +#define PARALLEL_FOR_LOOP_INTERN _Pragma("omp for schedule(static)") #else -#define PARALLEL_FOR_LOOP _Pragma("omp parallel for schedule(runtime)") +#define PARALLEL_FOR_LOOP _Pragma("omp parallel for schedule(runtime)") +#define PARALLEL_FOR_LOOP_INTERN _Pragma("omp for schedule(runtime)") #endif #define PARALLEL_NESTED_LOOP2 _Pragma("omp parallel for collapse(2)") +#define PARALLEL_REGION _Pragma("omp parallel") #else -#define PARALLEL_FOR_LOOP +#define PARALLEL_FOR_LOOP +#define PARALLEL_FOR_LOOP_INTERN #define PARALLEL_NESTED_LOOP2 +#define PARALLEL_REGION #endif namespace Grid { diff --git a/lib/algorithms/CoarsenedMatrix.h b/lib/algorithms/CoarsenedMatrix.h index 3ae0af75..fd9acc91 100644 --- 
a/lib/algorithms/CoarsenedMatrix.h +++ b/lib/algorithms/CoarsenedMatrix.h @@ -282,7 +282,7 @@ PARALLEL_FOR_LOOP } else if(SE->_is_local) { nbr = in._odata[SE->_offset]; } else { - nbr = Stencil.comm_buf[SE->_offset]; + nbr = Stencil.CommBuf()[SE->_offset]; } res = res + A[point]._odata[ss]*nbr; } diff --git a/lib/algorithms/iterative/ConjugateGradient.h b/lib/algorithms/iterative/ConjugateGradient.h index f340eb38..cf3872c8 100644 --- a/lib/algorithms/iterative/ConjugateGradient.h +++ b/lib/algorithms/iterative/ConjugateGradient.h @@ -154,7 +154,7 @@ class ConjugateGradient : public OperatorFunction { << LinalgTimer.Elapsed(); std::cout << std::endl; - if (ErrorOnNoConverge) assert(true_residual / Tolerance < 1000.0); + if (ErrorOnNoConverge) assert(true_residual / Tolerance < 10000.0); return; } diff --git a/lib/algorithms/iterative/ImplicitlyRestartedLanczos.h b/lib/algorithms/iterative/ImplicitlyRestartedLanczos.h index 1a1a0a16..5d6deae0 100644 --- a/lib/algorithms/iterative/ImplicitlyRestartedLanczos.h +++ b/lib/algorithms/iterative/ImplicitlyRestartedLanczos.h @@ -31,7 +31,11 @@ Author: paboyle #include //memset #ifdef USE_LAPACK -#include +void LAPACK_dstegr(char *jobz, char *range, int *n, double *d, double *e, + double *vl, double *vu, int *il, int *iu, double *abstol, + int *m, double *w, double *z, int *ldz, int *isuppz, + double *work, int *lwork, int *iwork, int *liwork, + int *info); #endif #include "DenseMatrix.h" #include "EigenSort.h" diff --git a/lib/communicator/Communicator_base.cc b/lib/communicator/Communicator_base.cc index 9810987d..b003d867 100644 --- a/lib/communicator/Communicator_base.cc +++ b/lib/communicator/Communicator_base.cc @@ -31,14 +31,8 @@ namespace Grid { /////////////////////////////////////////////////////////////// // Info that is setup once and indept of cartesian layout /////////////////////////////////////////////////////////////// -int CartesianCommunicator::ShmRank; -int CartesianCommunicator::ShmSize; -int CartesianCommunicator::GroupRank; -int CartesianCommunicator::GroupSize; -int CartesianCommunicator::WorldRank; -int CartesianCommunicator::WorldSize; -int CartesianCommunicator::Slave; void * CartesianCommunicator::ShmCommBuf; +uint64_t CartesianCommunicator::MAX_MPI_SHM_BYTES = 128*1024*1024; ///////////////////////////////// // Alloc, free shmem region @@ -48,8 +42,12 @@ void *CartesianCommunicator::ShmBufferMalloc(size_t bytes){ void *ptr = (void *)heap_top; heap_top += bytes; heap_bytes+= bytes; - std::cout <<"Shm alloc "<= MAX_MPI_SHM_BYTES) { + std::cout<< " ShmBufferMalloc exceeded shared heap size -- try increasing with --shm flag" < &list, void *xmit, diff --git a/lib/communicator/Communicator_base.h b/lib/communicator/Communicator_base.h index 9a15d2b0..94ad1093 100644 --- a/lib/communicator/Communicator_base.h +++ b/lib/communicator/Communicator_base.h @@ -1,3 +1,4 @@ + /************************************************************************************* Grid physics library, www.github.com/paboyle/Grid @@ -37,6 +38,9 @@ Author: Peter Boyle #ifdef GRID_COMMS_MPI3 #include #endif +#ifdef GRID_COMMS_MPI3L +#include +#endif #ifdef GRID_COMMS_SHMEM #include #endif @@ -51,7 +55,7 @@ class CartesianCommunicator { // Give external control (command line override?) of this static const int MAXLOG2RANKSPERNODE = 16; - static const uint64_t MAX_MPI_SHM_BYTES = 128*1024*1024; + static uint64_t MAX_MPI_SHM_BYTES; // Communicator should know nothing of the physics grid, only processor grid. 
int _Nprocessors; // How many in all @@ -60,9 +64,9 @@ class CartesianCommunicator { std::vector _processor_coor; // linear processor coordinate unsigned long _ndimension; -#if defined (GRID_COMMS_MPI) || defined (GRID_COMMS_MPI3) - MPI_Comm communicator; +#if defined (GRID_COMMS_MPI) || defined (GRID_COMMS_MPI3) || defined (GRID_COMMS_MPI3L) static MPI_Comm communicator_world; + MPI_Comm communicator; typedef MPI_Request CommsRequest_t; #else typedef int CommsRequest_t; @@ -75,7 +79,15 @@ class CartesianCommunicator { // cartesian communicator on a subset of ranks, slave ranks controlled // by group leader with data xfer via shared memory //////////////////////////////////////////////////////////////////// -#ifdef GRID_COMMS_MPI3 +#ifdef GRID_COMMS_MPI3 + + static int ShmRank; + static int ShmSize; + static int GroupRank; + static int GroupSize; + static int WorldRank; + static int WorldSize; + std::vector WorldDims; std::vector GroupDims; std::vector ShmDims; @@ -83,7 +95,7 @@ class CartesianCommunicator { std::vector GroupCoor; std::vector ShmCoor; std::vector WorldCoor; - + static std::vector GroupRanks; static std::vector MyGroup; static int ShmSetup; @@ -93,13 +105,20 @@ class CartesianCommunicator { std::vector LexicographicToWorldRank; static std::vector ShmCommBufs; + #else static void ShmInitGeneric(void); static commVector ShmBufStorageVector; #endif + + ///////////////////////////////// + // Grid information and queries + // Implemented in Communicator_base.C + ///////////////////////////////// static void * ShmCommBuf; size_t heap_top; size_t heap_bytes; + void *ShmBufferSelf(void); void *ShmBuffer(int rank); void *ShmBufferTranslate(int rank,void * local_p); @@ -123,28 +142,12 @@ class CartesianCommunicator { int RankFromProcessorCoor(std::vector &coor); void ProcessorCoorFromRank(int rank,std::vector &coor); - ///////////////////////////////// - // Grid information and queries - ///////////////////////////////// - static int ShmRank; - static int ShmSize; - static int GroupSize; - static int GroupRank; - static int WorldRank; - static int WorldSize; - static int Slave; - int IsBoss(void) ; int BossRank(void) ; int ThisRank(void) ; const std::vector & ThisProcessorCoor(void) ; const std::vector & ProcessorGrid(void) ; int ProcessorCount(void) ; - static int Ranks (void); - static int Nodes (void); - static int Cores (void); - static int NodeRank (void); - static int CoreRank (void); //////////////////////////////////////////////////////////////////////////////// // very VERY rarely (Log, serial RNG) we need world without a grid diff --git a/lib/communicator/Communicator_mpi.cc b/lib/communicator/Communicator_mpi.cc index a638eebb..65ced9c7 100644 --- a/lib/communicator/Communicator_mpi.cc +++ b/lib/communicator/Communicator_mpi.cc @@ -44,13 +44,6 @@ void CartesianCommunicator::Init(int *argc, char ***argv) { MPI_Init(argc,argv); } MPI_Comm_dup (MPI_COMM_WORLD,&communicator_world); - MPI_Comm_rank(communicator_world,&WorldRank); - MPI_Comm_size(communicator_world,&WorldSize); - ShmRank=0; - ShmSize=1; - GroupRank=WorldRank; - GroupSize=WorldSize; - Slave =0; ShmInitGeneric(); } @@ -198,6 +191,11 @@ void CartesianCommunicator::Broadcast(int root,void* data, int bytes) // Should only be used prior to Grid Init finished. // Check for this? 
/////////////////////////////////////////////////////// +int CartesianCommunicator::RankWorld(void){ + int r; + MPI_Comm_rank(communicator_world,&r); + return r; +} void CartesianCommunicator::BroadcastWorld(int root,void* data, int bytes) { int ierr= MPI_Bcast(data, diff --git a/lib/communicator/Communicator_mpi3.cc b/lib/communicator/Communicator_mpi3.cc index 14a152ab..c707ec1f 100644 --- a/lib/communicator/Communicator_mpi3.cc +++ b/lib/communicator/Communicator_mpi3.cc @@ -30,12 +30,18 @@ Author: Peter Boyle namespace Grid { - /////////////////////////////////////////////////////////////////////////////////////////////////// // Info that is setup once and indept of cartesian layout /////////////////////////////////////////////////////////////////////////////////////////////////// int CartesianCommunicator::ShmSetup = 0; +int CartesianCommunicator::ShmRank; +int CartesianCommunicator::ShmSize; +int CartesianCommunicator::GroupRank; +int CartesianCommunicator::GroupSize; +int CartesianCommunicator::WorldRank; +int CartesianCommunicator::WorldSize; + MPI_Comm CartesianCommunicator::communicator_world; MPI_Comm CartesianCommunicator::ShmComm; MPI_Win CartesianCommunicator::ShmWindow; @@ -97,15 +103,15 @@ void CartesianCommunicator::Init(int *argc, char ***argv) { std::vector world_ranks(WorldSize); GroupRanks.resize(WorldSize); - MyGroup.resize(ShmSize); for(int r=0;r + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License along + with this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + + See the full license in the file "LICENSE" in the top level distribution directory + *************************************************************************************/ + /* END LEGAL */ +#include "Grid.h" +#include + +//////////////////////////////////////////////////////////////////////////////////////////////////////////////// +/// Workarounds: +/// i) bloody mac os doesn't implement unnamed semaphores since it is "optional" posix. +/// darwin dispatch semaphores don't seem to be multiprocess. +/// +/// ii) openmpi under --mca shmem posix works with two squadrons per node; +/// openmpi under default mca settings (I think --mca shmem mmap) on MacOS makes two squadrons map the SAME +/// memory as each other, despite their living on different communicators. This appears to be a bug in OpenMPI. 
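The macros that follow wrap POSIX *named* semaphores, which are available on macOS where unnamed `sem_init` semaphores are an optional feature and absent. A compilable sketch of the same create/attach handshake, with illustrative names:

``` cpp
// Sketch of the named-semaphore pattern behind SEM_INIT / SEM_INIT_EXCL below:
// the creating side unlinks any stale instance and opens with O_CREAT|O_EXCL,
// the attaching side opens the same name. Illustrative helper only.
#include <cassert>
#include <fcntl.h>      // O_CREAT, O_EXCL
#include <semaphore.h>  // sem_open, sem_unlink, sem_post, sem_wait

sem_t *open_named_sem(const char *name, bool creator) {
  sem_t *s;
  if (creator) {
    sem_unlink(name);                            // discard any stale semaphore
    s = sem_open(name, O_CREAT | O_EXCL, 0600, 0);
  } else {
    s = sem_open(name, 0);                       // attach to the existing one
  }
  assert(s != SEM_FAILED);
  return s;
}
// One side signals with sem_post(s); the other blocks in sem_wait(s) --
// the same wake/wait handshake the Slave class below builds on.
```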
+/// +//////////////////////////////////////////////////////////////////////////////////////////////////////////////// +#include +#include +#include +#include + +typedef sem_t *Grid_semaphore; + +#define SEM_INIT(S) S = sem_open(sem_name,0,0600,0); assert ( S != SEM_FAILED ); +#define SEM_INIT_EXCL(S) sem_unlink(sem_name); S = sem_open(sem_name,O_CREAT|O_EXCL,0600,0); assert ( S != SEM_FAILED ); +#define SEM_POST(S) assert ( sem_post(S) == 0 ); +#define SEM_WAIT(S) assert ( sem_wait(S) == 0 ); + +#include + +namespace Grid { + +enum { COMMAND_ISEND, COMMAND_IRECV, COMMAND_WAITALL }; + +struct Descriptor { + uint64_t buf; + size_t bytes; + int rank; + int tag; + int command; + MPI_Request request; +}; + +const int pool = 48; + +class SlaveState { +public: + volatile int head; + volatile int start; + volatile int tail; + volatile Descriptor Descrs[pool]; +}; + +class Slave { +public: + Grid_semaphore sem_head; + Grid_semaphore sem_tail; + SlaveState *state; + MPI_Comm squadron; + uint64_t base; + int universe_rank; + int vertical_rank; + char sem_name [NAME_MAX]; + //////////////////////////////////////////////////////////// + // Descriptor circular pointers + //////////////////////////////////////////////////////////// + Slave() {}; + + void Init(SlaveState * _state,MPI_Comm _squadron,int _universe_rank,int _vertical_rank); + + void SemInit(void) { + sprintf(sem_name,"/Grid_mpi3_sem_head_%d",universe_rank); + // printf("SEM_NAME: %s \n",sem_name); + SEM_INIT(sem_head); + sprintf(sem_name,"/Grid_mpi3_sem_tail_%d",universe_rank); + // printf("SEM_NAME: %s \n",sem_name); + SEM_INIT(sem_tail); + } + void SemInitExcl(void) { + sprintf(sem_name,"/Grid_mpi3_sem_head_%d",universe_rank); + // printf("SEM_INIT_EXCL: %s \n",sem_name); + SEM_INIT_EXCL(sem_head); + sprintf(sem_name,"/Grid_mpi3_sem_tail_%d",universe_rank); + // printf("SEM_INIT_EXCL: %s \n",sem_name); + SEM_INIT_EXCL(sem_tail); + } + void WakeUpDMA(void) { + SEM_POST(sem_head); + }; + void WakeUpCompute(void) { + SEM_POST(sem_tail); + }; + void WaitForCommand(void) { + SEM_WAIT(sem_head); + }; + void WaitForComplete(void) { + SEM_WAIT(sem_tail); + }; + void EventLoop (void) { + // std::cout<< " Entering event loop "<tail == state->head ); + } +}; + +//////////////////////////////////////////////////////////////////////// +// One instance of a data mover. 
+// Master and Slave must agree on location in shared memory +//////////////////////////////////////////////////////////////////////// + +class MPIoffloadEngine { +public: + + static std::vector Slaves; + + static int ShmSetup; + + static int UniverseRank; + static int UniverseSize; + + static MPI_Comm communicator_universe; + static MPI_Comm communicator_cached; + + static MPI_Comm HorizontalComm; + static int HorizontalRank; + static int HorizontalSize; + + static MPI_Comm VerticalComm; + static MPI_Win VerticalWindow; + static int VerticalSize; + static int VerticalRank; + + static std::vector VerticalShmBufs; + static std::vector > UniverseRanks; + static std::vector UserCommunicatorToWorldRanks; + + static MPI_Group WorldGroup, CachedGroup; + + static void CommunicatorInit (MPI_Comm &communicator_world, + MPI_Comm &ShmComm, + void * &ShmCommBuf); + + static void MapCommRankToWorldRank(int &hashtag, int & comm_world_peer,int tag, MPI_Comm comm,int commrank); + + ///////////////////////////////////////////////////////// + // routines for master proc must handle any communicator + ///////////////////////////////////////////////////////// + + static void QueueSend(int slave,void *buf, int bytes, int tag, MPI_Comm comm,int rank) { + // std::cout<< " Queueing send "<< bytes<< " slave "<< slave << " to comm "<= units ) { + mywork = myoff = 0; + } else { + mywork = (nwork+me)/units; + myoff = basework * me; + if ( me > backfill ) + myoff+= (me-backfill); + } + return; + }; + + static void QueueMultiplexedSend(void *buf, int bytes, int tag, MPI_Comm comm,int rank) { + uint8_t * cbuf = (uint8_t *) buf; + int mywork, myoff, procs; + procs = VerticalSize-1; + for(int s=0;s MPIoffloadEngine::Slaves; + +int MPIoffloadEngine::UniverseRank; +int MPIoffloadEngine::UniverseSize; + +MPI_Comm MPIoffloadEngine::communicator_universe; +MPI_Comm MPIoffloadEngine::communicator_cached; +MPI_Group MPIoffloadEngine::WorldGroup; +MPI_Group MPIoffloadEngine::CachedGroup; + +MPI_Comm MPIoffloadEngine::HorizontalComm; +int MPIoffloadEngine::HorizontalRank; +int MPIoffloadEngine::HorizontalSize; + +MPI_Comm MPIoffloadEngine::VerticalComm; +int MPIoffloadEngine::VerticalSize; +int MPIoffloadEngine::VerticalRank; +MPI_Win MPIoffloadEngine::VerticalWindow; +std::vector MPIoffloadEngine::VerticalShmBufs; +std::vector > MPIoffloadEngine::UniverseRanks; +std::vector MPIoffloadEngine::UserCommunicatorToWorldRanks; + +int MPIoffloadEngine::ShmSetup = 0; + +void MPIoffloadEngine::CommunicatorInit (MPI_Comm &communicator_world, + MPI_Comm &ShmComm, + void * &ShmCommBuf) +{ + int flag; + assert(ShmSetup==0); + + ////////////////////////////////////////////////////////////////////// + // Universe is all nodes prior to squadron grouping + ////////////////////////////////////////////////////////////////////// + MPI_Comm_dup (MPI_COMM_WORLD,&communicator_universe); + MPI_Comm_rank(communicator_universe,&UniverseRank); + MPI_Comm_size(communicator_universe,&UniverseSize); + + ///////////////////////////////////////////////////////////////////// + // Split into groups that can share memory (Verticals) + ///////////////////////////////////////////////////////////////////// +#undef MPI_SHARED_MEM_DEBUG +#ifdef MPI_SHARED_MEM_DEBUG + MPI_Comm_split(communicator_universe,(UniverseRank/4),UniverseRank,&VerticalComm); +#else + MPI_Comm_split_type(communicator_universe, MPI_COMM_TYPE_SHARED, 0, MPI_INFO_NULL,&VerticalComm); +#endif + MPI_Comm_rank(VerticalComm ,&VerticalRank); + MPI_Comm_size(VerticalComm ,&VerticalSize); + + 
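+  // At this point every rank belongs to a node-local "vertical" communicator
+  // (split via MPI_COMM_TYPE_SHARED, i.e. ranks that can share memory).
+  // The split below then groups equal vertical ranks across nodes into
+  // "horizontal" communicators, so HorizontalSize * VerticalSize == UniverseSize.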
////////////////////////////////////////////////////////////////////// + // Split into horizontal groups by rank in squadron + ////////////////////////////////////////////////////////////////////// + MPI_Comm_split(communicator_universe,VerticalRank,UniverseRank,&HorizontalComm); + MPI_Comm_rank(HorizontalComm,&HorizontalRank); + MPI_Comm_size(HorizontalComm,&HorizontalSize); + assert(HorizontalSize*VerticalSize==UniverseSize); + + //////////////////////////////////////////////////////////////////////////////// + // What is my place in the world + //////////////////////////////////////////////////////////////////////////////// + int WorldRank=0; + if(VerticalRank==0) WorldRank = HorizontalRank; + int ierr=MPI_Allreduce(MPI_IN_PLACE,&WorldRank,1,MPI_INT,MPI_SUM,VerticalComm); + assert(ierr==0); + + //////////////////////////////////////////////////////////////////////////////// + // Where is the world in the universe? + //////////////////////////////////////////////////////////////////////////////// + UniverseRanks = std::vector >(HorizontalSize,std::vector(VerticalSize,0)); + UniverseRanks[WorldRank][VerticalRank] = UniverseRank; + for(int w=0;w0 ) size = sizeof(SlaveState); + + sprintf(shm_name,"/Grid_mpi3_shm_%d_%d",WorldRank,r); + + shm_unlink(shm_name); + + int fd=shm_open(shm_name,O_RDWR|O_CREAT,0600); + if ( fd < 0 ) { + perror("failed shm_open"); + assert(0); + } + + ftruncate(fd, size); + + VerticalShmBufs[r] = mmap(NULL,size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0); + + if ( VerticalShmBufs[r] == MAP_FAILED ) { + perror("failed mmap"); + assert(0); + } + + uint64_t * check = (uint64_t *) VerticalShmBufs[r]; + check[0] = WorldRank; + check[1] = r; + + // std::cout<<"SHM "<0 ) size = sizeof(SlaveState); + + sprintf(shm_name,"/Grid_mpi3_shm_%d_%d",WorldRank,r); + + int fd=shm_open(shm_name,O_RDWR|O_CREAT,0600); + if ( fd<0 ) { + perror("failed shm_open"); + assert(0); + } + VerticalShmBufs[r] = mmap(NULL,size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0); + + uint64_t * check = (uint64_t *) VerticalShmBufs[r]; + assert(check[0]== WorldRank); + assert(check[1]== r); + std::cerr<<"SHM "<"<"< cached_ranks(size); + + for(int r=0;r"<>0 )&0xFFFF)^((icomm>>16)&0xFFFF) + ^ ((icomm>>32)&0xFFFF)^((icomm>>48)&0xFFFF); + + // hashtag = (comm_hash<<15) | tag; + hashtag = tag; + +}; + +void Slave::Init(SlaveState * _state,MPI_Comm _squadron,int _universe_rank,int _vertical_rank) +{ + squadron=_squadron; + universe_rank=_universe_rank; + vertical_rank=_vertical_rank; + state =_state; + // std::cout << "state "<<_state<<" comm "<<_squadron<<" universe_rank"<head = state->tail = state->start = 0; + base = (uint64_t)MPIoffloadEngine::VerticalShmBufs[0]; + int rank; MPI_Comm_rank(_squadron,&rank); +} +#define PERI_PLUS(A) ( (A+1)%pool ) +int Slave::Event (void) { + + static int tail_last; + static int head_last; + static int start_last; + int ierr; + + //////////////////////////////////////////////////// + // Try to advance the start pointers + //////////////////////////////////////////////////// + int s=state->start; + if ( s != state->head ) { + switch ( state->Descrs[s].command ) { + case COMMAND_ISEND: + /* + std::cout<< " Send "<Descrs[s].buf<< "["<Descrs[s].bytes<<"]" + << " to " << state->Descrs[s].rank<< " tag" << state->Descrs[s].tag + << " Comm " << MPIoffloadEngine::communicator_universe<< " me " <Descrs[s].buf+base), + state->Descrs[s].bytes, + MPI_CHAR, + state->Descrs[s].rank, + state->Descrs[s].tag, + MPIoffloadEngine::communicator_universe, + (MPI_Request *)&state->Descrs[s].request); 
+ assert(ierr==0); + state->start = PERI_PLUS(s); + return 1; + break; + + case COMMAND_IRECV: + /* + std::cout<< " Recv "<Descrs[s].buf<< "["<Descrs[s].bytes<<"]" + << " from " << state->Descrs[s].rank<< " tag" << state->Descrs[s].tag + << " Comm " << MPIoffloadEngine::communicator_universe<< " me "<< universe_rank<< std::endl; + */ + ierr=MPI_Irecv((void *)(state->Descrs[s].buf+base), + state->Descrs[s].bytes, + MPI_CHAR, + state->Descrs[s].rank, + state->Descrs[s].tag, + MPIoffloadEngine::communicator_universe, + (MPI_Request *)&state->Descrs[s].request); + + // std::cout<< " Request is "<Descrs[s].request<Descrs[0].request<start = PERI_PLUS(s); + return 1; + break; + + case COMMAND_WAITALL: + + for(int t=state->tail;t!=s; t=PERI_PLUS(t) ){ + MPI_Wait((MPI_Request *)&state->Descrs[t].request,MPI_STATUS_IGNORE); + }; + s=PERI_PLUS(s); + state->start = s; + state->tail = s; + + WakeUpCompute(); + + return 1; + break; + + default: + assert(0); + break; + } + } + return 0; +} + ////////////////////////////////////////////////////////////////////////////// + // External interaction with the queue + ////////////////////////////////////////////////////////////////////////////// + +uint64_t Slave::QueueCommand(int command,void *buf, int bytes, int tag, MPI_Comm comm,int commrank) +{ + ///////////////////////////////////////// + // Spin; if FIFO is full until not full + ///////////////////////////////////////// + int head =state->head; + int next = PERI_PLUS(head); + + // Set up descriptor + int worldrank; + int hashtag; + MPI_Comm communicator; + MPI_Request request; + + MPIoffloadEngine::MapCommRankToWorldRank(hashtag,worldrank,tag,comm,commrank); + + uint64_t relative= (uint64_t)buf - base; + state->Descrs[head].buf = relative; + state->Descrs[head].bytes = bytes; + state->Descrs[head].rank = MPIoffloadEngine::UniverseRanks[worldrank][vertical_rank]; + state->Descrs[head].tag = hashtag; + state->Descrs[head].command= command; + + /* + if ( command == COMMAND_ISEND ) { + std::cout << "QueueSend from "<< universe_rank <<" to commrank " << commrank + << " to worldrank " << worldrank <tail==next ); + + // Msync on weak order architectures + // Advance pointer + state->head = next; + + return 0; +} + + +/////////////////////////////////////////////////////////////////////////////////////////////////// +// Info that is setup once and indept of cartesian layout +/////////////////////////////////////////////////////////////////////////////////////////////////// + +MPI_Comm CartesianCommunicator::communicator_world; + +void CartesianCommunicator::Init(int *argc, char ***argv) +{ + int flag; + MPI_Initialized(&flag); // needed to coexist with other libs apparently + if ( !flag ) { + MPI_Init(argc,argv); + } + communicator_world = MPI_COMM_WORLD; + MPI_Comm ShmComm; + MPIoffloadEngine::CommunicatorInit (communicator_world,ShmComm,ShmCommBuf); +} +void CartesianCommunicator::ShiftedRanks(int dim,int shift,int &source,int &dest) +{ + int ierr=MPI_Cart_shift(communicator,dim,shift,&source,&dest); + assert(ierr==0); +} +int CartesianCommunicator::RankFromProcessorCoor(std::vector &coor) +{ + int rank; + int ierr=MPI_Cart_rank (communicator, &coor[0], &rank); + assert(ierr==0); + return rank; +} +void CartesianCommunicator::ProcessorCoorFromRank(int rank, std::vector &coor) +{ + coor.resize(_ndimension); + int ierr=MPI_Cart_coords (communicator, rank, _ndimension,&coor[0]); + assert(ierr==0); +} + +CartesianCommunicator::CartesianCommunicator(const std::vector &processors) +{ + _ndimension = 
processors.size(); + std::vector periodic(_ndimension,1); + + _Nprocessors=1; + _processors = processors; + + for(int i=0;i<_ndimension;i++){ + _Nprocessors*=_processors[i]; + } + + int Size; + MPI_Comm_size(communicator_world,&Size); + assert(Size==_Nprocessors); + + _processor_coor.resize(_ndimension); + MPI_Cart_create(communicator_world, _ndimension,&_processors[0],&periodic[0],1,&communicator); + MPI_Comm_rank (communicator,&_processor); + MPI_Cart_coords(communicator,_processor,_ndimension,&_processor_coor[0]); +}; + +void CartesianCommunicator::GlobalSum(uint32_t &u){ + int ierr=MPI_Allreduce(MPI_IN_PLACE,&u,1,MPI_UINT32_T,MPI_SUM,communicator); + assert(ierr==0); +} +void CartesianCommunicator::GlobalSum(uint64_t &u){ + int ierr=MPI_Allreduce(MPI_IN_PLACE,&u,1,MPI_UINT64_T,MPI_SUM,communicator); + assert(ierr==0); +} +void CartesianCommunicator::GlobalSum(float &f){ + int ierr=MPI_Allreduce(MPI_IN_PLACE,&f,1,MPI_FLOAT,MPI_SUM,communicator); + assert(ierr==0); +} +void CartesianCommunicator::GlobalSumVector(float *f,int N) +{ + int ierr=MPI_Allreduce(MPI_IN_PLACE,f,N,MPI_FLOAT,MPI_SUM,communicator); + assert(ierr==0); +} +void CartesianCommunicator::GlobalSum(double &d) +{ + int ierr = MPI_Allreduce(MPI_IN_PLACE,&d,1,MPI_DOUBLE,MPI_SUM,communicator); + assert(ierr==0); +} +void CartesianCommunicator::GlobalSumVector(double *d,int N) +{ + int ierr = MPI_Allreduce(MPI_IN_PLACE,d,N,MPI_DOUBLE,MPI_SUM,communicator); + assert(ierr==0); +} + +// Basic Halo comms primitive +void CartesianCommunicator::SendToRecvFrom(void *xmit, + int dest, + void *recv, + int from, + int bytes) +{ + std::vector reqs(0); + SendToRecvFromBegin(reqs,xmit,dest,recv,from,bytes); + SendToRecvFromComplete(reqs); +} + +void CartesianCommunicator::SendRecvPacket(void *xmit, + void *recv, + int sender, + int receiver, + int bytes) +{ + MPI_Status stat; + assert(sender != receiver); + int tag = sender; + if ( _processor == sender ) { + MPI_Send(xmit, bytes, MPI_CHAR,receiver,tag,communicator); + } + if ( _processor == receiver ) { + MPI_Recv(recv, bytes, MPI_CHAR,sender,tag,communicator,&stat); + } +} + +// Basic Halo comms primitive +void CartesianCommunicator::SendToRecvFromBegin(std::vector &list, + void *xmit, + int dest, + void *recv, + int from, + int bytes) +{ + MPI_Request xrq; + MPI_Request rrq; + int rank = _processor; + int ierr; + ierr =MPI_Isend(xmit, bytes, MPI_CHAR,dest,_processor,communicator,&xrq); + ierr|=MPI_Irecv(recv, bytes, MPI_CHAR,from,from,communicator,&rrq); + + assert(ierr==0); + + list.push_back(xrq); + list.push_back(rrq); +} + +void CartesianCommunicator::StencilSendToRecvFromBegin(std::vector &list, + void *xmit, + int dest, + void *recv, + int from, + int bytes) +{ + uint64_t xmit_i = (uint64_t) xmit; + uint64_t recv_i = (uint64_t) recv; + uint64_t shm = (uint64_t) ShmCommBuf; + // assert xmit and recv lie in shared memory region + assert( (xmit_i >= shm) && (xmit_i+bytes <= shm+MAX_MPI_SHM_BYTES) ); + assert( (recv_i >= shm) && (recv_i+bytes <= shm+MAX_MPI_SHM_BYTES) ); + assert(from!=_processor); + assert(dest!=_processor); + MPIoffloadEngine::QueueMultiplexedSend(xmit,bytes,_processor,communicator,dest); + MPIoffloadEngine::QueueMultiplexedRecv(recv,bytes,from,communicator,from); +} + + +void CartesianCommunicator::StencilSendToRecvFromComplete(std::vector &list) +{ + MPIoffloadEngine::WaitAll(); +} + +void CartesianCommunicator::StencilBarrier(void) +{ +} + +void CartesianCommunicator::SendToRecvFromComplete(std::vector &list) +{ + int nreq=list.size(); + std::vector status(nreq); 
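+  // Block here until every send/receive queued in this list has completed.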
+ int ierr = MPI_Waitall(nreq,&list[0],&status[0]); + assert(ierr==0); +} + +void CartesianCommunicator::Barrier(void) +{ + int ierr = MPI_Barrier(communicator); + assert(ierr==0); +} + +void CartesianCommunicator::Broadcast(int root,void* data, int bytes) +{ + int ierr=MPI_Bcast(data, + bytes, + MPI_BYTE, + root, + communicator); + assert(ierr==0); +} + +void CartesianCommunicator::BroadcastWorld(int root,void* data, int bytes) +{ + int ierr= MPI_Bcast(data, + bytes, + MPI_BYTE, + root, + communicator_world); + assert(ierr==0); +} + +void *CartesianCommunicator::ShmBufferSelf(void) { return ShmCommBuf; } + +void *CartesianCommunicator::ShmBuffer(int rank) { + return NULL; +} +void *CartesianCommunicator::ShmBufferTranslate(int rank,void * local_p) { + return NULL; +} + + +}; + diff --git a/lib/communicator/Communicator_none.cc b/lib/communicator/Communicator_none.cc index 23b909b6..bcc1f5d5 100644 --- a/lib/communicator/Communicator_none.cc +++ b/lib/communicator/Communicator_none.cc @@ -34,13 +34,6 @@ namespace Grid { void CartesianCommunicator::Init(int *argc, char *** arv) { - WorldRank = 0; - WorldSize = 1; - ShmRank=0; - ShmSize=1; - GroupRank=WorldRank; - GroupSize=WorldSize; - Slave =0; ShmInitGeneric(); } @@ -94,11 +87,13 @@ void CartesianCommunicator::SendToRecvFromBegin(std::vector &lis { assert(0); } + void CartesianCommunicator::SendToRecvFromComplete(std::vector &list) { assert(0); } +int CartesianCommunicator::RankWorld(void){return 0;} void CartesianCommunicator::Barrier(void){} void CartesianCommunicator::Broadcast(int root,void* data, int bytes) {} void CartesianCommunicator::BroadcastWorld(int root,void* data, int bytes) { } diff --git a/lib/communicator/Communicator_shmem.cc b/lib/communicator/Communicator_shmem.cc index d5f3ed76..56e03224 100644 --- a/lib/communicator/Communicator_shmem.cc +++ b/lib/communicator/Communicator_shmem.cc @@ -50,11 +50,16 @@ typedef struct HandShake_t { uint64_t seq_remote; } HandShake; +std::array make_psync_init(void) { + array ret; + ret.fill(SHMEM_SYNC_VALUE); + return ret; +} +static std::array psync_init = make_psync_init(); static Vector< HandShake > XConnections; static Vector< HandShake > RConnections; - void CartesianCommunicator::Init(int *argc, char ***argv) { shmem_init(); XConnections.resize(shmem_n_pes()); @@ -65,13 +70,6 @@ void CartesianCommunicator::Init(int *argc, char ***argv) { RConnections[pe].seq_local = 0; RConnections[pe].seq_remote= 0; } - WorldSize = shmem_n_pes(); - WorldRank = shmem_my_pe(); - ShmRank=0; - ShmSize=1; - GroupRank=WorldRank; - GroupSize=WorldSize; - Slave =0; shmem_barrier_all(); ShmInitGeneric(); } @@ -103,7 +101,7 @@ void CartesianCommunicator::GlobalSum(uint32_t &u){ static long long source ; static long long dest ; static long long llwrk[_SHMEM_REDUCE_MIN_WRKDATA_SIZE]; - static long psync[_SHMEM_REDUCE_SYNC_SIZE]; + static std::array psync = psync_init; // int nreduce=1; // int pestart=0; @@ -119,7 +117,7 @@ void CartesianCommunicator::GlobalSum(uint64_t &u){ static long long source ; static long long dest ; static long long llwrk[_SHMEM_REDUCE_MIN_WRKDATA_SIZE]; - static long psync[_SHMEM_REDUCE_SYNC_SIZE]; + static std::array psync = psync_init; // int nreduce=1; // int pestart=0; @@ -135,7 +133,7 @@ void CartesianCommunicator::GlobalSum(float &f){ static float source ; static float dest ; static float llwrk[_SHMEM_REDUCE_MIN_WRKDATA_SIZE]; - static long psync[_SHMEM_REDUCE_SYNC_SIZE]; + static std::array psync = psync_init; source = f; dest =0.0; @@ -147,7 +145,7 @@ void 
CartesianCommunicator::GlobalSumVector(float *f,int N) static float source ; static float dest = 0 ; static float llwrk[_SHMEM_REDUCE_MIN_WRKDATA_SIZE]; - static long psync[_SHMEM_REDUCE_SYNC_SIZE]; + static std::array psync = psync_init; if ( shmem_addr_accessible(f,_processor) ){ shmem_float_sum_to_all(f,f,N,0,0,_Nprocessors,llwrk,psync); @@ -166,7 +164,7 @@ void CartesianCommunicator::GlobalSum(double &d) static double source; static double dest ; static double llwrk[_SHMEM_REDUCE_MIN_WRKDATA_SIZE]; - static long psync[_SHMEM_REDUCE_SYNC_SIZE]; + static std::array psync = psync_init; source = d; dest = 0; @@ -178,7 +176,8 @@ void CartesianCommunicator::GlobalSumVector(double *d,int N) static double source ; static double dest ; static double llwrk[_SHMEM_REDUCE_MIN_WRKDATA_SIZE]; - static long psync[_SHMEM_REDUCE_SYNC_SIZE]; + static std::array psync = psync_init; + if ( shmem_addr_accessible(d,_processor) ){ shmem_double_sum_to_all(d,d,N,0,0,_Nprocessors,llwrk,psync); @@ -295,7 +294,7 @@ void CartesianCommunicator::Barrier(void) } void CartesianCommunicator::Broadcast(int root,void* data, int bytes) { - static long psync[_SHMEM_REDUCE_SYNC_SIZE]; + static std::array psync = psync_init; static uint32_t word; uint32_t *array = (uint32_t *) data; assert( (bytes % 4)==0); @@ -318,7 +317,7 @@ void CartesianCommunicator::Broadcast(int root,void* data, int bytes) } void CartesianCommunicator::BroadcastWorld(int root,void* data, int bytes) { - static long psync[_SHMEM_REDUCE_SYNC_SIZE]; + static std::array psync = psync_init; static uint32_t word; uint32_t *array = (uint32_t *) data; assert( (bytes % 4)==0); diff --git a/lib/fftw/fftw3.h b/lib/fftw/fftw3.h deleted file mode 100644 index 1bf34396..00000000 --- a/lib/fftw/fftw3.h +++ /dev/null @@ -1,412 +0,0 @@ -/* - * Copyright (c) 2003, 2007-14 Matteo Frigo - * Copyright (c) 2003, 2007-14 Massachusetts Institute of Technology - * - * The following statement of license applies *only* to this header file, - * and *not* to the other files distributed with FFTW or derived therefrom: - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * - * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS - * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL - * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE - * GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, - * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING - * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- */ - -/***************************** NOTE TO USERS ********************************* - * - * THIS IS A HEADER FILE, NOT A MANUAL - * - * If you want to know how to use FFTW, please read the manual, - * online at http://www.fftw.org/doc/ and also included with FFTW. - * For a quick start, see the manual's tutorial section. - * - * (Reading header files to learn how to use a library is a habit - * stemming from code lacking a proper manual. Arguably, it's a - * *bad* habit in most cases, because header files can contain - * interfaces that are not part of the public, stable API.) - * - ****************************************************************************/ - -#ifndef FFTW3_H -#define FFTW3_H - -#include - -#ifdef __cplusplus -extern "C" -{ -#endif /* __cplusplus */ - -/* If is included, use the C99 complex type. Otherwise - define a type bit-compatible with C99 complex */ -#if !defined(FFTW_NO_Complex) && defined(_Complex_I) && defined(complex) && defined(I) -# define FFTW_DEFINE_COMPLEX(R, C) typedef R _Complex C -#else -# define FFTW_DEFINE_COMPLEX(R, C) typedef R C[2] -#endif - -#define FFTW_CONCAT(prefix, name) prefix ## name -#define FFTW_MANGLE_DOUBLE(name) FFTW_CONCAT(fftw_, name) -#define FFTW_MANGLE_FLOAT(name) FFTW_CONCAT(fftwf_, name) -#define FFTW_MANGLE_LONG_DOUBLE(name) FFTW_CONCAT(fftwl_, name) -#define FFTW_MANGLE_QUAD(name) FFTW_CONCAT(fftwq_, name) - -/* IMPORTANT: for Windows compilers, you should add a line - #define FFTW_DLL - here and in kernel/ifftw.h if you are compiling/using FFTW as a - DLL, in order to do the proper importing/exporting, or - alternatively compile with -DFFTW_DLL or the equivalent - command-line flag. This is not necessary under MinGW/Cygwin, where - libtool does the imports/exports automatically. */ -#if defined(FFTW_DLL) && (defined(_WIN32) || defined(__WIN32__)) - /* annoying Windows syntax for shared-library declarations */ -# if defined(COMPILING_FFTW) /* defined in api.h when compiling FFTW */ -# define FFTW_EXTERN extern __declspec(dllexport) -# else /* user is calling FFTW; import symbol */ -# define FFTW_EXTERN extern __declspec(dllimport) -# endif -#else -# define FFTW_EXTERN extern -#endif - -enum fftw_r2r_kind_do_not_use_me { - FFTW_R2HC=0, FFTW_HC2R=1, FFTW_DHT=2, - FFTW_REDFT00=3, FFTW_REDFT01=4, FFTW_REDFT10=5, FFTW_REDFT11=6, - FFTW_RODFT00=7, FFTW_RODFT01=8, FFTW_RODFT10=9, FFTW_RODFT11=10 -}; - -struct fftw_iodim_do_not_use_me { - int n; /* dimension size */ - int is; /* input stride */ - int os; /* output stride */ -}; - -#include /* for ptrdiff_t */ -struct fftw_iodim64_do_not_use_me { - ptrdiff_t n; /* dimension size */ - ptrdiff_t is; /* input stride */ - ptrdiff_t os; /* output stride */ -}; - -typedef void (*fftw_write_char_func_do_not_use_me)(char c, void *); -typedef int (*fftw_read_char_func_do_not_use_me)(void *); - -/* - huge second-order macro that defines prototypes for all API - functions. 
We expand this macro for each supported precision - - X: name-mangling macro - R: real data type - C: complex data type -*/ - -#define FFTW_DEFINE_API(X, R, C) \ - \ -FFTW_DEFINE_COMPLEX(R, C); \ - \ -typedef struct X(plan_s) *X(plan); \ - \ -typedef struct fftw_iodim_do_not_use_me X(iodim); \ -typedef struct fftw_iodim64_do_not_use_me X(iodim64); \ - \ -typedef enum fftw_r2r_kind_do_not_use_me X(r2r_kind); \ - \ -typedef fftw_write_char_func_do_not_use_me X(write_char_func); \ -typedef fftw_read_char_func_do_not_use_me X(read_char_func); \ - \ -FFTW_EXTERN void X(execute)(const X(plan) p); \ - \ -FFTW_EXTERN X(plan) X(plan_dft)(int rank, const int *n, \ - C *in, C *out, int sign, unsigned flags); \ - \ -FFTW_EXTERN X(plan) X(plan_dft_1d)(int n, C *in, C *out, int sign, \ - unsigned flags); \ -FFTW_EXTERN X(plan) X(plan_dft_2d)(int n0, int n1, \ - C *in, C *out, int sign, unsigned flags); \ -FFTW_EXTERN X(plan) X(plan_dft_3d)(int n0, int n1, int n2, \ - C *in, C *out, int sign, unsigned flags); \ - \ -FFTW_EXTERN X(plan) X(plan_many_dft)(int rank, const int *n, \ - int howmany, \ - C *in, const int *inembed, \ - int istride, int idist, \ - C *out, const int *onembed, \ - int ostride, int odist, \ - int sign, unsigned flags); \ - \ -FFTW_EXTERN X(plan) X(plan_guru_dft)(int rank, const X(iodim) *dims, \ - int howmany_rank, \ - const X(iodim) *howmany_dims, \ - C *in, C *out, \ - int sign, unsigned flags); \ -FFTW_EXTERN X(plan) X(plan_guru_split_dft)(int rank, const X(iodim) *dims, \ - int howmany_rank, \ - const X(iodim) *howmany_dims, \ - R *ri, R *ii, R *ro, R *io, \ - unsigned flags); \ - \ -FFTW_EXTERN X(plan) X(plan_guru64_dft)(int rank, \ - const X(iodim64) *dims, \ - int howmany_rank, \ - const X(iodim64) *howmany_dims, \ - C *in, C *out, \ - int sign, unsigned flags); \ -FFTW_EXTERN X(plan) X(plan_guru64_split_dft)(int rank, \ - const X(iodim64) *dims, \ - int howmany_rank, \ - const X(iodim64) *howmany_dims, \ - R *ri, R *ii, R *ro, R *io, \ - unsigned flags); \ - \ -FFTW_EXTERN void X(execute_dft)(const X(plan) p, C *in, C *out); \ -FFTW_EXTERN void X(execute_split_dft)(const X(plan) p, R *ri, R *ii, \ - R *ro, R *io); \ - \ -FFTW_EXTERN X(plan) X(plan_many_dft_r2c)(int rank, const int *n, \ - int howmany, \ - R *in, const int *inembed, \ - int istride, int idist, \ - C *out, const int *onembed, \ - int ostride, int odist, \ - unsigned flags); \ - \ -FFTW_EXTERN X(plan) X(plan_dft_r2c)(int rank, const int *n, \ - R *in, C *out, unsigned flags); \ - \ -FFTW_EXTERN X(plan) X(plan_dft_r2c_1d)(int n,R *in,C *out,unsigned flags); \ -FFTW_EXTERN X(plan) X(plan_dft_r2c_2d)(int n0, int n1, \ - R *in, C *out, unsigned flags); \ -FFTW_EXTERN X(plan) X(plan_dft_r2c_3d)(int n0, int n1, \ - int n2, \ - R *in, C *out, unsigned flags); \ - \ - \ -FFTW_EXTERN X(plan) X(plan_many_dft_c2r)(int rank, const int *n, \ - int howmany, \ - C *in, const int *inembed, \ - int istride, int idist, \ - R *out, const int *onembed, \ - int ostride, int odist, \ - unsigned flags); \ - \ -FFTW_EXTERN X(plan) X(plan_dft_c2r)(int rank, const int *n, \ - C *in, R *out, unsigned flags); \ - \ -FFTW_EXTERN X(plan) X(plan_dft_c2r_1d)(int n,C *in,R *out,unsigned flags); \ -FFTW_EXTERN X(plan) X(plan_dft_c2r_2d)(int n0, int n1, \ - C *in, R *out, unsigned flags); \ -FFTW_EXTERN X(plan) X(plan_dft_c2r_3d)(int n0, int n1, \ - int n2, \ - C *in, R *out, unsigned flags); \ - \ -FFTW_EXTERN X(plan) X(plan_guru_dft_r2c)(int rank, const X(iodim) *dims, \ - int howmany_rank, \ - const X(iodim) *howmany_dims, \ - R *in, C *out, \ 
- unsigned flags); \ -FFTW_EXTERN X(plan) X(plan_guru_dft_c2r)(int rank, const X(iodim) *dims, \ - int howmany_rank, \ - const X(iodim) *howmany_dims, \ - C *in, R *out, \ - unsigned flags); \ - \ -FFTW_EXTERN X(plan) X(plan_guru_split_dft_r2c)( \ - int rank, const X(iodim) *dims, \ - int howmany_rank, \ - const X(iodim) *howmany_dims, \ - R *in, R *ro, R *io, \ - unsigned flags); \ -FFTW_EXTERN X(plan) X(plan_guru_split_dft_c2r)( \ - int rank, const X(iodim) *dims, \ - int howmany_rank, \ - const X(iodim) *howmany_dims, \ - R *ri, R *ii, R *out, \ - unsigned flags); \ - \ -FFTW_EXTERN X(plan) X(plan_guru64_dft_r2c)(int rank, \ - const X(iodim64) *dims, \ - int howmany_rank, \ - const X(iodim64) *howmany_dims, \ - R *in, C *out, \ - unsigned flags); \ -FFTW_EXTERN X(plan) X(plan_guru64_dft_c2r)(int rank, \ - const X(iodim64) *dims, \ - int howmany_rank, \ - const X(iodim64) *howmany_dims, \ - C *in, R *out, \ - unsigned flags); \ - \ -FFTW_EXTERN X(plan) X(plan_guru64_split_dft_r2c)( \ - int rank, const X(iodim64) *dims, \ - int howmany_rank, \ - const X(iodim64) *howmany_dims, \ - R *in, R *ro, R *io, \ - unsigned flags); \ -FFTW_EXTERN X(plan) X(plan_guru64_split_dft_c2r)( \ - int rank, const X(iodim64) *dims, \ - int howmany_rank, \ - const X(iodim64) *howmany_dims, \ - R *ri, R *ii, R *out, \ - unsigned flags); \ - \ -FFTW_EXTERN void X(execute_dft_r2c)(const X(plan) p, R *in, C *out); \ -FFTW_EXTERN void X(execute_dft_c2r)(const X(plan) p, C *in, R *out); \ - \ -FFTW_EXTERN void X(execute_split_dft_r2c)(const X(plan) p, \ - R *in, R *ro, R *io); \ -FFTW_EXTERN void X(execute_split_dft_c2r)(const X(plan) p, \ - R *ri, R *ii, R *out); \ - \ -FFTW_EXTERN X(plan) X(plan_many_r2r)(int rank, const int *n, \ - int howmany, \ - R *in, const int *inembed, \ - int istride, int idist, \ - R *out, const int *onembed, \ - int ostride, int odist, \ - const X(r2r_kind) *kind, unsigned flags); \ - \ -FFTW_EXTERN X(plan) X(plan_r2r)(int rank, const int *n, R *in, R *out, \ - const X(r2r_kind) *kind, unsigned flags); \ - \ -FFTW_EXTERN X(plan) X(plan_r2r_1d)(int n, R *in, R *out, \ - X(r2r_kind) kind, unsigned flags); \ -FFTW_EXTERN X(plan) X(plan_r2r_2d)(int n0, int n1, R *in, R *out, \ - X(r2r_kind) kind0, X(r2r_kind) kind1, \ - unsigned flags); \ -FFTW_EXTERN X(plan) X(plan_r2r_3d)(int n0, int n1, int n2, \ - R *in, R *out, X(r2r_kind) kind0, \ - X(r2r_kind) kind1, X(r2r_kind) kind2, \ - unsigned flags); \ - \ -FFTW_EXTERN X(plan) X(plan_guru_r2r)(int rank, const X(iodim) *dims, \ - int howmany_rank, \ - const X(iodim) *howmany_dims, \ - R *in, R *out, \ - const X(r2r_kind) *kind, unsigned flags); \ - \ -FFTW_EXTERN X(plan) X(plan_guru64_r2r)(int rank, const X(iodim64) *dims, \ - int howmany_rank, \ - const X(iodim64) *howmany_dims, \ - R *in, R *out, \ - const X(r2r_kind) *kind, unsigned flags); \ - \ -FFTW_EXTERN void X(execute_r2r)(const X(plan) p, R *in, R *out); \ - \ -FFTW_EXTERN void X(destroy_plan)(X(plan) p); \ -FFTW_EXTERN void X(forget_wisdom)(void); \ -FFTW_EXTERN void X(cleanup)(void); \ - \ -FFTW_EXTERN void X(set_timelimit)(double t); \ - \ -FFTW_EXTERN void X(plan_with_nthreads)(int nthreads); \ -FFTW_EXTERN int X(init_threads)(void); \ -FFTW_EXTERN void X(cleanup_threads)(void); \ - \ -FFTW_EXTERN int X(export_wisdom_to_filename)(const char *filename); \ -FFTW_EXTERN void X(export_wisdom_to_file)(FILE *output_file); \ -FFTW_EXTERN char *X(export_wisdom_to_string)(void); \ -FFTW_EXTERN void X(export_wisdom)(X(write_char_func) write_char, \ - void *data); \ -FFTW_EXTERN int 
X(import_system_wisdom)(void); \ -FFTW_EXTERN int X(import_wisdom_from_filename)(const char *filename); \ -FFTW_EXTERN int X(import_wisdom_from_file)(FILE *input_file); \ -FFTW_EXTERN int X(import_wisdom_from_string)(const char *input_string); \ -FFTW_EXTERN int X(import_wisdom)(X(read_char_func) read_char, void *data); \ - \ -FFTW_EXTERN void X(fprint_plan)(const X(plan) p, FILE *output_file); \ -FFTW_EXTERN void X(print_plan)(const X(plan) p); \ -FFTW_EXTERN char *X(sprint_plan)(const X(plan) p); \ - \ -FFTW_EXTERN void *X(malloc)(size_t n); \ -FFTW_EXTERN R *X(alloc_real)(size_t n); \ -FFTW_EXTERN C *X(alloc_complex)(size_t n); \ -FFTW_EXTERN void X(free)(void *p); \ - \ -FFTW_EXTERN void X(flops)(const X(plan) p, \ - double *add, double *mul, double *fmas); \ -FFTW_EXTERN double X(estimate_cost)(const X(plan) p); \ -FFTW_EXTERN double X(cost)(const X(plan) p); \ - \ -FFTW_EXTERN int X(alignment_of)(R *p); \ -FFTW_EXTERN const char X(version)[]; \ -FFTW_EXTERN const char X(cc)[]; \ -FFTW_EXTERN const char X(codelet_optim)[]; - - -/* end of FFTW_DEFINE_API macro */ - -FFTW_DEFINE_API(FFTW_MANGLE_DOUBLE, double, fftw_complex) -FFTW_DEFINE_API(FFTW_MANGLE_FLOAT, float, fftwf_complex) -FFTW_DEFINE_API(FFTW_MANGLE_LONG_DOUBLE, long double, fftwl_complex) - -/* __float128 (quad precision) is a gcc extension on i386, x86_64, and ia64 - for gcc >= 4.6 (compiled in FFTW with --enable-quad-precision) */ -#if (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6)) \ - && !(defined(__ICC) || defined(__INTEL_COMPILER)) \ - && (defined(__i386__) || defined(__x86_64__) || defined(__ia64__)) -# if !defined(FFTW_NO_Complex) && defined(_Complex_I) && defined(complex) && defined(I) -/* note: __float128 is a typedef, which is not supported with the _Complex - keyword in gcc, so instead we use this ugly __attribute__ version. - However, we can't simply pass the __attribute__ version to - FFTW_DEFINE_API because the __attribute__ confuses gcc in pointer - types. Hence redefining FFTW_DEFINE_COMPLEX. Ugh. 
*/ -# undef FFTW_DEFINE_COMPLEX -# define FFTW_DEFINE_COMPLEX(R, C) typedef _Complex float __attribute__((mode(TC))) C -# endif -FFTW_DEFINE_API(FFTW_MANGLE_QUAD, __float128, fftwq_complex) -#endif - -#define FFTW_FORWARD (-1) -#define FFTW_BACKWARD (+1) - -#define FFTW_NO_TIMELIMIT (-1.0) - -/* documented flags */ -#define FFTW_MEASURE (0U) -#define FFTW_DESTROY_INPUT (1U << 0) -#define FFTW_UNALIGNED (1U << 1) -#define FFTW_CONSERVE_MEMORY (1U << 2) -#define FFTW_EXHAUSTIVE (1U << 3) /* NO_EXHAUSTIVE is default */ -#define FFTW_PRESERVE_INPUT (1U << 4) /* cancels FFTW_DESTROY_INPUT */ -#define FFTW_PATIENT (1U << 5) /* IMPATIENT is default */ -#define FFTW_ESTIMATE (1U << 6) -#define FFTW_WISDOM_ONLY (1U << 21) - -/* undocumented beyond-guru flags */ -#define FFTW_ESTIMATE_PATIENT (1U << 7) -#define FFTW_BELIEVE_PCOST (1U << 8) -#define FFTW_NO_DFT_R2HC (1U << 9) -#define FFTW_NO_NONTHREADED (1U << 10) -#define FFTW_NO_BUFFERING (1U << 11) -#define FFTW_NO_INDIRECT_OP (1U << 12) -#define FFTW_ALLOW_LARGE_GENERIC (1U << 13) /* NO_LARGE_GENERIC is default */ -#define FFTW_NO_RANK_SPLITS (1U << 14) -#define FFTW_NO_VRANK_SPLITS (1U << 15) -#define FFTW_NO_VRECURSE (1U << 16) -#define FFTW_NO_SIMD (1U << 17) -#define FFTW_NO_SLOW (1U << 18) -#define FFTW_NO_FIXED_RADIX_LARGE_N (1U << 19) -#define FFTW_ALLOW_PRUNING (1U << 20) - -#ifdef __cplusplus -} /* extern "C" */ -#endif /* __cplusplus */ - -#endif /* FFTW3_H */ diff --git a/lib/lattice/Lattice_ET.h b/lib/lattice/Lattice_ET.h index 7ebac99d..1bb83901 100644 --- a/lib/lattice/Lattice_ET.h +++ b/lib/lattice/Lattice_ET.h @@ -261,6 +261,7 @@ GridUnopClass(UnaryExp, exp(a)); GridBinOpClass(BinaryAdd, lhs + rhs); GridBinOpClass(BinarySub, lhs - rhs); GridBinOpClass(BinaryMul, lhs *rhs); +GridBinOpClass(BinaryDiv, lhs /rhs); GridBinOpClass(BinaryAnd, lhs &rhs); GridBinOpClass(BinaryOr, lhs | rhs); @@ -385,6 +386,7 @@ GRID_DEF_UNOP(exp, UnaryExp); GRID_DEF_BINOP(operator+, BinaryAdd); GRID_DEF_BINOP(operator-, BinarySub); GRID_DEF_BINOP(operator*, BinaryMul); +GRID_DEF_BINOP(operator/, BinaryDiv); GRID_DEF_BINOP(operator&, BinaryAnd); GRID_DEF_BINOP(operator|, BinaryOr); diff --git a/lib/lattice/Lattice_base.h b/lib/lattice/Lattice_base.h index e04c1443..e4dc1ca8 100644 --- a/lib/lattice/Lattice_base.h +++ b/lib/lattice/Lattice_base.h @@ -300,17 +300,6 @@ PARALLEL_FOR_LOOP *this = (*this)+r; return *this; } - - strong_inline friend Lattice operator / (const Lattice &lhs,const Lattice &rhs){ - conformable(lhs,rhs); - Lattice ret(lhs._grid); -PARALLEL_FOR_LOOP - for(int ss=0;ssoSites();ss++){ - ret._odata[ss] = lhs._odata[ss]*pow(rhs._odata[ss],-1.0); - } - return ret; - }; - }; // class Lattice template std::ostream& operator<< (std::ostream& stream, const Lattice &o){ diff --git a/lib/lattice/Lattice_rng.h b/lib/lattice/Lattice_rng.h index 2df5e2d1..51cc16ec 100644 --- a/lib/lattice/Lattice_rng.h +++ b/lib/lattice/Lattice_rng.h @@ -294,7 +294,7 @@ namespace Grid { int rank,o_idx,i_idx; _grid->GlobalIndexToGlobalCoor(gidx,gcoor); _grid->GlobalCoorToRankIndex(rank,o_idx,i_idx,gcoor); - + int l_idx=generator_idx(o_idx,i_idx); const int num_rand_seed=16; diff --git a/lib/parallelIO/BinaryIO.h b/lib/parallelIO/BinaryIO.h index 5eddb57d..e2af0545 100644 --- a/lib/parallelIO/BinaryIO.h +++ b/lib/parallelIO/BinaryIO.h @@ -457,7 +457,7 @@ class BinaryIO { // available (how short sighted is that?) 
////////////////////////////////////////////////////////// Umu = zero; - static uint32_t csum=0; + static uint32_t csum; csum=0; fobj fileObj; static sobj siteObj; // Static to place in symmetric region for SHMEM diff --git a/lib/qcd/action/fermion/CayleyFermion5D.cc b/lib/qcd/action/fermion/CayleyFermion5D.cc index c4196043..57b047d4 100644 --- a/lib/qcd/action/fermion/CayleyFermion5D.cc +++ b/lib/qcd/action/fermion/CayleyFermion5D.cc @@ -50,6 +50,30 @@ namespace QCD { mass(_mass) { } +template +void CayleyFermion5D::Dminus(const FermionField &psi, FermionField &chi) +{ + int Ls=this->Ls; + FermionField tmp(psi._grid); + + this->DW(psi,tmp,DaggerNo); + + for(int s=0;s +void CayleyFermion5D::DminusDag(const FermionField &psi, FermionField &chi) +{ + int Ls=this->Ls; + FermionField tmp(psi._grid); + + this->DW(psi,tmp,DaggerYes); + + for(int s=0;s void CayleyFermion5D::M5D (const FermionField &psi, FermionField &chi) { diff --git a/lib/qcd/action/fermion/CayleyFermion5D.h b/lib/qcd/action/fermion/CayleyFermion5D.h index 53a93a05..1d8c2b95 100644 --- a/lib/qcd/action/fermion/CayleyFermion5D.h +++ b/lib/qcd/action/fermion/CayleyFermion5D.h @@ -56,6 +56,9 @@ namespace Grid { virtual void M5D (const FermionField &psi, FermionField &chi); virtual void M5Ddag(const FermionField &psi, FermionField &chi); + virtual void Dminus(const FermionField &psi, FermionField &chi); + virtual void DminusDag(const FermionField &psi, FermionField &chi); + ///////////////////////////////////////////////////// // Instantiate different versions depending on Impl ///////////////////////////////////////////////////// @@ -117,6 +120,7 @@ namespace Grid { GridRedBlackCartesian &FourDimRedBlackGrid, RealD _mass,RealD _M5,const ImplParams &p= ImplParams()); + protected: void SetCoefficientsZolotarev(RealD zolohi,Approx::zolotarev_data *zdata,RealD b,RealD c); void SetCoefficientsTanh(Approx::zolotarev_data *zdata,RealD b,RealD c); diff --git a/lib/qcd/action/fermion/DomainWallFermion.h b/lib/qcd/action/fermion/DomainWallFermion.h index b72ca8ec..c0b6b6aa 100644 --- a/lib/qcd/action/fermion/DomainWallFermion.h +++ b/lib/qcd/action/fermion/DomainWallFermion.h @@ -42,6 +42,10 @@ namespace Grid { INHERIT_IMPL_TYPES(Impl); public: + void MomentumSpacePropagator(FermionField &out,const FermionField &in,RealD _m) { + this->MomentumSpacePropagatorHt(out,in,_m); + }; + virtual void Instantiatable(void) {}; // Constructors DomainWallFermion(GaugeField &_Umu, @@ -51,6 +55,7 @@ namespace Grid { GridRedBlackCartesian &FourDimRedBlackGrid, RealD _mass,RealD _M5,const ImplParams &p= ImplParams()) : + CayleyFermion5D(_Umu, FiveDimGrid, FiveDimRedBlackGrid, diff --git a/lib/qcd/action/fermion/FermionOperator.h b/lib/qcd/action/fermion/FermionOperator.h index ea5583eb..742c6e08 100644 --- a/lib/qcd/action/fermion/FermionOperator.h +++ b/lib/qcd/action/fermion/FermionOperator.h @@ -91,6 +91,20 @@ namespace Grid { virtual void Mdiag (const FermionField &in, FermionField &out) { Mooee(in,out);}; // Same as Mooee applied to both CB's virtual void Mdir (const FermionField &in, FermionField &out,int dir,int disp)=0; // case by case Wilson, Clover, Cayley, ContFrac, PartFrac + + virtual void MomentumSpacePropagator(FermionField &out,const FermionField &in,RealD _m) { assert(0);}; + + virtual void FreePropagator(const FermionField &in,FermionField &out,RealD mass) { + FFT theFFT((GridCartesian *) in._grid); + + FermionField in_k(in._grid); + FermionField prop_k(in._grid); + + theFFT.FFT_all_dim(in_k,in,FFT::forward); + 
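+      // in_k now holds the source in momentum space; apply the action's
+      // analytic free propagator there, then FFT back to position space.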
this->MomentumSpacePropagator(prop_k,in_k,mass); + theFFT.FFT_all_dim(out,prop_k,FFT::backward); + }; + /////////////////////////////////////////////// // Updates gauge field during HMC /////////////////////////////////////////////// diff --git a/lib/qcd/action/fermion/OverlapWilsonCayleyTanhFermion.h b/lib/qcd/action/fermion/OverlapWilsonCayleyTanhFermion.h index cd23ddd5..9cab0e22 100644 --- a/lib/qcd/action/fermion/OverlapWilsonCayleyTanhFermion.h +++ b/lib/qcd/action/fermion/OverlapWilsonCayleyTanhFermion.h @@ -42,7 +42,11 @@ namespace Grid { INHERIT_IMPL_TYPES(Impl); public: - // Constructors + void MomentumSpacePropagator(FermionField &out,const FermionField &in,RealD _m) { + this->MomentumSpacePropagatorHw(out,in,_m); + }; + + // Constructors OverlapWilsonCayleyTanhFermion(GaugeField &_Umu, GridCartesian &FiveDimGrid, GridRedBlackCartesian &FiveDimRedBlackGrid, diff --git a/lib/qcd/action/fermion/WilsonFermion.cc b/lib/qcd/action/fermion/WilsonFermion.cc index 845250fc..39a769d5 100644 --- a/lib/qcd/action/fermion/WilsonFermion.cc +++ b/lib/qcd/action/fermion/WilsonFermion.cc @@ -100,6 +100,7 @@ void WilsonFermion::Meooe(const FermionField &in, FermionField &out) { DhopOE(in, out, DaggerNo); } } + template void WilsonFermion::MeooeDag(const FermionField &in, FermionField &out) { if (in.checkerboard == Odd) { @@ -108,32 +109,87 @@ void WilsonFermion::MeooeDag(const FermionField &in, FermionField &out) { DhopOE(in, out, DaggerYes); } } + + template + void WilsonFermion::Mooee(const FermionField &in, FermionField &out) { + out.checkerboard = in.checkerboard; + typename FermionField::scalar_type scal(4.0 + mass); + out = scal * in; + } -template -void WilsonFermion::Mooee(const FermionField &in, FermionField &out) { - out.checkerboard = in.checkerboard; - typename FermionField::scalar_type scal(4.0 + mass); - out = scal * in; -} + template + void WilsonFermion::MooeeDag(const FermionField &in, FermionField &out) { + out.checkerboard = in.checkerboard; + Mooee(in, out); + } -template -void WilsonFermion::MooeeDag(const FermionField &in, FermionField &out) { - out.checkerboard = in.checkerboard; - Mooee(in, out); -} + template + void WilsonFermion::MooeeInv(const FermionField &in, FermionField &out) { + out.checkerboard = in.checkerboard; + out = (1.0/(4.0+mass))*in; + } + + template + void WilsonFermion::MooeeInvDag(const FermionField &in, FermionField &out) { + out.checkerboard = in.checkerboard; + MooeeInv(in,out); + } -template -void WilsonFermion::MooeeInv(const FermionField &in, FermionField &out) { - out.checkerboard = in.checkerboard; - out = (1.0 / (4.0 + mass)) * in; -} + template + void WilsonFermion::MomentumSpacePropagator(FermionField &out, const FermionField &in,RealD _m) { -template -void WilsonFermion::MooeeInvDag(const FermionField &in, - FermionField &out) { - out.checkerboard = in.checkerboard; - MooeeInv(in, out); -} + // what type LatticeComplex + conformable(_grid,out._grid); + + typedef typename FermionField::vector_type vector_type; + typedef typename FermionField::scalar_type ScalComplex; + + typedef Lattice > LatComplex; + + Gamma::GammaMatrix Gmu [] = { + Gamma::GammaX, + Gamma::GammaY, + Gamma::GammaZ, + Gamma::GammaT + }; + + std::vector latt_size = _grid->_fdimensions; + + FermionField num (_grid); num = zero; + LatComplex wilson(_grid); wilson= zero; + LatComplex one (_grid); one = ScalComplex(1.0,0.0); + + LatComplex denom(_grid); denom= zero; + LatComplex kmu(_grid); + ScalComplex ci(0.0,1.0); + // momphase = n * 2pi / L + for(int mu=0;mu, public 
WilsonFermionStatic { virtual void MooeeInv(const FermionField &in, FermionField &out); virtual void MooeeInvDag(const FermionField &in, FermionField &out); + virtual void MomentumSpacePropagator(FermionField &out,const FermionField &in,RealD _mass) ; + //////////////////////// // Derivative interface //////////////////////// // Interface calls an internal routine - void DhopDeriv(GaugeField &mat, const FermionField &U, const FermionField &V, - int dag); - void DhopDerivOE(GaugeField &mat, const FermionField &U, - const FermionField &V, int dag); - void DhopDerivEO(GaugeField &mat, const FermionField &U, - const FermionField &V, int dag); + void DhopDeriv(GaugeField &mat,const FermionField &U,const FermionField &V,int dag); + void DhopDerivOE(GaugeField &mat,const FermionField &U,const FermionField &V,int dag); + void DhopDerivEO(GaugeField &mat,const FermionField &U,const FermionField &V,int dag); /////////////////////////////////////////////////////////////// // non-hermitian hopping term; half cb or both diff --git a/lib/qcd/action/fermion/WilsonFermion5D.cc b/lib/qcd/action/fermion/WilsonFermion5D.cc index 8c059667..4c2d24bf 100644 --- a/lib/qcd/action/fermion/WilsonFermion5D.cc +++ b/lib/qcd/action/fermion/WilsonFermion5D.cc @@ -482,6 +482,148 @@ void WilsonFermion5D::DW(const FermionField &in, FermionField &out,int dag axpy(out,4.0-M5,in,out); } +template +void WilsonFermion5D::MomentumSpacePropagatorHt(FermionField &out,const FermionField &in, RealD mass) +{ + // what type LatticeComplex + GridBase *_grid = _FourDimGrid; + conformable(_grid,out._grid); + + typedef typename FermionField::vector_type vector_type; + typedef typename FermionField::scalar_type ScalComplex; + typedef iSinglet Tcomplex; + typedef Lattice > LatComplex; + + Gamma::GammaMatrix Gmu [] = { + Gamma::GammaX, + Gamma::GammaY, + Gamma::GammaZ, + Gamma::GammaT + }; + + std::vector latt_size = _grid->_fdimensions; + + + FermionField num (_grid); num = zero; + + LatComplex sk(_grid); sk = zero; + LatComplex sk2(_grid); sk2= zero; + LatComplex W(_grid); W= zero; + LatComplex a(_grid); a= zero; + LatComplex one (_grid); one = ScalComplex(1.0,0.0); + LatComplex denom(_grid); denom= zero; + LatComplex cosha(_grid); + LatComplex kmu(_grid); + LatComplex Wea(_grid); + LatComplex Wema(_grid); + + ScalComplex ci(0.0,1.0); + + for(int mu=0;mu alpha + //////////////////////////////////////////// + cosha = (one + W*W + sk) / (W*2.0); + + // FIXME Need a Lattice acosh + for(int idx=0;idx<_grid->lSites();idx++){ + std::vector lcoor(Nd); + Tcomplex cc; + RealD sgn; + _grid->LocalIndexToLocalCoor(idx,lcoor); + peekLocalSite(cc,cosha,lcoor); + assert((double)real(cc)>=1.0); + assert(fabs((double)imag(cc))<=1.0e-15); + cc = ScalComplex(::acosh(real(cc)),0.0); + pokeLocalSite(cc,a,lcoor); + } + + Wea = ( exp( a) * W ); + Wema= ( exp(-a) * W ); + + num = num + ( one - Wema ) * mass * in; + denom= ( Wea - one ) + mass*mass * (one - Wema); + out = num/denom; +} + +template +void WilsonFermion5D::MomentumSpacePropagatorHw(FermionField &out,const FermionField &in,RealD mass) +{ + Gamma::GammaMatrix Gmu [] = { + Gamma::GammaX, + Gamma::GammaY, + Gamma::GammaZ, + Gamma::GammaT + }; + + GridBase *_grid = _FourDimGrid; + conformable(_grid,out._grid); + + typedef typename FermionField::vector_type vector_type; + typedef typename FermionField::scalar_type ScalComplex; + + typedef Lattice > LatComplex; + + + std::vector latt_size = _grid->_fdimensions; + + LatComplex sk(_grid); sk = zero; + LatComplex sk2(_grid); sk2= zero; + + LatComplex 
w_k(_grid); w_k= zero; + LatComplex b_k(_grid); b_k= zero; + + LatComplex one (_grid); one = ScalComplex(1.0,0.0); + + FermionField num (_grid); num = zero; + LatComplex denom(_grid); denom= zero; + LatComplex kmu(_grid); + ScalComplex ci(0.0,1.0); + + for(int mu=0;mu directions; - static const std::vector displacements; - const int npoint = 8; - }; - - template - class WilsonFermion5D : public WilsonKernels, public WilsonFermion5DStatic - { - public: - INHERIT_IMPL_TYPES(Impl); - typedef WilsonKernels Kernels; - PmuStat stat; - - void Report(void); - void ZeroCounters(void); - double DhopCalls; - double DhopCommTime; - double DhopComputeTime; - - double DerivCalls; - double DerivCommTime; - double DerivComputeTime; - double DerivDhopComputeTime; - - /////////////////////////////////////////////////////////////// - // Implement the abstract base - /////////////////////////////////////////////////////////////// - GridBase *GaugeGrid(void) { return _FourDimGrid ;} - GridBase *GaugeRedBlackGrid(void) { return _FourDimRedBlackGrid ;} - GridBase *FermionGrid(void) { return _FiveDimGrid;} - GridBase *FermionRedBlackGrid(void) { return _FiveDimRedBlackGrid;} - - // full checkerboard operations; leave unimplemented as abstract for now - virtual RealD M (const FermionField &in, FermionField &out){assert(0); return 0.0;}; - virtual RealD Mdag (const FermionField &in, FermionField &out){assert(0); return 0.0;}; - - // half checkerboard operations; leave unimplemented as abstract for now - virtual void Meooe (const FermionField &in, FermionField &out){assert(0);}; - virtual void Mooee (const FermionField &in, FermionField &out){assert(0);}; - virtual void MooeeInv (const FermionField &in, FermionField &out){assert(0);}; - - virtual void MeooeDag (const FermionField &in, FermionField &out){assert(0);}; - virtual void MooeeDag (const FermionField &in, FermionField &out){assert(0);}; - virtual void MooeeInvDag (const FermionField &in, FermionField &out){assert(0);}; - virtual void Mdir (const FermionField &in, FermionField &out,int dir,int disp){assert(0);}; // case by case Wilson, Clover, Cayley, ContFrac, PartFrac - - // These can be overridden by fancy 5d chiral action - virtual void DhopDeriv (GaugeField &mat,const FermionField &U,const FermionField &V,int dag); - virtual void DhopDerivEO(GaugeField &mat,const FermionField &U,const FermionField &V,int dag); - virtual void DhopDerivOE(GaugeField &mat,const FermionField &U,const FermionField &V,int dag); - - // Implement hopping term non-hermitian hopping term; half cb or both - // Implement s-diagonal DW - void DW (const FermionField &in, FermionField &out,int dag); - void Dhop (const FermionField &in, FermionField &out,int dag); - void DhopOE(const FermionField &in, FermionField &out,int dag); - void DhopEO(const FermionField &in, FermionField &out,int dag); - - // add a DhopComm + //////////////////////////////////////////////////////////////////////////////// + // This is the 4d red black case appropriate to support + // + // parity = (x+y+z+t)|2; + // generalised five dim fermions like mobius, zolotarev etc.. + // + // i.e. even even contains fifth dim hopping term. + // + // [DIFFERS from original CPS red black implementation parity = (x+y+z+t+s)|2 ] + //////////////////////////////////////////////////////////////////////////////// + + class WilsonFermion5DStatic { + public: + // S-direction is INNERMOST and takes no part in the parity. 
+ static const std::vector directions; + static const std::vector displacements; + const int npoint = 8; + }; + + template + class WilsonFermion5D : public WilsonKernels, public WilsonFermion5DStatic + { + public: + INHERIT_IMPL_TYPES(Impl); + typedef WilsonKernels Kernels; + PmuStat stat; + + void Report(void); + void ZeroCounters(void); + double DhopCalls; + double DhopCommTime; + double DhopComputeTime; + + double DerivCalls; + double DerivCommTime; + double DerivComputeTime; + double DerivDhopComputeTime; + + /////////////////////////////////////////////////////////////// + // Implement the abstract base + /////////////////////////////////////////////////////////////// + GridBase *GaugeGrid(void) { return _FourDimGrid ;} + GridBase *GaugeRedBlackGrid(void) { return _FourDimRedBlackGrid ;} + GridBase *FermionGrid(void) { return _FiveDimGrid;} + GridBase *FermionRedBlackGrid(void) { return _FiveDimRedBlackGrid;} + + // full checkerboard operations; leave unimplemented as abstract for now + virtual RealD M (const FermionField &in, FermionField &out){assert(0); return 0.0;}; + virtual RealD Mdag (const FermionField &in, FermionField &out){assert(0); return 0.0;}; + + // half checkerboard operations; leave unimplemented as abstract for now + virtual void Meooe (const FermionField &in, FermionField &out){assert(0);}; + virtual void Mooee (const FermionField &in, FermionField &out){assert(0);}; + virtual void MooeeInv (const FermionField &in, FermionField &out){assert(0);}; + + virtual void MeooeDag (const FermionField &in, FermionField &out){assert(0);}; + virtual void MooeeDag (const FermionField &in, FermionField &out){assert(0);}; + virtual void MooeeInvDag (const FermionField &in, FermionField &out){assert(0);}; + virtual void Mdir (const FermionField &in, FermionField &out,int dir,int disp){assert(0);}; // case by case Wilson, Clover, Cayley, ContFrac, PartFrac + + // These can be overridden by fancy 5d chiral action + virtual void DhopDeriv (GaugeField &mat,const FermionField &U,const FermionField &V,int dag); + virtual void DhopDerivEO(GaugeField &mat,const FermionField &U,const FermionField &V,int dag); + virtual void DhopDerivOE(GaugeField &mat,const FermionField &U,const FermionField &V,int dag); + + void MomentumSpacePropagatorHt(FermionField &out,const FermionField &in,RealD mass) ; + void MomentumSpacePropagatorHw(FermionField &out,const FermionField &in,RealD mass) ; + + // Implement hopping term non-hermitian hopping term; half cb or both + // Implement s-diagonal DW + void DW (const FermionField &in, FermionField &out,int dag); + void Dhop (const FermionField &in, FermionField &out,int dag); + void DhopOE(const FermionField &in, FermionField &out,int dag); + void DhopEO(const FermionField &in, FermionField &out,int dag); + + // add a DhopComm // -- suboptimal interface will presently trigger multiple comms. 
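In the WilsonKernels changes that follow, the two independent `AsmOpt`/`HandOpt` flags are collapsed into a single `Opt` selector dispatched through one `switch`, with an unknown value treated as a hard error. A minimal sketch of that selector pattern, with stubbed kernel bodies and illustrative names rather than the real kernel signatures:

``` cpp
// Sketch of the single-enum kernel selection introduced below: one mutually
// exclusive option replaces two booleans. Kernel bodies are stubbed out.
#include <cassert>

enum KernelOpt { OptGeneric, OptHandUnroll, OptInlineAsm };
static int Opt = OptGeneric;

inline void DhopSiteDispatch() {
  switch (Opt) {
#ifdef AVX512
  case OptInlineAsm:  /* assembly kernel, AVX-512 builds only */ break;
#endif
  case OptHandUnroll: /* hand-unrolled C++ kernel */             break;
  case OptGeneric:    /* generic template kernel */              break;
  default: assert(0); // e.g. OptInlineAsm requested on a non-AVX-512 build
  }
}
```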
void DhopDir(const FermionField &in, FermionField &out,int dir,int disp); diff --git a/lib/qcd/action/fermion/WilsonKernels.cc b/lib/qcd/action/fermion/WilsonKernels.cc index 52ee8704..43776c86 100644 --- a/lib/qcd/action/fermion/WilsonKernels.cc +++ b/lib/qcd/action/fermion/WilsonKernels.cc @@ -32,8 +32,7 @@ directory namespace Grid { namespace QCD { -int WilsonKernelsStatic::HandOpt; -int WilsonKernelsStatic::AsmOpt; +int WilsonKernelsStatic::Opt; template WilsonKernels::WilsonKernels(const ImplParams &p) : Base(p){}; diff --git a/lib/qcd/action/fermion/WilsonKernels.h b/lib/qcd/action/fermion/WilsonKernels.h index 669ee4be..47da2b14 100644 --- a/lib/qcd/action/fermion/WilsonKernels.h +++ b/lib/qcd/action/fermion/WilsonKernels.h @@ -40,9 +40,9 @@ namespace QCD { //////////////////////////////////////////////////////////////////////////////////////////////////////////////// class WilsonKernelsStatic { public: + enum { OptGeneric, OptHandUnroll, OptInlineAsm }; // S-direction is INNERMOST and takes no part in the parity. - static int AsmOpt; // these are a temporary hack - static int HandOpt; // these are a temporary hack + static int Opt; // these are a temporary hack }; template class WilsonKernels : public FermionOperator , public WilsonKernelsStatic { @@ -56,24 +56,34 @@ public: template typename std::enable_if::type DiracOptDhopSite(StencilImpl &st, LebesgueOrder &lo, DoubledGaugeField &U, SiteHalfSpinor * buf, - int sF, int sU, int Ls, int Ns, const FermionField &in, FermionField &out) { + int sF, int sU, int Ls, int Ns, const FermionField &in, FermionField &out) + { + switch(Opt) { #ifdef AVX512 - if (AsmOpt) { - WilsonKernels::DiracOptAsmDhopSite(st,lo,U,buf,sF,sU,Ls,Ns,in,out); - } else { -#else - { + case OptInlineAsm: + WilsonKernels::DiracOptAsmDhopSite(st,lo,U,buf,sF,sU,Ls,Ns,in,out); + break; #endif + case OptHandUnroll: for (int site = 0; site < Ns; site++) { for (int s = 0; s < Ls; s++) { - if (HandOpt) - WilsonKernels::DiracOptHandDhopSite(st,lo,U,buf,sF,sU,in,out); - else - WilsonKernels::DiracOptGenericDhopSite(st,lo,U,buf,sF,sU,in,out); + WilsonKernels::DiracOptHandDhopSite(st,lo,U,buf,sF,sU,in,out); sF++; } sU++; } + break; + case OptGeneric: + for (int site = 0; site < Ns; site++) { + for (int s = 0; s < Ls; s++) { + WilsonKernels::DiracOptGenericDhopSite(st,lo,U,buf,sF,sU,in,out); + sF++; + } + sU++; + } + break; + default: + assert(0); } } @@ -81,7 +91,7 @@ public: typename std::enable_if<(Impl::Dimension != 3 || (Impl::Dimension == 3 && Nc != 3)) && EnableBool, void>::type DiracOptDhopSite(StencilImpl &st, LebesgueOrder &lo, DoubledGaugeField &U, SiteHalfSpinor * buf, int sF, int sU, int Ls, int Ns, const FermionField &in, FermionField &out) { - + // no kernel choice for (int site = 0; site < Ns; site++) { for (int s = 0; s < Ls; s++) { WilsonKernels::DiracOptGenericDhopSite(st, lo, U, buf, sF, sU, in, out); @@ -95,23 +105,33 @@ public: typename std::enable_if::type DiracOptDhopSiteDag(StencilImpl &st, LebesgueOrder &lo, DoubledGaugeField &U, SiteHalfSpinor * buf, int sF, int sU, int Ls, int Ns, const FermionField &in, FermionField &out) { + + switch(Opt) { #ifdef AVX512 - if (AsmOpt) { + case OptInlineAsm: WilsonKernels::DiracOptAsmDhopSiteDag(st,lo,U,buf,sF,sU,Ls,Ns,in,out); - } else { -#else - { + break; #endif + case OptHandUnroll: for (int site = 0; site < Ns; site++) { for (int s = 0; s < Ls; s++) { - if (HandOpt) - WilsonKernels::DiracOptHandDhopSiteDag(st,lo,U,buf,sF,sU,in,out); - else - WilsonKernels::DiracOptGenericDhopSiteDag(st,lo,U,buf,sF,sU,in,out); + 
WilsonKernels::DiracOptHandDhopSiteDag(st,lo,U,buf,sF,sU,in,out); sF++; } sU++; } + break; + case OptGeneric: + for (int site = 0; site < Ns; site++) { + for (int s = 0; s < Ls; s++) { + WilsonKernels::DiracOptGenericDhopSiteDag(st,lo,U,buf,sF,sU,in,out); + sF++; + } + sU++; + } + break; + default: + assert(0); } } diff --git a/lib/qcd/action/fermion/WilsonKernelsAsm.cc b/lib/qcd/action/fermion/WilsonKernelsAsm.cc index 74862400..d7a9edd3 100644 --- a/lib/qcd/action/fermion/WilsonKernelsAsm.cc +++ b/lib/qcd/action/fermion/WilsonKernelsAsm.cc @@ -10,6 +10,7 @@ Author: Peter Boyle Author: paboyle +Author: Guido Cossu This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -53,24 +54,26 @@ WilsonKernels::DiracOptAsmDhopSiteDag(StencilImpl &st,LebesgueOrder & lo, } #if defined(AVX512) - +#include + /////////////////////////////////////////////////////////// // If we are AVX512 specialise the single precision routine /////////////////////////////////////////////////////////// - -#include + #include -static Vector signs; - - int setupSigns(void ){ - Vector bother(2); +static Vector signsF; + + template + int setupSigns(Vector& signs ){ + Vector bother(2); signs = bother; vrsign(signs[0]); visign(signs[1]); return 1; } - static int signInit = setupSigns(); + + static int signInitF = setupSigns(signsF); #define label(A) ilabel(A) #define ilabel(A) ".globl\n" #A ":\n" @@ -78,6 +81,8 @@ static Vector signs; #define MAYBEPERM(A,perm) if (perm) { A ; } #define MULT_2SPIN(ptr,pf) MULT_ADDSUB_2SPIN(ptr,pf) #define FX(A) WILSONASM_ ##A +#define COMPLEX_TYPE vComplexF +#define signs signsF #undef KERNEL_DAG template<> void @@ -98,8 +103,8 @@ WilsonKernels::DiracOptAsmDhopSiteDag(StencilImpl &st,LebesgueOrder #undef FX #define FX(A) DWFASM_ ## A #define MAYBEPERM(A,B) -#define VMOVIDUP(A,B,C) VBCASTIDUPf(A,B,C) -#define VMOVRDUP(A,B,C) VBCASTRDUPf(A,B,C) +//#define VMOVIDUP(A,B,C) VBCASTIDUPf(A,B,C) +//#define VMOVRDUP(A,B,C) VBCASTRDUPf(A,B,C) #define MULT_2SPIN(ptr,pf) MULT_ADDSUB_2SPIN_LS(ptr,pf) #undef KERNEL_DAG @@ -113,8 +118,71 @@ template<> void WilsonKernels::DiracOptAsmDhopSiteDag(StencilImpl &st,LebesgueOrder & lo,DoubledGaugeField &U,SiteHalfSpinor *buf, int ss,int ssU,int Ls,int Ns,const FermionField &in, FermionField &out) #include +#undef COMPLEX_TYPE +#undef signs +#undef VMOVRDUP +#undef MAYBEPERM +#undef MULT_2SPIN +#undef FX + +/////////////////////////////////////////////////////////// +// If we are AVX512 specialise the double precision routine +/////////////////////////////////////////////////////////// + +#include + +static Vector signsD; +#define signs signsD +static int signInitD = setupSigns(signsD); + +#define MAYBEPERM(A,perm) if (perm) { A ; } +#define MULT_2SPIN(ptr,pf) MULT_ADDSUB_2SPIN(ptr,pf) +#define FX(A) WILSONASM_ ##A +#define COMPLEX_TYPE vComplexD + +#undef KERNEL_DAG +template<> void +WilsonKernels::DiracOptAsmDhopSite(StencilImpl &st,LebesgueOrder & lo,DoubledGaugeField &U, SiteHalfSpinor *buf, + int ss,int ssU,int Ls,int Ns,const FermionField &in, FermionField &out) +#include + +#define KERNEL_DAG +template<> void +WilsonKernels::DiracOptAsmDhopSiteDag(StencilImpl &st,LebesgueOrder & lo,DoubledGaugeField &U,SiteHalfSpinor *buf, + int ss,int ssU,int Ls,int Ns,const FermionField &in, FermionField &out) +#include -#endif +#undef VMOVIDUP +#undef VMOVRDUP +#undef MAYBEPERM +#undef MULT_2SPIN +#undef FX +#define FX(A) DWFASM_ ## A +#define MAYBEPERM(A,B) +//#define VMOVIDUP(A,B,C) 
VBCASTIDUPd(A,B,C) +//#define VMOVRDUP(A,B,C) VBCASTRDUPd(A,B,C) +#define MULT_2SPIN(ptr,pf) MULT_ADDSUB_2SPIN_LS(ptr,pf) + +#undef KERNEL_DAG +template<> void +WilsonKernels::DiracOptAsmDhopSite(StencilImpl &st,LebesgueOrder & lo,DoubledGaugeField &U, SiteHalfSpinor *buf, + int ss,int ssU,int Ls,int Ns,const FermionField &in, FermionField &out) +#include + +#define KERNEL_DAG +template<> void +WilsonKernels::DiracOptAsmDhopSiteDag(StencilImpl &st,LebesgueOrder & lo,DoubledGaugeField &U,SiteHalfSpinor *buf, + int ss,int ssU,int Ls,int Ns,const FermionField &in, FermionField &out) +#include + +#undef COMPLEX_TYPE +#undef signs +#undef VMOVRDUP +#undef MAYBEPERM +#undef MULT_2SPIN +#undef FX + +#endif //AVX512 #define INSTANTIATE_ASM(A)\ template void WilsonKernels::DiracOptAsmDhopSite(StencilImpl &st,LebesgueOrder & lo,DoubledGaugeField &U, SiteHalfSpinor *buf,\ diff --git a/lib/qcd/action/fermion/WilsonKernelsAsmBody.h b/lib/qcd/action/fermion/WilsonKernelsAsmBody.h index 12579d8c..72e13754 100644 --- a/lib/qcd/action/fermion/WilsonKernelsAsmBody.h +++ b/lib/qcd/action/fermion/WilsonKernelsAsmBody.h @@ -5,7 +5,9 @@ const uint64_t plocal =(uint64_t) & in._odata[0]; // vComplexF isigns[2] = { signs[0], signs[1] }; - vComplexF *isigns = &signs[0]; + //COMPLEX_TYPE is vComplexF of vComplexD depending + //on the chosen precision + COMPLEX_TYPE *isigns = &signs[0]; MASK_REGS; int nmax=U._grid->oSites(); diff --git a/lib/qcd/hmc/HmcRunner.h b/lib/qcd/hmc/HmcRunner.h index a31ba784..53b127cf 100644 --- a/lib/qcd/hmc/HmcRunner.h +++ b/lib/qcd/hmc/HmcRunner.h @@ -116,7 +116,7 @@ class NerscHmcRunnerTemplate { NoSmearing SmearingPolicy; typedef MinimumNorm2, RepresentationsPolicy > IntegratorType; // change here to change the algorithm - IntegratorParameters MDpar(20, 1.0); + IntegratorParameters MDpar(40, 1.0); IntegratorType MDynamics(UGrid, MDpar, TheAction, SmearingPolicy); // Checkpoint strategy diff --git a/lib/qcd/utils/LinalgUtils.h b/lib/qcd/utils/LinalgUtils.h index c2cb95ee..e7e6a794 100644 --- a/lib/qcd/utils/LinalgUtils.h +++ b/lib/qcd/utils/LinalgUtils.h @@ -39,8 +39,8 @@ namespace QCD{ //on the 5d (rb4d) checkerboarded lattices //////////////////////////////////////////////////////////////////////// -template -void axpibg5x(Lattice &z,const Lattice &x,RealD a,RealD b) +template +void axpibg5x(Lattice &z,const Lattice &x,Coeff a,Coeff b) { z.checkerboard = x.checkerboard; conformable(x,z); @@ -57,8 +57,8 @@ PARALLEL_FOR_LOOP } } -template -void axpby_ssp(Lattice &z, RealD a,const Lattice &x,RealD b,const Lattice &y,int s,int sp) +template +void axpby_ssp(Lattice &z, Coeff a,const Lattice &x,Coeff b,const Lattice &y,int s,int sp) { z.checkerboard = x.checkerboard; conformable(x,y); @@ -72,8 +72,8 @@ PARALLEL_FOR_LOOP } } -template -void ag5xpby_ssp(Lattice &z,RealD a,const Lattice &x,RealD b,const Lattice &y,int s,int sp) +template +void ag5xpby_ssp(Lattice &z,Coeff a,const Lattice &x,Coeff b,const Lattice &y,int s,int sp) { z.checkerboard = x.checkerboard; conformable(x,y); @@ -90,8 +90,8 @@ PARALLEL_FOR_LOOP } } -template -void axpbg5y_ssp(Lattice &z,RealD a,const Lattice &x,RealD b,const Lattice &y,int s,int sp) +template +void axpbg5y_ssp(Lattice &z,Coeff a,const Lattice &x,Coeff b,const Lattice &y,int s,int sp) { z.checkerboard = x.checkerboard; conformable(x,y); @@ -108,8 +108,8 @@ PARALLEL_FOR_LOOP } } -template -void ag5xpbg5y_ssp(Lattice &z,RealD a,const Lattice &x,RealD b,const Lattice &y,int s,int sp) +template +void ag5xpbg5y_ssp(Lattice &z,Coeff a,const Lattice &x,Coeff 
b,const Lattice &y,int s,int sp) { z.checkerboard = x.checkerboard; conformable(x,y); @@ -127,8 +127,8 @@ PARALLEL_FOR_LOOP } } -template -void axpby_ssp_pminus(Lattice &z,RealD a,const Lattice &x,RealD b,const Lattice &y,int s,int sp) +template +void axpby_ssp_pminus(Lattice &z,Coeff a,const Lattice &x,Coeff b,const Lattice &y,int s,int sp) { z.checkerboard = x.checkerboard; conformable(x,y); @@ -144,8 +144,8 @@ PARALLEL_FOR_LOOP } } -template -void axpby_ssp_pplus(Lattice &z,RealD a,const Lattice &x,RealD b,const Lattice &y,int s,int sp) +template +void axpby_ssp_pplus(Lattice &z,Coeff a,const Lattice &x,Coeff b,const Lattice &y,int s,int sp) { z.checkerboard = x.checkerboard; conformable(x,y); diff --git a/lib/qcd/utils/SUn.h b/lib/qcd/utils/SUn.h index 20cd1889..914c3bad 100644 --- a/lib/qcd/utils/SUn.h +++ b/lib/qcd/utils/SUn.h @@ -674,6 +674,37 @@ class SU { out += la; } } +/* + add GaugeTrans +*/ + +template + static void GaugeTransform( GaugeField &Umu, GaugeMat &g){ + GridBase *grid = Umu._grid; + conformable(grid,g._grid); + + GaugeMat U(grid); + GaugeMat ag(grid); ag = adj(g); + + for(int mu=0;mu(Umu,mu); + U = g*U*Cshift(ag, mu, 1); + PokeIndex(Umu,U,mu); + } + } + template + static void GaugeTransform( std::vector &U, GaugeMat &g){ + GridBase *grid = g._grid; + GaugeMat ag(grid); ag = adj(g); + for(int mu=0;mu + static void RandomGaugeTransform(GridParallelRNG &pRNG, GaugeField &Umu, GaugeMat &g){ + LieRandomize(pRNG,g,1.0); + GaugeTransform(Umu,g); + } // Projects the algebra components a lattice matrix (of dimension ncol*ncol -1 ) // inverse operation: FundamentalLieAlgebraMatrix @@ -702,23 +733,33 @@ class SU { PokeIndex(out, Umu, mu); } } - static void TepidConfiguration(GridParallelRNG &pRNG, - LatticeGaugeField &out) { - LatticeMatrix Umu(out._grid); - for (int mu = 0; mu < Nd; mu++) { - LieRandomize(pRNG, Umu, 0.01); - PokeIndex(out, Umu, mu); + template + static void TepidConfiguration(GridParallelRNG &pRNG,GaugeField &out){ + typedef typename GaugeField::vector_type vector_type; + typedef iSUnMatrix vMatrixType; + typedef Lattice LatticeMatrixType; + + LatticeMatrixType Umu(out._grid); + for(int mu=0;mu(out,Umu,mu); } } - static void ColdConfiguration(GridParallelRNG &pRNG, LatticeGaugeField &out) { - LatticeMatrix Umu(out._grid); - Umu = 1.0; - for (int mu = 0; mu < Nd; mu++) { - PokeIndex(out, Umu, mu); + template + static void ColdConfiguration(GridParallelRNG &pRNG,GaugeField &out){ + typedef typename GaugeField::vector_type vector_type; + typedef iSUnMatrix vMatrixType; + typedef Lattice LatticeMatrixType; + + LatticeMatrixType Umu(out._grid); + Umu=1.0; + for(int mu=0;mu(out,Umu,mu); } } - static void taProj(const LatticeMatrix &in, LatticeMatrix &out) { + template + static void taProj( const LatticeMatrixType &in, LatticeMatrixType &out){ out = Ta(in); } template diff --git a/lib/qcd/utils/WilsonLoops.h b/lib/qcd/utils/WilsonLoops.h index 10022f50..03d45c07 100644 --- a/lib/qcd/utils/WilsonLoops.h +++ b/lib/qcd/utils/WilsonLoops.h @@ -522,4 +522,4 @@ typedef WilsonLoops SU3WilsonLoops; } } -#endif \ No newline at end of file +#endif diff --git a/lib/simd/Grid_avx.h b/lib/simd/Grid_avx.h index 05f2f1ab..f50eae2b 100644 --- a/lib/simd/Grid_avx.h +++ b/lib/simd/Grid_avx.h @@ -365,6 +365,18 @@ namespace Optimization { } }; + struct Div{ + // Real float + inline __m256 operator()(__m256 a, __m256 b){ + return _mm256_div_ps(a,b); + } + // Real double + inline __m256d operator()(__m256d a, __m256d b){ + return _mm256_div_pd(a,b); + } + }; + + struct Conj{ // Complex 
single inline __m256 operator()(__m256 in){ @@ -437,14 +449,13 @@ namespace Optimization { }; -#if defined (AVX2) || defined (AVXFMA4) -#define _mm256_alignr_epi32(ret,a,b,n) ret=(__m256) _mm256_alignr_epi8((__m256i)a,(__m256i)b,(n*4)%16) -#define _mm256_alignr_epi64(ret,a,b,n) ret=(__m256d) _mm256_alignr_epi8((__m256i)a,(__m256i)b,(n*8)%16) +#if defined (AVX2) +#define _mm256_alignr_epi32_grid(ret,a,b,n) ret=(__m256) _mm256_alignr_epi8((__m256i)a,(__m256i)b,(n*4)%16) +#define _mm256_alignr_epi64_grid(ret,a,b,n) ret=(__m256d) _mm256_alignr_epi8((__m256i)a,(__m256i)b,(n*8)%16) #endif -#if defined (AVX1) || defined (AVXFMA) - -#define _mm256_alignr_epi32(ret,a,b,n) { \ +#if defined (AVX1) || defined (AVXFMA) +#define _mm256_alignr_epi32_grid(ret,a,b,n) { \ __m128 aa, bb; \ \ aa = _mm256_extractf128_ps(a,1); \ @@ -458,7 +469,7 @@ namespace Optimization { ret = _mm256_insertf128_ps(ret,aa,0); \ } -#define _mm256_alignr_epi64(ret,a,b,n) { \ +#define _mm256_alignr_epi64_grid(ret,a,b,n) { \ __m128d aa, bb; \ \ aa = _mm256_extractf128_pd(a,1); \ @@ -474,19 +485,6 @@ namespace Optimization { #endif - inline std::ostream & operator << (std::ostream& stream, const __m256 a) - { - const float *p=(const float *)&a; - stream<< "{"< 3 ) { - _mm256_alignr_epi32(ret,in,tmp,n); + _mm256_alignr_epi32_grid(ret,in,tmp,n); } else { - _mm256_alignr_epi32(ret,tmp,in,n); + _mm256_alignr_epi32_grid(ret,tmp,in,n); } - // std::cout << " align epi32 n=" < "<< ret < 1 ) { - _mm256_alignr_epi64(ret,in,tmp,n); + _mm256_alignr_epi64_grid(ret,in,tmp,n); } else { - _mm256_alignr_epi64(ret,tmp,in,n); + _mm256_alignr_epi64_grid(ret,tmp,in,n); } - // std::cout << " align epi64 n=" < "<< ret < inline Grid::ComplexF Reduce::operator()(__m256 in){ @@ -631,6 +625,7 @@ namespace Optimization { // Arithmetic operations typedef Optimization::Sum SumSIMD; typedef Optimization::Sub SubSIMD; + typedef Optimization::Div DivSIMD; typedef Optimization::Mult MultSIMD; typedef Optimization::MultComplex MultComplexSIMD; typedef Optimization::Conj ConjSIMD; diff --git a/lib/simd/Grid_avx512.h b/lib/simd/Grid_avx512.h index 62789462..93e4c9ad 100644 --- a/lib/simd/Grid_avx512.h +++ b/lib/simd/Grid_avx512.h @@ -240,6 +240,17 @@ namespace Optimization { } }; + struct Div{ + // Real float + inline __m512 operator()(__m512 a, __m512 b){ + return _mm512_div_ps(a,b); + } + // Real double + inline __m512d operator()(__m512d a, __m512d b){ + return _mm512_div_pd(a,b); + } + }; + struct Conj{ // Complex single @@ -371,14 +382,8 @@ namespace Optimization { // Some Template specialization // Hack for CLANG until mm512_reduce_add_ps etc... 
are implemented in GCC and Clang releases -<<<<<<< HEAD -#define GNU_CLANG_COMPILER -#ifdef GNU_CLANG_COMPILER -======= - #ifndef __INTEL_COMPILER #warning "Slow reduction due to incomplete reduce intrinsics" ->>>>>>> develop //Complex float Reduce template<> inline Grid::ComplexF Reduce::operator()(__m512 in){ @@ -503,6 +508,7 @@ namespace Optimization { typedef Optimization::Sum SumSIMD; typedef Optimization::Sub SubSIMD; typedef Optimization::Mult MultSIMD; + typedef Optimization::Div DivSIMD; typedef Optimization::MultComplex MultComplexSIMD; typedef Optimization::Conj ConjSIMD; typedef Optimization::TimesMinusI TimesMinusISIMD; diff --git a/lib/simd/Grid_imci.h b/lib/simd/Grid_imci.h index 119a31a3..48cff96d 100644 --- a/lib/simd/Grid_imci.h +++ b/lib/simd/Grid_imci.h @@ -244,6 +244,17 @@ namespace Optimization { } }; + struct Div{ + // Real float + inline __m512 operator()(__m512 a, __m512 b){ + return _mm512_div_ps(a,b); + } + // Real double + inline __m512d operator()(__m512d a, __m512d b){ + return _mm512_div_pd(a,b); + } + }; + struct Conj{ // Complex single @@ -437,6 +448,7 @@ namespace Optimization { // Arithmetic operations typedef Optimization::Sum SumSIMD; typedef Optimization::Sub SubSIMD; + typedef Optimization::Div DivSIMD; typedef Optimization::Mult MultSIMD; typedef Optimization::MultComplex MultComplexSIMD; typedef Optimization::Conj ConjSIMD; diff --git a/lib/simd/Grid_sse4.h b/lib/simd/Grid_sse4.h index a4002373..560eda11 100644 --- a/lib/simd/Grid_sse4.h +++ b/lib/simd/Grid_sse4.h @@ -224,6 +224,18 @@ namespace Optimization { } }; + struct Div{ + // Real float + inline __m128 operator()(__m128 a, __m128 b){ + return _mm_div_ps(a,b); + } + // Real double + inline __m128d operator()(__m128d a, __m128d b){ + return _mm_div_pd(a,b); + } + }; + + struct Conj{ // Complex single inline __m128 operator()(__m128 in){ @@ -372,6 +384,8 @@ namespace Optimization { } } + + ////////////////////////////////////////////////////////////////////////////////////// // Here assign types @@ -398,6 +412,7 @@ namespace Optimization { // Arithmetic operations typedef Optimization::Sum SumSIMD; typedef Optimization::Sub SubSIMD; + typedef Optimization::Div DivSIMD; typedef Optimization::Mult MultSIMD; typedef Optimization::MultComplex MultComplexSIMD; typedef Optimization::Conj ConjSIMD; diff --git a/lib/simd/Grid_vector_types.h b/lib/simd/Grid_vector_types.h index 30576fb6..e1592eef 100644 --- a/lib/simd/Grid_vector_types.h +++ b/lib/simd/Grid_vector_types.h @@ -77,38 +77,24 @@ struct RealPart > { ////////////////////////////////////// // demote a vector to real type ////////////////////////////////////// - // type alias used to simplify the syntax of std::enable_if -template -using Invoke = typename T::type; -template -using EnableIf = Invoke >; -template -using NotEnableIf = Invoke >; +template using Invoke = typename T::type; +template using EnableIf = Invoke >; +template using NotEnableIf = Invoke >; //////////////////////////////////////////////////////// // Check for complexity with type traits -template -struct is_complex : public std::false_type {}; -template <> -struct is_complex > : public std::true_type {}; -template <> -struct is_complex > : public std::true_type {}; +template struct is_complex : public std::false_type {}; +template <> struct is_complex > : public std::true_type {}; +template <> struct is_complex > : public std::true_type {}; -template -using IfReal = Invoke::value, int> >; -template -using IfComplex = Invoke::value, int> >; -template -using IfInteger = 
Invoke::value, int> >; +template using IfReal = Invoke::value, int> >; +template using IfComplex = Invoke::value, int> >; +template using IfInteger = Invoke::value, int> >; -template -using IfNotReal = - Invoke::value, int> >; -template -using IfNotComplex = Invoke::value, int> >; -template -using IfNotInteger = Invoke::value, int> >; +template using IfNotReal = Invoke::value, int> >; +template using IfNotComplex = Invoke::value, int> >; +template using IfNotInteger = Invoke::value, int> >; //////////////////////////////////////////////////////// // Define the operation templates functors @@ -285,6 +271,20 @@ class Grid_simd { return a * b; } + ////////////////////////////////// + // Divides + ////////////////////////////////// + friend inline Grid_simd operator/(const Scalar_type &a, Grid_simd b) { + Grid_simd va; + vsplat(va, a); + return va / b; + } + friend inline Grid_simd operator/(Grid_simd b, const Scalar_type &a) { + Grid_simd va; + vsplat(va, a); + return b / a; + } + /////////////////////// // Unary negation /////////////////////// @@ -428,7 +428,6 @@ inline void rotate(Grid_simd &ret,Grid_simd b,int nrot) ret.v = Optimization::Rotate::rotate(b.v,2*nrot); } - template inline void vbroadcast(Grid_simd &ret,const Grid_simd &src,int lane){ S* typepun =(S*) &src; @@ -512,7 +511,6 @@ template = 0> inline void vfalse(Grid_simd &ret) { vsplat(ret, 0); } - template inline void zeroit(Grid_simd &z) { vzero(z); @@ -530,7 +528,6 @@ inline void vstream(Grid_simd &out, const Grid_simd &in) { typedef typename S::value_type T; binary((T *)&out.v, in.v, VstreamSIMD()); } - template = 0> inline void vstream(Grid_simd &out, const Grid_simd &in) { out = in; @@ -569,6 +566,34 @@ inline Grid_simd operator*(Grid_simd a, Grid_simd b) { return ret; }; +// Distinguish between complex types and others +template = 0> +inline Grid_simd operator/(Grid_simd a, Grid_simd b) { + typedef Grid_simd simd; + + simd ret; + simd den; + typename simd::conv_t conv; + + ret = a * conjugate(b) ; + den = b * conjugate(b) ; + + + auto real_den = toReal(den); + + ret.v=binary(ret.v, real_den.v, DivSIMD()); + + return ret; +}; + +// Real/Integer types +template = 0> +inline Grid_simd operator/(Grid_simd a, Grid_simd b) { + Grid_simd ret; + ret.v = binary(a.v, b.v, DivSIMD()); + return ret; +}; + /////////////////////// // Conjugate /////////////////////// @@ -582,7 +607,6 @@ template = 0> inline Grid_simd conjugate(const Grid_simd &in) { return in; // for real objects } - // Suppress adj for integer types... // odd; why conjugate above but not adj?? 
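// Editorial note on the complex division defined above (sketch, not part of the
// patch): for complex SIMD vectors the quotient is formed by multiplying numerator
// and denominator by the conjugate of b,
//
//   a / b = a * conj(b) / ( b * conj(b) ) = a * conj(b) / |b|^2 ,
//
// so the denominator b*conj(b) is real and the new element-wise DivSIMD
// (Optimization::Div) can be applied after toReal(); real and integer vectors
// divide lane-by-lane directly in the second overload.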
template = 0> inline Grid_simd adj(const Grid_simd &in) { @@ -596,14 +620,12 @@ template = 0> inline void timesMinusI(Grid_simd &ret, const Grid_simd &in) { ret.v = binary(in.v, ret.v, TimesMinusISIMD()); } - template = 0> inline Grid_simd timesMinusI(const Grid_simd &in) { Grid_simd ret; timesMinusI(ret, in); return ret; } - template = 0> inline Grid_simd timesMinusI(const Grid_simd &in) { return in; @@ -616,14 +638,12 @@ template = 0> inline void timesI(Grid_simd &ret, const Grid_simd &in) { ret.v = binary(in.v, ret.v, TimesISIMD()); } - template = 0> inline Grid_simd timesI(const Grid_simd &in) { Grid_simd ret; timesI(ret, in); return ret; } - template = 0> inline Grid_simd timesI(const Grid_simd &in) { return in; diff --git a/lib/stencil/Lebesgue.cc b/lib/stencil/Lebesgue.cc index c34b5c96..c30c14c7 100644 --- a/lib/stencil/Lebesgue.cc +++ b/lib/stencil/Lebesgue.cc @@ -32,7 +32,7 @@ Author: paboyle namespace Grid { int LebesgueOrder::UseLebesgueOrder; -std::vector LebesgueOrder::Block({2,2,2,2}); +std::vector LebesgueOrder::Block({8,2,2,2}); LebesgueOrder::IndexInteger LebesgueOrder::alignup(IndexInteger n){ n--; // 1000 0011 --> 1000 0010 diff --git a/lib/tensors/Tensor_arith_mul.h b/lib/tensors/Tensor_arith_mul.h index 917f7373..c24853b7 100644 --- a/lib/tensors/Tensor_arith_mul.h +++ b/lib/tensors/Tensor_arith_mul.h @@ -126,6 +126,36 @@ iVector operator * (const iVector& lhs,const iScalar& r mult(&ret,&lhs,&rhs); return ret; } + +////////////////////////////////////////////////////////////////// +// Divide by scalar +////////////////////////////////////////////////////////////////// +template strong_inline +iScalar operator / (const iScalar& lhs,const iScalar& rhs) +{ + iScalar ret; + ret._internal = lhs._internal/rhs._internal; + return ret; +} +template strong_inline +iVector operator / (const iVector& lhs,const iScalar& rhs) +{ + iVector ret; + for(int i=0;i strong_inline +iMatrix operator / (const iMatrix& lhs,const iScalar& rhs) +{ + iMatrix ret; + for(int i=0;i wilson.t$omp -for vol in 4.4.4.4 4.4.4.8 4.4.8.8 4.8.8.8 8.8.8.8 8.8.8.16 8.8.16.16 8.16.16.16 -do -perf=` ./benchmarks/Grid_wilson --grid $vol --omp $omp | grep mflop | awk '{print $3}'` -echo $vol $perf >> wilson.t$omp -done -done \ No newline at end of file diff --git a/scripts/build-all b/scripts/build-all deleted file mode 100755 index b97dca19..00000000 --- a/scripts/build-all +++ /dev/null @@ -1,46 +0,0 @@ -#!/bin/bash -e - -DIRS="clang-avx clang-avx-openmp clang-avx-openmp-mpi clang-avx-mpi clang-avx2 clang-avx2-openmp clang-avx2-openmp-mpi clang-avx2-mpi clang-sse" -EXTRADIRS="g++-avx g++-sse4 icpc-avx icpc-avx2 icpc-avx512" -BLACK="\033[30m" -RED="\033[31m" -GREEN="\033[32m" -YELLOW="\033[33m" -BLUE="\033[34m" -PINK="\033[35m" -CYAN="\033[36m" -WHITE="\033[37m" -NORMAL="\033[0;39m" - -for D in $DIRS -do - -echo -echo -e $RED ============================== -echo -e $GREEN $D -echo -e $RED ============================== -echo -e $BLUE - - cd builds/$D - make clean all -j 8 - cd ../../ -echo -e $NORMAL -done - -if [ "X$1" == "Xextra" ] -then -for D in $EXTRADIRS -do - -echo -echo -e $RED ============================== -echo -e $RED $D -echo -e $RED ============================== -echo -e $BLUE - - cd builds/$D - make clean all -j 8 - cd ../../ -echo -e $NORMAL -done -fi \ No newline at end of file diff --git a/scripts/configure-all b/scripts/configure-all deleted file mode 100755 index ad91034d..00000000 --- a/scripts/configure-all +++ /dev/null @@ -1,11 +0,0 @@ -#!/bin/bash - -DIRS="clang-avx clang-avx-openmp 
clang-avx-openmp-mpi clang-avx-mpi clang-avx2 clang-avx2-openmp clang-avx2-openmp-mpi clang-avx2-mpi icpc-avx icpc-avx2 icpc-avx512 g++-sse4 g++-avx clang-sse icpc-avx-openmp-mpi icpc-avx-openmp" - -for D in $DIRS -do - mkdir -p builds/$D - cd builds/$D - ../../scripts/configure-commands $D - cd ../.. -done diff --git a/scripts/configure-commands b/scripts/configure-commands deleted file mode 100755 index a3599d1f..00000000 --- a/scripts/configure-commands +++ /dev/null @@ -1,89 +0,0 @@ -#!/bin/bash -WD=$1 -BLACK="\033[30m" -RED="\033[31m" -GREEN="\033[32m" -YELLOW="\033[33m" -BLUE="\033[34m" -PINK="\033[35m" -CYAN="\033[36m" -WHITE="\033[37m" -NORMAL="\033[0;39m" -echo -echo -e $RED ============================== -echo -e $GREEN $WD -echo -e $RED ============================== -echo -e $YELLOW - -case $WD in -g++-avx) - CXX=g++ ../../configure --enable-simd=AVX CXXFLAGS="-mavx -O3 -std=c++11" LIBS="-lgmp -lmpfr" --enable-comms=none - ;; -g++-avx-openmp) - CXX=g++ ../../configure --enable-simd=AVX CXXFLAGS="-mavx -fopenmp -O3 -std=c++11" LIBS="-fopenmp -lgmp -lmpfr" --enable-comms=none - ;; -g++5-sse4) - CXX=g++-5 ../../configure --enable-simd=SSE4 CXXFLAGS="-msse4 -O3 -std=c++11" LIBS="-lgmp -lmpfr" --enable-comms=none - ;; -g++5-avx) - CXX=g++-5 ../../configure --enable-simd=AVX CXXFLAGS="-mavx -O3 -std=c++11" LIBS="-lgmp -lmpfr" --enable-comms=none - ;; -icpc-avx) - CXX=icpc ../../configure --enable-simd=AVX CXXFLAGS="-mavx -O3 -std=c++11" LIBS="-lgmp -lmpfr" --enable-comms=none - ;; -icpc-avx-openmp-mpi) -CXX=icpc ../../configure --enable-simd=AVX CXXFLAGS="-mavx -fopenmp -O3 -I/opt/local/include/openmpi-mp/ -std=c++11" LDFLAGS=-L/opt/local/lib/openmpi-mp/ LIBS="-lmpi -lmpi_cxx -fopenmp -lgmp -lmpfr" --enable-comms=mpi -;; -icpc-avx-openmp) -CXX=icpc ../../configure --enable-precision=single --enable-simd=AVX CXXFLAGS="-mavx -fopenmp -O3 -std=c++11" LIBS="-fopenmp -lgmp -lmpfr" --enable-comms=mpi -;; -icpc-avx2) - CXX=icpc ../../configure --enable-simd=AVX2 CXXFLAGS="-march=core-avx2 -O3 -std=c++11" LIBS="-lgmp -lmpfr" --enable-comms=none - ;; -icpc-avx512) - CXX=icpc ../../configure --enable-simd=AVX512 CXXFLAGS="-xCOMMON-AVX512 -O3 -std=c++11" --host=none LIBS="-lgmp -lmpfr" --enable-comms=none - ;; -icpc-mic) - CXX=icpc ../../configure --host=none --enable-simd=IMCI CXXFLAGS="-mmic -O3 -std=c++11" LDFLAGS=-mmic LIBS="-lgmp -lmpfr" --enable-comms=none - ;; -icpc-mic-avx512) - CXX=icpc ../../configure --host=none --enable-simd=IMCI CXXFLAGS="-xCOMMON_AVX512 -O3 -std=c++11" LDFLAGS=-xCOMMON_AVX512 LIBS="-lgmp -lmpfr" --enable-comms=none - ;; -clang-sse) -CXX=clang++ ../../configure --enable-precision=single --enable-simd=SSE4 CXXFLAGS="-msse4 -O3 -std=c++11" LIBS="-lgmp -lmpfr" --enable-comms=none - ;; -clang-avx) -CXX=clang++ ../../configure --enable-simd=AVX CXXFLAGS="-mavx -O3 -std=c++11" LIBS="-lgmp -lmpfr" --enable-comms=none - ;; -clang-avx2) -CXX=clang++ ../../configure --enable-simd=AVX2 CXXFLAGS="-mavx2 -mfma -O3 -std=c++11" LIBS="-lgmp -lmpfr" --enable-comms=none - ;; -clang-avx-openmp) -CXX=clang-omp++ ../../configure --enable-precision=double --enable-simd=AVX CXXFLAGS="-mavx -fopenmp -O3 -std=c++11" LDFLAGS="-fopenmp" LIBS="-lgmp -lmpfr" --enable-comms=none - ;; -clang-xc30) -CXX=$HOME/Clang/install/bin/clang++ ../../configure --enable-simd=AVX CXXFLAGS="-mavx -O3 -std=c++11 -I/opt/gcc/4.9.2/snos/include/g++/x86_64-suse-linux/ -I/opt/gcc/4.9.2/snos/include/g++/ " LDFLAGS="" LIBS="-lgmp -lmpfr" --enable-comms=none - ;; -clang-xc30-openmp) 
-CXX=$HOME/Clang/install/bin/clang++ ../../configure --enable-simd=AVX CXXFLAGS="-mavx -fopenmp -O3 -std=c++11 -I/opt/gcc/4.9.2/snos/include/g++/x86_64-suse-linux/ -I/opt/gcc/4.9.2/snos/include/g++/ " LDFLAGS="-fopenmp" LIBS="-lgmp -lmpfr" --enable-comms=none - ;; -clang-avx2-openmp) -CXX=clang-omp++ ../../configure --enable-simd=AVX2 CXXFLAGS="-mavx2 -mfma -fopenmp -O3 -std=c++11" LDFLAGS="-fopenmp" LIBS="-lgmp -lmpfr" --enable-comms=none - ;; -clang-avx-openmp-mpi) -CXX=clang-omp++ ../../configure --enable-simd=AVX CXXFLAGS="-mavx -fopenmp -O3 -I/opt/local/include/openmpi-mp/ -std=c++11" LDFLAGS=-L/opt/local/lib/openmpi-mp/ LIBS="-lmpi -lmpi_cxx -fopenmp -lgmp -lmpfr" --enable-comms=mpi -;; -clang-avx2-openmp-mpi) -CXX=clang-omp++ ../../configure --enable-simd=AVX2 CXXFLAGS="-mavx2 -mfma -fopenmp -O3 -I/opt/local/include/openmpi-mp/ -std=c++11" LDFLAGS=-L/opt/local/lib/openmpi-mp/ LIBS="-lmpi -lmpi_cxx -fopenmp -lgmp -lmpfr" --enable-comms=mpi -;; -clang-avx-mpi) -CXX=clang++ ../../configure --enable-simd=AVX CXXFLAGS="-mavx -O3 -I/opt/local/include/openmpi-mp/ -std=c++11" LDFLAGS=-L/opt/local/lib/openmpi-mp/ LIBS="-lmpi -lmpi_cxx -lgmp -lmpfr" --enable-comms=mpi -;; -clang-avx2-mpi) -CXX=clang++ ../../configure --enable-simd=AVX2 CXXFLAGS="-mavx2 -mfma -O3 -I/opt/local/include/openmpi-mp/ -std=c++11" LDFLAGS=-L/opt/local/lib/openmpi-mp/ LIBS="-lmpi -lmpi_cxx -lgmp -lmpfr" --enable-comms=mpi -;; -clang-avx2) -CXX=clang++ ../../configure --enable-simd=AVX2 CXXFLAGS="-mavx2 -mfma -O3 -std=c++11" LDFLAGS="-L/usr/local/lib/" LIBS="-lgmp -lmpfr" --enable-comms=none -;; -esac -echo -e $NORMAL diff --git a/scripts/configure-cray b/scripts/configure-cray deleted file mode 100755 index db581493..00000000 --- a/scripts/configure-cray +++ /dev/null @@ -1,10 +0,0 @@ -#!/bin/bash -DIRS="g++-avx-openmp g++-avx clang-xc30 clang-xc30-openmp" - -for D in $DIRS -do - mkdir -p builds/$D - cd builds/$D - ../../scripts/configure-commands $D - cd ../.. -done diff --git a/scripts/configure-mic b/scripts/configure-mic deleted file mode 100755 index 668845fe..00000000 --- a/scripts/configure-mic +++ /dev/null @@ -1,10 +0,0 @@ -#!/bin/bash -DIRS="build-icpc-mic" - -for D in $DIRS -do - mkdir -p $D - cd $D - ../configure-commands - cd .. -done diff --git a/scripts/copyright b/scripts/copyright index 92772f16..cc9ed6e5 100755 --- a/scripts/copyright +++ b/scripts/copyright @@ -12,6 +12,7 @@ Grid physics library, www.github.com/paboyle/Grid Source file: $1 Copyright (C) 2015 +Copyright (C) 2016 EOF @@ -38,8 +39,21 @@ See the full license in the file "LICENSE" in the top level distribution directo /* END LEGAL */ EOF + cat message > tmp.fil -cat $1 >> tmp.fil + +NOTICE=`grep -n "END LEGAL" $1 | awk '{ print $1 }' ` + +if [ "X$NOTICE" != "X" ] +then + echo "found notice ending on line $NOTICE" + awk 'BEGIN { P=0 } { if ( P ) print } /END LEGAL/{P=1} ' $1 >> tmp.fil +else + cat $1 >> tmp.fil + +fi + + cp tmp.fil $1 shift diff --git a/scripts/cray-modules b/scripts/cray-modules deleted file mode 100644 index 95de2b0b..00000000 --- a/scripts/cray-modules +++ /dev/null @@ -1,2 +0,0 @@ -module swap PrgEnv-cray PrgEnv-intel -module swap intel/14.0.4.211 intel/15.0.2.164 diff --git a/scripts/filelist b/scripts/filelist index 0802c2a6..dddb605b 100755 --- a/scripts/filelist +++ b/scripts/filelist @@ -20,15 +20,20 @@ for subdir in $dirs; do TESTS=`ls T*.cc` TESTLIST=`echo ${TESTS} | sed s/.cc//g ` PREF=`[ $subdir = '.' ] && echo noinst || echo EXTRA` - echo "tests: ${TESTLIST}" > Make.inc + SUB=`[ $subdir = '.' 
] && echo subtests` + echo "tests: ${TESTLIST} ${SUB}" > Make.inc echo ${PREF}_PROGRAMS = ${TESTLIST} >> Make.inc echo >> Make.inc for f in $TESTS; do - BNAME=`basename $f .cc` - echo ${BNAME}_SOURCES=$f >> Make.inc - echo ${BNAME}_LDADD=-lGrid>> Make.inc - echo >> Make.inc + BNAME=`basename $f .cc` + echo ${BNAME}_SOURCES=$f >> Make.inc + echo ${BNAME}_LDADD=-lGrid>> Make.inc + echo >> Make.inc done + if [ $subdir != '.' ]; then + echo CLEANFILES = ${TESTLIST} >> Make.inc + echo >> Make.inc + fi done # benchmarks Make.inc diff --git a/scripts/reconfigure_script b/scripts/reconfigure_script deleted file mode 100755 index d8d7212d..00000000 --- a/scripts/reconfigure_script +++ /dev/null @@ -1,4 +0,0 @@ -aclocal -I m4 -autoheader -f -automake -f --add-missing -autoconf -f diff --git a/scripts/update_fftw.sh b/scripts/update_fftw.sh deleted file mode 100755 index 20e42080..00000000 --- a/scripts/update_fftw.sh +++ /dev/null @@ -1,18 +0,0 @@ -#!/usr/bin/env bash - -if (( $# != 1 )); then - echo "usage: `basename $0` " 1>&2 - exit 1 -fi -ARC=$1 - -INITDIR=`pwd` -rm -rf lib/fftw -mkdir lib/fftw - -ARCDIR=`tar -tf ${ARC} | head -n1 | sed -e 's@/.*@@'` -tar -xf ${ARC} -cp ${ARCDIR}/api/fftw3.h lib/fftw/ - -cd ${INITDIR} -rm -rf ${ARCDIR} diff --git a/scripts/wilson.gnu b/scripts/wilson.gnu deleted file mode 100644 index 69bca5b5..00000000 --- a/scripts/wilson.gnu +++ /dev/null @@ -1,7 +0,0 @@ -plot 'wilson.t1' u 2 w l t "AVX1-OMP=1" -replot 'wilson.t2' u 2 w l t "AVX1-OMP=2" -replot 'wilson.t4' u 2 w l t "AVX1-OMP=4" -set terminal 'pdf' -set output 'wilson_clang.pdf' -replot -quit diff --git a/tests/Makefile.am b/tests/Makefile.am index 2b8cc2d7..2e7c1f0a 100644 --- a/tests/Makefile.am +++ b/tests/Makefile.am @@ -4,4 +4,9 @@ if BUILD_CHROMA_REGRESSION SUBDIRS+= qdpxx endif +.PHONY: subtests + include Make.inc + +subtests: + for d in $(SUBDIRS); do $(MAKE) -C $${d} tests; done diff --git a/tests/core/Test_fft.cc b/tests/core/Test_fft.cc index 2fdaeed2..03c8bc52 100644 --- a/tests/core/Test_fft.cc +++ b/tests/core/Test_fft.cc @@ -1,6 +1,6 @@ /************************************************************************************* - Grid physics library, www.github.com/paboyle/Grid + grid` physics library, www.github.com/paboyle/Grid Source file: ./tests/Test_cshift.cc @@ -46,57 +46,79 @@ int main (int argc, char ** argv) for(int d=0;d p({1,2,3,2}); + std::vector p({1,3,2,3}); one = ComplexD(1.0,0.0); zz = ComplexD(0.0,0.0); ComplexD ci(0.0,1.0); + + std::cout<<"*************************************************"< seeds({1,2,3,4}); + GridSerialRNG sRNG; sRNG.SeedFixedIntegers(seeds); // naughty seeding + GridParallelRNG pRNG(&GRID); + pRNG.SeedFixedIntegers(seeds); + + LatticeGaugeFieldD Umu(&GRID); + + SU3::ColdConfiguration(pRNG,Umu); // Unit gauge + // Umu=zero; + //////////////////////////////////////////////////// + // Wilson test + //////////////////////////////////////////////////// + { + LatticeFermionD src(&GRID); gaussian(pRNG,src); + LatticeFermionD tmp(&GRID); + LatticeFermionD ref(&GRID); + + RealD mass=0.01; + WilsonFermionD Dw(Umu,GRID,RBGRID,mass); + + Dw.M(src,tmp); + + std::cout << "Dw src = " < 1/2 gmu (eip - emip) = i sinp gmu + Kinetic = Kinetic + sin(kmu)*ci*(Gamma(Gmu[mu])*src5_p); + + } + + // NB implicit sum over mu + // + // 1-1/2 Dw = 1 - 1/2 ( eip+emip) + // = - 1/2 (ei - 2 + emi) + // = - 1/4 2 (eih - eimh)(eih - eimh) + // = 2 sink/2 ink/2 = sk2 + + W = one - M5 + sk2; + Kinetic = Kinetic + W * src5_p; + + LatticeCoordinate(scoor,sdir); + + tmp5 = Cshift(src5_p,sdir,+1); + 
tmp5 = (tmp5 - G5*tmp5)*0.5; + tmp5 = where(scoor==Integer(Ls-1),mass*tmp5,-tmp5); + Kinetic = Kinetic + tmp5; + + tmp5 = Cshift(src5_p,sdir,-1); + tmp5 = (tmp5 + G5*tmp5)*0.5; + tmp5 = where(scoor==Integer(0),mass*tmp5,-tmp5); + Kinetic = Kinetic + tmp5; + + std::cout<<"Momentum space Ddwf "<< norm2(Kinetic)< point(4,0); + src=zero; + SpinColourVectorD ferm; gaussian(sRNG,ferm); + pokeSite(ferm,src,point); + + const int Ls=32; + GridCartesian * FGrid = SpaceTimeGrid::makeFiveDimGrid(Ls,&GRID); + GridRedBlackCartesian * FrbGrid = SpaceTimeGrid::makeFiveDimRedBlackGrid(Ls,&GRID); + + RealD mass=0.01; + RealD M5 =0.8; + DomainWallFermionD Ddwf(Umu,*FGrid,*FrbGrid,GRID,RBGRID,mass,M5); + + // Momentum space prop + std::cout << " Solving by FFT and Feynman rules" < HermOp(Ddwf); + ConjugateGradient CG(1.0e-16,10000); + CG(HermOp,src5,result5); + + //////////////////////////////////////////////////////////////////////// + // Domain wall physical field propagator + //////////////////////////////////////////////////////////////////////// + /* + psi = chiralProjectMinus(psi_5[0]); + psi += chiralProjectPlus(psi_5[Ls-1]); + */ + ExtractSlice(tmp,result5,0 ,sdir); result4 = (tmp-G5*tmp)*0.5; + ExtractSlice(tmp,result5,Ls-1,sdir); result4 = result4+(tmp+G5*tmp)*0.5; + + std::cout << " Taking difference" < point(4,0); + src=zero; + SpinColourVectorD ferm; gaussian(sRNG,ferm); + pokeSite(ferm,src,point); + + const int Ls=48; + GridCartesian * FGrid = SpaceTimeGrid::makeFiveDimGrid(Ls,&GRID); + GridRedBlackCartesian * FrbGrid = SpaceTimeGrid::makeFiveDimRedBlackGrid(Ls,&GRID); + + RealD mass=0.01; + RealD M5 =0.8; + + OverlapWilsonCayleyTanhFermionD Dov(Umu,*FGrid,*FrbGrid,GRID,RBGRID,mass,M5,1.0); + + // Momentum space prop + std::cout << " Solving by FFT and Feynman rules" < HermOp(Dov); + ConjugateGradient CG(1.0e-16,10000); + CG(HermOp,src5,result5); + + //////////////////////////////////////////////////////////////////////// + // Domain wall physical field propagator + //////////////////////////////////////////////////////////////////////// + /* + psi = chiralProjectMinus(psi_5[0]); + psi += chiralProjectPlus(psi_5[Ls-1]); + */ + ExtractSlice(tmp,result5,0 ,sdir); result4 = (tmp-G5*tmp)*0.5; + ExtractSlice(tmp,result5,Ls-1,sdir); result4 = result4+(tmp+G5*tmp)*0.5; + + std::cout << " Taking difference" < QEDGimplTypesD; + typedef Photon QEDGaction; + + QEDGaction Maxwell(QEDGaction::FEYNMAN_L); + QEDGaction::GaugeField Prop(&GRID);Prop=zero; + QEDGaction::GaugeField Source(&GRID);Source=zero; + + Maxwell.FreePropagator (Source,Prop); + std::cout << " MaxwellFree propagator\n"; + */ + } Grid_finalize(); } diff --git a/tests/core/Test_fft_gfix.cc b/tests/core/Test_fft_gfix.cc new file mode 100644 index 00000000..c6b77a13 --- /dev/null +++ b/tests/core/Test_fft_gfix.cc @@ -0,0 +1,300 @@ + /************************************************************************************* + + grid` physics library, www.github.com/paboyle/Grid + + Source file: ./tests/Test_cshift.cc + + Copyright (C) 2015 + +Author: Azusa Yamaguchi +Author: Peter Boyle + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License along + with this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + + See the full license in the file "LICENSE" in the top level distribution directory + *************************************************************************************/ + /* END LEGAL */ +#include + +using namespace Grid; +using namespace Grid::QCD; + +template +class FourierAcceleratedGaugeFixer : public Gimpl { + public: + INHERIT_GIMPL_TYPES(Gimpl); + + typedef typename Gimpl::GaugeLinkField GaugeMat; + typedef typename Gimpl::GaugeField GaugeLorentz; + + static void GaugeLinkToLieAlgebraField(const std::vector &U,std::vector &A) { + for(int mu=0;mu &A,GaugeMat &dmuAmu) { + dmuAmu=zero; + for(int mu=0;mu::avgPlaquette(Umu); + Real org_link_trace=WilsonLoops::linkTrace(Umu); + Real old_trace = org_link_trace; + Real trG; + + std::vector U(Nd,grid); + GaugeMat dmuAmu(grid); + + for(int i=0;i(Umu,mu); + //trG = SteepestDescentStep(U,alpha,dmuAmu); + trG = FourierAccelSteepestDescentStep(U,alpha,dmuAmu); + for(int mu=0;mu(Umu,U[mu],mu); + // Monitor progress and convergence test + // infrequently to minimise cost overhead + if ( i %20 == 0 ) { + Real plaq =WilsonLoops::avgPlaquette(Umu); + Real link_trace=WilsonLoops::linkTrace(Umu); + + std::cout << GridLogMessage << " Iteration "< &U,Real & alpha, GaugeMat & dmuAmu) { + GridBase *grid = U[0]._grid; + + std::vector A(Nd,grid); + GaugeMat g(grid); + + GaugeLinkToLieAlgebraField(U,A); + ExpiAlphaDmuAmu(A,g,alpha,dmuAmu); + + + Real vol = grid->gSites(); + Real trG = TensorRemove(sum(trace(g))).real()/vol/Nc; + + SU::GaugeTransform(U,g); + + return trG; + } + + static Real FourierAccelSteepestDescentStep(std::vector &U,Real & alpha, GaugeMat & dmuAmu) { + + GridBase *grid = U[0]._grid; + + Real vol = grid->gSites(); + + FFT theFFT((GridCartesian *)grid); + + LatticeComplex Fp(grid); + LatticeComplex psq(grid); psq=zero; + LatticeComplex pmu(grid); + LatticeComplex one(grid); one = Complex(1.0,0.0); + + GaugeMat g(grid); + GaugeMat dmuAmu_p(grid); + std::vector A(Nd,grid); + + GaugeLinkToLieAlgebraField(U,A); + + DmuAmu(A,dmuAmu); + + theFFT.FFT_all_dim(dmuAmu_p,dmuAmu,FFT::forward); + + ////////////////////////////////// + // Work out Fp = psq_max/ psq... + ////////////////////////////////// + std::vector latt_size = grid->GlobalDimensions(); + std::vector coor(grid->_ndimension,0); + for(int mu=0;mu::taExp(ciadmam,g); + + Real trG = TensorRemove(sum(trace(g))).real()/vol/Nc; + + SU::GaugeTransform(U,g); + + return trG; + } + + static void ExpiAlphaDmuAmu(const std::vector &A,GaugeMat &g,Real & alpha, GaugeMat &dmuAmu) { + GridBase *grid = g._grid; + Complex cialpha(0.0,-alpha); + GaugeMat ciadmam(grid); + DmuAmu(A,dmuAmu); + ciadmam = dmuAmu*cialpha; + SU::taExp(ciadmam,g); + } +/* + //////////////////////////////////////////////////////////////// + // NB The FT for fields living on links has an extra phase in it + // Could add these to the FFT class as a later task since this code + // might be reused elsewhere ???? 
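// Editorial sketch, not part of the patch: FourierAccelSteepestDescentStep above is
// the standard Fourier-accelerated Landau gauge-fixing update,
//
//   g(x) = exp{ TA[ -i*alpha * F^{-1}( (psq_max/psq) * F(dmuAmu)(p) ) ] } ,
//
// where TA[.] is the traceless anti-hermitian projection applied by taExp, F is the
// lattice FFT, and psq is presumably the lattice momentum squared built in the
// (elided) loop over mu. Dropping the (psq_max/psq) weighting gives the plain
// SteepestDescentStep defined earlier,
//
//   g(x) = exp{ TA[ -i*alpha * dmuAmu(x) ] } ,
//
// and the driver loop monitors the plaquette and link trace every 20 iterations to
// track convergence.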
+ //////////////////////////////////////////////////////////////// + static void InverseFourierTransformAmu(FFT &theFFT,const std::vector &Ap,std::vector &Ax) { + GridBase * grid = theFFT.Grid(); + std::vector latt_size = grid->GlobalDimensions(); + + ComplexField pmu(grid); + ComplexField pha(grid); + GaugeMat Apha(grid); + + Complex ci(0.0,1.0); + + for(int mu=0;mu &Ax,std::vector &Ap) { + GridBase * grid = theFFT.Grid(); + std::vector latt_size = grid->GlobalDimensions(); + + ComplexField pmu(grid); + ComplexField pha(grid); + Complex ci(0.0,1.0); + + // Sign convention for FFTW calls: + // A(x)= Sum_p e^ipx A(p) / V + // A(p)= Sum_p e^-ipx A(x) + + for(int mu=0;mu seeds({1,2,3,4}); + + Grid_init(&argc,&argv); + + int threads = GridThread::GetThreads(); + + std::vector latt_size = GridDefaultLatt(); + std::vector simd_layout( { vComplex::Nsimd(),1,1,1}); + std::vector mpi_layout = GridDefaultMpi(); + + int vol = 1; + for(int d=0;d::avgPlaquette(Umu); + std::cout << " Initial plaquette "<::SteepestDescentGaugeFix(Umu,alpha,10000,1.0e-10, 1.0e-10); + + + plaq=WilsonLoops::avgPlaquette(Umu); + std::cout << " Final plaquette "< +Author: Peter Boyle + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License along + with this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + + See the full license in the file "LICENSE" in the top level distribution directory + *************************************************************************************/ + /* END LEGAL */ +#include + +using namespace Grid; +using namespace Grid::QCD; + +int main (int argc, char ** argv) +{ + Grid_init(&argc,&argv); + + int threads = GridThread::GetThreads(); + std::cout< latt_size ({N,N}); + std::vector simd_layout({vComplexD::Nsimd(),1}); + std::vector mpi_layout ({1,1}); + + int vol = 1; + int nd = latt_size.size(); + for(int d=0;dInteger(N2+W),zz,Charge); + } + + // std::cout << Charge < k(4,&GRID); + LatticeComplexD ksq(&GRID); + + ksq=zero; + for(int mu=0;mu (L/2), k[mu]-L, k[mu]); + + // std::cout << k[mu]< zero_mode(nd,0); + TComplexD Tone = ComplexD(1.0,0.0); + pokeSite(Tone,ksq,zero_mode); + + // std::cout << "Charge\n" << Charge <(mom,mommu,mu); // fourth order exponential approx - parallel_for(auto i=mom.begin();i Nf2(FermOp, CG, CG); // Set smearing (true/false), default: false - Nf2.is_smeared = true; + Nf2.is_smeared = false; // Collect actions ActionLevel Level1(1);