Mirror of https://github.com/paboyle/Grid.git, synced 2025-06-14 22:07:05 +01:00

Compare commits


129 Commits

SHA1 Message Date
dc5b909f99 Merge branch 'develop' of https://github.com/paboyle/Grid into feature/Lanczos 2018-06-12 19:11:20 -04:00
855b249f57 Merge branch 'develop' of https://github.com/paboyle/Grid into merge2 2018-06-12 19:08:06 -04:00
95d4b46446 Merge branch 'develop' of https://github.com/paboyle/Grid into develop 2018-06-08 11:30:29 +01:00
5dfd216a34 Better thread safety 2018-06-04 21:08:44 +01:00
c2e8d0aa88 Solve g++ problem on the lanczos test 2018-06-04 18:34:15 +01:00
bf96a4bdbf Merge branch 'master' into develop 2018-06-04 14:03:11 +01:00
84685c9bc3 Overflow fix 2018-06-04 13:42:07 +01:00
013ea4e8d1 Merge branch 'feature/staggered-comms-compute' into develop 2018-05-21 13:11:56 +01:00
7fbbb31a50 Merge branch 'develop' into feature/staggered-comms-compute
Conflicts:
	lib/qcd/action/fermion/ImprovedStaggeredFermion.cc
2018-05-21 13:07:29 +01:00
0e127b1fc7 New file single prec test 2018-05-21 12:57:13 +01:00
68c028b0a6 Comment 2018-05-21 12:54:25 +01:00
7836cc2d74 No checksum output on log for scidac 2018-05-15 10:10:08 +01:00
a61e0df54b Travis fix for Lime 2018-05-14 19:56:12 +01:00
f871fb0c6d check file is opened correctly in the Lime reader 2018-05-11 18:06:28 +01:00
25d1cadd3b Merge branch 'develop' of https://github.com/paboyle/Grid into develop 2018-05-07 18:55:09 +01:00
c24d53bbd1 Further debug of RNG I/O 2018-05-07 18:55:05 +01:00
3c7a4106ed Trap for deadly empty comm thread option 2018-05-07 17:26:39 +01:00
6eed167f0c Merge branch 'release/0.8.1' 2018-05-04 17:34:11 +01:00
4ad0df6fde Bump volume for Gerardo 2018-05-04 17:33:23 +01:00
68a5079f33 Merge branch 'develop' of https://github.com/paboyle/Grid into develop 2018-05-04 14:13:54 +01:00
8634e19f1b Update 2018-05-04 14:13:35 +01:00
9ada378e38 Add timing 2018-05-04 10:58:01 +01:00
bfbf2f1fa0 no threaded stencil benchmark if OpenMP is not supported 2018-05-03 16:20:01 +01:00
587bfcc0f4 Add Timing 2018-05-03 12:10:31 +01:00
8c658de179 Compressor speed up (a little); streaming stores 2018-05-02 17:52:16 +01:00
ba37d51ee9 Debugging the RNG IO 2018-05-02 15:32:06 +01:00
4f4181c54a Merge branch 'feature/staggered-comms-compute' of https://github.com/paboyle/Grid into feature/staggered-comms-compute 2018-05-02 14:59:13 +01:00
4d4ac2517b Adding Scalar field theory example for Scidac format 2018-05-02 14:36:32 +01:00
e568c24d1d Merge branch 'develop' of https://github.com/paboyle/Grid into develop 2018-05-02 14:29:25 +01:00
b458326744 Checkpointer module update 2018-05-02 14:29:22 +01:00
6e7d5e2243 HMC: added Scidac checkpointer and support for metadata 2018-05-02 14:28:59 +01:00
b35169f1dd MultiShift for Staggered 2018-05-02 14:22:37 +01:00
441ad7498d add Iterative counter 2018-05-02 14:21:30 +01:00
6f6c5c549a Split off gparity 2018-05-02 14:11:23 +01:00
1584e17b54 Revert to fast version 2018-05-02 14:10:55 +01:00
12982a4455 Hypercube optimisation 2018-05-02 14:10:21 +01:00
172f412102 shmget reintroduce 2018-05-02 14:07:41 +01:00
a64497265d Timing 2018-05-02 14:07:28 +01:00
c45f24a1b5 Improvements for tesseract 2018-04-30 21:50:00 +01:00
aaf37ee4d7 Merge branch 'develop' of https://github.com/paboyle/Grid into develop 2018-04-27 11:45:13 +01:00
1dddd17e3c Benchmark improvements from tesseract 2018-04-27 11:44:46 +01:00
661f1d3e8e Merge branch 'release/0.8.0' into develop 2018-04-27 11:22:33 +01:00
edcf9b9293 Merge branch 'release/0.8.0' 2018-04-27 11:13:19 +01:00
96272f3841 Merge staggered fix linear operator and reduction 2018-04-26 10:33:19 +01:00
5c936d88a0 Merge branch 'feature/staggered-comms-compute' of https://github.com/paboyle/Grid into feature/staggered-comms-compute 2018-04-26 10:18:37 +01:00
1c64ee926e Faster staggered operator using the trivial m^2 term 2018-04-26 10:17:49 +01:00
2cbb72a81c Provide info if EE term is trivial (m^2 factor)
Better timing in staggered 4d case
2018-04-26 10:10:07 +01:00
31d83ee046 Enable special treatment of constEE cases 2018-04-26 10:08:46 +01:00
a9e8758a01 Improvements to staggered tests timings 2018-04-26 10:08:05 +01:00
3e125c5b61 Faster linalg on CG optimised against staggered
Sum overhead is bigger for staggered
2018-04-26 10:07:19 +01:00
eac6ec4b5e Faster reductions, important on single node staggered 2018-04-26 10:03:57 +01:00
213f8db6a2 Microsecond resolution 2018-04-26 10:01:39 +01:00
8bb11e5039 Making tests compile 2018-03-08 23:02:19 -05:00
3686827df5 Reverted ImplicitlyRestartedLanczos.h 2018-03-08 19:22:25 -05:00
60589a93a3 Merge branch 'develop' of https://github.com/paboyle/Grid into merge 2018-03-08 18:02:43 -05:00
bc1f5be265 Merge branch 'dev-IRBL-ypj' of https://github.com/yongchull/Grid into merge 2018-03-08 18:02:06 -05:00
80302e95a8 MILC Interface 2018-03-08 15:34:03 +00:00
1c430ec71c Moved #include 2018-03-07 16:22:54 -05:00
0b63e2e9cd Merge branch 'develop' of https://github.com/paboyle/Grid into merge 2018-03-07 15:24:11 -05:00
b938202081 Overlapped Comm for Wilson DhopInternal 2018-03-07 14:08:43 +00:00
386b4fcb04 update convergence check 2018-03-05 18:37:51 -05:00
11219a8f7a add explicit restart method rbl 2018-03-05 18:12:42 -05:00
0f468e2179 OverlappedComm for Staggered 5D and 4D. 2018-02-22 12:50:09 +00:00
3caf0e8b09 print Ritz values before and after the shift; rollback to simple last lanczos vector rebuild; print formatting is changed 2018-02-02 18:43:37 -05:00
bc5ba39278 relocate the printing block of converged eigenvalues 2018-01-31 12:40:33 -05:00
fbe1209f7e count converged eigenvalues not assuming candidates are sorted 2018-01-31 12:10:24 -05:00
ebb1bebf24 Merge pull request #145 from yongchull/fix-skylake-avx512
patch to compile with AVX512 for SkyLake Xeon processor using GCC7.2.…
2018-01-28 11:01:43 +00:00
53a9260a94 patch to compile with AVX512 for SkyLake Xeon processors using GCC 7.2.0. Besides bug fixes in the source code, an option 'SKL' is added to configure.ac for SkyLake-specific AVX512 instruction flags when using GCC. Code can be compiled with --enable-simd=SKL using GCC 7.2.0, but Test_simd fails; AVX512 support for the complex double type with non-Intel compilers causes this error. 2018-01-27 10:00:38 -05:00
dc6f637e70 change GparityDomainWallFermion to ZMobius and add command line options to read boundary phase and omega 2018-01-27 08:21:27 -05:00
97b9c6f03d No option for interior/exterior split of asm kernels since different directions get interleaved 2018-01-22 11:04:19 +00:00
63982819c6 No option to overlap comms and compute for asm implementation since different directions are interleaved
in the kernels, introducing if else structure would be too painful
2018-01-22 11:03:39 +00:00
24162c9ead Staggered overlap comms compute 2018-01-09 13:02:52 +00:00
44b218a595 force hermiticity on the block alpha and force the diagonal of the block beta to be real 2017-12-29 23:26:17 -05:00
bfc0306a43 job parameters can be provided by cmdarg. 2017-12-29 00:58:24 -05:00
4c0ae75ac5 read configuration file 2017-12-27 16:34:18 -05:00
3cb8cb7282 'typename' is added to compile with AVX512 using GCC 7.2.0; a missing semicolon in Grid_avx512.h is fixed. Option SKL is added to the configure script for SkyLake-specific AVX512 operations. Code can be compiled with --enable-simd=SKL using GCC 7.2.0, but Test_simd fails; AVX512 support for the complex double type with non-Intel compilers causes this error and needs review. 2017-12-23 14:54:07 -05:00
89c4e9b168 first complete version of IRBL; requires practical test and clean up 2017-12-21 23:13:39 -05:00
fe406e230d block with a single vector case is working with IRBL 2017-12-18 11:26:42 -05:00
3dbc8586fa Merge branch 'develop' of https://github.com/paboyle/Grid into feature/Lanczos 2017-12-12 21:23:01 -05:00
7305910c95 Fixing an initialization found by Yong-Chull 2017-12-12 18:46:11 -05:00
5139eaf491 block Lanczos construction is added. 2017-12-03 23:55:22 -05:00
2c35c89b92 fix vector assign bug 2017-11-27 13:39:52 -05:00
91cc33e907 IRBL development test bed is added; bootstrap.sh is updated to avoid repeated download of the EIGEN package after the first build. 2017-11-05 23:46:03 -05:00
5d44346be3 Merge branch 'develop' of https://github.com/paboyle/Grid into feature/Lanczos 2017-10-30 15:49:17 -04:00
3a754fcd51 Merge branch 'develop' of https://github.com/paboyle/Grid into feature/Lanczos 2017-10-27 17:34:35 -04:00
137886c316 Handle the case coor.size() != dim.size() in Lexicographic 2017-10-19 10:28:57 -04:00
ef61b549e6 Merge branch 'feature/Lanczos' into ckelly_develop4 2017-10-10 13:41:43 -04:00
3006663b9c Schur solver for staggered-type (hermitian Mpc) operators 2017-08-31 21:32:01 -04:00
0145685f96 Added Staggered Type Preconditioned operator 2017-08-18 01:44:31 -04:00
e73e4b4002 Minor changes and fixes 2017-08-11 01:35:25 -04:00
caa6605b43 Still tweaking memory saving routines in Lanczos 2017-08-07 00:01:04 -04:00
522c9248ae Merge branch 'develop' of https://github.com/paboyle/Grid into feature/Lanczos 2017-08-06 23:58:21 -04:00
191fbf85fc Added ImplicitlyRestartedLanczosCJ to Algorithms.h 2017-07-28 15:33:59 -04:00
93650f3a61 Adding back (temporarily) dense matrix routines until Lanczos is finalized 2017-07-24 21:49:25 -04:00
cab4b4d063 Deleting old include file references 2017-07-24 20:51:31 -04:00
cf4b30b2dd Re-adding ImplicitlyRestartedLanczos 2017-07-24 20:40:25 -04:00
c51d0b4078 Merge branch 'develop' of https://github.com/paboyle/Grid into feature/Lanczos 2017-07-24 20:35:29 -04:00
2f4cbeb4d5 Minor changes 2017-06-12 18:25:18 -04:00
fb7c4fb815 Recovering lapack interface without array allocation 2017-06-07 00:00:59 -04:00
00bb71e5af Checking in before reworking lapack interface 2017-06-06 16:26:41 -04:00
cfed2c1ea0 Broken Lanczos. Going back to an older version temporarily. 2017-06-06 12:14:45 -04:00
b1b15f0b70 Further fixes from multidimensional array 2017-06-05 23:13:41 -04:00
927c7ae3ed changed allocation for LAPACK temporaries, to avoid crashing with some compilers (reported by Christoph) 2017-05-25 21:43:53 -04:00
05d04ceff8 Adding SimpleLanczos 2017-05-25 12:30:47 -04:00
8313367a50 Merge branch 'develop' of https://github.com/paboyle/Grid into feature/Lanczos 2017-05-25 12:30:06 -04:00
5c479ce663 Merge branch 'develop' of https://github.com/paboyle/Grid into feature/Lanczos 2017-05-24 18:58:53 -04:00
4bf9d65bf8 Checking in memory saving version of Lanczos 2017-05-24 18:57:32 -04:00
3a056c4dff Re-adding Bisection for SimpleLanczos 2017-05-22 18:23:03 -04:00
b0ba651654 Turning off the final sort for now 2017-05-19 10:49:09 -04:00
25d4c175c3 Cleaning up Lanczos 2017-05-18 18:33:47 -04:00
a8d7986e1c Temporary (hopefully) change to run with GCC for now. 2017-05-05 10:55:07 -04:00
92ec509bfa Committing to move to Jlab 2017-05-04 19:32:00 -04:00
e80a87ff7f Checking in before modifying 2017-05-04 16:05:07 -04:00
867fe93018 First Rotate reorg done. 2017-05-02 01:26:22 -04:00
09651c3326 Checking in before rearranging Lanczos 2017-05-02 00:47:18 -04:00
f87f2a3f8b Merge branch 'develop' of https://github.com/paboyle/Grid into feature/Lanczos 2017-05-01 12:00:47 -04:00
a07556dd5f Added back the convergence test from evecs of tridiagonal matrix. Bugfixes 2017-04-15 09:32:15 -04:00
f80a847aef Merge branch 'develop' into bugfix/dminus 2017-04-06 23:49:10 -04:00
93cb5d4e97 Working version of Lanczos without the extra copy. 2017-04-06 23:35:30 -04:00
9e48b7dfda MEM_SAVE in Lanczos seems to be working, but not pretty 2017-04-06 22:21:56 -04:00
d0c2c9c71f Merge branch 'develop' of https://github.com/paboyle/Grid into bugfix/dminus 2017-04-04 15:20:17 -04:00
c8cafa77ca Checking in the latest Lanczos 2017-04-04 15:18:12 -04:00
a3bcad3804 Added preconditioned SYM2 solver (SchurRedBlackDiagTwoSolve) 2017-03-30 20:33:27 -04:00
5a5b66292b Merge branch 'develop' of https://github.com/paboyle/Grid into bugfix/dminus 2017-03-30 10:44:02 -04:00
e63be32ad2 zmobius Meooe5D fixed? 2017-03-28 03:48:50 -04:00
6aa106d906 Fixing zmobius coeffs again 2017-03-27 21:57:07 -04:00
33d59c8869 Adding Zmobius prec test 2017-03-27 21:40:27 -04:00
a833fd8dbf Merge branch 'develop' of https://github.com/paboyle/Grid into bugfix/dminus 2017-03-27 21:37:26 -04:00
e9712bc7fb Zmobius test was wrong! (only mobius) checking in again 2017-03-16 23:04:28 -04:00
86 changed files with 10775 additions and 1239 deletions

.gitignore
View File

@@ -88,12 +88,18 @@ Thumbs.db
###################
build*/*
# bootstrap #
#############
*.tar.bz2*
# IDE related files #
#####################
*.xcodeproj/*
build.sh
.vscode
*.code-workspace
.ctags
tags
# Eigen source #
################

View File

@@ -19,6 +19,8 @@ before_install:
- if [[ "$TRAVIS_OS_NAME" == "osx" ]]; then brew install libmpc; fi
install:
- export CWD=`pwd`
- echo $CWD
- export CC=$CC$VERSION
- export CXX=$CXX$VERSION
- echo $PATH
@@ -36,11 +38,22 @@ script:
- ./bootstrap.sh
- mkdir build
- cd build
- ../configure --enable-precision=single --enable-simd=SSE4 --enable-comms=none
- mkdir lime
- cd lime
- mkdir build
- cd build
- wget http://usqcd-software.github.io/downloads/c-lime/lime-1.3.2.tar.gz
- tar xf lime-1.3.2.tar.gz
- cd lime-1.3.2
- ./configure --prefix=$CWD/build/lime/install
- make -j4
- make install
- cd $CWD/build
- ../configure --enable-precision=single --enable-simd=SSE4 --enable-comms=none --with-lime=$CWD/build/lime/install
- make -j4
- ./benchmarks/Benchmark_dwf --threads 1 --debug-signals
- echo make clean
- ../configure --enable-precision=double --enable-simd=SSE4 --enable-comms=none
- ../configure --enable-precision=double --enable-simd=SSE4 --enable-comms=none --with-lime=$CWD/build/lime/install
- make -j4
- ./benchmarks/Benchmark_dwf --threads 1 --debug-signals
- make check

View File

@@ -158,8 +158,10 @@ public:
dbytes=0;
ncomm=0;
parallel_for(int dir=0;dir<8;dir++){
#ifdef GRID_OMP
#pragma omp parallel for num_threads(Grid::CartesianCommunicator::nCommThreads)
#endif
for(int dir=0;dir<8;dir++){
double tbytes;
int mu =dir % 4;
@@ -175,9 +177,14 @@ public:
int comm_proc = mpi_layout[mu]-1;
Grid.ShiftedRanks(mu,comm_proc,xmit_to_rank,recv_from_rank);
}
#ifdef GRID_OMP
int tid = omp_get_thread_num();
#else
int tid = dir;
#endif
tbytes= Grid.StencilSendToRecvFrom((void *)&xbuf[dir][0], xmit_to_rank,
(void *)&rbuf[dir][0], recv_from_rank,
bytes,dir);
bytes,tid);
#ifdef GRID_OMP
#pragma omp atomic
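The hunks above replace Grid's parallel_for with an explicit OpenMP loop pinned to nCommThreads threads, and key StencilSendToRecvFrom by thread id rather than direction. A minimal standalone sketch of that pattern follows; SendToRecvFrom and the nCommThreads value are illustrative stand-ins, and the standard _OPENMP macro stands in for Grid's GRID_OMP:

#include <cstdio>
#ifdef _OPENMP
#include <omp.h>
#endif

static const int nCommThreads = 2;  // stand-in for CartesianCommunicator::nCommThreads

// hypothetical comm call: move 'bytes' for direction 'dir' on comm lane 'tid'
double SendToRecvFrom(int dir, int tid, double bytes) { (void)dir; (void)tid; return bytes; }

int main(void) {
  double dbytes = 0, bytes = 1.0e6;
#ifdef _OPENMP
#pragma omp parallel for num_threads(nCommThreads)
#endif
  for (int dir = 0; dir < 8; dir++) {
#ifdef _OPENMP
    int tid = omp_get_thread_num();  // the comm lane is the thread, not the direction
#else
    int tid = dir;                   // serial fallback: one lane per direction
#endif
    double tbytes = SendToRecvFrom(dir, tid, bytes);
#ifdef _OPENMP
#pragma omp atomic
#endif
    dbytes += tbytes;                // per-direction byte counts accumulated atomically
  }
  printf("moved %g bytes\n", dbytes);
  return 0;
}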

View File

@@ -169,7 +169,11 @@ int main (int argc, char ** argv)
for(int lat=4;lat<=maxlat;lat+=4){
for(int Ls=8;Ls<=8;Ls*=2){
std::vector<int> latt_size ({lat,lat,lat,lat});
std::vector<int> latt_size ({lat*mpi_layout[0],
lat*mpi_layout[1],
lat*mpi_layout[2],
lat*mpi_layout[3]});
GridCartesian Grid(latt_size,simd_layout,mpi_layout);
RealD Nrank = Grid._Nprocessors;
@@ -446,7 +450,7 @@ int main (int argc, char ** argv)
}
#ifdef GRID_OMP
std::cout<<GridLogMessage << "===================================================================================================="<<std::endl;
std::cout<<GridLogMessage << "= Benchmarking threaded STENCIL halo exchange in "<<nmu<<" dimensions"<<std::endl;
std::cout<<GridLogMessage << "===================================================================================================="<<std::endl;
@@ -485,7 +489,8 @@ int main (int argc, char ** argv)
dbytes=0;
ncomm=0;
parallel_for(int dir=0;dir<8;dir++){
#pragma omp parallel for num_threads(Grid::CartesianCommunicator::nCommThreads)
for(int dir=0;dir<8;dir++){
double tbytes;
int mu =dir % 4;
@@ -502,9 +507,9 @@ int main (int argc, char ** argv)
int comm_proc = mpi_layout[mu]-1;
Grid.ShiftedRanks(mu,comm_proc,xmit_to_rank,recv_from_rank);
}
int tid = omp_get_thread_num();
tbytes= Grid.StencilSendToRecvFrom((void *)&xbuf[dir][0], xmit_to_rank,
(void *)&rbuf[dir][0], recv_from_rank, bytes,dir);
(void *)&rbuf[dir][0], recv_from_rank, bytes,tid);
#pragma omp atomic
dbytes+=tbytes;
@@ -532,7 +537,7 @@ int main (int argc, char ** argv)
}
}
#endif
std::cout<<GridLogMessage << "===================================================================================================="<<std::endl;
std::cout<<GridLogMessage << "= All done; Bye Bye"<<std::endl;
std::cout<<GridLogMessage << "===================================================================================================="<<std::endl;

View File

@@ -1,9 +1,16 @@
#!/usr/bin/env bash
EIGEN_URL='http://bitbucket.org/eigen/eigen/get/3.3.3.tar.bz2'
EIGEN_SRC='3.3.3.tar.bz2'
EIGEN_URL="http://bitbucket.org/eigen/eigen/get/${EIGEN_SRC}"
echo "-- deploying Eigen source..."
wget ${EIGEN_URL} --no-check-certificate && ./scripts/update_eigen.sh `basename ${EIGEN_URL}` && rm `basename ${EIGEN_URL}`
if [ -f ${EIGEN_SRC} ]; then
echo "-- skip deploying Eigen source..."
else
echo "-- deploying Eigen source..."
wget ${EIGEN_URL} --no-check-certificate
./scripts/update_eigen.sh `basename ${EIGEN_URL}`
#rm `basename ${EIGEN_URL}`
fi
echo '-- generating Make.inc files...'
./scripts/filelist

View File

@@ -340,7 +340,7 @@ case ${ac_PRECISION} in
esac
###################### Shared memory allocation technique under MPI3
AC_ARG_ENABLE([shm],[AC_HELP_STRING([--enable-shm=shmopen|hugetlbfs|shmnone],
AC_ARG_ENABLE([shm],[AC_HELP_STRING([--enable-shm=shmopen|shmget|hugetlbfs|shmnone],
[Select SHM allocation technique])],[ac_SHM=${enable_shm}],[ac_SHM=shmopen])
case ${ac_SHM} in
@@ -349,6 +349,10 @@ case ${ac_SHM} in
AC_DEFINE([GRID_MPI3_SHMOPEN],[1],[GRID_MPI3_SHMOPEN] )
;;
shmget)
AC_DEFINE([GRID_MPI3_SHMGET],[1],[GRID_MPI3_SHMGET] )
;;
shmnone)
AC_DEFINE([GRID_MPI3_SHM_NONE],[1],[GRID_MPI3_SHM_NONE] )
;;
@@ -366,7 +370,7 @@ esac
AC_ARG_ENABLE([shmpath],[AC_HELP_STRING([--enable-shmpath=path],
[Select SHM mmap base path for hugetlbfs])],
[ac_SHMPATH=${enable_shmpath}],
[ac_SHMPATH=/var/lib/hugetlbfs/pagesize-2MB/])
[ac_SHMPATH=/var/lib/hugetlbfs/global/pagesize-2MB/])
AC_DEFINE_UNQUOTED([GRID_SHM_PATH],["$ac_SHMPATH"],[Path to a hugetlbfs filesystem for MMAPing])
############### communication type selection
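For reference, this is the System V shared-memory pattern that the new --enable-shm=shmget option names; a minimal sketch of the technique only, not Grid's actual allocator:

#include <sys/ipc.h>
#include <sys/shm.h>
#include <cassert>
#include <cstddef>

// Allocate 'bytes' of System V shared memory. IPC_RMID marks the segment
// for deletion once the last process detaches, so nothing leaks on exit.
void *shm_alloc(std::size_t bytes) {
  int id = shmget(IPC_PRIVATE, bytes, IPC_CREAT | 0666);
  assert(id != -1);
  void *p = shmat(id, nullptr, 0);   // map the segment into this process
  assert(p != (void *)-1);
  shmctl(id, IPC_RMID, nullptr);
  return p;
}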

View File

@@ -49,6 +49,8 @@ Author: Peter Boyle <paboyle@ph.ed.ac.uk>
#include <Grid/algorithms/iterative/BlockConjugateGradient.h>
#include <Grid/algorithms/iterative/ConjugateGradientReliableUpdate.h>
#include <Grid/algorithms/iterative/ImplicitlyRestartedLanczos.h>
#include <Grid/algorithms/iterative/ImplicitlyRestartedLanczosCJ.h>
#include <Grid/algorithms/iterative/ImplicitlyRestartedBlockLanczos.h>
#include <Grid/algorithms/CoarsenedMatrix.h>
#include <Grid/algorithms/FFT.h>

View File

@@ -51,7 +51,7 @@ namespace Grid {
virtual void Op (const Field &in, Field &out) = 0; // Abstract base
virtual void AdjOp (const Field &in, Field &out) = 0; // Abstract base
virtual void HermOpAndNorm(const Field &in, Field &out,RealD &n1,RealD &n2)=0;
virtual void HermOpAndNorm(const Field &in, Field &out,RealD &n1,RealD &n2) = 0;
virtual void HermOp(const Field &in, Field &out)=0;
};
@@ -309,36 +309,59 @@ namespace Grid {
class SchurStaggeredOperator : public SchurOperatorBase<Field> {
protected:
Matrix &_Mat;
Field tmp;
RealD mass;
double tMpc;
double tIP;
double tMeo;
double taxpby_norm;
uint64_t ncall;
public:
SchurStaggeredOperator (Matrix &Mat): _Mat(Mat){};
void Report(void)
{
std::cout << GridLogMessage << " HermOpAndNorm.Mpc "<< tMpc/ncall<<" usec "<<std::endl;
std::cout << GridLogMessage << " HermOpAndNorm.IP "<< tIP /ncall<<" usec "<<std::endl;
std::cout << GridLogMessage << " Mpc.MeoMoe "<< tMeo/ncall<<" usec "<<std::endl;
std::cout << GridLogMessage << " Mpc.axpby_norm "<< taxpby_norm/ncall<<" usec "<<std::endl;
}
SchurStaggeredOperator (Matrix &Mat): _Mat(Mat), tmp(_Mat.RedBlackGrid())
{
assert( _Mat.isTrivialEE() );
mass = _Mat.Mass();
tMpc=0;
tIP =0;
tMeo=0;
taxpby_norm=0;
ncall=0;
}
virtual void HermOpAndNorm(const Field &in, Field &out,RealD &n1,RealD &n2){
GridLogIterative.TimingMode(1);
std::cout << GridLogIterative << " HermOpAndNorm "<<std::endl;
ncall++;
tMpc-=usecond();
n2 = Mpc(in,out);
std::cout << GridLogIterative << " HermOpAndNorm.Mpc "<<std::endl;
tMpc+=usecond();
tIP-=usecond();
ComplexD dot= innerProduct(in,out);
std::cout << GridLogIterative << " HermOpAndNorm.innerProduct "<<std::endl;
tIP+=usecond();
n1 = real(dot);
}
virtual void HermOp(const Field &in, Field &out){
std::cout << GridLogIterative << " HermOp "<<std::endl;
Mpc(in,out);
ncall++;
tMpc-=usecond();
_Mat.Meooe(in,out);
_Mat.Meooe(out,tmp);
tMpc+=usecond();
taxpby_norm-=usecond();
axpby(out,-1.0,mass*mass,tmp,in);
taxpby_norm+=usecond();
}
virtual RealD Mpc (const Field &in, Field &out) {
Field tmp(in._grid);
Field tmp2(in._grid);
std::cout << GridLogIterative << " HermOp.Mpc "<<std::endl;
_Mat.Mooee(in,out);
_Mat.Mooee(out,tmp);
std::cout << GridLogIterative << " HermOp.MooeeMooee "<<std::endl;
tMeo-=usecond();
_Mat.Meooe(in,out);
_Mat.Meooe(out,tmp2);
std::cout << GridLogIterative << " HermOp.MeooeMeooe "<<std::endl;
RealD nn=axpy_norm(out,-1.0,tmp2,tmp);
std::cout << GridLogIterative << " HermOp.axpy_norm "<<std::endl;
_Mat.Meooe(out,tmp);
tMeo+=usecond();
taxpby_norm-=usecond();
RealD nn=axpby_norm(out,-1.0,mass*mass,tmp,in);
taxpby_norm+=usecond();
return nn;
}
virtual RealD MpcDag (const Field &in, Field &out){
@@ -350,6 +373,75 @@ namespace Grid {
};
template<class Matrix,class Field> using SchurStagOperator = SchurStaggeredOperator<Matrix,Field>;
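Schematically, the rewritten Mpc exploits the trivial even-even term of the staggered operator (M_ee = m, asserted via isTrivialEE in the constructor above), so the Mooee applications drop out and a single fused axpby_norm call evaluates

M_{pc}\,\psi \;=\; m^{2}\,\psi \;-\; M_{eo}M_{oe}\,\psi,
\qquad
n \;=\; \lVert M_{pc}\,\psi \rVert^{2}.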
#if 0
// This is specific to (Z)mobius fermions
template<class Matrix, class Field>
class KappaSimilarityTransform {
public:
// INHERIT_IMPL_TYPES(Matrix);
typedef typename Matrix::Coeff_t Coeff_t;
std::vector<Coeff_t> kappa, kappaDag, kappaInv, kappaInvDag;
KappaSimilarityTransform (Matrix &zmob) {
for (int i=0;i<(int)zmob.bs.size();i++) {
Coeff_t k = 1.0 / ( 2.0 * (zmob.bs[i] *(4 - zmob.M5) + 1.0) );
kappa.push_back( k );
kappaDag.push_back( conj(k) );
kappaInv.push_back( 1.0 / k );
kappaInvDag.push_back( 1.0 / conj(k) );
}
}
template<typename vobj>
void sscale(const Lattice<vobj>& in, Lattice<vobj>& out, Coeff_t* s) {
GridBase *grid=out._grid;
out.checkerboard = in.checkerboard;
assert(grid->_simd_layout[0] == 1); // should be fine for ZMobius for now
int Ls = grid->_rdimensions[0];
parallel_for(int ss=0;ss<grid->oSites();ss++){
vobj tmp = s[ss % Ls]*in._odata[ss];
vstream(out._odata[ss],tmp);
}
}
RealD sscale_norm(const Field& in, Field& out, Coeff_t* s) {
sscale(in,out,s);
return norm2(out);
}
virtual RealD M (const Field& in, Field& out) { return sscale_norm(in,out,&kappa[0]); }
virtual RealD MDag (const Field& in, Field& out) { return sscale_norm(in,out,&kappaDag[0]);}
virtual RealD MInv (const Field& in, Field& out) { return sscale_norm(in,out,&kappaInv[0]);}
virtual RealD MInvDag (const Field& in, Field& out) { return sscale_norm(in,out,&kappaInvDag[0]);}
};
template<class Matrix,class Field>
class SchurDiagTwoKappaOperator : public SchurOperatorBase<Field> {
public:
KappaSimilarityTransform<Matrix, Field> _S;
SchurDiagTwoOperator<Matrix, Field> _Mat;
SchurDiagTwoKappaOperator (Matrix &Mat): _S(Mat), _Mat(Mat) {};
virtual RealD Mpc (const Field &in, Field &out) {
Field tmp(in._grid);
_S.MInv(in,out);
_Mat.Mpc(out,tmp);
return _S.M(tmp,out);
}
virtual RealD MpcDag (const Field &in, Field &out){
Field tmp(in._grid);
_S.MDag(in,out);
_Mat.MpcDag(out,tmp);
return _S.MInvDag(tmp,out);
}
};
#endif
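For the (Z)Mobius case, the disabled (#if 0) KappaSimilarityTransform wraps the preconditioned operator in a diagonal-in-s similarity transform, which leaves the spectrum unchanged; with Mobius coefficients b_i and domain-wall height M_5:

\hat{M}_{pc} \;=\; K\,M_{pc}\,K^{-1},
\qquad
K=\operatorname{diag}(\kappa_0,\dots,\kappa_{L_s-1}),
\qquad
\kappa_i=\frac{1}{2\bigl(b_i(4-M_5)+1\bigr)}.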
/////////////////////////////////////////////////////////////
// Base classes for functions of operators

View File

@@ -54,10 +54,16 @@ struct ChebyParams : Serializable {
public:
void csv(std::ostream &out){
RealD diff = hi-lo;
#if 0
RealD delta = (hi-lo)*1.0e-9;
for (RealD x=lo; x<hi; x+=delta) {
delta*=1.1;
#else
RealD diff = hi-lo;
//for (RealD x=lo-0.2*diff; x<hi+0.2*diff; x+=(hi-lo)/1000) {
for (RealD x=lo-0.2*diff; x<hi+0.2*diff; x+=diff/1000.0) { // ypj [note] divide by float
#endif
RealD f = approx(x);
out<< x<<" "<<f<<std::endl;
}
@@ -89,7 +95,7 @@ struct ChebyParams : Serializable {
if(order < 2) exit(-1);
Coeffs.resize(order);
Coeffs.assign(0.,order);
Coeffs.assign(order,0.);
Coeffs[order-1] = 1.;
};
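The one-line fix above matters because std::vector::assign takes (count, value); with the arguments swapped, the old call resolved to a count of zero and left the vector empty. A minimal illustration:

#include <vector>
#include <cassert>

int main() {
  std::vector<double> Coeffs;
  Coeffs.assign(8, 0.);     // (count, value): eight zeros, as intended
  // Coeffs.assign(0., 8);  // old bug: resolves to (count=0, value=8.0), size()==0
  assert(Coeffs.size() == 8);
  return 0;
}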

View File

@@ -0,0 +1,137 @@
/*************************************************************************************
Grid physics library, www.github.com/paboyle/Grid
Source file: ./lib/algorithms/iterative/DenseMatrix.h
Copyright (C) 2015
Author: Peter Boyle <paboyle@ph.ed.ac.uk>
Author: paboyle <paboyle@ph.ed.ac.uk>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */
#ifndef GRID_DENSE_MATRIX_H
#define GRID_DENSE_MATRIX_H
namespace Grid {
/////////////////////////////////////////////////////////////
// Matrix utils
/////////////////////////////////////////////////////////////
template<class T> using DenseVector = std::vector<T>;
template<class T> using DenseMatrix = DenseVector<DenseVector<T> >;
template<class T> void Size(DenseVector<T> & vec, int &N)
{
N= vec.size();
}
template<class T> void Size(DenseMatrix<T> & mat, int &N,int &M)
{
N= mat.size();
M= mat[0].size();
}
template<class T> void SizeSquare(DenseMatrix<T> & mat, int &N)
{
int M; Size(mat,N,M);
assert(N==M);
}
template<class T> void Resize(DenseVector<T > & mat, int N) {
mat.resize(N);
}
template<class T> void Resize(DenseMatrix<T > & mat, int N, int M) {
mat.resize(N);
for(int i=0;i<N;i++){
mat[i].resize(M);
}
}
template<class T> void Fill(DenseMatrix<T> & mat, T&val) {
int N,M;
Size(mat,N,M);
for(int i=0;i<N;i++){
for(int j=0;j<M;j++){
mat[i][j] = val;
}}
}
/** Transpose of a matrix **/
template<class T> DenseMatrix<T> Transpose(DenseMatrix<T> & mat){
int N,M;
Size(mat,N,M);
DenseMatrix<T> C; Resize(C,M,N);
for(int i=0;i<M;i++){
for(int j=0;j<N;j++){
C[i][j] = mat[j][i];
}}
return C;
}
/** Set DenseMatrix to unit matrix **/
template<class T> void Unity(DenseMatrix<T> &A){
int N; SizeSquare(A,N);
for(int i=0;i<N;i++){
for(int j=0;j<N;j++){
if ( i==j ) A[i][j] = 1;
else A[i][j] = 0;
}
}
}
/** Add C * I to matrix **/
template<class T>
void PlusUnit(DenseMatrix<T> & A,T c){
int dim; SizeSquare(A,dim);
for(int i=0;i<dim;i++){A[i][i] = A[i][i] + c;}
}
/** return the Hermitian conjugate of matrix **/
template<class T>
DenseMatrix<T> HermitianConj(DenseMatrix<T> &mat){
int dim; SizeSquare(mat,dim);
DenseMatrix<T> C; Resize(C,dim,dim);
for(int i=0;i<dim;i++){
for(int j=0;j<dim;j++){
C[i][j] = conj(mat[j][i]);
}
}
return C;
}
/**Get a square submatrix**/
template <class T>
DenseMatrix<T> GetSubMtx(DenseMatrix<T> &A,int row_st, int row_end, int col_st, int col_end)
{
DenseMatrix<T> H; Resize(H,row_end - row_st,col_end-col_st);
for(int i = row_st; i<row_end; i++){
for(int j = col_st; j<col_end; j++){
H[i-row_st][j-col_st]=A[i][j];
}}
return H;
}
}
#include "Householder.h"
#include "Francis.h"
#endif
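A short usage sketch, assuming the DenseMatrix helpers above are in scope:

void demo() {
  DenseMatrix<double> A;
  Resize(A, 3, 3);                        // allocate 3x3 storage
  Unity(A);                               // A = I
  PlusUnit(A, 2.0);                       // add 2*I: diagonal becomes 3
  DenseMatrix<double> At = Transpose(A);  // At[i][j] == A[j][i]
  int N; SizeSquare(At, N);               // asserts square; N == 3
}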

View File

@@ -0,0 +1,525 @@
/*************************************************************************************
Grid physics library, www.github.com/paboyle/Grid
Source file: ./lib/algorithms/iterative/Francis.h
Copyright (C) 2015
Author: Peter Boyle <paboyle@ph.ed.ac.uk>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */
#ifndef FRANCIS_H
#define FRANCIS_H
#include <cstdlib>
#include <string>
#include <cmath>
#include <iostream>
#include <sstream>
#include <stdexcept>
#include <fstream>
#include <complex>
#include <algorithm>
//#include <timer.h>
//#include <lapacke.h>
//#include <Eigen/Dense>
namespace Grid {
template <class T> int SymmEigensystem(DenseMatrix<T > &Ain, DenseVector<T> &evals, DenseMatrix<T> &evecs, RealD small);
template <class T> int Eigensystem(DenseMatrix<T > &Ain, DenseVector<T> &evals, DenseMatrix<T> &evecs, RealD small);
/**
Find the eigenvalues of an upper hessenberg matrix using the Francis QR algorithm.
H =
x x x x x x x x x
x x x x x x x x x
0 x x x x x x x x
0 0 x x x x x x x
0 0 0 x x x x x x
0 0 0 0 x x x x x
0 0 0 0 0 x x x x
0 0 0 0 0 0 x x x
0 0 0 0 0 0 0 x x
Factorization is P T P^H where T is upper triangular (mod cc blocks) and P is orthogonal/unitary.
**/
template <class T>
int QReigensystem(DenseMatrix<T> &Hin, DenseVector<T> &evals, DenseMatrix<T> &evecs, RealD small)
{
DenseMatrix<T> H = Hin;
int N ; SizeSquare(H,N);
int M = N;
Fill(evals,0);
Fill(evecs,0);
T s,t,x=0,y=0,z=0;
T u,d;
T apd,amd,bc;
DenseVector<T> p(N,0);
T nrm = Norm(H); ///DenseMatrix Norm
int n, m;
int e = 0;
int it = 0;
int tot_it = 0;
int l = 0;
int r = 0;
DenseMatrix<T> P; Resize(P,N,N); Unity(P);
DenseVector<int> trows(N,0);
/// Check if the matrix is really hessenberg, if not abort
RealD sth = 0;
for(int j=0;j<N;j++){
for(int i=j+2;i<N;i++){
sth = abs(H[i][j]);
if(sth > small){
std::cout << "Non hessenberg H = " << sth << " > " << small << std::endl;
exit(1);
}
}
}
do{
std::cout << "Francis QR Step N = " << N << std::endl;
/** Check for convergence
x x x x x
0 x x x x
0 0 x x x
0 0 x x x
0 0 0 0 x
for this matrix l = 4
**/
do{
l = Chop_subdiag(H,nrm,e,small);
r = 0; ///May have converged on more than one eval
///Single eval
if(l == N-1){
evals[e] = H[l][l];
N--; e++; r++; it = 0;
}
///RealD eval
if(l == N-2){
trows[l+1] = 1; ///Needed for UTSolve
apd = H[l][l] + H[l+1][l+1];
amd = H[l][l] - H[l+1][l+1];
bc = (T)4.0*H[l+1][l]*H[l][l+1];
evals[e] = (T)0.5*( apd + sqrt(amd*amd + bc) );
evals[e+1] = (T)0.5*( apd - sqrt(amd*amd + bc) );
N-=2; e+=2; r++; it = 0;
}
} while(r>0);
if(N ==0) break;
DenseVector<T > ck; Resize(ck,3);
DenseVector<T> v; Resize(v,3);
for(int m = N-3; m >= l; m--){
///Starting vector essentially random shift.
if(it%10 == 0 && N >= 3 && it > 0){
s = (T)1.618033989*( abs( H[N-1][N-2] ) + abs( H[N-2][N-3] ) );
t = (T)0.618033989*( abs( H[N-1][N-2] ) + abs( H[N-2][N-3] ) );
x = H[m][m]*H[m][m] + H[m][m+1]*H[m+1][m] - s*H[m][m] + t;
y = H[m+1][m]*(H[m][m] + H[m+1][m+1] - s);
z = H[m+1][m]*H[m+2][m+1];
}
///Starting vector implicit Q theorem
else{
s = (H[N-2][N-2] + H[N-1][N-1]);
t = (H[N-2][N-2]*H[N-1][N-1] - H[N-2][N-1]*H[N-1][N-2]);
x = H[m][m]*H[m][m] + H[m][m+1]*H[m+1][m] - s*H[m][m] + t;
y = H[m+1][m]*(H[m][m] + H[m+1][m+1] - s);
z = H[m+1][m]*H[m+2][m+1];
}
ck[0] = x; ck[1] = y; ck[2] = z;
if(m == l) break;
/** Some stupid thing from Numerical Recipes, seems to work**/
// PAB.. for heaven's sake quote page, purpose, evidence it works.
// what sort of comment is that!?!?!?
u=abs(H[m][m-1])*(abs(y)+abs(z));
d=abs(x)*(abs(H[m-1][m-1])+abs(H[m][m])+abs(H[m+1][m+1]));
if ((T)abs(u+d) == (T)abs(d) ){
l = m; break;
}
//if (u < small){l = m; break;}
}
if(it > 100000){
std::cout << "QReigensystem: bugger it got stuck after 100000 iterations" << std::endl;
std::cout << "got " << e << " evals " << l << " " << N << std::endl;
exit(1);
}
normalize(ck); ///Normalization cancels in PHP anyway
T beta;
Householder_vector<T >(ck, 0, 2, v, beta);
Householder_mult<T >(H,v,beta,0,l,l+2,0);
Householder_mult<T >(H,v,beta,0,l,l+2,1);
///Accumulate eigenvector
Householder_mult<T >(P,v,beta,0,l,l+2,1);
int sw = 0; ///Are we on the last row?
for(int k=l;k<N-2;k++){
x = H[k+1][k];
y = H[k+2][k];
z = (T)0.0;
if(k+3 <= N-1){
z = H[k+3][k];
} else{
sw = 1;
v[2] = (T)0.0;
}
ck[0] = x; ck[1] = y; ck[2] = z;
normalize(ck);
Householder_vector<T >(ck, 0, 2-sw, v, beta);
Householder_mult<T >(H,v, beta,0,k+1,k+3-sw,0);
Householder_mult<T >(H,v, beta,0,k+1,k+3-sw,1);
///Accumulate eigenvector
Householder_mult<T >(P,v, beta,0,k+1,k+3-sw,1);
}
it++;
tot_it++;
}while(N > 1);
N = evals.size();
///Annoying - UT solves in reverse order;
DenseVector<T> tmp; Resize(tmp,N);
for(int i=0;i<N;i++){
tmp[i] = evals[N-i-1];
}
evals = tmp;
UTeigenvectors(H, trows, evals, evecs);
for(int i=0;i<evals.size();i++){evecs[i] = P*evecs[i]; normalize(evecs[i]);}
return tot_it;
}
template <class T>
int my_Wilkinson(DenseMatrix<T> &Hin, DenseVector<T> &evals, DenseMatrix<T> &evecs, RealD small)
{
/**
Find the eigenvalues of an upper Hessenberg matrix using the Wilkinson QR algorithm.
H =
x x 0 0 0 0
x x x 0 0 0
0 x x x 0 0
0 0 x x x 0
0 0 0 x x x
0 0 0 0 x x
Factorization is P T P^H where T is upper triangular (mod cc blocks) and P is orthogonal/unitary. **/
return my_Wilkinson(Hin, evals, evecs, small, small);
}
template <class T>
int my_Wilkinson(DenseMatrix<T> &Hin, DenseVector<T> &evals, DenseMatrix<T> &evecs, RealD small, RealD tol)
{
int N; SizeSquare(Hin,N);
int M = N;
///I don't want to modify the input but matrices must be passed by reference
//Scale a matrix by its "norm"
//RealD Hnorm = abs( Hin.LargestDiag() ); H = H*(1.0/Hnorm);
DenseMatrix<T> H; H = Hin;
RealD Hnorm = abs(Norm(Hin));
H = H * (1.0 / Hnorm);
// TODO use openmp and memset
Fill(evals,0);
Fill(evecs,0);
T s, t, x = 0, y = 0, z = 0;
T u, d;
T apd, amd, bc;
DenseVector<T> p; Resize(p,N); Fill(p,0);
T nrm = Norm(H); ///DenseMatrix Norm
int n, m;
int e = 0;
int it = 0;
int tot_it = 0;
int l = 0;
int r = 0;
DenseMatrix<T> P; Resize(P,N,N);
Unity(P);
DenseVector<int> trows(N, 0);
/// Check if the matrix is really symm tridiag
RealD sth = 0;
for(int j = 0; j < N; ++j)
{
for(int i = j + 2; i < N; ++i)
{
if(abs(H[i][j]) > tol || abs(H[j][i]) > tol)
{
std::cout << "Non Tridiagonal H(" << i << ","<< j << ") = |" << Real( real( H[j][i] ) ) << "| > " << tol << std::endl;
std::cout << "Warning tridiagonalize and call again" << std::endl;
// exit(1); // see what is going on
//return;
}
}
}
do{
do{
//Jasper
//Check if the subdiagonal term is small enough (<small)
//if true then it is converged.
//check start from H.dim - e - 1
//How to deal with more than 2 are converged?
//What if Chop_symm_subdiag returns something in the middle?
//--------------
l = Chop_symm_subdiag(H,nrm, e, small);
r = 0; ///May have converged on more than one eval
//Jasper
//In this case
// x x 0 0 0 0
// x x x 0 0 0
// 0 x x x 0 0
// 0 0 x x x 0
// 0 0 0 x x 0
// 0 0 0 0 0 x <- l
//--------------
///Single eval
if(l == N - 1)
{
evals[e] = H[l][l];
N--;
e++;
r++;
it = 0;
}
//Jasper
// x x 0 0 0 0
// x x x 0 0 0
// 0 x x x 0 0
// 0 0 x x 0 0
// 0 0 0 0 x x <- l
// 0 0 0 0 x x
//--------------
///RealD eval
if(l == N - 2)
{
trows[l + 1] = 1; ///Needed for UTSolve
apd = H[l][l] + H[l + 1][ l + 1];
amd = H[l][l] - H[l + 1][l + 1];
bc = (T) 4.0 * H[l + 1][l] * H[l][l + 1];
evals[e] = (T) 0.5 * (apd + sqrt(amd * amd + bc));
evals[e + 1] = (T) 0.5 * (apd - sqrt(amd * amd + bc));
N -= 2;
e += 2;
r++;
it = 0;
}
}while(r > 0);
//Jasper
//Already converged
//--------------
if(N == 0) break;
DenseVector<T> ck,v; Resize(ck,2); Resize(v,2);
for(int m = N - 3; m >= l; m--)
{
///Starting vector essentially random shift.
if(it%10 == 0 && N >= 3 && it > 0)
{
t = abs(H[N - 1][N - 2]) + abs(H[N - 2][N - 3]);
x = H[m][m] - t;
z = H[m + 1][m];
} else {
///Starting vector implicit Q theorem
d = (H[N - 2][N - 2] - H[N - 1][N - 1]) * (T) 0.5;
t = H[N - 1][N - 1] - H[N - 1][N - 2] * H[N - 1][N - 2]
/ (d + sign(d) * sqrt(d * d + H[N - 1][N - 2] * H[N - 1][N - 2]));
x = H[m][m] - t;
z = H[m + 1][m];
}
//Jasper
//why it is here????
//-----------------------
if(m == l)
break;
u = abs(H[m][m - 1]) * (abs(y) + abs(z));
d = abs(x) * (abs(H[m - 1][m - 1]) + abs(H[m][m]) + abs(H[m + 1][m + 1]));
if ((T)abs(u + d) == (T)abs(d))
{
l = m;
break;
}
}
//Jasper
if(it > 1000000)
{
std::cout << "Wilkinson: bugger it got stuck after 100000 iterations" << std::endl;
std::cout << "got " << e << " evals " << l << " " << N << std::endl;
exit(1);
}
//
T s, c;
Givens_calc<T>(x, z, c, s);
Givens_mult<T>(H, l, l + 1, c, -s, 0);
Givens_mult<T>(H, l, l + 1, c, s, 1);
Givens_mult<T>(P, l, l + 1, c, s, 1);
//
for(int k = l; k < N - 2; ++k)
{
x = H[k + 1][k];
z = H[k + 2][k];
Givens_calc<T>(x, z, c, s);
Givens_mult<T>(H, k + 1, k + 2, c, -s, 0);
Givens_mult<T>(H, k + 1, k + 2, c, s, 1);
Givens_mult<T>(P, k + 1, k + 2, c, s, 1);
}
it++;
tot_it++;
}while(N > 1);
N = evals.size();
///Annoying - UT solves in reverse order;
DenseVector<T> tmp(N);
for(int i = 0; i < N; ++i)
tmp[i] = evals[N-i-1];
evals = tmp;
//
UTeigenvectors(H, trows, evals, evecs);
//UTSymmEigenvectors(H, trows, evals, evecs);
for(int i = 0; i < evals.size(); ++i)
{
evecs[i] = P * evecs[i];
normalize(evecs[i]);
evals[i] = evals[i] * Hnorm;
}
// // FIXME this is to test
// Hin.write("evecs3", evecs);
// Hin.write("evals3", evals);
// // check rsd
// for(int i = 0; i < M; i++) {
// vector<T> Aevec = Hin * evecs[i];
// RealD norm2(0.);
// for(int j = 0; j < M; j++) {
// norm2 += (Aevec[j] - evals[i] * evecs[i][j]) * (Aevec[j] - evals[i] * evecs[i][j]);
// }
// }
return tot_it;
}
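The implicit-Q branch above seeds each sweep with the standard Wilkinson shift, the eigenvalue of the trailing 2x2 block closest to H_{N-1,N-1}:

d=\tfrac{1}{2}\bigl(H_{N-2,N-2}-H_{N-1,N-1}\bigr),
\qquad
\mu \;=\; H_{N-1,N-1}-\frac{H_{N-1,N-2}^{2}}{d+\operatorname{sign}(d)\sqrt{d^{2}+H_{N-1,N-2}^{2}}},

after which the first Givens rotation is built from x = H_{m,m} - \mu and z = H_{m+1,m}.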
template <class T>
void Hess(DenseMatrix<T > &A, DenseMatrix<T> &Q, int start){
/**
turn a matrix A =
x x x x x
x x x x x
x x x x x
x x x x x
x x x x x
into
x x x x x
x x x x x
0 x x x x
0 0 x x x
0 0 0 x x
with householder rotations
Slow.
*/
int N ; SizeSquare(A,N);
DenseVector<T > p; Resize(p,N); Fill(p,0);
for(int k=start;k<N-2;k++){
//cerr << "hess" << k << std::endl;
DenseVector<T > ck,v; Resize(ck,N-k-1); Resize(v,N-k-1);
for(int i=k+1;i<N;i++){ck[i-k-1] = A[i][k];} ///kth column
normalize(ck); ///Normalization cancels in PHP anyway
T beta;
Householder_vector<T >(ck, 0, ck.size()-1, v, beta); ///Householder vector
Householder_mult<T>(A,v,beta,start,k+1,N-1,0); ///A -> PA
Householder_mult<T >(A,v,beta,start,k+1,N-1,1); ///PA -> PAP^H
///Accumulate eigenvector
Householder_mult<T >(Q,v,beta,start,k+1,N-1,1); ///Q -> QP^H
}
/*for(int l=0;l<N-2;l++){
for(int k=l+2;k<N;k++){
A(0,k,l);
}
}*/
}
template <class T>
void Tri(DenseMatrix<T > &A, DenseMatrix<T> &Q, int start){
///Tridiagonalize a matrix
int N; SizeSquare(A,N);
Hess(A,Q,start);
/*for(int l=0;l<N-2;l++){
for(int k=l+2;k<N;k++){
A(0,l,k);
}
}*/
}
template <class T>
void ForceTridiagonal(DenseMatrix<T> &A){
///Tridiagonalize a matrix
int N ; SizeSquare(A,N);
for(int l=0;l<N-2;l++){
for(int k=l+2;k<N;k++){
A[l][k]=0;
A[k][l]=0;
}
}
}
template <class T>
int my_SymmEigensystem(DenseMatrix<T > &Ain, DenseVector<T> &evals, DenseVector<DenseVector<T> > &evecs, RealD small){
///Solve a symmetric eigensystem, not necessarily in tridiagonal form
int N; SizeSquare(Ain,N);
DenseMatrix<T > A; A = Ain;
DenseMatrix<T > Q; Resize(Q,N,N); Unity(Q);
Tri(A,Q,0);
int it = my_Wilkinson<T>(A, evals, evecs, small);
for(int k=0;k<N;k++){evecs[k] = Q*evecs[k];}
return it;
}
template <class T>
int Wilkinson(DenseMatrix<T> &Ain, DenseVector<T> &evals, DenseVector<DenseVector<T> > &evecs, RealD small){
return my_Wilkinson(Ain, evals, evecs, small);
}
template <class T>
int SymmEigensystem(DenseMatrix<T> &Ain, DenseVector<T> &evals, DenseVector<DenseVector<T> > &evecs, RealD small){
return my_SymmEigensystem(Ain, evals, evecs, small);
}
template <class T>
int Eigensystem(DenseMatrix<T > &Ain, DenseVector<T> &evals, DenseVector<DenseVector<T> > &evecs, RealD small){
///Solve a general eigensystem, not necessarily in tridiagonal form
int N; SizeSquare(Ain,N);
DenseMatrix<T > A; A = Ain;
DenseMatrix<T > Q; Resize(Q,N,N); Unity(Q);
Hess(A,Q,0);
int it = QReigensystem<T>(A, evals, evecs, small);
for(int k=0;k<N;k++){evecs[k] = Q*evecs[k];}
return it;
}
}
#endif

View File

@@ -0,0 +1,242 @@
/*************************************************************************************
Grid physics library, www.github.com/paboyle/Grid
Source file: ./lib/algorithms/iterative/Householder.h
Copyright (C) 2015
Author: Peter Boyle <paboyle@ph.ed.ac.uk>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */
#ifndef HOUSEHOLDER_H
#define HOUSEHOLDER_H
#define TIMER(A) std::cout << GridLogMessage << __func__ << " file "<< __FILE__ <<" line " << __LINE__ << std::endl;
#define ENTER() std::cout << GridLogMessage << "ENTRY "<<__func__ << " file "<< __FILE__ <<" line " << __LINE__ << std::endl;
#define LEAVE() std::cout << GridLogMessage << "EXIT "<<__func__ << " file "<< __FILE__ <<" line " << __LINE__ << std::endl;
#include <cstdlib>
#include <string>
#include <cmath>
#include <iostream>
#include <sstream>
#include <stdexcept>
#include <fstream>
#include <complex>
#include <algorithm>
namespace Grid {
/** Comparison function for finding the max element in a vector **/
template <class T> bool cf(T i, T j) {
return abs(i) < abs(j);
}
/**
Calculate a real Givens angle
**/
template <class T> inline void Givens_calc(T y, T z, T &c, T &s){
RealD mz = (RealD)abs(z);
if(mz==0.0){
c = 1; s = 0;
} else if(mz >= (RealD)abs(y)){
T t = -y/z;
s = (T)1.0 / sqrt ((T)1.0 + t * t);
c = s * t;
} else {
T t = -z/y;
c = (T)1.0 / sqrt ((T)1.0 + t * t);
s = c * t;
}
}
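The pair (c, s) returned above satisfies s y + c z = 0 with c^2 + s^2 = 1, so the rotation annihilates the second component; branching on the larger of |y|, |z| keeps the intermediate t from overflowing:

\begin{pmatrix} c & -s \\ s & c \end{pmatrix}
\begin{pmatrix} y \\ z \end{pmatrix}
=
\begin{pmatrix} r \\ 0 \end{pmatrix},
\qquad c^{2}+s^{2}=1.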
template <class T> inline void Givens_mult(DenseMatrix<T> &A, int i, int k, T c, T s, int dir)
{
int q ; SizeSquare(A,q);
if(dir == 0){
for(int j=0;j<q;j++){
T nu = A[i][j];
T w = A[k][j];
A[i][j] = (c*nu + s*w);
A[k][j] = (-s*nu + c*w);
}
}
if(dir == 1){
for(int j=0;j<q;j++){
T nu = A[j][i];
T w = A[j][k];
A[j][i] = (c*nu - s*w);
A[j][k] = (s*nu + c*w);
}
}
}
/**
from input = x;
Compute the complex Householder vector, v, such that
P = (I - b v transpose(v) )
b = 2/v.v
P | x | | x | k = 0
| x | | 0 |
| x | = | 0 |
| x | | 0 | j = 3
| x | | x |
These are the "Unreduced" Householder vectors.
**/
template <class T> inline void Householder_vector(DenseVector<T> input, int k, int j, DenseVector<T> &v, T &beta)
{
int N ; Size(input,N);
T m = *max_element(input.begin() + k, input.begin() + j + 1, cf<T> );
if(abs(m) > 0.0){
T alpha = 0;
for(int i=k; i<j+1; i++){
v[i] = input[i]/m;
alpha = alpha + v[i]*conj(v[i]);
}
alpha = sqrt(alpha);
beta = (T)1.0/(alpha*(alpha + abs(v[k]) ));
if(abs(v[k]) > 0.0) v[k] = v[k] + (v[k]/abs(v[k]))*alpha;
else v[k] = -alpha;
} else{
for(int i=k; i<j+1; i++){
v[i] = 0.0;
}
}
}
/**
from input = x;
Compute the complex Householder vector, v, such that
P = (I - b v transpose(v) )
b = 2/v.v
Px = alpha*e_dir
These are the "Unreduced" Householder vectors.
**/
template <class T> inline void Householder_vector(DenseVector<T> input, int k, int j, int dir, DenseVector<T> &v, T &beta)
{
int N = input.size();
T m = *max_element(input.begin() + k, input.begin() + j + 1, cf<T>);
if(abs(m) > 0.0){
T alpha = 0;
for(int i=k; i<j+1; i++){
v[i] = input[i]/m;
alpha = alpha + v[i]*conj(v[i]);
}
alpha = sqrt(alpha);
beta = 1.0/(alpha*(alpha + abs(v[dir]) ));
if(abs(v[dir]) > 0.0) v[dir] = v[dir] + (v[dir]/abs(v[dir]))*alpha;
else v[dir] = -alpha;
}else{
for(int i=k; i<j+1; i++){
v[i] = 0.0;
}
}
}
/**
Compute the product PA if trans = 0
AP if trans = 1
P = (I - b v transpose(v) )
b = 2/v.v
start at element l of matrix A
v is of length j - k + 1 of v are nonzero
**/
template <class T> inline void Householder_mult(DenseMatrix<T> &A , DenseVector<T> v, T beta, int l, int k, int j, int trans)
{
int N ; SizeSquare(A,N);
if(abs(beta) > 0.0){
for(int p=l; p<N; p++){
T s = 0;
if(trans==0){
for(int i=k;i<j+1;i++) s += conj(v[i-k])*A[i][p];
s *= beta;
for(int i=k;i<j+1;i++){ A[i][p] = A[i][p]-s*conj(v[i-k]);}
} else {
for(int i=k;i<j+1;i++){ s += conj(v[i-k])*A[p][i];}
s *= beta;
for(int i=k;i<j+1;i++){ A[p][i]=A[p][i]-s*conj(v[i-k]);}
}
}
}
}
/**
Compute the product PA if trans = 0
AP if trans = 1
P = (I - b v transpose(v) )
b = 2/v.v
start at element l of matrix A
v is of length j - k + 1 of v are nonzero
A is tridiagonal
**/
template <class T> inline void Householder_mult_tri(DenseMatrix<T> &A , DenseVector<T> v, T beta, int l, int M, int k, int j, int trans)
{
if(abs(beta) > 0.0){
int N ; SizeSquare(A,N);
DenseMatrix<T> tmp; Resize(tmp,N,N); Fill(tmp,0);
T s;
for(int p=l; p<M; p++){
s = 0;
if(trans==0){
for(int i=k;i<j+1;i++) s = s + conj(v[i-k])*A[i][p];
}else{
for(int i=k;i<j+1;i++) s = s + v[i-k]*A[p][i];
}
s = beta*s;
if(trans==0){
for(int i=k;i<j+1;i++) tmp[i][p] = tmp[i][p] - s*v[i-k];
}else{
for(int i=k;i<j+1;i++) tmp[p][i] = tmp[p][i] - s*conj(v[i-k]);
}
}
for(int p=l; p<M; p++){
if(trans==0){
for(int i=k;i<j+1;i++) A[i][p] = A[i][p] + tmp[i][p];
}else{
for(int i=k;i<j+1;i++) A[p][i] = A[p][i] + tmp[p][i];
}
}
}
}
}
#endif

View File

@@ -54,6 +54,7 @@ class ConjugateGradient : public OperatorFunction<Field> {
void operator()(LinearOperatorBase<Field> &Linop, const Field &src, Field &psi) {
psi.checkerboard = src.checkerboard;
conformable(psi, src);
@@ -69,7 +70,6 @@ class ConjugateGradient : public OperatorFunction<Field> {
Linop.HermOpAndNorm(psi, mmp, d, b);
r = src - mmp;
p = r;
@@ -96,38 +96,44 @@ class ConjugateGradient : public OperatorFunction<Field> {
<< "ConjugateGradient: k=0 residual " << cp << " target " << rsq << std::endl;
GridStopWatch LinalgTimer;
GridStopWatch InnerTimer;
GridStopWatch AxpyNormTimer;
GridStopWatch LinearCombTimer;
GridStopWatch MatrixTimer;
GridStopWatch SolverTimer;
SolverTimer.Start();
int k;
for (k = 1; k <= MaxIterations; k++) {
for (k = 1; k <= MaxIterations*1000; k++) {
c = cp;
MatrixTimer.Start();
Linop.HermOpAndNorm(p, mmp, d, qq);
Linop.HermOp(p, mmp);
MatrixTimer.Stop();
LinalgTimer.Start();
// RealD qqck = norm2(mmp);
// ComplexD dck = innerProduct(p,mmp);
InnerTimer.Start();
ComplexD dc = innerProduct(p,mmp);
InnerTimer.Stop();
d = dc.real();
a = c / d;
b_pred = a * (a * qq - d) / c;
AxpyNormTimer.Start();
cp = axpy_norm(r, -a, mmp, r);
AxpyNormTimer.Stop();
b = cp / c;
// Fuse these loops ; should be really easy
psi = a * p + psi;
p = p * b + r;
LinearCombTimer.Start();
parallel_for(int ss=0;ss<src._grid->oSites();ss++){
vstream(psi[ss], a * p[ss] + psi[ss]);
vstream(p [ss], b * p[ss] + r[ss]);
}
LinearCombTimer.Stop();
LinalgTimer.Stop();
std::cout << GridLogIterative << "ConjugateGradient: Iteration " << k
<< " residual " << cp << " target " << rsq << std::endl;
std::cout << GridLogDebug << "a = "<< a << " b_pred = "<< b_pred << " b = "<< b << std::endl;
std::cout << GridLogDebug << "qq = "<< qq << " d = "<< d << " c = "<< c << std::endl;
// Stopping condition
if (cp <= rsq) {
@@ -148,6 +154,9 @@ class ConjugateGradient : public OperatorFunction<Field> {
std::cout << GridLogMessage << "\tElapsed " << SolverTimer.Elapsed() <<std::endl;
std::cout << GridLogMessage << "\tMatrix " << MatrixTimer.Elapsed() <<std::endl;
std::cout << GridLogMessage << "\tLinalg " << LinalgTimer.Elapsed() <<std::endl;
std::cout << GridLogMessage << "\tInner " << InnerTimer.Elapsed() <<std::endl;
std::cout << GridLogMessage << "\tAxpyNorm " << AxpyNormTimer.Elapsed() <<std::endl;
std::cout << GridLogMessage << "\tLinearComb " << LinearCombTimer.Elapsed() <<std::endl;
if (ErrorOnNoConverge) assert(true_residual / Tolerance < 10000.0);
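The fused parallel_for above streams the last two recurrences of the CG iteration in one pass instead of two expression-template traversals; for reference, the iteration is

\alpha_k=\frac{r_k^{\dagger}r_k}{p_k^{\dagger}Ap_k},\quad
r_{k+1}=r_k-\alpha_k Ap_k,\quad
\psi_{k+1}=\psi_k+\alpha_k p_k,\quad
\beta_k=\frac{r_{k+1}^{\dagger}r_{k+1}}{r_k^{\dagger}r_k},\quad
p_{k+1}=r_{k+1}+\beta_k p_k.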

View File

@@ -60,6 +60,7 @@ namespace Grid {
}
void operator() (const FieldD &src_d_in, FieldD &sol_d){
TotalInnerIterations = 0;
GridStopWatch TotalTimer;

View File

@@ -43,6 +43,7 @@ namespace Grid {
public:
RealD Tolerance;
Integer MaxIterations;
Integer IterationsToComplete; //Number of iterations the CG took to finish. Filled in upon completion
int verbose;
MultiShiftFunction shifts;
@@ -163,7 +164,16 @@ void operator() (LinearOperatorBase<Field> &Linop, const Field &src, std::vector
for(int s=0;s<nshift;s++) {
axpby(psi[s],0.,-bs[s]*alpha[s],src,src);
}
///////////////////////////////////////
// Timers
///////////////////////////////////////
GridStopWatch AXPYTimer;
GridStopWatch ShiftTimer;
GridStopWatch QRTimer;
GridStopWatch MatrixTimer;
GridStopWatch SolverTimer;
SolverTimer.Start();
// Iteration loop
int k;
@@ -171,7 +181,9 @@ void operator() (LinearOperatorBase<Field> &Linop, const Field &src, std::vector
for (k=1;k<=MaxIterations;k++){
a = c /cp;
AXPYTimer.Start();
axpy(p,a,p,r);
AXPYTimer.Stop();
// Note to self - direction ps is iterated seperately
// for each shift. Does not appear to have any scope
@@ -180,6 +192,7 @@ void operator() (LinearOperatorBase<Field> &Linop, const Field &src, std::vector
// However SAME r is used. Could load "r" and update
// ALL ps[s]. 2/3 Bandwidth saving
// New Kernel: Load r, vector of coeffs, vector of pointers ps
AXPYTimer.Start();
for(int s=0;s<nshift;s++){
if ( ! converged[s] ) {
if (s==0){
@@ -190,22 +203,34 @@ void operator() (LinearOperatorBase<Field> &Linop, const Field &src, std::vector
}
}
}
AXPYTimer.Stop();
cp=c;
MatrixTimer.Start();
//Linop.HermOpAndNorm(p,mmp,d,qq); // d is used
// The below is faster on KNL
Linop.HermOp(p,mmp);
d=real(innerProduct(p,mmp));
Linop.HermOpAndNorm(p,mmp,d,qq);
MatrixTimer.Stop();
AXPYTimer.Start();
axpy(mmp,mass[0],p,mmp);
AXPYTimer.Stop();
RealD rn = norm2(p);
d += rn*mass[0];
bp=b;
b=-cp/d;
AXPYTimer.Start();
c=axpy_norm(r,b,mmp,r);
AXPYTimer.Stop();
// Toggle the recurrence history
bs[0] = b;
iz = 1-iz;
ShiftTimer.Start();
for(int s=1;s<nshift;s++){
if((!converged[s])){
RealD z0 = z[s][1-iz];
@@ -215,6 +240,7 @@ void operator() (LinearOperatorBase<Field> &Linop, const Field &src, std::vector
bs[s] = b*z[s][iz]/z0; // NB sign rel to Mike
}
}
ShiftTimer.Stop();
for(int s=0;s<nshift;s++){
int ss = s;
@@ -257,6 +283,9 @@ void operator() (LinearOperatorBase<Field> &Linop, const Field &src, std::vector
if ( all_converged ){
SolverTimer.Stop();
std::cout<<GridLogMessage<< "CGMultiShift: All shifts have converged iteration "<<k<<std::endl;
std::cout<<GridLogMessage<< "CGMultiShift: Checking solutions"<<std::endl;
@@ -269,8 +298,19 @@ void operator() (LinearOperatorBase<Field> &Linop, const Field &src, std::vector
RealD cn = norm2(src);
std::cout<<GridLogMessage<<"CGMultiShift: shift["<<s<<"] true residual "<<std::sqrt(rn/cn)<<std::endl;
}
std::cout << GridLogMessage << "Time Breakdown "<<std::endl;
std::cout << GridLogMessage << "\tElapsed " << SolverTimer.Elapsed() <<std::endl;
std::cout << GridLogMessage << "\tAXPY " << AXPYTimer.Elapsed() <<std::endl;
std::cout << GridLogMessage << "\tMarix " << MatrixTimer.Elapsed() <<std::endl;
std::cout << GridLogMessage << "\tShift " << ShiftTimer.Elapsed() <<std::endl;
IterationsToComplete = k;
return;
}
}
// ugly hack
std::cout<<GridLogMessage<<"CG multi shift did not converge"<<std::endl;

View File

@@ -0,0 +1,979 @@
/*************************************************************************************
Grid physics library, www.github.com/paboyle/Grid
Source file: ./lib/algorithms/iterative/ImplicitlyRestartedBlockLanczos.h
Copyright (C) 2015
Author: Peter Boyle <paboyle@ph.ed.ac.uk>
Author: Chulwoo Jung
Author: Yong-Chull Jang <ypj@quark.phy.bnl.gov>
Author: Guido Cossu
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */
#ifndef GRID_IRBL_H
#define GRID_IRBL_H
#include <string.h> //memset
#define Glog std::cout << GridLogMessage
namespace Grid {
enum class LanczosType { irbl, rbl };
/////////////////////////////////////////////////////////////
// Implicitly restarted block lanczos
/////////////////////////////////////////////////////////////
template<class Field>
class ImplicitlyRestartedBlockLanczos {
private:
std::string cname = std::string("ImplicitlyRestartedBlockLanczos");
int MaxIter; // Max iterations
int Nstop; // Number of evecs checked for convergence
int Nu; // Number of vecs in the unit block
int Nk; // Number of converged vectors sought
int Nm; // total number of vectors
int Nblock_k; // Nk/Nu
int Nblock_m; // Nm/Nu
int Nconv_test_interval; // Number of vectors skipped between convergence checks
RealD eresid;
IRLdiagonalisation diagonalisation;
////////////////////////////////////
// Embedded objects
////////////////////////////////////
SortEigen<Field> _sort;
LinearOperatorBase<Field> &_Linop;
OperatorFunction<Field> &_poly;
/////////////////////////
// Constructor
/////////////////////////
public:
ImplicitlyRestartedBlockLanczos(LinearOperatorBase<Field> &Linop, // op
OperatorFunction<Field> & poly, // polynomial
int _Nstop, // really sought vecs
int _Nconv_test_interval, // conv check interval
int _Nu, // vecs in the unit block
int _Nk, // sought vecs
int _Nm, // total vecs
RealD _eresid, // resid in lmd deficit
int _MaxIter, // Max iterations
IRLdiagonalisation _diagonalisation = IRLdiagonaliseWithEigen)
: _Linop(Linop), _poly(poly),
Nstop(_Nstop), Nconv_test_interval(_Nconv_test_interval),
Nu(_Nu), Nk(_Nk), Nm(_Nm),
Nblock_m(_Nm/_Nu), Nblock_k(_Nk/_Nu),
//eresid(_eresid), MaxIter(10),
eresid(_eresid), MaxIter(_MaxIter),
diagonalisation(_diagonalisation)
{ assert( (Nk%Nu==0) && (Nm%Nu==0) ); };
////////////////////////////////
// Helpers
////////////////////////////////
static RealD normalize(Field& v)
{
RealD nn = norm2(v);
nn = sqrt(nn);
v = v * (1.0/nn);
return nn;
}
void orthogonalize(Field& w, std::vector<Field>& evec, int k)
{
typedef typename Field::scalar_type MyComplex;
MyComplex ip;
for(int j=0; j<k; ++j){
ip = innerProduct(evec[j],w);
w = w - ip * evec[j];
}
normalize(w);
}
void orthogonalize_blockhead(Field& w, std::vector<Field>& evec, int k, int Nu)
{
typedef typename Field::scalar_type MyComplex;
MyComplex ip;
for(int j=0; j<k; ++j){
ip = innerProduct(evec[j*Nu],w);
w = w - ip * evec[j*Nu];
}
normalize(w);
}
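Both orthogonalize helpers implement classical Gram-Schmidt against the basis accumulated so far, followed by a normalisation:

w \;\leftarrow\; w-\sum_{j<k}\langle v_j,\,w\rangle\, v_j,
\qquad
w \;\leftarrow\; w/\lVert w\rVert.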
void calc(std::vector<RealD>& eval,
std::vector<Field>& evec,
const std::vector<Field>& src, int& Nconv, LanczosType Impl)
{
switch (Impl) {
case LanczosType::irbl:
calc_irbl(eval,evec,src,Nconv);
break;
case LanczosType::rbl:
calc_rbl(eval,evec,src,Nconv);
break;
}
}
void calc_irbl(std::vector<RealD>& eval,
std::vector<Field>& evec,
const std::vector<Field>& src, int& Nconv)
{
std::string fname = std::string(cname+"::calc_irbl()");
GridBase *grid = evec[0]._grid;
assert(grid == src[0]._grid);
assert( Nu == src.size() );
Glog << std::string(74,'*') << std::endl;
Glog << fname + " starting iteration 0 / "<< MaxIter<< std::endl;
Glog << std::string(74,'*') << std::endl;
Glog <<" -- seek Nk = "<< Nk <<" vectors"<< std::endl;
Glog <<" -- accept Nstop = "<< Nstop <<" vectors"<< std::endl;
Glog <<" -- total Nm = "<< Nm <<" vectors"<< std::endl;
Glog <<" -- size of eval = "<< eval.size() << std::endl;
Glog <<" -- size of evec = "<< evec.size() << std::endl;
if ( diagonalisation == IRLdiagonaliseWithEigen ) {
Glog << "Diagonalisation is Eigen "<< std::endl;
} else {
abort();
}
Glog << std::string(74,'*') << std::endl;
assert(Nm == evec.size() && Nm == eval.size());
std::vector<std::vector<ComplexD>> lmd(Nu,std::vector<ComplexD>(Nm,0.0));
std::vector<std::vector<ComplexD>> lme(Nu,std::vector<ComplexD>(Nm,0.0));
std::vector<std::vector<ComplexD>> lmd2(Nu,std::vector<ComplexD>(Nm,0.0));
std::vector<std::vector<ComplexD>> lme2(Nu,std::vector<ComplexD>(Nm,0.0));
std::vector<RealD> eval2(Nm);
std::vector<RealD> resid(Nk);
Eigen::MatrixXcd Qt = Eigen::MatrixXcd::Zero(Nm,Nm);
Eigen::MatrixXcd Q = Eigen::MatrixXcd::Zero(Nm,Nm);
std::vector<int> Iconv(Nm);
std::vector<Field> B(Nm,grid); // waste of space replicating
std::vector<Field> f(Nu,grid);
std::vector<Field> f_copy(Nu,grid);
Field v(grid);
Nconv = 0;
RealD beta_k;
// set initial vector
for (int i=0; i<Nu; ++i) {
Glog << "norm2(src[" << i << "])= "<< norm2(src[i]) << std::endl;
evec[i] = src[i];
orthogonalize(evec[i],evec,i);
Glog << "norm2(evec[" << i << "])= "<< norm2(evec[i]) << std::endl;
}
// initial Nblock_k steps
for(int b=0; b<Nblock_k; ++b) blockwiseStep(lmd,lme,evec,f,f_copy,b);
// restarting loop begins
int iter;
for(iter = 0; iter<MaxIter; ++iter){
Glog <<"#Restart iteration = "<< iter << std::endl;
// additional (Nblock_m - Nblock_k) steps
for(int b=Nblock_k; b<Nblock_m; ++b) blockwiseStep(lmd,lme,evec,f,f_copy,b);
// getting eigenvalues
for(int u=0; u<Nu; ++u){
for(int k=0; k<Nm; ++k){
lmd2[u][k] = lmd[u][k];
lme2[u][k] = lme[u][k];
}
}
Qt = Eigen::MatrixXcd::Identity(Nm,Nm);
diagonalize(eval2,lmd2,lme2,Nu,Nm,Nm,Qt,grid);
_sort.push(eval2,Nm);
Glog << "#Ritz value before shift: "<< std::endl;
for(int i=0; i<Nm; ++i){
std::cout.precision(13);
std::cout << "[" << std::setw(4)<< std::setiosflags(std::ios_base::right) <<i<<"] ";
std::cout << "Rval = "<<std::setw(20)<< std::setiosflags(std::ios_base::left)<< eval2[i] << std::endl;
}
//----------------------------------------------------------------------
if ( Nm>Nk ) {
Glog <<" #Apply shifted QR transformations "<<std::endl;
//int k2 = Nk+Nu;
int k2 = Nk;
Eigen::MatrixXcd BTDM = Eigen::MatrixXcd::Identity(Nm,Nm);
Q = Eigen::MatrixXcd::Identity(Nm,Nm);
unpackHermitBlockTriDiagMatToEigen(lmd,lme,Nu,Nblock_m,Nm,Nm,BTDM);
for(int ip=Nk; ip<Nm; ++ip){
shiftedQRDecompEigen(BTDM,Nu,Nm,eval2[ip],Q);
}
packHermitBlockTriDiagMatfromEigen(lmd,lme,Nu,Nblock_m,Nm,Nm,BTDM);
for(int i=0; i<k2; ++i) B[i] = 0.0;
for(int j=0; j<k2; ++j){
for(int k=0; k<Nm; ++k){
B[j].checkerboard = evec[k].checkerboard;
B[j] += evec[k]*Q(k,j);
}
}
for(int i=0; i<k2; ++i) evec[i] = B[i];
// reconstruct initial vector for additional pole space
blockwiseStep(lmd,lme,evec,f,f_copy,Nblock_k-1);
// getting eigenvalues
for(int u=0; u<Nu; ++u){
for(int k=0; k<Nm; ++k){
lmd2[u][k] = lmd[u][k];
lme2[u][k] = lme[u][k];
}
}
Qt = Eigen::MatrixXcd::Identity(Nm,Nm);
diagonalize(eval2,lmd2,lme2,Nu,Nk,Nm,Qt,grid);
_sort.push(eval2,Nk);
Glog << "#Ritz value after shift: "<< std::endl;
for(int i=0; i<Nk; ++i){
std::cout.precision(13);
std::cout << "[" << std::setw(4)<< std::setiosflags(std::ios_base::right) <<i<<"] ";
std::cout << "Rval = "<<std::setw(20)<< std::setiosflags(std::ios_base::left)<< eval2[i] << std::endl;
}
}
//----------------------------------------------------------------------
// Convergence test
Glog <<" #Convergence test: "<<std::endl;
for(int k = 0; k<Nk; ++k) B[k]=0.0;
for(int j = 0; j<Nk; ++j){
for(int k = 0; k<Nk; ++k){
B[j].checkerboard = evec[k].checkerboard;
B[j] += evec[k]*Qt(k,j);
}
}
Nconv = 0;
for(int i=0; i<Nk; ++i){
_Linop.HermOp(B[i],v);
RealD vnum = real(innerProduct(B[i],v)); // HermOp.
RealD vden = norm2(B[i]);
eval2[i] = vnum/vden;
v -= eval2[i]*B[i];
RealD vv = norm2(v);
resid[i] = vv;
std::cout.precision(13);
std::cout << "[" << std::setw(4)<< std::setiosflags(std::ios_base::right) <<i<<"] ";
std::cout << "eval = "<<std::setw(20)<< std::setiosflags(std::ios_base::left)<< eval2[i];
std::cout << " resid^2 = "<< std::setw(20)<< std::setiosflags(std::ios_base::right)<< vv<< std::endl;
// criterion changed: the evals are sorted, so all evals smaller (larger) than the Nstop-th should have converged
//if( (vv<eresid*eresid) && (i == Nconv) ){
if (vv<eresid*eresid) {
Iconv[Nconv] = i;
++Nconv;
}
} // i-loop end
Glog <<" #modes converged: "<<Nconv<<std::endl;
for(int i=0; i<Nconv; ++i){
std::cout.precision(13);
std::cout << "[" << std::setw(4)<< std::setiosflags(std::ios_base::right) <<Iconv[i]<<"] ";
std::cout << "eval_conv = "<<std::setw(20)<< std::setiosflags(std::ios_base::left)<< eval2[Iconv[i]];
std::cout << " resid^2 = "<< std::setw(20)<< std::setiosflags(std::ios_base::right)<< resid[Iconv[i]]<< std::endl;
}
if ( Nconv>=Nstop ) break;
} // end of iter loop
Glog << std::string(74,'*') << std::endl;
if ( Nconv<Nstop ) {
Glog << fname + " NOT converged ; Summary :\n";
} else {
Glog << fname + " CONVERGED ; Summary :\n";
// Sort converged eigenpairs.
eval.resize(Nconv);
evec.resize(Nconv,grid);
for(int i=0; i<Nconv; ++i){
eval[i] = eval2[Iconv[i]];
evec[i] = B[Iconv[i]];
}
_sort.push(eval,evec,Nconv);
}
Glog << std::string(74,'*') << std::endl;
Glog << " -- Iterations = "<< iter << "\n";
//Glog << " -- beta(k) = "<< beta_k << "\n";
Glog << " -- Nconv = "<< Nconv << "\n";
Glog << std::string(74,'*') << std::endl;
}
void calc_rbl(std::vector<RealD>& eval,
std::vector<Field>& evec,
const std::vector<Field>& src, int& Nconv)
{
std::string fname = std::string(cname+"::calc_rbl()");
GridBase *grid = evec[0]._grid;
assert(grid == src[0]._grid);
assert( Nu == (int)src.size() );
int Np = (Nm-Nk);
if (Np > 0 && MaxIter > 1) Np /= MaxIter;
int Nblock_p = Np/Nu;
Glog << std::string(74,'*') << std::endl;
Glog << fname + " starting iteration 0 / "<< MaxIter<< std::endl;
Glog << std::string(74,'*') << std::endl;
Glog <<" -- seek (min) Nk = "<< Nk <<" vectors"<< std::endl;
Glog <<" -- seek (inc) Np = "<< Np <<" vectors"<< std::endl;
Glog <<" -- seek (max) Nm = "<< Nm <<" vectors"<< std::endl;
Glog <<" -- accept Nstop = "<< Nstop <<" vectors"<< std::endl;
Glog <<" -- size of eval = "<< eval.size() << std::endl;
Glog <<" -- size of evec = "<< evec.size() << std::endl;
if ( diagonalisation == IRLdiagonaliseWithEigen ) {
Glog << "Diagonalisation is Eigen "<< std::endl;
} else {
abort();
}
Glog << std::string(74,'*') << std::endl;
assert(Nm == evec.size() && Nm == eval.size());
std::vector<std::vector<ComplexD>> lmd(Nu,std::vector<ComplexD>(Nm,0.0));
std::vector<std::vector<ComplexD>> lme(Nu,std::vector<ComplexD>(Nm,0.0));
std::vector<std::vector<ComplexD>> lmd2(Nu,std::vector<ComplexD>(Nm,0.0));
std::vector<std::vector<ComplexD>> lme2(Nu,std::vector<ComplexD>(Nm,0.0));
std::vector<RealD> eval2(Nk);
std::vector<RealD> resid(Nm);
Eigen::MatrixXcd Qt = Eigen::MatrixXcd::Zero(Nm,Nm);
Eigen::MatrixXcd Q = Eigen::MatrixXcd::Zero(Nm,Nm);
std::vector<int> Iconv(Nm);
std::vector<Field> B(Nm,grid); // waste of space replicating
std::vector<Field> f(Nu,grid);
std::vector<Field> f_copy(Nu,grid);
Field v(grid);
Nconv = 0;
RealD beta_k;
// set initial vector
for (int i=0; i<Nu; ++i) {
Glog << "norm2(src[" << i << "])= "<< norm2(src[i]) << std::endl;
evec[i] = src[i];
orthogonalize(evec[i],evec,i);
Glog << "norm2(evec[" << i << "])= "<< norm2(evec[i]) << std::endl;
}
// initial Nblock_k steps
for(int b=0; b<Nblock_k; ++b) blockwiseStep(lmd,lme,evec,f,f_copy,b);
// restarting loop begins
int iter;
int Nblock_l, Nblock_r;
int Nl, Nr;
int Nconv_guess = 0;
for(iter = 0; iter<MaxIter; ++iter){
Glog <<"#Restart iteration = "<< iter << std::endl;
Nblock_l = Nblock_k + iter*Nblock_p;
Nblock_r = Nblock_l + Nblock_p;
Nl = Nblock_l*Nu;
Nr = Nblock_r*Nu;
eval2.resize(Nr);
// additional Nblock_p steps
for(int b=Nblock_l; b<Nblock_r; ++b) blockwiseStep(lmd,lme,evec,f,f_copy,b);
// getting eigenvalues
for(int u=0; u<Nu; ++u){
for(int k=0; k<Nr; ++k){
lmd2[u][k] = lmd[u][k];
lme2[u][k] = lme[u][k];
}
}
Qt = Eigen::MatrixXcd::Identity(Nr,Nr);
diagonalize(eval2,lmd2,lme2,Nu,Nr,Nr,Qt,grid);
_sort.push(eval2,Nr);
Glog << "#Ritz value: "<< std::endl;
for(int i=0; i<Nr; ++i){
std::cout.precision(13);
std::cout << "[" << std::setw(4)<< std::setiosflags(std::ios_base::right) <<i<<"] ";
std::cout << "Rval = "<<std::setw(20)<< std::setiosflags(std::ios_base::left)<< eval2[i] << std::endl;
}
// Convergence test
Glog <<" #Convergence test: "<<std::endl;
Nconv = 0;
for(int k = 0; k<Nr; ++k) B[k]=0.0;
for(int j = 0; j<Nr; j+=Nconv_test_interval){
if ( j/Nconv_test_interval == Nconv ) {
Glog <<" #rotation for next check point evec"
<< std::setw(4)<< std::setiosflags(std::ios_base::right)
<< "["<< j <<"]" <<std::endl;
for(int k = 0; k<Nr; ++k){
B[j].checkerboard = evec[k].checkerboard;
B[j] += evec[k]*Qt(k,j);
}
_Linop.HermOp(B[j],v);
RealD vnum = real(innerProduct(B[j],v)); // HermOp.
RealD vden = norm2(B[j]);
eval2[j] = vnum/vden;
v -= eval2[j]*B[j];
RealD vv = norm2(v);
resid[j] = vv;
std::cout.precision(13);
std::cout << "[" << std::setw(4)<< std::setiosflags(std::ios_base::right) <<j<<"] ";
std::cout << "eval = "<<std::setw(20)<< std::setiosflags(std::ios_base::left)<< eval2[j];
std::cout << " resid^2 = "<< std::setw(20)<< std::setiosflags(std::ios_base::right)<< vv<< std::endl;
// criterion changed: the evals are sorted, so all evals smaller (larger) than the Nstop-th should have converged
//if( (vv<eresid*eresid) && (i == Nconv) ){
if (vv<eresid*eresid) {
Iconv[Nconv] = j;
++Nconv;
}
} else {
break;
}
} // j-loop end
Glog <<" #modes converged: "<<Nconv<<std::endl;
for(int i=0; i<Nconv; ++i){
std::cout.precision(13);
std::cout << "[" << std::setw(4)<< std::setiosflags(std::ios_base::right) <<Iconv[i]<<"] ";
std::cout << "eval_conv = "<<std::setw(20)<< std::setiosflags(std::ios_base::left)<< eval2[Iconv[i]];
std::cout << " resid^2 = "<< std::setw(20)<< std::setiosflags(std::ios_base::right)<< resid[Iconv[i]]<< std::endl;
}
Nconv_guess = (Nconv > 0) ? 1 + (Nconv-1)*Nconv_test_interval : 0;
if ( Nconv_guess >= Nstop ) break;
} // end of iter loop
Glog << std::string(74,'*') << std::endl;
if ( Nconv_guess < Nstop ) {
Glog << fname + " NOT converged ; Summary :\n";
} else {
Glog << fname + " CONVERGED ; Summary :\n";
// Sort converged eigenpairs.
eval.resize(Nconv);
evec.resize(Nconv,grid);
for(int i=0; i<Nconv; ++i){
eval[i] = eval2[Iconv[i]];
evec[i] = B[Iconv[i]];
}
_sort.push(eval,evec,Nconv);
}
Glog << std::string(74,'*') << std::endl;
Glog << " -- Iterations = "<< iter << "\n";
//Glog << " -- beta(k) = "<< beta_k << "\n";
Glog << " -- Nconv = "<< Nconv << "\n";
Glog << " -- Nconv (guess) = "<< Nconv_guess << "\n";
Glog << std::string(74,'*') << std::endl;
}
private:
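// One block step b (a sketch of the recurrence realised below): writing
// V_b = [ evec[L], ..., evec[R-1] ], the step computes
//   P(A) V_b = V_{b-1} B_b^† + V_b A_b + V_{b+1} B_{b+1},
// where the Hermitian diagonal block A_b is stored (transposed) in lmd
// and the triangular "beta" block B_{b+1}, obtained from the QR of the
// residual block, is stored in lme.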
void blockwiseStep(std::vector<std::vector<ComplexD>>& lmd,
std::vector<std::vector<ComplexD>>& lme,
std::vector<Field>& evec,
std::vector<Field>& w,
std::vector<Field>& w_copy,
int b)
{
const RealD tiny = 1.0e-20;
int Nu = w.size();
int Nm = evec.size();
assert( b < Nm/Nu );
// converts block index to full indices for an interval [L,R)
int L = Nu*b;
int R = Nu*(b+1);
Real beta;
// 3. w_k := A v_k - β_k v_{k-1}
for (int k=L, u=0; k<R; ++k, ++u) {
_poly(_Linop,evec[k],w[u]);
}
if (b>0) {
for (int u=0; u<Nu; ++u) {
//for (int k=L-Nu; k<L; ++k) {
for (int k=L-Nu+u; k<L; ++k) {
w[u] = w[u] - evec[k] * conjugate(lme[u][k]);
}
}
}
// 4. α_k := (v_k, w_k)
//for (int u=0; u<Nu; ++u) {
// for (int k=L; k<R; ++k) {
// lmd[u][k] = innerProduct(evec[k],w[u]); // lmd = transpose of alpha
// }
// lmd[u][L+u] = real(lmd[u][L+u]); // force diagonal to be real
//}
for (int u=0; u<Nu; ++u) {
for (int k=L+u; k<R; ++k) {
lmd[u][k] = innerProduct(evec[k],w[u]); // lmd = transpose of alpha
lmd[k-L][u+L] = conjugate(lmd[u][k]); // force hermiticity
}
lmd[u][L+u] = real(lmd[u][L+u]); // force diagonal to be real
}
// 5. w_k := w_k - α_k v_k
for (int u=0; u<Nu; ++u) {
for (int k=L; k<R; ++k) {
w[u] = w[u] - evec[k]*lmd[u][k];
}
w_copy[u] = w[u];
}
// In the block version, steps 6 and 7 of the Lanczos construction are
// replaced by a QR decomposition of the new basis block.
// This yields the block version of beta and an orthonormal block basis.
// Here, the QR decomposition is done using Gram-Schmidt.
for (int u=0; u<Nu; ++u) {
for (int k=L; k<R; ++k) {
lme[u][k] = 0.0;
}
}
#if 0
beta = normalize(w[0]);
for (int u=1; u<Nu; ++u) {
//orthogonalize(w[u],w_copy,u);
orthogonalize(w[u],w,u);
}
#else
// re-orthogonalization for numerical stability
for (int u=0; u<Nu; ++u) {
orthogonalize(w[u],evec,R);
}
// QR part
for (int u=1; u<Nu; ++u) {
orthogonalize(w[u],w,u);
}
#endif
for (int u=0; u<Nu; ++u) {
//for (int v=0; v<Nu; ++v) {
for (int v=u; v<Nu; ++v) {
lme[u][L+v] = innerProduct(w[u],w_copy[v]);
}
lme[u][L+u] = real(lme[u][L+u]); // force diagonal to be real
}
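// The Gram-Schmidt pass above, together with the inner products just
// taken, is a thin QR factorisation of the Nu new vectors: with W holding
// the pre-orthogonalisation w[0..Nu-1] as columns, W = Q R and
// R(u,v) = lme[u][L+v]. A dense Eigen sketch of the same factorisation
// (illustration only, never executed; the 16 is an arbitrary stand-in for
// the field dimension):
#if 0
Eigen::MatrixXcd W = Eigen::MatrixXcd::Random(16,Nu);    // stand-in block
Eigen::HouseholderQR<Eigen::MatrixXcd> thinQR(W);
Eigen::MatrixXcd Qd = thinQR.householderQ()
                    * Eigen::MatrixXcd::Identity(16,Nu); // orthonormal w[u]
Eigen::MatrixXcd Rd = Qd.adjoint()*W;                    // beta block, cf. lme
#endif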
//lme[0][L] = beta;
for (int u=0; u<Nu; ++u) {
Glog << "norm2(w[" << u << "])= "<< norm2(w[u]) << std::endl;
for (int k=L+u; k<R; ++k) {
Glog <<" In block "<< b << ",";
std::cout <<" beta[" << u << "," << k-L << "] = ";
std::cout << lme[u][k] << std::endl;
}
}
#if 0
// re-orthogonalization for numerical stability
if (b>0) {
for (int u=0; u<Nu; ++u) {
orthogonalize(w[u],evec,R);
}
for (int u=1; u<Nu; ++u) {
orthogonalize(w[u],w,u);
}
}
//if (b>0) {
// orthogonalize_blockhead(w[0],evec,b,Nu);
// for (int u=1; u<Nu; ++u) {
// orthogonalize(w[u],w,u);
// }
//}
#endif
if (b < Nm/Nu-1) {
for (int u=0; u<Nu; ++u) {
evec[R+u] = w[u];
}
}
}
void diagonalize_Eigen(std::vector<RealD>& eval,
std::vector<std::vector<ComplexD>>& lmd,
std::vector<std::vector<ComplexD>>& lme,
int Nu, int Nk, int Nm,
Eigen::MatrixXcd & Qt, // Nm x Nm
GridBase *grid)
{
assert( Nk%Nu == 0 && Nm%Nu == 0 );
assert( Nk <= Nm );
Eigen::MatrixXcd BlockTriDiag = Eigen::MatrixXcd::Zero(Nk,Nk);
for ( int u=0; u<Nu; ++u ) {
for (int k=0; k<Nk; ++k ) {
BlockTriDiag(k,u+(k/Nu)*Nu) = lmd[u][k];
}
}
for ( int u=0; u<Nu; ++u ) {
for (int k=Nu; k<Nk; ++k ) {
BlockTriDiag(k-Nu,u+(k/Nu)*Nu) = conjugate(lme[u][k-Nu]);
BlockTriDiag(u+(k/Nu)*Nu,k-Nu) = lme[u][k-Nu];
}
}
//std::cout << BlockTriDiag << std::endl;
Eigen::SelfAdjointEigenSolver<Eigen::MatrixXcd> eigensolver(BlockTriDiag);
for (int i = 0; i < Nk; i++) {
eval[Nk-1-i] = eigensolver.eigenvalues()(i);
}
for (int i = 0; i < Nk; i++) {
for (int j = 0; j < Nk; j++) {
Qt(j,Nk-1-i) = eigensolver.eigenvectors()(j,i);
//Qt(Nk-1-i,j) = eigensolver.eigenvectors()(i,j);
//Qt(i,j) = eigensolver.eigenvectors()(i,j);
}
}
}
void diagonalize(std::vector<RealD>& eval,
std::vector<std::vector<ComplexD>>& lmd,
std::vector<std::vector<ComplexD>>& lme,
int Nu, int Nk, int Nm,
Eigen::MatrixXcd & Qt,
GridBase *grid)
{
Qt = Eigen::MatrixXcd::Identity(Nm,Nm);
if ( diagonalisation == IRLdiagonaliseWithEigen ) {
diagonalize_Eigen(eval,lmd,lme,Nu,Nk,Nm,Qt,grid);
} else {
assert(0);
}
}
void unpackHermitBlockTriDiagMatToEigen(
std::vector<std::vector<ComplexD>>& lmd,
std::vector<std::vector<ComplexD>>& lme,
int Nu, int Nb, int Nk, int Nm,
Eigen::MatrixXcd& M)
{
//Glog << "unpackHermitBlockTriDiagMatToEigen() begin" << '\n';
assert( Nk%Nu == 0 && Nm%Nu == 0 );
assert( Nk <= Nm );
M = Eigen::MatrixXcd::Zero(Nk,Nk);
// rearrange
for ( int u=0; u<Nu; ++u ) {
for (int k=0; k<Nk; ++k ) {
M(k,u+(k/Nu)*Nu) = lmd[u][k];
}
}
for ( int u=0; u<Nu; ++u ) {
for (int k=Nu; k<Nk; ++k ) {
M(k-Nu,u+(k/Nu)*Nu) = conjugate(lme[u][k-Nu]);
M(u+(k/Nu)*Nu,k-Nu) = lme[u][k-Nu];
}
}
//Glog << "unpackHermitBlockTriDiagMatToEigen() end" << endl;
}
void packHermitBlockTriDiagMatfromEigen(
std::vector<std::vector<ComplexD>>& lmd,
std::vector<std::vector<ComplexD>>& lme,
int Nu, int Nb, int Nk, int Nm,
Eigen::MatrixXcd& M)
{
//Glog << "packHermitBlockTriDiagMatfromEigen() begin" << '\n';
assert( Nk%Nu == 0 && Nm%Nu == 0 );
assert( Nk <= Nm );
// rearrange
for ( int u=0; u<Nu; ++u ) {
for (int k=0; k<Nk; ++k ) {
lmd[u][k] = M(k,u+(k/Nu)*Nu);
}
}
for ( int u=0; u<Nu; ++u ) {
for (int k=Nu; k<Nk; ++k ) {
lme[u][k-Nu] = M(u+(k/Nu)*Nu,k-Nu);
}
}
//Glog << "packHermitBlockTriDiagMatfromEigen() end" << endl;
}
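// Layout illustration (Nu=2, first two blocks of the assembled band
// matrix; '*' denotes complex conjugation):
//   row 0:  lmd[0][0]  lmd[1][0] | lme[0][0]*  lme[1][0]*
//   row 1:  lmd[0][1]  lmd[1][1] | lme[0][1]*  lme[1][1]*
//   row 2:  lme[0][0]  lme[0][1] | lmd[0][2]   lmd[1][2]
//   row 3:  lme[1][0]  lme[1][1] | lmd[0][3]   lmd[1][3]
// i.e. Hermitian diagonal blocks from lmd and triangular beta blocks from
// lme, exactly the inverse of the unpack routine above.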
// assume the input matrix M is a band matrix
void shiftedQRDecompEigen(Eigen::MatrixXcd& M, int Nu, int Nm,
RealD Dsh,
Eigen::MatrixXcd& Qprod)
{
//Glog << "shiftedQRDecompEigen() begin" << '\n';
Eigen::MatrixXcd Q = Eigen::MatrixXcd::Zero(Nm,Nm);
Eigen::MatrixXcd R = Eigen::MatrixXcd::Zero(Nm,Nm);
Eigen::MatrixXcd Mtmp = Eigen::MatrixXcd::Zero(Nm,Nm);
Mtmp = M;
for (int i=0; i<Nm; ++i ) {
Mtmp(i,i) = M(i,i) - Dsh;
}
Eigen::HouseholderQR<Eigen::MatrixXcd> QRD(Mtmp);
Q = QRD.householderQ();
R = QRD.matrixQR(); // upper triangular part is the R matrix.
// lower triangular part used to represent series
// of Q sequence.
// equivalent operation of Qprod *= Q
//M = Eigen::MatrixXcd::Zero(Nm,Nm);
//for (int i=0; i<Nm; ++i) {
// for (int j=0; j<Nm-2*(Nu+1); ++j) {
// for (int k=0; k<2*(Nu+1)+j; ++k) {
// M(i,j) += Qprod(i,k)*Q(k,j);
// }
// }
//}
//for (int i=0; i<Nm; ++i) {
// for (int j=Nm-2*(Nu+1); j<Nm; ++j) {
// for (int k=0; k<Nm; ++k) {
// M(i,j) += Qprod(i,k)*Q(k,j);
// }
// }
//}
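// NB: the truncated k ranges below assume Q is banded with lower
// bandwidth ~ Nu+1 (as expected for the QR factor of a shifted Hermitian
// band matrix); this is what makes the product cheaper than dense Nm^3.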
Mtmp = Eigen::MatrixXcd::Zero(Nm,Nm);
for (int i=0; i<Nm; ++i) {
for (int j=0; j<Nm-(Nu+1); ++j) {
for (int k=0; k<Nu+1+j; ++k) {
Mtmp(i,j) += Qprod(i,k)*Q(k,j);
}
}
}
for (int i=0; i<Nm; ++i) {
for (int j=Nm-(Nu+1); j<Nm; ++j) {
for (int k=0; k<Nm; ++k) {
Mtmp(i,j) += Qprod(i,k)*Q(k,j);
}
}
}
//static int ntimes = 2;
//for (int j=0; j<Nm-(ntimes*Nu); ++j) {
// for (int i=ntimes*Nu+j; i<Nm; ++i) {
// Mtmp(i,j) = 0.0;
// }
//}
//ntimes++;
Qprod = Mtmp;
// equivalent operation of M = Q.adjoint()*(M*Q)
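// (since M - Dsh*I = Q R, we have Q†MQ = R Q + Dsh*I, so forming the
//  band product R*Q and restoring the shift on the diagonal below is
//  equivalent to the dense similarity transform)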
Mtmp = Eigen::MatrixXcd::Zero(Nm,Nm);
for (int a=0, i=0, kmax=0; a<Nu+1; ++a) {
for (int j=0; j<Nm-a; ++j) {
i = j+a;
kmax = (Nu+1)+j;
if (kmax > Nm) kmax = Nm;
for (int k=i; k<kmax; ++k) {
Mtmp(i,j) += R(i,k)*Q(k,j);
}
Mtmp(j,i) = conj(Mtmp(i,j));
}
}
for (int i=0; i<Nm; ++i) {
Mtmp(i,i) = real(Mtmp(i,i)) + Dsh;
}
M = Mtmp;
//M = Q.adjoint()*(M*Q);
//for (int i=0; i<Nm; ++i) {
// for (int j=0; j<Nm; ++j) {
// if (i==j) M(i,i) = real(M(i,i));
// if (j>i) M(i,j) = conj(M(j,i));
// if (i-j > Nu || j-i > Nu) M(i,j) = 0.;
// }
//}
//Glog << "shiftedQRDecompEigen() end" << endl;
}
void exampleQRDecompEigen(void)
{
Eigen::MatrixXd A = Eigen::MatrixXd::Zero(3,3);
Eigen::MatrixXd Q = Eigen::MatrixXd::Zero(3,3);
Eigen::MatrixXd R = Eigen::MatrixXd::Zero(3,3);
Eigen::MatrixXd P = Eigen::MatrixXd::Zero(3,3);
A(0,0) = 12.0;
A(0,1) = -51.0;
A(0,2) = 4.0;
A(1,0) = 6.0;
A(1,1) = 167.0;
A(1,2) = -68.0;
A(2,0) = -4.0;
A(2,1) = 24.0;
A(2,2) = -41.0;
Glog << "matrix A before ColPivHouseholder" << std::endl;
for ( int i=0; i<3; i++ ) {
for ( int j=0; j<3; j++ ) {
Glog << "A[" << i << "," << j << "] = " << A(i,j) << '\n';
}
}
Glog << std::endl;
Eigen::ColPivHouseholderQR<Eigen::MatrixXd> QRD(A);
Glog << "matrix A after ColPivHouseholder" << std::endl;
for ( int i=0; i<3; i++ ) {
for ( int j=0; j<3; j++ ) {
Glog << "A[" << i << "," << j << "] = " << A(i,j) << '\n';
}
}
Glog << std::endl;
Glog << "HouseholderQ with sequence lenth = nonzeroPiviots" << std::endl;
Q = QRD.householderQ().setLength(QRD.nonzeroPivots());
for ( int i=0; i<3; i++ ) {
for ( int j=0; j<3; j++ ) {
Glog << "Q[" << i << "," << j << "] = " << Q(i,j) << '\n';
}
}
Glog << std::endl;
Glog << "HouseholderQ with sequence lenth = 1" << std::endl;
Q = QRD.householderQ().setLength(1);
for ( int i=0; i<3; i++ ) {
for ( int j=0; j<3; j++ ) {
Glog << "Q[" << i << "," << j << "] = " << Q(i,j) << '\n';
}
}
Glog << std::endl;
Glog << "HouseholderQ with sequence lenth = 2" << std::endl;
Q = QRD.householderQ().setLength(2);
for ( int i=0; i<3; i++ ) {
for ( int j=0; j<3; j++ ) {
Glog << "Q[" << i << "," << j << "] = " << Q(i,j) << '\n';
}
}
Glog << std::endl;
Glog << "matrixR" << std::endl;
R = QRD.matrixR();
for ( int i=0; i<3; i++ ) {
for ( int j=0; j<3; j++ ) {
Glog << "R[" << i << "," << j << "] = " << R(i,j) << '\n';
}
}
Glog << std::endl;
Glog << "rank = " << QRD.rank() << std::endl;
Glog << "threshold = " << QRD.threshold() << std::endl;
Glog << "matrixP" << std::endl;
P = QRD.colsPermutation();
for ( int i=0; i<3; i++ ) {
for ( int j=0; j<3; j++ ) {
Glog << "P[" << i << "," << j << "] = " << P(i,j) << '\n';
}
}
Glog << std::endl;
Glog << "QR decomposition without column pivoting" << std::endl;
A(0,0) = 12.0;
A(0,1) = -51.0;
A(0,2) = 4.0;
A(1,0) = 6.0;
A(1,1) = 167.0;
A(1,2) = -68.0;
A(2,0) = -4.0;
A(2,1) = 24.0;
A(2,2) = -41.0;
Glog << "matrix A before Householder" << std::endl;
for ( int i=0; i<3; i++ ) {
for ( int j=0; j<3; j++ ) {
Glog << "A[" << i << "," << j << "] = " << A(i,j) << '\n';
}
}
Glog << std::endl;
Eigen::HouseholderQR<Eigen::MatrixXd> QRDplain(A);
Glog << "HouseholderQ" << std::endl;
Q = QRDplain.householderQ();
for ( int i=0; i<3; i++ ) {
for ( int j=0; j<3; j++ ) {
Glog << "Q[" << i << "," << j << "] = " << Q(i,j) << '\n';
}
}
Glog << std::endl;
Glog << "matrix A after Householder" << std::endl;
for ( int i=0; i<3; i++ ) {
for ( int j=0; j<3; j++ ) {
Glog << "A[" << i << "," << j << "] = " << A(i,j) << '\n';
}
}
Glog << std::endl;
}
};
}
#undef Glog
#endif


@@ -0,0 +1,835 @@
/*************************************************************************************
Grid physics library, www.github.com/paboyle/Grid
Source file: ./lib/algorithms/iterative/ImplicitlyRestartedBlockLanczos.h
Copyright (C) 2015
Author: Peter Boyle <paboyle@ph.ed.ac.uk>
Author: Chulwoo Jung
Author: Yong-Chull Jang <ypj@quark.phy.bnl.gov>
Author: Guido Cossu
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */
#ifndef GRID_IRBL_H
#define GRID_IRBL_H
#include <string.h> //memset
#define clog std::cout << GridLogMessage
namespace Grid {
/////////////////////////////////////////////////////////////
// Implicitly restarted block lanczos
/////////////////////////////////////////////////////////////
template<class Field>
class ImplicitlyRestartedBlockLanczos {
private:
std::string cname = std::string("ImplicitlyRestartedBlockLanczos");
int MaxIter; // Max iterations
int Nstop; // Number of evecs checked for convergence
int Nu; // Number of vecs in the unit block
int Nk; // Number of vectors sought
int Nm; // total number of vectors
int Nblock_k; // Nk/Nu
int Nblock_m; // Nm/Nu
RealD eresid;
IRLdiagonalisation diagonalisation;
////////////////////////////////////
// Embedded objects
////////////////////////////////////
SortEigen<Field> _sort;
LinearOperatorBase<Field> &_Linop;
OperatorFunction<Field> &_poly;
/////////////////////////
// Constructor
/////////////////////////
public:
ImplicitlyRestartedBlockLanczos(LinearOperatorBase<Field> &Linop, // op
OperatorFunction<Field> & poly, // polynomial
int _Nstop, // really sought vecs
int _Nu, // vecs in the unit block
int _Nk, // sought vecs
int _Nm, // total vecs
RealD _eresid, // resid in lmd deficit
int _MaxIter, // Max iterations
IRLdiagonalisation _diagonalisation = IRLdiagonaliseWithEigen)
: _Linop(Linop), _poly(poly),
Nstop(_Nstop), Nu(_Nu), Nk(_Nk), Nm(_Nm),
Nblock_m(_Nm/_Nu), Nblock_k(_Nk/_Nu),
//eresid(_eresid), MaxIter(10),
eresid(_eresid), MaxIter(_MaxIter),
diagonalisation(_diagonalisation)
{ assert( (Nk%Nu==0) && (Nm%Nu==0) ); };
////////////////////////////////
// Helpers
////////////////////////////////
static RealD normalize(Field& v)
{
RealD nn = norm2(v);
nn = sqrt(nn);
v = v * (1.0/nn);
return nn;
}
void orthogonalize(Field& w, std::vector<Field>& evec, int k)
{
typedef typename Field::scalar_type MyComplex;
MyComplex ip;
for(int j=0; j<k; ++j){
ip = innerProduct(evec[j],w);
w = w - ip * evec[j];
}
normalize(w);
}
/* Rudy Arthur's thesis pp.137
------------------------
Require: M > K, P = M - K
Compute the factorization A V_M = V_M H_M + f_M e_M†
repeat
  Q = I
  for i = 1,...,P do
    Q_i R_i = H_M - θ_i I ;  Q = Q Q_i
    H_M = Q_i† H_M Q_i
  end for
  β_K = H_M(K+1,K) ;  σ_K = Q(M,K)
  r = v_{K+1} β_K + r σ_K
  V_K = V_M(1:M) Q(1:M,1:K)
  H_K = H_M(1:K,1:K)
  → A V_K = V_K H_K + f_K e_K†   Extend to an M = K + P step factorization A V_M = V_M H_M + f_M e_M†
until convergence
*/
void calc(std::vector<RealD>& eval,
std::vector<Field>& evec,
const std::vector<Field>& src, int& Nconv)
{
std::string fname = std::string(cname+"::calc()");
GridBase *grid = evec[0]._grid;
assert(grid == src[0]._grid);
assert( Nu == (int)src.size() );
clog << std::string(74,'*') << std::endl;
clog << fname + " starting iteration 0 / "<< MaxIter<< std::endl;
clog << std::string(74,'*') << std::endl;
clog <<" -- seek Nk = "<< Nk <<" vectors"<< std::endl;
clog <<" -- accept Nstop = "<< Nstop <<" vectors"<< std::endl;
clog <<" -- total Nm = "<< Nm <<" vectors"<< std::endl;
clog <<" -- size of eval = "<< eval.size() << std::endl;
clog <<" -- size of evec = "<< evec.size() << std::endl;
if ( diagonalisation == IRLdiagonaliseWithEigen ) {
clog << "Diagonalisation is Eigen "<< std::endl;
} else {
abort();
}
clog << std::string(74,'*') << std::endl;
assert(Nm == evec.size() && Nm == eval.size());
std::vector<std::vector<ComplexD>> lmd(Nu,std::vector<ComplexD>(Nm,0.0));
std::vector<std::vector<ComplexD>> lme(Nu,std::vector<ComplexD>(Nm,0.0));
std::vector<std::vector<ComplexD>> lmd2(Nu,std::vector<ComplexD>(Nm,0.0));
std::vector<std::vector<ComplexD>> lme2(Nu,std::vector<ComplexD>(Nm,0.0));
std::vector<RealD> eval2(Nm);
Eigen::MatrixXcd Qt = Eigen::MatrixXcd::Zero(Nm,Nm);
Eigen::MatrixXcd Q = Eigen::MatrixXcd::Zero(Nm,Nm);
std::vector<int> Iconv(Nm);
std::vector<Field> B(Nm,grid); // waste of space replicating
std::vector<Field> f(Nu,grid);
std::vector<Field> f_copy(Nu,grid);
Field v(grid);
Nconv = 0;
RealD beta_k;
// set initial vector
for (int i=0; i<Nu; ++i) {
clog << "norm2(src[" << i << "])= "<< norm2(src[i]) << std::endl;
evec[i] = src[i];
orthogonalize(evec[i],evec,i);
clog << "norm2(evec[" << i << "])= "<< norm2(evec[i]) << std::endl;
}
// initial Nblock_k steps
for(int b=0; b<Nblock_k; ++b) blockwiseStep(lmd,lme,evec,f,f_copy,b);
// restarting loop begins
int iter;
for(iter = 0; iter<MaxIter; ++iter){
clog <<" **********************"<< std::endl;
clog <<" Restart iteration = "<< iter << std::endl;
clog <<" **********************"<< std::endl;
// additional (Nblock_m - Nblock_k) steps
for(int b=Nblock_k; b<Nblock_m; ++b) blockwiseStep(lmd,lme,evec,f,f_copy,b);
for(int k=0; k<Nm; ++k) {
clog << "ckpt A1: lme[" << k << "] = " << lme[0][k] << '\n';
}
for(int k=0; k<Nm; ++k) {
clog << "ckpt A2: lmd[" << k << "] = " << lmd[0][k] << '\n';
}
// residual vector
#if 1 // ypj[fixme] temporary, to check the case where the block has one vector
for ( int i=0; i<Nu; ++i) f_copy[i] = f[i];
for ( int i=0; i<Nu; ++i) {
f[i] = f_copy[0]*lme[0][Nm-Nu+i];
for ( int j=1; j<Nu; ++j) {
f[i] += f_copy[j]*lme[j][Nm-Nu+i];
}
//clog << "ckpt C (i= " << i << ")" << '\n';
//clog << "norm2(f) = " << norm2(f[i]) << std::endl;
}
#endif
// getting eigenvalues
for(int u=0; u<Nu; ++u){
for(int k=0; k<Nm; ++k){
lmd2[u][k] = lmd[u][k];
lme2[u][k] = lme[u][k];
}
}
Qt = Eigen::MatrixXcd::Identity(Nm,Nm);
diagonalize(eval2,lmd2,lme2,Nu,Nm,Nm,Qt,grid);
//for(int k=0; k<Nm; ++k){
// clog << "ckpt D " << '\n';
// clog << "eval2 [" << k << "] = " << eval2[k] << std::endl;
//}
// sorting
_sort.push(eval2,Nm);
//for(int k=0; k<Nm; ++k){
// clog << "ckpt E " << '\n';
// clog << "eval2 [" << k << "] = " << eval2[k] << std::endl;
//}
// Implicitly shifted QR transformations
Eigen::MatrixXcd BTDM = Eigen::MatrixXcd::Identity(Nm,Nm);
Q = Eigen::MatrixXcd::Identity(Nm,Nm);
unpackHermitBlockTriDiagMatToEigen(lmd,lme,Nu,Nblock_m,Nm,Nm,BTDM);
for(int ip=Nk; ip<Nm; ++ip){
clog << "ckpt B1: shift[" << ip << "] = " << eval2[ip] << endl;
shiftedQRDecompEigen(BTDM,Nm,eval2[ip],Q);
}
BTDM = Q.adjoint()*(BTDM*Q);
for (int i=0; i<Nm; ++i ) {
for (int j=i+1; j<Nm; ++j ) {
BTDM(i,j) = BTDM(j,i);
}
//BTDM(i,i) = real(BTDM(i,i));
}
packHermitBlockTriDiagMatfromEigen(lmd,lme,Nu,Nblock_m,Nm,Nm,BTDM);
//for (int i=0; i<Nm; ++i) {
// for (int j=0; j<Nm; ++j) {
// clog << "ckpt G1: M[" << i << "," << j << "] = " << BTDM(i,j) << '\n';
// }
//}
//for (int i=0; i<Nm; ++i) {
// for (int j=0; j<Nm; ++j) {
// clog << "ckpt G2: Q[" << i << "," << j << "] = " << Q(i,j) << '\n';
// }
//}
for (int i=0; i<Nm; ++i) {
clog << "ckpt C1: lme[" << i << "] = " << lme[0][i] << '\n';
}
for (int i=0; i<Nm; ++i) {
clog << "ckpt C2: lmd[" << i << "] = " << lmd[0][i] << '\n';
}
for(int i=0; i<Nk+Nu; ++i) B[i] = 0.0;
for(int j=0; j<Nk+Nu; ++j){
for(int k=0; k<Nm; ++k){
B[j].checkerboard = evec[k].checkerboard;
B[j] += evec[k]*Q(k,j);
}
}
for(int i=0; i<Nk+Nu; ++i) {
evec[i] = B[i];
//clog << "ckpt F: norm2_evec[= " << i << "]" << norm2(evec[i]) << std::endl;
}
#if 1 // ypj[fixme] temporary, to check the case where the block has one vector
// Compressed vector f and beta(k2)
f[0] *= Q(Nm-1,Nk-1);
f[0] += lme[0][Nk-1] * evec[Nk]; // was commented out
std::cout<< GridLogMessage<<"ckpt D1: Q[Nm-1,Nk-1] = "<<Q(Nm-1,Nk-1)<<std::endl;
beta_k = norm2(f[0]);
beta_k = sqrt(beta_k);
std::cout<< GridLogMessage<<"ckpt D2: beta(k) = "<<beta_k<<std::endl;
RealD betar = 1.0/beta_k;
evec[Nk] = betar * f[0];
lme[0][Nk-1] = beta_k;
#endif
// Convergence test
for(int u=0; u<Nu; ++u){
for(int k=0; k<Nm; ++k){
lmd2[u][k] = lmd[u][k];
lme2[u][k] = lme[u][k];
}
}
Qt = Eigen::MatrixXcd::Identity(Nm,Nm);
diagonalize(eval2,lmd2,lme2,Nu,Nk,Nm,Qt,grid);
for(int k = 0; k<Nk; ++k) B[k]=0.0;
for(int j = 0; j<Nk; ++j){
for(int k = 0; k<Nk; ++k){
B[j].checkerboard = evec[k].checkerboard;
B[j] += evec[k]*Qt(k,j);
}
}
//for (int i=0; i<Nk; ++i) {
// for (int j=0; j<Nk; ++j) {
// clog << "ckpt H1: R[" << i << "," << j << "] = " << Qt(i,j) << '\n';
// }
//}
//for (int i=0; i<Nk; ++i) {
// clog << "ckpt H2: eval2[" << i << "] = " << eval2[i] << '\n';
//}
//for(int j=0; j<Nk; ++j) {
// clog << "ckpt I: norm2_B[ " << j << "]" << norm2(B[j]) << std::endl;
//}
Nconv = 0;
for(int i=0; i<Nk; ++i){
_Linop.HermOp(B[i],v);
RealD vnum = real(innerProduct(B[i],v)); // HermOp.
RealD vden = norm2(B[i]);
eval2[i] = vnum/vden;
v -= eval2[i]*B[i];
RealD vv = norm2(v);
std::cout.precision(13);
clog << "[" << std::setw(3)<< std::setiosflags(std::ios_base::right) <<i<<"] ";
std::cout << "eval = "<<std::setw(25)<< std::setiosflags(std::ios_base::left)<< eval2[i];
std::cout << " |H B[i] - eval[i]B[i]|^2 "<< std::setw(25)<< std::setiosflags(std::ios_base::right)<< vv<< std::endl;
// criterion changed: the evals are sorted, so all evals smaller (larger) than the Nstop-th should have converged
if( (vv<eresid*eresid) && (i == Nconv) ){
//if( (vv<eresid*eresid) ){
Iconv[Nconv] = i;
++Nconv;
}
} // i-loop end
clog <<" #modes converged: "<<Nconv<<std::endl;
if( Nconv>=Nstop ){
goto converged;
}
} // end of iter loop
clog <<"**************************************************************************"<< std::endl;
std::cout<< GridLogError << fname + " NOT converged.";
clog <<"**************************************************************************"<< std::endl;
abort();
converged:
// Sorting
eval.resize(Nconv);
evec.resize(Nconv,grid);
for(int i=0; i<Nconv; ++i){
eval[i] = eval2[Iconv[i]];
evec[i] = B[Iconv[i]];
}
_sort.push(eval,evec,Nconv);
clog <<"**************************************************************************"<< std::endl;
clog << fname + " CONVERGED ; Summary :\n";
clog <<"**************************************************************************"<< std::endl;
clog << " -- Iterations = "<< iter << "\n";
clog << " -- beta(k) = "<< beta_k << "\n";
clog << " -- Nconv = "<< Nconv << "\n";
clog <<"**************************************************************************"<< std::endl;
}
private:
/* Saad PP. 195
1. Choose an initial vector v_1 of 2-norm unity. Set β_1 ≡ 0, v_0 ≡ 0
2. For k = 1,2,...,m Do:
3.   w_k := A v_k - β_k v_{k-1}
4.   α_k := (w_k, v_k)
5.   w_k := w_k - α_k v_k          // w_k orthogonal to v_k
6.   β_{k+1} := ∥w_k∥_2. If β_{k+1} = 0 then Stop
7.   v_{k+1} := w_k / β_{k+1}
8. EndDo
*/
void blockwiseStep(std::vector<std::vector<ComplexD>>& lmd,
std::vector<std::vector<ComplexD>>& lme,
std::vector<Field>& evec,
std::vector<Field>& w,
std::vector<Field>& w_copy,
int b)
{
const RealD tiny = 1.0e-20;
int Nu = w.size();
int Nm = evec.size();
assert( b < Nm/Nu );
// converts block index to full indices for an interval [L,R)
int L = Nu*b;
int R = Nu*(b+1);
Real beta;
// 3. w_k := A v_k - β_k v_{k-1}
for (int k=L, u=0; k<R; ++k, ++u) {
_poly(_Linop,evec[k],w[u]);
}
if (b>0) {
for (int u=0; u<Nu; ++u) {
for (int k=L-Nu; k<L; ++k) {
w[u] = w[u] - evec[k] * conjugate(lme[u][k]);
//clog << "ckpt A (k= " << k+1 << ")" << '\n';
//clog << "lme = " << lme[u][k] << '\n';
//clog << "lme = " << conjugate(lme[u][k]) << '\n';
}
//clog << "norm(w) = " << norm2(w[u]) << std::endl;
}
}
// 4. α_k := (v_k, w_k)
for (int u=0; u<Nu; ++u) {
for (int k=L; k<R; ++k) {
lmd[u][k] = innerProduct(evec[k],w[u]); // lmd = transpose of alpha
}
lmd[u][L+u] = real(lmd[u][L+u]); // force diagonal to be real
//clog << "ckpt B (k= " << L+u << ")" << '\n';
//clog << "lmd = " << lmd[u][L+u] << std::endl;
}
// 5. w_k := w_k - α_k v_k
for (int u=0; u<Nu; ++u) {
for (int k=L; k<R; ++k) {
w[u] = w[u] - evec[k]*lmd[u][k];
}
w_copy[u] = w[u];
}
// In the block version, steps 6 and 7 of the Lanczos construction are
// replaced by a QR decomposition of the new basis block.
// This yields the block version of beta and an orthonormal block basis.
// Here, the QR decomposition is done using Gram-Schmidt.
for (int u=0; u<Nu; ++u) {
for (int k=L; k<R; ++k) {
lme[u][k] = 0.0;
}
}
beta = normalize(w[0]);
for (int u=1; u<Nu; ++u) {
//orthogonalize(w[u],w_copy,u);
orthogonalize(w[u],w,u);
}
for (int u=0; u<Nu; ++u) {
for (int v=0; v<Nu; ++v) {
lme[u][L+v] = innerProduct(w[u],w_copy[v]);
}
}
lme[0][L] = beta;
#if 0
for (int u=0; u<Nu; ++u) {
for (int k=L+u; k<R; ++k) {
if (lme[u][k] < tiny) {
clog <<" In block "<< b << ",";
std::cout <<" beta[" << u << "," << k-L << "] = ";
std::cout << lme[u][k] << std::endl;
}
}
}
#else
for (int u=0; u<Nu; ++u) {
clog << "norm2(w[" << u << "])= "<< norm2(w[u]) << std::endl;
for (int k=L+u; k<R; ++k) {
clog <<" In block "<< b << ",";
std::cout <<" beta[" << u << "," << k-L << "] = ";
std::cout << lme[u][k] << std::endl;
}
}
#endif
// re-orthogonalization for numerical stability
if (b>0) {
for (int u=0; u<Nu; ++u) {
orthogonalize(w[u],evec,R);
}
}
if (b < Nm/Nu-1) {
for (int u=0; u<Nu; ++u) {
evec[R+u] = w[u];
}
}
}
void diagonalize_Eigen(std::vector<RealD>& eval,
std::vector<std::vector<ComplexD>>& lmd,
std::vector<std::vector<ComplexD>>& lme,
int Nu, int Nk, int Nm,
Eigen::MatrixXcd & Qt, // Nm x Nm
GridBase *grid)
{
assert( Nk%Nu == 0 && Nm%Nu == 0 );
assert( Nk <= Nm );
Eigen::MatrixXcd BlockTriDiag = Eigen::MatrixXcd::Zero(Nk,Nk);
for ( int u=0; u<Nu; ++u ) {
for (int k=0; k<Nk; ++k ) {
BlockTriDiag(k,u+(k/Nu)*Nu) = lmd[u][k];
}
}
for ( int u=0; u<Nu; ++u ) {
for (int k=Nu; k<Nk; ++k ) {
BlockTriDiag(k-Nu,u+(k/Nu)*Nu) = conjugate(lme[u][k-Nu]);
BlockTriDiag(u+(k/Nu)*Nu,k-Nu) = lme[u][k-Nu];
}
}
//std::cout << BlockTriDiag << std::endl;
Eigen::SelfAdjointEigenSolver<Eigen::MatrixXcd> eigensolver(BlockTriDiag);
for (int i = 0; i < Nk; i++) {
eval[Nk-1-i] = eigensolver.eigenvalues()(i);
}
for (int i = 0; i < Nk; i++) {
for (int j = 0; j < Nk; j++) {
Qt(j,Nk-1-i) = eigensolver.eigenvectors()(j,i);
//Qt(Nk-1-i,j) = eigensolver.eigenvectors()(i,j);
//Qt(i,j) = eigensolver.eigenvectors()(i,j);
}
}
}
void diagonalize(std::vector<RealD>& eval,
std::vector<std::vector<ComplexD>>& lmd,
std::vector<std::vector<ComplexD>>& lme,
int Nu, int Nk, int Nm,
Eigen::MatrixXcd & Qt,
GridBase *grid)
{
Qt = Eigen::MatrixXcd::Identity(Nm,Nm);
if ( diagonalisation == IRLdiagonaliseWithEigen ) {
diagonalize_Eigen(eval,lmd,lme,Nu,Nk,Nm,Qt,grid);
} else {
assert(0);
}
}
void unpackHermitBlockTriDiagMatToEigen(
std::vector<std::vector<ComplexD>>& lmd,
std::vector<std::vector<ComplexD>>& lme,
int Nu, int Nb, int Nk, int Nm,
Eigen::MatrixXcd& M)
{
//clog << "unpackHermitBlockTriDiagMatToEigen() begin" << '\n';
assert( Nk%Nu == 0 && Nm%Nu == 0 );
assert( Nk <= Nm );
M = Eigen::MatrixXcd::Zero(Nk,Nk);
// rearrange
for ( int u=0; u<Nu; ++u ) {
for (int k=0; k<Nk; ++k ) {
M(k,u+(k/Nu)*Nu) = lmd[u][k];
}
}
for ( int u=0; u<Nu; ++u ) {
for (int k=Nu; k<Nk; ++k ) {
M(k-Nu,u+(k/Nu)*Nu) = conjugate(lme[u][k-Nu]);
M(u+(k/Nu)*Nu,k-Nu) = lme[u][k-Nu];
}
}
//clog << "unpackHermitBlockTriDiagMatToEigen() end" << endl;
}
void packHermitBlockTriDiagMatfromEigen(
std::vector<std::vector<ComplexD>>& lmd,
std::vector<std::vector<ComplexD>>& lme,
int Nu, int Nb, int Nk, int Nm,
Eigen::MatrixXcd& M)
{
//clog << "packHermitBlockTriDiagMatfromEigen() begin" << '\n';
assert( Nk%Nu == 0 && Nm%Nu == 0 );
assert( Nk <= Nm );
// rearrange
for ( int u=0; u<Nu; ++u ) {
for (int k=0; k<Nk; ++k ) {
lmd[u][k] = M(k,u+(k/Nu)*Nu);
}
}
for ( int u=0; u<Nu; ++u ) {
for (int k=Nu; k<Nk; ++k ) {
lme[u][k-Nu] = M(u+(k/Nu)*Nu,k-Nu);
}
}
//clog << "packHermitBlockTriDiagMatfromEigen() end" << endl;
}
// void shiftedQRDecompEigen(Eigen::MatrixXcd& M, int Nm,
// RealD Dsh,
// Eigen::MatrixXcd& Qprod, int Nk)
// {
// //clog << "shiftedQRDecompEigen() begin" << '\n';
// Eigen::MatrixXcd Mtmp = Eigen::MatrixXcd::Zero(Nm,Nm);
// Eigen::MatrixXcd Q = Eigen::MatrixXcd::Zero(Nm,Nm);
//
// Mtmp = M;
// for (int i=0; i<Nm; ++i ) {
// Mtmp(i,i) = M(i,i) - Dsh;
// }
//
// Eigen::HouseholderQR<Eigen::MatrixXcd> QRD(Mtmp);
// Q = QRD.householderQ();
//
// M = Q.adjoint()*(M*Q);
//#if 0
// Qprod *= Q;
//#else
// Mtmp = Qprod*Q;
//
// Eigen::HouseholderQR<Eigen::MatrixXcd> QRD2(Mtmp);
// Qprod = QRD2.householderQ();
//
// Mtmp -= Qprod;
// clog << "Frobenius norm ||Qprod(after) - Qprod|| = " << Mtmp.norm() << std::endl;
//#endif
// //clog << "shiftedQRDecompEigen() end" << endl;
// }
void shiftedQRDecompEigen(Eigen::MatrixXcd& M, int Nm,
RealD Dsh,
Eigen::MatrixXcd& Qprod)
{
//clog << "shiftedQRDecompEigen() begin" << '\n';
Eigen::MatrixXcd Mtmp = Eigen::MatrixXcd::Zero(Nm,Nm);
//Eigen::MatrixXcd Qtmp = Eigen::MatrixXcd::Zero(Nm,Nm);
Mtmp = Qprod.adjoint()*(M*Qprod);
for (int i=0; i<Nm; ++i ) {
for (int j=i+1; j<Nm; ++j ) {
Mtmp(i,j) = Mtmp(j,i);
}
}
for (int i=0; i<Nm; ++i ) {
Mtmp(i,i) -= Dsh;
//Mtmp(i,i) = real(Mtmp(i,i)-Dsh);
}
Eigen::HouseholderQR<Eigen::MatrixXcd> QRD(Mtmp);
//Qtmp = Qprod*QRD.householderQ();
//Eigen::HouseholderQR<Eigen::MatrixXcd> QRD2(Qtmp);
//Qprod = QRD2.householderQ();
Qprod *= QRD.householderQ();
//ComplexD p;
//RealD r;
//r = 0.;
//for (int k=0; k<Nm; ++k) r += real(conj(Qprod(k,0))*Qprod(k,0));
//r = sqrt(r);
//for (int k=0; k<Nm; ++k) Qprod(k,0) /= r;
//
//for (int i=1; i<Nm; ++i) {
// for (int j=0; j<i; ++j) {
// p = 0.;
// for (int k=0; k<Nm; ++k) {
// p += conj(Qprod(k,j))*Qprod(k,i);
// }
// for (int k=0; k<Nm; ++k) {
// Qprod(k,i) -= p*Qprod(k,j);
// }
// }
// r = 0.;
// for (int k=0; k<Nm; ++k) r += real(conj(Qprod(k,i))*Qprod(k,i));
// r = sqrt(r);
// for (int k=0; k<Nm; ++k) Qprod(k,i) /= r;
//}
//clog << "shiftedQRDecompEigen() end" << endl;
}
void exampleQRDecompEigen(void)
{
Eigen::MatrixXd A = Eigen::MatrixXd::Zero(3,3);
Eigen::MatrixXd Q = Eigen::MatrixXd::Zero(3,3);
Eigen::MatrixXd R = Eigen::MatrixXd::Zero(3,3);
Eigen::MatrixXd P = Eigen::MatrixXd::Zero(3,3);
A(0,0) = 12.0;
A(0,1) = -51.0;
A(0,2) = 4.0;
A(1,0) = 6.0;
A(1,1) = 167.0;
A(1,2) = -68.0;
A(2,0) = -4.0;
A(2,1) = 24.0;
A(2,2) = -41.0;
clog << "matrix A before ColPivHouseholder" << std::endl;
for ( int i=0; i<3; i++ ) {
for ( int j=0; j<3; j++ ) {
clog << "A[" << i << "," << j << "] = " << A(i,j) << '\n';
}
}
clog << std::endl;
Eigen::ColPivHouseholderQR<Eigen::MatrixXd> QRD(A);
clog << "matrix A after ColPivHouseholder" << std::endl;
for ( int i=0; i<3; i++ ) {
for ( int j=0; j<3; j++ ) {
clog << "A[" << i << "," << j << "] = " << A(i,j) << '\n';
}
}
clog << std::endl;
clog << "HouseholderQ with sequence lenth = nonzeroPiviots" << std::endl;
Q = QRD.householderQ().setLength(QRD.nonzeroPivots());
for ( int i=0; i<3; i++ ) {
for ( int j=0; j<3; j++ ) {
clog << "Q[" << i << "," << j << "] = " << Q(i,j) << '\n';
}
}
clog << std::endl;
clog << "HouseholderQ with sequence lenth = 1" << std::endl;
Q = QRD.householderQ().setLength(1);
for ( int i=0; i<3; i++ ) {
for ( int j=0; j<3; j++ ) {
clog << "Q[" << i << "," << j << "] = " << Q(i,j) << '\n';
}
}
clog << std::endl;
clog << "HouseholderQ with sequence lenth = 2" << std::endl;
Q = QRD.householderQ().setLength(2);
for ( int i=0; i<3; i++ ) {
for ( int j=0; j<3; j++ ) {
clog << "Q[" << i << "," << j << "] = " << Q(i,j) << '\n';
}
}
clog << std::endl;
clog << "matrixR" << std::endl;
R = QRD.matrixR();
for ( int i=0; i<3; i++ ) {
for ( int j=0; j<3; j++ ) {
clog << "R[" << i << "," << j << "] = " << R(i,j) << '\n';
}
}
clog << std::endl;
clog << "rank = " << QRD.rank() << std::endl;
clog << "threshold = " << QRD.threshold() << std::endl;
clog << "matrixP" << std::endl;
P = QRD.colsPermutation();
for ( int i=0; i<3; i++ ) {
for ( int j=0; j<3; j++ ) {
clog << "P[" << i << "," << j << "] = " << P(i,j) << '\n';
}
}
clog << std::endl;
clog << "QR decomposition without column pivoting" << std::endl;
A(0,0) = 12.0;
A(0,1) = -51.0;
A(0,2) = 4.0;
A(1,0) = 6.0;
A(1,1) = 167.0;
A(1,2) = -68.0;
A(2,0) = -4.0;
A(2,1) = 24.0;
A(2,2) = -41.0;
clog << "matrix A before Householder" << std::endl;
for ( int i=0; i<3; i++ ) {
for ( int j=0; j<3; j++ ) {
clog << "A[" << i << "," << j << "] = " << A(i,j) << '\n';
}
}
clog << std::endl;
Eigen::HouseholderQR<Eigen::MatrixXd> QRDplain(A);
clog << "HouseholderQ" << std::endl;
Q = QRDplain.householderQ();
for ( int i=0; i<3; i++ ) {
for ( int j=0; j<3; j++ ) {
clog << "Q[" << i << "," << j << "] = " << Q(i,j) << '\n';
}
}
clog << std::endl;
clog << "matrix A after Householder" << std::endl;
for ( int i=0; i<3; i++ ) {
for ( int j=0; j<3; j++ ) {
clog << "A[" << i << "," << j << "] = " << A(i,j) << '\n';
}
}
clog << std::endl;
}
};
}
#undef clog
#endif

File diff suppressed because it is too large.


@@ -57,8 +57,9 @@ void basisRotate(std::vector<Field> &basis,Eigen::MatrixXd& Qt,int j0, int j1, i
parallel_region
{
- std::vector < vobj > B(Nm); // Thread private
+ std::vector < vobj , commAllocator<vobj> > B(Nm); // Thread private
parallel_for_internal(int ss=0;ss < grid->oSites();ss++){
for(int j=j0; j<j1; ++j) B[j]=0.;


@@ -0,0 +1,625 @@
/*************************************************************************************
Grid physics library, www.github.com/paboyle/Grid
Source file: ./lib/algorithms/iterative/ImplicitlyRestartedLanczos.h
Copyright (C) 2015
Author: Peter Boyle <paboyle@ph.ed.ac.uk>
Author: Chulwoo Jung
Author: Guido Cossu
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */
#ifndef GRID_IRL_H
#define GRID_IRL_H
#include <string.h> //memset
namespace Grid {
enum IRLdiagonalisation {
IRLdiagonaliseWithDSTEGR,
IRLdiagonaliseWithQR,
IRLdiagonaliseWithEigen
};
////////////////////////////////////////////////////////////////////////////////
// Helper class for sorting the evalues AND evectors by Field
// Use pointer swizzle on vectors
////////////////////////////////////////////////////////////////////////////////
template<class Field>
class SortEigen {
private:
static bool less_lmd(RealD left,RealD right){
return left > right;
}
static bool less_pair(std::pair<RealD,Field const*>& left,
std::pair<RealD,Field const*>& right){
return left.first > (right.first);
}
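// NB: both comparators use '>', so push() sorts the eigenvalues in
// descending order: the largest Ritz values come first.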
public:
void push(std::vector<RealD>& lmd,std::vector<Field>& evec,int N) {
////////////////////////////////////////////////////////////////////////
// PAB: FIXME: VERY VERY VERY wasteful: takes a copy of the entire vector set.
// : The vector reorder should be done by pointer swizzle somehow
////////////////////////////////////////////////////////////////////////
std::vector<Field> cpy(lmd.size(),evec[0]._grid);
for(int i=0;i<lmd.size();i++) cpy[i] = evec[i];
std::vector<std::pair<RealD, Field const*> > emod(lmd.size());
for(int i=0;i<lmd.size();++i) emod[i] = std::pair<RealD,Field const*>(lmd[i],&cpy[i]);
std::partial_sort(emod.begin(),emod.begin()+N,emod.end(),less_pair);
typename std::vector<std::pair<RealD, Field const*> >::iterator it = emod.begin();
for(int i=0;i<N;++i){
lmd[i]=it->first;
evec[i]=*(it->second);
++it;
}
}
void push(std::vector<RealD>& lmd,int N) {
std::partial_sort(lmd.begin(),lmd.begin()+N,lmd.end(),less_lmd);
}
bool saturated(RealD lmd, RealD thrs) {
return fabs(lmd) > fabs(thrs);
}
};
/////////////////////////////////////////////////////////////
// Implicitly restarted lanczos
/////////////////////////////////////////////////////////////
template<class Field>
class ImplicitlyRestartedLanczos {
private:
int MaxIter; // Max iterations
int Nstop; // Number of evecs checked for convergence
int Nk; // Number of vectors sought
int Nm; // Nm -- total number of vectors
RealD eresid;
IRLdiagonalisation diagonalisation;
////////////////////////////////////
// Embedded objects
////////////////////////////////////
SortEigen<Field> _sort;
LinearOperatorBase<Field> &_Linop;
OperatorFunction<Field> &_poly;
/////////////////////////
// Constructor
/////////////////////////
public:
ImplicitlyRestartedLanczos(LinearOperatorBase<Field> &Linop, // op
OperatorFunction<Field> & poly, // polynomial
int _Nstop, // really sought vecs
int _Nk, // sought vecs
int _Nm, // total vecs
RealD _eresid, // resid in lmd deficit
int _MaxIter, // Max iterations
IRLdiagonalisation _diagonalisation= IRLdiagonaliseWithEigen ) :
_Linop(Linop), _poly(poly),
Nstop(_Nstop), Nk(_Nk), Nm(_Nm),
eresid(_eresid), MaxIter(_MaxIter),
diagonalisation(_diagonalisation)
{ };
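////////////////////////////////
// Minimal usage sketch (illustration only, never compiled; HermOp,
// FrbGrid, RNG5rb and the parameter values are hypothetical, following
// the pattern of the Test_dwf_lanczos example):
////////////////////////////////
#if 0
Chebyshev<LatticeFermion> Cheby(0.2,5.5,31);
ImplicitlyRestartedLanczos<LatticeFermion> IRL(HermOp,Cheby,
                                               Nstop,Nk,Nm,1.0e-8,MaxIter);
std::vector<RealD>          eval(Nm);
std::vector<LatticeFermion> evec(Nm,FrbGrid);
LatticeFermion src(FrbGrid);
gaussian(RNG5rb,src);                         // random start vector
int Nconv;
IRL.calc(eval,evec,src,Nconv);
#endif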
////////////////////////////////
// Helpers
////////////////////////////////
static RealD normalise(Field& v)
{
RealD nn = norm2(v);
nn = sqrt(nn);
v = v * (1.0/nn);
return nn;
}
void orthogonalize(Field& w, std::vector<Field>& evec, int k)
{
typedef typename Field::scalar_type MyComplex;
MyComplex ip;
for(int j=0; j<k; ++j){
ip = innerProduct(evec[j],w);
w = w - ip * evec[j];
}
normalise(w);
}
/* Rudy Arthur's thesis pp.137
------------------------
Require: M > K, P = M - K
Compute the factorization A V_M = V_M H_M + f_M e_M†
repeat
  Q = I
  for i = 1,...,P do
    Q_i R_i = H_M - θ_i I ;  Q = Q Q_i
    H_M = Q_i† H_M Q_i
  end for
  β_K = H_M(K+1,K) ;  σ_K = Q(M,K)
  r = v_{K+1} β_K + r σ_K
  V_K = V_M(1:M) Q(1:M,1:K)
  H_K = H_M(1:K,1:K)
  → A V_K = V_K H_K + f_K e_K†   Extend to an M = K + P step factorization A V_M = V_M H_M + f_M e_M†
until convergence
*/
void calc(std::vector<RealD>& eval, std::vector<Field>& evec, const Field& src, int& Nconv)
{
GridBase *grid = evec[0]._grid;
assert(grid == src._grid);
std::cout << GridLogMessage <<"**************************************************************************"<< std::endl;
std::cout << GridLogMessage <<" ImplicitlyRestartedLanczos::calc() starting iteration 0 / "<< MaxIter<< std::endl;
std::cout << GridLogMessage <<"**************************************************************************"<< std::endl;
std::cout << GridLogMessage <<" -- seek Nk = " << Nk <<" vectors"<< std::endl;
std::cout << GridLogMessage <<" -- accept Nstop = " << Nstop <<" vectors"<< std::endl;
std::cout << GridLogMessage <<" -- total Nm = " << Nm <<" vectors"<< std::endl;
std::cout << GridLogMessage <<" -- size of eval = " << eval.size() << std::endl;
std::cout << GridLogMessage <<" -- size of evec = " << evec.size() << std::endl;
if ( diagonalisation == IRLdiagonaliseWithDSTEGR ) {
std::cout << GridLogMessage << "Diagonalisation is DSTEGR "<<std::endl;
} else if ( diagonalisation == IRLdiagonaliseWithQR ) {
std::cout << GridLogMessage << "Diagonalisation is QR "<<std::endl;
} else if ( diagonalisation == IRLdiagonaliseWithEigen ) {
std::cout << GridLogMessage << "Diagonalisation is Eigen "<<std::endl;
}
std::cout << GridLogMessage <<"**************************************************************************"<< std::endl;
assert(Nm == evec.size() && Nm == eval.size());
std::vector<RealD> lme(Nm);
std::vector<RealD> lme2(Nm);
std::vector<RealD> eval2(Nm);
Eigen::MatrixXd Qt = Eigen::MatrixXd::Zero(Nm,Nm);
std::vector<int> Iconv(Nm);
std::vector<Field> B(Nm,grid); // waste of space replicating
Field f(grid);
Field v(grid);
int k1 = 1;
int k2 = Nk;
Nconv = 0;
RealD beta_k;
// Set initial vector
evec[0] = src;
std::cout << GridLogMessage <<"norm2(src)= " << norm2(src)<<std::endl;
normalise(evec[0]);
std::cout << GridLogMessage <<"norm2(evec[0])= " << norm2(evec[0]) <<std::endl;
// Initial Nk steps
for(int k=0; k<Nk; ++k) step(eval,lme,evec,f,Nm,k);
// Restarting loop begins
int iter;
for(iter = 0; iter<MaxIter; ++iter){
std::cout<< GridLogMessage <<" **********************"<< std::endl;
std::cout<< GridLogMessage <<" Restart iteration = "<< iter << std::endl;
std::cout<< GridLogMessage <<" **********************"<< std::endl;
for(int k=Nk; k<Nm; ++k) step(eval,lme,evec,f,Nm,k);
f *= lme[Nm-1];
// getting eigenvalues
for(int k=0; k<Nm; ++k){
eval2[k] = eval[k+k1-1];
lme2[k] = lme[k+k1-1];
}
Qt = Eigen::MatrixXd::Identity(Nm,Nm);
diagonalize(eval2,lme2,Nm,Nm,Qt,grid);
// sorting
_sort.push(eval2,Nm);
// Implicitly shifted QR transformations
Qt = Eigen::MatrixXd::Identity(Nm,Nm);
for(int ip=k2; ip<Nm; ++ip){
// Eigen replacement for qr_decomp ???
qr_decomp(eval,lme,Nm,Nm,Qt,eval2[ip],k1,Nm);
}
for(int i=0; i<(Nk+1); ++i) B[i] = 0.0;
for(int j=k1-1; j<k2+1; ++j){
for(int k=0; k<Nm; ++k){
B[j].checkerboard = evec[k].checkerboard;
B[j] += Qt(j,k) * evec[k];
}
}
for(int j=k1-1; j<k2+1; ++j) evec[j] = B[j];
// Compressed vector f and beta(k2)
f *= Qt(k2-1,Nm-1);
f += lme[k2-1] * evec[k2];
beta_k = norm2(f);
beta_k = sqrt(beta_k);
std::cout<< GridLogMessage<<" beta(k) = "<<beta_k<<std::endl;
RealD betar = 1.0/beta_k;
evec[k2] = betar * f;
lme[k2-1] = beta_k;
// Convergence test
for(int k=0; k<Nm; ++k){
eval2[k] = eval[k];
lme2[k] = lme[k];
}
Qt = Eigen::MatrixXd::Identity(Nm,Nm);
diagonalize(eval2,lme2,Nk,Nm,Qt,grid);
for(int k = 0; k<Nk; ++k) B[k]=0.0;
for(int j = 0; j<Nk; ++j){
for(int k = 0; k<Nk; ++k){
B[j].checkerboard = evec[k].checkerboard;
B[j] += Qt(j,k) * evec[k];
}
}
Nconv = 0;
for(int i=0; i<Nk; ++i){
_Linop.HermOp(B[i],v);
RealD vnum = real(innerProduct(B[i],v)); // HermOp.
RealD vden = norm2(B[i]);
eval2[i] = vnum/vden;
v -= eval2[i]*B[i];
RealD vv = norm2(v);
std::cout.precision(13);
std::cout << GridLogMessage << "[" << std::setw(3)<< std::setiosflags(std::ios_base::right) <<i<<"] ";
std::cout << "eval = "<<std::setw(25)<< std::setiosflags(std::ios_base::left)<< eval2[i];
std::cout << " |H B[i] - eval[i]B[i]|^2 "<< std::setw(25)<< std::setiosflags(std::ios_base::right)<< vv<< std::endl;
// criterion changed: the evals are sorted, so all evals smaller (larger) than the Nstop-th should have converged
if((vv<eresid*eresid) && (i == Nconv) ){
Iconv[Nconv] = i;
++Nconv;
}
} // i-loop end
std::cout<< GridLogMessage <<" #modes converged: "<<Nconv<<std::endl;
if( Nconv>=Nstop ){
goto converged;
}
} // end of iter loop
std::cout << GridLogMessage <<"**************************************************************************"<< std::endl;
std::cout<< GridLogError <<" ImplicitlyRestartedLanczos::calc() NOT converged.";
std::cout << GridLogMessage <<"**************************************************************************"<< std::endl;
abort();
converged:
// Sorting
eval.resize(Nconv);
evec.resize(Nconv,grid);
for(int i=0; i<Nconv; ++i){
eval[i] = eval2[Iconv[i]];
evec[i] = B[Iconv[i]];
}
_sort.push(eval,evec,Nconv);
std::cout << GridLogMessage <<"**************************************************************************"<< std::endl;
std::cout << GridLogMessage << "ImplicitlyRestartedLanczos CONVERGED ; Summary :\n";
std::cout << GridLogMessage <<"**************************************************************************"<< std::endl;
std::cout << GridLogMessage << " -- Iterations = "<< iter << "\n";
std::cout << GridLogMessage << " -- beta(k) = "<< beta_k << "\n";
std::cout << GridLogMessage << " -- Nconv = "<< Nconv << "\n";
std::cout << GridLogMessage <<"**************************************************************************"<< std::endl;
}
private:
/* Saad PP. 195
1. Choose an initial vector v_1 of 2-norm unity. Set β_1 ≡ 0, v_0 ≡ 0
2. For k = 1,2,...,m Do:
3.   w_k := A v_k - β_k v_{k-1}
4.   α_k := (w_k, v_k)
5.   w_k := w_k - α_k v_k          // w_k orthogonal to v_k
6.   β_{k+1} := ∥w_k∥_2. If β_{k+1} = 0 then Stop
7.   v_{k+1} := w_k / β_{k+1}
8. EndDo
*/
void step(std::vector<RealD>& lmd,
std::vector<RealD>& lme,
std::vector<Field>& evec,
Field& w,int Nm,int k)
{
const RealD tiny = 1.0e-20;
assert( k< Nm );
_poly(_Linop,evec[k],w); // 3. w_k := A v_k - β_k v_{k-1}
if(k>0) w -= lme[k-1] * evec[k-1];
ComplexD zalph = innerProduct(evec[k],w); // 4. α_k := (w_k, v_k)
RealD alph = real(zalph);
w = w - alph * evec[k]; // 5. w_k := w_k - α_k v_k
RealD beta = normalise(w); // 6. β_{k+1} := ∥w_k∥_2. If β_{k+1} = 0 then Stop
// 7. v_{k+1} := w_k/β_{k+1}
lmd[k] = alph;
lme[k] = beta;
if ( k > 0 ) orthogonalize(w,evec,k); // orthonormalise
if ( k < Nm-1) evec[k+1] = w;
if ( beta < tiny ) std::cout << GridLogMessage << " beta is tiny "<<beta<<std::endl;
}
void diagonalize_Eigen(std::vector<RealD>& lmd, std::vector<RealD>& lme,
int Nk, int Nm,
Eigen::MatrixXd & Qt, // Nm x Nm
GridBase *grid)
{
Eigen::MatrixXd TriDiag = Eigen::MatrixXd::Zero(Nk,Nk);
for(int i=0;i<Nk;i++) TriDiag(i,i) = lmd[i];
for(int i=0;i<Nk-1;i++) TriDiag(i,i+1) = lme[i];
for(int i=0;i<Nk-1;i++) TriDiag(i+1,i) = lme[i];
Eigen::SelfAdjointEigenSolver<Eigen::MatrixXd> eigensolver(TriDiag);
for (int i = 0; i < Nk; i++) {
lmd[Nk-1-i] = eigensolver.eigenvalues()(i);
}
for (int i = 0; i < Nk; i++) {
for (int j = 0; j < Nk; j++) {
Qt(Nk-1-i,j) = eigensolver.eigenvectors()(j,i);
}
}
}
///////////////////////////////////////////////////////////////////////////
// File could end here if we settle on Eigen ???
///////////////////////////////////////////////////////////////////////////
void qr_decomp(std::vector<RealD>& lmd, // Nm
std::vector<RealD>& lme, // Nm
int Nk, int Nm, // Nk, Nm
Eigen::MatrixXd& Qt, // Nm x Nm matrix
RealD Dsh, int kmin, int kmax)
{
int k = kmin-1;
RealD x;
RealD Fden = 1.0/hypot(lmd[k]-Dsh,lme[k]);
RealD c = ( lmd[k] -Dsh) *Fden;
RealD s = -lme[k] *Fden;
RealD tmpa1 = lmd[k];
RealD tmpa2 = lmd[k+1];
RealD tmpb = lme[k];
lmd[k] = c*c*tmpa1 +s*s*tmpa2 -2.0*c*s*tmpb;
lmd[k+1] = s*s*tmpa1 +c*c*tmpa2 +2.0*c*s*tmpb;
lme[k] = c*s*(tmpa1-tmpa2) +(c*c-s*s)*tmpb;
x =-s*lme[k+1];
lme[k+1] = c*lme[k+1];
for(int i=0; i<Nk; ++i){
RealD Qtmp1 = Qt(k,i);
RealD Qtmp2 = Qt(k+1,i);
Qt(k,i) = c*Qtmp1 - s*Qtmp2;
Qt(k+1,i)= s*Qtmp1 + c*Qtmp2;
}
// Givens transformations
for(int k = kmin; k < kmax-1; ++k){
RealD Fden = 1.0/hypot(x,lme[k-1]);
RealD c = lme[k-1]*Fden;
RealD s = - x*Fden;
RealD tmpa1 = lmd[k];
RealD tmpa2 = lmd[k+1];
RealD tmpb = lme[k];
lmd[k] = c*c*tmpa1 +s*s*tmpa2 -2.0*c*s*tmpb;
lmd[k+1] = s*s*tmpa1 +c*c*tmpa2 +2.0*c*s*tmpb;
lme[k] = c*s*(tmpa1-tmpa2) +(c*c-s*s)*tmpb;
lme[k-1] = c*lme[k-1] -s*x;
if(k != kmax-2){
x = -s*lme[k+1];
lme[k+1] = c*lme[k+1];
}
for(int i=0; i<Nk; ++i){
RealD Qtmp1 = Qt(k,i);
RealD Qtmp2 = Qt(k+1,i);
Qt(k,i) = c*Qtmp1 -s*Qtmp2;
Qt(k+1,i) = s*Qtmp1 +c*Qtmp2;
}
}
}
void diagonalize(std::vector<RealD>& lmd, std::vector<RealD>& lme,
int Nk, int Nm,
Eigen::MatrixXd & Qt,
GridBase *grid)
{
Qt = Eigen::MatrixXd::Identity(Nm,Nm);
if ( diagonalisation == IRLdiagonaliseWithDSTEGR ) {
diagonalize_lapack(lmd,lme,Nk,Nm,Qt,grid);
} else if ( diagonalisation == IRLdiagonaliseWithQR ) {
diagonalize_QR(lmd,lme,Nk,Nm,Qt,grid);
} else if ( diagonalisation == IRLdiagonaliseWithEigen ) {
diagonalize_Eigen(lmd,lme,Nk,Nm,Qt,grid);
} else {
assert(0);
}
}
#ifdef USE_LAPACK
void LAPACK_dstegr(char *jobz, char *range, int *n, double *d, double *e,
double *vl, double *vu, int *il, int *iu, double *abstol,
int *m, double *w, double *z, int *ldz, int *isuppz,
double *work, int *lwork, int *iwork, int *liwork,
int *info);
#endif
void diagonalize_lapack(std::vector<RealD>& lmd,
std::vector<RealD>& lme,
int Nk, int Nm,
Eigen::MatrixXd& Qt,
GridBase *grid)
{
#ifdef USE_LAPACK
const int size = Nm;
int NN = Nk;
double evals_tmp[NN];
double evec_tmp[NN][NN];
memset(evec_tmp[0],0,sizeof(double)*NN*NN);
double DD[NN];
double EE[NN];
for (int i = 0; i< NN; i++) {
for (int j = i - 1; j <= i + 1; j++) {
if ( j < NN && j >= 0 ) {
if (i==j) DD[i] = lmd[i];
if (i==j) evals_tmp[i] = lmd[i];
if (j==(i-1)) EE[j] = lme[j];
}
}
}
int evals_found;
int lwork = ( (18*NN) > (1+4*NN+NN*NN)? (18*NN):(1+4*NN+NN*NN)) ;
int liwork = 3+NN*10 ;
int iwork[liwork];
double work[lwork];
int isuppz[2*NN];
char jobz = 'V'; // calculate evals & evecs
char range = 'I'; // calculate evals in index range il..iu
// char range = 'A'; // calculate all evals
char uplo = 'U'; // refer to upper half of original matrix
char compz = 'I'; // Compute eigenvectors of tridiagonal matrix
int ifail[NN];
int info;
int total = grid->_Nprocessors;
int node = grid->_processor;
int interval = (NN/total)+1;
double vl = 0.0, vu = 0.0;
int il = interval*node+1 , iu = interval*(node+1);
if (iu > NN) iu=NN;
double tol = 0.0;
if (1) {
memset(evals_tmp,0,sizeof(double)*NN);
if ( il <= NN){
LAPACK_dstegr(&jobz, &range, &NN,
(double*)DD, (double*)EE,
&vl, &vu, &il, &iu, // these four are ignored if the second parameter is 'A'
&tol, // tolerance
&evals_found, evals_tmp, (double*)evec_tmp, &NN,
isuppz,
work, &lwork, iwork, &liwork,
&info);
for (int i = iu-1; i>= il-1; i--){
evals_tmp[i] = evals_tmp[i - (il-1)];
if (il>1) evals_tmp[i-(il-1)]=0.;
for (int j = 0; j< NN; j++){
evec_tmp[i][j] = evec_tmp[i - (il-1)][j];
if (il>1) evec_tmp[i-(il-1)][j]=0.;
}
}
}
{
grid->GlobalSumVector(evals_tmp,NN);
grid->GlobalSumVector((double*)evec_tmp,NN*NN);
}
}
// Safer to sort instead of just reversing, but the documentation of the
// routine says evals are returned in increasing order, while qr gives
// evals in decreasing order.
for(int i=0;i<NN;i++){
lmd [NN-1-i]=evals_tmp[i];
for(int j=0;j<NN;j++){
Qt((NN-1-i),j)=evec_tmp[i][j];
}
}
#else
assert(0);
#endif
}
void diagonalize_QR(std::vector<RealD>& lmd, std::vector<RealD>& lme,
int Nk, int Nm,
Eigen::MatrixXd & Qt,
GridBase *grid)
{
int Niter = 100*Nm;
int kmin = 1;
int kmax = Nk;
// (this should be more sophisticated)
for(int iter=0; iter<Niter; ++iter){
// determination of 2x2 leading submatrix
RealD dsub = lmd[kmax-1]-lmd[kmax-2];
RealD dd = sqrt(dsub*dsub + 4.0*lme[kmax-2]*lme[kmax-2]);
RealD Dsh = 0.5*(lmd[kmax-2]+lmd[kmax-1] +dd*(dsub/fabs(dsub)));
// (Dsh: shift)
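// Dsh is the eigenvalue of the trailing 2x2 block
//   [ lmd[kmax-2]  lme[kmax-2] ]
//   [ lme[kmax-2]  lmd[kmax-1] ]
// closer to lmd[kmax-1] (a Wilkinson-style shift), which gives rapid
// convergence of the bottom corner of the tridiagonal matrix.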
// transformation
qr_decomp(lmd,lme,Nk,Nm,Qt,Dsh,kmin,kmax); // Nk, Nm
// Convergence criterion (redefinition of kmin and kmax)
for(int j=kmax-1; j>= kmin; --j){
RealD dds = fabs(lmd[j-1])+fabs(lmd[j]);
if(fabs(lme[j-1])+dds > dds){
kmax = j+1;
goto continued;
}
}
Niter = iter;
return;
continued:
for(int j=0; j<kmax-1; ++j){
RealD dds = fabs(lmd[j])+fabs(lmd[j+1]);
if(fabs(lme[j])+dds > dds){
kmin = j+1;
break;
}
}
}
std::cout << GridLogError << "[QR method] Error - Too many iterations: "<<Niter<<"\n";
abort();
}
};
}
#endif

File diff suppressed because it is too large


@ -0,0 +1,930 @@
/*************************************************************************************
Grid physics library, www.github.com/paboyle/Grid
Source file: ./lib/algorithms/iterative/ImplicitlyRestartedLanczos.h
Copyright (C) 2015
Author: Chulwoo Jung <chulwoo@bnl.gov>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */
#ifndef GRID_LANC_H
#define GRID_LANC_H
#include <string.h> //memset
#ifdef USE_LAPACK
#ifdef USE_MKL
#include<mkl_lapack.h>
#else
void LAPACK_dstegr (char *jobz, char *range, int *n, double *d, double *e,
double *vl, double *vu, int *il, int *iu, double *abstol,
int *m, double *w, double *z, int *ldz, int *isuppz,
double *work, int *lwork, int *iwork, int *liwork,
int *info);
//#include <lapacke/lapacke.h>
#endif
#endif
#include <Grid/algorithms/densematrix/DenseMatrix.h>
// eliminate temporary vector in calc()
#define MEM_SAVE
namespace Grid
{
struct Bisection
{
#if 0
static void get_eig2 (int row_num, std::vector < RealD > &ALPHA,
std::vector < RealD > &BETA,
std::vector < RealD > &eig)
{
int i, j;
std::vector < RealD > evec1 (row_num + 3);
std::vector < RealD > evec2 (row_num + 3);
RealD eps2;
ALPHA[1] = 0.;
BETA[1] = 0.;
for (i = 0; i < row_num - 1; i++)
{
ALPHA[i + 1] = A[i * (row_num + 1)].real ();
BETA[i + 2] = A[i * (row_num + 1) + 1].real ();
}
ALPHA[row_num] = A[(row_num - 1) * (row_num + 1)].real ();
bisec (ALPHA, BETA, row_num, 1, row_num, 1e-10, 1e-10, evec1, eps2);
bisec (ALPHA, BETA, row_num, 1, row_num, 1e-16, 1e-16, evec2, eps2);
// Do we really need to sort here?
int begin = 1;
int end = row_num;
int swapped = 1;
while (swapped)
{
swapped = 0;
for (i = begin; i < end; i++)
{
if (mag (evec2[i]) > mag (evec2[i + 1]))
{
swap (evec2 + i, evec2 + i + 1);
swapped = 1;
}
}
end--;
for (i = end - 1; i >= begin; i--)
{
if (mag (evec2[i]) > mag (evec2[i + 1]))
{
swap (evec2 + i, evec2 + i + 1);
swapped = 1;
}
}
begin++;
}
for (i = 0; i < row_num; i++)
{
for (j = 0; j < row_num; j++)
{
if (i == j)
H[i * row_num + j] = evec2[i + 1];
else
H[i * row_num + j] = 0.;
}
}
}
#endif
static void bisec (std::vector < RealD > &c,
std::vector < RealD > &b,
int n,
int m1,
int m2,
RealD eps1,
RealD relfeh, std::vector < RealD > &x, RealD & eps2)
{
std::vector < RealD > wu (n + 2);
RealD h, q, x1, xu, x0, xmin, xmax;
int i, a, k;
b[1] = 0.0;
xmin = c[n] - fabs (b[n]);
xmax = c[n] + fabs (b[n]);
for (i = 1; i < n; i++)
{
h = fabs (b[i]) + fabs (b[i + 1]);
if (c[i] + h > xmax)
xmax = c[i] + h;
if (c[i] - h < xmin)
xmin = c[i] - h;
}
xmax *= 2.;
eps2 = relfeh * ((xmin + xmax) > 0.0 ? xmax : -xmin);
if (eps1 <= 0.0)
eps1 = eps2;
eps2 = 0.5 * eps1 + 7.0 * (eps2);
x0 = xmax;
for (i = m1; i <= m2; i++)
{
x[i] = xmax;
wu[i] = xmin;
}
for (k = m2; k >= m1; k--)
{
xu = xmin;
i = k;
do
{
if (xu < wu[i])
{
xu = wu[i];
i = m1 - 1;
}
i--;
}
while (i >= m1);
if (x0 > x[k])
x0 = x[k];
while ((x0 - xu) > 2 * relfeh * (fabs (xu) + fabs (x0)) + eps1)
{
x1 = (xu + x0) / 2;
a = 0;
q = 1.0;
for (i = 1; i <= n; i++)
{
q =
c[i] - x1 -
((q != 0.0) ? b[i] * b[i] / q : fabs (b[i]) / relfeh);
if (q < 0)
a++;
}
// printf("x1=%0.14e a=%d\n",x1,a);
if (a < k)
{
if (a < m1)
{
xu = x1;
wu[m1] = x1;
}
else
{
xu = x1;
wu[a + 1] = x1;
if (x[a] > x1)
x[a] = x1;
}
}
else
x0 = x1;
}
printf ("x0=%0.14e xu=%0.14e k=%d\n", x0, xu, k);
x[k] = (x0 + xu) / 2;
}
}
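// This is standard bisection with Sturm-sequence counts (cf. Barth, Martin
// and Wilkinson): for a trial point x1 the recurrence
//   q_i = c_i - x1 - b_i^2 / q_{i-1}
// produces the pivots of the LDL^T factorisation of T - x1*I, and the number
// of negative q_i equals the number of eigenvalues below x1. The bracket
// [xu, x0] around each sought eigenvalue is halved until it falls below the
// relative tolerance, so each eigenvalue costs O(n log(1/eps)) operations.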
};
/////////////////////////////////////////////////////////////
// Implicitly restarted lanczos
/////////////////////////////////////////////////////////////
template < class Field > class SimpleLanczos
{
const RealD small = 1.0e-16;
public:
int lock;
int get;
int Niter;
int converged;
int Nstop; // Number of evecs checked for convergence
int Nk; // Number of evals sought
int Np; // Np -- Number of spare vecs in Krylov space
int Nm; // Nm -- total number of vectors
RealD OrthoTime;
RealD eresid;
SortEigen < Field > _sort;
LinearOperatorBase < Field > &_Linop;
OperatorFunction < Field > &_poly;
/////////////////////////
// Constructor
/////////////////////////
void init (void)
{
};
void Abort (int ff, DenseVector < RealD > &evals,
DenseVector < DenseVector < RealD > >&evecs);
SimpleLanczos (LinearOperatorBase < Field > &Linop, // op
OperatorFunction < Field > &poly, // polynomial
int _Nstop, // number of evecs checked for convergence
int _Nk, // number of evals sought
int _Nm, // total number of vectors (Nk plus spares)
RealD _eresid, // convergence residual on the eigenvalues
int _Niter): // Max iterations
_Linop (Linop),
_poly (poly),
Nstop (_Nstop), Nk (_Nk), Nm (_Nm), eresid (_eresid), Niter (_Niter)
{
Np = Nm - Nk;
assert (Np > 0);
};
/////////////////////////
// Sanity checked this routine (step) against Saad.
/////////////////////////
void RitzMatrix (DenseVector < Field > &evec, int k)
{
if (1)
return;
GridBase *grid = evec[0]._grid;
Field w (grid);
std::cout << GridLogMessage << "RitzMatrix " << std::endl;
for (int i = 0; i < k; i++)
{
_Linop.HermOp (evec[i], w);
// _poly(_Linop,evec[i],w);
std::cout << GridLogMessage << "[" << i << "] ";
for (int j = 0; j < k; j++)
{
ComplexD in = innerProduct (evec[j], w);
if (fabs ((double) i - j) > 1)
{
if (abs (in) > 1.0e-9)
{
std::cout << GridLogMessage << "oops" << std::endl;
abort ();
}
else
std::cout << GridLogMessage << " 0 ";
}
else
{
std::cout << GridLogMessage << " " << in << " ";
}
}
std::cout << GridLogMessage << std::endl;
}
}
void step (DenseVector < RealD > &lmd,
DenseVector < RealD > &lme,
Field & last, Field & current, Field & next, uint64_t k)
{
if (lmd.size () <= k)
lmd.resize (k + Nm);
if (lme.size () <= k)
lme.resize (k + Nm);
// _poly(_Linop,current,next ); // 3. w_k := A v_k - beta_k v_{k-1}
_Linop.HermOp (current, next); // 3. w_k := A v_k - beta_k v_{k-1}
if (k > 0)
{
next -= lme[k - 1] * last;
}
// std::cout<<GridLogMessage << "<last|next>" << innerProduct(last,next) <<std::endl;
ComplexD zalph = innerProduct (current, next); // 4. alpha_k := (w_k, v_k)
RealD alph = real (zalph);
next = next - alph * current; // 5. w_k := w_k - alpha_k v_k
// std::cout<<GridLogMessage << "<current|next>" << innerProduct(current,next) <<std::endl;
RealD beta = normalise (next); // 6. beta_{k+1} := ||w_k||_2 ; if beta_{k+1} = 0 then stop
// 7. v_{k+1} := w_k / beta_{k+1}
// norm=beta;
int interval = Nm / 100 + 1;
if ((k % interval) == 0)
std::cout << GridLogMessage << k << " : alpha = " << zalph << " beta " << beta << std::endl;
const RealD tiny = 1.0e-20;
if (beta < tiny)
{
std::cout << GridLogMessage << " beta is tiny " << beta << std::
endl;
}
lmd[k] = alph;
lme[k] = beta;
}
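// Steps 3-7 above implement one application of the three-term Lanczos
// recurrence
//   beta_{k+1} v_{k+1} = A v_k - alpha_k v_k - beta_k v_{k-1}
// so that after k steps V_k^dag A V_k is the tridiagonal matrix with
// diagonal lmd[0..k] and off-diagonal lme[0..k-1].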
void qr_decomp (DenseVector < RealD > &lmd,
DenseVector < RealD > &lme,
int Nk,
int Nm,
DenseVector < RealD > &Qt, RealD Dsh, int kmin, int kmax)
{
int k = kmin - 1;
RealD x;
RealD Fden = 1.0 / hypot (lmd[k] - Dsh, lme[k]);
RealD c = (lmd[k] - Dsh) * Fden;
RealD s = -lme[k] * Fden;
RealD tmpa1 = lmd[k];
RealD tmpa2 = lmd[k + 1];
RealD tmpb = lme[k];
lmd[k] = c * c * tmpa1 + s * s * tmpa2 - 2.0 * c * s * tmpb;
lmd[k + 1] = s * s * tmpa1 + c * c * tmpa2 + 2.0 * c * s * tmpb;
lme[k] = c * s * (tmpa1 - tmpa2) + (c * c - s * s) * tmpb;
x = -s * lme[k + 1];
lme[k + 1] = c * lme[k + 1];
for (int i = 0; i < Nk; ++i)
{
RealD Qtmp1 = Qt[i + Nm * k];
RealD Qtmp2 = Qt[i + Nm * (k + 1)];
Qt[i + Nm * k] = c * Qtmp1 - s * Qtmp2;
Qt[i + Nm * (k + 1)] = s * Qtmp1 + c * Qtmp2;
}
// Givens transformations
for (int k = kmin; k < kmax - 1; ++k)
{
RealD Fden = 1.0 / hypot (x, lme[k - 1]);
RealD c = lme[k - 1] * Fden;
RealD s = -x * Fden;
RealD tmpa1 = lmd[k];
RealD tmpa2 = lmd[k + 1];
RealD tmpb = lme[k];
lmd[k] = c * c * tmpa1 + s * s * tmpa2 - 2.0 * c * s * tmpb;
lmd[k + 1] = s * s * tmpa1 + c * c * tmpa2 + 2.0 * c * s * tmpb;
lme[k] = c * s * (tmpa1 - tmpa2) + (c * c - s * s) * tmpb;
lme[k - 1] = c * lme[k - 1] - s * x;
if (k != kmax - 2)
{
x = -s * lme[k + 1];
lme[k + 1] = c * lme[k + 1];
}
for (int i = 0; i < Nk; ++i)
{
RealD Qtmp1 = Qt[i + Nm * k];
RealD Qtmp2 = Qt[i + Nm * (k + 1)];
Qt[i + Nm * k] = c * Qtmp1 - s * Qtmp2;
Qt[i + Nm * (k + 1)] = s * Qtmp1 + c * Qtmp2;
}
}
}
#ifdef USE_LAPACK
#ifdef USE_MKL
#define LAPACK_INT MKL_INT
#else
#define LAPACK_INT long long
#endif
void diagonalize_lapack (DenseVector < RealD > &lmd, DenseVector < RealD > &lme, int N1, // all
int N2, // get
GridBase * grid)
{
const int size = Nm;
LAPACK_INT NN = N1;
double evals_tmp[NN];
double DD[NN];
double EE[NN];
for (int i = 0; i < NN; i++)
for (int j = i - 1; j <= i + 1; j++)
if (j < NN && j >= 0)
{
if (i == j)
DD[i] = lmd[i];
if (i == j)
evals_tmp[i] = lmd[i];
if (j == (i - 1))
EE[j] = lme[j];
}
LAPACK_INT evals_found;
LAPACK_INT lwork =
((18 * NN) >
(1 + 4 * NN + NN * NN) ? (18 * NN) : (1 + 4 * NN + NN * NN));
LAPACK_INT liwork = 3 + NN * 10;
LAPACK_INT iwork[liwork];
double work[lwork];
LAPACK_INT isuppz[2 * NN];
char jobz = 'N'; // calculate evals only
char range = 'I'; // calculate il-th to iu-th evals
// char range = 'A'; // calculate all evals
char uplo = 'U'; // refer to upper half of original matrix
char compz = 'I'; // Compute eigenvectors of tridiagonal matrix
int ifail[NN];
LAPACK_INT info;
// int total = QMP_get_number_of_nodes();
// int node = QMP_get_node_number();
// GridBase *grid = evec[0]._grid;
int total = grid->_Nprocessors;
int node = grid->_processor;
int interval = (NN / total) + 1;
double vl = 0.0, vu = 0.0;
LAPACK_INT il = interval * node + 1, iu = interval * (node + 1);
if (iu > NN)
iu = NN;
double tol = 0.0;
if (1)
{
memset (evals_tmp, 0, sizeof (double) * NN);
if (il <= NN)
{
printf ("total=%d node=%d il=%d iu=%d\n", total, node, il, iu);
#ifdef USE_MKL
dstegr (&jobz, &range, &NN,
#else
LAPACK_dstegr (&jobz, &range, &NN,
#endif
(double *) DD, (double *) EE, &vl, &vu, &il, &iu, // these four are ignored if the second parameter is 'A'
&tol, // tolerance
&evals_found, evals_tmp, (double *) NULL, &NN,
isuppz, work, &lwork, iwork, &liwork, &info);
for (int i = iu - 1; i >= il - 1; i--)
{
printf ("node=%d evals_found=%d evals_tmp[%d] = %g\n", node,
evals_found, i - (il - 1), evals_tmp[i - (il - 1)]);
evals_tmp[i] = evals_tmp[i - (il - 1)];
if (il > 1)
evals_tmp[i - (il - 1)] = 0.;
}
}
{
grid->GlobalSumVector (evals_tmp, NN);
}
}
// cheating a bit. It is better to sort instead of just reversing, but the documentation of the routine says evals are returned in increasing order. qr gives evals in decreasing order.
}
#undef LAPACK_INT
#endif
void diagonalize (DenseVector < RealD > &lmd,
DenseVector < RealD > &lme,
int N2, int N1, GridBase * grid)
{
#ifdef USE_LAPACK
const int check_lapack = 0; // just use lapack if 0, check against lapack if 1
if (!check_lapack)
return diagonalize_lapack (lmd, lme, N2, N1, grid);
// diagonalize_lapack(lmd2,lme2,Nm2,Nm,Qt,grid);
#endif
}
#if 1
static RealD normalise (Field & v)
{
RealD nn = norm2 (v);
nn = sqrt (nn);
v = v * (1.0 / nn);
return nn;
}
void orthogonalize (Field & w, DenseVector < Field > &evec, int k)
{
double t0 = -usecond () / 1e6;
typedef typename Field::scalar_type MyComplex;
MyComplex ip;
if (0)
{
for (int j = 0; j < k; ++j)
{
normalise (evec[j]);
for (int i = 0; i < j; i++)
{
ip = innerProduct (evec[i], evec[j]); // are the evecs normalised? ; this assumes so.
evec[j] = evec[j] - ip * evec[i];
}
}
}
for (int j = 0; j < k; ++j)
{
ip = innerProduct (evec[j], w); // are the evecs normalised? ; this assumes so.
w = w - ip * evec[j];
}
normalise (w);
t0 += usecond () / 1e6;
OrthoTime += t0;
}
void setUnit_Qt (int Nm, DenseVector < RealD > &Qt)
{
for (int i = 0; i < Qt.size (); ++i)
Qt[i] = 0.0;
for (int k = 0; k < Nm; ++k)
Qt[k + k * Nm] = 1.0;
}
void calc (DenseVector < RealD > &eval, const Field & src, int &Nconv)
{
GridBase *grid = src._grid;
// assert(grid == src._grid);
std::cout << GridLogMessage << " -- Nk = " << Nk << " Np = " << Np << std::endl;
std::cout << GridLogMessage << " -- Nm = " << Nm << std::endl;
std::cout << GridLogMessage << " -- size of eval = " << eval.size () << std::endl;
// assert(c.size() && Nm == eval.size());
DenseVector < RealD > lme (Nm);
DenseVector < RealD > lmd (Nm);
Field current (grid);
Field last (grid);
Field next (grid);
Nconv = 0;
RealD beta_k;
// Set initial vector
// (uniform vector) Why not src??
// evec[0] = 1.0;
current = src;
std::cout << GridLogMessage << "norm2(src)= " << norm2 (src) << std::
endl;
normalise (current);
std::
cout << GridLogMessage << "norm2(evec[0])= " << norm2 (current) <<
std::endl;
// Initial Nk steps
OrthoTime = 0.;
double t0 = usecond () / 1e6;
RealD norm; // sqrt norm of last vector
uint64_t iter = 0;
bool initted = false;
std::vector < RealD > low (Nstop * 10);
std::vector < RealD > high (Nstop * 10);
RealD cont = 0.;
while (1) {
cont = 0.;
std::vector < RealD > lme2 (Nm);
std::vector < RealD > lmd2 (Nm);
for (uint64_t k = 0; k < Nm; ++k, iter++) {
step (lmd, lme, last, current, next, iter);
last = current;
current = next;
}
double t1 = usecond () / 1e6;
std::cout << GridLogMessage << "IRL::Initial steps: " << t1 -
t0 << "seconds" << std::endl;
t0 = t1;
std::
cout << GridLogMessage << "IRL::Initial steps:OrthoTime " <<
OrthoTime << "seconds" << std::endl;
// getting eigenvalues
lmd2.resize (iter + 2);
lme2.resize (iter + 2);
for (uint64_t k = 0; k < iter; ++k) {
lmd2[k + 1] = lmd[k];
lme2[k + 2] = lme[k];
}
t1 = usecond () / 1e6;
std::cout << GridLogMessage << "IRL:: copy: " << t1 -
t0 << "seconds" << std::endl;
t0 = t1;
{
int total = grid->_Nprocessors;
int node = grid->_processor;
int interval = (Nstop / total) + 1;
int iu = (iter + 1) - (interval * node + 1);
int il = (iter + 1) - (interval * (node + 1));
std::vector < RealD > eval2 (iter + 3);
RealD eps2;
Bisection::bisec (lmd2, lme2, iter, il, iu, 1e-16, 1e-10, eval2,
eps2);
// diagonalize(eval2,lme2,iter,Nk,grid);
RealD diff = 0.;
for (int i = il; i <= iu; i++) {
if (initted)
diff =
fabs (eval2[i] - high[iu-i]) / (fabs (eval2[i]) +
fabs (high[iu-i]));
if (initted && (diff > eresid))
cont = 1.;
if (initted)
printf ("eval[%d]=%0.14e %0.14e, %0.14e\n", i, eval2[i],
high[iu-i], diff);
high[iu-i] = eval2[i];
}
il = (interval * node + 1);
iu = (interval * (node + 1));
Bisection::bisec (lmd2, lme2, iter, il, iu, 1e-16, 1e-10, eval2,
eps2);
for (int i = il; i <= iu; i++) {
if (initted)
diff =
fabs (eval2[i] - low[i]) / (fabs (eval2[i]) +
fabs (low[i]));
if (initted && (diff > eresid))
cont = 1.;
if (initted)
printf ("eval[%d]=%0.14e %0.14e, %0.14e\n", i, eval2[i],
low[i], diff);
low[i] = eval2[i];
}
t1 = usecond () / 1e6;
std::cout << GridLogMessage << "IRL:: diagonalize: " << t1 -
t0 << "seconds" << std::endl;
t0 = t1;
}
for (uint64_t k = 0; k < Nk; ++k) {
// eval[k] = eval2[k];
}
if (initted)
{
grid->GlobalSumVector (&cont, 1);
if (cont < 1.) return;
}
initted = true;
}
}
/**
There is some matrix Q such that for any vector y
Q.e_1 = y and Q is unitary.
**/
template < class T >
static T orthQ (DenseMatrix < T > &Q, DenseVector < T > y)
{
int N = y.size (); //Matrix Size
Fill (Q, 0.0);
T tau;
for (int i = 0; i < N; i++)
{
Q[i][0] = y[i];
}
T sig = conj (y[0]) * y[0];
T tau0 = fabs (sqrt (sig));
for (int j = 1; j < N; j++)
{
sig += conj (y[j]) * y[j];
tau = abs (sqrt (sig));
if (abs (tau0) > 0.0)
{
T gam = conj ((y[j] / tau) / tau0);
for (int k = 0; k <= j - 1; k++)
{
Q[k][j] = -gam * y[k];
}
Q[j][j] = tau0 / tau;
}
else
{
Q[j - 1][j] = 1.0;
}
tau0 = tau;
}
return tau;
}
/**
There is some matrix Q such that for any vector y
Q.e_k = y and Q is unitary.
**/
template < class T >
static T orthU (DenseMatrix < T > &Q, DenseVector < T > y)
{
T tau = orthQ (Q, y);
SL (Q);
return tau;
}
/**
Wind up with a matrix with the first con rows untouched
say con = 2
Q is such that Qdag H Q has {x, x, val, 0, 0, 0, 0, ...} as its first column
and the matrix is upper Hessenberg,
and with f and Q appropriately modified, Q is the Arnoldi factorization
**/
template < class T > static void Lock (DenseMatrix < T > &H, ///Hess mtx
DenseMatrix < T > &Q, ///Lock Transform
T val, ///value to be locked
int con, ///number already locked
RealD small, int dfg, bool herm)
{
//ForceTridiagonal(H);
int M = H.dim;
DenseVector < T > vec;
Resize (vec, M - con);
DenseMatrix < T > AH;
Resize (AH, M - con, M - con);
AH = GetSubMtx (H, con, M, con, M);
DenseMatrix < T > QQ;
Resize (QQ, M - con, M - con);
Unity (Q);
Unity (QQ);
DenseVector < T > evals;
Resize (evals, M - con);
DenseMatrix < T > evecs;
Resize (evecs, M - con, M - con);
Wilkinson < T > (AH, evals, evecs, small);
int k = 0;
RealD cold = abs (val - evals[k]);
for (int i = 1; i < M - con; i++)
{
RealD cnew = abs (val - evals[i]);
if (cnew < cold)
{
k = i;
cold = cnew;
}
}
vec = evecs[k];
ComplexD tau;
orthQ (QQ, vec);
//orthQM(QQ,AH,vec);
AH = Hermitian (QQ) * AH;
AH = AH * QQ;
for (int i = con; i < M; i++)
{
for (int j = con; j < M; j++)
{
Q[i][j] = QQ[i - con][j - con];
H[i][j] = AH[i - con][j - con];
}
}
for (int j = M - 1; j > con + 2; j--)
{
DenseMatrix < T > U;
Resize (U, j - 1 - con, j - 1 - con);
DenseVector < T > z;
Resize (z, j - 1 - con);
T nm = norm (z);
for (int k = con + 0; k < j - 1; k++)
{
z[k - con] = conj (H (j, k + 1));
}
normalise (z);
RealD tmp = 0;
for (int i = 0; i < z.size () - 1; i++)
{
tmp = tmp + abs (z[i]);
}
if (tmp < small / ((RealD) z.size () - 1.0))
{
continue;
}
tau = orthU (U, z);
DenseMatrix < T > Hb;
Resize (Hb, j - 1 - con, M);
for (int a = 0; a < M; a++)
{
for (int b = 0; b < j - 1 - con; b++)
{
T sum = 0;
for (int c = 0; c < j - 1 - con; c++)
{
sum += H[a][con + 1 + c] * U[c][b];
} //sum += H(a,con+1+c)*U(c,b);}
Hb[b][a] = sum;
}
}
for (int k = con + 1; k < j; k++)
{
for (int l = 0; l < M; l++)
{
H[l][k] = Hb[k - 1 - con][l];
}
} //H(Hb[k-1-con][l] , l,k);}}
DenseMatrix < T > Qb;
Resize (Qb, M, M);
for (int a = 0; a < M; a++)
{
for (int b = 0; b < j - 1 - con; b++)
{
T sum = 0;
for (int c = 0; c < j - 1 - con; c++)
{
sum += Q[a][con + 1 + c] * U[c][b];
} //sum += Q(a,con+1+c)*U(c,b);}
Qb[b][a] = sum;
}
}
for (int k = con + 1; k < j; k++)
{
for (int l = 0; l < M; l++)
{
Q[l][k] = Qb[k - 1 - con][l];
}
} //Q(Qb[k-1-con][l] , l,k);}}
DenseMatrix < T > Hc;
Resize (Hc, M, M);
for (int a = 0; a < j - 1 - con; a++)
{
for (int b = 0; b < M; b++)
{
T sum = 0;
for (int c = 0; c < j - 1 - con; c++)
{
sum += conj (U[c][a]) * H[con + 1 + c][b];
} //sum += conj( U(c,a) )*H(con+1+c,b);}
Hc[b][a] = sum;
}
}
for (int k = 0; k < M; k++)
{
for (int l = con + 1; l < j; l++)
{
H[l][k] = Hc[k][l - 1 - con];
}
} //H(Hc[k][l-1-con] , l,k);}}
}
}
#endif
};
}
#endif


@ -0,0 +1,122 @@
#include <math.h>
#include <stdlib.h>
#include <vector>
struct Bisection {
static void get_eig2(int row_num,std::vector<RealD> &ALPHA,std::vector<RealD> &BETA, std::vector<RealD> & eig)
{
int i,j;
std::vector<RealD> evec1(row_num+3);
std::vector<RealD> evec2(row_num+3);
RealD eps2;
ALPHA[1]=0.;
BETA[1]=0.;
for(i=0;i<row_num-1;i++) {
ALPHA[i+1] = A[i*(row_num+1)].real();
BETA[i+2] = A[i*(row_num+1)+1].real();
}
ALPHA[row_num] = A[(row_num-1)*(row_num+1)].real();
bisec(ALPHA,BETA,row_num,1,row_num,1e-10,1e-10,evec1,eps2);
bisec(ALPHA,BETA,row_num,1,row_num,1e-16,1e-16,evec2,eps2);
// Do we really need to sort here?
int begin=1;
int end = row_num;
int swapped=1;
while(swapped) {
swapped=0;
for(i=begin;i<end;i++){
if(mag(evec2[i])>mag(evec2[i+1])) {
swap(evec2+i,evec2+i+1);
swapped=1;
}
}
end--;
for(i=end-1;i>=begin;i--){
if(mag(evec2[i])>mag(evec2[i+1])) {
swap(evec2+i,evec2+i+1);
swapped=1;
}
}
begin++;
}
for(i=0;i<row_num;i++){
for(j=0;j<row_num;j++) {
if(i==j) H[i*row_num+j]=evec2[i+1];
else H[i*row_num+j]=0.;
}
}
}
static void bisec(std::vector<RealD> &c,
std::vector<RealD> &b,
int n,
int m1,
int m2,
RealD eps1,
RealD relfeh,
std::vector<RealD> &x,
RealD &eps2)
{
std::vector<RealD> wu(n+2);
RealD h,q,x1,xu,x0,xmin,xmax;
int i,a,k;
b[1]=0.0;
xmin=c[n]-fabs(b[n]);
xmax=c[n]+fabs(b[n]);
for(i=1;i<n;i++){
h=fabs(b[i])+fabs(b[i+1]);
if(c[i]+h>xmax) xmax= c[i]+h;
if(c[i]-h<xmin) xmin= c[i]-h;
}
xmax *=2.;
eps2=relfeh*((xmin+xmax)>0.0 ? xmax : -xmin);
if(eps1<=0.0) eps1=eps2;
eps2=0.5*eps1+7.0*(eps2);
x0=xmax;
for(i=m1;i<=m2;i++){
x[i]=xmax;
wu[i]=xmin;
}
for(k=m2;k>=m1;k--){
xu=xmin;
i=k;
do{
if(xu<wu[i]){
xu=wu[i];
i=m1-1;
}
i--;
}while(i>=m1);
if(x0>x[k]) x0=x[k];
while((x0-xu)>2*relfeh*(fabs(xu)+fabs(x0))+eps1){
x1=(xu+x0)/2;
a=0;
q=1.0;
for(i=1;i<=n;i++){
q=c[i]-x1-((q!=0.0)? b[i]*b[i]/q:fabs(b[i])/relfeh);
if(q<0) a++;
}
// printf("x1=%e a=%d\n",x1,a);
if(a<k){
if(a<m1){
xu=x1;
wu[m1]=x1;
}else {
xu=x1;
wu[a+1]=x1;
if(x[a]>x1) x[a]=x1;
}
}else x0=x1;
}
x[k]=(x0+xu)/2;
}
}
};


@ -114,19 +114,151 @@ void GlobalSharedMemory::Init(Grid_MPI_Comm comm)
assert(WorldNode!=-1);
_ShmSetup=1;
}
// Gray encode support
int BinaryToGray (int binary) {
int gray = (binary>>1)^binary;
return gray;
}
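// Gray encoding makes lexicographically adjacent integers differ in exactly
// one bit, so nearest neighbours in a periodic torus direction map to single
// hypercube hops. For example 0..7 encode to 0,1,3,2,6,7,5,4.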
int Log2Size(int TwoToPower,int MAXLOG2)
{
////////////////////////////////////////////////////////////////
// Assert power of two shm_size.
////////////////////////////////////////////////////////////////
int log2size = -1;
for(int i=0;i<=MAXLOG2;i++){
if ( (0x1<<i) == TwoToPower ) {
log2size = i;
break;
}
}
return log2size;
}
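// e.g. Log2Size(8,16) returns 3, while a non-power-of-two count such as
// Log2Size(12,16) returns -1, which callers trap with an assert.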
void GlobalSharedMemory::OptimalCommunicator(const std::vector<int> &processors,Grid_MPI_Comm & optimal_comm)
{
#undef HYPERCUBE
#ifdef HYPERCUBE
////////////////////////////////////////////////////////////////
// Assert power of two shm_size.
////////////////////////////////////////////////////////////////
int log2size = Log2Size(WorldShmSize,MAXLOG2RANKSPERNODE);
assert(log2size != -1);
////////////////////////////////////////////////////////////////
// Identify the hypercube coordinate of this node using hostname
////////////////////////////////////////////////////////////////
// n runs 0...7 9...16 18...25 27...34 (8*4) 5 bits
// i runs 0..7 3 bits
// r runs 0..3 2 bits
// 2^10 = 1024 nodes
const int maxhdim = 10;
std::vector<int> HyperCubeCoords(maxhdim,0);
std::vector<int> RootHyperCubeCoords(maxhdim,0);
int R;
int I;
int N;
const int namelen = _POSIX_HOST_NAME_MAX;
char name[namelen];
// Parse ICE-XA hostname to get hypercube location
gethostname(name,namelen);
int nscan = sscanf(name,"r%di%dn%d",&R,&I,&N) ;
assert(nscan==3);
int nlo = N%9;
int nhi = N/9;
uint32_t hypercoor = (R<<8)|(I<<5)|(nhi<<3)|nlo ;
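// Illustration (hypothetical hostname): "r1i2n11" parses to R=1, I=2, N=11,
// so nlo = 11%9 = 2, nhi = 11/9 = 1 and
// hypercoor = (1<<8)|(2<<5)|(1<<3)|2 = 0x14a.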
uint32_t rootcoor = hypercoor;
//////////////////////////////////////////////////////////////////
// Print debug info
//////////////////////////////////////////////////////////////////
for(int d=0;d<maxhdim;d++){
HyperCubeCoords[d] = (hypercoor>>d)&0x1;
}
std::string hname(name);
std::cout << "hostname "<<hname<<std::endl;
std::cout << "R " << R << " I " << I << " N "<< N<<
<< " hypercoor 0x"<<std::hex<<hypercoor<<std::dec<<std::endl;
//////////////////////////////////////////////////////////////////
// broadcast node 0's base coordinate for this partition.
//////////////////////////////////////////////////////////////////
MPI_Bcast(&rootcoor, sizeof(rootcoor), MPI_BYTE, 0, WorldComm);
hypercoor=hypercoor-rootcoor;
assert(hypercoor<WorldSize);
assert(hypercoor>=0);
//////////////////////////////////////
// Printing
//////////////////////////////////////
for(int d=0;d<maxhdim;d++){
HyperCubeCoords[d] = (hypercoor>>d)&0x1;
}
////////////////////////////////////////////////////////////////
// Identify subblock of ranks on node spreading across dims
// in a maximally symmetrical way
////////////////////////////////////////////////////////////////
int ndimension = processors.size();
std::vector<int> processor_coor(ndimension);
std::vector<int> WorldDims = processors; std::vector<int> ShmDims (ndimension,1); std::vector<int> NodeDims (ndimension);
std::vector<int> ShmCoor (ndimension); std::vector<int> NodeCoor (ndimension); std::vector<int> WorldCoor(ndimension);
std::vector<int> HyperCoor(ndimension);
int dim = 0;
for(int l2=0;l2<log2size;l2++){
while ( (WorldDims[dim] / ShmDims[dim]) <= 1 ) dim=(dim+1)%ndimension;
ShmDims[dim]*=2;
dim=(dim+1)%ndimension;
}
////////////////////////////////////////////////////////////////
// Establish torus of processes and nodes with sub-blockings
////////////////////////////////////////////////////////////////
for(int d=0;d<ndimension;d++){
NodeDims[d] = WorldDims[d]/ShmDims[d];
}
////////////////////////////////////////////////////////////////
// Map Hcube according to physical lattice
// must partition. Loop over dims and find out who would join.
////////////////////////////////////////////////////////////////
int hcoor = hypercoor;
for(int d=0;d<ndimension;d++){
int bits = Log2Size(NodeDims[d],MAXLOG2RANKSPERNODE);
int msk = (0x1<<bits)-1;
HyperCoor[d]=hcoor & msk;
HyperCoor[d]=BinaryToGray(HyperCoor[d]); // Space filling curve magic
hcoor = hcoor >> bits;
}
////////////////////////////////////////////////////////////////
// Check processor counts match
////////////////////////////////////////////////////////////////
int Nprocessors=1;
for(int i=0;i<ndimension;i++){
Nprocessors*=processors[i];
}
assert(WorldSize==Nprocessors);
////////////////////////////////////////////////////////////////
// Establish mapping between lexico physics coord and WorldRank
////////////////////////////////////////////////////////////////
int rank;
Lexicographic::CoorFromIndexReversed(NodeCoor,WorldNode ,NodeDims);
for(int d=0;d<ndimension;d++) NodeCoor[d]=HyperCoor[d];
Lexicographic::CoorFromIndexReversed(ShmCoor ,WorldShmRank,ShmDims);
for(int d=0;d<ndimension;d++) WorldCoor[d] = NodeCoor[d]*ShmDims[d]+ShmCoor[d];
Lexicographic::IndexFromCoorReversed(WorldCoor,rank,WorldDims);
/////////////////////////////////////////////////////////////////
// Build the new communicator
/////////////////////////////////////////////////////////////////
int ierr= MPI_Comm_split(WorldComm,0,rank,&optimal_comm);
assert(ierr==0);
#else
////////////////////////////////////////////////////////////////
// Assert power of two shm_size.
////////////////////////////////////////////////////////////////
int log2size = Log2Size(WorldShmSize,MAXLOG2RANKSPERNODE);
assert(log2size != -1);
////////////////////////////////////////////////////////////////
@ -175,15 +307,77 @@ void GlobalSharedMemory::OptimalCommunicator(const std::vector<int> &processors,
/////////////////////////////////////////////////////////////////
int ierr= MPI_Comm_split(WorldComm,0,rank,&optimal_comm);
assert(ierr==0);
#endif
}
////////////////////////////////////////////////////////////////////////////////////////////
// SHMGET
////////////////////////////////////////////////////////////////////////////////////////////
#ifdef GRID_MPI3_SHMGET
void GlobalSharedMemory::SharedMemoryAllocate(uint64_t bytes, int flags)
{
std::cout << "SharedMemoryAllocate "<< bytes<< " shmget implementation "<<std::endl;
assert(_ShmSetup==1);
assert(_ShmAlloc==0);
//////////////////////////////////////////////////////////////////////////////////////////////////////////
// allocate the shared windows for our group
//////////////////////////////////////////////////////////////////////////////////////////////////////////
MPI_Barrier(WorldShmComm);
WorldShmCommBufs.resize(WorldShmSize);
std::vector<int> shmids(WorldShmSize);
if ( WorldShmRank == 0 ) {
for(int r=0;r<WorldShmSize;r++){
size_t size = bytes;
key_t key = IPC_PRIVATE;
int flags = IPC_CREAT | SHM_R | SHM_W;
#ifdef SHM_HUGETLB
if (Hugepages) flags|=SHM_HUGETLB;
#endif
if ((shmids[r]= shmget(key,size, flags)) ==-1) {
int errsv = errno;
printf("Errno %d\n",errsv);
printf("key %d\n",key);
printf("size %lld\n",size);
printf("flags %d\n",flags);
perror("shmget");
exit(1);
}
}
}
MPI_Barrier(WorldShmComm);
MPI_Bcast(&shmids[0],WorldShmSize*sizeof(int),MPI_BYTE,0,WorldShmComm);
MPI_Barrier(WorldShmComm);
for(int r=0;r<WorldShmSize;r++){
WorldShmCommBufs[r] = (uint64_t *)shmat(shmids[r], NULL,0);
if (WorldShmCommBufs[r] == (uint64_t *)-1) {
perror("Shared memory attach failure");
shmctl(shmids[r], IPC_RMID, NULL);
exit(2);
}
}
MPI_Barrier(WorldShmComm);
///////////////////////////////////
// Mark for clean up
///////////////////////////////////
for(int r=0;r<WorldShmSize;r++){
shmctl(shmids[r], IPC_RMID,(struct shmid_ds *)NULL);
}
MPI_Barrier(WorldShmComm);
_ShmAlloc=1;
_ShmAllocBytes = bytes;
}
#endif
////////////////////////////////////////////////////////////////////////////////////////////
// Hugetlbfs mapping intended
////////////////////////////////////////////////////////////////////////////////////////////
#ifdef GRID_MPI3_SHMMMAP
void GlobalSharedMemory::SharedMemoryAllocate(uint64_t bytes, int flags)
{
std::cout << "SharedMemoryAllocate "<< bytes<< " MMAP implementation "<<std::endl;
std::cout << "SharedMemoryAllocate "<< bytes<< " MMAP implementation "<< GRID_SHM_PATH <<std::endl;
assert(_ShmSetup==1);
assert(_ShmAlloc==0);
//////////////////////////////////////////////////////////////////////////////////////////////////////////
@ -193,7 +387,7 @@ void GlobalSharedMemory::SharedMemoryAllocate(uint64_t bytes, int flags)
WorldShmCommBufs.resize(WorldShmSize);
////////////////////////////////////////////////////////////////////////////////////////////
// Hugetlbfs and others map filesystems as mappable huge pages
////////////////////////////////////////////////////////////////////////////////////////////
char shm_name [NAME_MAX];
for(int r=0;r<WorldShmSize;r++){
@ -344,6 +538,9 @@ void GlobalSharedMemory::SharedMemoryAllocate(uint64_t bytes, int flags)
}
#endif
////////////////////////////////////////////////////////
// Global shared functionality finished
// Now move to per communicator functionality


@ -244,19 +244,11 @@ namespace Grid {
template<class sobj,class vobj> strong_inline
RealD axpy_norm(Lattice<vobj> &ret,sobj a,const Lattice<vobj> &x,const Lattice<vobj> &y){
ret.checkerboard = x.checkerboard;
conformable(ret,x);
conformable(x,y);
return axpy_norm_fast(ret,a,x,y);
}
template<class sobj,class vobj> strong_inline
RealD axpby_norm(Lattice<vobj> &ret,sobj a,sobj b,const Lattice<vobj> &x,const Lattice<vobj> &y){
ret.checkerboard = x.checkerboard;
conformable(ret,x);
conformable(x,y);
return axpby_norm_fast(ret,a,b,x,y);
}
}


@ -33,7 +33,7 @@ namespace Grid {
// Deterministic Reduction operations
////////////////////////////////////////////////////////////////////////////////////////////////////
template<class vobj> inline RealD norm2(const Lattice<vobj> &arg){
auto nrm = innerProduct(arg,arg);
return std::real(nrm);
}
@ -43,31 +43,84 @@ inline ComplexD innerProduct(const Lattice<vobj> &left,const Lattice<vobj> &righ
{
typedef typename vobj::scalar_type scalar_type;
typedef typename vobj::vector_typeD vector_type;
GridBase *grid = left._grid;
const int pad = 8;
ComplexD inner;
Vector<ComplexD> sumarray(grid->SumArraySize()*pad);
parallel_for(int thr=0;thr<grid->SumArraySize();thr++){
int nwork, mywork, myoff;
GridThread::GetWork(left._grid->oSites(),thr,mywork,myoff);
decltype(innerProductD(left._odata[0],right._odata[0])) vinner=zero; // private to thread; sub summation
for(int ss=myoff;ss<mywork+myoff; ss++){
vinner = vinner + innerProductD(left._odata[ss],right._odata[ss]);
}
// All threads sum across SIMD; reduce serial work at end
// one write per cacheline with streaming store
ComplexD tmp = Reduce(TensorRemove(vinner)) ;
vstream(sumarray[thr*pad],tmp);
}
inner=0.0;
for(int i=0;i<grid->SumArraySize();i++){
inner = inner+sumarray[i*pad];
}
right._grid->GlobalSum(inner);
return inner;
}
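// The pad factor spaces the per-thread partial sums at least a full 64-byte
// cache line apart, so the vstream streaming stores issued by different
// threads never touch the same line (no false sharing); the serial
// accumulation afterwards only reads one element per thread.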
/////////////////////////
// Fast axpby_norm
// z = a x + b y
// return norm z
/////////////////////////
template<class sobj,class vobj> strong_inline RealD
axpy_norm_fast(Lattice<vobj> &z,sobj a,const Lattice<vobj> &x,const Lattice<vobj> &y)
{
sobj one(1.0);
return axpby_norm_fast(z,a,one,x,y);
}
template<class sobj,class vobj> strong_inline RealD
axpby_norm_fast(Lattice<vobj> &z,sobj a,sobj b,const Lattice<vobj> &x,const Lattice<vobj> &y)
{
const int pad = 8;
z.checkerboard = x.checkerboard;
conformable(z,x);
conformable(x,y);
typedef typename vobj::scalar_type scalar_type;
typedef typename vobj::vector_typeD vector_type;
RealD nrm;
GridBase *grid = x._grid;
Vector<RealD> sumarray(grid->SumArraySize()*pad);
parallel_for(int thr=0;thr<grid->SumArraySize();thr++){
int nwork, mywork, myoff;
GridThread::GetWork(x._grid->oSites(),thr,mywork,myoff);
// private to thread; sub summation
decltype(innerProductD(z._odata[0],z._odata[0])) vnrm=zero;
for(int ss=myoff;ss<mywork+myoff; ss++){
vobj tmp = a*x._odata[ss]+b*y._odata[ss];
vnrm = vnrm + innerProductD(tmp,tmp);
vstream(z._odata[ss],tmp);
}
vstream(sumarray[thr*pad],real(Reduce(TensorRemove(vnrm)))) ;
}
nrm = 0.0; // sum across threads; linear in thread count but fast
for(int i=0;i<grid->SumArraySize();i++){
nrm = nrm+sumarray[i*pad];
}
z._grid->GlobalSum(nrm);
return nrm;
}
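// Minimal usage sketch (assuming fields z, x, y already defined on a grid):
//   RealD nrm = axpby_norm_fast(z, a, b, x, y); // z = a*x + b*y, returns |z|^2
// The fused kernel forms the linear combination, streams it into z and
// accumulates the site-local norm in a single pass, saving the second sweep
// through memory that a separate axpby followed by norm2 would cost.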
template<class Op,class T1>
inline auto sum(const LatticeUnaryExpression<Op,T1> & expr)


@ -158,10 +158,19 @@ namespace Grid {
// tens of seconds per trajectory so this is clean in all reasonable cases,
// and margin of safety is orders of magnitude.
// We could hack Sitmo to skip in the higher order words of state if necessary
//
// Replace with 2^30 ; avoid problem on large volumes
//
/////////////////////////////////////////////////////////////////////////////////////
// uint64_t skip = site+1; // Old init Skipped then drew. Checked compat with faster init
const int shift = 30;
uint64_t skip = site;
skip = skip<<shift;
assert((skip >> shift)==site); // check for overflow
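// e.g. site = 12345 gives skip = 12345 * 2^30 ~= 1.3e13 draws discarded;
// the assert fires once site >= 2^34, where the 64-bit shift would lose bits.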
eng.discard(skip);
// std::cout << " Engine " <<site << " state " <<eng<<std::endl;
}


@ -263,7 +263,7 @@ PARALLEL_CRITICAL
GridBase *grid,
std::vector<fobj> &iodata,
std::string file,
uint64_t& offset,
const std::string &format, int control,
uint32_t &nersc_csum,
uint32_t &scidac_csuma,
@ -431,14 +431,20 @@ PARALLEL_CRITICAL
MPI_Abort(MPI_COMM_WORLD, 1); //assert(ierr == 0);
}
std::cout << GridLogDebug << "MPI read I/O set view " << file << std::endl;
std::cout << GridLogDebug << "MPI write I/O set view " << file << std::endl;
ierr = MPI_File_set_view(fh, disp, mpiObject, fileArray, "native", MPI_INFO_NULL);
assert(ierr == 0);
std::cout << GridLogDebug << "MPI read I/O write all " << file << std::endl;
std::cout << GridLogDebug << "MPI write I/O write all " << file << std::endl;
ierr = MPI_File_write_all(fh, &iodata[0], 1, localArray, &status);
assert(ierr == 0);
MPI_Offset os;
MPI_File_get_position(fh, &os);
MPI_File_get_byte_offset(fh, os, &disp);
offset = disp;
MPI_File_close(&fh);
MPI_Type_free(&fileArray);
MPI_Type_free(&localArray);
@ -448,7 +454,7 @@ PARALLEL_CRITICAL
} else {
std::cout << GridLogMessage << "IOobject: C++ write I/O " << file << " : "
<< iodata.size() * sizeof(fobj) << " bytes" << std::endl;
<< iodata.size() * sizeof(fobj) << " bytes and offset " << offset << std::endl;
std::ofstream fout;
fout.exceptions ( std::fstream::failbit | std::fstream::badbit );
@ -495,6 +501,7 @@ PARALLEL_CRITICAL
exit(1);
#endif
}
offset = fout.tellp();
fout.close();
}
timer.Stop();
@ -699,7 +706,6 @@ PARALLEL_CRITICAL
IOobject(w,grid,iodata,file,offset,format,BINARYIO_WRITE|BINARYIO_LEXICOGRAPHIC,
nersc_csum,scidac_csuma,scidac_csumb);
iodata.resize(1);
{
std::vector<RngStateType> tmp(RngStateCount);


@ -182,6 +182,11 @@ class GridLimeReader : public BinaryIO {
{
filename= _filename;
File = fopen(filename.c_str(), "r");
if (File == nullptr)
{
std::cerr << "cannot open file '" << filename << "'" << std::endl;
abort();
}
LimeR = limeCreateReader(File);
}
/////////////////////////////////////////////


@ -49,7 +49,8 @@ inline double usecond(void) {
typedef std::chrono::system_clock GridClock;
typedef std::chrono::time_point<GridClock> GridTimePoint;
typedef std::chrono::milliseconds GridMillisecs;
typedef std::chrono::microseconds GridTime;
typedef std::chrono::microseconds GridUsecs;
inline std::ostream& operator<< (std::ostream & stream, const std::chrono::milliseconds & time)
@ -57,6 +58,11 @@ inline std::ostream& operator<< (std::ostream & stream, const std::chrono::milli
stream << time.count()<<" ms";
return stream;
}
inline std::ostream& operator<< (std::ostream & stream, const std::chrono::microseconds & time)
{
stream << time.count()<<" usec";
return stream;
}
class GridStopWatch {
private:


@ -444,6 +444,8 @@ void CayleyFermion5D<Impl>::SetCoefficientsInternal(RealD zolo_hi,std::vector<Co
assert(omega[i]!=Coeff_t(0.0));
bs[i] = 0.5*(bpc/omega[i] + bmc);
cs[i] = 0.5*(bpc/omega[i] - bmc);
std::cout<<GridLogMessage << "CayleyFermion5D "<<i<<" bs="<<bs[i]<<" cs="<<cs[i]<< std::endl;
}
////////////////////////////////////////////////////////


@ -470,6 +470,7 @@ void CayleyFermion5D<Impl>::MooeeInternalAsm(const FermionField &psi, FermionFie
a0 = a0+incr;
a1 = a1+incr;
a2 = a2+sizeof(typename Simd::scalar_type);
a2 = a2+sizeof(typename Simd::scalar_type); //ypj [debug]
}}
{
int lexa = s1+LLs*site;
@ -701,7 +702,8 @@ void CayleyFermion5D<Impl>::MooeeInternalZAsm(const FermionField &psi, FermionFi
}
a0 = a0+incr;
a1 = a1+incr;
a2 = a2+sizeof(typename Simd::scalar_type);
a2 = a2+sizeof(typename Simd::scalar_type);
a2 = a2+sizeof(typename Simd::scalar_type); // ypj [debug]
}}
{
int lexa = s1+LLs*site;


@ -475,7 +475,8 @@ namespace QCD {
}
a0 = a0 + incr;
a1 = a1 + incr;
a2 = a2 + sizeof(typename Simd::scalar_type);
a2 = a2 + sizeof(typename Simd::scalar_type);
a2 = a2 + sizeof(typename Simd::scalar_type); // ypj [debug]
}
}


@ -63,8 +63,8 @@ Author: Peter Boyle <pabobyle@ph.ed.ac.uk>
#include <Grid/qcd/action/fermion/MobiusFermion.h>
#include <Grid/qcd/action/fermion/MobiusEOFAFermion.h>
#include <Grid/qcd/action/fermion/ZMobiusFermion.h>
#include <Grid/qcd/action/fermion/ScaledShamirFermion.h>
//#include <Grid/qcd/action/fermion/SchurDiagTwoKappa.h>
#include <Grid/qcd/action/fermion/MobiusZolotarevFermion.h>
#include <Grid/qcd/action/fermion/ShamirZolotarevFermion.h>
#include <Grid/qcd/action/fermion/OverlapWilsonCayleyTanhFermion.h>


@ -63,9 +63,12 @@ namespace Grid {
virtual RealD M (const FermionField &in, FermionField &out)=0;
virtual RealD Mdag (const FermionField &in, FermionField &out)=0;
// Query the even even properties to make algorithmic decisions
virtual int ConstEE(void) { return 1; }; // clover returns zero as EE depends on gauge field
virtual int isTrivialEE(void) { return 0; };
virtual RealD Mass(void) {return 0.0;};
// half checkerboard operations
virtual void Meooe (const FermionField &in, FermionField &out)=0;
virtual void MeooeDag (const FermionField &in, FermionField &out)=0;
virtual void Mooee (const FermionField &in, FermionField &out)=0;


@ -764,7 +764,12 @@ class StaggeredImpl : public PeriodicGaugeImpl<GaugeImplTypes<S, Representation:
inline void loadLinkElement(Simd &reg, ref &memory) {
reg = memory;
}
inline void InsertGaugeField(DoubledGaugeField &U_ds,
const GaugeLinkField &U,int mu)
{
PokeIndex<LorentzIndex>(U_ds, U, mu);
}
inline void DoubleStore(GridBase *GaugeGrid,
DoubledGaugeField &UUUds, // for Naik term
DoubledGaugeField &Uds,
@ -803,8 +808,10 @@ class StaggeredImpl : public PeriodicGaugeImpl<GaugeImplTypes<S, Representation:
U = U *phases;
Udag = Udag *phases;
InsertGaugeField(Uds,U,mu);
InsertGaugeField(Uds,Udag,mu+4);
// PokeIndex<LorentzIndex>(Uds, U, mu);
// PokeIndex<LorentzIndex>(Uds, Udag, mu + 4);
// 3 hop based on thin links. Crazy huh ?
U = PeekIndex<LorentzIndex>(Uthin, mu);
@ -816,8 +823,8 @@ class StaggeredImpl : public PeriodicGaugeImpl<GaugeImplTypes<S, Representation:
UUU = UUU *phases;
UUUdag = UUUdag *phases;
InsertGaugeField(UUUds,UUU,mu);
InsertGaugeField(UUUds,UUUdag,mu+4);
}
}
@ -910,6 +917,23 @@ class StaggeredImpl : public PeriodicGaugeImpl<GaugeImplTypes<S, Representation:
mac(&phi(), &UU(), &chi());
}
inline void InsertGaugeField(DoubledGaugeField &U_ds,const GaugeLinkField &U,int mu)
{
GridBase *GaugeGrid = U_ds._grid;
parallel_for (int lidx = 0; lidx < GaugeGrid->lSites(); lidx++) {
SiteScalarGaugeLink ScalarU;
SiteDoubledGaugeField ScalarUds;
std::vector<int> lcoor;
GaugeGrid->LocalIndexToLocalCoor(lidx, lcoor);
peekLocalSite(ScalarUds, U_ds, lcoor);
peekLocalSite(ScalarU, U, lcoor);
ScalarUds(mu) = ScalarU();
pokeLocalSite(ScalarUds, U_ds, lcoor);
}
}
inline void DoubleStore(GridBase *GaugeGrid,
DoubledGaugeField &UUUds, // for Naik term
DoubledGaugeField &Uds,
@ -951,23 +975,8 @@ class StaggeredImpl : public PeriodicGaugeImpl<GaugeImplTypes<S, Representation:
U = U *phases;
Udag = Udag *phases;
InsertGaugeField(Uds,U,mu);
InsertGaugeField(Uds,Udag,mu+4);
// 3 hop based on thin links. Crazy huh ?
U = PeekIndex<LorentzIndex>(Uthin, mu);
@ -979,24 +988,8 @@ class StaggeredImpl : public PeriodicGaugeImpl<GaugeImplTypes<S, Representation:
UUU = UUU *phases;
UUUdag = UUUdag *phases;
InsertGaugeField(UUUds,UUU,mu);
InsertGaugeField(UUUds,UUUdag,mu+4);
}
}


@ -44,6 +44,7 @@ ImprovedStaggeredFermionStatic::displacements({1, 1, 1, 1, -1, -1, -1, -1, 3, 3,
template <class Impl>
ImprovedStaggeredFermion<Impl>::ImprovedStaggeredFermion(GridCartesian &Fgrid, GridRedBlackCartesian &Hgrid,
RealD _mass,
RealD _c1, RealD _c2,RealD _u0,
const ImplParams &p)
: Kernels(p),
_grid(&Fgrid),
@ -62,6 +63,16 @@ ImprovedStaggeredFermion<Impl>::ImprovedStaggeredFermion(GridCartesian &Fgrid, G
UUUmuOdd(&Hgrid) ,
_tmp(&Hgrid)
{
int vol4;
int LLs=1;
c1=_c1;
c2=_c2;
u0=_u0;
vol4= _grid->oSites();
Stencil.BuildSurfaceList(LLs,vol4);
vol4= _cbgrid->oSites();
StencilEven.BuildSurfaceList(LLs,vol4);
StencilOdd.BuildSurfaceList(LLs,vol4);
}
template <class Impl>
@ -69,22 +80,10 @@ ImprovedStaggeredFermion<Impl>::ImprovedStaggeredFermion(GaugeField &_Uthin, Gau
GridRedBlackCartesian &Hgrid, RealD _mass,
RealD _c1, RealD _c2,RealD _u0,
const ImplParams &p)
: ImprovedStaggeredFermion(Fgrid,Hgrid,_mass,_c1,_c2,_u0,p)
{
ImportGauge(_Uthin,_Ufat);
}
template <class Impl>
ImprovedStaggeredFermion<Impl>::ImprovedStaggeredFermion(GaugeField &_Uthin,GaugeField &_Utriple, GaugeField &_Ufat, GridCartesian &Fgrid,
GridRedBlackCartesian &Hgrid, RealD _mass,
const ImplParams &p)
: ImprovedStaggeredFermion(Fgrid,Hgrid,_mass,p)
{
ImportGaugeSimple(_Utriple,_Ufat);
}
////////////////////////////////////////////////////////////
// Momentum space propagator should be
@ -98,11 +97,6 @@ ImprovedStaggeredFermion<Impl>::ImprovedStaggeredFermion(GaugeField &_Uthin,Gaug
// of above link to implmement fourier based solver.
////////////////////////////////////////////////////////////
template <class Impl>
void ImprovedStaggeredFermion<Impl>::ImportGaugeSimple(const GaugeField &_Utriple,const GaugeField &_Ufat)
{
/////////////////////////////////////////////////////////////////
@ -125,6 +119,20 @@ void ImprovedStaggeredFermion<Impl>::ImportGaugeSimple(const GaugeField &_Utripl
PokeIndex<LorentzIndex>(Umu, -U, mu+4);
}
CopyGaugeCheckerboards();
}
template <class Impl>
void ImprovedStaggeredFermion<Impl>::ImportGaugeSimple(const DoubledGaugeField &_UUU,const DoubledGaugeField &_U)
{
Umu = _U;
UUUmu = _UUU;
CopyGaugeCheckerboards();
}
template <class Impl>
void ImprovedStaggeredFermion<Impl>::CopyGaugeCheckerboards(void)
{
pickCheckerboard(Even, UmuEven, Umu);
pickCheckerboard(Odd, UmuOdd , Umu);
pickCheckerboard(Even, UUUmuEven,UUUmu);
@ -160,10 +168,7 @@ void ImprovedStaggeredFermion<Impl>::ImportGauge(const GaugeField &_Uthin,const
PokeIndex<LorentzIndex>(UUUmu, U*(-0.5*c2/u0/u0/u0), mu+4);
}
CopyGaugeCheckerboards();
}
/////////////////////////////
@ -322,6 +327,7 @@ void ImprovedStaggeredFermion<Impl>::DhopDerivEO(GaugeField &mat, const FermionF
template <class Impl>
void ImprovedStaggeredFermion<Impl>::Dhop(const FermionField &in, FermionField &out, int dag) {
DhopCalls+=2;
conformable(in._grid, _grid); // verifies full grid
conformable(in._grid, out._grid);
@ -332,6 +338,7 @@ void ImprovedStaggeredFermion<Impl>::Dhop(const FermionField &in, FermionField &
template <class Impl>
void ImprovedStaggeredFermion<Impl>::DhopOE(const FermionField &in, FermionField &out, int dag) {
DhopCalls+=1;
conformable(in._grid, _cbgrid); // verifies half grid
conformable(in._grid, out._grid); // drops the cb check
@ -343,6 +350,7 @@ void ImprovedStaggeredFermion<Impl>::DhopOE(const FermionField &in, FermionField
template <class Impl>
void ImprovedStaggeredFermion<Impl>::DhopEO(const FermionField &in, FermionField &out, int dag) {
DhopCalls+=1;
conformable(in._grid, _cbgrid); // verifies half grid
conformable(in._grid, out._grid); // drops the cb check
@ -374,25 +382,193 @@ void ImprovedStaggeredFermion<Impl>::DhopInternal(StencilImpl &st, LebesgueOrder
DoubledGaugeField &U,
DoubledGaugeField &UUU,
const FermionField &in,
FermionField &out, int dag)
{
#ifdef GRID_OMP
if ( StaggeredKernelsStatic::Comms == StaggeredKernelsStatic::CommsAndCompute )
DhopInternalOverlappedComms(st,lo,U,UUU,in,out,dag);
else
#endif
DhopInternalSerialComms(st,lo,U,UUU,in,out,dag);
}
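// Sketch of the thread partition used in the overlapped path below
// (illustrative numbers): with nthreads=8 OpenMP threads and ncomms=2
// reserved for communication, the remaining 6 threads split the len interior
// sites as evenly as possible, e.g. len=1024 gives 4 threads of 171 sites and
// 2 of 170. Interior work overlaps the halo exchange; the exterior (surface)
// sites are processed afterwards from st.surface_list once CommsMerge has
// completed.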
template <class Impl>
void ImprovedStaggeredFermion<Impl>::DhopInternalOverlappedComms(StencilImpl &st, LebesgueOrder &lo,
DoubledGaugeField &U,
DoubledGaugeField &UUU,
const FermionField &in,
FermionField &out, int dag)
{
#ifdef GRID_OMP
Compressor compressor;
int len = U._grid->oSites();
const int LLs = 1;
DhopTotalTime -= usecond();
DhopFaceTime -= usecond();
st.Prepare();
st.HaloGather(in,compressor);
st.CommsMergeSHM(compressor);
DhopFaceTime += usecond();
//////////////////////////////////////////////////////////////////////////////////////////////////////
// Ugly explicit thread mapping introduced for OPA reasons.
//////////////////////////////////////////////////////////////////////////////////////////////////////
DhopComputeTime -= usecond();
#pragma omp parallel
{
int tid = omp_get_thread_num();
int nthreads = omp_get_num_threads();
int ncomms = CartesianCommunicator::nCommThreads;
if (ncomms == -1) ncomms = 1;
assert(nthreads > ncomms);
if (tid >= ncomms) {
nthreads -= ncomms;
int ttid = tid - ncomms;
int n = len;
int chunk = n / nthreads;
int rem = n % nthreads;
int myblock, myn;
if (ttid < rem) {
myblock = ttid * chunk + ttid;
myn = chunk+1;
} else {
myblock = ttid*chunk + rem;
myn = chunk;
}
// do the compute
if (dag == DaggerYes) {
for (int ss = myblock; ss < myblock+myn; ++ss) {
int sU = ss;
// Interior = 1; Exterior = 0; must implement for staggered
Kernels::DhopSiteDag(st,lo,U,UUU,st.CommBuf(),1,sU,in,out,1,0);
}
} else {
for (int ss = myblock; ss < myblock+myn; ++ss) {
// Interior = 1; Exterior = 0;
int sU = ss;
Kernels::DhopSite(st,lo,U,UUU,st.CommBuf(),1,sU,in,out,1,0);
}
}
} else {
st.CommunicateThreaded();
}
}
DhopComputeTime += usecond();
// First to enter, last to leave timing
DhopFaceTime -= usecond();
st.CommsMerge(compressor);
DhopFaceTime += usecond();
DhopComputeTime2 -= usecond();
if (dag == DaggerYes) {
int sz=st.surface_list.size();
parallel_for (int ss = 0; ss < sz; ss++) {
int sU = st.surface_list[ss];
Kernels::DhopSiteDag(st,lo,U,UUU,st.CommBuf(),1,sU,in,out,0,1);
}
} else {
int sz=st.surface_list.size();
parallel_for (int ss = 0; ss < sz; ss++) {
int sU = st.surface_list[ss];
Kernels::DhopSite(st,lo,U,UUU,st.CommBuf(),1,sU,in,out,0,1);
}
}
DhopComputeTime2 += usecond();
DhopTotalTime += usecond();
#else
assert(0);
#endif
}
template <class Impl>
void ImprovedStaggeredFermion<Impl>::DhopInternalSerialComms(StencilImpl &st, LebesgueOrder &lo,
DoubledGaugeField &U,
DoubledGaugeField &UUU,
const FermionField &in,
FermionField &out, int dag)
{
assert((dag == DaggerNo) || (dag == DaggerYes));
DhopTotalTime -= usecond();
DhopCommTime -= usecond();
Compressor compressor;
st.HaloExchange(in, compressor);
DhopCommTime += usecond();
DhopComputeTime -= usecond();
if (dag == DaggerYes) {
parallel_for (int sss = 0; sss < in._grid->oSites(); sss++) {
Kernels::DhopSiteDag(st, lo, U, UUU, st.CommBuf(), 1, sss, in, out);
}
} else {
parallel_for (int sss = 0; sss < in._grid->oSites(); sss++) {
Kernels::DhopSite(st, lo, U, UUU, st.CommBuf(), 1, sss, in, out);
}
}
DhopComputeTime += usecond();
DhopTotalTime += usecond();
};
////////////////////////////////////////////////////////////////
// Reporting
////////////////////////////////////////////////////////////////
template<class Impl>
void ImprovedStaggeredFermion<Impl>::Report(void)
{
std::vector<int> latt = GridDefaultLatt();
RealD volume = 1; for(int mu=0;mu<Nd;mu++) volume=volume*latt[mu];
RealD NP = _grid->_Nprocessors;
RealD NN = _grid->NodeCount();
std::cout << GridLogMessage << "#### Dhop calls report " << std::endl;
std::cout << GridLogMessage << "ImprovedStaggeredFermion Number of DhopEO Calls : "
<< DhopCalls << std::endl;
std::cout << GridLogMessage << "ImprovedStaggeredFermion TotalTime /Calls : "
<< DhopTotalTime / DhopCalls << " us" << std::endl;
std::cout << GridLogMessage << "ImprovedStaggeredFermion CommTime /Calls : "
<< DhopCommTime / DhopCalls << " us" << std::endl;
std::cout << GridLogMessage << "ImprovedStaggeredFermion ComputeTime/Calls : "
<< DhopComputeTime / DhopCalls << " us" << std::endl;
// Average the compute time
_grid->GlobalSum(DhopComputeTime);
DhopComputeTime/=NP;
RealD mflops = 1154*volume*DhopCalls/DhopComputeTime/2; // 2 for red black counting
std::cout << GridLogMessage << "Average mflops/s per call : " << mflops << std::endl;
std::cout << GridLogMessage << "Average mflops/s per call per rank : " << mflops/NP << std::endl;
std::cout << GridLogMessage << "Average mflops/s per call per node : " << mflops/NN << std::endl;
RealD Fullmflops = 1154*volume*DhopCalls/(DhopTotalTime)/2; // 2 for red black counting
std::cout << GridLogMessage << "Average mflops/s per call (full) : " << Fullmflops << std::endl;
std::cout << GridLogMessage << "Average mflops/s per call per rank (full): " << Fullmflops/NP << std::endl;
std::cout << GridLogMessage << "Average mflops/s per call per node (full): " << Fullmflops/NN << std::endl;
std::cout << GridLogMessage << "ImprovedStaggeredFermion Stencil" <<std::endl; Stencil.Report();
std::cout << GridLogMessage << "ImprovedStaggeredFermion StencilEven"<<std::endl; StencilEven.Report();
std::cout << GridLogMessage << "ImprovedStaggeredFermion StencilOdd" <<std::endl; StencilOdd.Report();
}
template<class Impl>
void ImprovedStaggeredFermion<Impl>::ZeroCounters(void)
{
DhopCalls = 0;
DhopTotalTime = 0;
DhopCommTime = 0;
DhopComputeTime = 0;
DhopFaceTime = 0;
DhopComputeTime2 = 0;
Stencil.ZeroCounters();
StencilEven.ZeroCounters();
StencilOdd.ZeroCounters();
}
////////////////////////////////////////////////////////
// Conserved current - not yet implemented.
////////////////////////////////////////////////////////


@ -49,6 +49,18 @@ class ImprovedStaggeredFermion : public StaggeredKernels<Impl>, public ImprovedS
FermionField _tmp;
FermionField &tmp(void) { return _tmp; }
////////////////////////////////////////
// Performance monitoring
////////////////////////////////////////
void Report(void);
void ZeroCounters(void);
double DhopTotalTime;
double DhopCalls;
double DhopCommTime;
double DhopComputeTime;
double DhopComputeTime2;
double DhopFaceTime;
///////////////////////////////////////////////////////////////
// Implement the abstract base
///////////////////////////////////////////////////////////////
@ -105,25 +117,34 @@ class ImprovedStaggeredFermion : public StaggeredKernels<Impl>, public ImprovedS
void DhopInternal(StencilImpl &st, LebesgueOrder &lo, DoubledGaugeField &U,DoubledGaugeField &UUU,
const FermionField &in, FermionField &out, int dag);
void DhopInternalSerialComms(StencilImpl &st, LebesgueOrder &lo, DoubledGaugeField &U,DoubledGaugeField &UUU,
const FermionField &in, FermionField &out, int dag);
void DhopInternalOverlappedComms(StencilImpl &st, LebesgueOrder &lo, DoubledGaugeField &U,DoubledGaugeField &UUU,
const FermionField &in, FermionField &out, int dag);
// Constructor
//////////////////////////////////////////////////////////////////////////
// Grid own interface Constructor
//////////////////////////////////////////////////////////////////////////
ImprovedStaggeredFermion(GaugeField &_Uthin, GaugeField &_Ufat, GridCartesian &Fgrid,
GridRedBlackCartesian &Hgrid, RealD _mass,
RealD _c1=9.0/8.0, RealD _c2=-1.0/24.0,RealD _u0=1.0,
const ImplParams &p = ImplParams());
ImprovedStaggeredFermion(GaugeField &_Uthin, GaugeField &_Utriple, GaugeField &_Ufat, GridCartesian &Fgrid,
GridRedBlackCartesian &Hgrid, RealD _mass,
RealD _c1, RealD _c2,RealD _u0,
const ImplParams &p = ImplParams());
//////////////////////////////////////////////////////////////////////////
// MILC constructor no gauge fields
//////////////////////////////////////////////////////////////////////////
ImprovedStaggeredFermion(GridCartesian &Fgrid, GridRedBlackCartesian &Hgrid, RealD _mass,
RealD _c1=1.0, RealD _c2=1.0,RealD _u0=1.0,
const ImplParams &p = ImplParams());
// DoubleStore impl dependent
void ImportGaugeSimple(const GaugeField &_Utriple, const GaugeField &_Ufat);
void ImportGauge(const GaugeField &_Uthin, const GaugeField &_Ufat);
void ImportGauge(const GaugeField &_Uthin);
void ImportGauge (const GaugeField &_Uthin ) { assert(0); }
void ImportGauge (const GaugeField &_Uthin ,const GaugeField &_Ufat);
void ImportGaugeSimple(const GaugeField &_UUU ,const GaugeField &_U);
void ImportGaugeSimple(const DoubledGaugeField &_UUU,const DoubledGaugeField &_U);
DoubledGaugeField &GetU(void) { return Umu ; } ;
DoubledGaugeField &GetUUU(void) { return UUUmu; };
void CopyGaugeCheckerboards(void);
///////////////////////////////////////////////////////////////
// Data members required to support the functionality
@ -132,7 +153,8 @@ class ImprovedStaggeredFermion : public StaggeredKernels<Impl>, public ImprovedS
// protected:
public:
// any other parameters of action ???
virtual int isTrivialEE(void) { return 1; };
virtual RealD Mass(void) { return mass; }
RealD mass;
RealD u0;
RealD c1;

View File

@ -41,8 +41,7 @@ ImprovedStaggeredFermion5DStatic::displacements({1, 1, 1, 1, -1, -1, -1, -1, 3,
// 5d lattice for DWF.
template<class Impl>
ImprovedStaggeredFermion5D<Impl>::ImprovedStaggeredFermion5D(GaugeField &_Uthin,GaugeField &_Ufat,
GridCartesian &FiveDimGrid,
ImprovedStaggeredFermion5D<Impl>::ImprovedStaggeredFermion5D(GridCartesian &FiveDimGrid,
GridRedBlackCartesian &FiveDimRedBlackGrid,
GridCartesian &FourDimGrid,
GridRedBlackCartesian &FourDimRedBlackGrid,
@ -121,16 +120,74 @@ ImprovedStaggeredFermion5D<Impl>::ImprovedStaggeredFermion5D(GaugeField &_Uthin,
assert(FiveDimGrid._simd_layout[0] ==1);
}
int LLs = FiveDimGrid._rdimensions[0];
int vol4= FourDimGrid.oSites();
Stencil.BuildSurfaceList(LLs,vol4);
// Repeat for the red-black (checkerboarded) four-dimensional grid
vol4=FourDimRedBlackGrid.oSites();
StencilEven.BuildSurfaceList(LLs,vol4);
StencilOdd.BuildSurfaceList(LLs,vol4);
}
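// Note: BuildSurfaceList precomputes, once per grid, the set of 4-d sites
// with at least one neighbour that is neither local nor reachable through
// shared memory; the overlapped Dhop below replays exactly this list in its
// exterior (surface) pass.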
template <class Impl>
void ImprovedStaggeredFermion5D<Impl>::CopyGaugeCheckerboards(void)
{
pickCheckerboard(Even, UmuEven, Umu);
pickCheckerboard(Odd, UmuOdd , Umu);
pickCheckerboard(Even, UUUmuEven,UUUmu);
pickCheckerboard(Odd, UUUmuOdd, UUUmu);
}
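// Note: CopyGaugeCheckerboards factors out the four pickCheckerboard calls
// previously inlined in ImportGauge, splitting both the one-link (Umu) and
// three-link (UUUmu) doubled gauge fields into even and odd half-lattices
// for the red-black solvers.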
template<class Impl>
ImprovedStaggeredFermion5D<Impl>::ImprovedStaggeredFermion5D(GaugeField &_Uthin,GaugeField &_Ufat,
GridCartesian &FiveDimGrid,
GridRedBlackCartesian &FiveDimRedBlackGrid,
GridCartesian &FourDimGrid,
GridRedBlackCartesian &FourDimRedBlackGrid,
RealD _mass,
RealD _c1,RealD _c2, RealD _u0,
const ImplParams &p) :
ImprovedStaggeredFermion5D(FiveDimGrid,FiveDimRedBlackGrid,
FourDimGrid,FourDimRedBlackGrid,
_mass,_c1,_c2,_u0,p)
{
ImportGauge(_Uthin,_Ufat);
}
///////////////////////////////////////////////////
// For MILC use; pass three link U's and 1 link U
///////////////////////////////////////////////////
template <class Impl>
void ImprovedStaggeredFermion5D<Impl>::ImportGauge(const GaugeField &_Uthin)
void ImprovedStaggeredFermion5D<Impl>::ImportGaugeSimple(const GaugeField &_Utriple,const GaugeField &_Ufat)
{
ImportGauge(_Uthin,_Uthin);
};
/////////////////////////////////////////////////////////////////
// Trivial import; phases and fattening and such like preapplied
/////////////////////////////////////////////////////////////////
for (int mu = 0; mu < Nd; mu++) {
auto U = PeekIndex<LorentzIndex>(_Utriple, mu);
Impl::InsertGaugeField(UUUmu,U,mu);
U = adj( Cshift(U, mu, -3));
Impl::InsertGaugeField(UUUmu,-U,mu+4);
U = PeekIndex<LorentzIndex>(_Ufat, mu);
Impl::InsertGaugeField(Umu,U,mu);
U = adj( Cshift(U, mu, -1));
Impl::InsertGaugeField(Umu,-U,mu+4);
}
CopyGaugeCheckerboards();
}
template <class Impl>
void ImprovedStaggeredFermion5D<Impl>::ImportGaugeSimple(const DoubledGaugeField &_UUU,const DoubledGaugeField &_U)
{
/////////////////////////////////////////////////////////////////
// Trivial import; phases and fattening and such like preapplied
/////////////////////////////////////////////////////////////////
Umu = _U;
UUUmu = _UUU;
CopyGaugeCheckerboards();
}
template<class Impl>
void ImprovedStaggeredFermion5D<Impl>::ImportGauge(const GaugeField &_Uthin,const GaugeField &_Ufat)
{
@ -159,10 +216,7 @@ void ImprovedStaggeredFermion5D<Impl>::ImportGauge(const GaugeField &_Uthin,cons
PokeIndex<LorentzIndex>(UUUmu, U*(-0.5*c2/u0/u0/u0), mu+4);
}
pickCheckerboard(Even, UmuEven, Umu);
pickCheckerboard(Odd, UmuOdd , Umu);
pickCheckerboard(Even, UUUmuEven, UUUmu);
pickCheckerboard(Odd, UUUmuOdd, UUUmu);
CopyGaugeCheckerboards();
}
template<class Impl>
void ImprovedStaggeredFermion5D<Impl>::DhopDir(const FermionField &in, FermionField &out,int dir5,int disp)
@ -223,6 +277,162 @@ void ImprovedStaggeredFermion5D<Impl>::DhopDerivOE(GaugeField &mat,
assert(0);
}
/*CHANGE */
template<class Impl>
void ImprovedStaggeredFermion5D<Impl>::DhopInternal(StencilImpl & st, LebesgueOrder &lo,
DoubledGaugeField & U,DoubledGaugeField & UUU,
const FermionField &in, FermionField &out,int dag)
{
#ifdef GRID_OMP
if ( StaggeredKernelsStatic::Comms == StaggeredKernelsStatic::CommsAndCompute )
DhopInternalOverlappedComms(st,lo,U,UUU,in,out,dag);
else
#endif
DhopInternalSerialComms(st,lo,U,UUU,in,out,dag);
}
template<class Impl>
void ImprovedStaggeredFermion5D<Impl>::DhopInternalOverlappedComms(StencilImpl & st, LebesgueOrder &lo,
DoubledGaugeField & U,DoubledGaugeField & UUU,
const FermionField &in, FermionField &out,int dag)
{
#ifdef GRID_OMP
// assert((dag==DaggerNo) ||(dag==DaggerYes));
Compressor compressor;
int LLs = in._grid->_rdimensions[0];
int len = U._grid->oSites();
DhopFaceTime-=usecond();
st.Prepare();
st.HaloGather(in,compressor);
// st.HaloExchangeOptGather(in,compressor); // Wilson compressor
st.CommsMergeSHM(compressor);// Could do this inside parallel region overlapped with comms
DhopFaceTime+=usecond();
double ctime=0;
double ptime=0;
//////////////////////////////////////////////////////////////////////////////////////////////////////
// Ugly explicit thread mapping introduced for OPA reasons.
//////////////////////////////////////////////////////////////////////////////////////////////////////
#pragma omp parallel reduction(max:ctime) reduction(max:ptime)
{
int tid = omp_get_thread_num();
int nthreads = omp_get_num_threads();
int ncomms = CartesianCommunicator::nCommThreads;
if (ncomms == -1) ncomms = 1;
assert(nthreads > ncomms);
if (tid >= ncomms) {
double start = usecond();
nthreads -= ncomms;
int ttid = tid - ncomms;
int n = U._grid->oSites(); // 4d vol
int chunk = n / nthreads;
int rem = n % nthreads;
int myblock, myn;
if (ttid < rem) {
myblock = ttid * chunk + ttid;
myn = chunk+1;
} else {
myblock = ttid*chunk + rem;
myn = chunk;
}
// do the compute
if (dag == DaggerYes) {
for (int ss = myblock; ss < myblock+myn; ++ss) {
int sU = ss;
// Interior = 1; Exterior = 0; must implement for staggered
Kernels::DhopSiteDag(st,lo,U,UUU,st.CommBuf(),LLs,sU,in,out,1,0); //<---------
}
} else {
for (int ss = myblock; ss < myblock+myn; ++ss) {
// Interior = 1; Exterior = 0;
int sU = ss;
Kernels::DhopSite(st,lo,U,UUU,st.CommBuf(),LLs,sU,in,out,1,0); //<------------
}
}
ptime = usecond() - start;
} else {
double start = usecond();
st.CommunicateThreaded();
ctime = usecond() - start;
}
}
DhopCommTime += ctime;
DhopComputeTime+=ptime;
// First to enter, last to leave timing
st.CollateThreads();
DhopFaceTime-=usecond();
st.CommsMerge(compressor);
DhopFaceTime+=usecond();
DhopComputeTime2-=usecond();
if (dag == DaggerYes) {
int sz=st.surface_list.size();
parallel_for (int ss = 0; ss < sz; ss++) {
int sU = st.surface_list[ss];
Kernels::DhopSiteDag(st,lo,U,UUU,st.CommBuf(),LLs,sU,in,out,0,1); //<----------
}
} else {
int sz=st.surface_list.size();
parallel_for (int ss = 0; ss < sz; ss++) {
int sU = st.surface_list[ss];
Kernels::DhopSite(st,lo,U,UUU,st.CommBuf(),LLs,sU,in,out,0,1);//<----------
}
}
DhopComputeTime2+=usecond();
#else
assert(0);
#endif
}
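// A self-contained sketch of the block partition used above to share n sites
// among the compute threads; with rem = n % nthreads the first rem threads
// take chunk+1 sites, so every site is covered exactly once. For example
// n=10, nthreads=4 yields the ranges [0,3) [3,6) [6,8) [8,10). The helper
// name is illustrative only.
static void ComputeThreadRange(int n, int nthreads, int ttid,
                               int &myblock, int &myn) {
  int chunk = n / nthreads;
  int rem   = n % nthreads;
  if (ttid < rem) { myblock = ttid * (chunk + 1); myn = chunk + 1; }
  else            { myblock = ttid * chunk + rem; myn = chunk;     }
}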
template<class Impl>
void ImprovedStaggeredFermion5D<Impl>::DhopInternalSerialComms(StencilImpl & st, LebesgueOrder &lo,
DoubledGaugeField & U,DoubledGaugeField & UUU,
const FermionField &in, FermionField &out,int dag)
{
Compressor compressor;
int LLs = in._grid->_rdimensions[0];
//double t1=usecond();
DhopTotalTime -= usecond();
DhopCommTime -= usecond();
st.HaloExchange(in,compressor);
DhopCommTime += usecond();
DhopComputeTime -= usecond();
// Dhop takes the 4d grid from U, and makes a 5d index for fermion
if (dag == DaggerYes) {
parallel_for (int ss = 0; ss < U._grid->oSites(); ss++) {
int sU=ss;
Kernels::DhopSiteDag(st, lo, U, UUU, st.CommBuf(), LLs, sU,in, out);
}
} else {
parallel_for (int ss = 0; ss < U._grid->oSites(); ss++) {
int sU=ss;
Kernels::DhopSite(st,lo,U,UUU,st.CommBuf(),LLs,sU,in,out);
}
}
DhopComputeTime += usecond();
DhopTotalTime += usecond();
//double t2=usecond();
//std::cout << __FILE__ << " " << __func__ << " Total Time " << DhopTotalTime << std::endl;
//std::cout << __FILE__ << " " << __func__ << " Total Time Org " << t2-t1 << std::endl;
//std::cout << __FILE__ << " " << __func__ << " Comml Time " << DhopCommTime << std::endl;
//std::cout << __FILE__ << " " << __func__ << " Compute Time " << DhopComputeTime << std::endl;
}
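// Note: which of the two paths runs is selected by
// StaggeredKernelsStatic::Comms (CommsAndCompute overlaps communication with
// the interior sweep; CommsThenCompute takes the serial path above), while
// CartesianCommunicator::nCommThreads fixes how many OpenMP threads the
// overlapped path dedicates to communication.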
/*CHANGE END*/
/* ORG
template<class Impl>
void ImprovedStaggeredFermion5D<Impl>::DhopInternal(StencilImpl & st, LebesgueOrder &lo,
DoubledGaugeField & U,DoubledGaugeField & UUU,
@ -254,6 +464,7 @@ void ImprovedStaggeredFermion5D<Impl>::DhopInternal(StencilImpl & st, LebesgueOr
DhopComputeTime += usecond();
DhopTotalTime += usecond();
}
*/
template<class Impl>
@ -336,6 +547,9 @@ void ImprovedStaggeredFermion5D<Impl>::ZeroCounters(void)
DhopTotalTime = 0;
DhopCommTime = 0;
DhopComputeTime = 0;
DhopFaceTime = 0;
Stencil.ZeroCounters();
StencilEven.ZeroCounters();
StencilOdd.ZeroCounters();

View File

@ -64,6 +64,8 @@ namespace QCD {
double DhopCalls;
double DhopCommTime;
double DhopComputeTime;
double DhopComputeTime2;
double DhopFaceTime;
///////////////////////////////////////////////////////////////
// Implement the abstract base
@ -119,7 +121,27 @@ namespace QCD {
FermionField &out,
int dag);
void DhopInternalOverlappedComms(StencilImpl & st,
LebesgueOrder &lo,
DoubledGaugeField &U,
DoubledGaugeField &UUU,
const FermionField &in,
FermionField &out,
int dag);
void DhopInternalSerialComms(StencilImpl & st,
LebesgueOrder &lo,
DoubledGaugeField &U,
DoubledGaugeField &UUU,
const FermionField &in,
FermionField &out,
int dag);
// Constructors
////////////////////////////////////////////////////////////////////////////////////////////////
// Grid internal interface -- Thin link and fat link, with coefficients
////////////////////////////////////////////////////////////////////////////////////////////////
ImprovedStaggeredFermion5D(GaugeField &_Uthin,
GaugeField &_Ufat,
GridCartesian &FiveDimGrid,
@ -127,17 +149,37 @@ namespace QCD {
GridCartesian &FourDimGrid,
GridRedBlackCartesian &FourDimRedBlackGrid,
double _mass,
RealD _c1=9.0/8.0, RealD _c2=-1.0/24.0,RealD _u0=1.0,
RealD _c1, RealD _c2,RealD _u0,
const ImplParams &p= ImplParams());
// DoubleStore
void ImportGauge(const GaugeField &_U);
void ImportGauge(const GaugeField &_Uthin,const GaugeField &_Ufat);
////////////////////////////////////////////////////////////////////////////////////////////////
// MILC constructor ; triple links, no rescale factors; must be externally pre multiplied
////////////////////////////////////////////////////////////////////////////////////////////////
ImprovedStaggeredFermion5D(GridCartesian &FiveDimGrid,
GridRedBlackCartesian &FiveDimRedBlackGrid,
GridCartesian &FourDimGrid,
GridRedBlackCartesian &FourDimRedBlackGrid,
double _mass,
RealD _c1=1.0, RealD _c2=1.0,RealD _u0=1.0,
const ImplParams &p= ImplParams());
// DoubleStore gauge field in operator
void ImportGauge (const GaugeField &_Uthin ) { assert(0); }
void ImportGauge (const GaugeField &_Uthin ,const GaugeField &_Ufat);
void ImportGaugeSimple(const GaugeField &_UUU,const GaugeField &_U);
void ImportGaugeSimple(const DoubledGaugeField &_UUU,const DoubledGaugeField &_U);
// Give a reference; can be used to do an assignment or copy back out after import
// if Carleton wants to cache them and not use the ImportSimple
DoubledGaugeField &GetU(void) { return Umu ; } ;
DoubledGaugeField &GetUUU(void) { return UUUmu; };
void CopyGaugeCheckerboards(void);
///////////////////////////////////////////////////////////////
// Data members required to support the functionality
///////////////////////////////////////////////////////////////
public:
virtual int isTrivialEE(void) { return 1; };
virtual RealD Mass(void) { return mass; }
GridBase *_FourDimGrid;
GridBase *_FourDimRedBlackGrid;

View File

@ -853,7 +853,8 @@ namespace QCD {
a0 = a0 + incr;
a1 = a1 + incr;
a2 = a2 + sizeof(typename Simd::scalar_type);
a2 = a2 + sizeof(typename Simd::scalar_type);
a2 = a2 + sizeof(typename Simd::scalar_type); // ypj [debug]
}
}

View File

@ -1,3 +1,4 @@
#if 1
/*************************************************************************************
Grid physics library, www.github.com/paboyle/Grid
@ -97,6 +98,117 @@ namespace Grid {
}
};
#if 0
///////////////////////////////////////////////////////////////////////////////////////////////////////
// Copied from DiagTwoSolve
///////////////////////////////////////////////////////////////////////////////////////////////////////
template<class Field> class SchurRedBlackDiagTwoSolve {
private:
OperatorFunction<Field> & _HermitianRBSolver;
int CBfactorise;
public:
/////////////////////////////////////////////////////
// Wrap the usual normal equations Schur trick
/////////////////////////////////////////////////////
SchurRedBlackDiagTwoSolve(OperatorFunction<Field> &HermitianRBSolver) :
_HermitianRBSolver(HermitianRBSolver)
{
CBfactorise=0;
};
template<class Matrix>
void operator() (Matrix & _Matrix,const Field &in, Field &out){
// FIXME CGdiagonalMee not implemented virtual function
// FIXME use CBfactorise to control schur decomp
GridBase *grid = _Matrix.RedBlackGrid();
GridBase *fgrid= _Matrix.Grid();
SchurDiagTwoOperator<Matrix,Field> _HermOpEO(_Matrix);
Field src_e(grid);
Field src_o(grid);
Field sol_e(grid);
Field sol_o(grid);
Field tmp(grid);
Field Mtmp(grid);
Field resid(fgrid);
pickCheckerboard(Even,src_e,in);
pickCheckerboard(Odd ,src_o,in);
pickCheckerboard(Even,sol_e,out);
pickCheckerboard(Odd ,sol_o,out);
/////////////////////////////////////////////////////
// src_o = Mdag * (source_o - Moe MeeInv source_e)
/////////////////////////////////////////////////////
_Matrix.MooeeInv(src_e,tmp); assert( tmp.checkerboard ==Even);
_Matrix.Meooe (tmp,Mtmp); assert( Mtmp.checkerboard ==Odd);
tmp=src_o-Mtmp; assert( tmp.checkerboard ==Odd);
// get the right MpcDag
_HermOpEO.MpcDag(tmp,src_o); assert(src_o.checkerboard ==Odd);
//////////////////////////////////////////////////////////////
// Call the red-black solver
//////////////////////////////////////////////////////////////
std::cout<<GridLogMessage << "SchurRedBlack solver calling the MpcDagMp solver" <<std::endl;
// _HermitianRBSolver(_HermOpEO,src_o,sol_o); assert(sol_o.checkerboard==Odd);
_HermitianRBSolver(_HermOpEO,src_o,tmp); assert(tmp.checkerboard==Odd);
_Matrix.MooeeInv(tmp,sol_o); assert( sol_o.checkerboard ==Odd);
///////////////////////////////////////////////////
// sol_e = M_ee^-1 * ( src_e - Meo sol_o )...
///////////////////////////////////////////////////
_Matrix.Meooe(sol_o,tmp); assert( tmp.checkerboard ==Even);
src_e = src_e-tmp; assert( src_e.checkerboard ==Even);
_Matrix.MooeeInv(src_e,sol_e); assert( sol_e.checkerboard ==Even);
setCheckerboard(out,sol_e); assert( sol_e.checkerboard ==Even);
setCheckerboard(out,sol_o); assert( sol_o.checkerboard ==Odd );
// Verify the unprec residual
_Matrix.M(out,resid);
resid = resid-in;
RealD ns = norm2(in);
RealD nr = norm2(resid);
std::cout<<GridLogMessage << "SchurRedBlackDiagTwo solver true unprec resid "<< std::sqrt(nr/ns) <<" nr "<< nr <<" ns "<<ns << std::endl;
}
};
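// The algebra implemented by operator() above, written out. With
// M = ( Mee Meo ; Moe Moo ) on even/odd checkerboards and M psi = eta:
//   src_o = eta_o - Moe Mee^{-1} eta_e
// the red-black solver is applied (via the MpcDag Mpc normal equations) to
// the right-preconditioned Schur operator
//   Mpc = 1 - Moe Mee^{-1} Meo Moo^{-1}
// acting on tmp = Moo psi_o, after which
//   psi_o = Moo^{-1} tmp
//   psi_e = Mee^{-1} ( eta_e - Meo psi_o )
// so only the odd-checkerboard system is ever inverted iteratively.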
#endif
namespace QCD{
//
// Determinant is det of middle factor
// This assumes Mee is independent of U.
//
//
template<class Impl>
class SchurDifferentiableDiagTwo: public SchurDiagTwoOperator<FermionOperator<Impl>,typename Impl::FermionField>
{
public:
INHERIT_IMPL_TYPES(Impl);
typedef FermionOperator<Impl> Matrix;
SchurDifferentiableDiagTwo (Matrix &Mat) : SchurDiagTwoOperator<Matrix,FermionField>(Mat) {};
};
#if 0
template<class Impl>
class SchurDifferentiableDiagTwoKappa : public SchurDiagTwoKappaOperator<FermionOperator<Impl>,typename Impl::FermionField>
{
public:
INHERIT_IMPL_TYPES(Impl);
typedef FermionOperator<Impl> Matrix;
SchurDifferentiableDiagTwoKappa (Matrix &Mat) : SchurDiagTwoKappaOperator<Matrix,FermionField>(Mat) {};
};
#endif
}
}
#endif
#endif

View File

@ -32,223 +32,241 @@ namespace Grid {
namespace QCD {
int StaggeredKernelsStatic::Opt= StaggeredKernelsStatic::OptGeneric;
int StaggeredKernelsStatic::Comms = StaggeredKernelsStatic::CommsAndCompute;
#define GENERIC_STENCIL_LEG(U,Dir,skew,multLink) \
SE = st.GetEntry(ptype, Dir+skew, sF); \
if (SE->_is_local ) { \
if (SE->_permute) { \
chi_p = &chi; \
permute(chi, in._odata[SE->_offset], ptype); \
} else { \
chi_p = &in._odata[SE->_offset]; \
} \
} else { \
chi_p = &buf[SE->_offset]; \
} \
multLink(Uchi, U._odata[sU], *chi_p, Dir);
#define GENERIC_STENCIL_LEG_INT(U,Dir,skew,multLink) \
SE = st.GetEntry(ptype, Dir+skew, sF); \
if (SE->_is_local ) { \
if (SE->_permute) { \
chi_p = &chi; \
permute(chi, in._odata[SE->_offset], ptype); \
} else { \
chi_p = &in._odata[SE->_offset]; \
} \
} else if ( st.same_node[Dir] ) { \
chi_p = &buf[SE->_offset]; \
} \
if (SE->_is_local || st.same_node[Dir] ) { \
multLink(Uchi, U._odata[sU], *chi_p, Dir); \
}
#define GENERIC_STENCIL_LEG_EXT(U,Dir,skew,multLink) \
SE = st.GetEntry(ptype, Dir+skew, sF); \
if ((!SE->_is_local) && (!st.same_node[Dir]) ) { \
nmu++; \
chi_p = &buf[SE->_offset]; \
multLink(Uchi, U._odata[sU], *chi_p, Dir); \
}
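// Note: the three leg macros implement the split needed for comms/compute
// overlap. GENERIC_STENCIL_LEG always consumes the neighbour; the _INT form
// only consumes legs that are local or on the same node via shared memory
// (st.same_node[Dir]), so it can run before MPI completes; the _EXT form
// consumes only the remaining off-node legs and counts them in nmu, so the
// exterior pass knows whether the site touched the halo at all.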
template <class Impl>
StaggeredKernels<Impl>::StaggeredKernels(const ImplParams &p) : Base(p){};
////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////
// Generic implementation; move to different file?
////////////////////////////////////////////
// Int, Ext, Int+Ext cases for comms overlap
////////////////////////////////////////////////////////////////////////////////////
template <class Impl>
void StaggeredKernels<Impl>::DhopSiteDepth(StencilImpl &st, LebesgueOrder &lo, DoubledGaugeField &U,
SiteSpinor *buf, int sF,
int sU, const FermionField &in, SiteSpinor &out,int threeLink) {
void StaggeredKernels<Impl>::DhopSiteGeneric(StencilImpl &st, LebesgueOrder &lo,
DoubledGaugeField &U, DoubledGaugeField &UUU,
SiteSpinor *buf, int LLs, int sU,
const FermionField &in, FermionField &out, int dag) {
const SiteSpinor *chi_p;
SiteSpinor chi;
SiteSpinor Uchi;
StencilEntry *SE;
int ptype;
int skew = 0;
if (threeLink) skew=8;
///////////////////////////
// Xp
///////////////////////////
int skew;
SE = st.GetEntry(ptype, Xp+skew, sF);
if (SE->_is_local) {
if (SE->_permute) {
chi_p = &chi;
permute(chi, in._odata[SE->_offset], ptype);
} else {
chi_p = &in._odata[SE->_offset];
}
} else {
chi_p = &buf[SE->_offset];
for(int s=0;s<LLs;s++){
int sF=LLs*sU+s;
skew = 0;
GENERIC_STENCIL_LEG(U,Xp,skew,Impl::multLink);
GENERIC_STENCIL_LEG(U,Yp,skew,Impl::multLinkAdd);
GENERIC_STENCIL_LEG(U,Zp,skew,Impl::multLinkAdd);
GENERIC_STENCIL_LEG(U,Tp,skew,Impl::multLinkAdd);
GENERIC_STENCIL_LEG(U,Xm,skew,Impl::multLinkAdd);
GENERIC_STENCIL_LEG(U,Ym,skew,Impl::multLinkAdd);
GENERIC_STENCIL_LEG(U,Zm,skew,Impl::multLinkAdd);
GENERIC_STENCIL_LEG(U,Tm,skew,Impl::multLinkAdd);
skew=8;
GENERIC_STENCIL_LEG(UUU,Xp,skew,Impl::multLinkAdd);
GENERIC_STENCIL_LEG(UUU,Yp,skew,Impl::multLinkAdd);
GENERIC_STENCIL_LEG(UUU,Zp,skew,Impl::multLinkAdd);
GENERIC_STENCIL_LEG(UUU,Tp,skew,Impl::multLinkAdd);
GENERIC_STENCIL_LEG(UUU,Xm,skew,Impl::multLinkAdd);
GENERIC_STENCIL_LEG(UUU,Ym,skew,Impl::multLinkAdd);
GENERIC_STENCIL_LEG(UUU,Zm,skew,Impl::multLinkAdd);
GENERIC_STENCIL_LEG(UUU,Tm,skew,Impl::multLinkAdd);
if ( dag ) {
Uchi = - Uchi;
}
vstream(out._odata[sF], Uchi);
}
Impl::multLink(Uchi, U._odata[sU], *chi_p, Xp);
///////////////////////////
// Yp
///////////////////////////
SE = st.GetEntry(ptype, Yp+skew, sF);
if (SE->_is_local) {
if (SE->_permute) {
chi_p = &chi;
permute(chi, in._odata[SE->_offset], ptype);
} else {
chi_p = &in._odata[SE->_offset];
}
} else {
chi_p = &buf[SE->_offset];
}
Impl::multLinkAdd(Uchi, U._odata[sU], *chi_p, Yp);
///////////////////////////
// Zp
///////////////////////////
SE = st.GetEntry(ptype, Zp+skew, sF);
if (SE->_is_local) {
if (SE->_permute) {
chi_p = &chi;
permute(chi, in._odata[SE->_offset], ptype);
} else {
chi_p = &in._odata[SE->_offset];
}
} else {
chi_p = &buf[SE->_offset];
}
Impl::multLinkAdd(Uchi, U._odata[sU], *chi_p, Zp);
///////////////////////////
// Tp
///////////////////////////
SE = st.GetEntry(ptype, Tp+skew, sF);
if (SE->_is_local) {
if (SE->_permute) {
chi_p = &chi;
permute(chi, in._odata[SE->_offset], ptype);
} else {
chi_p = &in._odata[SE->_offset];
}
} else {
chi_p = &buf[SE->_offset];
}
Impl::multLinkAdd(Uchi, U._odata[sU], *chi_p, Tp);
///////////////////////////
// Xm
///////////////////////////
SE = st.GetEntry(ptype, Xm+skew, sF);
if (SE->_is_local) {
if (SE->_permute) {
chi_p = &chi;
permute(chi, in._odata[SE->_offset], ptype);
} else {
chi_p = &in._odata[SE->_offset];
}
} else {
chi_p = &buf[SE->_offset];
}
Impl::multLinkAdd(Uchi, U._odata[sU], *chi_p, Xm);
///////////////////////////
// Ym
///////////////////////////
SE = st.GetEntry(ptype, Ym+skew, sF);
if (SE->_is_local) {
if (SE->_permute) {
chi_p = &chi;
permute(chi, in._odata[SE->_offset], ptype);
} else {
chi_p = &in._odata[SE->_offset];
}
} else {
chi_p = &buf[SE->_offset];
}
Impl::multLinkAdd(Uchi, U._odata[sU], *chi_p, Ym);
///////////////////////////
// Zm
///////////////////////////
SE = st.GetEntry(ptype, Zm+skew, sF);
if (SE->_is_local) {
if (SE->_permute) {
chi_p = &chi;
permute(chi, in._odata[SE->_offset], ptype);
} else {
chi_p = &in._odata[SE->_offset];
}
} else {
chi_p = &buf[SE->_offset];
}
Impl::multLinkAdd(Uchi, U._odata[sU], *chi_p, Zm);
///////////////////////////
// Tm
///////////////////////////
SE = st.GetEntry(ptype, Tm+skew, sF);
if (SE->_is_local) {
if (SE->_permute) {
chi_p = &chi;
permute(chi, in._odata[SE->_offset], ptype);
} else {
chi_p = &in._odata[SE->_offset];
}
} else {
chi_p = &buf[SE->_offset];
}
Impl::multLinkAdd(Uchi, U._odata[sU], *chi_p, Tm);
vstream(out, Uchi);
};
///////////////////////////////////////////////////
// Only contributions from interior of our node
///////////////////////////////////////////////////
template <class Impl>
void StaggeredKernels<Impl>::DhopSiteGenericInt(StencilImpl &st, LebesgueOrder &lo,
DoubledGaugeField &U, DoubledGaugeField &UUU,
SiteSpinor *buf, int LLs, int sU,
const FermionField &in, FermionField &out,int dag) {
const SiteSpinor *chi_p;
SiteSpinor chi;
SiteSpinor Uchi;
StencilEntry *SE;
int ptype;
int skew ;
for(int s=0;s<LLs;s++){
int sF=LLs*sU+s;
skew = 0;
Uchi=zero;
GENERIC_STENCIL_LEG_INT(U,Xp,skew,Impl::multLinkAdd);
GENERIC_STENCIL_LEG_INT(U,Yp,skew,Impl::multLinkAdd);
GENERIC_STENCIL_LEG_INT(U,Zp,skew,Impl::multLinkAdd);
GENERIC_STENCIL_LEG_INT(U,Tp,skew,Impl::multLinkAdd);
GENERIC_STENCIL_LEG_INT(U,Xm,skew,Impl::multLinkAdd);
GENERIC_STENCIL_LEG_INT(U,Ym,skew,Impl::multLinkAdd);
GENERIC_STENCIL_LEG_INT(U,Zm,skew,Impl::multLinkAdd);
GENERIC_STENCIL_LEG_INT(U,Tm,skew,Impl::multLinkAdd);
skew=8;
GENERIC_STENCIL_LEG_INT(UUU,Xp,skew,Impl::multLinkAdd);
GENERIC_STENCIL_LEG_INT(UUU,Yp,skew,Impl::multLinkAdd);
GENERIC_STENCIL_LEG_INT(UUU,Zp,skew,Impl::multLinkAdd);
GENERIC_STENCIL_LEG_INT(UUU,Tp,skew,Impl::multLinkAdd);
GENERIC_STENCIL_LEG_INT(UUU,Xm,skew,Impl::multLinkAdd);
GENERIC_STENCIL_LEG_INT(UUU,Ym,skew,Impl::multLinkAdd);
GENERIC_STENCIL_LEG_INT(UUU,Zm,skew,Impl::multLinkAdd);
GENERIC_STENCIL_LEG_INT(UUU,Tm,skew,Impl::multLinkAdd);
if ( dag ) {
Uchi = - Uchi;
}
vstream(out._odata[sF], Uchi);
}
};
///////////////////////////////////////////////////
// Only contributions from exterior of our node
///////////////////////////////////////////////////
template <class Impl>
void StaggeredKernels<Impl>::DhopSiteGenericExt(StencilImpl &st, LebesgueOrder &lo,
DoubledGaugeField &U, DoubledGaugeField &UUU,
SiteSpinor *buf, int LLs, int sU,
const FermionField &in, FermionField &out,int dag) {
const SiteSpinor *chi_p;
SiteSpinor chi;
SiteSpinor Uchi;
StencilEntry *SE;
int ptype;
int nmu=0;
int skew ;
for(int s=0;s<LLs;s++){
int sF=LLs*sU+s;
skew = 0;
Uchi=zero;
GENERIC_STENCIL_LEG_EXT(U,Xp,skew,Impl::multLinkAdd);
GENERIC_STENCIL_LEG_EXT(U,Yp,skew,Impl::multLinkAdd);
GENERIC_STENCIL_LEG_EXT(U,Zp,skew,Impl::multLinkAdd);
GENERIC_STENCIL_LEG_EXT(U,Tp,skew,Impl::multLinkAdd);
GENERIC_STENCIL_LEG_EXT(U,Xm,skew,Impl::multLinkAdd);
GENERIC_STENCIL_LEG_EXT(U,Ym,skew,Impl::multLinkAdd);
GENERIC_STENCIL_LEG_EXT(U,Zm,skew,Impl::multLinkAdd);
GENERIC_STENCIL_LEG_EXT(U,Tm,skew,Impl::multLinkAdd);
skew=8;
GENERIC_STENCIL_LEG_EXT(UUU,Xp,skew,Impl::multLinkAdd);
GENERIC_STENCIL_LEG_EXT(UUU,Yp,skew,Impl::multLinkAdd);
GENERIC_STENCIL_LEG_EXT(UUU,Zp,skew,Impl::multLinkAdd);
GENERIC_STENCIL_LEG_EXT(UUU,Tp,skew,Impl::multLinkAdd);
GENERIC_STENCIL_LEG_EXT(UUU,Xm,skew,Impl::multLinkAdd);
GENERIC_STENCIL_LEG_EXT(UUU,Ym,skew,Impl::multLinkAdd);
GENERIC_STENCIL_LEG_EXT(UUU,Zm,skew,Impl::multLinkAdd);
GENERIC_STENCIL_LEG_EXT(UUU,Tm,skew,Impl::multLinkAdd);
if ( nmu ) {
if ( dag ) {
out._odata[sF] = out._odata[sF] - Uchi;
} else {
out._odata[sF] = out._odata[sF] + Uchi;
}
}
}
};
////////////////////////////////////////////////////////////////////////////////////
// Driving / wrapping routine to select right kernel
////////////////////////////////////////////////////////////////////////////////////
template <class Impl>
void StaggeredKernels<Impl>::DhopSiteDag(StencilImpl &st, LebesgueOrder &lo, DoubledGaugeField &U, DoubledGaugeField &UUU,
SiteSpinor *buf, int LLs, int sU,
const FermionField &in, FermionField &out) {
SiteSpinor naik;
SiteSpinor naive;
int oneLink =0;
int threeLink=1;
SiteSpinor *buf, int LLs, int sU,
const FermionField &in, FermionField &out,
int interior,int exterior)
{
int dag=1;
switch(Opt) {
#ifdef AVX512
//FIXME; move the sign into the Asm routine
case OptInlineAsm:
DhopSiteAsm(st,lo,U,UUU,buf,LLs,sU,in,out);
for(int s=0;s<LLs;s++) {
int sF=s+LLs*sU;
out._odata[sF]=-out._odata[sF];
}
break;
#endif
case OptHandUnroll:
DhopSiteHand(st,lo,U,UUU,buf,LLs,sU,in,out,dag);
break;
case OptGeneric:
for(int s=0;s<LLs;s++){
int sF=s+LLs*sU;
DhopSiteDepth(st,lo,U,buf,sF,sU,in,naive,oneLink);
DhopSiteDepth(st,lo,UUU,buf,sF,sU,in,naik,threeLink);
out._odata[sF] =-naive-naik;
}
break;
default:
std::cout<<"Oops Opt = "<<Opt<<std::endl;
assert(0);
break;
}
DhopSite(st,lo,U,UUU,buf,LLs,sU,in,out,dag,interior,exterior);
};
template <class Impl>
void StaggeredKernels<Impl>::DhopSite(StencilImpl &st, LebesgueOrder &lo, DoubledGaugeField &U, DoubledGaugeField &UUU,
SiteSpinor *buf, int LLs, int sU,
const FermionField &in, FermionField &out,
int interior,int exterior)
{
int dag=0;
DhopSite(st,lo,U,UUU,buf,LLs,sU,in,out,dag,interior,exterior);
};
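// Note: interior/exterior act as a two-flag mask for the overlapped scheme:
// (1,1) is the classic single pass over all legs, (1,0) is the interior-only
// pass issued while comms are in flight, and (0,1) is the surface pass
// issued after CommsMerge. The assembly kernels only support the combined
// (1,1) case, hence the assert in the OptInlineAsm branch.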
template <class Impl>
void StaggeredKernels<Impl>::DhopSite(StencilImpl &st, LebesgueOrder &lo, DoubledGaugeField &U, DoubledGaugeField &UUU,
SiteSpinor *buf, int LLs,
int sU, const FermionField &in, FermionField &out)
int sU, const FermionField &in, FermionField &out,
int dag,int interior,int exterior)
{
int oneLink =0;
int threeLink=1;
SiteSpinor naik;
SiteSpinor naive;
int dag=0;
switch(Opt) {
#ifdef AVX512
case OptInlineAsm:
DhopSiteAsm(st,lo,U,UUU,buf,LLs,sU,in,out);
if ( interior && exterior ) {
DhopSiteAsm(st,lo,U,UUU,buf,LLs,sU,in,out,dag);
} else {
std::cout << GridLogError << "Cannot overlap comms and compute with Staggered assembly"<<std::endl;
assert(0);
}
break;
#endif
case OptHandUnroll:
DhopSiteHand(st,lo,U,UUU,buf,LLs,sU,in,out,dag);
if ( interior && exterior ) {
DhopSiteHand (st,lo,U,UUU,buf,LLs,sU,in,out,dag);
} else if ( interior ) {
DhopSiteHandInt(st,lo,U,UUU,buf,LLs,sU,in,out,dag);
} else if ( exterior ) {
DhopSiteHandExt(st,lo,U,UUU,buf,LLs,sU,in,out,dag);
}
break;
case OptGeneric:
for(int s=0;s<LLs;s++){
int sF=LLs*sU+s;
// assert(sF<in._odata.size());
// assert(sU< U._odata.size());
// assert(sF>=0); assert(sU>=0);
DhopSiteDepth(st,lo,U,buf,sF,sU,in,naive,oneLink);
DhopSiteDepth(st,lo,UUU,buf,sF,sU,in,naik,threeLink);
out._odata[sF] =naive+naik;
if ( interior && exterior ) {
DhopSiteGeneric (st,lo,U,UUU,buf,LLs,sU,in,out,dag);
} else if ( interior ) {
DhopSiteGenericInt(st,lo,U,UUU,buf,LLs,sU,in,out,dag);
} else if ( exterior ) {
DhopSiteGenericExt(st,lo,U,UUU,buf,LLs,sU,in,out,dag);
}
break;
default:

View File

@ -38,8 +38,9 @@ namespace QCD {
class StaggeredKernelsStatic {
public:
enum { OptGeneric, OptHandUnroll, OptInlineAsm };
// S-direction is INNERMOST and takes no part in the parity.
static int Opt; // these are a temporary hack
enum { CommsAndCompute, CommsThenCompute };
static int Opt;
static int Comms;
};
template<class Impl> class StaggeredKernels : public FermionOperator<Impl> , public StaggeredKernelsStatic {
@ -53,24 +54,62 @@ public:
void DhopDir(StencilImpl &st, DoubledGaugeField &U, DoubledGaugeField &UUU, SiteSpinor * buf,
int sF, int sU, const FermionField &in, FermionField &out, int dir,int disp);
void DhopSiteDepth(StencilImpl &st, LebesgueOrder &lo, DoubledGaugeField &U, SiteSpinor * buf,
int sF, int sU, const FermionField &in, SiteSpinor &out,int threeLink);
///////////////////////////////////////////////////////////////////////////////////////
// Generic Nc kernels
///////////////////////////////////////////////////////////////////////////////////////
void DhopSiteGeneric(StencilImpl &st, LebesgueOrder &lo,
DoubledGaugeField &U, DoubledGaugeField &UUU,
SiteSpinor * buf, int LLs, int sU,
const FermionField &in, FermionField &out,int dag);
void DhopSiteGenericInt(StencilImpl &st, LebesgueOrder &lo,
DoubledGaugeField &U, DoubledGaugeField &UUU,
SiteSpinor * buf, int LLs, int sU,
const FermionField &in, FermionField &out,int dag);
void DhopSiteGenericExt(StencilImpl &st, LebesgueOrder &lo,
DoubledGaugeField &U, DoubledGaugeField &UUU,
SiteSpinor * buf, int LLs, int sU,
const FermionField &in, FermionField &out,int dag);
///////////////////////////////////////////////////////////////////////////////////////
// Nc=3 specific kernels
///////////////////////////////////////////////////////////////////////////////////////
void DhopSiteHand(StencilImpl &st, LebesgueOrder &lo,
DoubledGaugeField &U,DoubledGaugeField &UUU,
SiteSpinor * buf, int LLs, int sU,
const FermionField &in, FermionField &out,int dag);
void DhopSiteHandInt(StencilImpl &st, LebesgueOrder &lo,
DoubledGaugeField &U,DoubledGaugeField &UUU,
SiteSpinor * buf, int LLs, int sU,
const FermionField &in, FermionField &out,int dag);
void DhopSiteHandExt(StencilImpl &st, LebesgueOrder &lo,
DoubledGaugeField &U,DoubledGaugeField &UUU,
SiteSpinor * buf, int LLs, int sU,
const FermionField &in, FermionField &out,int dag);
void DhopSiteDepthHand(StencilImpl &st, LebesgueOrder &lo, DoubledGaugeField &U, SiteSpinor * buf,
int sF, int sU, const FermionField &in, SiteSpinor&out,int threeLink);
///////////////////////////////////////////////////////////////////////////////////////
// Asm Nc=3 specific kernels
///////////////////////////////////////////////////////////////////////////////////////
void DhopSiteAsm(StencilImpl &st, LebesgueOrder &lo,
DoubledGaugeField &U,DoubledGaugeField &UUU,
SiteSpinor * buf, int LLs, int sU,
const FermionField &in, FermionField &out,int dag);
///////////////////////////////////////////////////////////////////////////////////////////////////
// Generic interface; fan out to right routine
///////////////////////////////////////////////////////////////////////////////////////////////////
void DhopSite(StencilImpl &st, LebesgueOrder &lo,
DoubledGaugeField &U, DoubledGaugeField &UUU,
SiteSpinor * buf, int LLs, int sU,
const FermionField &in, FermionField &out, int interior=1,int exterior=1);
void DhopSiteHand(StencilImpl &st, LebesgueOrder &lo, DoubledGaugeField &U, DoubledGaugeField &UUU,SiteSpinor * buf,
int LLs, int sU, const FermionField &in, FermionField &out, int dag);
void DhopSiteDag(StencilImpl &st, LebesgueOrder &lo,
DoubledGaugeField &U, DoubledGaugeField &UUU,
SiteSpinor * buf, int LLs, int sU,
const FermionField &in, FermionField &out, int interior=1,int exterior=1);
void DhopSiteAsm(StencilImpl &st, LebesgueOrder &lo, DoubledGaugeField &U,DoubledGaugeField &UUU, SiteSpinor * buf,
int LLs, int sU, const FermionField &in, FermionField &out);
void DhopSite(StencilImpl &st, LebesgueOrder &lo, DoubledGaugeField &U, DoubledGaugeField &UUU, SiteSpinor * buf,
int sF, int sU, const FermionField &in, FermionField &out);
void DhopSiteDag(StencilImpl &st, LebesgueOrder &lo, DoubledGaugeField &U, DoubledGaugeField &UUU, SiteSpinor *buf,
int LLs, int sU, const FermionField &in, FermionField &out);
void DhopSite(StencilImpl &st, LebesgueOrder &lo,
DoubledGaugeField &U, DoubledGaugeField &UUU,
SiteSpinor * buf, int LLs, int sU,
const FermionField &in, FermionField &out, int dag, int interior,int exterior);
public:

View File

@ -560,16 +560,53 @@ Author: paboyle <paboyle@ph.ed.ac.uk>
VSTORE(2,%0,pUChi_02) \
: : "r" (out) : "memory" );
#define nREDUCE(out) \
asm ( \
VADD(UChi_00,UChi_10,UChi_00) \
VADD(UChi_01,UChi_11,UChi_01) \
VADD(UChi_02,UChi_12,UChi_02) \
VADD(UChi_30,UChi_20,UChi_30) \
VADD(UChi_31,UChi_21,UChi_31) \
VADD(UChi_32,UChi_22,UChi_32) \
VADD(UChi_00,UChi_30,UChi_00) \
VADD(UChi_01,UChi_31,UChi_01) \
VADD(UChi_02,UChi_32,UChi_02) ); \
asm (VZERO(Chi_00) \
VSUB(UChi_00,Chi_00,UChi_00) \
VSUB(UChi_01,Chi_00,UChi_01) \
VSUB(UChi_02,Chi_00,UChi_02) ); \
asm ( \
VSTORE(0,%0,pUChi_00) \
VSTORE(1,%0,pUChi_01) \
VSTORE(2,%0,pUChi_02) \
: : "r" (out) : "memory" );
#define REDUCEa(out) \
asm ( \
VADD(UChi_00,UChi_10,UChi_00) \
VADD(UChi_01,UChi_11,UChi_01) \
VADD(UChi_02,UChi_12,UChi_02) ); \
asm ( \
VSTORE(0,%0,pUChi_00) \
VSTORE(1,%0,pUChi_01) \
VSTORE(2,%0,pUChi_02) \
: : "r" (out) : "memory" );
// FIXME is sign right in the VSUB ?
#define nREDUCEa(out) \
asm ( \
VADD(UChi_00,UChi_10,UChi_00) \
VADD(UChi_01,UChi_11,UChi_01) \
VADD(UChi_02,UChi_12,UChi_02) ); \
asm (VZERO(Chi_00) \
VSUB(UChi_00,Chi_00,UChi_00) \
VSUB(UChi_01,Chi_00,UChi_01) \
VSUB(UChi_02,Chi_00,UChi_02) ); \
asm ( \
VSTORE(0,%0,pUChi_00) \
VSTORE(1,%0,pUChi_01) \
VSTORE(2,%0,pUChi_02) \
: : "r" (out) : "memory" );
#define PERMUTE_DIR(dir) \
permute##dir(Chi_0,Chi_0);\
@ -581,10 +618,9 @@ namespace QCD {
template <class Impl>
void StaggeredKernels<Impl>::DhopSiteAsm(StencilImpl &st, LebesgueOrder &lo,
DoubledGaugeField &U,
DoubledGaugeField &UUU,
SiteSpinor *buf, int LLs,
int sU, const FermionField &in, FermionField &out)
DoubledGaugeField &U, DoubledGaugeField &UUU,
SiteSpinor *buf, int LLs, int sU,
const FermionField &in, FermionField &out,int dag)
{
assert(0);
};
@ -645,10 +681,9 @@ void StaggeredKernels<Impl>::DhopSiteAsm(StencilImpl &st, LebesgueOrder &lo,
// This is the single precision 5th direction vectorised kernel
#include <simd/Intel512single.h>
template <> void StaggeredKernels<StaggeredVec5dImplF>::DhopSiteAsm(StencilImpl &st, LebesgueOrder &lo,
DoubledGaugeField &U,
DoubledGaugeField &UUU,
SiteSpinor *buf, int LLs,
int sU, const FermionField &in, FermionField &out)
DoubledGaugeField &U, DoubledGaugeField &UUU,
SiteSpinor *buf, int LLs, int sU,
const FermionField &in, FermionField &out,int dag)
{
#ifdef AVX512
uint64_t gauge0,gauge1,gauge2,gauge3;
@ -685,7 +720,11 @@ template <> void StaggeredKernels<StaggeredVec5dImplF>::DhopSiteAsm(StencilImpl
MULT_ADD_LS(gauge0,gauge1,gauge2,gauge3);
addr0 = (uint64_t) &out._odata[sF];
REDUCE(addr0);
if ( dag ) {
nREDUCE(addr0);
} else {
REDUCE(addr0);
}
}
#else
assert(0);
@ -695,10 +734,9 @@ template <> void StaggeredKernels<StaggeredVec5dImplF>::DhopSiteAsm(StencilImpl
#include <simd/Intel512double.h>
template <> void StaggeredKernels<StaggeredVec5dImplD>::DhopSiteAsm(StencilImpl &st, LebesgueOrder &lo,
DoubledGaugeField &U,
DoubledGaugeField &UUU,
SiteSpinor *buf, int LLs,
int sU, const FermionField &in, FermionField &out)
DoubledGaugeField &U, DoubledGaugeField &UUU,
SiteSpinor *buf, int LLs, int sU,
const FermionField &in, FermionField &out,int dag)
{
#ifdef AVX512
uint64_t gauge0,gauge1,gauge2,gauge3;
@ -734,7 +772,11 @@ template <> void StaggeredKernels<StaggeredVec5dImplD>::DhopSiteAsm(StencilImpl
MULT_ADD_LS(gauge0,gauge1,gauge2,gauge3);
addr0 = (uint64_t) &out._odata[sF];
REDUCE(addr0);
if ( dag ) {
nREDUCE(addr0);
} else {
REDUCE(addr0);
}
}
#else
assert(0);
@ -776,10 +818,9 @@ template <> void StaggeredKernels<StaggeredVec5dImplD>::DhopSiteAsm(StencilImpl
#include <simd/Intel512single.h>
template <> void StaggeredKernels<StaggeredImplF>::DhopSiteAsm(StencilImpl &st, LebesgueOrder &lo,
DoubledGaugeField &U,
DoubledGaugeField &UUU,
SiteSpinor *buf, int LLs,
int sU, const FermionField &in, FermionField &out)
DoubledGaugeField &U, DoubledGaugeField &UUU,
SiteSpinor *buf, int LLs, int sU,
const FermionField &in, FermionField &out,int dag)
{
#ifdef AVX512
uint64_t gauge0,gauge1,gauge2,gauge3;
@ -832,7 +873,11 @@ template <> void StaggeredKernels<StaggeredImplF>::DhopSiteAsm(StencilImpl &st,
MULT_ADD_XYZT(gauge2,gauge3);
addr0 = (uint64_t) &out._odata[sF];
REDUCEa(addr0);
if ( dag ) {
nREDUCEa(addr0);
} else {
REDUCEa(addr0);
}
}
#else
assert(0);
@ -841,10 +886,9 @@ template <> void StaggeredKernels<StaggeredImplF>::DhopSiteAsm(StencilImpl &st,
#include <simd/Intel512double.h>
template <> void StaggeredKernels<StaggeredImplD>::DhopSiteAsm(StencilImpl &st, LebesgueOrder &lo,
DoubledGaugeField &U,
DoubledGaugeField &UUU,
SiteSpinor *buf, int LLs,
int sU, const FermionField &in, FermionField &out)
DoubledGaugeField &U, DoubledGaugeField &UUU,
SiteSpinor *buf, int LLs, int sU,
const FermionField &in, FermionField &out,int dag)
{
#ifdef AVX512
uint64_t gauge0,gauge1,gauge2,gauge3;
@ -897,7 +941,11 @@ template <> void StaggeredKernels<StaggeredImplD>::DhopSiteAsm(StencilImpl &st,
MULT_ADD_XYZT(gauge2,gauge3);
addr0 = (uint64_t) &out._odata[sF];
REDUCEa(addr0);
if ( dag ) {
nREDUCEa(addr0);
} else {
REDUCEa(addr0);
}
}
#else
assert(0);
@ -909,7 +957,7 @@ template <> void StaggeredKernels<StaggeredImplD>::DhopSiteAsm(StencilImpl &st,
DoubledGaugeField &U, \
DoubledGaugeField &UUU, \
SiteSpinor *buf, int LLs, \
int sU, const FermionField &in, FermionField &out);
int sU, const FermionField &in, FermionField &out,int dag);
KERNEL_INSTANTIATE(StaggeredKernels,DhopSiteAsm,StaggeredImplD);
KERNEL_INSTANTIATE(StaggeredKernels,DhopSiteAsm,StaggeredImplF);

View File

@ -28,7 +28,6 @@ Author: paboyle <paboyle@ph.ed.ac.uk>
/* END LEGAL */
#include <Grid.h>
#define REGISTER
#define LOAD_CHI(b) \
const SiteSpinor & ref (b[offset]); \
@ -59,7 +58,7 @@ Author: paboyle <paboyle@ph.ed.ac.uk>
UChi ## _1 += U_12*Chi_2;\
UChi ## _2 += U_22*Chi_2;
#define MULT_ADD(A,UChi) \
#define MULT_ADD(U,A,UChi) \
auto & ref(U._odata[sU](A)); \
Impl::loadLinkElement(U_00,ref()(0,0)); \
Impl::loadLinkElement(U_10,ref()(1,0)); \
@ -82,241 +81,319 @@ Author: paboyle <paboyle@ph.ed.ac.uk>
#define PERMUTE_DIR(dir) \
permute##dir(Chi_0,Chi_0);\
permute##dir(Chi_1,Chi_1);\
permute##dir(Chi_2,Chi_2);
permute##dir(Chi_0,Chi_0); \
permute##dir(Chi_1,Chi_1); \
permute##dir(Chi_2,Chi_2);
#define HAND_STENCIL_LEG_BASE(Dir,Perm,skew) \
SE=st.GetEntry(ptype,Dir+skew,sF); \
offset = SE->_offset; \
local = SE->_is_local; \
perm = SE->_permute; \
if ( local ) { \
LOAD_CHI(in._odata); \
if ( perm) { \
PERMUTE_DIR(Perm); \
} \
} else { \
LOAD_CHI(buf); \
}
#define HAND_STENCIL_LEG_BEGIN(Dir,Perm,skew,even) \
HAND_STENCIL_LEG_BASE(Dir,Perm,skew) \
{ \
MULT(Dir,even); \
}
#define HAND_STENCIL_LEG(U,Dir,Perm,skew,even) \
HAND_STENCIL_LEG_BASE(Dir,Perm,skew) \
{ \
MULT_ADD(U,Dir,even); \
}
#define HAND_STENCIL_LEG_INT(U,Dir,Perm,skew,even) \
SE=st.GetEntry(ptype,Dir+skew,sF); \
offset = SE->_offset; \
local = SE->_is_local; \
perm = SE->_permute; \
if ( local ) { \
LOAD_CHI(in._odata); \
if ( perm) { \
PERMUTE_DIR(Perm); \
} \
} else if ( st.same_node[Dir] ) { \
LOAD_CHI(buf); \
} \
if (SE->_is_local || st.same_node[Dir] ) { \
MULT_ADD(U,Dir,even); \
}
#define HAND_STENCIL_LEG_EXT(U,Dir,Perm,skew,even) \
SE=st.GetEntry(ptype,Dir+skew,sF); \
offset = SE->_offset; \
local = SE->_is_local; \
perm = SE->_permute; \
if ((!SE->_is_local) && (!st.same_node[Dir]) ) { \
nmu++; \
{ LOAD_CHI(buf); } \
{ MULT_ADD(U,Dir,even); } \
}
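// Note: the hand-unrolled legs mirror the generic macros above; _BEGIN
// overwrites its accumulator with MULT so the first two legs need no prior
// zeroing, while the _INT/_EXT variants always MULT_ADD into accumulators
// the caller has zeroed.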
namespace Grid {
namespace QCD {
template <class Impl>
void StaggeredKernels<Impl>::DhopSiteHand(StencilImpl &st, LebesgueOrder &lo, DoubledGaugeField &U,DoubledGaugeField &UUU,
SiteSpinor *buf, int LLs,
int sU, const FermionField &in, FermionField &out, int dag)
{
SiteSpinor naik;
SiteSpinor naive;
int oneLink =0;
int threeLink=1;
int skew(0);
Real scale(1.0);
if(dag) scale = -1.0;
for(int s=0;s<LLs;s++){
int sF=s+LLs*sU;
DhopSiteDepthHand(st,lo,U,buf,sF,sU,in,naive,oneLink);
DhopSiteDepthHand(st,lo,UUU,buf,sF,sU,in,naik,threeLink);
out._odata[sF] =scale*(naive+naik);
}
}
template <class Impl>
void StaggeredKernels<Impl>::DhopSiteDepthHand(StencilImpl &st, LebesgueOrder &lo, DoubledGaugeField &U,
SiteSpinor *buf, int sF,
int sU, const FermionField &in, SiteSpinor &out,int threeLink)
void StaggeredKernels<Impl>::DhopSiteHand(StencilImpl &st, LebesgueOrder &lo,
DoubledGaugeField &U,DoubledGaugeField &UUU,
SiteSpinor *buf, int LLs, int sU,
const FermionField &in, FermionField &out,int dag)
{
typedef typename Simd::scalar_type S;
typedef typename Simd::vector_type V;
REGISTER Simd even_0; // 12 regs on knc
REGISTER Simd even_1;
REGISTER Simd even_2;
REGISTER Simd odd_0; // 12 regs on knc
REGISTER Simd odd_1;
REGISTER Simd odd_2;
Simd even_0; // 12 regs on knc
Simd even_1;
Simd even_2;
Simd odd_0; // 12 regs on knc
Simd odd_1;
Simd odd_2;
REGISTER Simd Chi_0; // two spinor; 6 regs
REGISTER Simd Chi_1;
REGISTER Simd Chi_2;
REGISTER Simd U_00; // two rows of U matrix
REGISTER Simd U_10;
REGISTER Simd U_20;
REGISTER Simd U_01;
REGISTER Simd U_11;
REGISTER Simd U_21; // 2 reg left.
REGISTER Simd U_02;
REGISTER Simd U_12;
REGISTER Simd U_22;
int skew = 0;
if (threeLink) skew=8;
Simd Chi_0; // two spinor; 6 regs
Simd Chi_1;
Simd Chi_2;
Simd U_00; // two rows of U matrix
Simd U_10;
Simd U_20;
Simd U_01;
Simd U_11;
Simd U_21; // 2 reg left.
Simd U_02;
Simd U_12;
Simd U_22;
SiteSpinor result;
int offset,local,perm, ptype;
StencilEntry *SE;
int skew;
// Xp
SE=st.GetEntry(ptype,Xp+skew,sF);
offset = SE->_offset;
local = SE->_is_local;
perm = SE->_permute;
if ( local ) {
LOAD_CHI(in._odata);
if ( perm) {
PERMUTE_DIR(3); // T==0, Z==1, Y==2, X==3 expect 1,2,2,2 simd layout etc...
}
} else {
LOAD_CHI(buf);
}
{
MULT(Xp,even);
}
// Yp
SE=st.GetEntry(ptype,Yp+skew,sF);
offset = SE->_offset;
local = SE->_is_local;
perm = SE->_permute;
if ( local ) {
LOAD_CHI(in._odata);
if ( perm) {
PERMUTE_DIR(2); // T==0, Z==1, Y==2, X==3 expect 1,2,2,2 simd layout etc...
}
} else {
LOAD_CHI(buf);
}
{
MULT(Yp,odd);
}
for(int s=0;s<LLs;s++){
int sF=s+LLs*sU;
// Zp
SE=st.GetEntry(ptype,Zp+skew,sF);
offset = SE->_offset;
local = SE->_is_local;
perm = SE->_permute;
if ( local ) {
LOAD_CHI(in._odata);
if ( perm) {
PERMUTE_DIR(1); // T==0, Z==1, Y==2, X==3 expect 1,2,2,2 simd layout etc...
skew = 0;
HAND_STENCIL_LEG_BEGIN(Xp,3,skew,even);
HAND_STENCIL_LEG_BEGIN(Yp,2,skew,odd);
HAND_STENCIL_LEG (U,Zp,1,skew,even);
HAND_STENCIL_LEG (U,Tp,0,skew,odd);
HAND_STENCIL_LEG (U,Xm,3,skew,even);
HAND_STENCIL_LEG (U,Ym,2,skew,odd);
HAND_STENCIL_LEG (U,Zm,1,skew,even);
HAND_STENCIL_LEG (U,Tm,0,skew,odd);
skew = 8;
HAND_STENCIL_LEG(UUU,Xp,3,skew,even);
HAND_STENCIL_LEG(UUU,Yp,2,skew,odd);
HAND_STENCIL_LEG(UUU,Zp,1,skew,even);
HAND_STENCIL_LEG(UUU,Tp,0,skew,odd);
HAND_STENCIL_LEG(UUU,Xm,3,skew,even);
HAND_STENCIL_LEG(UUU,Ym,2,skew,odd);
HAND_STENCIL_LEG(UUU,Zm,1,skew,even);
HAND_STENCIL_LEG(UUU,Tm,0,skew,odd);
if ( dag ) {
result()()(0) = - even_0 - odd_0;
result()()(1) = - even_1 - odd_1;
result()()(2) = - even_2 - odd_2;
} else {
result()()(0) = even_0 + odd_0;
result()()(1) = even_1 + odd_1;
result()()(2) = even_2 + odd_2;
}
} else {
LOAD_CHI(buf);
vstream(out._odata[sF],result);
}
{
MULT_ADD(Zp,even);
}
// Tp
SE=st.GetEntry(ptype,Tp+skew,sF);
offset = SE->_offset;
local = SE->_is_local;
perm = SE->_permute;
if ( local ) {
LOAD_CHI(in._odata);
if ( perm) {
PERMUTE_DIR(0); // T==0, Z==1, Y==2, X==3 expect 1,2,2,2 simd layout etc...
}
} else {
LOAD_CHI(buf);
}
{
MULT_ADD(Tp,odd);
}
// Xm
SE=st.GetEntry(ptype,Xm+skew,sF);
offset = SE->_offset;
local = SE->_is_local;
perm = SE->_permute;
if ( local ) {
LOAD_CHI(in._odata);
if ( perm) {
PERMUTE_DIR(3); // T==0, Z==1, Y==2, X==3 expect 1,2,2,2 simd layout etc...
}
} else {
LOAD_CHI(buf);
}
{
MULT_ADD(Xm,even);
}
// Ym
SE=st.GetEntry(ptype,Ym+skew,sF);
offset = SE->_offset;
local = SE->_is_local;
perm = SE->_permute;
if ( local ) {
LOAD_CHI(in._odata);
if ( perm) {
PERMUTE_DIR(2); // T==0, Z==1, Y==2, X==3 expect 1,2,2,2 simd layout etc...
}
} else {
LOAD_CHI(buf);
}
{
MULT_ADD(Ym,odd);
}
// Zm
SE=st.GetEntry(ptype,Zm+skew,sF);
offset = SE->_offset;
local = SE->_is_local;
perm = SE->_permute;
if ( local ) {
LOAD_CHI(in._odata);
if ( perm) {
PERMUTE_DIR(1); // T==0, Z==1, Y==2, X==3 expect 1,2,2,2 simd layout etc...
}
} else {
LOAD_CHI(buf);
}
{
MULT_ADD(Zm,even);
}
// Tm
SE=st.GetEntry(ptype,Tm+skew,sF);
offset = SE->_offset;
local = SE->_is_local;
perm = SE->_permute;
if ( local ) {
LOAD_CHI(in._odata);
if ( perm) {
PERMUTE_DIR(0); // T==0, Z==1, Y==2, X==3 expect 1,2,2,2 simd layout etc...
}
} else {
LOAD_CHI(buf);
}
{
MULT_ADD(Tm,odd);
}
vstream(out()()(0),even_0+odd_0);
vstream(out()()(1),even_1+odd_1);
vstream(out()()(2),even_2+odd_2);
}
template <class Impl>
void StaggeredKernels<Impl>::DhopSiteHandInt(StencilImpl &st, LebesgueOrder &lo,
DoubledGaugeField &U, DoubledGaugeField &UUU,
SiteSpinor *buf, int LLs, int sU,
const FermionField &in, FermionField &out,int dag)
{
typedef typename Simd::scalar_type S;
typedef typename Simd::vector_type V;
Simd even_0; // 12 regs on knc
Simd even_1;
Simd even_2;
Simd odd_0; // 12 regs on knc
Simd odd_1;
Simd odd_2;
Simd Chi_0; // two spinor; 6 regs
Simd Chi_1;
Simd Chi_2;
Simd U_00; // two rows of U matrix
Simd U_10;
Simd U_20;
Simd U_01;
Simd U_11;
Simd U_21; // 2 reg left.
Simd U_02;
Simd U_12;
Simd U_22;
SiteSpinor result;
int offset,local,perm, ptype;
StencilEntry *SE;
int skew;
for(int s=0;s<LLs;s++){
int sF=s+LLs*sU;
even_0 = zero; even_1 = zero; even_2 = zero;
odd_0 = zero; odd_1 = zero; odd_2 = zero;
skew = 0;
HAND_STENCIL_LEG_INT(U,Xp,3,skew,even);
HAND_STENCIL_LEG_INT(U,Yp,2,skew,odd);
HAND_STENCIL_LEG_INT(U,Zp,1,skew,even);
HAND_STENCIL_LEG_INT(U,Tp,0,skew,odd);
HAND_STENCIL_LEG_INT(U,Xm,3,skew,even);
HAND_STENCIL_LEG_INT(U,Ym,2,skew,odd);
HAND_STENCIL_LEG_INT(U,Zm,1,skew,even);
HAND_STENCIL_LEG_INT(U,Tm,0,skew,odd);
skew = 8;
HAND_STENCIL_LEG_INT(UUU,Xp,3,skew,even);
HAND_STENCIL_LEG_INT(UUU,Yp,2,skew,odd);
HAND_STENCIL_LEG_INT(UUU,Zp,1,skew,even);
HAND_STENCIL_LEG_INT(UUU,Tp,0,skew,odd);
HAND_STENCIL_LEG_INT(UUU,Xm,3,skew,even);
HAND_STENCIL_LEG_INT(UUU,Ym,2,skew,odd);
HAND_STENCIL_LEG_INT(UUU,Zm,1,skew,even);
HAND_STENCIL_LEG_INT(UUU,Tm,0,skew,odd);
// Assume every site must be connected to at least one interior point. No 1^4 subvols.
if ( dag ) {
result()()(0) = - even_0 - odd_0;
result()()(1) = - even_1 - odd_1;
result()()(2) = - even_2 - odd_2;
} else {
result()()(0) = even_0 + odd_0;
result()()(1) = even_1 + odd_1;
result()()(2) = even_2 + odd_2;
}
vstream(out._odata[sF],result);
}
}
template <class Impl>
void StaggeredKernels<Impl>::DhopSiteHandExt(StencilImpl &st, LebesgueOrder &lo,
DoubledGaugeField &U, DoubledGaugeField &UUU,
SiteSpinor *buf, int LLs, int sU,
const FermionField &in, FermionField &out,int dag)
{
typedef typename Simd::scalar_type S;
typedef typename Simd::vector_type V;
Simd even_0; // 12 regs on knc
Simd even_1;
Simd even_2;
Simd odd_0; // 12 regs on knc
Simd odd_1;
Simd odd_2;
Simd Chi_0; // two spinor; 6 regs
Simd Chi_1;
Simd Chi_2;
Simd U_00; // two rows of U matrix
Simd U_10;
Simd U_20;
Simd U_01;
Simd U_11;
Simd U_21; // 2 reg left.
Simd U_02;
Simd U_12;
Simd U_22;
SiteSpinor result;
int offset,local,perm, ptype;
StencilEntry *SE;
int skew;
for(int s=0;s<LLs;s++){
int sF=s+LLs*sU;
even_0 = zero; even_1 = zero; even_2 = zero;
odd_0 = zero; odd_1 = zero; odd_2 = zero;
int nmu=0;
skew = 0;
HAND_STENCIL_LEG_EXT(U,Xp,3,skew,even);
HAND_STENCIL_LEG_EXT(U,Yp,2,skew,odd);
HAND_STENCIL_LEG_EXT(U,Zp,1,skew,even);
HAND_STENCIL_LEG_EXT(U,Tp,0,skew,odd);
HAND_STENCIL_LEG_EXT(U,Xm,3,skew,even);
HAND_STENCIL_LEG_EXT(U,Ym,2,skew,odd);
HAND_STENCIL_LEG_EXT(U,Zm,1,skew,even);
HAND_STENCIL_LEG_EXT(U,Tm,0,skew,odd);
skew = 8;
HAND_STENCIL_LEG_EXT(UUU,Xp,3,skew,even);
HAND_STENCIL_LEG_EXT(UUU,Yp,2,skew,odd);
HAND_STENCIL_LEG_EXT(UUU,Zp,1,skew,even);
HAND_STENCIL_LEG_EXT(UUU,Tp,0,skew,odd);
HAND_STENCIL_LEG_EXT(UUU,Xm,3,skew,even);
HAND_STENCIL_LEG_EXT(UUU,Ym,2,skew,odd);
HAND_STENCIL_LEG_EXT(UUU,Zm,1,skew,even);
HAND_STENCIL_LEG_EXT(UUU,Tm,0,skew,odd);
// Add sum of all exterior connected stencil legs
if ( nmu ) {
if ( dag ) {
result()()(0) = - even_0 - odd_0;
result()()(1) = - even_1 - odd_1;
result()()(2) = - even_2 - odd_2;
} else {
result()()(0) = even_0 + odd_0;
result()()(1) = even_1 + odd_1;
result()()(2) = even_2 + odd_2;
}
out._odata[sF] = out._odata[sF] + result;
}
}
}
#define DHOP_SITE_HAND_INSTANTIATE(IMPL) \
template void StaggeredKernels<IMPL>::DhopSiteHand(StencilImpl &st, LebesgueOrder &lo, \
DoubledGaugeField &U,DoubledGaugeField &UUU, \
SiteSpinor *buf, int LLs, \
int sU, const FermionField &in, FermionField &out, int dag);
SiteSpinor *buf, int LLs, int sU, \
const FermionField &in, FermionField &out, int dag); \
\
template void StaggeredKernels<IMPL>::DhopSiteHandInt(StencilImpl &st, LebesgueOrder &lo, \
DoubledGaugeField &U,DoubledGaugeField &UUU, \
SiteSpinor *buf, int LLs, int sU, \
const FermionField &in, FermionField &out, int dag); \
\
template void StaggeredKernels<IMPL>::DhopSiteHandExt(StencilImpl &st, LebesgueOrder &lo, \
DoubledGaugeField &U,DoubledGaugeField &UUU, \
SiteSpinor *buf, int LLs, int sU, \
const FermionField &in, FermionField &out, int dag); \
#define DHOP_SITE_DEPTH_HAND_INSTANTIATE(IMPL) \
template void StaggeredKernels<IMPL>::DhopSiteDepthHand(StencilImpl &st, LebesgueOrder &lo, DoubledGaugeField &U, \
SiteSpinor *buf, int sF, \
int sU, const FermionField &in, SiteSpinor &out,int threeLink) ;
DHOP_SITE_HAND_INSTANTIATE(StaggeredImplD);
DHOP_SITE_HAND_INSTANTIATE(StaggeredImplF);
DHOP_SITE_HAND_INSTANTIATE(StaggeredVec5dImplD);
DHOP_SITE_HAND_INSTANTIATE(StaggeredVec5dImplF);
DHOP_SITE_DEPTH_HAND_INSTANTIATE(StaggeredImplD);
DHOP_SITE_DEPTH_HAND_INSTANTIATE(StaggeredImplF);
DHOP_SITE_DEPTH_HAND_INSTANTIATE(StaggeredVec5dImplD);
DHOP_SITE_DEPTH_HAND_INSTANTIATE(StaggeredVec5dImplF);
}}
}
}

View File

@ -69,39 +69,47 @@ class WilsonCompressorTemplate< _HCspinor, _Hspinor, _Spinor, projector,
/*****************************************************/
/* Compress includes precision change if mpi data is not the same */
/*****************************************************/
inline void Compress(SiteHalfSpinor *buf,Integer o,const SiteSpinor &in) {
projector::Proj(buf[o],in,mu,dag);
inline void Compress(SiteHalfSpinor * __restrict__ buf,Integer o,const SiteSpinor &in) {
SiteHalfSpinor tmp;
projector::Proj(tmp,in,mu,dag);
vstream(buf[o],tmp);
}
/*****************************************************/
/* Exchange includes precision change if mpi data is not the same */
/*****************************************************/
inline void Exchange(SiteHalfSpinor *mp,
SiteHalfSpinor *vp0,
SiteHalfSpinor *vp1,
inline void Exchange(SiteHalfSpinor * __restrict__ mp,
const SiteHalfSpinor * __restrict__ vp0,
const SiteHalfSpinor * __restrict__ vp1,
Integer type,Integer o){
exchange(mp[2*o],mp[2*o+1],vp0[o],vp1[o],type);
SiteHalfSpinor tmp1;
SiteHalfSpinor tmp2;
exchange(tmp1,tmp2,vp0[o],vp1[o],type);
vstream(mp[2*o ],tmp1);
vstream(mp[2*o+1],tmp2);
}
/*****************************************************/
/* Have a decompression step if mpi data is not the same */
/*****************************************************/
inline void Decompress(SiteHalfSpinor *out,
SiteHalfSpinor *in, Integer o) {
inline void Decompress(SiteHalfSpinor * __restrict__ out,
SiteHalfSpinor * __restrict__ in, Integer o) {
assert(0);
}
/*****************************************************/
/* Compress Exchange */
/*****************************************************/
inline void CompressExchange(SiteHalfSpinor *out0,
SiteHalfSpinor *out1,
const SiteSpinor *in,
inline void CompressExchange(SiteHalfSpinor * __restrict__ out0,
SiteHalfSpinor * __restrict__ out1,
const SiteSpinor * __restrict__ in,
Integer j,Integer k, Integer m,Integer type){
SiteHalfSpinor temp1, temp2,temp3,temp4;
projector::Proj(temp1,in[k],mu,dag);
projector::Proj(temp2,in[m],mu,dag);
exchange(out0[j],out1[j],temp1,temp2,type);
exchange(temp3,temp4,temp1,temp2,type);
vstream(out0[j],temp3);
vstream(out1[j],temp4);
}
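// Note: the pattern above (project or exchange into stack temporaries, then
// vstream the result) is a streaming-store idiom: the comms buffers are
// write-only here, so non-temporal stores avoid pulling the destination
// cache lines in before overwriting them. A minimal sketch, with Produce a
// stand-in for any of the producers above:
//   SiteHalfSpinor tmp;
//   Produce(tmp, in);     // compute into registers / stack
//   vstream(buf[o], tmp); // write-combining store, bypassing the cache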
/*****************************************************/
@ -266,41 +274,16 @@ public:
if ( timer4 ) std::cout << GridLogMessage << " timer4 " <<timer4 <<std::endl;
}
std::vector<int> same_node;
std::vector<int> surface_list;
WilsonStencil(GridBase *grid,
int npoints,
int checkerboard,
const std::vector<int> &directions,
const std::vector<int> &distances)
: CartesianStencil<vobj,cobj> (grid,npoints,checkerboard,directions,distances) ,
same_node(npoints)
: CartesianStencil<vobj,cobj> (grid,npoints,checkerboard,directions,distances)
{
ZeroCountersi();
surface_list.resize(0);
};
void BuildSurfaceList(int Ls,int vol4){
// find same node for SHM
// Here we know the distance is 1 for WilsonStencil
for(int point=0;point<this->_npoints;point++){
same_node[point] = this->SameNode(point);
}
for(int site = 0 ;site< vol4;site++){
int local = 1;
for(int point=0;point<this->_npoints;point++){
if( (!this->GetNodeLocal(site*Ls,point)) && (!same_node[point]) ){
local = 0;
}
}
if(local == 0) {
surface_list.push_back(site);
}
}
}
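// Note: same_node, surface_list and BuildSurfaceList appear to move from
// WilsonStencil into the common CartesianStencil base (hence the this->
// qualifications below), letting the staggered stencil reuse the same
// surface bookkeeping for its overlapped-comms path.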
template < class compressor>
void HaloExchangeOpt(const Lattice<vobj> &source,compressor &compress)
@ -361,23 +344,23 @@ public:
int dag = compress.dag;
int face_idx=0;
if ( dag ) {
assert(same_node[Xp]==this->HaloGatherDir(source,XpCompress,Xp,face_idx));
assert(same_node[Yp]==this->HaloGatherDir(source,YpCompress,Yp,face_idx));
assert(same_node[Zp]==this->HaloGatherDir(source,ZpCompress,Zp,face_idx));
assert(same_node[Tp]==this->HaloGatherDir(source,TpCompress,Tp,face_idx));
assert(same_node[Xm]==this->HaloGatherDir(source,XmCompress,Xm,face_idx));
assert(same_node[Ym]==this->HaloGatherDir(source,YmCompress,Ym,face_idx));
assert(same_node[Zm]==this->HaloGatherDir(source,ZmCompress,Zm,face_idx));
assert(same_node[Tm]==this->HaloGatherDir(source,TmCompress,Tm,face_idx));
assert(this->same_node[Xp]==this->HaloGatherDir(source,XpCompress,Xp,face_idx));
assert(this->same_node[Yp]==this->HaloGatherDir(source,YpCompress,Yp,face_idx));
assert(this->same_node[Zp]==this->HaloGatherDir(source,ZpCompress,Zp,face_idx));
assert(this->same_node[Tp]==this->HaloGatherDir(source,TpCompress,Tp,face_idx));
assert(this->same_node[Xm]==this->HaloGatherDir(source,XmCompress,Xm,face_idx));
assert(this->same_node[Ym]==this->HaloGatherDir(source,YmCompress,Ym,face_idx));
assert(this->same_node[Zm]==this->HaloGatherDir(source,ZmCompress,Zm,face_idx));
assert(this->same_node[Tm]==this->HaloGatherDir(source,TmCompress,Tm,face_idx));
} else {
assert(same_node[Xp]==this->HaloGatherDir(source,XmCompress,Xp,face_idx));
assert(same_node[Yp]==this->HaloGatherDir(source,YmCompress,Yp,face_idx));
assert(same_node[Zp]==this->HaloGatherDir(source,ZmCompress,Zp,face_idx));
assert(same_node[Tp]==this->HaloGatherDir(source,TmCompress,Tp,face_idx));
assert(same_node[Xm]==this->HaloGatherDir(source,XpCompress,Xm,face_idx));
assert(same_node[Ym]==this->HaloGatherDir(source,YpCompress,Ym,face_idx));
assert(same_node[Zm]==this->HaloGatherDir(source,ZpCompress,Zm,face_idx));
assert(same_node[Tm]==this->HaloGatherDir(source,TpCompress,Tm,face_idx));
assert(this->same_node[Xp]==this->HaloGatherDir(source,XmCompress,Xp,face_idx));
assert(this->same_node[Yp]==this->HaloGatherDir(source,YmCompress,Yp,face_idx));
assert(this->same_node[Zp]==this->HaloGatherDir(source,ZmCompress,Zp,face_idx));
assert(this->same_node[Tp]==this->HaloGatherDir(source,TmCompress,Tp,face_idx));
assert(this->same_node[Xm]==this->HaloGatherDir(source,XpCompress,Xm,face_idx));
assert(this->same_node[Ym]==this->HaloGatherDir(source,YpCompress,Ym,face_idx));
assert(this->same_node[Zm]==this->HaloGatherDir(source,ZpCompress,Zm,face_idx));
assert(this->same_node[Tm]==this->HaloGatherDir(source,TpCompress,Tm,face_idx));
}
this->face_table_computed=1;
assert(this->u_comm_offset==this->_unified_buffer_size);
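The assert pairs encode a projector/direction pairing: the compressor spin-projects on the sending side, so gathering the Xp neighbour uses XpCompress for the daggered operator but XmCompress for the plain one. A sketch of that rule, assuming the usual Xp..Tp = 0..3, Xm..Tm = 4..7 direction ordering (an assumption for illustration, not Grid API):

// Which direction's compressor to use when gathering leg `dir`.
inline int opposite_dir(int dir) { return (dir + 4) % 8; }
inline int compressor_dir(int dir, bool dag) {
  return dag ? dir : opposite_dir(dir); // dagger flips the projector
}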

View File

@@ -348,15 +348,98 @@ void WilsonFermion<Impl>::DhopDirDisp(const FermionField &in, FermionField &out,
parallel_for (int sss = 0; sss < in._grid->oSites(); sss++) {
Kernels::DhopDir(Stencil, Umu, Stencil.CommBuf(), sss, sss, in, out, dirdisp, gamma);
}
}
/*Change starts*/
template <class Impl>
void WilsonFermion<Impl>::DhopInternal(StencilImpl &st, LebesgueOrder &lo,
DoubledGaugeField &U,
const FermionField &in,
FermionField &out, int dag) {
assert((dag == DaggerNo) || (dag == DaggerYes));
#ifdef GRID_OMP
if ( WilsonKernelsStatic::Comms == WilsonKernelsStatic::CommsAndCompute )
DhopInternalOverlappedComms(st,lo,U,in,out,dag);
else
#endif
DhopInternalSerial(st,lo,U,in,out,dag);
}
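DhopInternal is now just a dispatcher: the overlapped path exists only when compiled with OpenMP (the communication threads are OpenMP threads) and is taken only when the user selected CommsAndCompute. The shape of the dispatch, with stand-in names:

enum class Comms { ThenCompute, AndCompute };

// Sketch: overlap is opt-in and OpenMP-only; everything else falls back
// to the serial halo-exchange-then-compute path.
template <class Overlapped, class Serial>
void dhop_internal(Comms mode, Overlapped overlapped, Serial serial) {
#ifdef _OPENMP
  if (mode == Comms::AndCompute) { overlapped(); return; }
#endif
  serial();
}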
template <class Impl>
void WilsonFermion<Impl>::DhopInternalOverlappedComms(StencilImpl &st, LebesgueOrder &lo,
DoubledGaugeField &U,
const FermionField &in,
FermionField &out, int dag) {
assert((dag == DaggerNo) || (dag == DaggerYes));
#ifdef GRID_OMP
Compressor compressor;
int len = U._grid->oSites();
const int LLs = 1;
st.Prepare();
st.HaloGather(in,compressor);
st.CommsMergeSHM(compressor);
#pragma omp parallel
{
int tid = omp_get_thread_num();
int nthreads = omp_get_num_threads();
int ncomms = CartesianCommunicator::nCommThreads;
if (ncomms == -1) ncomms = 1;
assert(nthreads > ncomms);
if (tid >= ncomms) {
nthreads -= ncomms;
int ttid = tid - ncomms;
int n = len;
int chunk = n / nthreads;
int rem = n % nthreads;
int myblock, myn;
if (ttid < rem) {
myblock = ttid * chunk + ttid;
myn = chunk+1;
} else {
myblock = ttid*chunk + rem;
myn = chunk;
}
// do the compute
if (dag == DaggerYes) {
for (int sss = myblock; sss < myblock+myn; ++sss) {
Kernels::DhopSiteDag(st, lo, U, st.CommBuf(), sss, sss, 1, 1, in, out);
}
} else {
for (int sss = myblock; sss < myblock+myn; ++sss) {
Kernels::DhopSite(st, lo, U, st.CommBuf(), sss, sss, 1, 1, in, out);
}
} //else
} else {
st.CommunicateThreaded();
}
} //pragma
#else
assert(0);
#endif
};
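The parallel region above splits the OpenMP team by hand: the first ncomms threads progress communications while the rest tile the oSites() volume with a balanced block decomposition, the first len % nthreads compute threads taking one extra site. The same arithmetic in a runnable toy (compile with -fopenmp):

#include <cassert>
#include <cstdio>
#include <omp.h>

int main() {
  const int len = 1000, ncomms = 2;
#pragma omp parallel
  {
    int tid = omp_get_thread_num();
    int nthreads = omp_get_num_threads();
    assert(nthreads > ncomms);
    if (tid >= ncomms) {
      int nwork = nthreads - ncomms;     // number of compute threads
      int ttid = tid - ncomms;           // my compute-thread index
      int chunk = len / nwork, rem = len % nwork;
      int myblock = (ttid < rem) ? ttid * (chunk + 1) : ttid * chunk + rem;
      int myn = (ttid < rem) ? chunk + 1 : chunk;
      // the Dhop kernel would run over sites [myblock, myblock+myn)
      printf("compute thread %d: sites [%d,%d)\n", ttid, myblock, myblock + myn);
    } else {
      // communication thread: would progress the MPI sends/receives here
    }
  }
  return 0;
}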
template <class Impl>
void WilsonFermion<Impl>::DhopInternalSerial(StencilImpl &st, LebesgueOrder &lo,
DoubledGaugeField &U,
const FermionField &in,
FermionField &out, int dag) {
assert((dag == DaggerNo) || (dag == DaggerYes));
Compressor compressor(dag);
st.HaloExchange(in, compressor);
@@ -370,6 +453,7 @@ void WilsonFermion<Impl>::DhopInternal(StencilImpl &st, LebesgueOrder &lo,
}
}
};
/*Change ends */
/*******************************************************************************
* Conserved current utilities for Wilson fermions, for contracting propagators

View File

@@ -130,6 +130,12 @@ class WilsonFermion : public WilsonKernels<Impl>, public WilsonFermionStatic {
void DhopInternal(StencilImpl &st, LebesgueOrder &lo, DoubledGaugeField &U,
const FermionField &in, FermionField &out, int dag);
void DhopInternalSerial(StencilImpl &st, LebesgueOrder &lo, DoubledGaugeField &U,
const FermionField &in, FermionField &out, int dag);
void DhopInternalOverlappedComms(StencilImpl &st, LebesgueOrder &lo, DoubledGaugeField &U,
const FermionField &in, FermionField &out, int dag);
// Constructor
WilsonFermion(GaugeField &_Umu, GridCartesian &Fgrid,
GridRedBlackCartesian &Hgrid, RealD _mass,
@@ -145,6 +151,8 @@ class WilsonFermion : public WilsonKernels<Impl>, public WilsonFermionStatic {
// protected:
public:
virtual RealD Mass(void) { return mass; }
virtual int isTrivialEE(void) { return 1; };
RealD mass;
RealD diag_mass;

View File

@@ -445,8 +445,7 @@ void WilsonFermion5D<Impl>::DhopInternalOverlappedComms(StencilImpl & st, Lebesg
}
}
ptime = usecond() - start;
} else {
double start = usecond();
st.CommunicateThreaded();
ctime = usecond() - start;

View File

@@ -53,7 +53,7 @@ template<class Impl> class WilsonKernels : public FermionOperator<Impl> , publi
typedef FermionOperator<Impl> Base;
public:
template <bool EnableBool = true>
typename std::enable_if<Impl::isFundamental==true && Nc == 3 &&EnableBool, void>::type
DhopSite(StencilImpl &st, LebesgueOrder &lo, DoubledGaugeField &U, SiteHalfSpinor * buf,
@@ -70,27 +70,27 @@ public:
break;
#endif
case OptHandUnroll:
for (int site = 0; site < Ns; site++) {
for (int s = 0; s < Ls; s++) {
if(interior&&exterior) WilsonKernels<Impl>::HandDhopSite(st,lo,U,buf,sF,sU,in,out);
else if (interior) WilsonKernels<Impl>::HandDhopSiteInt(st,lo,U,buf,sF,sU,in,out);
else if (exterior) WilsonKernels<Impl>::HandDhopSiteExt(st,lo,U,buf,sF,sU,in,out);
sF++;
}
sU++;
}
break;
case OptGeneric:
for (int site = 0; site < Ns; site++) {
for (int s = 0; s < Ls; s++) {
if(interior&&exterior) WilsonKernels<Impl>::GenericDhopSite(st,lo,U,buf,sF,sU,in,out);
else if (interior) WilsonKernels<Impl>::GenericDhopSiteInt(st,lo,U,buf,sF,sU,in,out);
else if (exterior) WilsonKernels<Impl>::GenericDhopSiteExt(st,lo,U,buf,sF,sU,in,out);
else assert(0);
sF++;
}
sU++;
}
break;
default:
assert(0);
@@ -232,6 +232,7 @@ private:
void GenericDhopSiteDagExt(StencilImpl &st, LebesgueOrder &lo, DoubledGaugeField &U, SiteHalfSpinor * buf,
int sF, int sU, const FermionField &in, FermionField &out);
void AsmDhopSite(StencilImpl &st, LebesgueOrder &lo, DoubledGaugeField &U, SiteHalfSpinor * buf,
int sF, int sU, int Ls, int Ns, const FermionField &in,FermionField &out);
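Throughout these kernels sU indexes the 4d gauge field while sF walks the Ls-times-larger 5d fermion field, and the Int/Ext kernel variants exist so the overlapped path can sweep the lattice twice: once over legs already available locally, once over legs that had to arrive by MPI. A sketch of the loop shape, with an illustrative kernel functor:

// sF advances once per 5d site, sU once per 4d site (Ls fermion sites
// share one gauge site). The interior pass runs while comms are in
// flight; the exterior pass finishes the off-node legs afterwards.
template <class SiteKernel>
void sweep(int Ns, int Ls, SiteKernel kernel) {
  int sF = 0, sU = 0;
  for (int site = 0; site < Ns; site++) {
    for (int s = 0; s < Ls; s++) kernel(sF++, sU);
    sU++;
  }
}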

View File

@@ -30,181 +30,60 @@ Author: paboyle <paboyle@ph.ed.ac.uk>
#define REGISTER
#define LOAD_CHIMU \
{const SiteSpinor & ref (in._odata[offset]); \
Chimu_00=ref()(0)(0);\
Chimu_01=ref()(0)(1);\
Chimu_02=ref()(0)(2);\
Chimu_10=ref()(1)(0);\
Chimu_11=ref()(1)(1);\
Chimu_12=ref()(1)(2);\
Chimu_20=ref()(2)(0);\
Chimu_21=ref()(2)(1);\
Chimu_22=ref()(2)(2);\
Chimu_30=ref()(3)(0);\
Chimu_31=ref()(3)(1);\
Chimu_32=ref()(3)(2);}
#define LOAD_CHI\
{const SiteHalfSpinor &ref(buf[offset]); \
Chi_00 = ref()(0)(0);\
Chi_01 = ref()(0)(1);\
Chi_02 = ref()(0)(2);\
Chi_10 = ref()(1)(0);\
Chi_11 = ref()(1)(1);\
Chi_12 = ref()(1)(2);}
// To splat or not to splat depends on the implementation
#define MULT_2SPIN(A)\
{auto & ref(U._odata[sU](A)); \
Impl::loadLinkElement(U_00,ref()(0,0)); \
Impl::loadLinkElement(U_10,ref()(1,0)); \
Impl::loadLinkElement(U_20,ref()(2,0)); \
Impl::loadLinkElement(U_01,ref()(0,1)); \
Impl::loadLinkElement(U_11,ref()(1,1)); \
Impl::loadLinkElement(U_21,ref()(2,1)); \
UChi_00 = U_00*Chi_00;\
UChi_10 = U_00*Chi_10;\
UChi_01 = U_10*Chi_00;\
UChi_11 = U_10*Chi_10;\
UChi_02 = U_20*Chi_00;\
UChi_12 = U_20*Chi_10;\
UChi_00+= U_01*Chi_01;\
UChi_10+= U_01*Chi_11;\
UChi_01+= U_11*Chi_01;\
UChi_11+= U_11*Chi_11;\
UChi_02+= U_21*Chi_01;\
UChi_12+= U_21*Chi_11;\
Impl::loadLinkElement(U_00,ref()(0,2)); \
Impl::loadLinkElement(U_10,ref()(1,2)); \
Impl::loadLinkElement(U_20,ref()(2,2)); \
UChi_00+= U_00*Chi_02;\
UChi_10+= U_00*Chi_12;\
UChi_01+= U_10*Chi_02;\
UChi_11+= U_10*Chi_12;\
UChi_02+= U_20*Chi_02;\
UChi_12+= U_20*Chi_12;}
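Expanded, MULT_2SPIN is one SU(3) link matrix applied to both spin components of the projected half spinor, with the third link column reloaded into the first three U registers to stay inside the register budget. The same arithmetic without the register choreography:

#include <array>
#include <complex>

using Cplx  = std::complex<double>;
using Color = std::array<Cplx, 3>;                 // one spin component
using Link  = std::array<std::array<Cplx, 3>, 3>;  // U(row, column)

// UChi[s][r] = sum_c U[r][c] * Chi[s][c] for the two projected spins.
void mult_2spin(const Link &U, const Color Chi[2], Color UChi[2]) {
  for (int s = 0; s < 2; s++)
    for (int r = 0; r < 3; r++) {
      UChi[s][r] = Cplx(0, 0);
      for (int c = 0; c < 3; c++) UChi[s][r] += U[r][c] * Chi[s][c];
    }
}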
#define PERMUTE_DIR(dir) \
@@ -428,87 +307,84 @@ Author: paboyle <paboyle@ph.ed.ac.uk>
result_31-= UChi_11; \
result_32-= UChi_12;
#define HAND_STENCIL_LEG(PROJ,PERM,DIR,RECON) \
SE=st.GetEntry(ptype,DIR,ss); \
offset = SE->_offset; \
local = SE->_is_local; \
perm = SE->_permute; \
if ( local ) { \
LOAD_CHIMU; \
PROJ; \
if ( perm) { \
PERMUTE_DIR(PERM); \
} \
} else { \
LOAD_CHI; \
} \
MULT_2SPIN(DIR); \
RECON;
#define HAND_STENCIL_LEG_INT(PROJ,PERM,DIR,RECON) \
SE=st.GetEntry(ptype,DIR,ss); \
offset = SE->_offset; \
local = SE->_is_local; \
perm = SE->_permute; \
if ( local ) { \
LOAD_CHIMU; \
PROJ; \
if ( perm) { \
PERMUTE_DIR(PERM); \
} \
} else if ( st.same_node[DIR] ) { \
LOAD_CHI; \
} \
if (local || st.same_node[DIR] ) { \
MULT_2SPIN(DIR); \
RECON; \
}
#define HAND_STENCIL_LEG_EXT(PROJ,PERM,DIR,RECON) \
SE=st.GetEntry(ptype,DIR,ss); \
offset = SE->_offset; \
local = SE->_is_local; \
perm = SE->_permute; \
if((!SE->_is_local)&&(!st.same_node[DIR]) ) { \
LOAD_CHI; \
MULT_2SPIN(DIR); \
RECON; \
nmu++; \
}
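Each HAND_STENCIL_LEG variant is the same four-step leg: fetch the neighbour (projecting, and SIMD-permuting if needed, when it is local; reading the pre-projected comms buffer when it is not), multiply by the link, then reconstruct-accumulate. The _INT form skips legs that must wait for MPI, and the _EXT form handles only those. The control flow in plain C++, with illustrative stand-ins rather than Grid's types:

struct EntryLite { int offset; bool is_local; bool same_node; int perm; };

template <class HalfSpinor, class Proj, class Mult, class Recon>
void stencil_leg_int(const EntryLite &SE, const HalfSpinor *buf,
                     Proj proj, Mult mult, Recon recon) {
  HalfSpinor chi;
  if (SE.is_local) {
    chi = proj(SE.offset, SE.perm);  // project (and permute) the local site
  } else if (SE.same_node) {
    chi = buf[SE.offset];            // already projected, via shared memory
  }
  if (SE.is_local || SE.same_node)   // off-node legs wait for the _EXT pass
    recon(mult(chi));                // U*chi, then accumulate into result
}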
#define HAND_RESULT(ss) \
{ \
SiteSpinor & ref (out._odata[ss]); \
vstream(ref()(0)(0),result_00); \
vstream(ref()(0)(1),result_01); \
vstream(ref()(0)(2),result_02); \
vstream(ref()(1)(0),result_10); \
vstream(ref()(1)(1),result_11); \
vstream(ref()(1)(2),result_12); \
vstream(ref()(2)(0),result_20); \
vstream(ref()(2)(1),result_21); \
vstream(ref()(2)(2),result_22); \
vstream(ref()(3)(0),result_30); \
vstream(ref()(3)(1),result_31); \
vstream(ref()(3)(2),result_32); \
}
#define HAND_RESULT_EXT(ss) \
if (nmu){ \
SiteSpinor & ref (out._odata[ss]); \
ref()(0)(0)+=result_00; \
ref()(0)(1)+=result_01; \
ref()(0)(2)+=result_02; \
ref()(1)(0)+=result_10; \
ref()(1)(1)+=result_11; \
ref()(1)(2)+=result_12; \
ref()(2)(0)+=result_20; \
ref()(2)(1)+=result_21; \
ref()(2)(2)+=result_22; \
ref()(3)(0)+=result_30; \
ref()(3)(1)+=result_31; \
ref()(3)(2)+=result_32; \
}
@@ -587,18 +463,15 @@ WilsonKernels<Impl>::HandDhopSite(StencilImpl &st,LebesgueOrder &lo,DoubledGauge
int offset,local,perm, ptype;
StencilEntry *SE;
HAND_STENCIL_LEG(XM_PROJ,3,Xp,XM_RECON);
HAND_STENCIL_LEG(YM_PROJ,2,Yp,YM_RECON_ACCUM);
HAND_STENCIL_LEG(ZM_PROJ,1,Zp,ZM_RECON_ACCUM);
HAND_STENCIL_LEG(TM_PROJ,0,Tp,TM_RECON_ACCUM);
HAND_STENCIL_LEG(XP_PROJ,3,Xm,XP_RECON_ACCUM);
HAND_STENCIL_LEG(YP_PROJ,2,Ym,YP_RECON_ACCUM);
HAND_STENCIL_LEG(ZP_PROJ,1,Zm,ZP_RECON_ACCUM);
HAND_STENCIL_LEG(TP_PROJ,0,Tm,TP_RECON_ACCUM);
HAND_RESULT(ss);
}
template<class Impl>
@@ -612,19 +485,16 @@ void WilsonKernels<Impl>::HandDhopSiteDag(StencilImpl &st,LebesgueOrder &lo,Doub
StencilEntry *SE;
int offset,local,perm, ptype;
HAND_STENCIL_LEG(XP_PROJ,3,Xp,XP_RECON);
HAND_STENCIL_LEG(YP_PROJ,2,Yp,YP_RECON_ACCUM);
HAND_STENCIL_LEG(ZP_PROJ,1,Zp,ZP_RECON_ACCUM);
HAND_STENCIL_LEG(TP_PROJ,0,Tp,TP_RECON_ACCUM);
HAND_STENCIL_LEG(XM_PROJ,3,Xm,XM_RECON_ACCUM);
HAND_STENCIL_LEG(YM_PROJ,2,Ym,YM_RECON_ACCUM);
HAND_STENCIL_LEG(ZM_PROJ,1,Zm,ZM_RECON_ACCUM);
HAND_STENCIL_LEG(TM_PROJ,0,Tm,TM_RECON_ACCUM);
HAND_RESULT(ss);
}
template<class Impl> void
@@ -639,20 +509,16 @@ WilsonKernels<Impl>::HandDhopSiteInt(StencilImpl &st,LebesgueOrder &lo,DoubledGa
int offset,local,perm, ptype;
StencilEntry *SE;
ZERO_RESULT;
HAND_STENCIL_LEG_INT(XM_PROJ,3,Xp,XM_RECON_ACCUM);
HAND_STENCIL_LEG_INT(YM_PROJ,2,Yp,YM_RECON_ACCUM);
HAND_STENCIL_LEG_INT(ZM_PROJ,1,Zp,ZM_RECON_ACCUM);
HAND_STENCIL_LEG_INT(TM_PROJ,0,Tp,TM_RECON_ACCUM);
HAND_STENCIL_LEG_INT(XP_PROJ,3,Xm,XP_RECON_ACCUM);
HAND_STENCIL_LEG_INT(YP_PROJ,2,Ym,YP_RECON_ACCUM);
HAND_STENCIL_LEG_INT(ZP_PROJ,1,Zm,ZP_RECON_ACCUM);
HAND_STENCIL_LEG_INT(TP_PROJ,0,Tm,TP_RECON_ACCUM);
HAND_RESULT(ss);
}
template<class Impl>
@@ -666,20 +532,16 @@ void WilsonKernels<Impl>::HandDhopSiteDagInt(StencilImpl &st,LebesgueOrder &lo,D
StencilEntry *SE;
int offset,local,perm, ptype;
ZERO_RESULT;
HAND_STENCIL_LEG_INT(XP_PROJ,3,Xp,XP_RECON_ACCUM);
HAND_STENCIL_LEG_INT(YP_PROJ,2,Yp,YP_RECON_ACCUM);
HAND_STENCIL_LEG_INT(ZP_PROJ,1,Zp,ZP_RECON_ACCUM);
HAND_STENCIL_LEG_INT(TP_PROJ,0,Tp,TP_RECON_ACCUM);
HAND_STENCIL_LEG_INT(XM_PROJ,3,Xm,XM_RECON_ACCUM);
HAND_STENCIL_LEG_INT(YM_PROJ,2,Ym,YM_RECON_ACCUM);
HAND_STENCIL_LEG_INT(ZM_PROJ,1,Zm,ZM_RECON_ACCUM);
HAND_STENCIL_LEG_INT(TM_PROJ,0,Tm,TM_RECON_ACCUM);
HAND_RESULT(ss);
}
template<class Impl> void
@@ -695,20 +557,16 @@ WilsonKernels<Impl>::HandDhopSiteExt(StencilImpl &st,LebesgueOrder &lo,DoubledGa
int offset,local,perm, ptype;
StencilEntry *SE;
int nmu=0;
ZERO_RESULT;
HAND_STENCIL_LEG_EXT(XM_PROJ,3,Xp,XM_RECON_ACCUM);
HAND_STENCIL_LEG_EXT(YM_PROJ,2,Yp,YM_RECON_ACCUM);
HAND_STENCIL_LEG_EXT(ZM_PROJ,1,Zp,ZM_RECON_ACCUM);
HAND_STENCIL_LEG_EXT(TM_PROJ,0,Tp,TM_RECON_ACCUM);
HAND_STENCIL_LEG_EXT(XP_PROJ,3,Xm,XP_RECON_ACCUM);
HAND_STENCIL_LEG_EXT(YP_PROJ,2,Ym,YP_RECON_ACCUM);
HAND_STENCIL_LEG_EXT(ZP_PROJ,1,Zm,ZP_RECON_ACCUM);
HAND_STENCIL_LEG_EXT(TP_PROJ,0,Tm,TP_RECON_ACCUM);
HAND_RESULT_EXT(ss);
}
template<class Impl>
@@ -723,193 +581,18 @@ void WilsonKernels<Impl>::HandDhopSiteDagExt(StencilImpl &st,LebesgueOrder &lo,D
StencilEntry *SE;
int offset,local,perm, ptype;
int nmu=0;
ZERO_RESULT;
HAND_STENCIL_LEG_EXT(XP_PROJ,3,Xp,XP_RECON_ACCUM);
HAND_STENCIL_LEG_EXT(YP_PROJ,2,Yp,YP_RECON_ACCUM);
HAND_STENCIL_LEG_EXT(ZP_PROJ,1,Zp,ZP_RECON_ACCUM);
HAND_STENCIL_LEG_EXT(TP_PROJ,0,Tp,TP_RECON_ACCUM);
HAND_STENCIL_LEG_EXT(XM_PROJ,3,Xm,XM_RECON_ACCUM);
HAND_STENCIL_LEG_EXT(YM_PROJ,2,Ym,YM_RECON_ACCUM);
HAND_STENCIL_LEG_EXT(ZM_PROJ,1,Zm,ZM_RECON_ACCUM);
HAND_STENCIL_LEG_EXT(TM_PROJ,0,Tm,TM_RECON_ACCUM);
HAND_RESULT_EXT(ss);
}
////////////////////////////////////////////////
// Specialise Gparity to simple implementation
////////////////////////////////////////////////
#define HAND_SPECIALISE_EMPTY(IMPL) \
template<> void \
WilsonKernels<IMPL>::HandDhopSite(StencilImpl &st, \
LebesgueOrder &lo, \
DoubledGaugeField &U, \
SiteHalfSpinor *buf, \
int sF,int sU, \
const FermionField &in, \
FermionField &out){ assert(0); } \
template<> void \
WilsonKernels<IMPL>::HandDhopSiteDag(StencilImpl &st, \
LebesgueOrder &lo, \
DoubledGaugeField &U, \
SiteHalfSpinor *buf, \
int sF,int sU, \
const FermionField &in, \
FermionField &out){ assert(0); } \
template<> void \
WilsonKernels<IMPL>::HandDhopSiteInt(StencilImpl &st, \
LebesgueOrder &lo, \
DoubledGaugeField &U, \
SiteHalfSpinor *buf, \
int sF,int sU, \
const FermionField &in, \
FermionField &out){ assert(0); } \
template<> void \
WilsonKernels<IMPL>::HandDhopSiteExt(StencilImpl &st, \
LebesgueOrder &lo, \
DoubledGaugeField &U, \
SiteHalfSpinor *buf, \
int sF,int sU, \
const FermionField &in, \
FermionField &out){ assert(0); } \
template<> void \
WilsonKernels<IMPL>::HandDhopSiteDagInt(StencilImpl &st, \
LebesgueOrder &lo, \
DoubledGaugeField &U, \
SiteHalfSpinor *buf, \
int sF,int sU, \
const FermionField &in, \
FermionField &out){ assert(0); } \
template<> void \
WilsonKernels<IMPL>::HandDhopSiteDagExt(StencilImpl &st, \
LebesgueOrder &lo, \
DoubledGaugeField &U, \
SiteHalfSpinor *buf, \
int sF,int sU, \
const FermionField &in, \
FermionField &out){ assert(0); }
////////////// Wilson ; uses this implementation /////////////////////
#define INSTANTIATE_THEM(A) \
@@ -930,8 +613,6 @@ INSTANTIATE_THEM(WilsonImplF);
INSTANTIATE_THEM(WilsonImplD);
INSTANTIATE_THEM(ZWilsonImplF);
INSTANTIATE_THEM(ZWilsonImplD);
INSTANTIATE_THEM(GparityWilsonImplF);
INSTANTIATE_THEM(GparityWilsonImplD);
INSTANTIATE_THEM(DomainWallVec5dImplF);
INSTANTIATE_THEM(DomainWallVec5dImplD);
INSTANTIATE_THEM(ZDomainWallVec5dImplF);
@@ -940,12 +621,11 @@ INSTANTIATE_THEM(WilsonImplFH);
INSTANTIATE_THEM(WilsonImplDF);
INSTANTIATE_THEM(ZWilsonImplFH);
INSTANTIATE_THEM(ZWilsonImplDF);
INSTANTIATE_THEM(GparityWilsonImplFH);
INSTANTIATE_THEM(GparityWilsonImplDF);
INSTANTIATE_THEM(DomainWallVec5dImplFH);
INSTANTIATE_THEM(DomainWallVec5dImplDF);
INSTANTIATE_THEM(ZDomainWallVec5dImplFH);
INSTANTIATE_THEM(ZDomainWallVec5dImplDF);
INSTANTIATE_THEM(WilsonTwoIndexAntiSymmetricImplF);
INSTANTIATE_THEM(WilsonTwoIndexAntiSymmetricImplD);
}}

View File

@@ -0,0 +1,878 @@
/*************************************************************************************
Grid physics library, www.github.com/paboyle/Grid
Source file: ./lib/qcd/action/fermion/WilsonKernelsHand.cc
Copyright (C) 2015
Author: Peter Boyle <paboyle@ph.ed.ac.uk>
Author: paboyle <paboyle@ph.ed.ac.uk>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */
#include <Grid/qcd/action/fermion/FermionCore.h>
#define REGISTER
#define LOAD_CHIMU_BODY(F) \
Chimu_00=ref(F)(0)(0); \
Chimu_01=ref(F)(0)(1); \
Chimu_02=ref(F)(0)(2); \
Chimu_10=ref(F)(1)(0); \
Chimu_11=ref(F)(1)(1); \
Chimu_12=ref(F)(1)(2); \
Chimu_20=ref(F)(2)(0); \
Chimu_21=ref(F)(2)(1); \
Chimu_22=ref(F)(2)(2); \
Chimu_30=ref(F)(3)(0); \
Chimu_31=ref(F)(3)(1); \
Chimu_32=ref(F)(3)(2)
#define LOAD_CHIMU(DIR,F,PERM) \
{ const SiteSpinor & ref (in._odata[offset]); LOAD_CHIMU_BODY(F); }
#define LOAD_CHI_BODY(F) \
Chi_00 = ref(F)(0)(0);\
Chi_01 = ref(F)(0)(1);\
Chi_02 = ref(F)(0)(2);\
Chi_10 = ref(F)(1)(0);\
Chi_11 = ref(F)(1)(1);\
Chi_12 = ref(F)(1)(2)
#define LOAD_CHI(DIR,F,PERM) \
{const SiteHalfSpinor &ref(buf[offset]); LOAD_CHI_BODY(F); }
//G-parity implementations using in-place intrinsic ops
//1l 1h -> 1h 1l
//0l 0h , 1h 1l -> 0l 1h 0h,1l
//0h,1l -> 1l,0h
//if( (distance == 1 && !perm_will_occur) || (distance == -1 && perm_will_occur) )
//Pulled fermion through forwards face, GPBC on upper component
//Need 0= 0l 1h 1= 1l 0h
//else if( (distance == -1 && !perm) || (distance == 1 && perm) )
//Pulled fermion through backwards face, GPBC on lower component
//Need 0= 1l 0h 1= 0l 1h
//1l 1h -> 1h 1l
//0l 0h , 1h 1l -> 0l 1h 0h,1l
#define DO_TWIST_0L_1H(INTO,S,C,F, PERM, tmp1, tmp2, tmp3) \
permute##PERM(tmp1, ref(1)(S)(C)); \
exchange##PERM(tmp2,tmp3, ref(0)(S)(C), tmp1); \
INTO = tmp2;
//0l 0h -> 0h 0l
//1l 1h, 0h 0l -> 1l 0h, 1h 0l
#define DO_TWIST_1L_0H(INTO,S,C,F, PERM, tmp1, tmp2, tmp3) \
permute##PERM(tmp1, ref(0)(S)(C)); \
exchange##PERM(tmp2,tmp3, ref(1)(S)(C), tmp1); \
INTO = tmp2;
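The twist macros build, in SIMD registers, a vector whose lanes mix the two G-parity flavours: when the SIMD layout folds the split direction (sl != 1), flavour boundaries cross in-register, so permute and exchange are used instead of simply reading the other flavour field. A scalar model of DO_TWIST_0L_1H under an assumed convention (low lanes first; exchange gathers low halves into its first output, high halves into its second; the real ops are vector intrinsics):

#include <array>

template <int W> using Lanes = std::array<double, W>;

// permute: swap the two halves of a register.
template <int W> Lanes<W> permute_halves(const Lanes<W> &a) {
  Lanes<W> r;
  for (int i = 0; i < W; i++) r[i] = a[(i + W / 2) % W];
  return r;
}

// exchange (assumed): o0 = {a.low, b.low}, o1 = {a.high, b.high}.
template <int W>
void exchange_halves(Lanes<W> &o0, Lanes<W> &o1,
                     const Lanes<W> &a, const Lanes<W> &b) {
  for (int i = 0; i < W / 2; i++) {
    o0[i] = a[i];          o0[i + W / 2] = b[i];
    o1[i] = a[i + W / 2];  o1[i + W / 2] = b[i + W / 2];
  }
}

// DO_TWIST_0L_1H: result lanes = {flavour0 low half, flavour1 high half}.
template <int W>
Lanes<W> twist_0l_1h(const Lanes<W> &f0, const Lanes<W> &f1) {
  Lanes<W> t = permute_halves<W>(f1), lo, hi;
  exchange_halves<W>(lo, hi, f0, t); // t.low = f1.high, so lo = {0l, 1h}
  return lo;
}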
#define LOAD_CHI_SETUP(DIR,F) \
g = F; \
direction = st._directions[DIR]; \
distance = st._distances[DIR]; \
sl = st._grid->_simd_layout[direction]; \
inplace_twist = 0; \
if(SE->_around_the_world && this->Params.twists[DIR % 4]){ \
if(sl == 1){ \
g = (F+1) % 2; \
}else{ \
inplace_twist = 1; \
} \
}
#define LOAD_CHIMU_GPARITY_INPLACE_TWIST(DIR,F,PERM) \
{ const SiteSpinor &ref(in._odata[offset]); \
LOAD_CHI_SETUP(DIR,F); \
if(!inplace_twist){ \
LOAD_CHIMU_BODY(g); \
}else{ \
if( ( F==0 && ((distance == 1 && !perm) || (distance == -1 && perm)) ) || \
( F==1 && ((distance == -1 && !perm) || (distance == 1 && perm)) ) ){ \
DO_TWIST_0L_1H(Chimu_00,0,0,F,PERM, U_00,U_01,U_10); \
DO_TWIST_0L_1H(Chimu_01,0,1,F,PERM, U_11,U_20,U_21); \
DO_TWIST_0L_1H(Chimu_02,0,2,F,PERM, U_00,U_01,U_10); \
DO_TWIST_0L_1H(Chimu_10,1,0,F,PERM, U_11,U_20,U_21); \
DO_TWIST_0L_1H(Chimu_11,1,1,F,PERM, U_00,U_01,U_10); \
DO_TWIST_0L_1H(Chimu_12,1,2,F,PERM, U_11,U_20,U_21); \
DO_TWIST_0L_1H(Chimu_20,2,0,F,PERM, U_00,U_01,U_10); \
DO_TWIST_0L_1H(Chimu_21,2,1,F,PERM, U_11,U_20,U_21); \
DO_TWIST_0L_1H(Chimu_22,2,2,F,PERM, U_00,U_01,U_10); \
DO_TWIST_0L_1H(Chimu_30,3,0,F,PERM, U_11,U_20,U_21); \
DO_TWIST_0L_1H(Chimu_31,3,1,F,PERM, U_00,U_01,U_10); \
DO_TWIST_0L_1H(Chimu_32,3,2,F,PERM, U_11,U_20,U_21); \
}else{ \
DO_TWIST_1L_0H(Chimu_00,0,0,F,PERM, U_00,U_01,U_10); \
DO_TWIST_1L_0H(Chimu_01,0,1,F,PERM, U_11,U_20,U_21); \
DO_TWIST_1L_0H(Chimu_02,0,2,F,PERM, U_00,U_01,U_10); \
DO_TWIST_1L_0H(Chimu_10,1,0,F,PERM, U_11,U_20,U_21); \
DO_TWIST_1L_0H(Chimu_11,1,1,F,PERM, U_00,U_01,U_10); \
DO_TWIST_1L_0H(Chimu_12,1,2,F,PERM, U_11,U_20,U_21); \
DO_TWIST_1L_0H(Chimu_20,2,0,F,PERM, U_00,U_01,U_10); \
DO_TWIST_1L_0H(Chimu_21,2,1,F,PERM, U_11,U_20,U_21); \
DO_TWIST_1L_0H(Chimu_22,2,2,F,PERM, U_00,U_01,U_10); \
DO_TWIST_1L_0H(Chimu_30,3,0,F,PERM, U_11,U_20,U_21); \
DO_TWIST_1L_0H(Chimu_31,3,1,F,PERM, U_00,U_01,U_10); \
DO_TWIST_1L_0H(Chimu_32,3,2,F,PERM, U_11,U_20,U_21); \
} \
} \
}
#define LOAD_CHI_GPARITY_INPLACE_TWIST(DIR,F,PERM) \
{ const SiteHalfSpinor &ref(buf[offset]); \
LOAD_CHI_SETUP(DIR,F); \
if(!inplace_twist){ \
LOAD_CHI_BODY(g); \
}else{ \
if( ( F==0 && ((distance == 1 && !perm) || (distance == -1 && perm)) ) || \
( F==1 && ((distance == -1 && !perm) || (distance == 1 && perm)) ) ){ \
DO_TWIST_0L_1H(Chi_00,0,0,F,PERM, U_00,U_01,U_10); \
DO_TWIST_0L_1H(Chi_01,0,1,F,PERM, U_11,U_20,U_21); \
DO_TWIST_0L_1H(Chi_02,0,2,F,PERM, UChi_00,UChi_01,UChi_02); \
DO_TWIST_0L_1H(Chi_10,1,0,F,PERM, UChi_10,UChi_11,UChi_12); \
DO_TWIST_0L_1H(Chi_11,1,1,F,PERM, U_00,U_01,U_10); \
DO_TWIST_0L_1H(Chi_12,1,2,F,PERM, U_11,U_20,U_21); \
}else{ \
DO_TWIST_1L_0H(Chi_00,0,0,F,PERM, U_00,U_01,U_10); \
DO_TWIST_1L_0H(Chi_01,0,1,F,PERM, U_11,U_20,U_21); \
DO_TWIST_1L_0H(Chi_02,0,2,F,PERM, UChi_00,UChi_01,UChi_02); \
DO_TWIST_1L_0H(Chi_10,1,0,F,PERM, UChi_10,UChi_11,UChi_12); \
DO_TWIST_1L_0H(Chi_11,1,1,F,PERM, U_00,U_01,U_10); \
DO_TWIST_1L_0H(Chi_12,1,2,F,PERM, U_11,U_20,U_21); \
} \
} \
}
#define LOAD_CHI_GPARITY(DIR,F,PERM) LOAD_CHI_GPARITY_INPLACE_TWIST(DIR,F,PERM)
#define LOAD_CHIMU_GPARITY(DIR,F,PERM) LOAD_CHIMU_GPARITY_INPLACE_TWIST(DIR,F,PERM)
// To splat or not to splat depends on the implementation
#define MULT_2SPIN_BODY \
Impl::loadLinkElement(U_00,ref()(0,0)); \
Impl::loadLinkElement(U_10,ref()(1,0)); \
Impl::loadLinkElement(U_20,ref()(2,0)); \
Impl::loadLinkElement(U_01,ref()(0,1)); \
Impl::loadLinkElement(U_11,ref()(1,1)); \
Impl::loadLinkElement(U_21,ref()(2,1)); \
UChi_00 = U_00*Chi_00; \
UChi_10 = U_00*Chi_10; \
UChi_01 = U_10*Chi_00; \
UChi_11 = U_10*Chi_10; \
UChi_02 = U_20*Chi_00; \
UChi_12 = U_20*Chi_10; \
UChi_00+= U_01*Chi_01; \
UChi_10+= U_01*Chi_11; \
UChi_01+= U_11*Chi_01; \
UChi_11+= U_11*Chi_11; \
UChi_02+= U_21*Chi_01; \
UChi_12+= U_21*Chi_11; \
Impl::loadLinkElement(U_00,ref()(0,2)); \
Impl::loadLinkElement(U_10,ref()(1,2)); \
Impl::loadLinkElement(U_20,ref()(2,2)); \
UChi_00+= U_00*Chi_02; \
UChi_10+= U_00*Chi_12; \
UChi_01+= U_10*Chi_02; \
UChi_11+= U_10*Chi_12; \
UChi_02+= U_20*Chi_02; \
UChi_12+= U_20*Chi_12
#define MULT_2SPIN(A,F) \
{auto & ref(U._odata[sU](A)); MULT_2SPIN_BODY; }
#define MULT_2SPIN_GPARITY(A,F) \
{auto & ref(U._odata[sU](F)(A)); MULT_2SPIN_BODY; }
#define PERMUTE_DIR(dir) \
permute##dir(Chi_00,Chi_00);\
permute##dir(Chi_01,Chi_01);\
permute##dir(Chi_02,Chi_02);\
permute##dir(Chi_10,Chi_10);\
permute##dir(Chi_11,Chi_11);\
permute##dir(Chi_12,Chi_12);
// hspin(0)=fspin(0)+timesI(fspin(3));
// hspin(1)=fspin(1)+timesI(fspin(2));
#define XP_PROJ \
Chi_00 = Chimu_00+timesI(Chimu_30);\
Chi_01 = Chimu_01+timesI(Chimu_31);\
Chi_02 = Chimu_02+timesI(Chimu_32);\
Chi_10 = Chimu_10+timesI(Chimu_20);\
Chi_11 = Chimu_11+timesI(Chimu_21);\
Chi_12 = Chimu_12+timesI(Chimu_22);
#define YP_PROJ \
Chi_00 = Chimu_00-Chimu_30;\
Chi_01 = Chimu_01-Chimu_31;\
Chi_02 = Chimu_02-Chimu_32;\
Chi_10 = Chimu_10+Chimu_20;\
Chi_11 = Chimu_11+Chimu_21;\
Chi_12 = Chimu_12+Chimu_22;
#define ZP_PROJ \
Chi_00 = Chimu_00+timesI(Chimu_20); \
Chi_01 = Chimu_01+timesI(Chimu_21); \
Chi_02 = Chimu_02+timesI(Chimu_22); \
Chi_10 = Chimu_10-timesI(Chimu_30); \
Chi_11 = Chimu_11-timesI(Chimu_31); \
Chi_12 = Chimu_12-timesI(Chimu_32);
#define TP_PROJ \
Chi_00 = Chimu_00+Chimu_20; \
Chi_01 = Chimu_01+Chimu_21; \
Chi_02 = Chimu_02+Chimu_22; \
Chi_10 = Chimu_10+Chimu_30; \
Chi_11 = Chimu_11+Chimu_31; \
Chi_12 = Chimu_12+Chimu_32;
// hspin(0)=fspin(0)-timesI(fspin(3));
// hspin(1)=fspin(1)-timesI(fspin(2));
#define XM_PROJ \
Chi_00 = Chimu_00-timesI(Chimu_30);\
Chi_01 = Chimu_01-timesI(Chimu_31);\
Chi_02 = Chimu_02-timesI(Chimu_32);\
Chi_10 = Chimu_10-timesI(Chimu_20);\
Chi_11 = Chimu_11-timesI(Chimu_21);\
Chi_12 = Chimu_12-timesI(Chimu_22);
#define YM_PROJ \
Chi_00 = Chimu_00+Chimu_30;\
Chi_01 = Chimu_01+Chimu_31;\
Chi_02 = Chimu_02+Chimu_32;\
Chi_10 = Chimu_10-Chimu_20;\
Chi_11 = Chimu_11-Chimu_21;\
Chi_12 = Chimu_12-Chimu_22;
#define ZM_PROJ \
Chi_00 = Chimu_00-timesI(Chimu_20); \
Chi_01 = Chimu_01-timesI(Chimu_21); \
Chi_02 = Chimu_02-timesI(Chimu_22); \
Chi_10 = Chimu_10+timesI(Chimu_30); \
Chi_11 = Chimu_11+timesI(Chimu_31); \
Chi_12 = Chimu_12+timesI(Chimu_32);
#define TM_PROJ \
Chi_00 = Chimu_00-Chimu_20; \
Chi_01 = Chimu_01-Chimu_21; \
Chi_02 = Chimu_02-Chimu_22; \
Chi_10 = Chimu_10-Chimu_30; \
Chi_11 = Chimu_11-Chimu_31; \
Chi_12 = Chimu_12-Chimu_32;
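Each *_PROJ macro computes the two independent spin components left after applying the direction's spin projector, so only a half spinor goes through the link multiply, halving the arithmetic. XP_PROJ for a single colour, written out in scalar form:

#include <complex>
using Cplx = std::complex<double>;

// XP_PROJ per colour: chi_0 = psi_0 + i*psi_3, chi_1 = psi_1 + i*psi_2
// (XM_PROJ is the same with the signs of the i-terms flipped).
void xp_proj(const Cplx psi[4], Cplx chi[2]) {
  const Cplx I(0, 1);
  chi[0] = psi[0] + I * psi[3];
  chi[1] = psi[1] + I * psi[2];
}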
// fspin(0)=hspin(0);
// fspin(1)=hspin(1);
// fspin(2)=timesMinusI(hspin(1));
// fspin(3)=timesMinusI(hspin(0));
#define XP_RECON\
result_00 = UChi_00;\
result_01 = UChi_01;\
result_02 = UChi_02;\
result_10 = UChi_10;\
result_11 = UChi_11;\
result_12 = UChi_12;\
result_20 = timesMinusI(UChi_10);\
result_21 = timesMinusI(UChi_11);\
result_22 = timesMinusI(UChi_12);\
result_30 = timesMinusI(UChi_00);\
result_31 = timesMinusI(UChi_01);\
result_32 = timesMinusI(UChi_02);
#define XP_RECON_ACCUM\
result_00+=UChi_00;\
result_01+=UChi_01;\
result_02+=UChi_02;\
result_10+=UChi_10;\
result_11+=UChi_11;\
result_12+=UChi_12;\
result_20-=timesI(UChi_10);\
result_21-=timesI(UChi_11);\
result_22-=timesI(UChi_12);\
result_30-=timesI(UChi_00);\
result_31-=timesI(UChi_01);\
result_32-=timesI(UChi_02);
#define XM_RECON\
result_00 = UChi_00;\
result_01 = UChi_01;\
result_02 = UChi_02;\
result_10 = UChi_10;\
result_11 = UChi_11;\
result_12 = UChi_12;\
result_20 = timesI(UChi_10);\
result_21 = timesI(UChi_11);\
result_22 = timesI(UChi_12);\
result_30 = timesI(UChi_00);\
result_31 = timesI(UChi_01);\
result_32 = timesI(UChi_02);
#define XM_RECON_ACCUM\
result_00+= UChi_00;\
result_01+= UChi_01;\
result_02+= UChi_02;\
result_10+= UChi_10;\
result_11+= UChi_11;\
result_12+= UChi_12;\
result_20+= timesI(UChi_10);\
result_21+= timesI(UChi_11);\
result_22+= timesI(UChi_12);\
result_30+= timesI(UChi_00);\
result_31+= timesI(UChi_01);\
result_32+= timesI(UChi_02);
#define YP_RECON_ACCUM\
result_00+= UChi_00;\
result_01+= UChi_01;\
result_02+= UChi_02;\
result_10+= UChi_10;\
result_11+= UChi_11;\
result_12+= UChi_12;\
result_20+= UChi_10;\
result_21+= UChi_11;\
result_22+= UChi_12;\
result_30-= UChi_00;\
result_31-= UChi_01;\
result_32-= UChi_02;
#define YM_RECON_ACCUM\
result_00+= UChi_00;\
result_01+= UChi_01;\
result_02+= UChi_02;\
result_10+= UChi_10;\
result_11+= UChi_11;\
result_12+= UChi_12;\
result_20-= UChi_10;\
result_21-= UChi_11;\
result_22-= UChi_12;\
result_30+= UChi_00;\
result_31+= UChi_01;\
result_32+= UChi_02;
#define ZP_RECON_ACCUM\
result_00+= UChi_00;\
result_01+= UChi_01;\
result_02+= UChi_02;\
result_10+= UChi_10;\
result_11+= UChi_11;\
result_12+= UChi_12;\
result_20-= timesI(UChi_00); \
result_21-= timesI(UChi_01); \
result_22-= timesI(UChi_02); \
result_30+= timesI(UChi_10); \
result_31+= timesI(UChi_11); \
result_32+= timesI(UChi_12);
#define ZM_RECON_ACCUM\
result_00+= UChi_00;\
result_01+= UChi_01;\
result_02+= UChi_02;\
result_10+= UChi_10;\
result_11+= UChi_11;\
result_12+= UChi_12;\
result_20+= timesI(UChi_00); \
result_21+= timesI(UChi_01); \
result_22+= timesI(UChi_02); \
result_30-= timesI(UChi_10); \
result_31-= timesI(UChi_11); \
result_32-= timesI(UChi_12);
#define TP_RECON_ACCUM\
result_00+= UChi_00;\
result_01+= UChi_01;\
result_02+= UChi_02;\
result_10+= UChi_10;\
result_11+= UChi_11;\
result_12+= UChi_12;\
result_20+= UChi_00; \
result_21+= UChi_01; \
result_22+= UChi_02; \
result_30+= UChi_10; \
result_31+= UChi_11; \
result_32+= UChi_12;
#define TM_RECON_ACCUM\
result_00+= UChi_00;\
result_01+= UChi_01;\
result_02+= UChi_02;\
result_10+= UChi_10;\
result_11+= UChi_11;\
result_12+= UChi_12;\
result_20-= UChi_00; \
result_21-= UChi_01; \
result_22-= UChi_02; \
result_30-= UChi_10; \
result_31-= UChi_11; \
result_32-= UChi_12;
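The *_RECON macros rebuild the lower two spin rows of the result from the propagated half spinor instead of recomputing them; the _ACCUM forms add into the running result across the eight legs. One colour of XP_RECON, where the lower spins are fixed multiples of the upper ones:

#include <complex>
using Cplx = std::complex<double>;

// XP_RECON per colour: r2 = -i*u1, r3 = -i*u0.
void xp_recon(const Cplx uchi[2], Cplx r[4]) {
  const Cplx I(0, 1);
  r[0] = uchi[0];
  r[1] = uchi[1];
  r[2] = -I * uchi[1];
  r[3] = -I * uchi[0];
}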
#define HAND_STENCIL_LEG(PROJ,PERM,DIR,RECON,F,LOAD_CHI_IMPL,LOAD_CHIMU_IMPL,MULT_2SPIN_IMPL) \
SE=st.GetEntry(ptype,DIR,ss); \
offset = SE->_offset; \
local = SE->_is_local; \
perm = SE->_permute; \
if ( local ) { \
LOAD_CHIMU_IMPL(DIR,F,PERM); \
PROJ; \
if ( perm) { \
PERMUTE_DIR(PERM); \
} \
} else { \
LOAD_CHI_IMPL(DIR,F,PERM); \
} \
MULT_2SPIN_IMPL(DIR,F); \
RECON;
#define HAND_STENCIL_LEG_INT(PROJ,PERM,DIR,RECON,F,LOAD_CHI_IMPL,LOAD_CHIMU_IMPL,MULT_2SPIN_IMPL) \
SE=st.GetEntry(ptype,DIR,ss); \
offset = SE->_offset; \
local = SE->_is_local; \
perm = SE->_permute; \
if ( local ) { \
LOAD_CHIMU_IMPL(DIR,F,PERM); \
PROJ; \
if ( perm) { \
PERMUTE_DIR(PERM); \
} \
} else if ( st.same_node[DIR] ) { \
LOAD_CHI_IMPL(DIR,F,PERM); \
} \
if (local || st.same_node[DIR] ) { \
MULT_2SPIN_IMPL(DIR,F); \
RECON; \
}
#define HAND_STENCIL_LEG_EXT(PROJ,PERM,DIR,RECON,F,LOAD_CHI_IMPL,LOAD_CHIMU_IMPL,MULT_2SPIN_IMPL) \
SE=st.GetEntry(ptype,DIR,ss); \
offset = SE->_offset; \
local = SE->_is_local; \
perm = SE->_permute; \
if((!SE->_is_local)&&(!st.same_node[DIR]) ) { \
LOAD_CHI_IMPL(DIR,F,PERM); \
MULT_2SPIN_IMPL(DIR,F); \
RECON; \
nmu++; \
}
#define HAND_RESULT(ss,F) \
{ \
SiteSpinor & ref (out._odata[ss]); \
vstream(ref(F)(0)(0),result_00); \
vstream(ref(F)(0)(1),result_01); \
vstream(ref(F)(0)(2),result_02); \
vstream(ref(F)(1)(0),result_10); \
vstream(ref(F)(1)(1),result_11); \
vstream(ref(F)(1)(2),result_12); \
vstream(ref(F)(2)(0),result_20); \
vstream(ref(F)(2)(1),result_21); \
vstream(ref(F)(2)(2),result_22); \
vstream(ref(F)(3)(0),result_30); \
vstream(ref(F)(3)(1),result_31); \
vstream(ref(F)(3)(2),result_32); \
}
#define HAND_RESULT_EXT(ss,F) \
if (nmu){ \
SiteSpinor & ref (out._odata[ss]); \
ref(F)(0)(0)+=result_00; \
ref(F)(0)(1)+=result_01; \
ref(F)(0)(2)+=result_02; \
ref(F)(1)(0)+=result_10; \
ref(F)(1)(1)+=result_11; \
ref(F)(1)(2)+=result_12; \
ref(F)(2)(0)+=result_20; \
ref(F)(2)(1)+=result_21; \
ref(F)(2)(2)+=result_22; \
ref(F)(3)(0)+=result_30; \
ref(F)(3)(1)+=result_31; \
ref(F)(3)(2)+=result_32; \
}
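HAND_RESULT owns the site and overwrites it with streaming stores, while HAND_RESULT_EXT runs after the interior pass, must read-modify-write, and does so only when at least one exterior leg actually contributed (nmu > 0). The two commit paths in miniature:

// exterior == false: interior pass, plain overwrite of the site.
// exterior == true : second pass, accumulate, and only if any leg arrived.
void commit_site(double *site, const double *result, int n,
                 bool exterior, int nmu) {
  if (!exterior) {
    for (int i = 0; i < n; i++) site[i] = result[i];
  } else if (nmu) {
    for (int i = 0; i < n; i++) site[i] += result[i];
  }
}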
#define HAND_DECLARATIONS(a) \
Simd result_00; \
Simd result_01; \
Simd result_02; \
Simd result_10; \
Simd result_11; \
Simd result_12; \
Simd result_20; \
Simd result_21; \
Simd result_22; \
Simd result_30; \
Simd result_31; \
Simd result_32; \
Simd Chi_00; \
Simd Chi_01; \
Simd Chi_02; \
Simd Chi_10; \
Simd Chi_11; \
Simd Chi_12; \
Simd UChi_00; \
Simd UChi_01; \
Simd UChi_02; \
Simd UChi_10; \
Simd UChi_11; \
Simd UChi_12; \
Simd U_00; \
Simd U_10; \
Simd U_20; \
Simd U_01; \
Simd U_11; \
Simd U_21;
#define ZERO_RESULT \
result_00=zero; \
result_01=zero; \
result_02=zero; \
result_10=zero; \
result_11=zero; \
result_12=zero; \
result_20=zero; \
result_21=zero; \
result_22=zero; \
result_30=zero; \
result_31=zero; \
result_32=zero;
#define Chimu_00 Chi_00
#define Chimu_01 Chi_01
#define Chimu_02 Chi_02
#define Chimu_10 Chi_10
#define Chimu_11 Chi_11
#define Chimu_12 Chi_12
#define Chimu_20 UChi_00
#define Chimu_21 UChi_01
#define Chimu_22 UChi_02
#define Chimu_30 UChi_10
#define Chimu_31 UChi_11
#define Chimu_32 UChi_12
namespace Grid {
namespace QCD {
template<class Impl> void
WilsonKernels<Impl>::HandDhopSite(StencilImpl &st,LebesgueOrder &lo,DoubledGaugeField &U,SiteHalfSpinor *buf,
int ss,int sU,const FermionField &in, FermionField &out)
{
// T==0, Z==1, Y==2, X==3 expect 1,2,2,2 simd layout etc...
typedef typename Simd::scalar_type S;
typedef typename Simd::vector_type V;
HAND_DECLARATIONS(ignore);
int offset,local,perm, ptype;
StencilEntry *SE;
#define HAND_DOP_SITE(F,LOAD_CHI_IMPL,LOAD_CHIMU_IMPL,MULT_2SPIN_IMPL) \
HAND_STENCIL_LEG(XM_PROJ,3,Xp,XM_RECON,F,LOAD_CHI_IMPL,LOAD_CHIMU_IMPL,MULT_2SPIN_IMPL); \
HAND_STENCIL_LEG(YM_PROJ,2,Yp,YM_RECON_ACCUM,F,LOAD_CHI_IMPL,LOAD_CHIMU_IMPL,MULT_2SPIN_IMPL); \
HAND_STENCIL_LEG(ZM_PROJ,1,Zp,ZM_RECON_ACCUM,F,LOAD_CHI_IMPL,LOAD_CHIMU_IMPL,MULT_2SPIN_IMPL); \
HAND_STENCIL_LEG(TM_PROJ,0,Tp,TM_RECON_ACCUM,F,LOAD_CHI_IMPL,LOAD_CHIMU_IMPL,MULT_2SPIN_IMPL); \
HAND_STENCIL_LEG(XP_PROJ,3,Xm,XP_RECON_ACCUM,F,LOAD_CHI_IMPL,LOAD_CHIMU_IMPL,MULT_2SPIN_IMPL); \
HAND_STENCIL_LEG(YP_PROJ,2,Ym,YP_RECON_ACCUM,F,LOAD_CHI_IMPL,LOAD_CHIMU_IMPL,MULT_2SPIN_IMPL); \
HAND_STENCIL_LEG(ZP_PROJ,1,Zm,ZP_RECON_ACCUM,F,LOAD_CHI_IMPL,LOAD_CHIMU_IMPL,MULT_2SPIN_IMPL); \
HAND_STENCIL_LEG(TP_PROJ,0,Tm,TP_RECON_ACCUM,F,LOAD_CHI_IMPL,LOAD_CHIMU_IMPL,MULT_2SPIN_IMPL); \
HAND_RESULT(ss,F)
HAND_DOP_SITE(, LOAD_CHI,LOAD_CHIMU,MULT_2SPIN);
}
template<class Impl>
void WilsonKernels<Impl>::HandDhopSiteDag(StencilImpl &st,LebesgueOrder &lo,DoubledGaugeField &U,SiteHalfSpinor *buf,
int ss,int sU,const FermionField &in, FermionField &out)
{
typedef typename Simd::scalar_type S;
typedef typename Simd::vector_type V;
HAND_DECLARATIONS(ignore);
StencilEntry *SE;
int offset,local,perm, ptype;
#define HAND_DOP_SITE_DAG(F,LOAD_CHI_IMPL,LOAD_CHIMU_IMPL,MULT_2SPIN_IMPL) \
HAND_STENCIL_LEG(XP_PROJ,3,Xp,XP_RECON,F,LOAD_CHI_IMPL,LOAD_CHIMU_IMPL,MULT_2SPIN_IMPL); \
HAND_STENCIL_LEG(YP_PROJ,2,Yp,YP_RECON_ACCUM,F,LOAD_CHI_IMPL,LOAD_CHIMU_IMPL,MULT_2SPIN_IMPL); \
HAND_STENCIL_LEG(ZP_PROJ,1,Zp,ZP_RECON_ACCUM,F,LOAD_CHI_IMPL,LOAD_CHIMU_IMPL,MULT_2SPIN_IMPL); \
HAND_STENCIL_LEG(TP_PROJ,0,Tp,TP_RECON_ACCUM,F,LOAD_CHI_IMPL,LOAD_CHIMU_IMPL,MULT_2SPIN_IMPL); \
HAND_STENCIL_LEG(XM_PROJ,3,Xm,XM_RECON_ACCUM,F,LOAD_CHI_IMPL,LOAD_CHIMU_IMPL,MULT_2SPIN_IMPL); \
HAND_STENCIL_LEG(YM_PROJ,2,Ym,YM_RECON_ACCUM,F,LOAD_CHI_IMPL,LOAD_CHIMU_IMPL,MULT_2SPIN_IMPL); \
HAND_STENCIL_LEG(ZM_PROJ,1,Zm,ZM_RECON_ACCUM,F,LOAD_CHI_IMPL,LOAD_CHIMU_IMPL,MULT_2SPIN_IMPL); \
HAND_STENCIL_LEG(TM_PROJ,0,Tm,TM_RECON_ACCUM,F,LOAD_CHI_IMPL,LOAD_CHIMU_IMPL,MULT_2SPIN_IMPL); \
HAND_RESULT(ss,F)
HAND_DOP_SITE_DAG(, LOAD_CHI,LOAD_CHIMU,MULT_2SPIN);
}
template<class Impl> void
WilsonKernels<Impl>::HandDhopSiteInt(StencilImpl &st,LebesgueOrder &lo,DoubledGaugeField &U,SiteHalfSpinor *buf,
int ss,int sU,const FermionField &in, FermionField &out)
{
// T==0, Z==1, Y==2, X==3 expect 1,2,2,2 simd layout etc...
typedef typename Simd::scalar_type S;
typedef typename Simd::vector_type V;
HAND_DECLARATIONS(ignore);
int offset,local,perm, ptype;
StencilEntry *SE;
#define HAND_DOP_SITE_INT(F,LOAD_CHI_IMPL,LOAD_CHIMU_IMPL,MULT_2SPIN_IMPL) \
ZERO_RESULT; \
HAND_STENCIL_LEG_INT(XM_PROJ,3,Xp,XM_RECON_ACCUM,F,LOAD_CHI_IMPL,LOAD_CHIMU_IMPL,MULT_2SPIN_IMPL); \
HAND_STENCIL_LEG_INT(YM_PROJ,2,Yp,YM_RECON_ACCUM,F,LOAD_CHI_IMPL,LOAD_CHIMU_IMPL,MULT_2SPIN_IMPL); \
HAND_STENCIL_LEG_INT(ZM_PROJ,1,Zp,ZM_RECON_ACCUM,F,LOAD_CHI_IMPL,LOAD_CHIMU_IMPL,MULT_2SPIN_IMPL); \
HAND_STENCIL_LEG_INT(TM_PROJ,0,Tp,TM_RECON_ACCUM,F,LOAD_CHI_IMPL,LOAD_CHIMU_IMPL,MULT_2SPIN_IMPL); \
HAND_STENCIL_LEG_INT(XP_PROJ,3,Xm,XP_RECON_ACCUM,F,LOAD_CHI_IMPL,LOAD_CHIMU_IMPL,MULT_2SPIN_IMPL); \
HAND_STENCIL_LEG_INT(YP_PROJ,2,Ym,YP_RECON_ACCUM,F,LOAD_CHI_IMPL,LOAD_CHIMU_IMPL,MULT_2SPIN_IMPL); \
HAND_STENCIL_LEG_INT(ZP_PROJ,1,Zm,ZP_RECON_ACCUM,F,LOAD_CHI_IMPL,LOAD_CHIMU_IMPL,MULT_2SPIN_IMPL); \
HAND_STENCIL_LEG_INT(TP_PROJ,0,Tm,TP_RECON_ACCUM,F,LOAD_CHI_IMPL,LOAD_CHIMU_IMPL,MULT_2SPIN_IMPL); \
HAND_RESULT(ss,F)
HAND_DOP_SITE_INT(, LOAD_CHI,LOAD_CHIMU,MULT_2SPIN);
}
template<class Impl>
void WilsonKernels<Impl>::HandDhopSiteDagInt(StencilImpl &st,LebesgueOrder &lo,DoubledGaugeField &U,SiteHalfSpinor *buf,
int ss,int sU,const FermionField &in, FermionField &out)
{
typedef typename Simd::scalar_type S;
typedef typename Simd::vector_type V;
HAND_DECLARATIONS(ignore);
StencilEntry *SE;
int offset,local,perm, ptype;
#define HAND_DOP_SITE_DAG_INT(F,LOAD_CHI_IMPL,LOAD_CHIMU_IMPL,MULT_2SPIN_IMPL) \
ZERO_RESULT; \
HAND_STENCIL_LEG_INT(XP_PROJ,3,Xp,XP_RECON_ACCUM,F,LOAD_CHI_IMPL,LOAD_CHIMU_IMPL,MULT_2SPIN_IMPL); \
HAND_STENCIL_LEG_INT(YP_PROJ,2,Yp,YP_RECON_ACCUM,F,LOAD_CHI_IMPL,LOAD_CHIMU_IMPL,MULT_2SPIN_IMPL); \
HAND_STENCIL_LEG_INT(ZP_PROJ,1,Zp,ZP_RECON_ACCUM,F,LOAD_CHI_IMPL,LOAD_CHIMU_IMPL,MULT_2SPIN_IMPL); \
HAND_STENCIL_LEG_INT(TP_PROJ,0,Tp,TP_RECON_ACCUM,F,LOAD_CHI_IMPL,LOAD_CHIMU_IMPL,MULT_2SPIN_IMPL); \
HAND_STENCIL_LEG_INT(XM_PROJ,3,Xm,XM_RECON_ACCUM,F,LOAD_CHI_IMPL,LOAD_CHIMU_IMPL,MULT_2SPIN_IMPL); \
HAND_STENCIL_LEG_INT(YM_PROJ,2,Ym,YM_RECON_ACCUM,F,LOAD_CHI_IMPL,LOAD_CHIMU_IMPL,MULT_2SPIN_IMPL); \
HAND_STENCIL_LEG_INT(ZM_PROJ,1,Zm,ZM_RECON_ACCUM,F,LOAD_CHI_IMPL,LOAD_CHIMU_IMPL,MULT_2SPIN_IMPL); \
HAND_STENCIL_LEG_INT(TM_PROJ,0,Tm,TM_RECON_ACCUM,F,LOAD_CHI_IMPL,LOAD_CHIMU_IMPL,MULT_2SPIN_IMPL); \
HAND_RESULT(ss,F)
HAND_DOP_SITE_DAG_INT(, LOAD_CHI,LOAD_CHIMU,MULT_2SPIN);
}
template<class Impl> void
WilsonKernels<Impl>::HandDhopSiteExt(StencilImpl &st,LebesgueOrder &lo,DoubledGaugeField &U,SiteHalfSpinor *buf,
int ss,int sU,const FermionField &in, FermionField &out)
{
// T==0, Z==1, Y==2, X==3 expect 1,2,2,2 simd layout etc...
typedef typename Simd::scalar_type S;
typedef typename Simd::vector_type V;
HAND_DECLARATIONS(ignore);
int offset,local,perm, ptype;
StencilEntry *SE;
int nmu=0;
#define HAND_DOP_SITE_EXT(F,LOAD_CHI_IMPL,LOAD_CHIMU_IMPL,MULT_2SPIN_IMPL) \
ZERO_RESULT; \
HAND_STENCIL_LEG_EXT(XM_PROJ,3,Xp,XM_RECON_ACCUM,F,LOAD_CHI_IMPL,LOAD_CHIMU_IMPL,MULT_2SPIN_IMPL); \
HAND_STENCIL_LEG_EXT(YM_PROJ,2,Yp,YM_RECON_ACCUM,F,LOAD_CHI_IMPL,LOAD_CHIMU_IMPL,MULT_2SPIN_IMPL); \
HAND_STENCIL_LEG_EXT(ZM_PROJ,1,Zp,ZM_RECON_ACCUM,F,LOAD_CHI_IMPL,LOAD_CHIMU_IMPL,MULT_2SPIN_IMPL); \
HAND_STENCIL_LEG_EXT(TM_PROJ,0,Tp,TM_RECON_ACCUM,F,LOAD_CHI_IMPL,LOAD_CHIMU_IMPL,MULT_2SPIN_IMPL); \
HAND_STENCIL_LEG_EXT(XP_PROJ,3,Xm,XP_RECON_ACCUM,F,LOAD_CHI_IMPL,LOAD_CHIMU_IMPL,MULT_2SPIN_IMPL); \
HAND_STENCIL_LEG_EXT(YP_PROJ,2,Ym,YP_RECON_ACCUM,F,LOAD_CHI_IMPL,LOAD_CHIMU_IMPL,MULT_2SPIN_IMPL); \
HAND_STENCIL_LEG_EXT(ZP_PROJ,1,Zm,ZP_RECON_ACCUM,F,LOAD_CHI_IMPL,LOAD_CHIMU_IMPL,MULT_2SPIN_IMPL); \
HAND_STENCIL_LEG_EXT(TP_PROJ,0,Tm,TP_RECON_ACCUM,F,LOAD_CHI_IMPL,LOAD_CHIMU_IMPL,MULT_2SPIN_IMPL); \
HAND_RESULT_EXT(ss,F)
HAND_DOP_SITE_EXT(, LOAD_CHI,LOAD_CHIMU,MULT_2SPIN);
}
template<class Impl>
void WilsonKernels<Impl>::HandDhopSiteDagExt(StencilImpl &st,LebesgueOrder &lo,DoubledGaugeField &U,SiteHalfSpinor *buf,
int ss,int sU,const FermionField &in, FermionField &out)
{
typedef typename Simd::scalar_type S;
typedef typename Simd::vector_type V;
HAND_DECLARATIONS(ignore);
StencilEntry *SE;
int offset,local,perm, ptype;
int nmu=0;
#define HAND_DOP_SITE_DAG_EXT(F,LOAD_CHI_IMPL,LOAD_CHIMU_IMPL,MULT_2SPIN_IMPL) \
ZERO_RESULT; \
HAND_STENCIL_LEG_EXT(XP_PROJ,3,Xp,XP_RECON_ACCUM,F,LOAD_CHI_IMPL,LOAD_CHIMU_IMPL,MULT_2SPIN_IMPL); \
HAND_STENCIL_LEG_EXT(YP_PROJ,2,Yp,YP_RECON_ACCUM,F,LOAD_CHI_IMPL,LOAD_CHIMU_IMPL,MULT_2SPIN_IMPL); \
HAND_STENCIL_LEG_EXT(ZP_PROJ,1,Zp,ZP_RECON_ACCUM,F,LOAD_CHI_IMPL,LOAD_CHIMU_IMPL,MULT_2SPIN_IMPL); \
HAND_STENCIL_LEG_EXT(TP_PROJ,0,Tp,TP_RECON_ACCUM,F,LOAD_CHI_IMPL,LOAD_CHIMU_IMPL,MULT_2SPIN_IMPL); \
HAND_STENCIL_LEG_EXT(XM_PROJ,3,Xm,XM_RECON_ACCUM,F,LOAD_CHI_IMPL,LOAD_CHIMU_IMPL,MULT_2SPIN_IMPL); \
HAND_STENCIL_LEG_EXT(YM_PROJ,2,Ym,YM_RECON_ACCUM,F,LOAD_CHI_IMPL,LOAD_CHIMU_IMPL,MULT_2SPIN_IMPL); \
HAND_STENCIL_LEG_EXT(ZM_PROJ,1,Zm,ZM_RECON_ACCUM,F,LOAD_CHI_IMPL,LOAD_CHIMU_IMPL,MULT_2SPIN_IMPL); \
HAND_STENCIL_LEG_EXT(TM_PROJ,0,Tm,TM_RECON_ACCUM,F,LOAD_CHI_IMPL,LOAD_CHIMU_IMPL,MULT_2SPIN_IMPL); \
HAND_RESULT_EXT(ss,F)
HAND_DOP_SITE_DAG_EXT(, LOAD_CHI,LOAD_CHIMU,MULT_2SPIN);
}
#define HAND_SPECIALISE_GPARITY(IMPL) \
template<> void \
WilsonKernels<IMPL>::HandDhopSite(StencilImpl &st,LebesgueOrder &lo,DoubledGaugeField &U,SiteHalfSpinor *buf, \
int ss,int sU,const FermionField &in, FermionField &out) \
{ \
typedef IMPL Impl; \
typedef typename Simd::scalar_type S; \
typedef typename Simd::vector_type V; \
\
HAND_DECLARATIONS(ignore); \
\
int offset,local,perm, ptype, g, direction, distance, sl, inplace_twist; \
StencilEntry *SE; \
HAND_DOP_SITE(0, LOAD_CHI_GPARITY,LOAD_CHIMU_GPARITY,MULT_2SPIN_GPARITY); \
HAND_DOP_SITE(1, LOAD_CHI_GPARITY,LOAD_CHIMU_GPARITY,MULT_2SPIN_GPARITY); \
} \
\
template<> \
void WilsonKernels<IMPL>::HandDhopSiteDag(StencilImpl &st,LebesgueOrder &lo,DoubledGaugeField &U,SiteHalfSpinor *buf, \
int ss,int sU,const FermionField &in, FermionField &out) \
{ \
typedef IMPL Impl; \
typedef typename Simd::scalar_type S; \
typedef typename Simd::vector_type V; \
\
HAND_DECLARATIONS(ignore); \
\
StencilEntry *SE; \
int offset,local,perm, ptype, g, direction, distance, sl, inplace_twist; \
HAND_DOP_SITE_DAG(0, LOAD_CHI_GPARITY,LOAD_CHIMU_GPARITY,MULT_2SPIN_GPARITY); \
HAND_DOP_SITE_DAG(1, LOAD_CHI_GPARITY,LOAD_CHIMU_GPARITY,MULT_2SPIN_GPARITY); \
} \
\
template<> void \
WilsonKernels<IMPL>::HandDhopSiteInt(StencilImpl &st,LebesgueOrder &lo,DoubledGaugeField &U,SiteHalfSpinor *buf, \
int ss,int sU,const FermionField &in, FermionField &out) \
{ \
typedef IMPL Impl; \
typedef typename Simd::scalar_type S; \
typedef typename Simd::vector_type V; \
\
HAND_DECLARATIONS(ignore); \
\
int offset,local,perm, ptype, g, direction, distance, sl, inplace_twist; \
StencilEntry *SE; \
HAND_DOP_SITE_INT(0, LOAD_CHI_GPARITY,LOAD_CHIMU_GPARITY,MULT_2SPIN_GPARITY); \
HAND_DOP_SITE_INT(1, LOAD_CHI_GPARITY,LOAD_CHIMU_GPARITY,MULT_2SPIN_GPARITY); \
} \
\
template<> \
void WilsonKernels<IMPL>::HandDhopSiteDagInt(StencilImpl &st,LebesgueOrder &lo,DoubledGaugeField &U,SiteHalfSpinor *buf, \
int ss,int sU,const FermionField &in, FermionField &out) \
{ \
typedef IMPL Impl; \
typedef typename Simd::scalar_type S; \
typedef typename Simd::vector_type V; \
\
HAND_DECLARATIONS(ignore); \
\
StencilEntry *SE; \
int offset,local,perm, ptype, g, direction, distance, sl, inplace_twist; \
HAND_DOP_SITE_DAG_INT(0, LOAD_CHI_GPARITY,LOAD_CHIMU_GPARITY,MULT_2SPIN_GPARITY); \
HAND_DOP_SITE_DAG_INT(1, LOAD_CHI_GPARITY,LOAD_CHIMU_GPARITY,MULT_2SPIN_GPARITY); \
} \
\
template<> void \
WilsonKernels<IMPL>::HandDhopSiteExt(StencilImpl &st,LebesgueOrder &lo,DoubledGaugeField &U,SiteHalfSpinor *buf, \
int ss,int sU,const FermionField &in, FermionField &out) \
{ \
typedef IMPL Impl; \
typedef typename Simd::scalar_type S; \
typedef typename Simd::vector_type V; \
\
HAND_DECLARATIONS(ignore); \
\
int offset,local,perm, ptype, g, direction, distance, sl, inplace_twist; \
StencilEntry *SE; \
int nmu=0; \
HAND_DOP_SITE_EXT(0, LOAD_CHI_GPARITY,LOAD_CHIMU_GPARITY,MULT_2SPIN_GPARITY); \
nmu = 0; \
HAND_DOP_SITE_EXT(1, LOAD_CHI_GPARITY,LOAD_CHIMU_GPARITY,MULT_2SPIN_GPARITY); \
} \
template<> \
void WilsonKernels<IMPL>::HandDhopSiteDagExt(StencilImpl &st,LebesgueOrder &lo,DoubledGaugeField &U,SiteHalfSpinor *buf, \
int ss,int sU,const FermionField &in, FermionField &out) \
{ \
typedef IMPL Impl; \
typedef typename Simd::scalar_type S; \
typedef typename Simd::vector_type V; \
\
HAND_DECLARATIONS(ignore); \
\
StencilEntry *SE; \
int offset,local,perm, ptype, g, direction, distance, sl, inplace_twist; \
int nmu=0; \
HAND_DOP_SITE_DAG_EXT(0, LOAD_CHI_GPARITY,LOAD_CHIMU_GPARITY,MULT_2SPIN_GPARITY); \
nmu = 0; \
HAND_DOP_SITE_DAG_EXT(1, LOAD_CHI_GPARITY,LOAD_CHIMU_GPARITY,MULT_2SPIN_GPARITY); \
}
HAND_SPECIALISE_GPARITY(GparityWilsonImplF);
HAND_SPECIALISE_GPARITY(GparityWilsonImplD);
HAND_SPECIALISE_GPARITY(GparityWilsonImplFH);
HAND_SPECIALISE_GPARITY(GparityWilsonImplDF);
////////////// Wilson ; uses this implementation /////////////////////
#define INSTANTIATE_THEM(A) \
template void WilsonKernels<A>::HandDhopSite(StencilImpl &st,LebesgueOrder &lo,DoubledGaugeField &U,SiteHalfSpinor *buf,\
int ss,int sU,const FermionField &in, FermionField &out); \
template void WilsonKernels<A>::HandDhopSiteDag(StencilImpl &st,LebesgueOrder &lo,DoubledGaugeField &U,SiteHalfSpinor *buf, \
int ss,int sU,const FermionField &in, FermionField &out);\
template void WilsonKernels<A>::HandDhopSiteInt(StencilImpl &st,LebesgueOrder &lo,DoubledGaugeField &U,SiteHalfSpinor *buf,\
int ss,int sU,const FermionField &in, FermionField &out); \
template void WilsonKernels<A>::HandDhopSiteDagInt(StencilImpl &st,LebesgueOrder &lo,DoubledGaugeField &U,SiteHalfSpinor *buf, \
int ss,int sU,const FermionField &in, FermionField &out); \
template void WilsonKernels<A>::HandDhopSiteExt(StencilImpl &st,LebesgueOrder &lo,DoubledGaugeField &U,SiteHalfSpinor *buf,\
int ss,int sU,const FermionField &in, FermionField &out); \
template void WilsonKernels<A>::HandDhopSiteDagExt(StencilImpl &st,LebesgueOrder &lo,DoubledGaugeField &U,SiteHalfSpinor *buf, \
int ss,int sU,const FermionField &in, FermionField &out);
INSTANTIATE_THEM(GparityWilsonImplF);
INSTANTIATE_THEM(GparityWilsonImplD);
INSTANTIATE_THEM(GparityWilsonImplFH);
INSTANTIATE_THEM(GparityWilsonImplDF);
}}

View File

@ -140,6 +140,7 @@ namespace Grid{
};
}
}
#endif

View File

@ -48,6 +48,22 @@ with this program; if not, write to the Free Software Foundation, Inc.,
} \
}
#define RegisterLoadCheckPointerMetadataFunction(NAME) \
template < class Metadata > \
void Load##NAME##Checkpointer(const CheckpointerParameters& Params_, const Metadata& M_) { \
if (!have_CheckPointer) { \
std::cout << GridLogDebug << "Loading Metadata Checkpointer " << #NAME \
<< std::endl; \
CP = std::unique_ptr<CheckpointerBaseModule>( \
new NAME##CPModule<ImplementationPolicy, Metadata >(Params_, M_)); \
have_CheckPointer = true; \
} else { \
std::cout << GridLogError << "Checkpointer already loaded " \
<< std::endl; \
exit(1); \
} \
}
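// For example, RegisterLoadCheckPointerMetadataFunction(Scidac) defines
//   template <class Metadata>
//   void LoadScidacCheckpointer(const CheckpointerParameters &, const Metadata &);
// which installs a ScidacCPModule<ImplementationPolicy,Metadata>; the HMC
// examples below call it as TheHMC.Resources.LoadScidacCheckpointer(CPparams, SPar);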
namespace Grid {
namespace QCD {
@ -77,7 +93,7 @@ class HMCResourceManager {
bool have_CheckPointer;
// NOTE: operator << is not overloaded for std::vector<string>
// so thsi function is necessary
// so this function is necessary
void output_vector_string(const std::vector<std::string> &vs){
for (auto &i: vs)
std::cout << i << " ";
@ -254,6 +270,7 @@ class HMCResourceManager {
RegisterLoadCheckPointerFunction(Nersc);
#ifdef HAVE_LIME
RegisterLoadCheckPointerFunction(ILDG);
RegisterLoadCheckPointerMetadataFunction(Scidac);
#endif
////////////////////////////////////////////////////////

View File

@ -76,6 +76,14 @@ class BaseHmcCheckpointer : public HmcObservable<typename Impl::Field> {
}
}
void check_filename(const std::string &filename){
std::ifstream f(filename.c_str());
if(!f.good()){
std::cout << GridLogError << "Filename " << filename << " not found. Aborting. " << std::endl;
abort();
};
}
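// check_filename is called at the top of every CheckpointRestore below
// (Binary/ILDG/Nersc/Scidac), so a missing configuration or RNG file aborts
// cleanly before any partial read is attempted.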
virtual void initialize(const CheckpointerParameters &Params) = 0;
virtual void CheckpointRestore(int traj, typename Impl::Field &U,

View File

@ -93,6 +93,9 @@ class BinaryHmcCheckpointer : public BaseHmcCheckpointer<Impl> {
void CheckpointRestore(int traj, Field &U, GridSerialRNG &sRNG, GridParallelRNG &pRNG) {
std::string config, rng;
this->build_filenames(traj, Params, config, rng);
this->check_filename(rng);
this->check_filename(config);
BinarySimpleMunger<sobj_double, sobj> munge;

View File

@ -136,6 +136,20 @@ class ILDGCPModule: public CheckPointerModule< ImplementationPolicy> {
};
template<class ImplementationPolicy, class Metadata>
class ScidacCPModule: public CheckPointerModule< ImplementationPolicy> {
typedef CheckPointerModule< ImplementationPolicy> CPBase;
Metadata M;
// acquire resource
virtual void initialize(){
this->CheckPointPtr.reset(new ScidacHmcCheckpointer<ImplementationPolicy, Metadata>(this->Par_, M));
}
public:
ScidacCPModule(typename CPBase::APar Par, Metadata M_):M(M_), CPBase(Par) {}
template <class ReaderClass>
ScidacCPModule(Reader<ReaderClass>& Reader) : Parametrized<typename CPBase::APar>(Reader), M(Reader){};
};
#endif

View File

@ -34,6 +34,7 @@ directory
#include <Grid/qcd/hmc/checkpointers/NerscCheckpointer.h>
#include <Grid/qcd/hmc/checkpointers/BinaryCheckpointer.h>
#include <Grid/qcd/hmc/checkpointers/ILDGCheckpointer.h>
#include <Grid/qcd/hmc/checkpointers/ScidacCheckpointer.h>
//#include <Grid/qcd/hmc/checkpointers/CheckPointerModules.h>

View File

@ -95,6 +95,10 @@ class ILDGHmcCheckpointer : public BaseHmcCheckpointer<Implementation> {
GridParallelRNG &pRNG) {
std::string config, rng;
this->build_filenames(traj, Params, config, rng);
this->check_filename(rng);
this->check_filename(config);
uint32_t nersc_csum,scidac_csuma,scidac_csumb;
BinaryIO::readRNG(sRNG, pRNG, rng, 0,nersc_csum,scidac_csuma,scidac_csumb);

View File

@ -69,6 +69,9 @@ class NerscHmcCheckpointer : public BaseHmcCheckpointer<Gimpl> {
GridParallelRNG &pRNG) {
std::string config, rng;
this->build_filenames(traj, Params, config, rng);
this->check_filename(rng);
this->check_filename(config);
FieldMetaData header;
NerscIO::readRNGState(sRNG, pRNG, header, rng);

View File

@ -0,0 +1,122 @@
/*************************************************************************************
Grid physics library, www.github.com/paboyle/Grid
Source file: ./lib/qcd/hmc/ScidacCheckpointer.h
Copyright (C) 2018
Author: Guido Cossu <guido.cossu@ed.ac.uk>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
See the full license in the file "LICENSE" in the top level distribution
directory
*************************************************************************************/
/* END LEGAL */
#ifndef SCIDAC_CHECKPOINTER
#define SCIDAC_CHECKPOINTER
#ifdef HAVE_LIME
#include <iostream>
#include <sstream>
#include <string>
namespace Grid {
namespace QCD {
// For generic fields
template <class Implementation, class Metadata>
class ScidacHmcCheckpointer : public BaseHmcCheckpointer<Implementation> {
private:
CheckpointerParameters Params;
Metadata MData;
typedef typename Implementation::Field Field;
public:
//INHERIT_GIMPL_TYPES(Implementation);
ScidacHmcCheckpointer(const CheckpointerParameters &Params_) { initialize(Params_); }
ScidacHmcCheckpointer(const CheckpointerParameters &Params_, const Metadata& M_):MData(M_) { initialize(Params_); }
void initialize(const CheckpointerParameters &Params_) {
Params = Params_;
// check here that the format is valid
int ieee32big = (Params.format == std::string("IEEE32BIG"));
int ieee32 = (Params.format == std::string("IEEE32"));
int ieee64big = (Params.format == std::string("IEEE64BIG"));
int ieee64 = (Params.format == std::string("IEEE64"));
if (!(ieee64big || ieee32 || ieee32big || ieee64)) {
std::cout << GridLogError << "Unrecognized file format " << Params.format
<< std::endl;
std::cout << GridLogError
<< "Allowed: IEEE32BIG | IEEE32 | IEEE64BIG | IEEE64"
<< std::endl;
exit(1);
}
}
void TrajectoryComplete(int traj, Field &U, GridSerialRNG &sRNG,
GridParallelRNG &pRNG) {
if ((traj % Params.saveInterval) == 0) {
std::string config, rng;
this->build_filenames(traj, Params, config, rng);
GridBase *grid = U._grid;
uint32_t nersc_csum,scidac_csuma,scidac_csumb;
BinaryIO::writeRNG(sRNG, pRNG, rng, 0,nersc_csum,scidac_csuma,scidac_csumb);
ScidacWriter _ScidacWriter(grid->IsBoss());
_ScidacWriter.open(config);
_ScidacWriter.writeScidacFieldRecord(U, MData);
_ScidacWriter.close();
std::cout << GridLogMessage << "Written Scidac Configuration on " << config << std::endl;
}
};
void CheckpointRestore(int traj, Field &U, GridSerialRNG &sRNG,
GridParallelRNG &pRNG) {
std::string config, rng;
this->build_filenames(traj, Params, config, rng);
this->check_filename(rng);
this->check_filename(config);
uint32_t nersc_csum,scidac_csuma,scidac_csumb;
BinaryIO::readRNG(sRNG, pRNG, rng, 0,nersc_csum,scidac_csuma,scidac_csumb);
Metadata md_content;
ScidacReader _ScidacReader;
_ScidacReader.open(config);
_ScidacReader.readScidacFieldRecord(U,md_content); // format from the header
_ScidacReader.close();
std::cout << GridLogMessage << "Read Scidac Configuration from " << config
<< " checksum " << std::hex
<< nersc_csum<<"/"
<< scidac_csuma<<"/"
<< scidac_csumb
<< std::dec << std::endl;
};
};
}
}
#endif // HAVE_LIME
#endif // SCIDAC_CHECKPOINTER

View File

@ -50,6 +50,7 @@ GridCartesian *SpaceTimeGrid::makeFourDimDWFGrid(const std::vector<int> & latt,c
GridCartesian *SpaceTimeGrid::makeFiveDimGrid(int Ls,const GridCartesian *FourDimGrid)
{
int N4=FourDimGrid->_ndimension;
assert(N4==4);
std::vector<int> latt5(1,Ls);
std::vector<int> simd5(1,1);

View File

@ -556,7 +556,7 @@ namespace Optimization {
v3 = _mm256_add_epi32(v1, v2);
v1 = _mm256_hadd_epi32(v3, v3);
v2 = _mm256_hadd_epi32(v1, v1);
u1 = _mm256_castsi256_si128(v2); // upper half
u1 = _mm256_castsi256_si128(v2); // lower half ypj[debug]; was missing
u2 = _mm256_extracti128_si256(v2, 1); // upper half
ret = _mm_add_epi32(u1, u2);
return _mm_cvtsi128_si32(ret);

View File

@ -66,6 +66,8 @@ void Gather_plane_simple_table (std::vector<std::pair<int,int> >& table,const La
parallel_for(int i=0;i<num;i++){
compress.Compress(&buffer[off],table[i].first,rhs._odata[so+table[i].second]);
}
// Further optimisation: i) streaming store the result
// ii) software prefetch the first element of the next table entry
}
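// Sketch of (i)+(ii) above, illustrative only (assumes SSE _mm_prefetch and
// Grid's vstream() non-temporal store are available on the target):
//   parallel_for(int i=0;i<num;i++){
//     if(i+1<num) _mm_prefetch((const char *)&rhs._odata[so+table[i+1].second],_MM_HINT_T0); // ii)
//     compress.Compress(&buffer[off],table[i].first,rhs._odata[so+table[i].second]);
//     // ... with the store inside Compress switched to vstream() so the send
//     // buffer does not pollute the cache (i).
//   }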
///////////////////////////////////////////////////////////////////
@ -148,7 +150,9 @@ class CartesianStencil { // Stencil runs along coordinate axes only; NO diagonal
std::vector<int> _distances;
std::vector<int> _comm_buf_size;
std::vector<int> _permute_type;
std::vector<int> same_node;
std::vector<int> surface_list;
Vector<StencilEntry> _entries;
std::vector<Packet> Packets;
std::vector<Merge> Mergers;
@ -199,7 +203,7 @@ class CartesianStencil { // Stencil runs along coordinate axes only; NO diagonal
int dimension = _directions[point];
int displacement = _distances[point];
assert( (displacement==1) || (displacement==-1));
int pd = _grid->_processors[dimension];
int fd = _grid->_fdimensions[dimension];
@ -214,9 +218,12 @@ class CartesianStencil { // Stencil runs along coordinate axes only; NO diagonal
if ( ! comm_dim ) return 1;
int nbr_proc;
if (displacement==1) nbr_proc = 1;
else nbr_proc = pd-1;
if (displacement>0) nbr_proc = 1;
else nbr_proc = pd-1;
// FIXME this logic needs to be sorted for three link term
// assert( (displacement==1) || (displacement==-1));
// Present hack only works for >= 4^4 subvol per node
_grid->ShiftedRanks(dimension,nbr_proc,xmit_to_rank,recv_from_rank);
void *shm = (void *) _grid->ShmBufferTranslate(recv_from_rank,u_recv_buf_p);
@ -505,25 +512,24 @@ class CartesianStencil { // Stencil runs along coordinate axes only; NO diagonal
template<class decompressor>
void CommsMerge(decompressor decompress,std::vector<Merge> &mm,std::vector<Decompress> &dd) {
mergetime-=usecond();
for(int i=0;i<mm.size();i++){
mergetime-=usecond();
parallel_for(int o=0;o<mm[i].buffer_size/2;o++){
decompress.Exchange(mm[i].mpointer,
mm[i].vpointers[0],
mm[i].vpointers[1],
mm[i].type,o);
}
mergetime+=usecond();
}
mergetime+=usecond();
decompresstime-=usecond();
for(int i=0;i<dd.size();i++){
decompresstime-=usecond();
parallel_for(int o=0;o<dd[i].buffer_size;o++){
decompress.Decompress(dd[i].kernel_p,dd[i].mpi_p,o);
}
decompresstime+=usecond();
}
decompresstime+=usecond();
}
////////////////////////////////////////
// Set up routines
@ -538,6 +544,29 @@ class CartesianStencil { // Stencil runs along coordinate axes only; NO diagonal
}
};
// Move interior/exterior split into the generic stencil
// FIXME Explicit Ls in interface is a pain. Should just use a vol
void BuildSurfaceList(int Ls,int vol4){
// find same node for SHM
// Here we know the distance is 1 for WilsonStencil
for(int point=0;point<this->_npoints;point++){
same_node[point] = this->SameNode(point);
}
for(int site = 0 ;site< vol4;site++){
int local = 1;
for(int point=0;point<this->_npoints;point++){
if( (!this->GetNodeLocal(site*Ls,point)) && (!same_node[point]) ){
local = 0;
}
}
if(local == 0) {
surface_list.push_back(site);
}
}
}
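// After BuildSurfaceList, surface_list holds the 4d sites with at least one
// leg that is neither node-local nor shared-memory reachable; only these
// sites need revisiting by the exterior kernels once MPI data has arrived.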
CartesianStencil(GridBase *grid,
int npoints,
int checkerboard,
@ -548,7 +577,8 @@ class CartesianStencil { // Stencil runs along coordinate axes only; NO diagonal
comm_bytes_thr(npoints),
comm_enter_thr(npoints),
comm_leave_thr(npoints),
comm_time_thr(npoints)
comm_time_thr(npoints),
same_node(npoints)
{
face_table_computed=0;
_npoints = npoints;
@ -556,6 +586,7 @@ class CartesianStencil { // Stencil runs along coordinate axes only; NO diagonal
_directions = directions;
_distances = distances;
_unified_buffer_size=0;
surface_list.resize(0);
int osites = _grid->oSites();

View File

@ -40,7 +40,7 @@ Author: paboyle <paboyle@ph.ed.ac.uk>
#define PARALLEL_FOR_LOOP _Pragma("omp parallel for schedule(static)")
#define PARALLEL_FOR_LOOP_INTERN _Pragma("omp for schedule(static)")
#define PARALLEL_NESTED_LOOP2 _Pragma("omp parallel for schedule(static) collapse(2)")
#define PARALLEL_NESTED_LOOP2 _Pragma("omp parallel for collapse(2)")
#define PARALLEL_REGION _Pragma("omp parallel")
#define PARALLEL_CRITICAL _Pragma("omp critical")
#else

View File

@ -158,6 +158,14 @@ void GridCmdOptionInt(std::string &str,int & val)
return;
}
// ypj [add]
void GridCmdOptionFloat(std::string &str,double & val)
{
std::stringstream ss(str);
ss>>val;
return;
}
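// Example use (as in Test_dwf_block_lanczos below):
//   arg = GridCmdOptionPayload(argv,argv+argc,"--mass");
//   GridCmdOptionFloat(arg,mass);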
void GridParseLayout(char **argv,int argc,
std::vector<int> &latt,
@ -368,8 +376,10 @@ void Grid_init(int *argc,char ***argv)
}
if( GridCmdOptionExists(*argv,*argv+*argc,"--comms-overlap") ){
QCD::WilsonKernelsStatic::Comms = QCD::WilsonKernelsStatic::CommsAndCompute;
QCD::StaggeredKernelsStatic::Comms = QCD::StaggeredKernelsStatic::CommsAndCompute;
} else {
QCD::WilsonKernelsStatic::Comms = QCD::WilsonKernelsStatic::CommsThenCompute;
QCD::StaggeredKernelsStatic::Comms = QCD::StaggeredKernelsStatic::CommsThenCompute;
}
if( GridCmdOptionExists(*argv,*argv+*argc,"--comms-concurrent") ){
CartesianCommunicator::SetCommunicatorPolicy(CartesianCommunicator::CommunicatorPolicyConcurrent);
@ -385,6 +395,7 @@ void Grid_init(int *argc,char ***argv)
if( GridCmdOptionExists(*argv,*argv+*argc,"--comms-threads") ){
arg= GridCmdOptionPayload(*argv,*argv+*argc,"--comms-threads");
GridCmdOptionInt(arg,CartesianCommunicator::nCommThreads);
assert(CartesianCommunicator::nCommThreads > 0);
}
if( GridCmdOptionExists(*argv,*argv+*argc,"--cacheblocking") ){
arg= GridCmdOptionPayload(*argv,*argv+*argc,"--cacheblocking");

View File

@ -54,6 +54,9 @@ namespace Grid {
std::string GridCmdVectorIntToString(const std::vector<int> & vec);
void GridCmdOptionCSL(std::string str,std::vector<std::string> & vec);
void GridCmdOptionIntVector(std::string &str,std::vector<int> & vec);
// ypj [add]
void GridCmdOptionInt(std::string &str,int & val);
void GridCmdOptionFloat(std::string &str,double & val);
void GridParseLayout(char **argv,int argc,

View File

@ -18,6 +18,10 @@ namespace Grid{
static inline void IndexFromCoor (const std::vector<int>& coor,int &index,const std::vector<int> &dims){
int nd=dims.size();
if(nd > coor.size()) {
std::cout<< "coor.size "<<coor.size()<<" >dims.size "<<dims.size()<<std::endl;
assert(0);
}
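// The loop below computes the usual lexicographic flattening:
//   index = coor[0] + dims[0]*(coor[1] + dims[1]*(coor[2] + ...))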
int stride=1;
index=0;
for(int d=0;d<nd;d++){

View File

@ -167,7 +167,7 @@ int main (int argc, char ** argv) {
RealD mass = Params.mass;
RealD M5 = Params.M5;
std::vector<int> blockSize = Params.blockSize;
std::vector<int> latt({16,16,16,16});
std::vector<int> latt({32,32,32,32});
uint64_t vol = Ls*latt[0]*latt[1]*latt[2]*latt[3];
double mat_flop= 2.0*1320.0*vol;
// Grids

View File

@ -141,6 +141,7 @@ int main (int argc, char ** argv)
t1=usecond();
std::cout<<GridLogMessage << "Called Ds ASM"<<std::endl;
std::cout<<GridLogMessage << "norm src "<< norm2(src)<<std::endl;
std::cout<<GridLogMessage << "norm result "<< norm2(tmp)<<std::endl;
std::cout<<GridLogMessage << "mflop/s = "<< flops/(t1-t0)<<std::endl;
@ -160,7 +161,8 @@ int main (int argc, char ** argv)
localConvert(sresult,tmp);
std::cout<<GridLogMessage << "Called sDs unroll"<<std::endl;
std::cout<<GridLogMessage << "norm result "<< norm2(sresult)<<std::endl;
std::cout<<GridLogMessage << "norm ssrc "<< norm2(ssrc)<<std::endl;
std::cout<<GridLogMessage << "norm sresult "<< norm2(sresult)<<std::endl;
std::cout<<GridLogMessage << "mflop/s = "<< flops/(t1-t0)<<std::endl;
@ -181,6 +183,7 @@ int main (int argc, char ** argv)
localConvert(sresult,tmp);
std::cout<<GridLogMessage << "Called sDs asm"<<std::endl;
std::cout<<GridLogMessage << "norm ssrc "<< norm2(ssrc)<<std::endl;
std::cout<<GridLogMessage << "norm result "<< norm2(sresult)<<std::endl;
std::cout<<GridLogMessage << "mflop/s = "<< flops/(t1-t0)*extra<<std::endl;

View File

@ -0,0 +1,196 @@
/*************************************************************************************
Grid physics library, www.github.com/paboyle/Grid
Source file: ./benchmarks/Benchmark_wilson.cc
Copyright (C) 2015
Author: Peter Boyle <paboyle@ph.ed.ac.uk>
Author: paboyle <paboyle@ph.ed.ac.uk>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */
#include <Grid/Grid.h>
using namespace std;
using namespace Grid;
using namespace Grid::QCD;
int main (int argc, char ** argv)
{
Grid_init(&argc,&argv);
std::vector<int> latt_size = GridDefaultLatt();
std::vector<int> simd_layout = GridDefaultSimd(Nd,vComplex::Nsimd());
std::vector<int> mpi_layout = GridDefaultMpi();
const int Ls=16;
GridCartesian * UGrid = SpaceTimeGrid::makeFourDimGrid(GridDefaultLatt(), GridDefaultSimd(Nd,vComplexF::Nsimd()),GridDefaultMpi());
GridRedBlackCartesian * UrbGrid = SpaceTimeGrid::makeFourDimRedBlackGrid(UGrid);
GridCartesian * FGrid = SpaceTimeGrid::makeFiveDimGrid(Ls,UGrid);
GridRedBlackCartesian * FrbGrid = SpaceTimeGrid::makeFiveDimRedBlackGrid(Ls,UGrid);
std::cout << GridLogMessage << "Making s innermost grids"<<std::endl;
GridCartesian * sUGrid = SpaceTimeGrid::makeFourDimDWFGrid(GridDefaultLatt(),GridDefaultMpi());
GridRedBlackCartesian * sUrbGrid = SpaceTimeGrid::makeFourDimRedBlackGrid(sUGrid);
GridCartesian * sFGrid = SpaceTimeGrid::makeFiveDimDWFGrid(Ls,UGrid);
GridRedBlackCartesian * sFrbGrid = SpaceTimeGrid::makeFiveDimDWFRedBlackGrid(Ls,UGrid);
int threads = GridThread::GetThreads();
std::cout<<GridLogMessage << "Grid is setup to use "<<threads<<" threads"<<std::endl;
std::vector<int> seeds({1,2,3,4});
GridParallelRNG pRNG4(UGrid);
GridParallelRNG pRNG5(FGrid);
pRNG4.SeedFixedIntegers(seeds);
pRNG5.SeedFixedIntegers(seeds);
typedef typename ImprovedStaggeredFermion5DF::FermionField FermionField;
typedef typename ImprovedStaggeredFermion5DF::ComplexField ComplexField;
typename ImprovedStaggeredFermion5DF::ImplParams params;
FermionField src (FGrid);
random(pRNG5,src);
/*
std::vector<int> site({0,1,2,0,0});
ColourVector cv = zero;
cv()()(0)=1.0;
src = zero;
pokeSite(cv,src,site);
*/
FermionField result(FGrid); result=zero;
FermionField tmp(FGrid); tmp=zero;
FermionField err(FGrid); err=zero;
FermionField phi (FGrid); random(pRNG5,phi);
FermionField chi (FGrid); random(pRNG5,chi);
LatticeGaugeFieldF Umu(UGrid);
SU3::HotConfiguration(pRNG4,Umu);
/*
for(int mu=1;mu<4;mu++){
auto tmp = PeekIndex<LorentzIndex>(Umu,mu);
tmp = zero;
PokeIndex<LorentzIndex>(Umu,tmp,mu);
}
*/
double volume=Ls;
for(int mu=0;mu<Nd;mu++){
volume=volume*latt_size[mu];
}
RealD mass=0.1;
RealD c1=9.0/8.0;
RealD c2=-1.0/24.0;
RealD u0=1.0;
ImprovedStaggeredFermion5DF Ds(Umu,Umu,*FGrid,*FrbGrid,*UGrid,*UrbGrid,mass,c1,c2,u0,params);
ImprovedStaggeredFermionVec5dF sDs(Umu,Umu,*sFGrid,*sFrbGrid,*sUGrid,*sUrbGrid,mass,c1,c2,u0,params);
std::cout<<GridLogMessage<<"=========================================================="<<std::endl;
std::cout<<GridLogMessage<<"= Testing Dhop against cshift implementation "<<std::endl;
std::cout<<GridLogMessage<<"=========================================================="<<std::endl;
int ncall=1000;
int ncall1=1000;
double t0(0),t1(0);
double flops=(16*(3*(6+8+8)) + 15*3*2)*volume*ncall; // == 16*66 + 90 == 1146 flops per site
std::cout<<GridLogMessage << "Calling staggered operator"<<std::endl;
t0=usecond();
for(int i=0;i<ncall1;i++){
Ds.Dhop(src,result,0);
}
t1=usecond();
std::cout<<GridLogMessage << "Called Ds"<<std::endl;
std::cout<<GridLogMessage << "norm result "<< norm2(result)<<std::endl;
std::cout<<GridLogMessage << "mflop/s = "<< flops/(t1-t0)<<std::endl;
std::cout<<GridLogMessage << "Calling vectorised staggered operator"<<std::endl;
#ifdef AVX512
QCD::StaggeredKernelsStatic::Opt=QCD::StaggeredKernelsStatic::OptInlineAsm;
#else
QCD::StaggeredKernelsStatic::Opt=QCD::StaggeredKernelsStatic::OptGeneric;
#endif
t0=usecond();
for(int i=0;i<ncall1;i++){
Ds.Dhop(src,tmp,0);
}
t1=usecond();
std::cout<<GridLogMessage << "Called Ds ASM"<<std::endl;
std::cout<<GridLogMessage << "norm src "<< norm2(src)<<std::endl;
std::cout<<GridLogMessage << "norm result "<< norm2(tmp)<<std::endl;
std::cout<<GridLogMessage << "mflop/s = "<< flops/(t1-t0)<<std::endl;
err = tmp-result;
std::cout<<GridLogMessage << "norm diff "<< norm2(err)<<std::endl;
FermionField ssrc (sFGrid); localConvert(src,ssrc);
FermionField sresult(sFGrid); sresult=zero;
QCD::StaggeredKernelsStatic::Opt=QCD::StaggeredKernelsStatic::OptHandUnroll;
t0=usecond();
for(int i=0;i<ncall1;i++){
sDs.Dhop(ssrc,sresult,0);
}
t1=usecond();
localConvert(sresult,tmp);
std::cout<<GridLogMessage << "Called sDs unroll"<<std::endl;
std::cout<<GridLogMessage << "norm ssrc "<< norm2(ssrc)<<std::endl;
std::cout<<GridLogMessage << "norm sresult "<< norm2(sresult)<<std::endl;
std::cout<<GridLogMessage << "mflop/s = "<< flops/(t1-t0)<<std::endl;
#ifdef AVX512
QCD::StaggeredKernelsStatic::Opt=QCD::StaggeredKernelsStatic::OptInlineAsm;
#else
QCD::StaggeredKernelsStatic::Opt=QCD::StaggeredKernelsStatic::OptGeneric;
#endif
err = tmp-result;
std::cout<<GridLogMessage << "norm diff "<< norm2(err)<<std::endl;
int extra=1;
t0=usecond();
for(int i=0;i<ncall1*extra;i++){
sDs.Dhop(ssrc,sresult,0);
}
t1=usecond();
localConvert(sresult,tmp);
std::cout<<GridLogMessage << "Called sDs asm"<<std::endl;
std::cout<<GridLogMessage << "norm ssrc "<< norm2(ssrc)<<std::endl;
std::cout<<GridLogMessage << "norm result "<< norm2(sresult)<<std::endl;
std::cout<<GridLogMessage << "mflop/s = "<< flops/(t1-t0)*extra<<std::endl;
err = tmp-result;
std::cout<<GridLogMessage << "norm diff "<< norm2(err)<<std::endl;
Grid_finalize();
}

View File

@ -34,6 +34,8 @@ class ScalarActionParameters : Serializable {
double, lambda,
double, g);
ScalarActionParameters() = default;
template <class ReaderClass >
ScalarActionParameters(Reader<ReaderClass>& Reader){
read(Reader, "ScalarAction", *this);
@ -124,10 +126,13 @@ int main(int argc, char **argv) {
ScalarGrid.set_rb(new GridRedBlackCartesian(ScalarGrid.get_full()));
TheHMC.Resources.AddGrid("scalar", ScalarGrid);
std::cout << "Lattice size : " << GridDefaultLatt() << std::endl;
ScalarActionParameters SPar(Reader);
// Checkpointer definition
CheckpointerParameters CPparams(Reader);
TheHMC.Resources.LoadBinaryCheckpointer(CPparams);
//TheHMC.Resources.LoadBinaryCheckpointer(CPparams);
TheHMC.Resources.LoadScidacCheckpointer(CPparams, SPar);
RNGModuleParameters RNGpar(Reader);
TheHMC.Resources.SetRNGSeeds(RNGpar);
@ -140,7 +145,6 @@ int main(int argc, char **argv) {
// Collect actions, here use more encapsulation
// Scalar action in adjoint representation
ScalarActionParameters SPar(Reader);
ScalarAction Saction(SPar.mass_squared, SPar.lambda, SPar.g);
// Collect actions

View File

@ -33,6 +33,7 @@ namespace Grid{
GRID_SERIALIZABLE_CLASS_MEMBERS(ActionParameters,
double, beta)
ActionParameters() = default;
template <class ReaderClass >
ActionParameters(Reader<ReaderClass>& Reader){
@ -68,11 +69,15 @@ int main(int argc, char **argv) {
}
Serialiser Reader(TheHMC.ParameterFile);
// Read parameters from input file
ActionParameters WilsonPar(Reader);
// Checkpointer definition
CheckpointerParameters CPparams(Reader);
TheHMC.Resources.LoadNerscCheckpointer(CPparams);
//TheHMC.Resources.LoadNerscCheckpointer(CPparams);
// Store metadata in the Scidac checkpointer
TheHMC.Resources.LoadScidacCheckpointer(CPparams, WilsonPar);
RNGModuleParameters RNGpar(Reader);
TheHMC.Resources.SetRNGSeeds(RNGpar);
@ -91,8 +96,6 @@ int main(int argc, char **argv) {
// need wrappers of the fermionic classes
// that have a complex construction
// standard
ActionParameters WilsonPar(Reader);
//RealD beta = 6.4 ;
WilsonGaugeActionR Waction(WilsonPar.beta);
ActionLevel<HMCWrapper::Field> Level1(1);

View File

@ -0,0 +1,317 @@
/*************************************************************************************
Grid physics library, www.github.com/paboyle/Grid
Source file: ./tests/Test_dwf_block_lanczos.cc
Copyright (C) 2015
Author: Peter Boyle <paboyle@ph.ed.ac.uk>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */
#include <Grid/Grid.h>
using namespace std;
using namespace Grid;
using namespace Grid::QCD;
//typedef typename GparityDomainWallFermionR::FermionField FermionField;
typedef typename ZMobiusFermionR::FermionField FermionField;
RealD AllZero(RealD x){ return 0.;}
class CmdJobParams
{
public:
std::string gaugefile;
int Ls;
double mass;
double M5;
double mob_b;
std::vector<ComplexD> omega;
std::vector<Complex> boundary_phase;
LanczosType Impl;
int Nu;
int Nk;
int Np;
int Nm;
int Nstop;
int Ntest;
int MaxIter;
double resid;
double low;
double high;
int order;
CmdJobParams()
: gaugefile("Hot"),
Ls(8), mass(0.01), M5(1.8), mob_b(1.5),
Impl(LanczosType::irbl),
Nu(4), Nk(200), Np(200), Nstop(100), Ntest(1), MaxIter(10), resid(1.0e-8),
low(0.2), high(5.5), order(11)
{Nm=Nk+Np;};
void Parse(char **argv, int argc);
};
void CmdJobParams::Parse(char **argv,int argc)
{
std::string arg;
std::vector<int> vi;
double re,im;
int expect, idx;
std::string vstr;
std::ifstream pfile;
if( GridCmdOptionExists(argv,argv+argc,"--gconf") ){
gaugefile = GridCmdOptionPayload(argv,argv+argc,"--gconf");
}
if( GridCmdOptionExists(argv,argv+argc,"--phase") ){
arg = GridCmdOptionPayload(argv,argv+argc,"--phase");
pfile.open(arg);
assert(pfile);
expect = 0;
while( pfile >> vstr ) {
if ( vstr.compare("boundary_phase") == 0 ) {
pfile >> vstr;
GridCmdOptionInt(vstr,idx);
assert(expect==idx);
pfile >> vstr;
GridCmdOptionFloat(vstr,re);
pfile >> vstr;
GridCmdOptionFloat(vstr,im);
boundary_phase.push_back({re,im});
expect++;
}
}
pfile.close();
} else {
for (int i=0; i<4; ++i) boundary_phase.push_back({1.,0.});
}
if( GridCmdOptionExists(argv,argv+argc,"--omega") ){
arg = GridCmdOptionPayload(argv,argv+argc,"--omega");
pfile.open(arg);
assert(pfile);
Ls = 0;
while( pfile >> vstr ) {
if ( vstr.compare("omega") == 0 ) {
pfile >> vstr;
GridCmdOptionInt(vstr,idx);
assert(Ls==idx);
pfile >> vstr;
GridCmdOptionFloat(vstr,re);
pfile >> vstr;
GridCmdOptionFloat(vstr,im);
omega.push_back({re,im});
Ls++;
}
}
pfile.close();
} else {
if( GridCmdOptionExists(argv,argv+argc,"--Ls") ){
arg = GridCmdOptionPayload(argv,argv+argc,"--Ls");
GridCmdOptionInt(arg,Ls);
}
}
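// The --phase and --omega files are whitespace-separated records, one per
// line, with consecutive indices from 0 (values illustrative):
//   boundary_phase 0 1.0 0.0      (in the --phase file)
//   omega 0 1.5 0.0               (in the --omega file)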
if( GridCmdOptionExists(argv,argv+argc,"--mass") ){
arg = GridCmdOptionPayload(argv,argv+argc,"--mass");
GridCmdOptionFloat(arg,mass);
}
if( GridCmdOptionExists(argv,argv+argc,"--M5") ){
arg = GridCmdOptionPayload(argv,argv+argc,"--M5");
GridCmdOptionFloat(arg,M5);
}
if( GridCmdOptionExists(argv,argv+argc,"--mob_b") ){
arg = GridCmdOptionPayload(argv,argv+argc,"--mob_b");
GridCmdOptionFloat(arg,mob_b);
}
if( GridCmdOptionExists(argv,argv+argc,"--irbl") ){
arg = GridCmdOptionPayload(argv,argv+argc,"--irbl");
GridCmdOptionIntVector(arg,vi);
Nu = vi[0];
Nk = vi[1];
Np = vi[2];
Nstop = vi[3];
MaxIter = vi[4];
// ypj[fixme] a message that the Lanczos mode is being overridden is needed.
Impl = LanczosType::irbl;
Nm = Nk+Np;
}
// block Lanczos with explicit extension of its dimensions
if( GridCmdOptionExists(argv,argv+argc,"--rbl") ){
arg = GridCmdOptionPayload(argv,argv+argc,"--rbl");
GridCmdOptionIntVector(arg,vi);
Nu = vi[0];
Nk = vi[1];
Np = vi[2]; // vector space is enlarged by adding Np vectors
Nstop = vi[3];
MaxIter = vi[4];
// ypj[fixme] a message that the Lanczos mode is being overridden is needed.
Impl = LanczosType::rbl;
Nm = Nk+Np*MaxIter;
}
if( GridCmdOptionExists(argv,argv+argc,"--check_int") ){
arg = GridCmdOptionPayload(argv,argv+argc,"--check_int");
GridCmdOptionInt(arg,Ntest);
}
if( GridCmdOptionExists(argv,argv+argc,"--resid") ){
arg = GridCmdOptionPayload(argv,argv+argc,"--resid");
GridCmdOptionFloat(arg,resid);
}
if( GridCmdOptionExists(argv,argv+argc,"--cheby_l") ){
arg = GridCmdOptionPayload(argv,argv+argc,"--cheby_l");
GridCmdOptionFloat(arg,low);
}
if( GridCmdOptionExists(argv,argv+argc,"--cheby_u") ){
arg = GridCmdOptionPayload(argv,argv+argc,"--cheby_u");
GridCmdOptionFloat(arg,high);
}
if( GridCmdOptionExists(argv,argv+argc,"--cheby_n") ){
arg = GridCmdOptionPayload(argv,argv+argc,"--cheby_n");
GridCmdOptionInt(arg,order);
}
if ( CartesianCommunicator::RankWorld() == 0 ) {
std::streamsize ss = std::cout.precision();
std::cout << GridLogMessage <<" Gauge Configuration "<< gaugefile << '\n';
std::cout.precision(15);
for ( int i=0; i<4; ++i ) std::cout << GridLogMessage <<" boundary_phase["<< i << "] = " << boundary_phase[i] << '\n';
std::cout.precision(ss);
std::cout << GridLogMessage <<" Ls "<< Ls << '\n';
std::cout << GridLogMessage <<" mass "<< mass << '\n';
std::cout << GridLogMessage <<" M5 "<< M5 << '\n';
std::cout << GridLogMessage <<" mob_b "<< mob_b << '\n';
std::cout.precision(15);
for ( int i=0; i<Ls; ++i ) std::cout << GridLogMessage <<" omega["<< i << "] = " << omega[i] << '\n';
std::cout.precision(ss);
std::cout << GridLogMessage <<" Nu "<< Nu << '\n';
std::cout << GridLogMessage <<" Nk "<< Nk << '\n';
std::cout << GridLogMessage <<" Np "<< Np << '\n';
std::cout << GridLogMessage <<" Nm "<< Nm << '\n';
std::cout << GridLogMessage <<" Nstop "<< Nstop << '\n';
std::cout << GridLogMessage <<" Ntest "<< Ntest << '\n';
std::cout << GridLogMessage <<" MaxIter "<< MaxIter << '\n';
std::cout << GridLogMessage <<" resid "<< resid << '\n';
std::cout << GridLogMessage <<" Cheby Poly "<< low << "," << high << "," << order << std::endl;
}
}
int main (int argc, char ** argv)
{
Grid_init(&argc,&argv);
CmdJobParams JP;
JP.Parse(argv,argc);
GridCartesian * UGrid = SpaceTimeGrid::makeFourDimGrid(GridDefaultLatt(), GridDefaultSimd(Nd,vComplex::Nsimd()),GridDefaultMpi());
GridRedBlackCartesian * UrbGrid = SpaceTimeGrid::makeFourDimRedBlackGrid(UGrid);
GridCartesian * FGrid = SpaceTimeGrid::makeFiveDimGrid(JP.Ls,UGrid);
GridRedBlackCartesian * FrbGrid = SpaceTimeGrid::makeFiveDimRedBlackGrid(JP.Ls,UGrid);
printf("UGrid=%p UrbGrid=%p FGrid=%p FrbGrid=%p\n",UGrid,UrbGrid,FGrid,FrbGrid);
std::vector<int> seeds4({1,2,3,4});
std::vector<int> seeds5({5,6,7,8});
GridParallelRNG RNG5(FGrid); RNG5.SeedFixedIntegers(seeds5);
GridParallelRNG RNG4(UGrid); RNG4.SeedFixedIntegers(seeds4);
GridParallelRNG RNG5rb(FrbGrid); RNG5.SeedFixedIntegers(seeds5);
// ypj [note] why seed RNG5 again? bug? As written, RNG5rb is left with its default seed.
//GridParallelRNG RNG5rb(FrbGrid); //RNG5rb.SeedFixedIntegers(seeds5);
LatticeGaugeField Umu(UGrid);
std::vector<LatticeColourMatrix> U(4,UGrid);
if ( JP.gaugefile.compare("Hot") == 0 ) {
SU3::HotConfiguration(RNG4, Umu);
} else {
FieldMetaData header;
NerscIO::readConfiguration(Umu,header,JP.gaugefile);
// ypj [fixme] additional checks for the loaded configuration?
}
for(int mu=0;mu<Nd;mu++){
U[mu] = PeekIndex<LorentzIndex>(Umu,mu);
}
RealD mass = JP.mass;
RealD M5 = JP.M5;
// ypj [fixme] flexible support for various fermion actions
// RealD mob_b = JP.mob_b; // Gparity
// std::vector<ComplexD> omega; // ZMobius
// GparityMobiusFermionD ::ImplParams params;
// std::vector<int> twists({1,1,1,0});
// params.twists = twists;
// GparityMobiusFermionR Ddwf(Umu,*FGrid,*FrbGrid,*UGrid,*UrbGrid,mass,M5,mob_b,mob_b-1.,params);
// SchurDiagTwoOperator<GparityMobiusFermionR,FermionField> HermOp(Ddwf);
//WilsonFermionR::ImplParams params;
ZMobiusFermionR::ImplParams params;
params.overlapCommsCompute = true;
params.boundary_phases = JP.boundary_phase;
ZMobiusFermionR Ddwf(Umu,*FGrid,*FrbGrid,*UGrid,*UrbGrid,mass,M5,JP.omega,1.,0.,params);
SchurDiagTwoOperator<ZMobiusFermionR,FermionField> HermOp(Ddwf);
//std::vector<double> Coeffs { 0.,-1.};
// ypj [note] this may not be supported by some compilers
std::vector<double> Coeffs({ 0.,-1.});
Polynomial<FermionField> PolyX(Coeffs);
//Chebyshev<FermionField> Cheb(0.2,5.5,11);
Chebyshev<FermionField> Cheb(JP.low,JP.high,JP.order);
// Cheb.csv(std::cout);
ImplicitlyRestartedBlockLanczos<FermionField> IRBL(HermOp,
Cheb,
JP.Nstop, JP.Ntest,
JP.Nu, JP.Nk, JP.Nm,
JP.resid,
JP.MaxIter);
std::vector<RealD> eval(JP.Nm);
std::vector<FermionField> src(JP.Nu,FrbGrid);
for ( int i=0; i<JP.Nu; ++i ) gaussian(RNG5rb,src[i]);
std::vector<FermionField> evec(JP.Nm,FrbGrid);
for(int i=0;i<1;++i){
std::cout << GridLogMessage << i <<" / "<< JP.Nm <<" grid pointer "<< evec[i]._grid << std::endl;
};
int Nconv;
IRBL.calc(eval,evec,src,Nconv,JP.Impl);
Grid_finalize();
}
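// Example invocation (option names as parsed above; values illustrative):
//   ./Test_dwf_block_lanczos --gconf ckpoint_lat.1000 --Ls 8 --mass 0.01 --M5 1.8 \
//     --irbl 4,200,200,100,10 --resid 1.0e-8 --cheby_l 0.2 --cheby_u 5.5 --cheby_n 11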

View File

@ -22,6 +22,7 @@
*/
#include <Grid/Grid.h>
#include <Grid/algorithms/iterative/ImplicitlyRestartedLanczos.h>
#include <Grid/parallelIO/IldgIO.h>
/////////////////////////////////////////////////////////////////////////////
// The following are now decoupled from the Lanczos and deal with grids.
// Safe to replace functionality

View File

@ -33,6 +33,8 @@ Author: Peter Boyle <paboyle@ph.ed.ac.uk>
#include <Grid/Grid.h>
#include <Grid/algorithms/iterative/ImplicitlyRestartedLanczos.h>
#include <Grid/algorithms/iterative/LocalCoherenceLanczos.h>
#include <Grid/parallelIO/IldgIOtypes.h>
#include <Grid/parallelIO/IldgIO.h>
using namespace std;
using namespace Grid;
@ -57,7 +59,7 @@ public:
void checkpointFine(std::string evecs_file,std::string evals_file)
{
assert(this->subspace.size()==nbasis);
emptyUserRecord record;
Grid::emptyUserRecord record;
Grid::QCD::ScidacWriter WR(this->_FineGrid->IsBoss());
WR.open(evecs_file);
for(int k=0;k<nbasis;k++) {

View File

@ -75,16 +75,16 @@ int main (int argc, char ** argv)
SchurDiagTwoOperator<GparityMobiusFermionR,FermionField> HermOp(Ddwf);
// SchurDiagMooeeOperator<DomainWallFermionR,LatticeFermion> HermOp(Ddwf);
const int Nstop = 30;
const int Nk = 40;
const int Np = 40;
const int Nstop = 120;
const int Nk = 240;
const int Np = 240;
const int Nm = Nk+Np;
const int MaxIt= 10000;
const int MaxIt= 10;
RealD resid = 1.0e-8;
std::vector<double> Coeffs { 0.,-1.};
Polynomial<FermionField> PolyX(Coeffs);
Chebyshev<FermionField> Cheby(0.2,5.,11);
Chebyshev<FermionField> Cheby(0.2,5.5,11);
FunctionHermOp<FermionField> OpCheby(Cheby,HermOp);
PlainHermOp<FermionField> Op (HermOp);

View File

@ -58,7 +58,7 @@ int main(int argc, char** argv) {
GridParallelRNG RNG4(UGrid);
RNG4.SeedFixedIntegers(seeds4);
GridParallelRNG RNG5rb(FrbGrid);
RNG5.SeedFixedIntegers(seeds5);
RNG5.SeedFixedIntegers(seeds5); // ypj [note] should this be RNG5rb? RNG5rb is never used.
LatticeGaugeField Umu(UGrid);
SU3::HotConfiguration(RNG4, Umu);

View File

@ -26,17 +26,18 @@ Author: Peter Boyle <paboyle@ph.ed.ac.uk>
*************************************************************************************/
/* END LEGAL */
#include <Grid/Grid.h>
#include <Grid/parallelIO/IldgIOtypes.h>
#include <Grid/algorithms/iterative/BlockConjugateGradient.h>
using namespace std;
//using namespace std;
using namespace Grid;
using namespace Grid::QCD;
int main (int argc, char ** argv)
{
typedef typename DomainWallFermionR::FermionField FermionField;
typedef typename DomainWallFermionR::ComplexField ComplexField;
typename DomainWallFermionR::ImplParams params;
typedef typename Grid::QCD::DomainWallFermionR::FermionField FermionField;
typedef typename Grid::QCD::DomainWallFermionR::ComplexField ComplexField;
typename Grid::QCD::DomainWallFermionR::ImplParams params;
const int Ls=4;
@ -102,7 +103,7 @@ int main (int argc, char ** argv)
std::cout << GridLogMessage << "****************************************************************** "<<std::endl;
std::cout << GridLogMessage << " Writing out in parallel view "<<std::endl;
std::cout << GridLogMessage << "****************************************************************** "<<std::endl;
emptyUserRecord record;
// Grid::emptyUserRecord record;
std::string file("./scratch.scidac");
std::string filef("./scratch.scidac.ferm");
@ -115,20 +116,20 @@ int main (int argc, char ** argv)
{
FGrid->Barrier();
ScidacWriter _ScidacWriter(FGrid->IsBoss());
_ScidacWriter.open(file);
// _ScidacWriter.open(file);
std::cout << GridLogMessage << "****************************************************************** "<<std::endl;
std::cout << GridLogMessage << " Writing out gauge field "<<std::endl;
std::cout << GridLogMessage << "****************************************************************** "<<std::endl;
_ScidacWriter.writeScidacFieldRecord(Umu,record);
_ScidacWriter.close();
// _ScidacWriter.writeScidacFieldRecord(Umu,record);
// _ScidacWriter.close();
FGrid->Barrier();
std::cout << GridLogMessage << "****************************************************************** "<<std::endl;
std::cout << GridLogMessage << " Reading in gauge field "<<std::endl;
std::cout << GridLogMessage << "****************************************************************** "<<std::endl;
ScidacReader _ScidacReader;
_ScidacReader.open(file);
_ScidacReader.readScidacFieldRecord(s_Umu,record);
_ScidacReader.close();
// ScidacReader _ScidacReader;
// _ScidacReader.open(file);
// _ScidacReader.readScidacFieldRecord(s_Umu,record);
// _ScidacReader.close();
FGrid->Barrier();
std::cout << GridLogMessage << "****************************************************************** "<<std::endl;
std::cout << GridLogMessage << " Read in gauge field "<<std::endl;
@ -145,9 +146,9 @@ int main (int argc, char ** argv)
std::stringstream filefn; filefn << filef << "."<< n;
ScidacWriter _ScidacWriter(FGrid->IsBoss());
_ScidacWriter.open(filefn.str());
_ScidacWriter.writeScidacFieldRecord(src[n],record);
_ScidacWriter.close();
// _ScidacWriter.open(filefn.str());
// _ScidacWriter.writeScidacFieldRecord(src[n],record);
// _ScidacWriter.close();
}
FGrid->Barrier();
@ -159,10 +160,10 @@ int main (int argc, char ** argv)
for(int n=0;n<nrhs;n++){
if ( n==me ) {
std::stringstream filefn; filefn << filef << "."<< n;
ScidacReader _ScidacReader;
_ScidacReader.open(filefn.str());
_ScidacReader.readScidacFieldRecord(s_src,record);
_ScidacReader.close();
// ScidacReader _ScidacReader;
// _ScidacReader.open(filefn.str());
// _ScidacReader.readScidacFieldRecord(s_src,record);
// _ScidacReader.close();
}
}
FGrid->Barrier();

View File

@ -74,8 +74,16 @@ int main (int argc, char ** argv)
LatticeGaugeField Umu(UGrid); SU3::HotConfiguration(pRNG,Umu);
double volume=1;
for(int mu=0;mu<Nd;mu++){
volume=volume*latt_size[mu];
}
RealD mass=0.003;
ImprovedStaggeredFermion5DR Ds(Umu,Umu,*FGrid,*FrbGrid,*UGrid,*UrbGrid,mass);
RealD c1=9.0/8.0;
RealD c2=-1.0/24.0;
RealD u0=1.0;
ImprovedStaggeredFermion5DR Ds(Umu,Umu,*FGrid,*FrbGrid,*UGrid,*UrbGrid,mass,c1,c2,u0);
SchurStaggeredOperator<ImprovedStaggeredFermion5DR,FermionField> HermOp(Ds);
ConjugateGradient<FermionField> CG(1.0e-8,10000);
@ -87,14 +95,26 @@ int main (int argc, char ** argv)
std::cout << GridLogMessage << "****************************************************************** "<<std::endl;
std::cout << GridLogMessage << " Calling 4d CG "<<std::endl;
std::cout << GridLogMessage << "****************************************************************** "<<std::endl;
ImprovedStaggeredFermionR Ds4d(Umu,Umu,*UGrid,*UrbGrid,mass);
ImprovedStaggeredFermionR Ds4d(Umu,Umu,*UGrid,*UrbGrid,mass,c1,c2,u0);
SchurStaggeredOperator<ImprovedStaggeredFermionR,FermionField> HermOp4d(Ds4d);
FermionField src4d(UGrid); random(pRNG,src4d);
FermionField src4d_o(UrbGrid); pickCheckerboard(Odd,src4d_o,src4d);
FermionField result4d_o(UrbGrid);
double deodoe_flops=(16*(3*(6+8+8)) + 15*3*2)*volume; // == 16*66 + 90 == 1146 flops per site
result4d_o=zero;
CG(HermOp4d,src4d_o,result4d_o);
{
double t1=usecond();
CG(HermOp4d,src4d_o,result4d_o);
double t2=usecond();
double ncall=CG.IterationsToComplete;
double flops = deodoe_flops * ncall;
std::cout<<GridLogMessage << "usec = "<< (t2-t1)<<std::endl;
std::cout<<GridLogMessage << "flops = "<< flops<<std::endl;
std::cout<<GridLogMessage << "mflop/s = "<< flops/(t2-t1)<<std::endl;
HermOp4d.Report();
}
Ds4d.Report();
std::cout << GridLogMessage << "************************************************************************ "<<std::endl;
@ -103,7 +123,17 @@ int main (int argc, char ** argv)
std::cout << GridLogMessage << "************************************************************************ "<<std::endl;
Ds.ZeroCounters();
result_o=zero;
CG(HermOp,src_o,result_o);
{
double t1=usecond();
CG(HermOp,src_o,result_o);
double t2=usecond();
double ncall=CG.IterationsToComplete*Ls;
double flops = deodoe_flops * ncall;
std::cout<<GridLogMessage << "usec = "<< (t2-t1)<<std::endl;
std::cout<<GridLogMessage << "flops = "<< flops<<std::endl;
std::cout<<GridLogMessage << "mflop/s = "<< flops/(t2-t1)<<std::endl;
HermOp.Report();
}
Ds.Report();
std::cout << GridLogMessage << "************************************************************************ "<<std::endl;
@ -112,7 +142,18 @@ int main (int argc, char ** argv)
std::cout << GridLogMessage << "************************************************************************ "<<std::endl;
Ds.ZeroCounters();
result_o=zero;
mCG(HermOp,src_o,result_o);
{
double t1=usecond();
mCG(HermOp,src_o,result_o);
double t2=usecond();
double ncall=mCG.IterationsToComplete*Ls;
double flops = deodoe_flops * ncall;
std::cout<<GridLogMessage << "usec = "<< (t2-t1)<<std::endl;
std::cout<<GridLogMessage << "flops = "<< flops<<std::endl;
std::cout<<GridLogMessage << "mflop/s = "<< flops/(t2-t1)<<std::endl;
HermOp.Report();
}
Ds.Report();
std::cout << GridLogMessage << "************************************************************************ "<<std::endl;
@ -121,7 +162,17 @@ int main (int argc, char ** argv)
std::cout << GridLogMessage << "************************************************************************ "<<std::endl;
Ds.ZeroCounters();
result_o=zero;
BCGrQ(HermOp,src_o,result_o);
{
double t1=usecond();
BCGrQ(HermOp,src_o,result_o);
double t2=usecond();
double ncall=BCGrQ.IterationsToComplete*Ls;
double flops = deodoe_flops * ncall;
std::cout<<GridLogMessage << "usec = "<< (t2-t1)<<std::endl;
std::cout<<GridLogMessage << "flops = "<< flops<<std::endl;
std::cout<<GridLogMessage << "mflop/s = "<< flops/(t2-t1)<<std::endl;
HermOp.Report();
}
Ds.Report();
std::cout << GridLogMessage << "************************************************************************ "<<std::endl;

View File

@ -74,7 +74,16 @@ int main (int argc, char ** argv)
LatticeGaugeField Umu(UGrid); SU3::HotConfiguration(pRNG,Umu);
RealD mass=0.003;
ImprovedStaggeredFermion5DR Ds(Umu,Umu,*FGrid,*FrbGrid,*UGrid,*UrbGrid,mass);
RealD c1=9.0/8.0;
RealD c2=-1.0/24.0;
RealD u0=1.0;
double volume=1;
for(int mu=0;mu<Nd;mu++){
volume=volume*latt_size[mu];
}
ImprovedStaggeredFermion5DR Ds(Umu,Umu,*FGrid,*FrbGrid,*UGrid,*UrbGrid,mass,c1,c2,u0);
MdagMLinearOperator<ImprovedStaggeredFermion5DR,FermionField> HermOp(Ds);
ConjugateGradient<FermionField> CG(1.0e-8,10000);
@ -86,11 +95,23 @@ int main (int argc, char ** argv)
std::cout << GridLogMessage << "****************************************************************** "<<std::endl;
std::cout << GridLogMessage << " Calling 4d CG "<<std::endl;
std::cout << GridLogMessage << "****************************************************************** "<<std::endl;
ImprovedStaggeredFermionR Ds4d(Umu,Umu,*UGrid,*UrbGrid,mass);
ImprovedStaggeredFermionR Ds4d(Umu,Umu,*UGrid,*UrbGrid,mass,c1,c2,u0);
MdagMLinearOperator<ImprovedStaggeredFermionR,FermionField> HermOp4d(Ds4d);
FermionField src4d(UGrid); random(pRNG,src4d);
FermionField result4d(UGrid); result4d=zero;
CG(HermOp4d,src4d,result4d);
double deodoe_flops=(16*(3*(6+8+8)) + 15*3*2)*volume; // == 16*66 + 90 == 1146 flops per site
{
double t1=usecond();
CG(HermOp4d,src4d,result4d);
double t2=usecond();
double ncall=CG.IterationsToComplete;
double flops = deodoe_flops * ncall;
std::cout<<GridLogMessage << "usec = "<< (t2-t1)<<std::endl;
std::cout<<GridLogMessage << "flops = "<< flops<<std::endl;
std::cout<<GridLogMessage << "mflop/s = "<< flops/(t2-t1)<<std::endl;
}
std::cout << GridLogMessage << "************************************************************************ "<<std::endl;
@ -98,9 +119,18 @@ int main (int argc, char ** argv)
std::cout << GridLogMessage << " Calling 5d CG for "<<Ls <<" right hand sides" <<std::endl;
std::cout << GridLogMessage << "************************************************************************ "<<std::endl;
result=zero;
{
Ds.ZeroCounters();
double t1=usecond();
CG(HermOp,src,result);
double t2=usecond();
double ncall=CG.IterationsToComplete;
double flops = deodoe_flops * ncall;
std::cout<<GridLogMessage << "usec = "<< (t2-t1)<<std::endl;
std::cout<<GridLogMessage << "flops = "<< flops<<std::endl;
std::cout<<GridLogMessage << "mflop/s = "<< flops/(t2-t1)<<std::endl;
Ds.Report();
}
std::cout << GridLogMessage << "************************************************************************ "<<std::endl;
std::cout << GridLogMessage << "************************************************************************ "<<std::endl;
@ -108,7 +138,16 @@ int main (int argc, char ** argv)
std::cout << GridLogMessage << "************************************************************************ "<<std::endl;
result=zero;
Ds.ZeroCounters();
{
double t1=usecond();
mCG(HermOp,src,result);
double t2=usecond();
double ncall=mCG.IterationsToComplete;
double flops = deodoe_flops * ncall;
std::cout<<GridLogMessage << "usec = "<< (t2-t1)<<std::endl;
std::cout<<GridLogMessage << "flops = "<< flops<<std::endl;
std::cout<<GridLogMessage << "mflop/s = "<< flops/(t2-t1)<<std::endl;
}
Ds.Report();
std::cout << GridLogMessage << "************************************************************************ "<<std::endl;
@ -117,7 +156,16 @@ int main (int argc, char ** argv)
std::cout << GridLogMessage << "************************************************************************ "<<std::endl;
result=zero;
Ds.ZeroCounters();
{
double t1=usecond();
BCGrQ(HermOp,src,result);
double t2=usecond();
double ncall=BCGrQ.IterationsToComplete;
double flops = deodoe_flops * ncall;
std::cout<<GridLogMessage << "usec = "<< (t2-t1)<<std::endl;
std::cout<<GridLogMessage << "flops = "<< flops<<std::endl;
std::cout<<GridLogMessage << "mflop/s = "<< flops/(t2-t1)<<std::endl;
}
Ds.Report();
std::cout << GridLogMessage << "************************************************************************ "<<std::endl;

View File

@ -71,7 +71,10 @@ int main (int argc, char ** argv)
}
RealD mass=0.003;
ImprovedStaggeredFermionR Ds(Umu,Umu,Grid,RBGrid,mass);
RealD c1=9.0/8.0;
RealD c2=-1.0/24.0;
RealD u0=1.0;
ImprovedStaggeredFermionR Ds(Umu,Umu,Grid,RBGrid,mass,c1,c2,u0);
FermionField res_o(&RBGrid);
FermionField src_o(&RBGrid);
@ -80,7 +83,19 @@ int main (int argc, char ** argv)
SchurStaggeredOperator<ImprovedStaggeredFermionR,FermionField> HermOpEO(Ds);
ConjugateGradient<FermionField> CG(1.0e-8,10000);
double t1=usecond();
CG(HermOpEO,src_o,res_o);
double t2=usecond();
// Schur solver: uses DeoDoe => volume * 1146
double ncall=CG.IterationsToComplete;
double flops=(16*(3*(6+8+8)) + 15*3*2)*volume*ncall; // == 16*66 + 90 == 1146 flops per site
std::cout<<GridLogMessage << "usec = "<< (t2-t1)<<std::endl;
std::cout<<GridLogMessage << "flops = "<< flops<<std::endl;
std::cout<<GridLogMessage << "mflop/s = "<< flops/(t2-t1)<<std::endl;
FermionField tmp(&RBGrid);

View File

@ -65,7 +65,10 @@ int main (int argc, char ** argv)
FermionField resid(&Grid);
RealD mass=0.1;
ImprovedStaggeredFermionR Ds(Umu,Umu,Grid,RBGrid,mass);
RealD c1=9.0/8.0;
RealD c2=-1.0/24.0;
RealD u0=1.0;
ImprovedStaggeredFermionR Ds(Umu,Umu,Grid,RBGrid,mass,c1,c2,u0);
ConjugateGradient<FermionField> CG(1.0e-8,10000);
SchurRedBlackStaggeredSolve<FermionField> SchurSolver(CG);

View File

@ -73,7 +73,10 @@ int main (int argc, char ** argv)
}
RealD mass=0.1;
ImprovedStaggeredFermionR Ds(Umu,Umu,Grid,RBGrid,mass);
RealD c1=9.0/8.0;
RealD c2=-1.0/24.0;
RealD u0=1.0;
ImprovedStaggeredFermionR Ds(Umu,Umu,Grid,RBGrid,mass,c1,c2,u0);
MdagMLinearOperator<ImprovedStaggeredFermionR,FermionField> HermOp(Ds);
ConjugateGradient<FermionField> CG(1.0e-6,10000);

View File

@ -0,0 +1,121 @@
/*************************************************************************************
Grid physics library, www.github.com/paboyle/Grid
Source file: ./tests/solver/Test_staggered_multishift.cc
Copyright (C) 2015
Author: Azusa Yamaguchi <ayamaguc@staffmail.ed.ac.uk>
Author: Peter Boyle <paboyle@ph.ed.ac.uk>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */
#include <Grid/Grid.h>
#include <Grid/algorithms/iterative/BlockConjugateGradient.h>
using namespace std;
using namespace Grid;
using namespace Grid::QCD;
template<class d>
struct scal {
d internal;
};
Gamma::Algebra Gmu [] = {
Gamma::Algebra::GammaX,
Gamma::Algebra::GammaY,
Gamma::Algebra::GammaZ,
Gamma::Algebra::GammaT
};
int main (int argc, char ** argv)
{
typedef typename ImprovedStaggeredFermionR::FermionField FermionField;
typename ImprovedStaggeredFermionR::ImplParams params;
Grid_init(&argc,&argv);
std::vector<int> latt_size = GridDefaultLatt();
std::vector<int> simd_layout = GridDefaultSimd(Nd,vComplex::Nsimd());
std::vector<int> mpi_layout = GridDefaultMpi();
GridCartesian Grid(latt_size,simd_layout,mpi_layout);
GridRedBlackCartesian RBGrid(&Grid);
std::vector<int> seeds({1,2,3,4});
GridParallelRNG pRNG(&Grid); pRNG.SeedFixedIntegers(seeds);
LatticeGaugeField Umu(&Grid); SU3::HotConfiguration(pRNG,Umu);
double volume=1;
for(int mu=0;mu<Nd;mu++){
volume=volume*latt_size[mu];
}
////////////////////////////////////////
// sqrt
////////////////////////////////////////
double lo=0.001;
double hi=1.0;
int precision=64;
int degree=10;
AlgRemez remez(lo,hi,precision);
remez.generateApprox(degree,1,2);
MultiShiftFunction Sqrt(remez,1.0e-6,false);
std::cout<<GridLogMessage << "Generating degree "<<degree<<" for x^(1/2)"<<std::endl;
////////////////////////////////////////////
// Setup staggered
////////////////////////////////////////////
RealD mass=0.003;
RealD c1=9.0/8.0;
RealD c2=-1.0/24.0;
RealD u0=1.0;
ImprovedStaggeredFermionR Ds(Umu,Umu,Grid,RBGrid,mass,c1,c2,u0);
SchurStaggeredOperator<ImprovedStaggeredFermionR,FermionField> HermOpEO(Ds);
FermionField src(&Grid); random(pRNG,src);
FermionField src_o(&RBGrid);
pickCheckerboard(Odd,src_o,src);
/////////////////////////////////
//Multishift CG
/////////////////////////////////
std::vector<FermionField> result(degree,&RBGrid);
ConjugateGradientMultiShift<FermionField> MSCG(10000,Sqrt);
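// One Krylov sequence solves (HermOpEO + s_i) x_i = src_o for every pole s_i
// of the rational approximation; result[i] receives the solution for shift i.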
double deodoe_flops=(1205+15*degree)*volume; // per-site Dhop cost plus ~15 flops per shift (estimate)
double t1=usecond();
MSCG(HermOpEO,src_o,result);
double t2=usecond();
double ncall=MSCG.IterationsToComplete;
double flops = deodoe_flops * ncall;
std::cout<<GridLogMessage << "usec = "<< (t2-t1)<<std::endl;
std::cout<<GridLogMessage << "flops = "<< flops<<std::endl;
std::cout<<GridLogMessage << "mflop/s = "<< flops/(t2-t1)<<std::endl;
// HermOpEO.Report();
Grid_finalize();
}