diff --git a/TODO b/TODO index c37cbf8b..83bfda5e 100644 --- a/TODO +++ b/TODO @@ -3,19 +3,19 @@ TODO: Large item work list: -1)- BG/Q port and check +1)- BG/Q port and check ; Andrew says ok. 2)- Christoph's local basis expansion Lanczos -3)- Precision conversion and sort out localConvert <-- partial - - - Consistent linear solver flop count/rate -- PARTIAL, time but no flop/s yet +-- +3a)- RNG I/O in ILDG/SciDAC (minor) +3b)- Precision conversion and sort out localConvert <-- partial/easy +3c)- Consistent linear solver flop count/rate -- PARTIAL, time but no flop/s yet 4)- Physical propagator interface 5)- Conserved currents 6)- Multigrid Wilson and DWF, compare to other Multigrid implementations 7)- HDCR resume Recent DONE - --- MultiRHS with spread out extra dim -- Go through filesystem with SciDAC I/O. <--- DONE +-- MultiRHS with spread out extra dim -- Go through filesystem with SciDAC I/O ; <-- DONE ; bmark cori -- Lanczos Remove DenseVector, DenseMatrix; Use Eigen instead. <-- DONE -- GaugeFix into central location <-- DONE -- Scidac and Ildg metadata handling <-- DONE diff --git a/benchmarks/Benchmark_staggered.cc b/benchmarks/Benchmark_staggered.cc index dc2dcf91..f5325b28 100644 --- a/benchmarks/Benchmark_staggered.cc +++ b/benchmarks/Benchmark_staggered.cc @@ -40,7 +40,7 @@ int main (int argc, char ** argv) std::vector simd_layout = GridDefaultSimd(Nd,vComplex::Nsimd()); std::vector mpi_layout = GridDefaultMpi(); GridCartesian Grid(latt_size,simd_layout,mpi_layout); - GridRedBlackCartesian RBGrid(latt_size,simd_layout,mpi_layout); + GridRedBlackCartesian RBGrid(&Grid); int threads = GridThread::GetThreads(); std::cout< simd_layout = GridDefaultSimd(Nd,vComplex::Nsimd()); std::vector mpi_layout = GridDefaultMpi(); GridCartesian Grid(latt_size,simd_layout,mpi_layout); - GridRedBlackCartesian RBGrid(latt_size,simd_layout,mpi_layout); + GridRedBlackCartesian RBGrid(&Grid); int threads = GridThread::GetThreads(); std::cout< #include #include #include +#include #include #include @@ -44,31 +45,16 @@ Author: Peter Boyle #include #include #include +#include #include - -// Lanczos support -//#include #include #include #include -// Eigen/lanczos // EigCg -// MCR // Pcg -// Multishift CG // Hdcg // GCR // etc.. -// integrator/Leapfrog -// integrator/Omelyan -// integrator/ForceGradient - -// montecarlo/hmc -// montecarlo/rhmc -// montecarlo/metropolis -// etc... 
- - #endif diff --git a/lib/algorithms/FFT.h b/lib/algorithms/FFT.h index 240f338b..ec558ad9 100644 --- a/lib/algorithms/FFT.h +++ b/lib/algorithms/FFT.h @@ -230,6 +230,7 @@ namespace Grid { // Barrel shift and collect global pencil std::vector lcoor(Nd), gcoor(Nd); result = source; + int pc = processor_coor[dim]; for(int p=0;plSites();idx++) { sgrid->LocalIndexToLocalCoor(idx,cbuf); peekLocalSite(s,result,cbuf); - cbuf[dim]+=p*L; + cbuf[dim]+=((pc+p) % processors[dim])*L; + // cbuf[dim]+=p*L; pokeLocalSite(s,pgbuf,cbuf); } } @@ -278,7 +280,6 @@ namespace Grid { flops+= flops_call*NN; // writing out result - int pc = processor_coor[dim]; PARALLEL_REGION { std::vector clbuf(Nd), cgbuf(Nd); diff --git a/lib/algorithms/LinearOperator.h b/lib/algorithms/LinearOperator.h index 6cb77296..f1b8820e 100644 --- a/lib/algorithms/LinearOperator.h +++ b/lib/algorithms/LinearOperator.h @@ -162,15 +162,10 @@ namespace Grid { _Mat.M(in,out); } void HermOpAndNorm(const Field &in, Field &out,RealD &n1,RealD &n2){ - ComplexD dot; - _Mat.M(in,out); - dot= innerProduct(in,out); - n1=real(dot); - - dot = innerProduct(out,out); - n2=real(dot); + ComplexD dot= innerProduct(in,out); n1=real(dot); + n2=norm2(out); } void HermOp(const Field &in, Field &out){ _Mat.M(in,out); @@ -192,10 +187,10 @@ namespace Grid { ni=Mpc(in,tmp); no=MpcDag(tmp,out); } - void HermOpAndNorm(const Field &in, Field &out,RealD &n1,RealD &n2){ + virtual void HermOpAndNorm(const Field &in, Field &out,RealD &n1,RealD &n2){ MpcDagMpc(in,out,n1,n2); } - void HermOp(const Field &in, Field &out){ + virtual void HermOp(const Field &in, Field &out){ RealD n1,n2; HermOpAndNorm(in,out,n1,n2); } @@ -212,7 +207,6 @@ namespace Grid { void OpDir (const Field &in, Field &out,int dir,int disp) { assert(0); } - }; template class SchurDiagMooeeOperator : public SchurOperatorBase { @@ -270,7 +264,6 @@ namespace Grid { return axpy_norm(out,-1.0,tmp,in); } }; - template class SchurDiagTwoOperator : public SchurOperatorBase { protected: @@ -299,6 +292,45 @@ namespace Grid { return axpy_norm(out,-1.0,tmp,in); } }; + /////////////////////////////////////////////////////////////////////////////////////////////////// + // Left handed Moo^-1 ; (Moo - Moe Mee^-1 Meo) psi = eta --> ( 1 - Moo^-1 Moe Mee^-1 Meo ) psi = Moo^-1 eta + // Right handed Moo^-1 ; (Moo - Moe Mee^-1 Meo) Moo^-1 Moo psi = eta --> ( 1 - Moe Mee^-1 Meo ) Moo^-1 phi=eta ; psi = Moo^-1 phi + /////////////////////////////////////////////////////////////////////////////////////////////////// + template using SchurDiagOneRH = SchurDiagTwoOperator ; + template using SchurDiagOneLH = SchurDiagOneOperator ; + /////////////////////////////////////////////////////////////////////////////////////////////////// + // Staggered use + /////////////////////////////////////////////////////////////////////////////////////////////////// + template + class SchurStaggeredOperator : public SchurOperatorBase { + protected: + Matrix &_Mat; + public: + SchurStaggeredOperator (Matrix &Mat): _Mat(Mat){}; + virtual void HermOpAndNorm(const Field &in, Field &out,RealD &n1,RealD &n2){ + n2 = Mpc(in,out); + ComplexD dot= innerProduct(in,out); + n1 = real(dot); + } + virtual void HermOp(const Field &in, Field &out){ + Mpc(in,out); + } + virtual RealD Mpc (const Field &in, Field &out) { + Field tmp(in._grid); + _Mat.Meooe(in,tmp); + _Mat.MooeeInv(tmp,out); + _Mat.MeooeDag(out,tmp); + _Mat.Mooee(in,out); + return axpy_norm(out,-1.0,tmp,out); + } + virtual RealD MpcDag (const Field &in, Field &out){ + return Mpc(in,out); + } + 
virtual void MpcDagMpc(const Field &in, Field &out,RealD &ni,RealD &no) { + assert(0);// Never need with staggered + } + }; + template using SchurStagOperator = SchurStaggeredOperator; ///////////////////////////////////////////////////////////// diff --git a/lib/algorithms/approx/Chebyshev.h b/lib/algorithms/approx/Chebyshev.h index 2793f138..f8c21a05 100644 --- a/lib/algorithms/approx/Chebyshev.h +++ b/lib/algorithms/approx/Chebyshev.h @@ -8,6 +8,7 @@ Author: Peter Boyle Author: paboyle +Author: Christoph Lehner This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -193,6 +194,47 @@ namespace Grid { return sum; }; + RealD approxD(RealD x) + { + RealD Un; + RealD Unm; + RealD Unp; + + RealD y=( x-0.5*(hi+lo))/(0.5*(hi-lo)); + + RealD U0=1; + RealD U1=2*y; + + RealD sum; + sum = Coeffs[1]*U0; + sum+= Coeffs[2]*U1*2.0; + + Un =U1; + Unm=U0; + for(int i=2;i::quiet_NaN(); + } + // Implement the required interface void operator() (LinearOperatorBase &Linop, const Field &in, Field &out) { diff --git a/lib/algorithms/approx/Forecast.h b/lib/algorithms/approx/Forecast.h new file mode 100644 index 00000000..87eb84a6 --- /dev/null +++ b/lib/algorithms/approx/Forecast.h @@ -0,0 +1,152 @@ +/************************************************************************************* + +Grid physics library, www.github.com/paboyle/Grid + +Source file: ./lib/algorithms/approx/Forecast.h + +Copyright (C) 2015 + +Author: Peter Boyle +Author: paboyle +Author: David Murphy + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 2 of the License, or +(at your option) any later version. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License along +with this program; if not, write to the Free Software Foundation, Inc., +51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + +See the full license in the file "LICENSE" in the top level distribution directory +*************************************************************************************/ +/* END LEGAL */ + +#ifndef INCLUDED_FORECAST_H +#define INCLUDED_FORECAST_H + +namespace Grid { + + // Abstract base class. + // Takes a matrix (Mat), a source (phi), and a vector of Fields (chi) + // and returns a forecasted solution to the system D*psi = phi (psi). + template + class Forecast + { + public: + virtual Field operator()(Matrix &Mat, const Field& phi, const std::vector& chi) = 0; + }; + + // Implementation of Brower et al.'s chronological inverter (arXiv:hep-lat/9509012), + // used to forecast solutions across poles of the EOFA heatbath. 
+ // + // Modified from CPS (cps_pp/src/util/dirac_op/d_op_base/comsrc/minresext.C) + template + class ChronoForecast : public Forecast + { + public: + Field operator()(Matrix &Mat, const Field& phi, const std::vector& prev_solns) + { + int degree = prev_solns.size(); + Field chi(phi); // forecasted solution + + // Trivial cases + if(degree == 0){ chi = zero; return chi; } + else if(degree == 1){ return prev_solns[0]; } + + RealD dot; + ComplexD xp; + Field r(phi); // residual + Field Mv(phi); + std::vector v(prev_solns); // orthonormalized previous solutions + std::vector MdagMv(degree,phi); + + // Array to hold the matrix elements + std::vector> G(degree, std::vector(degree)); + + // Solution and source vectors + std::vector a(degree); + std::vector b(degree); + + // Orthonormalize the vector basis + for(int i=0; i std::abs(G[k][k])){ k = j; } } + if(k != i){ + xp = b[k]; + b[k] = b[i]; + b[i] = xp; + for(int j=0; j=0; i--){ + a[i] = 0.0; + for(int j=i+1; j //memset + +#include +#include + +#include +#include +#include +#include + +namespace Grid { + +///////////////////////////////////////////////////////////// +// Implicitly restarted lanczos +///////////////////////////////////////////////////////////// + + template + class BlockImplicitlyRestartedLanczos { + + const RealD small = 1.0e-16; +public: + int lock; + int get; + int Niter; + int converged; + + int Nminres; // Minimum number of restarts; only check for convergence after + int Nstop; // Number of evecs checked for convergence + int Nk; // Number of converged sought + int Np; // Np -- Number of spare vecs in kryloc space + int Nm; // Nm -- total number of vectors + + int orth_period; + + RealD OrthoTime; + + RealD eresid, betastp; + SortEigen _sort; + LinearFunction &_HermOp; + LinearFunction &_HermOpTest; + ///////////////////////// + // Constructor + ///////////////////////// + + BlockImplicitlyRestartedLanczos( + LinearFunction & HermOp, + LinearFunction & HermOpTest, + int _Nstop, // sought vecs + int _Nk, // sought vecs + int _Nm, // spare vecs + RealD _eresid, // resid in lmdue deficit + RealD _betastp, // if beta(k) < betastp: converged + int _Niter, // Max iterations + int _Nminres, int _orth_period = 1) : + _HermOp(HermOp), + _HermOpTest(HermOpTest), + Nstop(_Nstop), + Nk(_Nk), + Nm(_Nm), + eresid(_eresid), + betastp(_betastp), + Niter(_Niter), + Nminres(_Nminres), + orth_period(_orth_period) + { + Np = Nm-Nk; assert(Np>0); + }; + + BlockImplicitlyRestartedLanczos( + LinearFunction & HermOp, + LinearFunction & HermOpTest, + int _Nk, // sought vecs + int _Nm, // spare vecs + RealD _eresid, // resid in lmdue deficit + RealD _betastp, // if beta(k) < betastp: converged + int _Niter, // Max iterations + int _Nminres, + int _orth_period = 1) : + _HermOp(HermOp), + _HermOpTest(HermOpTest), + Nstop(_Nk), + Nk(_Nk), + Nm(_Nm), + eresid(_eresid), + betastp(_betastp), + Niter(_Niter), + Nminres(_Nminres), + orth_period(_orth_period) + { + Np = Nm-Nk; assert(Np>0); + }; + + +/* Saad PP. 195 +1. Choose an initial vector v1 of 2-norm unity. Set β1 ≡ 0, v0 ≡ 0 +2. For k = 1,2,...,m Do: +3. wk:=Avk−βkv_{k−1} +4. αk:=(wk,vk) // +5. wk:=wk−αkvk // wk orthog vk +6. βk+1 := ∥wk∥2. If βk+1 = 0 then Stop +7. vk+1 := wk/βk+1 +8. 
EndDo + */ + void step(std::vector& lmd, + std::vector& lme, + BasisFieldVector& evec, + Field& w,int Nm,int k) + { + assert( k< Nm ); + + GridStopWatch gsw_op,gsw_o; + + Field& evec_k = evec[k]; + + gsw_op.Start(); + _HermOp(evec_k,w); + gsw_op.Stop(); + + if(k>0){ + w -= lme[k-1] * evec[k-1]; + } + + ComplexD zalph = innerProduct(evec_k,w); // 4. αk:=(wk,vk) + RealD alph = real(zalph); + + w = w - alph * evec_k;// 5. wk:=wk−αkvk + + RealD beta = normalise(w); // 6. βk+1 := ∥wk∥2. If βk+1 = 0 then Stop + // 7. vk+1 := wk/βk+1 + + std::cout<0 && k % orth_period == 0) { + orthogonalize(w,evec,k); // orthonormalise + } + gsw_o.Stop(); + + if(k < Nm-1) { + evec[k+1] = w; + } + + std::cout << GridLogMessage << "Timing: operator=" << gsw_op.Elapsed() << + " orth=" << gsw_o.Elapsed() << std::endl; + + } + + void qr_decomp(std::vector& lmd, + std::vector& lme, + int Nk, + int Nm, + std::vector& Qt, + RealD Dsh, + int kmin, + int kmax) + { + int k = kmin-1; + RealD x; + + RealD Fden = 1.0/hypot(lmd[k]-Dsh,lme[k]); + RealD c = ( lmd[k] -Dsh) *Fden; + RealD s = -lme[k] *Fden; + + RealD tmpa1 = lmd[k]; + RealD tmpa2 = lmd[k+1]; + RealD tmpb = lme[k]; + + lmd[k] = c*c*tmpa1 +s*s*tmpa2 -2.0*c*s*tmpb; + lmd[k+1] = s*s*tmpa1 +c*c*tmpa2 +2.0*c*s*tmpb; + lme[k] = c*s*(tmpa1-tmpa2) +(c*c-s*s)*tmpb; + x =-s*lme[k+1]; + lme[k+1] = c*lme[k+1]; + + for(int i=0; i& lmd, + std::vector& lme, + int N1, + int N2, + std::vector& Qt, + GridBase *grid){ + + std::cout << GridLogMessage << "diagonalize_lapack start\n"; + GridStopWatch gsw; + + const int size = Nm; + // tevals.resize(size); + // tevecs.resize(size); + LAPACK_INT NN = N1; + std::vector evals_tmp(NN); + std::vector evec_tmp(NN*NN); + memset(&evec_tmp[0],0,sizeof(double)*NN*NN); + // double AA[NN][NN]; + std::vector DD(NN); + std::vector EE(NN); + for (int i = 0; i< NN; i++) + for (int j = i - 1; j <= i + 1; j++) + if ( j < NN && j >= 0 ) { + if (i==j) DD[i] = lmd[i]; + if (i==j) evals_tmp[i] = lmd[i]; + if (j==(i-1)) EE[j] = lme[j]; + } + LAPACK_INT evals_found; + LAPACK_INT lwork = ( (18*NN) > (1+4*NN+NN*NN)? 
(18*NN):(1+4*NN+NN*NN)) ; + LAPACK_INT liwork = 3+NN*10 ; + std::vector iwork(liwork); + std::vector work(lwork); + std::vector isuppz(2*NN); + char jobz = 'V'; // calculate evals & evecs + char range = 'I'; // calculate all evals + // char range = 'A'; // calculate all evals + char uplo = 'U'; // refer to upper half of original matrix + char compz = 'I'; // Compute eigenvectors of tridiagonal matrix + std::vector ifail(NN); + LAPACK_INT info; + // int total = QMP_get_number_of_nodes(); + // int node = QMP_get_node_number(); + // GridBase *grid = evec[0]._grid; + int total = grid->_Nprocessors; + int node = grid->_processor; + int interval = (NN/total)+1; + double vl = 0.0, vu = 0.0; + LAPACK_INT il = interval*node+1 , iu = interval*(node+1); + if (iu > NN) iu=NN; + double tol = 0.0; + if (1) { + memset(&evals_tmp[0],0,sizeof(double)*NN); + if ( il <= NN){ + std::cout << GridLogMessage << "dstegr started" << std::endl; + gsw.Start(); + dstegr(&jobz, &range, &NN, + (double*)&DD[0], (double*)&EE[0], + &vl, &vu, &il, &iu, // these four are ignored if second parameteris 'A' + &tol, // tolerance + &evals_found, &evals_tmp[0], (double*)&evec_tmp[0], &NN, + &isuppz[0], + &work[0], &lwork, &iwork[0], &liwork, + &info); + gsw.Stop(); + std::cout << GridLogMessage << "dstegr completed in " << gsw.Elapsed() << std::endl; + for (int i = iu-1; i>= il-1; i--){ + evals_tmp[i] = evals_tmp[i - (il-1)]; + if (il>1) evals_tmp[i-(il-1)]=0.; + for (int j = 0; j< NN; j++){ + evec_tmp[i*NN + j] = evec_tmp[(i - (il-1)) * NN + j]; + if (il>1) evec_tmp[(i-(il-1)) * NN + j]=0.; + } + } + } + { + // QMP_sum_double_array(evals_tmp,NN); + // QMP_sum_double_array((double *)evec_tmp,NN*NN); + grid->GlobalSumVector(&evals_tmp[0],NN); + grid->GlobalSumVector(&evec_tmp[0],NN*NN); + } + } + // cheating a bit. It is better to sort instead of just reversing it, but the document of the routine says evals are sorted in increasing order. qr gives evals in decreasing order. 
+ for(int i=0;i& lmd, + std::vector& lme, + int N2, + int N1, + std::vector& Qt, + GridBase *grid) + { + +#ifdef USE_LAPACK_IRL + const int check_lapack=0; // just use lapack if 0, check against lapack if 1 + + if(!check_lapack) + return diagonalize_lapack(lmd,lme,N2,N1,Qt,grid); + + std::vector lmd2(N1); + std::vector lme2(N1); + std::vector Qt2(N1*N1); + for(int k=0; k= kmin; --j){ + RealD dds = fabs(lmd[j-1])+fabs(lmd[j]); + if(fabs(lme[j-1])+dds > dds){ + kmax = j+1; + goto continued; + } + } + Niter = iter; +#ifdef USE_LAPACK_IRL + if(check_lapack){ + const double SMALL=1e-8; + diagonalize_lapack(lmd2,lme2,N2,N1,Qt2,grid); + std::vector lmd3(N2); + for(int k=0; kSMALL) std::cout<SMALL) std::cout<SMALL) std::cout< dds){ + kmin = j+1; + break; + } + } + } + std::cout< + static RealD normalise(T& v) + { + RealD nn = norm2(v); + nn = sqrt(nn); + v = v * (1.0/nn); + return nn; + } + + void orthogonalize(Field& w, + BasisFieldVector& evec, + int k) + { + double t0=-usecond()/1e6; + + evec.orthogonalize(w,k); + + normalise(w); + t0+=usecond()/1e6; + OrthoTime +=t0; + } + + void setUnit_Qt(int Nm, std::vector &Qt) { + for(int i=0; i K P = M − K † +Compute the factorization AVM = VM HM + fM eM +repeat + Q=I + for i = 1,...,P do + QiRi =HM −θiI Q = QQi + H M = Q †i H M Q i + end for + βK =HM(K+1,K) σK =Q(M,K) + r=vK+1βK +rσK + VK =VM(1:M)Q(1:M,1:K) + HK =HM(1:K,1:K) + →AVK =VKHK +fKe†K † Extend to an M = K + P step factorization AVM = VMHM + fMeM +until convergence +*/ + + void calc(std::vector& eval, + BasisFieldVector& evec, + const Field& src, + int& Nconv, + bool reverse, + int SkipTest) + { + + GridBase *grid = evec._v[0]._grid;//evec.get(0 + evec_offset)._grid; + assert(grid == src._grid); + + std::cout< lme(Nm); + std::vector lme2(Nm); + std::vector eval2(Nm); + std::vector eval2_copy(Nm); + std::vector Qt(Nm*Nm); + + + Field f(grid); + Field v(grid); + + int k1 = 1; + int k2 = Nk; + + Nconv = 0; + + RealD beta_k; + + // Set initial vector + evec[0] = src; + normalise(evec[0]); + std:: cout<0); + evec.rotate(Qt,k1-1,k2+1,0,Nm,Nm); + + t1=usecond()/1e6; + std::cout<= Nminres) { + std::cout << GridLogMessage << "Rotation to test convergence " << std::endl; + + Field ev0_orig(grid); + ev0_orig = evec[0]; + + evec.rotate(Qt,0,Nk,0,Nk,Nm); + + { + std::cout << GridLogMessage << "Test convergence" << std::endl; + Field B(grid); + + for(int j = 0; j=Nstop || beta_k < betastp){ + goto converged; + } + + std::cout << GridLogMessage << "Rotate back" << std::endl; + //B[j] +=Qt[k+_Nm*j] * _v[k]._odata[ss]; + { + Eigen::MatrixXd qm = Eigen::MatrixXd::Zero(Nk,Nk); + for (int k=0;k QtI(Nm*Nm); + for (int k=0;k +class BlockProjector { +public: + + BasisFieldVector& _evec; + BlockedGrid& _bgrid; + + BlockProjector(BasisFieldVector& evec, BlockedGrid& bgrid) : _evec(evec), _bgrid(bgrid) { + } + + void createOrthonormalBasis(RealD thres = 0.0) { + + GridStopWatch sw; + sw.Start(); + + int cnt = 0; + +#pragma omp parallel shared(cnt) + { + int lcnt = 0; + +#pragma omp for + for (int b=0;b<_bgrid._o_blocks;b++) { + + for (int i=0;i<_evec._Nm;i++) { + + auto nrm0 = _bgrid.block_sp(b,_evec._v[i],_evec._v[i]); + + // |i> -= |j> + for (int j=0;j + void coarseToFine(const CoarseField& in, Field& out) { + + out = zero; + out.checkerboard = _evec._v[0].checkerboard; + + int Nbasis = sizeof(in._odata[0]._internal._internal) / sizeof(in._odata[0]._internal._internal[0]); + assert(Nbasis == _evec._Nm); + +#pragma omp parallel for + for (int b=0;b<_bgrid._o_blocks;b++) { + for (int j=0;j<_evec._Nm;j++) { + 
_bgrid.block_caxpy(b,out,in._odata[b]._internal._internal[j],_evec._v[j],out); + } + } + + } + + template + void fineToCoarse(const Field& in, CoarseField& out) { + + out = zero; + + int Nbasis = sizeof(out._odata[0]._internal._internal) / sizeof(out._odata[0]._internal._internal[0]); + assert(Nbasis == _evec._Nm); + + + Field tmp(_bgrid._grid); + tmp = in; + +#pragma omp parallel for + for (int b=0;b<_bgrid._o_blocks;b++) { + for (int j=0;j<_evec._Nm;j++) { + // |rhs> -= |j> + auto c = _bgrid.block_sp(b,_evec._v[j],tmp); + _bgrid.block_caxpy(b,tmp,-c,_evec._v[j],tmp); // may make this more numerically stable + out._odata[b]._internal._internal[j] = c; + } + } + + } + + template + void deflateFine(BasisFieldVector& _coef,const std::vector& eval,int N,const Field& src_orig,Field& result) { + result = zero; + for (int i=0;i + void deflateCoarse(BasisFieldVector& _coef,const std::vector& eval,int N,const Field& src_orig,Field& result) { + CoarseField src_coarse(_coef._v[0]._grid); + CoarseField result_coarse = src_coarse; + result_coarse = zero; + fineToCoarse(src_orig,src_coarse); + for (int i=0;i + void deflate(BasisFieldVector& _coef,const std::vector& eval,int N,const Field& src_orig,Field& result) { + // Deflation on coarse Grid is much faster, so use it by default. Deflation on fine Grid is kept for legacy reasons for now. + deflateCoarse(_coef,eval,N,src_orig,result); + } + +}; +} diff --git a/lib/algorithms/iterative/BlockImplicitlyRestartedLanczos/BlockedGrid.h b/lib/algorithms/iterative/BlockImplicitlyRestartedLanczos/BlockedGrid.h new file mode 100644 index 00000000..821272de --- /dev/null +++ b/lib/algorithms/iterative/BlockImplicitlyRestartedLanczos/BlockedGrid.h @@ -0,0 +1,401 @@ +namespace Grid { + +template +class BlockedGrid { +public: + GridBase* _grid; + typedef typename Field::scalar_type Coeff_t; + typedef typename Field::vector_type vCoeff_t; + + std::vector _bs; // block size + std::vector _nb; // number of blocks + std::vector _l; // local dimensions irrespective of cb + std::vector _l_cb; // local dimensions of checkerboarded vector + std::vector _l_cb_o; // local dimensions of inner checkerboarded vector + std::vector _bs_cb; // block size in checkerboarded vector + std::vector _nb_o; // number of blocks of simd o-sites + + int _nd, _blocks, _cf_size, _cf_block_size, _cf_o_block_size, _o_blocks, _block_sites; + + BlockedGrid(GridBase* grid, const std::vector& block_size) : + _grid(grid), _bs(block_size), _nd((int)_bs.size()), + _nb(block_size), _l(block_size), _l_cb(block_size), _nb_o(block_size), + _l_cb_o(block_size), _bs_cb(block_size) { + + _blocks = 1; + _o_blocks = 1; + _l = grid->FullDimensions(); + _l_cb = grid->LocalDimensions(); + _l_cb_o = grid->_rdimensions; + + _cf_size = 1; + _block_sites = 1; + for (int i=0;i<_nd;i++) { + _l[i] /= grid->_processors[i]; + + assert(!(_l[i] % _bs[i])); // lattice must accommodate choice of blocksize + + int r = _l[i] / _l_cb[i]; + assert(!(_bs[i] % r)); // checkerboarding must accommodate choice of blocksize + _bs_cb[i] = _bs[i] / r; + _block_sites *= _bs_cb[i]; + _nb[i] = _l[i] / _bs[i]; + _nb_o[i] = _nb[i] / _grid->_simd_layout[i]; + if (_nb[i] % _grid->_simd_layout[i]) { // simd must accommodate choice of blocksize + std::cout << GridLogMessage << "Problem: _nb[" << i << "] = " << _nb[i] << " _grid->_simd_layout[" << i << "] = " << _grid->_simd_layout[i] << std::endl; + assert(0); + } + _blocks *= _nb[i]; + _o_blocks *= _nb_o[i]; + _cf_size *= _l[i]; + } + + _cf_size *= 12 / 2; + _cf_block_size = _cf_size / _blocks; + 
_cf_o_block_size = _cf_size / _o_blocks; + + std::cout << GridLogMessage << "BlockedGrid:" << std::endl; + std::cout << GridLogMessage << " _l = " << _l << std::endl; + std::cout << GridLogMessage << " _l_cb = " << _l_cb << std::endl; + std::cout << GridLogMessage << " _l_cb_o = " << _l_cb_o << std::endl; + std::cout << GridLogMessage << " _bs = " << _bs << std::endl; + std::cout << GridLogMessage << " _bs_cb = " << _bs_cb << std::endl; + + std::cout << GridLogMessage << " _nb = " << _nb << std::endl; + std::cout << GridLogMessage << " _nb_o = " << _nb_o << std::endl; + std::cout << GridLogMessage << " _blocks = " << _blocks << std::endl; + std::cout << GridLogMessage << " _o_blocks = " << _o_blocks << std::endl; + std::cout << GridLogMessage << " sizeof(vCoeff_t) = " << sizeof(vCoeff_t) << std::endl; + std::cout << GridLogMessage << " _cf_size = " << _cf_size << std::endl; + std::cout << GridLogMessage << " _cf_block_size = " << _cf_block_size << std::endl; + std::cout << GridLogMessage << " _block_sites = " << _block_sites << std::endl; + std::cout << GridLogMessage << " _grid->oSites() = " << _grid->oSites() << std::endl; + + // _grid->Barrier(); + //abort(); + } + + void block_to_coor(int b, std::vector& x0) { + + std::vector bcoor; + bcoor.resize(_nd); + x0.resize(_nd); + assert(b < _o_blocks); + Lexicographic::CoorFromIndex(bcoor,b,_nb_o); + int i; + + for (i=0;i<_nd;i++) { + x0[i] = bcoor[i]*_bs_cb[i]; + } + + //std::cout << GridLogMessage << "Map block b -> " << x0 << std::endl; + + } + + void block_site_to_o_coor(const std::vector& x0, std::vector& coor, int i) { + Lexicographic::CoorFromIndex(coor,i,_bs_cb); + for (int j=0;j<_nd;j++) + coor[j] += x0[j]; + } + + int block_site_to_o_site(const std::vector& x0, int i) { + std::vector coor; coor.resize(_nd); + block_site_to_o_coor(x0,coor,i); + Lexicographic::IndexFromCoor(coor,i,_l_cb_o); + return i; + } + + vCoeff_t block_sp(int b, const Field& x, const Field& y) { + + std::vector x0; + block_to_coor(b,x0); + + vCoeff_t ret = 0.0; + for (int i=0;i<_block_sites;i++) { // only odd sites + int ss = block_site_to_o_site(x0,i); + ret += TensorRemove(innerProduct(x._odata[ss],y._odata[ss])); + } + + return ret; + + } + + vCoeff_t block_sp(int b, const Field& x, const std::vector< ComplexD >& y) { + + std::vector x0; + block_to_coor(b,x0); + + constexpr int nsimd = sizeof(vCoeff_t) / sizeof(Coeff_t); + int lsize = _cf_o_block_size / _block_sites; + + std::vector< ComplexD > ret(nsimd); + for (int i=0;i + void vcaxpy(iScalar& r,const vCoeff_t& a,const iScalar& x,const iScalar& y) { + vcaxpy(r._internal,a,x._internal,y._internal); + } + + template + void vcaxpy(iVector& r,const vCoeff_t& a,const iVector& x,const iVector& y) { + for (int i=0;i x0; + block_to_coor(b,x0); + + for (int i=0;i<_block_sites;i++) { // only odd sites + int ss = block_site_to_o_site(x0,i); + vcaxpy(ret._odata[ss],a,x._odata[ss],y._odata[ss]); + } + + } + + void block_caxpy(int b, std::vector< ComplexD >& ret, const vCoeff_t& a, const Field& x, const std::vector< ComplexD >& y) { + std::vector x0; + block_to_coor(b,x0); + + constexpr int nsimd = sizeof(vCoeff_t) / sizeof(Coeff_t); + int lsize = _cf_o_block_size / _block_sites; + + for (int i=0;i<_block_sites;i++) { // only odd sites + int ss = block_site_to_o_site(x0,i); + + int n = lsize / nsimd; + for (int l=0;l& x) { + std::vector x0; + block_to_coor(b,x0); + + int lsize = _cf_o_block_size / _block_sites; + + for (int i=0;i<_block_sites;i++) { // only odd sites + int ss = block_site_to_o_site(x0,i); + + for (int 
l=0;l& x) { + std::vector x0; + block_to_coor(b,x0); + + int lsize = _cf_o_block_size / _block_sites; + + for (int i=0;i<_block_sites;i++) { // only odd sites + int ss = block_site_to_o_site(x0,i); + + for (int l=0;l + void vcscale(iScalar& r,const vCoeff_t& a,const iScalar& x) { + vcscale(r._internal,a,x._internal); + } + + template + void vcscale(iVector& r,const vCoeff_t& a,const iVector& x) { + for (int i=0;i x0; + block_to_coor(b,x0); + + for (int i=0;i<_block_sites;i++) { // only odd sites + int ss = block_site_to_o_site(x0,i); + vcscale(ret._odata[ss],a,ret._odata[ss]); + } + } + + void getCanonicalBlockOffset(int cb, std::vector& x0) { + const int ndim = 5; + assert(_nb.size() == ndim); + std::vector _nbc = { _nb[1], _nb[2], _nb[3], _nb[4], _nb[0] }; + std::vector _bsc = { _bs[1], _bs[2], _bs[3], _bs[4], _bs[0] }; + x0.resize(ndim); + + assert(cb >= 0); + assert(cb < _nbc[0]*_nbc[1]*_nbc[2]*_nbc[3]*_nbc[4]); + + Lexicographic::CoorFromIndex(x0,cb,_nbc); + int i; + + for (i=0;i& buf) { + std::vector _bsc = { _bs[1], _bs[2], _bs[3], _bs[4], _bs[0] }; + std::vector ldim = v._grid->LocalDimensions(); + std::vector cldim = { ldim[1], ldim[2], ldim[3], ldim[4], ldim[0] }; + const int _nbsc = _bs_cb[0]*_bs_cb[1]*_bs_cb[2]*_bs_cb[3]*_bs_cb[4]; + // take canonical block cb of v and put it in canonical ordering in buf + std::vector cx0; + getCanonicalBlockOffset(cb,cx0); + +#pragma omp parallel + { + std::vector co0,cl0; + co0=cx0; cl0=cx0; + +#pragma omp for + for (int i=0;i<_nbsc;i++) { + Lexicographic::CoorFromIndex(co0,2*i,_bsc); // 2* for eo + for (int j=0;j<(int)_bsc.size();j++) + cl0[j] = cx0[j] + co0[j]; + + std::vector l0 = { cl0[4], cl0[0], cl0[1], cl0[2], cl0[3] }; + int oi = v._grid->oIndex(l0); + int ii = v._grid->iIndex(l0); + int lti = i; + + //if (cb < 2 && i<2) + // std::cout << GridLogMessage << "Map: " << cb << ", " << i << " To: " << cl0 << ", " << cx0 << ", " << oi << ", " << ii << std::endl; + + for (int s=0;s<4;s++) + for (int c=0;c<3;c++) { + Coeff_t& ld = ((Coeff_t*)&v._odata[oi]._internal._internal[s]._internal[c])[ii]; + int ti = 12*lti + 3*s + c; + ld = Coeff_t(buf[2*ti+0], buf[2*ti+1]); + } + } + } + } + + void peekBlockOfVectorCanonical(int cb,const Field& v,std::vector& buf) { + std::vector _bsc = { _bs[1], _bs[2], _bs[3], _bs[4], _bs[0] }; + std::vector ldim = v._grid->LocalDimensions(); + std::vector cldim = { ldim[1], ldim[2], ldim[3], ldim[4], ldim[0] }; + const int _nbsc = _bs_cb[0]*_bs_cb[1]*_bs_cb[2]*_bs_cb[3]*_bs_cb[4]; + // take canonical block cb of v and put it in canonical ordering in buf + std::vector cx0; + getCanonicalBlockOffset(cb,cx0); + + buf.resize(_cf_block_size * 2); + +#pragma omp parallel + { + std::vector co0,cl0; + co0=cx0; cl0=cx0; + +#pragma omp for + for (int i=0;i<_nbsc;i++) { + Lexicographic::CoorFromIndex(co0,2*i,_bsc); // 2* for eo + for (int j=0;j<(int)_bsc.size();j++) + cl0[j] = cx0[j] + co0[j]; + + std::vector l0 = { cl0[4], cl0[0], cl0[1], cl0[2], cl0[3] }; + int oi = v._grid->oIndex(l0); + int ii = v._grid->iIndex(l0); + int lti = i; + + //if (cb < 2 && i<2) + // std::cout << GridLogMessage << "Map: " << cb << ", " << i << " To: " << cl0 << ", " << cx0 << ", " << oi << ", " << ii << std::endl; + + for (int s=0;s<4;s++) + for (int c=0;c<3;c++) { + Coeff_t& ld = ((Coeff_t*)&v._odata[oi]._internal._internal[s]._internal[c])[ii]; + int ti = 12*lti + 3*s + c; + buf[2*ti+0] = ld.real(); + buf[2*ti+1] = ld.imag(); + } + } + } + } + + int globalToLocalCanonicalBlock(int slot,const std::vector& src_nodes,int nb) { + // processor 
coordinate + int _nd = (int)src_nodes.size(); + std::vector _src_nodes = src_nodes; + std::vector pco(_nd); + Lexicographic::CoorFromIndex(pco,slot,_src_nodes); + std::vector cpco = { pco[1], pco[2], pco[3], pco[4], pco[0] }; + + // get local block + std::vector _nbc = { _nb[1], _nb[2], _nb[3], _nb[4], _nb[0] }; + assert(_nd == 5); + std::vector c_src_local_blocks(_nd); + for (int i=0;i<_nd;i++) { + assert(_grid->_fdimensions[i] % (src_nodes[i] * _bs[i]) == 0); + c_src_local_blocks[(i+4) % 5] = _grid->_fdimensions[i] / src_nodes[i] / _bs[i]; + } + std::vector cbcoor(_nd); // coordinate of block in slot in canonical form + Lexicographic::CoorFromIndex(cbcoor,nb,c_src_local_blocks); + + // cpco, cbcoor + std::vector clbcoor(_nd); + for (int i=0;i<_nd;i++) { + int cgcoor = cpco[i] * c_src_local_blocks[i] + cbcoor[i]; // global block coordinate + int pcoor = cgcoor / _nbc[i]; // processor coordinate in my Grid + int tpcoor = _grid->_processor_coor[(i+1)%5]; + if (pcoor != tpcoor) + return -1; + clbcoor[i] = cgcoor - tpcoor * _nbc[i]; // canonical local block coordinate for canonical dimension i + } + + int lnb; + Lexicographic::IndexFromCoor(clbcoor,lnb,_nbc); + //std::cout << "Mapped slot = " << slot << " nb = " << nb << " to " << lnb << std::endl; + return lnb; + } + + + }; + +} diff --git a/lib/algorithms/iterative/BlockImplicitlyRestartedLanczos/FieldBasisVector.h b/lib/algorithms/iterative/BlockImplicitlyRestartedLanczos/FieldBasisVector.h new file mode 100644 index 00000000..e715fc25 --- /dev/null +++ b/lib/algorithms/iterative/BlockImplicitlyRestartedLanczos/FieldBasisVector.h @@ -0,0 +1,163 @@ +namespace Grid { + +template +class BasisFieldVector { + public: + int _Nm; + + typedef typename Field::scalar_type Coeff_t; + typedef typename Field::vector_type vCoeff_t; + typedef typename Field::vector_object vobj; + typedef typename vobj::scalar_object sobj; + + std::vector _v; // _Nfull vectors + + void report(int n,GridBase* value) { + + std::cout << GridLogMessage << "BasisFieldVector allocated:\n"; + std::cout << GridLogMessage << " Delta N = " << n << "\n"; + std::cout << GridLogMessage << " Size of full vectors (size) = " << + ((double)n*sizeof(vobj)*value->oSites() / 1024./1024./1024.) 
<< " GB\n"; + std::cout << GridLogMessage << " Size = " << _v.size() << " Capacity = " << _v.capacity() << std::endl; + + value->Barrier(); + + if (value->IsBoss()) { + system("cat /proc/meminfo"); + } + + value->Barrier(); + + } + + BasisFieldVector(int Nm,GridBase* value) : _Nm(Nm), _v(Nm,value) { + report(Nm,value); + } + + ~BasisFieldVector() { + } + + Field& operator[](int i) { + return _v[i]; + } + + void orthogonalize(Field& w, int k) { + for(int j=0; j& Qt,int j0, int j1, int k0,int k1,int Nm) { + + GridBase* grid = _v[0]._grid; + +#pragma omp parallel + { + std::vector < vobj > B(Nm); + +#pragma omp for + for(int ss=0;ss < grid->oSites();ss++){ + for(int j=j0; j _Nm) + _v.reserve(n); + + _v.resize(n,_v[0]._grid); + + if (n < _Nm) + _v.shrink_to_fit(); + + report(n - _Nm,_v[0]._grid); + + _Nm = n; + } + + std::vector getIndex(std::vector& sort_vals) { + + std::vector idx(sort_vals.size()); + iota(idx.begin(), idx.end(), 0); + + // sort indexes based on comparing values in v + sort(idx.begin(), idx.end(), + [&sort_vals](int i1, int i2) {return ::fabs(sort_vals[i1]) < ::fabs(sort_vals[i2]);}); + + return idx; + } + + void reorderInPlace(std::vector& sort_vals, std::vector& idx) { + GridStopWatch gsw; + gsw.Start(); + + int nswaps = 0; + for (size_t i=0;i& sort_vals, bool reverse) { + + std::vector idx = getIndex(sort_vals); + if (reverse) + std::reverse(idx.begin(), idx.end()); + + reorderInPlace(sort_vals,idx); + + } + + void deflate(const std::vector& eval,const Field& src_orig,Field& result) { + result = zero; + int N = (int)_v.size(); + for (int i=0;i step) { + crc = crc32(crc,&data[blk],step); + blk += step; + len -= step; + } + + crc = crc32(crc,&data[blk],len); + return crc; + + } + + static int get_bfm_index( int* pos, int co, int* s ) { + + int ls = s[0]; + int NtHalf = s[4] / 2; + int simd_coor = pos[4] / NtHalf; + int regu_coor = (pos[1] + s[1] * (pos[2] + s[2] * ( pos[3] + s[3] * (pos[4] % NtHalf) ) )) / 2; + + return regu_coor * ls * 48 + pos[0] * 48 + co * 4 + simd_coor * 2; + } + + static void get_read_geometry(const GridBase* _grid,const std::vector& cnodes, + std::map >& slots, + std::vector& slot_lvol, + std::vector& lvol, + int64_t& slot_lsites,int& ntotal) { + + int _nd = (int)cnodes.size(); + std::vector nodes = cnodes; + + slots.clear(); + slot_lvol.clear(); + lvol.clear(); + + int i; + ntotal = 1; + int64_t lsites = 1; + slot_lsites = 1; + for (i=0;i<_nd;i++) { + assert(_grid->_fdimensions[i] % nodes[i] == 0); + slot_lvol.push_back(_grid->_fdimensions[i] / nodes[i]); + lvol.push_back(_grid->_fdimensions[i] / _grid->_processors[i]); + lsites *= lvol.back(); + slot_lsites *= slot_lvol.back(); + ntotal *= nodes[i]; + } + + std::vector lcoor, gcoor, scoor; + lcoor.resize(_nd); gcoor.resize(_nd); scoor.resize(_nd); + + // create mapping of indices to slots + for (int lidx = 0; lidx < lsites; lidx++) { + Lexicographic::CoorFromIndex(lcoor,lidx,lvol); + for (int i=0;i<_nd;i++) { + gcoor[i] = lcoor[i] + _grid->_processor_coor[i]*lvol[i]; + scoor[i] = gcoor[i] / slot_lvol[i]; + } + int slot; + Lexicographic::IndexFromCoor(scoor,slot,nodes); + auto sl = slots.find(slot); + if (sl == slots.end()) + slots[slot] = std::vector(); + slots[slot].push_back(lidx); + } + } + + static void canonical_block_to_coarse_coordinates(GridBase* _coarsegrid,int nb,int& ii,int& oi) { + // canonical nb needs to be mapped in a coordinate on my coarsegrid (ii,io) + std::vector _l = _coarsegrid->LocalDimensions(); + std::vector _cl = { _l[1], _l[2], _l[3], _l[4], _l[0] }; + std::vector 
_cc(_l.size()); + Lexicographic::CoorFromIndex(_cc,nb,_cl); + std::vector _c = { _cc[4], _cc[0], _cc[1], _cc[2], _cc[3] }; + ii = _coarsegrid->iIndex(_c); + oi = _coarsegrid->oIndex(_c); + } + + template + static bool read_argonne(BasisFieldVector& ret,const char* dir, const std::vector& cnodes) { + + GridBase* _grid = ret._v[0]._grid; + + std::map > slots; + std::vector slot_lvol, lvol; + int64_t slot_lsites; + int ntotal; + get_read_geometry(_grid,cnodes, + slots,slot_lvol,lvol,slot_lsites, + ntotal); + int _nd = (int)lvol.size(); + + // this is slow code to read the argonne file format for debugging purposes + int nperdir = ntotal / 32; + if (nperdir < 1) + nperdir=1; + std::cout << GridLogMessage << " Read " << dir << " nodes = " << cnodes << std::endl; + std::cout << GridLogMessage << " lvol = " << lvol << std::endl; + + // for error messages + char hostname[1024]; + gethostname(hostname, 1024); + + // now load one slot at a time and fill the vector + for (auto sl=slots.begin();sl!=slots.end();sl++) { + std::vector& idx = sl->second; + int slot = sl->first; + std::vector rdata; + + char buf[4096]; + + sprintf(buf,"%s/checksums.txt",dir); printf("read_argonne: Reading from %s\n",buf); + FILE* f = fopen(buf,"rt"); + if (!f) { + fprintf(stderr,"Node %s cannot read %s\n",hostname,buf); fflush(stderr); + return false; + } + + for (int l=0;l<3+slot;l++) + fgets(buf,sizeof(buf),f); + uint32_t crc_exp = strtol(buf, NULL, 16); + fclose(f); + + // load one slot vector + sprintf(buf,"%s/%2.2d/%10.10d",dir,slot/nperdir,slot); + f = fopen(buf,"rb"); + if (!f) { + fprintf(stderr,"Node %s cannot read %s\n",hostname,buf); fflush(stderr); + return false; + } + + fseeko(f,0,SEEK_END); + off_t total_size = ftello(f); + fseeko(f,0,SEEK_SET); + + int64_t size = slot_lsites / 2 * 24*4; + rdata.resize(size); + + assert(total_size % size == 0); + + int _Nfull = total_size / size; + ret._v.resize(_Nfull,ret._v[0]); + ret._Nm = _Nfull; + + uint32_t crc = 0x0; + GridStopWatch gsw,gsw2; + for (int nev = 0;nev < _Nfull;nev++) { + + gsw.Start(); + assert(fread(&rdata[0],size,1,f) == 1); + gsw.Stop(); + + gsw2.Start(); + crc = crc32_threaded((unsigned char*)&rdata[0],size,crc); + gsw2.Stop(); + + for (int i=0;i lcoor, gcoor, scoor, slcoor; + lcoor.resize(_nd); gcoor.resize(_nd); + slcoor.resize(_nd); scoor.resize(_nd); + +#pragma omp for + for (int64_t lidx = 0; lidx < idx.size(); lidx++) { + int llidx = idx[lidx]; + Lexicographic::CoorFromIndex(lcoor,llidx,lvol); + for (int i=0;i<_nd;i++) { + gcoor[i] = lcoor[i] + _grid->_processor_coor[i]*lvol[i]; + scoor[i] = gcoor[i] / slot_lvol[i]; + slcoor[i] = gcoor[i] - scoor[i]*slot_lvol[i]; + } + + if ((lcoor[1]+lcoor[2]+lcoor[3]+lcoor[4]) % 2 == 1) { + // poke + iScalar, 4> > sc; + for (int s=0;s<4;s++) + for (int c=0;c<3;c++) + sc()(s)(c) = *(std::complex*)&rdata[get_bfm_index(&slcoor[0],c+s*3, &slot_lvol[0] )]; + + pokeLocalSite(sc,ret._v[nev],lcoor); + } + + } + } + } + + fclose(f); + std::cout << GridLogMessage << "Loading slot " << slot << " with " << idx.size() << " points and " + << _Nfull << " vectors in " + << gsw.Elapsed() << " at " + << ( (double)size * _Nfull / 1024./1024./1024. / gsw.useconds()*1000.*1000. ) + << " GB/s " << " crc32 = " << std::hex << crc << " crc32_expected = " << crc_exp << std::dec + << " computed at " + << ( (double)size * _Nfull / 1024./1024./1024. / gsw2.useconds()*1000.*1000. 
) + << " GB/s " + << std::endl; + + assert(crc == crc_exp); + } + + _grid->Barrier(); + std::cout << GridLogMessage << "Loading complete" << std::endl; + + return true; + } + + template + static bool read_argonne(BasisFieldVector& ret,const char* dir) { + + + GridBase* _grid = ret._v[0]._grid; + + char buf[4096]; + sprintf(buf,"%s/nodes.txt",dir); + FILE* f = fopen(buf,"rt"); + if (!f) { + if (_grid->IsBoss()) { + fprintf(stderr,"Attempting to load eigenvectors without secifying node layout failed due to absence of nodes.txt\n"); + fflush(stderr); + } + return false; + } + + + std::vector nodes((int)_grid->_processors.size()); + for (int i =0;i<(int)_grid->_processors.size();i++) + assert(fscanf(f,"%d\n",&nodes[i])==1); + fclose(f); + + return read_argonne(ret,dir,nodes); + } + + static void flush_bytes(FILE* f, std::vector& fbuf) { + if (fbuf.size()) { + + if (fwrite(&fbuf[0],fbuf.size(),1,f) != 1) { + fprintf(stderr,"Write failed of %g GB!\n",(double)fbuf.size() / 1024./1024./1024.); + exit(2); + } + + fbuf.resize(0); + + } + } + + static void write_bytes(void* buf, int64_t s, FILE* f, std::vector& fbuf, uint32_t& crc) { + static double data_counter = 0.0; + static GridStopWatch gsw_crc, gsw_flush1,gsw_flush2,gsw_write,gsw_memcpy; + if (s == 0) + return; + + // checksum + gsw_crc.Start(); + crc = crc32_threaded((unsigned char*)buf,s,crc); + gsw_crc.Stop(); + + if (s > fbuf.capacity()) { + // cannot buffer this, so first flush current buffer contents and then write this directly to file + gsw_flush1.Start(); + flush_bytes(f,fbuf); + gsw_flush1.Stop(); + + gsw_write.Start(); + if (fwrite(buf,s,1,f) != 1) { + fprintf(stderr,"Write failed of %g GB!\n",(double)s / 1024./1024./1024.); + exit(2); + } + gsw_write.Stop(); + + } + + // no room left in buffer, flush to disk + if (fbuf.size() + s > fbuf.capacity()) { + gsw_flush2.Start(); + flush_bytes(f,fbuf); + gsw_flush2.Stop(); + } + + // then fill buffer again + { + gsw_memcpy.Start(); + size_t t = fbuf.size(); + fbuf.resize(t + s); + memcpy(&fbuf[t],buf,s); + gsw_memcpy.Stop(); + } + + data_counter += (double)s; + if (data_counter > 1024.*1024.*20.) { + std::cout << GridLogMessage << "Writing " << ((double)data_counter / 1024./1024./1024.) 
<< " GB at" + " crc = " << gsw_crc.Elapsed() << " flush1 = " << gsw_flush1.Elapsed() << " flush2 = " << gsw_flush2.Elapsed() << + " write = " << gsw_write.Elapsed() << " memcpy = " << gsw_memcpy.Elapsed() << std::endl; + data_counter = 0.0; + gsw_crc.Reset(); + gsw_write.Reset(); + gsw_memcpy.Reset(); + gsw_flush1.Reset(); + gsw_flush2.Reset(); + } + } + + static void write_floats(FILE* f, std::vector& fbuf, uint32_t& crc, float* buf, int64_t n) { + write_bytes(buf,n*sizeof(float),f,fbuf,crc); + } + + static void read_floats(char* & ptr, float* out, int64_t n) { + float* in = (float*)ptr; + ptr += 4*n; + + for (int64_t i=0;i 0, [0,6] -> 1; reconstruct 0 -> -3, 1-> 3 + // + // N=2 + // [-6,-2] -> 0, [-2,2] -> 1, [2,6] -> 2; reconstruct 0 -> -4, 1->0, 2->4 + int ret = (int) ( (float)(N+1) * ( (in - min) / (max - min) ) ); + if (ret == N+1) { + ret = N; + } + return ret; + } + + static float fp_unmap(int val, float min, float max, int N) { + return min + (float)(val + 0.5) * (max - min) / (float)( N + 1 ); + } + +#define SHRT_UMAX 65535 +#define FP16_BASE 1.4142135623730950488 +#define FP16_COEF_EXP_SHARE_FLOATS 10 + static float unmap_fp16_exp(unsigned short e) { + float de = (float)((int)e - SHRT_UMAX / 2); + return ::pow( FP16_BASE, de ); + } + + // can assume that v >=0 and need to guarantee that unmap_fp16_exp(map_fp16_exp(v)) >= v + static unsigned short map_fp16_exp(float v) { + // float has exponents 10^{-44.85} .. 10^{38.53} + int exp = (int)ceil(::log(v) / ::log(FP16_BASE)) + SHRT_UMAX / 2; + if (exp < 0 || exp > SHRT_UMAX) { + fprintf(stderr,"Error in map_fp16_exp(%g,%d)\n",v,exp); + exit(3); + } + + return (unsigned short)exp; + } + + template + static void read_floats_fp16(char* & ptr, OPT* out, int64_t n, int nsc) { + + int64_t nsites = n / nsc; + if (n % nsc) { + fprintf(stderr,"Invalid size in write_floats_fp16\n"); + exit(4); + } + + unsigned short* in = (unsigned short*)ptr; + ptr += 2*(n+nsites); + + // do for each site + for (int64_t site = 0;site + static void write_floats_fp16(FILE* f, std::vector& fbuf, uint32_t& crc, OPT* in, int64_t n, int nsc) { + + int64_t nsites = n / nsc; + if (n % nsc) { + fprintf(stderr,"Invalid size in write_floats_fp16\n"); + exit(4); + } + + unsigned short* buf = (unsigned short*)malloc( sizeof(short) * (n + nsites) ); + if (!buf) { + fprintf(stderr,"Out of mem\n"); + exit(1); + } + + // do for each site +#pragma omp parallel for + for (int64_t site = 0;site max) + max = fabs(ev[i]); + } + + unsigned short exp = map_fp16_exp(max); + max = unmap_fp16_exp(exp); + min = -max; + + *bptr++ = exp; + + for (int i=0;i SHRT_UMAX) { + fprintf(stderr,"Assert failed: val = %d (%d), ev[i] = %.15g, max = %.15g, exp = %d\n",val,SHRT_UMAX,ev[i],max,(int)exp); + exit(48); + } + *bptr++ = (unsigned short)val; + } + + } + + write_bytes(buf,sizeof(short)*(n + nsites),f,fbuf,crc); + + free(buf); + } + + template + static bool read_compressed_vectors(const char* dir,BlockProjector& pr,BasisFieldVector& coef, int ngroups = 1) { + + const BasisFieldVector& basis = pr._evec; + GridBase* _grid = basis._v[0]._grid; + + // for error messages + char hostname[1024]; + gethostname(hostname, 1024); + + std::cout << GridLogMessage << "Ready on host " << hostname << " with " << ngroups << " reader groups" << std::endl; + + // first read metadata + char buf[4096]; + sprintf(buf,"%s/metadata.txt",dir); + + std::vector s,b,nb,nn,crc32; + s.resize(5); b.resize(5); nb.resize(5); nn.resize(5); + uint32_t neig, nkeep, nkeep_single, blocks, _FP16_COEF_EXP_SHARE_FLOATS; + uint32_t 
nprocessors = 1; + + FILE* f = 0; + uint32_t status = 0; + if (_grid->IsBoss()) { + f = fopen(buf,"rb"); + status=f ? 1 : 0; + } + _grid->GlobalSum(status); + std::cout << GridLogMessage << "Read params status " << status << std::endl; + + if (!status) { + return false; + } + +#define _IRL_READ_INT(buf,p) if (f) { assert(fscanf(f,buf,p)==1); } else { *(p) = 0; } _grid->GlobalSum(*(p)); + + for (int i=0;i<5;i++) { + sprintf(buf,"s[%d] = %%d\n",i); + _IRL_READ_INT(buf,&s[(i+1)%5]); + } + for (int i=0;i<5;i++) { + sprintf(buf,"b[%d] = %%d\n",i); + _IRL_READ_INT(buf,&b[(i+1)%5]); + } + for (int i=0;i<5;i++) { + sprintf(buf,"nb[%d] = %%d\n",i); + _IRL_READ_INT(buf,&nb[(i+1)%5]); + } + _IRL_READ_INT("neig = %d\n",&neig); + _IRL_READ_INT("nkeep = %d\n",&nkeep); + _IRL_READ_INT("nkeep_single = %d\n",&nkeep_single); + _IRL_READ_INT("blocks = %d\n",&blocks); + _IRL_READ_INT("FP16_COEF_EXP_SHARE_FLOATS = %d\n",&_FP16_COEF_EXP_SHARE_FLOATS); + + for (int i=0;i<5;i++) { + assert(_grid->FullDimensions()[i] % s[i] == 0); + nn[i] = _grid->FullDimensions()[i] / s[i]; + nprocessors *= nn[i]; + } + + std::cout << GridLogMessage << "Reading data that was generated on node-layout " << nn << std::endl; + + crc32.resize(nprocessors); + for (int i =0;i > slots; + std::vector slot_lvol, lvol; + int64_t slot_lsites; + int ntotal; + std::vector _nn(nn.begin(),nn.end()); + get_read_geometry(_grid,_nn, + slots,slot_lvol,lvol,slot_lsites, + ntotal); + int _nd = (int)lvol.size(); + + // types + typedef typename Field::scalar_type Coeff_t; + typedef typename CoarseField::scalar_type CoeffCoarse_t; + + // slot layout + int nperdir = ntotal / 32; + if (nperdir < 1) + nperdir=1; + + // add read groups + for (int ngroup=0;ngroupThisRank() % ngroups == ngroup; + + std::cout << GridLogMessage << "Reading in group " << ngroup << " / " << ngroups << std::endl; + + // load all necessary slots and store them appropriately + for (auto sl=slots.begin();sl!=slots.end();sl++) { + + std::vector& idx = sl->second; + int slot = sl->first; + std::vector rdata; + + char buf[4096]; + + if (action) { + // load one slot vector + sprintf(buf,"%s/%2.2d/%10.10d.compressed",dir,slot/nperdir,slot); + f = fopen(buf,"rb"); + if (!f) { + fprintf(stderr,"Node %s cannot read %s\n",hostname,buf); fflush(stderr); + return false; + } + } + + uint32_t crc = 0x0; + off_t size; + + GridStopWatch gsw; + _grid->Barrier(); + gsw.Start(); + + std::vector raw_in(0); + if (action) { + fseeko(f,0,SEEK_END); + size = ftello(f); + fseeko(f,0,SEEK_SET); + + raw_in.resize(size); + assert(fread(&raw_in[0],size,1,f) == 1); + } + + _grid->Barrier(); + gsw.Stop(); + + RealD totalGB = (RealD)size / 1024./1024./1024 * _grid->_Nprocessors; + RealD seconds = gsw.useconds() / 1e6; + + if (action) { + std::cout << GridLogMessage << "[" << slot << "] Read " << totalGB << " GB of compressed data at " << totalGB/seconds << " GB/s" << std::endl; + + uint32_t crc_comp = crc32_threaded((unsigned char*)&raw_in[0],size,0); + + if (crc_comp != crc32[slot]) { + std::cout << "Node " << hostname << " found crc mismatch for file " << buf << " (" << std::hex << crc_comp << " vs " << crc32[slot] << std::dec << ")" << std::endl; + std::cout << "Byte size: " << size << std::endl; + } + + assert(crc_comp == crc32[slot]); + } + + _grid->Barrier(); + + if (action) { + fclose(f); + } + + char* ptr = &raw_in[0]; + + GridStopWatch gsw2; + gsw2.Start(); + if (action) { + int nsingleCap = nkeep_single; + if (pr._evec.size() < nsingleCap) + nsingleCap = pr._evec.size(); + + int _cf_block_size = 
slot_lsites * 12 / 2 / blocks; + +#define FP_16_SIZE(a,b) (( (a) + (a/b) )*2) + + // first read single precision basis vectors +#pragma omp parallel + { + std::vector buf(_cf_block_size * 2); +#pragma omp for + for (int nb=0;nb buf(_cf_block_size * 2); +#pragma omp for + for (int nb=0;nb buf1(nkeep_single*2); + std::vector buf2((nkeep - nkeep_single)*2); + +#pragma omp for + for (int j=0;j<(int)coef.size();j++) + for (int nb=0;nb + static void write_compressed_vectors(const char* dir,const BlockProjector& pr, + const BasisFieldVector& coef, + int nsingle,int writer_nodes = 0) { + + GridStopWatch gsw; + + const BasisFieldVector& basis = pr._evec; + GridBase* _grid = basis._v[0]._grid; + std::vector _l = _grid->FullDimensions(); + for (int i=0;i<(int)_l.size();i++) + _l[i] /= _grid->_processors[i]; + + _grid->Barrier(); + gsw.Start(); + + char buf[4096]; + + // Making the directories is somewhat tricky. + // If we run on a joint filesystem we would just + // have the boss create the directories and then + // have a barrier. We also want to be able to run + // on local /scratch, so potentially all nodes need + // to create their own directories. So do the following + // for now. + for (int j=0;j<_grid->_Nprocessors;j++) { + if (j == _grid->ThisRank()) { + conditionalMkDir(dir); + for (int i=0;i<32;i++) { + sprintf(buf,"%s/%2.2d",dir,i); + conditionalMkDir(buf); + } + _grid->Barrier(); // make sure directories are ready + } + } + + + typedef typename Field::scalar_type Coeff_t; + typedef typename CoarseField::scalar_type CoeffCoarse_t; + + int nperdir = _grid->_Nprocessors / 32; + if (nperdir < 1) + nperdir=1; + + int slot; + Lexicographic::IndexFromCoor(_grid->_processor_coor,slot,_grid->_processors); + + int64_t off = 0x0; + uint32_t crc = 0x0; + if (writer_nodes < 1) + writer_nodes = _grid->_Nprocessors; + int groups = _grid->_Nprocessors / writer_nodes; + if (groups<1) + groups = 1; + + std::cout << GridLogMessage << " Write " << dir << " nodes = " << writer_nodes << std::endl; + + for (int group=0;groupBarrier(); + if (_grid->ThisRank() % groups == group) { + + sprintf(buf,"%s/%2.2d/%10.10d.compressed",dir,slot/nperdir,slot); + FILE* f = fopen(buf,"wb"); + assert(f); + + //buffer does not seem to help + //assert(!setvbuf ( f , NULL , _IOFBF , 1024*1024*2 )); + + int nsingleCap = nsingle; + if (pr._evec.size() < nsingleCap) + nsingleCap = pr._evec.size(); + + GridStopWatch gsw1,gsw2,gsw3,gsw4,gsw5; + + gsw1.Start(); + + std::vector fbuf; + fbuf.reserve( 1024 * 1024 * 8 ); + + // first write single precision basis vectors + for (int nb=0;nb buf; + pr._bgrid.peekBlockOfVectorCanonical(nb,pr._evec._v[i],buf); + +#if 0 + { + RealD nrm = 0.0; + for (int j=0;j<(int)buf.size();j++) + nrm += buf[j]*buf[j]; + std::cout << GridLogMessage << "Norm: " << nrm << std::endl; + } +#endif + write_floats(f,fbuf,crc, &buf[0], buf.size() ); + } + } + + gsw1.Stop(); + gsw2.Start(); + + // then write fixed precision basis vectors + for (int nb=0;nb buf; + pr._bgrid.peekBlockOfVectorCanonical(nb,pr._evec._v[i],buf); + write_floats_fp16(f,fbuf,crc, &buf[0], buf.size(), 24); + } + } + + gsw2.Stop(); + assert(coef._v[0]._grid->_isites*coef._v[0]._grid->_osites == pr._bgrid._blocks); + + gsw3.Start(); + for (int j=0;j<(int)coef.size();j++) { + + int64_t size1 = nsingleCap*2; + int64_t size2 = 2*(pr._evec.size()-nsingleCap); + int64_t size = size1; + if (size2>size) + size=size2; + std::vector buf(size); + + //RealD nrmTest = 0.0; + for (int nb=0;nbGlobalSum(nrmTest); + //std::cout << GridLogMessage << "Test norm: " 
<< nrmTest << std::endl; + } + gsw3.Stop(); + + flush_bytes(f,fbuf); + + off = ftello(f); + fclose(f); + + std::cout<Barrier(); + gsw.Stop(); + + RealD totalGB = (RealD)off / 1024./1024./1024 * _grid->_Nprocessors; + RealD seconds = gsw.useconds() / 1e6; + std::cout << GridLogMessage << "Write " << totalGB << " GB of compressed data at " << totalGB/seconds << " GB/s in " << seconds << " s" << std::endl; + + // gather crcs + std::vector crcs(_grid->_Nprocessors); + for (int i=0;i<_grid->_Nprocessors;i++) { + crcs[i] = 0x0; + } + crcs[slot] = crc; + for (int i=0;i<_grid->_Nprocessors;i++) { + _grid->GlobalSum(crcs[i]); + } + + if (_grid->IsBoss()) { + sprintf(buf,"%s/metadata.txt",dir); + FILE* f = fopen(buf,"wb"); + assert(f); + for (int i=0;i<5;i++) + fprintf(f,"s[%d] = %d\n",i,_grid->FullDimensions()[(i+1)%5] / _grid->_processors[(i+1)%5]); + for (int i=0;i<5;i++) + fprintf(f,"b[%d] = %d\n",i,pr._bgrid._bs[(i+1)%5]); + for (int i=0;i<5;i++) + fprintf(f,"nb[%d] = %d\n",i,pr._bgrid._nb[(i+1)%5]); + fprintf(f,"neig = %d\n",(int)coef.size()); + fprintf(f,"nkeep = %d\n",(int)pr._evec.size()); + fprintf(f,"nkeep_single = %d\n",nsingle); + fprintf(f,"blocks = %d\n",pr._bgrid._blocks); + fprintf(f,"FP16_COEF_EXP_SHARE_FLOATS = %d\n",FP16_COEF_EXP_SHARE_FLOATS); + for (int i =0;i<_grid->_Nprocessors;i++) + fprintf(f,"crc32[%d] = %X\n",i,crcs[i]); + fclose(f); + } + + } + + template + static void write_argonne(const BasisFieldVector& ret,const char* dir) { + + GridBase* _grid = ret._v[0]._grid; + std::vector _l = _grid->FullDimensions(); + for (int i=0;i<(int)_l.size();i++) + _l[i] /= _grid->_processors[i]; + + char buf[4096]; + + if (_grid->IsBoss()) { + mkdir(dir,ACCESSPERMS); + + for (int i=0;i<32;i++) { + sprintf(buf,"%s/%2.2d",dir,i); + mkdir(buf,ACCESSPERMS); + } + } + + _grid->Barrier(); // make sure directories are ready + + + int nperdir = _grid->_Nprocessors / 32; + if (nperdir < 1) + nperdir=1; + std::cout << GridLogMessage << " Write " << dir << " nodes = " << _grid->_Nprocessors << std::endl; + + int slot; + Lexicographic::IndexFromCoor(_grid->_processor_coor,slot,_grid->_processors); + //printf("Slot: %d <> %d\n",slot, _grid->ThisRank()); + + sprintf(buf,"%s/%2.2d/%10.10d",dir,slot/nperdir,slot); + FILE* f = fopen(buf,"wb"); + assert(f); + + int N = (int)ret._v.size(); + uint32_t crc = 0x0; + int64_t cf_size = _grid->oSites()*_grid->iSites()*12; + std::vector< float > rdata(cf_size*2); + + GridStopWatch gsw1,gsw2; + + for (int i=0;i coor(_l.size()); + for (coor[1] = 0;coor[1]<_l[1];coor[1]++) { + for (coor[2] = 0;coor[2]<_l[2];coor[2]++) { + for (coor[3] = 0;coor[3]<_l[3];coor[3]++) { + for (coor[4] = 0;coor[4]<_l[4];coor[4]++) { + for (coor[0] = 0;coor[0]<_l[0];coor[0]++) { + + if ((coor[1]+coor[2]+coor[3]+coor[4]) % 2 == 1) { + // peek + iScalar, 4> > sc; + peekLocalSite(sc,ret._v[i],coor); + for (int s=0;s<4;s++) + for (int c=0;c<3;c++) + *(std::complex*)&rdata[get_bfm_index(&coor[0],c+s*3, &_l[0] )] = sc()(s)(c); + } + } + } + } + } + } + + // endian flip + for (int i=0;i crcs(_grid->_Nprocessors); + for (int i=0;i<_grid->_Nprocessors;i++) { + crcs[i] = 0x0; + } + crcs[slot] = crc; + for (int i=0;i<_grid->_Nprocessors;i++) { + _grid->GlobalSum(crcs[i]); + } + + if (_grid->IsBoss()) { + sprintf(buf,"%s/checksums.txt",dir); + FILE* f = fopen(buf,"wt"); + assert(f); + fprintf(f,"00000000\n\n"); + for (int i =0;i<_grid->_Nprocessors;i++) + fprintf(f,"%X\n",crcs[i]); + fclose(f); + + sprintf(buf,"%s/nodes.txt",dir); + f = fopen(buf,"wt"); + assert(f); + for (int i 
=0;i<(int)_grid->_processors.size();i++) + fprintf(f,"%d\n",_grid->_processors[i]); + fclose(f); + } + + + std::cout << GridLogMessage << "Writing slot " << slot << " with " + << N << " vectors in " + << gsw2.Elapsed() << " at " + << ( (double)cf_size*2*4 * N / 1024./1024./1024. / gsw2.useconds()*1000.*1000. ) + << " GB/s with crc computed at " + << ( (double)cf_size*2*4 * N / 1024./1024./1024. / gsw1.useconds()*1000.*1000. ) + << " GB/s " + << std::endl; + + _grid->Barrier(); + std::cout << GridLogMessage << "Writing complete" << std::endl; + + } + } + +} diff --git a/lib/algorithms/iterative/ConjugateGradient.h b/lib/algorithms/iterative/ConjugateGradient.h index ed453161..5c968e04 100644 --- a/lib/algorithms/iterative/ConjugateGradient.h +++ b/lib/algorithms/iterative/ConjugateGradient.h @@ -52,8 +52,8 @@ class ConjugateGradient : public OperatorFunction { MaxIterations(maxit), ErrorOnNoConverge(err_on_no_conv){}; - void operator()(LinearOperatorBase &Linop, const Field &src, - Field &psi) { + void operator()(LinearOperatorBase &Linop, const Field &src, Field &psi) { + psi.checkerboard = src.checkerboard; conformable(psi, src); diff --git a/lib/algorithms/iterative/SchurRedBlack.h b/lib/algorithms/iterative/SchurRedBlack.h index 5caabb4b..a309386b 100644 --- a/lib/algorithms/iterative/SchurRedBlack.h +++ b/lib/algorithms/iterative/SchurRedBlack.h @@ -53,16 +53,110 @@ Author: Peter Boyle * M psi = eta *********************** *Odd - * i) (D_oo)^{\dag} D_oo psi_o = (D_oo)^dag L^{-1} eta_o + * i) D_oo psi_o = L^{-1} eta_o * eta_o' = (D_oo)^dag (eta_o - Moe Mee^{-1} eta_e) + * (D_oo)^{\dag} D_oo psi_o = (D_oo)^dag L^{-1} eta_o *Even * ii) Mee psi_e + Meo psi_o = src_e * * => sol_e = M_ee^-1 * ( src_e - Meo sol_o )... * + * + * TODO: Other options: + * + * a) change checkerboards for Schur e<->o + * + * Left precon by Moo^-1 + * b) Doo^{dag} M_oo^-dag Moo^-1 Doo psi_0 = (D_oo)^dag M_oo^-dag Moo^-1 L^{-1} eta_o + * eta_o' = (D_oo)^dag M_oo^-dag Moo^-1 (eta_o - Moe Mee^{-1} eta_e) + * + * Right precon by Moo^-1 + * c) M_oo^-dag Doo^{dag} Doo Moo^-1 phi_0 = M_oo^-dag (D_oo)^dag L^{-1} eta_o + * eta_o' = M_oo^-dag (D_oo)^dag (eta_o - Moe Mee^{-1} eta_e) + * psi_o = M_oo^-1 phi_o + * TODO: Deflation */ namespace Grid { + /////////////////////////////////////////////////////////////////////////////////////////////////////// + // Take a matrix and form a Red Black solver calling a Herm solver + // Use of RB info prevents making SchurRedBlackSolve conform to standard interface + /////////////////////////////////////////////////////////////////////////////////////////////////////// + + template class SchurRedBlackStaggeredSolve { + private: + OperatorFunction & _HermitianRBSolver; + int CBfactorise; + public: + + ///////////////////////////////////////////////////// + // Wrap the usual normal equations Schur trick + ///////////////////////////////////////////////////// + SchurRedBlackStaggeredSolve(OperatorFunction &HermitianRBSolver) : + _HermitianRBSolver(HermitianRBSolver) + { + CBfactorise=0; + }; + + template + void operator() (Matrix & _Matrix,const Field &in, Field &out){ + + // FIXME CGdiagonalMee not implemented virtual function + // FIXME use CBfactorise to control schur decomp + GridBase *grid = _Matrix.RedBlackGrid(); + GridBase *fgrid= _Matrix.Grid(); + + SchurStaggeredOperator _HermOpEO(_Matrix); + + Field src_e(grid); + Field src_o(grid); + Field sol_e(grid); + Field sol_o(grid); + Field tmp(grid); + Field Mtmp(grid); + Field resid(fgrid); + + pickCheckerboard(Even,src_e,in); + 
pickCheckerboard(Odd ,src_o,in); + pickCheckerboard(Even,sol_e,out); + pickCheckerboard(Odd ,sol_o,out); + + ///////////////////////////////////////////////////// + // src_o = Mdag * (source_o - Moe MeeInv source_e) + ///////////////////////////////////////////////////// + _Matrix.MooeeInv(src_e,tmp); assert( tmp.checkerboard ==Even); + _Matrix.Meooe (tmp,Mtmp); assert( Mtmp.checkerboard ==Odd); + tmp=src_o-Mtmp; assert( tmp.checkerboard ==Odd); + + _Matrix.Mooee(tmp,src_o); assert(src_o.checkerboard ==Odd); + + ////////////////////////////////////////////////////////////// + // Call the red-black solver + ////////////////////////////////////////////////////////////// + std::cout< using SchurRedBlackStagSolve = SchurRedBlackStaggeredSolve; + /////////////////////////////////////////////////////////////////////////////////////////////////////// // Take a matrix and form a Red Black solver calling a Herm solver // Use of RB info prevents making SchurRedBlackSolve conform to standard interface @@ -76,12 +170,10 @@ namespace Grid { ///////////////////////////////////////////////////// // Wrap the usual normal equations Schur trick ///////////////////////////////////////////////////// - SchurRedBlackDiagMooeeSolve(OperatorFunction &HermitianRBSolver) : - _HermitianRBSolver(HermitianRBSolver) - { - CBfactorise=0; - }; - + SchurRedBlackDiagMooeeSolve(OperatorFunction &HermitianRBSolver,int cb=0) : _HermitianRBSolver(HermitianRBSolver) + { + CBfactorise=cb; + }; template void operator() (Matrix & _Matrix,const Field &in, Field &out){ @@ -141,5 +233,166 @@ namespace Grid { } }; + + /////////////////////////////////////////////////////////////////////////////////////////////////////// + // Take a matrix and form a Red Black solver calling a Herm solver + // Use of RB info prevents making SchurRedBlackSolve conform to standard interface + /////////////////////////////////////////////////////////////////////////////////////////////////////// + template class SchurRedBlackDiagTwoSolve { + private: + OperatorFunction & _HermitianRBSolver; + int CBfactorise; + public: + + ///////////////////////////////////////////////////// + // Wrap the usual normal equations Schur trick + ///////////////////////////////////////////////////// + SchurRedBlackDiagTwoSolve(OperatorFunction &HermitianRBSolver) : + _HermitianRBSolver(HermitianRBSolver) + { + CBfactorise=0; + }; + + template + void operator() (Matrix & _Matrix,const Field &in, Field &out){ + + // FIXME CGdiagonalMee not implemented virtual function + // FIXME use CBfactorise to control schur decomp + GridBase *grid = _Matrix.RedBlackGrid(); + GridBase *fgrid= _Matrix.Grid(); + + SchurDiagTwoOperator _HermOpEO(_Matrix); + + Field src_e(grid); + Field src_o(grid); + Field sol_e(grid); + Field sol_o(grid); + Field tmp(grid); + Field Mtmp(grid); + Field resid(fgrid); + + pickCheckerboard(Even,src_e,in); + pickCheckerboard(Odd ,src_o,in); + pickCheckerboard(Even,sol_e,out); + pickCheckerboard(Odd ,sol_o,out); + + ///////////////////////////////////////////////////// + // src_o = Mdag * (source_o - Moe MeeInv source_e) + ///////////////////////////////////////////////////// + _Matrix.MooeeInv(src_e,tmp); assert( tmp.checkerboard ==Even); + _Matrix.Meooe (tmp,Mtmp); assert( Mtmp.checkerboard ==Odd); + tmp=src_o-Mtmp; assert( tmp.checkerboard ==Odd); + + // get the right MpcDag + _HermOpEO.MpcDag(tmp,src_o); assert(src_o.checkerboard ==Odd); + + ////////////////////////////////////////////////////////////// + // Call the red-black solver + 
////////////////////////////////////////////////////////////// + std::cout< class SchurRedBlackDiagTwoMixed { + private: + LinearFunction & _HermitianRBSolver; + int CBfactorise; + public: + + ///////////////////////////////////////////////////// + // Wrap the usual normal equations Schur trick + ///////////////////////////////////////////////////// + SchurRedBlackDiagTwoMixed(LinearFunction &HermitianRBSolver) : + _HermitianRBSolver(HermitianRBSolver) + { + CBfactorise=0; + }; + + template + void operator() (Matrix & _Matrix,const Field &in, Field &out){ + + // FIXME CGdiagonalMee not implemented virtual function + // FIXME use CBfactorise to control schur decomp + GridBase *grid = _Matrix.RedBlackGrid(); + GridBase *fgrid= _Matrix.Grid(); + + SchurDiagTwoOperator _HermOpEO(_Matrix); + + Field src_e(grid); + Field src_o(grid); + Field sol_e(grid); + Field sol_o(grid); + Field tmp(grid); + Field Mtmp(grid); + Field resid(fgrid); + + pickCheckerboard(Even,src_e,in); + pickCheckerboard(Odd ,src_o,in); + pickCheckerboard(Even,sol_e,out); + pickCheckerboard(Odd ,sol_o,out); + + ///////////////////////////////////////////////////// + // src_o = Mdag * (source_o - Moe MeeInv source_e) + ///////////////////////////////////////////////////// + _Matrix.MooeeInv(src_e,tmp); assert( tmp.checkerboard ==Even); + _Matrix.Meooe (tmp,Mtmp); assert( Mtmp.checkerboard ==Odd); + tmp=src_o-Mtmp; assert( tmp.checkerboard ==Odd); + + // get the right MpcDag + _HermOpEO.MpcDag(tmp,src_o); assert(src_o.checkerboard ==Odd); + + ////////////////////////////////////////////////////////////// + // Call the red-black solver + ////////////////////////////////////////////////////////////// + std::cout< friend class Lattice; GridBase(const std::vector & processor_grid) : CartesianCommunicator(processor_grid) {}; + GridBase(const std::vector & processor_grid, + const CartesianCommunicator &parent) : CartesianCommunicator(processor_grid,parent) {}; // Physics Grid information. std::vector _simd_layout;// Which dimensions get relayed out over simd lanes. @@ -210,9 +212,6 @@ public: assert(lidx & gcoor,int & gidx){ gidx=0; int mult=1; diff --git a/lib/cartesian/Cartesian_full.h b/lib/cartesian/Cartesian_full.h index 815e3b22..a6a85ab7 100644 --- a/lib/cartesian/Cartesian_full.h +++ b/lib/cartesian/Cartesian_full.h @@ -61,9 +61,29 @@ public: virtual int CheckerBoardShift(int source_cb,int dim,int shift, int osite){ return shift; } + ///////////////////////////////////////////////////////////////////////// + // Constructor takes a parent grid and possibly subdivides communicator. 
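+ // Editorial sketch (hypothetical sizes, not part of the original commit):
+ //   std::vector<int> latt = GridDefaultLatt();
+ //   std::vector<int> simd = GridDefaultSimd(Nd,vComplex::Nsimd());
+ //   std::vector<int> mpi_full  = GridDefaultMpi();  // e.g. {2,2,2,2} = 16 ranks
+ //   std::vector<int> mpi_split = {1,1,1,1};         // one rank per split grid
+ //   GridCartesian Full (latt,simd,mpi_full);        // lives on the world communicator
+ //   GridCartesian Split(latt,simd,mpi_split,Full);  // subdivides Full's communicator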
+ ///////////////////////////////////////////////////////////////////////// GridCartesian(const std::vector &dimensions, - const std::vector &simd_layout, - const std::vector &processor_grid) : GridBase(processor_grid) + const std::vector &simd_layout, + const std::vector &processor_grid, + const GridCartesian &parent) : GridBase(processor_grid,parent) + { + Init(dimensions,simd_layout,processor_grid); + } + ///////////////////////////////////////////////////////////////////////// + // Construct from comm world + ///////////////////////////////////////////////////////////////////////// + GridCartesian(const std::vector &dimensions, + const std::vector &simd_layout, + const std::vector &processor_grid) : GridBase(processor_grid) + { + Init(dimensions,simd_layout,processor_grid); + } + + void Init(const std::vector &dimensions, + const std::vector &simd_layout, + const std::vector &processor_grid) { /////////////////////// // Grid information diff --git a/lib/cartesian/Cartesian_red_black.h b/lib/cartesian/Cartesian_red_black.h index b1a5b9ef..f89cacc5 100644 --- a/lib/cartesian/Cartesian_red_black.h +++ b/lib/cartesian/Cartesian_red_black.h @@ -112,24 +112,57 @@ public: } }; - GridRedBlackCartesian(const GridBase *base) : GridRedBlackCartesian(base->_fdimensions,base->_simd_layout,base->_processors) {}; + //////////////////////////////////////////////////////////// + // Create Redblack from original grid; require full grid pointer ? + //////////////////////////////////////////////////////////// + GridRedBlackCartesian(const GridBase *base) : GridBase(base->_processors,*base) + { + int dims = base->_ndimension; + std::vector checker_dim_mask(dims,1); + int checker_dim = 0; + Init(base->_fdimensions,base->_simd_layout,base->_processors,checker_dim_mask,checker_dim); + }; - GridRedBlackCartesian(const std::vector &dimensions, + //////////////////////////////////////////////////////////// + // Create redblack from original grid, with non-trivial checker dim mask + //////////////////////////////////////////////////////////// + GridRedBlackCartesian(const GridBase *base, + const std::vector &checker_dim_mask, + int checker_dim + ) : GridBase(base->_processors,*base) + { + Init(base->_fdimensions,base->_simd_layout,base->_processors,checker_dim_mask,checker_dim) ; + } +#if 0 + //////////////////////////////////////////////////////////// + // Create redblack grid ;; deprecate these. 
Should not
+  // need direct creation of redblack without a full grid to base on
+  ////////////////////////////////////////////////////////////
+  GridRedBlackCartesian(const GridBase *base,
+		        const std::vector<int> &dimensions,
 		        const std::vector<int> &simd_layout,
 		        const std::vector<int> &processor_grid,
 		        const std::vector<int> &checker_dim_mask,
 		        int checker_dim
-		        ) : GridBase(processor_grid)
+		        ) : GridBase(processor_grid,*base)
 {
   Init(dimensions,simd_layout,processor_grid,checker_dim_mask,checker_dim);
 }
-  GridRedBlackCartesian(const std::vector<int> &dimensions,
+
+  ////////////////////////////////////////////////////////////
+  // Create redblack grid
+  ////////////////////////////////////////////////////////////
+  GridRedBlackCartesian(const GridBase *base,
+		        const std::vector<int> &dimensions,
 		        const std::vector<int> &simd_layout,
-		        const std::vector<int> &processor_grid) : GridBase(processor_grid)
+		        const std::vector<int> &processor_grid) : GridBase(processor_grid,*base)
 {
   std::vector<int> checker_dim_mask(dimensions.size(),1);
-  Init(dimensions,simd_layout,processor_grid,checker_dim_mask,0);
+  int checker_dim = 0;
+  Init(dimensions,simd_layout,processor_grid,checker_dim_mask,checker_dim);
 }
+#endif
+
 void Init(const std::vector<int> &dimensions,
 	  const std::vector<int> &simd_layout,
 	  const std::vector<int> &processor_grid,
diff --git a/lib/communicator/Communicator_base.cc b/lib/communicator/Communicator_base.cc
index 20c310c0..ce9a3cf0 100644
--- a/lib/communicator/Communicator_base.cc
+++ b/lib/communicator/Communicator_base.cc
@@ -67,7 +67,7 @@ void CartesianCommunicator::ShmBufferFreeAll(void) {
 /////////////////////////////////
 // Grid information queries
 /////////////////////////////////
-int CartesianCommunicator::Dimensions(void) { return _ndimension; };
+int CartesianCommunicator::Dimensions(void) { return _ndimension; };
 int CartesianCommunicator::IsBoss(void) { return _processor==0; };
 int CartesianCommunicator::BossRank(void) { return 0; };
 int CartesianCommunicator::ThisRank(void) { return _processor; };
@@ -96,6 +96,113 @@ void CartesianCommunicator::GlobalSumVector(ComplexD *c,int N)
 GlobalSumVector((double *)c,2*N);
 }
+
+#if defined( GRID_COMMS_MPI) || defined (GRID_COMMS_MPIT)
+
+CartesianCommunicator::CartesianCommunicator(const std::vector<int> &processors,const CartesianCommunicator &parent)
+{
+  _ndimension = processors.size();
+  assert(_ndimension == parent._ndimension);
+
+  //////////////////////////////////////////////////////////////////////////////////////////////////////
+  // split the communicator
+  //////////////////////////////////////////////////////////////////////////////////////////////////////
+  int Nparent;
+  MPI_Comm_size(parent.communicator,&Nparent);
+
+  int childsize=1;
+  for(int d=0;d<_ndimension;d++) {
+    childsize *= processors[d];
+  }
+  int Nchild = Nparent/childsize;
+  assert( childsize * Nchild == Nparent );
+
+  std::vector<int> ccoor(_ndimension); // coor within subcommunicator
+  std::vector<int> scoor(_ndimension); // coor of split within parent
+  std::vector<int> ssize(_ndimension); // number of splits within parent
+
+  for(int d=0;d<_ndimension;d++){
+    ccoor[d] = parent._processor_coor[d] % processors[d];
+    scoor[d] = parent._processor_coor[d] / processors[d];
+    ssize[d] = parent._processors[d]/ processors[d];
+  }
+  int crank,srank;  // rank within subcomm ; rank of subcomm within blocks of subcomms
+  Lexicographic::IndexFromCoor(ccoor,crank,processors);
+  Lexicographic::IndexFromCoor(scoor,srank,ssize);
+
+  MPI_Comm comm_split;
+  if ( Nchild > 1 ) {
+    // std::cout << GridLogMessage<<"Child communicator of "<< std::hex << parent.communicator << std::dec<<std::endl;
+    int ierr = MPI_Comm_split(parent.communicator,srank,crank,&comm_split);
+    assert(ierr==0);
+  } else {
+    comm_split = parent.communicator;
+  }
+
+  //////////////////////////////////////////////////////////////////////////////////////////////////////
+  // Set up from the split communicator
+  //////////////////////////////////////////////////////////////////////////////////////////////////////
+  InitFromMPICommunicator(processors,comm_split);
+}
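+// Editorial note (not part of the original commit): MPI_Comm_split groups ranks
+// by colour and orders them by key, so the call above places every parent rank
+// with the same split-block index srank into one child communicator, ordered by
+// crank within that block:
+//
+//   MPI_Comm child;
+//   MPI_Comm_split(parent.communicator, /*colour*/ srank, /*key*/ crank, &child);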
"Cartesian communicator created with a non-world communicator"< periodic(_ndimension,1); + MPI_Cart_create(communicator_base, _ndimension,&_processors[0],&periodic[0],1,&communicator); + MPI_Comm_rank(communicator,&_processor); + MPI_Cart_coords(communicator,_processor,_ndimension,&_processor_coor[0]); + + int Size; + MPI_Comm_size(communicator,&Size); + +#ifdef GRID_COMMS_MPIT + communicator_halo.resize (2*_ndimension); + for(int i=0;i<_ndimension*2;i++){ + MPI_Comm_dup(communicator,&communicator_halo[i]); + } +#endif + + assert(Size==_Nprocessors); +} + +CartesianCommunicator::CartesianCommunicator(const std::vector &processors) +{ + InitFromMPICommunicator(processors,communicator_world); +} + +#endif + #if !defined( GRID_COMMS_MPI3) int CartesianCommunicator::NodeCount(void) { return ProcessorCount();}; @@ -147,8 +254,13 @@ void *CartesianCommunicator::ShmBufferTranslate(int rank,void * local_p) { } void CartesianCommunicator::ShmInitGeneric(void){ #if 1 - - int mmap_flag = MAP_SHARED | MAP_ANONYMOUS; + int mmap_flag =0; +#ifdef MAP_ANONYMOUS + mmap_flag = mmap_flag| MAP_SHARED | MAP_ANONYMOUS; +#endif +#ifdef MAP_ANON + mmap_flag = mmap_flag| MAP_SHARED | MAP_ANON; +#endif #ifdef MAP_HUGETLB if ( Hugepages ) mmap_flag |= MAP_HUGETLB; #endif diff --git a/lib/communicator/Communicator_base.h b/lib/communicator/Communicator_base.h index ac866ced..8ff22dbd 100644 --- a/lib/communicator/Communicator_base.h +++ b/lib/communicator/Communicator_base.h @@ -83,6 +83,7 @@ class CartesianCommunicator { std::vector communicator_halo; typedef MPI_Request CommsRequest_t; + #else typedef int CommsRequest_t; #endif @@ -147,11 +148,23 @@ class CartesianCommunicator { // Must call in Grid startup //////////////////////////////////////////////// static void Init(int *argc, char ***argv); - + //////////////////////////////////////////////// - // Constructor of any given grid + // Constructors to sub-divide a parent communicator + // and default to comm world //////////////////////////////////////////////// + CartesianCommunicator(const std::vector &processors,const CartesianCommunicator &parent); CartesianCommunicator(const std::vector &pdimensions_in); + + private: +#if defined (GRID_COMMS_MPI) || defined (GRID_COMMS_MPIT) + //////////////////////////////////////////////// + // Private initialise from an MPI communicator + // Can use after an MPI_Comm_split, but hidden from user so private + //////////////////////////////////////////////// + void InitFromMPICommunicator(const std::vector &processors, MPI_Comm communicator_base); +#endif + public: //////////////////////////////////////////////////////////////////////////////////////// // Wraps MPI_Cart routines, or implements equivalent on other impls @@ -249,6 +262,23 @@ class CartesianCommunicator { // Broadcast a buffer and composite larger //////////////////////////////////////////////////////////// void Broadcast(int root,void* data, int bytes); + + //////////////////////////////////////////////////////////// + // All2All down one dimension + //////////////////////////////////////////////////////////// + template void AllToAll(int dim,std::vector &in, std::vector &out){ + assert(dim>=0); + assert(dim<_ndimension); + int numnode = _processors[dim]; + // std::cerr << " AllToAll in.size() "< void Broadcast(int root,obj &data) { diff --git a/lib/communicator/Communicator_mpi.cc b/lib/communicator/Communicator_mpi.cc index bd2a62fb..678e4517 100644 --- a/lib/communicator/Communicator_mpi.cc +++ b/lib/communicator/Communicator_mpi.cc @@ -52,29 +52,6 @@ void 
CartesianCommunicator::Init(int *argc, char ***argv) { MPI_Comm_dup (MPI_COMM_WORLD,&communicator_world); ShmInitGeneric(); } - -CartesianCommunicator::CartesianCommunicator(const std::vector &processors) -{ - _ndimension = processors.size(); - std::vector periodic(_ndimension,1); - - _Nprocessors=1; - _processors = processors; - _processor_coor.resize(_ndimension); - - MPI_Cart_create(communicator_world, _ndimension,&_processors[0],&periodic[0],1,&communicator); - MPI_Comm_rank(communicator,&_processor); - MPI_Cart_coords(communicator,_processor,_ndimension,&_processor_coor[0]); - - for(int i=0;i<_ndimension;i++){ - _Nprocessors*=_processors[i]; - } - - int Size; - MPI_Comm_size(communicator,&Size); - - assert(Size==_Nprocessors); -} void CartesianCommunicator::GlobalSum(uint32_t &u){ int ierr=MPI_Allreduce(MPI_IN_PLACE,&u,1,MPI_UINT32_T,MPI_SUM,communicator); assert(ierr==0); @@ -210,6 +187,21 @@ void CartesianCommunicator::Broadcast(int root,void* data, int bytes) root, communicator); assert(ierr==0); +} +void CartesianCommunicator::AllToAll(int dim,void *in,void *out,int bytes) +{ + std::vector row(_ndimension,1); + assert(dim>=0 && dim<_ndimension); + + // Split the communicator + row[dim] = _processors[dim]; + + CartesianCommunicator Comm(row,*this); + Comm.AllToAll(in,out,bytes); +} +void CartesianCommunicator::AllToAll(void *in,void *out,int bytes) +{ + MPI_Alltoall(in ,bytes,MPI_BYTE,out,bytes,MPI_BYTE,communicator); } /////////////////////////////////////////////////////// // Should only be used prior to Grid Init finished. @@ -230,5 +222,7 @@ void CartesianCommunicator::BroadcastWorld(int root,void* data, int bytes) assert(ierr==0); } + + } diff --git a/lib/communicator/Communicator_mpi3.cc b/lib/communicator/Communicator_mpi3.cc index 44aa1024..dce9588a 100644 --- a/lib/communicator/Communicator_mpi3.cc +++ b/lib/communicator/Communicator_mpi3.cc @@ -450,6 +450,15 @@ void CartesianCommunicator::ProcessorCoorFromRank(int rank, std::vector &c assert(lr!=-1); Lexicographic::CoorFromIndex(coor,lr,_processors); } + +////////////////////////////////// +// Try to subdivide communicator +////////////////////////////////// +CartesianCommunicator::CartesianCommunicator(const std::vector &processors,const CartesianCommunicator &parent) + : CartesianCommunicator(processors) +{ + std::cout << "Attempts to split MPI3 communicators will fail until implemented" < &processors) { int ierr; diff --git a/lib/communicator/Communicator_mpit.cc b/lib/communicator/Communicator_mpit.cc index eb6ef87d..5137c27b 100644 --- a/lib/communicator/Communicator_mpit.cc +++ b/lib/communicator/Communicator_mpit.cc @@ -53,33 +53,6 @@ void CartesianCommunicator::Init(int *argc, char ***argv) { ShmInitGeneric(); } -CartesianCommunicator::CartesianCommunicator(const std::vector &processors) -{ - _ndimension = processors.size(); - std::vector periodic(_ndimension,1); - - _Nprocessors=1; - _processors = processors; - _processor_coor.resize(_ndimension); - - MPI_Cart_create(communicator_world, _ndimension,&_processors[0],&periodic[0],1,&communicator); - MPI_Comm_rank(communicator,&_processor); - MPI_Cart_coords(communicator,_processor,_ndimension,&_processor_coor[0]); - - for(int i=0;i<_ndimension;i++){ - _Nprocessors*=_processors[i]; - } - - communicator_halo.resize (2*_ndimension); - for(int i=0;i<_ndimension*2;i++){ - MPI_Comm_dup(communicator,&communicator_halo[i]); - } - - int Size; - MPI_Comm_size(communicator,&Size); - - assert(Size==_Nprocessors); -} void CartesianCommunicator::GlobalSum(uint32_t &u){ int 
ierr=MPI_Allreduce(MPI_IN_PLACE,&u,1,MPI_UINT32_T,MPI_SUM,communicator); assert(ierr==0); diff --git a/lib/communicator/Communicator_none.cc b/lib/communicator/Communicator_none.cc index 5319ab93..e9d71a15 100644 --- a/lib/communicator/Communicator_none.cc +++ b/lib/communicator/Communicator_none.cc @@ -38,6 +38,9 @@ void CartesianCommunicator::Init(int *argc, char *** arv) ShmInitGeneric(); } +CartesianCommunicator::CartesianCommunicator(const std::vector &processors,const CartesianCommunicator &parent) + : CartesianCommunicator(processors) {} + CartesianCommunicator::CartesianCommunicator(const std::vector &processors) { _processors = processors; @@ -95,6 +98,10 @@ void CartesianCommunicator::SendToRecvFromComplete(std::vector & { assert(0); } +void CartesianCommunicator::AllToAll(int dim,void *in,void *out,int bytes) +{ + bcopy(in,out,bytes); +} int CartesianCommunicator::RankWorld(void){return 0;} void CartesianCommunicator::Barrier(void){} diff --git a/lib/communicator/Communicator_shmem.cc b/lib/communicator/Communicator_shmem.cc index 3c76c808..ed49285d 100644 --- a/lib/communicator/Communicator_shmem.cc +++ b/lib/communicator/Communicator_shmem.cc @@ -75,6 +75,11 @@ void CartesianCommunicator::Init(int *argc, char ***argv) { ShmInitGeneric(); } +CartesianCommunicator::CartesianCommunicator(const std::vector &processors,const CartesianCommunicator &parent) + : CartesianCommunicator(processors) +{ + std::cout << "Attempts to split SHMEM communicators will fail " < &processors) { _ndimension = processors.size(); diff --git a/lib/json/json.hpp b/lib/json/json.hpp index e7c42920..9d589120 100644 --- a/lib/json/json.hpp +++ b/lib/json/json.hpp @@ -63,7 +63,7 @@ SOFTWARE. #error "unsupported Clang version - see https://github.com/nlohmann/json#supported-compilers" #endif #elif defined(__GNUC__) - #if (__GNUC__ * 10000 + __GNUC_MINOR__ * 100 + __GNUC_PATCHLEVEL__) < 40900 + #if (__GNUC__ * 10000 + __GNUC_MINOR__ * 100 + __GNUC_PATCHLEVEL__) < 40805 #error "unsupported GCC version - see https://github.com/nlohmann/json#supported-compilers" #endif #endif diff --git a/lib/lattice/Lattice_reduction.h b/lib/lattice/Lattice_reduction.h index db012c8c..8a3fbece 100644 --- a/lib/lattice/Lattice_reduction.h +++ b/lib/lattice/Lattice_reduction.h @@ -544,7 +544,6 @@ static void sliceInnerProductMatrix( Eigen::MatrixXcd &mat, const Lattice for(int i=0;i &out, const Lattice &in){ merge(out._odata[out_oidx], ptrs, 0); } } + +//////////////////////////////////////////////////////////////////////////////// +// Communicate between grids +//////////////////////////////////////////////////////////////////////////////// +// +// All to all plan +// +// Subvolume on fine grid is v. Vectors a,b,c,d +// +/////////////////////////////////////////////////////////////////////////////////////////////////////////// +// SIMPLEST CASE: +/////////////////////////////////////////////////////////////////////////////////////////////////////////// +// Mesh of nodes (2) ; subdivide to 1 subdivisions +// +// Lex ord: +// N0 va0 vb0 N1 va1 vb1 +// +// For each dimension do an all to all +// +// full AllToAll(0) +// N0 va0 va1 N1 vb0 vb1 +// +// REARRANGE +// N0 va01 N1 vb01 +// +// Must also rearrange data to get into the NEW lex order of grid at each stage. Some kind of "insert/extract". +// NB: Easiest to programme if keep in lex order. 
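+ //
+ // Editorial sketch (hypothetical driver, not part of the original commit) of the
+ // SIMPLEST CASE above, using Grid_split/Grid_unsplit defined below:
+ //
+ //   GridCartesian Full (latt,simd,{2,1,1,1});                // ranks N0,N1
+ //   GridCartesian Split(latt,simd,{1,1,1,1},Full);           // one-rank subdivisions
+ //   std::vector<LatticeFermion> v(2,LatticeFermion(&Full));  // va,vb
+ //   LatticeFermion w(&Split);
+ //   Grid_split (v,w);   // N0 now holds all of va, N1 all of vb
+ //   Grid_unsplit(v,w);  // inverse: scatter w back into va,vb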
+// +/////////////////////////////////////////////////////////////////////////////////////////////////////////// +// SIMPLE CASE: +/////////////////////////////////////////////////////////////////////////////////////////////////////////// +// +// Mesh of nodes (2x2) ; subdivide to 1x1 subdivisions +// +// Lex ord: +// N0 va0 vb0 vc0 vd0 N1 va1 vb1 vc1 vd1 +// N2 va2 vb2 vc2 vd2 N3 va3 vb3 vc3 vd3 +// +// Ratio = full[dim] / split[dim] +// +// For each dimension do an all to all; get Nvec -> Nvec / ratio +// Ldim -> Ldim * ratio +// LocalVol -> LocalVol * ratio +// full AllToAll(0) +// N0 va0 vb0 va1 vb1 N1 vc0 vd0 vc1 vd1 +// N2 va2 vb2 va3 vb3 N3 vc2 vd2 vc3 vd3 +// +// REARRANGE +// N0 va01 vb01 N1 vc01 vd01 +// N2 va23 vb23 N3 vc23 vd23 +// +// full AllToAll(1) // Not what is wanted. FIXME +// N0 va01 va23 N1 vc01 vc23 +// N2 vb01 vb23 N3 vd01 vd23 +// +// REARRANGE +// N0 va0123 N1 vc0123 +// N2 vb0123 N3 vd0123 +// +// Must also rearrange data to get into the NEW lex order of grid at each stage. Some kind of "insert/extract". +// NB: Easiest to programme if keep in lex order. +// +///////////////////////////////////////////////////////// +template +void Grid_split(std::vector > & full,Lattice & split) +{ + typedef typename Vobj::scalar_object Sobj; + + int full_vecs = full.size(); + + assert(full_vecs>=1); + + GridBase * full_grid = full[0]._grid; + GridBase *split_grid = split._grid; + + int ndim = full_grid->_ndimension; + int full_nproc = full_grid->_Nprocessors; + int split_nproc =split_grid->_Nprocessors; + + //////////////////////////////// + // Checkerboard management + //////////////////////////////// + int cb = full[0].checkerboard; + split.checkerboard = cb; + + ////////////////////////////// + // Checks + ////////////////////////////// + assert(full_grid->_ndimension==split_grid->_ndimension); + for(int n=0;n_gdimensions[d]==split._grid->_gdimensions[d]); + assert(full[n]._grid->_fdimensions[d]==split._grid->_fdimensions[d]); + } + } + + int nvector =full_nproc/split_nproc; + assert(nvector*split_nproc==full_nproc); + assert(nvector == full_vecs); + + std::vector ratio(ndim); + for(int d=0;d_processors[d]/ split_grid->_processors[d]; + } + + int lsites = full_grid->lSites(); + Integer sz = lsites * nvector; + std::vector tmpdata(sz); + std::vector alldata(sz); + std::vector scalardata(lsites); + for(int v=0;v ldims = full_grid->_ldimensions; + std::vector lcoor(ndim); + + for(int d=0;dAllToAll(d,alldata,tmpdata); + + ////////////////////////////////////////// + //Local volume for this dimension is expanded by ratio of processor extents + // Number of vectors is decreased by same factor + // Rearrange to lexico for bigger volume + ////////////////////////////////////////// + nvec /= ratio[d]; + auto rdims = ldims; rdims[d] *= ratio[d]; + auto rsites= lsites*ratio[d]; + for(int v=0;v_processors[d] > 1 ) { + tmpdata = alldata; + split_grid->AllToAll(d,tmpdata,alldata); + } + } + } + + vectorizeFromLexOrdArray(alldata,split); +} + +template +void Grid_split(Lattice &full,Lattice & split) +{ + int nvector = full._grid->_Nprocessors / split._grid->_Nprocessors; + std::vector > full_v(nvector,full._grid); + for(int n=0;n +void Grid_unsplit(std::vector > & full,Lattice & split) +{ + typedef typename Vobj::scalar_object Sobj; + + int full_vecs = full.size(); + + assert(full_vecs>=1); + + GridBase * full_grid = full[0]._grid; + GridBase *split_grid = split._grid; + + int ndim = full_grid->_ndimension; + int full_nproc = full_grid->_Nprocessors; + int split_nproc 
=split_grid->_Nprocessors; + + //////////////////////////////// + // Checkerboard management + //////////////////////////////// + int cb = full[0].checkerboard; + split.checkerboard = cb; + + ////////////////////////////// + // Checks + ////////////////////////////// + assert(full_grid->_ndimension==split_grid->_ndimension); + for(int n=0;n_gdimensions[d]==split._grid->_gdimensions[d]); + assert(full[n]._grid->_fdimensions[d]==split._grid->_fdimensions[d]); + } + } + + int nvector =full_nproc/split_nproc; + assert(nvector*split_nproc==full_nproc); + assert(nvector == full_vecs); + + std::vector ratio(ndim); + for(int d=0;d_processors[d]/ split_grid->_processors[d]; + } + + int lsites = full_grid->lSites(); + Integer sz = lsites * nvector; + std::vector tmpdata(sz); + std::vector alldata(sz); + std::vector scalardata(lsites); + + unvectorizeToLexOrdArray(alldata,split); + + ///////////////////////////////////////////////////////////////// + // Start from split grid and work towards full grid + ///////////////////////////////////////////////////////////////// + std::vector lcoor(ndim); + std::vector rcoor(ndim); + + int nvec = 1; + lsites = split_grid->lSites(); + std::vector ldims = split_grid->_ldimensions; + + for(int d=ndim-1;d>=0;d--){ + + if ( ratio[d] != 1 ) { + + if ( split_grid->_processors[d] > 1 ) { + tmpdata = alldata; + split_grid->AllToAll(d,tmpdata,alldata); + } + + ////////////////////////////////////////// + //Local volume for this dimension is expanded by ratio of processor extents + // Number of vectors is decreased by same factor + // Rearrange to lexico for bigger volume + ////////////////////////////////////////// + auto rsites= lsites/ratio[d]; + auto rdims = ldims; rdims[d]/=ratio[d]; + + for(int v=0;v smaller local volume + // lsite, lcoor --> bigger original (single node?) volume + // For loop over each site within smaller subvol + for(int rsite=0;rsiteAllToAll(d,tmpdata,alldata); + } + } + + lsites = full_grid->lSites(); + for(int v=0;v(); - // std::cout << " Lorentz N/S/V/M : " << _LorentzN<<" "<<_LorentzScalar<<"/"<<_LorentzVector<<"/"<<_LorentzMatrix<_gsites; + + // std::cout << "R sizeof(sobj)= " <_gsites< munge; - BinaryIO::readLatticeObject< sobj, sobj >(field, filename, munge, offset, format,nersc_csum,scidac_csuma,scidac_csumb); + BinaryIO::readLatticeObject< vobj, sobj >(field, filename, munge, offset, format,nersc_csum,scidac_csuma,scidac_csumb); ///////////////////////////////////////////// // Insist checksum is next record ///////////////////////////////////////////// - readLimeObject(scidacChecksum_,std::string("scidacChecksum"),record_name); + readLimeObject(scidacChecksum_,std::string("scidacChecksum"),std::string(SCIDAC_CHECKSUM)); ///////////////////////////////////////////// // Verify checksums @@ -242,11 +252,19 @@ class GridLimeReader : public BinaryIO { // should this be a do while; can we miss a first record?? 
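 // Editorial note (not part of the original commit): Grid-written Scidac field
 // records appear in the order GRID_FORMAT, SCIDAC_RECORD_XML,
 // SCIDAC_PRIVATE_RECORD_XML, ILDG_BINARY_DATA, SCIDAC_CHECKSUM (cf.
 // readScidacFieldRecord below); this loop scans forward to the first record
 // whose Lime type matches record_name.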
while ( limeReaderNextRecord(LimeR) == LIME_SUCCESS ) { + // std::cout << GridLogMessage<< " readLimeObject seeking "<< record_name <<" found record :" < xmlc(nbytes+1,'\0'); limeReaderReadData((void *)&xmlc[0], &nbytes, LimeR); + + // std::cout << GridLogMessage<< " readLimeObject matches XML " << &xmlc[0] <(record_name.c_str()), nbytes); + assert(h!= NULL); err=limeWriteRecordHeader(h, LimeW); assert(err>=0); err=limeWriteRecordData(&xmlstring[0], &nbytes, LimeW); assert(err>=0); err=limeWriterCloseRecord(LimeW); assert(err>=0); limeDestroyHeader(h); + // std::cout << " File offset is now"<_gsites; createLimeRecordHeader(record_name, 0, 0, PayloadSize); + + // std::cout << "W sizeof(sobj)" <_gsites<(); BinarySimpleMunger munge; BinaryIO::writeLatticeObject(field, filename, munge, offset, format,nersc_csum,scidac_csuma,scidac_csumb); @@ -354,7 +383,7 @@ class GridLimeWriter : public BinaryIO { checksum.suma= streama.str(); checksum.sumb= streamb.str(); std::cout << GridLogMessage<<" writing scidac checksums "< + template void writeScidacFieldRecord(Lattice &field,userRecord _userRecord) { - typedef typename vobj::scalar_object sobj; - uint64_t nbytes; GridBase * grid = field._grid; //////////////////////////////////////// @@ -397,6 +424,66 @@ class ScidacWriter : public GridLimeWriter { } }; + +class ScidacReader : public GridLimeReader { + public: + + template + void readScidacFileRecord(GridBase *grid,SerialisableUserFile &_userFile) + { + scidacFile _scidacFile(grid); + readLimeObject(_scidacFile,_scidacFile.SerialisableClassName(),std::string(SCIDAC_PRIVATE_FILE_XML)); + readLimeObject(_userFile,_userFile.SerialisableClassName(),std::string(SCIDAC_FILE_XML)); + } + //////////////////////////////////////////////// + // Write generic lattice field in scidac format + //////////////////////////////////////////////// + template + void readScidacFieldRecord(Lattice &field,userRecord &_userRecord) + { + typedef typename vobj::scalar_object sobj; + GridBase * grid = field._grid; + + //////////////////////////////////////// + // fill the Grid header + //////////////////////////////////////// + FieldMetaData header; + scidacRecord _scidacRecord; + scidacFile _scidacFile; + + ////////////////////////////////////////////// + // Fill the Lime file record by record + ////////////////////////////////////////////// + readLimeObject(header ,std::string("FieldMetaData"),std::string(GRID_FORMAT)); // Open message + readLimeObject(_userRecord,_userRecord.SerialisableClassName(),std::string(SCIDAC_RECORD_XML)); + readLimeObject(_scidacRecord,_scidacRecord.SerialisableClassName(),std::string(SCIDAC_PRIVATE_RECORD_XML)); + readLimeLatticeBinaryObject(field,std::string(ILDG_BINARY_DATA)); + } + void skipPastBinaryRecord(void) { + std::string rec_name(ILDG_BINARY_DATA); + while ( limeReaderNextRecord(LimeR) == LIME_SUCCESS ) { + if ( !strncmp(limeReaderType(LimeR), rec_name.c_str(),strlen(rec_name.c_str()) ) ) { + skipPastObjectRecord(std::string(SCIDAC_CHECKSUM)); + return; + } + } + } + void skipPastObjectRecord(std::string rec_name) { + while ( limeReaderNextRecord(LimeR) == LIME_SUCCESS ) { + if ( !strncmp(limeReaderType(LimeR), rec_name.c_str(),strlen(rec_name.c_str()) ) ) { + return; + } + } + } + void skipScidacFieldRecord() { + skipPastObjectRecord(std::string(GRID_FORMAT)); + skipPastObjectRecord(std::string(SCIDAC_RECORD_XML)); + skipPastObjectRecord(std::string(SCIDAC_PRIVATE_RECORD_XML)); + skipPastBinaryRecord(); + } +}; + + class IldgWriter : public ScidacWriter { public: @@ -425,8 +512,6 @@ 
class IldgWriter : public ScidacWriter { typedef iLorentzColourMatrix vobj; typedef typename vobj::scalar_object sobj; - uint64_t nbytes; - //////////////////////////////////////// // fill the Grid header //////////////////////////////////////// diff --git a/lib/parallelIO/IldgIOtypes.h b/lib/parallelIO/IldgIOtypes.h index c3a5321c..5b397e14 100644 --- a/lib/parallelIO/IldgIOtypes.h +++ b/lib/parallelIO/IldgIOtypes.h @@ -64,6 +64,11 @@ namespace Grid { // file compatability, so should be correct to assume the undocumented but defacto file structure. ///////////////////////////////////////////////////////////////////////////////// +struct emptyUserRecord : Serializable { + GRID_SERIALIZABLE_CLASS_MEMBERS(emptyUserRecord,int,dummy); + emptyUserRecord() { dummy=0; }; +}; + //////////////////////// // Scidac private file xml // 1.1416 16 16 32 0 diff --git a/lib/parallelIO/MetaData.h b/lib/parallelIO/MetaData.h index 6d45d0a5..ccc8b18f 100644 --- a/lib/parallelIO/MetaData.h +++ b/lib/parallelIO/MetaData.h @@ -85,6 +85,9 @@ namespace Grid { nd=4; dimension.resize(4); boundary.resize(4); + scidac_checksuma=0; + scidac_checksumb=0; + checksum=0; } }; @@ -104,6 +107,7 @@ namespace Grid { header.nd = nd; header.dimension.resize(nd); header.boundary.resize(nd); + header.data_start = 0; for(int d=0;d_fdimensions[d]; } diff --git a/lib/qcd/action/fermion/AbstractEOFAFermion.h b/lib/qcd/action/fermion/AbstractEOFAFermion.h new file mode 100644 index 00000000..15faa401 --- /dev/null +++ b/lib/qcd/action/fermion/AbstractEOFAFermion.h @@ -0,0 +1,100 @@ +/************************************************************************************* + +Grid physics library, www.github.com/paboyle/Grid + +Source file: ./lib/qcd/action/fermion/AbstractEOFAFermion.h + +Copyright (C) 2017 + +Author: Peter Boyle +Author: Peter Boyle +Author: David Murphy + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 2 of the License, or +(at your option) any later version. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License along +with this program; if not, write to the Free Software Foundation, Inc., +51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + +See the full license in the file "LICENSE" in the top level distribution directory +*************************************************************************************/ +/* END LEGAL */ +#ifndef GRID_QCD_ABSTRACT_EOFA_FERMION_H +#define GRID_QCD_ABSTRACT_EOFA_FERMION_H + +#include + +namespace Grid { +namespace QCD { + + // DJM: Abstract base class for EOFA fermion types. + // Defines layout of additional EOFA-specific parameters and operators. + // Use to construct EOFA pseudofermion actions that are agnostic to + // Shamir / Mobius / etc., and ensure that no one can construct EOFA + // pseudofermion action with non-EOFA fermion type. 
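+ //
+ // Editorial sketch (hypothetical fields and values, not part of the original
+ // commit): a concrete EOFA operator is built from a derived class, e.g.
+ //
+ //   DomainWallEOFAFermion<WilsonImplR> Deofa(Umu,*FGrid,*FrbGrid,*UGrid,*UrbGrid,
+ //                                            mq1,mq2,mq3,shift,pm,M5);
+ //
+ // with mq1,mq2,mq3 the three EOFA masses, shift the shift constant swept in the
+ // heatbath pole loop, and pm = +/-1 selecting the P_{+}/P_{-} term.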
+ template + class AbstractEOFAFermion : public CayleyFermion5D { + public: + INHERIT_IMPL_TYPES(Impl); + + public: + // Fermion operator: D(mq1) + shift*\gamma_{5}*R_{5}*\Delta_{\pm}(mq2,mq3)*P_{\pm} + RealD mq1; + RealD mq2; + RealD mq3; + RealD shift; + int pm; + + RealD alpha; // Mobius scale + RealD k; // EOFA normalization constant + + virtual void Instantiatable(void) = 0; + + // EOFA-specific operations + // Force user to implement in derived classes + virtual void Omega (const FermionField& in, FermionField& out, int sign, int dag) = 0; + virtual void Dtilde (const FermionField& in, FermionField& out) = 0; + virtual void DtildeInv(const FermionField& in, FermionField& out) = 0; + + // Implement derivatives in base class: + // for EOFA both DWF and Mobius just need d(Dw)/dU + virtual void MDeriv(GaugeField& mat, const FermionField& U, const FermionField& V, int dag){ + this->DhopDeriv(mat, U, V, dag); + }; + virtual void MoeDeriv(GaugeField& mat, const FermionField& U, const FermionField& V, int dag){ + this->DhopDerivOE(mat, U, V, dag); + }; + virtual void MeoDeriv(GaugeField& mat, const FermionField& U, const FermionField& V, int dag){ + this->DhopDerivEO(mat, U, V, dag); + }; + + // Recompute 5D coefficients for different value of shift constant + // (needed for heatbath loop over poles) + virtual void RefreshShiftCoefficients(RealD new_shift) = 0; + + // Constructors + AbstractEOFAFermion(GaugeField& _Umu, GridCartesian& FiveDimGrid, GridRedBlackCartesian& FiveDimRedBlackGrid, + GridCartesian& FourDimGrid, GridRedBlackCartesian& FourDimRedBlackGrid, + RealD _mq1, RealD _mq2, RealD _mq3, RealD _shift, int _pm, + RealD _M5, RealD _b, RealD _c, const ImplParams& p=ImplParams()) + : CayleyFermion5D(_Umu, FiveDimGrid, FiveDimRedBlackGrid, FourDimGrid, FourDimRedBlackGrid, + _mq1, _M5, p), mq1(_mq1), mq2(_mq2), mq3(_mq3), shift(_shift), pm(_pm) + { + int Ls = this->Ls; + this->alpha = _b + _c; + this->k = this->alpha * (_mq3-_mq2) * std::pow(this->alpha+1.0,2*Ls) / + ( std::pow(this->alpha+1.0,Ls) + _mq2*std::pow(this->alpha-1.0,Ls) ) / + ( std::pow(this->alpha+1.0,Ls) + _mq3*std::pow(this->alpha-1.0,Ls) ); + }; + }; +}} + +#endif diff --git a/lib/qcd/action/fermion/CayleyFermion5D.cc b/lib/qcd/action/fermion/CayleyFermion5D.cc index 838b1c3d..eace6484 100644 --- a/lib/qcd/action/fermion/CayleyFermion5D.cc +++ b/lib/qcd/action/fermion/CayleyFermion5D.cc @@ -77,7 +77,6 @@ void CayleyFermion5D::DminusDag(const FermionField &psi, FermionField &chi } } - template void CayleyFermion5D::CayleyReport(void) { this->Report(); @@ -119,7 +118,6 @@ template void CayleyFermion5D::CayleyZeroCounters(void) MooeeInvTime=0; } - template void CayleyFermion5D::M5D (const FermionField &psi, FermionField &chi) { diff --git a/lib/qcd/action/fermion/CayleyFermion5D.h b/lib/qcd/action/fermion/CayleyFermion5D.h index cce13e12..ef75235a 100644 --- a/lib/qcd/action/fermion/CayleyFermion5D.h +++ b/lib/qcd/action/fermion/CayleyFermion5D.h @@ -1,6 +1,6 @@ /************************************************************************************* - Grid physics library, www.github.com/paboyle/Grid + Grid physics library, www.github.com/paboyle/Grid Source file: ./lib/qcd/action/fermion/CayleyFermion5D.h @@ -35,24 +35,24 @@ namespace Grid { namespace QCD { - template struct switcheroo { - static inline int iscomplex() { return 0; } + template struct switcheroo { + static inline int iscomplex() { return 0; } template static inline vec mult(vec a, vec b) { return real_mult(a,b); } }; - template<> struct switcheroo { - 
static inline int iscomplex() { return 1; } + template<> struct switcheroo { + static inline int iscomplex() { return 1; } template static inline vec mult(vec a, vec b) { return a*b; } }; - template<> struct switcheroo { - static inline int iscomplex() { return 1; } + template<> struct switcheroo { + static inline int iscomplex() { return 1; } template static inline vec mult(vec a, vec b) { return a*b; @@ -90,14 +90,14 @@ namespace Grid { // Instantiate different versions depending on Impl ///////////////////////////////////////////////////// void M5D(const FermionField &psi, - const FermionField &phi, + const FermionField &phi, FermionField &chi, std::vector &lower, std::vector &diag, std::vector &upper); void M5Ddag(const FermionField &psi, - const FermionField &phi, + const FermionField &phi, FermionField &chi, std::vector &lower, std::vector &diag, @@ -125,7 +125,7 @@ namespace Grid { // Efficient support for multigrid coarsening virtual void Mdir (const FermionField &in, FermionField &out,int dir,int disp); - + void Meooe5D (const FermionField &in, FermionField &out); void MeooeDag5D (const FermionField &in, FermionField &out); @@ -133,23 +133,23 @@ namespace Grid { RealD mass; // Cayley form Moebius (tanh and zolotarev) - std::vector omega; + std::vector omega; std::vector bs; // S dependent coeffs - std::vector cs; - std::vector as; + std::vector cs; + std::vector as; // For preconditioning Cayley form - std::vector bee; - std::vector cee; - std::vector aee; - std::vector beo; - std::vector ceo; - std::vector aeo; + std::vector bee; + std::vector cee; + std::vector aee; + std::vector beo; + std::vector ceo; + std::vector aeo; // LDU factorisation of the eeoo matrix - std::vector lee; - std::vector leem; - std::vector uee; - std::vector ueem; - std::vector dee; + std::vector lee; + std::vector leem; + std::vector uee; + std::vector ueem; + std::vector dee; // Matrices of 5d ee inverse params Vector > MatpInv; @@ -165,7 +165,7 @@ namespace Grid { GridRedBlackCartesian &FourDimRedBlackGrid, RealD _mass,RealD _M5,const ImplParams &p= ImplParams()); - + void CayleyReport(void); void CayleyZeroCounters(void); @@ -179,9 +179,9 @@ namespace Grid { double MooeeInvTime; protected: - void SetCoefficientsZolotarev(RealD zolohi,Approx::zolotarev_data *zdata,RealD b,RealD c); - void SetCoefficientsTanh(Approx::zolotarev_data *zdata,RealD b,RealD c); - void SetCoefficientsInternal(RealD zolo_hi,std::vector & gamma,RealD b,RealD c); + virtual void SetCoefficientsZolotarev(RealD zolohi,Approx::zolotarev_data *zdata,RealD b,RealD c); + virtual void SetCoefficientsTanh(Approx::zolotarev_data *zdata,RealD b,RealD c); + virtual void SetCoefficientsInternal(RealD zolo_hi,std::vector & gamma,RealD b,RealD c); }; } diff --git a/lib/qcd/action/fermion/DomainWallEOFAFermion.cc b/lib/qcd/action/fermion/DomainWallEOFAFermion.cc new file mode 100644 index 00000000..37ab5fa6 --- /dev/null +++ b/lib/qcd/action/fermion/DomainWallEOFAFermion.cc @@ -0,0 +1,438 @@ +/************************************************************************************* + +Grid physics library, www.github.com/paboyle/Grid + +Source file: ./lib/qcd/action/fermion/DomainWallEOFAFermion.cc + +Copyright (C) 2017 + +Author: Peter Boyle +Author: Peter Boyle +Author: Peter Boyle +Author: paboyle +Author: David Murphy + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 2 of the License, or +(at your option) any 
later version. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License along +with this program; if not, write to the Free Software Foundation, Inc., +51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + +See the full license in the file "LICENSE" in the top level distribution directory +*************************************************************************************/ +/* END LEGAL */ + +#include +#include +#include + +namespace Grid { +namespace QCD { + + template + DomainWallEOFAFermion::DomainWallEOFAFermion( + GaugeField &_Umu, + GridCartesian &FiveDimGrid, + GridRedBlackCartesian &FiveDimRedBlackGrid, + GridCartesian &FourDimGrid, + GridRedBlackCartesian &FourDimRedBlackGrid, + RealD _mq1, RealD _mq2, RealD _mq3, + RealD _shift, int _pm, RealD _M5, const ImplParams &p) : + AbstractEOFAFermion(_Umu, FiveDimGrid, FiveDimRedBlackGrid, + FourDimGrid, FourDimRedBlackGrid, _mq1, _mq2, _mq3, + _shift, _pm, _M5, 1.0, 0.0, p) + { + RealD eps = 1.0; + Approx::zolotarev_data *zdata = Approx::higham(eps,this->Ls); + assert(zdata->n == this->Ls); + + std::cout << GridLogMessage << "DomainWallEOFAFermion with Ls=" << this->Ls << std::endl; + this->SetCoefficientsTanh(zdata, 1.0, 0.0); + + Approx::zolotarev_free(zdata); + } + + /*************************************************************** + * Additional EOFA operators only called outside the inverter. + * Since speed is not essential, simple axpby-style + * implementations should be fine. + ***************************************************************/ + template + void DomainWallEOFAFermion::Omega(const FermionField& psi, FermionField& Din, int sign, int dag) + { + int Ls = this->Ls; + + Din = zero; + if((sign == 1) && (dag == 0)){ axpby_ssp(Din, 0.0, psi, 1.0, psi, Ls-1, 0); } + else if((sign == -1) && (dag == 0)){ axpby_ssp(Din, 0.0, psi, 1.0, psi, 0, 0); } + else if((sign == 1 ) && (dag == 1)){ axpby_ssp(Din, 0.0, psi, 1.0, psi, 0, Ls-1); } + else if((sign == -1) && (dag == 1)){ axpby_ssp(Din, 0.0, psi, 1.0, psi, 0, 0); } + } + + // This is just the identity for DWF + template + void DomainWallEOFAFermion::Dtilde(const FermionField& psi, FermionField& chi){ chi = psi; } + + // This is just the identity for DWF + template + void DomainWallEOFAFermion::DtildeInv(const FermionField& psi, FermionField& chi){ chi = psi; } + + /*****************************************************************************************************/ + + template + RealD DomainWallEOFAFermion::M(const FermionField& psi, FermionField& chi) + { + int Ls = this->Ls; + + FermionField Din(psi._grid); + + this->Meooe5D(psi, Din); + this->DW(Din, chi, DaggerNo); + axpby(chi, 1.0, 1.0, chi, psi); + this->M5D(psi, chi); + return(norm2(chi)); + } + + template + RealD DomainWallEOFAFermion::Mdag(const FermionField& psi, FermionField& chi) + { + int Ls = this->Ls; + + FermionField Din(psi._grid); + + this->DW(psi, Din, DaggerYes); + this->MeooeDag5D(Din, chi); + this->M5Ddag(psi, chi); + axpby(chi, 1.0, 1.0, chi, psi); + return(norm2(chi)); + } + + /******************************************************************** + * Performance critical fermion operators called inside the inverter + ********************************************************************/ + + template + void DomainWallEOFAFermion::M5D(const 
FermionField& psi, FermionField& chi) + { + int Ls = this->Ls; + int pm = this->pm; + RealD shift = this->shift; + RealD mq1 = this->mq1; + RealD mq2 = this->mq2; + RealD mq3 = this->mq3; + + // coefficients for shift operator ( = shift*\gamma_{5}*R_{5}*\Delta_{\pm}(mq2,mq3)*P_{\pm} ) + Coeff_t shiftp(0.0), shiftm(0.0); + if(shift != 0.0){ + if(pm == 1){ shiftp = shift*(mq3-mq2); } + else{ shiftm = -shift*(mq3-mq2); } + } + + std::vector diag(Ls,1.0); + std::vector upper(Ls,-1.0); upper[Ls-1] = mq1 + shiftm; + std::vector lower(Ls,-1.0); lower[0] = mq1 + shiftp; + + #if(0) + std::cout << GridLogMessage << "DomainWallEOFAFermion::M5D(FF&,FF&):" << std::endl; + for(int i=0; i::iscomplex()) { + sp[l] = PplusMat (l*istride+s1*ostride,s2); + sm[l] = PminusMat(l*istride+s1*ostride,s2); + } else { + // if real + scalar_type tmp; + tmp = PplusMat (l*istride+s1*ostride,s2); + sp[l] = scalar_type(tmp.real(),tmp.real()); + tmp = PminusMat(l*istride+s1*ostride,s2); + sm[l] = scalar_type(tmp.real(),tmp.real()); + } + } + Matp[LLs*s2+s1] = Vp; + Matm[LLs*s2+s1] = Vm; + }} + } + + FermOpTemplateInstantiate(DomainWallEOFAFermion); + GparityFermOpTemplateInstantiate(DomainWallEOFAFermion); + +}} diff --git a/lib/qcd/action/fermion/DomainWallEOFAFermion.h b/lib/qcd/action/fermion/DomainWallEOFAFermion.h new file mode 100644 index 00000000..5362cda8 --- /dev/null +++ b/lib/qcd/action/fermion/DomainWallEOFAFermion.h @@ -0,0 +1,115 @@ +/************************************************************************************* + +Grid physics library, www.github.com/paboyle/Grid + +Source file: ./lib/qcd/action/fermion/DomainWallEOFAFermion.h + +Copyright (C) 2017 + +Author: Peter Boyle +Author: Peter Boyle +Author: David Murphy + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 2 of the License, or +(at your option) any later version. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License along +with this program; if not, write to the Free Software Foundation, Inc., +51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
+ +See the full license in the file "LICENSE" in the top level distribution directory +*************************************************************************************/ +/* END LEGAL */ +#ifndef GRID_QCD_DOMAIN_WALL_EOFA_FERMION_H +#define GRID_QCD_DOMAIN_WALL_EOFA_FERMION_H + +#include + +namespace Grid { +namespace QCD { + + template + class DomainWallEOFAFermion : public AbstractEOFAFermion + { + public: + INHERIT_IMPL_TYPES(Impl); + + public: + // Modified (0,Ls-1) and (Ls-1,0) elements of Mooee + // for red-black preconditioned Shamir EOFA + Coeff_t dm; + Coeff_t dp; + + virtual void Instantiatable(void) {}; + + // EOFA-specific operations + virtual void Omega (const FermionField& in, FermionField& out, int sign, int dag); + virtual void Dtilde (const FermionField& in, FermionField& out); + virtual void DtildeInv (const FermionField& in, FermionField& out); + + // override multiply + virtual RealD M (const FermionField& in, FermionField& out); + virtual RealD Mdag (const FermionField& in, FermionField& out); + + // half checkerboard operations + virtual void Mooee (const FermionField& in, FermionField& out); + virtual void MooeeDag (const FermionField& in, FermionField& out); + virtual void MooeeInv (const FermionField& in, FermionField& out); + virtual void MooeeInvDag(const FermionField& in, FermionField& out); + + virtual void M5D (const FermionField& psi, FermionField& chi); + virtual void M5Ddag (const FermionField& psi, FermionField& chi); + + ///////////////////////////////////////////////////// + // Instantiate different versions depending on Impl + ///////////////////////////////////////////////////// + void M5D(const FermionField& psi, const FermionField& phi, FermionField& chi, + std::vector& lower, std::vector& diag, std::vector& upper); + + void M5Ddag(const FermionField& psi, const FermionField& phi, FermionField& chi, + std::vector& lower, std::vector& diag, std::vector& upper); + + void MooeeInternal(const FermionField& in, FermionField& out, int dag, int inv); + + void MooeeInternalCompute(int dag, int inv, Vector>& Matp, Vector>& Matm); + + void MooeeInternalAsm(const FermionField& in, FermionField& out, int LLs, int site, + Vector>& Matp, Vector>& Matm); + + void MooeeInternalZAsm(const FermionField& in, FermionField& out, int LLs, int site, + Vector>& Matp, Vector>& Matm); + + virtual void RefreshShiftCoefficients(RealD new_shift); + + // Constructors + DomainWallEOFAFermion(GaugeField& _Umu, GridCartesian& FiveDimGrid, GridRedBlackCartesian& FiveDimRedBlackGrid, + GridCartesian& FourDimGrid, GridRedBlackCartesian& FourDimRedBlackGrid, + RealD _mq1, RealD _mq2, RealD _mq3, RealD _shift, int pm, + RealD _M5, const ImplParams& p=ImplParams()); + + protected: + void SetCoefficientsInternal(RealD zolo_hi, std::vector& gamma, RealD b, RealD c); + }; +}} + +#define INSTANTIATE_DPERP_DWF_EOFA(A)\ +template void DomainWallEOFAFermion::M5D(const FermionField& psi, const FermionField& phi, FermionField& chi, \ + std::vector& lower, std::vector& diag, std::vector& upper); \ +template void DomainWallEOFAFermion::M5Ddag(const FermionField& psi, const FermionField& phi, FermionField& chi, \ + std::vector& lower, std::vector& diag, std::vector& upper); \ +template void DomainWallEOFAFermion::MooeeInv(const FermionField& psi, FermionField& chi); \ +template void DomainWallEOFAFermion::MooeeInvDag(const FermionField& psi, FermionField& chi); + +#undef DOMAIN_WALL_EOFA_DPERP_DENSE +#define DOMAIN_WALL_EOFA_DPERP_CACHE +#undef DOMAIN_WALL_EOFA_DPERP_LINALG +#define 
DOMAIN_WALL_EOFA_DPERP_VEC + +#endif diff --git a/lib/qcd/action/fermion/DomainWallEOFAFermioncache.cc b/lib/qcd/action/fermion/DomainWallEOFAFermioncache.cc new file mode 100644 index 00000000..0b214d31 --- /dev/null +++ b/lib/qcd/action/fermion/DomainWallEOFAFermioncache.cc @@ -0,0 +1,248 @@ +/************************************************************************************* + +Grid physics library, www.github.com/paboyle/Grid + +Source file: ./lib/qcd/action/fermion/DomainWallEOFAFermioncache.cc + +Copyright (C) 2017 + +Author: Peter Boyle +Author: Peter Boyle +Author: Peter Boyle +Author: paboyle +Author: David Murphy + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 2 of the License, or +(at your option) any later version. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License along +with this program; if not, write to the Free Software Foundation, Inc., +51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + +See the full license in the file "LICENSE" in the top level distribution directory +*************************************************************************************/ +/* END LEGAL */ + +#include +#include + +namespace Grid { +namespace QCD { + + // FIXME -- make a version of these routines with site loop outermost for cache reuse. + + // Pminus fowards + // Pplus backwards.. + template + void DomainWallEOFAFermion::M5D(const FermionField& psi, const FermionField& phi, + FermionField& chi, std::vector& lower, std::vector& diag, std::vector& upper) + { + int Ls = this->Ls; + GridBase* grid = psi._grid; + + assert(phi.checkerboard == psi.checkerboard); + chi.checkerboard = psi.checkerboard; + // Flops = 6.0*(Nc*Ns) *Ls*vol + this->M5Dcalls++; + this->M5Dtime -= usecond(); + + parallel_for(int ss=0; ssoSites(); ss+=Ls){ // adds Ls + for(int s=0; sM5Dtime += usecond(); + } + + template + void DomainWallEOFAFermion::M5Ddag(const FermionField& psi, const FermionField& phi, + FermionField& chi, std::vector& lower, std::vector& diag, std::vector& upper) + { + int Ls = this->Ls; + GridBase* grid = psi._grid; + assert(phi.checkerboard == psi.checkerboard); + chi.checkerboard=psi.checkerboard; + + // Flops = 6.0*(Nc*Ns) *Ls*vol + this->M5Dcalls++; + this->M5Dtime -= usecond(); + + parallel_for(int ss=0; ssoSites(); ss+=Ls){ // adds Ls + auto tmp = psi._odata[0]; + for(int s=0; sM5Dtime += usecond(); + } + + template + void DomainWallEOFAFermion::MooeeInv(const FermionField& psi, FermionField& chi) + { + GridBase* grid = psi._grid; + int Ls = this->Ls; + + chi.checkerboard = psi.checkerboard; + + this->MooeeInvCalls++; + this->MooeeInvTime -= usecond(); + + parallel_for(int ss=0; ssoSites(); ss+=Ls){ // adds Ls + + auto tmp1 = psi._odata[0]; + auto tmp2 = psi._odata[0]; + + // flops = 12*2*Ls + 12*2*Ls + 3*12*Ls + 12*2*Ls = 12*Ls * (9) = 108*Ls flops + // Apply (L^{\prime})^{-1} + chi[ss] = psi[ss]; // chi[0]=psi[0] + for(int s=1; slee[s-1]*tmp1; + } + + // L_m^{-1} + for(int s=0; sleem[s]*tmp1; + } + + // U_m^{-1} D^{-1} + for(int s=0; sdee[s])*chi[ss+s] - (this->ueem[s]/this->dee[Ls])*tmp1; + } + spProj5m(tmp2, chi[ss+Ls-1]); + chi[ss+Ls-1] = (1.0/this->dee[Ls])*tmp1 + 
(1.0/this->dee[Ls-1])*tmp2; + + // Apply U^{-1} + for(int s=Ls-2; s>=0; s--){ + spProj5m(tmp1, chi[ss+s+1]); + chi[ss+s] = chi[ss+s] - this->uee[s]*tmp1; + } + } + + this->MooeeInvTime += usecond(); + } + + template + void DomainWallEOFAFermion::MooeeInvDag(const FermionField& psi, FermionField& chi) + { + GridBase* grid = psi._grid; + int Ls = this->Ls; + + assert(psi.checkerboard == psi.checkerboard); + chi.checkerboard = psi.checkerboard; + + std::vector ueec(Ls); + std::vector deec(Ls+1); + std::vector leec(Ls); + std::vector ueemc(Ls); + std::vector leemc(Ls); + + for(int s=0; suee[s]); + deec[s] = conjugate(this->dee[s]); + leec[s] = conjugate(this->lee[s]); + ueemc[s] = conjugate(this->ueem[s]); + leemc[s] = conjugate(this->leem[s]); + } + deec[Ls] = conjugate(this->dee[Ls]); + + this->MooeeInvCalls++; + this->MooeeInvTime -= usecond(); + + parallel_for(int ss=0; ssoSites(); ss+=Ls){ // adds Ls + + auto tmp1 = psi._odata[0]; + auto tmp2 = psi._odata[0]; + + // Apply (U^{\prime})^{-dagger} + chi[ss] = psi[ss]; + for(int s=1; s=0; s--){ + spProj5p(tmp1, chi[ss+s+1]); + chi[ss+s] = chi[ss+s] - leec[s]*tmp1; + } + } + + this->MooeeInvTime += usecond(); + } + + #ifdef DOMAIN_WALL_EOFA_DPERP_CACHE + + INSTANTIATE_DPERP_DWF_EOFA(WilsonImplF); + INSTANTIATE_DPERP_DWF_EOFA(WilsonImplD); + INSTANTIATE_DPERP_DWF_EOFA(GparityWilsonImplF); + INSTANTIATE_DPERP_DWF_EOFA(GparityWilsonImplD); + INSTANTIATE_DPERP_DWF_EOFA(ZWilsonImplF); + INSTANTIATE_DPERP_DWF_EOFA(ZWilsonImplD); + + INSTANTIATE_DPERP_DWF_EOFA(WilsonImplFH); + INSTANTIATE_DPERP_DWF_EOFA(WilsonImplDF); + INSTANTIATE_DPERP_DWF_EOFA(GparityWilsonImplFH); + INSTANTIATE_DPERP_DWF_EOFA(GparityWilsonImplDF); + INSTANTIATE_DPERP_DWF_EOFA(ZWilsonImplFH); + INSTANTIATE_DPERP_DWF_EOFA(ZWilsonImplDF); + + #endif + +}} diff --git a/lib/qcd/action/fermion/DomainWallEOFAFermiondense.cc b/lib/qcd/action/fermion/DomainWallEOFAFermiondense.cc new file mode 100644 index 00000000..c27074d9 --- /dev/null +++ b/lib/qcd/action/fermion/DomainWallEOFAFermiondense.cc @@ -0,0 +1,159 @@ +/************************************************************************************* + +Grid physics library, www.github.com/paboyle/Grid + +Source file: ./lib/qcd/action/fermion/DomainWallEOFAFermiondense.cc + +Copyright (C) 2017 + +Author: Peter Boyle +Author: Peter Boyle +Author: Peter Boyle +Author: paboyle +Author: David Murphy + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 2 of the License, or +(at your option) any later version. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License along +with this program; if not, write to the Free Software Foundation, Inc., +51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
+ +See the full license in the file "LICENSE" in the top level distribution directory +*************************************************************************************/ +/* END LEGAL */ + +#include +#include +#include + +namespace Grid { +namespace QCD { + + /* + * Dense matrix versions of routines + */ + template + void DomainWallEOFAFermion::MooeeInvDag(const FermionField& psi, FermionField& chi) + { + this->MooeeInternal(psi, chi, DaggerYes, InverseYes); + } + + template + void DomainWallEOFAFermion::MooeeInv(const FermionField& psi, FermionField& chi) + { + this->MooeeInternal(psi, chi, DaggerNo, InverseYes); + } + + template + void DomainWallEOFAFermion::MooeeInternal(const FermionField& psi, FermionField& chi, int dag, int inv) + { + int Ls = this->Ls; + int LLs = psi._grid->_rdimensions[0]; + int vol = psi._grid->oSites()/LLs; + + chi.checkerboard = psi.checkerboard; + + assert(Ls==LLs); + + Eigen::MatrixXd Pplus = Eigen::MatrixXd::Zero(Ls,Ls); + Eigen::MatrixXd Pminus = Eigen::MatrixXd::Zero(Ls,Ls); + + for(int s=0;sbee[s]; + Pminus(s,s) = this->bee[s]; + } + + for(int s=0; scee[s]; + } + + for(int s=0; scee[s+1]; + } + + Pplus (0,Ls-1) = this->dp; + Pminus(Ls-1,0) = this->dm; + + Eigen::MatrixXd PplusMat ; + Eigen::MatrixXd PminusMat; + + if(inv) { + PplusMat = Pplus.inverse(); + PminusMat = Pminus.inverse(); + } else { + PplusMat = Pplus; + PminusMat = Pminus; + } + + if(dag){ + PplusMat.adjointInPlace(); + PminusMat.adjointInPlace(); + } + + // For the non-vectorised s-direction this is simple + + for(auto site=0; site::MooeeInternal(const FermionField& psi, FermionField& chi, int dag, int inv); + template void DomainWallEOFAFermion::MooeeInternal(const FermionField& psi, FermionField& chi, int dag, int inv); + template void DomainWallEOFAFermion::MooeeInternal(const FermionField& psi, FermionField& chi, int dag, int inv); + template void DomainWallEOFAFermion::MooeeInternal(const FermionField& psi, FermionField& chi, int dag, int inv); + template void DomainWallEOFAFermion::MooeeInternal(const FermionField& psi, FermionField& chi, int dag, int inv); + template void DomainWallEOFAFermion::MooeeInternal(const FermionField& psi, FermionField& chi, int dag, int inv); + + INSTANTIATE_DPERP_DWF_EOFA(GparityWilsonImplFH); + INSTANTIATE_DPERP_DWF_EOFA(GparityWilsonImplDF); + INSTANTIATE_DPERP_DWF_EOFA(WilsonImplFH); + INSTANTIATE_DPERP_DWF_EOFA(WilsonImplDF); + INSTANTIATE_DPERP_DWF_EOFA(ZWilsonImplFH); + INSTANTIATE_DPERP_DWF_EOFA(ZWilsonImplDF); + + template void DomainWallEOFAFermion::MooeeInternal(const FermionField& psi, FermionField& chi, int dag, int inv); + template void DomainWallEOFAFermion::MooeeInternal(const FermionField& psi, FermionField& chi, int dag, int inv); + template void DomainWallEOFAFermion::MooeeInternal(const FermionField& psi, FermionField& chi, int dag, int inv); + template void DomainWallEOFAFermion::MooeeInternal(const FermionField& psi, FermionField& chi, int dag, int inv); + template void DomainWallEOFAFermion::MooeeInternal(const FermionField& psi, FermionField& chi, int dag, int inv); + template void DomainWallEOFAFermion::MooeeInternal(const FermionField& psi, FermionField& chi, int dag, int inv); + + #endif + +}} diff --git a/lib/qcd/action/fermion/DomainWallEOFAFermionssp.cc b/lib/qcd/action/fermion/DomainWallEOFAFermionssp.cc new file mode 100644 index 00000000..80a4bf09 --- /dev/null +++ b/lib/qcd/action/fermion/DomainWallEOFAFermionssp.cc @@ -0,0 +1,168 @@ 
+/************************************************************************************* + +Grid physics library, www.github.com/paboyle/Grid + +Source file: ./lib/qcd/action/fermion/DomainWallEOFAFermionssp.cc + +Copyright (C) 2017 + +Author: Peter Boyle +Author: Peter Boyle +Author: Peter Boyle +Author: paboyle +Author: David Murphy + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 2 of the License, or +(at your option) any later version. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License along +with this program; if not, write to the Free Software Foundation, Inc., +51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + +See the full license in the file "LICENSE" in the top level distribution directory +*************************************************************************************/ +/* END LEGAL */ + +#include +#include + +namespace Grid { +namespace QCD { + + // FIXME -- make a version of these routines with site loop outermost for cache reuse. + // Pminus fowards + // Pplus backwards + template + void DomainWallEOFAFermion::M5D(const FermionField& psi, const FermionField& phi, + FermionField& chi, std::vector& lower, std::vector& diag, std::vector& upper) + { + Coeff_t one(1.0); + int Ls = this->Ls; + for(int s=0; s + void DomainWallEOFAFermion::M5Ddag(const FermionField& psi, const FermionField& phi, + FermionField& chi, std::vector& lower, std::vector& diag, std::vector& upper) + { + Coeff_t one(1.0); + int Ls = this->Ls; + for(int s=0; s + void DomainWallEOFAFermion::MooeeInv(const FermionField& psi, FermionField& chi) + { + Coeff_t one(1.0); + Coeff_t czero(0.0); + chi.checkerboard = psi.checkerboard; + int Ls = this->Ls; + + FermionField tmp(psi._grid); + + // Apply (L^{\prime})^{-1} + axpby_ssp(chi, one, psi, czero, psi, 0, 0); // chi[0]=psi[0] + for(int s=1; slee[s-1], chi, s, s-1);// recursion Psi[s] -lee P_+ chi[s-1] + } + + // L_m^{-1} + for(int s=0; sleem[s], chi, Ls-1, s); + } + + // U_m^{-1} D^{-1} + for(int s=0; sdee[s], chi, -this->ueem[s]/this->dee[Ls], chi, s, Ls-1); + } + axpby_ssp_pminus(tmp, czero, chi, one/this->dee[Ls-1], chi, Ls-1, Ls-1); + axpby_ssp_pplus(chi, one, tmp, one/this->dee[Ls], chi, Ls-1, Ls-1); + + // Apply U^{-1} + for(int s=Ls-2; s>=0; s--){ + axpby_ssp_pminus(chi, one, chi, -this->uee[s], chi, s, s+1); // chi[Ls] + } + } + + template + void DomainWallEOFAFermion::MooeeInvDag(const FermionField& psi, FermionField& chi) + { + Coeff_t one(1.0); + Coeff_t czero(0.0); + chi.checkerboard = psi.checkerboard; + int Ls = this->Ls; + + FermionField tmp(psi._grid); + + // Apply (U^{\prime})^{-dagger} + axpby_ssp(chi, one, psi, czero, psi, 0, 0); // chi[0]=psi[0] + for(int s=1; suee[s-1]), chi, s, s-1); + } + + // U_m^{-\dagger} + for(int s=0; sueem[s]), chi, Ls-1, s); + } + + // L_m^{-\dagger} D^{-dagger} + for(int s=0; sdee[s]), chi, -conjugate(this->leem[s]/this->dee[Ls-1]), chi, s, Ls-1); + } + axpby_ssp_pminus(tmp, czero, chi, one/conjugate(this->dee[Ls-1]), chi, Ls-1, Ls-1); + axpby_ssp_pplus(chi, one, tmp, one/conjugate(this->dee[Ls]), chi, Ls-1, Ls-1); + + // Apply L^{-dagger} + for(int s=Ls-2; s>=0; s--){ + axpby_ssp_pplus(chi, one, chi, 
-conjugate(this->lee[s]), chi, s, s+1); // chi[Ls] + } + } + + #ifdef DOMAIN_WALL_EOFA_DPERP_LINALG + + INSTANTIATE_DPERP_DWF_EOFA(WilsonImplF); + INSTANTIATE_DPERP_DWF_EOFA(WilsonImplD); + INSTANTIATE_DPERP_DWF_EOFA(GparityWilsonImplF); + INSTANTIATE_DPERP_DWF_EOFA(GparityWilsonImplD); + INSTANTIATE_DPERP_DWF_EOFA(ZWilsonImplF); + INSTANTIATE_DPERP_DWF_EOFA(ZWilsonImplD); + + INSTANTIATE_DPERP_DWF_EOFA(WilsonImplFH); + INSTANTIATE_DPERP_DWF_EOFA(WilsonImplDF); + INSTANTIATE_DPERP_DWF_EOFA(GparityWilsonImplFH); + INSTANTIATE_DPERP_DWF_EOFA(GparityWilsonImplDF); + INSTANTIATE_DPERP_DWF_EOFA(ZWilsonImplFH); + INSTANTIATE_DPERP_DWF_EOFA(ZWilsonImplDF); + + #endif + +}} diff --git a/lib/qcd/action/fermion/DomainWallEOFAFermionvec.cc b/lib/qcd/action/fermion/DomainWallEOFAFermionvec.cc new file mode 100644 index 00000000..81ce448c --- /dev/null +++ b/lib/qcd/action/fermion/DomainWallEOFAFermionvec.cc @@ -0,0 +1,605 @@ +/************************************************************************************* + +Grid physics library, www.github.com/paboyle/Grid + +Source file: ./lib/qcd/action/fermion/DomainWallEOFAFermionvec.cc + +Copyright (C) 2017 + +Author: Peter Boyle +Author: Peter Boyle +Author: Peter Boyle +Author: paboyle +Author: David Murphy + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 2 of the License, or +(at your option) any later version. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License along +with this program; if not, write to the Free Software Foundation, Inc., +51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
+ +See the full license in the file "LICENSE" in the top level distribution directory +*************************************************************************************/ +/* END LEGAL */ + +#include +#include + +namespace Grid { +namespace QCD { + + /* + * Dense matrix versions of routines + */ + template + void DomainWallEOFAFermion::MooeeInvDag(const FermionField& psi, FermionField& chi) + { + this->MooeeInternal(psi, chi, DaggerYes, InverseYes); + } + + template + void DomainWallEOFAFermion::MooeeInv(const FermionField& psi, FermionField& chi) + { + this->MooeeInternal(psi, chi, DaggerNo, InverseYes); + } + + template + void DomainWallEOFAFermion::M5D(const FermionField& psi, const FermionField& phi, + FermionField& chi, std::vector& lower, std::vector& diag, std::vector& upper) + { + GridBase* grid = psi._grid; + int Ls = this->Ls; + int LLs = grid->_rdimensions[0]; + const int nsimd = Simd::Nsimd(); + + Vector > u(LLs); + Vector > l(LLs); + Vector > d(LLs); + + assert(Ls/LLs == nsimd); + assert(phi.checkerboard == psi.checkerboard); + + chi.checkerboard = psi.checkerboard; + + // just directly address via type pun + typedef typename Simd::scalar_type scalar_type; + scalar_type* u_p = (scalar_type*) &u[0]; + scalar_type* l_p = (scalar_type*) &l[0]; + scalar_type* d_p = (scalar_type*) &d[0]; + + for(int o=0;oM5Dcalls++; + this->M5Dtime -= usecond(); + + assert(Nc == 3); + + parallel_for(int ss=0; ssoSites(); ss+=LLs){ // adds LLs + + #if 0 + + alignas(64) SiteHalfSpinor hp; + alignas(64) SiteHalfSpinor hm; + alignas(64) SiteSpinor fp; + alignas(64) SiteSpinor fm; + + for(int v=0; v= v){ rotate(hm, hm, nsimd-1); } + + hp = 0.5*hp; + hm = 0.5*hm; + + spRecon5m(fp, hp); + spRecon5p(fm, hm); + + chi[ss+v] = d[v]*phi[ss+v]; + chi[ss+v] = chi[ss+v] + u[v]*fp; + chi[ss+v] = chi[ss+v] + l[v]*fm; + + } + + #else + + for(int v=0; v(hp_00.v); + hp_01.v = Optimization::Rotate::tRotate<2>(hp_01.v); + hp_02.v = Optimization::Rotate::tRotate<2>(hp_02.v); + hp_10.v = Optimization::Rotate::tRotate<2>(hp_10.v); + hp_11.v = Optimization::Rotate::tRotate<2>(hp_11.v); + hp_12.v = Optimization::Rotate::tRotate<2>(hp_12.v); + } + + if(vm >= v){ + hm_00.v = Optimization::Rotate::tRotate<2*Simd::Nsimd()-2>(hm_00.v); + hm_01.v = Optimization::Rotate::tRotate<2*Simd::Nsimd()-2>(hm_01.v); + hm_02.v = Optimization::Rotate::tRotate<2*Simd::Nsimd()-2>(hm_02.v); + hm_10.v = Optimization::Rotate::tRotate<2*Simd::Nsimd()-2>(hm_10.v); + hm_11.v = Optimization::Rotate::tRotate<2*Simd::Nsimd()-2>(hm_11.v); + hm_12.v = Optimization::Rotate::tRotate<2*Simd::Nsimd()-2>(hm_12.v); + } + + // Can force these to real arithmetic and save 2x. 
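+ // Layout of the unrolled block below: the recursion being vectorised is
+ //   chi[s] = d[s]*phi[s] + u[s]*(P_- psi)[s+1] + l[s]*(P_+ psi)[s-1].
+ // hp_* / hm_* hold the half spinors surviving the P_- / P_+ projections of
+ // the s+1 / s-1 neighbours; tRotate<2> shifts a SIMD word by one complex
+ // element (two reals) to fetch the neighbour from the adjacent lane when the
+ // fifth-dimension index wraps past a lane boundary (the vp <= v and vm >= v
+ // tests). d, u and l are real for standard domain wall parameters, which is
+ // the potential 2x saving the comment above refers to.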
+ Simd p_00 = switcheroo::mult(d[v]()()(), phi[ss+v]()(0)(0)) + switcheroo::mult(l[v]()()(), hm_00); + Simd p_01 = switcheroo::mult(d[v]()()(), phi[ss+v]()(0)(1)) + switcheroo::mult(l[v]()()(), hm_01); + Simd p_02 = switcheroo::mult(d[v]()()(), phi[ss+v]()(0)(2)) + switcheroo::mult(l[v]()()(), hm_02); + Simd p_10 = switcheroo::mult(d[v]()()(), phi[ss+v]()(1)(0)) + switcheroo::mult(l[v]()()(), hm_10); + Simd p_11 = switcheroo::mult(d[v]()()(), phi[ss+v]()(1)(1)) + switcheroo::mult(l[v]()()(), hm_11); + Simd p_12 = switcheroo::mult(d[v]()()(), phi[ss+v]()(1)(2)) + switcheroo::mult(l[v]()()(), hm_12); + Simd p_20 = switcheroo::mult(d[v]()()(), phi[ss+v]()(2)(0)) + switcheroo::mult(u[v]()()(), hp_00); + Simd p_21 = switcheroo::mult(d[v]()()(), phi[ss+v]()(2)(1)) + switcheroo::mult(u[v]()()(), hp_01); + Simd p_22 = switcheroo::mult(d[v]()()(), phi[ss+v]()(2)(2)) + switcheroo::mult(u[v]()()(), hp_02); + Simd p_30 = switcheroo::mult(d[v]()()(), phi[ss+v]()(3)(0)) + switcheroo::mult(u[v]()()(), hp_10); + Simd p_31 = switcheroo::mult(d[v]()()(), phi[ss+v]()(3)(1)) + switcheroo::mult(u[v]()()(), hp_11); + Simd p_32 = switcheroo::mult(d[v]()()(), phi[ss+v]()(3)(2)) + switcheroo::mult(u[v]()()(), hp_12); + + vstream(chi[ss+v]()(0)(0), p_00); + vstream(chi[ss+v]()(0)(1), p_01); + vstream(chi[ss+v]()(0)(2), p_02); + vstream(chi[ss+v]()(1)(0), p_10); + vstream(chi[ss+v]()(1)(1), p_11); + vstream(chi[ss+v]()(1)(2), p_12); + vstream(chi[ss+v]()(2)(0), p_20); + vstream(chi[ss+v]()(2)(1), p_21); + vstream(chi[ss+v]()(2)(2), p_22); + vstream(chi[ss+v]()(3)(0), p_30); + vstream(chi[ss+v]()(3)(1), p_31); + vstream(chi[ss+v]()(3)(2), p_32); + } + + #endif + } + + this->M5Dtime += usecond(); + } + + template + void DomainWallEOFAFermion::M5Ddag(const FermionField& psi, const FermionField& phi, + FermionField& chi, std::vector& lower, std::vector& diag, std::vector& upper) + { + GridBase* grid = psi._grid; + int Ls = this->Ls; + int LLs = grid->_rdimensions[0]; + int nsimd = Simd::Nsimd(); + + Vector > u(LLs); + Vector > l(LLs); + Vector > d(LLs); + + assert(Ls/LLs == nsimd); + assert(phi.checkerboard == psi.checkerboard); + + chi.checkerboard = psi.checkerboard; + + // just directly address via type pun + typedef typename Simd::scalar_type scalar_type; + scalar_type* u_p = (scalar_type*) &u[0]; + scalar_type* l_p = (scalar_type*) &l[0]; + scalar_type* d_p = (scalar_type*) &d[0]; + + for(int o=0; oM5Dcalls++; + this->M5Dtime -= usecond(); + + parallel_for(int ss=0; ssoSites(); ss+=LLs){ // adds LLs + + #if 0 + + alignas(64) SiteHalfSpinor hp; + alignas(64) SiteHalfSpinor hm; + alignas(64) SiteSpinor fp; + alignas(64) SiteSpinor fm; + + for(int v=0; v= v){ rotate(hm, hm, nsimd-1); } + + hp = hp*0.5; + hm = hm*0.5; + spRecon5p(fp, hp); + spRecon5m(fm, hm); + + chi[ss+v] = d[v]*phi[ss+v]+u[v]*fp; + chi[ss+v] = chi[ss+v] +l[v]*fm; + } + + #else + + for(int v=0; v(hp_00.v); + hp_01.v = Optimization::Rotate::tRotate<2>(hp_01.v); + hp_02.v = Optimization::Rotate::tRotate<2>(hp_02.v); + hp_10.v = Optimization::Rotate::tRotate<2>(hp_10.v); + hp_11.v = Optimization::Rotate::tRotate<2>(hp_11.v); + hp_12.v = Optimization::Rotate::tRotate<2>(hp_12.v); + } + + if(vm >= v){ + hm_00.v = Optimization::Rotate::tRotate<2*Simd::Nsimd()-2>(hm_00.v); + hm_01.v = Optimization::Rotate::tRotate<2*Simd::Nsimd()-2>(hm_01.v); + hm_02.v = Optimization::Rotate::tRotate<2*Simd::Nsimd()-2>(hm_02.v); + hm_10.v = Optimization::Rotate::tRotate<2*Simd::Nsimd()-2>(hm_10.v); + hm_11.v = Optimization::Rotate::tRotate<2*Simd::Nsimd()-2>(hm_11.v); + 
hm_12.v = Optimization::Rotate::tRotate<2*Simd::Nsimd()-2>(hm_12.v); + } + + Simd p_00 = switcheroo::mult(d[v]()()(), phi[ss+v]()(0)(0)) + switcheroo::mult(u[v]()()(), hp_00); + Simd p_01 = switcheroo::mult(d[v]()()(), phi[ss+v]()(0)(1)) + switcheroo::mult(u[v]()()(), hp_01); + Simd p_02 = switcheroo::mult(d[v]()()(), phi[ss+v]()(0)(2)) + switcheroo::mult(u[v]()()(), hp_02); + Simd p_10 = switcheroo::mult(d[v]()()(), phi[ss+v]()(1)(0)) + switcheroo::mult(u[v]()()(), hp_10); + Simd p_11 = switcheroo::mult(d[v]()()(), phi[ss+v]()(1)(1)) + switcheroo::mult(u[v]()()(), hp_11); + Simd p_12 = switcheroo::mult(d[v]()()(), phi[ss+v]()(1)(2)) + switcheroo::mult(u[v]()()(), hp_12); + Simd p_20 = switcheroo::mult(d[v]()()(), phi[ss+v]()(2)(0)) + switcheroo::mult(l[v]()()(), hm_00); + Simd p_21 = switcheroo::mult(d[v]()()(), phi[ss+v]()(2)(1)) + switcheroo::mult(l[v]()()(), hm_01); + Simd p_22 = switcheroo::mult(d[v]()()(), phi[ss+v]()(2)(2)) + switcheroo::mult(l[v]()()(), hm_02); + Simd p_30 = switcheroo::mult(d[v]()()(), phi[ss+v]()(3)(0)) + switcheroo::mult(l[v]()()(), hm_10); + Simd p_31 = switcheroo::mult(d[v]()()(), phi[ss+v]()(3)(1)) + switcheroo::mult(l[v]()()(), hm_11); + Simd p_32 = switcheroo::mult(d[v]()()(), phi[ss+v]()(3)(2)) + switcheroo::mult(l[v]()()(), hm_12); + + vstream(chi[ss+v]()(0)(0), p_00); + vstream(chi[ss+v]()(0)(1), p_01); + vstream(chi[ss+v]()(0)(2), p_02); + vstream(chi[ss+v]()(1)(0), p_10); + vstream(chi[ss+v]()(1)(1), p_11); + vstream(chi[ss+v]()(1)(2), p_12); + vstream(chi[ss+v]()(2)(0), p_20); + vstream(chi[ss+v]()(2)(1), p_21); + vstream(chi[ss+v]()(2)(2), p_22); + vstream(chi[ss+v]()(3)(0), p_30); + vstream(chi[ss+v]()(3)(1), p_31); + vstream(chi[ss+v]()(3)(2), p_32); + } + #endif + + } + + this->M5Dtime += usecond(); + } + + #ifdef AVX512 + #include + #include + #include + #endif + + template + void DomainWallEOFAFermion::MooeeInternalAsm(const FermionField& psi, FermionField& chi, + int LLs, int site, Vector >& Matp, Vector >& Matm) + { + #ifndef AVX512 + { + SiteHalfSpinor BcastP; + SiteHalfSpinor BcastM; + SiteHalfSpinor SiteChiP; + SiteHalfSpinor SiteChiM; + + // Ls*Ls * 2 * 12 * vol flops + for(int s1=0; s1); + for(int s1=0; s1 + void DomainWallEOFAFermion::MooeeInternalZAsm(const FermionField& psi, FermionField& chi, + int LLs, int site, Vector >& Matp, Vector >& Matm) + { + std::cout << "Error: zMobius not implemented for EOFA" << std::endl; + exit(-1); + }; + + template + void DomainWallEOFAFermion::MooeeInternal(const FermionField& psi, FermionField& chi, int dag, int inv) + { + int Ls = this->Ls; + int LLs = psi._grid->_rdimensions[0]; + int vol = psi._grid->oSites()/LLs; + + chi.checkerboard = psi.checkerboard; + + Vector > Matp; + Vector > Matm; + Vector > *_Matp; + Vector > *_Matm; + + // MooeeInternalCompute(dag,inv,Matp,Matm); + if(inv && dag){ + _Matp = &this->MatpInvDag; + _Matm = &this->MatmInvDag; + } + + if(inv && (!dag)){ + _Matp = &this->MatpInv; + _Matm = &this->MatmInv; + } + + if(!inv){ + MooeeInternalCompute(dag, inv, Matp, Matm); + _Matp = &Matp; + _Matm = &Matm; + } + + assert(_Matp->size() == Ls*LLs); + + this->MooeeInvCalls++; + this->MooeeInvTime -= usecond(); + + if(switcheroo::iscomplex()){ + parallel_for(auto site=0; siteMooeeInvTime += usecond(); + } + + #ifdef DOMAIN_WALL_EOFA_DPERP_VEC + + INSTANTIATE_DPERP_DWF_EOFA(DomainWallVec5dImplD); + INSTANTIATE_DPERP_DWF_EOFA(DomainWallVec5dImplF); + INSTANTIATE_DPERP_DWF_EOFA(ZDomainWallVec5dImplD); + INSTANTIATE_DPERP_DWF_EOFA(ZDomainWallVec5dImplF); + + 
INSTANTIATE_DPERP_DWF_EOFA(DomainWallVec5dImplDF); + INSTANTIATE_DPERP_DWF_EOFA(DomainWallVec5dImplFH); + INSTANTIATE_DPERP_DWF_EOFA(ZDomainWallVec5dImplDF); + INSTANTIATE_DPERP_DWF_EOFA(ZDomainWallVec5dImplFH); + + template void DomainWallEOFAFermion::MooeeInternal(const FermionField& psi, FermionField& chi, int dag, int inv); + template void DomainWallEOFAFermion::MooeeInternal(const FermionField& psi, FermionField& chi, int dag, int inv); + template void DomainWallEOFAFermion::MooeeInternal(const FermionField& psi, FermionField& chi, int dag, int inv); + template void DomainWallEOFAFermion::MooeeInternal(const FermionField& psi, FermionField& chi, int dag, int inv); + + template void DomainWallEOFAFermion::MooeeInternal(const FermionField& psi, FermionField& chi, int dag, int inv); + template void DomainWallEOFAFermion::MooeeInternal(const FermionField& psi, FermionField& chi, int dag, int inv); + template void DomainWallEOFAFermion::MooeeInternal(const FermionField& psi, FermionField& chi, int dag, int inv); + template void DomainWallEOFAFermion::MooeeInternal(const FermionField& psi, FermionField& chi, int dag, int inv); + + #endif + +}} diff --git a/lib/qcd/action/fermion/Fermion.h b/lib/qcd/action/fermion/Fermion.h index 0f803f44..ad2f383d 100644 --- a/lib/qcd/action/fermion/Fermion.h +++ b/lib/qcd/action/fermion/Fermion.h @@ -1,6 +1,6 @@ /************************************************************************************* - Grid physics library, www.github.com/paboyle/Grid + Grid physics library, www.github.com/paboyle/Grid Source file: ./lib/qcd/action/fermion/Fermion_base_aggregate.h @@ -38,6 +38,8 @@ Author: Peter Boyle // - ContinuedFractionFermion5D.cc // - WilsonFermion.cc // - WilsonKernels.cc +// - DomainWallEOFAFermion.cc +// - MobiusEOFAFermion.cc // // The explicit instantiation is only avoidable if we move this source to headers and end up with include/parse/recompile // for EVERY .cc file. 
This define centralises the list and restores global push of impl cases @@ -55,8 +57,9 @@ Author: Peter Boyle #include #include // Cayley types #include -#include +#include #include +#include #include #include #include @@ -113,6 +116,14 @@ typedef DomainWallFermion DomainWallFermionRL; typedef DomainWallFermion DomainWallFermionFH; typedef DomainWallFermion DomainWallFermionDF; +typedef DomainWallEOFAFermion DomainWallEOFAFermionR; +typedef DomainWallEOFAFermion DomainWallEOFAFermionF; +typedef DomainWallEOFAFermion DomainWallEOFAFermionD; + +typedef DomainWallEOFAFermion DomainWallEOFAFermionRL; +typedef DomainWallEOFAFermion DomainWallEOFAFermionFH; +typedef DomainWallEOFAFermion DomainWallEOFAFermionDF; + typedef MobiusFermion MobiusFermionR; typedef MobiusFermion MobiusFermionF; typedef MobiusFermion MobiusFermionD; @@ -121,6 +132,14 @@ typedef MobiusFermion MobiusFermionRL; typedef MobiusFermion MobiusFermionFH; typedef MobiusFermion MobiusFermionDF; +typedef MobiusEOFAFermion MobiusEOFAFermionR; +typedef MobiusEOFAFermion MobiusEOFAFermionF; +typedef MobiusEOFAFermion MobiusEOFAFermionD; + +typedef MobiusEOFAFermion MobiusEOFAFermionRL; +typedef MobiusEOFAFermion MobiusEOFAFermionFH; +typedef MobiusEOFAFermion MobiusEOFAFermionDF; + typedef ZMobiusFermion ZMobiusFermionR; typedef ZMobiusFermion ZMobiusFermionF; typedef ZMobiusFermion ZMobiusFermionD; @@ -129,7 +148,7 @@ typedef ZMobiusFermion ZMobiusFermionRL; typedef ZMobiusFermion ZMobiusFermionFH; typedef ZMobiusFermion ZMobiusFermionDF; -// Ls vectorised +// Ls vectorised typedef DomainWallFermion DomainWallFermionVec5dR; typedef DomainWallFermion DomainWallFermionVec5dF; typedef DomainWallFermion DomainWallFermionVec5dD; @@ -138,6 +157,14 @@ typedef DomainWallFermion DomainWallFermionVec5dRL; typedef DomainWallFermion DomainWallFermionVec5dFH; typedef DomainWallFermion DomainWallFermionVec5dDF; +typedef DomainWallEOFAFermion DomainWallEOFAFermionVec5dR; +typedef DomainWallEOFAFermion DomainWallEOFAFermionVec5dF; +typedef DomainWallEOFAFermion DomainWallEOFAFermionVec5dD; + +typedef DomainWallEOFAFermion DomainWallEOFAFermionVec5dRL; +typedef DomainWallEOFAFermion DomainWallEOFAFermionVec5dFH; +typedef DomainWallEOFAFermion DomainWallEOFAFermionVec5dDF; + typedef MobiusFermion MobiusFermionVec5dR; typedef MobiusFermion MobiusFermionVec5dF; typedef MobiusFermion MobiusFermionVec5dD; @@ -146,6 +173,14 @@ typedef MobiusFermion MobiusFermionVec5dRL; typedef MobiusFermion MobiusFermionVec5dFH; typedef MobiusFermion MobiusFermionVec5dDF; +typedef MobiusEOFAFermion MobiusEOFAFermionVec5dR; +typedef MobiusEOFAFermion MobiusEOFAFermionVec5dF; +typedef MobiusEOFAFermion MobiusEOFAFermionVec5dD; + +typedef MobiusEOFAFermion MobiusEOFAFermionVec5dRL; +typedef MobiusEOFAFermion MobiusEOFAFermionVec5dFH; +typedef MobiusEOFAFermion MobiusEOFAFermionVec5dDF; + typedef ZMobiusFermion ZMobiusFermionVec5dR; typedef ZMobiusFermion ZMobiusFermionVec5dF; typedef ZMobiusFermion ZMobiusFermionVec5dD; @@ -206,6 +241,14 @@ typedef DomainWallFermion GparityDomainWallFermionRL; typedef DomainWallFermion GparityDomainWallFermionFH; typedef DomainWallFermion GparityDomainWallFermionDF; +typedef DomainWallEOFAFermion GparityDomainWallEOFAFermionR; +typedef DomainWallEOFAFermion GparityDomainWallEOFAFermionF; +typedef DomainWallEOFAFermion GparityDomainWallEOFAFermionD; + +typedef DomainWallEOFAFermion GparityDomainWallEOFAFermionRL; +typedef DomainWallEOFAFermion GparityDomainWallEOFAFermionFH; +typedef DomainWallEOFAFermion GparityDomainWallEOFAFermionDF; + 
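+// A usage sketch for the EOFA aliases above (illustrative only; it assumes the
+// DomainWallEOFAFermion constructor mirrors the MobiusEOFAFermion grid/mass
+// argument list further down this patch, without the Mobius b,c parameters,
+// and the Umu/FGrid/FrbGrid/UGrid/UrbGrid names are placeholders):
+//
+//   GparityDomainWallEOFAFermionR Lop(Umu, *FGrid, *FrbGrid, *UGrid, *UrbGrid,
+//                                     mq1, mq2, mq3, shift, pm, M5);
+//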
typedef WilsonTMFermion GparityWilsonTMFermionR; typedef WilsonTMFermion GparityWilsonTMFermionF; typedef WilsonTMFermion GparityWilsonTMFermionD; @@ -222,6 +265,14 @@ typedef MobiusFermion GparityMobiusFermionRL; typedef MobiusFermion GparityMobiusFermionFH; typedef MobiusFermion GparityMobiusFermionDF; +typedef MobiusEOFAFermion GparityMobiusEOFAFermionR; +typedef MobiusEOFAFermion GparityMobiusEOFAFermionF; +typedef MobiusEOFAFermion GparityMobiusEOFAFermionD; + +typedef MobiusEOFAFermion GparityMobiusEOFAFermionRL; +typedef MobiusEOFAFermion GparityMobiusEOFAFermionFH; +typedef MobiusEOFAFermion GparityMobiusEOFAFermionDF; + typedef ImprovedStaggeredFermion ImprovedStaggeredFermionR; typedef ImprovedStaggeredFermion ImprovedStaggeredFermionF; typedef ImprovedStaggeredFermion ImprovedStaggeredFermionD; diff --git a/lib/qcd/action/fermion/MobiusEOFAFermion.cc b/lib/qcd/action/fermion/MobiusEOFAFermion.cc new file mode 100644 index 00000000..0344afbf --- /dev/null +++ b/lib/qcd/action/fermion/MobiusEOFAFermion.cc @@ -0,0 +1,502 @@ +/************************************************************************************* + +Grid physics library, www.github.com/paboyle/Grid + +Source file: ./lib/qcd/action/fermion/MobiusEOFAFermion.cc + +Copyright (C) 2017 + +Author: Peter Boyle +Author: Peter Boyle +Author: Peter Boyle +Author: paboyle +Author: David Murphy + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 2 of the License, or +(at your option) any later version. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License along +with this program; if not, write to the Free Software Foundation, Inc., +51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
+ +See the full license in the file "LICENSE" in the top level distribution directory +*************************************************************************************/ +/* END LEGAL */ + +#include +#include +#include + +namespace Grid { +namespace QCD { + + template + MobiusEOFAFermion::MobiusEOFAFermion( + GaugeField &_Umu, + GridCartesian &FiveDimGrid, + GridRedBlackCartesian &FiveDimRedBlackGrid, + GridCartesian &FourDimGrid, + GridRedBlackCartesian &FourDimRedBlackGrid, + RealD _mq1, RealD _mq2, RealD _mq3, + RealD _shift, int _pm, RealD _M5, + RealD _b, RealD _c, const ImplParams &p) : + AbstractEOFAFermion(_Umu, FiveDimGrid, FiveDimRedBlackGrid, + FourDimGrid, FourDimRedBlackGrid, _mq1, _mq2, _mq3, + _shift, _pm, _M5, _b, _c, p) + { + int Ls = this->Ls; + + RealD eps = 1.0; + Approx::zolotarev_data *zdata = Approx::higham(eps, this->Ls); + assert(zdata->n == this->Ls); + + std::cout << GridLogMessage << "MobiusEOFAFermion (b=" << _b << + ",c=" << _c << ") with Ls=" << Ls << std::endl; + this->SetCoefficientsTanh(zdata, _b, _c); + std::cout << GridLogMessage << "EOFA parameters: (mq1=" << _mq1 << + ",mq2=" << _mq2 << ",mq3=" << _mq3 << ",shift=" << _shift << + ",pm=" << _pm << ")" << std::endl; + + Approx::zolotarev_free(zdata); + + if(_shift != 0.0){ + SetCoefficientsPrecondShiftOps(); + } else { + Mooee_shift.resize(Ls, 0.0); + MooeeInv_shift_lc.resize(Ls, 0.0); + MooeeInv_shift_norm.resize(Ls, 0.0); + MooeeInvDag_shift_lc.resize(Ls, 0.0); + MooeeInvDag_shift_norm.resize(Ls, 0.0); + } + } + + /**************************************************************** + * Additional EOFA operators only called outside the inverter. + * Since speed is not essential, simple axpby-style + * implementations should be fine. + ***************************************************************/ + template + void MobiusEOFAFermion::Omega(const FermionField& psi, FermionField& Din, int sign, int dag) + { + int Ls = this->Ls; + RealD alpha = this->alpha; + + Din = zero; + if((sign == 1) && (dag == 0)) { // \Omega_{+} + for(int s=0; s + void MobiusEOFAFermion::Dtilde(const FermionField& psi, FermionField& chi) + { + int Ls = this->Ls; + RealD b = 0.5 * ( 1.0 + this->alpha ); + RealD c = 0.5 * ( 1.0 - this->alpha ); + RealD mq1 = this->mq1; + + for(int s=0; s + void MobiusEOFAFermion::DtildeInv(const FermionField& psi, FermionField& chi) + { + int Ls = this->Ls; + RealD m = this->mq1; + RealD c = 0.5 * this->alpha; + RealD d = 0.5; + + RealD DtInv_p(0.0), DtInv_m(0.0); + RealD N = std::pow(c+d,Ls) + m*std::pow(c-d,Ls); + FermionField tmp(this->FermionGrid()); + + for(int s=0; s sp) ? 
0.0 : std::pow(-1.0,sp-s) * std::pow(c-d,sp-s) / std::pow(c+d,sp-s+1); + + if(sp == 0){ + axpby_ssp_pplus (tmp, 0.0, tmp, DtInv_p, psi, s, sp); + axpby_ssp_pminus(tmp, 0.0, tmp, DtInv_m, psi, s, sp); + } else { + axpby_ssp_pplus (tmp, 1.0, tmp, DtInv_p, psi, s, sp); + axpby_ssp_pminus(tmp, 1.0, tmp, DtInv_m, psi, s, sp); + } + + }} + } + + /*****************************************************************************************************/ + + template + RealD MobiusEOFAFermion::M(const FermionField& psi, FermionField& chi) + { + int Ls = this->Ls; + + FermionField Din(psi._grid); + + this->Meooe5D(psi, Din); + this->DW(Din, chi, DaggerNo); + axpby(chi, 1.0, 1.0, chi, psi); + this->M5D(psi, chi); + return(norm2(chi)); + } + + template + RealD MobiusEOFAFermion::Mdag(const FermionField& psi, FermionField& chi) + { + int Ls = this->Ls; + + FermionField Din(psi._grid); + + this->DW(psi, Din, DaggerYes); + this->MeooeDag5D(Din, chi); + this->M5Ddag(psi, chi); + axpby(chi, 1.0, 1.0, chi, psi); + return(norm2(chi)); + } + + /******************************************************************** + * Performance critical fermion operators called inside the inverter + ********************************************************************/ + + template + void MobiusEOFAFermion::M5D(const FermionField& psi, FermionField& chi) + { + int Ls = this->Ls; + + std::vector diag(Ls,1.0); + std::vector upper(Ls,-1.0); upper[Ls-1] = this->mq1; + std::vector lower(Ls,-1.0); lower[0] = this->mq1; + + // no shift term + if(this->shift == 0.0){ this->M5D(psi, chi, chi, lower, diag, upper); } + + // fused M + shift operation + else{ this->M5D_shift(psi, chi, chi, lower, diag, upper, Mooee_shift); } + } + + template + void MobiusEOFAFermion::M5Ddag(const FermionField& psi, FermionField& chi) + { + int Ls = this->Ls; + + std::vector diag(Ls,1.0); + std::vector upper(Ls,-1.0); upper[Ls-1] = this->mq1; + std::vector lower(Ls,-1.0); lower[0] = this->mq1; + + // no shift term + if(this->shift == 0.0){ this->M5Ddag(psi, chi, chi, lower, diag, upper); } + + // fused M + shift operation + else{ this->M5Ddag_shift(psi, chi, chi, lower, diag, upper, Mooee_shift); } + } + + // half checkerboard operations + template + void MobiusEOFAFermion::Mooee(const FermionField& psi, FermionField& chi) + { + int Ls = this->Ls; + + // coefficients of Mooee + std::vector diag = this->bee; + std::vector upper(Ls); + std::vector lower(Ls); + for(int s=0; scee[s]; + lower[s] = -this->cee[s]; + } + upper[Ls-1] *= -this->mq1; + lower[0] *= -this->mq1; + + // no shift term + if(this->shift == 0.0){ this->M5D(psi, psi, chi, lower, diag, upper); } + + // fused M + shift operation + else { this->M5D_shift(psi, psi, chi, lower, diag, upper, Mooee_shift); } + } + + template + void MobiusEOFAFermion::MooeeDag(const FermionField& psi, FermionField& chi) + { + int Ls = this->Ls; + + // coefficients of MooeeDag + std::vector diag = this->bee; + std::vector upper(Ls); + std::vector lower(Ls); + for(int s=0; scee[s+1]; + lower[s] = this->mq1*this->cee[Ls-1]; + } else if(s==(Ls-1)) { + upper[s] = this->mq1*this->cee[0]; + lower[s] = -this->cee[s-1]; + } else { + upper[s] = -this->cee[s+1]; + lower[s] = -this->cee[s-1]; + } + } + + // no shift term + if(this->shift == 0.0){ this->M5Ddag(psi, psi, chi, lower, diag, upper); } + + // fused M + shift operation + else{ this->M5Ddag_shift(psi, psi, chi, lower, diag, upper, Mooee_shift); } + } + + /****************************************************************************************/ + + // Computes coefficients 
for applying Cayley preconditioned shift operators + // (Mooee + \Delta) --> Mooee_shift + // (Mooee + \Delta)^{-1} --> MooeeInv_shift_lc, MooeeInv_shift_norm + // (Mooee + \Delta)^{-dag} --> MooeeInvDag_shift_lc, MooeeInvDag_shift_norm + // For the latter two cases, the operation takes the form + // [ (Mooee + \Delta)^{-1} \psi ]_{i} = Mooee_{ij} \psi_{j} + + // ( MooeeInv_shift_norm )_{i} ( \sum_{j} [ MooeeInv_shift_lc ]_{j} P_{pm} \psi_{j} ) + template + void MobiusEOFAFermion::SetCoefficientsPrecondShiftOps() + { + int Ls = this->Ls; + int pm = this->pm; + RealD alpha = this->alpha; + RealD k = this->k; + RealD mq1 = this->mq1; + RealD shift = this->shift; + + // Initialize + Mooee_shift.resize(Ls); + MooeeInv_shift_lc.resize(Ls); + MooeeInv_shift_norm.resize(Ls); + MooeeInvDag_shift_lc.resize(Ls); + MooeeInvDag_shift_norm.resize(Ls); + + // Construct Mooee_shift + int idx(0); + Coeff_t N = ( (pm == 1) ? 1.0 : -1.0 ) * (2.0*shift*k) * + ( std::pow(alpha+1.0,Ls) + mq1*std::pow(alpha-1.0,Ls) ); + for(int s=0; s d = Mooee_shift; + std::vector u(Ls,0.0); + std::vector y(Ls,0.0); + std::vector q(Ls,0.0); + if(pm == 1){ u[0] = 1.0; } + else{ u[Ls-1] = 1.0; } + + // Tridiagonal matrix algorithm + Sherman-Morrison formula + // + // We solve + // ( Mooee' + u \otimes v ) MooeeInvDag_shift_lc = Mooee_shift + // where Mooee' is the tridiagonal part of Mooee_{+}, and + // u = (1,0,...,0) and v = (0,...,0,mq1*cee[0]) are chosen + // so that the outer-product u \otimes v gives the (0,Ls-1) + // entry of Mooee_{+}. + // + // We do this as two solves: Mooee'*y = d and Mooee'*q = u, + // and then construct the solution to the original system + // MooeeInvDag_shift_lc = y - / ( 1 + ) q + if(pm == 1){ + for(int s=1; scee[s] / this->bee[s-1]; + d[s] -= m*d[s-1]; + u[s] -= m*u[s-1]; + } + } + y[Ls-1] = d[Ls-1] / this->bee[Ls-1]; + q[Ls-1] = u[Ls-1] / this->bee[Ls-1]; + for(int s=Ls-2; s>=0; --s){ + if(pm == 1){ + y[s] = d[s] / this->bee[s]; + q[s] = u[s] / this->bee[s]; + } else { + y[s] = ( d[s] + this->cee[s]*y[s+1] ) / this->bee[s]; + q[s] = ( u[s] + this->cee[s]*q[s+1] ) / this->bee[s]; + } + } + + // Construct MooeeInvDag_shift_lc + for(int s=0; scee[0]*y[Ls-1] / + (1.0+mq1*this->cee[0]*q[Ls-1]) * q[s]; + } else { + MooeeInvDag_shift_lc[s] = y[s] - mq1*this->cee[Ls-1]*y[0] / + (1.0+mq1*this->cee[Ls-1]*q[0]) * q[s]; + } + } + + // Compute remaining coefficients + N = (pm == 1) ? 
(1.0 + MooeeInvDag_shift_lc[Ls-1]) : (1.0 + MooeeInvDag_shift_lc[0]); + for(int s=0; sbee[s],s) * std::pow(this->cee[s],Ls-1-s); } + else{ MooeeInv_shift_lc[s] = std::pow(this->bee[s],Ls-1-s) * std::pow(this->cee[s],s); } + + // MooeeInv_shift_norm + MooeeInv_shift_norm[s] = -MooeeInvDag_shift_lc[s] / + ( std::pow(this->bee[s],Ls) + mq1*std::pow(this->cee[s],Ls) ) / N; + + // MooeeInvDag_shift_norm + if(pm == 1){ MooeeInvDag_shift_norm[s] = -std::pow(this->bee[s],s) * std::pow(this->cee[s],Ls-1-s) / + ( std::pow(this->bee[s],Ls) + mq1*std::pow(this->cee[s],Ls) ) / N; } + else{ MooeeInvDag_shift_norm[s] = -std::pow(this->bee[s],Ls-1-s) * std::pow(this->cee[s],s) / + ( std::pow(this->bee[s],Ls) + mq1*std::pow(this->cee[s],Ls) ) / N; } + } + } + } + + // Recompute coefficients for a different value of shift constant + template + void MobiusEOFAFermion::RefreshShiftCoefficients(RealD new_shift) + { + this->shift = new_shift; + if(new_shift != 0.0){ + SetCoefficientsPrecondShiftOps(); + } else { + int Ls = this->Ls; + Mooee_shift.resize(Ls,0.0); + MooeeInv_shift_lc.resize(Ls,0.0); + MooeeInv_shift_norm.resize(Ls,0.0); + MooeeInvDag_shift_lc.resize(Ls,0.0); + MooeeInvDag_shift_norm.resize(Ls,0.0); + } + } + + template + void MobiusEOFAFermion::MooeeInternalCompute(int dag, int inv, + Vector >& Matp, Vector >& Matm) + { + int Ls = this->Ls; + + GridBase* grid = this->FermionRedBlackGrid(); + int LLs = grid->_rdimensions[0]; + + if(LLs == Ls){ return; } // Not vectorised in 5th direction + + Eigen::MatrixXcd Pplus = Eigen::MatrixXcd::Zero(Ls,Ls); + Eigen::MatrixXcd Pminus = Eigen::MatrixXcd::Zero(Ls,Ls); + + for(int s=0; sbee[s]; + Pminus(s,s) = this->bee[s]; + } + + for(int s=0; scee[s]; + Pplus(s+1,s) = -this->cee[s+1]; + } + + Pplus (0,Ls-1) = this->mq1*this->cee[0]; + Pminus(Ls-1,0) = this->mq1*this->cee[Ls-1]; + + if(this->shift != 0.0){ + RealD c = 0.5 * this->alpha; + RealD d = 0.5; + RealD N = this->shift * this->k * ( std::pow(c+d,Ls) + this->mq1*std::pow(c-d,Ls) ); + if(this->pm == 1) { + for(int s=0; s::iscomplex()) { + sp[l] = PplusMat (l*istride+s1*ostride,s2); + sm[l] = PminusMat(l*istride+s1*ostride,s2); + } else { + // if real + scalar_type tmp; + tmp = PplusMat (l*istride+s1*ostride,s2); + sp[l] = scalar_type(tmp.real(),tmp.real()); + tmp = PminusMat(l*istride+s1*ostride,s2); + sm[l] = scalar_type(tmp.real(),tmp.real()); + } + } + Matp[LLs*s2+s1] = Vp; + Matm[LLs*s2+s1] = Vm; + }} + } + + FermOpTemplateInstantiate(MobiusEOFAFermion); + GparityFermOpTemplateInstantiate(MobiusEOFAFermion); + +}} diff --git a/lib/qcd/action/fermion/MobiusEOFAFermion.h b/lib/qcd/action/fermion/MobiusEOFAFermion.h new file mode 100644 index 00000000..519b49e7 --- /dev/null +++ b/lib/qcd/action/fermion/MobiusEOFAFermion.h @@ -0,0 +1,133 @@ +/************************************************************************************* + +Grid physics library, www.github.com/paboyle/Grid + +Source file: ./lib/qcd/action/fermion/MobiusEOFAFermion.h + +Copyright (C) 2017 + +Author: Peter Boyle +Author: Peter Boyle +Author: David Murphy + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 2 of the License, or +(at your option) any later version. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. 
+ +You should have received a copy of the GNU General Public License along +with this program; if not, write to the Free Software Foundation, Inc., +51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + +See the full license in the file "LICENSE" in the top level distribution directory +*************************************************************************************/ +/* END LEGAL */ +#ifndef GRID_QCD_MOBIUS_EOFA_FERMION_H +#define GRID_QCD_MOBIUS_EOFA_FERMION_H + +#include + +namespace Grid { +namespace QCD { + + template + class MobiusEOFAFermion : public AbstractEOFAFermion + { + public: + INHERIT_IMPL_TYPES(Impl); + + public: + // Shift operator coefficients for red-black preconditioned Mobius EOFA + std::vector Mooee_shift; + std::vector MooeeInv_shift_lc; + std::vector MooeeInv_shift_norm; + std::vector MooeeInvDag_shift_lc; + std::vector MooeeInvDag_shift_norm; + + virtual void Instantiatable(void) {}; + + // EOFA-specific operations + virtual void Omega (const FermionField& in, FermionField& out, int sign, int dag); + virtual void Dtilde (const FermionField& in, FermionField& out); + virtual void DtildeInv (const FermionField& in, FermionField& out); + + // override multiply + virtual RealD M (const FermionField& in, FermionField& out); + virtual RealD Mdag (const FermionField& in, FermionField& out); + + // half checkerboard operations + virtual void Mooee (const FermionField& in, FermionField& out); + virtual void MooeeDag (const FermionField& in, FermionField& out); + virtual void MooeeInv (const FermionField& in, FermionField& out); + virtual void MooeeInv_shift (const FermionField& in, FermionField& out); + virtual void MooeeInvDag (const FermionField& in, FermionField& out); + virtual void MooeeInvDag_shift(const FermionField& in, FermionField& out); + + virtual void M5D (const FermionField& psi, FermionField& chi); + virtual void M5Ddag (const FermionField& psi, FermionField& chi); + + ///////////////////////////////////////////////////// + // Instantiate different versions depending on Impl + ///////////////////////////////////////////////////// + void M5D(const FermionField& psi, const FermionField& phi, FermionField& chi, + std::vector& lower, std::vector& diag, std::vector& upper); + + void M5D_shift(const FermionField& psi, const FermionField& phi, FermionField& chi, + std::vector& lower, std::vector& diag, std::vector& upper, + std::vector& shift_coeffs); + + void M5Ddag(const FermionField& psi, const FermionField& phi, FermionField& chi, + std::vector& lower, std::vector& diag, std::vector& upper); + + void M5Ddag_shift(const FermionField& psi, const FermionField& phi, FermionField& chi, + std::vector& lower, std::vector& diag, std::vector& upper, + std::vector& shift_coeffs); + + void MooeeInternal(const FermionField& in, FermionField& out, int dag, int inv); + + void MooeeInternalCompute(int dag, int inv, Vector>& Matp, Vector>& Matm); + + void MooeeInternalAsm(const FermionField& in, FermionField& out, int LLs, int site, + Vector>& Matp, Vector>& Matm); + + void MooeeInternalZAsm(const FermionField& in, FermionField& out, int LLs, int site, + Vector>& Matp, Vector>& Matm); + + virtual void RefreshShiftCoefficients(RealD new_shift); + + // Constructors + MobiusEOFAFermion(GaugeField& _Umu, GridCartesian& FiveDimGrid, GridRedBlackCartesian& FiveDimRedBlackGrid, + GridCartesian& FourDimGrid, GridRedBlackCartesian& FourDimRedBlackGrid, + RealD _mq1, RealD _mq2, RealD _mq3, RealD _shift, int pm, + RealD _M5, RealD _b, RealD _c, const ImplParams& 
p=ImplParams()); + + protected: + void SetCoefficientsPrecondShiftOps(void); + }; +}} + +#define INSTANTIATE_DPERP_MOBIUS_EOFA(A)\ +template void MobiusEOFAFermion::M5D(const FermionField& psi, const FermionField& phi, FermionField& chi, \ + std::vector& lower, std::vector& diag, std::vector& upper); \ +template void MobiusEOFAFermion::M5D_shift(const FermionField& psi, const FermionField& phi, FermionField& chi, \ + std::vector& lower, std::vector& diag, std::vector& upper, std::vector& shift_coeffs); \ +template void MobiusEOFAFermion::M5Ddag(const FermionField& psi, const FermionField& phi, FermionField& chi, \ + std::vector& lower, std::vector& diag, std::vector& upper); \ +template void MobiusEOFAFermion::M5Ddag_shift(const FermionField& psi, const FermionField& phi, FermionField& chi, \ + std::vector& lower, std::vector& diag, std::vector& upper, std::vector& shift_coeffs); \ +template void MobiusEOFAFermion::MooeeInv(const FermionField& psi, FermionField& chi); \ +template void MobiusEOFAFermion::MooeeInv_shift(const FermionField& psi, FermionField& chi); \ +template void MobiusEOFAFermion::MooeeInvDag(const FermionField& psi, FermionField& chi); \ +template void MobiusEOFAFermion::MooeeInvDag_shift(const FermionField& psi, FermionField& chi); + +#undef MOBIUS_EOFA_DPERP_DENSE +#define MOBIUS_EOFA_DPERP_CACHE +#undef MOBIUS_EOFA_DPERP_LINALG +#define MOBIUS_EOFA_DPERP_VEC + +#endif diff --git a/lib/qcd/action/fermion/MobiusEOFAFermioncache.cc b/lib/qcd/action/fermion/MobiusEOFAFermioncache.cc new file mode 100644 index 00000000..420f6390 --- /dev/null +++ b/lib/qcd/action/fermion/MobiusEOFAFermioncache.cc @@ -0,0 +1,429 @@ +/************************************************************************************* + +Grid physics library, www.github.com/paboyle/Grid + +Source file: ./lib/qcd/action/fermion/MobiusEOFAFermioncache.cc + +Copyright (C) 2017 + +Author: Peter Boyle +Author: Peter Boyle +Author: Peter Boyle +Author: paboyle +Author: David Murphy + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 2 of the License, or +(at your option) any later version. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License along +with this program; if not, write to the Free Software Foundation, Inc., +51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + +See the full license in the file "LICENSE" in the top level distribution directory +*************************************************************************************/ +/* END LEGAL */ + +#include +#include + +namespace Grid { +namespace QCD { + + // FIXME -- make a version of these routines with site loop outermost for cache reuse. 
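+ // All of the routines in this file apply tridiagonal-plus-corner Ls x Ls
+ // matrices in the fifth dimension,
+ //   chi[s] = diag[s]*phi[s] + upper[s]*(P_- psi)[s+1] + lower[s]*(P_+ psi)[s-1],
+ // with the s+/-1 indices taken mod Ls, so the wrap-around entries carry the
+ // mq1 boundary couplings. The *_shift variants fuse the EOFA rank-one term
+ //   shift_coeffs[s] * P_{+/-} psi[shift_s],  shift_s = (pm == 1) ? Ls-1 : 0,
+ // into the same site loop (transposed in the dagger versions), avoiding a
+ // second pass over the field.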
+ + template + void MobiusEOFAFermion::M5D(const FermionField &psi, const FermionField &phi, FermionField &chi, + std::vector &lower, std::vector &diag, std::vector &upper) + { + int Ls = this->Ls; + GridBase *grid = psi._grid; + + assert(phi.checkerboard == psi.checkerboard); + chi.checkerboard = psi.checkerboard; + + // Flops = 6.0*(Nc*Ns) *Ls*vol + this->M5Dcalls++; + this->M5Dtime -= usecond(); + + parallel_for(int ss=0; ssoSites(); ss+=Ls){ + for(int s=0; sM5Dtime += usecond(); + } + + template + void MobiusEOFAFermion::M5D_shift(const FermionField &psi, const FermionField &phi, FermionField &chi, + std::vector &lower, std::vector &diag, std::vector &upper, + std::vector &shift_coeffs) + { + int Ls = this->Ls; + int shift_s = (this->pm == 1) ? (Ls-1) : 0; // s-component modified by shift operator + GridBase *grid = psi._grid; + + assert(phi.checkerboard == psi.checkerboard); + chi.checkerboard = psi.checkerboard; + + // Flops = 6.0*(Nc*Ns) *Ls*vol + this->M5Dcalls++; + this->M5Dtime -= usecond(); + + parallel_for(int ss=0; ssoSites(); ss+=Ls){ + for(int s=0; spm == 1){ spProj5p(tmp, psi._odata[ss+shift_s]); } + else{ spProj5m(tmp, psi._odata[ss+shift_s]); } + chi[ss+s] = chi[ss+s] + shift_coeffs[s]*tmp; + } + } + + this->M5Dtime += usecond(); + } + + template + void MobiusEOFAFermion::M5Ddag(const FermionField &psi, const FermionField &phi, FermionField &chi, + std::vector &lower, std::vector &diag, std::vector &upper) + { + int Ls = this->Ls; + GridBase *grid = psi._grid; + + assert(phi.checkerboard == psi.checkerboard); + chi.checkerboard = psi.checkerboard; + + // Flops = 6.0*(Nc*Ns) *Ls*vol + this->M5Dcalls++; + this->M5Dtime -= usecond(); + + parallel_for(int ss=0; ssoSites(); ss+=Ls){ + auto tmp = psi._odata[0]; + for(int s=0; sM5Dtime += usecond(); + } + + template + void MobiusEOFAFermion::M5Ddag_shift(const FermionField &psi, const FermionField &phi, FermionField &chi, + std::vector &lower, std::vector &diag, std::vector &upper, + std::vector &shift_coeffs) + { + int Ls = this->Ls; + int shift_s = (this->pm == 1) ? 
(Ls-1) : 0; // s-component modified by shift operator + GridBase *grid = psi._grid; + + assert(phi.checkerboard == psi.checkerboard); + chi.checkerboard = psi.checkerboard; + + // Flops = 6.0*(Nc*Ns) *Ls*vol + this->M5Dcalls++; + this->M5Dtime -= usecond(); + + parallel_for(int ss=0; ssoSites(); ss+=Ls){ + chi[ss+Ls-1] = zero; + auto tmp = psi._odata[0]; + for(int s=0; spm == 1){ spProj5p(tmp, psi._odata[ss+s]); } + else{ spProj5m(tmp, psi._odata[ss+s]); } + chi[ss+shift_s] = chi[ss+shift_s] + shift_coeffs[s]*tmp; + } + } + + this->M5Dtime += usecond(); + } + + template + void MobiusEOFAFermion::MooeeInv(const FermionField &psi, FermionField &chi) + { + if(this->shift != 0.0){ MooeeInv_shift(psi,chi); return; } + + GridBase *grid = psi._grid; + int Ls = this->Ls; + + chi.checkerboard = psi.checkerboard; + + this->MooeeInvCalls++; + this->MooeeInvTime -= usecond(); + + parallel_for(int ss=0; ssoSites(); ss+=Ls){ + + auto tmp = psi._odata[0]; + + // Apply (L^{\prime})^{-1} + chi[ss] = psi[ss]; // chi[0]=psi[0] + for(int s=1; slee[s-1]*tmp; + } + + // L_m^{-1} + for(int s=0; sleem[s]*tmp; + } + + // U_m^{-1} D^{-1} + for(int s=0; sdee[s])*chi[ss+s] - (this->ueem[s]/this->dee[Ls-1])*tmp; + } + chi[ss+Ls-1] = (1.0/this->dee[Ls-1])*chi[ss+Ls-1]; + + // Apply U^{-1} + for(int s=Ls-2; s>=0; s--){ + spProj5m(tmp, chi[ss+s+1]); + chi[ss+s] = chi[ss+s] - this->uee[s]*tmp; + } + } + + this->MooeeInvTime += usecond(); + } + + template + void MobiusEOFAFermion::MooeeInv_shift(const FermionField &psi, FermionField &chi) + { + GridBase *grid = psi._grid; + int Ls = this->Ls; + + chi.checkerboard = psi.checkerboard; + + this->MooeeInvCalls++; + this->MooeeInvTime -= usecond(); + + parallel_for(int ss=0; ssoSites(); ss+=Ls){ + + auto tmp1 = psi._odata[0]; + auto tmp2 = psi._odata[0]; + auto tmp2_spProj = psi._odata[0]; + + // Apply (L^{\prime})^{-1} and accumulate MooeeInv_shift_lc[j]*psi[j] in tmp2 + chi[ss] = psi[ss]; // chi[0]=psi[0] + tmp2 = MooeeInv_shift_lc[0]*psi[ss]; + for(int s=1; slee[s-1]*tmp1; + tmp2 = tmp2 + MooeeInv_shift_lc[s]*psi[ss+s]; + } + if(this->pm == 1){ spProj5p(tmp2_spProj, tmp2);} + else{ spProj5m(tmp2_spProj, tmp2); } + + // L_m^{-1} + for(int s=0; sleem[s]*tmp1; + } + + // U_m^{-1} D^{-1} + for(int s=0; sdee[s])*chi[ss+s] - (this->ueem[s]/this->dee[Ls-1])*tmp1; + } + // chi[ss+Ls-1] = (1.0/this->dee[Ls-1])*chi[ss+Ls-1] + MooeeInv_shift_norm[Ls-1]*tmp2_spProj; + chi[ss+Ls-1] = (1.0/this->dee[Ls-1])*chi[ss+Ls-1]; + spProj5m(tmp1, chi[ss+Ls-1]); + chi[ss+Ls-1] = chi[ss+Ls-1] + MooeeInv_shift_norm[Ls-1]*tmp2_spProj; + + // Apply U^{-1} and add shift term + for(int s=Ls-2; s>=0; s--){ + chi[ss+s] = chi[ss+s] - this->uee[s]*tmp1; + spProj5m(tmp1, chi[ss+s]); + chi[ss+s] = chi[ss+s] + MooeeInv_shift_norm[s]*tmp2_spProj; + } + } + + this->MooeeInvTime += usecond(); + } + + template + void MobiusEOFAFermion::MooeeInvDag(const FermionField &psi, FermionField &chi) + { + if(this->shift != 0.0){ MooeeInvDag_shift(psi,chi); return; } + + GridBase *grid = psi._grid; + int Ls = this->Ls; + + chi.checkerboard = psi.checkerboard; + + this->MooeeInvCalls++; + this->MooeeInvTime -= usecond(); + + parallel_for(int ss=0; ssoSites(); ss+=Ls){ + + auto tmp = psi._odata[0]; + + // Apply (U^{\prime})^{-dag} + chi[ss] = psi[ss]; + for(int s=1; suee[s-1]*tmp; + } + + // U_m^{-\dag} + for(int s=0; sueem[s]*tmp; + } + + // L_m^{-\dag} D^{-dag} + for(int s=0; sdee[s])*chi[ss+s] - (this->leem[s]/this->dee[Ls-1])*tmp; + } + chi[ss+Ls-1] = (1.0/this->dee[Ls-1])*chi[ss+Ls-1]; + + // Apply L^{-dag} + for(int 
s=Ls-2; s>=0; s--){ + spProj5p(tmp, chi[ss+s+1]); + chi[ss+s] = chi[ss+s] - this->lee[s]*tmp; + } + } + + this->MooeeInvTime += usecond(); + } + + template + void MobiusEOFAFermion::MooeeInvDag_shift(const FermionField &psi, FermionField &chi) + { + GridBase *grid = psi._grid; + int Ls = this->Ls; + + chi.checkerboard = psi.checkerboard; + + this->MooeeInvCalls++; + this->MooeeInvTime -= usecond(); + + parallel_for(int ss=0; ssoSites(); ss+=Ls){ + + auto tmp1 = psi._odata[0]; + auto tmp2 = psi._odata[0]; + auto tmp2_spProj = psi._odata[0]; + + // Apply (U^{\prime})^{-dag} and accumulate MooeeInvDag_shift_lc[j]*psi[j] in tmp2 + chi[ss] = psi[ss]; + tmp2 = MooeeInvDag_shift_lc[0]*psi[ss]; + for(int s=1; suee[s-1]*tmp1; + tmp2 = tmp2 + MooeeInvDag_shift_lc[s]*psi[ss+s]; + } + if(this->pm == 1){ spProj5p(tmp2_spProj, tmp2);} + else{ spProj5m(tmp2_spProj, tmp2); } + + // U_m^{-\dag} + for(int s=0; sueem[s]*tmp1; + } + + // L_m^{-\dag} D^{-dag} + for(int s=0; sdee[s])*chi[ss+s] - (this->leem[s]/this->dee[Ls-1])*tmp1; + } + chi[ss+Ls-1] = (1.0/this->dee[Ls-1])*chi[ss+Ls-1]; + spProj5p(tmp1, chi[ss+Ls-1]); + chi[ss+Ls-1] = chi[ss+Ls-1] + MooeeInvDag_shift_norm[Ls-1]*tmp2_spProj; + + // Apply L^{-dag} + for(int s=Ls-2; s>=0; s--){ + chi[ss+s] = chi[ss+s] - this->lee[s]*tmp1; + spProj5p(tmp1, chi[ss+s]); + chi[ss+s] = chi[ss+s] + MooeeInvDag_shift_norm[s]*tmp2_spProj; + } + } + + this->MooeeInvTime += usecond(); + } + + #ifdef MOBIUS_EOFA_DPERP_CACHE + + INSTANTIATE_DPERP_MOBIUS_EOFA(WilsonImplF); + INSTANTIATE_DPERP_MOBIUS_EOFA(WilsonImplD); + INSTANTIATE_DPERP_MOBIUS_EOFA(GparityWilsonImplF); + INSTANTIATE_DPERP_MOBIUS_EOFA(GparityWilsonImplD); + INSTANTIATE_DPERP_MOBIUS_EOFA(ZWilsonImplF); + INSTANTIATE_DPERP_MOBIUS_EOFA(ZWilsonImplD); + + INSTANTIATE_DPERP_MOBIUS_EOFA(WilsonImplFH); + INSTANTIATE_DPERP_MOBIUS_EOFA(WilsonImplDF); + INSTANTIATE_DPERP_MOBIUS_EOFA(GparityWilsonImplFH); + INSTANTIATE_DPERP_MOBIUS_EOFA(GparityWilsonImplDF); + INSTANTIATE_DPERP_MOBIUS_EOFA(ZWilsonImplFH); + INSTANTIATE_DPERP_MOBIUS_EOFA(ZWilsonImplDF); + + #endif + +}} diff --git a/lib/qcd/action/fermion/MobiusEOFAFermiondense.cc b/lib/qcd/action/fermion/MobiusEOFAFermiondense.cc new file mode 100644 index 00000000..d66b8cd9 --- /dev/null +++ b/lib/qcd/action/fermion/MobiusEOFAFermiondense.cc @@ -0,0 +1,184 @@ +/************************************************************************************* + +Grid physics library, www.github.com/paboyle/Grid + +Source file: ./lib/qcd/action/fermion/MobiusEOFAFermiondense.cc + +Copyright (C) 2017 + +Author: Peter Boyle +Author: Peter Boyle +Author: Peter Boyle +Author: paboyle +Author: David Murphy + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 2 of the License, or +(at your option) any later version. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License along +with this program; if not, write to the Free Software Foundation, Inc., +51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
+ +See the full license in the file "LICENSE" in the top level distribution directory +*************************************************************************************/ +/* END LEGAL */ + +#include +#include +#include + +namespace Grid { +namespace QCD { + + /* + * Dense matrix versions of routines + */ + template + void MobiusEOFAFermion::MooeeInv(const FermionField& psi, FermionField& chi) + { + this->MooeeInternal(psi, chi, DaggerNo, InverseYes); + } + + template + void MobiusEOFAFermion::MooeeInv_shift(const FermionField& psi, FermionField& chi) + { + this->MooeeInternal(psi, chi, DaggerNo, InverseYes); + } + + template + void MobiusEOFAFermion::MooeeInvDag(const FermionField& psi, FermionField& chi) + { + this->MooeeInternal(psi, chi, DaggerYes, InverseYes); + } + + template + void MobiusEOFAFermion::MooeeInvDag_shift(const FermionField& psi, FermionField& chi) + { + this->MooeeInternal(psi, chi, DaggerYes, InverseYes); + } + + template + void MobiusEOFAFermion::MooeeInternal(const FermionField& psi, FermionField& chi, int dag, int inv) + { + int Ls = this->Ls; + int LLs = psi._grid->_rdimensions[0]; + int vol = psi._grid->oSites()/LLs; + + int pm = this->pm; + RealD shift = this->shift; + RealD alpha = this->alpha; + RealD k = this->k; + RealD mq1 = this->mq1; + + chi.checkerboard = psi.checkerboard; + + assert(Ls==LLs); + + Eigen::MatrixXd Pplus = Eigen::MatrixXd::Zero(Ls,Ls); + Eigen::MatrixXd Pminus = Eigen::MatrixXd::Zero(Ls,Ls); + + for(int s=0;sbee[s]; + Pminus(s,s) = this->bee[s]; + } + + for(int s=0; scee[s]; + } + + for(int s=0; scee[s+1]; + } + Pplus (0,Ls-1) = mq1*this->cee[0]; + Pminus(Ls-1,0) = mq1*this->cee[Ls-1]; + + if(shift != 0.0){ + Coeff_t N = 2.0 * ( std::pow(alpha+1.0,Ls) + mq1*std::pow(alpha-1.0,Ls) ); + for(int s=0; s::MooeeInternal(const FermionField& psi, FermionField& chi, int dag, int inv); + template void MobiusEOFAFermion::MooeeInternal(const FermionField& psi, FermionField& chi, int dag, int inv); + template void MobiusEOFAFermion::MooeeInternal(const FermionField& psi, FermionField& chi, int dag, int inv); + template void MobiusEOFAFermion::MooeeInternal(const FermionField& psi, FermionField& chi, int dag, int inv); + template void MobiusEOFAFermion::MooeeInternal(const FermionField& psi, FermionField& chi, int dag, int inv); + template void MobiusEOFAFermion::MooeeInternal(const FermionField& psi, FermionField& chi, int dag, int inv); + + INSTANTIATE_DPERP_MOBIUS_EOFA(GparityWilsonImplFH); + INSTANTIATE_DPERP_MOBIUS_EOFA(GparityWilsonImplDF); + INSTANTIATE_DPERP_MOBIUS_EOFA(WilsonImplFH); + INSTANTIATE_DPERP_MOBIUS_EOFA(WilsonImplDF); + INSTANTIATE_DPERP_MOBIUS_EOFA(ZWilsonImplFH); + INSTANTIATE_DPERP_MOBIUS_EOFA(ZWilsonImplDF); + + template void MobiusEOFAFermion::MooeeInternal(const FermionField& psi, FermionField& chi, int dag, int inv); + template void MobiusEOFAFermion::MooeeInternal(const FermionField& psi, FermionField& chi, int dag, int inv); + template void MobiusEOFAFermion::MooeeInternal(const FermionField& psi, FermionField& chi, int dag, int inv); + template void MobiusEOFAFermion::MooeeInternal(const FermionField& psi, FermionField& chi, int dag, int inv); + template void MobiusEOFAFermion::MooeeInternal(const FermionField& psi, FermionField& chi, int dag, int inv); + template void MobiusEOFAFermion::MooeeInternal(const FermionField& psi, FermionField& chi, int dag, int inv); + + #endif + +}} diff --git a/lib/qcd/action/fermion/MobiusEOFAFermionssp.cc b/lib/qcd/action/fermion/MobiusEOFAFermionssp.cc new file mode 100644 index 
00000000..c86bb995 --- /dev/null +++ b/lib/qcd/action/fermion/MobiusEOFAFermionssp.cc @@ -0,0 +1,290 @@ +/************************************************************************************* + +Grid physics library, www.github.com/paboyle/Grid + +Source file: ./lib/qcd/action/fermion/MobiusEOFAFermionssp.cc + +Copyright (C) 2017 + +Author: Peter Boyle +Author: Peter Boyle +Author: Peter Boyle +Author: paboyle +Author: David Murphy + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 2 of the License, or +(at your option) any later version. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License along +with this program; if not, write to the Free Software Foundation, Inc., +51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + +See the full license in the file "LICENSE" in the top level distribution directory +*************************************************************************************/ +/* END LEGAL */ + +#include +#include + +namespace Grid { +namespace QCD { + + // FIXME -- make a version of these routines with site loop outermost for cache reuse. + // Pminus forwards + // Pplus backwards + template + void MobiusEOFAFermion::M5D(const FermionField& psi, const FermionField& phi, + FermionField& chi, std::vector& lower, std::vector& diag, std::vector& upper) + { + Coeff_t one(1.0); + int Ls = this->Ls; + for(int s=0; s + void MobiusEOFAFermion::M5D_shift(const FermionField& psi, const FermionField& phi, + FermionField& chi, std::vector& lower, std::vector& diag, std::vector& upper, + std::vector& shift_coeffs) + { + Coeff_t one(1.0); + int Ls = this->Ls; + for(int s=0; spm == 1){ axpby_ssp_pplus(chi, one, chi, shift_coeffs[s], psi, s, Ls-1); } + else{ axpby_ssp_pminus(chi, one, chi, shift_coeffs[s], psi, s, 0); } + } + } + + template + void MobiusEOFAFermion::M5Ddag(const FermionField& psi, const FermionField& phi, + FermionField& chi, std::vector& lower, std::vector& diag, std::vector& upper) + { + Coeff_t one(1.0); + int Ls = this->Ls; + for(int s=0; s + void MobiusEOFAFermion::M5Ddag_shift(const FermionField& psi, const FermionField& phi, + FermionField& chi, std::vector& lower, std::vector& diag, std::vector& upper, + std::vector& shift_coeffs) + { + Coeff_t one(1.0); + int Ls = this->Ls; + for(int s=0; spm == 1){ axpby_ssp_pplus(chi, one, chi, shift_coeffs[s], psi, Ls-1, s); } + else{ axpby_ssp_pminus(chi, one, chi, shift_coeffs[s], psi, 0, s); } + } + } + + template + void MobiusEOFAFermion::MooeeInv(const FermionField& psi, FermionField& chi) + { + if(this->shift != 0.0){ MooeeInv_shift(psi,chi); return; } + + Coeff_t one(1.0); + Coeff_t czero(0.0); + chi.checkerboard = psi.checkerboard; + int Ls = this->Ls; + + // Apply (L^{\prime})^{-1} + axpby_ssp(chi, one, psi, czero, psi, 0, 0); // chi[0]=psi[0] + for(int s=1; slee[s-1], chi, s, s-1);// recursion Psi[s] -lee P_+ chi[s-1] + } + + // L_m^{-1} + for(int s=0; sleem[s], chi, Ls-1, s); + } + + // U_m^{-1} D^{-1} + for(int s=0; sdee[s], chi, -this->ueem[s]/this->dee[Ls-1], chi, s, Ls-1); + } + axpby_ssp(chi, one/this->dee[Ls-1], chi, czero, chi, Ls-1, Ls-1); + + // Apply U^{-1} + for(int s=Ls-2; s>=0; s--){ + axpby_ssp_pminus(chi, one,
chi, -this->uee[s], chi, s, s+1); // chi[Ls] + } + } + + template + void MobiusEOFAFermion::MooeeInv_shift(const FermionField& psi, FermionField& chi) + { + Coeff_t one(1.0); + Coeff_t czero(0.0); + chi.checkerboard = psi.checkerboard; + int Ls = this->Ls; + + FermionField tmp(psi._grid); + + // Apply (L^{\prime})^{-1} + axpby_ssp(chi, one, psi, czero, psi, 0, 0); // chi[0]=psi[0] + axpby_ssp(tmp, czero, tmp, this->MooeeInv_shift_lc[0], psi, 0, 0); + for(int s=1; slee[s-1], chi, s, s-1);// recursion Psi[s] -lee P_+ chi[s-1] + axpby_ssp(tmp, one, tmp, this->MooeeInv_shift_lc[s], psi, 0, s); + } + + // L_m^{-1} + for(int s=0; sleem[s], chi, Ls-1, s); + } + + // U_m^{-1} D^{-1} + for(int s=0; sdee[s], chi, -this->ueem[s]/this->dee[Ls-1], chi, s, Ls-1); + } + axpby_ssp(chi, one/this->dee[Ls-1], chi, czero, chi, Ls-1, Ls-1); + + // Apply U^{-1} and add shift term + if(this->pm == 1){ axpby_ssp_pplus(chi, one, chi, this->MooeeInv_shift_norm[Ls-1], tmp, Ls-1, 0); } + else{ axpby_ssp_pminus(chi, one, chi, this->MooeeInv_shift_norm[Ls-1], tmp, Ls-1, 0); } + for(int s=Ls-2; s>=0; s--){ + axpby_ssp_pminus(chi, one, chi, -this->uee[s], chi, s, s+1); // chi[Ls] + if(this->pm == 1){ axpby_ssp_pplus(chi, one, chi, this->MooeeInv_shift_norm[s], tmp, s, 0); } + else{ axpby_ssp_pminus(chi, one, chi, this->MooeeInv_shift_norm[s], tmp, s, 0); } + } + } + + template + void MobiusEOFAFermion::MooeeInvDag(const FermionField& psi, FermionField& chi) + { + if(this->shift != 0.0){ MooeeInvDag_shift(psi,chi); return; } + + Coeff_t one(1.0); + Coeff_t czero(0.0); + chi.checkerboard = psi.checkerboard; + int Ls = this->Ls; + + // Apply (U^{\prime})^{-dagger} + axpby_ssp(chi, one, psi, czero, psi, 0, 0); // chi[0]=psi[0] + for(int s=1; suee[s-1]), chi, s, s-1); + } + + // U_m^{-\dagger} + for(int s=0; sueem[s]), chi, Ls-1, s); + } + + // L_m^{-\dagger} D^{-dagger} + for(int s=0; sdee[s]), chi, -conjugate(this->leem[s]/this->dee[Ls-1]), chi, s, Ls-1); + } + axpby_ssp(chi, one/conjugate(this->dee[Ls-1]), chi, czero, chi, Ls-1, Ls-1); + + // Apply L^{-dagger} + for(int s=Ls-2; s>=0; s--){ + axpby_ssp_pplus(chi, one, chi, -conjugate(this->lee[s]), chi, s, s+1); // chi[Ls] + } + } + + template + void MobiusEOFAFermion::MooeeInvDag_shift(const FermionField& psi, FermionField& chi) + { + Coeff_t one(1.0); + Coeff_t czero(0.0); + chi.checkerboard = psi.checkerboard; + int Ls = this->Ls; + + FermionField tmp(psi._grid); + + // Apply (U^{\prime})^{-dagger} and accumulate (MooeeInvDag_shift_lc)_{j} \psi_{j} in tmp[0] + axpby_ssp(chi, one, psi, czero, psi, 0, 0); // chi[0]=psi[0] + axpby_ssp(tmp, czero, tmp, this->MooeeInvDag_shift_lc[0], psi, 0, 0); + for(int s=1; suee[s-1]), chi, s, s-1); + axpby_ssp(tmp, one, tmp, this->MooeeInvDag_shift_lc[s], psi, 0, s); + } + + // U_m^{-\dagger} + for(int s=0; sueem[s]), chi, Ls-1, s); + } + + // L_m^{-\dagger} D^{-dagger} + for(int s=0; sdee[s]), chi, -conjugate(this->leem[s]/this->dee[Ls-1]), chi, s, Ls-1); + } + axpby_ssp(chi, one/conjugate(this->dee[Ls-1]), chi, czero, chi, Ls-1, Ls-1); + + // Apply L^{-dagger} and add shift + if(this->pm == 1){ axpby_ssp_pplus(chi, one, chi, this->MooeeInvDag_shift_norm[Ls-1], tmp, Ls-1, 0); } + else{ axpby_ssp_pminus(chi, one, chi, this->MooeeInvDag_shift_norm[Ls-1], tmp, Ls-1, 0); } + for(int s=Ls-2; s>=0; s--){ + axpby_ssp_pplus(chi, one, chi, -conjugate(this->lee[s]), chi, s, s+1); // chi[Ls] + if(this->pm == 1){ axpby_ssp_pplus(chi, one, chi, this->MooeeInvDag_shift_norm[s], tmp, s, 0); } + else{ axpby_ssp_pminus(chi, one, chi, 
this->MooeeInvDag_shift_norm[s], tmp, s, 0); } + } + } + + #ifdef MOBIUS_EOFA_DPERP_LINALG + + INSTANTIATE_DPERP_MOBIUS_EOFA(WilsonImplF); + INSTANTIATE_DPERP_MOBIUS_EOFA(WilsonImplD); + INSTANTIATE_DPERP_MOBIUS_EOFA(GparityWilsonImplF); + INSTANTIATE_DPERP_MOBIUS_EOFA(GparityWilsonImplD); + INSTANTIATE_DPERP_MOBIUS_EOFA(ZWilsonImplF); + INSTANTIATE_DPERP_MOBIUS_EOFA(ZWilsonImplD); + + INSTANTIATE_DPERP_MOBIUS_EOFA(WilsonImplFH); + INSTANTIATE_DPERP_MOBIUS_EOFA(WilsonImplDF); + INSTANTIATE_DPERP_MOBIUS_EOFA(GparityWilsonImplFH); + INSTANTIATE_DPERP_MOBIUS_EOFA(GparityWilsonImplDF); + INSTANTIATE_DPERP_MOBIUS_EOFA(ZWilsonImplFH); + INSTANTIATE_DPERP_MOBIUS_EOFA(ZWilsonImplDF); + + #endif + +}} diff --git a/lib/qcd/action/fermion/MobiusEOFAFermionvec.cc b/lib/qcd/action/fermion/MobiusEOFAFermionvec.cc new file mode 100644 index 00000000..c4eaf0f3 --- /dev/null +++ b/lib/qcd/action/fermion/MobiusEOFAFermionvec.cc @@ -0,0 +1,983 @@ +/************************************************************************************* + +Grid physics library, www.github.com/paboyle/Grid + +Source file: ./lib/qcd/action/fermion/MobiusEOFAFermionvec.cc + +Copyright (C) 2017 + +Author: Peter Boyle +Author: Peter Boyle +Author: Peter Boyle +Author: paboyle +Author: David Murphy + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 2 of the License, or +(at your option) any later version. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License along +with this program; if not, write to the Free Software Foundation, Inc., +51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
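The _shift variants above fold the EOFA shift into the inverse: the shift adds a rank-one term in the s index (tensored with P_+ or P_-) to Moo, so the shifted inverse takes the Sherman-Morrison form. Reading MooeeInv_shift_lc as the row v^T M^{-1} and MooeeInv_shift_norm as the column M^{-1} u is an interpretation of the coefficient names, not something the patch states. A minimal numeric check of the identity with Eigen (all inputs random):

// Sherman-Morrison:
// (M + u v^T)^{-1} x = M^{-1} x - (M^{-1} u)(v^T M^{-1} x)/(1 + v^T M^{-1} u)
#include <Eigen/Dense>
#include <iostream>

int main(void)
{
  const int Ls = 8;
  Eigen::MatrixXd M = 10.0*Eigen::MatrixXd::Identity(Ls, Ls) + Eigen::MatrixXd::Random(Ls, Ls);
  Eigen::VectorXd u = Eigen::VectorXd::Random(Ls); // rank-one column (cf. the shift_coeffs)
  Eigen::VectorXd v = Eigen::VectorXd::Random(Ls); // rank-one row
  Eigen::VectorXd x = Eigen::VectorXd::Random(Ls);

  Eigen::MatrixXd Minv   = M.inverse();
  Eigen::VectorXd Minv_x = Minv*x;
  Eigen::VectorXd Minv_u = Minv*u;  // role of MooeeInv_shift_norm (interpretation)
  double proj  = v.dot(Minv_x);     // role of the MooeeInv_shift_lc contraction
  double denom = 1.0 + v.dot(Minv_u);

  Eigen::VectorXd y_sm     = Minv_x - Minv_u*(proj/denom);
  Eigen::VectorXd y_direct = (M + u*v.transpose()).inverse()*x;

  std::cout << "Sherman-Morrison vs direct solve: " << (y_sm - y_direct).norm() << std::endl;
  return 0;
}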
+ +See the full license in the file "LICENSE" in the top level distribution directory +*************************************************************************************/ +/* END LEGAL */ + +#include +#include + +namespace Grid { +namespace QCD { + + /* + * Dense matrix versions of routines + */ + template + void MobiusEOFAFermion::MooeeInv(const FermionField& psi, FermionField& chi) + { + this->MooeeInternal(psi, chi, DaggerNo, InverseYes); + } + + template + void MobiusEOFAFermion::MooeeInv_shift(const FermionField& psi, FermionField& chi) + { + this->MooeeInternal(psi, chi, DaggerNo, InverseYes); + } + + template + void MobiusEOFAFermion::MooeeInvDag(const FermionField& psi, FermionField& chi) + { + this->MooeeInternal(psi, chi, DaggerYes, InverseYes); + } + + template + void MobiusEOFAFermion::MooeeInvDag_shift(const FermionField& psi, FermionField& chi) + { + this->MooeeInternal(psi, chi, DaggerYes, InverseYes); + } + + template + void MobiusEOFAFermion::M5D(const FermionField& psi, const FermionField& phi, + FermionField& chi, std::vector& lower, std::vector& diag, std::vector& upper) + { + GridBase* grid = psi._grid; + int Ls = this->Ls; + int LLs = grid->_rdimensions[0]; + const int nsimd = Simd::Nsimd(); + + Vector> u(LLs); + Vector> l(LLs); + Vector> d(LLs); + + assert(Ls/LLs == nsimd); + assert(phi.checkerboard == psi.checkerboard); + + chi.checkerboard = psi.checkerboard; + + // just directly address via type pun + typedef typename Simd::scalar_type scalar_type; + scalar_type* u_p = (scalar_type*) &u[0]; + scalar_type* l_p = (scalar_type*) &l[0]; + scalar_type* d_p = (scalar_type*) &d[0]; + + for(int o=0; oM5Dcalls++; + this->M5Dtime -= usecond(); + + assert(Nc == 3); + + parallel_for(int ss=0; ssoSites(); ss+=LLs){ // adds LLs + + #if 0 + + alignas(64) SiteHalfSpinor hp; + alignas(64) SiteHalfSpinor hm; + alignas(64) SiteSpinor fp; + alignas(64) SiteSpinor fm; + + for(int v=0; v= v){ rotate(hm, hm, nsimd-1); } + + hp = 0.5*hp; + hm = 0.5*hm; + + spRecon5m(fp, hp); + spRecon5p(fm, hm); + + chi[ss+v] = d[v]*phi[ss+v]; + chi[ss+v] = chi[ss+v] + u[v]*fp; + chi[ss+v] = chi[ss+v] + l[v]*fm; + + } + + #else + + for(int v=0; v(hp_00.v); + hp_01.v = Optimization::Rotate::tRotate<2>(hp_01.v); + hp_02.v = Optimization::Rotate::tRotate<2>(hp_02.v); + hp_10.v = Optimization::Rotate::tRotate<2>(hp_10.v); + hp_11.v = Optimization::Rotate::tRotate<2>(hp_11.v); + hp_12.v = Optimization::Rotate::tRotate<2>(hp_12.v); + } + + if(vm >= v){ + hm_00.v = Optimization::Rotate::tRotate<2*Simd::Nsimd()-2>(hm_00.v); + hm_01.v = Optimization::Rotate::tRotate<2*Simd::Nsimd()-2>(hm_01.v); + hm_02.v = Optimization::Rotate::tRotate<2*Simd::Nsimd()-2>(hm_02.v); + hm_10.v = Optimization::Rotate::tRotate<2*Simd::Nsimd()-2>(hm_10.v); + hm_11.v = Optimization::Rotate::tRotate<2*Simd::Nsimd()-2>(hm_11.v); + hm_12.v = Optimization::Rotate::tRotate<2*Simd::Nsimd()-2>(hm_12.v); + } + + // Can force these to real arithmetic and save 2x. 
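// Note on the lane rotations above (inferred from the wrap logic; illustrative):
// with the fifth dimension spread across SIMD lanes (Ls == LLs*Nsimd), the vector
// word at s-slice v holds the s-values {v, v+LLs, v+2*LLs, ...}, one per lane. The
// s+1 neighbour of the last slice (v == LLs-1) therefore sits one lane over in word
// 0, which is why vp = (v+1)%LLs wraps and tRotate<2> then shifts the word by one
// complex (two reals); the s-1 hop uses the inverse rotation tRotate<2*Nsimd()-2>.
// Example (assumed layout): Ls=16, Nsimd=4, LLs=4. Word 3 holds s = {3,7,11,15};
// its s+1 partners {4,8,12,0 (boundary)} live in word 0 with lanes rotated by one.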
+ Simd p_00 = switcheroo::mult(d[v]()()(), phi[ss+v]()(0)(0)) + switcheroo::mult(l[v]()()(), hm_00); + Simd p_01 = switcheroo::mult(d[v]()()(), phi[ss+v]()(0)(1)) + switcheroo::mult(l[v]()()(), hm_01); + Simd p_02 = switcheroo::mult(d[v]()()(), phi[ss+v]()(0)(2)) + switcheroo::mult(l[v]()()(), hm_02); + Simd p_10 = switcheroo::mult(d[v]()()(), phi[ss+v]()(1)(0)) + switcheroo::mult(l[v]()()(), hm_10); + Simd p_11 = switcheroo::mult(d[v]()()(), phi[ss+v]()(1)(1)) + switcheroo::mult(l[v]()()(), hm_11); + Simd p_12 = switcheroo::mult(d[v]()()(), phi[ss+v]()(1)(2)) + switcheroo::mult(l[v]()()(), hm_12); + Simd p_20 = switcheroo::mult(d[v]()()(), phi[ss+v]()(2)(0)) + switcheroo::mult(u[v]()()(), hp_00); + Simd p_21 = switcheroo::mult(d[v]()()(), phi[ss+v]()(2)(1)) + switcheroo::mult(u[v]()()(), hp_01); + Simd p_22 = switcheroo::mult(d[v]()()(), phi[ss+v]()(2)(2)) + switcheroo::mult(u[v]()()(), hp_02); + Simd p_30 = switcheroo::mult(d[v]()()(), phi[ss+v]()(3)(0)) + switcheroo::mult(u[v]()()(), hp_10); + Simd p_31 = switcheroo::mult(d[v]()()(), phi[ss+v]()(3)(1)) + switcheroo::mult(u[v]()()(), hp_11); + Simd p_32 = switcheroo::mult(d[v]()()(), phi[ss+v]()(3)(2)) + switcheroo::mult(u[v]()()(), hp_12); + + vstream(chi[ss+v]()(0)(0), p_00); + vstream(chi[ss+v]()(0)(1), p_01); + vstream(chi[ss+v]()(0)(2), p_02); + vstream(chi[ss+v]()(1)(0), p_10); + vstream(chi[ss+v]()(1)(1), p_11); + vstream(chi[ss+v]()(1)(2), p_12); + vstream(chi[ss+v]()(2)(0), p_20); + vstream(chi[ss+v]()(2)(1), p_21); + vstream(chi[ss+v]()(2)(2), p_22); + vstream(chi[ss+v]()(3)(0), p_30); + vstream(chi[ss+v]()(3)(1), p_31); + vstream(chi[ss+v]()(3)(2), p_32); + } + + #endif + } + + this->M5Dtime += usecond(); + } + + template + void MobiusEOFAFermion::M5D_shift(const FermionField& psi, const FermionField& phi, + FermionField& chi, std::vector& lower, std::vector& diag, std::vector& upper, + std::vector& shift_coeffs) + { + #if 0 + + this->M5D(psi, phi, chi, lower, diag, upper); + + // FIXME: possible gain from vectorizing shift operation as well? + Coeff_t one(1.0); + int Ls = this->Ls; + for(int s=0; spm == 1){ axpby_ssp_pplus(chi, one, chi, shift_coeffs[s], psi, s, Ls-1); } + else{ axpby_ssp_pminus(chi, one, chi, shift_coeffs[s], psi, s, 0); } + } + + #else + + GridBase* grid = psi._grid; + int Ls = this->Ls; + int LLs = grid->_rdimensions[0]; + const int nsimd = Simd::Nsimd(); + + Vector> u(LLs); + Vector> l(LLs); + Vector> d(LLs); + Vector> s(LLs); + + assert(Ls/LLs == nsimd); + assert(phi.checkerboard == psi.checkerboard); + + chi.checkerboard = psi.checkerboard; + + // just directly address via type pun + typedef typename Simd::scalar_type scalar_type; + scalar_type* u_p = (scalar_type*) &u[0]; + scalar_type* l_p = (scalar_type*) &l[0]; + scalar_type* d_p = (scalar_type*) &d[0]; + scalar_type* s_p = (scalar_type*) &s[0]; + + for(int o=0; oM5Dcalls++; + this->M5Dtime -= usecond(); + + assert(Nc == 3); + + parallel_for(int ss=0; ssoSites(); ss+=LLs){ // adds LLs + + int vs = (this->pm == 1) ? LLs-1 : 0; + Simd hs_00 = (this->pm == 1) ? psi[ss+vs]()(2)(0) : psi[ss+vs]()(0)(0); + Simd hs_01 = (this->pm == 1) ? psi[ss+vs]()(2)(1) : psi[ss+vs]()(0)(1); + Simd hs_02 = (this->pm == 1) ? psi[ss+vs]()(2)(2) : psi[ss+vs]()(0)(2); + Simd hs_10 = (this->pm == 1) ? psi[ss+vs]()(3)(0) : psi[ss+vs]()(1)(0); + Simd hs_11 = (this->pm == 1) ? psi[ss+vs]()(3)(1) : psi[ss+vs]()(1)(1); + Simd hs_12 = (this->pm == 1) ? 
psi[ss+vs]()(3)(2) : psi[ss+vs]()(1)(2); + + for(int v=0; v(hp_00.v); + hp_01.v = Optimization::Rotate::tRotate<2>(hp_01.v); + hp_02.v = Optimization::Rotate::tRotate<2>(hp_02.v); + hp_10.v = Optimization::Rotate::tRotate<2>(hp_10.v); + hp_11.v = Optimization::Rotate::tRotate<2>(hp_11.v); + hp_12.v = Optimization::Rotate::tRotate<2>(hp_12.v); + } + + if(this->pm == 1 && vs <= v){ + hs_00.v = Optimization::Rotate::tRotate<2>(hs_00.v); + hs_01.v = Optimization::Rotate::tRotate<2>(hs_01.v); + hs_02.v = Optimization::Rotate::tRotate<2>(hs_02.v); + hs_10.v = Optimization::Rotate::tRotate<2>(hs_10.v); + hs_11.v = Optimization::Rotate::tRotate<2>(hs_11.v); + hs_12.v = Optimization::Rotate::tRotate<2>(hs_12.v); + } + + if(vm >= v){ + hm_00.v = Optimization::Rotate::tRotate<2*Simd::Nsimd()-2>(hm_00.v); + hm_01.v = Optimization::Rotate::tRotate<2*Simd::Nsimd()-2>(hm_01.v); + hm_02.v = Optimization::Rotate::tRotate<2*Simd::Nsimd()-2>(hm_02.v); + hm_10.v = Optimization::Rotate::tRotate<2*Simd::Nsimd()-2>(hm_10.v); + hm_11.v = Optimization::Rotate::tRotate<2*Simd::Nsimd()-2>(hm_11.v); + hm_12.v = Optimization::Rotate::tRotate<2*Simd::Nsimd()-2>(hm_12.v); + } + + if(this->pm == -1 && vs >= v){ + hs_00.v = Optimization::Rotate::tRotate<2*Simd::Nsimd()-2>(hs_00.v); + hs_01.v = Optimization::Rotate::tRotate<2*Simd::Nsimd()-2>(hs_01.v); + hs_02.v = Optimization::Rotate::tRotate<2*Simd::Nsimd()-2>(hs_02.v); + hs_10.v = Optimization::Rotate::tRotate<2*Simd::Nsimd()-2>(hs_10.v); + hs_11.v = Optimization::Rotate::tRotate<2*Simd::Nsimd()-2>(hs_11.v); + hs_12.v = Optimization::Rotate::tRotate<2*Simd::Nsimd()-2>(hs_12.v); + } + + // Can force these to real arithmetic and save 2x. + Simd p_00 = (this->pm == 1) ? switcheroo::mult(d[v]()()(), phi[ss+v]()(0)(0)) + switcheroo::mult(l[v]()()(), hm_00) + : switcheroo::mult(d[v]()()(), phi[ss+v]()(0)(0)) + switcheroo::mult(l[v]()()(), hm_00) + + switcheroo::mult(s[v]()()(), hs_00); + Simd p_01 = (this->pm == 1) ? switcheroo::mult(d[v]()()(), phi[ss+v]()(0)(1)) + switcheroo::mult(l[v]()()(), hm_01) + : switcheroo::mult(d[v]()()(), phi[ss+v]()(0)(1)) + switcheroo::mult(l[v]()()(), hm_01) + + switcheroo::mult(s[v]()()(), hs_01); + Simd p_02 = (this->pm == 1) ? switcheroo::mult(d[v]()()(), phi[ss+v]()(0)(2)) + switcheroo::mult(l[v]()()(), hm_02) + : switcheroo::mult(d[v]()()(), phi[ss+v]()(0)(2)) + switcheroo::mult(l[v]()()(), hm_02) + + switcheroo::mult(s[v]()()(), hs_02); + Simd p_10 = (this->pm == 1) ? switcheroo::mult(d[v]()()(), phi[ss+v]()(1)(0)) + switcheroo::mult(l[v]()()(), hm_10) + : switcheroo::mult(d[v]()()(), phi[ss+v]()(1)(0)) + switcheroo::mult(l[v]()()(), hm_10) + + switcheroo::mult(s[v]()()(), hs_10); + Simd p_11 = (this->pm == 1) ? switcheroo::mult(d[v]()()(), phi[ss+v]()(1)(1)) + switcheroo::mult(l[v]()()(), hm_11) + : switcheroo::mult(d[v]()()(), phi[ss+v]()(1)(1)) + switcheroo::mult(l[v]()()(), hm_11) + + switcheroo::mult(s[v]()()(), hs_11); + Simd p_12 = (this->pm == 1) ? switcheroo::mult(d[v]()()(), phi[ss+v]()(1)(2)) + switcheroo::mult(l[v]()()(), hm_12) + : switcheroo::mult(d[v]()()(), phi[ss+v]()(1)(2)) + switcheroo::mult(l[v]()()(), hm_12) + + switcheroo::mult(s[v]()()(), hs_12); + Simd p_20 = (this->pm == 1) ? switcheroo::mult(d[v]()()(), phi[ss+v]()(2)(0)) + switcheroo::mult(u[v]()()(), hp_00) + + switcheroo::mult(s[v]()()(), hs_00) + : switcheroo::mult(d[v]()()(), phi[ss+v]()(2)(0)) + switcheroo::mult(u[v]()()(), hp_00); + Simd p_21 = (this->pm == 1) ? 
switcheroo::mult(d[v]()()(), phi[ss+v]()(2)(1)) + switcheroo::mult(u[v]()()(), hp_01) + + switcheroo::mult(s[v]()()(), hs_01) + : switcheroo::mult(d[v]()()(), phi[ss+v]()(2)(1)) + switcheroo::mult(u[v]()()(), hp_01); + Simd p_22 = (this->pm == 1) ? switcheroo::mult(d[v]()()(), phi[ss+v]()(2)(2)) + switcheroo::mult(u[v]()()(), hp_02) + + switcheroo::mult(s[v]()()(), hs_02) + : switcheroo::mult(d[v]()()(), phi[ss+v]()(2)(2)) + switcheroo::mult(u[v]()()(), hp_02); + Simd p_30 = (this->pm == 1) ? switcheroo::mult(d[v]()()(), phi[ss+v]()(3)(0)) + switcheroo::mult(u[v]()()(), hp_10) + + switcheroo::mult(s[v]()()(), hs_10) + : switcheroo::mult(d[v]()()(), phi[ss+v]()(3)(0)) + switcheroo::mult(u[v]()()(), hp_10); + Simd p_31 = (this->pm == 1) ? switcheroo::mult(d[v]()()(), phi[ss+v]()(3)(1)) + switcheroo::mult(u[v]()()(), hp_11) + + switcheroo::mult(s[v]()()(), hs_11) + : switcheroo::mult(d[v]()()(), phi[ss+v]()(3)(1)) + switcheroo::mult(u[v]()()(), hp_11); + Simd p_32 = (this->pm == 1) ? switcheroo::mult(d[v]()()(), phi[ss+v]()(3)(2)) + switcheroo::mult(u[v]()()(), hp_12) + + switcheroo::mult(s[v]()()(), hs_12) + : switcheroo::mult(d[v]()()(), phi[ss+v]()(3)(2)) + switcheroo::mult(u[v]()()(), hp_12); + + vstream(chi[ss+v]()(0)(0), p_00); + vstream(chi[ss+v]()(0)(1), p_01); + vstream(chi[ss+v]()(0)(2), p_02); + vstream(chi[ss+v]()(1)(0), p_10); + vstream(chi[ss+v]()(1)(1), p_11); + vstream(chi[ss+v]()(1)(2), p_12); + vstream(chi[ss+v]()(2)(0), p_20); + vstream(chi[ss+v]()(2)(1), p_21); + vstream(chi[ss+v]()(2)(2), p_22); + vstream(chi[ss+v]()(3)(0), p_30); + vstream(chi[ss+v]()(3)(1), p_31); + vstream(chi[ss+v]()(3)(2), p_32); + } + } + + this->M5Dtime += usecond(); + + #endif + } + + template + void MobiusEOFAFermion::M5Ddag(const FermionField& psi, const FermionField& phi, + FermionField& chi, std::vector& lower, std::vector& diag, std::vector& upper) + { + GridBase* grid = psi._grid; + int Ls = this->Ls; + int LLs = grid->_rdimensions[0]; + int nsimd = Simd::Nsimd(); + + Vector> u(LLs); + Vector> l(LLs); + Vector> d(LLs); + + assert(Ls/LLs == nsimd); + assert(phi.checkerboard == psi.checkerboard); + + chi.checkerboard = psi.checkerboard; + + // just directly address via type pun + typedef typename Simd::scalar_type scalar_type; + scalar_type* u_p = (scalar_type*) &u[0]; + scalar_type* l_p = (scalar_type*) &l[0]; + scalar_type* d_p = (scalar_type*) &d[0]; + + for(int o=0; oM5Dcalls++; + this->M5Dtime -= usecond(); + + parallel_for(int ss=0; ssoSites(); ss+=LLs){ // adds LLs + + #if 0 + + alignas(64) SiteHalfSpinor hp; + alignas(64) SiteHalfSpinor hm; + alignas(64) SiteSpinor fp; + alignas(64) SiteSpinor fm; + + for(int v=0; v= v){ rotate(hm, hm, nsimd-1); } + + hp = hp*0.5; + hm = hm*0.5; + spRecon5p(fp, hp); + spRecon5m(fm, hm); + + chi[ss+v] = d[v]*phi[ss+v]+u[v]*fp; + chi[ss+v] = chi[ss+v] +l[v]*fm; + + } + + #else + + for(int v=0; v(hp_00.v); + hp_01.v = Optimization::Rotate::tRotate<2>(hp_01.v); + hp_02.v = Optimization::Rotate::tRotate<2>(hp_02.v); + hp_10.v = Optimization::Rotate::tRotate<2>(hp_10.v); + hp_11.v = Optimization::Rotate::tRotate<2>(hp_11.v); + hp_12.v = Optimization::Rotate::tRotate<2>(hp_12.v); + } + + if(vm >= v){ + hm_00.v = Optimization::Rotate::tRotate<2*Simd::Nsimd()-2>(hm_00.v); + hm_01.v = Optimization::Rotate::tRotate<2*Simd::Nsimd()-2>(hm_01.v); + hm_02.v = Optimization::Rotate::tRotate<2*Simd::Nsimd()-2>(hm_02.v); + hm_10.v = Optimization::Rotate::tRotate<2*Simd::Nsimd()-2>(hm_10.v); + hm_11.v = Optimization::Rotate::tRotate<2*Simd::Nsimd()-2>(hm_11.v); + hm_12.v 
= Optimization::Rotate::tRotate<2*Simd::Nsimd()-2>(hm_12.v); + } + + Simd p_00 = switcheroo::mult(d[v]()()(), phi[ss+v]()(0)(0)) + switcheroo::mult(u[v]()()(), hp_00); + Simd p_01 = switcheroo::mult(d[v]()()(), phi[ss+v]()(0)(1)) + switcheroo::mult(u[v]()()(), hp_01); + Simd p_02 = switcheroo::mult(d[v]()()(), phi[ss+v]()(0)(2)) + switcheroo::mult(u[v]()()(), hp_02); + Simd p_10 = switcheroo::mult(d[v]()()(), phi[ss+v]()(1)(0)) + switcheroo::mult(u[v]()()(), hp_10); + Simd p_11 = switcheroo::mult(d[v]()()(), phi[ss+v]()(1)(1)) + switcheroo::mult(u[v]()()(), hp_11); + Simd p_12 = switcheroo::mult(d[v]()()(), phi[ss+v]()(1)(2)) + switcheroo::mult(u[v]()()(), hp_12); + Simd p_20 = switcheroo::mult(d[v]()()(), phi[ss+v]()(2)(0)) + switcheroo::mult(l[v]()()(), hm_00); + Simd p_21 = switcheroo::mult(d[v]()()(), phi[ss+v]()(2)(1)) + switcheroo::mult(l[v]()()(), hm_01); + Simd p_22 = switcheroo::mult(d[v]()()(), phi[ss+v]()(2)(2)) + switcheroo::mult(l[v]()()(), hm_02); + Simd p_30 = switcheroo::mult(d[v]()()(), phi[ss+v]()(3)(0)) + switcheroo::mult(l[v]()()(), hm_10); + Simd p_31 = switcheroo::mult(d[v]()()(), phi[ss+v]()(3)(1)) + switcheroo::mult(l[v]()()(), hm_11); + Simd p_32 = switcheroo::mult(d[v]()()(), phi[ss+v]()(3)(2)) + switcheroo::mult(l[v]()()(), hm_12); + + vstream(chi[ss+v]()(0)(0), p_00); + vstream(chi[ss+v]()(0)(1), p_01); + vstream(chi[ss+v]()(0)(2), p_02); + vstream(chi[ss+v]()(1)(0), p_10); + vstream(chi[ss+v]()(1)(1), p_11); + vstream(chi[ss+v]()(1)(2), p_12); + vstream(chi[ss+v]()(2)(0), p_20); + vstream(chi[ss+v]()(2)(1), p_21); + vstream(chi[ss+v]()(2)(2), p_22); + vstream(chi[ss+v]()(3)(0), p_30); + vstream(chi[ss+v]()(3)(1), p_31); + vstream(chi[ss+v]()(3)(2), p_32); + + } + + #endif + + } + + this->M5Dtime += usecond(); + } + + template + void MobiusEOFAFermion::M5Ddag_shift(const FermionField& psi, const FermionField& phi, + FermionField& chi, std::vector& lower, std::vector& diag, std::vector& upper, + std::vector& shift_coeffs) + { + #if 0 + + this->M5Ddag(psi, phi, chi, lower, diag, upper); + + // FIXME: possible gain from vectorizing shift operation as well? + Coeff_t one(1.0); + int Ls = this->Ls; + for(int s=0; spm == 1){ axpby_ssp_pplus(chi, one, chi, shift_coeffs[s], psi, Ls-1, s); } + else{ axpby_ssp_pminus(chi, one, chi, shift_coeffs[s], psi, 0, s); } + } + + #else + + GridBase* grid = psi._grid; + int Ls = this->Ls; + int LLs = grid->_rdimensions[0]; + int nsimd = Simd::Nsimd(); + + Vector> u(LLs); + Vector> l(LLs); + Vector> d(LLs); + Vector> s(LLs); + + assert(Ls/LLs == nsimd); + assert(phi.checkerboard == psi.checkerboard); + + chi.checkerboard = psi.checkerboard; + + // just directly address via type pun + typedef typename Simd::scalar_type scalar_type; + scalar_type* u_p = (scalar_type*) &u[0]; + scalar_type* l_p = (scalar_type*) &l[0]; + scalar_type* d_p = (scalar_type*) &d[0]; + scalar_type* s_p = (scalar_type*) &s[0]; + + for(int o=0; oM5Dcalls++; + this->M5Dtime -= usecond(); + + parallel_for(int ss=0; ssoSites(); ss+=LLs){ // adds LLs + + int vs = (this->pm == 1) ? LLs-1 : 0; + Simd hs_00 = (this->pm == 1) ? psi[ss+vs]()(0)(0) : psi[ss+vs]()(2)(0); + Simd hs_01 = (this->pm == 1) ? psi[ss+vs]()(0)(1) : psi[ss+vs]()(2)(1); + Simd hs_02 = (this->pm == 1) ? psi[ss+vs]()(0)(2) : psi[ss+vs]()(2)(2); + Simd hs_10 = (this->pm == 1) ? psi[ss+vs]()(1)(0) : psi[ss+vs]()(3)(0); + Simd hs_11 = (this->pm == 1) ? psi[ss+vs]()(1)(1) : psi[ss+vs]()(3)(1); + Simd hs_12 = (this->pm == 1) ? 
psi[ss+vs]()(1)(2) : psi[ss+vs]()(3)(2); + + for(int v=0; v(hp_00.v); + hp_01.v = Optimization::Rotate::tRotate<2>(hp_01.v); + hp_02.v = Optimization::Rotate::tRotate<2>(hp_02.v); + hp_10.v = Optimization::Rotate::tRotate<2>(hp_10.v); + hp_11.v = Optimization::Rotate::tRotate<2>(hp_11.v); + hp_12.v = Optimization::Rotate::tRotate<2>(hp_12.v); + } + + if(this->pm == 1 && vs <= v){ + hs_00.v = Optimization::Rotate::tRotate<2>(hs_00.v); + hs_01.v = Optimization::Rotate::tRotate<2>(hs_01.v); + hs_02.v = Optimization::Rotate::tRotate<2>(hs_02.v); + hs_10.v = Optimization::Rotate::tRotate<2>(hs_10.v); + hs_11.v = Optimization::Rotate::tRotate<2>(hs_11.v); + hs_12.v = Optimization::Rotate::tRotate<2>(hs_12.v); + } + + if(vm >= v){ + hm_00.v = Optimization::Rotate::tRotate<2*Simd::Nsimd()-2>(hm_00.v); + hm_01.v = Optimization::Rotate::tRotate<2*Simd::Nsimd()-2>(hm_01.v); + hm_02.v = Optimization::Rotate::tRotate<2*Simd::Nsimd()-2>(hm_02.v); + hm_10.v = Optimization::Rotate::tRotate<2*Simd::Nsimd()-2>(hm_10.v); + hm_11.v = Optimization::Rotate::tRotate<2*Simd::Nsimd()-2>(hm_11.v); + hm_12.v = Optimization::Rotate::tRotate<2*Simd::Nsimd()-2>(hm_12.v); + } + + if(this->pm == -1 && vs >= v){ + hs_00.v = Optimization::Rotate::tRotate<2*Simd::Nsimd()-2>(hs_00.v); + hs_01.v = Optimization::Rotate::tRotate<2*Simd::Nsimd()-2>(hs_01.v); + hs_02.v = Optimization::Rotate::tRotate<2*Simd::Nsimd()-2>(hs_02.v); + hs_10.v = Optimization::Rotate::tRotate<2*Simd::Nsimd()-2>(hs_10.v); + hs_11.v = Optimization::Rotate::tRotate<2*Simd::Nsimd()-2>(hs_11.v); + hs_12.v = Optimization::Rotate::tRotate<2*Simd::Nsimd()-2>(hs_12.v); + } + + Simd p_00 = (this->pm == 1) ? switcheroo::mult(d[v]()()(), phi[ss+v]()(0)(0)) + switcheroo::mult(u[v]()()(), hp_00) + + switcheroo::mult(s[v]()()(), hs_00) + : switcheroo::mult(d[v]()()(), phi[ss+v]()(0)(0)) + switcheroo::mult(u[v]()()(), hp_00); + Simd p_01 = (this->pm == 1) ? switcheroo::mult(d[v]()()(), phi[ss+v]()(0)(1)) + switcheroo::mult(u[v]()()(), hp_01) + + switcheroo::mult(s[v]()()(), hs_01) + : switcheroo::mult(d[v]()()(), phi[ss+v]()(0)(1)) + switcheroo::mult(u[v]()()(), hp_01); + Simd p_02 = (this->pm == 1) ? switcheroo::mult(d[v]()()(), phi[ss+v]()(0)(2)) + switcheroo::mult(u[v]()()(), hp_02) + + switcheroo::mult(s[v]()()(), hs_02) + : switcheroo::mult(d[v]()()(), phi[ss+v]()(0)(2)) + switcheroo::mult(u[v]()()(), hp_02); + Simd p_10 = (this->pm == 1) ? switcheroo::mult(d[v]()()(), phi[ss+v]()(1)(0)) + switcheroo::mult(u[v]()()(), hp_10) + + switcheroo::mult(s[v]()()(), hs_10) + : switcheroo::mult(d[v]()()(), phi[ss+v]()(1)(0)) + switcheroo::mult(u[v]()()(), hp_10); + Simd p_11 = (this->pm == 1) ? switcheroo::mult(d[v]()()(), phi[ss+v]()(1)(1)) + switcheroo::mult(u[v]()()(), hp_11) + + switcheroo::mult(s[v]()()(), hs_11) + : switcheroo::mult(d[v]()()(), phi[ss+v]()(1)(1)) + switcheroo::mult(u[v]()()(), hp_11); + Simd p_12 = (this->pm == 1) ? switcheroo::mult(d[v]()()(), phi[ss+v]()(1)(2)) + switcheroo::mult(u[v]()()(), hp_12) + + switcheroo::mult(s[v]()()(), hs_12) + : switcheroo::mult(d[v]()()(), phi[ss+v]()(1)(2)) + switcheroo::mult(u[v]()()(), hp_12); + Simd p_20 = (this->pm == 1) ? switcheroo::mult(d[v]()()(), phi[ss+v]()(2)(0)) + switcheroo::mult(l[v]()()(), hm_00) + : switcheroo::mult(d[v]()()(), phi[ss+v]()(2)(0)) + switcheroo::mult(l[v]()()(), hm_00) + + switcheroo::mult(s[v]()()(), hs_00); + Simd p_21 = (this->pm == 1) ? 
switcheroo::mult(d[v]()()(), phi[ss+v]()(2)(1)) + switcheroo::mult(l[v]()()(), hm_01) + : switcheroo::mult(d[v]()()(), phi[ss+v]()(2)(1)) + switcheroo::mult(l[v]()()(), hm_01) + + switcheroo::mult(s[v]()()(), hs_01); + Simd p_22 = (this->pm == 1) ? switcheroo::mult(d[v]()()(), phi[ss+v]()(2)(2)) + switcheroo::mult(l[v]()()(), hm_02) + : switcheroo::mult(d[v]()()(), phi[ss+v]()(2)(2)) + switcheroo::mult(l[v]()()(), hm_02) + + switcheroo::mult(s[v]()()(), hs_02); + Simd p_30 = (this->pm == 1) ? switcheroo::mult(d[v]()()(), phi[ss+v]()(3)(0)) + switcheroo::mult(l[v]()()(), hm_10) + : switcheroo::mult(d[v]()()(), phi[ss+v]()(3)(0)) + switcheroo::mult(l[v]()()(), hm_10) + + switcheroo::mult(s[v]()()(), hs_10); + Simd p_31 = (this->pm == 1) ? switcheroo::mult(d[v]()()(), phi[ss+v]()(3)(1)) + switcheroo::mult(l[v]()()(), hm_11) + : switcheroo::mult(d[v]()()(), phi[ss+v]()(3)(1)) + switcheroo::mult(l[v]()()(), hm_11) + + switcheroo::mult(s[v]()()(), hs_11); + Simd p_32 = (this->pm == 1) ? switcheroo::mult(d[v]()()(), phi[ss+v]()(3)(2)) + switcheroo::mult(l[v]()()(), hm_12) + : switcheroo::mult(d[v]()()(), phi[ss+v]()(3)(2)) + switcheroo::mult(l[v]()()(), hm_12) + + switcheroo::mult(s[v]()()(), hs_12); + + vstream(chi[ss+v]()(0)(0), p_00); + vstream(chi[ss+v]()(0)(1), p_01); + vstream(chi[ss+v]()(0)(2), p_02); + vstream(chi[ss+v]()(1)(0), p_10); + vstream(chi[ss+v]()(1)(1), p_11); + vstream(chi[ss+v]()(1)(2), p_12); + vstream(chi[ss+v]()(2)(0), p_20); + vstream(chi[ss+v]()(2)(1), p_21); + vstream(chi[ss+v]()(2)(2), p_22); + vstream(chi[ss+v]()(3)(0), p_30); + vstream(chi[ss+v]()(3)(1), p_31); + vstream(chi[ss+v]()(3)(2), p_32); + + } + + } + + this->M5Dtime += usecond(); + + #endif + } + + #ifdef AVX512 + #include + #include + #include + #endif + + template + void MobiusEOFAFermion::MooeeInternalAsm(const FermionField& psi, FermionField& chi, + int LLs, int site, Vector >& Matp, Vector >& Matm) + { + #ifndef AVX512 + { + SiteHalfSpinor BcastP; + SiteHalfSpinor BcastM; + SiteHalfSpinor SiteChiP; + SiteHalfSpinor SiteChiM; + + // Ls*Ls * 2 * 12 * vol flops + for(int s1=0; s1); + + for(int s1=0; s1 + void MobiusEOFAFermion::MooeeInternalZAsm(const FermionField& psi, FermionField& chi, + int LLs, int site, Vector >& Matp, Vector >& Matm) + { + std::cout << "Error: zMobius not implemented for EOFA" << std::endl; + exit(-1); + }; + + template + void MobiusEOFAFermion::MooeeInternal(const FermionField& psi, FermionField& chi, int dag, int inv) + { + int Ls = this->Ls; + int LLs = psi._grid->_rdimensions[0]; + int vol = psi._grid->oSites()/LLs; + + chi.checkerboard = psi.checkerboard; + + Vector> Matp; + Vector> Matm; + Vector>* _Matp; + Vector>* _Matm; + + // MooeeInternalCompute(dag,inv,Matp,Matm); + if(inv && dag){ + _Matp = &this->MatpInvDag; + _Matm = &this->MatmInvDag; + } + + if(inv && (!dag)){ + _Matp = &this->MatpInv; + _Matm = &this->MatmInv; + } + + if(!inv){ + MooeeInternalCompute(dag, inv, Matp, Matm); + _Matp = &Matp; + _Matm = &Matm; + } + + assert(_Matp->size() == Ls*LLs); + + this->MooeeInvCalls++; + this->MooeeInvTime -= usecond(); + + if(switcheroo::iscomplex()){ + parallel_for(auto site=0; siteMooeeInvTime += usecond(); + } + + #ifdef MOBIUS_EOFA_DPERP_VEC + + INSTANTIATE_DPERP_MOBIUS_EOFA(DomainWallVec5dImplD); + INSTANTIATE_DPERP_MOBIUS_EOFA(DomainWallVec5dImplF); + INSTANTIATE_DPERP_MOBIUS_EOFA(ZDomainWallVec5dImplD); + INSTANTIATE_DPERP_MOBIUS_EOFA(ZDomainWallVec5dImplF); + + INSTANTIATE_DPERP_MOBIUS_EOFA(DomainWallVec5dImplDF); + 
INSTANTIATE_DPERP_MOBIUS_EOFA(DomainWallVec5dImplFH); + INSTANTIATE_DPERP_MOBIUS_EOFA(ZDomainWallVec5dImplDF); + INSTANTIATE_DPERP_MOBIUS_EOFA(ZDomainWallVec5dImplFH); + + template void MobiusEOFAFermion::MooeeInternal(const FermionField& psi, FermionField& chi, int dag, int inv); + template void MobiusEOFAFermion::MooeeInternal(const FermionField& psi, FermionField& chi, int dag, int inv); + template void MobiusEOFAFermion::MooeeInternal(const FermionField& psi, FermionField& chi, int dag, int inv); + template void MobiusEOFAFermion::MooeeInternal(const FermionField& psi, FermionField& chi, int dag, int inv); + + template void MobiusEOFAFermion::MooeeInternal(const FermionField& psi, FermionField& chi, int dag, int inv); + template void MobiusEOFAFermion::MooeeInternal(const FermionField& psi, FermionField& chi, int dag, int inv); + template void MobiusEOFAFermion::MooeeInternal(const FermionField& psi, FermionField& chi, int dag, int inv); + template void MobiusEOFAFermion::MooeeInternal(const FermionField& psi, FermionField& chi, int dag, int inv); + + #endif + +}} diff --git a/lib/qcd/action/pseudofermion/ExactOneFlavourRatio.h b/lib/qcd/action/pseudofermion/ExactOneFlavourRatio.h new file mode 100644 index 00000000..9c1e2921 --- /dev/null +++ b/lib/qcd/action/pseudofermion/ExactOneFlavourRatio.h @@ -0,0 +1,264 @@ +/************************************************************************************* + +Grid physics library, www.github.com/paboyle/Grid + +Source file: ./lib/qcd/action/pseudofermion/ExactOneFlavourRatio.h + +Copyright (C) 2017 + +Author: Peter Boyle +Author: David Murphy + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 2 of the License, or +(at your option) any later version. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License along +with this program; if not, write to the Free Software Foundation, Inc., +51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
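The pseudofermion action defined in this file draws the heatbath field by applying a rational approximation of x^{-1/2} to a Gaussian vector, one shifted solve per pole, as the \Phi = ( \alpha_0 + \sum_k \alpha_k \gamma_k ) \eta comment below spells out. A generic sketch of that pattern (applyInvSqrt and solve are illustrative names, not Grid API; AlgRemez supplies the coefficients):

// Apply f(A) ~ a0 + sum_k ak[k]/(A + bk[k]) to a vector via shifted solves.
// With Field = double and Mat = double this runs as-is, e.g. with
//   auto solve = [](double A, double b, const double& x, double& out){ out = x/(A + b); };
#include <vector>

template<class Field, class Mat, class ShiftedSolve>
Field applyInvSqrt(Mat& A, const Field& eta, double a0,
                   const std::vector<double>& ak, const std::vector<double>& bk,
                   ShiftedSolve solve)
{
  Field Phi = a0*eta;              // alpha_0 term
  Field tmp = eta;
  for(size_t k=0; k<ak.size(); k++){
    solve(A, bk[k], eta, tmp);     // tmp = (A + bk[k])^{-1} eta
    Phi = Phi + ak[k]*tmp;         // accumulate pole k
  }
  return Phi;
}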
+ +See the full license in the file "LICENSE" in the top level distribution directory +*************************************************************************************/ +/* END LEGAL */ + +///////////////////////////////////////////////////////////////// +// Implementation of exact one flavour algorithm (EOFA) // +// using fermion classes defined in: // +// Grid/qcd/action/fermion/DomainWallEOFAFermion.h (Shamir) // +// Grid/qcd/action/fermion/MobiusEOFAFermion.h (Mobius) // +// arXiv: 1403.1683, 1706.05843 // +///////////////////////////////////////////////////////////////// + +#ifndef QCD_PSEUDOFERMION_EXACT_ONE_FLAVOUR_RATIO_H +#define QCD_PSEUDOFERMION_EXACT_ONE_FLAVOUR_RATIO_H + +namespace Grid{ +namespace QCD{ + + /////////////////////////////////////////////////////////////// + // Exact one flavour implementation of DWF determinant ratio // + /////////////////////////////////////////////////////////////// + + template + class ExactOneFlavourRatioPseudoFermionAction : public Action + { + public: + INHERIT_IMPL_TYPES(Impl); + typedef OneFlavourRationalParams Params; + Params param; + MultiShiftFunction PowerNegHalf; + + private: + bool use_heatbath_forecasting; + AbstractEOFAFermion& Lop; // the basic LH operator + AbstractEOFAFermion& Rop; // the basic RH operator + SchurRedBlackDiagMooeeSolve Solver; + FermionField Phi; // the pseudofermion field for this trajectory + + public: + ExactOneFlavourRatioPseudoFermionAction(AbstractEOFAFermion& _Lop, AbstractEOFAFermion& _Rop, + OperatorFunction& S, Params& p, bool use_fc=false) : Lop(_Lop), Rop(_Rop), Solver(S), + Phi(_Lop.FermionGrid()), param(p), use_heatbath_forecasting(use_fc) + { + AlgRemez remez(param.lo, param.hi, param.precision); + + // MdagM^(+- 1/2) + std::cout << GridLogMessage << "Generating degree " << param.degree << " for x^(-1/2)" << std::endl; + remez.generateApprox(param.degree, 1, 2); + PowerNegHalf.Init(remez, param.tolerance, true); + }; + + virtual std::string action_name() { return "ExactOneFlavourRatioPseudoFermionAction"; } + + virtual std::string LogParameters() { + std::stringstream sstream; + sstream << GridLogMessage << "[" << action_name() << "] Low :" << param.lo << std::endl; + sstream << GridLogMessage << "[" << action_name() << "] High :" << param.hi << std::endl; + sstream << GridLogMessage << "[" << action_name() << "] Max iterations :" << param.MaxIter << std::endl; + sstream << GridLogMessage << "[" << action_name() << "] Tolerance :" << param.tolerance << std::endl; + sstream << GridLogMessage << "[" << action_name() << "] Degree :" << param.degree << std::endl; + sstream << GridLogMessage << "[" << action_name() << "] Precision :" << param.precision << std::endl; + return sstream.str(); + } + + // Spin projection + void spProj(const FermionField& in, FermionField& out, int sign, int Ls) + { + if(sign == 1){ for(int s=0; s tmp(2, Lop.FermionGrid()); + + // Use chronological inverter to forecast solutions across poles + std::vector prev_solns; + if(use_heatbath_forecasting){ prev_solns.reserve(param.degree); } + ChronoForecast, FermionField> Forecast; + + // Seed with Gaussian noise vector (var = 0.5) + RealD scale = std::sqrt(0.5); + gaussian(pRNG,eta); + eta = eta * scale; + printf("Heatbath source vector: <\\eta|\\eta> = %1.15e\n", norm2(eta)); + + // \Phi = ( \alpha_{0} + \sum_{k=1}^{N_{p}} \alpha_{l} * \gamma_{l} ) * \eta + RealD N(PowerNegHalf.norm); + for(int k=0; k tmp(2, Lop.FermionGrid()); + + // S = <\Phi|\Phi> + RealD action(norm2(Phi)); + + // LH term: S = S - k <\Phi| P_{-} 
\Omega_{-}^{\dagger} H(mf)^{-1} \Omega_{-} P_{-} |\Phi> + spProj(Phi, spProj_Phi, -1, Lop.Ls); + Lop.Omega(spProj_Phi, tmp[0], -1, 0); + G5R5(tmp[1], tmp[0]); + tmp[0] = zero; + Solver(Lop, tmp[1], tmp[0]); + Lop.Dtilde(tmp[0], tmp[1]); // We actually solved Cayley preconditioned system: transform back + Lop.Omega(tmp[1], tmp[0], -1, 1); + action -= Lop.k * innerProduct(spProj_Phi, tmp[0]).real(); + + // RH term: S = S + k <\Phi| P_{+} \Omega_{+}^{\dagger} ( H(mb) + // - \Delta_{+}(mf,mb) P_{+} )^{-1} \Omega_{-} P_{-} |\Phi> + spProj(Phi, spProj_Phi, 1, Rop.Ls); + Rop.Omega(spProj_Phi, tmp[0], 1, 0); + G5R5(tmp[1], tmp[0]); + tmp[0] = zero; + Solver(Rop, tmp[1], tmp[0]); + Rop.Dtilde(tmp[0], tmp[1]); + Rop.Omega(tmp[1], tmp[0], 1, 1); + action += Rop.k * innerProduct(spProj_Phi, tmp[0]).real(); + + return action; + }; + + // EOFA pseudofermion force: see Eqns. (34)-(36) of arXiv:1706.05843 + virtual void deriv(const GaugeField& U, GaugeField& dSdU) + { + Lop.ImportGauge(U); + Rop.ImportGauge(U); + + FermionField spProj_Phi (Lop.FermionGrid()); + FermionField Omega_spProj_Phi(Lop.FermionGrid()); + FermionField CG_src (Lop.FermionGrid()); + FermionField Chi (Lop.FermionGrid()); + FermionField g5_R5_Chi (Lop.FermionGrid()); + + GaugeField force(Lop.GaugeGrid()); + + // LH: dSdU = k \chi_{L}^{\dagger} \gamma_{5} R_{5} ( \partial_{x,\mu} D_{w} ) \chi_{L} + // \chi_{L} = H(mf)^{-1} \Omega_{-} P_{-} \Phi + spProj(Phi, spProj_Phi, -1, Lop.Ls); + Lop.Omega(spProj_Phi, Omega_spProj_Phi, -1, 0); + G5R5(CG_src, Omega_spProj_Phi); + spProj_Phi = zero; + Solver(Lop, CG_src, spProj_Phi); + Lop.Dtilde(spProj_Phi, Chi); + G5R5(g5_R5_Chi, Chi); + Lop.MDeriv(force, g5_R5_Chi, Chi, DaggerNo); + dSdU = Lop.k * force; + + // RH: dSdU = dSdU - k \chi_{R}^{\dagger} \gamma_{5} R_{5} ( \partial_{x,\mu} D_{w} ) \chi_{} + // \chi_{R} = ( H(mb) - \Delta_{+}(mf,mb) P_{+} )^{-1} \Omega_{+} P_{+} \Phi + spProj(Phi, spProj_Phi, 1, Rop.Ls); + Rop.Omega(spProj_Phi, Omega_spProj_Phi, 1, 0); + G5R5(CG_src, Omega_spProj_Phi); + spProj_Phi = zero; + Solver(Rop, CG_src, spProj_Phi); + Rop.Dtilde(spProj_Phi, Chi); + G5R5(g5_R5_Chi, Chi); + Lop.MDeriv(force, g5_R5_Chi, Chi, DaggerNo); + dSdU = dSdU - Rop.k * force; + }; + }; +}} + +#endif diff --git a/lib/qcd/action/pseudofermion/PseudoFermion.h b/lib/qcd/action/pseudofermion/PseudoFermion.h index bccca3d4..133ebb7d 100644 --- a/lib/qcd/action/pseudofermion/PseudoFermion.h +++ b/lib/qcd/action/pseudofermion/PseudoFermion.h @@ -38,5 +38,6 @@ directory #include #include #include +#include #endif diff --git a/lib/qcd/modules/FermionOperatorModules.h b/lib/qcd/modules/FermionOperatorModules.h index c66842c6..fc9d96a7 100644 --- a/lib/qcd/modules/FermionOperatorModules.h +++ b/lib/qcd/modules/FermionOperatorModules.h @@ -72,7 +72,7 @@ protected: } virtual unsigned int Ls(){ - return 0; + return 0; } virtual void print_parameters(){ @@ -97,7 +97,7 @@ class HMC_FermionOperatorModuleFactory : public Factory < FermionOperatorModuleBase > , Reader > { public: // use SINGLETON FUNCTOR MACRO HERE - typedef Reader TheReader; + typedef Reader TheReader; HMC_FermionOperatorModuleFactory(const HMC_FermionOperatorModuleFactory& e) = delete; void operator=(const HMC_FermionOperatorModuleFactory& e) = delete; @@ -122,7 +122,7 @@ namespace QCD{ // Modules class WilsonFermionParameters : Serializable { public: - GRID_SERIALIZABLE_CLASS_MEMBERS(WilsonFermionParameters, + GRID_SERIALIZABLE_CLASS_MEMBERS(WilsonFermionParameters, RealD, mass); }; @@ -144,7 +144,7 @@ class WilsonFermionModule: public 
FermionOperatorModuleGridRefs[0]; auto GridMod5d = this->GridRefs[1]; typename FermionImpl::GaugeField U(GridMod->get_full()); - this->FOPtr.reset(new MobiusFermion( U, *(GridMod->get_full()), *(GridMod->get_rb()), + this->FOPtr.reset(new MobiusFermion( U, *(GridMod->get_full()), *(GridMod->get_rb()), *(GridMod5d->get_full()), *(GridMod5d->get_rb()), this->Par_.mass, this->Par_.M5, this->Par_.b, this->Par_.c)); } @@ -175,7 +175,7 @@ class MobiusFermionModule: public FermionOperatorModuleGridRefs[0]; auto GridMod5d = this->GridRefs[1]; typename FermionImpl::GaugeField U(GridMod->get_full()); - this->FOPtr.reset(new DomainWallFermion( U, *(GridMod->get_full()), *(GridMod->get_rb()), + this->FOPtr.reset(new DomainWallFermion( U, *(GridMod->get_full()), *(GridMod->get_rb()), *(GridMod5d->get_full()), *(GridMod5d->get_rb()), this->Par_.mass, this->Par_.M5)); } }; +class DomainWallEOFAFermionParameters : Serializable { + public: + GRID_SERIALIZABLE_CLASS_MEMBERS(DomainWallEOFAFermionParameters, + RealD, mq1, + RealD, mq2, + RealD, mq3, + RealD, shift, + int, pm, + RealD, M5, + unsigned int, Ls); +}; + +template +class DomainWallEOFAFermionModule: public FermionOperatorModule { + typedef FermionOperatorModule FermBase; + using FermBase::FermBase; // for constructors + + virtual unsigned int Ls(){ + return this->Par_.Ls; + } + + // acquire resource + virtual void initialize(){ + auto GridMod = this->GridRefs[0]; + auto GridMod5d = this->GridRefs[1]; + typename FermionImpl::GaugeField U(GridMod->get_full()); + this->FOPtr.reset(new DomainWallEOFAFermion( U, *(GridMod->get_full()), *(GridMod->get_rb()), + *(GridMod5d->get_full()), *(GridMod5d->get_rb()), + this->Par_.mq1, this->Par_.mq2, this->Par_.mq3, + this->Par_.shift, this->Par_.pm, this->Par_.M5)); + } +}; + } // QCD } // Grid -#endif //FERMIONOPERATOR_MODULES_H \ No newline at end of file +#endif //FERMIONOPERATOR_MODULES_H diff --git a/lib/qcd/utils/SpaceTimeGrid.cc b/lib/qcd/utils/SpaceTimeGrid.cc index 3ada4a3b..b2b5d9c8 100644 --- a/lib/qcd/utils/SpaceTimeGrid.cc +++ b/lib/qcd/utils/SpaceTimeGrid.cc @@ -60,7 +60,7 @@ GridCartesian *SpaceTimeGrid::makeFiveDimGrid(int Ls,const GridCartesian simd5.push_back(FourDimGrid->_simd_layout[d]); mpi5.push_back(FourDimGrid->_processors[d]); } - return new GridCartesian(latt5,simd5,mpi5); + return new GridCartesian(latt5,simd5,mpi5,*FourDimGrid); } @@ -68,18 +68,14 @@ GridRedBlackCartesian *SpaceTimeGrid::makeFiveDimRedBlackGrid(int Ls,const GridC { int N4=FourDimGrid->_ndimension; int cbd=1; - std::vector latt5(1,Ls); - std::vector simd5(1,1); - std::vector mpi5(1,1); std::vector cb5(1,0); - for(int d=0;d_fdimensions[d]); - simd5.push_back(FourDimGrid->_simd_layout[d]); - mpi5.push_back(FourDimGrid->_processors[d]); cb5.push_back( 1); - } - return new GridRedBlackCartesian(latt5,simd5,mpi5,cb5,cbd); + } + GridCartesian *tmp = makeFiveDimGrid(Ls,FourDimGrid); + GridRedBlackCartesian *ret = new GridRedBlackCartesian(tmp,cb5,cbd); + delete tmp; + return ret; } @@ -97,26 +93,24 @@ GridCartesian *SpaceTimeGrid::makeFiveDimDWFGrid(int Ls,const GridCartes simd5.push_back(1); mpi5.push_back(FourDimGrid->_processors[d]); } - return new GridCartesian(latt5,simd5,mpi5); + return new GridCartesian(latt5,simd5,mpi5,*FourDimGrid); } - +/////////////////////////////////////////////////// +// Interface is inefficient and forces the deletion +// Pass in the non-redblack grid +/////////////////////////////////////////////////// GridRedBlackCartesian *SpaceTimeGrid::makeFiveDimDWFRedBlackGrid(int Ls,const GridCartesian 
*FourDimGrid) { int N4=FourDimGrid->_ndimension; - int nsimd = FourDimGrid->Nsimd(); int cbd=1; - std::vector latt5(1,Ls); - std::vector simd5(1,nsimd); - std::vector mpi5(1,1); std::vector cb5(1,0); - for(int d=0;d_fdimensions[d]); - simd5.push_back(1); - mpi5.push_back(FourDimGrid->_processors[d]); cb5.push_back(1); - } - return new GridRedBlackCartesian(latt5,simd5,mpi5,cb5,cbd); + } + GridCartesian *tmp = makeFiveDimDWFGrid(Ls,FourDimGrid); + GridRedBlackCartesian *ret = new GridRedBlackCartesian(tmp,cb5,cbd); + delete tmp; + return ret; } diff --git a/lib/tensors/Tensor_index.h b/lib/tensors/Tensor_index.h index f114baf8..500f6c8d 100644 --- a/lib/tensors/Tensor_index.h +++ b/lib/tensors/Tensor_index.h @@ -175,7 +175,7 @@ class TensorIndexRecursion { } } template inline static - void pokeIndex(iVector &ret, const iVector::peekIndex(ret._internal[0],0)),N> &arg, int i,int j) + void pokeIndex(iVector &ret, const iVector::peekIndex(ret._internal[0],0,0)),N> &arg, int i,int j) { for(int ii=0;ii::pokeIndex(ret._internal[ii],arg._internal[ii],i,j); @@ -191,7 +191,7 @@ class TensorIndexRecursion { }} } template inline static - void pokeIndex(iMatrix &ret, const iMatrix::peekIndex(ret._internal[0][0],0)),N> &arg, int i,int j) + void pokeIndex(iMatrix &ret, const iMatrix::peekIndex(ret._internal[0][0],0,0)),N> &arg, int i,int j) { for(int ii=0;ii& coor,int index,std::vector &dims){ + static inline void CoorFromIndex (std::vector& coor,int index,const std::vector &dims){ int nd= dims.size(); coor.resize(nd); for(int d=0;d& coor,int &index,std::vector &dims){ + static inline void IndexFromCoor (const std::vector& coor,int &index,const std::vector &dims){ int nd=dims.size(); int stride=1; index=0; diff --git a/tests/Test_stencil.cc b/tests/Test_stencil.cc index fa4b0b57..e9517446 100644 --- a/tests/Test_stencil.cc +++ b/tests/Test_stencil.cc @@ -48,7 +48,7 @@ int main(int argc, char ** argv) { double volume = latt_size[0]*latt_size[1]*latt_size[2]*latt_size[3]; GridCartesian Fine(latt_size,simd_layout,mpi_layout); - GridRedBlackCartesian rbFine(latt_size,simd_layout,mpi_layout); + GridRedBlackCartesian rbFine(&Fine); GridParallelRNG fRNG(&Fine); // fRNG.SeedFixedIntegers(std::vector({45,12,81,9}); diff --git a/tests/core/Test_cshift_red_black.cc b/tests/core/Test_cshift_red_black.cc index f9269709..c7b0c2f1 100644 --- a/tests/core/Test_cshift_red_black.cc +++ b/tests/core/Test_cshift_red_black.cc @@ -47,7 +47,7 @@ int main (int argc, char ** argv) mask[0]=0; GridCartesian Fine (latt_size,simd_layout,mpi_layout); - GridRedBlackCartesian RBFine(latt_size,simd_layout,mpi_layout,mask,1); + GridRedBlackCartesian RBFine(&Fine,mask,1); GridParallelRNG FineRNG(&Fine); FineRNG.SeedFixedIntegers(std::vector({45,12,81,9})); diff --git a/tests/core/Test_cshift_red_black_rotate.cc b/tests/core/Test_cshift_red_black_rotate.cc index 3ef1cd21..aa9e6104 100644 --- a/tests/core/Test_cshift_red_black_rotate.cc +++ b/tests/core/Test_cshift_red_black_rotate.cc @@ -47,7 +47,7 @@ int main (int argc, char ** argv) mask[0]=0; GridCartesian Fine (latt_size,simd_layout,mpi_layout); - GridRedBlackCartesian RBFine(latt_size,simd_layout,mpi_layout,mask,1); + GridRedBlackCartesian RBFine(&Fine,mask,1); GridParallelRNG FineRNG(&Fine); FineRNG.SeedFixedIntegers(std::vector({45,12,81,9})); diff --git a/tests/core/Test_dwf_eofa_even_odd.cc b/tests/core/Test_dwf_eofa_even_odd.cc new file mode 100644 index 00000000..5fe0f653 --- /dev/null +++ b/tests/core/Test_dwf_eofa_even_odd.cc @@ -0,0 +1,239 @@ 
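The test that follows begins by checking the checkerboard decomposition Meo + Moe + Moo + Mee = Munprec, reassembling full-grid vectors from the two checkerboards. A toy dense-block version of the same identity (Eigen is used here purely for the illustration):

// Even-odd block decomposition: M = [[Mee, Meo], [Moe, Moo]] acting on (psi_e, psi_o).
#include <Eigen/Dense>
#include <iostream>

int main(void)
{
  const int n = 4;
  Eigen::MatrixXd Mee = Eigen::MatrixXd::Random(n, n), Meo = Eigen::MatrixXd::Random(n, n);
  Eigen::MatrixXd Moe = Eigen::MatrixXd::Random(n, n), Moo = Eigen::MatrixXd::Random(n, n);
  Eigen::MatrixXd M(2*n, 2*n);
  M << Mee, Meo,
       Moe, Moo;

  Eigen::VectorXd psi = Eigen::VectorXd::Random(2*n);
  Eigen::VectorXd pe  = psi.head(n), po = psi.tail(n);

  Eigen::VectorXd r_e = Mee*pe + Meo*po; // "setCheckerboard" of the even results
  Eigen::VectorXd r_o = Moe*pe + Moo*po; // "setCheckerboard" of the odd results

  Eigen::VectorXd r(2*n);
  r << r_e, r_o;
  std::cout << "decomposition error: " << (M*psi - r).norm() << std::endl; // ~1e-16
  return 0;
}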
+/************************************************************************************* + +Grid physics library, www.github.com/paboyle/Grid + +Source file: ./tests/core/Test_dwf_eofa_even_odd.cc + +Copyright (C) 2017 + +Author: Peter Boyle +Author: paboyle +Author: David Murphy + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 2 of the License, or +(at your option) any later version. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License along +with this program; if not, write to the Free Software Foundation, Inc., +51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + +See the full license in the file "LICENSE" in the top level distribution directory +*************************************************************************************/ +/* END LEGAL */ + +#include + +using namespace std; +using namespace Grid; +using namespace Grid::QCD; + +template +struct scal { + d internal; +}; + +Gamma::Algebra Gmu [] = { + Gamma::Algebra::GammaX, + Gamma::Algebra::GammaY, + Gamma::Algebra::GammaZ, + Gamma::Algebra::GammaT +}; + +int main (int argc, char ** argv) +{ + Grid_init(&argc, &argv); + + int threads = GridThread::GetThreads(); + std::cout << GridLogMessage << "Grid is setup to use " << threads << " threads" << std::endl; + + const int Ls = 8; + // GridCartesian* UGrid = SpaceTimeGrid::makeFourDimGrid(GridDefaultLatt(), GridDefaultSimd(Nd,vComplex::Nsimd()), GridDefaultMpi()); + GridCartesian* UGrid = SpaceTimeGrid::makeFourDimGrid(GridDefaultLatt(), GridDefaultSimd(Nd,vComplex::Nsimd()), GridDefaultMpi()); + GridCartesian* FGrid = SpaceTimeGrid::makeFiveDimGrid(Ls, UGrid); + GridRedBlackCartesian* UrbGrid = SpaceTimeGrid::makeFourDimRedBlackGrid(UGrid); + GridRedBlackCartesian* FrbGrid = SpaceTimeGrid::makeFiveDimRedBlackGrid(Ls, UGrid); + + std::vector seeds4({1,2,3,4}); + std::vector seeds5({5,6,7,8}); + + GridParallelRNG RNG4(UGrid); RNG4.SeedFixedIntegers(seeds4); + GridParallelRNG RNG5(FGrid); RNG5.SeedFixedIntegers(seeds5); + + LatticeFermion src (FGrid); random(RNG5, src); + LatticeFermion phi (FGrid); random(RNG5, phi); + LatticeFermion chi (FGrid); random(RNG5, chi); + LatticeFermion result(FGrid); result = zero; + LatticeFermion ref (FGrid); ref = zero; + LatticeFermion tmp (FGrid); tmp = zero; + LatticeFermion err (FGrid); err = zero; + LatticeGaugeField Umu (UGrid); SU3::HotConfiguration(RNG4, Umu); + std::vector U(4,UGrid); + + // Only one non-zero (y) + Umu = zero; + for(int nn=0; nn0){ U[nn] = zero; } + PokeIndex(Umu, U[nn], nn); + } + + RealD mq1 = 0.1; + RealD mq2 = 0.5; + RealD mq3 = 1.0; + RealD shift = 0.1234; + RealD M5 = 1.8; + int pm = 1; + DomainWallEOFAFermionR Ddwf(Umu, *FGrid, *FrbGrid, *UGrid, *UrbGrid, mq1, mq2, mq3, shift, pm, M5); + + LatticeFermion src_e (FrbGrid); + LatticeFermion src_o (FrbGrid); + LatticeFermion r_e (FrbGrid); + LatticeFermion r_o (FrbGrid); + LatticeFermion r_eo (FGrid); + LatticeFermion r_eeoo(FGrid); + + std::cout << GridLogMessage << "==========================================================" << std::endl; + std::cout << GridLogMessage << "= Testing that Meo + Moe + Moo + Mee = Munprec " << std::endl; + std::cout << GridLogMessage << 
"==========================================================" << std::endl; + + pickCheckerboard(Even, src_e, src); + pickCheckerboard(Odd, src_o, src); + + Ddwf.Meooe(src_e, r_o); std::cout << GridLogMessage << "Applied Meo" << std::endl; + Ddwf.Meooe(src_o, r_e); std::cout << GridLogMessage << "Applied Moe" << std::endl; + setCheckerboard(r_eo, r_o); + setCheckerboard(r_eo, r_e); + + Ddwf.Mooee(src_e, r_e); std::cout << GridLogMessage << "Applied Mee" << std::endl; + Ddwf.Mooee(src_o, r_o); std::cout << GridLogMessage << "Applied Moo" << std::endl; + setCheckerboard(r_eeoo, r_e); + setCheckerboard(r_eeoo, r_o); + + r_eo = r_eo + r_eeoo; + Ddwf.M(src, ref); + + // std::cout << GridLogMessage << r_eo << std::endl; + // std::cout << GridLogMessage << ref << std::endl; + + err = ref - r_eo; + std::cout << GridLogMessage << "EO norm diff " << norm2(err) << " " << norm2(ref) << " " << norm2(r_eo) << std::endl; + + LatticeComplex cerr(FGrid); + cerr = localInnerProduct(err,err); + // std::cout << GridLogMessage << cerr << std::endl; + + std::cout << GridLogMessage << "==============================================================" << std::endl; + std::cout << GridLogMessage << "= Test Ddagger is the dagger of D by requiring " << std::endl; + std::cout << GridLogMessage << "= < phi | Deo | chi > * = < chi | Deo^dag| phi> " << std::endl; + std::cout << GridLogMessage << "==============================================================" << std::endl; + + LatticeFermion chi_e (FrbGrid); + LatticeFermion chi_o (FrbGrid); + + LatticeFermion dchi_e(FrbGrid); + LatticeFermion dchi_o(FrbGrid); + + LatticeFermion phi_e (FrbGrid); + LatticeFermion phi_o (FrbGrid); + + LatticeFermion dphi_e(FrbGrid); + LatticeFermion dphi_o(FrbGrid); + + pickCheckerboard(Even, chi_e, chi); + pickCheckerboard(Odd , chi_o, chi); + pickCheckerboard(Even, phi_e, phi); + pickCheckerboard(Odd , phi_o, phi); + + Ddwf.Meooe (chi_e, dchi_o); + Ddwf.Meooe (chi_o, dchi_e); + Ddwf.MeooeDag(phi_e, dphi_o); + Ddwf.MeooeDag(phi_o, dphi_e); + + ComplexD pDce = innerProduct(phi_e, dchi_e); + ComplexD pDco = innerProduct(phi_o, dchi_o); + ComplexD cDpe = innerProduct(chi_e, dphi_e); + ComplexD cDpo = innerProduct(chi_o, dphi_o); + + std::cout << GridLogMessage << "e " << pDce << " " << cDpe << std::endl; + std::cout << GridLogMessage << "o " << pDco << " " << cDpo << std::endl; + + std::cout << GridLogMessage << "pDce - conj(cDpo) " << pDce-conj(cDpo) << std::endl; + std::cout << GridLogMessage << "pDco - conj(cDpe) " << pDco-conj(cDpe) << std::endl; + + std::cout << GridLogMessage << "==============================================================" << std::endl; + std::cout << GridLogMessage << "= Test MeeInv Mee = 1 " << std::endl; + std::cout << GridLogMessage << "==============================================================" << std::endl; + + pickCheckerboard(Even, chi_e, chi); + pickCheckerboard(Odd , chi_o, chi); + + Ddwf.Mooee (chi_e, src_e); + Ddwf.MooeeInv(src_e, phi_e); + + Ddwf.Mooee (chi_o, src_o); + Ddwf.MooeeInv(src_o, phi_o); + + setCheckerboard(phi, phi_e); + setCheckerboard(phi, phi_o); + + err = phi - chi; + std::cout << GridLogMessage << "norm diff " << norm2(err) << std::endl; + + std::cout << GridLogMessage << "==============================================================" << std::endl; + std::cout << GridLogMessage << "= Test MeeInvDag MeeDag = 1 " << std::endl; + std::cout << GridLogMessage << "==============================================================" << std::endl; + + pickCheckerboard(Even, chi_e, chi); + 
pickCheckerboard(Odd , chi_o, chi); + + Ddwf.MooeeDag (chi_e, src_e); + Ddwf.MooeeInvDag(src_e, phi_e); + + Ddwf.MooeeDag (chi_o, src_o); + Ddwf.MooeeInvDag(src_o, phi_o); + + setCheckerboard(phi, phi_e); + setCheckerboard(phi, phi_o); + + err = phi - chi; + std::cout << GridLogMessage << "norm diff " << norm2(err) << std::endl; + + std::cout << GridLogMessage << "==============================================================" << std::endl; + std::cout << GridLogMessage << "= Test MpcDagMpc is Hermitian " << std::endl; + std::cout << GridLogMessage << "==============================================================" << std::endl; + + random(RNG5, phi); + random(RNG5, chi); + pickCheckerboard(Even, chi_e, chi); + pickCheckerboard(Odd , chi_o, chi); + pickCheckerboard(Even, phi_e, phi); + pickCheckerboard(Odd , phi_o, phi); + RealD t1,t2; + + SchurDiagMooeeOperator HermOpEO(Ddwf); + HermOpEO.MpcDagMpc(chi_e, dchi_e, t1, t2); + HermOpEO.MpcDagMpc(chi_o, dchi_o, t1, t2); + + HermOpEO.MpcDagMpc(phi_e, dphi_e, t1, t2); + HermOpEO.MpcDagMpc(phi_o, dphi_o, t1, t2); + + pDce = innerProduct(phi_e, dchi_e); + pDco = innerProduct(phi_o, dchi_o); + cDpe = innerProduct(chi_e, dphi_e); + cDpo = innerProduct(chi_o, dphi_o); + + std::cout << GridLogMessage << "e " << pDce << " " << cDpe << std::endl; + std::cout << GridLogMessage << "o " << pDco << " " << cDpo << std::endl; + + std::cout << GridLogMessage << "pDco - conj(cDpo) " << pDco-conj(cDpo) << std::endl; + std::cout << GridLogMessage << "pDce - conj(cDpe) " << pDce-conj(cDpe) << std::endl; + + Grid_finalize(); +} diff --git a/tests/core/Test_fft.cc b/tests/core/Test_fft.cc index 877683f0..b2336cfa 100644 --- a/tests/core/Test_fft.cc +++ b/tests/core/Test_fft.cc @@ -47,7 +47,7 @@ int main (int argc, char ** argv) vol = vol * latt_size[d]; } GridCartesian GRID(latt_size,simd_layout,mpi_layout); - GridRedBlackCartesian RBGRID(latt_size,simd_layout,mpi_layout); + GridRedBlackCartesian RBGRID(&GRID); LatticeComplexD one(&GRID); LatticeComplexD zz(&GRID); diff --git a/tests/core/Test_gpwilson_even_odd.cc b/tests/core/Test_gpwilson_even_odd.cc index fc12fe75..2069eb40 100644 --- a/tests/core/Test_gpwilson_even_odd.cc +++ b/tests/core/Test_gpwilson_even_odd.cc @@ -40,7 +40,7 @@ int main (int argc, char ** argv) std::vector simd_layout = GridDefaultSimd(Nd,vComplex::Nsimd()); std::vector mpi_layout = GridDefaultMpi(); GridCartesian Grid(latt_size,simd_layout,mpi_layout); - GridRedBlackCartesian RBGrid(latt_size,simd_layout,mpi_layout); + GridRedBlackCartesian RBGrid(&Grid); int threads = GridThread::GetThreads(); std::cout< +Author: paboyle +Author: David Murphy + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 2 of the License, or +(at your option) any later version. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License along +with this program; if not, write to the Free Software Foundation, Inc., +51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
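The Mobius variant below repeats the DWF even-odd checks with the additional kernel parameters b and c. Since the two test files share their verification sequence verbatim, the dagger check could be factored into a helper along these lines (a hypothetical sketch, not part of the patch; names are illustrative):

// Meooe/MeooeDag consistency: < phi | Deo | chi >* = < chi | Deo^dag | phi >.
template<class Dirac, class Field>
void checkMeooeDagger(Dirac& D, const Field& chi_e, const Field& chi_o,
                      const Field& phi_e, const Field& phi_o)
{
  Field dchi_e(chi_e._grid), dchi_o(chi_e._grid);
  Field dphi_e(chi_e._grid), dphi_o(chi_e._grid);

  D.Meooe   (chi_e, dchi_o); D.Meooe   (chi_o, dchi_e);
  D.MeooeDag(phi_e, dphi_o); D.MeooeDag(phi_o, dphi_e);

  std::cout << GridLogMessage << "pDce - conj(cDpo) "
            << innerProduct(phi_e, dchi_e) - conj(innerProduct(chi_o, dphi_o)) << std::endl;
  std::cout << GridLogMessage << "pDco - conj(cDpe) "
            << innerProduct(phi_o, dchi_o) - conj(innerProduct(chi_e, dphi_e)) << std::endl;
}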
+ +See the full license in the file "LICENSE" in the top level distribution directory +*************************************************************************************/ +/* END LEGAL */ + +#include + +using namespace std; +using namespace Grid; +using namespace Grid::QCD; + +template +struct scal { + d internal; +}; + +Gamma::Algebra Gmu [] = { + Gamma::Algebra::GammaX, + Gamma::Algebra::GammaY, + Gamma::Algebra::GammaZ, + Gamma::Algebra::GammaT +}; + +int main (int argc, char ** argv) +{ + Grid_init(&argc, &argv); + + int threads = GridThread::GetThreads(); + std::cout << GridLogMessage << "Grid is setup to use " << threads << " threads" << std::endl; + + const int Ls = 8; + // GridCartesian* UGrid = SpaceTimeGrid::makeFourDimGrid(GridDefaultLatt(), GridDefaultSimd(Nd,vComplex::Nsimd()), GridDefaultMpi()); + GridCartesian* UGrid = SpaceTimeGrid::makeFourDimGrid(GridDefaultLatt(), GridDefaultSimd(Nd,vComplex::Nsimd()), GridDefaultMpi()); + GridCartesian* FGrid = SpaceTimeGrid::makeFiveDimGrid(Ls, UGrid); + GridRedBlackCartesian* UrbGrid = SpaceTimeGrid::makeFourDimRedBlackGrid(UGrid); + GridRedBlackCartesian* FrbGrid = SpaceTimeGrid::makeFiveDimRedBlackGrid(Ls, UGrid); + + std::vector seeds4({1,2,3,4}); + std::vector seeds5({5,6,7,8}); + + GridParallelRNG RNG4(UGrid); RNG4.SeedFixedIntegers(seeds4); + GridParallelRNG RNG5(FGrid); RNG5.SeedFixedIntegers(seeds5); + + LatticeFermion src (FGrid); random(RNG5, src); + LatticeFermion phi (FGrid); random(RNG5, phi); + LatticeFermion chi (FGrid); random(RNG5, chi); + LatticeFermion result(FGrid); result = zero; + LatticeFermion ref (FGrid); ref = zero; + LatticeFermion tmp (FGrid); tmp = zero; + LatticeFermion err (FGrid); err = zero; + LatticeGaugeField Umu (UGrid); SU3::HotConfiguration(RNG4, Umu); + std::vector U(4,UGrid); + + // Only one non-zero (y) + Umu = zero; + for(int nn=0; nn0){ U[nn] = zero; } + PokeIndex(Umu, U[nn], nn); + } + + RealD b = 2.5; + RealD c = 1.5; + RealD mq1 = 0.1; + RealD mq2 = 0.5; + RealD mq3 = 1.0; + RealD shift = 0.1234; + RealD M5 = 1.8; + int pm = 1; + MobiusEOFAFermionR Ddwf(Umu, *FGrid, *FrbGrid, *UGrid, *UrbGrid, mq1, mq2, mq3, shift, pm, M5, b, c); + + LatticeFermion src_e (FrbGrid); + LatticeFermion src_o (FrbGrid); + LatticeFermion r_e (FrbGrid); + LatticeFermion r_o (FrbGrid); + LatticeFermion r_eo (FGrid); + LatticeFermion r_eeoo(FGrid); + + std::cout << GridLogMessage << "==========================================================" << std::endl; + std::cout << GridLogMessage << "= Testing that Meo + Moe + Moo + Mee = Munprec " << std::endl; + std::cout << GridLogMessage << "==========================================================" << std::endl; + + pickCheckerboard(Even, src_e, src); + pickCheckerboard(Odd, src_o, src); + + Ddwf.Meooe(src_e, r_o); std::cout << GridLogMessage << "Applied Meo" << std::endl; + Ddwf.Meooe(src_o, r_e); std::cout << GridLogMessage << "Applied Moe" << std::endl; + setCheckerboard(r_eo, r_o); + setCheckerboard(r_eo, r_e); + + Ddwf.Mooee(src_e, r_e); std::cout << GridLogMessage << "Applied Mee" << std::endl; + Ddwf.Mooee(src_o, r_o); std::cout << GridLogMessage << "Applied Moo" << std::endl; + setCheckerboard(r_eeoo, r_e); + setCheckerboard(r_eeoo, r_o); + + r_eo = r_eo + r_eeoo; + Ddwf.M(src, ref); + + // std::cout << GridLogMessage << r_eo << std::endl; + // std::cout << GridLogMessage << ref << std::endl; + + err = ref - r_eo; + std::cout << GridLogMessage << "EO norm diff " << norm2(err) << " " << norm2(ref) << " " << norm2(r_eo) << std::endl; + + LatticeComplex 
cerr(FGrid); + cerr = localInnerProduct(err,err); + // std::cout << GridLogMessage << cerr << std::endl; + + std::cout << GridLogMessage << "==============================================================" << std::endl; + std::cout << GridLogMessage << "= Test Ddagger is the dagger of D by requiring " << std::endl; + std::cout << GridLogMessage << "= < phi | Deo | chi > * = < chi | Deo^dag| phi> " << std::endl; + std::cout << GridLogMessage << "==============================================================" << std::endl; + + LatticeFermion chi_e (FrbGrid); + LatticeFermion chi_o (FrbGrid); + + LatticeFermion dchi_e(FrbGrid); + LatticeFermion dchi_o(FrbGrid); + + LatticeFermion phi_e (FrbGrid); + LatticeFermion phi_o (FrbGrid); + + LatticeFermion dphi_e(FrbGrid); + LatticeFermion dphi_o(FrbGrid); + + pickCheckerboard(Even, chi_e, chi); + pickCheckerboard(Odd , chi_o, chi); + pickCheckerboard(Even, phi_e, phi); + pickCheckerboard(Odd , phi_o, phi); + + Ddwf.Meooe (chi_e, dchi_o); + Ddwf.Meooe (chi_o, dchi_e); + Ddwf.MeooeDag(phi_e, dphi_o); + Ddwf.MeooeDag(phi_o, dphi_e); + + ComplexD pDce = innerProduct(phi_e, dchi_e); + ComplexD pDco = innerProduct(phi_o, dchi_o); + ComplexD cDpe = innerProduct(chi_e, dphi_e); + ComplexD cDpo = innerProduct(chi_o, dphi_o); + + std::cout << GridLogMessage << "e " << pDce << " " << cDpe << std::endl; + std::cout << GridLogMessage << "o " << pDco << " " << cDpo << std::endl; + + std::cout << GridLogMessage << "pDce - conj(cDpo) " << pDce-conj(cDpo) << std::endl; + std::cout << GridLogMessage << "pDco - conj(cDpe) " << pDco-conj(cDpe) << std::endl; + + std::cout << GridLogMessage << "==============================================================" << std::endl; + std::cout << GridLogMessage << "= Test MeeInv Mee = 1 " << std::endl; + std::cout << GridLogMessage << "==============================================================" << std::endl; + + pickCheckerboard(Even, chi_e, chi); + pickCheckerboard(Odd , chi_o, chi); + + Ddwf.Mooee (chi_e, src_e); + Ddwf.MooeeInv(src_e, phi_e); + + Ddwf.Mooee (chi_o, src_o); + Ddwf.MooeeInv(src_o, phi_o); + + setCheckerboard(phi, phi_e); + setCheckerboard(phi, phi_o); + + err = phi - chi; + std::cout << GridLogMessage << "norm diff " << norm2(err) << std::endl; + + std::cout << GridLogMessage << "==============================================================" << std::endl; + std::cout << GridLogMessage << "= Test MeeInvDag MeeDag = 1 " << std::endl; + std::cout << GridLogMessage << "==============================================================" << std::endl; + + pickCheckerboard(Even, chi_e, chi); + pickCheckerboard(Odd , chi_o, chi); + + Ddwf.MooeeDag (chi_e, src_e); + Ddwf.MooeeInvDag(src_e, phi_e); + + Ddwf.MooeeDag (chi_o, src_o); + Ddwf.MooeeInvDag(src_o, phi_o); + + setCheckerboard(phi, phi_e); + setCheckerboard(phi, phi_o); + + err = phi - chi; + std::cout << GridLogMessage << "norm diff " << norm2(err) << std::endl; + + std::cout << GridLogMessage << "==============================================================" << std::endl; + std::cout << GridLogMessage << "= Test MpcDagMpc is Hermitian " << std::endl; + std::cout << GridLogMessage << "==============================================================" << std::endl; + + random(RNG5, phi); + random(RNG5, chi); + pickCheckerboard(Even, chi_e, chi); + pickCheckerboard(Odd , chi_o, chi); + pickCheckerboard(Even, phi_e, phi); + pickCheckerboard(Odd , phi_o, phi); + RealD t1,t2; + + SchurDiagMooeeOperator HermOpEO(Ddwf); + HermOpEO.MpcDagMpc(chi_e, dchi_e, t1, t2); + 
HermOpEO.MpcDagMpc(chi_o, dchi_o, t1, t2); + + HermOpEO.MpcDagMpc(phi_e, dphi_e, t1, t2); + HermOpEO.MpcDagMpc(phi_o, dphi_o, t1, t2); + + pDce = innerProduct(phi_e, dchi_e); + pDco = innerProduct(phi_o, dchi_o); + cDpe = innerProduct(chi_e, dphi_e); + cDpo = innerProduct(chi_o, dphi_o); + + std::cout << GridLogMessage << "e " << pDce << " " << cDpe << std::endl; + std::cout << GridLogMessage << "o " << pDco << " " << cDpo << std::endl; + + std::cout << GridLogMessage << "pDco - conj(cDpo) " << pDco-conj(cDpo) << std::endl; + std::cout << GridLogMessage << "pDce - conj(cDpe) " << pDce-conj(cDpe) << std::endl; + + Grid_finalize(); +} diff --git a/tests/core/Test_staggered.cc b/tests/core/Test_staggered.cc index 75531c83..f8f7035a 100644 --- a/tests/core/Test_staggered.cc +++ b/tests/core/Test_staggered.cc @@ -40,7 +40,7 @@ int main (int argc, char ** argv) std::vector simd_layout = GridDefaultSimd(Nd,vComplex::Nsimd()); std::vector mpi_layout = GridDefaultMpi(); GridCartesian Grid(latt_size,simd_layout,mpi_layout); - GridRedBlackCartesian RBGrid(latt_size,simd_layout,mpi_layout); + GridRedBlackCartesian RBGrid(&Grid); int threads = GridThread::GetThreads(); std::cout< simd_layout = GridDefaultSimd(Nd,vComplex::Nsimd()); std::vector mpi_layout = GridDefaultMpi(); GridCartesian Grid(latt_size,simd_layout,mpi_layout); - GridRedBlackCartesian RBGrid(latt_size,simd_layout,mpi_layout); + GridRedBlackCartesian RBGrid(&Grid); int threads = GridThread::GetThreads(); std::cout< simd_layout = GridDefaultSimd(Nd,vComplex::Nsimd()); std::vector mpi_layout = GridDefaultMpi(); GridCartesian Grid(latt_size,simd_layout,mpi_layout); - GridRedBlackCartesian RBGrid(latt_size,simd_layout,mpi_layout); + GridRedBlackCartesian RBGrid(&Grid); int threads = GridThread::GetThreads(); std::cout< +Author: paboyle +Author: David Murphy + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License along + with this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + + See the full license in the file "LICENSE" in the top level distribution directory + *************************************************************************************/ + /* END LEGAL */ + +////////////////////////////////////////////////////////////////////////////////////////// +// This program sets up the initial pseudofermion field |Phi> = Meofa^{-1/2}*|eta>, and +// then uses this Phi to compute the action <Phi|Meofa|Phi>. +// If all is working, one should find that <eta|eta> = <Phi|Meofa|Phi>.
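+// (Explicitly: Phi^dag Meofa Phi = eta^dag Meofa^{-1/2} Meofa Meofa^{-1/2} eta = eta^dag eta, so the
+// two quantities should agree configuration by configuration, up to the rational-approximation and
+// CG tolerances used to apply Meofa^{-1/2}.)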
+////////////////////////////////////////////////////////////////////////////////////////// + +#include + +using namespace std; +using namespace Grid; +using namespace Grid::QCD; + +// Parameters for test +const std::vector grid_dim = { 8, 8, 8, 8 }; +const int Ls = 8; +const int Npoles = 12; +const RealD mf = 0.01; +const RealD mpv = 1.0; +const RealD M5 = 1.8; + +int main(int argc, char** argv) +{ + Grid_init(&argc, &argv); + + int threads = GridThread::GetThreads(); + std::cout << GridLogMessage << "Grid is set up to use " << threads << " threads" << std::endl; + + // Initialize spacetime grid + std::cout << GridLogMessage << "Lattice dimensions: " << grid_dim << " Ls: " << Ls << std::endl; + GridCartesian* UGrid = SpaceTimeGrid::makeFourDimGrid(grid_dim, + GridDefaultSimd(Nd,vComplex::Nsimd()), GridDefaultMpi()); + GridRedBlackCartesian* UrbGrid = SpaceTimeGrid::makeFourDimRedBlackGrid(UGrid); + GridCartesian* FGrid = SpaceTimeGrid::makeFiveDimGrid(Ls, UGrid); + GridRedBlackCartesian* FrbGrid = SpaceTimeGrid::makeFiveDimRedBlackGrid(Ls, UGrid); + + // Set up RNGs + std::vector seeds4({1, 2, 3, 4}); + std::vector seeds5({5, 6, 7, 8}); + GridParallelRNG RNG5(FGrid); + RNG5.SeedFixedIntegers(seeds5); + GridParallelRNG RNG4(UGrid); + RNG4.SeedFixedIntegers(seeds4); + + // Random gauge field + LatticeGaugeField Umu(UGrid); + SU3::HotConfiguration(RNG4, Umu); + + DomainWallEOFAFermionR Lop(Umu, *FGrid, *FrbGrid, *UGrid, *UrbGrid, mf, mf, mpv, 0.0, -1, M5); + DomainWallEOFAFermionR Rop(Umu, *FGrid, *FrbGrid, *UGrid, *UrbGrid, mpv, mf, mpv, -1.0, 1, M5); + + // Construct the action and test the heatbath (zero initial guess) + { + OneFlavourRationalParams Params(0.95, 100.0, 5000, 1.0e-12, Npoles); + ConjugateGradient CG(1.0e-12, 5000); + ExactOneFlavourRatioPseudoFermionAction Meofa(Lop, Rop, CG, Params, false); + + Meofa.refresh(Umu, RNG5); + printf(" = %1.15e\n", Meofa.S(Umu)); + } + + // Construct the action and test the heatbath (forecasted initial guesses) + { + OneFlavourRationalParams Params(0.95, 100.0, 5000, 1.0e-12, Npoles); + ConjugateGradient CG(1.0e-12, 5000); + ExactOneFlavourRatioPseudoFermionAction Meofa(Lop, Rop, CG, Params, true); + + Meofa.refresh(Umu, RNG5); + printf(" = %1.15e\n", Meofa.S(Umu)); + } + + return 0; +} diff --git a/tests/debug/Test_heatbath_dwf_eofa_gparity.cc b/tests/debug/Test_heatbath_dwf_eofa_gparity.cc new file mode 100644 index 00000000..5c9d4923 --- /dev/null +++ b/tests/debug/Test_heatbath_dwf_eofa_gparity.cc @@ -0,0 +1,108 @@ + /************************************************************************************* + + Grid physics library, www.github.com/paboyle/Grid + + Source file: ./tests/debug/Test_heatbath_dwf_eofa.cc + + Copyright (C) 2017 + +Author: Peter Boyle +Author: paboyle +Author: David Murphy + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License along + with this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
+ + See the full license in the file "LICENSE" in the top level distribution directory + *************************************************************************************/ + /* END LEGAL */ + +////////////////////////////////////////////////////////////////////////////////////////// +// This program sets up the initial pseudofermion field |Phi> = Meofa^{-1/2}*|eta>, and +// then uses this Phi to compute the action . +// If all is working, one should find that = . +////////////////////////////////////////////////////////////////////////////////////////// + +#include + +using namespace std; +using namespace Grid; +using namespace Grid::QCD; + +typedef GparityWilsonImplR FermionImplPolicy; +typedef GparityDomainWallEOFAFermionR FermionAction; +typedef typename FermionAction::FermionField FermionField; + +// Parameters for test +const std::vector grid_dim = { 8, 8, 8, 8 }; +const int Ls = 8; +const int Npoles = 12; +const RealD mf = 0.01; +const RealD mpv = 1.0; +const RealD M5 = 1.8; + +int main(int argc, char** argv) +{ + Grid_init(&argc, &argv); + + int threads = GridThread::GetThreads(); + std::cout << GridLogMessage << "Grid is set up to use " << threads << " threads" << std::endl; + + // Initialize spacetime grid + std::cout << GridLogMessage << "Lattice dimensions: " << grid_dim << " Ls: " << Ls << std::endl; + GridCartesian* UGrid = SpaceTimeGrid::makeFourDimGrid(grid_dim, + GridDefaultSimd(Nd,vComplex::Nsimd()), GridDefaultMpi()); + GridRedBlackCartesian* UrbGrid = SpaceTimeGrid::makeFourDimRedBlackGrid(UGrid); + GridCartesian* FGrid = SpaceTimeGrid::makeFiveDimGrid(Ls, UGrid); + GridRedBlackCartesian* FrbGrid = SpaceTimeGrid::makeFiveDimRedBlackGrid(Ls, UGrid); + + // Set up RNGs + std::vector seeds4({1, 2, 3, 4}); + std::vector seeds5({5, 6, 7, 8}); + GridParallelRNG RNG5(FGrid); + RNG5.SeedFixedIntegers(seeds5); + GridParallelRNG RNG4(UGrid); + RNG4.SeedFixedIntegers(seeds4); + + // Random gauge field + LatticeGaugeField Umu(UGrid); + SU3::HotConfiguration(RNG4, Umu); + + // GparityDomainWallFermionR::ImplParams params; + FermionAction::ImplParams params; + FermionAction Lop(Umu, *FGrid, *FrbGrid, *UGrid, *UrbGrid, mf, mf, mpv, 0.0, -1, M5, params); + FermionAction Rop(Umu, *FGrid, *FrbGrid, *UGrid, *UrbGrid, mpv, mf, mpv, -1.0, 1, M5, params); + + // Construct the action and test the heatbath (zero initial guess) + { + OneFlavourRationalParams Params(0.95, 100.0, 5000, 1.0e-12, Npoles); + ConjugateGradient CG(1.0e-12, 5000); + ExactOneFlavourRatioPseudoFermionAction Meofa(Lop, Rop, CG, Params, false); + + Meofa.refresh(Umu, RNG5); + printf(" = %1.15e\n", Meofa.S(Umu)); + } + + // Construct the action and test the heatbath (forecasted initial guesses) + { + OneFlavourRationalParams Params(0.95, 100.0, 5000, 1.0e-12, Npoles); + ConjugateGradient CG(1.0e-12, 5000); + ExactOneFlavourRatioPseudoFermionAction Meofa(Lop, Rop, CG, Params, true); + + Meofa.refresh(Umu, RNG5); + printf(" = %1.15e\n", Meofa.S(Umu)); + } + + return 0; +} diff --git a/tests/debug/Test_heatbath_mobius_eofa.cc b/tests/debug/Test_heatbath_mobius_eofa.cc new file mode 100644 index 00000000..a952873d --- /dev/null +++ b/tests/debug/Test_heatbath_mobius_eofa.cc @@ -0,0 +1,104 @@ +/************************************************************************************* + +Grid physics library, www.github.com/paboyle/Grid + +Source file: ./tests/debug/Test_heatbath_dwf_eofa.cc + +Copyright (C) 2017 + +Author: Peter Boyle +Author: paboyle +Author: David Murphy + +This program is free software; you can redistribute it 
and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 2 of the License, or +(at your option) any later version. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License along +with this program; if not, write to the Free Software Foundation, Inc., +51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + +See the full license in the file "LICENSE" in the top level distribution directory +*************************************************************************************/ +/* END LEGAL */ + +////////////////////////////////////////////////////////////////////////////////////////// +// This program sets up the initial pseudofermion field |Phi> = Meofa^{-1/2}*|eta>, and +// then uses this Phi to compute the action . +// If all is working, one should find that = . +////////////////////////////////////////////////////////////////////////////////////////// + +#include + +using namespace std; +using namespace Grid; +using namespace Grid::QCD; + +// Parameters for test +const std::vector grid_dim = { 8, 8, 8, 8 }; +const int Ls = 8; +const int Npoles = 12; +const RealD b = 2.5; +const RealD c = 1.5; +const RealD mf = 0.01; +const RealD mpv = 1.0; +const RealD M5 = 1.8; + +int main(int argc, char** argv) +{ + Grid_init(&argc, &argv); + + int threads = GridThread::GetThreads(); + std::cout << GridLogMessage << "Grid is set up to use " << threads << " threads" << std::endl; + + // Initialize spacetime grid + std::cout << GridLogMessage << "Lattice dimensions: " << grid_dim << " Ls: " << Ls << std::endl; + GridCartesian* UGrid = SpaceTimeGrid::makeFourDimGrid(grid_dim, + GridDefaultSimd(Nd,vComplex::Nsimd()), GridDefaultMpi()); + GridRedBlackCartesian* UrbGrid = SpaceTimeGrid::makeFourDimRedBlackGrid(UGrid); + GridCartesian* FGrid = SpaceTimeGrid::makeFiveDimGrid(Ls, UGrid); + GridRedBlackCartesian* FrbGrid = SpaceTimeGrid::makeFiveDimRedBlackGrid(Ls, UGrid); + + // Set up RNGs + std::vector seeds4({1, 2, 3, 4}); + std::vector seeds5({5, 6, 7, 8}); + GridParallelRNG RNG5(FGrid); + RNG5.SeedFixedIntegers(seeds5); + GridParallelRNG RNG4(UGrid); + RNG4.SeedFixedIntegers(seeds4); + + // Random gauge field + LatticeGaugeField Umu(UGrid); + SU3::HotConfiguration(RNG4, Umu); + + MobiusEOFAFermionR Lop(Umu, *FGrid, *FrbGrid, *UGrid, *UrbGrid, mf, mf, mpv, 0.0, -1, M5, b, c); + MobiusEOFAFermionR Rop(Umu, *FGrid, *FrbGrid, *UGrid, *UrbGrid, mpv, mf, mpv, -1.0, 1, M5, b, c); + + // Construct the action and test the heatbath (zero initial guess) + { + OneFlavourRationalParams Params(0.95, 100.0, 5000, 1.0e-12, Npoles); + ConjugateGradient CG(1.0e-12, 5000); + ExactOneFlavourRatioPseudoFermionAction Meofa(Lop, Rop, CG, Params, false); + + Meofa.refresh(Umu, RNG5); + printf(" = %1.15e\n", Meofa.S(Umu)); + } + + // Construct the action and test the heatbath (forecasted initial guesses) + { + OneFlavourRationalParams Params(0.95, 100.0, 5000, 1.0e-12, Npoles); + ConjugateGradient CG(1.0e-12, 5000); + ExactOneFlavourRatioPseudoFermionAction Meofa(Lop, Rop, CG, Params, true); + + Meofa.refresh(Umu, RNG5); + printf(" = %1.15e\n", Meofa.S(Umu)); + } + + return 0; +} diff --git a/tests/debug/Test_heatbath_mobius_eofa_gparity.cc b/tests/debug/Test_heatbath_mobius_eofa_gparity.cc new file mode 
100644 index 00000000..08c6d566 --- /dev/null +++ b/tests/debug/Test_heatbath_mobius_eofa_gparity.cc @@ -0,0 +1,109 @@ +/************************************************************************************* + +Grid physics library, www.github.com/paboyle/Grid + +Source file: ./tests/debug/Test_heatbath_dwf_eofa.cc + +Copyright (C) 2017 + +Author: Peter Boyle +Author: paboyle +Author: David Murphy + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 2 of the License, or +(at your option) any later version. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License along +with this program; if not, write to the Free Software Foundation, Inc., +51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + +See the full license in the file "LICENSE" in the top level distribution directory +*************************************************************************************/ +/* END LEGAL */ + +////////////////////////////////////////////////////////////////////////////////////////// +// This program sets up the initial pseudofermion field |Phi> = Meofa^{-1/2}*|eta>, and +// then uses this Phi to compute the action . +// If all is working, one should find that = . +////////////////////////////////////////////////////////////////////////////////////////// + +#include + +using namespace std; +using namespace Grid; +using namespace Grid::QCD; + +typedef GparityWilsonImplR FermionImplPolicy; +typedef GparityMobiusEOFAFermionR FermionAction; +typedef typename FermionAction::FermionField FermionField; + +// Parameters for test +const std::vector grid_dim = { 8, 8, 8, 8 }; +const int Ls = 8; +const int Npoles = 12; +const RealD b = 2.5; +const RealD c = 1.5; +const RealD mf = 0.01; +const RealD mpv = 1.0; +const RealD M5 = 1.8; + +int main(int argc, char** argv) +{ + Grid_init(&argc, &argv); + + int threads = GridThread::GetThreads(); + std::cout << GridLogMessage << "Grid is set up to use " << threads << " threads" << std::endl; + + // Initialize spacetime grid + std::cout << GridLogMessage << "Lattice dimensions: " << grid_dim << " Ls: " << Ls << std::endl; + GridCartesian* UGrid = SpaceTimeGrid::makeFourDimGrid(grid_dim, + GridDefaultSimd(Nd,vComplex::Nsimd()), GridDefaultMpi()); + GridRedBlackCartesian* UrbGrid = SpaceTimeGrid::makeFourDimRedBlackGrid(UGrid); + GridCartesian* FGrid = SpaceTimeGrid::makeFiveDimGrid(Ls, UGrid); + GridRedBlackCartesian* FrbGrid = SpaceTimeGrid::makeFiveDimRedBlackGrid(Ls, UGrid); + + // Set up RNGs + std::vector seeds4({1, 2, 3, 4}); + std::vector seeds5({5, 6, 7, 8}); + GridParallelRNG RNG5(FGrid); + RNG5.SeedFixedIntegers(seeds5); + GridParallelRNG RNG4(UGrid); + RNG4.SeedFixedIntegers(seeds4); + + // Random gauge field + LatticeGaugeField Umu(UGrid); + SU3::HotConfiguration(RNG4, Umu); + + FermionAction::ImplParams params; + FermionAction Lop(Umu, *FGrid, *FrbGrid, *UGrid, *UrbGrid, mf, mf, mpv, 0.0, -1, M5, b, c, params); + FermionAction Rop(Umu, *FGrid, *FrbGrid, *UGrid, *UrbGrid, mpv, mf, mpv, -1.0, 1, M5, b, c, params); + + // Construct the action and test the heatbath (zero initial guess) + { + OneFlavourRationalParams Params(0.95, 100.0, 5000, 1.0e-12, Npoles); + ConjugateGradient 
CG(1.0e-12, 5000); + ExactOneFlavourRatioPseudoFermionAction Meofa(Lop, Rop, CG, Params, false); + + Meofa.refresh(Umu, RNG5); + printf(" = %1.15e\n", Meofa.S(Umu)); + } + + // Construct the action and test the heatbath (forecasted initial guesses) + { + OneFlavourRationalParams Params(0.95, 100.0, 5000, 1.0e-12, Npoles); + ConjugateGradient CG(1.0e-12, 5000); + ExactOneFlavourRatioPseudoFermionAction Meofa(Lop, Rop, CG, Params, true); + + Meofa.refresh(Umu, RNG5); + printf(" = %1.15e\n", Meofa.S(Umu)); + } + + return 0; +} diff --git a/tests/debug/Test_reweight_dwf_eofa.cc b/tests/debug/Test_reweight_dwf_eofa.cc new file mode 100644 index 00000000..98a17e2f --- /dev/null +++ b/tests/debug/Test_reweight_dwf_eofa.cc @@ -0,0 +1,206 @@ +/************************************************************************************* + +Grid physics library, www.github.com/paboyle/Grid + +Source file: ./tests/debug/Test_reweight_dwf_eofa.cc + +Copyright (C) 2017 + +Author: Peter Boyle +Author: paboyle +Author: David Murphy + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 2 of the License, or +(at your option) any later version. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License along +with this program; if not, write to the Free Software Foundation, Inc., +51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + +See the full license in the file "LICENSE" in the top level distribution directory +*************************************************************************************/ +/* END LEGAL */ +#include + +using namespace std; +using namespace Grid; +using namespace Grid::QCD; + +// parameters for test +const std::vector grid_dim = { 8, 8, 8, 8 }; +const int Ls = 8; +const int Nhits = 25; +const int max_iter = 5000; +const RealD mf = 0.1; +const RealD mb = 0.11; +const RealD M5 = 1.8; +const RealD stop_tol = 1.0e-12; + +RealD mean(const std::vector& data) +{ + int N = data.size(); + RealD mean(0.0); + for(int i=0; i& data, int sample) +{ + int N = data.size(); + RealD mean(0.0); + for(int i=0; i& jacks, RealD mean) +{ + int N = jacks.size(); + RealD std(0.0); + for(int i=0; i jack_stats(const std::vector& data) +{ + int N = data.size(); + std::vector jack_samples(N); + std::vector jack_stats(2); + + jack_stats[0] = mean(data); + for(int i=0; i seeds4({1, 2, 3, 4}); + std::vector seeds5({5, 6, 7, 8}); + GridParallelRNG RNG5(FGrid); + RNG5.SeedFixedIntegers(seeds5); + GridParallelRNG RNG4(UGrid); + RNG4.SeedFixedIntegers(seeds4); + + // Random gauge field + LatticeGaugeField Umu(UGrid); + SU3::HotConfiguration(RNG4, Umu); + + // Initialize RHMC fermion operators + DomainWallFermionR Ddwf_f(Umu, *FGrid, *FrbGrid, *UGrid, *UrbGrid, mf, M5); + DomainWallFermionR Ddwf_b(Umu, *FGrid, *FrbGrid, *UGrid, *UrbGrid, mb, M5); + SchurDiagMooeeOperator MdagM(Ddwf_f); + SchurDiagMooeeOperator VdagV(Ddwf_b); + + // Degree 12 rational approximations to x^(1/4) and x^(-1/4) + double lo = 0.0001; + double hi = 95.0; + int precision = 64; + int degree = 12; + AlgRemez remez(lo, hi, precision); + std::cout << GridLogMessage << "Generating degree " << degree << " for x^(1/4)" << std::endl; + remez.generateApprox(degree, 1, 4); + 
MultiShiftFunction PowerQuarter(remez, stop_tol, false); + MultiShiftFunction PowerNegQuarter(remez, stop_tol, true); + + // Stochastically estimate reweighting factor via RHMC + RealD scale = std::sqrt(0.5); + std::vector rw_rhmc(Nhits); + ConjugateGradientMultiShift msCG_V(max_iter, PowerQuarter); + ConjugateGradientMultiShift msCG_M(max_iter, PowerNegQuarter); + std::cout.precision(12); + + for(int hit=0; hit tmp(2, Ddwf_f.FermionRedBlackGrid()); + gaussian(RNG5, Phi); + Phi = Phi*scale; + + pickCheckerboard(Odd, PhiOdd, Phi); + + // evaluate -log(rw) + msCG_V(VdagV, PhiOdd, tmp[0]); + msCG_M(MdagM, tmp[0], tmp[1]); + rw_rhmc[hit] = norm2(tmp[1]) - norm2(PhiOdd); + std::cout << std::endl << "==================================================" << std::endl; + std::cout << " --- RHMC: Hit " << hit << ": rw = " << rw_rhmc[hit]; + std::cout << std::endl << "==================================================" << std::endl << std::endl; + + } + + // Initialize EOFA fermion operators + RealD shift_L = 0.0; + RealD shift_R = -1.0; + int pm = 1; + DomainWallEOFAFermionR Deofa_L(Umu, *FGrid, *FrbGrid, *UGrid, *UrbGrid, mf, mf, mb, shift_L, pm, M5); + DomainWallEOFAFermionR Deofa_R(Umu, *FGrid, *FrbGrid, *UGrid, *UrbGrid, mb, mf, mb, shift_R, pm, M5); + MdagMLinearOperator LdagL(Deofa_L); + MdagMLinearOperator RdagR(Deofa_R); + + // Stochastically estimate reweighting factor via EOFA + RealD k = Deofa_L.k; + std::vector rw_eofa(Nhits); + ConjugateGradient CG(stop_tol, max_iter); + SchurRedBlackDiagMooeeSolve SchurSolver(CG); + + for(int hit=0; hit tmp(2, Deofa_L.FermionGrid()); + gaussian(RNG5, Phi); + Phi = Phi*scale; + + // evaluate -log(rw) + // LH term + for(int s=0; s rhmc_result = jack_stats(rw_rhmc); + std::vector eofa_result = jack_stats(rw_eofa); + std::cout << std::endl << "RHMC: rw = " << rhmc_result[0] << " +/- " << rhmc_result[1] << std::endl; + std::cout << std::endl << "EOFA: rw = " << eofa_result[0] << " +/- " << eofa_result[1] << std::endl; + + Grid_finalize(); +} diff --git a/tests/debug/Test_reweight_dwf_eofa_gparity.cc b/tests/debug/Test_reweight_dwf_eofa_gparity.cc new file mode 100644 index 00000000..bb0fd98e --- /dev/null +++ b/tests/debug/Test_reweight_dwf_eofa_gparity.cc @@ -0,0 +1,209 @@ +/************************************************************************************* + +Grid physics library, www.github.com/paboyle/Grid + +Source file: ./tests/debug/Test_reweight_dwf_eofa_gparity.cc + +Copyright (C) 2017 + +Author: Peter Boyle +Author: paboyle +Author: David Murphy + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 2 of the License, or +(at your option) any later version. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License along +with this program; if not, write to the Free Software Foundation, Inc., +51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
+ +See the full license in the file "LICENSE" in the top level distribution directory +*************************************************************************************/ +/* END LEGAL */ +#include + +using namespace std; +using namespace Grid; +using namespace Grid::QCD; + +typedef typename GparityDomainWallFermionR::FermionField FermionField; + +// parameters for test +const std::vector grid_dim = { 8, 8, 8, 8 }; +const int Ls = 8; +const int Nhits = 10; +const int max_iter = 5000; +const RealD mf = 0.1; +const RealD mb = 0.11; +const RealD M5 = 1.8; +const RealD stop_tol = 1.0e-12; + +RealD mean(const std::vector& data) +{ + int N = data.size(); + RealD mean(0.0); + for(int i=0; i& data, int sample) +{ + int N = data.size(); + RealD mean(0.0); + for(int i=0; i& jacks, RealD mean) +{ + int N = jacks.size(); + RealD std(0.0); + for(int i=0; i jack_stats(const std::vector& data) +{ + int N = data.size(); + std::vector jack_samples(N); + std::vector jack_stats(2); + + jack_stats[0] = mean(data); + for(int i=0; i seeds4({1, 2, 3, 4}); + std::vector seeds5({5, 6, 7, 8}); + GridParallelRNG RNG5(FGrid); + RNG5.SeedFixedIntegers(seeds5); + GridParallelRNG RNG4(UGrid); + RNG4.SeedFixedIntegers(seeds4); + + // Random gauge field + LatticeGaugeField Umu(UGrid); + SU3::HotConfiguration(RNG4, Umu); + + // Initialize RHMC fermion operators + GparityDomainWallFermionR::ImplParams params; + GparityDomainWallFermionR Ddwf_f(Umu, *FGrid, *FrbGrid, *UGrid, *UrbGrid, mf, M5, params); + GparityDomainWallFermionR Ddwf_b(Umu, *FGrid, *FrbGrid, *UGrid, *UrbGrid, mb, M5, params); + SchurDiagMooeeOperator MdagM(Ddwf_f); + SchurDiagMooeeOperator VdagV(Ddwf_b); + + // Degree 12 rational approximations to x^(1/4) and x^(-1/4) + double lo = 0.0001; + double hi = 95.0; + int precision = 64; + int degree = 12; + AlgRemez remez(lo, hi, precision); + std::cout << GridLogMessage << "Generating degree " << degree << " for x^(1/4)" << std::endl; + remez.generateApprox(degree, 1, 4); + MultiShiftFunction PowerQuarter(remez, stop_tol, false); + MultiShiftFunction PowerNegQuarter(remez, stop_tol, true); + + // Stochastically estimate reweighting factor via RHMC + RealD scale = std::sqrt(0.5); + std::vector rw_rhmc(Nhits); + ConjugateGradientMultiShift msCG_V(max_iter, PowerQuarter); + ConjugateGradientMultiShift msCG_M(max_iter, PowerNegQuarter); + std::cout.precision(12); + + for(int hit=0; hit tmp(2, Ddwf_f.FermionRedBlackGrid()); + gaussian(RNG5, Phi); + Phi = Phi*scale; + + pickCheckerboard(Odd, PhiOdd, Phi); + + // evaluate -log(rw) + msCG_V(VdagV, PhiOdd, tmp[0]); + msCG_M(MdagM, tmp[0], tmp[1]); + rw_rhmc[hit] = norm2(tmp[1]) - norm2(PhiOdd); + std::cout << std::endl << "==================================================" << std::endl; + std::cout << " --- RHMC: Hit " << hit << ": rw = " << rw_rhmc[hit]; + std::cout << std::endl << "==================================================" << std::endl << std::endl; + + } + + // Initialize EOFA fermion operators + RealD shift_L = 0.0; + RealD shift_R = -1.0; + int pm = 1; + GparityDomainWallEOFAFermionR Deofa_L(Umu, *FGrid, *FrbGrid, *UGrid, *UrbGrid, mf, mf, mb, shift_L, pm, M5, params); + GparityDomainWallEOFAFermionR Deofa_R(Umu, *FGrid, *FrbGrid, *UGrid, *UrbGrid, mb, mf, mb, shift_R, pm, M5, params); + MdagMLinearOperator LdagL(Deofa_L); + MdagMLinearOperator RdagR(Deofa_R); + + // Stochastically estimate reweighting factor via EOFA + RealD k = Deofa_L.k; + std::vector rw_eofa(Nhits); + ConjugateGradient CG(stop_tol, max_iter); + SchurRedBlackDiagMooeeSolve 
SchurSolver(CG); + + for(int hit=0; hit tmp(2, Deofa_L.FermionGrid()); + gaussian(RNG5, Phi); + Phi = Phi*scale; + + // evaluate -log(rw) + // LH term + for(int s=0; s rhmc_result = jack_stats(rw_rhmc); + std::vector eofa_result = jack_stats(rw_eofa); + std::cout << std::endl << "RHMC: rw = " << rhmc_result[0] << " +/- " << rhmc_result[1] << std::endl; + std::cout << std::endl << "EOFA: rw = " << eofa_result[0] << " +/- " << eofa_result[1] << std::endl; + + Grid_finalize(); +} diff --git a/tests/debug/Test_reweight_mobius_eofa.cc b/tests/debug/Test_reweight_mobius_eofa.cc new file mode 100644 index 00000000..c4fa78d0 --- /dev/null +++ b/tests/debug/Test_reweight_mobius_eofa.cc @@ -0,0 +1,215 @@ +/************************************************************************************* + +Grid physics library, www.github.com/paboyle/Grid + +Source file: ./tests/debug/Test_reweight_dwf_eofa.cc + +Copyright (C) 2017 + +Author: Peter Boyle +Author: paboyle +Author: David Murphy + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 2 of the License, or +(at your option) any later version. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License along +with this program; if not, write to the Free Software Foundation, Inc., +51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + +See the full license in the file "LICENSE" in the top level distribution directory +*************************************************************************************/ +/* END LEGAL */ +#include + +using namespace std; +using namespace Grid; +using namespace Grid::QCD; + +// parameters for test +const std::vector grid_dim = { 8, 8, 8, 8 }; +const int Ls = 8; +const int Nhits = 10; +const int max_iter = 5000; +const RealD b = 2.5; +const RealD c = 1.5; +const RealD mf = 0.1; +const RealD mb = 0.11; +const RealD M5 = 1.8; +const RealD stop_tol = 1.0e-12; + +RealD mean(const std::vector& data) +{ + int N = data.size(); + RealD mean(0.0); + for(int i=0; i& data, int sample) +{ + int N = data.size(); + RealD mean(0.0); + for(int i=0; i& jacks, RealD mean) +{ + int N = jacks.size(); + RealD std(0.0); + for(int i=0; i jack_stats(const std::vector& data) +{ + int N = data.size(); + std::vector jack_samples(N); + std::vector jack_stats(2); + + jack_stats[0] = mean(data); + for(int i=0; i seeds4({1, 2, 3, 4}); + std::vector seeds5({5, 6, 7, 8}); + GridParallelRNG RNG5(FGrid); + RNG5.SeedFixedIntegers(seeds5); + GridParallelRNG RNG4(UGrid); + RNG4.SeedFixedIntegers(seeds4); + + // Random gauge field + LatticeGaugeField Umu(UGrid); + SU3::HotConfiguration(RNG4, Umu); + + // Initialize RHMC fermion operators + MobiusFermionR Ddwf_f(Umu, *FGrid, *FrbGrid, *UGrid, *UrbGrid, mf, M5, b, c); + MobiusFermionR Ddwf_b(Umu, *FGrid, *FrbGrid, *UGrid, *UrbGrid, mb, M5, b, c); + SchurDiagMooeeOperator MdagM(Ddwf_f); + SchurDiagMooeeOperator VdagV(Ddwf_b); + + // Degree 12 rational approximations to x^(1/4) and x^(-1/4) + double lo = 0.0001; + double hi = 95.0; + int precision = 64; + int degree = 12; + AlgRemez remez(lo, hi, precision); + std::cout << GridLogMessage << "Generating degree " << degree << " for x^(1/4)" << std::endl; + remez.generateApprox(degree, 
1, 4); + MultiShiftFunction PowerQuarter(remez, stop_tol, false); + MultiShiftFunction PowerNegQuarter(remez, stop_tol, true); + + // Stochastically estimate reweighting factor via RHMC + RealD scale = std::sqrt(0.5); + std::vector rw_rhmc(Nhits); + ConjugateGradientMultiShift msCG_V(max_iter, PowerQuarter); + ConjugateGradientMultiShift msCG_M(max_iter, PowerNegQuarter); + std::cout.precision(12); + + for(int hit=0; hit tmp(2, Ddwf_f.FermionRedBlackGrid()); + gaussian(RNG5, Phi); + Phi = Phi*scale; + + pickCheckerboard(Odd, PhiOdd, Phi); + + // evaluate -log(rw) + msCG_V(VdagV, PhiOdd, tmp[0]); + msCG_M(MdagM, tmp[0], tmp[1]); + rw_rhmc[hit] = norm2(tmp[1]) - norm2(PhiOdd); + std::cout << std::endl << "==================================================" << std::endl; + std::cout << " --- RHMC: Hit " << hit << ": rw = " << rw_rhmc[hit]; + std::cout << std::endl << "==================================================" << std::endl << std::endl; + + } + + // Initialize EOFA fermion operators + RealD shift_L = 0.0; + RealD shift_R = -1.0; + int pm = 1; + MobiusEOFAFermionR Deofa_L(Umu, *FGrid, *FrbGrid, *UGrid, *UrbGrid, mf, mf, mb, shift_L, pm, M5, b, c); + MobiusEOFAFermionR Deofa_R(Umu, *FGrid, *FrbGrid, *UGrid, *UrbGrid, mb, mf, mb, shift_R, pm, M5, b, c); + MdagMLinearOperator LdagL(Deofa_L); + MdagMLinearOperator RdagR(Deofa_R); + + // Stochastically estimate reweighting factor via EOFA + RealD k = Deofa_L.k; + std::vector rw_eofa(Nhits); + ConjugateGradient CG(stop_tol, max_iter); + SchurRedBlackDiagMooeeSolve SchurSolver(CG); + + // Compute -log(Z), where: ( RHMC det ratio ) = Z * ( EOFA det ratio ) + RealD Z = std::pow(b+c+1.0,Ls) + mf*std::pow(b+c-1.0,Ls); + Z /= std::pow(b+c+1.0,Ls) + mb*std::pow(b+c-1.0,Ls); + Z = -12.0*grid_dim[0]*grid_dim[1]*grid_dim[2]*grid_dim[3]*std::log(Z); + + for(int hit=0; hit tmp(2, Deofa_L.FermionGrid()); + gaussian(RNG5, Phi); + Phi = Phi*scale; + + // evaluate -log(rw) + // LH term + for(int s=0; s rhmc_result = jack_stats(rw_rhmc); + std::vector eofa_result = jack_stats(rw_eofa); + std::cout << std::endl << "RHMC: rw = " << rhmc_result[0] << " +/- " << rhmc_result[1] << std::endl; + std::cout << std::endl << "EOFA: rw = " << eofa_result[0] << " +/- " << eofa_result[1] << std::endl; + + Grid_finalize(); +} diff --git a/tests/debug/Test_reweight_mobius_eofa_gparity.cc b/tests/debug/Test_reweight_mobius_eofa_gparity.cc new file mode 100644 index 00000000..11a242d2 --- /dev/null +++ b/tests/debug/Test_reweight_mobius_eofa_gparity.cc @@ -0,0 +1,218 @@ +/************************************************************************************* + +Grid physics library, www.github.com/paboyle/Grid + +Source file: ./tests/debug/Test_reweight_dwf_eofa.cc + +Copyright (C) 2017 + +Author: Peter Boyle +Author: paboyle +Author: David Murphy + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 2 of the License, or +(at your option) any later version. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License along +with this program; if not, write to the Free Software Foundation, Inc., +51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
+ +See the full license in the file "LICENSE" in the top level distribution directory +*************************************************************************************/ +/* END LEGAL */ +#include + +using namespace std; +using namespace Grid; +using namespace Grid::QCD; + +typedef typename GparityDomainWallFermionR::FermionField FermionField; + +// parameters for test +const std::vector grid_dim = { 8, 8, 8, 8 }; +const int Ls = 8; +const int Nhits = 10; +const int max_iter = 5000; +const RealD b = 2.5; +const RealD c = 1.5; +const RealD mf = 0.1; +const RealD mb = 0.11; +const RealD M5 = 1.8; +const RealD stop_tol = 1.0e-12; + +RealD mean(const std::vector& data) +{ + int N = data.size(); + RealD mean(0.0); + for(int i=0; i& data, int sample) +{ + int N = data.size(); + RealD mean(0.0); + for(int i=0; i& jacks, RealD mean) +{ + int N = jacks.size(); + RealD std(0.0); + for(int i=0; i jack_stats(const std::vector& data) +{ + int N = data.size(); + std::vector jack_samples(N); + std::vector jack_stats(2); + + jack_stats[0] = mean(data); + for(int i=0; i seeds4({1, 2, 3, 4}); + std::vector seeds5({5, 6, 7, 8}); + GridParallelRNG RNG5(FGrid); + RNG5.SeedFixedIntegers(seeds5); + GridParallelRNG RNG4(UGrid); + RNG4.SeedFixedIntegers(seeds4); + + // Random gauge field + LatticeGaugeField Umu(UGrid); + SU3::HotConfiguration(RNG4, Umu); + + // Initialize RHMC fermion operators + GparityDomainWallFermionR::ImplParams params; + GparityMobiusFermionR Ddwf_f(Umu, *FGrid, *FrbGrid, *UGrid, *UrbGrid, mf, M5, b, c, params); + GparityMobiusFermionR Ddwf_b(Umu, *FGrid, *FrbGrid, *UGrid, *UrbGrid, mb, M5, b, c, params); + SchurDiagMooeeOperator MdagM(Ddwf_f); + SchurDiagMooeeOperator VdagV(Ddwf_b); + + // Degree 12 rational approximations to x^(1/4) and x^(-1/4) + double lo = 0.0001; + double hi = 95.0; + int precision = 64; + int degree = 12; + AlgRemez remez(lo, hi, precision); + std::cout << GridLogMessage << "Generating degree " << degree << " for x^(1/4)" << std::endl; + remez.generateApprox(degree, 1, 4); + MultiShiftFunction PowerQuarter(remez, stop_tol, false); + MultiShiftFunction PowerNegQuarter(remez, stop_tol, true); + + // Stochastically estimate reweighting factor via RHMC + RealD scale = std::sqrt(0.5); + std::vector rw_rhmc(Nhits); + ConjugateGradientMultiShift msCG_V(max_iter, PowerQuarter); + ConjugateGradientMultiShift msCG_M(max_iter, PowerNegQuarter); + std::cout.precision(12); + + for(int hit=0; hit tmp(2, Ddwf_f.FermionRedBlackGrid()); + gaussian(RNG5, Phi); + Phi = Phi*scale; + + pickCheckerboard(Odd, PhiOdd, Phi); + + // evaluate -log(rw) + msCG_V(VdagV, PhiOdd, tmp[0]); + msCG_M(MdagM, tmp[0], tmp[1]); + rw_rhmc[hit] = norm2(tmp[1]) - norm2(PhiOdd); + std::cout << std::endl << "==================================================" << std::endl; + std::cout << " --- RHMC: Hit " << hit << ": rw = " << rw_rhmc[hit]; + std::cout << std::endl << "==================================================" << std::endl << std::endl; + + } + + // Initialize EOFA fermion operators + RealD shift_L = 0.0; + RealD shift_R = -1.0; + int pm = 1; + GparityMobiusEOFAFermionR Deofa_L(Umu, *FGrid, *FrbGrid, *UGrid, *UrbGrid, mf, mf, mb, shift_L, pm, M5, b, c, params); + GparityMobiusEOFAFermionR Deofa_R(Umu, *FGrid, *FrbGrid, *UGrid, *UrbGrid, mb, mf, mb, shift_R, pm, M5, b, c, params); + MdagMLinearOperator LdagL(Deofa_L); + MdagMLinearOperator RdagR(Deofa_R); + + // Stochastically estimate reweighting factor via EOFA + RealD k = Deofa_L.k; + std::vector rw_eofa(Nhits); + ConjugateGradient CG(stop_tol, 
max_iter); + SchurRedBlackDiagMooeeSolve SchurSolver(CG); + + // Compute -log(Z), where: ( RHMC det ratio ) = Z * ( EOFA det ratio ) + RealD Z = std::pow(b+c+1.0,Ls) + mf*std::pow(b+c-1.0,Ls); + Z /= std::pow(b+c+1.0,Ls) + mb*std::pow(b+c-1.0,Ls); + Z = -12.0*grid_dim[0]*grid_dim[1]*grid_dim[2]*grid_dim[3]*std::log(Z); + + for(int hit=0; hit tmp(2, Deofa_L.FermionGrid()); + gaussian(RNG5, Phi); + Phi = Phi*scale; + + // evaluate -log(rw) + // LH term + for(int s=0; s rhmc_result = jack_stats(rw_rhmc); + std::vector eofa_result = jack_stats(rw_eofa); + std::cout << std::endl << "RHMC: rw = " << rhmc_result[0] << " +/- " << rhmc_result[1] << std::endl; + std::cout << std::endl << "EOFA: rw = " << eofa_result[0] << " +/- " << eofa_result[1] << std::endl; + + Grid_finalize(); +} diff --git a/tests/forces/Test_dwf_force_eofa.cc b/tests/forces/Test_dwf_force_eofa.cc new file mode 100644 index 00000000..f17579ae --- /dev/null +++ b/tests/forces/Test_dwf_force_eofa.cc @@ -0,0 +1,164 @@ +/************************************************************************************* + +Grid physics library, www.github.com/paboyle/Grid + +Source file: ./tests/forces/Test_dwf_force_eofa.cc + +Copyright (C) 2017 + +Author: Peter Boyle +Author: David Murphy + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 2 of the License, or +(at your option) any later version. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License along +with this program; if not, write to the Free Software Foundation, Inc., +51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + +See the full license in the file "LICENSE" in the top level distribution directory +*************************************************************************************/ +/* END LEGAL */ + +#include + +using namespace std; +using namespace Grid; +using namespace Grid::QCD; + +int main (int argc, char** argv) +{ + Grid_init(&argc, &argv); + + std::vector latt_size = GridDefaultLatt(); + std::vector simd_layout = GridDefaultSimd(Nd,vComplex::Nsimd()); + std::vector mpi_layout = GridDefaultMpi(); + + const int Ls = 8; + + GridCartesian *UGrid = SpaceTimeGrid::makeFourDimGrid(GridDefaultLatt(), GridDefaultSimd(Nd,vComplex::Nsimd()), GridDefaultMpi()); + GridRedBlackCartesian *UrbGrid = SpaceTimeGrid::makeFourDimRedBlackGrid(UGrid); + GridCartesian *FGrid = SpaceTimeGrid::makeFiveDimGrid(Ls, UGrid); + GridRedBlackCartesian *FrbGrid = SpaceTimeGrid::makeFiveDimRedBlackGrid(Ls, UGrid); + + // Want a different conf at every run + // First create an instance of an engine. + std::random_device rnd_device; + // Specify the engine and distribution. 
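+ // (Note: seeds4 below is generated from std::random_device, so the 4d gauge configuration
+ // differs on every run, while seeds5 stays fixed and the 5d pseudofermion noise is reproducible.)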
+ std::mt19937 mersenne_engine(rnd_device()); + std::uniform_int_distribution dist(1, 100); + + auto gen = std::bind(dist, mersenne_engine); + std::vector seeds4(4); + generate(begin(seeds4), end(seeds4), gen); + + //std::vector seeds4({1,2,3,5}); + std::vector seeds5({5,6,7,8}); + GridParallelRNG RNG5(FGrid); RNG5.SeedFixedIntegers(seeds5); + GridParallelRNG RNG4(UGrid); RNG4.SeedFixedIntegers(seeds4); + + int threads = GridThread::GetThreads(); + std::cout << GridLogMessage << "Grid is setup to use " << threads << " threads" << std::endl; + + LatticeFermion phi (FGrid); gaussian(RNG5, phi); + LatticeFermion Mphi (FGrid); + LatticeFermion MphiPrime (FGrid); + + LatticeGaugeField U(UGrid); + SU3::HotConfiguration(RNG4,U); + + //////////////////////////////////// + // Unmodified matrix element + //////////////////////////////////// + RealD mf = 0.01; + RealD mb = 1.0; + RealD M5 = 1.8; + DomainWallEOFAFermionR Lop(U, *FGrid, *FrbGrid, *UGrid, *UrbGrid, mf, mf, mb, 0.0, -1, M5); + DomainWallEOFAFermionR Rop(U, *FGrid, *FrbGrid, *UGrid, *UrbGrid, mb, mf, mb, -1.0, 1, M5); + OneFlavourRationalParams Params(0.95, 100.0, 5000, 1.0e-12, 12); + ConjugateGradient CG(1.0e-12, 5000); + ExactOneFlavourRatioPseudoFermionAction Meofa(Lop, Rop, CG, Params, true); + + Meofa.refresh(U, RNG5); + RealD S = Meofa.S(U); // pdag M p + + // get the deriv of phidag M phi with respect to "U" + LatticeGaugeField UdSdU(UGrid); + Meofa.deriv(U, UdSdU); + + //////////////////////////////////// + // Modify the gauge field a little + //////////////////////////////////// + RealD dt = 0.0001; + + LatticeColourMatrix mommu(UGrid); + LatticeColourMatrix forcemu(UGrid); + LatticeGaugeField mom(UGrid); + LatticeGaugeField Uprime(UGrid); + + for(int mu=0; mu(mom, mommu, mu); + + // fourth order exponential approx + parallel_for(auto i=mom.begin(); i(UdSdU, mu); + mommu = Ta(mommu)*2.0; + PokeIndex(UdSdU, mommu, mu); + } + + for(int mu=0; mu(UdSdU, mu); + mommu = PeekIndex(mom, mu); + + // Update PF action density + dS = dS + trace(mommu*forcemu)*dt; + } + + ComplexD dSpred = sum(dS); + + /*std::cout << GridLogMessage << " S " << S << std::endl; + std::cout << GridLogMessage << " Sprime " << Sprime << std::endl; + std::cout << GridLogMessage << "dS " << Sprime-S << std::endl; + std::cout << GridLogMessage << "predict dS " << dSpred << std::endl;*/ + printf("\nS = %1.15e\n", S); + printf("Sprime = %1.15e\n", Sprime); + printf("dS = %1.15e\n", Sprime - S); + printf("real(dS_predict) = %1.15e\n", dSpred.real()); + printf("imag(dS_predict) = %1.15e\n\n", dSpred.imag()); + + assert( fabs(real(Sprime-S-dSpred)) < 1.0 ) ; + + std::cout << GridLogMessage << "Done" << std::endl; + Grid_finalize(); +} diff --git a/tests/forces/Test_dwf_gpforce_eofa.cc b/tests/forces/Test_dwf_gpforce_eofa.cc new file mode 100644 index 00000000..3afeaa43 --- /dev/null +++ b/tests/forces/Test_dwf_gpforce_eofa.cc @@ -0,0 +1,169 @@ +/************************************************************************************* + +Grid physics library, www.github.com/paboyle/Grid + +Source file: ./tests/forces/Test_dwf_force_eofa.cc + +Copyright (C) 2017 + +Author: Peter Boyle +Author: David Murphy + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 2 of the License, or +(at your option) any later version. 
+ +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License along +with this program; if not, write to the Free Software Foundation, Inc., +51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + +See the full license in the file "LICENSE" in the top level distribution directory +*************************************************************************************/ +/* END LEGAL */ + +#include + +using namespace std; +using namespace Grid; +using namespace Grid::QCD; + +typedef GparityWilsonImplR FermionImplPolicy; +typedef GparityDomainWallEOFAFermionR FermionAction; +typedef typename FermionAction::FermionField FermionField; + +int main (int argc, char** argv) +{ + Grid_init(&argc, &argv); + + std::vector latt_size = GridDefaultLatt(); + std::vector simd_layout = GridDefaultSimd(Nd,vComplex::Nsimd()); + std::vector mpi_layout = GridDefaultMpi(); + + const int Ls = 8; + + GridCartesian *UGrid = SpaceTimeGrid::makeFourDimGrid(GridDefaultLatt(), GridDefaultSimd(Nd,vComplex::Nsimd()), GridDefaultMpi()); + GridRedBlackCartesian *UrbGrid = SpaceTimeGrid::makeFourDimRedBlackGrid(UGrid); + GridCartesian *FGrid = SpaceTimeGrid::makeFiveDimGrid(Ls, UGrid); + GridRedBlackCartesian *FrbGrid = SpaceTimeGrid::makeFiveDimRedBlackGrid(Ls, UGrid); + + // Want a different conf at every run + // First create an instance of an engine. + std::random_device rnd_device; + // Specify the engine and distribution. + std::mt19937 mersenne_engine(rnd_device()); + std::uniform_int_distribution dist(1, 100); + + auto gen = std::bind(dist, mersenne_engine); + std::vector seeds4(4); + generate(begin(seeds4), end(seeds4), gen); + + //std::vector seeds4({1,2,3,5}); + std::vector seeds5({5,6,7,8}); + GridParallelRNG RNG5(FGrid); RNG5.SeedFixedIntegers(seeds5); + GridParallelRNG RNG4(UGrid); RNG4.SeedFixedIntegers(seeds4); + + int threads = GridThread::GetThreads(); + std::cout << GridLogMessage << "Grid is setup to use " << threads << " threads" << std::endl; + + FermionField phi (FGrid); gaussian(RNG5, phi); + FermionField Mphi (FGrid); + FermionField MphiPrime (FGrid); + + LatticeGaugeField U(UGrid); + SU3::HotConfiguration(RNG4,U); + + //////////////////////////////////// + // Unmodified matrix element + //////////////////////////////////// + RealD mf = 0.01; + RealD mb = 1.0; + RealD M5 = 1.8; + FermionAction::ImplParams params; + FermionAction Lop(U, *FGrid, *FrbGrid, *UGrid, *UrbGrid, mf, mf, mb, 0.0, -1, M5, params); + FermionAction Rop(U, *FGrid, *FrbGrid, *UGrid, *UrbGrid, mb, mf, mb, -1.0, 1, M5, params); + OneFlavourRationalParams Params(0.95, 100.0, 5000, 1.0e-12, 12); + ConjugateGradient CG(1.0e-12, 5000); + ExactOneFlavourRatioPseudoFermionAction Meofa(Lop, Rop, CG, Params, true); + + Meofa.refresh(U, RNG5); + RealD S = Meofa.S(U); // pdag M p + + // get the deriv of phidag M phi with respect to "U" + LatticeGaugeField UdSdU(UGrid); + Meofa.deriv(U, UdSdU); + + //////////////////////////////////// + // Modify the gauge field a little + //////////////////////////////////// + RealD dt = 0.0001; + + LatticeColourMatrix mommu(UGrid); + LatticeColourMatrix forcemu(UGrid); + LatticeGaugeField mom(UGrid); + LatticeGaugeField Uprime(UGrid); + + for(int mu=0; mu(mom, mommu, mu); + + // fourth order exponential approx + parallel_for(auto i=mom.begin(); i(UdSdU, mu); + 
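// Ta() returns the traceless anti-Hermitian projection, which carries a factor 1/2; the *2.0 below undoes it so forcemu holds the full derivative. +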
mommu = Ta(mommu)*2.0; + PokeIndex(UdSdU, mommu, mu); + } + + for(int mu=0; mu(UdSdU, mu); + mommu = PeekIndex(mom, mu); + + // Update PF action density + dS = dS + trace(mommu*forcemu)*dt; + } + + ComplexD dSpred = sum(dS); + + /*std::cout << GridLogMessage << " S " << S << std::endl; + std::cout << GridLogMessage << " Sprime " << Sprime << std::endl; + std::cout << GridLogMessage << "dS " << Sprime-S << std::endl; + std::cout << GridLogMessage << "predict dS " << dSpred << std::endl;*/ + printf("\nS = %1.15e\n", S); + printf("Sprime = %1.15e\n", Sprime); + printf("dS = %1.15e\n", Sprime - S); + printf("real(dS_predict) = %1.15e\n", dSpred.real()); + printf("imag(dS_predict) = %1.15e\n\n", dSpred.imag()); + + assert( fabs(real(Sprime-S-dSpred)) < 1.0 ) ; + + std::cout << GridLogMessage << "Done" << std::endl; + Grid_finalize(); +} diff --git a/tests/forces/Test_gp_rect_force.cc b/tests/forces/Test_gp_rect_force.cc index bf308749..bb35c77a 100644 --- a/tests/forces/Test_gp_rect_force.cc +++ b/tests/forces/Test_gp_rect_force.cc @@ -42,7 +42,7 @@ int main (int argc, char ** argv) std::vector mpi_layout = GridDefaultMpi(); GridCartesian Grid(latt_size,simd_layout,mpi_layout); - GridRedBlackCartesian RBGrid(latt_size,simd_layout,mpi_layout); + GridRedBlackCartesian RBGrid(&Grid); int threads = GridThread::GetThreads(); std::cout< mpi_layout = GridDefaultMpi(); GridCartesian Grid(latt_size,simd_layout,mpi_layout); - GridRedBlackCartesian RBGrid(latt_size,simd_layout,mpi_layout); + GridRedBlackCartesian RBGrid(&Grid); int threads = GridThread::GetThreads(); std::cout< +Author: David Murphy + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 2 of the License, or +(at your option) any later version. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License along +with this program; if not, write to the Free Software Foundation, Inc., +51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + +See the full license in the file "LICENSE" in the top level distribution directory +*************************************************************************************/ +/* END LEGAL */ + +#include + +using namespace std; +using namespace Grid; +using namespace Grid::QCD; + +int main (int argc, char** argv) +{ + Grid_init(&argc, &argv); + + std::vector latt_size = GridDefaultLatt(); + std::vector simd_layout = GridDefaultSimd(Nd,vComplex::Nsimd()); + std::vector mpi_layout = GridDefaultMpi(); + + const int Ls = 8; + + GridCartesian *UGrid = SpaceTimeGrid::makeFourDimGrid(GridDefaultLatt(), GridDefaultSimd(Nd,vComplex::Nsimd()), GridDefaultMpi()); + GridRedBlackCartesian *UrbGrid = SpaceTimeGrid::makeFourDimRedBlackGrid(UGrid); + GridCartesian *FGrid = SpaceTimeGrid::makeFiveDimGrid(Ls, UGrid); + GridRedBlackCartesian *FrbGrid = SpaceTimeGrid::makeFiveDimRedBlackGrid(Ls, UGrid); + + // Want a different conf at every run + // First create an instance of an engine. + std::random_device rnd_device; + // Specify the engine and distribution. 
+ std::mt19937 mersenne_engine(rnd_device()); + std::uniform_int_distribution dist(1, 100); + + auto gen = std::bind(dist, mersenne_engine); + std::vector seeds4(4); + generate(begin(seeds4), end(seeds4), gen); + + //std::vector seeds4({1,2,3,5}); + std::vector seeds5({5,6,7,8}); + GridParallelRNG RNG5(FGrid); RNG5.SeedFixedIntegers(seeds5); + GridParallelRNG RNG4(UGrid); RNG4.SeedFixedIntegers(seeds4); + + int threads = GridThread::GetThreads(); + std::cout << GridLogMessage << "Grid is setup to use " << threads << " threads" << std::endl; + + LatticeFermion phi (FGrid); gaussian(RNG5, phi); + LatticeFermion Mphi (FGrid); + LatticeFermion MphiPrime (FGrid); + + LatticeGaugeField U(UGrid); + SU3::HotConfiguration(RNG4,U); + + //////////////////////////////////// + // Unmodified matrix element + //////////////////////////////////// + RealD b = 2.5; + RealD c = 1.5; + RealD mf = 0.01; + RealD mb = 1.0; + RealD M5 = 1.8; + MobiusEOFAFermionR Lop(U, *FGrid, *FrbGrid, *UGrid, *UrbGrid, mf, mf, mb, 0.0, -1, M5, b, c); + MobiusEOFAFermionR Rop(U, *FGrid, *FrbGrid, *UGrid, *UrbGrid, mb, mf, mb, -1.0, 1, M5, b, c); + OneFlavourRationalParams Params(0.95, 100.0, 5000, 1.0e-12, 12); + ConjugateGradient CG(1.0e-12, 5000); + ExactOneFlavourRatioPseudoFermionAction Meofa(Lop, Rop, CG, Params, false); + + Meofa.refresh(U, RNG5); + RealD S = Meofa.S(U); // pdag M p + + // get the deriv of phidag M phi with respect to "U" + LatticeGaugeField UdSdU(UGrid); + Meofa.deriv(U, UdSdU); + + //////////////////////////////////// + // Modify the gauge field a little + //////////////////////////////////// + RealD dt = 0.0001; + + LatticeColourMatrix mommu(UGrid); + LatticeColourMatrix forcemu(UGrid); + LatticeGaugeField mom(UGrid); + LatticeGaugeField Uprime(UGrid); + + for(int mu=0; mu(mom, mommu, mu); + + // fourth order exponential approx + parallel_for(auto i=mom.begin(); i(UdSdU, mu); + mommu = Ta(mommu)*2.0; + PokeIndex(UdSdU, mommu, mu); + } + + for(int mu=0; mu(UdSdU, mu); + mommu = PeekIndex(mom, mu); + + // Update PF action density + dS = dS + trace(mommu*forcemu)*dt; + } + + ComplexD dSpred = sum(dS); + + /*std::cout << GridLogMessage << " S " << S << std::endl; + std::cout << GridLogMessage << " Sprime " << Sprime << std::endl; + std::cout << GridLogMessage << "dS " << Sprime-S << std::endl; + std::cout << GridLogMessage << "predict dS " << dSpred << std::endl;*/ + printf("\nS = %1.15e\n", S); + printf("Sprime = %1.15e\n", Sprime); + printf("dS = %1.15e\n", Sprime - S); + printf("real(dS_predict) = %1.15e\n", dSpred.real()); + printf("imag(dS_predict) = %1.15e\n\n", dSpred.imag()); + + assert( fabs(real(Sprime-S-dSpred)) < 1.0 ) ; + + std::cout << GridLogMessage << "Done" << std::endl; + Grid_finalize(); +} diff --git a/tests/forces/Test_mobius_gpforce_eofa.cc b/tests/forces/Test_mobius_gpforce_eofa.cc new file mode 100644 index 00000000..72f1dee2 --- /dev/null +++ b/tests/forces/Test_mobius_gpforce_eofa.cc @@ -0,0 +1,171 @@ +/************************************************************************************* + +Grid physics library, www.github.com/paboyle/Grid + +Source file: ./tests/forces/Test_dwf_force_eofa.cc + +Copyright (C) 2017 + +Author: Peter Boyle +Author: David Murphy + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 2 of the License, or +(at your option) any later version. 
+ +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License along +with this program; if not, write to the Free Software Foundation, Inc., +51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + +See the full license in the file "LICENSE" in the top level distribution directory +*************************************************************************************/ +/* END LEGAL */ + +#include + +using namespace std; +using namespace Grid; +using namespace Grid::QCD; + +typedef GparityWilsonImplR FermionImplPolicy; +typedef GparityMobiusEOFAFermionR FermionAction; +typedef typename FermionAction::FermionField FermionField; + +int main (int argc, char** argv) +{ + Grid_init(&argc, &argv); + + std::vector latt_size = GridDefaultLatt(); + std::vector simd_layout = GridDefaultSimd(Nd,vComplex::Nsimd()); + std::vector mpi_layout = GridDefaultMpi(); + + const int Ls = 8; + + GridCartesian *UGrid = SpaceTimeGrid::makeFourDimGrid(GridDefaultLatt(), GridDefaultSimd(Nd,vComplex::Nsimd()), GridDefaultMpi()); + GridRedBlackCartesian *UrbGrid = SpaceTimeGrid::makeFourDimRedBlackGrid(UGrid); + GridCartesian *FGrid = SpaceTimeGrid::makeFiveDimGrid(Ls, UGrid); + GridRedBlackCartesian *FrbGrid = SpaceTimeGrid::makeFiveDimRedBlackGrid(Ls, UGrid); + + // Want a different conf at every run + // First create an instance of an engine. + std::random_device rnd_device; + // Specify the engine and distribution. + std::mt19937 mersenne_engine(rnd_device()); + std::uniform_int_distribution dist(1, 100); + + auto gen = std::bind(dist, mersenne_engine); + std::vector seeds4(4); + generate(begin(seeds4), end(seeds4), gen); + + //std::vector seeds4({1,2,3,5}); + std::vector seeds5({5,6,7,8}); + GridParallelRNG RNG5(FGrid); RNG5.SeedFixedIntegers(seeds5); + GridParallelRNG RNG4(UGrid); RNG4.SeedFixedIntegers(seeds4); + + int threads = GridThread::GetThreads(); + std::cout << GridLogMessage << "Grid is setup to use " << threads << " threads" << std::endl; + + FermionField phi (FGrid); gaussian(RNG5, phi); + FermionField Mphi (FGrid); + FermionField MphiPrime (FGrid); + + LatticeGaugeField U(UGrid); + SU3::HotConfiguration(RNG4,U); + + //////////////////////////////////// + // Unmodified matrix element + //////////////////////////////////// + RealD b = 2.5; + RealD c = 1.5; + RealD mf = 0.01; + RealD mb = 1.0; + RealD M5 = 1.8; + FermionAction::ImplParams params; + FermionAction Lop(U, *FGrid, *FrbGrid, *UGrid, *UrbGrid, mf, mf, mb, 0.0, -1, M5, b, c, params); + FermionAction Rop(U, *FGrid, *FrbGrid, *UGrid, *UrbGrid, mb, mf, mb, -1.0, 1, M5, b, c, params); + OneFlavourRationalParams Params(0.95, 100.0, 5000, 1.0e-12, 12); + ConjugateGradient CG(1.0e-12, 5000); + ExactOneFlavourRatioPseudoFermionAction Meofa(Lop, Rop, CG, Params, false); + + Meofa.refresh(U, RNG5); + RealD S = Meofa.S(U); // pdag M p + + // get the deriv of phidag M phi with respect to "U" + LatticeGaugeField UdSdU(UGrid); + Meofa.deriv(U, UdSdU); + + //////////////////////////////////// + // Modify the gauge field a little + //////////////////////////////////// + RealD dt = 0.0001; + + LatticeColourMatrix mommu(UGrid); + LatticeColourMatrix forcemu(UGrid); + LatticeGaugeField mom(UGrid); + LatticeGaugeField Uprime(UGrid); + + for(int mu=0; mu(mom, mommu, mu); + + // fourth order exponential approx + 
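+    // i.e. Uprime_mu = exp(dt*mom_mu) * U_mu truncated at fourth order:
+    //   exp(X) U ~= (1 + X + X^2/2! + X^3/3! + X^4/4!) U  with  X = dt*mom_mu,
+    // an O(dt^5) error, ample for the dt = 1e-4 step used in this check.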
parallel_for(auto i=mom.begin(); i(UdSdU, mu); + mommu = Ta(mommu)*2.0; + PokeIndex(UdSdU, mommu, mu); + } + + for(int mu=0; mu(UdSdU, mu); + mommu = PeekIndex(mom, mu); + + // Update PF action density + dS = dS + trace(mommu*forcemu)*dt; + } + + ComplexD dSpred = sum(dS); + + /*std::cout << GridLogMessage << " S " << S << std::endl; + std::cout << GridLogMessage << " Sprime " << Sprime << std::endl; + std::cout << GridLogMessage << "dS " << Sprime-S << std::endl; + std::cout << GridLogMessage << "predict dS " << dSpred << std::endl;*/ + printf("\nS = %1.15e\n", S); + printf("Sprime = %1.15e\n", Sprime); + printf("dS = %1.15e\n", Sprime - S); + printf("real(dS_predict) = %1.15e\n", dSpred.real()); + printf("imag(dS_predict) = %1.15e\n\n", dSpred.imag()); + + assert( fabs(real(Sprime-S-dSpred)) < 1.0 ) ; + + std::cout << GridLogMessage << "Done" << std::endl; + Grid_finalize(); +} diff --git a/tests/forces/Test_rect_force.cc b/tests/forces/Test_rect_force.cc index c312abeb..a846af30 100644 --- a/tests/forces/Test_rect_force.cc +++ b/tests/forces/Test_rect_force.cc @@ -42,7 +42,7 @@ int main (int argc, char ** argv) std::vector mpi_layout = GridDefaultMpi(); GridCartesian Grid(latt_size,simd_layout,mpi_layout); - GridRedBlackCartesian RBGrid(latt_size,simd_layout,mpi_layout); + GridRedBlackCartesian RBGrid(&Grid); int threads = GridThread::GetThreads(); std::cout< mpi_layout = GridDefaultMpi(); GridCartesian Grid(latt_size,simd_layout,mpi_layout); - GridRedBlackCartesian RBGrid(latt_size,simd_layout,mpi_layout); + GridRedBlackCartesian RBGrid(&Grid); int threads = GridThread::GetThreads(); std::cout< simd_layout = GridDefaultSimd(Nd, vComplex::Nsimd()); std::vector mpi_layout = GridDefaultMpi(); GridCartesian Grid(latt_size, simd_layout, mpi_layout); - GridRedBlackCartesian RBGrid(latt_size, simd_layout, mpi_layout); + GridRedBlackCartesian RBGrid(&Grid); std::vector seeds({1, 2, 3, 4, 5}); GridSerialRNG sRNG; @@ -149,4 +149,4 @@ JSON } -*/ \ No newline at end of file +*/ diff --git a/tests/solver/Params.h b/tests/solver/Params.h new file mode 100644 index 00000000..d9a6d3b3 --- /dev/null +++ b/tests/solver/Params.h @@ -0,0 +1,136 @@ +/* + Params IO + + Author: Christoph Lehner + Date: 2017 +*/ + +#define PADD(p,X) p.get(#X,X); + +class Params { + protected: + + std::string trim(const std::string& sc) { + std::string s = sc; + s.erase(s.begin(), std::find_if(s.begin(), s.end(), + std::not1(std::ptr_fun(std::isspace)))); + s.erase(std::find_if(s.rbegin(), s.rend(), + std::not1(std::ptr_fun(std::isspace))).base(), s.end()); + return s; + } + + public: + + std::map< std::string, std::string > lines; + std::string _fn; + + Params(const char* fn) : _fn(fn) { + FILE* f = fopen(fn,"rt"); + assert(f); + while (!feof(f)) { + char buf[4096]; + if (fgets(buf,sizeof(buf),f)) { + if (buf[0] != '#' && buf[0] != '\r' && buf[0] != '\n') { + char* sep = strchr(buf,'='); + assert(sep); + *sep = '\0'; + lines[trim(buf)] = trim(sep+1); + } + } + } + fclose(f); + } + + ~Params() { + } + + std::string loghead() { + return _fn + ": "; + } + + bool has(const char* name) { + auto f = lines.find(name); + return (f != lines.end()); + } + + const std::string& get(const char* name) { + auto f = lines.find(name); + if (f == lines.end()) { + std::cout << Grid::GridLogMessage << loghead() << "Could not find value for " << name << std::endl; + abort(); + } + return f->second; + } + + void parse(std::string& s, const std::string& cval) { + std::stringstream trimmer; + trimmer << cval; + s.clear(); + trimmer >> s; + } + + 
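+  // The scalar parse() overloads below share one pattern: sscanf the trimmed
+  // value string and assert that the expected number of fields converted.
+  // Note the bool overload's error message prints the (uninitialized) bool b
+  // rather than cval -- presumably a typo; left as in the original.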
void parse(int& i, const std::string& cval) { + assert(sscanf(cval.c_str(),"%d",&i)==1); + } + + void parse(long long& i, const std::string& cval) { + assert(sscanf(cval.c_str(),"%lld",&i)==1); + } + + void parse(double& f, const std::string& cval) { + assert(sscanf(cval.c_str(),"%lf",&f)==1); + } + + void parse(float& f, const std::string& cval) { + assert(sscanf(cval.c_str(),"%f",&f)==1); + } + + void parse(bool& b, const std::string& cval) { + std::string lcval = cval; + std::transform(lcval.begin(), lcval.end(), lcval.begin(), ::tolower); + if (lcval == "true" || lcval == "yes") { + b = true; + } else if (lcval == "false" || lcval == "no") { + b = false; + } else { + std::cout << "Invalid value for boolean: " << b << std::endl; + assert(0); + } + } + + void parse(std::complex& f, const std::string& cval) { + double r,i; + assert(sscanf(cval.c_str(),"%lf %lf",&r,&i)==2); + f = std::complex(r,i); + } + + void parse(std::complex& f, const std::string& cval) { + float r,i; + assert(sscanf(cval.c_str(),"%f %f",&r,&i)==2); + f = std::complex(r,i); + } + + template + void get(const char* name, std::vector& v) { + int i = 0; + v.resize(0); + while (true) { + char buf[4096]; + sprintf(buf,"%s[%d]",name,i++); + if (!has(buf)) + break; + T val; + parse(val,get(buf)); + std::cout << Grid::GridLogMessage << loghead() << "Set " << buf << " to " << val << std::endl; + v.push_back(val); + } + } + + template + void get(const char* name, T& f) { + parse(f,get(name)); + std::cout << Grid::GridLogMessage << loghead() << "Set " << name << " to " << f << std::endl; + } + + +}; diff --git a/tests/solver/Test_dwf_compressed_lanczos.cc b/tests/solver/Test_dwf_compressed_lanczos.cc new file mode 100644 index 00000000..b42a2d55 --- /dev/null +++ b/tests/solver/Test_dwf_compressed_lanczos.cc @@ -0,0 +1,727 @@ +/* + Authors: Christoph Lehner + Date: 2017 + + Multigrid Lanczos + + + + TODO: + + High priority: + - Explore filtering of starting vector again, should really work: If cheby has 4 for low mode region and 1 for high mode, applying 15 iterations has 1e9 suppression + of high modes, which should create the desired invariant subspace already? Missing something here??? Maybe dynamic range dangerous, i.e., could also kill interesting + eigenrange if not careful. + + Better: Use all Cheby up to order N in order to approximate a step function; try this! Problem: width of step function. Can kill eigenspace > 1e-3 and have < 1e-5 equal + to 1 + + Low priority: + - Given that I seem to need many restarts and high degree poly to create the base and this takes about 1 day, seriously consider a simple method to create a basis + (ortho krylov low poly); and then fix up lowest say 200 eigenvalues by 1 run with high-degree poly (600 could be enough) +*/ +#include +#include "Params.h" + +#include + +using namespace std; +using namespace Grid; +using namespace Grid::QCD; + +bool read_evals(GridBase* _grid, char* fn, std::vector& evals) { + + FILE* f = 0; + uint32_t status = 0; + if (_grid->IsBoss()) { + f = fopen(fn,"rt"); + status = f ? 
1 : 0; + } + _grid->GlobalSum(status); + + if (!status) + return false; + + uint32_t N; + if (f) + assert(fscanf(f,"%d\n",&N)==1); + else + N = 0; + _grid->GlobalSum(N); + + std::cout << "Reading " << N << " eigenvalues" << std::endl; + + evals.resize(N); + + for (int i=0;iGlobalSumVector(&evals[0],evals.size()); + + if (f) + fclose(f); + return true; +} + +void write_evals(char* fn, std::vector& evals) { + FILE* f = fopen(fn,"wt"); + assert(f); + + int N = (int)evals.size(); + fprintf(f,"%d\n",N); + + for (int i=0;i& hist) { + FILE* f = fopen(fn,"wt"); + assert(f); + + int N = (int)hist.size(); + for (int i=0;i +class FunctionHermOp : public LinearFunction { +public: + OperatorFunction & _poly; + LinearOperatorBase &_Linop; + + FunctionHermOp(OperatorFunction & poly,LinearOperatorBase& linop) : _poly(poly), _Linop(linop) { + } + + void operator()(const Field& in, Field& out) { + _poly(_Linop,in,out); + } +}; + +template +class CheckpointedLinearFunction : public LinearFunction { +public: + LinearFunction& _op; + std::string _dir; + int _max_apply; + int _apply, _apply_actual; + GridBase* _grid; + FILE* _f; + + CheckpointedLinearFunction(GridBase* grid, LinearFunction& op, const char* dir,int max_apply) : _op(op), _dir(dir), _grid(grid), _f(0), + _max_apply(max_apply), _apply(0), _apply_actual(0) { + + FieldVectorIO::conditionalMkDir(dir); + + char fn[4096]; + sprintf(fn,"%s/ckpt_op.%4.4d",_dir.c_str(),_grid->ThisRank()); + printf("CheckpointLinearFunction:: file %s\n",fn); + _f = fopen(fn,"r+b"); + if (!_f) + _f = fopen(fn,"w+b"); + assert(_f); + fseek(_f,0,SEEK_CUR); + + } + + ~CheckpointedLinearFunction() { + if (_f) { + fclose(_f); + _f = 0; + } + } + + bool load_ckpt(const Field& in, Field& out) { + + off_t cur = ftello(_f); + fseeko(_f,0,SEEK_END); + if (cur == ftello(_f)) + return false; + fseeko(_f,cur,SEEK_SET); + + size_t sz = sizeof(out._odata[0]) * out._odata.size(); + + GridStopWatch gsw; + gsw.Start(); + uint32_t crc_exp; + assert(fread(&crc_exp,4,1,_f)==1); + assert(fread(&out._odata[0],sz,1,_f)==1); + assert(FieldVectorIO::crc32_threaded((unsigned char*)&out._odata[0],sz,0x0)==crc_exp); + gsw.Stop(); + + printf("CheckpointLinearFunction:: reading %lld\n",(long long)sz); + std::cout << GridLogMessage << "Loading " << ((RealD)sz/1024./1024./1024.) << " GB in " << gsw.Elapsed() << std::endl; + return true; + } + + void save_ckpt(const Field& in, Field& out) { + + fseek(_f,0,SEEK_CUR); // switch to write + + size_t sz = sizeof(out._odata[0]) * out._odata.size(); + + GridStopWatch gsw; + gsw.Start(); + uint32_t crc = FieldVectorIO::crc32_threaded((unsigned char*)&out._odata[0],sz,0x0); + assert(fwrite(&crc,4,1,_f)==1); + assert(fwrite(&out._odata[0],sz,1,_f)==1); + fflush(_f); // try this on the GPFS to suppress OPA usage for disk during dslash; this is not needed at Lustre/JLAB + gsw.Stop(); + + printf("CheckpointLinearFunction:: writing %lld\n",(long long)sz); + std::cout << GridLogMessage << "Saving " << ((RealD)sz/1024./1024./1024.) 
<< " GB in " << gsw.Elapsed() << std::endl; + } + + void operator()(const Field& in, Field& out) { + + _apply++; + + if (load_ckpt(in,out)) + return; + + _op(in,out); + + save_ckpt(in,out); + + if (_apply_actual++ >= _max_apply) { + std::cout << GridLogMessage << "Maximum application of operator reached, checkpoint and finish in future job" << std::endl; + if (_f) { fclose(_f); _f=0; } + in._grid->Barrier(); + Grid_finalize(); + exit(3); + } + } +}; + +template +class ProjectedFunctionHermOp : public LinearFunction { +public: + OperatorFunction & _poly; + LinearOperatorBase &_Linop; + BlockProjector& _pr; + + ProjectedFunctionHermOp(BlockProjector& pr,OperatorFunction & poly,LinearOperatorBase& linop) : _poly(poly), _Linop(linop), _pr(pr) { + } + + void operator()(const CoarseField& in, CoarseField& out) { + assert(_pr._bgrid._o_blocks == in._grid->oSites()); + + Field fin(_pr._bgrid._grid); + Field fout(_pr._bgrid._grid); + + GridStopWatch gsw1,gsw2,gsw3; + // fill fin + gsw1.Start(); + _pr.coarseToFine(in,fin); + gsw1.Stop(); + + // apply poly + gsw2.Start(); + _poly(_Linop,fin,fout); + gsw2.Stop(); + + // fill out + gsw3.Start(); + _pr.fineToCoarse(fout,out); + gsw3.Stop(); + + auto eps = innerProduct(in,out); + std::cout << GridLogMessage << "Operator timing details: c2f = " << gsw1.Elapsed() << " poly = " << gsw2.Elapsed() << " f2c = " << gsw3.Elapsed() << + " Complimentary Hermiticity check: " << eps.imag() / std::abs(eps) << std::endl; + + } +}; + +template +class ProjectedHermOp : public LinearFunction { +public: + LinearOperatorBase &_Linop; + BlockProjector& _pr; + + ProjectedHermOp(BlockProjector& pr,LinearOperatorBase& linop) : _Linop(linop), _pr(pr) { + } + + void operator()(const CoarseField& in, CoarseField& out) { + assert(_pr._bgrid._o_blocks == in._grid->oSites()); + Field fin(_pr._bgrid._grid); + Field fout(_pr._bgrid._grid); + _pr.coarseToFine(in,fin); + _Linop.HermOp(fin,fout); + _pr.fineToCoarse(fout,out); + + } +}; + +template +class PlainHermOp : public LinearFunction { +public: + LinearOperatorBase &_Linop; + + PlainHermOp(LinearOperatorBase& linop) : _Linop(linop) { + } + + void operator()(const Field& in, Field& out) { + _Linop.HermOp(in,out); + } +}; + +template using CoarseSiteFieldGeneral = iScalar< iVector >; +template using CoarseSiteFieldD = CoarseSiteFieldGeneral< vComplexD, N >; +template using CoarseSiteFieldF = CoarseSiteFieldGeneral< vComplexF, N >; +template using CoarseSiteField = CoarseSiteFieldGeneral< vComplex, N >; +template using CoarseLatticeFermion = Lattice< CoarseSiteField >; +template using CoarseLatticeFermionD = Lattice< CoarseSiteFieldD >; + +template +void CoarseGridLanczos(BlockProjector& pr,RealD alpha2,RealD beta,int Npoly2, + int Nstop2,int Nk2,int Nm2,RealD resid2,RealD betastp2,int MaxIt,int MinRes2, + LinearOperatorBase& HermOp, std::vector& eval1, bool cg_test_enabled, + int cg_test_maxiter,int nsingle,int SkipTest2, int MaxApply2,bool smoothed_eval_enabled, + int smoothed_eval_inner,int smoothed_eval_outer,int smoothed_eval_begin, + int smoothed_eval_end,RealD smoothed_eval_inner_resid) { + + BlockedGrid& bgrid = pr._bgrid; + BasisFieldVector& basis = pr._evec; + + + std::vector coarseFourDimLatt; + for (int i=0;i<4;i++) + coarseFourDimLatt.push_back(bgrid._nb[1+i] * bgrid._grid->_processors[1+i]); + assert(bgrid._grid->_processors[0] == 1); + + std::cout << GridLogMessage << "CoarseGrid = " << coarseFourDimLatt << " with basis = " << Nstop1 << std::endl; + GridCartesian * UCoarseGrid = 
SpaceTimeGrid::makeFourDimGrid(coarseFourDimLatt, GridDefaultSimd(Nd,vComplex::Nsimd()),GridDefaultMpi()); + GridCartesian * FCoarseGrid = SpaceTimeGrid::makeFiveDimGrid(bgrid._nb[0],UCoarseGrid); + + Chebyshev Cheb2(alpha2,beta,Npoly2); + CoarseLatticeFermion src_coarse(FCoarseGrid); + + // Second round of Lanczos in blocked space + std::vector eval2(Nm2); + std::vector eval3(Nm2); + BasisFieldVector > coef(Nm2,FCoarseGrid); + + ProjectedFunctionHermOp,LatticeFermion> Op2plain(pr,Cheb2,HermOp); + CheckpointedLinearFunction > Op2ckpt(src_coarse._grid,Op2plain,"checkpoint",MaxApply2); + LinearFunction< CoarseLatticeFermion >* Op2; + if (MaxApply2) { + Op2 = &Op2ckpt; + } else { + Op2 = &Op2plain; + } + ProjectedHermOp,LatticeFermion> Op2nopoly(pr,HermOp); + BlockImplicitlyRestartedLanczos > IRL2(*Op2,*Op2,Nstop2,Nk2,Nm2,resid2,betastp2,MaxIt,MinRes2); + + + src_coarse = 1.0; + + // Precision test + { + Field tmp(bgrid._grid); + CoarseLatticeFermion tmp2(FCoarseGrid); + CoarseLatticeFermion tmp3(FCoarseGrid); + tmp2 = 1.0; + tmp3 = 1.0; + + pr.coarseToFine(tmp2,tmp); + pr.fineToCoarse(tmp,tmp2); + + tmp2 -= tmp3; + std::cout << GridLogMessage << "Precision Test c->f->c: " << norm2(tmp2) / norm2(tmp3) << std::endl; + + //bgrid._grid->Barrier(); + //return; + } + + int Nconv; + if (!FieldVectorIO::read_compressed_vectors("lanczos.output",pr,coef) || + !read_evals(UCoarseGrid,(char *)"lanczos.output/eigen-values.txt",eval3) || + !read_evals(UCoarseGrid,(char *)"lanczos.output/eigen-values.txt.linear",eval1) || + !read_evals(UCoarseGrid,(char *)"lanczos.output/eigen-values.txt.poly",eval2) + ) { + + + IRL2.calc(eval2,coef,src_coarse,Nconv,true,SkipTest2); + + coef.resize(Nstop2); + eval2.resize(Nstop2); + eval3.resize(Nstop2); + + std::vector step3_cache; + + // reconstruct eigenvalues of original operator + for (int i=0;iIsBoss()) { + write_evals((char *)"lanczos.output/eigen-values.txt",eval3); + write_evals((char *)"lanczos.output/eigen-values.txt.linear",eval1); + write_evals((char *)"lanczos.output/eigen-values.txt.poly",eval2); + } + + } + + // fix up eigenvalues + if (!read_evals(UCoarseGrid,(char *)"lanczos.output/eigen-values.txt.smoothed",eval3) && smoothed_eval_enabled) { + + ConjugateGradient CG(smoothed_eval_inner_resid, smoothed_eval_inner, false); + + LatticeFermion v_i(basis[0]._grid); + auto tmp = v_i; + auto tmp2 = v_i; + + for (int i=smoothed_eval_begin;iIsBoss()) { + write_evals((char *)"lanczos.output/eigen-values.txt.smoothed",eval3); + write_evals((char *)"lanczos.output/eigen-values.txt",eval3); // also reset this to the best ones we have available + } + } + + // do CG test with and without deflation + if (cg_test_enabled) { + ConjugateGradient CG(1.0e-8, cg_test_maxiter, false); + LatticeFermion src_orig(bgrid._grid); + src_orig.checkerboard = Odd; + src_orig = 1.0; + src_orig = src_orig * (1.0 / ::sqrt(norm2(src_orig)) ); + auto result = src_orig; + + // undeflated solve + result = zero; + CG(HermOp, src_orig, result); + // if (UCoarseGrid->IsBoss()) + // write_history("cg_test.undefl",CG.ResHistory); + // CG.ResHistory.clear(); + + // deflated solve with all eigenvectors + result = zero; + pr.deflate(coef,eval2,Nstop2,src_orig,result); + CG(HermOp, src_orig, result); + // if (UCoarseGrid->IsBoss()) + // write_history("cg_test.defl_all",CG.ResHistory); + // CG.ResHistory.clear(); + + // deflated solve with non-blocked eigenvectors + result = zero; + pr.deflate(coef,eval1,Nstop1,src_orig,result); + CG(HermOp, src_orig, result); + // if (UCoarseGrid->IsBoss()) + // 
write_history("cg_test.defl_full",CG.ResHistory); + // CG.ResHistory.clear(); + + // deflated solve with all eigenvectors and original eigenvalues from proj + result = zero; + pr.deflate(coef,eval3,Nstop2,src_orig,result); + CG(HermOp, src_orig, result); + // if (UCoarseGrid->IsBoss()) + // write_history("cg_test.defl_all_ev3",CG.ResHistory); + // CG.ResHistory.clear(); + + } + +} + + +template +void quick_krylov_basis(BasisFieldVector& evec,Field& src,LinearFunction& Op,int Nstop) { + Field tmp = src; + Field tmp2 = tmp; + + for (int i=0;i HermOp(Ddwf); + + // Eigenvector storage + const int Nm1 = Np1 + Nk1; + const int Nm2 = Np2 + Nk2; // maximum number of vectors we need to keep + std::cout << GridLogMessage << "Keep " << Nm1 << " full vectors" << std::endl; + std::cout << GridLogMessage << "Keep " << Nm2 << " total vectors" << std::endl; + assert(Nm2 >= Nm1); + BasisFieldVector evec(Nm1,FrbGrid); // start off with keeping full vectors + + // First and second cheby + Chebyshev Cheb1(alpha1,beta,Npoly1); + FunctionHermOp Op1(Cheb1,HermOp); + PlainHermOp Op1test(HermOp); + + // Eigenvalue storage + std::vector eval1(evec.size()); + + // Construct source vector + LatticeFermion src(FrbGrid); + { + src=1.0; + src.checkerboard = Odd; + + // normalize + RealD nn = norm2(src); + nn = Grid::sqrt(nn); + src = src * (1.0/nn); + } + + // Do a benchmark and a quick exit if performance is too little (ugly but needed due to performance fluctuations) + if (max_cheb_time_ms) { + // one round of warmup + auto tmp = src; + GridStopWatch gsw1,gsw2; + gsw1.Start(); + Cheb1(HermOp,src,tmp); + gsw1.Stop(); + Ddwf.ZeroCounters(); + gsw2.Start(); + Cheb1(HermOp,src,tmp); + gsw2.Stop(); + Ddwf.Report(); + std::cout << GridLogMessage << "Performance check; warmup = " << gsw1.Elapsed() << " test = " << gsw2.Elapsed() << std::endl; + int ms = (int)(gsw2.useconds()/1e3); + if (ms > max_cheb_time_ms) { + std::cout << GridLogMessage << "Performance too poor: " << ms << " ms, cutoff = " << max_cheb_time_ms << " ms" << std::endl; + Grid_finalize(); + return 2; + } + + } + + // First round of Lanczos to get low mode basis + BlockImplicitlyRestartedLanczos IRL1(Op1,Op1test,Nstop1,Nk1,Nm1,resid1,betastp1,MaxIt,MinRes1); + int Nconv; + + char tag[1024]; + if (!FieldVectorIO::read_argonne(evec,(char *)"checkpoint") || !read_evals(UGrid,(char *)"checkpoint/eigen-values.txt",eval1)) { + + if (simple_krylov_basis) { + quick_krylov_basis(evec,src,Op1,Nstop1); + } else { + IRL1.calc(eval1,evec,src,Nconv,false,1); + } + evec.resize(Nstop1); // and throw away superfluous + eval1.resize(Nstop1); + if (checkpoint_basis) + FieldVectorIO::write_argonne(evec,(char *)"checkpoint"); + if (UGrid->IsBoss() && checkpoint_basis) + write_evals((char *)"checkpoint/eigen-values.txt",eval1); + + Ddwf.Report(); + + if (exit_after_basis_calculation) { + Grid_finalize(); + return 0; + } + } + + // now test eigenvectors + if (!simple_krylov_basis) { + for (int i=0;i + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. 
+ + You should have received a copy of the GNU General Public License along + with this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + + See the full license in the file "LICENSE" in the top level distribution directory + *************************************************************************************/ + /* END LEGAL */ +#include +#include + +using namespace std; +using namespace Grid; +using namespace Grid::QCD; + +int main (int argc, char ** argv) +{ + typedef typename DomainWallFermionR::FermionField FermionField; + typedef typename DomainWallFermionR::ComplexField ComplexField; + typename DomainWallFermionR::ImplParams params; + + const int Ls=4; + + Grid_init(&argc,&argv); + + std::vector latt_size = GridDefaultLatt(); + std::vector simd_layout = GridDefaultSimd(Nd,vComplex::Nsimd()); + std::vector mpi_layout = GridDefaultMpi(); + std::vector mpi_split (mpi_layout.size(),1); + + GridCartesian * UGrid = SpaceTimeGrid::makeFourDimGrid(GridDefaultLatt(), GridDefaultSimd(Nd,vComplex::Nsimd()),GridDefaultMpi()); + GridCartesian * FGrid = SpaceTimeGrid::makeFiveDimGrid(Ls,UGrid); + GridRedBlackCartesian * rbGrid = SpaceTimeGrid::makeFourDimRedBlackGrid(UGrid); + GridRedBlackCartesian * FrbGrid = SpaceTimeGrid::makeFiveDimRedBlackGrid(Ls,UGrid); + + int nrhs = UGrid->RankCount() ; + + ///////////////////////////////////////////// + // Split into 1^4 mpi communicators + ///////////////////////////////////////////// + GridCartesian * SGrid = new GridCartesian(GridDefaultLatt(), + GridDefaultSimd(Nd,vComplex::Nsimd()), + mpi_split, + *UGrid); + + GridCartesian * SFGrid = SpaceTimeGrid::makeFiveDimGrid(Ls,SGrid); + GridRedBlackCartesian * SrbGrid = SpaceTimeGrid::makeFourDimRedBlackGrid(SGrid); + GridRedBlackCartesian * SFrbGrid = SpaceTimeGrid::makeFiveDimRedBlackGrid(Ls,SGrid); + + /////////////////////////////////////////////// + // Set up the problem as a 4d spreadout job + /////////////////////////////////////////////// + std::vector seeds({1,2,3,4}); + + GridParallelRNG pRNG(UGrid ); pRNG.SeedFixedIntegers(seeds); + GridParallelRNG pRNG5(FGrid); pRNG5.SeedFixedIntegers(seeds); + std::vector src(nrhs,FGrid); + std::vector src_chk(nrhs,FGrid); + std::vector result(nrhs,FGrid); + FermionField tmp(FGrid); + + for(int s=0;sThisRank(); + LatticeGaugeField s_Umu(SGrid); + FermionField s_src(SFGrid); + FermionField s_src_split(SFGrid); + FermionField s_tmp(SFGrid); + FermionField s_res(SFGrid); + + { + FGrid->Barrier(); + ScidacWriter _ScidacWriter; + _ScidacWriter.open(file); + std::cout << GridLogMessage << "****************************************************************** "<Barrier(); + std::cout << GridLogMessage << "****************************************************************** "<Barrier(); + std::cout << GridLogMessage << "****************************************************************** "<Barrier(); + + std::cout << GridLogMessage << "****************************************************************** "<Barrier(); + } + + /////////////////////////////////////////////////////////////// + // split the source out using MPI instead of I/O + /////////////////////////////////////////////////////////////// + std::cout << GridLogMessage << " Splitting the grid data "<Barrier(); + if ( n==me ) { + std::cerr << GridLogMessage<<"Split "<< me << " " << norm2(s_src_split) << " " << norm2(s_src)<< " diff " << norm2(s_tmp)<Barrier(); + } + + + /////////////////////////////////////////////////////////////// + // Set up 
N-solvers as trivially parallel + /////////////////////////////////////////////////////////////// + + RealD mass=0.01; + RealD M5=1.8; + DomainWallFermionR Dchk(Umu,*FGrid,*FrbGrid,*UGrid,*rbGrid,mass,M5); + DomainWallFermionR Ddwf(s_Umu,*SFGrid,*SFrbGrid,*SGrid,*SrbGrid,mass,M5); + + std::cout << GridLogMessage << "****************************************************************** "< HermOp(Ddwf); + MdagMLinearOperator HermOpCk(Dchk); + ConjugateGradient CG((1.0e-8/(me+1)),10000); + s_res = zero; + CG(HermOp,s_src,s_res); + + ///////////////////////////////////////////////////////////// + // Report how long they all took + ///////////////////////////////////////////////////////////// + std::vector iterations(nrhs,0); + iterations[me] = CG.IterationsToComplete; + + for(int n=0;nGlobalSum(iterations[n]); + std::cout << GridLogMessage<<" Rank "< + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License along + with this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + + See the full license in the file "LICENSE" in the top level distribution directory + *************************************************************************************/ + /* END LEGAL */ +#include +#include + +using namespace std; +using namespace Grid; +using namespace Grid::QCD; + +int main (int argc, char ** argv) +{ + typedef typename DomainWallFermionR::FermionField FermionField; + typedef typename DomainWallFermionR::ComplexField ComplexField; + typename DomainWallFermionR::ImplParams params; + + const int Ls=4; + + Grid_init(&argc,&argv); + + std::vector latt_size = GridDefaultLatt(); + std::vector simd_layout = GridDefaultSimd(Nd,vComplex::Nsimd()); + std::vector mpi_layout = GridDefaultMpi(); + std::vector mpi_split (mpi_layout.size(),1); + + GridCartesian * UGrid = SpaceTimeGrid::makeFourDimGrid(GridDefaultLatt(), GridDefaultSimd(Nd,vComplex::Nsimd()),GridDefaultMpi()); + GridCartesian * FGrid = SpaceTimeGrid::makeFiveDimGrid(Ls,UGrid); + GridRedBlackCartesian * rbGrid = SpaceTimeGrid::makeFourDimRedBlackGrid(UGrid); + GridRedBlackCartesian * FrbGrid = SpaceTimeGrid::makeFiveDimRedBlackGrid(Ls,UGrid); + + int nrhs = UGrid->RankCount() ; + + ///////////////////////////////////////////// + // Split into 1^4 mpi communicators + ///////////////////////////////////////////// + GridCartesian * SGrid = new GridCartesian(GridDefaultLatt(), + GridDefaultSimd(Nd,vComplex::Nsimd()), + mpi_split, + *UGrid); + + GridCartesian * SFGrid = SpaceTimeGrid::makeFiveDimGrid(Ls,SGrid); + GridRedBlackCartesian * SrbGrid = SpaceTimeGrid::makeFourDimRedBlackGrid(SGrid); + GridRedBlackCartesian * SFrbGrid = SpaceTimeGrid::makeFiveDimRedBlackGrid(Ls,SGrid); + + /////////////////////////////////////////////// + // Set up the problem as a 4d spreadout job + /////////////////////////////////////////////// + std::vector seeds({1,2,3,4}); + + GridParallelRNG pRNG(UGrid ); pRNG.SeedFixedIntegers(seeds); + GridParallelRNG pRNG5(FGrid); pRNG5.SeedFixedIntegers(seeds); + 
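+  // Editor's note: one right-hand side per MPI rank (nrhs = RankCount()).
+  // Each source below lives on the full FGrid; Grid_split later scatters the
+  // gauge field and sources onto the per-rank 1^4 communicators, so the nrhs
+  // solves run as independent, trivially parallel jobs.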
std::vector src(nrhs,FGrid); + std::vector src_chk(nrhs,FGrid); + std::vector result(nrhs,FGrid); + FermionField tmp(FGrid); + + for(int s=0;sThisRank(); + + LatticeGaugeField s_Umu(SGrid); + FermionField s_src(SFGrid); + FermionField s_tmp(SFGrid); + FermionField s_res(SFGrid); + + /////////////////////////////////////////////////////////////// + // split the source out using MPI instead of I/O + /////////////////////////////////////////////////////////////// + Grid_split (Umu,s_Umu); + Grid_split (src,s_src); + + /////////////////////////////////////////////////////////////// + // Set up N-solvers as trivially parallel + /////////////////////////////////////////////////////////////// + RealD mass=0.01; + RealD M5=1.8; + DomainWallFermionR Dchk(Umu,*FGrid,*FrbGrid,*UGrid,*rbGrid,mass,M5); + DomainWallFermionR Ddwf(s_Umu,*SFGrid,*SFrbGrid,*SGrid,*SrbGrid,mass,M5); + + std::cout << GridLogMessage << "****************************************************************** "< HermOp(Ddwf); + MdagMLinearOperator HermOpCk(Dchk); + ConjugateGradient CG((1.0e-8/(me+1)),10000); + s_res = zero; + CG(HermOp,s_src,s_res); + + ///////////////////////////////////////////////////////////// + // Report how long they all took + ///////////////////////////////////////////////////////////// + std::vector iterations(nrhs,0); + iterations[me] = CG.IterationsToComplete; + + for(int n=0;nGlobalSum(iterations[n]); + std::cout << GridLogMessage<<" Rank "< + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License along + with this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
+ + See the full license in the file "LICENSE" in the top level distribution directory + *************************************************************************************/ + /* END LEGAL */ +#include +#include + +using namespace std; +using namespace Grid; +using namespace Grid::QCD; + +int main (int argc, char ** argv) +{ + typedef typename DomainWallFermionR::FermionField FermionField; + typedef typename DomainWallFermionR::ComplexField ComplexField; + typename DomainWallFermionR::ImplParams params; + + const int Ls=4; + + Grid_init(&argc,&argv); + + std::vector latt_size = GridDefaultLatt(); + std::vector simd_layout = GridDefaultSimd(Nd,vComplex::Nsimd()); + std::vector mpi_layout = GridDefaultMpi(); + std::vector mpi_split (mpi_layout.size(),1); + + GridCartesian * UGrid = SpaceTimeGrid::makeFourDimGrid(GridDefaultLatt(), GridDefaultSimd(Nd,vComplex::Nsimd()),GridDefaultMpi()); + GridCartesian * FGrid = SpaceTimeGrid::makeFiveDimGrid(Ls,UGrid); + GridRedBlackCartesian * rbGrid = SpaceTimeGrid::makeFourDimRedBlackGrid(UGrid); + GridRedBlackCartesian * FrbGrid = SpaceTimeGrid::makeFiveDimRedBlackGrid(Ls,UGrid); + + int nrhs = UGrid->RankCount() ; + + ///////////////////////////////////////////// + // Split into 1^4 mpi communicators + ///////////////////////////////////////////// + GridCartesian * SGrid = new GridCartesian(GridDefaultLatt(), + GridDefaultSimd(Nd,vComplex::Nsimd()), + mpi_split, + *UGrid); + + GridCartesian * SFGrid = SpaceTimeGrid::makeFiveDimGrid(Ls,SGrid); + GridRedBlackCartesian * SrbGrid = SpaceTimeGrid::makeFourDimRedBlackGrid(SGrid); + GridRedBlackCartesian * SFrbGrid = SpaceTimeGrid::makeFiveDimRedBlackGrid(Ls,SGrid); + + /////////////////////////////////////////////// + // Set up the problem as a 4d spreadout job + /////////////////////////////////////////////// + std::vector seeds({1,2,3,4}); + + GridParallelRNG pRNG(UGrid ); pRNG.SeedFixedIntegers(seeds); + GridParallelRNG pRNG5(FGrid); pRNG5.SeedFixedIntegers(seeds); + std::vector src(nrhs,FGrid); + std::vector src_chk(nrhs,FGrid); + std::vector result(nrhs,FGrid); + FermionField tmp(FGrid); + + std::vector src_e(nrhs,FrbGrid); + std::vector src_o(nrhs,FrbGrid); + + for(int s=0;sThisRank(); + + LatticeGaugeField s_Umu(SGrid); + FermionField s_src(SFGrid); + FermionField s_src_e(SFrbGrid); + FermionField s_src_o(SFrbGrid); + FermionField s_tmp(SFGrid); + FermionField s_res(SFGrid); + + /////////////////////////////////////////////////////////////// + // split the source out using MPI instead of I/O + /////////////////////////////////////////////////////////////// + Grid_split (Umu,s_Umu); + Grid_split (src,s_src); + + /////////////////////////////////////////////////////////////// + // Check even odd cases + /////////////////////////////////////////////////////////////// + for(int s=0;s HermOp(Ddwf); + MdagMLinearOperator HermOpCk(Dchk); + ConjugateGradient CG((1.0e-8/(me+1)),10000); + s_res = zero; + CG(HermOp,s_src,s_res); + + ///////////////////////////////////////////////////////////// + // Report how long they all took + ///////////////////////////////////////////////////////////// + std::vector iterations(nrhs,0); + iterations[me] = CG.IterationsToComplete; + + for(int n=0;nGlobalSum(iterations[n]); + std::cout << GridLogMessage<<" Rank "< simd_layout = GridDefaultSimd(Nd,vComplex::Nsimd()); std::vector mpi_layout = GridDefaultMpi(); GridCartesian Grid(latt_size,simd_layout,mpi_layout); - GridRedBlackCartesian RBGrid(latt_size,simd_layout,mpi_layout); + GridRedBlackCartesian RBGrid(&Grid); 
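+   // (red-black grid now derived from the parent Grid; same refactor as the
+   //  other solver tests -- see the note at the end of this patch)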
std::vector seeds({1,2,3,4,5}); GridParallelRNG pRNG(&Grid); pRNG.SeedFixedIntegers(seeds); diff --git a/tests/solver/Test_staggered_block_cg_prec.cc b/tests/solver/Test_staggered_block_cg_prec.cc new file mode 100644 index 00000000..0076e5a0 --- /dev/null +++ b/tests/solver/Test_staggered_block_cg_prec.cc @@ -0,0 +1,130 @@ + /************************************************************************************* + + Grid physics library, www.github.com/paboyle/Grid + + Source file: ./tests/Test_wilson_cg_unprec.cc + + Copyright (C) 2015 + +Author: Azusa Yamaguchi +Author: Peter Boyle + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License along + with this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + + See the full license in the file "LICENSE" in the top level distribution directory + *************************************************************************************/ + /* END LEGAL */ +#include + +using namespace std; +using namespace Grid; +using namespace Grid::QCD; + +template +struct scal { + d internal; +}; + + Gamma::Algebra Gmu [] = { + Gamma::Algebra::GammaX, + Gamma::Algebra::GammaY, + Gamma::Algebra::GammaZ, + Gamma::Algebra::GammaT + }; + +int main (int argc, char ** argv) +{ + typedef typename ImprovedStaggeredFermion5DR::FermionField FermionField; + typedef typename ImprovedStaggeredFermion5DR::ComplexField ComplexField; + typename ImprovedStaggeredFermion5DR::ImplParams params; + + const int Ls=8; + + Grid_init(&argc,&argv); + + std::vector latt_size = GridDefaultLatt(); + std::vector simd_layout = GridDefaultSimd(Nd,vComplex::Nsimd()); + std::vector mpi_layout = GridDefaultMpi(); + + GridCartesian * UGrid = SpaceTimeGrid::makeFourDimGrid(GridDefaultLatt(), GridDefaultSimd(Nd,vComplex::Nsimd()),GridDefaultMpi()); + GridRedBlackCartesian * UrbGrid = SpaceTimeGrid::makeFourDimRedBlackGrid(UGrid); + GridCartesian * FGrid = SpaceTimeGrid::makeFiveDimGrid(Ls,UGrid); + GridRedBlackCartesian * FrbGrid = SpaceTimeGrid::makeFiveDimRedBlackGrid(Ls,UGrid); + + std::vector seeds({1,2,3,4}); + GridParallelRNG pRNG(UGrid ); pRNG.SeedFixedIntegers(seeds); + GridParallelRNG pRNG5(FGrid); pRNG5.SeedFixedIntegers(seeds); + + FermionField src(FGrid); random(pRNG5,src); + FermionField src_o(FrbGrid); pickCheckerboard(Odd,src_o,src); + FermionField result_o(FrbGrid); result_o=zero; + RealD nrm = norm2(src); + + LatticeGaugeField Umu(UGrid); SU3::HotConfiguration(pRNG,Umu); + + RealD mass=0.003; + ImprovedStaggeredFermion5DR Ds(Umu,Umu,*FGrid,*FrbGrid,*UGrid,*UrbGrid,mass); + SchurStaggeredOperator HermOp(Ds); + + ConjugateGradient CG(1.0e-8,10000); + int blockDim = 0; + BlockConjugateGradient BCGrQ(BlockCGrQ,blockDim,1.0e-8,10000); + BlockConjugateGradient BCG (BlockCG,blockDim,1.0e-8,10000); + BlockConjugateGradient mCG (CGmultiRHS,blockDim,1.0e-8,10000); + + std::cout << GridLogMessage << "****************************************************************** "< HermOp4d(Ds4d); + FermionField src4d(UGrid); 
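+  // Cross-check (editor's reading): repeat the solve with the plain 4d
+  // staggered operator and single-RHS CG, presumably to compare against the
+  // blocked/multi-RHS 5d solves above.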
random(pRNG,src4d); + FermionField src4d_o(UrbGrid); pickCheckerboard(Odd,src4d_o,src4d); + FermionField result4d_o(UrbGrid); + + result4d_o=zero; + CG(HermOp4d,src4d_o,result4d_o); + std::cout << GridLogMessage << "************************************************************************ "< *************************************************************************************/ /* END LEGAL */ #include -#include using namespace std; using namespace Grid; diff --git a/tests/solver/Test_staggered_cg_prec.cc b/tests/solver/Test_staggered_cg_prec.cc index 66f11d3d..9a458f1f 100644 --- a/tests/solver/Test_staggered_cg_prec.cc +++ b/tests/solver/Test_staggered_cg_prec.cc @@ -57,7 +57,7 @@ int main (int argc, char ** argv) std::vector simd_layout = GridDefaultSimd(Nd,vComplex::Nsimd()); std::vector mpi_layout = GridDefaultMpi(); GridCartesian Grid(latt_size,simd_layout,mpi_layout); - GridRedBlackCartesian RBGrid(latt_size,simd_layout,mpi_layout); + GridRedBlackCartesian RBGrid(&Grid); std::vector seeds({1,2,3,4}); GridParallelRNG pRNG(&Grid); pRNG.SeedFixedIntegers(seeds); @@ -71,7 +71,7 @@ int main (int argc, char ** argv) volume=volume*latt_size[mu]; } - RealD mass=0.1; + RealD mass=0.003; ImprovedStaggeredFermionR Ds(Umu,Umu,Grid,RBGrid,mass); FermionField res_o(&RBGrid); @@ -79,9 +79,14 @@ int main (int argc, char ** argv) pickCheckerboard(Odd,src_o,src); res_o=zero; - SchurDiagMooeeOperator HermOpEO(Ds); + SchurStaggeredOperator HermOpEO(Ds); ConjugateGradient CG(1.0e-8,10000); CG(HermOpEO,src_o,res_o); + FermionField tmp(&RBGrid); + + HermOpEO.Mpc(res_o,tmp); + std::cout << "check Mpc resid " << axpy_norm(tmp,-1.0,src_o,tmp)/norm2(src_o) << "\n"; + Grid_finalize(); } diff --git a/tests/solver/Test_staggered_cg_unprec.cc b/tests/solver/Test_staggered_cg_unprec.cc index 5e0358d7..eb33c004 100644 --- a/tests/solver/Test_staggered_cg_unprec.cc +++ b/tests/solver/Test_staggered_cg_unprec.cc @@ -57,7 +57,7 @@ int main (int argc, char ** argv) std::vector simd_layout = GridDefaultSimd(Nd,vComplex::Nsimd()); std::vector mpi_layout = GridDefaultMpi(); GridCartesian Grid(latt_size,simd_layout,mpi_layout); - GridRedBlackCartesian RBGrid(latt_size,simd_layout,mpi_layout); + GridRedBlackCartesian RBGrid(&Grid); std::vector seeds({1,2,3,4}); GridParallelRNG pRNG(&Grid); pRNG.SeedFixedIntegers(seeds); diff --git a/tests/solver/Test_wilson_cg_prec.cc b/tests/solver/Test_wilson_cg_prec.cc index 011bc70b..99ddfceb 100644 --- a/tests/solver/Test_wilson_cg_prec.cc +++ b/tests/solver/Test_wilson_cg_prec.cc @@ -52,7 +52,7 @@ int main (int argc, char ** argv) std::vector simd_layout = GridDefaultSimd(Nd,vComplex::Nsimd()); std::vector mpi_layout = GridDefaultMpi(); GridCartesian Grid(latt_size,simd_layout,mpi_layout); - GridRedBlackCartesian RBGrid(latt_size,simd_layout,mpi_layout); + GridRedBlackCartesian RBGrid(&Grid); std::vector seeds({1,2,3,4}); GridParallelRNG pRNG(&Grid); pRNG.SeedFixedIntegers(seeds); diff --git a/tests/solver/Test_wilson_cg_schur.cc b/tests/solver/Test_wilson_cg_schur.cc index 7bbf74d3..13ac0090 100644 --- a/tests/solver/Test_wilson_cg_schur.cc +++ b/tests/solver/Test_wilson_cg_schur.cc @@ -52,7 +52,7 @@ int main (int argc, char ** argv) std::vector simd_layout = GridDefaultSimd(Nd,vComplex::Nsimd()); std::vector mpi_layout = GridDefaultMpi(); GridCartesian Grid(latt_size,simd_layout,mpi_layout); - GridRedBlackCartesian RBGrid(latt_size,simd_layout,mpi_layout); + GridRedBlackCartesian RBGrid(&Grid); std::vector seeds({1,2,3,4}); GridParallelRNG pRNG(&Grid); pRNG.SeedFixedIntegers(seeds); 
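Note on the Test_staggered_cg_prec.cc hunk above: it swaps the generic SchurDiagMooeeOperator for the new SchurStaggeredOperator. For staggered fermions the even-odd preconditioned matrix Mpc is already hermitian (and positive-definite), so CG can run on Mpc directly rather than on the normal equations, and the added check applies Mpc once more to measure the true residual. Restated as a commented sketch (same calls as the hunk, no new functionality):

  FermionField tmp(&RBGrid);
  HermOpEO.Mpc(res_o, tmp);                 // tmp = Mpc * x after CG convergence
  // axpy_norm(out,a,x,y) computes out = a*x + y and returns norm2(out),
  // so this forms the residual Mpc*x - b and its squared norm in one pass:
  RealD r2 = axpy_norm(tmp, -1.0, src_o, tmp);
  std::cout << "check Mpc resid " << r2 / norm2(src_o) << "\n";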
diff --git a/tests/solver/Test_wilson_cg_unprec.cc b/tests/solver/Test_wilson_cg_unprec.cc
index 19c5f854..db227ec8 100644
--- a/tests/solver/Test_wilson_cg_unprec.cc
+++ b/tests/solver/Test_wilson_cg_unprec.cc
@@ -52,7 +52,7 @@ int main (int argc, char ** argv)
   std::vector<int> simd_layout = GridDefaultSimd(Nd,vComplex::Nsimd());
   std::vector<int> mpi_layout  = GridDefaultMpi();
   GridCartesian             Grid(latt_size,simd_layout,mpi_layout);
-  GridRedBlackCartesian     RBGrid(latt_size,simd_layout,mpi_layout);
+  GridRedBlackCartesian     RBGrid(&Grid);
   std::vector<int> seeds({1,2,3,4});
   GridParallelRNG          pRNG(&Grid);  pRNG.SeedFixedIntegers(seeds);
diff --git a/tests/solver/Test_wilson_cr_unprec.cc b/tests/solver/Test_wilson_cr_unprec.cc
index 4182c04e..eccd7e74 100644
--- a/tests/solver/Test_wilson_cr_unprec.cc
+++ b/tests/solver/Test_wilson_cr_unprec.cc
@@ -52,7 +52,7 @@ int main (int argc, char ** argv)
   std::vector<int> simd_layout = GridDefaultSimd(Nd,vComplex::Nsimd());
   std::vector<int> mpi_layout  = GridDefaultMpi();
   GridCartesian             Grid(latt_size,simd_layout,mpi_layout);
-  GridRedBlackCartesian     RBGrid(latt_size,simd_layout,mpi_layout);
+  GridRedBlackCartesian     RBGrid(&Grid);
   std::vector<int> seeds({1,2,3,4});
   GridParallelRNG          pRNG(&Grid);  pRNG.SeedFixedIntegers(seeds);
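Note on the recurring change in these solver tests: the red-black (checkerboarded) grid is now constructed from the parent GridCartesian rather than from a separately supplied (lattice, SIMD, MPI) layout triple, so the two grids stay consistent by construction. The new idiom, as a minimal sketch:

  GridCartesian         Grid(latt_size, simd_layout, mpi_layout);
  GridRedBlackCartesian RBGrid(&Grid);   // checkerboard layout derived from Grid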