mirror of https://github.com/paboyle/Grid.git synced 2024-11-14 01:35:36 +00:00

Block compressed Lanczos

This commit is contained in:
paboyle 2017-10-10 14:15:11 +01:00
parent 10cb37f504
commit bf58557fb1
10 changed files with 3432 additions and 19 deletions

View File

@@ -207,7 +207,6 @@ namespace Grid {
       void OpDir  (const Field &in, Field &out,int dir,int disp) {
 	assert(0);
       }
-
     };
     template<class Matrix,class Field>
     class SchurDiagMooeeOperator :  public SchurOperatorBase<Field> {
@@ -265,7 +264,6 @@ namespace Grid {
 	return axpy_norm(out,-1.0,tmp,in);
       }
     };
-
     template<class Matrix,class Field>
     class SchurDiagTwoOperator :  public SchurOperatorBase<Field> {
     protected:
@@ -294,8 +292,15 @@ namespace Grid {
 	return axpy_norm(out,-1.0,tmp,in);
       }
     };
-
-
+    ///////////////////////////////////////////////////////////////////////////////////////////////////
+    // Left  handed Moo^-1 ; (Moo - Moe Mee^-1 Meo) psi = eta  -->  ( 1 - Moo^-1 Moe Mee^-1 Meo ) psi = Moo^-1 eta
+    // Right handed Moo^-1 ; (Moo - Moe Mee^-1 Meo) Moo^-1 Moo psi = eta  -->  ( 1 - Moe Mee^-1 Meo Moo^-1 ) phi = eta ; psi = Moo^-1 phi
+    ///////////////////////////////////////////////////////////////////////////////////////////////////
+    template<class Matrix,class Field> using SchurDiagOneRH = SchurDiagTwoOperator<Matrix,Field> ;
+    template<class Matrix,class Field> using SchurDiagOneLH = SchurDiagOneOperator<Matrix,Field> ;
+    ///////////////////////////////////////////////////////////////////////////////////////////////////
+    // Staggered use
+    ///////////////////////////////////////////////////////////////////////////////////////////////////
     template<class Matrix,class Field>
     class SchurStaggeredOperator :  public SchurOperatorBase<Field> {
     protected:
@@ -303,9 +308,8 @@ namespace Grid {
     public:
       SchurStaggeredOperator (Matrix &Mat): _Mat(Mat){};
       virtual void HermOpAndNorm(const Field &in, Field &out,RealD &n1,RealD &n2){
-	ComplexD dot;
 	n2 = Mpc(in,out);
-	dot= innerProduct(in,out);
+	ComplexD dot= innerProduct(in,out);
 	n1 = real(dot);
       }
       virtual void HermOp(const Field &in, Field &out){
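The new comment above documents two algebraically equivalent preconditionings. A minimal standalone check of that algebra, using scalar stand-ins for the operator blocks (arbitrary values, not Grid code):

#include <cstdio>
// S psi = eta with S = Moo - Moe Mee^-1 Meo is equivalent to
//  left handed : (1 - Moo^-1 Moe Mee^-1 Meo) psi = Moo^-1 eta
//  right handed: (1 - Moe Mee^-1 Meo Moo^-1) phi = eta, with psi = Moo^-1 phi
int main(void) {
  double Moo = 2.0, Moe = 0.5, Mee = 4.0, Meo = 0.25, eta = 1.0;
  double S    = Moo - Moe*(1.0/Mee)*Meo;                  // Schur complement
  double psi  = eta/S;
  double lhsL = (1.0 - (1.0/Moo)*Moe*(1.0/Mee)*Meo)*psi;  // should equal Moo^-1 eta
  double phi  = Moo*psi;
  double lhsR = (1.0 - Moe*(1.0/Mee)*Meo*(1.0/Moo))*phi;  // should equal eta
  printf("left : %g vs %g\n", lhsL, eta/Moo);
  printf("right: %g vs %g\n", lhsR, eta);
  return 0;
}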

View File

@@ -0,0 +1,754 @@
/*************************************************************************************
Grid physics library, www.github.com/paboyle/Grid
Source file: ./lib/algorithms/iterative/ImplicitlyRestartedLanczos.h
Copyright (C) 2015
Author: Peter Boyle <paboyle@ph.ed.ac.uk>
Author: paboyle <paboyle@ph.ed.ac.uk>
Author: Chulwoo Jung <chulwoo@bnl.gov>
Author: Christoph Lehner <clehner@bnl.gov>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */
#ifndef GRID_BIRL_H
#define GRID_BIRL_H
#include <string.h> //memset
#include <zlib.h>
#include <sys/stat.h>
#include <Grid/algorithms/iterative/BlockImplicitlyRestartedLanczos/BlockedGrid.h>
#include <Grid/algorithms/iterative/BlockImplicitlyRestartedLanczos/FieldBasisVector.h>
#include <Grid/algorithms/iterative/BlockImplicitlyRestartedLanczos/BlockProjector.h>
#include <Grid/algorithms/iterative/BlockImplicitlyRestartedLanczos/FieldVectorIO.h>
namespace Grid {
/////////////////////////////////////////////////////////////
// Implicitly restarted lanczos
/////////////////////////////////////////////////////////////
template<class Field>
class BlockImplicitlyRestartedLanczos {
const RealD small = 1.0e-16;
public:
int lock;
int get;
int Niter;
int converged;
int Nminres; // Minimum number of restarts; only check for convergence after this many
int Nstop; // Number of evecs checked for convergence
int Nk; // Number of evecs sought
int Np; // Np -- Number of spare vecs in Krylov space
int Nm; // Nm -- total number of vectors
int orth_period;
RealD OrthoTime;
RealD eresid, betastp;
SortEigen<Field> _sort;
LinearFunction<Field> &_HermOp;
LinearFunction<Field> &_HermOpTest;
/////////////////////////
// Constructor
/////////////////////////
BlockImplicitlyRestartedLanczos(
LinearFunction<Field> & HermOp,
LinearFunction<Field> & HermOpTest,
int _Nstop, // number of evecs checked for convergence
int _Nk, // number of evecs sought
int _Nm, // total number of vecs (sought + spare)
RealD _eresid, // residual in eigenvalue (lmd) deficit
RealD _betastp, // if beta(k) < betastp: converged
int _Niter, // max iterations
int _Nminres, int _orth_period = 1) :
_HermOp(HermOp),
_HermOpTest(HermOpTest),
Nstop(_Nstop),
Nk(_Nk),
Nm(_Nm),
eresid(_eresid),
betastp(_betastp),
Niter(_Niter),
Nminres(_Nminres),
orth_period(_orth_period)
{
Np = Nm-Nk; assert(Np>0);
};
BlockImplicitlyRestartedLanczos(
LinearFunction<Field> & HermOp,
LinearFunction<Field> & HermOpTest,
int _Nk, // number of evecs sought
int _Nm, // total number of vecs (sought + spare)
RealD _eresid, // residual in eigenvalue (lmd) deficit
RealD _betastp, // if beta(k) < betastp: converged
int _Niter, // max iterations
int _Nminres,
int _orth_period = 1) :
_HermOp(HermOp),
_HermOpTest(HermOpTest),
Nstop(_Nk),
Nk(_Nk),
Nm(_Nm),
eresid(_eresid),
betastp(_betastp),
Niter(_Niter),
Nminres(_Nminres),
orth_period(_orth_period)
{
Np = Nm-Nk; assert(Np>0);
};
/* Saad PP. 195
   1. Choose an initial vector v1 of 2-norm unity. Set β1 ← 0, v0 ← 0
   2. For k = 1,2,...,m Do:
   3.   wk := A vk − βk v_{k−1}
   4.   αk := (wk , vk)
   5.   wk := wk − αk vk            // wk orthogonal to vk
   6.   βk+1 := ∥wk∥2. If βk+1 = 0 then Stop
   7.   vk+1 := wk / βk+1
   8. EndDo
*/
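// step() below implements one pass of this recurrence: lmd[k] holds αk,
// lme[k] holds βk+1, evec[k] holds vk, and re-orthogonalisation against
// the earlier vectors is applied every orth_period steps.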
void step(std::vector<RealD>& lmd,
std::vector<RealD>& lme,
BasisFieldVector<Field>& evec,
Field& w,int Nm,int k)
{
assert( k< Nm );
GridStopWatch gsw_op,gsw_o;
Field& evec_k = evec[k];
gsw_op.Start();
_HermOp(evec_k,w);
gsw_op.Stop();
if(k>0){
w -= lme[k-1] * evec[k-1];
}
ComplexD zalph = innerProduct(evec_k,w); // 4. αk := (wk , vk)
RealD alph = real(zalph);
w = w - alph * evec_k; // 5. wk := wk − αk vk
RealD beta = normalise(w); // 6. βk+1 := ∥wk∥2. If βk+1 = 0 then Stop
// 7. vk+1 := wk/βk+1
std::cout<<GridLogMessage << "alpha[" << k << "] = " << zalph << " beta[" << k << "] = "<<beta<<std::endl;
const RealD tiny = 1.0e-20;
if ( beta < tiny ) {
std::cout<<GridLogMessage << " beta is tiny "<<beta<<std::endl;
}
lmd[k] = alph;
lme[k] = beta;
gsw_o.Start();
if (k>0 && k % orth_period == 0) {
orthogonalize(w,evec,k); // orthonormalise
}
gsw_o.Stop();
if(k < Nm-1) {
evec[k+1] = w;
}
std::cout << GridLogMessage << "Timing: operator=" << gsw_op.Elapsed() <<
" orth=" << gsw_o.Elapsed() << std::endl;
}
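// qr_decomp: one implicitly-shifted QR sweep on the tridiagonal matrix
// (lmd,lme) with shift Dsh, chasing the bulge with Givens rotations
// between rows kmin and kmax and accumulating the transform into Qt.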
void qr_decomp(std::vector<RealD>& lmd,
std::vector<RealD>& lme,
int Nk,
int Nm,
std::vector<RealD>& Qt,
RealD Dsh,
int kmin,
int kmax)
{
int k = kmin-1;
RealD x;
RealD Fden = 1.0/hypot(lmd[k]-Dsh,lme[k]);
RealD c = ( lmd[k] -Dsh) *Fden;
RealD s = -lme[k] *Fden;
RealD tmpa1 = lmd[k];
RealD tmpa2 = lmd[k+1];
RealD tmpb = lme[k];
lmd[k] = c*c*tmpa1 +s*s*tmpa2 -2.0*c*s*tmpb;
lmd[k+1] = s*s*tmpa1 +c*c*tmpa2 +2.0*c*s*tmpb;
lme[k] = c*s*(tmpa1-tmpa2) +(c*c-s*s)*tmpb;
x =-s*lme[k+1];
lme[k+1] = c*lme[k+1];
for(int i=0; i<Nk; ++i){
RealD Qtmp1 = Qt[i+Nm*k ];
RealD Qtmp2 = Qt[i+Nm*(k+1)];
Qt[i+Nm*k ] = c*Qtmp1 - s*Qtmp2;
Qt[i+Nm*(k+1)] = s*Qtmp1 + c*Qtmp2;
}
// Givens transformations
for(int k = kmin; k < kmax-1; ++k){
RealD Fden = 1.0/hypot(x,lme[k-1]);
RealD c = lme[k-1]*Fden;
RealD s = - x*Fden;
RealD tmpa1 = lmd[k];
RealD tmpa2 = lmd[k+1];
RealD tmpb = lme[k];
lmd[k] = c*c*tmpa1 +s*s*tmpa2 -2.0*c*s*tmpb;
lmd[k+1] = s*s*tmpa1 +c*c*tmpa2 +2.0*c*s*tmpb;
lme[k] = c*s*(tmpa1-tmpa2) +(c*c-s*s)*tmpb;
lme[k-1] = c*lme[k-1] -s*x;
if(k != kmax-2){
x = -s*lme[k+1];
lme[k+1] = c*lme[k+1];
}
for(int i=0; i<Nk; ++i){
RealD Qtmp1 = Qt[i+Nm*k ];
RealD Qtmp2 = Qt[i+Nm*(k+1)];
Qt[i+Nm*k ] = c*Qtmp1 -s*Qtmp2;
Qt[i+Nm*(k+1)] = s*Qtmp1 +c*Qtmp2;
}
}
}
#ifdef USE_LAPACK_IRL
#define LAPACK_INT int
//long long
void diagonalize_lapack(std::vector<RealD>& lmd,
std::vector<RealD>& lme,
int N1,
int N2,
std::vector<RealD>& Qt,
GridBase *grid){
std::cout << GridLogMessage << "diagonalize_lapack start\n";
GridStopWatch gsw;
const int size = Nm;
// tevals.resize(size);
// tevecs.resize(size);
LAPACK_INT NN = N1;
std::vector<double> evals_tmp(NN);
std::vector<double> evec_tmp(NN*NN);
memset(&evec_tmp[0],0,sizeof(double)*NN*NN);
// double AA[NN][NN];
std::vector<double> DD(NN);
std::vector<double> EE(NN);
for (int i = 0; i< NN; i++)
for (int j = i - 1; j <= i + 1; j++)
if ( j < NN && j >= 0 ) {
if (i==j) DD[i] = lmd[i];
if (i==j) evals_tmp[i] = lmd[i];
if (j==(i-1)) EE[j] = lme[j];
}
LAPACK_INT evals_found;
LAPACK_INT lwork = ( (18*NN) > (1+4*NN+NN*NN)? (18*NN):(1+4*NN+NN*NN)) ;
LAPACK_INT liwork = 3+NN*10 ;
std::vector<LAPACK_INT> iwork(liwork);
std::vector<double> work(lwork);
std::vector<LAPACK_INT> isuppz(2*NN);
char jobz = 'V'; // calculate evals & evecs
char range = 'I'; // calculate evals in the index interval [il,iu]
// char range = 'A'; // calculate all evals
char uplo = 'U'; // refer to upper half of original matrix
char compz = 'I'; // Compute eigenvectors of tridiagonal matrix
std::vector<int> ifail(NN);
LAPACK_INT info;
// int total = QMP_get_number_of_nodes();
// int node = QMP_get_node_number();
// GridBase *grid = evec[0]._grid;
int total = grid->_Nprocessors;
int node = grid->_processor;
int interval = (NN/total)+1;
double vl = 0.0, vu = 0.0;
LAPACK_INT il = interval*node+1 , iu = interval*(node+1);
if (iu > NN) iu=NN;
double tol = 0.0;
if (1) {
memset(&evals_tmp[0],0,sizeof(double)*NN);
if ( il <= NN){
std::cout << GridLogMessage << "dstegr started" << std::endl;
gsw.Start();
dstegr(&jobz, &range, &NN,
(double*)&DD[0], (double*)&EE[0],
&vl, &vu, &il, &iu, // these four are ignored if the second parameter is 'A'
&tol, // tolerance
&evals_found, &evals_tmp[0], (double*)&evec_tmp[0], &NN,
&isuppz[0],
&work[0], &lwork, &iwork[0], &liwork,
&info);
gsw.Stop();
std::cout << GridLogMessage << "dstegr completed in " << gsw.Elapsed() << std::endl;
for (int i = iu-1; i>= il-1; i--){
evals_tmp[i] = evals_tmp[i - (il-1)];
if (il>1) evals_tmp[i-(il-1)]=0.;
for (int j = 0; j< NN; j++){
evec_tmp[i*NN + j] = evec_tmp[(i - (il-1)) * NN + j];
if (il>1) evec_tmp[(i-(il-1)) * NN + j]=0.;
}
}
}
{
// QMP_sum_double_array(evals_tmp,NN);
// QMP_sum_double_array((double *)evec_tmp,NN*NN);
grid->GlobalSumVector(&evals_tmp[0],NN);
grid->GlobalSumVector(&evec_tmp[0],NN*NN);
}
}
// Cheating a bit: it would be better to sort rather than just reverse, but the routine's documentation says the evals come back sorted in increasing order, while qr gives evals in decreasing order.
for(int i=0;i<NN;i++){
for(int j=0;j<NN;j++)
Qt[(NN-1-i)*N2+j]=evec_tmp[i*NN + j];
lmd [NN-1-i]=evals_tmp[i];
}
std::cout << GridLogMessage << "diagonalize_lapack complete\n";
}
#undef LAPACK_INT
#endif
void diagonalize(std::vector<RealD>& lmd,
std::vector<RealD>& lme,
int N2,
int N1,
std::vector<RealD>& Qt,
GridBase *grid)
{
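// Tridiagonal eigensolve by shifted QL/QR iteration; with USE_LAPACK_IRL
// it either delegates to, or cross-checks against, LAPACK dstegr.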
#ifdef USE_LAPACK_IRL
const int check_lapack=0; // just use lapack if 0, check against lapack if 1
if(!check_lapack)
return diagonalize_lapack(lmd,lme,N2,N1,Qt,grid);
std::vector <RealD> lmd2(N1);
std::vector <RealD> lme2(N1);
std::vector<RealD> Qt2(N1*N1);
for(int k=0; k<N1; ++k){
lmd2[k] = lmd[k];
lme2[k] = lme[k];
}
for(int k=0; k<N1*N1; ++k)
Qt2[k] = Qt[k];
// diagonalize_lapack(lmd2,lme2,Nm2,Nm,Qt,grid);
#endif
int Niter = 10000*N1;
int kmin = 1;
int kmax = N2;
// (this should be more sophisticated)
for(int iter=0; ; ++iter){
if ( (iter+1)%(100*N1)==0)
std::cout<<GridLogMessage << "[QL method] Not converged - iteration "<<iter+1<<"\n";
// determination of 2x2 leading submatrix
RealD dsub = lmd[kmax-1]-lmd[kmax-2];
RealD dd = sqrt(dsub*dsub + 4.0*lme[kmax-2]*lme[kmax-2]);
RealD Dsh = 0.5*(lmd[kmax-2]+lmd[kmax-1] +dd*(dsub/fabs(dsub)));
// (Dsh: shift)
// transformation
qr_decomp(lmd,lme,N2,N1,Qt,Dsh,kmin,kmax);
// Convergence criterion (redefinition of kmin and kmax)
for(int j=kmax-1; j>= kmin; --j){
RealD dds = fabs(lmd[j-1])+fabs(lmd[j]);
if(fabs(lme[j-1])+dds > dds){
kmax = j+1;
goto continued;
}
}
Niter = iter;
#ifdef USE_LAPACK_IRL
if(check_lapack){
const double SMALL=1e-8;
diagonalize_lapack(lmd2,lme2,N2,N1,Qt2,grid);
std::vector <RealD> lmd3(N2);
for(int k=0; k<N2; ++k) lmd3[k]=lmd[k];
_sort.push(lmd3,N2);
_sort.push(lmd2,N2);
for(int k=0; k<N2; ++k){
if (fabs(lmd2[k] - lmd3[k]) >SMALL) std::cout<<GridLogMessage <<"lmd(qr) lmd(lapack) "<< k << ": " << lmd2[k] <<" "<< lmd3[k] <<std::endl;
// if (fabs(lme2[k] - lme[k]) >SMALL) std::cout<<GridLogMessage <<"lme(qr)-lme(lapack) "<< k << ": " << lme2[k] - lme[k] <<std::endl;
}
for(int k=0; k<N1*N1; ++k){
// if (fabs(Qt2[k] - Qt[k]) >SMALL) std::cout<<GridLogMessage <<"Qt(qr)-Qt(lapack) "<< k << ": " << Qt2[k] - Qt[k] <<std::endl;
}
}
#endif
return;
continued:
for(int j=0; j<kmax-1; ++j){
RealD dds = fabs(lmd[j])+fabs(lmd[j+1]);
if(fabs(lme[j])+dds > dds){
kmin = j+1;
break;
}
}
}
std::cout<<GridLogMessage << "[QL method] Error - Too many iteration: "<<Niter<<"\n";
abort();
}
#if 1
template<typename T>
static RealD normalise(T& v)
{
RealD nn = norm2(v);
nn = sqrt(nn);
v = v * (1.0/nn);
return nn;
}
void orthogonalize(Field& w,
BasisFieldVector<Field>& evec,
int k)
{
double t0=-usecond()/1e6;
evec.orthogonalize(w,k);
normalise(w);
t0+=usecond()/1e6;
OrthoTime +=t0;
}
void setUnit_Qt(int Nm, std::vector<RealD> &Qt) {
for(int i=0; i<Qt.size(); ++i) Qt[i] = 0.0;
for(int k=0; k<Nm; ++k) Qt[k + k*Nm] = 1.0;
}
/* Rudy Arthur's thesis pp.137
   ------------------------
   Require: M > K, P = M − K
   Compute the factorization A VM = VM HM + fM eM†
   repeat
     Q = I
     for i = 1,...,P do
       Qi Ri = HM − θi I ; Q = Q Qi
       HM = Qi† HM Qi
     end for
     βK = HM(K+1,K) ; σK = Q(M,K)
     r = vK+1 βK + r σK
     VK = VM(1:M) Q(1:M,1:K)
     HK = HM(1:K,1:K)
     A VK = VK HK + fK eK† ; extend to an M = K + P step factorization A VM = VM HM + fM eM†
   until convergence
*/
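// calc() below realises this restart cycle: the Np unwanted Ritz values
// eval2[k2..Nm-1] serve as the shifts θi, qr_decomp() accumulates Q,
// and evec.rotate() applies it to the Lanczos basis.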
void calc(std::vector<RealD>& eval,
BasisFieldVector<Field>& evec,
const Field& src,
int& Nconv,
bool reverse,
int SkipTest)
{
GridBase *grid = evec._v[0]._grid;//evec.get(0 + evec_offset)._grid;
assert(grid == src._grid);
std::cout<<GridLogMessage << " -- Nk = " << Nk << " Np = "<< Np << std::endl;
std::cout<<GridLogMessage << " -- Nm = " << Nm << std::endl;
std::cout<<GridLogMessage << " -- size of eval = " << eval.size() << std::endl;
std::cout<<GridLogMessage << " -- size of evec = " << evec.size() << std::endl;
assert(Nm <= evec.size() && Nm <= eval.size());
// quickly get an idea of the largest eigenvalue to more properly normalize the residuum
RealD evalMaxApprox = 0.0;
{
auto src_n = src;
auto tmp = src;
const int _MAX_ITER_IRL_MEVAPP_ = 50;
for (int i=0;i<_MAX_ITER_IRL_MEVAPP_;i++) {
_HermOpTest(src_n,tmp);
RealD vnum = real(innerProduct(src_n,tmp)); // HermOp.
RealD vden = norm2(src_n);
RealD na = vnum/vden;
if (fabs(evalMaxApprox/na - 1.0) < 0.05)
i=_MAX_ITER_IRL_MEVAPP_;
evalMaxApprox = na;
std::cout << GridLogMessage << " Approximation of largest eigenvalue: " << evalMaxApprox << std::endl;
src_n = tmp;
}
}
std::vector<RealD> lme(Nm);
std::vector<RealD> lme2(Nm);
std::vector<RealD> eval2(Nm);
std::vector<RealD> eval2_copy(Nm);
std::vector<RealD> Qt(Nm*Nm);
Field f(grid);
Field v(grid);
int k1 = 1;
int k2 = Nk;
Nconv = 0;
RealD beta_k;
// Set initial vector
evec[0] = src;
normalise(evec[0]);
std::cout<<GridLogMessage <<"norm2(evec[0])= " << norm2(evec[0])<<std::endl;
// Initial Nk steps
OrthoTime=0.;
double t0=usecond()/1e6;
for(int k=0; k<Nk; ++k) step(eval,lme,evec,f,Nm,k);
double t1=usecond()/1e6;
std::cout<<GridLogMessage <<"IRL::Initial steps: "<<t1-t0<< "seconds"<<std::endl; t0=t1;
std::cout<<GridLogMessage <<"IRL::Initial steps:OrthoTime "<<OrthoTime<< "seconds"<<std::endl;
t1=usecond()/1e6;
// Restarting loop begins
for(int iter = 0; iter<Niter; ++iter){
std::cout<<GridLogMessage<<"\n Restart iteration = "<< iter << std::endl;
//
// Rudy does a sort first which looks very different. Getting fed up with sorting out the algo defs.
// We loop over
//
OrthoTime=0.;
for(int k=Nk; k<Nm; ++k) step(eval,lme,evec,f,Nm,k);
t1=usecond()/1e6;
std::cout<<GridLogMessage <<"IRL:: "<<Np <<" steps: "<<t1-t0<< "seconds"<<std::endl; t0=t1;
std::cout<<GridLogMessage <<"IRL::Initial steps:OrthoTime "<<OrthoTime<< "seconds"<<std::endl;
f *= lme[Nm-1];
t1=usecond()/1e6;
// getting eigenvalues
for(int k=0; k<Nm; ++k){
eval2[k] = eval[k+k1-1];
lme2[k] = lme[k+k1-1];
}
setUnit_Qt(Nm,Qt);
diagonalize(eval2,lme2,Nm,Nm,Qt,grid);
t1=usecond()/1e6;
std::cout<<GridLogMessage <<"IRL:: diagonalize: "<<t1-t0<< "seconds"<<std::endl; t0=t1;
// sorting
eval2_copy = eval2;
_sort.push(eval2,Nm);
t1=usecond()/1e6;
std::cout<<GridLogMessage <<"IRL:: eval sorting: "<<t1-t0<< "seconds"<<std::endl; t0=t1;
// Implicitly shifted QR transformations
setUnit_Qt(Nm,Qt);
for(int ip=0; ip<k2; ++ip){
std::cout<<GridLogMessage << "eval "<< ip << " "<< eval2[ip] << std::endl;
}
for(int ip=k2; ip<Nm; ++ip){
std::cout<<GridLogMessage << "qr_decomp "<< ip << " "<< eval2[ip] << std::endl;
qr_decomp(eval,lme,Nm,Nm,Qt,eval2[ip],k1,Nm);
}
t1=usecond()/1e6;
std::cout<<GridLogMessage <<"IRL::qr_decomp: "<<t1-t0<< "seconds"<<std::endl; t0=t1;
assert(k2<Nm);
assert(k2<Nm);
assert(k1>0);
evec.rotate(Qt,k1-1,k2+1,0,Nm,Nm);
t1=usecond()/1e6;
std::cout<<GridLogMessage <<"IRL::QR rotation: "<<t1-t0<< "seconds"<<std::endl; t0=t1;
fflush(stdout);
// Compressed vector f and beta(k2)
f *= Qt[Nm-1+Nm*(k2-1)];
f += lme[k2-1] * evec[k2];
beta_k = norm2(f);
beta_k = sqrt(beta_k);
std::cout<<GridLogMessage<<" beta(k) = "<<beta_k<<std::endl;
RealD betar = 1.0/beta_k;
evec[k2] = betar * f;
lme[k2-1] = beta_k;
// Convergence test
for(int k=0; k<Nm; ++k){
eval2[k] = eval[k];
lme2[k] = lme[k];
std::cout<<GridLogMessage << "eval2[" << k << "] = " << eval2[k] << std::endl;
}
setUnit_Qt(Nm,Qt);
diagonalize(eval2,lme2,Nk,Nm,Qt,grid);
t1=usecond()/1e6;
std::cout<<GridLogMessage <<"IRL::diagonalize: "<<t1-t0<< "seconds"<<std::endl; t0=t1;
Nconv = 0;
if (iter >= Nminres) {
std::cout << GridLogMessage << "Rotation to test convergence " << std::endl;
Field ev0_orig(grid);
ev0_orig = evec[0];
evec.rotate(Qt,0,Nk,0,Nk,Nm);
{
std::cout << GridLogMessage << "Test convergence" << std::endl;
Field B(grid);
for(int j = 0; j<Nk; j+=SkipTest){
B=evec[j];
//std::cout << "Checkerboard: " << evec[j].checkerboard << std::endl;
B.checkerboard = evec[0].checkerboard;
_HermOpTest(B,v);
RealD vnum = real(innerProduct(B,v)); // HermOp.
RealD vden = norm2(B);
RealD vv0 = norm2(v);
eval2[j] = vnum/vden;
v -= eval2[j]*B;
RealD vv = norm2(v) / ::pow(evalMaxApprox,2.0);
std::cout.precision(13);
std::cout<<GridLogMessage << "[" << std::setw(3)<< std::setiosflags(std::ios_base::right) <<j<<"] "
<<"eval = "<<std::setw(25)<< std::setiosflags(std::ios_base::left)<< eval2[j] << " (" << eval2_copy[j] << ")"
<<" |H B[i] - eval[i]B[i]|^2 / evalMaxApprox^2 " << std::setw(25)<< std::setiosflags(std::ios_base::right)<< vv
<<" "<< vnum/(sqrt(vden)*sqrt(vv0))
<< " norm(B["<<j<<"])="<< vden <<std::endl;
// change the criteria as evals are supposed to be sorted, all evals smaller(larger) than Nstop should have converged
if((vv<eresid*eresid) && (j == Nconv) ){
Nconv+=SkipTest;
}
}
// test if we converged, if so, terminate
t1=usecond()/1e6;
std::cout<<GridLogMessage <<"IRL::convergence testing: "<<t1-t0<< "seconds"<<std::endl; t0=t1;
std::cout<<GridLogMessage<<" #modes converged: "<<Nconv<<std::endl;
if( Nconv>=Nstop || beta_k < betastp){
goto converged;
}
std::cout << GridLogMessage << "Rotate back" << std::endl;
//B[j] +=Qt[k+_Nm*j] * _v[k]._odata[ss];
{
Eigen::MatrixXd qm = Eigen::MatrixXd::Zero(Nk,Nk);
for (int k=0;k<Nk;k++)
for (int j=0;j<Nk;j++)
qm(j,k) = Qt[k+Nm*j];
GridStopWatch timeInv;
timeInv.Start();
Eigen::MatrixXd qmI = qm.inverse();
timeInv.Stop();
std::vector<RealD> QtI(Nm*Nm);
for (int k=0;k<Nk;k++)
for (int j=0;j<Nk;j++)
QtI[k+Nm*j] = qmI(j,k);
RealD res_check_rotate_inverse = (qm*qmI - Eigen::MatrixXd::Identity(Nk,Nk)).norm(); // sqrt( |X|^2 )
assert(res_check_rotate_inverse < 1e-7);
evec.rotate(QtI,0,Nk,0,Nk,Nm);
axpy(ev0_orig,-1.0,evec[0],ev0_orig);
std::cout << GridLogMessage << "Rotation done (in " << timeInv.Elapsed() << " = " << timeInv.useconds() << " us" <<
", error = " << res_check_rotate_inverse <<
"); | evec[0] - evec[0]_orig | = " << ::sqrt(norm2(ev0_orig)) << std::endl;
}
}
} else {
std::cout << GridLogMessage << "iter < Nminres: do not yet test for convergence\n";
} // end of iter loop
}
std::cout<<GridLogMessage<<"\n NOT converged.\n";
abort();
converged:
if (SkipTest == 1) {
eval = eval2;
} else {
// test quickly
for (int j=0;j<Nstop;j+=SkipTest) {
std::cout<<GridLogMessage << "Eigenvalue[" << j << "] = " << eval2[j] << " (" << eval2_copy[j] << ")" << std::endl;
}
eval2_copy.resize(eval2.size());
eval = eval2_copy;
}
evec.sortInPlace(eval,reverse);
{
// test
for (int j=0;j<Nstop;j++) {
std::cout<<GridLogMessage << " |e[" << j << "]|^2 = " << norm2(evec[j]) << std::endl;
}
}
//_sort.push(eval,evec,Nconv);
//evec.sort(eval,Nconv);
std::cout<<GridLogMessage << "\n Converged\n Summary :\n";
std::cout<<GridLogMessage << " -- Iterations = "<< Nconv << "\n";
std::cout<<GridLogMessage << " -- beta(k) = "<< beta_k << "\n";
std::cout<<GridLogMessage << " -- Nconv = "<< Nconv << "\n";
}
#endif
};
}
#endif
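For orientation, a minimal driver sketch for the class above, following the wiring used in Test_dwf_compressed_lanczos.cc at the bottom of this commit. The grids, the Chebyshev window, and all sizes are placeholders; HermOp is assumed to be any LinearOperatorBase<LatticeFermion>, and FunctionHermOp/PlainHermOp are the wrappers defined in that test:

// Sketch only: run the blocked IRL on p(M) and test convergence on M itself.
Chebyshev<LatticeFermion>      Cheb(0.01, 70.0, 200);  // [lo,hi] and order: placeholders
FunctionHermOp<LatticeFermion> PolyOp(Cheb, HermOp);   // p(M): drives the Lanczos steps
PlainHermOp<LatticeFermion>    Op(HermOp);             // plain M: used in the convergence test

int   Nstop = 100, Nk = 120, Nm = 160;                 // placeholder sizes, Nm > Nk
RealD resid = 1.0e-8, betastp = 0.0;
int   MaxIt = 10, MinRes = 0;

BlockImplicitlyRestartedLanczos<LatticeFermion>
  IRL(PolyOp, Op, Nstop, Nk, Nm, resid, betastp, MaxIt, MinRes);

BasisFieldVector<LatticeFermion> evec(Nm, FrbGrid);    // FrbGrid: checkerboarded 5d grid
std::vector<RealD> eval(Nm);
LatticeFermion src(FrbGrid);
src = 1.0; src.checkerboard = Odd;

int Nconv;
IRL.calc(eval, evec, src, Nconv, false, 1);            // reverse = false, SkipTest = 1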

View File

@@ -0,0 +1,143 @@
namespace Grid {
/*
BlockProjector
If _HP_BLOCK_PROJECTORS_ is defined, we assume that _evec is a basis that is not
fully orthonormalized (to the precision of the coarse field) and we allow for higher-precision
coarse field than basis field.
*/
//#define _HP_BLOCK_PROJECTORS_
template<typename Field>
class BlockProjector {
public:
BasisFieldVector<Field>& _evec;
BlockedGrid<Field>& _bgrid;
BlockProjector(BasisFieldVector<Field>& evec, BlockedGrid<Field>& bgrid) : _evec(evec), _bgrid(bgrid) {
}
void createOrthonormalBasis(RealD thres = 0.0) {
GridStopWatch sw;
sw.Start();
int cnt = 0;
#pragma omp parallel shared(cnt)
{
int lcnt = 0;
#pragma omp for
for (int b=0;b<_bgrid._o_blocks;b++) {
for (int i=0;i<_evec._Nm;i++) {
auto nrm0 = _bgrid.block_sp(b,_evec._v[i],_evec._v[i]);
// |i> -= <j|i> |j>
for (int j=0;j<i;j++) {
_bgrid.block_caxpy(b,_evec._v[i],-_bgrid.block_sp(b,_evec._v[j],_evec._v[i]),_evec._v[j],_evec._v[i]);
}
auto nrm = _bgrid.block_sp(b,_evec._v[i],_evec._v[i]);
auto eps = nrm/nrm0;
if (Reduce(eps).real() < thres) {
lcnt++;
}
// TODO: if norm is too small, remove this eigenvector/mark as not needed; in practice: set it to zero norm here and return a mask
// that is then used later to decide not to write certain eigenvectors to disk (add a norm calculation before subtraction step and look at nrm/nrm0 < eps to decide)
_bgrid.block_cscale(b,1.0 / sqrt(nrm),_evec._v[i]);
}
}
#pragma omp critical
{
cnt += lcnt;
}
}
sw.Stop();
std::cout << GridLogMessage << "Gram-Schmidt to create blocked basis took " << sw.Elapsed() << " (" << ((RealD)cnt / (RealD)_bgrid._o_blocks / (RealD)_evec._Nm)
<< " below threshold)" << std::endl;
}
template<typename CoarseField>
void coarseToFine(const CoarseField& in, Field& out) {
out = zero;
out.checkerboard = _evec._v[0].checkerboard;
int Nbasis = sizeof(in._odata[0]._internal._internal) / sizeof(in._odata[0]._internal._internal[0]);
assert(Nbasis == _evec._Nm);
#pragma omp parallel for
for (int b=0;b<_bgrid._o_blocks;b++) {
for (int j=0;j<_evec._Nm;j++) {
_bgrid.block_caxpy(b,out,in._odata[b]._internal._internal[j],_evec._v[j],out);
}
}
}
template<typename CoarseField>
void fineToCoarse(const Field& in, CoarseField& out) {
out = zero;
int Nbasis = sizeof(out._odata[0]._internal._internal) / sizeof(out._odata[0]._internal._internal[0]);
assert(Nbasis == _evec._Nm);
Field tmp(_bgrid._grid);
tmp = in;
#pragma omp parallel for
for (int b=0;b<_bgrid._o_blocks;b++) {
for (int j=0;j<_evec._Nm;j++) {
// |rhs> -= <j|rhs> |j>
auto c = _bgrid.block_sp(b,_evec._v[j],tmp);
_bgrid.block_caxpy(b,tmp,-c,_evec._v[j],tmp); // may make this more numerically stable
out._odata[b]._internal._internal[j] = c;
}
}
}
template<typename CoarseField>
void deflateFine(BasisFieldVector<CoarseField>& _coef,const std::vector<RealD>& eval,int N,const Field& src_orig,Field& result) {
result = zero;
for (int i=0;i<N;i++) {
Field tmp(result._grid);
coarseToFine(_coef._v[i],tmp);
axpy(result,TensorRemove(innerProduct(tmp,src_orig)) / eval[i],tmp,result);
}
}
template<typename CoarseField>
void deflateCoarse(BasisFieldVector<CoarseField>& _coef,const std::vector<RealD>& eval,int N,const Field& src_orig,Field& result) {
CoarseField src_coarse(_coef._v[0]._grid);
CoarseField result_coarse = src_coarse;
result_coarse = zero;
fineToCoarse(src_orig,src_coarse);
for (int i=0;i<N;i++) {
axpy(result_coarse,TensorRemove(innerProduct(_coef._v[i],src_coarse)) / eval[i],_coef._v[i],result_coarse);
}
coarseToFine(result_coarse,result);
}
template<typename CoarseField>
void deflate(BasisFieldVector<CoarseField>& _coef,const std::vector<RealD>& eval,int N,const Field& src_orig,Field& result) {
// Deflation on coarse Grid is much faster, so use it by default. Deflation on fine Grid is kept for legacy reasons for now.
deflateCoarse(_coef,eval,N,src_orig,result);
}
};
}
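A quick way to exercise the projector is the coarse-to-fine-to-coarse round trip used in the precision test of Test_dwf_compressed_lanczos.cc below. In this sketch basis (a BasisFieldVector<LatticeFermion>), FrbGrid, FCoarseGrid and block_size are assumed to exist, and the compile-time Nbasis of CoarseLatticeFermion (defined in that test) must equal basis._Nm:

// Sketch: block-orthonormalise the basis, then check restrict(prolong(c)) ≈ c.
BlockedGrid<LatticeFermion>    bgrid(FrbGrid, block_size);
BlockProjector<LatticeFermion> pr(basis, bgrid);
pr.createOrthonormalBasis();

CoarseLatticeFermion<Nbasis> c1(FCoarseGrid), c2(FCoarseGrid);
LatticeFermion f(FrbGrid);
c1 = 1.0; c2 = c1;
pr.coarseToFine(c1, f);   // prolongate to the fine grid
pr.fineToCoarse(f, c1);   // restrict back
c1 -= c2;
std::cout << GridLogMessage << "Precision Test c->f->c: " << norm2(c1)/norm2(c2) << std::endl;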

View File

@@ -0,0 +1,401 @@
namespace Grid {
template<typename Field>
class BlockedGrid {
public:
GridBase* _grid;
typedef typename Field::scalar_type Coeff_t;
typedef typename Field::vector_type vCoeff_t;
std::vector<int> _bs; // block size
std::vector<int> _nb; // number of blocks
std::vector<int> _l; // local dimensions irrespective of cb
std::vector<int> _l_cb; // local dimensions of checkerboarded vector
std::vector<int> _l_cb_o; // local dimensions of inner checkerboarded vector
std::vector<int> _bs_cb; // block size in checkerboarded vector
std::vector<int> _nb_o; // number of blocks of simd o-sites
int _nd, _blocks, _cf_size, _cf_block_size, _cf_o_block_size, _o_blocks, _block_sites;
BlockedGrid(GridBase* grid, const std::vector<int>& block_size) :
_grid(grid), _bs(block_size), _nd((int)_bs.size()),
_nb(block_size), _l(block_size), _l_cb(block_size), _nb_o(block_size),
_l_cb_o(block_size), _bs_cb(block_size) {
_blocks = 1;
_o_blocks = 1;
_l = grid->FullDimensions();
_l_cb = grid->LocalDimensions();
_l_cb_o = grid->_rdimensions;
_cf_size = 1;
_block_sites = 1;
for (int i=0;i<_nd;i++) {
_l[i] /= grid->_processors[i];
assert(!(_l[i] % _bs[i])); // lattice must accommodate choice of blocksize
int r = _l[i] / _l_cb[i];
assert(!(_bs[i] % r)); // checkerboarding must accommodate choice of blocksize
_bs_cb[i] = _bs[i] / r;
_block_sites *= _bs_cb[i];
_nb[i] = _l[i] / _bs[i];
_nb_o[i] = _nb[i] / _grid->_simd_layout[i];
if (_nb[i] % _grid->_simd_layout[i]) { // simd must accommodate choice of blocksize
std::cout << GridLogMessage << "Problem: _nb[" << i << "] = " << _nb[i] << " _grid->_simd_layout[" << i << "] = " << _grid->_simd_layout[i] << std::endl;
assert(0);
}
_blocks *= _nb[i];
_o_blocks *= _nb_o[i];
_cf_size *= _l[i];
}
_cf_size *= 12 / 2;
_cf_block_size = _cf_size / _blocks;
_cf_o_block_size = _cf_size / _o_blocks;
std::cout << GridLogMessage << "BlockedGrid:" << std::endl;
std::cout << GridLogMessage << " _l = " << _l << std::endl;
std::cout << GridLogMessage << " _l_cb = " << _l_cb << std::endl;
std::cout << GridLogMessage << " _l_cb_o = " << _l_cb_o << std::endl;
std::cout << GridLogMessage << " _bs = " << _bs << std::endl;
std::cout << GridLogMessage << " _bs_cb = " << _bs_cb << std::endl;
std::cout << GridLogMessage << " _nb = " << _nb << std::endl;
std::cout << GridLogMessage << " _nb_o = " << _nb_o << std::endl;
std::cout << GridLogMessage << " _blocks = " << _blocks << std::endl;
std::cout << GridLogMessage << " _o_blocks = " << _o_blocks << std::endl;
std::cout << GridLogMessage << " sizeof(vCoeff_t) = " << sizeof(vCoeff_t) << std::endl;
std::cout << GridLogMessage << " _cf_size = " << _cf_size << std::endl;
std::cout << GridLogMessage << " _cf_block_size = " << _cf_block_size << std::endl;
std::cout << GridLogMessage << " _block_sites = " << _block_sites << std::endl;
std::cout << GridLogMessage << " _grid->oSites() = " << _grid->oSites() << std::endl;
// _grid->Barrier();
//abort();
}
void block_to_coor(int b, std::vector<int>& x0) {
std::vector<int> bcoor;
bcoor.resize(_nd);
x0.resize(_nd);
assert(b < _o_blocks);
Lexicographic::CoorFromIndex(bcoor,b,_nb_o);
int i;
for (i=0;i<_nd;i++) {
x0[i] = bcoor[i]*_bs_cb[i];
}
//std::cout << GridLogMessage << "Map block b -> " << x0 << std::endl;
}
void block_site_to_o_coor(const std::vector<int>& x0, std::vector<int>& coor, int i) {
Lexicographic::CoorFromIndex(coor,i,_bs_cb);
for (int j=0;j<_nd;j++)
coor[j] += x0[j];
}
int block_site_to_o_site(const std::vector<int>& x0, int i) {
std::vector<int> coor; coor.resize(_nd);
block_site_to_o_coor(x0,coor,i);
Lexicographic::IndexFromCoor(coor,i,_l_cb_o);
return i;
}
vCoeff_t block_sp(int b, const Field& x, const Field& y) {
std::vector<int> x0;
block_to_coor(b,x0);
vCoeff_t ret = 0.0;
for (int i=0;i<_block_sites;i++) { // only odd sites
int ss = block_site_to_o_site(x0,i);
ret += TensorRemove(innerProduct(x._odata[ss],y._odata[ss]));
}
return ret;
}
vCoeff_t block_sp(int b, const Field& x, const std::vector< ComplexD >& y) {
std::vector<int> x0;
block_to_coor(b,x0);
constexpr int nsimd = sizeof(vCoeff_t) / sizeof(Coeff_t);
int lsize = _cf_o_block_size / _block_sites;
std::vector< ComplexD > ret(nsimd);
for (int i=0;i<nsimd;i++)
ret[i] = 0.0;
for (int i=0;i<_block_sites;i++) { // only odd sites
int ss = block_site_to_o_site(x0,i);
int n = lsize / nsimd;
for (int l=0;l<n;l++) {
for (int j=0;j<nsimd;j++) {
int t = lsize * i + l*nsimd + j;
ret[j] += conjugate(((Coeff_t*)&x._odata[ss]._internal)[l*nsimd + j]) * y[t];
}
}
}
vCoeff_t vret;
for (int i=0;i<nsimd;i++)
((Coeff_t*)&vret)[i] = (Coeff_t)ret[i];
return vret;
}
template<class T>
void vcaxpy(iScalar<T>& r,const vCoeff_t& a,const iScalar<T>& x,const iScalar<T>& y) {
vcaxpy(r._internal,a,x._internal,y._internal);
}
template<class T,int N>
void vcaxpy(iVector<T,N>& r,const vCoeff_t& a,const iVector<T,N>& x,const iVector<T,N>& y) {
for (int i=0;i<N;i++)
vcaxpy(r._internal[i],a,x._internal[i],y._internal[i]);
}
void vcaxpy(vCoeff_t& r,const vCoeff_t& a,const vCoeff_t& x,const vCoeff_t& y) {
r = a*x + y;
}
void block_caxpy(int b, Field& ret, const vCoeff_t& a, const Field& x, const Field& y) {
std::vector<int> x0;
block_to_coor(b,x0);
for (int i=0;i<_block_sites;i++) { // only odd sites
int ss = block_site_to_o_site(x0,i);
vcaxpy(ret._odata[ss],a,x._odata[ss],y._odata[ss]);
}
}
void block_caxpy(int b, std::vector< ComplexD >& ret, const vCoeff_t& a, const Field& x, const std::vector< ComplexD >& y) {
std::vector<int> x0;
block_to_coor(b,x0);
constexpr int nsimd = sizeof(vCoeff_t) / sizeof(Coeff_t);
int lsize = _cf_o_block_size / _block_sites;
for (int i=0;i<_block_sites;i++) { // only odd sites
int ss = block_site_to_o_site(x0,i);
int n = lsize / nsimd;
for (int l=0;l<n;l++) {
vCoeff_t r = a* ((vCoeff_t*)&x._odata[ss]._internal)[l];
for (int j=0;j<nsimd;j++) {
int t = lsize * i + l*nsimd + j;
ret[t] = y[t] + ((Coeff_t*)&r)[j];
}
}
}
}
void block_set(int b, Field& ret, const std::vector< ComplexD >& x) {
std::vector<int> x0;
block_to_coor(b,x0);
int lsize = _cf_o_block_size / _block_sites;
for (int i=0;i<_block_sites;i++) { // only odd sites
int ss = block_site_to_o_site(x0,i);
for (int l=0;l<lsize;l++)
((Coeff_t*)&ret._odata[ss]._internal)[l] = (Coeff_t)x[lsize * i + l]; // convert precision
}
}
void block_get(int b, const Field& ret, std::vector< ComplexD >& x) {
std::vector<int> x0;
block_to_coor(b,x0);
int lsize = _cf_o_block_size / _block_sites;
for (int i=0;i<_block_sites;i++) { // only odd sites
int ss = block_site_to_o_site(x0,i);
for (int l=0;l<lsize;l++)
x[lsize * i + l] = (ComplexD)((Coeff_t*)&ret._odata[ss]._internal)[l];
}
}
template<class T>
void vcscale(iScalar<T>& r,const vCoeff_t& a,const iScalar<T>& x) {
vcscale(r._internal,a,x._internal);
}
template<class T,int N>
void vcscale(iVector<T,N>& r,const vCoeff_t& a,const iVector<T,N>& x) {
for (int i=0;i<N;i++)
vcscale(r._internal[i],a,x._internal[i]);
}
void vcscale(vCoeff_t& r,const vCoeff_t& a,const vCoeff_t& x) {
r = a*x;
}
void block_cscale(int b, const vCoeff_t& a, Field& ret) {
std::vector<int> x0;
block_to_coor(b,x0);
for (int i=0;i<_block_sites;i++) { // only odd sites
int ss = block_site_to_o_site(x0,i);
vcscale(ret._odata[ss],a,ret._odata[ss]);
}
}
void getCanonicalBlockOffset(int cb, std::vector<int>& x0) {
const int ndim = 5;
assert(_nb.size() == ndim);
std::vector<int> _nbc = { _nb[1], _nb[2], _nb[3], _nb[4], _nb[0] };
std::vector<int> _bsc = { _bs[1], _bs[2], _bs[3], _bs[4], _bs[0] };
x0.resize(ndim);
assert(cb >= 0);
assert(cb < _nbc[0]*_nbc[1]*_nbc[2]*_nbc[3]*_nbc[4]);
Lexicographic::CoorFromIndex(x0,cb,_nbc);
int i;
for (i=0;i<ndim;i++) {
x0[i] *= _bsc[i];
}
//if (cb < 2)
// std::cout << GridLogMessage << "Map: " << cb << " To: " << x0 << std::endl;
}
void pokeBlockOfVectorCanonical(int cb,Field& v,const std::vector<float>& buf) {
std::vector<int> _bsc = { _bs[1], _bs[2], _bs[3], _bs[4], _bs[0] };
std::vector<int> ldim = v._grid->LocalDimensions();
std::vector<int> cldim = { ldim[1], ldim[2], ldim[3], ldim[4], ldim[0] };
const int _nbsc = _bs_cb[0]*_bs_cb[1]*_bs_cb[2]*_bs_cb[3]*_bs_cb[4];
// take canonical block cb of v and put it in canonical ordering in buf
std::vector<int> cx0;
getCanonicalBlockOffset(cb,cx0);
#pragma omp parallel
{
std::vector<int> co0,cl0;
co0=cx0; cl0=cx0;
#pragma omp for
for (int i=0;i<_nbsc;i++) {
Lexicographic::CoorFromIndex(co0,2*i,_bsc); // 2* for eo
for (int j=0;j<(int)_bsc.size();j++)
cl0[j] = cx0[j] + co0[j];
std::vector<int> l0 = { cl0[4], cl0[0], cl0[1], cl0[2], cl0[3] };
int oi = v._grid->oIndex(l0);
int ii = v._grid->iIndex(l0);
int lti = i;
//if (cb < 2 && i<2)
// std::cout << GridLogMessage << "Map: " << cb << ", " << i << " To: " << cl0 << ", " << cx0 << ", " << oi << ", " << ii << std::endl;
for (int s=0;s<4;s++)
for (int c=0;c<3;c++) {
Coeff_t& ld = ((Coeff_t*)&v._odata[oi]._internal._internal[s]._internal[c])[ii];
int ti = 12*lti + 3*s + c;
ld = Coeff_t(buf[2*ti+0], buf[2*ti+1]);
}
}
}
}
void peekBlockOfVectorCanonical(int cb,const Field& v,std::vector<float>& buf) {
std::vector<int> _bsc = { _bs[1], _bs[2], _bs[3], _bs[4], _bs[0] };
std::vector<int> ldim = v._grid->LocalDimensions();
std::vector<int> cldim = { ldim[1], ldim[2], ldim[3], ldim[4], ldim[0] };
const int _nbsc = _bs_cb[0]*_bs_cb[1]*_bs_cb[2]*_bs_cb[3]*_bs_cb[4];
// take canonical block cb of v and put it in canonical ordering in buf
std::vector<int> cx0;
getCanonicalBlockOffset(cb,cx0);
buf.resize(_cf_block_size * 2);
#pragma omp parallel
{
std::vector<int> co0,cl0;
co0=cx0; cl0=cx0;
#pragma omp for
for (int i=0;i<_nbsc;i++) {
Lexicographic::CoorFromIndex(co0,2*i,_bsc); // 2* for eo
for (int j=0;j<(int)_bsc.size();j++)
cl0[j] = cx0[j] + co0[j];
std::vector<int> l0 = { cl0[4], cl0[0], cl0[1], cl0[2], cl0[3] };
int oi = v._grid->oIndex(l0);
int ii = v._grid->iIndex(l0);
int lti = i;
//if (cb < 2 && i<2)
// std::cout << GridLogMessage << "Map: " << cb << ", " << i << " To: " << cl0 << ", " << cx0 << ", " << oi << ", " << ii << std::endl;
for (int s=0;s<4;s++)
for (int c=0;c<3;c++) {
Coeff_t& ld = ((Coeff_t*)&v._odata[oi]._internal._internal[s]._internal[c])[ii];
int ti = 12*lti + 3*s + c;
buf[2*ti+0] = ld.real();
buf[2*ti+1] = ld.imag();
}
}
}
}
int globalToLocalCanonicalBlock(int slot,const std::vector<int>& src_nodes,int nb) {
// processor coordinate
int _nd = (int)src_nodes.size();
std::vector<int> _src_nodes = src_nodes;
std::vector<int> pco(_nd);
Lexicographic::CoorFromIndex(pco,slot,_src_nodes);
std::vector<int> cpco = { pco[1], pco[2], pco[3], pco[4], pco[0] };
// get local block
std::vector<int> _nbc = { _nb[1], _nb[2], _nb[3], _nb[4], _nb[0] };
assert(_nd == 5);
std::vector<int> c_src_local_blocks(_nd);
for (int i=0;i<_nd;i++) {
assert(_grid->_fdimensions[i] % (src_nodes[i] * _bs[i]) == 0);
c_src_local_blocks[(i+4) % 5] = _grid->_fdimensions[i] / src_nodes[i] / _bs[i];
}
std::vector<int> cbcoor(_nd); // coordinate of block in slot in canonical form
Lexicographic::CoorFromIndex(cbcoor,nb,c_src_local_blocks);
// cpco, cbcoor
std::vector<int> clbcoor(_nd);
for (int i=0;i<_nd;i++) {
int cgcoor = cpco[i] * c_src_local_blocks[i] + cbcoor[i]; // global block coordinate
int pcoor = cgcoor / _nbc[i]; // processor coordinate in my Grid
int tpcoor = _grid->_processor_coor[(i+1)%5];
if (pcoor != tpcoor)
return -1;
clbcoor[i] = cgcoor - tpcoor * _nbc[i]; // canonical local block coordinate for canonical dimension i
}
int lnb;
Lexicographic::IndexFromCoor(clbcoor,lnb,_nbc);
//std::cout << "Mapped slot = " << slot << " nb = " << nb << " to " << lnb << std::endl;
return lnb;
}
};
}
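The constructor's per-dimension bookkeeping is easy to check in isolation. A standalone sketch with assumed example numbers (local extent 16, block size 4, checkerboard reduction 2, simd layout 2), mirroring the asserts above:

#include <cassert>
#include <cstdio>
int main(void) {
  int l = 16, bs = 4, l_cb = 8, simd = 2;
  assert(l % bs == 0);      // lattice must accommodate the choice of block size
  int r = l / l_cb;         // checkerboard reduction factor
  assert(bs % r == 0);      // checkerboarding must accommodate the block size
  int bs_cb = bs / r;       // block size within the checkerboarded volume
  int nb = l / bs;          // number of blocks
  assert(nb % simd == 0);   // simd layout must divide the block count
  int nb_o = nb / simd;     // blocks of simd o-sites
  printf("bs_cb=%d nb=%d nb_o=%d\n", bs_cb, nb, nb_o);
  return 0;
}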

View File

@@ -0,0 +1,163 @@
namespace Grid {
template<class Field>
class BasisFieldVector {
public:
int _Nm;
typedef typename Field::scalar_type Coeff_t;
typedef typename Field::vector_type vCoeff_t;
typedef typename Field::vector_object vobj;
typedef typename vobj::scalar_object sobj;
std::vector<Field> _v; // _Nm vectors
void report(int n,GridBase* value) {
std::cout << GridLogMessage << "BasisFieldVector allocated:\n";
std::cout << GridLogMessage << " Delta N = " << n << "\n";
std::cout << GridLogMessage << " Size of full vectors (size) = " <<
((double)n*sizeof(vobj)*value->oSites() / 1024./1024./1024.) << " GB\n";
std::cout << GridLogMessage << " Size = " << _v.size() << " Capacity = " << _v.capacity() << std::endl;
value->Barrier();
if (value->IsBoss()) {
system("cat /proc/meminfo");
}
value->Barrier();
}
BasisFieldVector(int Nm,GridBase* value) : _Nm(Nm), _v(Nm,value) {
report(Nm,value);
}
~BasisFieldVector() {
}
Field& operator[](int i) {
return _v[i];
}
void orthogonalize(Field& w, int k) {
for(int j=0; j<k; ++j){
Coeff_t ip = (Coeff_t)innerProduct(_v[j],w);
w = w - ip*_v[j];
}
}
void rotate(std::vector<RealD>& Qt,int j0, int j1, int k0,int k1,int Nm) {
GridBase* grid = _v[0]._grid;
#pragma omp parallel
{
std::vector < vobj > B(Nm);
#pragma omp for
for(int ss=0;ss < grid->oSites();ss++){
for(int j=j0; j<j1; ++j) B[j]=0.;
for(int j=j0; j<j1; ++j){
for(int k=k0; k<k1; ++k){
B[j] +=Qt[k+Nm*j] * _v[k]._odata[ss];
}
}
for(int j=j0; j<j1; ++j){
_v[j]._odata[ss] = B[j];
}
}
}
}
size_t size() const {
return _Nm;
}
void resize(int n) {
if (n > _Nm)
_v.reserve(n);
_v.resize(n,_v[0]._grid);
if (n < _Nm)
_v.shrink_to_fit();
report(n - _Nm,_v[0]._grid);
_Nm = n;
}
std::vector<int> getIndex(std::vector<RealD>& sort_vals) {
std::vector<int> idx(sort_vals.size());
iota(idx.begin(), idx.end(), 0);
// sort indexes based on comparing values in v
sort(idx.begin(), idx.end(),
[&sort_vals](int i1, int i2) {return ::fabs(sort_vals[i1]) < ::fabs(sort_vals[i2]);});
return idx;
}
void reorderInPlace(std::vector<RealD>& sort_vals, std::vector<int>& idx) {
GridStopWatch gsw;
gsw.Start();
int nswaps = 0;
for (size_t i=0;i<idx.size();i++) {
if (idx[i] != i) {
// find proper place (this could be done in logarithmic time, don't bother for now)
size_t j;
for (j=i;j<idx.size();j++)
if (idx[j]==i)
break;
assert(j!=idx.size());
Field _t(_v[0]._grid);
_t = _v[idx[j]];
_v[idx[j]] = _v[idx[i]];
_v[idx[i]] = _t;
RealD _td = sort_vals[idx[j]];
sort_vals[idx[j]] = sort_vals[idx[i]];
sort_vals[idx[i]] = _td;
int _tt = idx[i];
idx[i] = idx[j];
idx[j] = _tt;
nswaps++;
}
}
// sort values
gsw.Stop();
std::cout << GridLogMessage << "Sorted eigenspace in place in " << gsw.Elapsed() << " using " << nswaps << " swaps" << std::endl;
}
void sortInPlace(std::vector<RealD>& sort_vals, bool reverse) {
std::vector<int> idx = getIndex(sort_vals);
if (reverse)
std::reverse(idx.begin(), idx.end());
reorderInPlace(sort_vals,idx);
}
void deflate(const std::vector<RealD>& eval,const Field& src_orig,Field& result) {
result = zero;
int N = (int)_v.size();
for (int i=0;i<N;i++) {
Field& tmp = _v[i];
axpy(result,TensorRemove(innerProduct(tmp,src_orig)) / eval[i],tmp,result);
}
}
};
}
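The index-sort behind getIndex()/sortInPlace() above can be sketched with plain std::vector stand-ins for the fields (eigenvalues ordered by increasing magnitude, as in the class):

#include <algorithm>
#include <cmath>
#include <cstdio>
#include <numeric>
#include <vector>
int main(void) {
  std::vector<double> vals = { -0.3, 0.05, 0.2, -0.01 };
  std::vector<int> idx(vals.size());
  std::iota(idx.begin(), idx.end(), 0);
  std::sort(idx.begin(), idx.end(),
            [&](int a, int b) { return std::fabs(vals[a]) < std::fabs(vals[b]); });
  for (int i : idx) printf("%d %g\n", i, vals[i]);  // ascending |value|
  return 0;
}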

File diff suppressed because it is too large

View File

@@ -61,10 +61,10 @@ namespace QCD {
   }

   /***************************************************************
-  /* Additional EOFA operators only called outside the inverter.
-  /* Since speed is not essential, simple axpby-style
-  /* implementations should be fine.
-  /***************************************************************/
+   * Additional EOFA operators only called outside the inverter.
+   * Since speed is not essential, simple axpby-style
+   * implementations should be fine.
+   ***************************************************************/
   template<class Impl>
   void DomainWallEOFAFermion<Impl>::Omega(const FermionField& psi, FermionField& Din, int sign, int dag)
   {
@@ -116,8 +116,8 @@ namespace QCD {
   }

   /********************************************************************
-  /* Performance critical fermion operators called inside the inverter
-  /********************************************************************/
+   * Performance critical fermion operators called inside the inverter
+   ********************************************************************/

   template<class Impl>
   void DomainWallEOFAFermion<Impl>::M5D(const FermionField& psi, FermionField& chi)

View File

@@ -77,11 +77,11 @@ namespace QCD {
     }
   }

-  /***************************************************************
-  /* Additional EOFA operators only called outside the inverter.
-  /* Since speed is not essential, simple axpby-style
-  /* implementations should be fine.
-  /***************************************************************/
+  /****************************************************************
+   * Additional EOFA operators only called outside the inverter.
+   * Since speed is not essential, simple axpby-style
+   * implementations should be fine.
+   ***************************************************************/
   template<class Impl>
   void MobiusEOFAFermion<Impl>::Omega(const FermionField& psi, FermionField& Din, int sign, int dag)
   {
@@ -194,8 +194,8 @@ namespace QCD {
   }

   /********************************************************************
-  /* Performance critical fermion operators called inside the inverter
-  /********************************************************************/
+   * Performance critical fermion operators called inside the inverter
+   ********************************************************************/

   template<class Impl>
   void MobiusEOFAFermion<Impl>::M5D(const FermionField& psi, FermionField& chi)

tests/solver/Params.h (new file, 136 lines)
View File

@@ -0,0 +1,136 @@
/*
Params IO
Author: Christoph Lehner
Date: 2017
*/
#define PADD(p,X) p.get(#X,X);
class Params {
protected:
std::string trim(const std::string& sc) {
std::string s = sc;
s.erase(s.begin(), std::find_if(s.begin(), s.end(),
std::not1(std::ptr_fun<int, int>(std::isspace))));
s.erase(std::find_if(s.rbegin(), s.rend(),
std::not1(std::ptr_fun<int, int>(std::isspace))).base(), s.end());
return s;
}
public:
std::map< std::string, std::string > lines;
std::string _fn;
Params(const char* fn) : _fn(fn) {
FILE* f = fopen(fn,"rt");
assert(f);
while (!feof(f)) {
char buf[4096];
if (fgets(buf,sizeof(buf),f)) {
if (buf[0] != '#' && buf[0] != '\r' && buf[0] != '\n') {
char* sep = strchr(buf,'=');
assert(sep);
*sep = '\0';
lines[trim(buf)] = trim(sep+1);
}
}
}
fclose(f);
}
~Params() {
}
std::string loghead() {
return _fn + ": ";
}
bool has(const char* name) {
auto f = lines.find(name);
return (f != lines.end());
}
const std::string& get(const char* name) {
auto f = lines.find(name);
if (f == lines.end()) {
std::cout << Grid::GridLogMessage << loghead() << "Could not find value for " << name << std::endl;
abort();
}
return f->second;
}
void parse(std::string& s, const std::string& cval) {
std::stringstream trimmer;
trimmer << cval;
s.clear();
trimmer >> s;
}
void parse(int& i, const std::string& cval) {
assert(sscanf(cval.c_str(),"%d",&i)==1);
}
void parse(long long& i, const std::string& cval) {
assert(sscanf(cval.c_str(),"%lld",&i)==1);
}
void parse(double& f, const std::string& cval) {
assert(sscanf(cval.c_str(),"%lf",&f)==1);
}
void parse(float& f, const std::string& cval) {
assert(sscanf(cval.c_str(),"%f",&f)==1);
}
void parse(bool& b, const std::string& cval) {
std::string lcval = cval;
std::transform(lcval.begin(), lcval.end(), lcval.begin(), ::tolower);
if (lcval == "true" || lcval == "yes") {
b = true;
} else if (lcval == "false" || lcval == "no") {
b = false;
} else {
std::cout << "Invalid value for boolean: " << b << std::endl;
assert(0);
}
}
void parse(std::complex<double>& f, const std::string& cval) {
double r,i;
assert(sscanf(cval.c_str(),"%lf %lf",&r,&i)==2);
f = std::complex<double>(r,i);
}
void parse(std::complex<float>& f, const std::string& cval) {
float r,i;
assert(sscanf(cval.c_str(),"%f %f",&r,&i)==2);
f = std::complex<float>(r,i);
}
template<class T>
void get(const char* name, std::vector<T>& v) {
int i = 0;
v.resize(0);
while (true) {
char buf[4096];
sprintf(buf,"%s[%d]",name,i++);
if (!has(buf))
break;
T val;
parse(val,get(buf));
std::cout << Grid::GridLogMessage << loghead() << "Set " << buf << " to " << val << std::endl;
v.push_back(val);
}
}
template<class T>
void get(const char* name, T& f) {
parse(f,get(name));
std::cout << Grid::GridLogMessage << loghead() << "Set " << name << " to " << f << std::endl;
}
};
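As a usage sketch, reading a hypothetical parameter file "lanczos.params" containing lines such as "Nk = 120", "resid = 1e-8" and "bs[0] = 4" (Params.h relies on the standard headers pulled in by <Grid/Grid.h>, as in the test below):

#include <Grid/Grid.h>
#include "Params.h"
int main(void) {
  Params p("lanczos.params");  // get() aborts if a requested key is missing
  int Nk = 0; double resid = 0.0; std::vector<int> bs;
  PADD(p, Nk);     // expands to p.get("Nk", Nk);
  PADD(p, resid);
  PADD(p, bs);     // reads bs[0], bs[1], ... until an index is absent
  return 0;
}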

View File

@@ -0,0 +1,727 @@
/*
Authors: Christoph Lehner
Date: 2017
Multigrid Lanczos
TODO:
High priority:
- Explore filtering of the starting vector again; it should really work: if the Chebyshev is ~4 in the low-mode region and ~1 for the high modes, applying it 15 times gives 4^15 ≈ 1e9 suppression
  of the high modes, which should already create the desired invariant subspace. Missing something here??? Maybe the dynamic range is dangerous, i.e., it could also kill an interesting
  eigenrange if one is not careful.
  Better: use all Chebyshevs up to order N to approximate a step function; try this! Problem: the width of the step function. One can kill the eigenspace > 1e-3 and keep < 1e-5 equal
  to 1.
Low priority:
- Given that I seem to need many restarts and a high-degree poly to create the basis, and this takes about 1 day, seriously consider a simple method to create a basis
  (ortho Krylov, low-order poly) and then fix up the lowest, say, 200 eigenvalues by 1 run with a high-degree poly (600 could be enough).
*/
#include <Grid/Grid.h>
#include "Params.h"
#include <Grid/algorithms/iterative/BlockImplicitlyRestartedLanczos/BlockImplicitlyRestartedLanczos.h>
using namespace std;
using namespace Grid;
using namespace Grid::QCD;
bool read_evals(GridBase* _grid, char* fn, std::vector<RealD>& evals) {
FILE* f = 0;
uint32_t status = 0;
if (_grid->IsBoss()) {
f = fopen(fn,"rt");
status = f ? 1 : 0;
}
_grid->GlobalSum(status);
if (!status)
return false;
uint32_t N;
if (f)
assert(fscanf(f,"%d\n",&N)==1);
else
N = 0;
_grid->GlobalSum(N);
std::cout << "Reading " << N << " eigenvalues" << std::endl;
evals.resize(N);
for (int i=0;i<N;i++) {
if (f)
assert(fscanf(f,"%lf",&evals[i])==1);
else
evals[i] = 0;
}
_grid->GlobalSumVector(&evals[0],evals.size());
if (f)
fclose(f);
return true;
}
void write_evals(char* fn, std::vector<RealD>& evals) {
FILE* f = fopen(fn,"wt");
assert(f);
int N = (int)evals.size();
fprintf(f,"%d\n",N);
for (int i=0;i<N;i++) {
fprintf(f,"%.15E\n",evals[i]);
}
fclose(f);
}
void write_history(char* fn, std::vector<RealD>& hist) {
FILE* f = fopen(fn,"wt");
assert(f);
int N = (int)hist.size();
for (int i=0;i<N;i++) {
fprintf(f,"%d %.15E\n",i,hist[i]);
}
fclose(f);
}
template<typename Field>
class FunctionHermOp : public LinearFunction<Field> {
public:
OperatorFunction<Field> & _poly;
LinearOperatorBase<Field> &_Linop;
FunctionHermOp(OperatorFunction<Field> & poly,LinearOperatorBase<Field>& linop) : _poly(poly), _Linop(linop) {
}
void operator()(const Field& in, Field& out) {
_poly(_Linop,in,out);
}
};
template<typename Field>
class CheckpointedLinearFunction : public LinearFunction<Field> {
public:
LinearFunction<Field>& _op;
std::string _dir;
int _max_apply;
int _apply, _apply_actual;
GridBase* _grid;
FILE* _f;
CheckpointedLinearFunction(GridBase* grid, LinearFunction<Field>& op, const char* dir,int max_apply) : _op(op), _dir(dir), _grid(grid), _f(0),
_max_apply(max_apply), _apply(0), _apply_actual(0) {
FieldVectorIO::conditionalMkDir(dir);
char fn[4096];
sprintf(fn,"%s/ckpt_op.%4.4d",_dir.c_str(),_grid->ThisRank());
printf("CheckpointLinearFunction:: file %s\n",fn);
_f = fopen(fn,"r+b");
if (!_f)
_f = fopen(fn,"w+b");
assert(_f);
fseek(_f,0,SEEK_CUR);
}
~CheckpointedLinearFunction() {
if (_f) {
fclose(_f);
_f = 0;
}
}
bool load_ckpt(const Field& in, Field& out) {
off_t cur = ftello(_f);
fseeko(_f,0,SEEK_END);
if (cur == ftello(_f))
return false;
fseeko(_f,cur,SEEK_SET);
size_t sz = sizeof(out._odata[0]) * out._odata.size();
GridStopWatch gsw;
gsw.Start();
uint32_t crc_exp;
assert(fread(&crc_exp,4,1,_f)==1);
assert(fread(&out._odata[0],sz,1,_f)==1);
assert(FieldVectorIO::crc32_threaded((unsigned char*)&out._odata[0],sz,0x0)==crc_exp);
gsw.Stop();
printf("CheckpointLinearFunction:: reading %lld\n",(long long)sz);
std::cout << GridLogMessage << "Loading " << ((RealD)sz/1024./1024./1024.) << " GB in " << gsw.Elapsed() << std::endl;
return true;
}
void save_ckpt(const Field& in, Field& out) {
fseek(_f,0,SEEK_CUR); // switch to write
size_t sz = sizeof(out._odata[0]) * out._odata.size();
GridStopWatch gsw;
gsw.Start();
uint32_t crc = FieldVectorIO::crc32_threaded((unsigned char*)&out._odata[0],sz,0x0);
assert(fwrite(&crc,4,1,_f)==1);
assert(fwrite(&out._odata[0],sz,1,_f)==1);
fflush(_f); // try this on the GPFS to suppress OPA usage for disk during dslash; this is not needed at Lustre/JLAB
gsw.Stop();
printf("CheckpointLinearFunction:: writing %lld\n",(long long)sz);
std::cout << GridLogMessage << "Saving " << ((RealD)sz/1024./1024./1024.) << " GB in " << gsw.Elapsed() << std::endl;
}
void operator()(const Field& in, Field& out) {
_apply++;
if (load_ckpt(in,out))
return;
_op(in,out);
save_ckpt(in,out);
if (_apply_actual++ >= _max_apply) {
std::cout << GridLogMessage << "Maximum application of operator reached, checkpoint and finish in future job" << std::endl;
if (_f) { fclose(_f); _f=0; }
in._grid->Barrier();
Grid_finalize();
exit(3);
}
}
};
template<typename CoarseField,typename Field>
class ProjectedFunctionHermOp : public LinearFunction<CoarseField> {
public:
OperatorFunction<Field> & _poly;
LinearOperatorBase<Field> &_Linop;
BlockProjector<Field>& _pr;
ProjectedFunctionHermOp(BlockProjector<Field>& pr,OperatorFunction<Field> & poly,LinearOperatorBase<Field>& linop) : _poly(poly), _Linop(linop), _pr(pr) {
}
void operator()(const CoarseField& in, CoarseField& out) {
assert(_pr._bgrid._o_blocks == in._grid->oSites());
Field fin(_pr._bgrid._grid);
Field fout(_pr._bgrid._grid);
GridStopWatch gsw1,gsw2,gsw3;
// fill fin
gsw1.Start();
_pr.coarseToFine(in,fin);
gsw1.Stop();
// apply poly
gsw2.Start();
_poly(_Linop,fin,fout);
gsw2.Stop();
// fill out
gsw3.Start();
_pr.fineToCoarse(fout,out);
gsw3.Stop();
auto eps = innerProduct(in,out);
std::cout << GridLogMessage << "Operator timing details: c2f = " << gsw1.Elapsed() << " poly = " << gsw2.Elapsed() << " f2c = " << gsw3.Elapsed() <<
" Complimentary Hermiticity check: " << eps.imag() / std::abs(eps) << std::endl;
}
};
template<typename CoarseField,typename Field>
class ProjectedHermOp : public LinearFunction<CoarseField> {
public:
LinearOperatorBase<Field> &_Linop;
BlockProjector<Field>& _pr;
ProjectedHermOp(BlockProjector<Field>& pr,LinearOperatorBase<Field>& linop) : _Linop(linop), _pr(pr) {
}
void operator()(const CoarseField& in, CoarseField& out) {
assert(_pr._bgrid._o_blocks == in._grid->oSites());
Field fin(_pr._bgrid._grid);
Field fout(_pr._bgrid._grid);
_pr.coarseToFine(in,fin);
_Linop.HermOp(fin,fout);
_pr.fineToCoarse(fout,out);
}
};
template<typename Field>
class PlainHermOp : public LinearFunction<Field> {
public:
LinearOperatorBase<Field> &_Linop;
PlainHermOp(LinearOperatorBase<Field>& linop) : _Linop(linop) {
}
void operator()(const Field& in, Field& out) {
_Linop.HermOp(in,out);
}
};
template<typename vtype, int N > using CoarseSiteFieldGeneral = iScalar< iVector<vtype, N> >;
template<int N> using CoarseSiteFieldD = CoarseSiteFieldGeneral< vComplexD, N >;
template<int N> using CoarseSiteFieldF = CoarseSiteFieldGeneral< vComplexF, N >;
template<int N> using CoarseSiteField = CoarseSiteFieldGeneral< vComplex, N >;
template<int N> using CoarseLatticeFermion = Lattice< CoarseSiteField<N> >;
template<int N> using CoarseLatticeFermionD = Lattice< CoarseSiteFieldD<N> >;
template<typename Field,int Nstop1>
void CoarseGridLanczos(BlockProjector<Field>& pr,RealD alpha2,RealD beta,int Npoly2,
int Nstop2,int Nk2,int Nm2,RealD resid2,RealD betastp2,int MaxIt,int MinRes2,
LinearOperatorBase<Field>& HermOp, std::vector<RealD>& eval1, bool cg_test_enabled,
int cg_test_maxiter,int nsingle,int SkipTest2, int MaxApply2,bool smoothed_eval_enabled,
int smoothed_eval_inner,int smoothed_eval_outer,int smoothed_eval_begin,
int smoothed_eval_end,RealD smoothed_eval_inner_resid) {
BlockedGrid<Field>& bgrid = pr._bgrid;
BasisFieldVector<Field>& basis = pr._evec;
std::vector<int> coarseFourDimLatt;
for (int i=0;i<4;i++)
coarseFourDimLatt.push_back(bgrid._nb[1+i] * bgrid._grid->_processors[1+i]);
assert(bgrid._grid->_processors[0] == 1);
std::cout << GridLogMessage << "CoarseGrid = " << coarseFourDimLatt << " with basis = " << Nstop1 << std::endl;
GridCartesian * UCoarseGrid = SpaceTimeGrid::makeFourDimGrid(coarseFourDimLatt, GridDefaultSimd(Nd,vComplex::Nsimd()),GridDefaultMpi());
GridCartesian * FCoarseGrid = SpaceTimeGrid::makeFiveDimGrid(bgrid._nb[0],UCoarseGrid);
Chebyshev<Field> Cheb2(alpha2,beta,Npoly2);
CoarseLatticeFermion<Nstop1> src_coarse(FCoarseGrid);
// Second round of Lanczos in blocked space
std::vector<RealD> eval2(Nm2);
std::vector<RealD> eval3(Nm2);
BasisFieldVector<CoarseLatticeFermion<Nstop1> > coef(Nm2,FCoarseGrid);
ProjectedFunctionHermOp<CoarseLatticeFermion<Nstop1>,LatticeFermion> Op2plain(pr,Cheb2,HermOp);
CheckpointedLinearFunction<CoarseLatticeFermion<Nstop1> > Op2ckpt(src_coarse._grid,Op2plain,"checkpoint",MaxApply2);
LinearFunction< CoarseLatticeFermion<Nstop1> >* Op2;
if (MaxApply2) {
Op2 = &Op2ckpt;
} else {
Op2 = &Op2plain;
}
ProjectedHermOp<CoarseLatticeFermion<Nstop1>,LatticeFermion> Op2nopoly(pr,HermOp);
BlockImplicitlyRestartedLanczos<CoarseLatticeFermion<Nstop1> > IRL2(*Op2,*Op2,Nstop2,Nk2,Nm2,resid2,betastp2,MaxIt,MinRes2);
src_coarse = 1.0;
// Precision test
{
Field tmp(bgrid._grid);
CoarseLatticeFermion<Nstop1> tmp2(FCoarseGrid);
CoarseLatticeFermion<Nstop1> tmp3(FCoarseGrid);
tmp2 = 1.0;
tmp3 = 1.0;
pr.coarseToFine(tmp2,tmp);
pr.fineToCoarse(tmp,tmp2);
tmp2 -= tmp3;
std::cout << GridLogMessage << "Precision Test c->f->c: " << norm2(tmp2) / norm2(tmp3) << std::endl;
//bgrid._grid->Barrier();
//return;
}
int Nconv;
if (!FieldVectorIO::read_compressed_vectors("lanczos.output",pr,coef) ||
!read_evals(UCoarseGrid,(char *)"lanczos.output/eigen-values.txt",eval3) ||
!read_evals(UCoarseGrid,(char *)"lanczos.output/eigen-values.txt.linear",eval1) ||
!read_evals(UCoarseGrid,(char *)"lanczos.output/eigen-values.txt.poly",eval2)
) {
IRL2.calc(eval2,coef,src_coarse,Nconv,true,SkipTest2);
coef.resize(Nstop2);
eval2.resize(Nstop2);
eval3.resize(Nstop2);
std::vector<Field> step3_cache;
// reconstruct eigenvalues of original operator
for (int i=0;i<Nstop2;i++){
RealD eval2_linear;
if (i<Nstop1) {
eval2_linear = eval1[i];
} else {
eval2_linear = eval2[i-1];
}
RealD eval2_poly = eval2[i];
RealD eval_reconstruct = Cheb2.approxInv(eval2_poly,eval2_linear,100,1e-10);
std::cout << i << " Reconstructed eval = " << eval_reconstruct << " from guess " << eval2_linear << std::endl;
eval2[i] = eval_reconstruct;
}
// as demonstrated in the CG test below, the best result comes from the mixed determination
for (int i=0;i<Nstop2;i++)
eval3[i] = (i < Nstop1) ? eval1[i] : eval2[i];
for (int i=0;i<Nstop2;i++) {
std::cout << i << " / " << Nstop2 << " eigenvalue " << eval3[i] << std::endl;
}
// write compressed vectors and eigenvalues to disk
mkdir("lanczos.output",ACCESSPERMS);
FieldVectorIO::write_compressed_vectors("lanczos.output",pr,coef,nsingle);
if (bgrid._grid->IsBoss()) {
write_evals((char *)"lanczos.output/eigen-values.txt",eval3);
write_evals((char *)"lanczos.output/eigen-values.txt.linear",eval1);
write_evals((char *)"lanczos.output/eigen-values.txt.poly",eval2);
}
}
// fix up eigenvalues
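// Smoothing is approximate inverse iteration: a low-accuracy CG solve of
// HermOp x = v_i amplifies the low-mode content of v_i; after renormalizing
// and repeating smoothed_eval_outer times, the Rayleigh quotient <v,H v>
// (v has unit norm here) gives an improved eigenvalue estimate.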
if (!read_evals(UCoarseGrid,(char *)"lanczos.output/eigen-values.txt.smoothed",eval3) && smoothed_eval_enabled) {
ConjugateGradient<LatticeFermion> CG(smoothed_eval_inner_resid, smoothed_eval_inner, false);
LatticeFermion v_i(basis[0]._grid);
auto tmp = v_i;
auto tmp2 = v_i;
for (int i=smoothed_eval_begin;i<smoothed_eval_end;i++) {
GridStopWatch gsw;
gsw.Start();
pr.coarseToFine(coef[i],v_i);
v_i.checkerboard = Odd;
for (int j=0;j<smoothed_eval_outer;j++) {
tmp=zero;
//pr.deflate(coef,eval3,Nstop2,v_i,tmp);
CG(HermOp, v_i, tmp);
v_i = 1.0 / ::sqrt( norm2(tmp) ) * tmp;
}
tmp = v_i;
HermOp.HermOp(tmp,tmp2);
RealD ev = innerProduct(tmp,tmp2).real();
gsw.Stop();
std::cout << GridLogMessage << "Smoothed eigenvalue " << i << " from " << eval3[i] << " to " << ev << " in " << gsw.Elapsed() << std::endl;
// " with effective smoother precision " << (CG.ResHistory.back() / CG.ResHistory.front() ) << std::endl;
// CG.ResHistory.clear();
eval3[i] = ev;
}
if (bgrid._grid->IsBoss()) {
write_evals((char *)"lanczos.output/eigen-values.txt.smoothed",eval3);
write_evals((char *)"lanczos.output/eigen-values.txt",eval3); // also reset this to the best ones we have available
}
}
// do CG test with and without deflation
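// Deflation seeds the solver with the low-mode part of the solution; the
// projector presumably forms x0 = sum_i v_i <v_i,src> / lambda_i so that CG
// only has to resolve the high-mode remainder. The four solves below compare
// no deflation, the blocked basis with eval2 and with eval3, and the
// unblocked Nstop1 vectors with eval1.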
if (cg_test_enabled) {
ConjugateGradient<LatticeFermion> CG(1.0e-8, cg_test_maxiter, false);
LatticeFermion src_orig(bgrid._grid);
src_orig.checkerboard = Odd;
src_orig = 1.0;
src_orig = src_orig * (1.0 / ::sqrt(norm2(src_orig)) );
auto result = src_orig;
// undeflated solve
result = zero;
CG(HermOp, src_orig, result);
// if (UCoarseGrid->IsBoss())
// write_history("cg_test.undefl",CG.ResHistory);
// CG.ResHistory.clear();
// deflated solve with all eigenvectors
result = zero;
pr.deflate(coef,eval2,Nstop2,src_orig,result);
CG(HermOp, src_orig, result);
// if (UCoarseGrid->IsBoss())
// write_history("cg_test.defl_all",CG.ResHistory);
// CG.ResHistory.clear();
// deflated solve with non-blocked eigenvectors
result = zero;
pr.deflate(coef,eval1,Nstop1,src_orig,result);
CG(HermOp, src_orig, result);
// if (UCoarseGrid->IsBoss())
// write_history("cg_test.defl_full",CG.ResHistory);
// CG.ResHistory.clear();
// deflated solve with all eigenvectors and the mixed/smoothed eigenvalues (eval3)
result = zero;
pr.deflate(coef,eval3,Nstop2,src_orig,result);
CG(HermOp, src_orig, result);
// if (UCoarseGrid->IsBoss())
// write_history("cg_test.defl_all_ev3",CG.ResHistory);
// CG.ResHistory.clear();
}
}
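// Build a quick-and-dirty Krylov basis: repeatedly apply Op, orthogonalize
// against the vectors found so far, and normalize. This provides a subspace
// for blocking without a converged Lanczos run, but yields no eigenvalues.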
template<typename Field>
void quick_krylov_basis(BasisFieldVector<Field>& evec,Field& src,LinearFunction<Field>& Op,int Nstop) {
Field tmp = src;
Field tmp2 = tmp;
for (int i=0;i<Nstop;i++) {
GridStopWatch gsw;
gsw.Start();
Op(tmp,tmp2);
gsw.Stop();
evec.orthogonalize(tmp2,i);
RealD nn = norm2(tmp2);
nn = Grid::sqrt(nn);
tmp2 = tmp2 * (1.0/nn);
evec[i] = tmp2;
tmp = tmp2;
std::cout << GridLogMessage << "Quick_krylov_basis: " << i << "/" << Nstop << " timing of operator=" << gsw.Elapsed() << std::endl;
}
}
int main (int argc, char ** argv) {
Grid_init(&argc,&argv);
const int MaxIt = 10000;
int Ls;
RealD mass;
RealD M5;
std::vector < std::complex<double> > omega;
RealD alpha1, alpha2, beta;
int Npoly1, Npoly2;
int Nstop1, Nstop2;
int Nk1, Nk2;
int Np1, Np2;
int MinRes1, MinRes2;
int SkipTest2, MaxApply2;
bool checkpoint_basis;
bool cg_test_enabled;
bool exit_after_basis_calculation;
bool simple_krylov_basis;
int cg_test_maxiter;
int nsingle; // number of vectors to store in single precision; the rest in FP16
int max_cheb_time_ms;
bool smoothed_eval_enabled;
int smoothed_eval_inner;
int smoothed_eval_outer;
int smoothed_eval_begin;
int smoothed_eval_end;
RealD smoothed_eval_inner_resid;
// blocked (compressed) vector representation
std::vector<int> block_size; // 5d block size
RealD resid1, resid2, betastp1, betastp2, basis_norm_threshold;
std::string config;
Params jp("params.txt");
PADD(jp,Npoly1); PADD(jp,Npoly2);
PADD(jp,max_cheb_time_ms);
PADD(jp,Nstop1); PADD(jp,Nstop2); PADD(jp,MaxApply2);
PADD(jp,Nk1); PADD(jp,Nk2); PADD(jp,betastp1); PADD(jp,betastp2);
PADD(jp,Np1); PADD(jp,Np2); basis_norm_threshold = 1e-5; //PADD(jp,basis_norm_threshold);
PADD(jp,block_size); PADD(jp,smoothed_eval_enabled); PADD(jp,smoothed_eval_inner);
PADD(jp,resid1); PADD(jp,resid2); PADD(jp,smoothed_eval_outer);
PADD(jp,alpha1); PADD(jp,alpha2); PADD(jp,smoothed_eval_begin);
PADD(jp,MinRes1); PADD(jp,MinRes2); PADD(jp,smoothed_eval_end);
PADD(jp,beta); PADD(jp,mass); PADD(jp,smoothed_eval_inner_resid);
PADD(jp,omega); PADD(jp,config);
PADD(jp,M5); PADD(jp,cg_test_enabled);
PADD(jp,cg_test_maxiter); PADD(jp,checkpoint_basis);
PADD(jp,nsingle); PADD(jp,exit_after_basis_calculation);
PADD(jp,simple_krylov_basis); PADD(jp,SkipTest2);
Ls = (int)omega.size();
// Grids
GridCartesian * UGrid = SpaceTimeGrid::makeFourDimGrid(GridDefaultLatt(), GridDefaultSimd(Nd,vComplex::Nsimd()),GridDefaultMpi());
GridCartesian * UGridHP = SpaceTimeGrid::makeFourDimGrid(GridDefaultLatt(), GridDefaultSimd(Nd,vComplexD::Nsimd()),GridDefaultMpi());
GridRedBlackCartesian * UrbGrid = SpaceTimeGrid::makeFourDimRedBlackGrid(UGrid);
GridRedBlackCartesian * UrbGridHP = SpaceTimeGrid::makeFourDimRedBlackGrid(UGridHP);
GridCartesian * FGrid = SpaceTimeGrid::makeFiveDimGrid(Ls,UGrid);
GridCartesian * FGridHP = SpaceTimeGrid::makeFiveDimGrid(Ls,UGridHP);
GridRedBlackCartesian * FrbGrid = SpaceTimeGrid::makeFiveDimRedBlackGrid(Ls,UGrid);
GridRedBlackCartesian * FrbGridHP = SpaceTimeGrid::makeFiveDimRedBlackGrid(Ls,UGridHP);
// Gauge field
LatticeGaugeField Umu(UGrid);
FieldMetaData header;
NerscIO::readConfiguration(Umu,header,config);
std::cout << GridLogMessage << "Lattice dimensions: " << GridDefaultLatt()
<< " Ls: " << Ls << std::endl;
// ZMobius EO Operator
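// Even-odd (red-black) preconditioned Hermitian operator; all Lanczos and
// CG work below acts on odd-checkerboard fields on FrbGrid.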
ZMobiusFermionR Ddwf(Umu, *FGrid, *FrbGrid, *UGrid, *UrbGrid, mass, M5, omega,1.,0.);
SchurDiagTwoOperator<ZMobiusFermionR,LatticeFermion> HermOp(Ddwf);
// Eigenvector storage
const int Nm1 = Np1 + Nk1;
const int Nm2 = Np2 + Nk2; // maximum number of vectors we need to keep
std::cout << GridLogMessage << "Keep " << Nm1 << " full vectors" << std::endl;
std::cout << GridLogMessage << "Keep " << Nm2 << " total vectors" << std::endl;
assert(Nm2 >= Nm1);
BasisFieldVector<LatticeFermion> evec(Nm1,FrbGrid); // start off with keeping full vectors
// First Chebyshev filter (Cheb2 is built inside CoarseGridLanczos)
Chebyshev<LatticeFermion> Cheb1(alpha1,beta,Npoly1);
FunctionHermOp<LatticeFermion> Op1(Cheb1,HermOp);
PlainHermOp<LatticeFermion> Op1test(HermOp);
// Eigenvalue storage
std::vector<RealD> eval1(evec.size());
// Construct source vector
LatticeFermion src(FrbGrid);
{
src=1.0;
src.checkerboard = Odd;
// normalize
RealD nn = norm2(src);
nn = Grid::sqrt(nn);
src = src * (1.0/nn);
}
// Run a quick benchmark and exit early if performance is too poor (ugly but needed due to performance fluctuations)
if (max_cheb_time_ms) {
// one round of warmup
auto tmp = src;
GridStopWatch gsw1,gsw2;
gsw1.Start();
Cheb1(HermOp,src,tmp);
gsw1.Stop();
Ddwf.ZeroCounters();
gsw2.Start();
Cheb1(HermOp,src,tmp);
gsw2.Stop();
Ddwf.Report();
std::cout << GridLogMessage << "Performance check; warmup = " << gsw1.Elapsed() << " test = " << gsw2.Elapsed() << std::endl;
int ms = (int)(gsw2.useconds()/1e3);
if (ms > max_cheb_time_ms) {
std::cout << GridLogMessage << "Performance too poor: " << ms << " ms, cutoff = " << max_cheb_time_ms << " ms" << std::endl;
Grid_finalize();
return 2;
}
}
// First round of Lanczos to get low mode basis
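// Standard IRL bookkeeping: start from Nk1 Krylov vectors, restart with up
// to Nm1 = Nk1 + Np1 vectors, and stop once Nstop1 eigenpairs have converged
// to resid1 (Op1 applies the Chebyshev filter, Op1test the plain operator).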
BlockImplicitlyRestartedLanczos<LatticeFermion> IRL1(Op1,Op1test,Nstop1,Nk1,Nm1,resid1,betastp1,MaxIt,MinRes1);
int Nconv;
if (!FieldVectorIO::read_argonne(evec,(char *)"checkpoint") || !read_evals(UGrid,(char *)"checkpoint/eigen-values.txt",eval1)) {
if (simple_krylov_basis) {
quick_krylov_basis(evec,src,Op1,Nstop1);
} else {
IRL1.calc(eval1,evec,src,Nconv,false,1);
}
evec.resize(Nstop1); // and throw away the superfluous vectors
eval1.resize(Nstop1);
if (checkpoint_basis)
FieldVectorIO::write_argonne(evec,(char *)"checkpoint");
if (UGrid->IsBoss() && checkpoint_basis)
write_evals((char *)"checkpoint/eigen-values.txt",eval1);
Ddwf.Report();
if (exit_after_basis_calculation) {
Grid_finalize();
return 0;
}
}
// now test eigenvectors
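// For each basis vector B form the Rayleigh quotient
// lambda = <B,H B> / <B,B> and the residual ||H B - lambda B||^2; lambda
// should agree with the reconstructed eval1[i] and the residual should be
// consistent with the requested resid1.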
if (!simple_krylov_basis) {
for (int i=0;i<Nstop1;i++){
auto B = evec[i];
auto tmp = B;
auto v = B;
{
HermOp.HermOp(B,v);
RealD vnum = real(innerProduct(B,v)); // <B, H B>
RealD vden = norm2(B);
RealD vv0 = norm2(v);
RealD eval2 = vnum/vden;
v -= eval2*B;
RealD vv = norm2(v);
std::cout << i << " OP eval = " << eval2 << " (" << eval1[i] << ") "
<< "res2 = " << vv << " norm2 = " << norm2(B) << std::endl;
}
}
}
// do second step only if needed
if (Nstop1 <= Nstop2) {
// Now setup blocking
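// Divide the checkerboarded 5d lattice into blocks of size block_size and
// orthonormalize the Nstop1 vectors block by block; local coherence of the
// low modes is what makes the compressed blocked representation accurate.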
assert(evec.size() == Nstop1);
BlockedGrid<LatticeFermion> bgrid(FrbGrid, block_size);
BlockProjector<LatticeFermion> pr(evec,bgrid);
pr.createOrthonormalBasis(basis_norm_threshold);
pr.createOrthonormalBasis(basis_norm_threshold); // another round due to precision issues created by local coherence
constexpr int common_basis_sizes[] = { 60, 250, 400 };
constexpr int n_common_basis_sizes = sizeof(common_basis_sizes) / sizeof(common_basis_sizes[0]);
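// The basis size is a compile-time template parameter of the coarse site
// field, so each supported size needs an explicit instantiation here;
// dispatch on the runtime Nstop1 via the switch below.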
switch (Nstop1) {
#define BASIS(n) case common_basis_sizes[n]:\
CoarseGridLanczos<LatticeFermion,common_basis_sizes[n]>\
(pr,alpha2,beta,Npoly2,Nstop2,Nk2,Nm2,resid2,betastp2,MaxIt,MinRes2,HermOp,eval1, \
cg_test_enabled,cg_test_maxiter,nsingle,SkipTest2, \
MaxApply2,smoothed_eval_enabled,smoothed_eval_inner,smoothed_eval_outer, \
smoothed_eval_begin,smoothed_eval_end,smoothed_eval_inner_resid); break;
BASIS(0);
BASIS(1);
BASIS(2);
default:
std::cout << GridLogMessage << "Basis size " << Nstop1 << " must be added at compile-time" << std::endl;
std::cout << GridLogMessage << "Currently available sizes: " << std::endl;
for (int i=0;i<n_common_basis_sizes;i++) {
std::cout << GridLogMessage << " " << common_basis_sizes[i] << std::endl;
}
}
}
Grid_finalize();
return 0;
}