/*************************************************************************************

    Grid physics library, www.github.com/paboyle/Grid

    Source file: ./lib/algorithms/iterative/ImplicitlyRestartedBlockLanczos.h

    Copyright (C) 2015

    Author: Peter Boyle
    Author: Chulwoo Jung
    Author: Yong-Chull Jang
    Author: Guido Cossu

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License along
    with this program; if not, write to the Free Software Foundation, Inc.,
    51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.

    See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/*  END LEGAL */

#ifndef GRID_IRBL_H
#define GRID_IRBL_H

#include <string.h> //memset

#ifdef USE_LAPACK
#include <lapacke.h>
#endif
#undef USE_LAPACK

#define Glog std::cout << GridLogMessage

#ifdef GRID_CUDA
#include "cublas_v2.h"
#endif

#if 0
#define CUDA_COMPLEX cuDoubleComplex
#define CUDA_FLOAT double
#define MAKE_CUDA_COMPLEX make_cuDoubleComplex
#define CUDA_GEMM cublasZgemm
#else
#define CUDA_COMPLEX cuComplex
#define CUDA_FLOAT float
#define MAKE_CUDA_COMPLEX make_cuComplex
#define CUDA_GEMM cublasCgemm
#endif
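// Note on the macros above: the disabled #if 0 branch maps the CUDA_* aliases
// to double precision (cuDoubleComplex / cublasZgemm); the branch actually
// compiled selects single precision (cuComplex / cublasCgemm), so the cuBLAS
// path below appears to assume a single-precision Field. The factor 12 in the
// buffer sizes used later is the number of complex degrees of freedom per
// site of a Wilson-type fermion field (4 spins x 3 colours).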
namespace Grid {

////////////////////////////////////////////////////////////////////////////////
// Helper class for sorting the evalues AND evectors by Field
// Use pointer swizzle on vectors SHOULD GET RID OF IT SOON!
////////////////////////////////////////////////////////////////////////////////
template<class Field>
class SortEigen {
 private:
  static bool less_lmd(RealD left,RealD right){
    return left > right;
  }
  static bool less_pair(std::pair<RealD,Field const*>& left,
                        std::pair<RealD,Field const*>& right){
    return left.first > (right.first);
  }

 public:
  void push(std::vector<RealD>& lmd,std::vector<Field>& evec,int N) {
    ////////////////////////////////////////////////////////////////////////
    // PAB: FIXME: VERY VERY VERY wasteful: takes a copy of the entire vector set.
    //    : The vector reorder should be done by pointer swizzle somehow
    ////////////////////////////////////////////////////////////////////////
    std::vector<Field> cpy(lmd.size(),evec[0].Grid());
    for(int i=0;i<lmd.size();i++) cpy[i] = evec[i];

    std::vector<std::pair<RealD, Field const*> > emod(lmd.size());
    for(int i=0;i<lmd.size();++i) emod[i] = std::pair<RealD,Field const*>(lmd[i],&cpy[i]);

    std::partial_sort(emod.begin(),emod.begin()+N,emod.end(),less_pair);

    typename std::vector<std::pair<RealD, Field const*> >::iterator it = emod.begin();
    for(int i=0;i<N;++i){
      lmd[i]=it->first;
      evec[i]=*(it->second);
      ++it;
    }
  }
  void push(std::vector<RealD>& lmd,int N) {
    std::partial_sort(lmd.begin(),lmd.begin()+N,lmd.end(),less_lmd);
  }
  bool saturated(RealD lmd, RealD thrs) {
    return fabs(lmd) > fabs(thrs);
  }
};

enum class LanczosType { irbl, rbl };

enum IRBLdiagonalisation {
  IRBLdiagonaliseWithDSTEGR,
  IRBLdiagonaliseWithQR,
  IRBLdiagonaliseWithEigen,
  IRBLdiagonaliseWithLAPACK // referenced by the USE_LAPACK branches below
};

/////////////////////////////////////////////////////////////
// Implicitly restarted block lanczos
/////////////////////////////////////////////////////////////
template<class Field>
class ImplicitlyRestartedBlockLanczos {

private:
  std::string cname = std::string("ImplicitlyRestartedBlockLanczos");
  int MaxIter;             // Max iterations
  int Nstop;               // Number of evecs checked for convergence
  int Nu;                  // Number of vecs in the unit block
  int Nk;                  // Number of converged sought
  int Nm;                  // total number of vectors
  int Nblock_k;            // Nk/Nu
  int Nblock_m;            // Nm/Nu
  int Nconv_test_interval; // Number of skipped vectors when checking a convergence
  RealD eresid;
  IRBLdiagonalisation diagonalisation;
  ////////////////////////////////////
  // Embedded objects
  ////////////////////////////////////
  SortEigen<Field> _sort;
  LinearOperatorBase<Field> &_Linop;
  LinearOperatorBase<Field> &_SLinop; // for split
  OperatorFunction<Field> &_poly;
  GridRedBlackCartesian * f_grid;
  GridRedBlackCartesian * sf_grid;
  int mrhs;
  /////////////////////////
  // BLAS objects
  /////////////////////////
#ifdef GRID_CUDA
  cudaError_t cudaStat;
  CUDA_COMPLEX *w_acc, *evec_acc, *c_acc;
#endif
  int Nevec_acc; // Number of eigenvectors stored in the buffer evec_acc

  /////////////////////////
  // Constructor
  /////////////////////////
public:
  int split_test; // test split in the first iteration
  ImplicitlyRestartedBlockLanczos(LinearOperatorBase<Field> &Linop,  // op
                                  LinearOperatorBase<Field> &SLinop, // op
                                  GridRedBlackCartesian * FrbGrid,
                                  GridRedBlackCartesian * SFrbGrid,
                                  int _mrhs,
                                  OperatorFunction<Field> & poly,    // polynomial
                                  int _Nstop,               // really sought vecs
                                  int _Nconv_test_interval, // conv check interval
                                  int _Nu,                  // vecs in the unit block
                                  int _Nk,                  // sought vecs
                                  int _Nm,                  // total vecs
                                  RealD _eresid,            // resid in lmd deficit
                                  int _MaxIter,             // Max iterations
                                  IRBLdiagonalisation _diagonalisation = IRBLdiagonaliseWithEigen)
    : _Linop(Linop), _SLinop(SLinop), _poly(poly), sf_grid(SFrbGrid), f_grid(FrbGrid),
      Nstop(_Nstop), Nconv_test_interval(_Nconv_test_interval), mrhs(_mrhs),
      Nu(_Nu), Nk(_Nk), Nm(_Nm),
      Nblock_m(_Nm/_Nu), Nblock_k(_Nk/_Nu),
      //eresid(_eresid), MaxIter(10),
      eresid(_eresid), MaxIter(_MaxIter),
      diagonalisation(_diagonalisation), split_test(0),
      Nevec_acc(_Nu)
  { assert( (Nk%Nu==0) && (Nm%Nu==0) ); };
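
  ////////////////////////////////////////////////////////////////////////////
  // Minimal usage sketch (kept out of the build). Everything named here apart
  // from the constructor/calc() signatures of this class is an assumption the
  // caller must supply: a Hermitian operator HermOp and its split-grid twin
  // SHermOp, the red-black grids FrbGrid/SFrbGrid, mrhs, and the Chebyshev
  // acceleration window/order are all placeholders.
  ////////////////////////////////////////////////////////////////////////////
#if 0
  typedef LatticeFermion Field;
  Chebyshev<Field> Cheby(0.2,60.0,201);  // hypothetical spectral window/order
  ImplicitlyRestartedBlockLanczos<Field> IRBL(HermOp,SHermOp,FrbGrid,SFrbGrid,mrhs,
                                              Cheby,Nstop,Ntest,Nu,Nk,Nm,
                                              resid,MaxIter);
  std::vector<RealD> eval(Nm);
  std::vector<Field> evec(Nm,FrbGrid);
  std::vector<Field> src(Nu,FrbGrid);    // Nu block start vectors
  int Nconv;
  IRBL.calc(eval,evec,src,Nconv,LanczosType::irbl);
#endif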

  ////////////////////////////////
  // Helpers
  ////////////////////////////////
  static RealD normalize(Field& v, int if_print=0)
  {
    RealD nn = norm2(v);
    nn = sqrt(nn);
#if 0
    if(if_print && nn < 1e20)
      Glog<<"normalize: "<<nn<<std::endl;
#endif
    v = v * (1.0/nn);
    return nn;
  }

  void orthogonalize(Field& w, std::vector<Field>& evec, int k, int if_print)
  {
    typedef typename Field::scalar_type MyComplex;
    // MyComplex ip;
    ComplexD ip;

    for(int j=0; j<k; ++j){
      ip = innerProduct(evec[j],w);
      if(if_print && norm(ip)/norm2(w) > 1e-14)
        Glog<<"orthogonalize before: "<<j<<" "<<ip<<std::endl;
      w = w - ip * evec[j];
      if(if_print){
        ip = innerProduct(evec[j],w);
        if( norm(ip)/norm2(w) > 1e-14)
          Glog<<"orthogonalize after: "<<j<<" "<<ip<<std::endl;
      }
    }
    normalize(w,if_print);
  }

  void orthogonalize(Field& w, std::vector<Field>& evec, int k)
  {
    orthogonalize(w, evec, k, 1);
  }

  void orthogonalize(std::vector<Field>& w, int _Nu, std::vector<Field>& evec, int k, int if_print=0)
  {
    typedef typename Field::scalar_type MyComplex;
    MyComplex ip;
    // ComplexD ip;

    for(int j=0; j<k; ++j){
      for(int i=0; i<_Nu; ++i){
        ip = innerProduct(evec[j],w[i]);
        if(if_print && norm(ip)/norm2(w[i]) > 1e-14)
          Glog<<"orthogonalize before: "<<j<<" "<<ip<<std::endl;
        w[i] = w[i] - ip * evec[j];
        if(if_print){
          ip = innerProduct(evec[j],w[i]);
          if( norm(ip)/norm2(w[i]) > 1e-14)
            Glog<<"orthogonalize after: "<<j<<" "<<ip<<std::endl;
        }
      }
    }
  }

  void orthogonalize_blas(std::vector<Field>& w, std::vector<Field>& evec, int R, int do_print=0)
  {
#ifdef GRID_CUDA
    Glog << "cuBLAS orthogonalize" << std::endl;

    typedef typename Field::vector_object vobj;
    typedef typename vobj::scalar_type scalar_type;
    typedef typename vobj::vector_type vector_type;
    typedef typename Field::scalar_type MyComplex;

    GridBase *grid = w[0].Grid();
    const uint64_t sites = grid->lSites();

    int Nbatch = R/Nevec_acc;
    assert( R%Nevec_acc == 0 );
    // Glog << "nBatch, Nevec_acc, R, Nu = "
    //      << Nbatch << "," << Nevec_acc << "," << R << "," << Nu << std::endl;

    // stage the block of search vectors in the flattened buffer w_acc
    for (int col=0; col<Nu; ++col) {
      auto w_v = w[col].View();
      auto w_p = reinterpret_cast<CUDA_COMPLEX*>(&w_v[0]);
      // Glog << "col= "<<col<<std::endl;
      for (size_t row=0; row<sites*12; ++row)
        w_acc[col*sites*12+row] = w_p[row];
    }

    cublasHandle_t handle;
    cublasStatus_t stat;
    stat = cublasCreate(&handle);
    CUDA_COMPLEX alpha, beta;

    for (int b=0; b<Nbatch; ++b) {
      // stage the next Nevec_acc eigenvectors in evec_acc
      for (int col=0; col<Nevec_acc; ++col) {
        auto evec_v = evec[b*Nevec_acc+col].View();
        auto evec_p = reinterpret_cast<CUDA_COMPLEX*>(&evec_v[0]);
        // Glog << "col= "<<col<<std::endl;
        for (size_t row=0; row<sites*12; ++row)
          evec_acc[col*sites*12+row] = evec_p[row];
      }

      // projection coefficients c = evec^dagger w
      alpha = MAKE_CUDA_COMPLEX(1.0,0.0);
      beta  = MAKE_CUDA_COMPLEX(0.0,0.0);
      stat = CUDA_GEMM(handle, CUBLAS_OP_C, CUBLAS_OP_N,
                       Nevec_acc, Nu, 12*sites,
                       &alpha, evec_acc, 12*sites, w_acc, 12*sites,
                       &beta, c_acc, Nevec_acc);

#if 0 // a trivial test: subtract the projections field by field
      for (int i=0; i<Nu; ++i) {
        for (int j=0; j<Nevec_acc; ++j) {
          CUDA_COMPLEX z = c_acc[j+i*Nevec_acc];
          MyComplex ip(z.x,z.y);
          if (do_print)
            Glog << "c[" << j << "," << i << "] = " << z.x << " + i " << z.y << std::endl;
          w[i] = w[i] - ip * evec[b*Nevec_acc+j];
        }
      }
#else
      // w <- w - evec*c in a single GEMM
      alpha = MAKE_CUDA_COMPLEX(-1.0,0.0);
      beta  = MAKE_CUDA_COMPLEX(1.0,0.0);
      stat = CUDA_GEMM(handle, CUBLAS_OP_N, CUBLAS_OP_N,
                       12*sites, Nu, Nevec_acc,
                       &alpha, evec_acc, 12*sites,
                       c_acc, Nevec_acc,
                       &beta, w_acc, 12*sites);
      //Glog << stat << std::endl;
#endif
    }

#if 1
    // copy the projected block back into the lattice fields
    for (int col=0; col<Nu; ++col) {
      auto w_v = w[col].View();
      auto w_p = reinterpret_cast<CUDA_COMPLEX*>(&w_v[0]);
      for (size_t row=0; row<sites*12; ++row)
        w_p[row] = w_acc[col*sites*12+row];
    }
#endif
    cublasDestroy(handle);
#endif // GRID_CUDA
  }
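
  ////////////////////////////////////////////////////////////////////////////
  // The cuBLAS path above is classical block Gram-Schmidt written as GEMMs on
  // flattened fields, with E the staged eigenvector buffer (12*sites rows x
  // Nevec_acc columns) and W the search block (12*sites x Nu):
  //     C = E^dagger W     (Nevec_acc x Nu projection coefficients)
  //     W = W - E C        (the alpha=-1, beta=1 CUDA_GEMM call)
  // batched over R/Nevec_acc chunks of the eigenvector set. The scalar
  // equivalent is w[i] -= <evec[j],w[i]> evec[j] for all pairs (j,i), as in
  // the plain orthogonalize() routines above.
  ////////////////////////////////////////////////////////////////////////////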

#if 0
  // Legacy (pre-accelerator API) multi-vector inner product, kept for reference.
  void innerProduct(std::vector<ComplexD> &inner,
                    std::vector<Field>& lhs, int llhs,
                    std::vector<Field>& rhs, int lrhs)
  {
    typedef typename Field::vector_object vobj;
    typedef typename vobj::scalar_type scalar_type;
    typedef typename vobj::vector_typeD vector_type;
    GridBase *grid = lhs[0]._grid;
    assert(grid == rhs[0]._grid);
    const int pad = 8;
    int total = llhs*lrhs;
    assert(inner.size()==total);
    int sum_size=grid->SumArraySize();
    // std::vector<ComplexD> inner(total);
    Vector<ComplexD> sumarray(sum_size*pad*total);

    parallel_for(int thr=0;thr<sum_size;thr++){
      int mywork, myoff;
      GridThread::GetWork(grid->oSites(),thr,mywork,myoff);

      std::vector< decltype(innerProductD(lhs[0]._odata[0],rhs[0]._odata[0])) > vinner(total,zero); // private to thread; sub summation
      for(int ss=myoff;ss<mywork+myoff;ss++){
        for(int s=0;s<llhs;s++){
          for(int t=0;t<lrhs;t++){
            vinner[s*lrhs+t] += innerProductD(lhs[s]._odata[ss],rhs[t]._odata[ss]);
          }
        }
      }
      for(int i=0;i<total;i++){
        sumarray[i*pad+thr*pad*total] = Reduce(TensorRemove(vinner[i]));
      }
    }

    for(int i=0;i<total;i++){
      ComplexD tmp(0.0,0.0);
      for(int thr=0;thr<sum_size;thr++)
        tmp += sumarray[i*pad+thr*pad*total];
      grid->GlobalSum(tmp);
      inner[i]=tmp;
    }
    // return inner;
  }
#endif

  // orthogonalize w against the leading vector of each of the first k blocks
  void orthogonalize_blockhead(Field& w, std::vector<Field>& evec, int k, int Nu)
  {
    typedef typename Field::scalar_type MyComplex;
    MyComplex ip;

    for(int j=0; j<k; ++j){
      ip = innerProduct(evec[j*Nu],w);
      w = w - ip * evec[j*Nu];
    }
    normalize(w);
  }

  void calc(std::vector<RealD>& eval, std::vector<Field>& evec,
            const std::vector<Field>& src, int& Nconv, LanczosType Impl)
  {
#ifdef GRID_CUDA
    GridBase *grid = src[0].Grid();
    grid->show_decomposition();
    // printf("GRID_CUDA\n");

    // set eigenvector buffers for the cuBLAS calls
    //const uint64_t nsimd = grid->Nsimd();
    const uint64_t sites = grid->lSites();

    cudaStat = cudaMallocManaged((void **)&w_acc, Nu*sites*12*sizeof(CUDA_COMPLEX));
    // Glog << "w_acc= "<<w_acc<<std::endl;
    cudaStat = cudaMallocManaged((void **)&evec_acc, Nevec_acc*sites*12*sizeof(CUDA_COMPLEX));
    cudaStat = cudaMallocManaged((void **)&c_acc, Nu*Nevec_acc*sizeof(CUDA_COMPLEX));
#endif
    switch (Impl) {
      case LanczosType::irbl:
        calc_irbl(eval,evec,src,Nconv);
        break;
      case LanczosType::rbl:
        calc_rbl(eval,evec,src,Nconv);
        break;
    }
#ifdef GRID_CUDA
    // free the eigenvector buffers
    cudaFree(w_acc);
    cudaFree(evec_acc);
    cudaFree(c_acc);
#endif
  }

  void calc_irbl(std::vector<RealD>& eval, std::vector<Field>& evec,
                 const std::vector<Field>& src, int& Nconv)
  {
    std::string fname = std::string(cname+"::calc_irbl()");
    GridBase *grid = evec[0].Grid();
    assert(grid == src[0].Grid());
    assert( Nu == src.size() );

    Glog << std::string(74,'*') << std::endl;
    Glog << fname + " starting iteration 0 / "<< MaxIter<< std::endl;
    Glog << std::string(74,'*') << std::endl;
    Glog <<" -- seek   Nk    = "<< Nk    <<" vectors"<< std::endl;
    Glog <<" -- accept Nstop = "<< Nstop <<" vectors"<< std::endl;
    Glog <<" -- total  Nm    = "<< Nm    <<" vectors"<< std::endl;
    Glog <<" -- size of eval = "<< eval.size() << std::endl;
    Glog <<" -- size of evec = "<< evec.size() << std::endl;
    if ( diagonalisation == IRBLdiagonaliseWithEigen ) {
      Glog << "Diagonalisation is Eigen "<< std::endl;
#ifdef USE_LAPACK
    } else if ( diagonalisation == IRBLdiagonaliseWithLAPACK ) {
      Glog << "Diagonalisation is LAPACK "<< std::endl;
#endif
    } else {
      abort();
    }
    Glog << std::string(74,'*') << std::endl;

    assert(Nm == evec.size() && Nm == eval.size());

    std::vector<std::vector<ComplexD>> lmd(Nu,std::vector<ComplexD>(Nm,0.0));
    std::vector<std::vector<ComplexD>> lme(Nu,std::vector<ComplexD>(Nm,0.0));
    std::vector<std::vector<ComplexD>> lmd2(Nu,std::vector<ComplexD>(Nm,0.0));
    std::vector<std::vector<ComplexD>> lme2(Nu,std::vector<ComplexD>(Nm,0.0));
    std::vector<RealD> eval2(Nm);
    std::vector<RealD> resid(Nk);

    Eigen::MatrixXcd Qt = Eigen::MatrixXcd::Zero(Nm,Nm);
    Eigen::MatrixXcd Q  = Eigen::MatrixXcd::Zero(Nm,Nm);

    std::vector<int>   Iconv(Nm);
    std::vector<Field> B(Nm,grid); // waste of space replicating

    std::vector<Field> f(Nu,grid);
    std::vector<Field> f_copy(Nu,grid);
    Field v(grid);

    Nconv = 0;

    RealD beta_k;

    // set initial vector
    for (int i=0; i<Nu; ++i) {
      Glog << "norm2(src[" << i << "])= "<< norm2(src[i]) << std::endl;
      evec[i] = src[i];
      orthogonalize(evec[i],evec,i);
    }

    // ...
    // (main loop elided: Nblock_k initial blockwiseStep()s, then up to MaxIter
    //  restarts of (i) extension to Nblock_m blocks, (ii) diagonalize() of the
    //  block tridiagonal matrix, (iii) a convergence test on every
    //  Nconv_test_interval-th Ritz vector, and (iv) an implicit-shift QR
    //  contraction of the basis from Nm back to Nk via shiftedQRDecompEigen())
  }

  void calc_rbl(std::vector<RealD>& eval, std::vector<Field>& evec,
                const std::vector<Field>& src, int& Nconv)
  {
    std::string fname = std::string(cname+"::calc_rbl()");
    GridBase *grid = evec[0].Grid();
    assert(grid == src[0].Grid());
    assert( Nu == src.size() );

    int Np = (Nm-Nk);
    if (Np > 0 && MaxIter > 1) Np /= MaxIter;
    int Nblock_p = Np/Nu;

    for(int i=0; i< evec.size(); i++) evec[i].Advise()=AdviseInfrequentUse;

    Glog << std::string(74,'*') << std::endl;
    Glog << fname + " starting iteration 0 / "<< MaxIter<< std::endl;
    Glog << std::string(74,'*') << std::endl;
    Glog <<" -- seek (min) Nk    = "<< Nk <<" vectors"<< std::endl;
    Glog <<" -- seek (inc) Np    = "<< Np <<" vectors"<< std::endl;
    Glog <<" -- seek (max) Nm    = "<< Nm <<" vectors"<< std::endl;
    Glog <<" -- accept     Nstop = "<< Nstop <<" vectors"<< std::endl;
    Glog <<" -- size of eval     = "<< eval.size() << std::endl;
    Glog <<" -- size of evec     = "<< evec.size() << std::endl;
    if ( diagonalisation == IRBLdiagonaliseWithEigen ) {
      Glog << "Diagonalisation is Eigen "<< std::endl;
#ifdef USE_LAPACK
    } else if ( diagonalisation == IRBLdiagonaliseWithLAPACK ) {
      Glog << "Diagonalisation is LAPACK "<< std::endl;
#endif
    } else {
      abort();
    }
    Glog << std::string(74,'*') << std::endl;

    assert(Nm == evec.size() && Nm == eval.size());

    std::vector<std::vector<ComplexD>> lmd(Nu,std::vector<ComplexD>(Nm,0.0));
    std::vector<std::vector<ComplexD>> lme(Nu,std::vector<ComplexD>(Nm,0.0));
    std::vector<std::vector<ComplexD>> lmd2(Nu,std::vector<ComplexD>(Nm,0.0));
    std::vector<std::vector<ComplexD>> lme2(Nu,std::vector<ComplexD>(Nm,0.0));
    std::vector<RealD> eval2(Nk);
    std::vector<RealD> resid(Nm);

    Eigen::MatrixXcd Qt = Eigen::MatrixXcd::Zero(Nm,Nm);
    Eigen::MatrixXcd Q  = Eigen::MatrixXcd::Zero(Nm,Nm);

    std::vector<int>   Iconv(Nm);
    int Ntest = Nu;
    // std::vector<Field> B(Nm,grid); // waste of space replicating
    std::vector<Field> B(1,grid); // waste of space replicating

    std::vector<Field> f(Nu,grid);
    std::vector<Field> f_copy(Nu,grid);
    Field v(grid);

    Nconv = 0;

    RealD beta_k;

    // set initial vector
    for (int i=0; i<Nu; ++i) {
      Glog << "norm2(src[" << i << "])= "<< norm2(src[i]) << std::endl;
      evec[i] = src[i];
      orthogonalize(evec[i],evec,i);
    }

    // ...
    // (main loop elided: Nblock_k initial blockwiseStep()s, then up to MaxIter
    //  restarts each appending Nblock_p new blocks, re-diagonalizing, and
    //  testing convergence on every Nconv_test_interval-th Ritz vector)

    // final rotation of the basis and extraction of the converged pairs
    std::vector<Field> Btmp(Nconv,grid); // waste of space replicating
    // ... (rotation into Btmp and copy-back to evec[0..Nconv) elided)
  }
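
  ////////////////////////////////////////////////////////////////////////////
  // Worked example of the calc_rbl() bookkeeping (numbers illustrative only):
  // with Nu=4, Nk=40, Nm=60 and MaxIter=5 the increment per restart is
  //     Np = (Nm-Nk)/MaxIter = 20/5 = 4 vectors,  Nblock_p = Np/Nu = 1,
  // i.e. each restart appends one block of Nu vectors until the basis reaches
  // Nm, whereas calc_irbl() always works with the full Nm-vector basis and
  // contracts back to Nk with implicit QR shifts.
  ////////////////////////////////////////////////////////////////////////////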

  void blockwiseStep(std::vector<std::vector<ComplexD>>& lmd,
                     std::vector<std::vector<ComplexD>>& lme,
                     std::vector<Field>& evec,
                     std::vector<Field>& w,
                     std::vector<Field>& w_copy,
                     int b)
  {
    const RealD tiny = 1.0e-20;

    int Nu = w.size();
    int Nm = evec.size();
    assert( b < Nm/Nu );
    // GridCartesian *grid = evec[0]._grid;

    // converts block index to full indices for an interval [L,R)
    int L = Nu*b;
    int R = Nu*(b+1);

    Real beta;

    Glog << "Using split grid"<< std::endl;
    // LatticeGaugeField s_Umu(SGrid);
    assert((Nu%mrhs)==0);
    std::vector<Field> in(mrhs,f_grid);

    Field s_in(sf_grid);
    Field s_out(sf_grid);
    // unnecessary copy. Can or should it be avoided?
    // apply the operator polynomial to the current block, mrhs vectors at a time
    int k_start = 0;
    while ( k_start < Nu) {
      Glog << "k_start= "<<k_start<<std::endl;
      for (int u=0; u<mrhs; ++u) in[u] = evec[L+k_start+u];
      Grid_split(in, s_in);
      _poly(_SLinop, s_in, s_out);
      Grid_unsplit(in, s_out);
      for (int u=0; u<mrhs; ++u) w[k_start+u] = in[u];
      k_start += mrhs;
    }
    Glog << "Using split grid done"<< std::endl;
    for (int u=0; u<Nu; ++u) w_copy[u] = w[u];

    // w <- w - evec_{b-1} lme_{b-1}^dagger : subtract the previous off-diagonal block
    if (b>0) {
      for (int u=0; u<Nu; ++u) {
        for (int k=L-Nu; k<L; ++k)
          w[u] = w[u] - conjugate(lme[u][k]) * evec[k];
      }
    }

    // lmd_b = evec_b^dagger w ; w <- w - evec_b lmd_b : project out the diagonal block
    for (int u=0; u<Nu; ++u) {
      for (int k=L; k<R; ++k) {
        lmd[u][k] = innerProduct(evec[k],w[u]);
        w[u] = w[u] - lmd[u][k] * evec[k];
      }
    }

    // full re-orthogonalization of the new block against the old basis
    if (b>0) {
      for (int u=0; u<Nu; ++u) {
        // ... (re-orthogonalization against evec[0..L) elided;
        //      cf. orthogonalize() / orthogonalize_blas())
      }
    }
    if (b>0) {
      // orthogonalize_blockhead(w[0],evec,b,Nu);
      // for (int u=1; u<Nu; ++u) orthogonalize(w[u],w,u);
      // ... (QR factorization of the block w, filling lme[.][L..R) and, unless
      //      the block norm falls below `tiny`, the next evec block; elided)
    }
  }

  void diagonalize_Eigen(std::vector<RealD>& eval,
                         std::vector<std::vector<ComplexD>>& lmd,
                         std::vector<std::vector<ComplexD>>& lme,
                         int Nu, int Nk, int Nm,
                         Eigen::MatrixXcd & Qt, // Nm x Nm
                         GridBase *grid)
  {
    assert( Nk%Nu == 0 && Nm%Nu == 0 );
    assert( Nk <= Nm );
    Eigen::MatrixXcd BlockTriDiag = Eigen::MatrixXcd::Zero(Nk,Nk);

    // diagonal blocks from lmd, off-diagonal blocks from lme (Hermitian fill)
    for ( int u=0; u<Nu; ++u ) {
      for ( int k=0; k<Nk; ++k ) {
        BlockTriDiag(k,u+(k/Nu)*Nu) = lmd[u][k];
      }
    }
    for ( int u=0; u<Nu; ++u ) {
      for ( int k=Nu; k<Nk; ++k ) {
        BlockTriDiag(k-Nu,u+(k/Nu)*Nu) = conjugate(lme[u][k-Nu]);
        BlockTriDiag(u+(k/Nu)*Nu,k-Nu) = lme[u][k-Nu];
      }
    }

    Eigen::SelfAdjointEigenSolver<Eigen::MatrixXcd> eigensolver(BlockTriDiag);

    for (int i = 0; i < Nk; i++) {
      eval[Nk-1-i] = eigensolver.eigenvalues()(i);
    }
    for (int i = 0; i < Nk; i++) {
      for (int j = 0; j < Nk; j++) {
        Qt(j,Nk-1-i) = eigensolver.eigenvectors()(j,i);
        //Qt(Nk-1-i,j) = eigensolver.eigenvectors()(i,j);
        //Qt(i,j) = eigensolver.eigenvectors()(i,j);
      }
    }
  }

#ifdef USE_LAPACK
  void diagonalize_lapack(std::vector<RealD>& eval,
                          std::vector<std::vector<ComplexD>>& lmd,
                          std::vector<std::vector<ComplexD>>& lme,
                          int Nu, int Nk, int Nm,
                          Eigen::MatrixXcd & Qt, // Nm x Nm
                          GridBase *grid)
  {
    Glog << "diagonalize_lapack: Nu= "<<Nu<<" Nk= "<<Nk<<std::endl;
    assert( Nk%Nu == 0 && Nm%Nu == 0 );
    assert( Nk <= Nm );

    const int NN = Nk;
    Eigen::MatrixXcd BlockTriDiag = Eigen::MatrixXcd::Zero(Nk,Nk);
    unpackHermitBlockTriDiagMatToEigen(lmd,lme,Nu,Nk/Nu,Nk,Nm,BlockTriDiag);

    // MKL-style workspace; the original allocation scheme was lost, raw arrays
    // with .real/.imag access match the MKL_Complex16 layout used below
    double *evals_tmp = new double[NN];
    MKL_Complex16 *evec_tmp = new MKL_Complex16[NN*NN];
    memset(evals_tmp,0,sizeof(double)*NN);
    memset(evec_tmp,0,sizeof(MKL_Complex16)*NN*NN);

    // each node diagonalises its own slice [il,iu] of the spectrum
    int total = grid->_Nprocessors;
    int node  = grid->_processor;
    int interval = (NN/total)+1;
    double vl = 0.0, vu = 0.0;
    MKL_INT il = interval*node+1, iu = interval*(node+1);
    if (iu > NN) iu = NN;
    Glog << "node "<<node<<" il "<<il<<" iu "<<iu<<std::endl;

    // ... (call to the LAPACK range eigensolver (zheevx/zheevr family) on the
    //      packed BlockTriDiag, writing this node's pairs at offset 0; elided)

    if (il <= NN) {
      // move this node's eigenpairs to their global slots, zeroing the rest
      for (int i = iu-1; i >= il-1; i--){
        evals_tmp[i] = evals_tmp[i - (il-1)];
        if (il>1) evals_tmp[i-(il-1)]=0.;
        for (int j = 0; j< NN; j++){
          evec_tmp[i*NN+j] = evec_tmp[(i - (il-1))*NN+j];
          if (il>1) {
            evec_tmp[(i-(il-1))*NN+j].imag=0.;
            evec_tmp[(i-(il-1))*NN+j].real=0.;
          }
        }
      }
    }
    {
      // combine the per-node slices
      grid->GlobalSumVector(evals_tmp,NN);
      grid->GlobalSumVector((double*)evec_tmp,2*NN*NN);
    }

    // Safer to sort instead of just reversing it,
    // but the document of the routine says evals are sorted in increasing order.
    // qr gives evals in decreasing order.
    for(int i=0;i<Nk;i++){
      eval[Nk-1-i] = evals_tmp[i];
      for(int j=0;j<Nk;j++){
        Qt(j,Nk-1-i) = ComplexD( evec_tmp[i*Nk+j].real, evec_tmp[i*Nk+j].imag );
        // ( evec_tmp[(Nk-1-j)*Nk+Nk-1-i].real,
        //   evec_tmp[(Nk-1-j)*Nk+Nk-1-i].imag );
      }
    }
    delete[] evals_tmp;
    delete[] evec_tmp;

    if (1) { // cross-check against the Eigen solver
      Eigen::SelfAdjointEigenSolver<Eigen::MatrixXcd> eigensolver(BlockTriDiag);
      for (int i = 0; i < Nk; i++) {
        Glog << "eval = "<<eigensolver.eigenvalues()(i)<<std::endl;
      }
    }
  }
#endif

  void diagonalize(std::vector<RealD>& eval,
                   std::vector<std::vector<ComplexD>>& lmd,
                   std::vector<std::vector<ComplexD>>& lme,
                   int Nu, int Nk, int Nm,
                   Eigen::MatrixXcd & Qt,
                   GridBase *grid)
  {
    Qt = Eigen::MatrixXcd::Identity(Nm,Nm);
    if ( diagonalisation == IRBLdiagonaliseWithEigen ) {
      diagonalize_Eigen(eval,lmd,lme,Nu,Nk,Nm,Qt,grid);
#ifdef USE_LAPACK
    } else if ( diagonalisation == IRBLdiagonaliseWithLAPACK ) {
      diagonalize_lapack(eval,lmd,lme,Nu,Nk,Nm,Qt,grid);
#endif
    } else {
      assert(0);
    }
  }

  void unpackHermitBlockTriDiagMatToEigen(
         std::vector<std::vector<ComplexD>>& lmd,
         std::vector<std::vector<ComplexD>>& lme,
         int Nu, int Nb, int Nk, int Nm,
         Eigen::MatrixXcd& M)
  {
    //Glog << "unpackHermitBlockTriDiagMatToEigen() begin" << '\n';
    assert( Nk%Nu == 0 && Nm%Nu == 0 );
    assert( Nk <= Nm );
    M = Eigen::MatrixXcd::Zero(Nk,Nk);

    // rearrange
    for ( int u=0; u<Nu; ++u ) {
      for ( int k=0; k<Nk; ++k ) {
        M(k,u+(k/Nu)*Nu) = lmd[u][k];
      }
    }
    for ( int u=0; u<Nu; ++u ) {
      for ( int k=Nu; k<Nk; ++k ) {
        M(k-Nu,u+(k/Nu)*Nu) = conjugate(lme[u][k-Nu]);
        M(u+(k/Nu)*Nu,k-Nu) = lme[u][k-Nu];
      }
    }
    //Glog << "unpackHermitBlockTriDiagMatToEigen() end" << '\n';
  }

  void packHermitBlockTriDiagMatfromEigen(
         std::vector<std::vector<ComplexD>>& lmd,
         std::vector<std::vector<ComplexD>>& lme,
         int Nu, int Nb, int Nk, int Nm,
         Eigen::MatrixXcd& M)
  {
    //Glog << "packHermitBlockTriDiagMatfromEigen() begin" << '\n';
    assert( Nk%Nu == 0 && Nm%Nu == 0 );
    assert( Nk <= Nm );

    // rearrange (inverse of unpackHermitBlockTriDiagMatToEigen)
    for ( int u=0; u<Nu; ++u ) {
      for ( int k=0; k<Nk; ++k ) {
        lmd[u][k] = M(k,u+(k/Nu)*Nu);
      }
    }
    for ( int u=0; u<Nu; ++u ) {
      for ( int k=Nu; k<Nk; ++k ) {
        lme[u][k-Nu] = M(u+(k/Nu)*Nu,k-Nu);
      }
    }
    //Glog << "packHermitBlockTriDiagMatfromEigen() end" << '\n';
  }
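
  ////////////////////////////////////////////////////////////////////////////
  // One implicitly shifted QR sweep on the Hermitian block tridiagonal matrix
  // M, with shift Dsh (parameter name as reconstructed below):
  //     M - Dsh*I = Q R,   M <- R Q + Dsh*I = Q^dagger M Q,
  // while the product of the per-shift Q's is accumulated in Qprod, so the
  // Lanczos basis only has to be rotated once at the end of a restart.
  ////////////////////////////////////////////////////////////////////////////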
  void shiftedQRDecompEigen(Eigen::MatrixXcd& M, int Nu, int Nm,
                            RealD Dsh, Eigen::MatrixXcd& Qprod)
  {
    //Glog << "shiftedQRDecompEigen() begin" << '\n';
    Eigen::MatrixXcd Mtmp = Eigen::MatrixXcd::Zero(Nm,Nm);
    Eigen::MatrixXcd Q    = Eigen::MatrixXcd::Zero(Nm,Nm);
    Eigen::MatrixXcd R    = Eigen::MatrixXcd::Zero(Nm,Nm);

    Mtmp = M;
    for (int i=0; i<Nm; ++i) Mtmp(i,i) -= Dsh;

    Eigen::HouseholderQR<Eigen::MatrixXcd> QRD(Mtmp);
    Q = QRD.householderQ();
    R = QRD.matrixQR(); // upper triangular part is the R matrix.
                        // lower triangular part used to represent series
                        // of Q sequence.

    // equivalent operation of Qprod *= Q
    //M = Eigen::MatrixXcd::Zero(Nm,Nm);
    //for (int i=0; i<Nm; ++i) { ... }
    Mtmp = Eigen::MatrixXcd::Zero(Nm,Nm);
    for (int i=0; i<Nm; ++i) {
      // Qprod is band limited, so restrict the inner sum accordingly
      int kmax = 3*Nu + i + 1;
      if (kmax > Nm) kmax = Nm;
      for (int k=i; k<kmax; ++k) {
        for (int j=0; j<Nm; ++j) {
          Mtmp(i,j) += Qprod(i,k)*Q(k,j);
        }
      }
    }
    Qprod = Mtmp;

    // M <- R Q + Dsh*I, using only the upper triangular (true R) part
    M = R.triangularView<Eigen::Upper>() * Q;
    for (int i=0; i<Nm; ++i) M(i,i) += Dsh;

    // restore Hermiticity explicitly
    for (int i=0; i<Nm; ++i) {
      for (int j=0; j<Nm; ++j) {
        if (j>i) M(i,j) = conj(M(j,i));
        // if (i-j > Nu || j-i > Nu) M(i,j) = 0.;
      }
    }
    //Glog << "shiftedQRDecompEigen() end" << '\n';
  }

  void exampleQRDecompEigen(void)
  {
    Eigen::MatrixXd A = Eigen::MatrixXd::Zero(3,3);
    Eigen::MatrixXd Q = Eigen::MatrixXd::Zero(3,3);
    Eigen::MatrixXd R = Eigen::MatrixXd::Zero(3,3);
    Eigen::MatrixXd P = Eigen::MatrixXd::Zero(3,3);

    A(0,0) = 12.0; A(0,1) = -51.0; A(0,2) =   4.0;
    A(1,0) =  6.0; A(1,1) = 167.0; A(1,2) = -68.0;
    A(2,0) = -4.0; A(2,1) =  24.0; A(2,2) = -41.0;

    Glog << "matrix A before ColPivHouseholder" << std::endl;
    for ( int i=0; i<3; i++ ) {
      for ( int j=0; j<3; j++ ) {
        Glog << "A[" << i << "," << j << "] = " << A(i,j) << '\n';
      }
    }
    Glog << std::endl;

    Eigen::ColPivHouseholderQR<Eigen::MatrixXd> QRD(A);

    Glog << "matrix A after ColPivHouseholder" << std::endl;
    for ( int i=0; i<3; i++ ) {
      for ( int j=0; j<3; j++ ) {
        Glog << "A[" << i << "," << j << "] = " << A(i,j) << '\n';
      }
    }
    Glog << std::endl;

    Glog << "HouseholderQ with sequence length = nonzeroPivots" << std::endl;
    Q = QRD.householderQ().setLength(QRD.nonzeroPivots());
    for ( int i=0; i<3; i++ ) {
      for ( int j=0; j<3; j++ ) {
        Glog << "Q[" << i << "," << j << "] = " << Q(i,j) << '\n';
      }
    }
    Glog << std::endl;

    Glog << "HouseholderQ with sequence length = 1" << std::endl;
    Q = QRD.householderQ().setLength(1);
    for ( int i=0; i<3; i++ ) {
      for ( int j=0; j<3; j++ ) {
        Glog << "Q[" << i << "," << j << "] = " << Q(i,j) << '\n';
      }
    }
    Glog << std::endl;

    Glog << "HouseholderQ with sequence length = 2" << std::endl;
    Q = QRD.householderQ().setLength(2);
    for ( int i=0; i<3; i++ ) {
      for ( int j=0; j<3; j++ ) {
        Glog << "Q[" << i << "," << j << "] = " << Q(i,j) << '\n';
      }
    }
    Glog << std::endl;

    Glog << "matrixR" << std::endl;
    R = QRD.matrixR();
    for ( int i=0; i<3; i++ ) {
      for ( int j=0; j<3; j++ ) {
        Glog << "R[" << i << "," << j << "] = " << R(i,j) << '\n';
      }
    }
    Glog << std::endl;

    Glog << "rank = " << QRD.rank() << std::endl;
    Glog << "threshold = " << QRD.threshold() << std::endl;

    Glog << "matrixP" << std::endl;
    P = QRD.colsPermutation();
    for ( int i=0; i<3; i++ ) {
      for ( int j=0; j<3; j++ ) {
        Glog << "P[" << i << "," << j << "] = " << P(i,j) << '\n';
      }
    }
    Glog << std::endl;

    Glog << "QR decomposition without column pivoting" << std::endl;

    A(0,0) = 12.0; A(0,1) = -51.0; A(0,2) =   4.0;
    A(1,0) =  6.0; A(1,1) = 167.0; A(1,2) = -68.0;
    A(2,0) = -4.0; A(2,1) =  24.0; A(2,2) = -41.0;

    Glog << "matrix A before Householder" << std::endl;
    for ( int i=0; i<3; i++ ) {
      for ( int j=0; j<3; j++ ) {
        Glog << "A[" << i << "," << j << "] = " << A(i,j) << '\n';
      }
    }
    Glog << std::endl;

    Eigen::HouseholderQR<Eigen::MatrixXd> QRDplain(A);

    Glog << "HouseholderQ" << std::endl;
    Q = QRDplain.householderQ();
    for ( int i=0; i<3; i++ ) {
      for ( int j=0; j<3; j++ ) {
        Glog << "Q[" << i << "," << j << "] = " << Q(i,j) << '\n';
      }
    }
    Glog << std::endl;

    Glog << "matrix A after Householder" << std::endl;
    for ( int i=0; i<3; i++ ) {
      for ( int j=0; j<3; j++ ) {
        Glog << "A[" << i << "," << j << "] = " << A(i,j) << '\n';
      }
    }
    Glog << std::endl;
  }
};
}
#undef Glog
#undef USE_LAPACK
#undef CUDA_COMPLEX
#undef CUDA_FLOAT
#undef MAKE_CUDA_COMPLEX
#undef CUDA_GEMM
#endif