mirror of
				https://github.com/paboyle/Grid.git
				synced 2025-11-04 05:54:32 +00:00 
			
		
		
		
	Compare commits
	
		
			81 Commits
		
	
	
		
			feature/dw
			...
			FgridStagg
		
	
	| Author | SHA1 | Date | |
|---|---|---|---|
| 
						 | 
					4ac85b3e8f | ||
| 
						 | 
					dd8cd8a8e8 | ||
| 
						 | 
					0cccb3dc82 | ||
| 
						 | 
					5d44346be3 | ||
| 
						 | 
					3a754fcd51 | ||
| 
						 | 
					1ef424b139 | ||
| 
						 | 
					034de160bf | ||
| 
						 | 
					f9e28577f3 | ||
| 
						 | 
					8a3aae98f6 | ||
| 
						 | 
					8309f2364b | ||
| 
						 | 
					cac1750078 | ||
| 
						 | 
					137886c316 | ||
| 
						 | 
					27936900e6 | ||
| 
						 | 
					9fe6ac71ea | ||
| 
						 | 
					ef61b549e6 | ||
| 
						 | 
					f1fa00b71b | ||
| 
						 | 
					bf58557fb1 | ||
| 
						 | 
					10cb37f504 | ||
| 
						 | 
					1374c943d4 | ||
| 
						 | 
					a1d80282ec | ||
| 
						 | 
					4eb8bbbebe | ||
| 
						 | 
					d1c6288c5f | ||
| 
						 | 
					dd949bc428 | ||
| 
						 | 
					bb7378cfc3 | ||
| 
						 | 
					f0e084a88c | ||
| 
						 | 
					153672d8ec | ||
| 
						 | 
					08ca338875 | ||
| 
						 | 
					f7cbf82c04 | ||
| 
						 | 
					07009c569a | ||
| 
						 | 
					09f4cdb11e | ||
| 
						 | 
					1e54882f71 | ||
| 
						 | 
					d54807b8c0 | ||
| 
						 | 
					5625b47c7d | ||
| 
						 | 
					eb6153080a | ||
| 
						 | 
					f7072d1ac2 | ||
| 
						 | 
					a6eeea777b | ||
| 
						 | 
					77f7737ccc | ||
| 
						 | 
					f9df685cde | ||
| 
						 | 
					3006663b9c | ||
| 
						 | 
					0145685f96 | ||
| 
						 | 
					e73e4b4002 | ||
| 
						 | 
					caa6605b43 | ||
| 
						 | 
					522c9248ae | ||
| 
						 | 
					191fbf85fc | ||
| 
						 | 
					93650f3a61 | ||
| 
						 | 
					cab4b4d063 | ||
| 
						 | 
					cf4b30b2dd | ||
| 
						 | 
					c51d0b4078 | ||
| 
						 | 
					2f4cbeb4d5 | ||
| 
						 | 
					fb7c4fb815 | ||
| 
						 | 
					00bb71e5af | ||
| 
						 | 
					cfed2c1ea0 | ||
| 
						 | 
					b1b15f0b70 | ||
| 
						 | 
					927c7ae3ed | ||
| 
						 | 
					05d04ceff8 | ||
| 
						 | 
					8313367a50 | ||
| 
						 | 
					5c479ce663 | ||
| 
						 | 
					4bf9d65bf8 | ||
| 
						 | 
					3a056c4dff | ||
| 
						 | 
					b0ba651654 | ||
| 
						 | 
					25d4c175c3 | ||
| 
						 | 
					a8d7986e1c | ||
| 
						 | 
					92ec509bfa | ||
| 
						 | 
					e80a87ff7f | ||
| 
						 | 
					867fe93018 | ||
| 
						 | 
					09651c3326 | ||
| 
						 | 
					f87f2a3f8b | ||
| 
						 | 
					a07556dd5f | ||
| 
						 | 
					f80a847aef | ||
| 
						 | 
					93cb5d4e97 | ||
| 
						 | 
					9e48b7dfda | ||
| 
						 | 
					d0c2c9c71f | ||
| 
						 | 
					c8cafa77ca | ||
| 
						 | 
					a3bcad3804 | ||
| 
						 | 
					5a5b66292b | ||
| 
						 | 
					e63be32ad2 | ||
| 
						 | 
					6aa106d906 | ||
| 
						 | 
					33d59c8869 | ||
| 
						 | 
					a833fd8dbf | ||
| 
						 | 
					e9712bc7fb | ||
| 
						 | 
					0cd6b1858c | 
@@ -5,7 +5,7 @@ EIGEN_URL='http://bitbucket.org/eigen/eigen/get/3.3.3.tar.bz2'
 | 
			
		||||
echo "-- deploying Eigen source..."
 | 
			
		||||
wget ${EIGEN_URL} --no-check-certificate
 | 
			
		||||
./scripts/update_eigen.sh `basename ${EIGEN_URL}`
 | 
			
		||||
rm `basename ${EIGEN_URL}`
 | 
			
		||||
#rm `basename ${EIGEN_URL}`
 | 
			
		||||
 | 
			
		||||
echo '-- generating Make.inc files...'
 | 
			
		||||
./scripts/filelist
 | 
			
		||||
 
 | 
			
		||||
@@ -550,6 +550,7 @@ AC_CONFIG_FILES(tests/forces/Makefile)
 | 
			
		||||
AC_CONFIG_FILES(tests/hadrons/Makefile)
 | 
			
		||||
AC_CONFIG_FILES(tests/hmc/Makefile)
 | 
			
		||||
AC_CONFIG_FILES(tests/solver/Makefile)
 | 
			
		||||
AC_CONFIG_FILES(tests/lanczos/Makefile)
 | 
			
		||||
AC_CONFIG_FILES(tests/smearing/Makefile)
 | 
			
		||||
AC_CONFIG_FILES(tests/qdpxx/Makefile)
 | 
			
		||||
AC_CONFIG_FILES(tests/testu01/Makefile)
 | 
			
		||||
 
 | 
			
		||||
@@ -39,6 +39,10 @@ Author: Peter Boyle <paboyle@ph.ed.ac.uk>
 | 
			
		||||
#include <Grid/algorithms/approx/MultiShiftFunction.h>
 | 
			
		||||
#include <Grid/algorithms/approx/Forecast.h>
 | 
			
		||||
 | 
			
		||||
#include <Grid/algorithms/densematrix/DenseMatrix.h>
 | 
			
		||||
#include <Grid/algorithms/densematrix/Francis.h>
 | 
			
		||||
#include <Grid/algorithms/densematrix/Householder.h>
 | 
			
		||||
 | 
			
		||||
#include <Grid/algorithms/iterative/ConjugateGradient.h>
 | 
			
		||||
#include <Grid/algorithms/iterative/ConjugateResidual.h>
 | 
			
		||||
#include <Grid/algorithms/iterative/NormalEquations.h>
 | 
			
		||||
@@ -48,6 +52,8 @@ Author: Peter Boyle <paboyle@ph.ed.ac.uk>
 | 
			
		||||
#include <Grid/algorithms/iterative/BlockConjugateGradient.h>
 | 
			
		||||
#include <Grid/algorithms/iterative/ConjugateGradientReliableUpdate.h>
 | 
			
		||||
#include <Grid/algorithms/iterative/ImplicitlyRestartedLanczos.h>
 | 
			
		||||
#include <Grid/algorithms/iterative/ImplicitlyRestartedLanczosCJ.h>
 | 
			
		||||
#include <Grid/algorithms/iterative/SimpleLanczos.h>
 | 
			
		||||
#include <Grid/algorithms/CoarsenedMatrix.h>
 | 
			
		||||
#include <Grid/algorithms/FFT.h>
 | 
			
		||||
 | 
			
		||||
 
 | 
			
		||||
@@ -8,6 +8,7 @@
 | 
			
		||||
 | 
			
		||||
Author: Azusa Yamaguchi <ayamaguc@staffmail.ed.ac.uk>
 | 
			
		||||
Author: Peter Boyle <paboyle@ph.ed.ac.uk>
 | 
			
		||||
Author: Chulwoo Jung <chulwoo@bnl.gov>
 | 
			
		||||
 | 
			
		||||
    This program is free software; you can redistribute it and/or modify
 | 
			
		||||
    it under the terms of the GNU General Public License as published by
 | 
			
		||||
@@ -162,15 +163,10 @@ namespace Grid {
 | 
			
		||||
	_Mat.M(in,out);
 | 
			
		||||
      }
 | 
			
		||||
      void HermOpAndNorm(const Field &in, Field &out,RealD &n1,RealD &n2){
 | 
			
		||||
	ComplexD dot;
 | 
			
		||||
 | 
			
		||||
	_Mat.M(in,out);
 | 
			
		||||
	
 | 
			
		||||
	dot= innerProduct(in,out);
 | 
			
		||||
	n1=real(dot);
 | 
			
		||||
 | 
			
		||||
	dot = innerProduct(out,out);
 | 
			
		||||
	n2=real(dot);
 | 
			
		||||
	ComplexD dot= innerProduct(in,out); n1=real(dot);
 | 
			
		||||
	n2=norm2(out);
 | 
			
		||||
      }
 | 
			
		||||
      void HermOp(const Field &in, Field &out){
 | 
			
		||||
	_Mat.M(in,out);
 | 
			
		||||
@@ -192,10 +188,10 @@ namespace Grid {
 | 
			
		||||
	ni=Mpc(in,tmp);
 | 
			
		||||
	no=MpcDag(tmp,out);
 | 
			
		||||
      }
 | 
			
		||||
      void HermOpAndNorm(const Field &in, Field &out,RealD &n1,RealD &n2){
 | 
			
		||||
      virtual void HermOpAndNorm(const Field &in, Field &out,RealD &n1,RealD &n2){
 | 
			
		||||
	MpcDagMpc(in,out,n1,n2);
 | 
			
		||||
      }
 | 
			
		||||
      void HermOp(const Field &in, Field &out){
 | 
			
		||||
      virtual void HermOp(const Field &in, Field &out){
 | 
			
		||||
	RealD n1,n2;
 | 
			
		||||
	HermOpAndNorm(in,out,n1,n2);
 | 
			
		||||
      }
 | 
			
		||||
@@ -212,7 +208,6 @@ namespace Grid {
 | 
			
		||||
      void OpDir  (const Field &in, Field &out,int dir,int disp) {
 | 
			
		||||
	assert(0);
 | 
			
		||||
      }
 | 
			
		||||
 | 
			
		||||
    };
 | 
			
		||||
    template<class Matrix,class Field>
 | 
			
		||||
      class SchurDiagMooeeOperator :  public SchurOperatorBase<Field> {
 | 
			
		||||
@@ -270,7 +265,6 @@ namespace Grid {
 | 
			
		||||
	return axpy_norm(out,-1.0,tmp,in);
 | 
			
		||||
      }
 | 
			
		||||
    };
 | 
			
		||||
 | 
			
		||||
    template<class Matrix,class Field>
 | 
			
		||||
      class SchurDiagTwoOperator :  public SchurOperatorBase<Field> {
 | 
			
		||||
    protected:
 | 
			
		||||
@@ -299,6 +293,168 @@ namespace Grid {
 | 
			
		||||
	return axpy_norm(out,-1.0,tmp,in);
 | 
			
		||||
      }
 | 
			
		||||
    };
 | 
			
		||||
    ///////////////////////////////////////////////////////////////////////////////////////////////////
 | 
			
		||||
    // Left  handed Moo^-1 ; (Moo - Moe Mee^-1 Meo) psi = eta  -->  ( 1 - Moo^-1 Moe Mee^-1 Meo ) psi = Moo^-1 eta
 | 
			
		||||
    // Right handed Moo^-1 ; (Moo - Moe Mee^-1 Meo) Moo^-1 Moo psi = eta  -->  ( 1 - Moe Mee^-1 Meo ) Moo^-1 phi=eta ; psi = Moo^-1 phi
 | 
			
		||||
    ///////////////////////////////////////////////////////////////////////////////////////////////////
 | 
			
		||||
    template<class Matrix,class Field> using SchurDiagOneRH = SchurDiagTwoOperator<Matrix,Field> ;
 | 
			
		||||
    template<class Matrix,class Field> using SchurDiagOneLH = SchurDiagOneOperator<Matrix,Field> ;
 | 
			
		||||
    ///////////////////////////////////////////////////////////////////////////////////////////////////
 | 
			
		||||
    //  Staggered use
 | 
			
		||||
    ///////////////////////////////////////////////////////////////////////////////////////////////////
 | 
			
		||||
    template<class Matrix,class Field>
 | 
			
		||||
      class SchurStaggeredOperator :  public SchurOperatorBase<Field> {
 | 
			
		||||
    protected:
 | 
			
		||||
      Matrix &_Mat;
 | 
			
		||||
    public:
 | 
			
		||||
      SchurStaggeredOperator (Matrix &Mat): _Mat(Mat){};
 | 
			
		||||
      virtual void HermOpAndNorm(const Field &in, Field &out,RealD &n1,RealD &n2){
 | 
			
		||||
	n2 = Mpc(in,out);
 | 
			
		||||
	ComplexD dot= innerProduct(in,out);
 | 
			
		||||
	n1 = real(dot);
 | 
			
		||||
      }
 | 
			
		||||
      virtual void HermOp(const Field &in, Field &out){
 | 
			
		||||
	Mpc(in,out);
 | 
			
		||||
      }
 | 
			
		||||
      virtual  RealD Mpc      (const Field &in, Field &out) {
 | 
			
		||||
	Field tmp(in._grid);
 | 
			
		||||
	_Mat.Meooe(in,tmp);
 | 
			
		||||
	_Mat.MooeeInv(tmp,out);
 | 
			
		||||
	_Mat.Meooe(out,tmp);
 | 
			
		||||
	_Mat.Mooee(in,out);
 | 
			
		||||
        return axpy_norm(out,-1.0,tmp,out);
 | 
			
		||||
      }
 | 
			
		||||
      virtual  RealD MpcDag   (const Field &in, Field &out){
 | 
			
		||||
	return Mpc(in,out);
 | 
			
		||||
      }
 | 
			
		||||
      virtual void MpcDagMpc(const Field &in, Field &out,RealD &ni,RealD &no) {
 | 
			
		||||
	assert(0);// Never need with staggered
 | 
			
		||||
      }
 | 
			
		||||
    };
 | 
			
		||||
//    template<class Matrix,class Field> using SchurStagOperator = SchurStaggeredOperator<Matrix,Field>;
 | 
			
		||||
    template<class Matrix,class Field>
 | 
			
		||||
//      class SchurStagOperator :  public LinearOperatorBase<Field> {
 | 
			
		||||
      class SchurStagOperator :  public SchurOperatorBase<Field> {
 | 
			
		||||
    protected:
 | 
			
		||||
      Matrix &_Mat;
 | 
			
		||||
    public:
 | 
			
		||||
      SchurStagOperator (Matrix &Mat): _Mat(Mat){};
 | 
			
		||||
      virtual  RealD Mpc      (const Field &in, Field &out) {
 | 
			
		||||
	Field tmp(in._grid);
 | 
			
		||||
	Field tmp2(in._grid);
 | 
			
		||||
 | 
			
		||||
	_Mat.Mooee(in,out);
 | 
			
		||||
	_Mat.Mooee(out,tmp);
 | 
			
		||||
 | 
			
		||||
	_Mat.Meooe(in,out);
 | 
			
		||||
	_Mat.Meooe(out,tmp2);
 | 
			
		||||
 | 
			
		||||
	return axpy_norm(out,-1.0,tmp2,tmp);
 | 
			
		||||
      }
 | 
			
		||||
      virtual  RealD MpcDag   (const Field &in, Field &out){
 | 
			
		||||
 | 
			
		||||
	return Mpc(in,out);
 | 
			
		||||
      }
 | 
			
		||||
#if 0
 | 
			
		||||
      virtual void MpcDagMpc(const Field &in, Field &out,RealD &ni,RealD &no) {
 | 
			
		||||
	Field tmp(in._grid);
 | 
			
		||||
	ni=Mpc(in,tmp);
 | 
			
		||||
	no=MpcDag(tmp,out);
 | 
			
		||||
      }
 | 
			
		||||
#endif
 | 
			
		||||
      void HermOpAndNorm(const Field &in, Field &out,RealD &n1,RealD &n2){
 | 
			
		||||
	n2 = Mpc(in,out);
 | 
			
		||||
	ComplexD dot = innerProduct(in,out);
 | 
			
		||||
	n1 = real(dot);
 | 
			
		||||
      }
 | 
			
		||||
      void HermOp(const Field &in, Field &out){
 | 
			
		||||
	RealD n1,n2;
 | 
			
		||||
	HermOpAndNorm(in,out,n1,n2);
 | 
			
		||||
      }
 | 
			
		||||
      void Op     (const Field &in, Field &out){
 | 
			
		||||
	Mpc(in,out);
 | 
			
		||||
      }
 | 
			
		||||
      void AdjOp     (const Field &in, Field &out){ 
 | 
			
		||||
	MpcDag(in,out);
 | 
			
		||||
      }
 | 
			
		||||
      // Support for coarsening to a multigrid
 | 
			
		||||
      void OpDiag (const Field &in, Field &out) {
 | 
			
		||||
	assert(0); // must coarsen the unpreconditioned system
 | 
			
		||||
      }
 | 
			
		||||
      void OpDir  (const Field &in, Field &out,int dir,int disp) {
 | 
			
		||||
	assert(0);
 | 
			
		||||
      }
 | 
			
		||||
 | 
			
		||||
    };
 | 
			
		||||
 | 
			
		||||
#if 0
 | 
			
		||||
  // This is specific to (Z)mobius fermions
 | 
			
		||||
  template<class Matrix, class Field>
 | 
			
		||||
    class KappaSimilarityTransform {
 | 
			
		||||
  public:
 | 
			
		||||
//    INHERIT_IMPL_TYPES(Matrix);
 | 
			
		||||
    typedef typename Matrix::Coeff_t                     Coeff_t;
 | 
			
		||||
    std::vector<Coeff_t> kappa, kappaDag, kappaInv, kappaInvDag;
 | 
			
		||||
 | 
			
		||||
    KappaSimilarityTransform (Matrix &zmob) {
 | 
			
		||||
      for (int i=0;i<(int)zmob.bs.size();i++) {
 | 
			
		||||
	Coeff_t k = 1.0 / ( 2.0 * (zmob.bs[i] *(4 - zmob.M5) + 1.0) );
 | 
			
		||||
	kappa.push_back( k );
 | 
			
		||||
	kappaDag.push_back( conj(k) );
 | 
			
		||||
	kappaInv.push_back( 1.0 / k );
 | 
			
		||||
	kappaInvDag.push_back( 1.0 / conj(k) );
 | 
			
		||||
      }
 | 
			
		||||
    }
 | 
			
		||||
 | 
			
		||||
  template<typename vobj>
 | 
			
		||||
    void sscale(const Lattice<vobj>& in, Lattice<vobj>& out, Coeff_t* s) {
 | 
			
		||||
    GridBase *grid=out._grid;
 | 
			
		||||
    out.checkerboard = in.checkerboard;
 | 
			
		||||
    assert(grid->_simd_layout[0] == 1); // should be fine for ZMobius for now
 | 
			
		||||
    int Ls = grid->_rdimensions[0];
 | 
			
		||||
    parallel_for(int ss=0;ss<grid->oSites();ss++){
 | 
			
		||||
      vobj tmp = s[ss % Ls]*in._odata[ss];
 | 
			
		||||
      vstream(out._odata[ss],tmp);
 | 
			
		||||
    }
 | 
			
		||||
  }
 | 
			
		||||
 | 
			
		||||
  RealD sscale_norm(const Field& in, Field& out, Coeff_t* s) {
 | 
			
		||||
    sscale(in,out,s);
 | 
			
		||||
    return norm2(out);
 | 
			
		||||
  }
 | 
			
		||||
 | 
			
		||||
  virtual RealD M       (const Field& in, Field& out) { return sscale_norm(in,out,&kappa[0]);   }
 | 
			
		||||
  virtual RealD MDag    (const Field& in, Field& out) { return sscale_norm(in,out,&kappaDag[0]);}
 | 
			
		||||
  virtual RealD MInv    (const Field& in, Field& out) { return sscale_norm(in,out,&kappaInv[0]);}
 | 
			
		||||
  virtual RealD MInvDag (const Field& in, Field& out) { return sscale_norm(in,out,&kappaInvDag[0]);}
 | 
			
		||||
 | 
			
		||||
  };
 | 
			
		||||
 | 
			
		||||
  template<class Matrix,class Field>
 | 
			
		||||
    class SchurDiagTwoKappaOperator :  public SchurOperatorBase<Field> {
 | 
			
		||||
  public:
 | 
			
		||||
    KappaSimilarityTransform<Matrix, Field> _S;
 | 
			
		||||
    SchurDiagTwoOperator<Matrix, Field> _Mat;
 | 
			
		||||
 | 
			
		||||
    SchurDiagTwoKappaOperator (Matrix &Mat): _S(Mat), _Mat(Mat) {};
 | 
			
		||||
 | 
			
		||||
    virtual  RealD Mpc      (const Field &in, Field &out) {
 | 
			
		||||
      Field tmp(in._grid);
 | 
			
		||||
 | 
			
		||||
      _S.MInv(in,out);
 | 
			
		||||
      _Mat.Mpc(out,tmp);
 | 
			
		||||
      return _S.M(tmp,out);
 | 
			
		||||
 | 
			
		||||
    }
 | 
			
		||||
    virtual  RealD MpcDag   (const Field &in, Field &out){
 | 
			
		||||
      Field tmp(in._grid);
 | 
			
		||||
 | 
			
		||||
      _S.MDag(in,out);
 | 
			
		||||
      _Mat.MpcDag(out,tmp);
 | 
			
		||||
      return _S.MInvDag(tmp,out);
 | 
			
		||||
    }
 | 
			
		||||
  };
 | 
			
		||||
#endif
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
    /////////////////////////////////////////////////////////////
 | 
			
		||||
 
 | 
			
		||||
@@ -8,6 +8,7 @@
 | 
			
		||||
 | 
			
		||||
Author: Peter Boyle <paboyle@ph.ed.ac.uk>
 | 
			
		||||
Author: paboyle <paboyle@ph.ed.ac.uk>
 | 
			
		||||
Author: Christoph Lehner <clehner@bnl.gov>
 | 
			
		||||
 | 
			
		||||
    This program is free software; you can redistribute it and/or modify
 | 
			
		||||
    it under the terms of the GNU General Public License as published by
 | 
			
		||||
@@ -193,6 +194,47 @@ namespace Grid {
 | 
			
		||||
      return sum;
 | 
			
		||||
    };
 | 
			
		||||
 | 
			
		||||
    RealD approxD(RealD x)
 | 
			
		||||
    {
 | 
			
		||||
      RealD Un;
 | 
			
		||||
      RealD Unm;
 | 
			
		||||
      RealD Unp;
 | 
			
		||||
      
 | 
			
		||||
      RealD y=( x-0.5*(hi+lo))/(0.5*(hi-lo));
 | 
			
		||||
      
 | 
			
		||||
      RealD U0=1;
 | 
			
		||||
      RealD U1=2*y;
 | 
			
		||||
      
 | 
			
		||||
      RealD sum;
 | 
			
		||||
      sum = Coeffs[1]*U0;
 | 
			
		||||
      sum+= Coeffs[2]*U1*2.0;
 | 
			
		||||
      
 | 
			
		||||
      Un =U1;
 | 
			
		||||
      Unm=U0;
 | 
			
		||||
      for(int i=2;i<order-1;i++){
 | 
			
		||||
	Unp=2*y*Un-Unm;
 | 
			
		||||
	Unm=Un;
 | 
			
		||||
	Un =Unp;
 | 
			
		||||
	sum+= Un*Coeffs[i+1]*(i+1.0);
 | 
			
		||||
      }
 | 
			
		||||
      return sum/(0.5*(hi-lo));
 | 
			
		||||
    };
 | 
			
		||||
    
 | 
			
		||||
    RealD approxInv(RealD z, RealD x0, int maxiter, RealD resid) {
 | 
			
		||||
      RealD x = x0;
 | 
			
		||||
      RealD eps;
 | 
			
		||||
      
 | 
			
		||||
      int i;
 | 
			
		||||
      for (i=0;i<maxiter;i++) {
 | 
			
		||||
	eps = approx(x) - z;
 | 
			
		||||
	if (fabs(eps / z) < resid)
 | 
			
		||||
	  return x;
 | 
			
		||||
	x = x - eps / approxD(x);
 | 
			
		||||
      }
 | 
			
		||||
      
 | 
			
		||||
      return std::numeric_limits<double>::quiet_NaN();
 | 
			
		||||
    }
 | 
			
		||||
    
 | 
			
		||||
    // Implement the required interface
 | 
			
		||||
    void operator() (LinearOperatorBase<Field> &Linop, const Field &in, Field &out) {
 | 
			
		||||
 | 
			
		||||
 
 | 
			
		||||
							
								
								
									
										137
									
								
								lib/algorithms/densematrix/DenseMatrix.h
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										137
									
								
								lib/algorithms/densematrix/DenseMatrix.h
									
									
									
									
									
										Normal file
									
								
							@@ -0,0 +1,137 @@
 | 
			
		||||
    /*************************************************************************************
 | 
			
		||||
 | 
			
		||||
    Grid physics library, www.github.com/paboyle/Grid 
 | 
			
		||||
 | 
			
		||||
    Source file: ./lib/algorithms/iterative/DenseMatrix.h
 | 
			
		||||
 | 
			
		||||
    Copyright (C) 2015
 | 
			
		||||
 | 
			
		||||
Author: Peter Boyle <paboyle@ph.ed.ac.uk>
 | 
			
		||||
Author: paboyle <paboyle@ph.ed.ac.uk>
 | 
			
		||||
 | 
			
		||||
    This program is free software; you can redistribute it and/or modify
 | 
			
		||||
    it under the terms of the GNU General Public License as published by
 | 
			
		||||
    the Free Software Foundation; either version 2 of the License, or
 | 
			
		||||
    (at your option) any later version.
 | 
			
		||||
 | 
			
		||||
    This program is distributed in the hope that it will be useful,
 | 
			
		||||
    but WITHOUT ANY WARRANTY; without even the implied warranty of
 | 
			
		||||
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 | 
			
		||||
    GNU General Public License for more details.
 | 
			
		||||
 | 
			
		||||
    You should have received a copy of the GNU General Public License along
 | 
			
		||||
    with this program; if not, write to the Free Software Foundation, Inc.,
 | 
			
		||||
    51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 | 
			
		||||
 | 
			
		||||
    See the full license in the file "LICENSE" in the top level distribution directory
 | 
			
		||||
    *************************************************************************************/
 | 
			
		||||
    /*  END LEGAL */
 | 
			
		||||
#ifndef GRID_DENSE_MATRIX_H
 | 
			
		||||
#define GRID_DENSE_MATRIX_H
 | 
			
		||||
 | 
			
		||||
namespace Grid {
 | 
			
		||||
    /////////////////////////////////////////////////////////////
 | 
			
		||||
    // Matrix untils
 | 
			
		||||
    /////////////////////////////////////////////////////////////
 | 
			
		||||
 | 
			
		||||
template<class T> using DenseVector = std::vector<T>;
 | 
			
		||||
template<class T> using DenseMatrix = DenseVector<DenseVector<T> >;
 | 
			
		||||
 | 
			
		||||
template<class T> void Size(DenseVector<T> & vec, int &N) 
 | 
			
		||||
{ 
 | 
			
		||||
  N= vec.size();
 | 
			
		||||
}
 | 
			
		||||
template<class T> void Size(DenseMatrix<T> & mat, int &N,int &M) 
 | 
			
		||||
{ 
 | 
			
		||||
  N= mat.size();
 | 
			
		||||
  M= mat[0].size();
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
template<class T> void SizeSquare(DenseMatrix<T> & mat, int &N) 
 | 
			
		||||
{ 
 | 
			
		||||
  int M; Size(mat,N,M);
 | 
			
		||||
  assert(N==M);
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
template<class T> void Resize(DenseVector<T > & mat, int N) { 
 | 
			
		||||
  mat.resize(N);
 | 
			
		||||
}
 | 
			
		||||
template<class T> void Resize(DenseMatrix<T > & mat, int N, int M) { 
 | 
			
		||||
  mat.resize(N);
 | 
			
		||||
  for(int i=0;i<N;i++){
 | 
			
		||||
    mat[i].resize(M);
 | 
			
		||||
  }
 | 
			
		||||
}
 | 
			
		||||
template<class T> void Fill(DenseMatrix<T> & mat, T&val) { 
 | 
			
		||||
  int N,M;
 | 
			
		||||
  Size(mat,N,M);
 | 
			
		||||
  for(int i=0;i<N;i++){
 | 
			
		||||
  for(int j=0;j<M;j++){
 | 
			
		||||
    mat[i][j] = val;
 | 
			
		||||
  }}
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
/** Transpose of a matrix **/
 | 
			
		||||
template<class T> DenseMatrix<T> Transpose(DenseMatrix<T> & mat){
 | 
			
		||||
  int N,M;
 | 
			
		||||
  Size(mat,N,M);
 | 
			
		||||
  DenseMatrix<T> C; Resize(C,M,N);
 | 
			
		||||
  for(int i=0;i<M;i++){
 | 
			
		||||
  for(int j=0;j<N;j++){
 | 
			
		||||
    C[i][j] = mat[j][i];
 | 
			
		||||
  }} 
 | 
			
		||||
  return C;
 | 
			
		||||
}
 | 
			
		||||
/** Set DenseMatrix to unit matrix **/
 | 
			
		||||
template<class T> void Unity(DenseMatrix<T> &A){
 | 
			
		||||
  int N;  SizeSquare(A,N);
 | 
			
		||||
  for(int i=0;i<N;i++){
 | 
			
		||||
    for(int j=0;j<N;j++){
 | 
			
		||||
      if ( i==j ) A[i][j] = 1;
 | 
			
		||||
      else        A[i][j] = 0;
 | 
			
		||||
    } 
 | 
			
		||||
  } 
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
/** Add C * I to matrix **/
 | 
			
		||||
template<class T>
 | 
			
		||||
void PlusUnit(DenseMatrix<T> & A,T c){
 | 
			
		||||
  int dim;  SizeSquare(A,dim);
 | 
			
		||||
  for(int i=0;i<dim;i++){A[i][i] = A[i][i] + c;} 
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
/** return the Hermitian conjugate of matrix **/
 | 
			
		||||
template<class T>
 | 
			
		||||
DenseMatrix<T> HermitianConj(DenseMatrix<T> &mat){
 | 
			
		||||
 | 
			
		||||
  int dim; SizeSquare(mat,dim);
 | 
			
		||||
 | 
			
		||||
  DenseMatrix<T> C; Resize(C,dim,dim);
 | 
			
		||||
 | 
			
		||||
  for(int i=0;i<dim;i++){
 | 
			
		||||
    for(int j=0;j<dim;j++){
 | 
			
		||||
      C[i][j] = conj(mat[j][i]);
 | 
			
		||||
    } 
 | 
			
		||||
  } 
 | 
			
		||||
  return C;
 | 
			
		||||
}
 | 
			
		||||
/**Get a square submatrix**/
 | 
			
		||||
template <class T>
 | 
			
		||||
DenseMatrix<T> GetSubMtx(DenseMatrix<T> &A,int row_st, int row_end, int col_st, int col_end)
 | 
			
		||||
{
 | 
			
		||||
  DenseMatrix<T> H; Resize(H,row_end - row_st,col_end-col_st);
 | 
			
		||||
 | 
			
		||||
  for(int i = row_st; i<row_end; i++){
 | 
			
		||||
  for(int j = col_st; j<col_end; j++){
 | 
			
		||||
    H[i-row_st][j-col_st]=A[i][j];
 | 
			
		||||
  }}
 | 
			
		||||
  return H;
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
#include "Householder.h"
 | 
			
		||||
#include "Francis.h"
 | 
			
		||||
 | 
			
		||||
#endif
 | 
			
		||||
 | 
			
		||||
							
								
								
									
										525
									
								
								lib/algorithms/densematrix/Francis.h
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										525
									
								
								lib/algorithms/densematrix/Francis.h
									
									
									
									
									
										Normal file
									
								
							@@ -0,0 +1,525 @@
 | 
			
		||||
    /*************************************************************************************
 | 
			
		||||
 | 
			
		||||
    Grid physics library, www.github.com/paboyle/Grid 
 | 
			
		||||
 | 
			
		||||
    Source file: ./lib/algorithms/iterative/Francis.h
 | 
			
		||||
 | 
			
		||||
    Copyright (C) 2015
 | 
			
		||||
 | 
			
		||||
Author: Peter Boyle <paboyle@ph.ed.ac.uk>
 | 
			
		||||
 | 
			
		||||
    This program is free software; you can redistribute it and/or modify
 | 
			
		||||
    it under the terms of the GNU General Public License as published by
 | 
			
		||||
    the Free Software Foundation; either version 2 of the License, or
 | 
			
		||||
    (at your option) any later version.
 | 
			
		||||
 | 
			
		||||
    This program is distributed in the hope that it will be useful,
 | 
			
		||||
    but WITHOUT ANY WARRANTY; without even the implied warranty of
 | 
			
		||||
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 | 
			
		||||
    GNU General Public License for more details.
 | 
			
		||||
 | 
			
		||||
    You should have received a copy of the GNU General Public License along
 | 
			
		||||
    with this program; if not, write to the Free Software Foundation, Inc.,
 | 
			
		||||
    51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 | 
			
		||||
 | 
			
		||||
    See the full license in the file "LICENSE" in the top level distribution directory
 | 
			
		||||
    *************************************************************************************/
 | 
			
		||||
    /*  END LEGAL */
 | 
			
		||||
#ifndef FRANCIS_H
 | 
			
		||||
#define FRANCIS_H
 | 
			
		||||
 | 
			
		||||
#include <cstdlib>
 | 
			
		||||
#include <string>
 | 
			
		||||
#include <cmath>
 | 
			
		||||
#include <iostream>
 | 
			
		||||
#include <sstream>
 | 
			
		||||
#include <stdexcept>
 | 
			
		||||
#include <fstream>
 | 
			
		||||
#include <complex>
 | 
			
		||||
#include <algorithm>
 | 
			
		||||
 | 
			
		||||
//#include <timer.h>
 | 
			
		||||
//#include <lapacke.h>
 | 
			
		||||
//#include <Eigen/Dense>
 | 
			
		||||
 | 
			
		||||
namespace Grid {
 | 
			
		||||
 | 
			
		||||
template <class T> int SymmEigensystem(DenseMatrix<T > &Ain, DenseVector<T> &evals, DenseMatrix<T> &evecs, RealD small);
 | 
			
		||||
template <class T> int     Eigensystem(DenseMatrix<T > &Ain, DenseVector<T> &evals, DenseMatrix<T> &evecs, RealD small);
 | 
			
		||||
 | 
			
		||||
/**
 | 
			
		||||
  Find the eigenvalues of an upper hessenberg matrix using the Francis QR algorithm.
 | 
			
		||||
H =
 | 
			
		||||
      x  x  x  x  x  x  x  x  x
 | 
			
		||||
      x  x  x  x  x  x  x  x  x
 | 
			
		||||
      0  x  x  x  x  x  x  x  x
 | 
			
		||||
      0  0  x  x  x  x  x  x  x
 | 
			
		||||
      0  0  0  x  x  x  x  x  x
 | 
			
		||||
      0  0  0  0  x  x  x  x  x
 | 
			
		||||
      0  0  0  0  0  x  x  x  x
 | 
			
		||||
      0  0  0  0  0  0  x  x  x
 | 
			
		||||
      0  0  0  0  0  0  0  x  x
 | 
			
		||||
Factorization is P T P^H where T is upper triangular (mod cc blocks) and P is orthagonal/unitary.
 | 
			
		||||
**/
 | 
			
		||||
template <class T>
 | 
			
		||||
int QReigensystem(DenseMatrix<T> &Hin, DenseVector<T> &evals, DenseMatrix<T> &evecs, RealD small)
 | 
			
		||||
{
 | 
			
		||||
  DenseMatrix<T> H = Hin; 
 | 
			
		||||
 | 
			
		||||
  int N ; SizeSquare(H,N);
 | 
			
		||||
  int M = N;
 | 
			
		||||
 | 
			
		||||
  Fill(evals,0);
 | 
			
		||||
  Fill(evecs,0);
 | 
			
		||||
 | 
			
		||||
  T s,t,x=0,y=0,z=0;
 | 
			
		||||
  T u,d;
 | 
			
		||||
  T apd,amd,bc;
 | 
			
		||||
  DenseVector<T> p(N,0);
 | 
			
		||||
  T nrm = Norm(H);    ///DenseMatrix Norm
 | 
			
		||||
  int n, m;
 | 
			
		||||
  int e = 0;
 | 
			
		||||
  int it = 0;
 | 
			
		||||
  int tot_it = 0;
 | 
			
		||||
  int l = 0;
 | 
			
		||||
  int r = 0;
 | 
			
		||||
  DenseMatrix<T> P; Resize(P,N,N); Unity(P);
 | 
			
		||||
  DenseVector<int> trows(N,0);
 | 
			
		||||
 | 
			
		||||
  /// Check if the matrix is really hessenberg, if not abort
 | 
			
		||||
  RealD sth = 0;
 | 
			
		||||
  for(int j=0;j<N;j++){
 | 
			
		||||
    for(int i=j+2;i<N;i++){
 | 
			
		||||
      sth = abs(H[i][j]);
 | 
			
		||||
      if(sth > small){
 | 
			
		||||
	std::cout << "Non hessenberg H = " << sth << " > " << small << std::endl;
 | 
			
		||||
	exit(1);
 | 
			
		||||
      }
 | 
			
		||||
    }
 | 
			
		||||
  }
 | 
			
		||||
 | 
			
		||||
  do{
 | 
			
		||||
    std::cout << "Francis QR Step N = " << N << std::endl;
 | 
			
		||||
    /** Check for convergence
 | 
			
		||||
      x  x  x  x  x
 | 
			
		||||
      0  x  x  x  x
 | 
			
		||||
      0  0  x  x  x
 | 
			
		||||
      0  0  x  x  x
 | 
			
		||||
      0  0  0  0  x
 | 
			
		||||
      for this matrix l = 4
 | 
			
		||||
     **/
 | 
			
		||||
    do{
 | 
			
		||||
      l = Chop_subdiag(H,nrm,e,small);
 | 
			
		||||
      r = 0;    ///May have converged on more than one eval
 | 
			
		||||
      ///Single eval
 | 
			
		||||
      if(l == N-1){
 | 
			
		||||
        evals[e] = H[l][l];
 | 
			
		||||
        N--; e++; r++; it = 0;
 | 
			
		||||
      }
 | 
			
		||||
      ///RealD eval
 | 
			
		||||
      if(l == N-2){
 | 
			
		||||
        trows[l+1] = 1;    ///Needed for UTSolve
 | 
			
		||||
        apd = H[l][l] + H[l+1][l+1];
 | 
			
		||||
        amd = H[l][l] - H[l+1][l+1];
 | 
			
		||||
        bc =  (T)4.0*H[l+1][l]*H[l][l+1];
 | 
			
		||||
        evals[e]   = (T)0.5*( apd + sqrt(amd*amd + bc) );
 | 
			
		||||
        evals[e+1] = (T)0.5*( apd - sqrt(amd*amd + bc) );
 | 
			
		||||
        N-=2; e+=2; r++; it = 0;
 | 
			
		||||
      }
 | 
			
		||||
    } while(r>0);
 | 
			
		||||
 | 
			
		||||
    if(N ==0) break;
 | 
			
		||||
 | 
			
		||||
    DenseVector<T > ck; Resize(ck,3);
 | 
			
		||||
    DenseVector<T> v;   Resize(v,3);
 | 
			
		||||
 | 
			
		||||
    for(int m = N-3; m >= l; m--){
 | 
			
		||||
      ///Starting vector essentially random shift.
 | 
			
		||||
      if(it%10 == 0 && N >= 3 && it > 0){
 | 
			
		||||
        s = (T)1.618033989*( abs( H[N-1][N-2] ) + abs( H[N-2][N-3] ) );
 | 
			
		||||
        t = (T)0.618033989*( abs( H[N-1][N-2] ) + abs( H[N-2][N-3] ) );
 | 
			
		||||
        x = H[m][m]*H[m][m] + H[m][m+1]*H[m+1][m] - s*H[m][m] + t;
 | 
			
		||||
        y = H[m+1][m]*(H[m][m] + H[m+1][m+1] - s);
 | 
			
		||||
        z = H[m+1][m]*H[m+2][m+1];
 | 
			
		||||
      }
 | 
			
		||||
      ///Starting vector implicit Q theorem
 | 
			
		||||
      else{
 | 
			
		||||
        s = (H[N-2][N-2] + H[N-1][N-1]);
 | 
			
		||||
        t = (H[N-2][N-2]*H[N-1][N-1] - H[N-2][N-1]*H[N-1][N-2]);
 | 
			
		||||
        x = H[m][m]*H[m][m] + H[m][m+1]*H[m+1][m] - s*H[m][m] + t;
 | 
			
		||||
        y = H[m+1][m]*(H[m][m] + H[m+1][m+1] - s);
 | 
			
		||||
        z = H[m+1][m]*H[m+2][m+1];
 | 
			
		||||
      }
 | 
			
		||||
      ck[0] = x; ck[1] = y; ck[2] = z;
 | 
			
		||||
 | 
			
		||||
      if(m == l) break;
 | 
			
		||||
 | 
			
		||||
      /** Some stupid thing from numerical recipies, seems to work**/
 | 
			
		||||
      // PAB.. for heaven's sake quote page, purpose, evidence it works.
 | 
			
		||||
      //       what sort of comment is that!?!?!?
 | 
			
		||||
      u=abs(H[m][m-1])*(abs(y)+abs(z));
 | 
			
		||||
      d=abs(x)*(abs(H[m-1][m-1])+abs(H[m][m])+abs(H[m+1][m+1]));
 | 
			
		||||
      if ((T)abs(u+d) == (T)abs(d) ){
 | 
			
		||||
	l = m; break;
 | 
			
		||||
      }
 | 
			
		||||
 | 
			
		||||
      //if (u < small){l = m; break;}
 | 
			
		||||
    }
 | 
			
		||||
    if(it > 100000){
 | 
			
		||||
     std::cout << "QReigensystem: bugger it got stuck after 100000 iterations" << std::endl;
 | 
			
		||||
     std::cout << "got " << e << " evals " << l << " " << N << std::endl;
 | 
			
		||||
      exit(1);
 | 
			
		||||
    }
 | 
			
		||||
    normalize(ck);    ///Normalization cancels in PHP anyway
 | 
			
		||||
    T beta;
 | 
			
		||||
    Householder_vector<T >(ck, 0, 2, v, beta);
 | 
			
		||||
    Householder_mult<T >(H,v,beta,0,l,l+2,0);
 | 
			
		||||
    Householder_mult<T >(H,v,beta,0,l,l+2,1);
 | 
			
		||||
    ///Accumulate eigenvector
 | 
			
		||||
    Householder_mult<T >(P,v,beta,0,l,l+2,1);
 | 
			
		||||
    int sw = 0;      ///Are we on the last row?
 | 
			
		||||
    for(int k=l;k<N-2;k++){
 | 
			
		||||
      x = H[k+1][k];
 | 
			
		||||
      y = H[k+2][k];
 | 
			
		||||
      z = (T)0.0;
 | 
			
		||||
      if(k+3 <= N-1){
 | 
			
		||||
	z = H[k+3][k];
 | 
			
		||||
      } else{
 | 
			
		||||
	sw = 1; 
 | 
			
		||||
	v[2] = (T)0.0;
 | 
			
		||||
      }
 | 
			
		||||
      ck[0] = x; ck[1] = y; ck[2] = z;
 | 
			
		||||
      normalize(ck);
 | 
			
		||||
      Householder_vector<T >(ck, 0, 2-sw, v, beta);
 | 
			
		||||
      Householder_mult<T >(H,v, beta,0,k+1,k+3-sw,0);
 | 
			
		||||
      Householder_mult<T >(H,v, beta,0,k+1,k+3-sw,1);
 | 
			
		||||
      ///Accumulate eigenvector
 | 
			
		||||
      Householder_mult<T >(P,v, beta,0,k+1,k+3-sw,1);
 | 
			
		||||
    }
 | 
			
		||||
    it++;
 | 
			
		||||
    tot_it++;
 | 
			
		||||
  }while(N > 1);
 | 
			
		||||
  N = evals.size();
 | 
			
		||||
  ///Annoying - UT solves in reverse order;
 | 
			
		||||
  DenseVector<T> tmp; Resize(tmp,N);
 | 
			
		||||
  for(int i=0;i<N;i++){
 | 
			
		||||
    tmp[i] = evals[N-i-1];
 | 
			
		||||
  } 
 | 
			
		||||
  evals = tmp;
 | 
			
		||||
  UTeigenvectors(H, trows, evals, evecs);
 | 
			
		||||
  for(int i=0;i<evals.size();i++){evecs[i] = P*evecs[i]; normalize(evecs[i]);}
 | 
			
		||||
  return tot_it;
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
template <class T>
 | 
			
		||||
int my_Wilkinson(DenseMatrix<T> &Hin, DenseVector<T> &evals, DenseMatrix<T> &evecs, RealD small)
 | 
			
		||||
{
 | 
			
		||||
  /**
 | 
			
		||||
  Find the eigenvalues of an upper Hessenberg matrix using the Wilkinson QR algorithm.
 | 
			
		||||
  H =
 | 
			
		||||
  x  x  0  0  0  0
 | 
			
		||||
  x  x  x  0  0  0
 | 
			
		||||
  0  x  x  x  0  0
 | 
			
		||||
  0  0  x  x  x  0
 | 
			
		||||
  0  0  0  x  x  x
 | 
			
		||||
  0  0  0  0  x  x
 | 
			
		||||
  Factorization is P T P^H where T is upper triangular (mod cc blocks) and P is orthagonal/unitary.  **/
 | 
			
		||||
  return my_Wilkinson(Hin, evals, evecs, small, small);
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
template <class T>
int my_Wilkinson(DenseMatrix<T> &Hin, DenseVector<T> &evals, DenseMatrix<T> &evecs, RealD small, RealD tol)
{
  /// Wilkinson-shift QR eigensolver for a symmetric tridiagonal / upper
  /// Hessenberg matrix.  `small` is the deflation threshold for subdiagonal
  /// entries, `tol` the tolerance used only to warn when Hin is not actually
  /// tridiagonal.  On return: evals holds the eigenvalues (rescaled by the
  /// input norm), evecs the corresponding eigenvectors.  Returns the total
  /// number of QR iterations performed.
  int N; SizeSquare(Hin,N);
  int M = N;   // NOTE(review): M is only referenced by the commented-out residual check below

  /// Work on a copy: the input must not be modified, but DenseMatrix is
  /// passed by reference.  Scale the copy by its norm for numerical safety;
  /// the scaling is undone on the eigenvalues at the end.
  //RealD Hnorm = abs( Hin.LargestDiag() ); H =  H*(1.0/Hnorm);
  DenseMatrix<T> H;  H = Hin;

  RealD Hnorm = abs(Norm(Hin));
  H = H * (1.0 / Hnorm);

  // TODO use openmp and memset
  Fill(evals,0);
  Fill(evecs,0);

  T s, t, x = 0, y = 0, z = 0;   // NOTE(review): y is never written before being read in the deflation test below
  T u, d;
  T apd, amd, bc;                // 2x2 block solve: a+d, a-d, 4bc
  DenseVector<T> p; Resize(p,N); Fill(p,0);

  T nrm = Norm(H);    ///DenseMatrix Norm, used by Chop_symm_subdiag
  int n, m;
  int e = 0;          // number of eigenvalues deflated so far
  int it = 0;         // iterations since the last deflation (drives the ad-hoc shift)
  int tot_it = 0;     // total iterations, returned to the caller
  int l = 0;          // active block starts at row/column l
  int r = 0;          // eigenvalues deflated in the current inner pass
  DenseMatrix<T> P; Resize(P,N,N);   // accumulated similarity transform (eigenvector rotations)
  Unity(P);
  DenseVector<int> trows(N, 0);      // marks rows belonging to 2x2 blocks, needed by UTeigenvectors
  /// Check if the matrix is really symm tridiag; warn (do not abort) otherwise.
  RealD sth = 0;                     // NOTE(review): unused
  for(int j = 0; j < N; ++j)
  {
    for(int i = j + 2; i < N; ++i)
    {
      if(abs(H[i][j]) > tol || abs(H[j][i]) > tol)
      {
	std::cout << "Non Tridiagonal H(" << i << ","<< j << ") = |" << Real( real( H[j][i] ) ) << "| > " << tol << std::endl;
	std::cout << "Warning tridiagonalize and call again" << std::endl;
        // exit(1); // see what is going on
        //return;
      }
    }
  }

  do{
    do{
      // Deflation pass: chop negligible subdiagonal entries (relative to nrm)
      // and peel converged eigenvalues off the bottom of the active block.
      //Check if the subdiagonal term is small enough (<small)
      //if true then it is converged.
      //check start from H.dim - e - 1
      //How to deal with more than 2 are converged?
      //What if Chop_symm_subdiag return something int the middle?
      //--------------
      l = Chop_symm_subdiag(H,nrm, e, small);
      r = 0;    ///May have converged on more than one eval
      // Case: the bottom 1x1 block decoupled
      // x  x  0  0  0  0
      // x  x  x  0  0  0
      // 0  x  x  x  0  0
      // 0  0  x  x  x  0
      // 0  0  0  x  x  0
      // 0  0  0  0  0  x  <- l
      ///Single eval
      if(l == N - 1)
      {
        evals[e] = H[l][l];
        N--;
        e++;
        r++;
        it = 0;
      }
      // Case: the bottom 2x2 block decoupled - solve it in closed form.
      // x  x  0  0  0  0
      // x  x  x  0  0  0
      // 0  x  x  x  0  0
      // 0  0  x  x  0  0
      // 0  0  0  0  x  x  <- l
      // 0  0  0  0  x  x
      ///RealD eval
      if(l == N - 2)
      {
        trows[l + 1] = 1;    ///Needed for UTSolve
        apd = H[l][l] + H[l + 1][ l + 1];
        amd = H[l][l] - H[l + 1][l + 1];
        bc =  (T) 4.0 * H[l + 1][l] * H[l][l + 1];
        // Quadratic formula for the eigenvalues of [[a,b],[c,d]]
        evals[e] = (T) 0.5 * (apd + sqrt(amd * amd + bc));
        evals[e + 1] = (T) 0.5 * (apd - sqrt(amd * amd + bc));
        N -= 2;
        e += 2;
        r++;
        it = 0;
      }
    }while(r > 0);
    // All eigenvalues deflated - done.
    if(N == 0) break;

    DenseVector<T> ck,v; Resize(ck,2); Resize(v,2);

    // Scan upward for a smaller decoupled block and compute the shift.
    for(int m = N - 3; m >= l; m--)
    {
      ///Starting vector essentially random shift, used every 10th stalled iteration.
      if(it%10 == 0 && N >= 3 && it > 0)
      {
        t = abs(H[N - 1][N - 2]) + abs(H[N - 2][N - 3]);
        x = H[m][m] - t;
        z = H[m + 1][m];
      } else {
      ///Starting vector implicit Q theorem: Wilkinson shift from the trailing 2x2.
        d = (H[N - 2][N - 2] - H[N - 1][N - 1]) * (T) 0.5;
        t =  H[N - 1][N - 1] - H[N - 1][N - 2] * H[N - 1][N - 2] 
	  / (d + sign(d) * sqrt(d * d + H[N - 1][N - 2] * H[N - 1][N - 2]));
        x = H[m][m] - t;
        z = H[m + 1][m];
      }
      // Reached the top of the active block: keep l as is.
      if(m == l)
        break;

      // Decoupling test (Numerical-Recipes style): if adding the subdiagonal
      // contribution u to d does not change it in floating point, split here.
      u = abs(H[m][m - 1]) * (abs(y) + abs(z));
      d = abs(x) * (abs(H[m - 1][m - 1]) + abs(H[m][m]) + abs(H[m + 1][m + 1]));
      if ((T)abs(u + d) == (T)abs(d))
      {
        l = m;
        break;
      }
    }
    // Guard against non-convergence.
    // NOTE(review): the threshold is 1000000 but the message says 100000.
    if(it > 1000000)
    {
      std::cout << "Wilkinson: bugger it got stuck after 100000 iterations" << std::endl;
      std::cout << "got " << e << " evals " << l << " " << N << std::endl;
      exit(1);
    }
    // One implicit QR sweep: create the bulge...
    T s, c;   // NOTE(review): shadows the outer-scope s declared above
    Givens_calc<T>(x, z, c, s);
    Givens_mult<T>(H, l, l + 1, c, -s, 0);
    Givens_mult<T>(H, l, l + 1, c,  s, 1);
    Givens_mult<T>(P, l, l + 1, c,  s, 1);
    // ...then chase it down the subdiagonal with further Givens rotations.
    for(int k = l; k < N - 2; ++k)
    {
      x = H.A[k + 1][k];
      z = H.A[k + 2][k];
      Givens_calc<T>(x, z, c, s);
      Givens_mult<T>(H, k + 1, k + 2, c, -s, 0);
      Givens_mult<T>(H, k + 1, k + 2, c,  s, 1);
      Givens_mult<T>(P, k + 1, k + 2, c,  s, 1);
    }
    it++;
    tot_it++;
  }while(N > 1);

  N = evals.size();
  ///Annoying - UT solves in reverse order; flip the eigenvalue ordering.
  DenseVector<T> tmp(N);
  for(int i = 0; i < N; ++i)
    tmp[i] = evals[N-i-1];
  evals = tmp;
  // Back-substitute for the eigenvectors of the (quasi-)triangular H,
  // then rotate them into the original basis and undo the input scaling.
  UTeigenvectors(H, trows, evals, evecs);
  //UTSymmEigenvectors(H, trows, evals, evecs);
  for(int i = 0; i < evals.size(); ++i)
  {
    evecs[i] = P * evecs[i];
    normalize(evecs[i]);
    evals[i] = evals[i] * Hnorm;
  }
  // // FIXME this is to test
  // Hin.write("evecs3", evecs);
  // Hin.write("evals3", evals);
  // // check rsd
  // for(int i = 0; i < M; i++) {
  //   vector<T> Aevec = Hin * evecs[i];
  //   RealD norm2(0.);
  //   for(int j = 0; j < M; j++) {
  //     norm2 += (Aevec[j] - evals[i] * evecs[i][j]) * (Aevec[j] - evals[i] * evecs[i][j]);
  //   }
  // }
  return tot_it;
}
 | 
			
		||||
 | 
			
		||||
template <class T>
void Hess(DenseMatrix<T > &A, DenseMatrix<T> &Q, int start){

  /**
  Reduce A (from row/column `start` onward) to upper Hessenberg form in place:

  x  x  x  x  x        x  x  x  x  x
  x  x  x  x  x        x  x  x  x  x
  x  x  x  x  x   ->   0  x  x  x  x
  x  x  x  x  x        0  0  x  x  x
  x  x  x  x  x        0  0  0  x  x

  using a sequence of Householder similarity transforms A -> P A P^H.
  The same right-hand rotations are accumulated into Q (Q -> Q P^H) so the
  caller can map eigenvectors back to the original basis.
  Slow (dense O(N^3) with temporaries), but only used at setup.
  */
  int N ; SizeSquare(A,N);
  DenseVector<T > p; Resize(p,N); Fill(p,0);   // NOTE(review): p is unused here

  for(int k=start;k<N-2;k++){
    //cerr << "hess" << k << std::endl;
    DenseVector<T > ck,v; Resize(ck,N-k-1); Resize(v,N-k-1);
    for(int i=k+1;i<N;i++){ck[i-k-1] = A(i,k);}  ///kth column below the diagonal
    normalize(ck);    ///Normalization cancels in PHP anyway
    T beta;
    Householder_vector<T >(ck, 0, ck.size()-1, v, beta);  ///Householder vector annihilating ck[1..]
    Householder_mult<T>(A,v,beta,start,k+1,N-1,0);  ///A -> PA
    Householder_mult<T >(A,v,beta,start,k+1,N-1,1);  ///PA -> PAP^H
    ///Accumulate eigenvector rotations
    Householder_mult<T >(Q,v,beta,start,k+1,N-1,1);  ///Q -> QP^H
  }
  // Optionally zero the (numerically negligible) entries below the subdiagonal:
  /*for(int l=0;l<N-2;l++){
    for(int k=l+2;k<N;k++){
    A(0,k,l);
    }
    }*/
}
 | 
			
		||||
 | 
			
		||||
template <class T>
 | 
			
		||||
void Tri(DenseMatrix<T > &A, DenseMatrix<T> &Q, int start){
 | 
			
		||||
///Tridiagonalize a matrix
 | 
			
		||||
  int N; SizeSquare(A,N);
 | 
			
		||||
  Hess(A,Q,start);
 | 
			
		||||
  /*for(int l=0;l<N-2;l++){
 | 
			
		||||
    for(int k=l+2;k<N;k++){
 | 
			
		||||
    A(0,l,k);
 | 
			
		||||
    }
 | 
			
		||||
    }*/
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
template <class T>
 | 
			
		||||
void ForceTridiagonal(DenseMatrix<T> &A){
 | 
			
		||||
///Tridiagonalize a matrix
 | 
			
		||||
  int N ; SizeSquare(A,N);
 | 
			
		||||
  for(int l=0;l<N-2;l++){
 | 
			
		||||
    for(int k=l+2;k<N;k++){
 | 
			
		||||
      A[l][k]=0;
 | 
			
		||||
      A[k][l]=0;
 | 
			
		||||
    }
 | 
			
		||||
  }
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
template <class T>
 | 
			
		||||
int my_SymmEigensystem(DenseMatrix<T > &Ain, DenseVector<T> &evals, DenseVector<DenseVector<T> > &evecs, RealD small){
 | 
			
		||||
  ///Solve a symmetric eigensystem, not necessarily in tridiagonal form
 | 
			
		||||
  int N; SizeSquare(Ain,N);
 | 
			
		||||
  DenseMatrix<T > A; A = Ain;
 | 
			
		||||
  DenseMatrix<T > Q; Resize(Q,N,N); Unity(Q);
 | 
			
		||||
  Tri(A,Q,0);
 | 
			
		||||
  int it = my_Wilkinson<T>(A, evals, evecs, small);
 | 
			
		||||
  for(int k=0;k<N;k++){evecs[k] = Q*evecs[k];}
 | 
			
		||||
  return it;
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
template <class T>
 | 
			
		||||
int Wilkinson(DenseMatrix<T> &Ain, DenseVector<T> &evals, DenseVector<DenseVector<T> > &evecs, RealD small){
 | 
			
		||||
  return my_Wilkinson(Ain, evals, evecs, small);
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
template <class T>
 | 
			
		||||
int SymmEigensystem(DenseMatrix<T> &Ain, DenseVector<T> &evals, DenseVector<DenseVector<T> > &evecs, RealD small){
 | 
			
		||||
  return my_SymmEigensystem(Ain, evals, evecs, small);
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
template <class T>
 | 
			
		||||
int Eigensystem(DenseMatrix<T > &Ain, DenseVector<T> &evals, DenseVector<DenseVector<T> > &evecs, RealD small){
 | 
			
		||||
///Solve a general eigensystem, not necessarily in tridiagonal form
 | 
			
		||||
  int N = Ain.dim;
 | 
			
		||||
  DenseMatrix<T > A(N); A = Ain;
 | 
			
		||||
  DenseMatrix<T > Q(N);Q.Unity();
 | 
			
		||||
  Hess(A,Q,0);
 | 
			
		||||
  int it = QReigensystem<T>(A, evals, evecs, small);
 | 
			
		||||
  for(int k=0;k<N;k++){evecs[k] = Q*evecs[k];}
 | 
			
		||||
  return it;
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
}
 | 
			
		||||
#endif
 | 
			
		||||
							
								
								
									
										242
									
								
								lib/algorithms/densematrix/Householder.h
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										242
									
								
								lib/algorithms/densematrix/Householder.h
									
									
									
									
									
										Normal file
									
								
							@@ -0,0 +1,242 @@
 | 
			
		||||
    /*************************************************************************************
 | 
			
		||||
 | 
			
		||||
    Grid physics library, www.github.com/paboyle/Grid 
 | 
			
		||||
 | 
			
		||||
    Source file: ./lib/algorithms/iterative/Householder.h
 | 
			
		||||
 | 
			
		||||
    Copyright (C) 2015
 | 
			
		||||
 | 
			
		||||
Author: Peter Boyle <paboyle@ph.ed.ac.uk>
 | 
			
		||||
 | 
			
		||||
    This program is free software; you can redistribute it and/or modify
 | 
			
		||||
    it under the terms of the GNU General Public License as published by
 | 
			
		||||
    the Free Software Foundation; either version 2 of the License, or
 | 
			
		||||
    (at your option) any later version.
 | 
			
		||||
 | 
			
		||||
    This program is distributed in the hope that it will be useful,
 | 
			
		||||
    but WITHOUT ANY WARRANTY; without even the implied warranty of
 | 
			
		||||
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 | 
			
		||||
    GNU General Public License for more details.
 | 
			
		||||
 | 
			
		||||
    You should have received a copy of the GNU General Public License along
 | 
			
		||||
    with this program; if not, write to the Free Software Foundation, Inc.,
 | 
			
		||||
    51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 | 
			
		||||
 | 
			
		||||
    See the full license in the file "LICENSE" in the top level distribution directory
 | 
			
		||||
    *************************************************************************************/
 | 
			
		||||
    /*  END LEGAL */
 | 
			
		||||
#ifndef HOUSEHOLDER_H
 | 
			
		||||
#define HOUSEHOLDER_H
 | 
			
		||||
 | 
			
		||||
#define TIMER(A) std::cout << GridLogMessage << __FUNC__ << " file "<< __FILE__ <<" line " << __LINE__ << std::endl;
 | 
			
		||||
#define ENTER()  std::cout << GridLogMessage << "ENTRY "<<__FUNC__ << " file "<< __FILE__ <<" line " << __LINE__ << std::endl;
 | 
			
		||||
#define LEAVE()  std::cout << GridLogMessage << "EXIT  "<<__FUNC__ << " file "<< __FILE__ <<" line " << __LINE__ << std::endl;
 | 
			
		||||
 | 
			
		||||
#include <cstdlib>
 | 
			
		||||
#include <string>
 | 
			
		||||
#include <cmath>
 | 
			
		||||
#include <iostream>
 | 
			
		||||
#include <sstream>
 | 
			
		||||
#include <stdexcept>
 | 
			
		||||
#include <fstream>
 | 
			
		||||
#include <complex>
 | 
			
		||||
#include <algorithm>
 | 
			
		||||
 | 
			
		||||
namespace Grid {
 | 
			
		||||
/** Comparison function for finding the max element in a vector **/
 | 
			
		||||
/** Comparison predicate ordering elements by absolute value; used with
    max_element to locate the largest-magnitude entry of a vector. **/
template <class T> bool cf(T lhs, T rhs) {
  const bool smaller = abs(lhs) < abs(rhs);
  return smaller;
}
 | 
			
		||||
 | 
			
		||||
/** 
 | 
			
		||||
	Calculate a real Givens angle 
 | 
			
		||||
 **/
 | 
			
		||||
template <class T> inline void Givens_calc(T y, T z, T &c, T &s){
 | 
			
		||||
 | 
			
		||||
  RealD mz = (RealD)abs(z);
 | 
			
		||||
  
 | 
			
		||||
  if(mz==0.0){
 | 
			
		||||
    c = 1; s = 0;
 | 
			
		||||
  }
 | 
			
		||||
  if(mz >= (RealD)abs(y)){
 | 
			
		||||
    T t = -y/z;
 | 
			
		||||
    s = (T)1.0 / sqrt ((T)1.0 + t * t);
 | 
			
		||||
    c = s * t;
 | 
			
		||||
  } else {
 | 
			
		||||
    T t = -z/y;
 | 
			
		||||
    c = (T)1.0 / sqrt ((T)1.0 + t * t);
 | 
			
		||||
    s = c * t;
 | 
			
		||||
  }
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
/** Apply a Givens rotation in the (i,k) plane to A.
    dir == 0: rotate rows i and k (left multiplication).
    dir == 1: rotate columns i and k (right multiplication, transposed sign
    convention so that left+right application forms a similarity transform).
    Any other dir is a no-op. **/
template <class T> inline void Givens_mult(DenseMatrix<T> &A,  int i, int k, T c, T s, int dir)
{
  int dim; SizeSquare(A, dim);

  if(dir == 0){
    for(int col = 0; col < dim; col++){
      const T ri = A[i][col];
      const T rk = A[k][col];
      A[i][col] =  c*ri + s*rk;
      A[k][col] = -s*ri + c*rk;
    }
  } else if(dir == 1){
    for(int row = 0; row < dim; row++){
      const T ci = A[row][i];
      const T ck = A[row][k];
      A[row][i] = c*ci - s*ck;
      A[row][k] = s*ci + c*ck;
    }
  }
}
 | 
			
		||||
 | 
			
		||||
/**
 | 
			
		||||
	from input = x;
 | 
			
		||||
	Compute the complex Householder vector, v, such that
 | 
			
		||||
	P = (I - b v transpose(v) )
 | 
			
		||||
	b = 2/v.v
 | 
			
		||||
 | 
			
		||||
	P | x |    | x | k = 0
 | 
			
		||||
	| x |    | 0 | 
 | 
			
		||||
	| x | =  | 0 |
 | 
			
		||||
	| x |    | 0 | j = 3
 | 
			
		||||
	| x |	   | x |
 | 
			
		||||
 | 
			
		||||
	These are the "Unreduced" Householder vectors.
 | 
			
		||||
 | 
			
		||||
 **/
 | 
			
		||||
/**
	from input = x;
	Compute the complex Householder vector, v, such that
	P = (I - b v transpose(v) )
	b = 2/v.v

	P | x |    | x | k = 0
	| x |    | 0 | 
	| x | =  | 0 |
	| x |    | 0 | j = 3
	| x |	   | x |

	These are the "Unreduced" Householder vectors.
	Only elements k..j of v are written; the reflection pivots on index k.
 **/
template <class T> inline void Householder_vector(DenseVector<T> input, int k, int j, DenseVector<T> &v, T &beta)
{
  int N ; Size(input,N);
  // Largest-magnitude element of input[k..j]; used to scale for stability.
  T m = *max_element(input.begin() + k, input.begin() + j + 1, cf<T> );

  if(abs(m) > 0.0){
    T alpha = 0;

    // Scale the segment by m and accumulate its squared norm.
    for(int i=k; i<j+1; i++){
      v[i] = input[i]/m;
      alpha = alpha + v[i]*conj(v[i]);
    }
    alpha = sqrt(alpha);
    beta = (T)1.0/(alpha*(alpha + abs(v[k]) ));

    // Shift the pivot away from zero in the direction of its own phase
    // (avoids cancellation in v[k] +/- alpha).
    if(abs(v[k]) > 0.0)  v[k] = v[k] + (v[k]/abs(v[k]))*alpha;
    else                 v[k] = -alpha;
  } else{
    // Segment is identically zero: null reflector.
    // NOTE(review): beta is left unmodified on this path - callers appear to
    // rely on Householder_mult skipping work when beta is 0; confirm beta is
    // initialized at the call sites.
    for(int i=k; i<j+1; i++){
      v[i] = 0.0;
    } 
  }
}
 | 
			
		||||
 | 
			
		||||
/**
 | 
			
		||||
	from input = x;
 | 
			
		||||
	Compute the complex Householder vector, v, such that
 | 
			
		||||
	P = (I - b v transpose(v) )
 | 
			
		||||
	b = 2/v.v
 | 
			
		||||
 | 
			
		||||
	Px = alpha*e_dir
 | 
			
		||||
 | 
			
		||||
	These are the "Unreduced" Householder vectors.
 | 
			
		||||
 | 
			
		||||
 **/
 | 
			
		||||
 | 
			
		||||
/**
	from input = x;
	Compute the complex Householder vector, v, such that
	P = (I - b v transpose(v) )
	b = 2/v.v

	Px = alpha*e_dir  (the reflection pivots on index `dir` instead of k)

	These are the "Unreduced" Householder vectors.
	Only elements k..j of v are written.
 **/

template <class T> inline void Householder_vector(DenseVector<T> input, int k, int j, int dir, DenseVector<T> &v, T &beta)
{
  int N = input.size();
  // Bug fix: was `cf` (bare function-template name), which cannot be deduced
  // as max_element's comparator; the sibling overload correctly passes cf<T>.
  T m = *max_element(input.begin() + k, input.begin() + j + 1, cf<T>);
  
  if(abs(m) > 0.0){
    T alpha = 0;

    // Scale by the largest-magnitude element and accumulate the squared norm.
    for(int i=k; i<j+1; i++){
      v[i] = input[i]/m;
      alpha = alpha + v[i]*conj(v[i]);
    }
    
    alpha = sqrt(alpha);
    beta = (T)1.0/(alpha*(alpha + abs(v[dir]) ));   // (T) cast for consistency with the other overload
	
    // Shift the pivot away from zero along its own phase to avoid cancellation.
    if(abs(v[dir]) > 0.0) v[dir] = v[dir] + (v[dir]/abs(v[dir]))*alpha;
    else                  v[dir] = -alpha;
  }else{
    // Zero segment: null reflector.  NOTE(review): beta is left unset here,
    // matching the sibling overload - confirm callers initialize beta.
    for(int i=k; i<j+1; i++){
      v[i] = 0.0;
    } 
  }
}
 | 
			
		||||
 | 
			
		||||
/**
 | 
			
		||||
	Compute the product PA if trans = 0
 | 
			
		||||
	AP if trans = 1
 | 
			
		||||
	P = (I - b v transpose(v) )
 | 
			
		||||
	b = 2/v.v
 | 
			
		||||
	start at element l of matrix A
 | 
			
		||||
	v is of length j - k + 1 of v are nonzero
 | 
			
		||||
 **/
 | 
			
		||||
 | 
			
		||||
/**
	Compute the product PA if trans = 0
	AP if trans = 1
	P = (I - b v transpose(v) )
	b = 2/v.v
	start at element l of matrix A
	v is of length j - k + 1 of v are nonzero
	A no-op when beta == 0 (null reflector).
 **/

template <class T> inline void Householder_mult(DenseMatrix<T> &A , DenseVector<T> v, T beta, int l, int k, int j, int trans)
{
  int N ; SizeSquare(A,N);

  if(abs(beta) > 0.0){
    // For each affected column (trans==0) / row (trans==1) p:
    //   s = beta * <v, column segment>, then subtract the rank-1 update.
    for(int p=l; p<N; p++){
      T s = 0;
      if(trans==0){
	for(int i=k;i<j+1;i++) s += conj(v[i-k])*A[i][p];
	s *= beta;
	for(int i=k;i<j+1;i++){ A[i][p] = A[i][p]-s*conj(v[i-k]);}
      } else {
	// NOTE(review): this branch conjugates v in both the dot product and
	// the update, unlike Householder_mult_tri's trans==1 branch - confirm
	// the intended convention (AP vs AP^H) for complex T.
	for(int i=k;i<j+1;i++){ s += conj(v[i-k])*A[p][i];}
	s *= beta;
	for(int i=k;i<j+1;i++){ A[p][i]=A[p][i]-s*conj(v[i-k]);}
      }
    }
  }
}
 | 
			
		||||
 | 
			
		||||
/**
 | 
			
		||||
	Compute the product PA if trans = 0
 | 
			
		||||
	AP if trans = 1
 | 
			
		||||
	P = (I - b v transpose(v) )
 | 
			
		||||
	b = 2/v.v
 | 
			
		||||
	start at element l of matrix A
 | 
			
		||||
	v is of length j - k + 1 of v are nonzero
 | 
			
		||||
	A is tridiagonal
 | 
			
		||||
 **/
 | 
			
		||||
/**
	Compute the product PA if trans = 0
	AP if trans = 1
	P = (I - b v transpose(v) )
	b = 2/v.v
	start at element l of matrix A, stop before element M
	v is of length j - k + 1 of v are nonzero
	A is tridiagonal.  No-op when beta == 0 (null reflector).
 **/
template <class T> inline void Householder_mult_tri(DenseMatrix<T> &A , DenseVector<T> v, T beta, int l, int M, int k, int j, int trans)
{
  if(abs(beta) > 0.0){

    int N ; SizeSquare(A,N);

    // Scratch for the rank-1 update, applied to A in a second pass.
    // NOTE(review): a full N x N zero-filled scratch per call is O(N^2) work
    // for an O(band) update - a candidate for optimization if this is hot.
    DenseMatrix<T> tmp; Resize(tmp,N,N); Fill(tmp,0); 

    T s;
    for(int p=l; p<M; p++){
      s = 0;
      if(trans==0){
	for(int i=k;i<j+1;i++) s = s + conj(v[i-k])*A[i][p];
      }else{
	for(int i=k;i<j+1;i++) s = s + v[i-k]*A[p][i];
      }
      s = beta*s;
      if(trans==0){
	// Consistency fix: was `tmp(i,p)` here while every other access in
	// this function uses tmp[..][..]; tmp is all-zero at this point so the
	// value is identical either way.
	for(int i=k;i<j+1;i++) tmp[i][p] = tmp[i][p] - s*v[i-k];
      }else{
	for(int i=k;i<j+1;i++) tmp[p][i] = tmp[p][i] - s*conj(v[i-k]);
      }
    }
    // Second pass: fold the accumulated update into A.
    for(int p=l; p<M; p++){
      if(trans==0){
	for(int i=k;i<j+1;i++) A[i][p] = A[i][p] + tmp[i][p];
      }else{
	for(int i=k;i<j+1;i++) A[p][i] = A[p][i] + tmp[p][i];
      }
    }
  }
}
 | 
			
		||||
}
 | 
			
		||||
#endif
 | 
			
		||||
@@ -0,0 +1,753 @@
 | 
			
		||||
    /*************************************************************************************
 | 
			
		||||
 | 
			
		||||
    Grid physics library, www.github.com/paboyle/Grid 
 | 
			
		||||
 | 
			
		||||
    Source file: ./lib/algorithms/iterative/ImplicitlyRestartedLanczos.h
 | 
			
		||||
 | 
			
		||||
    Copyright (C) 2015
 | 
			
		||||
 | 
			
		||||
Author: Peter Boyle <paboyle@ph.ed.ac.uk>
 | 
			
		||||
Author: paboyle <paboyle@ph.ed.ac.uk>
 | 
			
		||||
Author: Chulwoo Jung <chulwoo@bnl.gov>
 | 
			
		||||
Author: Christoph Lehner <clehner@bnl.gov>
 | 
			
		||||
 | 
			
		||||
    This program is free software; you can redistribute it and/or modify
 | 
			
		||||
    it under the terms of the GNU General Public License as published by
 | 
			
		||||
    the Free Software Foundation; either version 2 of the License, or
 | 
			
		||||
    (at your option) any later version.
 | 
			
		||||
 | 
			
		||||
    This program is distributed in the hope that it will be useful,
 | 
			
		||||
    but WITHOUT ANY WARRANTY; without even the implied warranty of
 | 
			
		||||
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 | 
			
		||||
    GNU General Public License for more details.
 | 
			
		||||
 | 
			
		||||
    You should have received a copy of the GNU General Public License along
 | 
			
		||||
    with this program; if not, write to the Free Software Foundation, Inc.,
 | 
			
		||||
    51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 | 
			
		||||
 | 
			
		||||
    See the full license in the file "LICENSE" in the top level distribution directory
 | 
			
		||||
    *************************************************************************************/
 | 
			
		||||
    /*  END LEGAL */
 | 
			
		||||
#ifndef GRID_BIRL_H
 | 
			
		||||
#define GRID_BIRL_H
 | 
			
		||||
 | 
			
		||||
#include <string.h> //memset
 | 
			
		||||
 | 
			
		||||
#include <zlib.h>
 | 
			
		||||
#include <sys/stat.h>
 | 
			
		||||
 | 
			
		||||
#include <Grid/algorithms/iterative/BlockImplicitlyRestartedLanczos/BlockedGrid.h>
 | 
			
		||||
#include <Grid/algorithms/iterative/BlockImplicitlyRestartedLanczos/FieldBasisVector.h>
 | 
			
		||||
#include <Grid/algorithms/iterative/BlockImplicitlyRestartedLanczos/BlockProjector.h>
 | 
			
		||||
 | 
			
		||||
namespace Grid { 
 | 
			
		||||
 | 
			
		||||
/////////////////////////////////////////////////////////////
 | 
			
		||||
// Implicitly restarted lanczos
 | 
			
		||||
/////////////////////////////////////////////////////////////
 | 
			
		||||
 | 
			
		||||
 template<class Field> 
 | 
			
		||||
 class BlockImplicitlyRestartedLanczos {
 | 
			
		||||
 | 
			
		||||
    const RealD small = 1.0e-16;
 | 
			
		||||
public:       
 | 
			
		||||
    int lock;
 | 
			
		||||
    int get;
 | 
			
		||||
    int Niter;
 | 
			
		||||
    int converged;
 | 
			
		||||
 | 
			
		||||
    int Nminres; // Minimum number of restarts; only check for convergence after
 | 
			
		||||
    int Nstop;   // Number of evecs checked for convergence
 | 
			
		||||
    int Nk;      // Number of converged sought
 | 
			
		||||
    int Np;      // Np -- Number of spare vecs in kryloc space
 | 
			
		||||
    int Nm;      // Nm -- total number of vectors
 | 
			
		||||
 | 
			
		||||
    int orth_period;
 | 
			
		||||
 | 
			
		||||
    RealD OrthoTime;
 | 
			
		||||
 | 
			
		||||
    RealD eresid, betastp;
 | 
			
		||||
    SortEigen<Field> _sort;
 | 
			
		||||
    LinearFunction<Field> &_HermOp;
 | 
			
		||||
    LinearFunction<Field> &_HermOpTest;
 | 
			
		||||
    /////////////////////////
 | 
			
		||||
    // Constructor
 | 
			
		||||
    /////////////////////////
 | 
			
		||||
 | 
			
		||||
    // Full constructor: Nstop (number of eigenvectors checked for
    // convergence) is specified independently of Nk (number of converged
    // eigenvectors sought).  Np = Nm - Nk spare Krylov vectors must be > 0.
    BlockImplicitlyRestartedLanczos(
			       LinearFunction<Field> & HermOp,
			       LinearFunction<Field> & HermOpTest,
			       int _Nstop, // number of evecs checked for convergence
			       int _Nk, // number of converged evecs sought
			       int _Nm, // total number of vectors (Nk + spares)
			       RealD _eresid, // residual tolerance on the eigenpair
			       RealD _betastp, // if beta(k) < betastp: converged
			       int _Niter, // Max iterations
			       int _Nminres, int _orth_period = 1) :
      _HermOp(HermOp),
      _HermOpTest(HermOpTest),
      Nstop(_Nstop),
      Nk(_Nk),
      Nm(_Nm),
      eresid(_eresid),
      betastp(_betastp),
      Niter(_Niter),
	Nminres(_Nminres),
	orth_period(_orth_period)
    { 
      // Spare vectors in the Krylov space; restart is impossible without them.
      Np = Nm-Nk; assert(Np>0);
    };
 | 
			
		||||
 | 
			
		||||
    // Convenience constructor: same as above but with Nstop defaulted to Nk
    // (check exactly the sought eigenvectors for convergence).
    BlockImplicitlyRestartedLanczos(
			       LinearFunction<Field> & HermOp,
			       LinearFunction<Field> & HermOpTest,
			       int _Nk, // number of converged evecs sought
			       int _Nm, // total number of vectors (Nk + spares)
			       RealD _eresid, // residual tolerance on the eigenpair
			       RealD _betastp, // if beta(k) < betastp: converged
			       int _Niter, // Max iterations
			       int _Nminres,
			       int _orth_period = 1) : 
      _HermOp(HermOp),
      _HermOpTest(HermOpTest),
      Nstop(_Nk),
      Nk(_Nk),
      Nm(_Nm),
      eresid(_eresid),
      betastp(_betastp),
      Niter(_Niter),
	Nminres(_Nminres),
	orth_period(_orth_period)
    { 
      // Spare vectors in the Krylov space; restart is impossible without them.
      Np = Nm-Nk; assert(Np>0);
    };
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
/* Saad PP. 195
 | 
			
		||||
1. Choose an initial vector v1 of 2-norm unity. Set β1 ≡ 0, v0 ≡ 0
 | 
			
		||||
2. For k = 1,2,...,m Do:
 | 
			
		||||
3. wk:=Avk−βkv_{k−1}      
 | 
			
		||||
4. αk:=(wk,vk)       // 
 | 
			
		||||
5. wk:=wk−αkvk       // wk orthog vk 
 | 
			
		||||
6. βk+1 := ∥wk∥2. If βk+1 = 0 then Stop
 | 
			
		||||
7. vk+1 := wk/βk+1
 | 
			
		||||
8. EndDo
 | 
			
		||||
 */
 | 
			
		||||
    /// One Lanczos step (Saad pp. 195, steps 3-7): apply the Hermitian
    /// operator to evec[k], orthogonalize against the two previous basis
    /// vectors, record the tridiagonal coefficients alpha (lmd[k]) and
    /// beta (lme[k]), and store the new basis vector in evec[k+1].
    /// w is a work field; Nm bounds the basis size.
    void step(std::vector<RealD>& lmd,
	      std::vector<RealD>& lme, 
	      BasisFieldVector<Field>& evec,
	      Field& w,int Nm,int k)
    {
      assert( k< Nm );

      GridStopWatch gsw_op,gsw_o;   // timers: operator application / orthogonalization

      Field& evec_k = evec[k];

      gsw_op.Start();
      _HermOp(evec_k,w);            // 3. wk := A vk
      gsw_op.Stop();

      if(k>0){
	w -= lme[k-1] * evec[k-1];  // 3. ... - beta_k v_{k-1}
      }    

      ComplexD zalph = innerProduct(evec_k,w); // 4. alpha_k := (wk,vk)
      RealD     alph = real(zalph);

      w = w - alph * evec_k;// 5. wk := wk - alpha_k vk  (orthogonalize against vk)

      RealD beta = normalise(w); // 6. beta_{k+1} := ||wk||_2. If beta_{k+1} = 0 then Stop
                                 // 7. v_{k+1} := wk/beta_{k+1}

      std::cout<<GridLogMessage << "alpha[" << k << "] = " << zalph << " beta[" << k << "] = "<<beta<<std::endl;
      // Tiny beta signals an invariant subspace; only logged here, the
      // restart logic elsewhere decides what to do about it.
      const RealD tiny = 1.0e-20;
      if ( beta < tiny ) { 
	std::cout<<GridLogMessage << " beta is tiny "<<beta<<std::endl;
     }
      lmd[k] = alph;    // diagonal of the tridiagonal matrix
      lme[k]  = beta;   // subdiagonal

      gsw_o.Start();
      // Periodic full re-orthogonalization against the basis built so far
      // (every orth_period steps) to fight round-off loss of orthogonality.
      if (k>0 && k % orth_period == 0) { 
	orthogonalize(w,evec,k); // orthonormalise
      }
      gsw_o.Stop();

      if(k < Nm-1) { 
	evec[k+1] = w;
      }

      std::cout << GridLogMessage << "Timing: operator=" << gsw_op.Elapsed() <<
	" orth=" << gsw_o.Elapsed() << std::endl;

    }
 | 
			
		||||
 | 
			
		||||
    /// One shifted-QR sweep on the symmetric tridiagonal matrix defined by
    /// diagonal lmd and subdiagonal lme, restricted to rows [kmin-1, kmax).
    /// Dsh is the shift.  The accumulated rotations are folded into the
    /// Nk x Nm row-major transform Qt (element (i,k) at Qt[i + Nm*k]).
    /// The sweep creates a bulge with one initial rotation and then chases
    /// it down the band with further Givens rotations.
    void qr_decomp(std::vector<RealD>& lmd,
		   std::vector<RealD>& lme,
		   int Nk,
		   int Nm,
		   std::vector<RealD>& Qt,
		   RealD Dsh, 
		   int kmin,
		   int kmax)
    {
      int k = kmin-1;
      RealD x;   // bulge element carried down the band

      // Initial rotation chosen from (lmd[k]-Dsh, lme[k]); hypot avoids
      // overflow/underflow in the normalization.
      RealD Fden = 1.0/hypot(lmd[k]-Dsh,lme[k]);
      RealD c = ( lmd[k] -Dsh) *Fden;
      RealD s = -lme[k] *Fden;
      
      RealD tmpa1 = lmd[k];
      RealD tmpa2 = lmd[k+1];
      RealD tmpb  = lme[k];

      // Similarity transform of the leading 2x2 block by the rotation.
      lmd[k]   = c*c*tmpa1 +s*s*tmpa2 -2.0*c*s*tmpb;
      lmd[k+1] = s*s*tmpa1 +c*c*tmpa2 +2.0*c*s*tmpb;
      lme[k]   = c*s*(tmpa1-tmpa2) +(c*c-s*s)*tmpb;
      x        =-s*lme[k+1];   // the bulge: fill-in just outside the band
      lme[k+1] = c*lme[k+1];
      
      // Fold the rotation into the accumulated transform Qt.
      for(int i=0; i<Nk; ++i){
	RealD Qtmp1 = Qt[i+Nm*k  ];
	RealD Qtmp2 = Qt[i+Nm*(k+1)];
	Qt[i+Nm*k    ] = c*Qtmp1 - s*Qtmp2;
	Qt[i+Nm*(k+1)] = s*Qtmp1 + c*Qtmp2; 
      }

      // Givens transformations: chase the bulge x down rows kmin..kmax-2.
      for(int k = kmin; k < kmax-1; ++k){

	// Rotation annihilating the bulge against the subdiagonal lme[k-1].
	RealD Fden = 1.0/hypot(x,lme[k-1]);
	RealD c = lme[k-1]*Fden;
	RealD s = - x*Fden;
	
	RealD tmpa1 = lmd[k];
	RealD tmpa2 = lmd[k+1];
	RealD tmpb  = lme[k];

	lmd[k]   = c*c*tmpa1 +s*s*tmpa2 -2.0*c*s*tmpb;
	lmd[k+1] = s*s*tmpa1 +c*c*tmpa2 +2.0*c*s*tmpb;
	lme[k]   = c*s*(tmpa1-tmpa2) +(c*c-s*s)*tmpb;
	lme[k-1] = c*lme[k-1] -s*x;

	// New bulge appears below, except at the last row of the sweep.
	if(k != kmax-2){
	  x = -s*lme[k+1];
	  lme[k+1] = c*lme[k+1];
	}

	// Accumulate this rotation into Qt as well.
	for(int i=0; i<Nk; ++i){
	  RealD Qtmp1 = Qt[i+Nm*k    ];
	  RealD Qtmp2 = Qt[i+Nm*(k+1)];
	  Qt[i+Nm*k    ] = c*Qtmp1 -s*Qtmp2;
	  Qt[i+Nm*(k+1)] = s*Qtmp1 +c*Qtmp2;
	}
      }
    }
 | 
			
		||||
 | 
			
		||||
#ifdef USE_LAPACK_IRL
 | 
			
		||||
#define LAPACK_INT int
 | 
			
		||||
//long long
 | 
			
		||||
    void diagonalize_lapack(std::vector<RealD>& lmd,
 | 
			
		||||
			    std::vector<RealD>& lme, 
 | 
			
		||||
			    int N1,
 | 
			
		||||
			    int N2,
 | 
			
		||||
			    std::vector<RealD>& Qt,
 | 
			
		||||
			    GridBase *grid){
 | 
			
		||||
 | 
			
		||||
      std::cout << GridLogMessage << "diagonalize_lapack start\n";
 | 
			
		||||
      GridStopWatch gsw;
 | 
			
		||||
 | 
			
		||||
      const int size = Nm;
 | 
			
		||||
      //  tevals.resize(size);
 | 
			
		||||
      //  tevecs.resize(size);
 | 
			
		||||
      LAPACK_INT NN = N1;
 | 
			
		||||
      std::vector<double> evals_tmp(NN);
 | 
			
		||||
      std::vector<double> evec_tmp(NN*NN);
 | 
			
		||||
      memset(&evec_tmp[0],0,sizeof(double)*NN*NN);
 | 
			
		||||
      //  double AA[NN][NN];
 | 
			
		||||
      std::vector<double> DD(NN);
 | 
			
		||||
      std::vector<double> EE(NN);
 | 
			
		||||
      for (int i = 0; i< NN; i++)
 | 
			
		||||
	for (int j = i - 1; j <= i + 1; j++)
 | 
			
		||||
	  if ( j < NN && j >= 0 ) {
 | 
			
		||||
	    if (i==j) DD[i] = lmd[i];
 | 
			
		||||
	    if (i==j) evals_tmp[i] = lmd[i];
 | 
			
		||||
	    if (j==(i-1)) EE[j] = lme[j];
 | 
			
		||||
	  }
 | 
			
		||||
      LAPACK_INT evals_found;
 | 
			
		||||
      LAPACK_INT lwork = ( (18*NN) > (1+4*NN+NN*NN)? (18*NN):(1+4*NN+NN*NN)) ;
 | 
			
		||||
      LAPACK_INT liwork =  3+NN*10 ;
 | 
			
		||||
      std::vector<LAPACK_INT> iwork(liwork);
 | 
			
		||||
      std::vector<double> work(lwork);
 | 
			
		||||
      std::vector<LAPACK_INT> isuppz(2*NN);
 | 
			
		||||
      char jobz = 'V'; // calculate evals & evecs
 | 
			
		||||
      char range = 'I'; // calculate all evals
 | 
			
		||||
      //    char range = 'A'; // calculate all evals
 | 
			
		||||
      char uplo = 'U'; // refer to upper half of original matrix
 | 
			
		||||
      char compz = 'I'; // Compute eigenvectors of tridiagonal matrix
 | 
			
		||||
      std::vector<int> ifail(NN);
 | 
			
		||||
      LAPACK_INT info;
 | 
			
		||||
      //  int total = QMP_get_number_of_nodes();
 | 
			
		||||
      //  int node = QMP_get_node_number();
 | 
			
		||||
      //  GridBase *grid = evec[0]._grid;
 | 
			
		||||
      int total = grid->_Nprocessors;
 | 
			
		||||
      int node = grid->_processor;
 | 
			
		||||
      int interval = (NN/total)+1;
 | 
			
		||||
      double vl = 0.0, vu = 0.0;
 | 
			
		||||
      LAPACK_INT il = interval*node+1 , iu = interval*(node+1);
 | 
			
		||||
      if (iu > NN)  iu=NN;
 | 
			
		||||
      double tol = 0.0;
 | 
			
		||||
      if (1) {
 | 
			
		||||
	memset(&evals_tmp[0],0,sizeof(double)*NN);
 | 
			
		||||
	if ( il <= NN){
 | 
			
		||||
	  std::cout << GridLogMessage << "dstegr started" << std::endl; 
 | 
			
		||||
	  gsw.Start();
 | 
			
		||||
	  dstegr(&jobz, &range, &NN,
 | 
			
		||||
		 (double*)&DD[0], (double*)&EE[0],
 | 
			
		||||
		 &vl, &vu, &il, &iu, // these four are ignored if second parameteris 'A'
 | 
			
		||||
		 &tol, // tolerance
 | 
			
		||||
		 &evals_found, &evals_tmp[0], (double*)&evec_tmp[0], &NN,
 | 
			
		||||
		 &isuppz[0],
 | 
			
		||||
		 &work[0], &lwork, &iwork[0], &liwork,
 | 
			
		||||
		 &info);
 | 
			
		||||
	  gsw.Stop();
 | 
			
		||||
	  std::cout << GridLogMessage << "dstegr completed in " << gsw.Elapsed() << std::endl;
 | 
			
		||||
	  for (int i = iu-1; i>= il-1; i--){
 | 
			
		||||
	    evals_tmp[i] = evals_tmp[i - (il-1)];
 | 
			
		||||
	    if (il>1) evals_tmp[i-(il-1)]=0.;
 | 
			
		||||
	    for (int j = 0; j< NN; j++){
 | 
			
		||||
	      evec_tmp[i*NN + j] = evec_tmp[(i - (il-1)) * NN + j];
 | 
			
		||||
	      if (il>1) evec_tmp[(i-(il-1)) * NN + j]=0.;
 | 
			
		||||
	    }
 | 
			
		||||
	  }
 | 
			
		||||
	}
 | 
			
		||||
	{
 | 
			
		||||
	  //        QMP_sum_double_array(evals_tmp,NN);
 | 
			
		||||
	  //        QMP_sum_double_array((double *)evec_tmp,NN*NN);
 | 
			
		||||
	  grid->GlobalSumVector(&evals_tmp[0],NN);
 | 
			
		||||
	  grid->GlobalSumVector(&evec_tmp[0],NN*NN);
 | 
			
		||||
	}
 | 
			
		||||
      } 
 | 
			
		||||
      // cheating a bit. It is better to sort instead of just reversing it, but the document of the routine says evals are sorted in increasing order. qr gives evals in decreasing order.
 | 
			
		||||
      for(int i=0;i<NN;i++){
 | 
			
		||||
	for(int j=0;j<NN;j++)
 | 
			
		||||
	  Qt[(NN-1-i)*N2+j]=evec_tmp[i*NN + j];
 | 
			
		||||
	lmd [NN-1-i]=evals_tmp[i];
 | 
			
		||||
      }
 | 
			
		||||
 | 
			
		||||
      std::cout << GridLogMessage << "diagonalize_lapack complete\n";
 | 
			
		||||
    }
 | 
			
		||||
#undef LAPACK_INT 
 | 
			
		||||
#endif
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
    void diagonalize(std::vector<RealD>& lmd,
 | 
			
		||||
		     std::vector<RealD>& lme, 
 | 
			
		||||
		     int N2,
 | 
			
		||||
		     int N1,
 | 
			
		||||
		     std::vector<RealD>& Qt,
 | 
			
		||||
		     GridBase *grid)
 | 
			
		||||
    {
 | 
			
		||||
 | 
			
		||||
#ifdef USE_LAPACK_IRL
 | 
			
		||||
    const int check_lapack=0; // just use lapack if 0, check against lapack if 1
 | 
			
		||||
 | 
			
		||||
    if(!check_lapack)
 | 
			
		||||
	return diagonalize_lapack(lmd,lme,N2,N1,Qt,grid);
 | 
			
		||||
 | 
			
		||||
	std::vector <RealD> lmd2(N1);
 | 
			
		||||
	std::vector <RealD> lme2(N1);
 | 
			
		||||
	std::vector<RealD> Qt2(N1*N1);
 | 
			
		||||
         for(int k=0; k<N1; ++k){
 | 
			
		||||
	    lmd2[k] = lmd[k];
 | 
			
		||||
	    lme2[k] = lme[k];
 | 
			
		||||
	  }
 | 
			
		||||
         for(int k=0; k<N1*N1; ++k)
 | 
			
		||||
	Qt2[k] = Qt[k];
 | 
			
		||||
 | 
			
		||||
//	diagonalize_lapack(lmd2,lme2,Nm2,Nm,Qt,grid);
 | 
			
		||||
#endif
 | 
			
		||||
 | 
			
		||||
      int Niter = 10000*N1;
 | 
			
		||||
      int kmin = 1;
 | 
			
		||||
      int kmax = N2;
 | 
			
		||||
      // (this should be more sophisticated)
 | 
			
		||||
 | 
			
		||||
      for(int iter=0; ; ++iter){
 | 
			
		||||
      if ( (iter+1)%(100*N1)==0) 
 | 
			
		||||
      std::cout<<GridLogMessage << "[QL method] Not converged - iteration "<<iter+1<<"\n";
 | 
			
		||||
 | 
			
		||||
	// determination of 2x2 leading submatrix
 | 
			
		||||
	RealD dsub = lmd[kmax-1]-lmd[kmax-2];
 | 
			
		||||
	RealD dd = sqrt(dsub*dsub + 4.0*lme[kmax-2]*lme[kmax-2]);
 | 
			
		||||
	RealD Dsh = 0.5*(lmd[kmax-2]+lmd[kmax-1] +dd*(dsub/fabs(dsub)));
 | 
			
		||||
	// (Dsh: shift)
 | 
			
		||||
	
 | 
			
		||||
	// transformation
 | 
			
		||||
	qr_decomp(lmd,lme,N2,N1,Qt,Dsh,kmin,kmax);
 | 
			
		||||
	
 | 
			
		||||
	// Convergence criterion (redef of kmin and kamx)
 | 
			
		||||
	for(int j=kmax-1; j>= kmin; --j){
 | 
			
		||||
	  RealD dds = fabs(lmd[j-1])+fabs(lmd[j]);
 | 
			
		||||
	  if(fabs(lme[j-1])+dds > dds){
 | 
			
		||||
	    kmax = j+1;
 | 
			
		||||
	    goto continued;
 | 
			
		||||
	  }
 | 
			
		||||
	}
 | 
			
		||||
	Niter = iter;
 | 
			
		||||
#ifdef USE_LAPACK_IRL
 | 
			
		||||
    if(check_lapack){
 | 
			
		||||
	const double SMALL=1e-8;
 | 
			
		||||
	diagonalize_lapack(lmd2,lme2,N2,N1,Qt2,grid);
 | 
			
		||||
	std::vector <RealD> lmd3(N2);
 | 
			
		||||
         for(int k=0; k<N2; ++k) lmd3[k]=lmd[k];
 | 
			
		||||
        _sort.push(lmd3,N2);
 | 
			
		||||
        _sort.push(lmd2,N2);
 | 
			
		||||
         for(int k=0; k<N2; ++k){
 | 
			
		||||
	    if (fabs(lmd2[k] - lmd3[k]) >SMALL)  std::cout<<GridLogMessage <<"lmd(qr) lmd(lapack) "<< k << ": " << lmd2[k] <<" "<< lmd3[k] <<std::endl;
 | 
			
		||||
//	    if (fabs(lme2[k] - lme[k]) >SMALL)  std::cout<<GridLogMessage <<"lme(qr)-lme(lapack) "<< k << ": " << lme2[k] - lme[k] <<std::endl;
 | 
			
		||||
	  }
 | 
			
		||||
         for(int k=0; k<N1*N1; ++k){
 | 
			
		||||
//	    if (fabs(Qt2[k] - Qt[k]) >SMALL)  std::cout<<GridLogMessage <<"Qt(qr)-Qt(lapack) "<< k << ": " << Qt2[k] - Qt[k] <<std::endl;
 | 
			
		||||
	}
 | 
			
		||||
    }
 | 
			
		||||
#endif
 | 
			
		||||
	return;
 | 
			
		||||
 | 
			
		||||
      continued:
 | 
			
		||||
	for(int j=0; j<kmax-1; ++j){
 | 
			
		||||
	  RealD dds = fabs(lmd[j])+fabs(lmd[j+1]);
 | 
			
		||||
	  if(fabs(lme[j])+dds > dds){
 | 
			
		||||
	    kmin = j+1;
 | 
			
		||||
	    break;
 | 
			
		||||
	  }
 | 
			
		||||
	}
 | 
			
		||||
      }
 | 
			
		||||
      std::cout<<GridLogMessage << "[QL method] Error - Too many iteration: "<<Niter<<"\n";
 | 
			
		||||
      abort();
 | 
			
		||||
    }
 | 
			
		||||
 | 
			
		||||
#if 1
 | 
			
		||||
    template<typename T>
 | 
			
		||||
    static RealD normalise(T& v) 
 | 
			
		||||
    {
 | 
			
		||||
      RealD nn = norm2(v);
 | 
			
		||||
      nn = sqrt(nn);
 | 
			
		||||
      v = v * (1.0/nn);
 | 
			
		||||
      return nn;
 | 
			
		||||
    }
 | 
			
		||||
 | 
			
		||||
    void orthogonalize(Field& w,
 | 
			
		||||
		       BasisFieldVector<Field>& evec,
 | 
			
		||||
		       int k)
 | 
			
		||||
    {
 | 
			
		||||
      double t0=-usecond()/1e6;
 | 
			
		||||
 | 
			
		||||
      evec.orthogonalize(w,k);
 | 
			
		||||
 | 
			
		||||
      normalise(w);
 | 
			
		||||
      t0+=usecond()/1e6;
 | 
			
		||||
      OrthoTime +=t0;
 | 
			
		||||
    }
 | 
			
		||||
 | 
			
		||||
    void setUnit_Qt(int Nm, std::vector<RealD> &Qt) {
 | 
			
		||||
      for(int i=0; i<Qt.size(); ++i) Qt[i] = 0.0;
 | 
			
		||||
      for(int k=0; k<Nm; ++k) Qt[k + k*Nm] = 1.0;
 | 
			
		||||
    }
 | 
			
		||||
 | 
			
		||||
/* Rudy Arthur's thesis pp.137
 | 
			
		||||
------------------------
 | 
			
		||||
Require: M > K P = M − K †
 | 
			
		||||
Compute the factorization AVM = VM HM + fM eM 
 | 
			
		||||
repeat
 | 
			
		||||
  Q=I
 | 
			
		||||
  for i = 1,...,P do
 | 
			
		||||
    QiRi =HM −θiI Q = QQi
 | 
			
		||||
    H M = Q †i H M Q i
 | 
			
		||||
  end for
 | 
			
		||||
  βK =HM(K+1,K) σK =Q(M,K)
 | 
			
		||||
  r=vK+1βK +rσK
 | 
			
		||||
  VK =VM(1:M)Q(1:M,1:K)
 | 
			
		||||
  HK =HM(1:K,1:K)
 | 
			
		||||
  →AVK =VKHK +fKe†K † Extend to an M = K + P step factorization AVM = VMHM + fMeM
 | 
			
		||||
until convergence
 | 
			
		||||
*/
 | 
			
		||||
 | 
			
		||||
    void calc(std::vector<RealD>& eval,
 | 
			
		||||
	      BasisFieldVector<Field>& evec,
 | 
			
		||||
	      const Field& src,
 | 
			
		||||
	      int& Nconv,
 | 
			
		||||
	      bool reverse,
 | 
			
		||||
	      int SkipTest)
 | 
			
		||||
      {
 | 
			
		||||
 | 
			
		||||
	GridBase *grid = evec._v[0]._grid;//evec.get(0 + evec_offset)._grid;
 | 
			
		||||
	assert(grid == src._grid);
 | 
			
		||||
 | 
			
		||||
	std::cout<<GridLogMessage << " -- Nk = " << Nk << " Np = "<< Np << std::endl;
 | 
			
		||||
	std::cout<<GridLogMessage << " -- Nm = " << Nm << std::endl;
 | 
			
		||||
	std::cout<<GridLogMessage << " -- size of eval   = " << eval.size() << std::endl;
 | 
			
		||||
	std::cout<<GridLogMessage << " -- size of evec  = " << evec.size() << std::endl;
 | 
			
		||||
	
 | 
			
		||||
	assert(Nm <= evec.size() && Nm <= eval.size());
 | 
			
		||||
 | 
			
		||||
	// quickly get an idea of the largest eigenvalue to more properly normalize the residuum
 | 
			
		||||
	RealD evalMaxApprox = 0.0;
 | 
			
		||||
	{
 | 
			
		||||
	  auto src_n = src;
 | 
			
		||||
	  auto tmp = src;
 | 
			
		||||
	  const int _MAX_ITER_IRL_MEVAPP_ = 50;
 | 
			
		||||
	  for (int i=0;i<_MAX_ITER_IRL_MEVAPP_;i++) {
 | 
			
		||||
	    _HermOpTest(src_n,tmp);
 | 
			
		||||
	    RealD vnum = real(innerProduct(src_n,tmp)); // HermOp.
 | 
			
		||||
	    RealD vden = norm2(src_n);
 | 
			
		||||
	    RealD na = vnum/vden;
 | 
			
		||||
	    if (fabs(evalMaxApprox/na - 1.0) < 0.05)
 | 
			
		||||
	      i=_MAX_ITER_IRL_MEVAPP_;
 | 
			
		||||
	    evalMaxApprox = na;
 | 
			
		||||
	    std::cout << GridLogMessage << " Approximation of largest eigenvalue: " << evalMaxApprox << std::endl;
 | 
			
		||||
	    src_n = tmp;
 | 
			
		||||
	  }
 | 
			
		||||
	}
 | 
			
		||||
	
 | 
			
		||||
	std::vector<RealD> lme(Nm);  
 | 
			
		||||
	std::vector<RealD> lme2(Nm);
 | 
			
		||||
	std::vector<RealD> eval2(Nm);
 | 
			
		||||
	std::vector<RealD> eval2_copy(Nm);
 | 
			
		||||
	std::vector<RealD> Qt(Nm*Nm);
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
	Field f(grid);
 | 
			
		||||
	Field v(grid);
 | 
			
		||||
  
 | 
			
		||||
	int k1 = 1;
 | 
			
		||||
	int k2 = Nk;
 | 
			
		||||
 | 
			
		||||
	Nconv = 0;
 | 
			
		||||
 | 
			
		||||
	RealD beta_k;
 | 
			
		||||
  
 | 
			
		||||
	// Set initial vector
 | 
			
		||||
	evec[0] = src;
 | 
			
		||||
	normalise(evec[0]);
 | 
			
		||||
	std:: cout<<GridLogMessage <<"norm2(evec[0])= " << norm2(evec[0])<<std::endl;
 | 
			
		||||
	
 | 
			
		||||
	// Initial Nk steps
 | 
			
		||||
	OrthoTime=0.;
 | 
			
		||||
	double t0=usecond()/1e6;
 | 
			
		||||
	for(int k=0; k<Nk; ++k) step(eval,lme,evec,f,Nm,k);
 | 
			
		||||
	double t1=usecond()/1e6;
 | 
			
		||||
	std::cout<<GridLogMessage <<"IRL::Initial steps: "<<t1-t0<< "seconds"<<std::endl; t0=t1;
 | 
			
		||||
	std::cout<<GridLogMessage <<"IRL::Initial steps:OrthoTime "<<OrthoTime<< "seconds"<<std::endl;
 | 
			
		||||
	t1=usecond()/1e6;
 | 
			
		||||
 | 
			
		||||
	// Restarting loop begins
 | 
			
		||||
	for(int iter = 0; iter<Niter; ++iter){
 | 
			
		||||
	  
 | 
			
		||||
	  std::cout<<GridLogMessage<<"\n Restart iteration = "<< iter << std::endl;
 | 
			
		||||
	  
 | 
			
		||||
	  // 
 | 
			
		||||
	  // Rudy does a sort first which looks very different. Getting fed up with sorting out the algo defs.
 | 
			
		||||
	  // We loop over 
 | 
			
		||||
	  //
 | 
			
		||||
	  OrthoTime=0.;
 | 
			
		||||
	  for(int k=Nk; k<Nm; ++k) step(eval,lme,evec,f,Nm,k);
 | 
			
		||||
	  t1=usecond()/1e6;
 | 
			
		||||
	  std::cout<<GridLogMessage <<"IRL:: "<<Np <<" steps: "<<t1-t0<< "seconds"<<std::endl; t0=t1;
 | 
			
		||||
	  std::cout<<GridLogMessage <<"IRL::Initial steps:OrthoTime "<<OrthoTime<< "seconds"<<std::endl;
 | 
			
		||||
	  f *= lme[Nm-1];
 | 
			
		||||
	  
 | 
			
		||||
	  t1=usecond()/1e6;
 | 
			
		||||
 | 
			
		||||
	  
 | 
			
		||||
	  // getting eigenvalues
 | 
			
		||||
	  for(int k=0; k<Nm; ++k){
 | 
			
		||||
	    eval2[k] = eval[k+k1-1];
 | 
			
		||||
	    lme2[k] = lme[k+k1-1];
 | 
			
		||||
	  }
 | 
			
		||||
	  setUnit_Qt(Nm,Qt);
 | 
			
		||||
	  diagonalize(eval2,lme2,Nm,Nm,Qt,grid);
 | 
			
		||||
	  t1=usecond()/1e6;
 | 
			
		||||
	  std::cout<<GridLogMessage <<"IRL:: diagonalize: "<<t1-t0<< "seconds"<<std::endl; t0=t1;
 | 
			
		||||
	  
 | 
			
		||||
	  // sorting
 | 
			
		||||
	  eval2_copy = eval2;
 | 
			
		||||
 | 
			
		||||
	  _sort.push(eval2,Nm);
 | 
			
		||||
	  t1=usecond()/1e6;
 | 
			
		||||
	  std::cout<<GridLogMessage <<"IRL:: eval sorting: "<<t1-t0<< "seconds"<<std::endl; t0=t1;
 | 
			
		||||
	  
 | 
			
		||||
	  // Implicitly shifted QR transformations
 | 
			
		||||
	  setUnit_Qt(Nm,Qt);
 | 
			
		||||
	  for(int ip=0; ip<k2; ++ip){
 | 
			
		||||
	    std::cout<<GridLogMessage << "eval "<< ip << " "<< eval2[ip] << std::endl;
 | 
			
		||||
	  }
 | 
			
		||||
 | 
			
		||||
	  for(int ip=k2; ip<Nm; ++ip){ 
 | 
			
		||||
	    std::cout<<GridLogMessage << "qr_decomp "<< ip << " "<< eval2[ip] << std::endl;
 | 
			
		||||
	    qr_decomp(eval,lme,Nm,Nm,Qt,eval2[ip],k1,Nm);
 | 
			
		||||
	    
 | 
			
		||||
	  }
 | 
			
		||||
	  t1=usecond()/1e6;
 | 
			
		||||
	  std::cout<<GridLogMessage <<"IRL::qr_decomp: "<<t1-t0<< "seconds"<<std::endl; t0=t1;
 | 
			
		||||
	  assert(k2<Nm);
 | 
			
		||||
	  
 | 
			
		||||
 | 
			
		||||
	  assert(k2<Nm);
 | 
			
		||||
	  assert(k1>0);
 | 
			
		||||
	  evec.rotate(Qt,k1-1,k2+1,0,Nm,Nm);
 | 
			
		||||
	  
 | 
			
		||||
	  t1=usecond()/1e6;
 | 
			
		||||
	  std::cout<<GridLogMessage <<"IRL::QR rotation: "<<t1-t0<< "seconds"<<std::endl; t0=t1;
 | 
			
		||||
	  fflush(stdout);
 | 
			
		||||
	  
 | 
			
		||||
	  // Compressed vector f and beta(k2)
 | 
			
		||||
	  f *= Qt[Nm-1+Nm*(k2-1)];
 | 
			
		||||
	  f += lme[k2-1] * evec[k2];
 | 
			
		||||
	  beta_k = norm2(f);
 | 
			
		||||
	  beta_k = sqrt(beta_k);
 | 
			
		||||
	  std::cout<<GridLogMessage<<" beta(k) = "<<beta_k<<std::endl;
 | 
			
		||||
	  
 | 
			
		||||
	  RealD betar = 1.0/beta_k;
 | 
			
		||||
	  evec[k2] = betar * f;
 | 
			
		||||
	  lme[k2-1] = beta_k;
 | 
			
		||||
	  
 | 
			
		||||
	  // Convergence test
 | 
			
		||||
	  for(int k=0; k<Nm; ++k){    
 | 
			
		||||
	    eval2[k] = eval[k];
 | 
			
		||||
	    lme2[k] = lme[k];
 | 
			
		||||
 | 
			
		||||
	    std::cout<<GridLogMessage << "eval2[" << k << "] = " << eval2[k] << std::endl;
 | 
			
		||||
	  }
 | 
			
		||||
	  setUnit_Qt(Nm,Qt);
 | 
			
		||||
	  diagonalize(eval2,lme2,Nk,Nm,Qt,grid);
 | 
			
		||||
	  t1=usecond()/1e6;
 | 
			
		||||
	  std::cout<<GridLogMessage <<"IRL::diagonalize: "<<t1-t0<< "seconds"<<std::endl; t0=t1;
 | 
			
		||||
	  
 | 
			
		||||
	  
 | 
			
		||||
	  Nconv = 0;
 | 
			
		||||
	  
 | 
			
		||||
	  if (iter >= Nminres) {
 | 
			
		||||
	    std::cout << GridLogMessage << "Rotation to test convergence " << std::endl;
 | 
			
		||||
	    
 | 
			
		||||
	    Field ev0_orig(grid);
 | 
			
		||||
	    ev0_orig = evec[0];
 | 
			
		||||
	    
 | 
			
		||||
	    evec.rotate(Qt,0,Nk,0,Nk,Nm);
 | 
			
		||||
	    
 | 
			
		||||
	    {
 | 
			
		||||
	      std::cout << GridLogMessage << "Test convergence" << std::endl;
 | 
			
		||||
	      Field B(grid);
 | 
			
		||||
	      
 | 
			
		||||
	      for(int j = 0; j<Nk; j+=SkipTest){
 | 
			
		||||
		B=evec[j];
 | 
			
		||||
		//std::cout << "Checkerboard: " << evec[j].checkerboard << std::endl; 
 | 
			
		||||
		B.checkerboard = evec[0].checkerboard;
 | 
			
		||||
 | 
			
		||||
		_HermOpTest(B,v);
 | 
			
		||||
		
 | 
			
		||||
		RealD vnum = real(innerProduct(B,v)); // HermOp.
 | 
			
		||||
		RealD vden = norm2(B);
 | 
			
		||||
		RealD vv0 = norm2(v);
 | 
			
		||||
		eval2[j] = vnum/vden;
 | 
			
		||||
		v -= eval2[j]*B;
 | 
			
		||||
		RealD vv = norm2(v) / ::pow(evalMaxApprox,2.0);
 | 
			
		||||
		std::cout.precision(13);
 | 
			
		||||
		std::cout<<GridLogMessage << "[" << std::setw(3)<< std::setiosflags(std::ios_base::right) <<j<<"] "
 | 
			
		||||
			 <<"eval = "<<std::setw(25)<< std::setiosflags(std::ios_base::left)<< eval2[j] << " (" << eval2_copy[j] << ")"
 | 
			
		||||
			 <<" |H B[i] - eval[i]B[i]|^2 / evalMaxApprox^2 " << std::setw(25)<< std::setiosflags(std::ios_base::right)<< vv
 | 
			
		||||
			 <<" "<< vnum/(sqrt(vden)*sqrt(vv0))
 | 
			
		||||
			 << " norm(B["<<j<<"])="<< vden <<std::endl;
 | 
			
		||||
		
 | 
			
		||||
		// change the criteria as evals are supposed to be sorted, all evals smaller(larger) than Nstop should have converged
 | 
			
		||||
		if((vv<eresid*eresid) && (j == Nconv) ){
 | 
			
		||||
		  Nconv+=SkipTest;
 | 
			
		||||
		}
 | 
			
		||||
	      }
 | 
			
		||||
	      
 | 
			
		||||
	      // test if we converged, if so, terminate
 | 
			
		||||
	      t1=usecond()/1e6;
 | 
			
		||||
	      std::cout<<GridLogMessage <<"IRL::convergence testing: "<<t1-t0<< "seconds"<<std::endl; t0=t1;
 | 
			
		||||
	      
 | 
			
		||||
	      std::cout<<GridLogMessage<<" #modes converged: "<<Nconv<<std::endl;
 | 
			
		||||
	      
 | 
			
		||||
	      if( Nconv>=Nstop || beta_k < betastp){
 | 
			
		||||
		goto converged;
 | 
			
		||||
	      }
 | 
			
		||||
	      
 | 
			
		||||
	      std::cout << GridLogMessage << "Rotate back" << std::endl;
 | 
			
		||||
	      //B[j] +=Qt[k+_Nm*j] * _v[k]._odata[ss];
 | 
			
		||||
	      {
 | 
			
		||||
		Eigen::MatrixXd qm = Eigen::MatrixXd::Zero(Nk,Nk);
 | 
			
		||||
		for (int k=0;k<Nk;k++)
 | 
			
		||||
		  for (int j=0;j<Nk;j++)
 | 
			
		||||
		    qm(j,k) = Qt[k+Nm*j];
 | 
			
		||||
		GridStopWatch timeInv;
 | 
			
		||||
		timeInv.Start();
 | 
			
		||||
		Eigen::MatrixXd qmI = qm.inverse();
 | 
			
		||||
		timeInv.Stop();
 | 
			
		||||
		std::vector<RealD> QtI(Nm*Nm);
 | 
			
		||||
		for (int k=0;k<Nk;k++)
 | 
			
		||||
		  for (int j=0;j<Nk;j++)
 | 
			
		||||
		    QtI[k+Nm*j] = qmI(j,k);
 | 
			
		||||
		
 | 
			
		||||
		RealD res_check_rotate_inverse = (qm*qmI - Eigen::MatrixXd::Identity(Nk,Nk)).norm(); // sqrt( |X|^2 )
 | 
			
		||||
		assert(res_check_rotate_inverse < 1e-7);
 | 
			
		||||
		evec.rotate(QtI,0,Nk,0,Nk,Nm);
 | 
			
		||||
		
 | 
			
		||||
		axpy(ev0_orig,-1.0,evec[0],ev0_orig);
 | 
			
		||||
		std::cout << GridLogMessage << "Rotation done (in " << timeInv.Elapsed() << " = " << timeInv.useconds() << " us" <<
 | 
			
		||||
		  ", error = " << res_check_rotate_inverse << 
 | 
			
		||||
		  "); | evec[0] - evec[0]_orig | = " << ::sqrt(norm2(ev0_orig)) << std::endl;
 | 
			
		||||
	      }
 | 
			
		||||
	    }
 | 
			
		||||
	  } else {
 | 
			
		||||
	    std::cout << GridLogMessage << "iter < Nminres: do not yet test for convergence\n";
 | 
			
		||||
	  } // end of iter loop
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	std::cout<<GridLogMessage<<"\n NOT converged.\n";
 | 
			
		||||
	abort();
 | 
			
		||||
	
 | 
			
		||||
      converged:
 | 
			
		||||
 | 
			
		||||
	if (SkipTest == 1) {
 | 
			
		||||
	  eval = eval2;
 | 
			
		||||
	} else {
 | 
			
		||||
 | 
			
		||||
	  // test quickly
 | 
			
		||||
	  for (int j=0;j<Nstop;j+=SkipTest) {
 | 
			
		||||
	    std::cout<<GridLogMessage << "Eigenvalue[" << j << "] = " << eval2[j] << " (" << eval2_copy[j] << ")" << std::endl;
 | 
			
		||||
	  }
 | 
			
		||||
 | 
			
		||||
	  eval2_copy.resize(eval2.size());
 | 
			
		||||
	  eval = eval2_copy;
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	evec.sortInPlace(eval,reverse);
 | 
			
		||||
 | 
			
		||||
	{
 | 
			
		||||
	  
 | 
			
		||||
	 // test
 | 
			
		||||
	 for (int j=0;j<Nstop;j++) {
 | 
			
		||||
	   std::cout<<GridLogMessage << " |e[" << j << "]|^2 = " << norm2(evec[j]) << std::endl;
 | 
			
		||||
	 }
 | 
			
		||||
       }
 | 
			
		||||
       
 | 
			
		||||
       //_sort.push(eval,evec,Nconv);
 | 
			
		||||
       //evec.sort(eval,Nconv);
 | 
			
		||||
       
 | 
			
		||||
       std::cout<<GridLogMessage << "\n Converged\n Summary :\n";
 | 
			
		||||
       std::cout<<GridLogMessage << " -- Iterations  = "<< Nconv  << "\n";
 | 
			
		||||
       std::cout<<GridLogMessage << " -- beta(k)     = "<< beta_k << "\n";
 | 
			
		||||
       std::cout<<GridLogMessage << " -- Nconv       = "<< Nconv  << "\n";
 | 
			
		||||
      }
 | 
			
		||||
#endif
 | 
			
		||||
 | 
			
		||||
    };
 | 
			
		||||
 | 
			
		||||
}
 | 
			
		||||
#endif
 | 
			
		||||
 | 
			
		||||
@@ -0,0 +1,143 @@
 | 
			
		||||
namespace Grid { 
 | 
			
		||||
 | 
			
		||||
/*
 | 
			
		||||
  BlockProjector
 | 
			
		||||
 | 
			
		||||
  If _HP_BLOCK_PROJECTORS_ is defined, we assume that _evec is a basis that is not
 | 
			
		||||
  fully orthonormalized (to the precision of the coarse field) and we allow for higher-precision
 | 
			
		||||
  coarse field than basis field.
 | 
			
		||||
 | 
			
		||||
*/
 | 
			
		||||
//#define _HP_BLOCK_PROJECTORS_
 | 
			
		||||
 | 
			
		||||
template<typename Field>
 | 
			
		||||
class BlockProjector {
 | 
			
		||||
public:
 | 
			
		||||
 | 
			
		||||
  BasisFieldVector<Field>& _evec;
 | 
			
		||||
  BlockedGrid<Field>& _bgrid;
 | 
			
		||||
 | 
			
		||||
  BlockProjector(BasisFieldVector<Field>& evec, BlockedGrid<Field>& bgrid) : _evec(evec), _bgrid(bgrid) {
 | 
			
		||||
  }
 | 
			
		||||
 | 
			
		||||
  void createOrthonormalBasis(RealD thres = 0.0) {
 | 
			
		||||
 | 
			
		||||
    GridStopWatch sw;
 | 
			
		||||
    sw.Start();
 | 
			
		||||
 | 
			
		||||
    int cnt = 0;
 | 
			
		||||
 | 
			
		||||
#pragma omp parallel shared(cnt)
 | 
			
		||||
    {
 | 
			
		||||
      int lcnt = 0;
 | 
			
		||||
 | 
			
		||||
#pragma omp for
 | 
			
		||||
      for (int b=0;b<_bgrid._o_blocks;b++) {
 | 
			
		||||
	
 | 
			
		||||
	for (int i=0;i<_evec._Nm;i++) {
 | 
			
		||||
	  
 | 
			
		||||
	  auto nrm0 = _bgrid.block_sp(b,_evec._v[i],_evec._v[i]);
 | 
			
		||||
	  
 | 
			
		||||
	  // |i> -= <j|i> |j>
 | 
			
		||||
	  for (int j=0;j<i;j++) {
 | 
			
		||||
	    _bgrid.block_caxpy(b,_evec._v[i],-_bgrid.block_sp(b,_evec._v[j],_evec._v[i]),_evec._v[j],_evec._v[i]);
 | 
			
		||||
	  }
 | 
			
		||||
	  
 | 
			
		||||
	  auto nrm = _bgrid.block_sp(b,_evec._v[i],_evec._v[i]);
 | 
			
		||||
	  
 | 
			
		||||
	  auto eps = nrm/nrm0;
 | 
			
		||||
	  if (Reduce(eps).real() < thres) {
 | 
			
		||||
	    lcnt++;
 | 
			
		||||
	  }
 | 
			
		||||
	  
 | 
			
		||||
	  // TODO: if norm is too small, remove this eigenvector/mark as not needed; in practice: set it to zero norm here and return a mask
 | 
			
		||||
	  // that is then used later to decide not to write certain eigenvectors to disk (add a norm calculation before subtraction step and look at nrm/nrm0 < eps to decide)
 | 
			
		||||
	  _bgrid.block_cscale(b,1.0 / sqrt(nrm),_evec._v[i]);
 | 
			
		||||
	  
 | 
			
		||||
	}
 | 
			
		||||
	
 | 
			
		||||
      }
 | 
			
		||||
 | 
			
		||||
#pragma omp critical
 | 
			
		||||
      {
 | 
			
		||||
	cnt += lcnt;
 | 
			
		||||
      }
 | 
			
		||||
    }
 | 
			
		||||
    sw.Stop();
 | 
			
		||||
    std::cout << GridLogMessage << "Gram-Schmidt to create blocked basis took " << sw.Elapsed() << " (" << ((RealD)cnt / (RealD)_bgrid._o_blocks / (RealD)_evec._Nm) 
 | 
			
		||||
	      << " below threshold)" << std::endl;
 | 
			
		||||
 | 
			
		||||
  }
 | 
			
		||||
 | 
			
		||||
  template<typename CoarseField>
 | 
			
		||||
  void coarseToFine(const CoarseField& in, Field& out) {
 | 
			
		||||
 | 
			
		||||
    out = zero;
 | 
			
		||||
    out.checkerboard = _evec._v[0].checkerboard;
 | 
			
		||||
 | 
			
		||||
    int Nbasis = sizeof(in._odata[0]._internal._internal) / sizeof(in._odata[0]._internal._internal[0]);
 | 
			
		||||
    assert(Nbasis == _evec._Nm);
 | 
			
		||||
    
 | 
			
		||||
#pragma omp parallel for
 | 
			
		||||
    for (int b=0;b<_bgrid._o_blocks;b++) {
 | 
			
		||||
      for (int j=0;j<_evec._Nm;j++) {
 | 
			
		||||
	_bgrid.block_caxpy(b,out,in._odata[b]._internal._internal[j],_evec._v[j],out);
 | 
			
		||||
      }
 | 
			
		||||
    }
 | 
			
		||||
 | 
			
		||||
  }
 | 
			
		||||
 | 
			
		||||
  template<typename CoarseField>
 | 
			
		||||
  void fineToCoarse(const Field& in, CoarseField& out) {
 | 
			
		||||
 | 
			
		||||
    out = zero;
 | 
			
		||||
 | 
			
		||||
    int Nbasis = sizeof(out._odata[0]._internal._internal) / sizeof(out._odata[0]._internal._internal[0]);
 | 
			
		||||
    assert(Nbasis == _evec._Nm);
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
    Field tmp(_bgrid._grid);
 | 
			
		||||
    tmp = in;
 | 
			
		||||
    
 | 
			
		||||
#pragma omp parallel for
 | 
			
		||||
    for (int b=0;b<_bgrid._o_blocks;b++) {
 | 
			
		||||
      for (int j=0;j<_evec._Nm;j++) {
 | 
			
		||||
	// |rhs> -= <j|rhs> |j>
 | 
			
		||||
	auto c = _bgrid.block_sp(b,_evec._v[j],tmp);
 | 
			
		||||
	_bgrid.block_caxpy(b,tmp,-c,_evec._v[j],tmp); // may make this more numerically stable
 | 
			
		||||
	out._odata[b]._internal._internal[j] = c;
 | 
			
		||||
      }
 | 
			
		||||
    }
 | 
			
		||||
 | 
			
		||||
  }
 | 
			
		||||
 | 
			
		||||
  template<typename CoarseField>
 | 
			
		||||
    void deflateFine(BasisFieldVector<CoarseField>& _coef,const std::vector<RealD>& eval,int N,const Field& src_orig,Field& result) {
 | 
			
		||||
    result = zero;
 | 
			
		||||
    for (int i=0;i<N;i++) {
 | 
			
		||||
      Field tmp(result._grid);
 | 
			
		||||
      coarseToFine(_coef._v[i],tmp);
 | 
			
		||||
      axpy(result,TensorRemove(innerProduct(tmp,src_orig)) / eval[i],tmp,result);
 | 
			
		||||
    }
 | 
			
		||||
  }
 | 
			
		||||
 | 
			
		||||
  template<typename CoarseField>
 | 
			
		||||
    void deflateCoarse(BasisFieldVector<CoarseField>& _coef,const std::vector<RealD>& eval,int N,const Field& src_orig,Field& result) {
 | 
			
		||||
    CoarseField src_coarse(_coef._v[0]._grid);
 | 
			
		||||
    CoarseField result_coarse = src_coarse;
 | 
			
		||||
    result_coarse = zero;
 | 
			
		||||
    fineToCoarse(src_orig,src_coarse);
 | 
			
		||||
    for (int i=0;i<N;i++) {
 | 
			
		||||
      axpy(result_coarse,TensorRemove(innerProduct(_coef._v[i],src_coarse)) / eval[i],_coef._v[i],result_coarse);
 | 
			
		||||
    }
 | 
			
		||||
    coarseToFine(result_coarse,result);
 | 
			
		||||
  }
 | 
			
		||||
 | 
			
		||||
  template<typename CoarseField>
 | 
			
		||||
    void deflate(BasisFieldVector<CoarseField>& _coef,const std::vector<RealD>& eval,int N,const Field& src_orig,Field& result) {
 | 
			
		||||
    // Deflation on coarse Grid is much faster, so use it by default.  Deflation on fine Grid is kept for legacy reasons for now.
 | 
			
		||||
    deflateCoarse(_coef,eval,N,src_orig,result);
 | 
			
		||||
  }
 | 
			
		||||
 | 
			
		||||
};
 | 
			
		||||
}
 | 
			
		||||
@@ -0,0 +1,401 @@
 | 
			
		||||
namespace Grid {
 | 
			
		||||
 | 
			
		||||
template<typename Field>
 | 
			
		||||
class BlockedGrid {
 | 
			
		||||
public:
 | 
			
		||||
  GridBase* _grid;
 | 
			
		||||
  typedef typename Field::scalar_type  Coeff_t;
 | 
			
		||||
  typedef typename Field::vector_type vCoeff_t;
 | 
			
		||||
  
 | 
			
		||||
  std::vector<int> _bs; // block size
 | 
			
		||||
  std::vector<int> _nb; // number of blocks
 | 
			
		||||
  std::vector<int> _l;  // local dimensions irrespective of cb
 | 
			
		||||
  std::vector<int> _l_cb;  // local dimensions of checkerboarded vector
 | 
			
		||||
  std::vector<int> _l_cb_o;  // local dimensions of inner checkerboarded vector
 | 
			
		||||
  std::vector<int> _bs_cb; // block size in checkerboarded vector
 | 
			
		||||
  std::vector<int> _nb_o; // number of blocks of simd o-sites
 | 
			
		||||
 | 
			
		||||
  int _nd, _blocks, _cf_size, _cf_block_size, _cf_o_block_size, _o_blocks, _block_sites;
 | 
			
		||||
  
 | 
			
		||||
  BlockedGrid(GridBase* grid, const std::vector<int>& block_size) :
 | 
			
		||||
    _grid(grid), _bs(block_size), _nd((int)_bs.size()), 
 | 
			
		||||
      _nb(block_size), _l(block_size), _l_cb(block_size), _nb_o(block_size),
 | 
			
		||||
      _l_cb_o(block_size), _bs_cb(block_size) {
 | 
			
		||||
 | 
			
		||||
    _blocks = 1;
 | 
			
		||||
    _o_blocks = 1;
 | 
			
		||||
    _l = grid->FullDimensions();
 | 
			
		||||
    _l_cb = grid->LocalDimensions();
 | 
			
		||||
    _l_cb_o = grid->_rdimensions;
 | 
			
		||||
 | 
			
		||||
    _cf_size = 1;
 | 
			
		||||
    _block_sites = 1;
 | 
			
		||||
    for (int i=0;i<_nd;i++) {
 | 
			
		||||
      _l[i] /= grid->_processors[i];
 | 
			
		||||
 | 
			
		||||
      assert(!(_l[i] % _bs[i])); // lattice must accommodate choice of blocksize
 | 
			
		||||
 | 
			
		||||
      int r = _l[i] / _l_cb[i];
 | 
			
		||||
      assert(!(_bs[i] % r)); // checkerboarding must accommodate choice of blocksize
 | 
			
		||||
      _bs_cb[i] = _bs[i] / r;
 | 
			
		||||
      _block_sites *= _bs_cb[i];
 | 
			
		||||
      _nb[i] = _l[i] / _bs[i];
 | 
			
		||||
      _nb_o[i] = _nb[i] / _grid->_simd_layout[i];
 | 
			
		||||
      if (_nb[i] % _grid->_simd_layout[i]) { // simd must accommodate choice of blocksize
 | 
			
		||||
	std::cout << GridLogMessage << "Problem: _nb[" << i << "] = " << _nb[i] << " _grid->_simd_layout[" << i << "] = " << _grid->_simd_layout[i] << std::endl;
 | 
			
		||||
	assert(0);
 | 
			
		||||
      }
 | 
			
		||||
      _blocks *= _nb[i];
 | 
			
		||||
      _o_blocks *= _nb_o[i];
 | 
			
		||||
      _cf_size *= _l[i];
 | 
			
		||||
    }
 | 
			
		||||
 | 
			
		||||
    _cf_size *= 12 / 2;
 | 
			
		||||
    _cf_block_size = _cf_size / _blocks;
 | 
			
		||||
    _cf_o_block_size = _cf_size / _o_blocks;
 | 
			
		||||
 | 
			
		||||
    std::cout << GridLogMessage << "BlockedGrid:" << std::endl;
 | 
			
		||||
    std::cout << GridLogMessage << " _l     = " << _l << std::endl;
 | 
			
		||||
    std::cout << GridLogMessage << " _l_cb     = " << _l_cb << std::endl;
 | 
			
		||||
    std::cout << GridLogMessage << " _l_cb_o     = " << _l_cb_o << std::endl;
 | 
			
		||||
    std::cout << GridLogMessage << " _bs    = " << _bs << std::endl;
 | 
			
		||||
    std::cout << GridLogMessage << " _bs_cb    = " << _bs_cb << std::endl;
 | 
			
		||||
 | 
			
		||||
    std::cout << GridLogMessage << " _nb    = " << _nb << std::endl;
 | 
			
		||||
    std::cout << GridLogMessage << " _nb_o    = " << _nb_o << std::endl;
 | 
			
		||||
    std::cout << GridLogMessage << " _blocks = " << _blocks << std::endl;
 | 
			
		||||
    std::cout << GridLogMessage << " _o_blocks = " << _o_blocks << std::endl;
 | 
			
		||||
    std::cout << GridLogMessage << " sizeof(vCoeff_t) = " << sizeof(vCoeff_t) << std::endl;
 | 
			
		||||
    std::cout << GridLogMessage << " _cf_size = " << _cf_size << std::endl;
 | 
			
		||||
    std::cout << GridLogMessage << " _cf_block_size = " << _cf_block_size << std::endl;
 | 
			
		||||
    std::cout << GridLogMessage << " _block_sites = " << _block_sites << std::endl;
 | 
			
		||||
    std::cout << GridLogMessage << " _grid->oSites() = " << _grid->oSites() << std::endl;
 | 
			
		||||
 | 
			
		||||
    //    _grid->Barrier();
 | 
			
		||||
    //abort();
 | 
			
		||||
  }
 | 
			
		||||
 | 
			
		||||
    void block_to_coor(int b, std::vector<int>& x0) {
 | 
			
		||||
 | 
			
		||||
      std::vector<int> bcoor;
 | 
			
		||||
      bcoor.resize(_nd);
 | 
			
		||||
      x0.resize(_nd);
 | 
			
		||||
      assert(b < _o_blocks);
 | 
			
		||||
      Lexicographic::CoorFromIndex(bcoor,b,_nb_o);
 | 
			
		||||
      int i;
 | 
			
		||||
 | 
			
		||||
      for (i=0;i<_nd;i++) {
 | 
			
		||||
	x0[i] = bcoor[i]*_bs_cb[i];
 | 
			
		||||
      }
 | 
			
		||||
 | 
			
		||||
      //std::cout << GridLogMessage << "Map block b -> " << x0 << std::endl;
 | 
			
		||||
 | 
			
		||||
    }
 | 
			
		||||
 | 
			
		||||
    void block_site_to_o_coor(const std::vector<int>& x0, std::vector<int>& coor, int i) {
 | 
			
		||||
      Lexicographic::CoorFromIndex(coor,i,_bs_cb);
 | 
			
		||||
      for (int j=0;j<_nd;j++)
 | 
			
		||||
	coor[j] += x0[j];
 | 
			
		||||
    }
 | 
			
		||||
 | 
			
		||||
    int block_site_to_o_site(const std::vector<int>& x0, int i) {
 | 
			
		||||
      std::vector<int> coor;  coor.resize(_nd);
 | 
			
		||||
      block_site_to_o_coor(x0,coor,i);
 | 
			
		||||
      Lexicographic::IndexFromCoor(coor,i,_l_cb_o);
 | 
			
		||||
      return i;
 | 
			
		||||
    }
 | 
			
		||||
 | 
			
		||||
    vCoeff_t block_sp(int b, const Field& x, const Field& y) {
 | 
			
		||||
 | 
			
		||||
      std::vector<int> x0;
 | 
			
		||||
      block_to_coor(b,x0);
 | 
			
		||||
 | 
			
		||||
      vCoeff_t ret = 0.0;
 | 
			
		||||
      for (int i=0;i<_block_sites;i++) { // only odd sites
 | 
			
		||||
	int ss = block_site_to_o_site(x0,i);
 | 
			
		||||
	ret += TensorRemove(innerProduct(x._odata[ss],y._odata[ss]));
 | 
			
		||||
      }
 | 
			
		||||
 | 
			
		||||
      return ret;
 | 
			
		||||
 | 
			
		||||
    }
 | 
			
		||||
 | 
			
		||||
    vCoeff_t block_sp(int b, const Field& x, const std::vector< ComplexD >& y) {
 | 
			
		||||
 | 
			
		||||
      std::vector<int> x0;
 | 
			
		||||
      block_to_coor(b,x0);
 | 
			
		||||
 | 
			
		||||
      constexpr int nsimd = sizeof(vCoeff_t) / sizeof(Coeff_t);
 | 
			
		||||
      int lsize = _cf_o_block_size / _block_sites;
 | 
			
		||||
 | 
			
		||||
      std::vector< ComplexD > ret(nsimd);
 | 
			
		||||
      for (int i=0;i<nsimd;i++)
 | 
			
		||||
	ret[i] = 0.0;
 | 
			
		||||
 | 
			
		||||
      for (int i=0;i<_block_sites;i++) { // only odd sites
 | 
			
		||||
	int ss = block_site_to_o_site(x0,i);
 | 
			
		||||
 | 
			
		||||
	int n = lsize / nsimd;
 | 
			
		||||
	for (int l=0;l<n;l++) {
 | 
			
		||||
	  for (int j=0;j<nsimd;j++) {
 | 
			
		||||
	    int t = lsize * i + l*nsimd + j;
 | 
			
		||||
 | 
			
		||||
	    ret[j] += conjugate(((Coeff_t*)&x._odata[ss]._internal)[l*nsimd + j]) * y[t];
 | 
			
		||||
	  }
 | 
			
		||||
	}
 | 
			
		||||
      }
 | 
			
		||||
 | 
			
		||||
      vCoeff_t vret;
 | 
			
		||||
      for (int i=0;i<nsimd;i++)
 | 
			
		||||
	((Coeff_t*)&vret)[i] = (Coeff_t)ret[i];
 | 
			
		||||
 | 
			
		||||
      return vret;
 | 
			
		||||
 | 
			
		||||
    }
 | 
			
		||||
 | 
			
		||||
    template<class T>
 | 
			
		||||
      void vcaxpy(iScalar<T>& r,const vCoeff_t& a,const iScalar<T>& x,const iScalar<T>& y) {
 | 
			
		||||
      vcaxpy(r._internal,a,x._internal,y._internal);
 | 
			
		||||
    }
 | 
			
		||||
 | 
			
		||||
    template<class T,int N>
 | 
			
		||||
      void vcaxpy(iVector<T,N>& r,const vCoeff_t& a,const iVector<T,N>& x,const iVector<T,N>& y) {
 | 
			
		||||
      for (int i=0;i<N;i++)
 | 
			
		||||
	vcaxpy(r._internal[i],a,x._internal[i],y._internal[i]);
 | 
			
		||||
    }
 | 
			
		||||
 | 
			
		||||
    void vcaxpy(vCoeff_t& r,const vCoeff_t& a,const vCoeff_t& x,const vCoeff_t& y) {
 | 
			
		||||
      r = a*x + y;
 | 
			
		||||
    }
 | 
			
		||||
 | 
			
		||||
    void block_caxpy(int b, Field& ret, const vCoeff_t& a, const Field& x, const Field& y) {
 | 
			
		||||
 | 
			
		||||
      std::vector<int> x0;
 | 
			
		||||
      block_to_coor(b,x0);
 | 
			
		||||
 | 
			
		||||
      for (int i=0;i<_block_sites;i++) { // only odd sites
 | 
			
		||||
	int ss = block_site_to_o_site(x0,i);
 | 
			
		||||
	vcaxpy(ret._odata[ss],a,x._odata[ss],y._odata[ss]);
 | 
			
		||||
      }
 | 
			
		||||
 | 
			
		||||
    }
 | 
			
		||||
 | 
			
		||||
    void block_caxpy(int b, std::vector< ComplexD >& ret, const vCoeff_t& a, const Field& x, const std::vector< ComplexD >& y) {
 | 
			
		||||
      std::vector<int> x0;
 | 
			
		||||
      block_to_coor(b,x0);
 | 
			
		||||
 | 
			
		||||
      constexpr int nsimd = sizeof(vCoeff_t) / sizeof(Coeff_t);
 | 
			
		||||
      int lsize = _cf_o_block_size / _block_sites;
 | 
			
		||||
 | 
			
		||||
      for (int i=0;i<_block_sites;i++) { // only odd sites
 | 
			
		||||
	int ss = block_site_to_o_site(x0,i);
 | 
			
		||||
 | 
			
		||||
	int n = lsize / nsimd;
 | 
			
		||||
	for (int l=0;l<n;l++) {
 | 
			
		||||
	  vCoeff_t r = a* ((vCoeff_t*)&x._odata[ss]._internal)[l];
 | 
			
		||||
 | 
			
		||||
	  for (int j=0;j<nsimd;j++) {
 | 
			
		||||
	    int t = lsize * i + l*nsimd + j;
 | 
			
		||||
	    ret[t] = y[t] + ((Coeff_t*)&r)[j];
 | 
			
		||||
	  }
 | 
			
		||||
	}
 | 
			
		||||
      }
 | 
			
		||||
 | 
			
		||||
    }
 | 
			
		||||
 | 
			
		||||
    void block_set(int b, Field& ret, const std::vector< ComplexD >& x) {
 | 
			
		||||
      std::vector<int> x0;
 | 
			
		||||
      block_to_coor(b,x0);
 | 
			
		||||
 | 
			
		||||
      int lsize = _cf_o_block_size / _block_sites;
 | 
			
		||||
 | 
			
		||||
      for (int i=0;i<_block_sites;i++) { // only odd sites
 | 
			
		||||
	int ss = block_site_to_o_site(x0,i);
 | 
			
		||||
 | 
			
		||||
	for (int l=0;l<lsize;l++)
 | 
			
		||||
	  ((Coeff_t*)&ret._odata[ss]._internal)[l] = (Coeff_t)x[lsize * i + l]; // convert precision
 | 
			
		||||
      }
 | 
			
		||||
 | 
			
		||||
    }
 | 
			
		||||
 | 
			
		||||
    void block_get(int b, const Field& ret, std::vector< ComplexD >& x) {
 | 
			
		||||
      std::vector<int> x0;
 | 
			
		||||
      block_to_coor(b,x0);
 | 
			
		||||
 | 
			
		||||
      int lsize = _cf_o_block_size / _block_sites;
 | 
			
		||||
 | 
			
		||||
      for (int i=0;i<_block_sites;i++) { // only odd sites
 | 
			
		||||
	int ss = block_site_to_o_site(x0,i);
 | 
			
		||||
 | 
			
		||||
	for (int l=0;l<lsize;l++)
 | 
			
		||||
	  x[lsize * i + l] = (ComplexD)((Coeff_t*)&ret._odata[ss]._internal)[l];
 | 
			
		||||
      }
 | 
			
		||||
 | 
			
		||||
    }
 | 
			
		||||
 | 
			
		||||
    template<class T>
 | 
			
		||||
    void vcscale(iScalar<T>& r,const vCoeff_t& a,const iScalar<T>& x) {
 | 
			
		||||
      vcscale(r._internal,a,x._internal);
 | 
			
		||||
    }
 | 
			
		||||
 | 
			
		||||
    template<class T,int N>
 | 
			
		||||
    void vcscale(iVector<T,N>& r,const vCoeff_t& a,const iVector<T,N>& x) {
 | 
			
		||||
      for (int i=0;i<N;i++)
 | 
			
		||||
	vcscale(r._internal[i],a,x._internal[i]);
 | 
			
		||||
    }
 | 
			
		||||
 | 
			
		||||
    void vcscale(vCoeff_t& r,const vCoeff_t& a,const vCoeff_t& x) {
 | 
			
		||||
      r = a*x;
 | 
			
		||||
    }
 | 
			
		||||
 | 
			
		||||
    void block_cscale(int b, const vCoeff_t& a, Field& ret) {
 | 
			
		||||
 | 
			
		||||
      std::vector<int> x0;
 | 
			
		||||
      block_to_coor(b,x0);
 | 
			
		||||
      
 | 
			
		||||
      for (int i=0;i<_block_sites;i++) { // only odd sites
 | 
			
		||||
	int ss = block_site_to_o_site(x0,i);
 | 
			
		||||
	vcscale(ret._odata[ss],a,ret._odata[ss]);
 | 
			
		||||
      }
 | 
			
		||||
    }
 | 
			
		||||
 | 
			
		||||
    void getCanonicalBlockOffset(int cb, std::vector<int>& x0) {
 | 
			
		||||
      const int ndim = 5;
 | 
			
		||||
      assert(_nb.size() == ndim);
 | 
			
		||||
      std::vector<int> _nbc = { _nb[1], _nb[2], _nb[3], _nb[4], _nb[0] };
 | 
			
		||||
      std::vector<int> _bsc = { _bs[1], _bs[2], _bs[3], _bs[4], _bs[0] };
 | 
			
		||||
      x0.resize(ndim);
 | 
			
		||||
 | 
			
		||||
      assert(cb >= 0);
 | 
			
		||||
      assert(cb < _nbc[0]*_nbc[1]*_nbc[2]*_nbc[3]*_nbc[4]);
 | 
			
		||||
 | 
			
		||||
      Lexicographic::CoorFromIndex(x0,cb,_nbc);
 | 
			
		||||
      int i;
 | 
			
		||||
 | 
			
		||||
      for (i=0;i<ndim;i++) {
 | 
			
		||||
	x0[i] *= _bsc[i];
 | 
			
		||||
      }
 | 
			
		||||
 | 
			
		||||
      //if (cb < 2)
 | 
			
		||||
      //	std::cout << GridLogMessage << "Map: " << cb << " To: " << x0 << std::endl;
 | 
			
		||||
    }
 | 
			
		||||
 | 
			
		||||
    void pokeBlockOfVectorCanonical(int cb,Field& v,const std::vector<float>& buf) {
 | 
			
		||||
      std::vector<int> _bsc = { _bs[1], _bs[2], _bs[3], _bs[4], _bs[0] };
 | 
			
		||||
      std::vector<int> ldim = v._grid->LocalDimensions();
 | 
			
		||||
      std::vector<int> cldim = { ldim[1], ldim[2], ldim[3], ldim[4], ldim[0] };
 | 
			
		||||
      const int _nbsc = _bs_cb[0]*_bs_cb[1]*_bs_cb[2]*_bs_cb[3]*_bs_cb[4];
 | 
			
		||||
      // take canonical block cb of v and put it in canonical ordering in buf
 | 
			
		||||
      std::vector<int> cx0;
 | 
			
		||||
      getCanonicalBlockOffset(cb,cx0);
 | 
			
		||||
 | 
			
		||||
#pragma omp parallel
 | 
			
		||||
      {
 | 
			
		||||
	std::vector<int> co0,cl0;
 | 
			
		||||
	co0=cx0; cl0=cx0;
 | 
			
		||||
 | 
			
		||||
#pragma omp for
 | 
			
		||||
	for (int i=0;i<_nbsc;i++) {
 | 
			
		||||
	  Lexicographic::CoorFromIndex(co0,2*i,_bsc); // 2* for eo
 | 
			
		||||
	  for (int j=0;j<(int)_bsc.size();j++)
 | 
			
		||||
	    cl0[j] = cx0[j] + co0[j];
 | 
			
		||||
	  
 | 
			
		||||
	  std::vector<int> l0 = { cl0[4], cl0[0], cl0[1], cl0[2], cl0[3] };
 | 
			
		||||
	  int oi = v._grid->oIndex(l0);
 | 
			
		||||
	  int ii = v._grid->iIndex(l0);
 | 
			
		||||
	  int lti = i;
 | 
			
		||||
 | 
			
		||||
	  //if (cb < 2 && i<2)
 | 
			
		||||
	  //  std::cout << GridLogMessage << "Map: " << cb << ", " << i << " To: " << cl0 << ", " << cx0 << ", " << oi << ", " << ii << std::endl;
 | 
			
		||||
	  
 | 
			
		||||
	  for (int s=0;s<4;s++)
 | 
			
		||||
	    for (int c=0;c<3;c++) {
 | 
			
		||||
	      Coeff_t& ld = ((Coeff_t*)&v._odata[oi]._internal._internal[s]._internal[c])[ii];
 | 
			
		||||
	      int ti = 12*lti + 3*s + c;
 | 
			
		||||
	      ld = Coeff_t(buf[2*ti+0], buf[2*ti+1]);
 | 
			
		||||
	    }
 | 
			
		||||
	}
 | 
			
		||||
      }
 | 
			
		||||
    }
 | 
			
		||||
 | 
			
		||||
    void peekBlockOfVectorCanonical(int cb,const Field& v,std::vector<float>& buf) {
 | 
			
		||||
      std::vector<int> _bsc = { _bs[1], _bs[2], _bs[3], _bs[4], _bs[0] };
 | 
			
		||||
      std::vector<int> ldim = v._grid->LocalDimensions();
 | 
			
		||||
      std::vector<int> cldim = { ldim[1], ldim[2], ldim[3], ldim[4], ldim[0] };
 | 
			
		||||
      const int _nbsc = _bs_cb[0]*_bs_cb[1]*_bs_cb[2]*_bs_cb[3]*_bs_cb[4];
 | 
			
		||||
      // take canonical block cb of v and put it in canonical ordering in buf
 | 
			
		||||
      std::vector<int> cx0;
 | 
			
		||||
      getCanonicalBlockOffset(cb,cx0);
 | 
			
		||||
 | 
			
		||||
      buf.resize(_cf_block_size * 2);
 | 
			
		||||
 | 
			
		||||
#pragma omp parallel
 | 
			
		||||
      {
 | 
			
		||||
	std::vector<int> co0,cl0;
 | 
			
		||||
	co0=cx0; cl0=cx0;
 | 
			
		||||
 | 
			
		||||
#pragma omp for
 | 
			
		||||
	for (int i=0;i<_nbsc;i++) {
 | 
			
		||||
	  Lexicographic::CoorFromIndex(co0,2*i,_bsc); // 2* for eo
 | 
			
		||||
	  for (int j=0;j<(int)_bsc.size();j++)
 | 
			
		||||
	    cl0[j] = cx0[j] + co0[j];
 | 
			
		||||
	  
 | 
			
		||||
	  std::vector<int> l0 = { cl0[4], cl0[0], cl0[1], cl0[2], cl0[3] };
 | 
			
		||||
	  int oi = v._grid->oIndex(l0);
 | 
			
		||||
	  int ii = v._grid->iIndex(l0);
 | 
			
		||||
	  int lti = i;
 | 
			
		||||
	  
 | 
			
		||||
	  //if (cb < 2 && i<2)
 | 
			
		||||
	  //  std::cout << GridLogMessage << "Map: " << cb << ", " << i << " To: " << cl0 << ", " << cx0 << ", " << oi << ", " << ii << std::endl;
 | 
			
		||||
 | 
			
		||||
	  for (int s=0;s<4;s++)
 | 
			
		||||
	    for (int c=0;c<3;c++) {
 | 
			
		||||
	      Coeff_t& ld = ((Coeff_t*)&v._odata[oi]._internal._internal[s]._internal[c])[ii];
 | 
			
		||||
	      int ti = 12*lti + 3*s + c;
 | 
			
		||||
	      buf[2*ti+0] = ld.real();
 | 
			
		||||
	      buf[2*ti+1] = ld.imag();
 | 
			
		||||
	    }
 | 
			
		||||
	}
 | 
			
		||||
      }
 | 
			
		||||
    }
 | 
			
		||||
 | 
			
		||||
    int globalToLocalCanonicalBlock(int slot,const std::vector<int>& src_nodes,int nb) {
 | 
			
		||||
      // processor coordinate
 | 
			
		||||
      int _nd = (int)src_nodes.size();
 | 
			
		||||
      std::vector<int> _src_nodes = src_nodes;
 | 
			
		||||
      std::vector<int> pco(_nd);
 | 
			
		||||
      Lexicographic::CoorFromIndex(pco,slot,_src_nodes);
 | 
			
		||||
      std::vector<int> cpco = { pco[1], pco[2], pco[3], pco[4], pco[0] };
 | 
			
		||||
 | 
			
		||||
      // get local block
 | 
			
		||||
      std::vector<int> _nbc = { _nb[1], _nb[2], _nb[3], _nb[4], _nb[0] };
 | 
			
		||||
      assert(_nd == 5);
 | 
			
		||||
      std::vector<int> c_src_local_blocks(_nd);
 | 
			
		||||
      for (int i=0;i<_nd;i++) {
 | 
			
		||||
	assert(_grid->_fdimensions[i] % (src_nodes[i] * _bs[i]) == 0);
 | 
			
		||||
	c_src_local_blocks[(i+4) % 5] = _grid->_fdimensions[i] / src_nodes[i] / _bs[i];
 | 
			
		||||
      }
 | 
			
		||||
      std::vector<int> cbcoor(_nd); // coordinate of block in slot in canonical form
 | 
			
		||||
      Lexicographic::CoorFromIndex(cbcoor,nb,c_src_local_blocks);
 | 
			
		||||
 | 
			
		||||
      // cpco, cbcoor
 | 
			
		||||
      std::vector<int> clbcoor(_nd);
 | 
			
		||||
      for (int i=0;i<_nd;i++) {
 | 
			
		||||
	int cgcoor = cpco[i] * c_src_local_blocks[i] + cbcoor[i]; // global block coordinate
 | 
			
		||||
	int pcoor = cgcoor / _nbc[i]; // processor coordinate in my Grid
 | 
			
		||||
	int tpcoor = _grid->_processor_coor[(i+1)%5];
 | 
			
		||||
	if (pcoor != tpcoor)
 | 
			
		||||
	  return -1;
 | 
			
		||||
	clbcoor[i] = cgcoor - tpcoor * _nbc[i]; // canonical local block coordinate for canonical dimension i
 | 
			
		||||
      }
 | 
			
		||||
 | 
			
		||||
      int lnb;
 | 
			
		||||
      Lexicographic::IndexFromCoor(clbcoor,lnb,_nbc);
 | 
			
		||||
      //std::cout << "Mapped slot = " << slot << " nb = " << nb << " to " << lnb << std::endl;
 | 
			
		||||
      return lnb;
 | 
			
		||||
    }
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
 };
 | 
			
		||||
 | 
			
		||||
}
 | 
			
		||||
@@ -0,0 +1,163 @@
 | 
			
		||||
namespace Grid { 
 | 
			
		||||
 | 
			
		||||
template<class Field>
 | 
			
		||||
class BasisFieldVector {
 | 
			
		||||
 public:
 | 
			
		||||
  int _Nm;
 | 
			
		||||
 | 
			
		||||
  typedef typename Field::scalar_type Coeff_t;
 | 
			
		||||
  typedef typename Field::vector_type vCoeff_t;
 | 
			
		||||
  typedef typename Field::vector_object vobj;
 | 
			
		||||
  typedef typename vobj::scalar_object sobj;
 | 
			
		||||
 | 
			
		||||
  std::vector<Field> _v; // _Nfull vectors
 | 
			
		||||
 | 
			
		||||
  void report(int n,GridBase* value) {
 | 
			
		||||
 | 
			
		||||
    std::cout << GridLogMessage << "BasisFieldVector allocated:\n";
 | 
			
		||||
    std::cout << GridLogMessage << " Delta N = " << n << "\n";
 | 
			
		||||
    std::cout << GridLogMessage << " Size of full vectors (size) = " << 
 | 
			
		||||
      ((double)n*sizeof(vobj)*value->oSites() / 1024./1024./1024.) << " GB\n";
 | 
			
		||||
    std::cout << GridLogMessage << " Size = " << _v.size() << " Capacity = " << _v.capacity() << std::endl;
 | 
			
		||||
 | 
			
		||||
    value->Barrier();
 | 
			
		||||
 | 
			
		||||
    if (value->IsBoss()) {
 | 
			
		||||
      system("cat /proc/meminfo");
 | 
			
		||||
    }
 | 
			
		||||
 | 
			
		||||
    value->Barrier();
 | 
			
		||||
 | 
			
		||||
  }
 | 
			
		||||
 | 
			
		||||
  BasisFieldVector(int Nm,GridBase* value) : _Nm(Nm), _v(Nm,value) {
 | 
			
		||||
    report(Nm,value);
 | 
			
		||||
  }
 | 
			
		||||
  
 | 
			
		||||
  ~BasisFieldVector() {
 | 
			
		||||
  }
 | 
			
		||||
 | 
			
		||||
  Field& operator[](int i) {
 | 
			
		||||
    return _v[i];
 | 
			
		||||
  }
 | 
			
		||||
 | 
			
		||||
  void orthogonalize(Field& w, int k) {
 | 
			
		||||
    for(int j=0; j<k; ++j){
 | 
			
		||||
      Coeff_t ip = (Coeff_t)innerProduct(_v[j],w);
 | 
			
		||||
      w = w - ip*_v[j];
 | 
			
		||||
    }
 | 
			
		||||
  }
 | 
			
		||||
 | 
			
		||||
  void rotate(std::vector<RealD>& Qt,int j0, int j1, int k0,int k1,int Nm) {
 | 
			
		||||
    
 | 
			
		||||
    GridBase* grid = _v[0]._grid;
 | 
			
		||||
      
 | 
			
		||||
#pragma omp parallel
 | 
			
		||||
    {
 | 
			
		||||
      std::vector < vobj > B(Nm);
 | 
			
		||||
      
 | 
			
		||||
#pragma omp for
 | 
			
		||||
      for(int ss=0;ss < grid->oSites();ss++){
 | 
			
		||||
	for(int j=j0; j<j1; ++j) B[j]=0.;
 | 
			
		||||
	
 | 
			
		||||
	for(int j=j0; j<j1; ++j){
 | 
			
		||||
	  for(int k=k0; k<k1; ++k){
 | 
			
		||||
	    B[j] +=Qt[k+Nm*j] * _v[k]._odata[ss];
 | 
			
		||||
	  }
 | 
			
		||||
	}
 | 
			
		||||
	for(int j=j0; j<j1; ++j){
 | 
			
		||||
	  _v[j]._odata[ss] = B[j];
 | 
			
		||||
	}
 | 
			
		||||
      }
 | 
			
		||||
    }
 | 
			
		||||
 | 
			
		||||
  }
 | 
			
		||||
 | 
			
		||||
  size_t size() const {
 | 
			
		||||
    return _Nm;
 | 
			
		||||
  }
 | 
			
		||||
 | 
			
		||||
  void resize(int n) {
 | 
			
		||||
    if (n > _Nm)
 | 
			
		||||
      _v.reserve(n);
 | 
			
		||||
    
 | 
			
		||||
    _v.resize(n,_v[0]._grid);
 | 
			
		||||
 | 
			
		||||
    if (n < _Nm)
 | 
			
		||||
      _v.shrink_to_fit();
 | 
			
		||||
 | 
			
		||||
    report(n - _Nm,_v[0]._grid);
 | 
			
		||||
 | 
			
		||||
    _Nm = n;
 | 
			
		||||
  }
 | 
			
		||||
 | 
			
		||||
  std::vector<int> getIndex(std::vector<RealD>& sort_vals) {
 | 
			
		||||
 | 
			
		||||
    std::vector<int> idx(sort_vals.size());
 | 
			
		||||
    iota(idx.begin(), idx.end(), 0);
 | 
			
		||||
 | 
			
		||||
    // sort indexes based on comparing values in v
 | 
			
		||||
    sort(idx.begin(), idx.end(),
 | 
			
		||||
	 [&sort_vals](int i1, int i2) {return ::fabs(sort_vals[i1]) < ::fabs(sort_vals[i2]);});
 | 
			
		||||
 | 
			
		||||
    return idx;
 | 
			
		||||
  }
 | 
			
		||||
 | 
			
		||||
  void reorderInPlace(std::vector<RealD>& sort_vals, std::vector<int>& idx) {
 | 
			
		||||
    GridStopWatch gsw;
 | 
			
		||||
    gsw.Start();
 | 
			
		||||
 | 
			
		||||
    int nswaps = 0;
 | 
			
		||||
    for (size_t i=0;i<idx.size();i++) {
 | 
			
		||||
      if (idx[i] != i) {
 | 
			
		||||
 | 
			
		||||
	// find proper place (this could be done in logarithmic time, don't bother for now)
 | 
			
		||||
	size_t j;
 | 
			
		||||
	for (j=i;j<idx.size();j++)
 | 
			
		||||
	  if (idx[j]==i)
 | 
			
		||||
	    break;
 | 
			
		||||
	assert(j!=idx.size());
 | 
			
		||||
	
 | 
			
		||||
	Field _t(_v[0]._grid);
 | 
			
		||||
	_t = _v[idx[j]];
 | 
			
		||||
	_v[idx[j]] = _v[idx[i]];
 | 
			
		||||
	_v[idx[i]] = _t;
 | 
			
		||||
 | 
			
		||||
	RealD _td = sort_vals[idx[j]];
 | 
			
		||||
	sort_vals[idx[j]] = sort_vals[idx[i]];
 | 
			
		||||
	sort_vals[idx[i]] = _td;
 | 
			
		||||
 | 
			
		||||
	int _tt = idx[i];
 | 
			
		||||
	idx[i] = idx[j];
 | 
			
		||||
	idx[j] = _tt;
 | 
			
		||||
	
 | 
			
		||||
	nswaps++;
 | 
			
		||||
      }
 | 
			
		||||
    }
 | 
			
		||||
 | 
			
		||||
    // sort values
 | 
			
		||||
    gsw.Stop();
 | 
			
		||||
    std::cout << GridLogMessage << "Sorted eigenspace in place in " << gsw.Elapsed() << " using " << nswaps << " swaps" << std::endl;
 | 
			
		||||
  }
 | 
			
		||||
 | 
			
		||||
  void sortInPlace(std::vector<RealD>& sort_vals, bool reverse) {
 | 
			
		||||
 | 
			
		||||
    std::vector<int> idx = getIndex(sort_vals);
 | 
			
		||||
    if (reverse)
 | 
			
		||||
      std::reverse(idx.begin(), idx.end());
 | 
			
		||||
 | 
			
		||||
    reorderInPlace(sort_vals,idx);
 | 
			
		||||
 | 
			
		||||
  }
 | 
			
		||||
 | 
			
		||||
  void deflate(const std::vector<RealD>& eval,const Field& src_orig,Field& result) {
 | 
			
		||||
    result = zero;
 | 
			
		||||
    int N = (int)_v.size();
 | 
			
		||||
    for (int i=0;i<N;i++) {
 | 
			
		||||
      Field& tmp = _v[i];
 | 
			
		||||
      axpy(result,TensorRemove(innerProduct(tmp,src_orig)) / eval[i],tmp,result);
 | 
			
		||||
    }
 | 
			
		||||
  }
 | 
			
		||||
 | 
			
		||||
 }; 
 | 
			
		||||
}
 | 
			
		||||
@@ -60,6 +60,7 @@ namespace Grid {
 | 
			
		||||
    }
 | 
			
		||||
  
 | 
			
		||||
    void operator() (const FieldD &src_d_in, FieldD &sol_d){
 | 
			
		||||
 | 
			
		||||
      TotalInnerIterations = 0;
 | 
			
		||||
	
 | 
			
		||||
      GridStopWatch TotalTimer;
 | 
			
		||||
 
 | 
			
		||||
							
								
								
									
										1222
									
								
								lib/algorithms/iterative/ImplicitlyRestartedLanczosCJ.h
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										1222
									
								
								lib/algorithms/iterative/ImplicitlyRestartedLanczosCJ.h
									
									
									
									
									
										Normal file
									
								
							
										
											
												File diff suppressed because it is too large
												Load Diff
											
										
									
								
							@@ -7,6 +7,7 @@
 | 
			
		||||
    Copyright (C) 2015
 | 
			
		||||
 | 
			
		||||
Author: Peter Boyle <paboyle@ph.ed.ac.uk>
 | 
			
		||||
Author: Chulwoo Jung <chulwoo@bnl.gov>
 | 
			
		||||
 | 
			
		||||
    This program is free software; you can redistribute it and/or modify
 | 
			
		||||
    it under the terms of the GNU General Public License as published by
 | 
			
		||||
@@ -53,16 +54,194 @@ Author: Peter Boyle <paboyle@ph.ed.ac.uk>
 | 
			
		||||
   *     M psi = eta
 | 
			
		||||
   ***********************
 | 
			
		||||
   *Odd
 | 
			
		||||
   * i)   (D_oo)^{\dag} D_oo psi_o = (D_oo)^dag L^{-1}  eta_o
 | 
			
		||||
   * i)                 D_oo psi_o =  L^{-1}  eta_o
 | 
			
		||||
   *                        eta_o' = (D_oo)^dag (eta_o - Moe Mee^{-1} eta_e)
 | 
			
		||||
   *
 | 
			
		||||
   * Wilson:
 | 
			
		||||
   *      (D_oo)^{\dag} D_oo psi_o = (D_oo)^dag L^{-1}  eta_o
 | 
			
		||||
   * Stag:
 | 
			
		||||
   *      D_oo psi_o = L^{-1}  eta =    (eta_o - Moe Mee^{-1} eta_e)
 | 
			
		||||
   *
 | 
			
		||||
   * L^-1 eta_o= (1              0 ) (e
 | 
			
		||||
   *             (-MoeMee^{-1}   1 )   
 | 
			
		||||
   *
 | 
			
		||||
   *Even
 | 
			
		||||
   * ii)  Mee psi_e + Meo psi_o = src_e
 | 
			
		||||
   *
 | 
			
		||||
   *   => sol_e = M_ee^-1 * ( src_e - Meo sol_o )...
 | 
			
		||||
   *
 | 
			
		||||
   * 
 | 
			
		||||
   * TODO: Other options:
 | 
			
		||||
   * 
 | 
			
		||||
   * a) change checkerboards for Schur e<->o
 | 
			
		||||
   *
 | 
			
		||||
   * Left precon by Moo^-1
 | 
			
		||||
   * b) Doo^{dag} M_oo^-dag Moo^-1 Doo psi_0 =  (D_oo)^dag M_oo^-dag Moo^-1 L^{-1}  eta_o
 | 
			
		||||
   *                              eta_o'     = (D_oo)^dag  M_oo^-dag Moo^-1 (eta_o - Moe Mee^{-1} eta_e)
 | 
			
		||||
   *
 | 
			
		||||
   * Right precon by Moo^-1
 | 
			
		||||
   * c) M_oo^-dag Doo^{dag} Doo Moo^-1 phi_0 = M_oo^-dag (D_oo)^dag L^{-1}  eta_o
 | 
			
		||||
   *                              eta_o'     = M_oo^-dag (D_oo)^dag (eta_o - Moe Mee^{-1} eta_e)
 | 
			
		||||
   *                              psi_o = M_oo^-1 phi_o
 | 
			
		||||
   * TODO: Deflation 
 | 
			
		||||
   */
 | 
			
		||||
namespace Grid {
 | 
			
		||||
 | 
			
		||||
  ///////////////////////////////////////////////////////////////////////////////////////////////////////
 | 
			
		||||
  // Take a matrix and form a Red Black solver calling a Herm solver
 | 
			
		||||
  // Use of RB info prevents making SchurRedBlackSolve conform to standard interface
 | 
			
		||||
  ///////////////////////////////////////////////////////////////////////////////////////////////////////
 | 
			
		||||
 | 
			
		||||
  template<class Field> class SchurRedBlackStaggeredSolve {
 | 
			
		||||
  private:
 | 
			
		||||
    OperatorFunction<Field> & _HermitianRBSolver;
 | 
			
		||||
    int CBfactorise;
 | 
			
		||||
  public:
 | 
			
		||||
 | 
			
		||||
    /////////////////////////////////////////////////////
 | 
			
		||||
    // Wrap the usual normal equations Schur trick
 | 
			
		||||
    /////////////////////////////////////////////////////
 | 
			
		||||
  SchurRedBlackStaggeredSolve(OperatorFunction<Field> &HermitianRBSolver)  :
 | 
			
		||||
     _HermitianRBSolver(HermitianRBSolver) 
 | 
			
		||||
    { 
 | 
			
		||||
      CBfactorise=0;
 | 
			
		||||
    };
 | 
			
		||||
 | 
			
		||||
    template<class Matrix>
 | 
			
		||||
      void operator() (Matrix & _Matrix,const Field &in, Field &out){
 | 
			
		||||
 | 
			
		||||
      // FIXME CGdiagonalMee not implemented virtual function
 | 
			
		||||
      // FIXME use CBfactorise to control schur decomp
 | 
			
		||||
      GridBase *grid = _Matrix.RedBlackGrid();
 | 
			
		||||
      GridBase *fgrid= _Matrix.Grid();
 | 
			
		||||
 | 
			
		||||
      SchurStaggeredOperator<Matrix,Field> _HermOpEO(_Matrix);
 | 
			
		||||
 
 | 
			
		||||
      Field src_e(grid);
 | 
			
		||||
      Field src_o(grid);
 | 
			
		||||
      Field sol_e(grid);
 | 
			
		||||
      Field sol_o(grid);
 | 
			
		||||
      Field   tmp(grid);
 | 
			
		||||
      Field  Mtmp(grid);
 | 
			
		||||
      Field resid(fgrid);
 | 
			
		||||
 | 
			
		||||
      pickCheckerboard(Even,src_e,in);
 | 
			
		||||
      pickCheckerboard(Odd ,src_o,in);
 | 
			
		||||
      pickCheckerboard(Even,sol_e,out);
 | 
			
		||||
      pickCheckerboard(Odd ,sol_o,out);
 | 
			
		||||
    
 | 
			
		||||
      /////////////////////////////////////////////////////
 | 
			
		||||
      // src_o = (source_o - Moe MeeInv source_e)
 | 
			
		||||
      /////////////////////////////////////////////////////
 | 
			
		||||
      _Matrix.MooeeInv(src_e,tmp);     assert(  tmp.checkerboard ==Even);
 | 
			
		||||
      _Matrix.Meooe   (tmp,Mtmp);      assert( Mtmp.checkerboard ==Odd);     
 | 
			
		||||
      tmp=src_o-Mtmp;                  assert(  tmp.checkerboard ==Odd);     
 | 
			
		||||
 | 
			
		||||
      src_o = tmp;     assert(src_o.checkerboard ==Odd);
 | 
			
		||||
      //  _Matrix.Mooee(tmp,src_o); // Extra factor of "m" in source
 | 
			
		||||
 | 
			
		||||
      //////////////////////////////////////////////////////////////
 | 
			
		||||
      // Call the red-black solver
 | 
			
		||||
      //////////////////////////////////////////////////////////////
 | 
			
		||||
      std::cout<<GridLogMessage << "SchurRedBlackStaggeredSolver calling the Mpc solver" <<std::endl;
 | 
			
		||||
      _HermitianRBSolver(_HermOpEO,src_o,sol_o);  assert(sol_o.checkerboard==Odd);
 | 
			
		||||
 | 
			
		||||
      ///////////////////////////////////////////////////
 | 
			
		||||
      // sol_e = M_ee^-1 * ( src_e - Meo sol_o )...
 | 
			
		||||
      ///////////////////////////////////////////////////
 | 
			
		||||
      _Matrix.Meooe(sol_o,tmp);        assert(  tmp.checkerboard   ==Even);
 | 
			
		||||
      src_e = src_e-tmp;               assert(  src_e.checkerboard ==Even);
 | 
			
		||||
      _Matrix.MooeeInv(src_e,sol_e);   assert(  sol_e.checkerboard ==Even);
 | 
			
		||||
     
 | 
			
		||||
      setCheckerboard(out,sol_e); assert(  sol_e.checkerboard ==Even);
 | 
			
		||||
      setCheckerboard(out,sol_o); assert(  sol_o.checkerboard ==Odd );
 | 
			
		||||
 | 
			
		||||
      // Verify the unprec residual
 | 
			
		||||
      _Matrix.M(out,resid); 
 | 
			
		||||
      resid = resid-in;
 | 
			
		||||
      RealD ns = norm2(in);
 | 
			
		||||
      RealD nr = norm2(resid);
 | 
			
		||||
 | 
			
		||||
      std::cout<<GridLogMessage << "SchurRedBlackStaggered solver true unprec resid "<< std::sqrt(nr/ns) <<" nr "<< nr <<" ns "<<ns << std::endl;
 | 
			
		||||
    }     
 | 
			
		||||
  };
 | 
			
		||||
//  template<class Field> using SchurRedBlackStagSolve = SchurRedBlackStaggeredSolve<Field>;
 | 
			
		||||
  template<class Field> class SchurRedBlackStagSolve {
 | 
			
		||||
  private:
 | 
			
		||||
    OperatorFunction<Field> & _HermitianRBSolver;
 | 
			
		||||
    int CBfactorise;
 | 
			
		||||
  public:
 | 
			
		||||
 | 
			
		||||
    /////////////////////////////////////////////////////
 | 
			
		||||
    // Wrap the usual normal equations Schur trick
 | 
			
		||||
    /////////////////////////////////////////////////////
 | 
			
		||||
  SchurRedBlackStagSolve(OperatorFunction<Field> &HermitianRBSolver, int cb)  :
 | 
			
		||||
     _HermitianRBSolver(HermitianRBSolver), CBfactorise(cb) {}
 | 
			
		||||
 | 
			
		||||
    template<class Matrix>
 | 
			
		||||
      void operator() (Matrix & _Matrix,const Field &in, Field &out){
 | 
			
		||||
 | 
			
		||||
      // FIXME CGdiagonalMee not implemented virtual function
 | 
			
		||||
      // FIXME use CBfactorise to control schur decomp
 | 
			
		||||
      GridBase *grid = _Matrix.RedBlackGrid();
 | 
			
		||||
      GridBase *fgrid= _Matrix.Grid();
 | 
			
		||||
 | 
			
		||||
      SchurStagOperator<Matrix,Field> _HermOpEO(_Matrix);
 | 
			
		||||
      int Schur = CBfactorise;
 | 
			
		||||
      int Other = 1 - CBfactorise;
 | 
			
		||||
 
 | 
			
		||||
      Field src_e(grid);
 | 
			
		||||
      Field src_o(grid);
 | 
			
		||||
      Field sol_e(grid);
 | 
			
		||||
      Field sol_o(grid);
 | 
			
		||||
      Field   tmp(grid);
 | 
			
		||||
      Field  Mtmp(grid);
 | 
			
		||||
      Field resid(fgrid);
 | 
			
		||||
 | 
			
		||||
      pickCheckerboard(Other,src_e,in);
 | 
			
		||||
      pickCheckerboard(Schur ,src_o,in);
 | 
			
		||||
      pickCheckerboard(Other,sol_e,out);
 | 
			
		||||
      pickCheckerboard(Schur ,sol_o,out);
 | 
			
		||||
    
 | 
			
		||||
      /////////////////////////////////////////////////////
 | 
			
		||||
      // src_o = Mdag * (source_o - Moe MeeInv source_e)
 | 
			
		||||
      /////////////////////////////////////////////////////
 | 
			
		||||
      _Matrix.MooeeInv(src_e,tmp);     assert(  tmp.checkerboard ==Other);
 | 
			
		||||
      _Matrix.Meooe   (tmp,Mtmp);      assert( Mtmp.checkerboard ==Schur);     
 | 
			
		||||
      tmp=src_o-Mtmp;                  assert(  tmp.checkerboard ==Schur);     
 | 
			
		||||
 | 
			
		||||
#if 0
 | 
			
		||||
      // get the right MpcDag
 | 
			
		||||
//      _HermOpEO.MpcDag(tmp,src_o);     assert(src_o.checkerboard ==Schur);       
 | 
			
		||||
#else
 | 
			
		||||
      _Matrix.Mooee(tmp,src_o);     assert(src_o.checkerboard ==Schur);
 | 
			
		||||
#endif
 | 
			
		||||
      //////////////////////////////////////////////////////////////
 | 
			
		||||
      // Call the red-black solver
 | 
			
		||||
      //////////////////////////////////////////////////////////////
 | 
			
		||||
      std::cout<<GridLogMessage << "SchurRedBlack solver calling the MpcDagMp solver" <<std::endl;
 | 
			
		||||
      _HermitianRBSolver(_HermOpEO,src_o,sol_o);  assert(sol_o.checkerboard==Schur);
 | 
			
		||||
 | 
			
		||||
      ///////////////////////////////////////////////////
 | 
			
		||||
      // sol_e = M_ee^-1 * ( src_e - Meo sol_o )...
 | 
			
		||||
      ///////////////////////////////////////////////////
 | 
			
		||||
      _Matrix.Meooe(sol_o,tmp);        assert(  tmp.checkerboard   ==Other);
 | 
			
		||||
      src_e = src_e-tmp;               assert(  src_e.checkerboard ==Other);
 | 
			
		||||
      _Matrix.MooeeInv(src_e,sol_e);   assert(  sol_e.checkerboard ==Other);
 | 
			
		||||
     
 | 
			
		||||
      setCheckerboard(out,sol_e); assert(  sol_e.checkerboard ==Other);
 | 
			
		||||
      setCheckerboard(out,sol_o); assert(  sol_o.checkerboard ==Schur );
 | 
			
		||||
 | 
			
		||||
      // Verify the unprec residual
 | 
			
		||||
      _Matrix.M(out,resid); 
 | 
			
		||||
      resid = resid-in;
 | 
			
		||||
      RealD ns = norm2(in);
 | 
			
		||||
      RealD nr = norm2(resid);
 | 
			
		||||
 | 
			
		||||
      std::cout<<GridLogMessage << "SchurRedBlackStag solver true unprec resid "<< std::sqrt(nr/ns) <<" nr "<< nr <<" ns "<<ns << std::endl;
 | 
			
		||||
    }     
 | 
			
		||||
  };
 | 
			
		||||
 | 
			
		||||
  ///////////////////////////////////////////////////////////////////////////////////////////////////////
 | 
			
		||||
  // Take a matrix and form a Red Black solver calling a Herm solver
 | 
			
		||||
  // Use of RB info prevents making SchurRedBlackSolve conform to standard interface
 | 
			
		||||
@@ -76,12 +255,10 @@ namespace Grid {
 | 
			
		||||
    /////////////////////////////////////////////////////
 | 
			
		||||
    // Wrap the usual normal equations Schur trick
 | 
			
		||||
    /////////////////////////////////////////////////////
 | 
			
		||||
  SchurRedBlackDiagMooeeSolve(OperatorFunction<Field> &HermitianRBSolver)  :
 | 
			
		||||
     _HermitianRBSolver(HermitianRBSolver) 
 | 
			
		||||
    { 
 | 
			
		||||
      CBfactorise=0;
 | 
			
		||||
    };
 | 
			
		||||
 | 
			
		||||
  SchurRedBlackDiagMooeeSolve(OperatorFunction<Field> &HermitianRBSolver,int cb=0)  :  _HermitianRBSolver(HermitianRBSolver) 
 | 
			
		||||
  { 
 | 
			
		||||
    CBfactorise=cb;
 | 
			
		||||
  };
 | 
			
		||||
    template<class Matrix>
 | 
			
		||||
      void operator() (Matrix & _Matrix,const Field &in, Field &out){
 | 
			
		||||
 | 
			
		||||
@@ -141,5 +318,238 @@ namespace Grid {
 | 
			
		||||
    }     
 | 
			
		||||
  };
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
  ///////////////////////////////////////////////////////////////////////////////////////////////////////
 | 
			
		||||
  // Take a matrix and form a Red Black solver calling a Herm solver
 | 
			
		||||
  // Use of RB info prevents making SchurRedBlackSolve conform to standard interface
 | 
			
		||||
  ///////////////////////////////////////////////////////////////////////////////////////////////////////
 | 
			
		||||
  template<class Field> class SchurRedBlackDiagTwoSolve {
 | 
			
		||||
  private:
 | 
			
		||||
    OperatorFunction<Field> & _HermitianRBSolver;
 | 
			
		||||
    int CBfactorise;
 | 
			
		||||
  public:
 | 
			
		||||
 | 
			
		||||
    /////////////////////////////////////////////////////
 | 
			
		||||
    // Wrap the usual normal equations Schur trick
 | 
			
		||||
    /////////////////////////////////////////////////////
 | 
			
		||||
  SchurRedBlackDiagTwoSolve(OperatorFunction<Field> &HermitianRBSolver)  :
 | 
			
		||||
     _HermitianRBSolver(HermitianRBSolver) 
 | 
			
		||||
    { 
 | 
			
		||||
      CBfactorise=0;
 | 
			
		||||
    };
 | 
			
		||||
 | 
			
		||||
    template<class Matrix>
 | 
			
		||||
      void operator() (Matrix & _Matrix,const Field &in, Field &out){
 | 
			
		||||
 | 
			
		||||
      // FIXME CGdiagonalMee not implemented virtual function
 | 
			
		||||
      // FIXME use CBfactorise to control schur decomp
 | 
			
		||||
      GridBase *grid = _Matrix.RedBlackGrid();
 | 
			
		||||
      GridBase *fgrid= _Matrix.Grid();
 | 
			
		||||
 | 
			
		||||
      SchurDiagTwoOperator<Matrix,Field> _HermOpEO(_Matrix);
 | 
			
		||||
 
 | 
			
		||||
      Field src_e(grid);
 | 
			
		||||
      Field src_o(grid);
 | 
			
		||||
      Field sol_e(grid);
 | 
			
		||||
      Field sol_o(grid);
 | 
			
		||||
      Field   tmp(grid);
 | 
			
		||||
      Field  Mtmp(grid);
 | 
			
		||||
      Field resid(fgrid);
 | 
			
		||||
 | 
			
		||||
      pickCheckerboard(Even,src_e,in);
 | 
			
		||||
      pickCheckerboard(Odd ,src_o,in);
 | 
			
		||||
      pickCheckerboard(Even,sol_e,out);
 | 
			
		||||
      pickCheckerboard(Odd ,sol_o,out);
 | 
			
		||||
    
 | 
			
		||||
      /////////////////////////////////////////////////////
 | 
			
		||||
      // src_o = Mdag * (source_o - Moe MeeInv source_e)
 | 
			
		||||
      /////////////////////////////////////////////////////
 | 
			
		||||
      _Matrix.MooeeInv(src_e,tmp);     assert(  tmp.checkerboard ==Even);
 | 
			
		||||
      _Matrix.Meooe   (tmp,Mtmp);      assert( Mtmp.checkerboard ==Odd);     
 | 
			
		||||
      tmp=src_o-Mtmp;                  assert(  tmp.checkerboard ==Odd);     
 | 
			
		||||
 | 
			
		||||
      // get the right MpcDag
 | 
			
		||||
      _HermOpEO.MpcDag(tmp,src_o);     assert(src_o.checkerboard ==Odd);       
 | 
			
		||||
 | 
			
		||||
      //////////////////////////////////////////////////////////////
 | 
			
		||||
      // Call the red-black solver
 | 
			
		||||
      //////////////////////////////////////////////////////////////
 | 
			
		||||
      std::cout<<GridLogMessage << "SchurRedBlack solver calling the MpcDagMp solver" <<std::endl;
 | 
			
		||||
//      _HermitianRBSolver(_HermOpEO,src_o,sol_o);  assert(sol_o.checkerboard==Odd);
 | 
			
		||||
      _HermitianRBSolver(_HermOpEO,src_o,tmp);  assert(tmp.checkerboard==Odd);
 | 
			
		||||
      _Matrix.MooeeInv(tmp,sol_o);        assert(  sol_o.checkerboard   ==Odd);
 | 
			
		||||
 | 
			
		||||
      ///////////////////////////////////////////////////
 | 
			
		||||
      // sol_e = M_ee^-1 * ( src_e - Meo sol_o )...
 | 
			
		||||
      ///////////////////////////////////////////////////
 | 
			
		||||
      _Matrix.Meooe(sol_o,tmp);        assert(  tmp.checkerboard   ==Even);
 | 
			
		||||
      src_e = src_e-tmp;               assert(  src_e.checkerboard ==Even);
 | 
			
		||||
      _Matrix.MooeeInv(src_e,sol_e);   assert(  sol_e.checkerboard ==Even);
 | 
			
		||||
     
 | 
			
		||||
      setCheckerboard(out,sol_e); assert(  sol_e.checkerboard ==Even);
 | 
			
		||||
      setCheckerboard(out,sol_o); assert(  sol_o.checkerboard ==Odd );
 | 
			
		||||
 | 
			
		||||
      // Verify the unprec residual
 | 
			
		||||
      _Matrix.M(out,resid); 
 | 
			
		||||
      resid = resid-in;
 | 
			
		||||
      RealD ns = norm2(in);
 | 
			
		||||
      RealD nr = norm2(resid);
 | 
			
		||||
 | 
			
		||||
      std::cout<<GridLogMessage << "SchurRedBlackDiagTwo solver true unprec resid "<< std::sqrt(nr/ns) <<" nr "<< nr <<" ns "<<ns << std::endl;
 | 
			
		||||
    }     
 | 
			
		||||
  };
 | 
			
		||||
  ///////////////////////////////////////////////////////////////////////////////////////////////////////
 | 
			
		||||
  // Take a matrix and form a Red Black solver calling a Herm solver
 | 
			
		||||
  // Use of RB info prevents making SchurRedBlackSolve conform to standard interface
 | 
			
		||||
  ///////////////////////////////////////////////////////////////////////////////////////////////////////
 | 
			
		||||
  template<class Field> class SchurRedBlackDiagTwoMixed {
 | 
			
		||||
  private:
 | 
			
		||||
    LinearFunction<Field> & _HermitianRBSolver;
 | 
			
		||||
    int CBfactorise;
 | 
			
		||||
  public:
 | 
			
		||||
 | 
			
		||||
    /////////////////////////////////////////////////////
 | 
			
		||||
    // Wrap the usual normal equations Schur trick
 | 
			
		||||
    /////////////////////////////////////////////////////
 | 
			
		||||
  SchurRedBlackDiagTwoMixed(LinearFunction<Field> &HermitianRBSolver)  :
 | 
			
		||||
     _HermitianRBSolver(HermitianRBSolver) 
 | 
			
		||||
    { 
 | 
			
		||||
      CBfactorise=0;
 | 
			
		||||
    };
 | 
			
		||||
 | 
			
		||||
    template<class Matrix>
 | 
			
		||||
      void operator() (Matrix & _Matrix,const Field &in, Field &out){
 | 
			
		||||
 | 
			
		||||
      // FIXME CGdiagonalMee not implemented virtual function
 | 
			
		||||
      // FIXME use CBfactorise to control schur decomp
 | 
			
		||||
      GridBase *grid = _Matrix.RedBlackGrid();
 | 
			
		||||
      GridBase *fgrid= _Matrix.Grid();
 | 
			
		||||
 | 
			
		||||
      SchurDiagTwoOperator<Matrix,Field> _HermOpEO(_Matrix);
 | 
			
		||||
 
 | 
			
		||||
      Field src_e(grid);
 | 
			
		||||
      Field src_o(grid);
 | 
			
		||||
      Field sol_e(grid);
 | 
			
		||||
      Field sol_o(grid);
 | 
			
		||||
      Field   tmp(grid);
 | 
			
		||||
      Field  Mtmp(grid);
 | 
			
		||||
      Field resid(fgrid);
 | 
			
		||||
 | 
			
		||||
      pickCheckerboard(Even,src_e,in);
 | 
			
		||||
      pickCheckerboard(Odd ,src_o,in);
 | 
			
		||||
      pickCheckerboard(Even,sol_e,out);
 | 
			
		||||
      pickCheckerboard(Odd ,sol_o,out);
 | 
			
		||||
    
 | 
			
		||||
      /////////////////////////////////////////////////////
 | 
			
		||||
      // src_o = Mdag * (source_o - Moe MeeInv source_e)
 | 
			
		||||
      /////////////////////////////////////////////////////
 | 
			
		||||
      _Matrix.MooeeInv(src_e,tmp);     assert(  tmp.checkerboard ==Even);
 | 
			
		||||
      _Matrix.Meooe   (tmp,Mtmp);      assert( Mtmp.checkerboard ==Odd);     
 | 
			
		||||
      tmp=src_o-Mtmp;                  assert(  tmp.checkerboard ==Odd);     
 | 
			
		||||
 | 
			
		||||
      // get the right MpcDag
 | 
			
		||||
      _HermOpEO.MpcDag(tmp,src_o);     assert(src_o.checkerboard ==Odd);       
 | 
			
		||||
 | 
			
		||||
      //////////////////////////////////////////////////////////////
 | 
			
		||||
      // Call the red-black solver
 | 
			
		||||
      //////////////////////////////////////////////////////////////
 | 
			
		||||
      std::cout<<GridLogMessage << "SchurRedBlack solver calling the MpcDagMp solver" <<std::endl;
 | 
			
		||||
//      _HermitianRBSolver(_HermOpEO,src_o,sol_o);  assert(sol_o.checkerboard==Odd);
 | 
			
		||||
//      _HermitianRBSolver(_HermOpEO,src_o,tmp);  assert(tmp.checkerboard==Odd);
 | 
			
		||||
      _HermitianRBSolver(src_o,tmp);  assert(tmp.checkerboard==Odd);
 | 
			
		||||
      _Matrix.MooeeInv(tmp,sol_o);        assert(  sol_o.checkerboard   ==Odd);
 | 
			
		||||
 | 
			
		||||
      ///////////////////////////////////////////////////
 | 
			
		||||
      // sol_e = M_ee^-1 * ( src_e - Meo sol_o )...
 | 
			
		||||
      ///////////////////////////////////////////////////
 | 
			
		||||
      _Matrix.Meooe(sol_o,tmp);        assert(  tmp.checkerboard   ==Even);
 | 
			
		||||
      src_e = src_e-tmp;               assert(  src_e.checkerboard ==Even);
 | 
			
		||||
      _Matrix.MooeeInv(src_e,sol_e);   assert(  sol_e.checkerboard ==Even);
 | 
			
		||||
     
 | 
			
		||||
      setCheckerboard(out,sol_e); assert(  sol_e.checkerboard ==Even);
 | 
			
		||||
      setCheckerboard(out,sol_o); assert(  sol_o.checkerboard ==Odd );
 | 
			
		||||
 | 
			
		||||
      // Verify the unprec residual
 | 
			
		||||
      _Matrix.M(out,resid); 
 | 
			
		||||
      resid = resid-in;
 | 
			
		||||
      RealD ns = norm2(in);
 | 
			
		||||
      RealD nr = norm2(resid);
 | 
			
		||||
 | 
			
		||||
      std::cout<<GridLogMessage << "SchurRedBlackDiagTwo solver true unprec resid "<< std::sqrt(nr/ns) <<" nr "<< nr <<" ns "<<ns << std::endl;
 | 
			
		||||
    }     
 | 
			
		||||
  };
 | 
			
		||||
 | 
			
		||||
  template<class Field> class SchurRedBlackStagMixed {
 | 
			
		||||
  private:
 | 
			
		||||
    LinearFunction<Field> & _HermitianRBSolver;
 | 
			
		||||
    int CBfactorise;
 | 
			
		||||
  public:
 | 
			
		||||
 | 
			
		||||
    /////////////////////////////////////////////////////
 | 
			
		||||
    // Wrap the usual normal equations Schur trick
 | 
			
		||||
    /////////////////////////////////////////////////////
 | 
			
		||||
  SchurRedBlackStagMixed(LinearFunction<Field> &HermitianRBSolver, int cb)  :
 | 
			
		||||
     _HermitianRBSolver(HermitianRBSolver), CBfactorise(cb) {}
 | 
			
		||||
 | 
			
		||||
    template<class Matrix>
 | 
			
		||||
      void operator() (Matrix & _Matrix,const Field &in, Field &out){
 | 
			
		||||
 | 
			
		||||
      // FIXME CGdiagonalMee not implemented virtual function
 | 
			
		||||
      // FIXME use CBfactorise to control schur decomp
 | 
			
		||||
      GridBase *grid = _Matrix.RedBlackGrid();
 | 
			
		||||
      GridBase *fgrid= _Matrix.Grid();
 | 
			
		||||
 | 
			
		||||
      SchurStagOperator<Matrix,Field> _HermOpEO(_Matrix);
 | 
			
		||||
      int Schur = CBfactorise;
 | 
			
		||||
      int Other = 1 - CBfactorise;
 | 
			
		||||
 
 | 
			
		||||
      Field src_e(grid);
 | 
			
		||||
      Field src_o(grid);
 | 
			
		||||
      Field sol_e(grid);
 | 
			
		||||
      Field sol_o(grid);
 | 
			
		||||
      Field   tmp(grid);
 | 
			
		||||
      Field  Mtmp(grid);
 | 
			
		||||
      Field resid(fgrid);
 | 
			
		||||
 | 
			
		||||
      pickCheckerboard(Other,src_e,in);
 | 
			
		||||
      pickCheckerboard(Schur ,src_o,in);
 | 
			
		||||
      pickCheckerboard(Other,sol_e,out);
 | 
			
		||||
      pickCheckerboard(Schur ,sol_o,out);
 | 
			
		||||
    
 | 
			
		||||
      /////////////////////////////////////////////////////
 | 
			
		||||
      // src_o = Mdag * (source_o - Moe MeeInv source_e)
 | 
			
		||||
      /////////////////////////////////////////////////////
 | 
			
		||||
      _Matrix.MooeeInv(src_e,tmp);     assert(  tmp.checkerboard ==Other);
 | 
			
		||||
      _Matrix.Meooe   (tmp,Mtmp);      assert( Mtmp.checkerboard ==Schur);     
 | 
			
		||||
      tmp=src_o-Mtmp;                  assert(  tmp.checkerboard ==Schur);     
 | 
			
		||||
 | 
			
		||||
      _Matrix.Mooee(tmp,src_o);     assert(src_o.checkerboard ==Schur);
 | 
			
		||||
      //////////////////////////////////////////////////////////////
 | 
			
		||||
      // Call the red-black solver
 | 
			
		||||
      //////////////////////////////////////////////////////////////
 | 
			
		||||
      std::cout<<GridLogMessage << "SchurRedBlack solver calling the MpcDagMp solver" <<std::endl;
 | 
			
		||||
//      _HermitianRBSolver(_HermOpEO,src_o,sol_o);  assert(sol_o.checkerboard==Schur);
 | 
			
		||||
      _HermitianRBSolver(src_o,sol_o);  assert(sol_o.checkerboard==Other);
 | 
			
		||||
 | 
			
		||||
      ///////////////////////////////////////////////////
 | 
			
		||||
      // sol_e = M_ee^-1 * ( src_e - Meo sol_o )...
 | 
			
		||||
      ///////////////////////////////////////////////////
 | 
			
		||||
      _Matrix.Meooe(sol_o,tmp);        assert(  tmp.checkerboard   ==Other);
 | 
			
		||||
      src_e = src_e-tmp;               assert(  src_e.checkerboard ==Other);
 | 
			
		||||
      _Matrix.MooeeInv(src_e,sol_e);   assert(  sol_e.checkerboard ==Other);
 | 
			
		||||
     
 | 
			
		||||
      setCheckerboard(out,sol_e); assert(  sol_e.checkerboard ==Other);
 | 
			
		||||
      setCheckerboard(out,sol_o); assert(  sol_o.checkerboard ==Schur );
 | 
			
		||||
 | 
			
		||||
      // Verify the unprec residual
 | 
			
		||||
      _Matrix.M(out,resid); 
 | 
			
		||||
      resid = resid-in;
 | 
			
		||||
      RealD ns = norm2(in);
 | 
			
		||||
      RealD nr = norm2(resid);
 | 
			
		||||
 | 
			
		||||
      std::cout<<GridLogMessage << "SchurRedBlackStag solver true unprec resid "<< std::sqrt(nr/ns) <<" nr "<< nr <<" ns "<<ns << std::endl;
 | 
			
		||||
    }     
 | 
			
		||||
  };
 | 
			
		||||
 | 
			
		||||
}
 | 
			
		||||
#endif
 | 
			
		||||
 
 | 
			
		||||
							
								
								
									
										933
									
								
								lib/algorithms/iterative/SimpleLanczos.h
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										933
									
								
								lib/algorithms/iterative/SimpleLanczos.h
									
									
									
									
									
										Normal file
									
								
							@@ -0,0 +1,933 @@
 | 
			
		||||
    /*************************************************************************************
 | 
			
		||||
 | 
			
		||||
    Grid physics library, www.github.com/paboyle/Grid 
 | 
			
		||||
 | 
			
		||||
    Source file: ./lib/algorithms/iterative/ImplicitlyRestartedLanczos.h
 | 
			
		||||
 | 
			
		||||
    Copyright (C) 2015
 | 
			
		||||
 | 
			
		||||
Author: Peter Boyle <paboyle@ph.ed.ac.uk>
 | 
			
		||||
Author: paboyle <paboyle@ph.ed.ac.uk>
 | 
			
		||||
Author: Chulwoo Jung <chulwoo@bnl.gov>
 | 
			
		||||
 | 
			
		||||
    This program is free software; you can redistribute it and/or modify
 | 
			
		||||
    it under the terms of the GNU General Public License as published by
 | 
			
		||||
    the Free Software Foundation; either version 2 of the License, or
 | 
			
		||||
    (at your option) any later version.
 | 
			
		||||
 | 
			
		||||
    This program is distributed in the hope that it will be useful,
 | 
			
		||||
    but WITHOUT ANY WARRANTY; without even the implied warranty of
 | 
			
		||||
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 | 
			
		||||
    GNU General Public License for more details.
 | 
			
		||||
 | 
			
		||||
    You should have received a copy of the GNU General Public License along
 | 
			
		||||
    with this program; if not, write to the Free Software Foundation, Inc.,
 | 
			
		||||
    51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 | 
			
		||||
 | 
			
		||||
    See the full license in the file "LICENSE" in the top level distribution directory
 | 
			
		||||
    *************************************************************************************/
 | 
			
		||||
    /*  END LEGAL */
 | 
			
		||||
#ifndef GRID_LANC_H
 | 
			
		||||
#define GRID_LANC_H
 | 
			
		||||
 | 
			
		||||
#include <string.h>		//memset
 | 
			
		||||
 | 
			
		||||
#ifdef USE_LAPACK
 | 
			
		||||
#ifdef USE_MKL
 | 
			
		||||
#include<mkl_lapack.h>
 | 
			
		||||
#else
 | 
			
		||||
void LAPACK_dstegr (char *jobz, char *range, int *n, double *d, double *e,
 | 
			
		||||
		    double *vl, double *vu, int *il, int *iu, double *abstol,
 | 
			
		||||
		    int *m, double *w, double *z, int *ldz, int *isuppz,
 | 
			
		||||
		    double *work, int *lwork, int *iwork, int *liwork,
 | 
			
		||||
		    int *info);
 | 
			
		||||
//#include <lapacke/lapacke.h>
 | 
			
		||||
#endif
 | 
			
		||||
#endif
 | 
			
		||||
 | 
			
		||||
#include <Grid/algorithms/densematrix/DenseMatrix.h>
 | 
			
		||||
//#include <Grid/algorithms/iterative/EigenSort.h>
 | 
			
		||||
 | 
			
		||||
// eliminate temorary vector in calc()
 | 
			
		||||
#define MEM_SAVE
 | 
			
		||||
 | 
			
		||||
namespace Grid
 | 
			
		||||
{
 | 
			
		||||
 | 
			
		||||
  struct Bisection
 | 
			
		||||
  {
 | 
			
		||||
 | 
			
		||||
#if 0
 | 
			
		||||
    static void get_eig2 (int row_num, std::vector < RealD > &ALPHA,
 | 
			
		||||
			  std::vector < RealD > &BETA,
 | 
			
		||||
			  std::vector < RealD > &eig)
 | 
			
		||||
    {
 | 
			
		||||
      int i, j;
 | 
			
		||||
        std::vector < RealD > evec1 (row_num + 3);
 | 
			
		||||
        std::vector < RealD > evec2 (row_num + 3);
 | 
			
		||||
      RealD eps2;
 | 
			
		||||
        ALPHA[1] = 0.;
 | 
			
		||||
        BETHA[1] = 0.;
 | 
			
		||||
      for (i = 0; i < row_num - 1; i++)
 | 
			
		||||
	{
 | 
			
		||||
	  ALPHA[i + 1] = A[i * (row_num + 1)].real ();
 | 
			
		||||
	  BETHA[i + 2] = A[i * (row_num + 1) + 1].real ();
 | 
			
		||||
	}
 | 
			
		||||
      ALPHA[row_num] = A[(row_num - 1) * (row_num + 1)].real ();
 | 
			
		||||
        bisec (ALPHA, BETHA, row_num, 1, row_num, 1e-10, 1e-10, evec1, eps2);
 | 
			
		||||
        bisec (ALPHA, BETHA, row_num, 1, row_num, 1e-16, 1e-16, evec2, eps2);
 | 
			
		||||
 | 
			
		||||
      // Do we really need to sort here?
 | 
			
		||||
      int begin = 1;
 | 
			
		||||
      int end = row_num;
 | 
			
		||||
      int swapped = 1;
 | 
			
		||||
      while (swapped)
 | 
			
		||||
	{
 | 
			
		||||
	  swapped = 0;
 | 
			
		||||
	  for (i = begin; i < end; i++)
 | 
			
		||||
	    {
 | 
			
		||||
	      if (mag (evec2[i]) > mag (evec2[i + 1]))
 | 
			
		||||
		{
 | 
			
		||||
		  swap (evec2 + i, evec2 + i + 1);
 | 
			
		||||
		  swapped = 1;
 | 
			
		||||
		}
 | 
			
		||||
	    }
 | 
			
		||||
	  end--;
 | 
			
		||||
	  for (i = end - 1; i >= begin; i--)
 | 
			
		||||
	    {
 | 
			
		||||
	      if (mag (evec2[i]) > mag (evec2[i + 1]))
 | 
			
		||||
		{
 | 
			
		||||
		  swap (evec2 + i, evec2 + i + 1);
 | 
			
		||||
		  swapped = 1;
 | 
			
		||||
		}
 | 
			
		||||
	    }
 | 
			
		||||
	  begin++;
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
      for (i = 0; i < row_num; i++)
 | 
			
		||||
	{
 | 
			
		||||
	  for (j = 0; j < row_num; j++)
 | 
			
		||||
	    {
 | 
			
		||||
	      if (i == j)
 | 
			
		||||
		H[i * row_num + j] = evec2[i + 1];
 | 
			
		||||
	      else
 | 
			
		||||
		H[i * row_num + j] = 0.;
 | 
			
		||||
	    }
 | 
			
		||||
	}
 | 
			
		||||
    }
 | 
			
		||||
#endif
 | 
			
		||||
 | 
			
		||||
    static void bisec (std::vector < RealD > &c,
 | 
			
		||||
		       std::vector < RealD > &b,
 | 
			
		||||
		       int n,
 | 
			
		||||
		       int m1,
 | 
			
		||||
		       int m2,
 | 
			
		||||
		       RealD eps1,
 | 
			
		||||
		       RealD relfeh, std::vector < RealD > &x, RealD & eps2)
 | 
			
		||||
    {
 | 
			
		||||
      std::vector < RealD > wu (n + 2);
 | 
			
		||||
 | 
			
		||||
      RealD h, q, x1, xu, x0, xmin, xmax;
 | 
			
		||||
      int i, a, k;
 | 
			
		||||
 | 
			
		||||
      b[1] = 0.0;
 | 
			
		||||
      xmin = c[n] - fabs (b[n]);
 | 
			
		||||
      xmax = c[n] + fabs (b[n]);
 | 
			
		||||
      for (i = 1; i < n; i++)
 | 
			
		||||
	{
 | 
			
		||||
	  h = fabs (b[i]) + fabs (b[i + 1]);
 | 
			
		||||
	  if (c[i] + h > xmax)
 | 
			
		||||
	    xmax = c[i] + h;
 | 
			
		||||
	  if (c[i] - h < xmin)
 | 
			
		||||
	    xmin = c[i] - h;
 | 
			
		||||
	}
 | 
			
		||||
      xmax *= 2.;
 | 
			
		||||
 | 
			
		||||
      eps2 = relfeh * ((xmin + xmax) > 0.0 ? xmax : -xmin);
 | 
			
		||||
      if (eps1 <= 0.0)
 | 
			
		||||
	eps1 = eps2;
 | 
			
		||||
      eps2 = 0.5 * eps1 + 7.0 * (eps2);
 | 
			
		||||
      x0 = xmax;
 | 
			
		||||
      for (i = m1; i <= m2; i++)
 | 
			
		||||
	{
 | 
			
		||||
	  x[i] = xmax;
 | 
			
		||||
	  wu[i] = xmin;
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
      for (k = m2; k >= m1; k--)
 | 
			
		||||
	{
 | 
			
		||||
	  xu = xmin;
 | 
			
		||||
	  i = k;
 | 
			
		||||
	  do
 | 
			
		||||
	    {
 | 
			
		||||
	      if (xu < wu[i])
 | 
			
		||||
		{
 | 
			
		||||
		  xu = wu[i];
 | 
			
		||||
		  i = m1 - 1;
 | 
			
		||||
		}
 | 
			
		||||
	      i--;
 | 
			
		||||
	    }
 | 
			
		||||
	  while (i >= m1);
 | 
			
		||||
	  if (x0 > x[k])
 | 
			
		||||
	    x0 = x[k];
 | 
			
		||||
	  while ((x0 - xu) > 2 * relfeh * (fabs (xu) + fabs (x0)) + eps1)
 | 
			
		||||
	    {
 | 
			
		||||
	      x1 = (xu + x0) / 2;
 | 
			
		||||
 | 
			
		||||
	      a = 0;
 | 
			
		||||
	      q = 1.0;
 | 
			
		||||
	      for (i = 1; i <= n; i++)
 | 
			
		||||
		{
 | 
			
		||||
		  q =
 | 
			
		||||
		    c[i] - x1 -
 | 
			
		||||
		    ((q != 0.0) ? b[i] * b[i] / q : fabs (b[i]) / relfeh);
 | 
			
		||||
		  if (q < 0)
 | 
			
		||||
		    a++;
 | 
			
		||||
		}
 | 
			
		||||
//      printf("x1=%0.14e a=%d\n",x1,a);
 | 
			
		||||
	      if (a < k)
 | 
			
		||||
		{
 | 
			
		||||
		  if (a < m1)
 | 
			
		||||
		    {
 | 
			
		||||
		      xu = x1;
 | 
			
		||||
		      wu[m1] = x1;
 | 
			
		||||
		    }
 | 
			
		||||
		  else
 | 
			
		||||
		    {
 | 
			
		||||
		      xu = x1;
 | 
			
		||||
		      wu[a + 1] = x1;
 | 
			
		||||
		      if (x[a] > x1)
 | 
			
		||||
			x[a] = x1;
 | 
			
		||||
		    }
 | 
			
		||||
		}
 | 
			
		||||
	      else
 | 
			
		||||
		x0 = x1;
 | 
			
		||||
	    }
 | 
			
		||||
	  printf ("x0=%0.14e xu=%0.14e k=%d\n", x0, xu, k);
 | 
			
		||||
	  x[k] = (x0 + xu) / 2;
 | 
			
		||||
	}
 | 
			
		||||
    }
 | 
			
		||||
  };
 | 
			
		||||
 | 
			
		||||
/////////////////////////////////////////////////////////////
 | 
			
		||||
// Implicitly restarted lanczos
 | 
			
		||||
/////////////////////////////////////////////////////////////
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
  template < class Field > class SimpleLanczos
 | 
			
		||||
  {
 | 
			
		||||
 | 
			
		||||
    const RealD small = 1.0e-16;
 | 
			
		||||
  public:
 | 
			
		||||
    int lock;
 | 
			
		||||
    int get;
 | 
			
		||||
    int Niter;
 | 
			
		||||
    int converged;
 | 
			
		||||
 | 
			
		||||
    int Nstop;			// Number of evecs checked for convergence
 | 
			
		||||
    int Nk;			// Number of converged sought
 | 
			
		||||
    int Np;			// Np -- Number of spare vecs in kryloc space
 | 
			
		||||
    int Nm;			// Nm -- total number of vectors
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
    RealD OrthoTime;
 | 
			
		||||
 | 
			
		||||
    RealD eresid;
 | 
			
		||||
 | 
			
		||||
    SortEigen < Field > _sort;
 | 
			
		||||
 | 
			
		||||
    LinearOperatorBase < Field > &_Linop;
 | 
			
		||||
 | 
			
		||||
    OperatorFunction < Field > &_poly;
 | 
			
		||||
 | 
			
		||||
    /////////////////////////
 | 
			
		||||
    // Constructor
 | 
			
		||||
    /////////////////////////
 | 
			
		||||
    void init (void)
 | 
			
		||||
    {
 | 
			
		||||
    };
 | 
			
		||||
    void Abort (int ff, DenseVector < RealD > &evals,
 | 
			
		||||
		DenseVector < DenseVector < RealD > >&evecs);
 | 
			
		||||
 | 
			
		||||
    SimpleLanczos (LinearOperatorBase < Field > &Linop,	// op
 | 
			
		||||
		   OperatorFunction < Field > &poly,	// polynmial
 | 
			
		||||
		   int _Nstop,	// sought vecs
 | 
			
		||||
		   int _Nk,	// sought vecs
 | 
			
		||||
		   int _Nm,	// spare vecs
 | 
			
		||||
		   RealD _eresid,	// resid in lmdue deficit 
 | 
			
		||||
		   int _Niter):	// Max iterations
 | 
			
		||||
     
 | 
			
		||||
      _Linop (Linop),
 | 
			
		||||
      _poly (poly),
 | 
			
		||||
      Nstop (_Nstop), Nk (_Nk), Nm (_Nm), eresid (_eresid), Niter (_Niter)
 | 
			
		||||
    {
 | 
			
		||||
      Np = Nm - Nk;
 | 
			
		||||
      assert (Np > 0);
 | 
			
		||||
    };
 | 
			
		||||
 | 
			
		||||
    /////////////////////////
 | 
			
		||||
    // Sanity checked this routine (step) against Saad.
 | 
			
		||||
    /////////////////////////
 | 
			
		||||
    void RitzMatrix (DenseVector < Field > &evec, int k)
 | 
			
		||||
    {
 | 
			
		||||
 | 
			
		||||
      if (1)
 | 
			
		||||
	return;
 | 
			
		||||
 | 
			
		||||
      GridBase *grid = evec[0]._grid;
 | 
			
		||||
      Field w (grid);
 | 
			
		||||
      std::cout << GridLogMessage << "RitzMatrix " << std::endl;
 | 
			
		||||
      for (int i = 0; i < k; i++)
 | 
			
		||||
	{
 | 
			
		||||
	  _Linop.HermOp (evec[i], w);
 | 
			
		||||
//      _poly(_Linop,evec[i],w);
 | 
			
		||||
	  std::cout << GridLogMessage << "[" << i << "] ";
 | 
			
		||||
	  for (int j = 0; j < k; j++)
 | 
			
		||||
	    {
 | 
			
		||||
	      ComplexD in = innerProduct (evec[j], w);
 | 
			
		||||
	      if (fabs ((double) i - j) > 1)
 | 
			
		||||
		{
 | 
			
		||||
		  if (abs (in) > 1.0e-9)
 | 
			
		||||
		    {
 | 
			
		||||
		      std::cout << GridLogMessage << "oops" << std::endl;
 | 
			
		||||
		      abort ();
 | 
			
		||||
		    }
 | 
			
		||||
		  else
 | 
			
		||||
		    std::cout << GridLogMessage << " 0 ";
 | 
			
		||||
		}
 | 
			
		||||
	      else
 | 
			
		||||
		{
 | 
			
		||||
		  std::cout << GridLogMessage << " " << in << " ";
 | 
			
		||||
		}
 | 
			
		||||
	    }
 | 
			
		||||
	  std::cout << GridLogMessage << std::endl;
 | 
			
		||||
	}
 | 
			
		||||
    }
 | 
			
		||||
 | 
			
		||||
    // One Lanczos iteration (three-term recurrence).  On entry `current` is
    // v_k (unit norm) and `last` is v_{k-1}; on exit `next` holds v_{k+1}.
    // The Ritz tridiagonal entries are stored as lmd[k] = alpha_k and
    // lme[k] = beta_{k+1}; both vectors are grown by Nm whenever k reaches
    // their current size, so the recurrence can run past the initial Nm.
    void step (DenseVector < RealD > &lmd,
	       DenseVector < RealD > &lme,
	       Field & last, Field & current, Field & next, uint64_t k)
    {
      if (lmd.size () <= k)
	lmd.resize (k + Nm);
      if (lme.size () <= k)
	lme.resize (k + Nm);

//      _poly(_Linop,current,next );   // 3. wk:=Avk−βkv_{k−1}
      _Linop.HermOp (current, next);	// 3. wk:=Avk−βkv_{k−1}
      if (k > 0)
	{
	  // Subtract the projection on the previous basis vector.
	  next -= lme[k - 1] * last;
	}
//      std::cout<<GridLogMessage << "<last|next>" << innerProduct(last,next) <<std::endl;

      ComplexD zalph = innerProduct (current, next);	// 4. αk:=(wk,vk)
      // For a Hermitian operator alpha_k is real; keep only the real part.
      RealD alph = real (zalph);

      next = next - alph * current;	// 5. wk:=wk−αkvk
//      std::cout<<GridLogMessage << "<current|next>" << innerProduct(current,next) <<std::endl;

      RealD beta = normalise (next);	// 6. βk+1 := ∥wk∥2. If βk+1 = 0 then Stop
      // 7. vk+1 := wk/βk+1
//       norm=beta;

      // Progress print roughly 100 times per Nm iterations.
      int interval = Nm / 100 + 1;
      if ((k % interval) == 0)
	std::
	  cout << GridLogMessage << k << " : alpha = " << zalph << " beta " <<
	  beta << std::endl;
      // A tiny beta signals (near) breakdown / invariant subspace; we only
      // warn here rather than stopping.
      const RealD tiny = 1.0e-20;
      if (beta < tiny)
	{
	  std::cout << GridLogMessage << " beta is tiny " << beta << std::
	    endl;
	}
      lmd[k] = alph;
      lme[k] = beta;

    }
 | 
			
		||||
 | 
			
		||||
    // One implicitly-shifted QR sweep on the symmetric tridiagonal matrix
    // (diagonal lmd, off-diagonal lme) over rows [kmin-1, kmax-1], with
    // shift Dsh.  The accumulated rotations are applied to the first Nk
    // rows of Qt (row-major, leading dimension Nm; note the parameter Nm
    // shadows the class member of the same name).  The "bulge" created by
    // the first rotation is carried in x and chased down by Givens
    // rotations until it falls off the end of the active window.
    void qr_decomp (DenseVector < RealD > &lmd,
		    DenseVector < RealD > &lme,
		    int Nk,
		    int Nm,
		    DenseVector < RealD > &Qt, RealD Dsh, int kmin, int kmax)
    {
      int k = kmin - 1;
      RealD x;			// bulge element chased down the matrix

      // Initial rotation determined by the shifted leading column.
      RealD Fden = 1.0 / hypot (lmd[k] - Dsh, lme[k]);
      RealD c = (lmd[k] - Dsh) * Fden;
      RealD s = -lme[k] * Fden;

      RealD tmpa1 = lmd[k];
      RealD tmpa2 = lmd[k + 1];
      RealD tmpb = lme[k];

      // Apply the rotation to the 2x2 block (k, k+1) of the tridiagonal.
      lmd[k] = c * c * tmpa1 + s * s * tmpa2 - 2.0 * c * s * tmpb;
      lmd[k + 1] = s * s * tmpa1 + c * c * tmpa2 + 2.0 * c * s * tmpb;
      lme[k] = c * s * (tmpa1 - tmpa2) + (c * c - s * s) * tmpb;
      x = -s * lme[k + 1];	// fill-in outside the tridiagonal band
      lme[k + 1] = c * lme[k + 1];

      // Accumulate the rotation into the eigenvector transform Qt.
      for (int i = 0; i < Nk; ++i)
	{
	  RealD Qtmp1 = Qt[i + Nm * k];
	  RealD Qtmp2 = Qt[i + Nm * (k + 1)];
	  Qt[i + Nm * k] = c * Qtmp1 - s * Qtmp2;
	  Qt[i + Nm * (k + 1)] = s * Qtmp1 + c * Qtmp2;
	}

      // Givens transformations
      // Bulge chase: each rotation annihilates x and creates a new bulge
      // one row further down (except at the final step).
      for (int k = kmin; k < kmax - 1; ++k)
	{

	  RealD Fden = 1.0 / hypot (x, lme[k - 1]);
	  RealD c = lme[k - 1] * Fden;
	  RealD s = -x * Fden;

	  RealD tmpa1 = lmd[k];
	  RealD tmpa2 = lmd[k + 1];
	  RealD tmpb = lme[k];

	  lmd[k] = c * c * tmpa1 + s * s * tmpa2 - 2.0 * c * s * tmpb;
	  lmd[k + 1] = s * s * tmpa1 + c * c * tmpa2 + 2.0 * c * s * tmpb;
	  lme[k] = c * s * (tmpa1 - tmpa2) + (c * c - s * s) * tmpb;
	  lme[k - 1] = c * lme[k - 1] - s * x;

	  if (k != kmax - 2)
	    {
	      x = -s * lme[k + 1];	// next bulge
	      lme[k + 1] = c * lme[k + 1];
	    }

	  for (int i = 0; i < Nk; ++i)
	    {
	      RealD Qtmp1 = Qt[i + Nm * k];
	      RealD Qtmp2 = Qt[i + Nm * (k + 1)];
	      Qt[i + Nm * k] = c * Qtmp1 - s * Qtmp2;
	      Qt[i + Nm * (k + 1)] = s * Qtmp1 + c * Qtmp2;
	    }
	}
    }
 | 
			
		||||
 | 
			
		||||
#ifdef USE_LAPACK
 | 
			
		||||
#ifdef USE_MKL
 | 
			
		||||
#define LAPACK_INT MKL_INT
 | 
			
		||||
#else
 | 
			
		||||
#define LAPACK_INT long long
 | 
			
		||||
#endif
 | 
			
		||||
    // Compute the eigenvalues of the symmetric tridiagonal matrix
    // (diagonal lmd, off-diagonal lme, order N1) with LAPACK dstegr.
    // The eigenvalue range is partitioned across MPI ranks: each rank
    // computes its [il, iu] slice, scatters it into the right place of
    // evals_tmp, and a GlobalSumVector combines the slices.  Eigenvalues
    // are written back ... only into evals_tmp (local array); callers rely
    // on dstegr's "ascending order" output convention (see comment below).
    // N2 ("get") is currently unused.
    // NOTE(review): uses C variable-length arrays (GCC extension, not ISO C++).
    // NOTE(review): the printf calls format LAPACK_INT (long long when MKL is
    // absent) with %d — assumes a matching width; verify on LP64 builds.
    void diagonalize_lapack (DenseVector < RealD > &lmd, DenseVector < RealD > &lme, int N1,	// all
			     int N2,	// get
			     GridBase * grid)
    {
      const int size = Nm;
      LAPACK_INT NN = N1;
      double evals_tmp[NN];
      double DD[NN];		// diagonal, copied from lmd
      double EE[NN];		// sub-diagonal, copied from lme
      // Copy the tridiagonal band into plain double arrays for LAPACK.
      for (int i = 0; i < NN; i++)
	for (int j = i - 1; j <= i + 1; j++)
	  if (j < NN && j >= 0)
	    {
	      if (i == j)
		DD[i] = lmd[i];
	      if (i == j)
		evals_tmp[i] = lmd[i];
	      if (j == (i - 1))
		EE[j] = lme[j];
	    }
      LAPACK_INT evals_found;
      // Workspace sizes per the dstegr documentation.
      LAPACK_INT lwork =
	((18 * NN) >
	 (1 + 4 * NN + NN * NN) ? (18 * NN) : (1 + 4 * NN + NN * NN));
      LAPACK_INT liwork = 3 + NN * 10;
      LAPACK_INT iwork[liwork];
      double work[lwork];
      LAPACK_INT isuppz[2 * NN];
      char jobz = 'N';		// calculate evals only
      char range = 'I';		// calculate il-th to iu-th evals
      //    char range = 'A'; // calculate all evals
      char uplo = 'U';		// refer to upper half of original matrix
      char compz = 'I';		// Compute eigenvectors of tridiagonal matrix
      int ifail[NN];
      LAPACK_INT info;
//  int total = QMP_get_number_of_nodes();
//  int node = QMP_get_node_number();
//  GridBase *grid = evec[0]._grid;
      // Split the index range [1, NN] evenly over the MPI ranks.
      int total = grid->_Nprocessors;
      int node = grid->_processor;
      int interval = (NN / total) + 1;
      double vl = 0.0, vu = 0.0;
      LAPACK_INT il = interval * node + 1, iu = interval * (node + 1);
      if (iu > NN)
	iu = NN;
      double tol = 0.0;
      if (1)
	{
	  memset (evals_tmp, 0, sizeof (double) * NN);
	  if (il <= NN)
	    {
	      printf ("total=%d node=%d il=%d iu=%d\n", total, node, il, iu);
#ifdef USE_MKL
	      dstegr (&jobz, &range, &NN,
#else
	      LAPACK_dstegr (&jobz, &range, &NN,
#endif
			     (double *) DD, (double *) EE, &vl, &vu, &il, &iu,	// these four are ignored if second parameteris 'A'
			     &tol,	// tolerance
			     &evals_found, evals_tmp, (double *) NULL, &NN,
			     isuppz, work, &lwork, iwork, &liwork, &info);
	      // dstegr writes this rank's eigenvalues at the front of
	      // evals_tmp; shift them to slots [il-1, iu-1] and zero the
	      // vacated front so the global sum merges slices correctly.
	      for (int i = iu - 1; i >= il - 1; i--)
		{
		  printf ("node=%d evals_found=%d evals_tmp[%d] = %g\n", node,
			  evals_found, i - (il - 1), evals_tmp[i - (il - 1)]);
		  evals_tmp[i] = evals_tmp[i - (il - 1)];
		  if (il > 1)
		    evals_tmp[i - (il - 1)] = 0.;
		}
	    }
	  {
	    // Combine the per-rank slices (each rank contributes zeros
	    // outside its own [il, iu] window).
	    grid->GlobalSumVector (evals_tmp, NN);
	  }
	}
// cheating a bit. It is better to sort instead of just reversing it, but the document of the routine says evals are sorted in increasing order. qr gives evals in decreasing order.
    }
 | 
			
		||||
#undef LAPACK_INT
 | 
			
		||||
#endif
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
    // Dispatch tridiagonal diagonalization to the LAPACK backend.
    // NOTE(review): when USE_LAPACK is not defined — or check_lapack is set
    // to a nonzero value — this function silently does nothing; the
    // non-LAPACK/QR fallback path has been removed from this version.
    void diagonalize (DenseVector < RealD > &lmd,
		      DenseVector < RealD > &lme,
		      int N2, int N1, GridBase * grid)
    {

#ifdef USE_LAPACK
      const int check_lapack = 0;	// just use lapack if 0, check against lapack if 1

      if (!check_lapack)
	return diagonalize_lapack (lmd, lme, N2, N1, grid);

//      diagonalize_lapack(lmd2,lme2,Nm2,Nm,Qt,grid);
#endif
    }
 | 
			
		||||
 | 
			
		||||
#if 1
 | 
			
		||||
    // Rescale v to unit 2-norm in place and return the norm it had on
    // entry.  No guard against a zero vector: a zero norm divides by zero,
    // exactly as in the surrounding code's convention.
    static RealD normalise (Field & v)
    {
      const RealD entry_norm = sqrt (norm2 (v));
      v = v * (1.0 / entry_norm);
      return entry_norm;
    }
 | 
			
		||||
 | 
			
		||||
    // Orthogonalize w against the first k vectors of evec (classical
    // Gram-Schmidt; assumes those vectors are already orthonormal) and then
    // normalise w.  Wall-clock time spent here is accumulated in OrthoTime.
    void orthogonalize (Field & w, DenseVector < Field > &evec, int k)
    {
      double elapsed = -usecond () / 1e6;
      typedef typename Field::scalar_type MyComplex;
      MyComplex overlap;

      if (0)
	{
	  // Disabled path: re-orthonormalise the basis itself before
	  // projecting w.  Kept for reference; never executed.
	  for (int j = 0; j < k; ++j)
	    {
	      normalise (evec[j]);
	      for (int i = 0; i < j; i++)
		{
		  overlap = innerProduct (evec[i], evec[j]);
		  evec[j] = evec[j] - overlap * evec[i];
		}
	    }
	}

      // Subtract each basis component from w; valid only if evec[j] has
      // unit norm (no division by <evec[j],evec[j]> is performed).
      for (int j = 0; j < k; ++j)
	{
	  overlap = innerProduct (evec[j], w);
	  w = w - overlap * evec[j];
	}
      normalise (w);

      elapsed += usecond () / 1e6;
      OrthoTime += elapsed;
    }
 | 
			
		||||
 | 
			
		||||
    void setUnit_Qt (int Nm, DenseVector < RealD > &Qt)
 | 
			
		||||
    {
 | 
			
		||||
      for (int i = 0; i < Qt.size (); ++i)
 | 
			
		||||
	Qt[i] = 0.0;
 | 
			
		||||
      for (int k = 0; k < Nm; ++k)
 | 
			
		||||
	Qt[k + k * Nm] = 1.0;
 | 
			
		||||
    }
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
    // Driver: run the Lanczos recurrence starting from src, in rounds of Nm
    // steps, and after each round estimate the extremal eigenvalues of the
    // accumulated tridiagonal matrix by Sturm-sequence bisection.  The
    // largest (high) and smallest (low) ends of the spectrum are tracked
    // across rounds; iteration stops once every tracked eigenvalue has
    // changed by a relative amount below eresid on all ranks.
    // NOTE(review): eval is printed but never filled (the copy into eval is
    // commented out below), and Nconv is set to 0 and never updated —
    // callers get converged eigenvalues only via stdout.  Confirm intended.
    void calc (DenseVector < RealD > &eval, const Field & src, int &Nconv)
    {

      GridBase *grid = src._grid;
//      assert(grid == src._grid);

      std::
	cout << GridLogMessage << " -- Nk = " << Nk << " Np = " << Np << std::
	endl;
      std::cout << GridLogMessage << " -- Nm = " << Nm << std::endl;
      std::cout << GridLogMessage << " -- size of eval   = " << eval.
	size () << std::endl;

//      assert(c.size() && Nm == eval.size());

      DenseVector < RealD > lme (Nm);	// off-diagonal (beta) entries
      DenseVector < RealD > lmd (Nm);	// diagonal (alpha) entries


      // Three-vector window of the Lanczos recurrence.
      Field current (grid);
      Field last (grid);
      Field next (grid);

      Nconv = 0;

      RealD beta_k;

      // Set initial vector
      // (uniform vector) Why not src??
      //      evec[0] = 1.0;
      current = src;
      std::cout << GridLogMessage << "norm2(src)= " << norm2 (src) << std::
	endl;
      normalise (current);
      std::
	cout << GridLogMessage << "norm2(evec[0])= " << norm2 (current) <<
	std::endl;

      // Initial Nk steps
      OrthoTime = 0.;
      double t0 = usecond () / 1e6;
      RealD norm;		// sqrt norm of last vector

      uint64_t iter = 0;	// total Lanczos steps taken so far

      bool initted = false;	// false only on the first round (no previous
				// estimates to compare against)
      std::vector < RealD > low (Nstop * 10);	// small-end eigenvalue estimates
      std::vector < RealD > high (Nstop * 10);	// large-end eigenvalue estimates
      RealD cont = 0.;		// >0 on any rank => not yet converged
      while (1) {
	  cont = 0.;
	  std::vector < RealD > lme2 (Nm);
	  std::vector < RealD > lmd2 (Nm);
	  // Advance the recurrence by Nm more steps (iter keeps growing;
	  // step() resizes lmd/lme as needed).
	  for (uint64_t k = 0; k < Nm; ++k, iter++) {
	      step (lmd, lme, last, current, next, iter);
	      last = current;
	      current = next;
	    }
	  double t1 = usecond () / 1e6;
	  std::cout << GridLogMessage << "IRL::Initial steps: " << t1 -
	    t0 << "seconds" << std::endl;
	  t0 = t1;
	  std::
	    cout << GridLogMessage << "IRL::Initial steps:OrthoTime " <<
	    OrthoTime << "seconds" << std::endl;

	  // getting eigenvalues
	  // Repack into the 1-based layout bisec() expects (lmd2[1..iter],
	  // lme2[2..iter+1]).
	  lmd2.resize (iter + 2);
	  lme2.resize (iter + 2);
	  for (uint64_t k = 0; k < iter; ++k) {
	      lmd2[k + 1] = lmd[k];
	      lme2[k + 2] = lme[k];
	    }
	  t1 = usecond () / 1e6;
	  std::cout << GridLogMessage << "IRL:: copy: " << t1 -
	    t0 << "seconds" << std::endl;
	  t0 = t1;
	  {
	    // Partition the wanted eigenvalue indices over the MPI ranks.
	    int total = grid->_Nprocessors;
	    int node = grid->_processor;
	    int interval = (Nstop / total) + 1;
	    // Top of the spectrum: this rank's slice counted from the
	    // largest index downwards.
	    int iu = (iter + 1) - (interval * node + 1);
	    int il = (iter + 1) - (interval * (node + 1));
	    std::vector < RealD > eval2 (iter + 3);
	    RealD eps2;
	    Bisection::bisec (lmd2, lme2, iter, il, iu, 1e-16, 1e-10, eval2,
			      eps2);
//        diagonalize(eval2,lme2,iter,Nk,grid);
	    // Compare against last round's estimates; any relative change
	    // above eresid keeps the outer loop running.
	    RealD diff = 0.;
	    for (int i = il; i <= iu; i++) {
		if (initted)
		  diff =
		    fabs (eval2[i] - high[iu-i]) / (fabs (eval2[i]) +
						      fabs (high[iu-i]));
		if (initted && (diff > eresid))
		  cont = 1.;
		if (initted)
		  printf ("eval[%d]=%0.14e %0.14e, %0.14e\n", i, eval2[i],
			  high[iu-i], diff);
		high[iu-i] = eval2[i];
	      }
	    // Bottom of the spectrum: same check on the smallest indices.
	    il = (interval * node + 1);
	    iu = (interval * (node + 1));
	    Bisection::bisec (lmd2, lme2, iter, il, iu, 1e-16, 1e-10, eval2,
			      eps2);
	    for (int i = il; i <= iu; i++) {
		if (initted)
		  diff =
		    fabs (eval2[i] - low[i]) / (fabs (eval2[i]) +
						fabs (low[i]));
		if (initted && (diff > eresid))
		  cont = 1.;
		if (initted)
		  printf ("eval[%d]=%0.14e %0.14e, %0.14e\n", i, eval2[i],
			  low[i], diff);
		low[i] = eval2[i];
	      }
	    t1 = usecond () / 1e6;
	    std::cout << GridLogMessage << "IRL:: diagonalize: " << t1 -
	      t0 << "seconds" << std::endl;
	    t0 = t1;
	  }

	  // Copy-out disabled (eval2 is out of scope here anyway).
	  for (uint64_t k = 0; k < Nk; ++k) {
//          eval[k] = eval2[k];
	    }
	  // Converged only if no rank saw a change above eresid.
	  if (initted)
	    {
	      grid->GlobalSumVector (&cont, 1);
	      if (cont < 1.) return;
	    }
	  initted = true;
	}

    }
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
/**
 | 
			
		||||
   There is some matrix Q such that for any vector y
 | 
			
		||||
   Q.e_1 = y and Q is unitary.
 | 
			
		||||
**/
 | 
			
		||||
/**
   There is some matrix Q such that for any vector y
   Q.e_1 = y and Q is unitary.
**/
    // Builds Q column by column: column 0 is y itself, and each later
    // column j is constructed from the running partial norms tau0 (over
    // y[0..j-1]) and tau (over y[0..j]) so that it is orthogonal to y and
    // to the previous columns.  Returns the final tau = |y|.
    // NOTE(review): for N <= 1 the loop never runs and tau is returned
    // uninitialized — confirm callers never pass a size-1 vector.
    template < class T >
      static T orthQ (DenseMatrix < T > &Q, DenseVector < T > y)
    {
      int N = y.size ();	//Matrix Size
      Fill (Q, 0.0);
      T tau;
      for (int i = 0; i < N; i++)
	{
	  Q[i][0] = y[i];	// first column carries y (not normalised here)
	}
      T sig = conj (y[0]) * y[0];	// running sum of |y[i]|^2
      T tau0 = fabs (sqrt (sig));

      for (int j = 1; j < N; j++)
	{
	  sig += conj (y[j]) * y[j];
	  tau = abs (sqrt (sig));

	  if (abs (tau0) > 0.0)
	    {

	      T gam = conj ((y[j] / tau) / tau0);
	      for (int k = 0; k <= j - 1; k++)
		{
		  Q[k][j] = -gam * y[k];
		}
	      Q[j][j] = tau0 / tau;
	    }
	  else
	    {
	      // Leading entries of y were all zero: a unit column suffices.
	      Q[j - 1][j] = 1.0;
	    }
	  tau0 = tau;
	}
      return tau;
    }
 | 
			
		||||
 | 
			
		||||
/**
 | 
			
		||||
	There is some matrix Q such that for any vector y
 | 
			
		||||
	Q.e_k = y and Q is unitary.
 | 
			
		||||
**/
 | 
			
		||||
/**
	There is some matrix Q such that for any vector y
	Q.e_k = y and Q is unitary.
**/
    // Same construction as orthQ (which yields Q.e_1 = y), followed by the
    // column shift SL(Q) so that y ends up in the last column instead.
    // Returns the tau (= |y|) computed by orthQ.
    template < class T >
      static T orthU (DenseMatrix < T > &Q, DenseVector < T > y)
    {
      T tau = orthQ (Q, y);
      SL (Q);
      return tau;
    }
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
/**
 | 
			
		||||
	Wind up with a matrix with the first con rows untouched
 | 
			
		||||
 | 
			
		||||
say con = 2
 | 
			
		||||
	Q is such that Qdag H Q has {x, x, val, 0, 0, 0, 0, ...} as 1st colum
 | 
			
		||||
	and the matrix is upper hessenberg
 | 
			
		||||
	and with f and Q appropriately modidied with Q is the arnoldi factorization
 | 
			
		||||
 | 
			
		||||
**/
 | 
			
		||||
 | 
			
		||||
    // "Lock" the eigenvalue closest to val into the Hessenberg matrix H:
    // diagonalize the trailing (M-con) x (M-con) block, rotate it so the
    // selected eigenvector leads, write the similarity transform into Q,
    // and then restore upper-Hessenberg form by a sequence of unitary
    // column/row updates applied to both H and Q.  The first con rows and
    // columns are left untouched.
    // NOTE(review): parameters dfg and herm are never read in this body.
    template < class T > static void Lock (DenseMatrix < T > &H,	///Hess mtx     
					   DenseMatrix < T > &Q,	///Lock Transform
					   T val,	///value to be locked
					   int con,	///number already locked
					   RealD small, int dfg, bool herm)
    {
      //ForceTridiagonal(H);

      int M = H.dim;
      DenseVector < T > vec;
      Resize (vec, M - con);

      // AH = trailing (unlocked) diagonal block of H.
      DenseMatrix < T > AH;
      Resize (AH, M - con, M - con);
      AH = GetSubMtx (H, con, M, con, M);

      DenseMatrix < T > QQ;
      Resize (QQ, M - con, M - con);

      Unity (Q);
      Unity (QQ);

      // Full eigen-decomposition of the trailing block.
      DenseVector < T > evals;
      Resize (evals, M - con);
      DenseMatrix < T > evecs;
      Resize (evecs, M - con, M - con);

      Wilkinson < T > (AH, evals, evecs, small);

      // Select the eigenvalue closest to the requested val.
      int k = 0;
      RealD cold = abs (val - evals[k]);
      for (int i = 1; i < M - con; i++)
	{
	  RealD cnew = abs (val - evals[i]);
	  if (cnew < cold)
	    {
	      k = i;
	      cold = cnew;
	    }
	}
      vec = evecs[k];

      ComplexD tau;
      // QQ: unitary with QQ.e_1 = vec; rotating AH by it puts the chosen
      // eigenvalue in the leading position of the block.
      orthQ (QQ, vec);
      //orthQM(QQ,AH,vec);

      AH = Hermitian (QQ) * AH;
      AH = AH * QQ;

      // Write the rotated block and the transform back into H and Q.
      for (int i = con; i < M; i++)
	{
	  for (int j = con; j < M; j++)
	    {
	      Q[i][j] = QQ[i - con][j - con];
	      H[i][j] = AH[i - con][j - con];
	    }
	}

      // Sweep from the bottom row upward, each pass zeroing the
      // below-Hessenberg entries of row j via a unitary U built from that
      // row (orthU), applied to the columns of H and Q and the rows of H.
      for (int j = M - 1; j > con + 2; j--)
	{

	  DenseMatrix < T > U;
	  Resize (U, j - 1 - con, j - 1 - con);
	  DenseVector < T > z;
	  Resize (z, j - 1 - con);
	  T nm = norm (z);	// NOTE(review): computed before z is filled
				// below and never used — looks vestigial.
	  for (int k = con + 0; k < j - 1; k++)
	    {
	      z[k - con] = conj (H (j, k + 1));
	    }
	  normalise (z);

	  // Skip the pass if the row is already (numerically) Hessenberg.
	  RealD tmp = 0;
	  for (int i = 0; i < z.size () - 1; i++)
	    {
	      tmp = tmp + abs (z[i]);
	    }

	  if (tmp < small / ((RealD) z.size () - 1.0))
	    {
	      continue;
	    }

	  tau = orthU (U, z);

	  // Hb = H(:, con+1 .. j-1) * U, stored transposed.
	  DenseMatrix < T > Hb;
	  Resize (Hb, j - 1 - con, M);

	  for (int a = 0; a < M; a++)
	    {
	      for (int b = 0; b < j - 1 - con; b++)
		{
		  T sum = 0;
		  for (int c = 0; c < j - 1 - con; c++)
		    {
		      sum += H[a][con + 1 + c] * U[c][b];
		    }		//sum += H(a,con+1+c)*U(c,b);}
		  Hb[b][a] = sum;
		}
	    }

	  // Copy the updated columns back into H.
	  for (int k = con + 1; k < j; k++)
	    {
	      for (int l = 0; l < M; l++)
		{
		  H[l][k] = Hb[k - 1 - con][l];
		}
	    }			//H(Hb[k-1-con][l] , l,k);}}

	  // Same column update applied to the accumulated transform Q.
	  DenseMatrix < T > Qb;
	  Resize (Qb, M, M);

	  for (int a = 0; a < M; a++)
	    {
	      for (int b = 0; b < j - 1 - con; b++)
		{
		  T sum = 0;
		  for (int c = 0; c < j - 1 - con; c++)
		    {
		      sum += Q[a][con + 1 + c] * U[c][b];
		    }		//sum += Q(a,con+1+c)*U(c,b);}
		  Qb[b][a] = sum;
		}
	    }

	  for (int k = con + 1; k < j; k++)
	    {
	      for (int l = 0; l < M; l++)
		{
		  Q[l][k] = Qb[k - 1 - con][l];
		}
	    }			//Q(Qb[k-1-con][l] , l,k);}}

	  // Hc = U^dag * H(con+1 .. j-1, :) — the matching row update so
	  // the overall operation is a similarity transform of H.
	  DenseMatrix < T > Hc;
	  Resize (Hc, M, M);

	  for (int a = 0; a < j - 1 - con; a++)
	    {
	      for (int b = 0; b < M; b++)
		{
		  T sum = 0;
		  for (int c = 0; c < j - 1 - con; c++)
		    {
		      sum += conj (U[c][a]) * H[con + 1 + c][b];
		    }		//sum += conj( U(c,a) )*H(con+1+c,b);}
		  Hc[b][a] = sum;
		}
	    }

	  for (int k = 0; k < M; k++)
	    {
	      for (int l = con + 1; l < j; l++)
		{
		  H[l][k] = Hc[k][l - 1 - con];
		}
	    }			//H(Hc[k][l-1-con] , l,k);}}

	}
    }
 | 
			
		||||
#endif
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
  };
 | 
			
		||||
 | 
			
		||||
}
 | 
			
		||||
#endif
 | 
			
		||||
							
								
								
									
122 additions — new file: lib/algorithms/iterative/bisec.c
@@ -0,0 +1,122 @@
 | 
			
		||||
#include <math.h>
 | 
			
		||||
#include <stdlib.h>
 | 
			
		||||
#include <vector>
 | 
			
		||||
 | 
			
		||||
struct Bisection {
 | 
			
		||||
 | 
			
		||||
// Extract the tridiagonal band of a matrix, run the bisection eigensolver
// at two tolerances, bubble-sort the tighter result by magnitude, and store
// it on the diagonal of an output matrix.
// NOTE(review): this function cannot compile as written — the parameter is
// named BETA but the body uses BETHA throughout, and A, H, mag and
// swap(ptr,ptr) are not declared anywhere in this file.  The `eig`
// parameter is also never used.  It appears to be dead/incomplete code
// carried over from another source; confirm before relying on it.
static void get_eig2(int row_num,std::vector<RealD> &ALPHA,std::vector<RealD> &BETA, std::vector<RealD> & eig)
{
  int i,j;
  std::vector<RealD> evec1(row_num+3);
  std::vector<RealD> evec2(row_num+3);
  RealD eps2;
  // 1-based packing expected by bisec(): ALPHA[1..row_num] diagonal,
  // BETHA[2..row_num] sub-diagonal, read from the (undeclared) matrix A.
  ALPHA[1]=0.;
  BETHA[1]=0.;
  for(i=0;i<row_num-1;i++) {
    ALPHA[i+1] = A[i*(row_num+1)].real();
    BETHA[i+2] = A[i*(row_num+1)+1].real();
  }
  ALPHA[row_num] = A[(row_num-1)*(row_num+1)].real();
  // Two solves: coarse (evec1, unused afterwards) and fine (evec2).
  bisec(ALPHA,BETHA,row_num,1,row_num,1e-10,1e-10,evec1,eps2);
  bisec(ALPHA,BETHA,row_num,1,row_num,1e-16,1e-16,evec2,eps2);

  // Do we really need to sort here?
  // Cocktail-shaker sort of evec2[1..row_num] by magnitude.
  int begin=1;
  int end = row_num;
  int swapped=1;
  while(swapped) {
    swapped=0;
    for(i=begin;i<end;i++){
      if(mag(evec2[i])>mag(evec2[i+1]))	{
	swap(evec2+i,evec2+i+1);
	swapped=1;
      }
    }
    end--;
    for(i=end-1;i>=begin;i--){
      if(mag(evec2[i])>mag(evec2[i+1]))	{
	swap(evec2+i,evec2+i+1);
	swapped=1;
      }
    }
    begin++;
  }

  // Write the sorted eigenvalues onto the diagonal of the (undeclared)
  // row-major matrix H; all off-diagonal entries are zeroed.
  for(i=0;i<row_num;i++){
    for(j=0;j<row_num;j++) {
      if(i==j) H[i*row_num+j]=evec2[i+1];
      else H[i*row_num+j]=0.;
    }
  }
}
 | 
			
		||||
 | 
			
		||||
// Sturm-sequence bisection for the eigenvalues of a symmetric tridiagonal
// matrix (Barth/Martin/Wilkinson "bisect" handbook routine).  Indexing is
// 1-based: c[1..n] is the diagonal, b[2..n] the off-diagonal (b[1] is
// overwritten with 0).  Computes eigenvalues with indices m1..m2 (ascending
// order) into x[m1..m2].  eps1 is the requested absolute tolerance (if
// <= 0 a machine-based default is used); relfeh is the relative machine
// precision; eps2 returns the attainable accuracy bound.
// NOTE(review): b[1]=0.0 mutates the caller's array — callers must not
// rely on b[1].
static void bisec(std::vector<RealD> &c,   
		  std::vector<RealD> &b,
		  int n,
		  int m1,
		  int m2,
		  RealD eps1,
		  RealD relfeh,
		  std::vector<RealD> &x,
		  RealD &eps2)
{
  std::vector<RealD> wu(n+2);	// lower bounds for each eigenvalue interval

  RealD h,q,x1,xu,x0,xmin,xmax; 
  int i,a,k;

  b[1]=0.0;
  // Gershgorin bounds on the whole spectrum.
  xmin=c[n]-fabs(b[n]);
  xmax=c[n]+fabs(b[n]);
  for(i=1;i<n;i++){
    h=fabs(b[i])+fabs(b[i+1]);
    if(c[i]+h>xmax) xmax= c[i]+h;
    if(c[i]-h<xmin) xmin= c[i]-h;
  }
  xmax *=2.;			// widen the upper bound for safety

  // Attainable accuracy and effective tolerance.
  eps2=relfeh*((xmin+xmax)>0.0 ? xmax : -xmin);
  if(eps1<=0.0) eps1=eps2;
  eps2=0.5*eps1+7.0*(eps2);
  // Initialise every wanted eigenvalue's bracket to [xmin, xmax].
  x0=xmax;
  for(i=m1;i<=m2;i++){
    x[i]=xmax;
    wu[i]=xmin;
  }

  // Refine eigenvalues from the largest index down; brackets of lower
  // eigenvalues are tightened as a side effect (wu bookkeeping).
  for(k=m2;k>=m1;k--){
    xu=xmin;
    i=k;
    // Best known lower bound for eigenvalue k from previous passes.
    do{
      if(xu<wu[i]){
	xu=wu[i];
	i=m1-1;
      }
      i--;
    }while(i>=m1);
    if(x0>x[k]) x0=x[k];
    // Bisection until the bracket [xu, x0] is within tolerance.
    while((x0-xu)>2*relfeh*(fabs(xu)+fabs(x0))+eps1){
      x1=(xu+x0)/2;

      // Sturm count: a = number of eigenvalues below x1, from the signs
      // of the leading-minor recurrence q_i.
      a=0;
      q=1.0;
      for(i=1;i<=n;i++){
	q=c[i]-x1-((q!=0.0)? b[i]*b[i]/q:fabs(b[i])/relfeh);
	if(q<0) a++;
      }
      //			printf("x1=%e a=%d\n",x1,a);
      if(a<k){
	// Fewer than k eigenvalues below x1: raise the lower bound, and
	// record x1 as a bound for the (a+1)-th eigenvalue too.
	if(a<m1){
	  xu=x1;
	  wu[m1]=x1;
	}else {
	  xu=x1;
	  wu[a+1]=x1;
	  if(x[a]>x1) x[a]=x1;
	}
      }else x0=x1;
    }
    x[k]=(x0+xu)/2;		// midpoint of the final bracket
  }
}
 | 
			
		||||
}
 | 
			
		||||
@@ -52,6 +52,8 @@ public:
 | 
			
		||||
    GridBase(const std::vector<int> & processor_grid,
 | 
			
		||||
	     const CartesianCommunicator &parent) : CartesianCommunicator(processor_grid,parent) {};
 | 
			
		||||
 | 
			
		||||
    virtual ~GridBase() = default;
 | 
			
		||||
 | 
			
		||||
    // Physics Grid information.
 | 
			
		||||
    std::vector<int> _simd_layout;// Which dimensions get relayed out over simd lanes.
 | 
			
		||||
    std::vector<int> _fdimensions;// (full) Global dimensions of array prior to cb removal
 | 
			
		||||
 
 | 
			
		||||
@@ -81,6 +81,8 @@ public:
 | 
			
		||||
      Init(dimensions,simd_layout,processor_grid);
 | 
			
		||||
    }
 | 
			
		||||
 | 
			
		||||
    virtual ~GridCartesian() = default;
 | 
			
		||||
 | 
			
		||||
    void Init(const std::vector<int> &dimensions,
 | 
			
		||||
	      const std::vector<int> &simd_layout,
 | 
			
		||||
	      const std::vector<int> &processor_grid)
 | 
			
		||||
 
 | 
			
		||||
@@ -133,6 +133,8 @@ public:
 | 
			
		||||
    {
 | 
			
		||||
      Init(base->_fdimensions,base->_simd_layout,base->_processors,checker_dim_mask,checker_dim)  ;
 | 
			
		||||
    }
 | 
			
		||||
 | 
			
		||||
    virtual ~GridRedBlackCartesian() = default;
 | 
			
		||||
#if 0
 | 
			
		||||
    ////////////////////////////////////////////////////////////
 | 
			
		||||
    // Create redblack grid ;; deprecate these. Should not
 | 
			
		||||
 
 | 
			
		||||
@@ -96,6 +96,124 @@ void CartesianCommunicator::GlobalSumVector(ComplexD *c,int N)
 | 
			
		||||
  GlobalSumVector((double *)c,2*N);
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
#if defined( GRID_COMMS_MPI) || defined (GRID_COMMS_MPIT)
 | 
			
		||||
 | 
			
		||||
CartesianCommunicator::CartesianCommunicator(const std::vector<int> &processors,const CartesianCommunicator &parent) 
 | 
			
		||||
{
 | 
			
		||||
  _ndimension = processors.size();
 | 
			
		||||
  
 | 
			
		||||
  //////////////////////////////////////////////////////////////////////////////////////////////////////
 | 
			
		||||
  // split the communicator
 | 
			
		||||
  //////////////////////////////////////////////////////////////////////////////////////////////////////
 | 
			
		||||
  int Nparent;
 | 
			
		||||
  MPI_Comm_size(parent.communicator,&Nparent);
 | 
			
		||||
 | 
			
		||||
  int childsize=1;
 | 
			
		||||
  for(int d=0;d<processors.size();d++) {
 | 
			
		||||
    childsize *= processors[d];
 | 
			
		||||
  }
 | 
			
		||||
  int Nchild = Nparent/childsize;
 | 
			
		||||
  assert (childsize * Nchild == Nparent);
 | 
			
		||||
 | 
			
		||||
  std::vector<int> ccoor(_ndimension); // coor within subcommunicator
 | 
			
		||||
  std::vector<int> scoor(_ndimension); // coor of split within parent
 | 
			
		||||
  std::vector<int> ssize(_ndimension); // coor of split within parent
 | 
			
		||||
 | 
			
		||||
  std::vector<int> pcoor(_ndimension,0); 
 | 
			
		||||
  std::vector<int> pdims(_ndimension,1); 
 | 
			
		||||
 | 
			
		||||
  if(parent._processors.size()==4 && _ndimension==5){
 | 
			
		||||
      for(int i=0;i<4;i++) pcoor[i+1]=parent._processor_coor[i];
 | 
			
		||||
      for(int i=0;i<4;i++) pdims[i+1]=parent._processors[i];
 | 
			
		||||
  } else {
 | 
			
		||||
      assert(_ndimension == parent._ndimension);
 | 
			
		||||
      for(int i=0;i<_ndimension;i++) pcoor[i]=parent._processor_coor[i];
 | 
			
		||||
      for(int i=0;i<_ndimension;i++) pdims[i]=parent._processors[i];
 | 
			
		||||
  }
 | 
			
		||||
 | 
			
		||||
  for(int d=0;d<_ndimension;d++){
 | 
			
		||||
    ccoor[d] = pcoor[d] % processors[d];
 | 
			
		||||
    scoor[d] = pcoor[d] / processors[d];
 | 
			
		||||
    ssize[d] = pdims[d] / processors[d];
 | 
			
		||||
  }
 | 
			
		||||
  int crank,srank;  // rank within subcomm ; rank of subcomm within blocks of subcomms
 | 
			
		||||
  Lexicographic::IndexFromCoor(ccoor,crank,processors);
 | 
			
		||||
  Lexicographic::IndexFromCoor(scoor,srank,ssize);
 | 
			
		||||
 | 
			
		||||
  MPI_Comm comm_split;
 | 
			
		||||
  if ( Nchild > 1 ) { 
 | 
			
		||||
 | 
			
		||||
    //    std::cout << GridLogMessage<<"Child communicator of "<< std::hex << parent.communicator << std::dec<<std::endl;
 | 
			
		||||
    //    std::cout << GridLogMessage<<" parent grid["<< parent._ndimension<<"]    ";
 | 
			
		||||
    //    for(int d=0;d<parent._processors.size();d++)  std::cout << parent._processors[d] << " ";
 | 
			
		||||
    //    std::cout<<std::endl;
 | 
			
		||||
 | 
			
		||||
    //    std::cout << GridLogMessage<<" child grid["<< _ndimension <<"]    ";
 | 
			
		||||
    //    for(int d=0;d<processors.size();d++)  std::cout << processors[d] << " ";
 | 
			
		||||
    //    std::cout<<std::endl;
 | 
			
		||||
 | 
			
		||||
    int ierr= MPI_Comm_split(parent.communicator,srank,crank,&comm_split);
 | 
			
		||||
    assert(ierr==0);
 | 
			
		||||
    //////////////////////////////////////////////////////////////////////////////////////////////////////
 | 
			
		||||
    // Declare victory
 | 
			
		||||
    //////////////////////////////////////////////////////////////////////////////////////////////////////
 | 
			
		||||
    //    std::cout << GridLogMessage<<"Divided communicator "<< parent._Nprocessors<<" into "
 | 
			
		||||
    // 	      << Nchild <<" communicators with " << childsize << " ranks"<<std::endl;
 | 
			
		||||
  } else {
 | 
			
		||||
    comm_split=parent.communicator;
 | 
			
		||||
  }
 | 
			
		||||
 | 
			
		||||
  //////////////////////////////////////////////////////////////////////////////////////////////////////
 | 
			
		||||
  // Set up from the new split communicator
 | 
			
		||||
  //////////////////////////////////////////////////////////////////////////////////////////////////////
 | 
			
		||||
  InitFromMPICommunicator(processors,comm_split);
 | 
			
		||||
}
 | 
			
		||||
//////////////////////////////////////////////////////////////////////////////////////////////////////
 | 
			
		||||
// Take an MPI_Comm and self assemble
 | 
			
		||||
//////////////////////////////////////////////////////////////////////////////////////////////////////
 | 
			
		||||
void CartesianCommunicator::InitFromMPICommunicator(const std::vector<int> &processors, MPI_Comm communicator_base)
 | 
			
		||||
{
 | 
			
		||||
  //  if ( communicator_base != communicator_world ) {
 | 
			
		||||
  //    std::cout << "Cartesian communicator created with a non-world communicator"<<std::endl;
 | 
			
		||||
  //  }
 | 
			
		||||
  _ndimension = processors.size();
 | 
			
		||||
  _processor_coor.resize(_ndimension);
 | 
			
		||||
 | 
			
		||||
  /////////////////////////////////
 | 
			
		||||
  // Count the requested nodes
 | 
			
		||||
  /////////////////////////////////
 | 
			
		||||
  _Nprocessors=1;
 | 
			
		||||
  _processors = processors;
 | 
			
		||||
  for(int i=0;i<_ndimension;i++){
 | 
			
		||||
    _Nprocessors*=_processors[i];
 | 
			
		||||
  }
 | 
			
		||||
 | 
			
		||||
  std::vector<int> periodic(_ndimension,1);
 | 
			
		||||
  MPI_Cart_create(communicator_base, _ndimension,&_processors[0],&periodic[0],1,&communicator);
 | 
			
		||||
  MPI_Comm_rank(communicator,&_processor);
 | 
			
		||||
  MPI_Cart_coords(communicator,_processor,_ndimension,&_processor_coor[0]);
 | 
			
		||||
 | 
			
		||||
  int Size;
 | 
			
		||||
  MPI_Comm_size(communicator,&Size);
 | 
			
		||||
 | 
			
		||||
#ifdef GRID_COMMS_MPIT
 | 
			
		||||
  communicator_halo.resize (2*_ndimension);
 | 
			
		||||
  for(int i=0;i<_ndimension*2;i++){
 | 
			
		||||
    MPI_Comm_dup(communicator,&communicator_halo[i]);
 | 
			
		||||
  }
 | 
			
		||||
#endif
 | 
			
		||||
  
 | 
			
		||||
  assert(Size==_Nprocessors);
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
CartesianCommunicator::CartesianCommunicator(const std::vector<int> &processors) 
 | 
			
		||||
{
 | 
			
		||||
  InitFromMPICommunicator(processors,communicator_world);
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
#endif
 | 
			
		||||
 | 
			
		||||
#if !defined( GRID_COMMS_MPI3) 
 | 
			
		||||
 | 
			
		||||
int                      CartesianCommunicator::NodeCount(void)    { return ProcessorCount();};
 | 
			
		||||
 
 | 
			
		||||
@@ -155,10 +155,10 @@ class CartesianCommunicator {
 | 
			
		||||
  ////////////////////////////////////////////////
 | 
			
		||||
  CartesianCommunicator(const std::vector<int> &processors,const CartesianCommunicator &parent);
 | 
			
		||||
  CartesianCommunicator(const std::vector<int> &pdimensions_in);
 | 
			
		||||
  virtual ~CartesianCommunicator();
 | 
			
		||||
 | 
			
		||||
 private:
 | 
			
		||||
#if defined (GRID_COMMS_MPI) 
 | 
			
		||||
  //|| defined (GRID_COMMS_MPI3) 
 | 
			
		||||
#if defined (GRID_COMMS_MPI) || defined (GRID_COMMS_MPIT) 
 | 
			
		||||
  ////////////////////////////////////////////////
 | 
			
		||||
  // Private initialise from an MPI communicator
 | 
			
		||||
  // Can use after an MPI_Comm_split, but hidden from user so private
 | 
			
		||||
@@ -263,6 +263,27 @@ class CartesianCommunicator {
 | 
			
		||||
  // Broadcast a buffer and composite larger
 | 
			
		||||
  ////////////////////////////////////////////////////////////
 | 
			
		||||
  void Broadcast(int root,void* data, int bytes);
 | 
			
		||||
 | 
			
		||||
  ////////////////////////////////////////////////////////////
 | 
			
		||||
  // All2All down one dimension
 | 
			
		||||
  ////////////////////////////////////////////////////////////
 | 
			
		||||
  template<class T> void AllToAll(int dim,std::vector<T> &in, std::vector<T> &out){
 | 
			
		||||
    assert(dim>=0);
 | 
			
		||||
    assert(dim<_ndimension);
 | 
			
		||||
    int numnode = _processors[dim];
 | 
			
		||||
    //    std::cerr << " AllToAll in.size()  "<<in.size()<<std::endl;
 | 
			
		||||
    //    std::cerr << " AllToAll out.size() "<<out.size()<<std::endl;
 | 
			
		||||
    assert(in.size()==out.size());
 | 
			
		||||
    uint64_t bytes=sizeof(T);
 | 
			
		||||
    uint64_t words=in.size()/numnode;
 | 
			
		||||
 | 
			
		||||
    assert(numnode * words == in.size());
 | 
			
		||||
    assert(words < (1ULL<<32));
 | 
			
		||||
 | 
			
		||||
    AllToAll(dim,(void *)&in[0],(void *)&out[0],words,bytes);
 | 
			
		||||
  }
 | 
			
		||||
  void AllToAll(int dim  ,void *in,void *out,uint64_t words,uint64_t bytes);
 | 
			
		||||
  void AllToAll(void  *in,void *out,uint64_t words         ,uint64_t bytes);
 | 
			
		||||
  
 | 
			
		||||
  template<class obj> void Broadcast(int root,obj &data)
 | 
			
		||||
    {
 | 
			
		||||
 
 | 
			
		||||
@@ -53,94 +53,14 @@ void CartesianCommunicator::Init(int *argc, char ***argv) {
 | 
			
		||||
  ShmInitGeneric();
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
CartesianCommunicator::CartesianCommunicator(const std::vector<int> &processors) 
 | 
			
		||||
CartesianCommunicator::~CartesianCommunicator()
 | 
			
		||||
{
 | 
			
		||||
  InitFromMPICommunicator(processors,communicator_world);
 | 
			
		||||
  //  std::cout << "Passed communicator world to a new communicator" <<communicator<<std::endl;
 | 
			
		||||
}
 | 
			
		||||
CartesianCommunicator::CartesianCommunicator(const std::vector<int> &processors,const CartesianCommunicator &parent) 
 | 
			
		||||
{
 | 
			
		||||
  _ndimension = processors.size();
 | 
			
		||||
  assert(_ndimension = parent._ndimension);
 | 
			
		||||
  
 | 
			
		||||
  //////////////////////////////////////////////////////////////////////////////////////////////////////
 | 
			
		||||
  // split the communicator
 | 
			
		||||
  //////////////////////////////////////////////////////////////////////////////////////////////////////
 | 
			
		||||
  int Nparent;
 | 
			
		||||
  MPI_Comm_size(parent.communicator,&Nparent);
 | 
			
		||||
 | 
			
		||||
  int childsize=1;
 | 
			
		||||
  for(int d=0;d<processors.size();d++) {
 | 
			
		||||
    childsize *= processors[d];
 | 
			
		||||
  }
 | 
			
		||||
  int Nchild = Nparent/childsize;
 | 
			
		||||
  assert (childsize * Nchild == Nparent);
 | 
			
		||||
 | 
			
		||||
  int prank;  MPI_Comm_rank(parent.communicator,&prank);
 | 
			
		||||
  int crank = prank % childsize;
 | 
			
		||||
  int ccomm = prank / childsize;
 | 
			
		||||
 | 
			
		||||
  MPI_Comm comm_split;
 | 
			
		||||
  if ( Nchild > 1 ) { 
 | 
			
		||||
 | 
			
		||||
    std::cout << GridLogMessage<<"Child communicator of "<< std::hex << parent.communicator << std::dec<<std::endl;
 | 
			
		||||
    std::cout << GridLogMessage<<" parent grid["<< parent._ndimension<<"]    ";
 | 
			
		||||
    for(int d=0;d<parent._processors.size();d++)  std::cout << parent._processors[d] << " ";
 | 
			
		||||
    std::cout<<std::endl;
 | 
			
		||||
 | 
			
		||||
    std::cout << GridLogMessage<<" child grid["<< _ndimension <<"]    ";
 | 
			
		||||
    for(int d=0;d<processors.size();d++)  std::cout << processors[d] << " ";
 | 
			
		||||
    std::cout<<std::endl;
 | 
			
		||||
 | 
			
		||||
    int ierr= MPI_Comm_split(parent.communicator, ccomm,crank,&comm_split);
 | 
			
		||||
    assert(ierr==0);
 | 
			
		||||
    //////////////////////////////////////////////////////////////////////////////////////////////////////
 | 
			
		||||
    // Declare victory
 | 
			
		||||
    //////////////////////////////////////////////////////////////////////////////////////////////////////
 | 
			
		||||
    std::cout << GridLogMessage<<"Divided communicator "<< parent._Nprocessors<<" into "
 | 
			
		||||
	      <<Nchild <<" communicators with " << childsize << " ranks"<<std::endl;
 | 
			
		||||
  } else {
 | 
			
		||||
    comm_split=parent.communicator;
 | 
			
		||||
    //    std::cout << "Passed parental communicator to a new communicator" <<std::endl;
 | 
			
		||||
  }
 | 
			
		||||
 | 
			
		||||
  //////////////////////////////////////////////////////////////////////////////////////////////////////
 | 
			
		||||
  // Set up from the new split communicator
 | 
			
		||||
  //////////////////////////////////////////////////////////////////////////////////////////////////////
 | 
			
		||||
  InitFromMPICommunicator(processors,comm_split);
 | 
			
		||||
 | 
			
		||||
  int MPI_is_finalised;
 | 
			
		||||
  MPI_Finalized(&MPI_is_finalised);
 | 
			
		||||
  if (communicator && MPI_is_finalised)
 | 
			
		||||
    MPI_Comm_free(&communicator);
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
//////////////////////////////////////////////////////////////////////////////////////////////////////
 | 
			
		||||
// Take an MPI_Comm and self assemble
 | 
			
		||||
//////////////////////////////////////////////////////////////////////////////////////////////////////
 | 
			
		||||
void CartesianCommunicator::InitFromMPICommunicator(const std::vector<int> &processors, MPI_Comm communicator_base)
 | 
			
		||||
{
 | 
			
		||||
  //  if ( communicator_base != communicator_world ) {
 | 
			
		||||
  //    std::cout << "Cartesian communicator created with a non-world communicator"<<std::endl;
 | 
			
		||||
  //  }
 | 
			
		||||
  _ndimension = processors.size();
 | 
			
		||||
  _processor_coor.resize(_ndimension);
 | 
			
		||||
 | 
			
		||||
  /////////////////////////////////
 | 
			
		||||
  // Count the requested nodes
 | 
			
		||||
  /////////////////////////////////
 | 
			
		||||
  _Nprocessors=1;
 | 
			
		||||
  _processors = processors;
 | 
			
		||||
  for(int i=0;i<_ndimension;i++){
 | 
			
		||||
    _Nprocessors*=_processors[i];
 | 
			
		||||
  }
 | 
			
		||||
 | 
			
		||||
  std::vector<int> periodic(_ndimension,1);
 | 
			
		||||
  MPI_Cart_create(communicator_base, _ndimension,&_processors[0],&periodic[0],1,&communicator);
 | 
			
		||||
  MPI_Comm_rank(communicator,&_processor);
 | 
			
		||||
  MPI_Cart_coords(communicator,_processor,_ndimension,&_processor_coor[0]);
 | 
			
		||||
 | 
			
		||||
  int Size;
 | 
			
		||||
  MPI_Comm_size(communicator,&Size);
 | 
			
		||||
  
 | 
			
		||||
  assert(Size==_Nprocessors);
 | 
			
		||||
}
 | 
			
		||||
void CartesianCommunicator::GlobalSum(uint32_t &u){
 | 
			
		||||
  int ierr=MPI_Allreduce(MPI_IN_PLACE,&u,1,MPI_UINT32_T,MPI_SUM,communicator);
 | 
			
		||||
  assert(ierr==0);
 | 
			
		||||
@@ -276,6 +196,35 @@ void CartesianCommunicator::Broadcast(int root,void* data, int bytes)
 | 
			
		||||
		     root,
 | 
			
		||||
		     communicator);
 | 
			
		||||
  assert(ierr==0);
 | 
			
		||||
}
 | 
			
		||||
void CartesianCommunicator::AllToAll(int dim,void  *in,void *out,uint64_t words,uint64_t bytes)
 | 
			
		||||
{
 | 
			
		||||
  std::vector<int> row(_ndimension,1);
 | 
			
		||||
  assert(dim>=0 && dim<_ndimension);
 | 
			
		||||
 | 
			
		||||
  //  Split the communicator
 | 
			
		||||
  row[dim] = _processors[dim];
 | 
			
		||||
 | 
			
		||||
  CartesianCommunicator Comm(row,*this);
 | 
			
		||||
  Comm.AllToAll(in,out,words,bytes);
 | 
			
		||||
}
 | 
			
		||||
void CartesianCommunicator::AllToAll(void  *in,void *out,uint64_t words,uint64_t bytes)
 | 
			
		||||
{
 | 
			
		||||
  // MPI is a pain and uses "int" arguments
 | 
			
		||||
  // 64*64*64*128*16 == 500Million elements of data.
 | 
			
		||||
  // When 24*4 bytes multiples get 50x 10^9 >>> 2x10^9 Y2K bug.
 | 
			
		||||
  // (Turns up on 32^3 x 64 Gparity too)
 | 
			
		||||
  MPI_Datatype object;
 | 
			
		||||
  int iwords; 
 | 
			
		||||
  int ibytes;
 | 
			
		||||
  iwords = words;
 | 
			
		||||
  ibytes = bytes;
 | 
			
		||||
  assert(words == iwords); // safe to cast to int ?
 | 
			
		||||
  assert(bytes == ibytes); // safe to cast to int ?
 | 
			
		||||
  MPI_Type_contiguous(ibytes,MPI_BYTE,&object);
 | 
			
		||||
  MPI_Type_commit(&object);
 | 
			
		||||
  MPI_Alltoall(in,iwords,object,out,iwords,object,communicator);
 | 
			
		||||
  MPI_Type_free(&object);
 | 
			
		||||
}
 | 
			
		||||
  ///////////////////////////////////////////////////////
 | 
			
		||||
  // Should only be used prior to Grid Init finished.
 | 
			
		||||
@@ -296,5 +245,7 @@ void CartesianCommunicator::BroadcastWorld(int root,void* data, int bytes)
 | 
			
		||||
  assert(ierr==0);
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
 
 | 
			
		||||
@@ -712,7 +712,8 @@ double CartesianCommunicator::StencilSendToRecvFromBegin(std::vector<CommsReques
 | 
			
		||||
							 int from,
 | 
			
		||||
							 int bytes,int dir)
 | 
			
		||||
{
 | 
			
		||||
  assert(dir < communicator_halo.size());
 | 
			
		||||
  int ncomm  =communicator_halo.size(); 
 | 
			
		||||
  int commdir=dir%ncomm;
 | 
			
		||||
 | 
			
		||||
  MPI_Request xrq;
 | 
			
		||||
  MPI_Request rrq;
 | 
			
		||||
@@ -732,14 +733,14 @@ double CartesianCommunicator::StencilSendToRecvFromBegin(std::vector<CommsReques
 | 
			
		||||
  gfrom = MPI_UNDEFINED;
 | 
			
		||||
#endif
 | 
			
		||||
  if ( gfrom ==MPI_UNDEFINED) {
 | 
			
		||||
    ierr=MPI_Irecv(recv, bytes, MPI_CHAR,from,from,communicator_halo[dir],&rrq);
 | 
			
		||||
    ierr=MPI_Irecv(recv, bytes, MPI_CHAR,from,from,communicator_halo[commdir],&rrq);
 | 
			
		||||
    assert(ierr==0);
 | 
			
		||||
    list.push_back(rrq);
 | 
			
		||||
    off_node_bytes+=bytes;
 | 
			
		||||
  }
 | 
			
		||||
 | 
			
		||||
  if ( gdest == MPI_UNDEFINED ) {
 | 
			
		||||
    ierr =MPI_Isend(xmit, bytes, MPI_CHAR,dest,_processor,communicator_halo[dir],&xrq);
 | 
			
		||||
    ierr =MPI_Isend(xmit, bytes, MPI_CHAR,dest,_processor,communicator_halo[commdir],&xrq);
 | 
			
		||||
    assert(ierr==0);
 | 
			
		||||
    list.push_back(xrq);
 | 
			
		||||
    off_node_bytes+=bytes;
 | 
			
		||||
 
 | 
			
		||||
@@ -53,33 +53,13 @@ void CartesianCommunicator::Init(int *argc, char ***argv) {
 | 
			
		||||
  ShmInitGeneric();
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
CartesianCommunicator::CartesianCommunicator(const std::vector<int> &processors)
 | 
			
		||||
CartesianCommunicator::~CartesianCommunicator()
 | 
			
		||||
{
 | 
			
		||||
  _ndimension = processors.size();
 | 
			
		||||
  std::vector<int> periodic(_ndimension,1);
 | 
			
		||||
 | 
			
		||||
  _Nprocessors=1;
 | 
			
		||||
  _processors = processors;
 | 
			
		||||
  _processor_coor.resize(_ndimension);
 | 
			
		||||
  
 | 
			
		||||
  MPI_Cart_create(communicator_world, _ndimension,&_processors[0],&periodic[0],1,&communicator);
 | 
			
		||||
  MPI_Comm_rank(communicator,&_processor);
 | 
			
		||||
  MPI_Cart_coords(communicator,_processor,_ndimension,&_processor_coor[0]);
 | 
			
		||||
 | 
			
		||||
  for(int i=0;i<_ndimension;i++){
 | 
			
		||||
    _Nprocessors*=_processors[i];
 | 
			
		||||
  }
 | 
			
		||||
 | 
			
		||||
  communicator_halo.resize (2*_ndimension);
 | 
			
		||||
  for(int i=0;i<_ndimension*2;i++){
 | 
			
		||||
    MPI_Comm_dup(communicator,&communicator_halo[i]);
 | 
			
		||||
  }
 | 
			
		||||
  
 | 
			
		||||
  int Size; 
 | 
			
		||||
  MPI_Comm_size(communicator,&Size);
 | 
			
		||||
  
 | 
			
		||||
  assert(Size==_Nprocessors);
 | 
			
		||||
  if (communicator && !MPI::Is_finalized())
 | 
			
		||||
    MPI_Comm_free(&communicator);
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
void CartesianCommunicator::GlobalSum(uint32_t &u){
 | 
			
		||||
  int ierr=MPI_Allreduce(MPI_IN_PLACE,&u,1,MPI_UINT32_T,MPI_SUM,communicator);
 | 
			
		||||
  assert(ierr==0);
 | 
			
		||||
@@ -244,13 +224,14 @@ double CartesianCommunicator::StencilSendToRecvFromBegin(std::vector<CommsReques
 | 
			
		||||
{
 | 
			
		||||
  int myrank = _processor;
 | 
			
		||||
  int ierr;
 | 
			
		||||
  assert(dir < communicator_halo.size());
 | 
			
		||||
  int ncomm  =communicator_halo.size(); 
 | 
			
		||||
  int commdir=dir%ncomm;
 | 
			
		||||
  
 | 
			
		||||
  //  std::cout << " sending on communicator "<<dir<<" " <<communicator_halo[dir]<<std::endl;
 | 
			
		||||
  // Give the CPU to MPI immediately; can use threads to overlap optionally
 | 
			
		||||
  MPI_Request req[2];
 | 
			
		||||
  MPI_Irecv(recv,bytes,MPI_CHAR,recv_from_rank,recv_from_rank, communicator_halo[dir],&req[1]);
 | 
			
		||||
  MPI_Isend(xmit,bytes,MPI_CHAR,xmit_to_rank  ,myrank        , communicator_halo[dir],&req[0]);
 | 
			
		||||
  MPI_Irecv(recv,bytes,MPI_CHAR,recv_from_rank,recv_from_rank, communicator_halo[commdir],&req[1]);
 | 
			
		||||
  MPI_Isend(xmit,bytes,MPI_CHAR,xmit_to_rank  ,myrank        , communicator_halo[commdir],&req[0]);
 | 
			
		||||
 | 
			
		||||
  list.push_back(req[0]);
 | 
			
		||||
  list.push_back(req[1]);
 | 
			
		||||
@@ -269,13 +250,14 @@ double CartesianCommunicator::StencilSendToRecvFrom(void *xmit,
 | 
			
		||||
{
 | 
			
		||||
  int myrank = _processor;
 | 
			
		||||
  int ierr;
 | 
			
		||||
  assert(dir < communicator_halo.size());
 | 
			
		||||
  
 | 
			
		||||
  //  std::cout << " sending on communicator "<<dir<<" " <<communicator_halo[dir]<<std::endl;
 | 
			
		||||
  //  std::cout << " sending on communicator "<<dir<<" " <<communicator_halo.size()<< <std::endl;
 | 
			
		||||
 | 
			
		||||
  int ncomm  =communicator_halo.size(); 
 | 
			
		||||
  int commdir=dir%ncomm;
 | 
			
		||||
  // Give the CPU to MPI immediately; can use threads to overlap optionally
 | 
			
		||||
  MPI_Request req[2];
 | 
			
		||||
  MPI_Irecv(recv,bytes,MPI_CHAR,recv_from_rank,recv_from_rank, communicator_halo[dir],&req[1]);
 | 
			
		||||
  MPI_Isend(xmit,bytes,MPI_CHAR,xmit_to_rank  ,myrank        , communicator_halo[dir],&req[0]);
 | 
			
		||||
  MPI_Irecv(recv,bytes,MPI_CHAR,recv_from_rank,recv_from_rank, communicator_halo[commdir],&req[1]);
 | 
			
		||||
  MPI_Isend(xmit,bytes,MPI_CHAR,xmit_to_rank  ,myrank        , communicator_halo[commdir],&req[0]);
 | 
			
		||||
  MPI_Waitall(2, req, MPI_STATUSES_IGNORE);
 | 
			
		||||
  return 2.0*bytes;
 | 
			
		||||
}
 | 
			
		||||
 
 | 
			
		||||
@@ -56,6 +56,8 @@ CartesianCommunicator::CartesianCommunicator(const std::vector<int> &processors)
 | 
			
		||||
  }
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
CartesianCommunicator::~CartesianCommunicator(){}
 | 
			
		||||
 | 
			
		||||
void CartesianCommunicator::GlobalSum(float &){}
 | 
			
		||||
void CartesianCommunicator::GlobalSumVector(float *,int N){}
 | 
			
		||||
void CartesianCommunicator::GlobalSum(double &){}
 | 
			
		||||
@@ -98,6 +100,14 @@ void CartesianCommunicator::SendToRecvFromComplete(std::vector<CommsRequest_t> &
 | 
			
		||||
{
 | 
			
		||||
  assert(0);
 | 
			
		||||
}
 | 
			
		||||
void CartesianCommunicator::AllToAll(int dim,void  *in,void *out,uint64_t words,uint64_t bytes)
 | 
			
		||||
{
 | 
			
		||||
  bcopy(in,out,bytes*words);
 | 
			
		||||
}
 | 
			
		||||
void CartesianCommunicator::AllToAll(void  *in,void *out,uint64_t words,uint64_t bytes)
 | 
			
		||||
{
 | 
			
		||||
  bcopy(in,out,bytes*words);
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
int  CartesianCommunicator::RankWorld(void){return 0;}
 | 
			
		||||
void CartesianCommunicator::Barrier(void){}
 | 
			
		||||
 
 | 
			
		||||
@@ -63,7 +63,7 @@ SOFTWARE.
 | 
			
		||||
        #error "unsupported Clang version - see https://github.com/nlohmann/json#supported-compilers"
 | 
			
		||||
    #endif
 | 
			
		||||
#elif defined(__GNUC__)
 | 
			
		||||
    #if (__GNUC__ * 10000 + __GNUC_MINOR__ * 100 + __GNUC_PATCHLEVEL__) < 40900
 | 
			
		||||
    #if (__GNUC__ * 10000 + __GNUC_MINOR__ * 100 + __GNUC_PATCHLEVEL__) < 40805
 | 
			
		||||
        #error "unsupported GCC version - see https://github.com/nlohmann/json#supported-compilers"
 | 
			
		||||
    #endif
 | 
			
		||||
#endif
 | 
			
		||||
 
 | 
			
		||||
@@ -684,6 +684,307 @@ void precisionChange(Lattice<VobjOut> &out, const Lattice<VobjIn> &in){
 | 
			
		||||
    merge(out._odata[out_oidx], ptrs, 0);
 | 
			
		||||
  }
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
////////////////////////////////////////////////////////////////////////////////
 | 
			
		||||
// Communicate between grids
 | 
			
		||||
////////////////////////////////////////////////////////////////////////////////
 | 
			
		||||
//
 | 
			
		||||
// All to all plan
 | 
			
		||||
//
 | 
			
		||||
// Subvolume on fine grid is v.    Vectors a,b,c,d 
 | 
			
		||||
//
 | 
			
		||||
///////////////////////////////////////////////////////////////////////////////////////////////////////////
 | 
			
		||||
// SIMPLEST CASE:
 | 
			
		||||
///////////////////////////////////////////////////////////////////////////////////////////////////////////
 | 
			
		||||
// Mesh of nodes (2) ; subdivide to  1 subdivisions
 | 
			
		||||
//
 | 
			
		||||
// Lex ord:   
 | 
			
		||||
//          N0 va0 vb0  N1 va1 vb1 
 | 
			
		||||
//
 | 
			
		||||
// For each dimension do an all to all
 | 
			
		||||
//
 | 
			
		||||
// full AllToAll(0)
 | 
			
		||||
//          N0 va0 va1    N1 vb0 vb1
 | 
			
		||||
//
 | 
			
		||||
// REARRANGE
 | 
			
		||||
//          N0 va01       N1 vb01
 | 
			
		||||
//
 | 
			
		||||
// Must also rearrange data to get into the NEW lex order of grid at each stage. Some kind of "insert/extract".
 | 
			
		||||
// NB: Easiest to programme if keep in lex order.
 | 
			
		||||
//
 | 
			
		||||
///////////////////////////////////////////////////////////////////////////////////////////////////////////
 | 
			
		||||
// SIMPLE CASE:
 | 
			
		||||
///////////////////////////////////////////////////////////////////////////////////////////////////////////
 | 
			
		||||
//
 | 
			
		||||
// Mesh of nodes (2x2) ; subdivide to  1x1 subdivisions
 | 
			
		||||
//
 | 
			
		||||
// Lex ord:   
 | 
			
		||||
//          N0 va0 vb0 vc0 vd0       N1 va1 vb1 vc1 vd1  
 | 
			
		||||
//          N2 va2 vb2 vc2 vd2       N3 va3 vb3 vc3 vd3 
 | 
			
		||||
//
 | 
			
		||||
// Ratio = full[dim] / split[dim]
 | 
			
		||||
//
 | 
			
		||||
// For each dimension do an all to all; get Nvec -> Nvec / ratio
 | 
			
		||||
//                                          Ldim -> Ldim * ratio
 | 
			
		||||
//                                          LocalVol -> LocalVol * ratio
 | 
			
		||||
// full AllToAll(0)
 | 
			
		||||
//          N0 va0 vb0 va1 vb1       N1 vc0 vd0 vc1 vd1   
 | 
			
		||||
//          N2 va2 vb2 va3 vb3       N3 vc2 vd2 vc3 vd3 
 | 
			
		||||
//
 | 
			
		||||
// REARRANGE
 | 
			
		||||
//          N0 va01 vb01      N1 vc01 vd01
 | 
			
		||||
//          N2 va23 vb23      N3 vc23 vd23
 | 
			
		||||
//
 | 
			
		||||
// full AllToAll(1)           // Not what is wanted. FIXME
 | 
			
		||||
//          N0 va01 va23      N1 vc01 vc23 
 | 
			
		||||
//          N2 vb01 vb23      N3 vd01 vd23
 | 
			
		||||
// 
 | 
			
		||||
// REARRANGE
 | 
			
		||||
//          N0 va0123      N1 vc0123
 | 
			
		||||
//          N2 vb0123      N3 vd0123
 | 
			
		||||
//
 | 
			
		||||
// Must also rearrange data to get into the NEW lex order of grid at each stage. Some kind of "insert/extract".
 | 
			
		||||
// NB: Easiest to programme if keep in lex order.
 | 
			
		||||
//
 | 
			
		||||
/////////////////////////////////////////////////////////
 | 
			
		||||
template<class Vobj>
 | 
			
		||||
void Grid_split(std::vector<Lattice<Vobj> > & full,Lattice<Vobj>   & split)
 | 
			
		||||
{
 | 
			
		||||
  typedef typename Vobj::scalar_object Sobj;
 | 
			
		||||
 | 
			
		||||
  int full_vecs   = full.size();
 | 
			
		||||
 | 
			
		||||
  assert(full_vecs>=1);
 | 
			
		||||
 | 
			
		||||
  GridBase * full_grid = full[0]._grid;
 | 
			
		||||
  GridBase *split_grid = split._grid;
 | 
			
		||||
 | 
			
		||||
  int       ndim  = full_grid->_ndimension;
 | 
			
		||||
  int  full_nproc = full_grid->_Nprocessors;
 | 
			
		||||
  int split_nproc =split_grid->_Nprocessors;
 | 
			
		||||
 | 
			
		||||
  ////////////////////////////////
 | 
			
		||||
  // Checkerboard management
 | 
			
		||||
  ////////////////////////////////
 | 
			
		||||
  int cb = full[0].checkerboard;
 | 
			
		||||
  split.checkerboard = cb;
 | 
			
		||||
 | 
			
		||||
  //////////////////////////////
 | 
			
		||||
  // Checks
 | 
			
		||||
  //////////////////////////////
 | 
			
		||||
  assert(full_grid->_ndimension==split_grid->_ndimension);
 | 
			
		||||
  for(int n=0;n<full_vecs;n++){
 | 
			
		||||
    assert(full[n].checkerboard == cb);
 | 
			
		||||
    for(int d=0;d<ndim;d++){
 | 
			
		||||
      assert(full[n]._grid->_gdimensions[d]==split._grid->_gdimensions[d]);
 | 
			
		||||
      assert(full[n]._grid->_fdimensions[d]==split._grid->_fdimensions[d]);
 | 
			
		||||
    }
 | 
			
		||||
  }
 | 
			
		||||
 | 
			
		||||
  int   nvector   =full_nproc/split_nproc; 
 | 
			
		||||
  assert(nvector*split_nproc==full_nproc);
 | 
			
		||||
  assert(nvector == full_vecs);
 | 
			
		||||
 | 
			
		||||
  std::vector<int> ratio(ndim);
 | 
			
		||||
  for(int d=0;d<ndim;d++){
 | 
			
		||||
    ratio[d] = full_grid->_processors[d]/ split_grid->_processors[d];
 | 
			
		||||
  }
 | 
			
		||||
 | 
			
		||||
  uint64_t lsites = full_grid->lSites();
 | 
			
		||||
  uint64_t     sz = lsites * nvector;
 | 
			
		||||
  std::vector<Sobj> tmpdata(sz);
 | 
			
		||||
  std::vector<Sobj> alldata(sz);
 | 
			
		||||
  std::vector<Sobj> scalardata(lsites); 
 | 
			
		||||
  for(int v=0;v<nvector;v++){
 | 
			
		||||
    unvectorizeToLexOrdArray(scalardata,full[v]);    
 | 
			
		||||
    parallel_for(int site=0;site<lsites;site++){
 | 
			
		||||
      alldata[v*lsites+site] = scalardata[site];
 | 
			
		||||
    }
 | 
			
		||||
  }
 | 
			
		||||
 | 
			
		||||
  int nvec = nvector; // Counts down to 1 as we collapse dims
 | 
			
		||||
  std::vector<int> ldims = full_grid->_ldimensions;
 | 
			
		||||
  std::vector<int> lcoor(ndim);
 | 
			
		||||
 | 
			
		||||
  for(int d=0;d<ndim;d++){
 | 
			
		||||
 | 
			
		||||
    if ( ratio[d] != 1 ) {
 | 
			
		||||
 | 
			
		||||
      full_grid ->AllToAll(d,alldata,tmpdata);
 | 
			
		||||
 | 
			
		||||
      //////////////////////////////////////////
 | 
			
		||||
      //Local volume for this dimension is expanded by ratio of processor extents
 | 
			
		||||
      // Number of vectors is decreased by same factor
 | 
			
		||||
      // Rearrange to lexico for bigger volume
 | 
			
		||||
      //////////////////////////////////////////
 | 
			
		||||
      nvec    /= ratio[d];
 | 
			
		||||
      auto rdims = ldims; rdims[d]  *=   ratio[d];
 | 
			
		||||
      auto rsites= lsites*ratio[d];
 | 
			
		||||
      for(int v=0;v<nvec;v++){
 | 
			
		||||
 | 
			
		||||
	// For loop over each site within old subvol
 | 
			
		||||
	for(int lsite=0;lsite<lsites;lsite++){
 | 
			
		||||
 | 
			
		||||
	  Lexicographic::CoorFromIndex(lcoor, lsite, ldims);	  
 | 
			
		||||
 | 
			
		||||
	  for(int r=0;r<ratio[d];r++){ // ratio*nvec terms
 | 
			
		||||
 | 
			
		||||
	    auto rcoor = lcoor;	    rcoor[d]  += r*ldims[d];
 | 
			
		||||
 | 
			
		||||
	    int rsite; Lexicographic::IndexFromCoor(rcoor, rsite, rdims);	  
 | 
			
		||||
	    rsite += v * rsites;
 | 
			
		||||
 | 
			
		||||
	    int rmul=nvec*lsites;
 | 
			
		||||
	    int vmul=     lsites;
 | 
			
		||||
	    alldata[rsite] = tmpdata[lsite+r*rmul+v*vmul];
 | 
			
		||||
 | 
			
		||||
	  }
 | 
			
		||||
	}
 | 
			
		||||
      }
 | 
			
		||||
      ldims[d]*= ratio[d];
 | 
			
		||||
      lsites  *= ratio[d];
 | 
			
		||||
 | 
			
		||||
      if ( split_grid->_processors[d] > 1 ) {
 | 
			
		||||
	tmpdata = alldata;
 | 
			
		||||
	split_grid->AllToAll(d,tmpdata,alldata);
 | 
			
		||||
      }
 | 
			
		||||
    }
 | 
			
		||||
  }
 | 
			
		||||
 | 
			
		||||
  vectorizeFromLexOrdArray(alldata,split);    
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
template<class Vobj>
 | 
			
		||||
void Grid_split(Lattice<Vobj> &full,Lattice<Vobj>   & split)
 | 
			
		||||
{
 | 
			
		||||
  int nvector = full._grid->_Nprocessors / split._grid->_Nprocessors;
 | 
			
		||||
  std::vector<Lattice<Vobj> > full_v(nvector,full._grid);
 | 
			
		||||
  for(int n=0;n<nvector;n++){
 | 
			
		||||
    full_v[n] = full;
 | 
			
		||||
  }
 | 
			
		||||
  Grid_split(full_v,split);
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
//////////////////////////////////////////////////////////////////////////
// Inverse of Grid_split: gather the data held on the split (smaller
// communicator) lattice back into nvector full-communicator lattices.
// Works dimension by dimension using AllToAll exchanges, undoing the
// lexicographic rearrangement performed by Grid_split.
// Requires full.size() == full_nproc/split_nproc and matching global
// dimensions and checkerboards on every lattice.
//////////////////////////////////////////////////////////////////////////
template<class Vobj>
void Grid_unsplit(std::vector<Lattice<Vobj> > & full,Lattice<Vobj>   & split)
{
  typedef typename Vobj::scalar_object Sobj;

  int full_vecs   = full.size();

  assert(full_vecs>=1);

  GridBase * full_grid = full[0]._grid;
  GridBase *split_grid = split._grid;

  int       ndim  = full_grid->_ndimension;
  int  full_nproc = full_grid->_Nprocessors;
  int split_nproc =split_grid->_Nprocessors;

  ////////////////////////////////
  // Checkerboard management
  ////////////////////////////////
  int cb = full[0].checkerboard;
  split.checkerboard = cb;

  //////////////////////////////
  // Checks: same rank, same global/full dims, same checkerboard everywhere
  //////////////////////////////
  assert(full_grid->_ndimension==split_grid->_ndimension);
  for(int n=0;n<full_vecs;n++){
    assert(full[n].checkerboard == cb);
    for(int d=0;d<ndim;d++){
      assert(full[n]._grid->_gdimensions[d]==split._grid->_gdimensions[d]);
      assert(full[n]._grid->_fdimensions[d]==split._grid->_fdimensions[d]);
    }
  }

  // Number of full-grid lattices folded into the split grid
  int   nvector   =full_nproc/split_nproc; 
  assert(nvector*split_nproc==full_nproc);
  assert(nvector == full_vecs);

  // Per-dimension ratio of processor extents between the two grids
  std::vector<int> ratio(ndim);
  for(int d=0;d<ndim;d++){
    ratio[d] = full_grid->_processors[d]/ split_grid->_processors[d];
  }

  uint64_t lsites = full_grid->lSites();
  uint64_t     sz = lsites * nvector;
  std::vector<Sobj> tmpdata(sz);
  std::vector<Sobj> alldata(sz);
  std::vector<Sobj> scalardata(lsites); 

  // Flatten the split lattice into lexicographic scalar order
  unvectorizeToLexOrdArray(alldata,split);    

  /////////////////////////////////////////////////////////////////
  // Start from split grid and work towards full grid
  // (reverse dimension order of the forward Grid_split pass)
  /////////////////////////////////////////////////////////////////
  std::vector<int> lcoor(ndim);
  std::vector<int> rcoor(ndim);

  int nvec = 1;                 // grows by ratio[d] each contracted dimension
  lsites = split_grid->lSites();
  std::vector<int> ldims = split_grid->_ldimensions;

  for(int d=ndim-1;d>=0;d--){

    if ( ratio[d] != 1 ) {

      // Undo the split-communicator exchange of the forward pass first
      if ( split_grid->_processors[d] > 1 ) {
	tmpdata = alldata;
	split_grid->AllToAll(d,tmpdata,alldata);
      }

      //////////////////////////////////////////
      //Local volume for this dimension is expanded by ratio of processor extents
      // Number of vectors is decreased by same factor
      // Rearrange to lexico for bigger volume
      //////////////////////////////////////////
      auto rsites= lsites/ratio[d];
      auto rdims = ldims; rdims[d]/=ratio[d];

      for(int v=0;v<nvec;v++){

	// rsite, rcoor --> smaller local volume
	// lsite, lcoor --> bigger original (single node?) volume
	// For loop over each site within smaller subvol
	for(int rsite=0;rsite<rsites;rsite++){

	  Lexicographic::CoorFromIndex(rcoor, rsite, rdims);	  
	  int lsite;

	  for(int r=0;r<ratio[d];r++){ 

	    // Map the small-volume coordinate into slot r of the big volume
	    lcoor = rcoor; lcoor[d] += r*rdims[d];
	    Lexicographic::IndexFromCoor(lcoor, lsite, ldims); lsite += v * lsites;

	    // Pack as [r-major, v-minor] blocks of rsites sites each,
	    // the inverse of the forward unpacking in Grid_split
	    int rmul=nvec*rsites;
	    int vmul=     rsites;
	    tmpdata[rsite+r*rmul+v*vmul]=alldata[lsite];

	  }
	}
      }
      nvec   *= ratio[d];
      ldims[d]=rdims[d];
      lsites  =rsites;

      // Exchange over the full communicator to scatter back to all ranks
      full_grid ->AllToAll(d,tmpdata,alldata);
    }
  }

  // alldata now holds nvector full-grid local volumes back to back;
  // re-vectorize each into its destination lattice.
  lsites = full_grid->lSites();
  for(int v=0;v<nvector;v++){
    parallel_for(int site=0;site<lsites;site++){
      scalardata[site] = alldata[v*lsites+site];
    }
    assert(v<full.size());

    vectorizeFromLexOrdArray(scalardata,full[v]);    

  }
}
 | 
			
		||||
 | 
			
		||||
 
 | 
			
		||||
}
 | 
			
		||||
#endif
 | 
			
		||||
 
 | 
			
		||||
@@ -77,7 +77,6 @@ void CayleyFermion5D<Impl>::DminusDag(const FermionField &psi, FermionField &chi
 | 
			
		||||
  }
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
template<class Impl> void CayleyFermion5D<Impl>::CayleyReport(void)
 | 
			
		||||
{
 | 
			
		||||
  this->Report();
 | 
			
		||||
@@ -119,7 +118,6 @@ template<class Impl> void CayleyFermion5D<Impl>::CayleyZeroCounters(void)
 | 
			
		||||
  MooeeInvTime=0;
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
template<class Impl>  
 | 
			
		||||
void CayleyFermion5D<Impl>::M5D   (const FermionField &psi, FermionField &chi)
 | 
			
		||||
{
 | 
			
		||||
@@ -417,6 +415,8 @@ void CayleyFermion5D<Impl>::SetCoefficientsInternal(RealD zolo_hi,std::vector<Co
 | 
			
		||||
    assert(omega[i]!=Coeff_t(0.0));
 | 
			
		||||
    bs[i] = 0.5*(bpc/omega[i] + bmc);
 | 
			
		||||
    cs[i] = 0.5*(bpc/omega[i] - bmc);
 | 
			
		||||
    std::cout<<GridLogMessage << "CayleyFermion5D "<<i<<" bs="<<bs[i]<<" cs="<<cs[i]<< std::endl;
 | 
			
		||||
 | 
			
		||||
  }
 | 
			
		||||
 | 
			
		||||
  ////////////////////////////////////////////////////////
 | 
			
		||||
 
 | 
			
		||||
@@ -61,10 +61,10 @@ namespace QCD {
 | 
			
		||||
    }
 | 
			
		||||
 | 
			
		||||
    /***************************************************************
 | 
			
		||||
    /* Additional EOFA operators only called outside the inverter.
 | 
			
		||||
    /* Since speed is not essential, simple axpby-style
 | 
			
		||||
    /* implementations should be fine.
 | 
			
		||||
    /***************************************************************/
 | 
			
		||||
     * Additional EOFA operators only called outside the inverter.
 | 
			
		||||
     * Since speed is not essential, simple axpby-style
 | 
			
		||||
     * implementations should be fine.
 | 
			
		||||
     ***************************************************************/
 | 
			
		||||
    template<class Impl>
 | 
			
		||||
    void DomainWallEOFAFermion<Impl>::Omega(const FermionField& psi, FermionField& Din, int sign, int dag)
 | 
			
		||||
    {
 | 
			
		||||
@@ -116,8 +116,8 @@ namespace QCD {
 | 
			
		||||
    }
 | 
			
		||||
 | 
			
		||||
    /********************************************************************
 | 
			
		||||
    /* Performance critical fermion operators called inside the inverter
 | 
			
		||||
    /********************************************************************/
 | 
			
		||||
     * Performance critical fermion operators called inside the inverter
 | 
			
		||||
     ********************************************************************/
 | 
			
		||||
 | 
			
		||||
    template<class Impl>
 | 
			
		||||
    void DomainWallEOFAFermion<Impl>::M5D(const FermionField& psi, FermionField& chi)
 | 
			
		||||
 
 | 
			
		||||
@@ -61,8 +61,8 @@ Author: Peter Boyle <pabobyle@ph.ed.ac.uk>
 | 
			
		||||
#include <Grid/qcd/action/fermion/MobiusFermion.h>
 | 
			
		||||
#include <Grid/qcd/action/fermion/MobiusEOFAFermion.h>
 | 
			
		||||
#include <Grid/qcd/action/fermion/ZMobiusFermion.h>
 | 
			
		||||
#include <Grid/qcd/action/fermion/SchurDiagTwoKappa.h>
 | 
			
		||||
#include <Grid/qcd/action/fermion/ScaledShamirFermion.h>
 | 
			
		||||
//#include <Grid/qcd/action/fermion/SchurDiagTwoKappa.h>
 | 
			
		||||
#include <Grid/qcd/action/fermion/MobiusZolotarevFermion.h>
 | 
			
		||||
#include <Grid/qcd/action/fermion/ShamirZolotarevFermion.h>
 | 
			
		||||
#include <Grid/qcd/action/fermion/OverlapWilsonCayleyTanhFermion.h>
 | 
			
		||||
 
 | 
			
		||||
@@ -77,11 +77,11 @@ namespace QCD {
 | 
			
		||||
      }
 | 
			
		||||
    }
 | 
			
		||||
 | 
			
		||||
    /***************************************************************
 | 
			
		||||
    /* Additional EOFA operators only called outside the inverter.
 | 
			
		||||
    /* Since speed is not essential, simple axpby-style
 | 
			
		||||
    /* implementations should be fine.
 | 
			
		||||
    /***************************************************************/
 | 
			
		||||
    /****************************************************************
 | 
			
		||||
     * Additional EOFA operators only called outside the inverter.  
 | 
			
		||||
     * Since speed is not essential, simple axpby-style
 | 
			
		||||
     * implementations should be fine.
 | 
			
		||||
     ***************************************************************/
 | 
			
		||||
    template<class Impl>
 | 
			
		||||
    void MobiusEOFAFermion<Impl>::Omega(const FermionField& psi, FermionField& Din, int sign, int dag)
 | 
			
		||||
    {
 | 
			
		||||
@@ -194,8 +194,8 @@ namespace QCD {
 | 
			
		||||
    }
 | 
			
		||||
 | 
			
		||||
    /********************************************************************
 | 
			
		||||
    /* Performance critical fermion operators called inside the inverter
 | 
			
		||||
    /********************************************************************/
 | 
			
		||||
     * Performance critical fermion operators called inside the inverter
 | 
			
		||||
     ********************************************************************/
 | 
			
		||||
 | 
			
		||||
    template<class Impl>
 | 
			
		||||
    void MobiusEOFAFermion<Impl>::M5D(const FermionField& psi, FermionField& chi)
 | 
			
		||||
 
 | 
			
		||||
@@ -1,3 +1,4 @@
 | 
			
		||||
#if 1
 | 
			
		||||
    /*************************************************************************************
 | 
			
		||||
 | 
			
		||||
    Grid physics library, www.github.com/paboyle/Grid 
 | 
			
		||||
@@ -97,6 +98,117 @@ namespace Grid {
 | 
			
		||||
    }
 | 
			
		||||
  };
 | 
			
		||||
 | 
			
		||||
#if 0
 | 
			
		||||
  ///////////////////////////////////////////////////////////////////////////////////////////////////////
 | 
			
		||||
  // Copied from DiagTwoSolve
 | 
			
		||||
  ///////////////////////////////////////////////////////////////////////////////////////////////////////
 | 
			
		||||
  // Red-black (even/odd) preconditioned solve using the "DiagTwo" Schur
  // decomposition.  Wraps an inner hermitian red-black solver: solves the
  // odd-odd Schur-complement system, then reconstructs the even solution.
  // NOTE(review): this copy lives inside an #if 0 region in this chunk.
  template<class Field> class SchurRedBlackDiagTwoSolve {
  private:
    OperatorFunction<Field> & _HermitianRBSolver; // inner solver for the odd-checkerboard system
    int CBfactorise;                              // intended to select the factorised checkerboard; currently unused (see FIXME)
  public:

    /////////////////////////////////////////////////////
    // Wrap the usual normal equations Schur trick
    /////////////////////////////////////////////////////
  SchurRedBlackDiagTwoSolve(OperatorFunction<Field> &HermitianRBSolver)  :
     _HermitianRBSolver(HermitianRBSolver) 
    { 
      CBfactorise=0;
    };

    // Solve _Matrix * out = in.  'out' supplies the initial guess for the
    // odd checkerboard; the even part is reconstructed exactly.
    template<class Matrix>
      void operator() (Matrix & _Matrix,const Field &in, Field &out){

      // FIXME CGdiagonalMee not implemented virtual function
      // FIXME use CBfactorise to control schur decomp
      GridBase *grid = _Matrix.RedBlackGrid();
      GridBase *fgrid= _Matrix.Grid();

      SchurDiagTwoOperator<Matrix,Field> _HermOpEO(_Matrix);
 
      Field src_e(grid);
      Field src_o(grid);
      Field sol_e(grid);
      Field sol_o(grid);
      Field   tmp(grid);
      Field  Mtmp(grid);
      Field resid(fgrid);   // full-grid residual for the final check

      pickCheckerboard(Even,src_e,in);
      pickCheckerboard(Odd ,src_o,in);
      pickCheckerboard(Even,sol_e,out);
      pickCheckerboard(Odd ,sol_o,out);
    
      /////////////////////////////////////////////////////
      // src_o = Mdag * (source_o - Moe MeeInv source_e)
      /////////////////////////////////////////////////////
      _Matrix.MooeeInv(src_e,tmp);     assert(  tmp.checkerboard ==Even);
      _Matrix.Meooe   (tmp,Mtmp);      assert( Mtmp.checkerboard ==Odd);     
      tmp=src_o-Mtmp;                  assert(  tmp.checkerboard ==Odd);     

      // get the right MpcDag
      _HermOpEO.MpcDag(tmp,src_o);     assert(src_o.checkerboard ==Odd);       

      //////////////////////////////////////////////////////////////
      // Call the red-black solver
      //////////////////////////////////////////////////////////////
      std::cout<<GridLogMessage << "SchurRedBlack solver calling the MpcDagMp solver" <<std::endl;
//      _HermitianRBSolver(_HermOpEO,src_o,sol_o);  assert(sol_o.checkerboard==Odd);
      // DiagTwo variant: inner solve produces MooeeInv-scaled odd solution,
      // so undo the scaling here to obtain sol_o.
      _HermitianRBSolver(_HermOpEO,src_o,tmp);  assert(tmp.checkerboard==Odd);
      _Matrix.MooeeInv(tmp,sol_o);        assert(  sol_o.checkerboard   ==Odd);

      ///////////////////////////////////////////////////
      // sol_e = M_ee^-1 * ( src_e - Meo sol_o )...
      ///////////////////////////////////////////////////
      _Matrix.Meooe(sol_o,tmp);        assert(  tmp.checkerboard   ==Even);
      src_e = src_e-tmp;               assert(  src_e.checkerboard ==Even);
      _Matrix.MooeeInv(src_e,sol_e);   assert(  sol_e.checkerboard ==Even);
     
      // Scatter both checkerboards back into the full-grid result
      setCheckerboard(out,sol_e); assert(  sol_e.checkerboard ==Even);
      setCheckerboard(out,sol_o); assert(  sol_o.checkerboard ==Odd );

      // Verify the unprec residual
      _Matrix.M(out,resid); 
      resid = resid-in;
      RealD ns = norm2(in);
      RealD nr = norm2(resid);

      // NOTE(review): message says "DiagTwoKappa" but this is the DiagTwo solver
      std::cout<<GridLogMessage << "SchurRedBlackDiagTwoKappa solver true unprec resid "<< std::sqrt(nr/ns) <<" nr "<< nr <<" ns "<<ns << std::endl;
    }     
  };
 | 
			
		||||
#endif
 | 
			
		||||
namespace QCD{
 | 
			
		||||
    //
 | 
			
		||||
    // Determinant is det of middle factor
 | 
			
		||||
    // This assumes Mee is indept of U.
 | 
			
		||||
    //
 | 
			
		||||
    //
 | 
			
		||||
    // Thin adapter exposing the DiagTwo Schur operator over a fermion
    // operator, for use where a differentiable (HMC force) operator of
    // this concrete type is required.
    template<class Impl>
    class SchurDifferentiableDiagTwo:  public SchurDiagTwoOperator<FermionOperator<Impl>,typename Impl::FermionField> 
      {
      public:
      INHERIT_IMPL_TYPES(Impl);  // pulls in FermionField etc. from Impl

 	typedef FermionOperator<Impl> Matrix;

	// Forwards the fermion matrix straight to the base operator
	SchurDifferentiableDiagTwo (Matrix &Mat) : SchurDiagTwoOperator<Matrix,FermionField>(Mat) {};
    };
 | 
			
		||||
#if 0
 | 
			
		||||
    // Kappa-normalised counterpart of SchurDifferentiableDiagTwo.
    // NOTE(review): currently compiled out (#if 0) in this chunk.
    template<class Impl>
    class SchurDifferentiableDiagTwoKappa :  public SchurDiagTwoKappaOperator<FermionOperator<Impl>,typename Impl::FermionField> 
      {
      public:
      INHERIT_IMPL_TYPES(Impl);  // pulls in FermionField etc. from Impl

 	typedef FermionOperator<Impl> Matrix;

	// Forwards the fermion matrix straight to the base operator
	SchurDifferentiableDiagTwoKappa (Matrix &Mat) : SchurDiagTwoKappaOperator<Matrix,FermionField>(Mat) {};
    };
 | 
			
		||||
#endif
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
#endif
 | 
			
		||||
#endif
 | 
			
		||||
 
 | 
			
		||||
@@ -140,6 +140,7 @@ namespace Grid{
 | 
			
		||||
 | 
			
		||||
    };
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
  }
 | 
			
		||||
}
 | 
			
		||||
#endif
 | 
			
		||||
 
 | 
			
		||||
@@ -231,7 +231,7 @@ class ForceGradient : public Integrator<FieldImplementation, SmearingPolicy,
 | 
			
		||||
    Field Pfg(U._grid);
 | 
			
		||||
    Ufg = U;
 | 
			
		||||
    Pfg = zero;
 | 
			
		||||
    std::cout << GridLogMessage << "FG update " << fg_dt << " " << ep
 | 
			
		||||
    std::cout << GridLogIntegrator << "FG update " << fg_dt << " " << ep
 | 
			
		||||
              << std::endl;
 | 
			
		||||
    // prepare_fg; no prediction/result cache for now
 | 
			
		||||
    // could relax CG stopping conditions for the
 | 
			
		||||
 
 | 
			
		||||
@@ -50,6 +50,7 @@ GridCartesian *SpaceTimeGrid::makeFourDimDWFGrid(const std::vector<int> & latt,c
 | 
			
		||||
GridCartesian         *SpaceTimeGrid::makeFiveDimGrid(int Ls,const GridCartesian *FourDimGrid)
 | 
			
		||||
{
 | 
			
		||||
  int N4=FourDimGrid->_ndimension;
 | 
			
		||||
  assert(N4==4);
 | 
			
		||||
 | 
			
		||||
  std::vector<int> latt5(1,Ls);
 | 
			
		||||
  std::vector<int> simd5(1,1);
 | 
			
		||||
 
 | 
			
		||||
@@ -243,6 +243,12 @@ void Grid_init(int *argc,char ***argv)
 | 
			
		||||
    fname<<CartesianCommunicator::RankWorld();
 | 
			
		||||
    fp=freopen(fname.str().c_str(),"w",stdout);
 | 
			
		||||
    assert(fp!=(FILE *)NULL);
 | 
			
		||||
 | 
			
		||||
    std::ostringstream ename;
 | 
			
		||||
    ename<<"Grid.stderr.";
 | 
			
		||||
    ename<<CartesianCommunicator::RankWorld();
 | 
			
		||||
    fp=freopen(ename.str().c_str(),"w",stderr);
 | 
			
		||||
    assert(fp!=(FILE *)NULL);
 | 
			
		||||
  }
 | 
			
		||||
 | 
			
		||||
  ////////////////////////////////////
 | 
			
		||||
 
 | 
			
		||||
@@ -18,6 +18,10 @@ namespace Grid{
 | 
			
		||||
 | 
			
		||||
    static inline void IndexFromCoor (const std::vector<int>& coor,int &index,const std::vector<int> &dims){
 | 
			
		||||
      int nd=dims.size();
 | 
			
		||||
      if(nd > coor.size())  {
 | 
			
		||||
	std::cout<< "coor.size "<<coor.size()<<" >dims.size "<<dims.size()<<std::endl; 
 | 
			
		||||
	assert(0);
 | 
			
		||||
	}
 | 
			
		||||
      int stride=1;
 | 
			
		||||
      index=0;
 | 
			
		||||
      for(int d=0;d<nd;d++){
 | 
			
		||||
 
 | 
			
		||||
@@ -1,4 +1,4 @@
 | 
			
		||||
SUBDIRS = . core forces hmc solver debug smearing IO
 | 
			
		||||
SUBDIRS = . core forces hmc solver debug smearing IO lanczos
 | 
			
		||||
 | 
			
		||||
if BUILD_CHROMA_REGRESSION
 | 
			
		||||
  SUBDIRS+= qdpxx
 | 
			
		||||
 
 | 
			
		||||
							
								
								
									
										1085
									
								
								tests/lanczos/FieldVectorIO.h
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										1085
									
								
								tests/lanczos/FieldVectorIO.h
									
									
									
									
									
										Normal file
									
								
							
										
											
												File diff suppressed because it is too large
												Load Diff
											
										
									
								
							
							
								
								
									
										1
									
								
								tests/lanczos/Makefile.am
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										1
									
								
								tests/lanczos/Makefile.am
									
									
									
									
									
										Normal file
									
								
							@@ -0,0 +1 @@
 | 
			
		||||
include Make.inc
 | 
			
		||||
							
								
								
									
										136
									
								
								tests/lanczos/Params.h
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										136
									
								
								tests/lanczos/Params.h
									
									
									
									
									
										Normal file
									
								
							@@ -0,0 +1,136 @@
 | 
			
		||||
/*
 | 
			
		||||
  Params IO
 | 
			
		||||
 | 
			
		||||
  Author: Christoph Lehner
 | 
			
		||||
  Date:   2017
 | 
			
		||||
*/
 | 
			
		||||
 | 
			
		||||
#define PADD(p,X) p.get(#X,X);
 | 
			
		||||
 | 
			
		||||
class Params {
 | 
			
		||||
 protected:
 | 
			
		||||
 | 
			
		||||
  std::string trim(const std::string& sc) {
 | 
			
		||||
    std::string s = sc;
 | 
			
		||||
    s.erase(s.begin(), std::find_if(s.begin(), s.end(),
 | 
			
		||||
				    std::not1(std::ptr_fun<int, int>(std::isspace))));
 | 
			
		||||
    s.erase(std::find_if(s.rbegin(), s.rend(),
 | 
			
		||||
			 std::not1(std::ptr_fun<int, int>(std::isspace))).base(), s.end());
 | 
			
		||||
    return s;
 | 
			
		||||
  }
 | 
			
		||||
 | 
			
		||||
 public:
 | 
			
		||||
 | 
			
		||||
  std::map< std::string, std::string > lines;
 | 
			
		||||
  std::string _fn;
 | 
			
		||||
 | 
			
		||||
 Params(const char* fn) : _fn(fn) {
 | 
			
		||||
    FILE* f = fopen(fn,"rt");
 | 
			
		||||
    assert(f);
 | 
			
		||||
    while (!feof(f)) {
 | 
			
		||||
      char buf[4096];
 | 
			
		||||
      if (fgets(buf,sizeof(buf),f)) {
 | 
			
		||||
	if (buf[0] != '#' && buf[0] != '\r' && buf[0] != '\n') {
 | 
			
		||||
	  char* sep = strchr(buf,'=');
 | 
			
		||||
	  assert(sep);
 | 
			
		||||
	  *sep = '\0';
 | 
			
		||||
	  lines[trim(buf)] = trim(sep+1);
 | 
			
		||||
	}
 | 
			
		||||
      }
 | 
			
		||||
    }      
 | 
			
		||||
    fclose(f);
 | 
			
		||||
  }
 | 
			
		||||
 | 
			
		||||
  ~Params() {
 | 
			
		||||
  }
 | 
			
		||||
 | 
			
		||||
  std::string loghead() {
 | 
			
		||||
    return _fn + ": ";
 | 
			
		||||
  }
 | 
			
		||||
 | 
			
		||||
  bool has(const char* name) {
 | 
			
		||||
    auto f = lines.find(name);
 | 
			
		||||
    return (f != lines.end());
 | 
			
		||||
  }
 | 
			
		||||
 | 
			
		||||
  const std::string& get(const char* name) {
 | 
			
		||||
    auto f = lines.find(name);
 | 
			
		||||
    if (f == lines.end()) {
 | 
			
		||||
      std::cout << Grid::GridLogMessage << loghead() << "Could not find value for " << name << std::endl;
 | 
			
		||||
      abort();
 | 
			
		||||
    }
 | 
			
		||||
    return f->second;
 | 
			
		||||
  }
 | 
			
		||||
 | 
			
		||||
  void parse(std::string& s, const std::string& cval) {
 | 
			
		||||
    std::stringstream trimmer;
 | 
			
		||||
    trimmer << cval;
 | 
			
		||||
    s.clear();
 | 
			
		||||
    trimmer >> s;
 | 
			
		||||
  }
 | 
			
		||||
 | 
			
		||||
  void parse(int& i, const std::string& cval) {
 | 
			
		||||
    assert(sscanf(cval.c_str(),"%d",&i)==1);
 | 
			
		||||
  }
 | 
			
		||||
 | 
			
		||||
  void parse(long long& i, const std::string& cval) {
 | 
			
		||||
    assert(sscanf(cval.c_str(),"%lld",&i)==1);
 | 
			
		||||
  }
 | 
			
		||||
 | 
			
		||||
  void parse(double& f, const std::string& cval) {
 | 
			
		||||
    assert(sscanf(cval.c_str(),"%lf",&f)==1);
 | 
			
		||||
  }
 | 
			
		||||
 | 
			
		||||
  void parse(float& f, const std::string& cval) {
 | 
			
		||||
    assert(sscanf(cval.c_str(),"%f",&f)==1);
 | 
			
		||||
  }
 | 
			
		||||
 | 
			
		||||
  void parse(bool& b, const std::string& cval) {
 | 
			
		||||
    std::string lcval = cval;
 | 
			
		||||
    std::transform(lcval.begin(), lcval.end(), lcval.begin(), ::tolower);
 | 
			
		||||
    if (lcval == "true" || lcval == "yes") {
 | 
			
		||||
      b = true;
 | 
			
		||||
    } else if (lcval == "false" || lcval == "no") {
 | 
			
		||||
      b = false;
 | 
			
		||||
    } else {
 | 
			
		||||
      std::cout << "Invalid value for boolean: " << b << std::endl;
 | 
			
		||||
      assert(0);
 | 
			
		||||
    }
 | 
			
		||||
  }
 | 
			
		||||
 | 
			
		||||
  void parse(std::complex<double>& f, const std::string& cval) {
 | 
			
		||||
    double r,i;
 | 
			
		||||
    assert(sscanf(cval.c_str(),"%lf %lf",&r,&i)==2);
 | 
			
		||||
    f = std::complex<double>(r,i);
 | 
			
		||||
  }
 | 
			
		||||
 | 
			
		||||
  void parse(std::complex<float>& f, const std::string& cval) {
 | 
			
		||||
    float r,i;
 | 
			
		||||
    assert(sscanf(cval.c_str(),"%f %f",&r,&i)==2);
 | 
			
		||||
    f = std::complex<float>(r,i);
 | 
			
		||||
  }
 | 
			
		||||
 | 
			
		||||
  template<class T>
 | 
			
		||||
    void get(const char* name, std::vector<T>& v) {
 | 
			
		||||
    int i = 0;
 | 
			
		||||
    v.resize(0);
 | 
			
		||||
    while (true) {
 | 
			
		||||
      char buf[4096];
 | 
			
		||||
      sprintf(buf,"%s[%d]",name,i++);
 | 
			
		||||
      if (!has(buf))
 | 
			
		||||
	break;
 | 
			
		||||
      T val;
 | 
			
		||||
      parse(val,get(buf));
 | 
			
		||||
      std::cout << Grid::GridLogMessage << loghead() << "Set " << buf << " to " << val << std::endl;
 | 
			
		||||
      v.push_back(val);
 | 
			
		||||
    }
 | 
			
		||||
  }
 | 
			
		||||
 | 
			
		||||
  template<class T>
 | 
			
		||||
    void get(const char* name, T& f) {
 | 
			
		||||
    parse(f,get(name));
 | 
			
		||||
    std::cout << Grid::GridLogMessage << loghead() << "Set " << name << " to " << f << std::endl;
 | 
			
		||||
  }
 | 
			
		||||
 | 
			
		||||
  
 | 
			
		||||
};
 | 
			
		||||
							
								
								
									
										727
									
								
								tests/lanczos/Test_dwf_compressed_lanczos.cc
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										727
									
								
								tests/lanczos/Test_dwf_compressed_lanczos.cc
									
									
									
									
									
										Normal file
									
								
							@@ -0,0 +1,727 @@
 | 
			
		||||
/*
 | 
			
		||||
  Authors: Christoph Lehner
 | 
			
		||||
  Date: 2017
 | 
			
		||||
 | 
			
		||||
  Multigrid Lanczos
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
  TODO:
 | 
			
		||||
 | 
			
		||||
  High priority:
 | 
			
		||||
  - Explore filtering of starting vector again, should really work:  If cheby has 4 for low mode region and 1 for high mode, applying 15 iterations has 1e9 suppression
 | 
			
		||||
    of high modes, which should create the desired invariant subspace already?  Missing something here???  Maybe dynamic range dangerous, i.e., could also kill interesting
 | 
			
		||||
    eigenrange if not careful.
 | 
			
		||||
 | 
			
		||||
    Better: Use all Cheby up to order N in order to approximate a step function; try this!  Problem: width of step function.  Can kill eigenspace > 1e-3 and have < 1e-5 equal
 | 
			
		||||
            to 1
 | 
			
		||||
 | 
			
		||||
  Low priority:
 | 
			
		||||
  - Given that I seem to need many restarts and high degree poly to create the base and this takes about 1 day, seriously consider a simple method to create a basis
 | 
			
		||||
    (ortho krylov low poly); and then fix up lowest say 200 eigenvalues by 1 run with high-degree poly (600 could be enough)
 | 
			
		||||
*/
 | 
			
		||||
#include <Grid/Grid.h>
 | 
			
		||||
#include <Grid/algorithms/iterative/BlockImplicitlyRestartedLanczos/BlockImplicitlyRestartedLanczos.h>
 | 
			
		||||
#include "FieldVectorIO.h"
 | 
			
		||||
#include "Params.h"
 | 
			
		||||
 | 
			
		||||
using namespace std;
 | 
			
		||||
using namespace Grid;
 | 
			
		||||
using namespace Grid::QCD;
 | 
			
		||||
 | 
			
		||||
// Read eigenvalues from a text file (count on the first line, one value
// per line).  Only the boss rank opens the file; the count and values are
// broadcast to all ranks via global sums.  Returns false if the file
// cannot be opened.
bool read_evals(GridBase* _grid, char* fn, std::vector<RealD>& evals) {

  FILE* f = 0;
  uint32_t status = 0;
  if (_grid->IsBoss()) {
    f = fopen(fn,"rt");
    status = f ? 1 : 0;
  }
  _grid->GlobalSum(status);

  if (!status)
    return false;

  uint32_t N;
  if (f)
    // BUGFIX: %u matches uint32_t; %d (int*) was a conversion mismatch (UB)
    assert(fscanf(f,"%u\n",&N)==1);
  else
    N = 0;
  _grid->GlobalSum(N);

  std::cout << "Reading " << N << " eigenvalues" << std::endl;

  evals.resize(N);

  // unsigned index avoids signed/unsigned comparison with N
  for (uint32_t i=0;i<N;i++) {
    if (f)
      assert(fscanf(f,"%lf",&evals[i])==1); // assumes RealD is double -- TODO confirm
    else
      evals[i] = 0;
  }

  // BUGFIX: guard &evals[0] against an empty vector (UB on N==0)
  if (!evals.empty())
    _grid->GlobalSumVector(&evals[0],evals.size());

  if (f)
    fclose(f);
  return true;
}
 | 
			
		||||
 | 
			
		||||
void write_evals(char* fn, std::vector<RealD>& evals) {
 | 
			
		||||
  FILE* f = fopen(fn,"wt");
 | 
			
		||||
  assert(f);
 | 
			
		||||
 | 
			
		||||
  int N = (int)evals.size();
 | 
			
		||||
  fprintf(f,"%d\n",N);
 | 
			
		||||
 | 
			
		||||
  for (int i=0;i<N;i++) {
 | 
			
		||||
    fprintf(f,"%.15E\n",evals[i]);
 | 
			
		||||
  }
 | 
			
		||||
 | 
			
		||||
  fclose(f);
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
void write_history(char* fn, std::vector<RealD>& hist) {
 | 
			
		||||
  FILE* f = fopen(fn,"wt");
 | 
			
		||||
  assert(f);
 | 
			
		||||
 | 
			
		||||
  int N = (int)hist.size();
 | 
			
		||||
  for (int i=0;i<N;i++) {
 | 
			
		||||
    fprintf(f,"%d %.15E\n",i,hist[i]);
 | 
			
		||||
  }
 | 
			
		||||
 | 
			
		||||
  fclose(f);
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// Wraps a polynomial (OperatorFunction) applied to a linear operator so
// the pair can be used wherever a plain LinearFunction is expected,
// e.g. as the Chebyshev-filtered operator inside the Lanczos.
template<typename Field>
class FunctionHermOp : public LinearFunction<Field> {
public:
  OperatorFunction<Field>   & _poly;   // polynomial to apply, e.g. a Chebyshev
  LinearOperatorBase<Field> &_Linop;   // underlying hermitian operator

  FunctionHermOp(OperatorFunction<Field> & poly,LinearOperatorBase<Field>& linop) : _poly(poly), _Linop(linop) {
  }

  // out = poly(Linop) * in
  void operator()(const Field& in, Field& out) {
    _poly(_Linop,in,out);
  }
};
 | 
			
		||||
 | 
			
		||||
template<typename Field>
 | 
			
		||||
class CheckpointedLinearFunction : public LinearFunction<Field> {
 | 
			
		||||
public:
 | 
			
		||||
  LinearFunction<Field>& _op;
 | 
			
		||||
  std::string _dir;
 | 
			
		||||
  int _max_apply;
 | 
			
		||||
  int _apply, _apply_actual;
 | 
			
		||||
  GridBase* _grid;
 | 
			
		||||
  FILE* _f;
 | 
			
		||||
 | 
			
		||||
  CheckpointedLinearFunction(GridBase* grid, LinearFunction<Field>& op, const char* dir,int max_apply) : _op(op), _dir(dir), _grid(grid), _f(0),
 | 
			
		||||
													 _max_apply(max_apply), _apply(0), _apply_actual(0) {
 | 
			
		||||
 | 
			
		||||
    FieldVectorIO::conditionalMkDir(dir);
 | 
			
		||||
 | 
			
		||||
    char fn[4096];
 | 
			
		||||
    sprintf(fn,"%s/ckpt_op.%4.4d",_dir.c_str(),_grid->ThisRank());
 | 
			
		||||
    printf("CheckpointLinearFunction:: file %s\n",fn);
 | 
			
		||||
    _f = fopen(fn,"r+b");
 | 
			
		||||
    if (!_f)
 | 
			
		||||
      _f = fopen(fn,"w+b");
 | 
			
		||||
    assert(_f);
 | 
			
		||||
    fseek(_f,0,SEEK_CUR);
 | 
			
		||||
 | 
			
		||||
  }
 | 
			
		||||
 | 
			
		||||
  ~CheckpointedLinearFunction() {
 | 
			
		||||
    if (_f) {
 | 
			
		||||
      fclose(_f);
 | 
			
		||||
      _f = 0;
 | 
			
		||||
    }
 | 
			
		||||
  }
 | 
			
		||||
 | 
			
		||||
  bool load_ckpt(const Field& in, Field& out) {
 | 
			
		||||
 | 
			
		||||
    off_t cur = ftello(_f);
 | 
			
		||||
    fseeko(_f,0,SEEK_END);
 | 
			
		||||
    if (cur == ftello(_f))
 | 
			
		||||
      return false;
 | 
			
		||||
    fseeko(_f,cur,SEEK_SET);
 | 
			
		||||
 | 
			
		||||
    size_t sz = sizeof(out._odata[0]) * out._odata.size();
 | 
			
		||||
 | 
			
		||||
    GridStopWatch gsw;
 | 
			
		||||
    gsw.Start();
 | 
			
		||||
    uint32_t crc_exp;
 | 
			
		||||
    assert(fread(&crc_exp,4,1,_f)==1);
 | 
			
		||||
    assert(fread(&out._odata[0],sz,1,_f)==1);
 | 
			
		||||
    assert(FieldVectorIO::crc32_threaded((unsigned char*)&out._odata[0],sz,0x0)==crc_exp);
 | 
			
		||||
    gsw.Stop();
 | 
			
		||||
 | 
			
		||||
    printf("CheckpointLinearFunction:: reading %lld\n",(long long)sz);
 | 
			
		||||
    std::cout << GridLogMessage << "Loading " << ((RealD)sz/1024./1024./1024.) << " GB in " << gsw.Elapsed() << std::endl;
 | 
			
		||||
    return true;
 | 
			
		||||
  }
 | 
			
		||||
 | 
			
		||||
  void save_ckpt(const Field& in, Field& out) {
 | 
			
		||||
 | 
			
		||||
    fseek(_f,0,SEEK_CUR); // switch to write
 | 
			
		||||
 | 
			
		||||
    size_t sz = sizeof(out._odata[0]) * out._odata.size();
 | 
			
		||||
 | 
			
		||||
    GridStopWatch gsw;
 | 
			
		||||
    gsw.Start();
 | 
			
		||||
    uint32_t crc = FieldVectorIO::crc32_threaded((unsigned char*)&out._odata[0],sz,0x0);
 | 
			
		||||
    assert(fwrite(&crc,4,1,_f)==1);
 | 
			
		||||
    assert(fwrite(&out._odata[0],sz,1,_f)==1);
 | 
			
		||||
    fflush(_f); // try this on the GPFS to suppress OPA usage for disk during dslash; this is not needed at Lustre/JLAB
 | 
			
		||||
    gsw.Stop();
 | 
			
		||||
 | 
			
		||||
    printf("CheckpointLinearFunction:: writing %lld\n",(long long)sz);
 | 
			
		||||
    std::cout << GridLogMessage << "Saving " << ((RealD)sz/1024./1024./1024.) << " GB in " << gsw.Elapsed() << std::endl;
 | 
			
		||||
  }
 | 
			
		||||
 | 
			
		||||
  void operator()(const Field& in, Field& out) {
 | 
			
		||||
 | 
			
		||||
    _apply++;
 | 
			
		||||
 | 
			
		||||
    if (load_ckpt(in,out))
 | 
			
		||||
      return;
 | 
			
		||||
 | 
			
		||||
    _op(in,out);
 | 
			
		||||
    
 | 
			
		||||
    save_ckpt(in,out);
 | 
			
		||||
 | 
			
		||||
    if (_apply_actual++ >= _max_apply) {
 | 
			
		||||
      std::cout << GridLogMessage << "Maximum application of operator reached, checkpoint and finish in future job" << std::endl;
 | 
			
		||||
      if (_f) { fclose(_f); _f=0; }
 | 
			
		||||
      in._grid->Barrier();
 | 
			
		||||
      Grid_finalize();
 | 
			
		||||
      exit(3);
 | 
			
		||||
    }
 | 
			
		||||
  }
 | 
			
		||||
};
 | 
			
		||||
 | 
			
		||||
template<typename CoarseField,typename Field>
 | 
			
		||||
class ProjectedFunctionHermOp : public LinearFunction<CoarseField> {
 | 
			
		||||
public:
 | 
			
		||||
  OperatorFunction<Field>   & _poly;
 | 
			
		||||
  LinearOperatorBase<Field> &_Linop;
 | 
			
		||||
  BlockProjector<Field>& _pr;
 | 
			
		||||
 | 
			
		||||
  ProjectedFunctionHermOp(BlockProjector<Field>& pr,OperatorFunction<Field> & poly,LinearOperatorBase<Field>& linop) : _poly(poly), _Linop(linop), _pr(pr) {
 | 
			
		||||
  }
 | 
			
		||||
 | 
			
		||||
  void operator()(const CoarseField& in, CoarseField& out) {
 | 
			
		||||
    assert(_pr._bgrid._o_blocks == in._grid->oSites());
 | 
			
		||||
 | 
			
		||||
    Field fin(_pr._bgrid._grid);
 | 
			
		||||
    Field fout(_pr._bgrid._grid);
 | 
			
		||||
 | 
			
		||||
    GridStopWatch gsw1,gsw2,gsw3;
 | 
			
		||||
    // fill fin
 | 
			
		||||
    gsw1.Start();
 | 
			
		||||
    _pr.coarseToFine(in,fin);
 | 
			
		||||
    gsw1.Stop();
 | 
			
		||||
 | 
			
		||||
    // apply poly
 | 
			
		||||
    gsw2.Start();
 | 
			
		||||
    _poly(_Linop,fin,fout);
 | 
			
		||||
    gsw2.Stop();
 | 
			
		||||
 | 
			
		||||
    // fill out
 | 
			
		||||
    gsw3.Start();
 | 
			
		||||
    _pr.fineToCoarse(fout,out);
 | 
			
		||||
    gsw3.Stop();
 | 
			
		||||
 | 
			
		||||
    auto eps = innerProduct(in,out);
 | 
			
		||||
    std::cout << GridLogMessage << "Operator timing details: c2f = " << gsw1.Elapsed() << " poly = " << gsw2.Elapsed() << " f2c = " << gsw3.Elapsed() << 
 | 
			
		||||
      "   Complimentary Hermiticity check: " << eps.imag() / std::abs(eps) << std::endl;
 | 
			
		||||
 | 
			
		||||
  }
 | 
			
		||||
};
 | 
			
		||||
 | 
			
		||||
template<typename CoarseField,typename Field>
 | 
			
		||||
class ProjectedHermOp : public LinearFunction<CoarseField> {
 | 
			
		||||
public:
 | 
			
		||||
  LinearOperatorBase<Field> &_Linop;
 | 
			
		||||
  BlockProjector<Field>& _pr;
 | 
			
		||||
 | 
			
		||||
  ProjectedHermOp(BlockProjector<Field>& pr,LinearOperatorBase<Field>& linop) : _Linop(linop), _pr(pr) {
 | 
			
		||||
  }
 | 
			
		||||
 | 
			
		||||
  void operator()(const CoarseField& in, CoarseField& out) {
 | 
			
		||||
    assert(_pr._bgrid._o_blocks == in._grid->oSites());
 | 
			
		||||
    Field fin(_pr._bgrid._grid);
 | 
			
		||||
    Field fout(_pr._bgrid._grid);
 | 
			
		||||
    _pr.coarseToFine(in,fin);
 | 
			
		||||
    _Linop.HermOp(fin,fout);
 | 
			
		||||
    _pr.fineToCoarse(fout,out);
 | 
			
		||||
 | 
			
		||||
  }
 | 
			
		||||
};
 | 
			
		||||
 | 
			
		||||
template<typename Field>
 | 
			
		||||
class PlainHermOp : public LinearFunction<Field> {
 | 
			
		||||
public:
 | 
			
		||||
  LinearOperatorBase<Field> &_Linop;
 | 
			
		||||
 | 
			
		||||
  PlainHermOp(LinearOperatorBase<Field>& linop) : _Linop(linop) {
 | 
			
		||||
  }
 | 
			
		||||
 | 
			
		||||
  void operator()(const Field& in, Field& out) {
 | 
			
		||||
    _Linop.HermOp(in,out);
 | 
			
		||||
  }
 | 
			
		||||
};
 | 
			
		||||
 | 
			
		||||
// Coarse-grid site and lattice types: each coarse site carries an iVector of
// N complex coefficients (N = number of fine basis vectors per block).
template<typename vtype, int N > using CoarseSiteFieldGeneral = iScalar< iVector<vtype, N> >;
template<int N> using CoarseSiteFieldD = CoarseSiteFieldGeneral< vComplexD, N >;  // double precision
template<int N> using CoarseSiteFieldF = CoarseSiteFieldGeneral< vComplexF, N >;  // single precision
template<int N> using CoarseSiteField  = CoarseSiteFieldGeneral< vComplex,  N >;  // default precision
template<int N> using CoarseLatticeFermion  = Lattice< CoarseSiteField<N> >;
template<int N> using CoarseLatticeFermionD = Lattice< CoarseSiteFieldD<N> >;
 | 
			
		||||
 | 
			
		||||
// Second-stage (coarse-grid) Lanczos on the blocked/compressed space spanned
// by the Nstop1 fine basis vectors held in pr.  Results (compressed vectors
// and eigenvalue files) are written to "lanczos.output"; if those files exist
// they are read back instead of recomputing.  Optionally smooths eigenvalues
// with inner CG solves and runs deflated-CG comparison tests.
// NOTE(review): logic is restart-driven collective I/O; statement order matters.
template<typename Field,int Nstop1>
void CoarseGridLanczos(BlockProjector<Field>& pr,RealD alpha2,RealD beta,int Npoly2,
		       int Nstop2,int Nk2,int Nm2,RealD resid2,RealD betastp2,int MaxIt,int MinRes2,
		       LinearOperatorBase<Field>& HermOp, std::vector<RealD>& eval1, bool cg_test_enabled, 
		       int cg_test_maxiter,int nsingle,int SkipTest2, int MaxApply2,bool smoothed_eval_enabled,
		       int smoothed_eval_inner,int smoothed_eval_outer,int smoothed_eval_begin,
		       int smoothed_eval_end,RealD smoothed_eval_inner_resid) {

  BlockedGrid<Field>& bgrid = pr._bgrid;
  BasisFieldVector<Field>& basis = pr._evec;

  // Coarse 4d lattice: one site per block per node; the 5th (block) dimension
  // must not be MPI-decomposed (asserted below).
  std::vector<int> coarseFourDimLatt;
  for (int i=0;i<4;i++)
    coarseFourDimLatt.push_back(bgrid._nb[1+i] * bgrid._grid->_processors[1+i]);
  assert(bgrid._grid->_processors[0] == 1);

  std::cout << GridLogMessage << "CoarseGrid = " << coarseFourDimLatt << " with basis = " << Nstop1 << std::endl;
  GridCartesian         * UCoarseGrid   = SpaceTimeGrid::makeFourDimGrid(coarseFourDimLatt, GridDefaultSimd(Nd,vComplex::Nsimd()),GridDefaultMpi());
  GridCartesian         * FCoarseGrid   = SpaceTimeGrid::makeFiveDimGrid(bgrid._nb[0],UCoarseGrid);

  Chebyshev<Field> Cheb2(alpha2,beta,Npoly2);
  CoarseLatticeFermion<Nstop1> src_coarse(FCoarseGrid);

  // Second round of Lanczos in blocked space
  std::vector<RealD>         eval2(Nm2);   // eigenvalues of the polynomial operator
  std::vector<RealD>         eval3(Nm2);   // "best" eigenvalue estimates (mixed)
  BasisFieldVector<CoarseLatticeFermion<Nstop1> > coef(Nm2,FCoarseGrid);

  // Operator for the coarse Lanczos; optionally wrapped with checkpointing
  // when a per-job application limit (MaxApply2) is set.
  ProjectedFunctionHermOp<CoarseLatticeFermion<Nstop1>,LatticeFermion> Op2plain(pr,Cheb2,HermOp);
  CheckpointedLinearFunction<CoarseLatticeFermion<Nstop1> > Op2ckpt(src_coarse._grid,Op2plain,"checkpoint",MaxApply2);
  LinearFunction< CoarseLatticeFermion<Nstop1> >* Op2;
  if (MaxApply2) {
    Op2 = &Op2ckpt;
  } else {
    Op2 = &Op2plain;
  }
  ProjectedHermOp<CoarseLatticeFermion<Nstop1>,LatticeFermion> Op2nopoly(pr,HermOp);
  BlockImplicitlyRestartedLanczos<CoarseLatticeFermion<Nstop1> > IRL2(*Op2,*Op2,Nstop2,Nk2,Nm2,resid2,betastp2,MaxIt,MinRes2);

  src_coarse = 1.0;
  
  // Precision test: a coarse->fine->coarse round trip should reproduce the
  // coarse field up to compression error.
  {
    Field tmp(bgrid._grid);
    CoarseLatticeFermion<Nstop1> tmp2(FCoarseGrid);
    CoarseLatticeFermion<Nstop1> tmp3(FCoarseGrid);
    tmp2 = 1.0;
    tmp3 = 1.0;

    pr.coarseToFine(tmp2,tmp);
    pr.fineToCoarse(tmp,tmp2);

    tmp2 -= tmp3;
    std::cout << GridLogMessage << "Precision Test c->f->c: " << norm2(tmp2) / norm2(tmp3) << std::endl;

    //bgrid._grid->Barrier();
    //return;
  }

  // Run the coarse Lanczos only if no previous results can be read back.
  int Nconv;
  if (!FieldVectorIO::read_compressed_vectors("lanczos.output",pr,coef) ||
      !read_evals(UCoarseGrid,(char *)"lanczos.output/eigen-values.txt",eval3) ||
      !read_evals(UCoarseGrid,(char *)"lanczos.output/eigen-values.txt.linear",eval1) ||
      !read_evals(UCoarseGrid,(char *)"lanczos.output/eigen-values.txt.poly",eval2)
      ) {
    

    IRL2.calc(eval2,coef,src_coarse,Nconv,true,SkipTest2);

    // keep only the converged Nstop2 vectors/values
    coef.resize(Nstop2);
    eval2.resize(Nstop2);
    eval3.resize(Nstop2);

    std::vector<Field> step3_cache;

    // reconstruct eigenvalues of original operator
    for (int i=0;i<Nstop2;i++){
      RealD eval2_linear;

      // initial guess for the inversion: the fine-grid eigenvalue if
      // available, otherwise the previously reconstructed neighbor.
      // NOTE(review): for i>=Nstop1 this reads eval2[i-1], which at that point
      // already holds a reconstructed value — presumably intentional; confirm.
      if (i<Nstop1) {
	eval2_linear = eval1[i];
      } else {
	eval2_linear = eval2[i-1];
      }

      RealD eval2_poly = eval2[i];
      // invert the Chebyshev map to recover the operator eigenvalue
      RealD eval_reconstruct = Cheb2.approxInv(eval2_poly,eval2_linear,100,1e-10);
      std::cout << i << " Reconstructed eval = " << eval_reconstruct << " from quess " << eval2_linear << std::endl;
      eval2[i] = eval_reconstruct;
    }
    
    // as demonstrated in CG test below, best result from mixed determination
    for (int i=0;i<Nstop2;i++)
      eval3[i] = (i < Nstop1) ? eval1[i] : eval2[i];
    
    for(int i=0;i<Nstop2;i++){
      std::cout << i<<" / "<< Nstop2<< " eigenvalue "<< eval3[i] <<std::endl;
    };
    
    // write results; only the boss rank writes the (replicated) text files
    mkdir("lanczos.output",ACCESSPERMS);
    FieldVectorIO::write_compressed_vectors("lanczos.output",pr,coef,nsingle);
    if (bgrid._grid->IsBoss()) {
      write_evals((char *)"lanczos.output/eigen-values.txt",eval3);
      write_evals((char *)"lanczos.output/eigen-values.txt.linear",eval1);
      write_evals((char *)"lanczos.output/eigen-values.txt.poly",eval2);
    }

  }

  // fix up eigenvalues: refine each vector with a few rounds of inverse
  // iteration (inner CG) and recompute the Rayleigh quotient, unless smoothed
  // values were already written by a previous run.
  if (!read_evals(UCoarseGrid,(char *)"lanczos.output/eigen-values.txt.smoothed",eval3) && smoothed_eval_enabled) {

    ConjugateGradient<LatticeFermion> CG(smoothed_eval_inner_resid, smoothed_eval_inner, false);

    LatticeFermion v_i(basis[0]._grid);
    auto tmp = v_i;
    auto tmp2 = v_i;

    for (int i=smoothed_eval_begin;i<smoothed_eval_end;i++) {

      GridStopWatch gsw;

      gsw.Start();

      pr.coarseToFine(coef[i],v_i);
      v_i.checkerboard = Odd;
      
      // inverse iteration: solve, then renormalize
      for (int j=0;j<smoothed_eval_outer;j++) {
	tmp=zero;
	//pr.deflate(coef,eval3,Nstop2,v_i,tmp);
	CG(HermOp, v_i, tmp);

	v_i = 1.0 / ::sqrt( norm2(tmp) ) * tmp;
      }

      tmp = v_i;

      HermOp.HermOp(tmp,tmp2);

      // Rayleigh quotient of the smoothed vector (v_i is unit-normalized)
      RealD ev = innerProduct(tmp,tmp2).real();

      gsw.Stop();

      std::cout << GridLogMessage << "Smoothed eigenvalue " << i << " from " << eval3[i] << " to " << ev << " in " << gsw.Elapsed() << std::endl;
      //	" with effective smoother precision " << (CG.ResHistory.back() / CG.ResHistory.front() ) << std::endl;
      //      CG.ResHistory.clear();

      eval3[i] = ev;
    }

    if (bgrid._grid->IsBoss()) {
      write_evals((char *)"lanczos.output/eigen-values.txt.smoothed",eval3);
      write_evals((char *)"lanczos.output/eigen-values.txt",eval3); // also reset this to the best ones we have available
    }
  }

  // do CG test with and without deflation
  if (cg_test_enabled) {
    ConjugateGradient<LatticeFermion> CG(1.0e-8, cg_test_maxiter, false);
    LatticeFermion src_orig(bgrid._grid);
    src_orig.checkerboard = Odd;
    src_orig = 1.0;
    src_orig = src_orig * (1.0 / ::sqrt(norm2(src_orig)) );
    auto result = src_orig; 

    // undeflated solve
    result = zero;
    CG(HermOp, src_orig, result);
    //    if (UCoarseGrid->IsBoss())
    //      write_history("cg_test.undefl",CG.ResHistory);
    //    CG.ResHistory.clear();

    // deflated solve with all eigenvectors
    result = zero;
    pr.deflate(coef,eval2,Nstop2,src_orig,result);
    CG(HermOp, src_orig, result);
    //    if (UCoarseGrid->IsBoss())
    //      write_history("cg_test.defl_all",CG.ResHistory);
    //    CG.ResHistory.clear();

    // deflated solve with non-blocked eigenvectors
    result = zero;
    pr.deflate(coef,eval1,Nstop1,src_orig,result);
    CG(HermOp, src_orig, result);
    //    if (UCoarseGrid->IsBoss())
    //      write_history("cg_test.defl_full",CG.ResHistory);
    //    CG.ResHistory.clear();

    // deflated solve with all eigenvectors and original eigenvalues from proj
    result = zero;
    pr.deflate(coef,eval3,Nstop2,src_orig,result);
    CG(HermOp, src_orig, result);
    //    if (UCoarseGrid->IsBoss())
    //      write_history("cg_test.defl_all_ev3",CG.ResHistory);
    //    CG.ResHistory.clear();

  }
  
}
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
template<typename Field>
 | 
			
		||||
void quick_krylov_basis(BasisFieldVector<Field>& evec,Field& src,LinearFunction<Field>& Op,int Nstop) {
 | 
			
		||||
  Field tmp = src;
 | 
			
		||||
  Field tmp2 = tmp;
 | 
			
		||||
 | 
			
		||||
  for (int i=0;i<Nstop;i++) {
 | 
			
		||||
    GridStopWatch gsw;
 | 
			
		||||
    gsw.Start();
 | 
			
		||||
    Op(tmp,tmp2);
 | 
			
		||||
    gsw.Stop();
 | 
			
		||||
    evec.orthogonalize(tmp2,i);
 | 
			
		||||
 | 
			
		||||
    RealD nn = norm2(tmp2);
 | 
			
		||||
    nn = Grid::sqrt(nn);
 | 
			
		||||
    tmp2 = tmp2 * (1.0/nn);
 | 
			
		||||
 | 
			
		||||
    evec[i] = tmp2;
 | 
			
		||||
    tmp = tmp2;
 | 
			
		||||
    std::cout << GridLogMessage << "Quick_krylov_basis: " << i << "/" << Nstop << " timing of operator=" << gsw.Elapsed() << std::endl;
 | 
			
		||||
  }
 | 
			
		||||
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
int main (int argc, char ** argv) {
 | 
			
		||||
 | 
			
		||||
  Grid_init(&argc,&argv);
 | 
			
		||||
 | 
			
		||||
  const int MaxIt = 10000;
 | 
			
		||||
 | 
			
		||||
  int Ls;
 | 
			
		||||
  RealD mass;
 | 
			
		||||
  RealD M5;
 | 
			
		||||
  std::vector < std::complex<double>  > omega;
 | 
			
		||||
  
 | 
			
		||||
  RealD alpha1, alpha2, beta;
 | 
			
		||||
  int Npoly1, Npoly2;
 | 
			
		||||
  int Nstop1, Nstop2;
 | 
			
		||||
  int Nk1, Nk2;
 | 
			
		||||
  int Np1, Np2;
 | 
			
		||||
  int MinRes1, MinRes2;
 | 
			
		||||
  int SkipTest2, MaxApply2;
 | 
			
		||||
  bool checkpoint_basis;
 | 
			
		||||
  bool cg_test_enabled;
 | 
			
		||||
  bool exit_after_basis_calculation;
 | 
			
		||||
  bool simple_krylov_basis;
 | 
			
		||||
  int cg_test_maxiter;
 | 
			
		||||
  int nsingle; // store in single precision, the rest in FP16
 | 
			
		||||
  int max_cheb_time_ms;
 | 
			
		||||
  bool smoothed_eval_enabled;
 | 
			
		||||
  int smoothed_eval_inner;
 | 
			
		||||
  int smoothed_eval_outer;
 | 
			
		||||
  int smoothed_eval_begin;
 | 
			
		||||
  int smoothed_eval_end;
 | 
			
		||||
  RealD smoothed_eval_inner_resid;
 | 
			
		||||
 | 
			
		||||
  // vector representation
 | 
			
		||||
  std::vector<int> block_size; // 5d block size
 | 
			
		||||
 | 
			
		||||
  RealD resid1, resid2, betastp1, betastp2, basis_norm_threshold;
 | 
			
		||||
 | 
			
		||||
  std::string config;
 | 
			
		||||
  
 | 
			
		||||
  Params jp("params.txt");
 | 
			
		||||
  PADD(jp,Npoly1); PADD(jp,Npoly2);
 | 
			
		||||
  PADD(jp,max_cheb_time_ms);
 | 
			
		||||
  PADD(jp,Nstop1); PADD(jp,Nstop2); PADD(jp,MaxApply2);
 | 
			
		||||
  PADD(jp,Nk1); PADD(jp,Nk2); PADD(jp,betastp1); PADD(jp,betastp2);
 | 
			
		||||
  PADD(jp,Np1); PADD(jp,Np2); basis_norm_threshold = 1e-5; //PADD(jp,basis_norm_threshold);
 | 
			
		||||
  PADD(jp,block_size); PADD(jp,smoothed_eval_enabled); PADD(jp,smoothed_eval_inner);
 | 
			
		||||
  PADD(jp,resid1); PADD(jp,resid2); PADD(jp,smoothed_eval_outer);
 | 
			
		||||
  PADD(jp,alpha1); PADD(jp,alpha2); PADD(jp,smoothed_eval_begin);
 | 
			
		||||
  PADD(jp,MinRes1); PADD(jp,MinRes2); PADD(jp,smoothed_eval_end);
 | 
			
		||||
  PADD(jp,beta); PADD(jp,mass); PADD(jp,smoothed_eval_inner_resid);
 | 
			
		||||
  PADD(jp,omega); PADD(jp,config); 
 | 
			
		||||
  PADD(jp,M5); PADD(jp,cg_test_enabled);
 | 
			
		||||
  PADD(jp,cg_test_maxiter); PADD(jp,checkpoint_basis);
 | 
			
		||||
  PADD(jp,nsingle); PADD(jp,exit_after_basis_calculation);
 | 
			
		||||
  PADD(jp,simple_krylov_basis); PADD(jp,SkipTest2);
 | 
			
		||||
 | 
			
		||||
  Ls = (int)omega.size();
 | 
			
		||||
 | 
			
		||||
  // Grids
 | 
			
		||||
  GridCartesian         * UGrid   = SpaceTimeGrid::makeFourDimGrid(GridDefaultLatt(), GridDefaultSimd(Nd,vComplex::Nsimd()),GridDefaultMpi());
 | 
			
		||||
  GridCartesian         * UGridHP = SpaceTimeGrid::makeFourDimGrid(GridDefaultLatt(), GridDefaultSimd(Nd,vComplexD::Nsimd()),GridDefaultMpi());
 | 
			
		||||
  GridRedBlackCartesian * UrbGrid = SpaceTimeGrid::makeFourDimRedBlackGrid(UGrid);
 | 
			
		||||
  GridRedBlackCartesian * UrbGridHP = SpaceTimeGrid::makeFourDimRedBlackGrid(UGridHP);
 | 
			
		||||
  GridCartesian         * FGrid   = SpaceTimeGrid::makeFiveDimGrid(Ls,UGrid);
 | 
			
		||||
  GridCartesian         * FGridHP   = SpaceTimeGrid::makeFiveDimGrid(Ls,UGridHP);
 | 
			
		||||
  GridRedBlackCartesian * FrbGrid = SpaceTimeGrid::makeFiveDimRedBlackGrid(Ls,UGrid);
 | 
			
		||||
  GridRedBlackCartesian * FrbGridHP = SpaceTimeGrid::makeFiveDimRedBlackGrid(Ls,UGridHP);
 | 
			
		||||
 | 
			
		||||
  // Gauge field
 | 
			
		||||
  LatticeGaugeField Umu(UGrid);
 | 
			
		||||
  FieldMetaData header;
 | 
			
		||||
  NerscIO::readConfiguration(Umu,header,config);
 | 
			
		||||
  std::cout << GridLogMessage << "Lattice dimensions: " << GridDefaultLatt()
 | 
			
		||||
            << "   Ls: " << Ls << std::endl;
 | 
			
		||||
 | 
			
		||||
  // ZMobius EO Operator
 | 
			
		||||
  ZMobiusFermionR Ddwf(Umu, *FGrid, *FrbGrid, *UGrid, *UrbGrid, mass, M5, omega,1.,0.);
 | 
			
		||||
  SchurDiagTwoOperator<ZMobiusFermionR,LatticeFermion> HermOp(Ddwf);
 | 
			
		||||
 | 
			
		||||
  // Eigenvector storage
 | 
			
		||||
  const int Nm1 = Np1 + Nk1;
 | 
			
		||||
  const int Nm2 = Np2 + Nk2; // maximum number of vectors we need to keep
 | 
			
		||||
  std::cout << GridLogMessage << "Keep " << Nm1 << " full vectors" << std::endl;
 | 
			
		||||
  std::cout << GridLogMessage << "Keep " << Nm2 << " total vectors" << std::endl;
 | 
			
		||||
  assert(Nm2 >= Nm1);
 | 
			
		||||
  BasisFieldVector<LatticeFermion> evec(Nm1,FrbGrid); // start off with keeping full vectors
 | 
			
		||||
 | 
			
		||||
  // First and second cheby
 | 
			
		||||
  Chebyshev<LatticeFermion> Cheb1(alpha1,beta,Npoly1);
 | 
			
		||||
  FunctionHermOp<LatticeFermion> Op1(Cheb1,HermOp);
 | 
			
		||||
  PlainHermOp<LatticeFermion> Op1test(HermOp);
 | 
			
		||||
 | 
			
		||||
  // Eigenvalue storage
 | 
			
		||||
  std::vector<RealD>          eval1(evec.size());
 | 
			
		||||
 | 
			
		||||
  // Construct source vector
 | 
			
		||||
  LatticeFermion    src(FrbGrid);
 | 
			
		||||
  {
 | 
			
		||||
    src=1.0;
 | 
			
		||||
    src.checkerboard = Odd;
 | 
			
		||||
 | 
			
		||||
    // normalize
 | 
			
		||||
    RealD nn = norm2(src);
 | 
			
		||||
    nn = Grid::sqrt(nn);
 | 
			
		||||
    src = src * (1.0/nn);
 | 
			
		||||
  }
 | 
			
		||||
 | 
			
		||||
  // Do a benchmark and a quick exit if performance is too little (ugly but needed due to performance fluctuations)
 | 
			
		||||
  if (max_cheb_time_ms) {
 | 
			
		||||
    // one round of warmup
 | 
			
		||||
    auto tmp = src;
 | 
			
		||||
    GridStopWatch gsw1,gsw2;
 | 
			
		||||
    gsw1.Start();
 | 
			
		||||
    Cheb1(HermOp,src,tmp);
 | 
			
		||||
    gsw1.Stop();
 | 
			
		||||
    Ddwf.ZeroCounters();
 | 
			
		||||
    gsw2.Start();
 | 
			
		||||
    Cheb1(HermOp,src,tmp);
 | 
			
		||||
    gsw2.Stop();
 | 
			
		||||
    Ddwf.Report();
 | 
			
		||||
    std::cout << GridLogMessage << "Performance check; warmup = " << gsw1.Elapsed() << "  test = " << gsw2.Elapsed() << std::endl;
 | 
			
		||||
    int ms = (int)(gsw2.useconds()/1e3);
 | 
			
		||||
    if (ms > max_cheb_time_ms) {
 | 
			
		||||
      std::cout << GridLogMessage << "Performance too poor: " << ms << " ms, cutoff = " << max_cheb_time_ms << " ms" << std::endl;
 | 
			
		||||
      Grid_finalize();
 | 
			
		||||
      return 2;
 | 
			
		||||
    }
 | 
			
		||||
 | 
			
		||||
  }
 | 
			
		||||
 | 
			
		||||
  // First round of Lanczos to get low mode basis
 | 
			
		||||
  BlockImplicitlyRestartedLanczos<LatticeFermion> IRL1(Op1,Op1test,Nstop1,Nk1,Nm1,resid1,betastp1,MaxIt,MinRes1);
 | 
			
		||||
  int Nconv;
 | 
			
		||||
 | 
			
		||||
  char tag[1024];
 | 
			
		||||
  if (!FieldVectorIO::read_argonne(evec,(char *)"checkpoint") || !read_evals(UGrid,(char *)"checkpoint/eigen-values.txt",eval1)) {
 | 
			
		||||
 | 
			
		||||
    if (simple_krylov_basis) {
 | 
			
		||||
      quick_krylov_basis(evec,src,Op1,Nstop1);
 | 
			
		||||
    } else {
 | 
			
		||||
      IRL1.calc(eval1,evec,src,Nconv,false,1);
 | 
			
		||||
    }
 | 
			
		||||
    evec.resize(Nstop1); // and throw away superfluous
 | 
			
		||||
    eval1.resize(Nstop1);
 | 
			
		||||
    if (checkpoint_basis)
 | 
			
		||||
      FieldVectorIO::write_argonne(evec,(char *)"checkpoint");
 | 
			
		||||
    if (UGrid->IsBoss() && checkpoint_basis)
 | 
			
		||||
      write_evals((char *)"checkpoint/eigen-values.txt",eval1);
 | 
			
		||||
 | 
			
		||||
    Ddwf.Report();
 | 
			
		||||
 | 
			
		||||
    if (exit_after_basis_calculation) {
 | 
			
		||||
      Grid_finalize();
 | 
			
		||||
      return 0;
 | 
			
		||||
    }
 | 
			
		||||
  }
 | 
			
		||||
 | 
			
		||||
  // now test eigenvectors
 | 
			
		||||
  if (!simple_krylov_basis) {
 | 
			
		||||
    for (int i=0;i<Nstop1;i++){
 | 
			
		||||
      auto B = evec[i];
 | 
			
		||||
      auto tmp = B;
 | 
			
		||||
      auto v = B;
 | 
			
		||||
      
 | 
			
		||||
      {
 | 
			
		||||
	HermOp.HermOp(B,v);
 | 
			
		||||
	
 | 
			
		||||
	RealD vnum = real(innerProduct(B,v)); // HermOp.
 | 
			
		||||
	RealD vden = norm2(B);
 | 
			
		||||
	RealD vv0 = norm2(v);
 | 
			
		||||
	RealD eval2 = vnum/vden;
 | 
			
		||||
	v -= eval2*B;
 | 
			
		||||
	RealD vv = norm2(v);
 | 
			
		||||
	
 | 
			
		||||
	std::cout << i << " OP eval = " << eval2 << " (" << eval1[i] << ") "
 | 
			
		||||
		  << "res2 = " << vv << " norm2 = " << norm2(B) << std::endl;
 | 
			
		||||
      }
 | 
			
		||||
    }
 | 
			
		||||
  }
 | 
			
		||||
 | 
			
		||||
  // do second step only if needed
 | 
			
		||||
  if (Nstop1 <= Nstop2) {
 | 
			
		||||
    
 | 
			
		||||
    // Now setup blocking
 | 
			
		||||
    assert(evec.size() == Nstop1);
 | 
			
		||||
    BlockedGrid<LatticeFermion> bgrid(FrbGrid, block_size);
 | 
			
		||||
    BlockProjector<LatticeFermion> pr(evec,bgrid);
 | 
			
		||||
    pr.createOrthonormalBasis(basis_norm_threshold);
 | 
			
		||||
    pr.createOrthonormalBasis(basis_norm_threshold); // another round due to precision issues created by local coherence
 | 
			
		||||
 | 
			
		||||
    constexpr int common_basis_sizes[] = { 60, 250, 400 };
 | 
			
		||||
    constexpr int n_common_basis_sizes = sizeof(common_basis_sizes) / sizeof(common_basis_sizes[0]);
 | 
			
		||||
    switch (Nstop1) {
 | 
			
		||||
#define BASIS(n) case common_basis_sizes[n]:\
 | 
			
		||||
      CoarseGridLanczos<LatticeFermion,common_basis_sizes[n]>\
 | 
			
		||||
	(pr,alpha2,beta,Npoly2,Nstop2,Nk2,Nm2,resid2,betastp2,MaxIt,MinRes2,HermOp,eval1, \
 | 
			
		||||
	 cg_test_enabled,cg_test_maxiter,nsingle,SkipTest2, \
 | 
			
		||||
	 MaxApply2,smoothed_eval_enabled,smoothed_eval_inner,smoothed_eval_outer, \
 | 
			
		||||
	 smoothed_eval_begin,smoothed_eval_end,smoothed_eval_inner_resid); break;
 | 
			
		||||
      BASIS(0);
 | 
			
		||||
      BASIS(1);
 | 
			
		||||
      BASIS(2);
 | 
			
		||||
    default:
 | 
			
		||||
      std::cout << GridLogMessage << "Basis size " << Nstop1 << " must be added at compile-time" << std::endl;
 | 
			
		||||
      std::cout << GridLogMessage << "Currently available sizes: " << std::endl;
 | 
			
		||||
      for (int i=0;i<n_common_basis_sizes;i++) {
 | 
			
		||||
	std::cout << GridLogMessage << "  " << common_basis_sizes[i] << std::endl;
 | 
			
		||||
      }
 | 
			
		||||
    }
 | 
			
		||||
 | 
			
		||||
  }
 | 
			
		||||
    
 | 
			
		||||
  Grid_finalize();
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
@@ -38,7 +38,7 @@ int main (int argc, char ** argv)
 | 
			
		||||
  typedef typename DomainWallFermionR::ComplexField ComplexField; 
 | 
			
		||||
  typename DomainWallFermionR::ImplParams params; 
 | 
			
		||||
 | 
			
		||||
  const int Ls=8;
 | 
			
		||||
  const int Ls=4;
 | 
			
		||||
 | 
			
		||||
  Grid_init(&argc,&argv);
 | 
			
		||||
 | 
			
		||||
@@ -47,29 +47,24 @@ int main (int argc, char ** argv)
 | 
			
		||||
  std::vector<int> mpi_layout  = GridDefaultMpi();
 | 
			
		||||
  std::vector<int> mpi_split (mpi_layout.size(),1);
 | 
			
		||||
 | 
			
		||||
  std::cout << "UGrid (world root)"<<std::endl;
 | 
			
		||||
  GridCartesian         * UGrid   = SpaceTimeGrid::makeFourDimGrid(GridDefaultLatt(), GridDefaultSimd(Nd,vComplex::Nsimd()),GridDefaultMpi());
 | 
			
		||||
 | 
			
		||||
  std::cout << "FGrid (child of UGrid)"<<std::endl;
 | 
			
		||||
  GridCartesian         * FGrid   = SpaceTimeGrid::makeFiveDimGrid(Ls,UGrid);
 | 
			
		||||
  GridRedBlackCartesian * rbGrid  = SpaceTimeGrid::makeFourDimRedBlackGrid(UGrid);
 | 
			
		||||
  GridRedBlackCartesian * FrbGrid = SpaceTimeGrid::makeFiveDimRedBlackGrid(Ls,UGrid);
 | 
			
		||||
 | 
			
		||||
  int nrhs = UGrid->RankCount() ;
 | 
			
		||||
 | 
			
		||||
  /////////////////////////////////////////////
 | 
			
		||||
  // Split into 1^4 mpi communicators
 | 
			
		||||
  /////////////////////////////////////////////
 | 
			
		||||
  std::cout << "SGrid (world root)"<<std::endl;
 | 
			
		||||
  GridCartesian         * SGrid = new GridCartesian(GridDefaultLatt(),
 | 
			
		||||
						    GridDefaultSimd(Nd,vComplex::Nsimd()),
 | 
			
		||||
						    mpi_split,
 | 
			
		||||
						    *UGrid); 
 | 
			
		||||
 | 
			
		||||
  GridCartesian         * SFGrid   = SpaceTimeGrid::makeFiveDimGrid(Ls,SGrid);
 | 
			
		||||
  std::cout << "SFGrid"<<std::endl;
 | 
			
		||||
  GridRedBlackCartesian * SrbGrid  = SpaceTimeGrid::makeFourDimRedBlackGrid(SGrid);
 | 
			
		||||
  std::cout << "SrbGrid"<<std::endl;
 | 
			
		||||
  GridRedBlackCartesian * SFrbGrid = SpaceTimeGrid::makeFiveDimRedBlackGrid(Ls,SGrid);
 | 
			
		||||
  std::cout << "SFrbGrid"<<std::endl;
 | 
			
		||||
 | 
			
		||||
  ///////////////////////////////////////////////
 | 
			
		||||
  // Set up the problem as a 4d spreadout job
 | 
			
		||||
@@ -79,10 +74,12 @@ int main (int argc, char ** argv)
 | 
			
		||||
  GridParallelRNG pRNG(UGrid );  pRNG.SeedFixedIntegers(seeds);
 | 
			
		||||
  GridParallelRNG pRNG5(FGrid);  pRNG5.SeedFixedIntegers(seeds);
 | 
			
		||||
  std::vector<FermionField>    src(nrhs,FGrid);
 | 
			
		||||
  std::vector<FermionField> src_chk(nrhs,FGrid);
 | 
			
		||||
  std::vector<FermionField> result(nrhs,FGrid);
 | 
			
		||||
  FermionField tmp(FGrid);
 | 
			
		||||
 | 
			
		||||
  for(int s=0;s<nrhs;s++) random(pRNG5,src[s]);
 | 
			
		||||
  for(int s=0;s<nrhs;s++) result[s] = zero;
 | 
			
		||||
  for(int s=0;s<nrhs;s++) result[s]=zero;
 | 
			
		||||
 | 
			
		||||
  LatticeGaugeField Umu(UGrid); SU3::HotConfiguration(pRNG,Umu);
 | 
			
		||||
 | 
			
		||||
@@ -99,6 +96,8 @@ int main (int argc, char ** argv)
 | 
			
		||||
  int me = UGrid->ThisRank();
 | 
			
		||||
  LatticeGaugeField s_Umu(SGrid);
 | 
			
		||||
  FermionField s_src(SFGrid);
 | 
			
		||||
  FermionField s_src_split(SFGrid);
 | 
			
		||||
  FermionField s_tmp(SFGrid);
 | 
			
		||||
  FermionField s_res(SFGrid);
 | 
			
		||||
 | 
			
		||||
  {
 | 
			
		||||
@@ -157,6 +156,24 @@ int main (int argc, char ** argv)
 | 
			
		||||
    FGrid->Barrier();
 | 
			
		||||
  }
 | 
			
		||||
 | 
			
		||||
  ///////////////////////////////////////////////////////////////
 | 
			
		||||
  // split the source out using MPI instead of I/O
 | 
			
		||||
  ///////////////////////////////////////////////////////////////
 | 
			
		||||
  std::cout << GridLogMessage << " Splitting the grid data "<<std::endl;
 | 
			
		||||
  Grid_split  (src,s_src_split);
 | 
			
		||||
  std::cout << GridLogMessage << " Finished splitting the grid data "<<std::endl;
 | 
			
		||||
  for(int n=0;n<nrhs;n++){
 | 
			
		||||
    std::cout <<GridLogMessage<<"Full "<< n <<" "<< norm2(src[n])<<std::endl;
 | 
			
		||||
  }
 | 
			
		||||
  s_tmp = s_src_split - s_src;
 | 
			
		||||
  for(int n=0;n<nrhs;n++){
 | 
			
		||||
    FGrid->Barrier();
 | 
			
		||||
    if ( n==me ) {
 | 
			
		||||
      std::cerr << GridLogMessage<<"Split "<< me << " " << norm2(s_src_split) << " " << norm2(s_src)<< " diff " << norm2(s_tmp)<<std::endl;
 | 
			
		||||
    }
 | 
			
		||||
    FGrid->Barrier();
 | 
			
		||||
  }
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
  ///////////////////////////////////////////////////////////////
 | 
			
		||||
  // Set up N-solvers as trivially parallel
 | 
			
		||||
@@ -164,6 +181,7 @@ int main (int argc, char ** argv)
 | 
			
		||||
 | 
			
		||||
  RealD mass=0.01;
 | 
			
		||||
  RealD M5=1.8;
 | 
			
		||||
  DomainWallFermionR Dchk(Umu,*FGrid,*FrbGrid,*UGrid,*rbGrid,mass,M5);
 | 
			
		||||
  DomainWallFermionR Ddwf(s_Umu,*SFGrid,*SFrbGrid,*SGrid,*SrbGrid,mass,M5);
 | 
			
		||||
 | 
			
		||||
  std::cout << GridLogMessage << "****************************************************************** "<<std::endl;
 | 
			
		||||
@@ -171,25 +189,41 @@ int main (int argc, char ** argv)
 | 
			
		||||
  std::cout << GridLogMessage << "****************************************************************** "<<std::endl;
 | 
			
		||||
 | 
			
		||||
  MdagMLinearOperator<DomainWallFermionR,FermionField> HermOp(Ddwf);
 | 
			
		||||
  MdagMLinearOperator<DomainWallFermionR,FermionField> HermOpCk(Dchk);
 | 
			
		||||
  ConjugateGradient<FermionField> CG((1.0e-8/(me+1)),10000);
 | 
			
		||||
  s_res = zero;
 | 
			
		||||
  CG(HermOp,s_src,s_res);
 | 
			
		||||
 | 
			
		||||
  ///////////////////////////////////////
 | 
			
		||||
  // Share the information
 | 
			
		||||
  ///////////////////////////////////////
 | 
			
		||||
  /////////////////////////////////////////////////////////////
 | 
			
		||||
  // Report how long they all took
 | 
			
		||||
  /////////////////////////////////////////////////////////////
 | 
			
		||||
  std::vector<uint32_t> iterations(nrhs,0);
 | 
			
		||||
  iterations[me] = CG.IterationsToComplete;
 | 
			
		||||
 | 
			
		||||
  for(int n=0;n<nrhs;n++){
 | 
			
		||||
    UGrid->GlobalSum(iterations[n]);
 | 
			
		||||
    std::cout << GridLogMessage<<" Rank "<<n<<" "<< iterations[n]<<" CG iterations"<<std::endl;
 | 
			
		||||
  }
 | 
			
		||||
 | 
			
		||||
  /////////////////////////////////////////////////////////////
 | 
			
		||||
  // Report how long they all took
 | 
			
		||||
  // Gather and residual check on the results
 | 
			
		||||
  /////////////////////////////////////////////////////////////
 | 
			
		||||
  for(int r=0;r<nrhs;r++){
 | 
			
		||||
    std::cout << GridLogMessage<<" Rank "<<r<<" "<< iterations[r]<<" CG iterations"<<std::endl;
 | 
			
		||||
  std::cout << GridLogMessage<< "Unsplitting the result"<<std::endl;
 | 
			
		||||
  Grid_unsplit(result,s_res);
 | 
			
		||||
  /*
 | 
			
		||||
  Grid_unsplit(src_chk,s_src);
 | 
			
		||||
  for(int n=0;n<nrhs;n++){
 | 
			
		||||
    tmp = src[n]-src_chk[n];
 | 
			
		||||
    std::cout << " src_chk "<<n<<" "<<norm2(src_chk[n])<<" " <<norm2(src[n])<<" " <<norm2(tmp)<< std::endl;
 | 
			
		||||
    std::cout << " diff " <<tmp<<std::endl;
 | 
			
		||||
  }
 | 
			
		||||
  */
 | 
			
		||||
 | 
			
		||||
  std::cout << GridLogMessage<< "Checking the residuals"<<std::endl;
 | 
			
		||||
  for(int n=0;n<nrhs;n++){
 | 
			
		||||
    HermOpCk.HermOp(result[n],tmp); tmp = tmp - src[n];
 | 
			
		||||
    std::cout << GridLogMessage<<" resid["<<n<<"]  "<< norm2(tmp)<<std::endl;
 | 
			
		||||
  }
 | 
			
		||||
 | 
			
		||||
  Grid_finalize();
 | 
			
		||||
}
 | 
			
		||||
 
 | 
			
		||||
							
								
								
									
										144
									
								
								tests/solver/Test_dwf_mrhs_cg_mpi.cc
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										144
									
								
								tests/solver/Test_dwf_mrhs_cg_mpi.cc
									
									
									
									
									
										Normal file
									
								
							@@ -0,0 +1,144 @@
 | 
			
		||||
    /*************************************************************************************
 | 
			
		||||
 | 
			
		||||
    Grid physics library, www.github.com/paboyle/Grid 
 | 
			
		||||
 | 
			
		||||
    Source file: ./tests/Test_dwf_mrhs_cg.cc
 | 
			
		||||
 | 
			
		||||
    Copyright (C) 2015
 | 
			
		||||
 | 
			
		||||
Author: Peter Boyle <paboyle@ph.ed.ac.uk>
 | 
			
		||||
 | 
			
		||||
    This program is free software; you can redistribute it and/or modify
 | 
			
		||||
    it under the terms of the GNU General Public License as published by
 | 
			
		||||
    the Free Software Foundation; either version 2 of the License, or
 | 
			
		||||
    (at your option) any later version.
 | 
			
		||||
 | 
			
		||||
    This program is distributed in the hope that it will be useful,
 | 
			
		||||
    but WITHOUT ANY WARRANTY; without even the implied warranty of
 | 
			
		||||
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 | 
			
		||||
    GNU General Public License for more details.
 | 
			
		||||
 | 
			
		||||
    You should have received a copy of the GNU General Public License along
 | 
			
		||||
    with this program; if not, write to the Free Software Foundation, Inc.,
 | 
			
		||||
    51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 | 
			
		||||
 | 
			
		||||
    See the full license in the file "LICENSE" in the top level distribution directory
 | 
			
		||||
    *************************************************************************************/
 | 
			
		||||
    /*  END LEGAL */
 | 
			
		||||
#include <Grid/Grid.h>
 | 
			
		||||
#include <Grid/algorithms/iterative/BlockConjugateGradient.h>
 | 
			
		||||
 | 
			
		||||
using namespace std;
 | 
			
		||||
using namespace Grid;
 | 
			
		||||
using namespace Grid::QCD;
 | 
			
		||||
 | 
			
		||||
int main (int argc, char ** argv)
 | 
			
		||||
{
 | 
			
		||||
  typedef typename DomainWallFermionR::FermionField FermionField; 
 | 
			
		||||
  typedef typename DomainWallFermionR::ComplexField ComplexField; 
 | 
			
		||||
  typename DomainWallFermionR::ImplParams params; 
 | 
			
		||||
 | 
			
		||||
  const int Ls=4;
 | 
			
		||||
 | 
			
		||||
  Grid_init(&argc,&argv);
 | 
			
		||||
 | 
			
		||||
  std::vector<int> latt_size   = GridDefaultLatt();
 | 
			
		||||
  std::vector<int> simd_layout = GridDefaultSimd(Nd,vComplex::Nsimd());
 | 
			
		||||
  std::vector<int> mpi_layout  = GridDefaultMpi();
 | 
			
		||||
  std::vector<int> mpi_split (mpi_layout.size(),1);
 | 
			
		||||
 | 
			
		||||
  GridCartesian         * UGrid   = SpaceTimeGrid::makeFourDimGrid(GridDefaultLatt(), GridDefaultSimd(Nd,vComplex::Nsimd()),GridDefaultMpi());
 | 
			
		||||
  GridCartesian         * FGrid   = SpaceTimeGrid::makeFiveDimGrid(Ls,UGrid);
 | 
			
		||||
  GridRedBlackCartesian * rbGrid  = SpaceTimeGrid::makeFourDimRedBlackGrid(UGrid);
 | 
			
		||||
  GridRedBlackCartesian * FrbGrid = SpaceTimeGrid::makeFiveDimRedBlackGrid(Ls,UGrid);
 | 
			
		||||
 | 
			
		||||
  int nrhs = UGrid->RankCount() ;
 | 
			
		||||
 | 
			
		||||
  /////////////////////////////////////////////
 | 
			
		||||
  // Split into 1^4 mpi communicators
 | 
			
		||||
  /////////////////////////////////////////////
 | 
			
		||||
  GridCartesian         * SGrid = new GridCartesian(GridDefaultLatt(),
 | 
			
		||||
						    GridDefaultSimd(Nd,vComplex::Nsimd()),
 | 
			
		||||
						    mpi_split,
 | 
			
		||||
						    *UGrid); 
 | 
			
		||||
 | 
			
		||||
  GridCartesian         * SFGrid   = SpaceTimeGrid::makeFiveDimGrid(Ls,SGrid);
 | 
			
		||||
  GridRedBlackCartesian * SrbGrid  = SpaceTimeGrid::makeFourDimRedBlackGrid(SGrid);
 | 
			
		||||
  GridRedBlackCartesian * SFrbGrid = SpaceTimeGrid::makeFiveDimRedBlackGrid(Ls,SGrid);
 | 
			
		||||
 | 
			
		||||
  ///////////////////////////////////////////////
 | 
			
		||||
  // Set up the problem as a 4d spreadout job
 | 
			
		||||
  ///////////////////////////////////////////////
 | 
			
		||||
  std::vector<int> seeds({1,2,3,4});
 | 
			
		||||
 | 
			
		||||
  GridParallelRNG pRNG(UGrid );  pRNG.SeedFixedIntegers(seeds);
 | 
			
		||||
  GridParallelRNG pRNG5(FGrid);  pRNG5.SeedFixedIntegers(seeds);
 | 
			
		||||
  std::vector<FermionField>    src(nrhs,FGrid);
 | 
			
		||||
  std::vector<FermionField> src_chk(nrhs,FGrid);
 | 
			
		||||
  std::vector<FermionField> result(nrhs,FGrid);
 | 
			
		||||
  FermionField tmp(FGrid);
 | 
			
		||||
 | 
			
		||||
  for(int s=0;s<nrhs;s++) random(pRNG5,src[s]);
 | 
			
		||||
  for(int s=0;s<nrhs;s++) result[s]=zero;
 | 
			
		||||
 | 
			
		||||
  LatticeGaugeField Umu(UGrid); SU3::HotConfiguration(pRNG,Umu);
 | 
			
		||||
 | 
			
		||||
  /////////////////
 | 
			
		||||
  // MPI only sends
 | 
			
		||||
  /////////////////
 | 
			
		||||
  int me = UGrid->ThisRank();
 | 
			
		||||
 | 
			
		||||
  LatticeGaugeField s_Umu(SGrid);
 | 
			
		||||
  FermionField s_src(SFGrid);
 | 
			
		||||
  FermionField s_tmp(SFGrid);
 | 
			
		||||
  FermionField s_res(SFGrid);
 | 
			
		||||
 | 
			
		||||
  ///////////////////////////////////////////////////////////////
 | 
			
		||||
  // split the source out using MPI instead of I/O
 | 
			
		||||
  ///////////////////////////////////////////////////////////////
 | 
			
		||||
  Grid_split  (Umu,s_Umu);
 | 
			
		||||
  Grid_split  (src,s_src);
 | 
			
		||||
 | 
			
		||||
  ///////////////////////////////////////////////////////////////
 | 
			
		||||
  // Set up N-solvers as trivially parallel
 | 
			
		||||
  ///////////////////////////////////////////////////////////////
 | 
			
		||||
  RealD mass=0.01;
 | 
			
		||||
  RealD M5=1.8;
 | 
			
		||||
  DomainWallFermionR Dchk(Umu,*FGrid,*FrbGrid,*UGrid,*rbGrid,mass,M5);
 | 
			
		||||
  DomainWallFermionR Ddwf(s_Umu,*SFGrid,*SFrbGrid,*SGrid,*SrbGrid,mass,M5);
 | 
			
		||||
 | 
			
		||||
  std::cout << GridLogMessage << "****************************************************************** "<<std::endl;
 | 
			
		||||
  std::cout << GridLogMessage << " Calling DWF CG "<<std::endl;
 | 
			
		||||
  std::cout << GridLogMessage << "****************************************************************** "<<std::endl;
 | 
			
		||||
 | 
			
		||||
  MdagMLinearOperator<DomainWallFermionR,FermionField> HermOp(Ddwf);
 | 
			
		||||
  MdagMLinearOperator<DomainWallFermionR,FermionField> HermOpCk(Dchk);
 | 
			
		||||
  ConjugateGradient<FermionField> CG((1.0e-8/(me+1)),10000);
 | 
			
		||||
  s_res = zero;
 | 
			
		||||
  CG(HermOp,s_src,s_res);
 | 
			
		||||
 | 
			
		||||
  /////////////////////////////////////////////////////////////
 | 
			
		||||
  // Report how long they all took
 | 
			
		||||
  /////////////////////////////////////////////////////////////
 | 
			
		||||
  std::vector<uint32_t> iterations(nrhs,0);
 | 
			
		||||
  iterations[me] = CG.IterationsToComplete;
 | 
			
		||||
 | 
			
		||||
  for(int n=0;n<nrhs;n++){
 | 
			
		||||
    UGrid->GlobalSum(iterations[n]);
 | 
			
		||||
    std::cout << GridLogMessage<<" Rank "<<n<<" "<< iterations[n]<<" CG iterations"<<std::endl;
 | 
			
		||||
  }
 | 
			
		||||
 | 
			
		||||
  /////////////////////////////////////////////////////////////
 | 
			
		||||
  // Gather and residual check on the results
 | 
			
		||||
  /////////////////////////////////////////////////////////////
 | 
			
		||||
  std::cout << GridLogMessage<< "Unsplitting the result"<<std::endl;
 | 
			
		||||
  Grid_unsplit(result,s_res);
 | 
			
		||||
 | 
			
		||||
  std::cout << GridLogMessage<< "Checking the residuals"<<std::endl;
 | 
			
		||||
  for(int n=0;n<nrhs;n++){
 | 
			
		||||
    HermOpCk.HermOp(result[n],tmp); tmp = tmp - src[n];
 | 
			
		||||
    std::cout << GridLogMessage<<" resid["<<n<<"]  "<< norm2(tmp)<<std::endl;
 | 
			
		||||
  }
 | 
			
		||||
 | 
			
		||||
  Grid_finalize();
 | 
			
		||||
}
 | 
			
		||||
							
								
								
									
										163
									
								
								tests/solver/Test_dwf_mrhs_cg_mpieo.cc
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										163
									
								
								tests/solver/Test_dwf_mrhs_cg_mpieo.cc
									
									
									
									
									
										Normal file
									
								
							@@ -0,0 +1,163 @@
 | 
			
		||||
    /*************************************************************************************
 | 
			
		||||
 | 
			
		||||
    Grid physics library, www.github.com/paboyle/Grid 
 | 
			
		||||
 | 
			
		||||
    Source file: ./tests/Test_dwf_mrhs_cg.cc
 | 
			
		||||
 | 
			
		||||
    Copyright (C) 2015
 | 
			
		||||
 | 
			
		||||
Author: Peter Boyle <paboyle@ph.ed.ac.uk>
 | 
			
		||||
 | 
			
		||||
    This program is free software; you can redistribute it and/or modify
 | 
			
		||||
    it under the terms of the GNU General Public License as published by
 | 
			
		||||
    the Free Software Foundation; either version 2 of the License, or
 | 
			
		||||
    (at your option) any later version.
 | 
			
		||||
 | 
			
		||||
    This program is distributed in the hope that it will be useful,
 | 
			
		||||
    but WITHOUT ANY WARRANTY; without even the implied warranty of
 | 
			
		||||
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 | 
			
		||||
    GNU General Public License for more details.
 | 
			
		||||
 | 
			
		||||
    You should have received a copy of the GNU General Public License along
 | 
			
		||||
    with this program; if not, write to the Free Software Foundation, Inc.,
 | 
			
		||||
    51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 | 
			
		||||
 | 
			
		||||
    See the full license in the file "LICENSE" in the top level distribution directory
 | 
			
		||||
    *************************************************************************************/
 | 
			
		||||
    /*  END LEGAL */
 | 
			
		||||
#include <Grid/Grid.h>
 | 
			
		||||
#include <Grid/algorithms/iterative/BlockConjugateGradient.h>
 | 
			
		||||
 | 
			
		||||
using namespace std;
 | 
			
		||||
using namespace Grid;
 | 
			
		||||
using namespace Grid::QCD;
 | 
			
		||||
 | 
			
		||||
int main (int argc, char ** argv)
 | 
			
		||||
{
 | 
			
		||||
  typedef typename DomainWallFermionR::FermionField FermionField; 
 | 
			
		||||
  typedef typename DomainWallFermionR::ComplexField ComplexField; 
 | 
			
		||||
  typename DomainWallFermionR::ImplParams params; 
 | 
			
		||||
 | 
			
		||||
  const int Ls=4;
 | 
			
		||||
 | 
			
		||||
  Grid_init(&argc,&argv);
 | 
			
		||||
 | 
			
		||||
  std::vector<int> latt_size   = GridDefaultLatt();
 | 
			
		||||
  std::vector<int> simd_layout = GridDefaultSimd(Nd,vComplex::Nsimd());
 | 
			
		||||
  std::vector<int> mpi_layout  = GridDefaultMpi();
 | 
			
		||||
  std::vector<int> mpi_split (mpi_layout.size(),1);
 | 
			
		||||
 | 
			
		||||
  GridCartesian         * UGrid   = SpaceTimeGrid::makeFourDimGrid(GridDefaultLatt(), GridDefaultSimd(Nd,vComplex::Nsimd()),GridDefaultMpi());
 | 
			
		||||
  GridCartesian         * FGrid   = SpaceTimeGrid::makeFiveDimGrid(Ls,UGrid);
 | 
			
		||||
  GridRedBlackCartesian * rbGrid  = SpaceTimeGrid::makeFourDimRedBlackGrid(UGrid);
 | 
			
		||||
  GridRedBlackCartesian * FrbGrid = SpaceTimeGrid::makeFiveDimRedBlackGrid(Ls,UGrid);
 | 
			
		||||
 | 
			
		||||
  int nrhs = UGrid->RankCount() ;
 | 
			
		||||
 | 
			
		||||
  /////////////////////////////////////////////
 | 
			
		||||
  // Split into 1^4 mpi communicators
 | 
			
		||||
  /////////////////////////////////////////////
 | 
			
		||||
  GridCartesian         * SGrid = new GridCartesian(GridDefaultLatt(),
 | 
			
		||||
						    GridDefaultSimd(Nd,vComplex::Nsimd()),
 | 
			
		||||
						    mpi_split,
 | 
			
		||||
						    *UGrid); 
 | 
			
		||||
 | 
			
		||||
  GridCartesian         * SFGrid   = SpaceTimeGrid::makeFiveDimGrid(Ls,SGrid);
 | 
			
		||||
  GridRedBlackCartesian * SrbGrid  = SpaceTimeGrid::makeFourDimRedBlackGrid(SGrid);
 | 
			
		||||
  GridRedBlackCartesian * SFrbGrid = SpaceTimeGrid::makeFiveDimRedBlackGrid(Ls,SGrid);
 | 
			
		||||
 | 
			
		||||
  ///////////////////////////////////////////////
 | 
			
		||||
  // Set up the problem as a 4d spreadout job
 | 
			
		||||
  ///////////////////////////////////////////////
 | 
			
		||||
  std::vector<int> seeds({1,2,3,4});
 | 
			
		||||
 | 
			
		||||
  GridParallelRNG pRNG(UGrid );  pRNG.SeedFixedIntegers(seeds);
 | 
			
		||||
  GridParallelRNG pRNG5(FGrid);  pRNG5.SeedFixedIntegers(seeds);
 | 
			
		||||
  std::vector<FermionField>    src(nrhs,FGrid);
 | 
			
		||||
  std::vector<FermionField> src_chk(nrhs,FGrid);
 | 
			
		||||
  std::vector<FermionField> result(nrhs,FGrid);
 | 
			
		||||
  FermionField tmp(FGrid);
 | 
			
		||||
 | 
			
		||||
  std::vector<FermionField> src_e(nrhs,FrbGrid);
 | 
			
		||||
  std::vector<FermionField> src_o(nrhs,FrbGrid);
 | 
			
		||||
 | 
			
		||||
  for(int s=0;s<nrhs;s++) random(pRNG5,src[s]);
 | 
			
		||||
  for(int s=0;s<nrhs;s++) result[s]=zero;
 | 
			
		||||
 | 
			
		||||
  LatticeGaugeField Umu(UGrid); SU3::HotConfiguration(pRNG,Umu);
 | 
			
		||||
 | 
			
		||||
  /////////////////
 | 
			
		||||
  // MPI only sends
 | 
			
		||||
  /////////////////
 | 
			
		||||
  int me = UGrid->ThisRank();
 | 
			
		||||
 | 
			
		||||
  LatticeGaugeField s_Umu(SGrid);
 | 
			
		||||
  FermionField s_src(SFGrid);
 | 
			
		||||
  FermionField s_src_e(SFrbGrid);
 | 
			
		||||
  FermionField s_src_o(SFrbGrid);
 | 
			
		||||
  FermionField s_tmp(SFGrid);
 | 
			
		||||
  FermionField s_res(SFGrid);
 | 
			
		||||
 | 
			
		||||
  ///////////////////////////////////////////////////////////////
 | 
			
		||||
  // split the source out using MPI instead of I/O
 | 
			
		||||
  ///////////////////////////////////////////////////////////////
 | 
			
		||||
  Grid_split  (Umu,s_Umu);
 | 
			
		||||
  Grid_split  (src,s_src);
 | 
			
		||||
 | 
			
		||||
  ///////////////////////////////////////////////////////////////
 | 
			
		||||
  // Check even odd cases
 | 
			
		||||
  ///////////////////////////////////////////////////////////////
 | 
			
		||||
  for(int s=0;s<nrhs;s++){
 | 
			
		||||
    pickCheckerboard(Odd , src_o[s], src[s]);
 | 
			
		||||
    pickCheckerboard(Even, src_e[s], src[s]);
 | 
			
		||||
  }
 | 
			
		||||
  Grid_split  (src_e,s_src_e);
 | 
			
		||||
  Grid_split  (src_o,s_src_o);
 | 
			
		||||
  setCheckerboard(s_tmp, s_src_o);
 | 
			
		||||
  setCheckerboard(s_tmp, s_src_e);
 | 
			
		||||
  s_tmp = s_tmp - s_src;
 | 
			
		||||
  std::cout << GridLogMessage<<" EvenOdd Difference " <<norm2(s_tmp)<<std::endl;
 | 
			
		||||
 | 
			
		||||
  ///////////////////////////////////////////////////////////////
 | 
			
		||||
  // Set up N-solvers as trivially parallel
 | 
			
		||||
  ///////////////////////////////////////////////////////////////
 | 
			
		||||
  RealD mass=0.01;
 | 
			
		||||
  RealD M5=1.8;
 | 
			
		||||
  DomainWallFermionR Dchk(Umu,*FGrid,*FrbGrid,*UGrid,*rbGrid,mass,M5);
 | 
			
		||||
  DomainWallFermionR Ddwf(s_Umu,*SFGrid,*SFrbGrid,*SGrid,*SrbGrid,mass,M5);
 | 
			
		||||
 | 
			
		||||
  std::cout << GridLogMessage << "****************************************************************** "<<std::endl;
 | 
			
		||||
  std::cout << GridLogMessage << " Calling DWF CG "<<std::endl;
 | 
			
		||||
  std::cout << GridLogMessage << "****************************************************************** "<<std::endl;
 | 
			
		||||
 | 
			
		||||
  MdagMLinearOperator<DomainWallFermionR,FermionField> HermOp(Ddwf);
 | 
			
		||||
  MdagMLinearOperator<DomainWallFermionR,FermionField> HermOpCk(Dchk);
 | 
			
		||||
  ConjugateGradient<FermionField> CG((1.0e-8/(me+1)),10000);
 | 
			
		||||
  s_res = zero;
 | 
			
		||||
  CG(HermOp,s_src,s_res);
 | 
			
		||||
 | 
			
		||||
  /////////////////////////////////////////////////////////////
 | 
			
		||||
  // Report how long they all took
 | 
			
		||||
  /////////////////////////////////////////////////////////////
 | 
			
		||||
  std::vector<uint32_t> iterations(nrhs,0);
 | 
			
		||||
  iterations[me] = CG.IterationsToComplete;
 | 
			
		||||
 | 
			
		||||
  for(int n=0;n<nrhs;n++){
 | 
			
		||||
    UGrid->GlobalSum(iterations[n]);
 | 
			
		||||
    std::cout << GridLogMessage<<" Rank "<<n<<" "<< iterations[n]<<" CG iterations"<<std::endl;
 | 
			
		||||
  }
 | 
			
		||||
 | 
			
		||||
  /////////////////////////////////////////////////////////////
 | 
			
		||||
  // Gather and residual check on the results
 | 
			
		||||
  /////////////////////////////////////////////////////////////
 | 
			
		||||
  std::cout << GridLogMessage<< "Unsplitting the result"<<std::endl;
 | 
			
		||||
  Grid_unsplit(result,s_res);
 | 
			
		||||
 | 
			
		||||
  std::cout << GridLogMessage<< "Checking the residuals"<<std::endl;
 | 
			
		||||
  for(int n=0;n<nrhs;n++){
 | 
			
		||||
    HermOpCk.HermOp(result[n],tmp); tmp = tmp - src[n];
 | 
			
		||||
    std::cout << GridLogMessage<<" resid["<<n<<"]  "<< norm2(tmp)<<std::endl;
 | 
			
		||||
  }
 | 
			
		||||
 | 
			
		||||
  Grid_finalize();
 | 
			
		||||
}
 | 
			
		||||
							
								
								
									
										130
									
								
								tests/solver/Test_staggered_block_cg_prec.cc
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										130
									
								
								tests/solver/Test_staggered_block_cg_prec.cc
									
									
									
									
									
										Normal file
									
								
							@@ -0,0 +1,130 @@
 | 
			
		||||
    /*************************************************************************************
 | 
			
		||||
 | 
			
		||||
    Grid physics library, www.github.com/paboyle/Grid 
 | 
			
		||||
 | 
			
		||||
    Source file: ./tests/Test_wilson_cg_unprec.cc
 | 
			
		||||
 | 
			
		||||
    Copyright (C) 2015
 | 
			
		||||
 | 
			
		||||
Author: Azusa Yamaguchi <ayamaguc@staffmail.ed.ac.uk>
 | 
			
		||||
Author: Peter Boyle <paboyle@ph.ed.ac.uk>
 | 
			
		||||
 | 
			
		||||
    This program is free software; you can redistribute it and/or modify
 | 
			
		||||
    it under the terms of the GNU General Public License as published by
 | 
			
		||||
    the Free Software Foundation; either version 2 of the License, or
 | 
			
		||||
    (at your option) any later version.
 | 
			
		||||
 | 
			
		||||
    This program is distributed in the hope that it will be useful,
 | 
			
		||||
    but WITHOUT ANY WARRANTY; without even the implied warranty of
 | 
			
		||||
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 | 
			
		||||
    GNU General Public License for more details.
 | 
			
		||||
 | 
			
		||||
    You should have received a copy of the GNU General Public License along
 | 
			
		||||
    with this program; if not, write to the Free Software Foundation, Inc.,
 | 
			
		||||
    51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 | 
			
		||||
 | 
			
		||||
    See the full license in the file "LICENSE" in the top level distribution directory
 | 
			
		||||
    *************************************************************************************/
 | 
			
		||||
    /*  END LEGAL */
 | 
			
		||||
#include <Grid/Grid.h>
 | 
			
		||||
 | 
			
		||||
using namespace std;
 | 
			
		||||
using namespace Grid;
 | 
			
		||||
using namespace Grid::QCD;
 | 
			
		||||
 | 
			
		||||
template<class d>
 | 
			
		||||
struct scal {
 | 
			
		||||
  d internal;
 | 
			
		||||
};
 | 
			
		||||
 | 
			
		||||
  Gamma::Algebra Gmu [] = {
 | 
			
		||||
    Gamma::Algebra::GammaX,
 | 
			
		||||
    Gamma::Algebra::GammaY,
 | 
			
		||||
    Gamma::Algebra::GammaZ,
 | 
			
		||||
    Gamma::Algebra::GammaT
 | 
			
		||||
  };
 | 
			
		||||
 | 
			
		||||
int main (int argc, char ** argv)
 | 
			
		||||
{
 | 
			
		||||
  typedef typename ImprovedStaggeredFermion5DR::FermionField FermionField; 
 | 
			
		||||
  typedef typename ImprovedStaggeredFermion5DR::ComplexField ComplexField; 
 | 
			
		||||
  typename ImprovedStaggeredFermion5DR::ImplParams params; 
 | 
			
		||||
 | 
			
		||||
  const int Ls=8;
 | 
			
		||||
 | 
			
		||||
  Grid_init(&argc,&argv);
 | 
			
		||||
 | 
			
		||||
  std::vector<int> latt_size   = GridDefaultLatt();
 | 
			
		||||
  std::vector<int> simd_layout = GridDefaultSimd(Nd,vComplex::Nsimd());
 | 
			
		||||
  std::vector<int> mpi_layout  = GridDefaultMpi();
 | 
			
		||||
 | 
			
		||||
  GridCartesian         * UGrid   = SpaceTimeGrid::makeFourDimGrid(GridDefaultLatt(), GridDefaultSimd(Nd,vComplex::Nsimd()),GridDefaultMpi());
 | 
			
		||||
  GridRedBlackCartesian * UrbGrid = SpaceTimeGrid::makeFourDimRedBlackGrid(UGrid);
 | 
			
		||||
  GridCartesian         * FGrid   = SpaceTimeGrid::makeFiveDimGrid(Ls,UGrid);
 | 
			
		||||
  GridRedBlackCartesian * FrbGrid = SpaceTimeGrid::makeFiveDimRedBlackGrid(Ls,UGrid);
 | 
			
		||||
 | 
			
		||||
  std::vector<int> seeds({1,2,3,4});
 | 
			
		||||
  GridParallelRNG pRNG(UGrid );  pRNG.SeedFixedIntegers(seeds);
 | 
			
		||||
  GridParallelRNG pRNG5(FGrid);  pRNG5.SeedFixedIntegers(seeds);
 | 
			
		||||
 | 
			
		||||
  FermionField src(FGrid); random(pRNG5,src);
 | 
			
		||||
  FermionField src_o(FrbGrid);   pickCheckerboard(Odd,src_o,src);
 | 
			
		||||
  FermionField result_o(FrbGrid); result_o=zero; 
 | 
			
		||||
  RealD nrm = norm2(src);
 | 
			
		||||
 | 
			
		||||
  LatticeGaugeField Umu(UGrid); SU3::HotConfiguration(pRNG,Umu);
 | 
			
		||||
 | 
			
		||||
  RealD mass=0.003;
 | 
			
		||||
  ImprovedStaggeredFermion5DR Ds(Umu,Umu,*FGrid,*FrbGrid,*UGrid,*UrbGrid,mass); 
 | 
			
		||||
  SchurStaggeredOperator<ImprovedStaggeredFermion5DR,FermionField> HermOp(Ds);
 | 
			
		||||
 | 
			
		||||
  ConjugateGradient<FermionField> CG(1.0e-8,10000);
 | 
			
		||||
  int blockDim = 0;
 | 
			
		||||
  BlockConjugateGradient<FermionField>    BCGrQ(BlockCGrQ,blockDim,1.0e-8,10000);
 | 
			
		||||
  BlockConjugateGradient<FermionField>    BCG  (BlockCG,blockDim,1.0e-8,10000);
 | 
			
		||||
  BlockConjugateGradient<FermionField>    mCG  (CGmultiRHS,blockDim,1.0e-8,10000);
 | 
			
		||||
 | 
			
		||||
  std::cout << GridLogMessage << "****************************************************************** "<<std::endl;
 | 
			
		||||
  std::cout << GridLogMessage << " Calling 4d CG "<<std::endl;
 | 
			
		||||
  std::cout << GridLogMessage << "****************************************************************** "<<std::endl;
 | 
			
		||||
  ImprovedStaggeredFermionR Ds4d(Umu,Umu,*UGrid,*UrbGrid,mass);
 | 
			
		||||
  SchurStaggeredOperator<ImprovedStaggeredFermionR,FermionField> HermOp4d(Ds4d);
 | 
			
		||||
  FermionField src4d(UGrid); random(pRNG,src4d);
 | 
			
		||||
  FermionField src4d_o(UrbGrid);   pickCheckerboard(Odd,src4d_o,src4d);
 | 
			
		||||
  FermionField result4d_o(UrbGrid); 
 | 
			
		||||
 | 
			
		||||
  result4d_o=zero;
 | 
			
		||||
  CG(HermOp4d,src4d_o,result4d_o);
 | 
			
		||||
  std::cout << GridLogMessage << "************************************************************************ "<<std::endl;
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
  std::cout << GridLogMessage << "************************************************************************ "<<std::endl;
 | 
			
		||||
  std::cout << GridLogMessage << " Calling 5d CG for "<<Ls <<" right hand sides" <<std::endl;
 | 
			
		||||
  std::cout << GridLogMessage << "************************************************************************ "<<std::endl;
 | 
			
		||||
  Ds.ZeroCounters();
 | 
			
		||||
  result_o=zero;
 | 
			
		||||
  CG(HermOp,src_o,result_o);
 | 
			
		||||
  Ds.Report();
 | 
			
		||||
  std::cout << GridLogMessage << "************************************************************************ "<<std::endl;
 | 
			
		||||
 | 
			
		||||
  std::cout << GridLogMessage << "************************************************************************ "<<std::endl;
 | 
			
		||||
  std::cout << GridLogMessage << " Calling multiRHS CG for "<<Ls <<" right hand sides" <<std::endl;
 | 
			
		||||
  std::cout << GridLogMessage << "************************************************************************ "<<std::endl;
 | 
			
		||||
  Ds.ZeroCounters();
 | 
			
		||||
  result_o=zero;
 | 
			
		||||
  mCG(HermOp,src_o,result_o);
 | 
			
		||||
  Ds.Report();
 | 
			
		||||
  std::cout << GridLogMessage << "************************************************************************ "<<std::endl;
 | 
			
		||||
 | 
			
		||||
  std::cout << GridLogMessage << "************************************************************************ "<<std::endl;
 | 
			
		||||
  std::cout << GridLogMessage << " Calling Block CG for "<<Ls <<" right hand sides" <<std::endl;
 | 
			
		||||
  std::cout << GridLogMessage << "************************************************************************ "<<std::endl;
 | 
			
		||||
  Ds.ZeroCounters();
 | 
			
		||||
  result_o=zero;
 | 
			
		||||
  BCGrQ(HermOp,src_o,result_o);
 | 
			
		||||
  Ds.Report();
 | 
			
		||||
  std::cout << GridLogMessage << "************************************************************************ "<<std::endl;
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
  Grid_finalize();
 | 
			
		||||
}
 | 
			
		||||
@@ -48,7 +48,6 @@ struct scal {
 | 
			
		||||
int main (int argc, char ** argv)
 | 
			
		||||
{
 | 
			
		||||
  typedef typename ImprovedStaggeredFermionR::FermionField FermionField; 
 | 
			
		||||
  typedef typename ImprovedStaggeredFermionR::ComplexField ComplexField; 
 | 
			
		||||
  typename ImprovedStaggeredFermionR::ImplParams params; 
 | 
			
		||||
 | 
			
		||||
  Grid_init(&argc,&argv);
 | 
			
		||||
@@ -71,7 +70,7 @@ int main (int argc, char ** argv)
 | 
			
		||||
    volume=volume*latt_size[mu];
 | 
			
		||||
  }  
 | 
			
		||||
  
 | 
			
		||||
  RealD mass=0.1;
 | 
			
		||||
  RealD mass=0.003;
 | 
			
		||||
  ImprovedStaggeredFermionR Ds(Umu,Umu,Grid,RBGrid,mass);
 | 
			
		||||
 | 
			
		||||
  FermionField res_o(&RBGrid); 
 | 
			
		||||
@@ -79,9 +78,14 @@ int main (int argc, char ** argv)
 | 
			
		||||
  pickCheckerboard(Odd,src_o,src);
 | 
			
		||||
  res_o=zero;
 | 
			
		||||
 | 
			
		||||
  SchurDiagMooeeOperator<ImprovedStaggeredFermionR,FermionField> HermOpEO(Ds);
 | 
			
		||||
  SchurStaggeredOperator<ImprovedStaggeredFermionR,FermionField> HermOpEO(Ds);
 | 
			
		||||
  ConjugateGradient<FermionField> CG(1.0e-8,10000);
 | 
			
		||||
  CG(HermOpEO,src_o,res_o);
 | 
			
		||||
 | 
			
		||||
  FermionField tmp(&RBGrid);
 | 
			
		||||
 | 
			
		||||
  HermOpEO.Mpc(res_o,tmp);
 | 
			
		||||
  std::cout << "check Mpc resid " << axpy_norm(tmp,-1.0,src_o,tmp)/norm2(src_o) << "\n";
 | 
			
		||||
 | 
			
		||||
  Grid_finalize();
 | 
			
		||||
}
 | 
			
		||||
 
 | 
			
		||||
							
								
								
									
										76
									
								
								tests/solver/Test_staggered_cg_schur.cc
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										76
									
								
								tests/solver/Test_staggered_cg_schur.cc
									
									
									
									
									
										Normal file
									
								
							@@ -0,0 +1,76 @@
 | 
			
		||||
    /*************************************************************************************
 | 
			
		||||
 | 
			
		||||
    Grid physics library, www.github.com/paboyle/Grid 
 | 
			
		||||
 | 
			
		||||
    Source file: ./tests/Test_wilson_cg_schur.cc
 | 
			
		||||
 | 
			
		||||
    Copyright (C) 2015
 | 
			
		||||
 | 
			
		||||
Author: Peter Boyle <paboyle@ph.ed.ac.uk>
 | 
			
		||||
 | 
			
		||||
    This program is free software; you can redistribute it and/or modify
 | 
			
		||||
    it under the terms of the GNU General Public License as published by
 | 
			
		||||
    the Free Software Foundation; either version 2 of the License, or
 | 
			
		||||
    (at your option) any later version.
 | 
			
		||||
 | 
			
		||||
    This program is distributed in the hope that it will be useful,
 | 
			
		||||
    but WITHOUT ANY WARRANTY; without even the implied warranty of
 | 
			
		||||
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 | 
			
		||||
    GNU General Public License for more details.
 | 
			
		||||
 | 
			
		||||
    You should have received a copy of the GNU General Public License along
 | 
			
		||||
    with this program; if not, write to the Free Software Foundation, Inc.,
 | 
			
		||||
    51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 | 
			
		||||
 | 
			
		||||
    See the full license in the file "LICENSE" in the top level distribution directory
 | 
			
		||||
    *************************************************************************************/
 | 
			
		||||
    /*  END LEGAL */
 | 
			
		||||
#include <Grid/Grid.h>
 | 
			
		||||
 | 
			
		||||
using namespace std;
 | 
			
		||||
using namespace Grid;
 | 
			
		||||
using namespace Grid::QCD;
 | 
			
		||||
 | 
			
		||||
template<class d>
 | 
			
		||||
struct scal {
 | 
			
		||||
  d internal;
 | 
			
		||||
};
 | 
			
		||||
 | 
			
		||||
  Gamma::Algebra Gmu [] = {
 | 
			
		||||
    Gamma::Algebra::GammaX,
 | 
			
		||||
    Gamma::Algebra::GammaY,
 | 
			
		||||
    Gamma::Algebra::GammaZ,
 | 
			
		||||
    Gamma::Algebra::GammaT
 | 
			
		||||
  };
 | 
			
		||||
 | 
			
		||||
int main (int argc, char ** argv)
 | 
			
		||||
{
 | 
			
		||||
  typedef typename ImprovedStaggeredFermionR::FermionField FermionField; 
 | 
			
		||||
  typename ImprovedStaggeredFermionR::ImplParams params; 
 | 
			
		||||
  Grid_init(&argc,&argv);
 | 
			
		||||
 | 
			
		||||
  std::vector<int> latt_size   = GridDefaultLatt();
 | 
			
		||||
  std::vector<int> simd_layout = GridDefaultSimd(Nd,vComplex::Nsimd());
 | 
			
		||||
  std::vector<int> mpi_layout  = GridDefaultMpi();
 | 
			
		||||
  GridCartesian               Grid(latt_size,simd_layout,mpi_layout);
 | 
			
		||||
  GridRedBlackCartesian     RBGrid(&Grid);
 | 
			
		||||
 | 
			
		||||
  std::vector<int> seeds({1,2,3,4});
 | 
			
		||||
  GridParallelRNG          pRNG(&Grid);  pRNG.SeedFixedIntegers(seeds);
 | 
			
		||||
 | 
			
		||||
  LatticeGaugeField Umu(&Grid); SU3::HotConfiguration(pRNG,Umu);
 | 
			
		||||
 | 
			
		||||
  FermionField    src(&Grid); random(pRNG,src);
 | 
			
		||||
  FermionField result(&Grid); result=zero;
 | 
			
		||||
  FermionField  resid(&Grid); 
 | 
			
		||||
 | 
			
		||||
  RealD mass=0.1;
 | 
			
		||||
  ImprovedStaggeredFermionR Ds(Umu,Umu,Grid,RBGrid,mass);
 | 
			
		||||
 | 
			
		||||
  ConjugateGradient<FermionField> CG(1.0e-8,10000);
 | 
			
		||||
  SchurRedBlackStaggeredSolve<FermionField> SchurSolver(CG);
 | 
			
		||||
 | 
			
		||||
  SchurSolver(Ds,src,result);
 | 
			
		||||
  
 | 
			
		||||
  Grid_finalize();
 | 
			
		||||
}
 | 
			
		||||
		Reference in New Issue
	
	Block a user