mirror of https://github.com/paboyle/Grid.git (synced 2025-10-29 11:09:33 +00:00)

Compare commits

fix/HOST_N ... d299c86633
268 commits in this comparison, from c0a0b8ca62 (oldest) through d299c86633 (branch head).
| @@ -59,6 +59,7 @@ Author: paboyle <paboyle@ph.ed.ac.uk> | ||||
| #include <Grid/lattice/Lattice.h>       | ||||
| #include <Grid/cshift/Cshift.h>        | ||||
| #include <Grid/stencil/Stencil.h>       | ||||
| #include <Grid/stencil/GeneralLocalStencil.h>       | ||||
| #include <Grid/parallelIO/BinaryIO.h> | ||||
| #include <Grid/algorithms/Algorithms.h>    | ||||
| NAMESPACE_CHECK(GridCore) | ||||
|   | ||||
| @@ -29,6 +29,9 @@ Author: Peter Boyle <paboyle@ph.ed.ac.uk> | ||||
| #ifndef GRID_ALGORITHMS_H | ||||
| #define GRID_ALGORITHMS_H | ||||
|  | ||||
| NAMESPACE_CHECK(blas); | ||||
| #include <Grid/algorithms/blas/BatchedBlas.h> | ||||
|  | ||||
| NAMESPACE_CHECK(algorithms); | ||||
| #include <Grid/algorithms/SparseMatrix.h> | ||||
| #include <Grid/algorithms/LinearOperator.h> | ||||
| @@ -44,7 +47,10 @@ NAMESPACE_CHECK(SparseMatrix); | ||||
| #include <Grid/algorithms/approx/RemezGeneral.h> | ||||
| #include <Grid/algorithms/approx/ZMobius.h> | ||||
| NAMESPACE_CHECK(approx); | ||||
| #include <Grid/algorithms/iterative/Deflation.h> | ||||
| #include <Grid/algorithms/deflation/Deflation.h> | ||||
| #include <Grid/algorithms/deflation/MultiRHSBlockProject.h> | ||||
| #include <Grid/algorithms/deflation/MultiRHSDeflation.h> | ||||
| NAMESPACE_CHECK(deflation); | ||||
| #include <Grid/algorithms/iterative/ConjugateGradient.h> | ||||
| NAMESPACE_CHECK(ConjGrad); | ||||
| #include <Grid/algorithms/iterative/BiCGSTAB.h> | ||||
| @@ -67,10 +73,11 @@ NAMESPACE_CHECK(BiCGSTAB); | ||||
| #include <Grid/algorithms/iterative/MixedPrecisionFlexibleGeneralisedMinimalResidual.h> | ||||
| #include <Grid/algorithms/iterative/ImplicitlyRestartedLanczos.h> | ||||
| #include <Grid/algorithms/iterative/PowerMethod.h> | ||||
|  | ||||
| #include <Grid/algorithms/iterative/AdefGeneric.h> | ||||
| #include <Grid/algorithms/iterative/AdefMrhs.h> | ||||
| NAMESPACE_CHECK(PowerMethod); | ||||
| #include <Grid/algorithms/CoarsenedMatrix.h> | ||||
| NAMESPACE_CHECK(CoarsendMatrix); | ||||
| #include <Grid/algorithms/multigrid/MultiGrid.h> | ||||
| NAMESPACE_CHECK(multigrid); | ||||
| #include <Grid/algorithms/FFT.h> | ||||
|  | ||||
| #endif | ||||
|   | ||||
| @@ -29,7 +29,7 @@ Author: Peter Boyle <paboyle@ph.ed.ac.uk> | ||||
| #define _GRID_FFT_H_ | ||||
|  | ||||
| #ifdef HAVE_FFTW | ||||
| #ifdef USE_MKL | ||||
| #if defined(USE_MKL) || defined(GRID_SYCL) | ||||
| #include <fftw/fftw3.h> | ||||
| #else | ||||
| #include <fftw3.h> | ||||
|   | ||||
| @@ -145,6 +145,44 @@ public: | ||||
|   } | ||||
| }; | ||||
|  | ||||
| //////////////////////////////////////////////////////////////////// | ||||
| // Create a shifted HermOp | ||||
| //////////////////////////////////////////////////////////////////// | ||||
| template<class Field> | ||||
| class ShiftedHermOpLinearOperator : public LinearOperatorBase<Field> { | ||||
|   LinearOperatorBase<Field> &_Mat; | ||||
|   RealD _shift; | ||||
| public: | ||||
|   ShiftedHermOpLinearOperator(LinearOperatorBase<Field> &Mat,RealD shift): _Mat(Mat), _shift(shift){}; | ||||
|   // Support for coarsening to a multigrid | ||||
|   void OpDiag (const Field &in, Field &out) { | ||||
|     assert(0); | ||||
|   } | ||||
|   void OpDir  (const Field &in, Field &out,int dir,int disp) { | ||||
|     assert(0); | ||||
|   } | ||||
|   void OpDirAll  (const Field &in, std::vector<Field> &out){ | ||||
|     assert(0); | ||||
|   }; | ||||
|   void Op     (const Field &in, Field &out){ | ||||
|     HermOp(in,out); | ||||
|   } | ||||
|   void AdjOp     (const Field &in, Field &out){ | ||||
|     HermOp(in,out); | ||||
|   } | ||||
|   void HermOpAndNorm(const Field &in, Field &out,RealD &n1,RealD &n2){ | ||||
|     HermOp(in,out); | ||||
|     ComplexD dot = innerProduct(in,out); | ||||
|     n1=real(dot); | ||||
|     n2=norm2(out); | ||||
|   } | ||||
|   void HermOp(const Field &in, Field &out){ | ||||
|     _Mat.HermOp(in,out); | ||||
|     out = out + _shift*in; | ||||
|   } | ||||
| }; | ||||
|  | ||||
|  | ||||
| //////////////////////////////////////////////////////////////////// | ||||
| // Wrap an already herm matrix | ||||
| //////////////////////////////////////////////////////////////////// | ||||
|   | ||||
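The new ShiftedHermOpLinearOperator forwards Op, AdjOp and HermOp to the wrapped operator and adds `shift*in`, so it can be passed anywhere a `LinearOperatorBase<Field>` is expected. Below is a minimal usage sketch, not part of this commit: the Wilson action, the grids and the shift value 0.5 are illustrative assumptions only.

```cpp
#include <Grid/Grid.h>
using namespace Grid;

int main(int argc, char **argv)
{
  Grid_init(&argc, &argv);

  // Illustrative 4d grids and a random gauge field (not from the commit)
  GridCartesian *FGrid =
    SpaceTimeGrid::makeFourDimGrid(GridDefaultLatt(),
                                   GridDefaultSimd(Nd, vComplexD::Nsimd()),
                                   GridDefaultMpi());
  GridRedBlackCartesian *FrbGrid = SpaceTimeGrid::makeFourDimRedBlackGrid(FGrid);

  GridParallelRNG RNG(FGrid);
  RNG.SeedFixedIntegers(std::vector<int>({1, 2, 3, 4}));
  LatticeGaugeField Umu(FGrid);
  SU<Nc>::HotConfiguration(RNG, Umu);

  // Hermitian operator H = Mdag M for a Wilson action, then the shifted wrapper H + 0.5
  RealD mass = 0.1;
  WilsonFermionD Dw(Umu, *FGrid, *FrbGrid, mass);
  MdagMLinearOperator<WilsonFermionD, LatticeFermionD> HermOp(Dw);
  ShiftedHermOpLinearOperator<LatticeFermionD> Shifted(HermOp, 0.5);

  LatticeFermionD src(FGrid); gaussian(RNG, src);
  LatticeFermionD out(FGrid);
  Shifted.HermOp(src, out);   // out = (Mdag M + 0.5) src

  Grid_finalize();
  return 0;
}
```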
| @@ -90,9 +90,8 @@ public: | ||||
|     order=_order; | ||||
|        | ||||
|     if(order < 2) exit(-1); | ||||
|     Coeffs.resize(order); | ||||
|     Coeffs.assign(0.,order); | ||||
|     Coeffs[order-1] = 1.; | ||||
|     Coeffs.resize(order,0.0); | ||||
|     Coeffs[order-1] = 1.0; | ||||
|   }; | ||||
|    | ||||
|   // PB - more efficient low pass drops high modes above the low as 1/x uses all Chebyshev's. | ||||
|   | ||||
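The replaced pair of calls was a latent bug: `std::vector::assign(count, value)` takes the count first, so `Coeffs.assign(0., order)` shrank the vector to zero elements and the following `Coeffs[order-1] = 1.` wrote out of bounds. The new `Coeffs.resize(order, 0.0)` creates `order` zero-initialised entries before the top coefficient is set. A standalone illustration of the two patterns (plain C++, not Grid code):

```cpp
#include <cassert>
#include <vector>

int main()
{
  const int order = 8;
  std::vector<double> Coeffs;

  // Old pattern: the arguments to assign(count, value) are effectively swapped,
  // so the vector ends up with zero elements and Coeffs[order-1] would be an
  // out-of-bounds write.
  Coeffs.assign(0., order);
  assert(Coeffs.empty());

  // New pattern: size to 'order' zero entries, then set the highest coefficient.
  Coeffs.resize(order, 0.0);
  Coeffs[order - 1] = 1.0;
  assert(Coeffs.size() == (size_t)order && Coeffs[0] == 0.0);
  return 0;
}
```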
| @@ -40,7 +40,7 @@ public: | ||||
|   RealD norm; | ||||
|   RealD lo,hi; | ||||
|  | ||||
|   MultiShiftFunction(int n,RealD _lo,RealD _hi): poles(n), residues(n), lo(_lo), hi(_hi) {;}; | ||||
|   MultiShiftFunction(int n,RealD _lo,RealD _hi): poles(n), residues(n), tolerances(n), lo(_lo), hi(_hi) {;}; | ||||
|   RealD approx(RealD x); | ||||
|   void csv(std::ostream &out); | ||||
|   void gnuplot(std::ostream &out); | ||||
|   | ||||
| @@ -42,6 +42,7 @@ Author: Peter Boyle <pboyle@bnl.gov> | ||||
| #ifdef GRID_ONE_MKL | ||||
| #include <oneapi/mkl.hpp> | ||||
| #endif | ||||
|  | ||||
| ///////////////////////////////////////////////////////////////////////	   | ||||
| // Need to rearrange lattice data to be in the right format for a | ||||
| // batched multiply. Might as well make these static, dense packed | ||||
| @@ -633,7 +634,6 @@ public: | ||||
|     deviceVector<ComplexD> beta_p(1); | ||||
|     acceleratorCopyToDevice((void *)&alpha,(void *)&alpha_p[0],sizeof(ComplexD)); | ||||
|     acceleratorCopyToDevice((void *)&beta ,(void *)&beta_p[0],sizeof(ComplexD)); | ||||
|  | ||||
|     //    std::cout << "blasZgemmStridedBatched mnk  "<<m<<","<<n<<","<<k<<" count "<<batchCount<<std::endl; | ||||
|     //    std::cout << "blasZgemmStridedBatched ld   "<<lda<<","<<ldb<<","<<ldc<<std::endl; | ||||
|     //    std::cout << "blasZgemmStridedBatched sd   "<<sda<<","<<sdb<<","<<sdc<<std::endl; | ||||
|   | ||||
Grid/algorithms/deflation/MultiRHSBlockProject.h (new file, 513 lines added)
							| @@ -0,0 +1,513 @@ | ||||
| /************************************************************************************* | ||||
|  | ||||
|     Grid physics library, www.github.com/paboyle/Grid  | ||||
|  | ||||
|     Source file: MultiRHSBlockProject.h | ||||
|  | ||||
|     Copyright (C) 2023 | ||||
|  | ||||
| Author: Peter Boyle <pboyle@bnl.gov> | ||||
|  | ||||
|     This program is free software; you can redistribute it and/or modify | ||||
|     it under the terms of the GNU General Public License as published by | ||||
|     the Free Software Foundation; either version 2 of the License, or | ||||
|     (at your option) any later version. | ||||
|  | ||||
|     This program is distributed in the hope that it will be useful, | ||||
|     but WITHOUT ANY WARRANTY; without even the implied warranty of | ||||
|     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the | ||||
|     GNU General Public License for more details. | ||||
|  | ||||
|     You should have received a copy of the GNU General Public License along | ||||
|     with this program; if not, write to the Free Software Foundation, Inc., | ||||
|     51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. | ||||
|  | ||||
|     See the full license in the file "LICENSE" in the top level distribution directory | ||||
| *************************************************************************************/ | ||||
| /*  END LEGAL */ | ||||
| #pragma once | ||||
|  | ||||
| NAMESPACE_BEGIN(Grid); | ||||
|  | ||||
|  | ||||
| /*  | ||||
|    MultiRHS block projection | ||||
|  | ||||
|    Import basis -> nblock x nbasis x  (block x internal)  | ||||
|    Import vector of fine lattice objects -> nblock x nrhs x (block x internal)  | ||||
|  | ||||
|    => coarse_(nrhs x nbasis )^block = via batched GEMM | ||||
|  | ||||
| //template<class vobj,class CComplex,int nbasis,class VLattice> | ||||
| //inline void blockProject(Lattice<iVector<CComplex,nbasis > > &coarseData, | ||||
| //			   const VLattice &fineData, | ||||
| //			   const VLattice &Basis) | ||||
| */ | ||||
|  | ||||
| template<class Field> | ||||
| class MultiRHSBlockProject | ||||
| { | ||||
| public: | ||||
|  | ||||
|   typedef typename Field::scalar_type   scalar; | ||||
|   typedef typename Field::scalar_object scalar_object; | ||||
|   typedef Field Fermion; | ||||
|  | ||||
|   int nbasis; | ||||
|   GridBase *coarse_grid; | ||||
|   GridBase *fine_grid; | ||||
|   uint64_t block_vol; | ||||
|   uint64_t fine_vol; | ||||
|   uint64_t coarse_vol; | ||||
|   uint64_t words; | ||||
|  | ||||
|   // Row major layout "C" order: | ||||
|   // BLAS_V[coarse_vol][nbasis][block_vol][words] | ||||
|   // BLAS_F[coarse_vol][nrhs][block_vol][words] | ||||
|   // BLAS_C[coarse_vol][nrhs][nbasis] | ||||
|   /* | ||||
|    * in Fortran column major notation (cuBlas order) | ||||
|    * | ||||
|    * Vxb = [v1(x)][..][vn(x)] ... x coarse vol | ||||
|    * | ||||
|    * Fxr = [r1(x)][..][rm(x)] ... x coarse vol | ||||
|    * | ||||
|    * Block project: | ||||
|    * C_br = V^dag F x coarse vol | ||||
|    * | ||||
|    * Block promote: | ||||
|    * F_xr = Vxb Cbr x coarse_vol | ||||
|    */   | ||||
|   deviceVector<scalar> BLAS_V;      // words * block_vol * nbasis x coarse_vol  | ||||
|   deviceVector<scalar> BLAS_F;      // nrhs x fine_vol * words   -- the sources | ||||
|   deviceVector<scalar> BLAS_C;      // nrhs x coarse_vol * nbasis -- the coarse coeffs | ||||
|  | ||||
|   RealD blasNorm2(deviceVector<scalar> &blas) | ||||
|   { | ||||
|     scalar ss(0.0); | ||||
|     std::vector<scalar> tmp(blas.size()); | ||||
|     acceleratorCopyFromDevice(&blas[0],&tmp[0],blas.size()*sizeof(scalar)); | ||||
|     for(int64_t s=0;s<blas.size();s++){ | ||||
|       ss=ss+tmp[s]*adj(tmp[s]); | ||||
|     } | ||||
|     coarse_grid->GlobalSum(ss); | ||||
|     return real(ss); | ||||
|   } | ||||
|    | ||||
|   MultiRHSBlockProject(){}; | ||||
|  ~MultiRHSBlockProject(){ Deallocate(); }; | ||||
|    | ||||
|   void Deallocate(void) | ||||
|   { | ||||
|     nbasis=0; | ||||
|     coarse_grid=nullptr; | ||||
|     fine_grid=nullptr; | ||||
|     fine_vol=0; | ||||
|     block_vol=0; | ||||
|     coarse_vol=0; | ||||
|     words=0; | ||||
|     BLAS_V.resize(0); | ||||
|     BLAS_F.resize(0); | ||||
|     BLAS_C.resize(0); | ||||
|   } | ||||
|   void Allocate(int _nbasis,GridBase *_fgrid,GridBase *_cgrid) | ||||
|   { | ||||
|     nbasis=_nbasis; | ||||
|  | ||||
|     fine_grid=_fgrid; | ||||
|     coarse_grid=_cgrid; | ||||
|  | ||||
|     fine_vol   = fine_grid->lSites(); | ||||
|     coarse_vol = coarse_grid->lSites(); | ||||
|     block_vol = fine_vol/coarse_vol; | ||||
|      | ||||
|     words = sizeof(scalar_object)/sizeof(scalar); | ||||
|  | ||||
|     BLAS_V.resize (fine_vol * words * nbasis ); | ||||
|   } | ||||
|   void ImportFineGridVectors(std::vector <Field > &vecs, deviceVector<scalar> &blas) | ||||
|   { | ||||
|     int nvec = vecs.size(); | ||||
|     typedef typename Field::vector_object vobj; | ||||
|     //    std::cout << GridLogMessage <<" BlockProjector importing "<<nvec<< " fine grid vectors" <<std::endl; | ||||
|  | ||||
|     assert(vecs[0].Grid()==fine_grid); | ||||
|  | ||||
|     subdivides(coarse_grid,fine_grid); // require they map | ||||
|  | ||||
|     int _ndimension = coarse_grid->_ndimension; | ||||
|     assert(block_vol == fine_grid->oSites() / coarse_grid->oSites()); | ||||
|      | ||||
|     Coordinate  block_r      (_ndimension); | ||||
|     for(int d=0 ; d<_ndimension;d++){ | ||||
|       block_r[d] = fine_grid->_rdimensions[d] / coarse_grid->_rdimensions[d]; | ||||
|     } | ||||
|  | ||||
|     uint64_t sz = blas.size(); | ||||
|  | ||||
|     acceleratorMemSet(&blas[0],0,blas.size()*sizeof(scalar)); | ||||
|  | ||||
|     Coordinate fine_rdimensions = fine_grid->_rdimensions; | ||||
|     Coordinate coarse_rdimensions = coarse_grid->_rdimensions; | ||||
|     int64_t bv= block_vol; | ||||
|     for(int v=0;v<vecs.size();v++){ | ||||
|  | ||||
|       //      std::cout << " BlockProjector importing vector"<<v<<" "<<norm2(vecs[v])<<std::endl; | ||||
|       autoView( fineData   , vecs[v], AcceleratorRead); | ||||
|  | ||||
|       auto blasData_p  = &blas[0]; | ||||
|       auto fineData_p  = &fineData[0]; | ||||
|  | ||||
|       int64_t osites = fine_grid->oSites(); | ||||
|  | ||||
|       // loop over fine sites | ||||
|       const int Nsimd = vobj::Nsimd(); | ||||
|       //      std::cout << "sz "<<sz<<std::endl; | ||||
|       //      std::cout << "prod "<<Nsimd * coarse_grid->oSites() * block_vol * nvec * words<<std::endl; | ||||
|       assert(sz == Nsimd * coarse_grid->oSites() * block_vol * nvec * words); | ||||
|       uint64_t lwords= words; // local variable for copy in to GPU | ||||
|       accelerator_for(sf,osites,Nsimd,{ | ||||
| #ifdef GRID_SIMT | ||||
|         { | ||||
| 	  int lane=acceleratorSIMTlane(Nsimd); // buffer lane | ||||
| #else | ||||
| 	  for(int lane=0;lane<Nsimd;lane++) { | ||||
| #endif | ||||
| 	  // One thread per fine site | ||||
| 	  Coordinate coor_f(_ndimension); | ||||
| 	  Coordinate coor_b(_ndimension); | ||||
| 	  Coordinate coor_c(_ndimension); | ||||
|  | ||||
| 	  // Fine site to fine coor | ||||
| 	  Lexicographic::CoorFromIndex(coor_f,sf,fine_rdimensions); | ||||
|  | ||||
| 	  for(int d=0;d<_ndimension;d++) coor_b[d] = coor_f[d]%block_r[d]; | ||||
| 	  for(int d=0;d<_ndimension;d++) coor_c[d] = coor_f[d]/block_r[d]; | ||||
| 	   | ||||
| 	  int sc;// coarse site | ||||
| 	  int sb;// block site | ||||
| 	  Lexicographic::IndexFromCoor(coor_c,sc,coarse_rdimensions); | ||||
| 	  Lexicographic::IndexFromCoor(coor_b,sb,block_r); | ||||
|  | ||||
|           scalar_object data = extractLane(lane,fineData[sf]); | ||||
|  | ||||
| 	  // BLAS layout address calculation | ||||
| 	  // words * block_vol * nbasis x coarse_vol | ||||
| 	  // coarse oSite x block vol x lanes | ||||
| 	  int64_t site = (lane*osites + sc*bv)*nvec | ||||
|    	               + v*bv | ||||
| 	               + sb; | ||||
|  | ||||
| 	  //	  assert(site*lwords<sz); | ||||
|  | ||||
| 	  scalar_object * ptr = (scalar_object *)&blasData_p[site*lwords]; | ||||
|  | ||||
| 	  *ptr = data; | ||||
| #ifdef GRID_SIMT | ||||
| 	} | ||||
| #else | ||||
| 	} | ||||
| #endif | ||||
|       }); | ||||
|       //      std::cout << " import fine Blas norm "<<blasNorm2(blas)<<std::endl; | ||||
|       //      std::cout << " BlockProjector imported vector"<<v<<std::endl; | ||||
|     } | ||||
|   } | ||||
|   void ExportFineGridVectors(std::vector <Field> &vecs, deviceVector<scalar> &blas) | ||||
|   { | ||||
|     typedef typename Field::vector_object vobj; | ||||
|  | ||||
|     int nvec = vecs.size(); | ||||
|  | ||||
|     assert(vecs[0].Grid()==fine_grid); | ||||
|  | ||||
|     subdivides(coarse_grid,fine_grid); // require they map | ||||
|  | ||||
|     int _ndimension = coarse_grid->_ndimension; | ||||
|     assert(block_vol == fine_grid->oSites() / coarse_grid->oSites()); | ||||
|      | ||||
|     Coordinate  block_r      (_ndimension); | ||||
|     for(int d=0 ; d<_ndimension;d++){ | ||||
|       block_r[d] = fine_grid->_rdimensions[d] / coarse_grid->_rdimensions[d]; | ||||
|     } | ||||
|     Coordinate fine_rdimensions = fine_grid->_rdimensions; | ||||
|     Coordinate coarse_rdimensions = coarse_grid->_rdimensions; | ||||
|  | ||||
|     //    std::cout << " export fine Blas norm "<<blasNorm2(blas)<<std::endl; | ||||
|  | ||||
|     int64_t bv= block_vol; | ||||
|     for(int v=0;v<vecs.size();v++){ | ||||
|  | ||||
|       autoView( fineData   , vecs[v], AcceleratorWrite); | ||||
|  | ||||
|       auto blasData_p  = &blas[0]; | ||||
|       auto fineData_p    = &fineData[0]; | ||||
|  | ||||
|       int64_t osites = fine_grid->oSites(); | ||||
|       uint64_t lwords = words; | ||||
|       //      std::cout << " Nsimd is "<<vobj::Nsimd() << std::endl; | ||||
|       //      std::cout << " lwords is "<<lwords << std::endl; | ||||
|       //      std::cout << " sizeof(scalar_object) is "<<sizeof(scalar_object) << std::endl; | ||||
|       // loop over fine sites | ||||
|       accelerator_for(sf,osites,vobj::Nsimd(),{ | ||||
|        | ||||
| #ifdef GRID_SIMT | ||||
|         { | ||||
| 	  int lane=acceleratorSIMTlane(vobj::Nsimd()); // buffer lane | ||||
| #else | ||||
| 	  for(int lane=0;lane<vobj::Nsimd();lane++) { | ||||
| #endif | ||||
| 	  // One thread per fine site | ||||
| 	  Coordinate coor_f(_ndimension); | ||||
| 	  Coordinate coor_b(_ndimension); | ||||
| 	  Coordinate coor_c(_ndimension); | ||||
|  | ||||
| 	  Lexicographic::CoorFromIndex(coor_f,sf,fine_rdimensions); | ||||
|  | ||||
| 	  for(int d=0;d<_ndimension;d++) coor_b[d] = coor_f[d]%block_r[d]; | ||||
| 	  for(int d=0;d<_ndimension;d++) coor_c[d] = coor_f[d]/block_r[d]; | ||||
| 	   | ||||
| 	  int sc; | ||||
| 	  int sb; | ||||
| 	  Lexicographic::IndexFromCoor(coor_c,sc,coarse_rdimensions); | ||||
| 	  Lexicographic::IndexFromCoor(coor_b,sb,block_r); | ||||
|  | ||||
| 	  // BLAS layout address calculation | ||||
| 	  // words * block_vol * nbasis x coarse_vol 	   | ||||
| 	  int64_t site = (lane*osites + sc*bv)*nvec | ||||
|    	               + v*bv | ||||
| 	               + sb; | ||||
|  | ||||
| 	  scalar_object * ptr = (scalar_object *)&blasData_p[site*lwords]; | ||||
|  | ||||
| 	  scalar_object data = *ptr; | ||||
|  | ||||
| 	  insertLane(lane,fineData[sf],data); | ||||
| #ifdef GRID_SIMT | ||||
| 	} | ||||
| #else | ||||
| 	} | ||||
| #endif | ||||
|       }); | ||||
|     } | ||||
|   } | ||||
|   template<class vobj> | ||||
|   void ImportCoarseGridVectors(std::vector <Lattice<vobj> > &vecs, deviceVector<scalar> &blas) | ||||
|   { | ||||
|     int nvec = vecs.size(); | ||||
|     typedef typename vobj::scalar_object coarse_scalar_object; | ||||
|  | ||||
|     //    std::cout << " BlockProjector importing "<<nvec<< " coarse grid vectors" <<std::endl; | ||||
|  | ||||
|     assert(vecs[0].Grid()==coarse_grid); | ||||
|  | ||||
|     int _ndimension = coarse_grid->_ndimension; | ||||
|  | ||||
|     uint64_t sz = blas.size(); | ||||
|  | ||||
|     Coordinate coarse_rdimensions = coarse_grid->_rdimensions; | ||||
|      | ||||
|     for(int v=0;v<vecs.size();v++){ | ||||
|  | ||||
|       //      std::cout << " BlockProjector importing coarse vector"<<v<<" "<<norm2(vecs[v])<<std::endl; | ||||
|       autoView( coarseData   , vecs[v], AcceleratorRead); | ||||
|  | ||||
|       auto blasData_p  = &blas[0]; | ||||
|       auto coarseData_p  = &coarseData[0]; | ||||
|  | ||||
|       int64_t osites = coarse_grid->oSites(); | ||||
|  | ||||
|       // loop over fine sites | ||||
|       const int Nsimd = vobj::Nsimd(); | ||||
|       uint64_t cwords=sizeof(typename vobj::scalar_object)/sizeof(scalar); | ||||
|       assert(cwords==nbasis); | ||||
|        | ||||
|       accelerator_for(sc,osites,Nsimd,{ | ||||
| #ifdef GRID_SIMT | ||||
|         { | ||||
| 	  int lane=acceleratorSIMTlane(Nsimd); // buffer lane | ||||
| #else | ||||
| 	  for(int lane=0;lane<Nsimd;lane++) { | ||||
| #endif | ||||
|            // C_br per site | ||||
| 	    int64_t blas_site = (lane*osites + sc)*nvec*cwords + v*cwords; | ||||
| 	     | ||||
| 	    coarse_scalar_object data = extractLane(lane,coarseData[sc]); | ||||
|  | ||||
| 	    coarse_scalar_object * ptr = (coarse_scalar_object *)&blasData_p[blas_site]; | ||||
|  | ||||
| 	    *ptr = data; | ||||
| #ifdef GRID_SIMT | ||||
| 	} | ||||
| #else | ||||
| 	} | ||||
| #endif | ||||
|       }); | ||||
|       //      std::cout << " import coarsee Blas norm "<<blasNorm2(blas)<<std::endl; | ||||
|     } | ||||
|   } | ||||
|   template<class vobj> | ||||
|   void ExportCoarseGridVectors(std::vector <Lattice<vobj> > &vecs, deviceVector<scalar> &blas) | ||||
|   { | ||||
|     int nvec = vecs.size(); | ||||
|     typedef typename vobj::scalar_object coarse_scalar_object; | ||||
|     //    std::cout << GridLogMessage<<" BlockProjector exporting "<<nvec<< " coarse grid vectors" <<std::endl; | ||||
|  | ||||
|     assert(vecs[0].Grid()==coarse_grid); | ||||
|  | ||||
|     int _ndimension = coarse_grid->_ndimension; | ||||
|      | ||||
|     uint64_t sz = blas.size(); | ||||
|  | ||||
|     Coordinate coarse_rdimensions = coarse_grid->_rdimensions; | ||||
|      | ||||
|     //    std::cout << " export coarsee Blas norm "<<blasNorm2(blas)<<std::endl; | ||||
|     for(int v=0;v<vecs.size();v++){ | ||||
|  | ||||
|       //  std::cout << " BlockProjector exporting coarse vector"<<v<<std::endl; | ||||
|       autoView( coarseData   , vecs[v], AcceleratorWrite); | ||||
|  | ||||
|       auto blasData_p  = &blas[0]; | ||||
|       auto coarseData_p  = &coarseData[0]; | ||||
|  | ||||
|       int64_t osites = coarse_grid->oSites(); | ||||
|  | ||||
|       // loop over fine sites | ||||
|       const int Nsimd = vobj::Nsimd(); | ||||
|       uint64_t cwords=sizeof(typename vobj::scalar_object)/sizeof(scalar); | ||||
|       assert(cwords==nbasis); | ||||
|        | ||||
|       accelerator_for(sc,osites,Nsimd,{ | ||||
| 	  // Wrap in a macro "FOR_ALL_LANES(lane,{ ... }); | ||||
| #ifdef GRID_SIMT | ||||
|         { | ||||
| 	  int lane=acceleratorSIMTlane(Nsimd); // buffer lane | ||||
| #else | ||||
| 	  for(int lane=0;lane<Nsimd;lane++) { | ||||
| #endif | ||||
| 	    int64_t blas_site = (lane*osites + sc)*nvec*cwords + v*cwords; | ||||
| 	    coarse_scalar_object * ptr = (coarse_scalar_object *)&blasData_p[blas_site]; | ||||
| 	    coarse_scalar_object data = *ptr; | ||||
| 	    insertLane(lane,coarseData[sc],data); | ||||
| #ifdef GRID_SIMT | ||||
| 	} | ||||
| #else | ||||
| 	} | ||||
| #endif | ||||
|       }); | ||||
|     } | ||||
|   } | ||||
|   void ImportBasis(std::vector < Field > &vecs) | ||||
|   { | ||||
|     //    std::cout << " BlockProjector Import basis size "<<vecs.size()<<std::endl; | ||||
|     ImportFineGridVectors(vecs,BLAS_V); | ||||
|   } | ||||
|  | ||||
|   template<class cobj> | ||||
|   void blockProject(std::vector<Field> &fine,std::vector< Lattice<cobj> > & coarse) | ||||
|   { | ||||
|     int nrhs=fine.size(); | ||||
|     int _nbasis = sizeof(typename cobj::scalar_object)/sizeof(scalar); | ||||
|     //    std::cout << "blockProject nbasis " <<nbasis<<" " << _nbasis<<std::endl; | ||||
|     assert(nbasis==_nbasis); | ||||
|      | ||||
|     BLAS_F.resize (fine_vol * words * nrhs ); | ||||
|     BLAS_C.resize (coarse_vol * nbasis * nrhs ); | ||||
|  | ||||
|     ///////////////////////////////////////////// | ||||
|     // Copy in the multi-rhs sources to same data layout | ||||
|     ///////////////////////////////////////////// | ||||
|     //    std::cout << "BlockProject import fine"<<std::endl; | ||||
|     ImportFineGridVectors(fine,BLAS_F); | ||||
|      | ||||
|     deviceVector<scalar *> Vd(coarse_vol); | ||||
|     deviceVector<scalar *> Fd(coarse_vol); | ||||
|     deviceVector<scalar *> Cd(coarse_vol); | ||||
|  | ||||
|     //    std::cout << "BlockProject pointers"<<std::endl; | ||||
|     for(int c=0;c<coarse_vol;c++){ | ||||
|       // BLAS_V[coarse_vol][nbasis][block_vol][words] | ||||
|       // BLAS_F[coarse_vol][nrhs][block_vol][words] | ||||
|       // BLAS_C[coarse_vol][nrhs][nbasis] | ||||
|       scalar * Vh = & BLAS_V[c*nbasis*block_vol*words]; | ||||
|       scalar * Fh = & BLAS_F[c*nrhs*block_vol*words]; | ||||
|       scalar * Ch = & BLAS_C[c*nrhs*nbasis]; | ||||
|  | ||||
|       acceleratorPut(Vd[c],Vh); | ||||
|       acceleratorPut(Fd[c],Fh); | ||||
|       acceleratorPut(Cd[c],Ch); | ||||
|     } | ||||
|  | ||||
|     GridBLAS BLAS; | ||||
|  | ||||
|     //    std::cout << "BlockProject BLAS"<<std::endl; | ||||
|     int64_t vw = block_vol * words; | ||||
|     ///////////////////////////////////////// | ||||
|     // C_br = V^dag R | ||||
|     ///////////////////////////////////////// | ||||
|     BLAS.gemmBatched(GridBLAS_OP_C,GridBLAS_OP_N,  | ||||
|     		     nbasis,nrhs,vw, | ||||
| 		     ComplexD(1.0), | ||||
| 		     Vd, | ||||
| 		     Fd, | ||||
| 		     ComplexD(0.0),  // wipe out C | ||||
| 		     Cd); | ||||
|     BLAS.synchronise(); | ||||
|     //    std::cout << "BlockProject done"<<std::endl; | ||||
|     ExportCoarseGridVectors(coarse, BLAS_C); | ||||
|     //    std::cout << "BlockProject done"<<std::endl; | ||||
|  | ||||
|   } | ||||
|  | ||||
|   template<class cobj> | ||||
|   void blockPromote(std::vector<Field> &fine,std::vector<Lattice<cobj> > & coarse) | ||||
|   { | ||||
|     int nrhs=fine.size(); | ||||
|     int _nbasis = sizeof(typename cobj::scalar_object)/sizeof(scalar); | ||||
|     assert(nbasis==_nbasis); | ||||
|      | ||||
|     BLAS_F.resize (fine_vol * words * nrhs ); | ||||
|     BLAS_C.resize (coarse_vol * nbasis * nrhs ); | ||||
|  | ||||
|     ImportCoarseGridVectors(coarse, BLAS_C); | ||||
|  | ||||
|     GridBLAS BLAS; | ||||
|  | ||||
|     deviceVector<scalar *> Vd(coarse_vol); | ||||
|     deviceVector<scalar *> Fd(coarse_vol); | ||||
|     deviceVector<scalar *> Cd(coarse_vol); | ||||
|  | ||||
|     for(int c=0;c<coarse_vol;c++){ | ||||
|       // BLAS_V[coarse_vol][nbasis][block_vol][words] | ||||
|       // BLAS_F[coarse_vol][nrhs][block_vol][words] | ||||
|       // BLAS_C[coarse_vol][nrhs][nbasis] | ||||
|       scalar * Vh = & BLAS_V[c*nbasis*block_vol*words]; | ||||
|       scalar * Fh = & BLAS_F[c*nrhs*block_vol*words]; | ||||
|       scalar * Ch = & BLAS_C[c*nrhs*nbasis]; | ||||
|       acceleratorPut(Vd[c],Vh); | ||||
|       acceleratorPut(Fd[c],Fh); | ||||
|       acceleratorPut(Cd[c],Ch); | ||||
|     } | ||||
|  | ||||
|     ///////////////////////////////////////// | ||||
|     // Block promote: | ||||
|     // F_xr = Vxb Cbr (x coarse_vol) | ||||
|     ///////////////////////////////////////// | ||||
|  | ||||
|     int64_t vw = block_vol * words; | ||||
|     BLAS.gemmBatched(GridBLAS_OP_N,GridBLAS_OP_N,  | ||||
|     		     vw,nrhs,nbasis, | ||||
| 		     ComplexD(1.0), | ||||
| 		     Vd, | ||||
| 		     Cd, | ||||
| 		     ComplexD(0.0),  // wipe out C | ||||
| 		     Fd); | ||||
|     BLAS.synchronise(); | ||||
|     //    std::cout << " blas call done"<<std::endl; | ||||
|      | ||||
|     ExportFineGridVectors(fine, BLAS_F); | ||||
|     //    std::cout << " exported "<<std::endl; | ||||
|   } | ||||
| }; | ||||
|  | ||||
| NAMESPACE_END(Grid); | ||||
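In terms of the layouts documented in the class, `blockProject` computes `C = V^dag F` and `blockPromote` computes `F = V C`, each as one batched GEMM over the coarse volume. The sketch below shows how the class might be driven; it is not part of the commit, the lattice sizes, the 4^4 blocking and the random stand-in basis are assumptions, and in practice the basis would come from a near-null-space setup.

```cpp
#include <Grid/Grid.h>
using namespace Grid;

int main(int argc, char **argv)
{
  Grid_init(&argc, &argv);

  const int nbasis = 32;   // assumed basis size
  const int nrhs   = 4;    // assumed number of right-hand sides

  typedef LatticeFermionD              FineField;
  typedef iVector<vTComplexD, nbasis>  CoarseSiteVector;  // nbasis coarse dofs per site
  typedef Lattice<CoarseSiteVector>    CoarseField;

  // Assumed geometry: fine lattice from the usual --grid flag, blocked 4^4 to the coarse lattice
  Coordinate latt = GridDefaultLatt();
  Coordinate clatt(Nd);
  for(int d=0; d<Nd; d++) clatt[d] = latt[d]/4;

  GridCartesian *FGrid = SpaceTimeGrid::makeFourDimGrid(latt,  GridDefaultSimd(Nd,vComplexD::Nsimd()), GridDefaultMpi());
  GridCartesian *CGrid = SpaceTimeGrid::makeFourDimGrid(clatt, GridDefaultSimd(Nd,vComplexD::Nsimd()), GridDefaultMpi());

  GridParallelRNG RNG(FGrid);
  RNG.SeedFixedIntegers(std::vector<int>({1,2,3,4}));

  // Random stand-in for a real near-null basis of nbasis fine vectors
  std::vector<FineField> BasisVecs(nbasis, FGrid);
  for(auto &v : BasisVecs) gaussian(RNG, v);

  MultiRHSBlockProject<FineField> Projector;
  Projector.Allocate(nbasis, FGrid, CGrid);
  Projector.ImportBasis(BasisVecs);

  std::vector<FineField>   fine  (nrhs, FGrid);   // the multi-RHS fine fields
  std::vector<CoarseField> coarse(nrhs, CGrid);   // nbasis coefficients per block per RHS
  for(auto &f : fine) gaussian(RNG, f);

  Projector.blockProject(fine, coarse);   // coarse_{rhs,b} = <basis_b | fine_rhs> block by block
  Projector.blockPromote(fine, coarse);   // fine_rhs = sum_b basis_b coarse_{rhs,b}

  Grid_finalize();
  return 0;
}
```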
Grid/algorithms/deflation/MultiRHSDeflation.h (new file, 233 lines added)
							| @@ -0,0 +1,233 @@ | ||||
| /************************************************************************************* | ||||
|  | ||||
|     Grid physics library, www.github.com/paboyle/Grid  | ||||
|  | ||||
|     Source file: MultiRHSDeflation.h | ||||
|  | ||||
|     Copyright (C) 2023 | ||||
|  | ||||
| Author: Peter Boyle <pboyle@bnl.gov> | ||||
|  | ||||
|     This program is free software; you can redistribute it and/or modify | ||||
|     it under the terms of the GNU General Public License as published by | ||||
|     the Free Software Foundation; either version 2 of the License, or | ||||
|     (at your option) any later version. | ||||
|  | ||||
|     This program is distributed in the hope that it will be useful, | ||||
|     but WITHOUT ANY WARRANTY; without even the implied warranty of | ||||
|     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the | ||||
|     GNU General Public License for more details. | ||||
|  | ||||
|     You should have received a copy of the GNU General Public License along | ||||
|     with this program; if not, write to the Free Software Foundation, Inc., | ||||
|     51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. | ||||
|  | ||||
|     See the full license in the file "LICENSE" in the top level distribution directory | ||||
| *************************************************************************************/ | ||||
| /*  END LEGAL */ | ||||
| #pragma once | ||||
|  | ||||
| NAMESPACE_BEGIN(Grid); | ||||
|  | ||||
|  | ||||
| /* Need helper object for BLAS accelerated mrhs projection | ||||
|  | ||||
|    i) MultiRHS Deflation | ||||
|  | ||||
|    Import Evecs -> nev x vol x internal  | ||||
|    Import vector of Lattice objects -> nrhs x vol x internal | ||||
|    => Cij (nrhs x Nev) via GEMM. | ||||
|    => Guess  (nrhs x vol x internal)  = C x evecs (via GEMM) | ||||
|    Export | ||||
|  | ||||
|     | ||||
|    ii) MultiRHS block projection | ||||
|  | ||||
|    Import basis -> nblock x nbasis x  (block x internal)  | ||||
|    Import vector of fine lattice objects -> nblock x nrhs x (block x internal)  | ||||
|  | ||||
|    => coarse_(nrhs x nbasis )^block = via batched GEMM | ||||
|  | ||||
|    iii)   Alternate interface:  | ||||
|    Import higher dim Lattice object-> vol x nrhs layout | ||||
|     | ||||
| */ | ||||
| template<class Field> | ||||
| class MultiRHSDeflation | ||||
| { | ||||
| public: | ||||
|  | ||||
|   typedef typename Field::scalar_type   scalar; | ||||
|   typedef typename Field::scalar_object scalar_object; | ||||
|  | ||||
|   int nev; | ||||
|   std::vector<RealD> eval; | ||||
|   GridBase *grid; | ||||
|   uint64_t vol; | ||||
|   uint64_t words; | ||||
|    | ||||
|   deviceVector<scalar> BLAS_E;      //  nev x vol -- the eigenbasis   (up to a 1/sqrt(lambda)) | ||||
|   deviceVector<scalar> BLAS_R;      // nrhs x vol -- the sources | ||||
|   deviceVector<scalar> BLAS_G;      // nrhs x vol -- the guess | ||||
|   deviceVector<scalar> BLAS_C;      // nrhs x nev -- the coefficients  | ||||
|    | ||||
|   MultiRHSDeflation(){}; | ||||
|   ~MultiRHSDeflation(){ Deallocate(); }; | ||||
|    | ||||
|   void Deallocate(void) | ||||
|   { | ||||
|     nev=0; | ||||
|     grid=nullptr; | ||||
|     vol=0; | ||||
|     words=0; | ||||
|     BLAS_E.resize(0); | ||||
|     BLAS_R.resize(0); | ||||
|     BLAS_C.resize(0); | ||||
|     BLAS_G.resize(0); | ||||
|   } | ||||
|   void Allocate(int _nev,GridBase *_grid) | ||||
|   { | ||||
|     nev=_nev; | ||||
|     grid=_grid; | ||||
|     vol   = grid->lSites(); | ||||
|     words = sizeof(scalar_object)/sizeof(scalar); | ||||
|     eval.resize(nev); | ||||
|     BLAS_E.resize (vol * words * nev ); | ||||
|     std::cout << GridLogMessage << " Allocate for "<<nev<<" eigenvectors and volume "<<vol<<std::endl; | ||||
|   } | ||||
|   void ImportEigenVector(Field &evec,RealD &_eval, int ev) | ||||
|   { | ||||
|     //    std::cout << " ev " <<ev<<" eval "<<_eval<< std::endl; | ||||
|     assert(ev<eval.size()); | ||||
|     eval[ev] = _eval; | ||||
|  | ||||
|     int64_t offset = ev*vol*words; | ||||
|     autoView(v,evec,AcceleratorRead); | ||||
|     acceleratorCopyDeviceToDevice(&v[0],&BLAS_E[offset],sizeof(scalar_object)*vol); | ||||
|  | ||||
|   } | ||||
|   void ImportEigenBasis(std::vector<Field> &evec,std::vector<RealD> &_eval) | ||||
|   { | ||||
|     ImportEigenBasis(evec,_eval,0,evec.size()); | ||||
|   } | ||||
|   // Could use to import a batch of eigenvectors | ||||
|   void ImportEigenBasis(std::vector<Field> &evec,std::vector<RealD> &_eval, int _ev0, int _nev) | ||||
|   { | ||||
|     assert(_ev0+_nev<=evec.size()); | ||||
|  | ||||
|     Allocate(_nev,evec[0].Grid()); | ||||
|      | ||||
|     // Imports a sub-batch of eigenvectors, _ev0, ..., _ev0+_nev-1 | ||||
|     for(int e=0;e<nev;e++){ | ||||
|       std::cout << "Importing eigenvector "<<e<<" evalue "<<_eval[_ev0+e]<<std::endl; | ||||
|       ImportEigenVector(evec[_ev0+e],_eval[_ev0+e],e); | ||||
|     } | ||||
|   } | ||||
|   void DeflateSources(std::vector<Field> &source,std::vector<Field> & guess) | ||||
|   { | ||||
|     int nrhs = source.size(); | ||||
|     assert(source.size()==guess.size()); | ||||
|     assert(grid == guess[0].Grid()); | ||||
|     conformable(guess[0],source[0]); | ||||
|  | ||||
|     int64_t vw = vol * words; | ||||
|  | ||||
|     RealD t0 = usecond(); | ||||
|     BLAS_R.resize(nrhs * vw); // cost free if size doesn't change | ||||
|     BLAS_G.resize(nrhs * vw); // cost free if size doesn't change | ||||
|     BLAS_C.resize(nev * nrhs);// cost free if size doesn't change | ||||
|  | ||||
|     ///////////////////////////////////////////// | ||||
|     // Copy in the multi-rhs sources | ||||
|     ///////////////////////////////////////////// | ||||
|     //    for(int r=0;r<nrhs;r++){ | ||||
|     //      std::cout << " source["<<r<<"] = "<<norm2(source[r])<<std::endl; | ||||
|     //    } | ||||
|     for(int r=0;r<nrhs;r++){ | ||||
|       int64_t offset = r*vw; | ||||
|       autoView(v,source[r],AcceleratorRead); | ||||
|       acceleratorCopyDeviceToDevice(&v[0],&BLAS_R[offset],sizeof(scalar_object)*vol); | ||||
|     } | ||||
|  | ||||
|   /* | ||||
|    * in Fortran column major notation (cuBlas order) | ||||
|    * | ||||
|    * Exe = [e1(x)][..][en(x)] | ||||
|    * | ||||
|    * Rxr = [r1(x)][..][rm(x)] | ||||
|    * | ||||
|    * C_er = E^dag R | ||||
|    * C_er = C_er / lambda_e  | ||||
|    * G_xr = Exe Cer | ||||
|    */ | ||||
|     deviceVector<scalar *> Ed(1); | ||||
|     deviceVector<scalar *> Rd(1); | ||||
|     deviceVector<scalar *> Cd(1); | ||||
|     deviceVector<scalar *> Gd(1); | ||||
|  | ||||
|     scalar * Eh = & BLAS_E[0]; | ||||
|     scalar * Rh = & BLAS_R[0]; | ||||
|     scalar * Ch = & BLAS_C[0]; | ||||
|     scalar * Gh = & BLAS_G[0]; | ||||
|  | ||||
|     acceleratorPut(Ed[0],Eh); | ||||
|     acceleratorPut(Rd[0],Rh); | ||||
|     acceleratorPut(Cd[0],Ch); | ||||
|     acceleratorPut(Gd[0],Gh); | ||||
|  | ||||
|     GridBLAS BLAS; | ||||
|  | ||||
|     ///////////////////////////////////////// | ||||
|     // C_er = E^dag R | ||||
|     ///////////////////////////////////////// | ||||
|     BLAS.gemmBatched(GridBLAS_OP_C,GridBLAS_OP_N,  | ||||
|     		     nev,nrhs,vw, | ||||
| 		     ComplexD(1.0), | ||||
| 		     Ed, | ||||
| 		     Rd, | ||||
| 		     ComplexD(0.0),  // wipe out C | ||||
| 		     Cd); | ||||
|     BLAS.synchronise(); | ||||
|  | ||||
|     assert(BLAS_C.size()==nev*nrhs); | ||||
|  | ||||
|     std::vector<scalar> HOST_C(BLAS_C.size());      // nrhs . nev -- the coefficients  | ||||
|     acceleratorCopyFromDevice(&BLAS_C[0],&HOST_C[0],BLAS_C.size()*sizeof(scalar)); | ||||
|     grid->GlobalSumVector(&HOST_C[0],nev*nrhs); | ||||
|     for(int e=0;e<nev;e++){ | ||||
|       RealD lam(1.0/eval[e]); | ||||
|       for(int r=0;r<nrhs;r++){ | ||||
| 	int off = e+nev*r; | ||||
| 	HOST_C[off]=HOST_C[off] * lam; | ||||
| 	//	std::cout << "C["<<e<<"]["<<r<<"] ="<<HOST_C[off]<< " eval[e] "<<eval[e] <<std::endl; | ||||
|       } | ||||
|     } | ||||
|     acceleratorCopyToDevice(&HOST_C[0],&BLAS_C[0],BLAS_C.size()*sizeof(scalar)); | ||||
|  | ||||
|      | ||||
|     ///////////////////////////////////////// | ||||
|     // Guess G_xr = Exe Cer | ||||
|     ///////////////////////////////////////// | ||||
|     BLAS.gemmBatched(GridBLAS_OP_N,GridBLAS_OP_N,  | ||||
| 		     vw,nrhs,nev, | ||||
| 		     ComplexD(1.0), | ||||
| 		     Ed, // x . nev | ||||
| 		     Cd, // nev . nrhs | ||||
| 		     ComplexD(0.0), | ||||
| 		     Gd); | ||||
|     BLAS.synchronise(); | ||||
|  | ||||
|     /////////////////////////////////////// | ||||
|     // Copy out the multirhs | ||||
|     /////////////////////////////////////// | ||||
|     for(int r=0;r<nrhs;r++){ | ||||
|       int64_t offset = r*vw; | ||||
|       autoView(v,guess[r],AcceleratorWrite); | ||||
|       acceleratorCopyDeviceToDevice(&BLAS_G[offset],&v[0],sizeof(scalar_object)*vol); | ||||
|     } | ||||
|     RealD t1 = usecond(); | ||||
|     std::cout << GridLogMessage << "MultiRHSDeflation for "<<nrhs<<" sources with "<<nev<<" eigenvectors took " << (t1-t0)/1e3 <<" ms"<<std::endl; | ||||
|   } | ||||
| }; | ||||
|  | ||||
| NAMESPACE_END(Grid); | ||||
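A short usage sketch of the deflation helper follows (not part of the commit): `evec` and `eval` are assumed to be an existing set of `nev` eigenvectors and eigenvalues, for example from an ImplicitlyRestartedLanczos run, and `src` holds `nrhs` sources on the same grid.

```cpp
// Illustrative fragment under the assumptions stated above.
MultiRHSDeflation<LatticeFermionD> Deflate;
Deflate.ImportEigenBasis(evec, eval);   // allocates storage and copies all nev eigenvectors

std::vector<LatticeFermionD> guess(nrhs, evec[0].Grid());
Deflate.DeflateSources(src, guess);     // guess_r = sum_e evec_e <evec_e|src_r> / lambda_e

// 'guess' can now be handed to a multi-RHS solver as the starting vector.
```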
| @@ -33,109 +33,111 @@ Author: Peter Boyle <paboyle@ph.ed.ac.uk> | ||||
|    * Script A = SolverMatrix  | ||||
|    * Script P = Preconditioner | ||||
|    * | ||||
|    * Deflation methods considered | ||||
|    *      -- Solve P A x = P b        [ like Luscher ] | ||||
|    * DEF-1        M P A x = M P b     [i.e. left precon] | ||||
|    * DEF-2        P^T M A x = P^T M b | ||||
|    * ADEF-1       Preconditioner = M P + Q      [ Q + M + M A Q] | ||||
|    * ADEF-2       Preconditioner = P^T M + Q | ||||
|    * BNN          Preconditioner = P^T M P + Q | ||||
|    * BNN2         Preconditioner = M P + P^TM +Q - M P A M  | ||||
|    *  | ||||
|    * Implement ADEF-2 | ||||
|    * | ||||
|    * Vstart = P^Tx + Qb | ||||
|    * M1 = P^TM + Q | ||||
|    * M2=M3=1 | ||||
|    * Vout = x | ||||
|    */ | ||||
| NAMESPACE_BEGIN(Grid); | ||||
|  | ||||
| // abstract base | ||||
| template<class Field, class CoarseField> | ||||
| class TwoLevelFlexiblePcg : public LinearFunction<Field> | ||||
|  | ||||
| template<class Field> | ||||
| class TwoLevelCG : public LinearFunction<Field> | ||||
| { | ||||
|  public: | ||||
|   int verbose; | ||||
|   RealD   Tolerance; | ||||
|   Integer MaxIterations; | ||||
|   const int mmax = 5; | ||||
|   GridBase *grid; | ||||
|   GridBase *coarsegrid; | ||||
|  | ||||
|   LinearOperatorBase<Field>   *_Linop | ||||
|   OperatorFunction<Field>     *_Smoother, | ||||
|   LinearFunction<CoarseField> *_CoarseSolver; | ||||
|  | ||||
|   // Need something that knows how to get from Coarse to fine and back again | ||||
|   // Fine operator, Smoother, CoarseSolver | ||||
|   LinearOperatorBase<Field>   &_FineLinop; | ||||
|   LinearFunction<Field>   &_Smoother; | ||||
|    | ||||
|   // more operator functions | ||||
|   TwoLevelFlexiblePcg(RealD tol, | ||||
| 		     Integer maxit, | ||||
| 		     LinearOperatorBase<Field> *Linop, | ||||
| 		     LinearOperatorBase<Field> *SmootherLinop, | ||||
| 		     OperatorFunction<Field>   *Smoother, | ||||
| 		     OperatorFunction<CoarseField>  CoarseLinop | ||||
| 		     ) :  | ||||
|   TwoLevelCG(RealD tol, | ||||
| 	     Integer maxit, | ||||
| 	     LinearOperatorBase<Field>   &FineLinop, | ||||
| 	     LinearFunction<Field>       &Smoother, | ||||
| 	     GridBase *fine) :  | ||||
|       Tolerance(tol),  | ||||
|       MaxIterations(maxit), | ||||
|       _Linop(Linop), | ||||
|       _PreconditionerLinop(PrecLinop), | ||||
|       _Preconditioner(Preconditioner) | ||||
|   {  | ||||
|     verbose=0; | ||||
|       _FineLinop(FineLinop), | ||||
|       _Smoother(Smoother) | ||||
|   { | ||||
|     grid       = fine; | ||||
|   }; | ||||
|  | ||||
|   // The Pcg routine is common to all, but the various matrices differ from derived  | ||||
|   // implementation to derived implmentation | ||||
|   void operator() (const Field &src, Field &psi){ | ||||
|   void operator() (const Field &src, Field &psi){ | ||||
|  | ||||
|     psi.Checkerboard() = src.Checkerboard(); | ||||
|     grid             = src.Grid(); | ||||
|  | ||||
|    | ||||
|   virtual void operator() (const Field &src, Field &x) | ||||
|   { | ||||
|     std::cout << GridLogMessage<<"HDCG: fPcg starting single RHS"<<std::endl; | ||||
|     RealD f; | ||||
|     RealD rtzp,rtz,a,d,b; | ||||
|     RealD rptzp; | ||||
|     RealD tn; | ||||
|     RealD guess = norm2(psi); | ||||
|     RealD ssq   = norm2(src); | ||||
|     RealD rsq   = ssq*Tolerance*Tolerance; | ||||
|      | ||||
|  | ||||
|     ///////////////////////////// | ||||
|     // Set up history vectors | ||||
|     ///////////////////////////// | ||||
|     std::vector<Field> p  (mmax,grid); | ||||
|     int mmax = 5; | ||||
|     std::cout << GridLogMessage<<"HDCG: fPcg allocating"<<std::endl; | ||||
|     std::vector<Field> p(mmax,grid); | ||||
|     std::vector<Field> mmp(mmax,grid); | ||||
|     std::vector<RealD> pAp(mmax); | ||||
|  | ||||
|     Field x  (grid); x = psi; | ||||
|     Field z  (grid); | ||||
|     Field z(grid); | ||||
|     Field tmp(grid); | ||||
|     Field r  (grid); | ||||
|     Field mu (grid); | ||||
|    | ||||
|     Field  mp (grid); | ||||
|     Field  r  (grid); | ||||
|     Field  mu (grid); | ||||
|      | ||||
|     std::cout << GridLogMessage<<"HDCG: fPcg allocated"<<std::endl; | ||||
|     //Initial residual computation & set up | ||||
|     RealD guess   = norm2(x); | ||||
|     std::cout << GridLogMessage<<"HDCG: fPcg guess nrm "<<guess<<std::endl; | ||||
|     RealD src_nrm = norm2(src); | ||||
|     std::cout << GridLogMessage<<"HDCG: fPcg src nrm "<<src_nrm<<std::endl; | ||||
|      | ||||
|     if ( src_nrm == 0.0 ) { | ||||
|       std::cout << GridLogMessage<<"HDCG: fPcg given trivial source norm "<<src_nrm<<std::endl; | ||||
|       x=Zero(); | ||||
|     } | ||||
|     RealD tn; | ||||
|      | ||||
|     GridStopWatch HDCGTimer; | ||||
|     HDCGTimer.Start(); | ||||
|     ////////////////////////// | ||||
|     // x0 = Vstart -- possibly modify guess | ||||
|     ////////////////////////// | ||||
|     x=src; | ||||
|     Vstart(x,src); | ||||
|  | ||||
|      | ||||
|     // r0 = b -A x0 | ||||
|     HermOp(x,mmp); // Shouldn't this be something else? | ||||
|     _FineLinop.HermOp(x,mmp[0]); | ||||
|     axpy (r, -1.0,mmp[0], src);    // Recomputes r=src-Ax0 | ||||
|     { | ||||
|       double n1 = norm2(x); | ||||
|       double n2 = norm2(mmp[0]); | ||||
|       double n3 = norm2(r); | ||||
|       std::cout<<GridLogMessage<<"x,vstart,r = "<<n1<<" "<<n2<<" "<<n3<<std::endl; | ||||
|     } | ||||
|  | ||||
|     ////////////////////////////////// | ||||
|     // Compute z = M1 x | ||||
|     ////////////////////////////////// | ||||
|     M1(r,z,tmp,mp,SmootherMirs); | ||||
|     PcgM1(r,z); | ||||
|     rtzp =real(innerProduct(r,z)); | ||||
|  | ||||
|      | ||||
|     /////////////////////////////////////// | ||||
|     // Solve for Mss mu = P A z and set p = z-mu | ||||
|     // Def2: p = 1 - Q Az = Pright z  | ||||
|     // Def2 p = 1 - Q Az = Pright z | ||||
|     // Other algos M2 is trivial | ||||
|     /////////////////////////////////////// | ||||
|     M2(z,p[0]); | ||||
|     PcgM2(z,p[0]); | ||||
|  | ||||
|     RealD ssq =  norm2(src); | ||||
|     RealD rsq =  ssq*Tolerance*Tolerance; | ||||
|  | ||||
|     std::cout << GridLogMessage<<"HDCG: k=0 residual "<<rtzp<<" rsq "<<rsq<<"\n"; | ||||
|  | ||||
|     Field pp(grid); | ||||
|  | ||||
|     for (int k=0;k<=MaxIterations;k++){ | ||||
|      | ||||
| @@ -143,31 +145,46 @@ class TwoLevelFlexiblePcg : public LinearFunction<Field> | ||||
|       int peri_kp = (k+1) % mmax; | ||||
|  | ||||
|       rtz=rtzp; | ||||
|       d= M3(p[peri_k],mp,mmp[peri_k],tmp); | ||||
|       d= PcgM3(p[peri_k],mmp[peri_k]); | ||||
|       a = rtz/d; | ||||
|      | ||||
|       // Memorise this | ||||
|       pAp[peri_k] = d; | ||||
|  | ||||
|        | ||||
|       axpy(x,a,p[peri_k],x); | ||||
|       RealD rn = axpy_norm(r,-a,mmp[peri_k],r); | ||||
|  | ||||
|       // Compute z = M x | ||||
|       M1(r,z,tmp,mp); | ||||
|  | ||||
|       PcgM1(r,z); | ||||
|        | ||||
|       { | ||||
| 	RealD n1,n2; | ||||
| 	n1=norm2(r); | ||||
| 	n2=norm2(z); | ||||
| 	std::cout << GridLogMessage<<"HDCG::fPcg iteration "<<k<<" : vector r,z "<<n1<<" "<<n2<<"\n"; | ||||
|       } | ||||
|       rtzp =real(innerProduct(r,z)); | ||||
|       std::cout << GridLogMessage<<"HDCG::fPcg iteration "<<k<<" : inner rtzp "<<rtzp<<"\n"; | ||||
|  | ||||
|       M2(z,mu); // ADEF-2 this is identity. Axpy possible to eliminate | ||||
|       //    PcgM2(z,p[0]); | ||||
|       PcgM2(z,mu); // ADEF-2 this is identity. Axpy possible to eliminate | ||||
|        | ||||
|       p[peri_kp]=mu; | ||||
|  | ||||
|       p[peri_kp]=p[peri_k]; | ||||
|  | ||||
|       // Standard search direction  p -> z + b p    ; b =  | ||||
|       // Standard search direction  p -> z + b p     | ||||
|       b = (rtzp)/rtz; | ||||
|  | ||||
|        | ||||
|       int northog; | ||||
|       // k=zero  <=> peri_kp=1;        northog = 1 | ||||
|       // k=1     <=> peri_kp=2;        northog = 2 | ||||
|       // ...               ...                  ... | ||||
|       // k=mmax-2<=> peri_kp=mmax-1;   northog = mmax-1 | ||||
|       // k=mmax-1<=> peri_kp=0;        northog = 1 | ||||
|  | ||||
|       //    northog     = (peri_kp==0)?1:peri_kp; // This is the fCG(mmax) algorithm | ||||
|       northog     = (k>mmax-1)?(mmax-1):k;        // This is the fCG-Tr(mmax-1) algorithm | ||||
|      | ||||
|       std::cout<<GridLogMessage<<"HDCG::fPcg iteration "<<k<<" : orthogonalising to last "<<northog<<" vectors\n"; | ||||
|       for(int back=0; back < northog; back++){ | ||||
| 	int peri_back = (k-back)%mmax; | ||||
| 	RealD pbApk= real(innerProduct(mmp[peri_back],p[peri_kp])); | ||||
| @@ -176,75 +193,324 @@ class TwoLevelFlexiblePcg : public LinearFunction<Field> | ||||
|       } | ||||
|  | ||||
|       RealD rrn=sqrt(rn/ssq); | ||||
|       std::cout<<GridLogMessage<<"TwoLevelfPcg: k= "<<k<<" residual = "<<rrn<<std::endl; | ||||
|       RealD rtn=sqrt(rtz/ssq); | ||||
|       RealD rtnp=sqrt(rtzp/ssq); | ||||
|  | ||||
|       std::cout<<GridLogMessage<<"HDCG: fPcg k= "<<k<<" residual = "<<rrn<<"\n"; | ||||
|  | ||||
|       // Stopping condition | ||||
|       if ( rn <= rsq ) {  | ||||
|  | ||||
| 	HermOp(x,mmp); // Shouldn't this be something else? | ||||
| 	HDCGTimer.Stop(); | ||||
| 	std::cout<<GridLogMessage<<"HDCG: fPcg converged in "<<k<<" iterations and "<<HDCGTimer.Elapsed()<<std::endl;; | ||||
| 	 | ||||
| 	_FineLinop.HermOp(x,mmp[0]);			   | ||||
| 	axpy(tmp,-1.0,src,mmp[0]); | ||||
| 	 | ||||
| 	RealD psinorm = sqrt(norm2(x)); | ||||
| 	RealD srcnorm = sqrt(norm2(src)); | ||||
| 	RealD tmpnorm = sqrt(norm2(tmp)); | ||||
| 	RealD true_residual = tmpnorm/srcnorm; | ||||
| 	std::cout<<GridLogMessage<<"TwoLevelfPcg:   true residual is "<<true_residual<<std::endl; | ||||
| 	std::cout<<GridLogMessage<<"TwoLevelfPcg: target residual was"<<Tolerance<<std::endl; | ||||
| 	return k; | ||||
| 	RealD  mmpnorm = sqrt(norm2(mmp[0])); | ||||
| 	RealD  xnorm   = sqrt(norm2(x)); | ||||
| 	RealD  srcnorm = sqrt(norm2(src)); | ||||
| 	RealD  tmpnorm = sqrt(norm2(tmp)); | ||||
| 	RealD  true_residual = tmpnorm/srcnorm; | ||||
| 	std::cout<<GridLogMessage | ||||
| 	       <<"HDCG: true residual is "<<true_residual | ||||
| 	       <<" solution "<<xnorm | ||||
| 	       <<" source "<<srcnorm | ||||
| 	       <<" mmp "<<mmpnorm	   | ||||
| 	       <<std::endl; | ||||
|        | ||||
| 	return; | ||||
|       } | ||||
|  | ||||
|     } | ||||
|     // Non-convergence | ||||
|     assert(0); | ||||
|     HDCGTimer.Stop(); | ||||
|     std::cout<<GridLogMessage<<"HDCG: not converged "<<HDCGTimer.Elapsed()<<std::endl; | ||||
|     RealD  xnorm   = sqrt(norm2(x)); | ||||
|     RealD  srcnorm = sqrt(norm2(src)); | ||||
|     std::cout<<GridLogMessage<<"HDCG: non-converged solution "<<xnorm<<" source "<<srcnorm<<std::endl; | ||||
|   } | ||||
|  | ||||
|  | ||||
|  | ||||
|   virtual void operator() (std::vector<Field> &src, std::vector<Field> &x) | ||||
|   { | ||||
|     std::cout << GridLogMessage<<"HDCG: mrhs fPcg starting"<<std::endl; | ||||
|     src[0].Grid()->Barrier(); | ||||
|     int nrhs = src.size(); | ||||
|     std::vector<RealD> f(nrhs); | ||||
|     std::vector<RealD> rtzp(nrhs); | ||||
|     std::vector<RealD> rtz(nrhs); | ||||
|     std::vector<RealD> a(nrhs); | ||||
|     std::vector<RealD> d(nrhs); | ||||
|     std::vector<RealD> b(nrhs); | ||||
|     std::vector<RealD> rptzp(nrhs); | ||||
|     ///////////////////////////// | ||||
|     // Set up history vectors | ||||
|     ///////////////////////////// | ||||
|     int mmax = 3; | ||||
|     std::cout << GridLogMessage<<"HDCG: fPcg allocating"<<std::endl; | ||||
|     src[0].Grid()->Barrier(); | ||||
|     std::vector<std::vector<Field> > p(nrhs);   for(int r=0;r<nrhs;r++)  p[r].resize(mmax,grid); | ||||
|     std::cout << GridLogMessage<<"HDCG: fPcg allocated p"<<std::endl; | ||||
|     src[0].Grid()->Barrier(); | ||||
|     std::vector<std::vector<Field> > mmp(nrhs); for(int r=0;r<nrhs;r++) mmp[r].resize(mmax,grid); | ||||
|     std::cout << GridLogMessage<<"HDCG: fPcg allocated mmp"<<std::endl; | ||||
|     src[0].Grid()->Barrier(); | ||||
|     std::vector<std::vector<RealD> > pAp(nrhs); for(int r=0;r<nrhs;r++) pAp[r].resize(mmax); | ||||
|     std::cout << GridLogMessage<<"HDCG: fPcg allocated pAp"<<std::endl; | ||||
|     src[0].Grid()->Barrier(); | ||||
|     std::vector<Field> z(nrhs,grid); | ||||
|     std::vector<Field>  mp (nrhs,grid); | ||||
|     std::vector<Field>  r  (nrhs,grid); | ||||
|     std::vector<Field>  mu (nrhs,grid); | ||||
|     std::cout << GridLogMessage<<"HDCG: fPcg allocated z,mp,r,mu"<<std::endl; | ||||
|     src[0].Grid()->Barrier(); | ||||
|  | ||||
|     //Initial residual computation & set up | ||||
|     std::vector<RealD> src_nrm(nrhs); | ||||
|     for(int rhs=0;rhs<nrhs;rhs++) { | ||||
|       src_nrm[rhs]=norm2(src[rhs]); | ||||
|       assert(src_nrm[rhs]!=0.0); | ||||
|     } | ||||
|     std::vector<RealD> tn(nrhs); | ||||
|  | ||||
|     GridStopWatch HDCGTimer; | ||||
|     HDCGTimer.Start(); | ||||
|     ////////////////////////// | ||||
|     // x0 = Vstart -- possibly modify guess | ||||
|     ////////////////////////// | ||||
|     Vstart(x,src); | ||||
|  | ||||
|     for(int rhs=0;rhs<nrhs;rhs++){ | ||||
|       // r0 = b -A x0 | ||||
|       _FineLinop.HermOp(x[rhs],mmp[rhs][0]); | ||||
|       axpy (r[rhs], -1.0,mmp[rhs][0], src[rhs]);    // Recomputes r=src-Ax0 | ||||
|     } | ||||
|  | ||||
|     ////////////////////////////////// | ||||
|     // Compute z = M1 x | ||||
|     ////////////////////////////////// | ||||
|     // This needs a multiRHS version for acceleration | ||||
|     PcgM1(r,z); | ||||
|  | ||||
|     std::vector<RealD> ssq(nrhs); | ||||
|     std::vector<RealD> rsq(nrhs); | ||||
|     std::vector<Field> pp(nrhs,grid); | ||||
|  | ||||
|     for(int rhs=0;rhs<nrhs;rhs++){ | ||||
|       rtzp[rhs] =real(innerProduct(r[rhs],z[rhs])); | ||||
|       p[rhs][0]=z[rhs]; | ||||
|       ssq[rhs]=norm2(src[rhs]); | ||||
|       rsq[rhs]=  ssq[rhs]*Tolerance*Tolerance; | ||||
|       std::cout << GridLogMessage<<"mrhs HDCG: "<<rhs<<" k=0 residual "<<rtzp[rhs]<<" rsq "<<rsq[rhs]<<"\n"; | ||||
|     } | ||||
|  | ||||
|     std::vector<RealD> rn(nrhs); | ||||
|     for (int k=0;k<=MaxIterations;k++){ | ||||
|      | ||||
|       int peri_k  = k % mmax; | ||||
|       int peri_kp = (k+1) % mmax; | ||||
|  | ||||
|       for(int rhs=0;rhs<nrhs;rhs++){ | ||||
| 	rtz[rhs]=rtzp[rhs]; | ||||
| 	d[rhs]= PcgM3(p[rhs][peri_k],mmp[rhs][peri_k]); | ||||
| 	a[rhs] = rtz[rhs]/d[rhs]; | ||||
|      | ||||
| 	// Memorise this | ||||
| 	pAp[rhs][peri_k] = d[rhs]; | ||||
|  | ||||
| 	axpy(x[rhs],a[rhs],p[rhs][peri_k],x[rhs]); | ||||
| 	rn[rhs] = axpy_norm(r[rhs],-a[rhs],mmp[rhs][peri_k],r[rhs]); | ||||
|       } | ||||
|  | ||||
|       // Compute z = M1 r (for *all* RHS) | ||||
|       PcgM1(r,z); | ||||
|       std::cout << GridLogMessage<<"HDCG::fPcg M1 complete"<<std::endl; | ||||
|       grid->Barrier(); | ||||
|        | ||||
|       RealD max_rn=0.0; | ||||
|       for(int rhs=0;rhs<nrhs;rhs++){ | ||||
|  | ||||
| 	rtzp[rhs] =real(innerProduct(r[rhs],z[rhs])); | ||||
|  | ||||
| 	std::cout << GridLogMessage<<"HDCG::fPcg rhs"<<rhs<<" iteration "<<k<<" : inner rtzp "<<rtzp[rhs]<<"\n"; | ||||
| 	 | ||||
| 	mu[rhs]=z[rhs]; | ||||
|  | ||||
| 	p[rhs][peri_kp]=mu[rhs]; | ||||
|  | ||||
| 	// Standard search direction p == z + b p  | ||||
| 	b[rhs] = (rtzp[rhs])/rtz[rhs]; | ||||
|  | ||||
| 	int northog = (k>mmax-1)?(mmax-1):k;        // This is the fCG-Tr(mmax-1) algorithm | ||||
| 	std::cout<<GridLogMessage<<"HDCG::fPcg iteration "<<k<<" : orthogonalising to last "<<northog<<" vectors\n"; | ||||
| 	for(int back=0; back < northog; back++){ | ||||
| 	  int peri_back = (k-back)%mmax; | ||||
| 	  RealD pbApk= real(innerProduct(mmp[rhs][peri_back],p[rhs][peri_kp])); | ||||
| 	  RealD beta = -pbApk/pAp[rhs][peri_back]; | ||||
| 	  axpy(p[rhs][peri_kp],beta,p[rhs][peri_back],p[rhs][peri_kp]); | ||||
| 	} | ||||
|  | ||||
| 	RealD rrn=sqrt(rn[rhs]/ssq[rhs]); | ||||
| 	RealD rtn=sqrt(rtz[rhs]/ssq[rhs]); | ||||
| 	RealD rtnp=sqrt(rtzp[rhs]/ssq[rhs]); | ||||
| 	 | ||||
| 	std::cout<<GridLogMessage<<"HDCG: rhs "<<rhs<<"fPcg k= "<<k<<" residual = "<<rrn<<"\n"; | ||||
| 	if ( rrn > max_rn ) max_rn = rrn; | ||||
|       } | ||||
|  | ||||
|       // Stopping condition based on worst case | ||||
|       if ( max_rn <= Tolerance ) {  | ||||
|  | ||||
| 	HDCGTimer.Stop(); | ||||
| 	std::cout<<GridLogMessage<<"HDCG: mrhs fPcg converged in "<<k<<" iterations and "<<HDCGTimer.Elapsed()<<std::endl;; | ||||
|  | ||||
| 	for(int rhs=0;rhs<nrhs;rhs++){ | ||||
| 	  _FineLinop.HermOp(x[rhs],mmp[rhs][0]);			   | ||||
| 	  Field tmp(grid); | ||||
| 	  axpy(tmp,-1.0,src[rhs],mmp[rhs][0]); | ||||
|        | ||||
| 	  RealD  mmpnorm = sqrt(norm2(mmp[rhs][0])); | ||||
| 	  RealD  xnorm   = sqrt(norm2(x[rhs])); | ||||
| 	  RealD  srcnorm = sqrt(norm2(src[rhs])); | ||||
| 	  RealD  tmpnorm = sqrt(norm2(tmp)); | ||||
| 	  RealD  true_residual = tmpnorm/srcnorm; | ||||
| 	  std::cout<<GridLogMessage | ||||
| 		   <<"HDCG: true residual ["<<rhs<<"] is "<<true_residual | ||||
| 		   <<" solution "<<xnorm | ||||
| 		   <<" source "<<srcnorm | ||||
| 		   <<" mmp "<<mmpnorm	   | ||||
| 		   <<std::endl; | ||||
| 	} | ||||
| 	return; | ||||
|       } | ||||
|        | ||||
|     } | ||||
|     HDCGTimer.Stop(); | ||||
|     std::cout<<GridLogMessage<<"HDCG: not converged "<<HDCGTimer.Elapsed()<<std::endl; | ||||
|     for(int rhs=0;rhs<nrhs;rhs++){ | ||||
|       RealD  xnorm   = sqrt(norm2(x[rhs])); | ||||
|       RealD  srcnorm = sqrt(norm2(src[rhs])); | ||||
|       std::cout<<GridLogMessage<<"HDCG: non-converged solution "<<xnorm<<" source "<<srcnorm<<std::endl; | ||||
|     } | ||||
|   } | ||||
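For reference (an editorial note, not part of the patch): the orthogonalisation loop in the iteration above is the truncated flexible-CG update, fCG-Tr(mmax-1). The new search direction starts from the preconditioned residual and is made A-orthogonal to at most the last mmax-1 stored directions,

    p_{k+1} = z_{k+1} - sum over the last northog directions of ( <A p_j, z_{k+1}> / <p_j, A p_j> ) p_j ,   northog = min(k, mmax-1),

where the denominators <p_j, A p_j> are exactly the pAp[rhs][peri_back] values memorised earlier in the same iteration.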
|    | ||||
|  | ||||
|  public: | ||||
|  | ||||
|   virtual void M(Field & in,Field & out,Field & tmp) { | ||||
|   virtual void PcgM1(std::vector<Field> & in,std::vector<Field> & out) | ||||
|   { | ||||
|     std::cout << "PcgM1 default (cheat) mrhs version"<<std::endl; | ||||
|     for(int rhs=0;rhs<in.size();rhs++){ | ||||
|       this->PcgM1(in[rhs],out[rhs]); | ||||
|     } | ||||
|   } | ||||
|   virtual void PcgM1(Field & in, Field & out)     =0; | ||||
|   virtual void Vstart(std::vector<Field> & x,std::vector<Field> & src) | ||||
|   { | ||||
|     std::cout << "Vstart default (cheat) mrhs version"<<std::endl; | ||||
|     for(int rhs=0;rhs<x.size();rhs++){ | ||||
|       this->Vstart(x[rhs],src[rhs]); | ||||
|     } | ||||
|   } | ||||
|   virtual void Vstart(Field & x,const Field & src)=0; | ||||
|  | ||||
|   virtual void PcgM2(const Field & in, Field & out) { | ||||
|     out=in; | ||||
|   } | ||||
|  | ||||
|   virtual void M1(Field & in, Field & out) {// the smoother | ||||
|   virtual RealD PcgM3(const Field & p, Field & mmp){ | ||||
|     RealD dd; | ||||
|     _FineLinop.HermOp(p,mmp); | ||||
|     ComplexD dot = innerProduct(p,mmp); | ||||
|     dd=real(dot); | ||||
|     return dd; | ||||
|   } | ||||
|  | ||||
|   ///////////////////////////////////////////////////////////////////// | ||||
|   // Only Def1 has non-trivial Vout. | ||||
|   ///////////////////////////////////////////////////////////////////// | ||||
|  | ||||
| }; | ||||
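A minimal sketch of how the base class above is meant to be specialised (illustrative only, not part of this patch; the derived class name is hypothetical). A concrete solver only has to supply the single right-hand-side PcgM1 and Vstart hooks; everything else, including the multi-RHS operator(), is inherited.

    template<class Field>
    class SmootherOnlyTwoLevelCG : public TwoLevelCG<Field> {
     public:
      SmootherOnlyTwoLevelCG(RealD tol, Integer maxit,
                             LinearOperatorBase<Field> &FineLinop,
                             LinearFunction<Field>     &Smoother,
                             GridBase *fine)
        : TwoLevelCG<Field>(tol, maxit, FineLinop, Smoother, fine) {};

      // M1 = smoother only; no coarse-grid correction in this toy example
      virtual void PcgM1(Field &in, Field &out)       { this->_Smoother(in, out); }
      // Trivial start vector: x_0 = 0 so that r_0 = b
      virtual void Vstart(Field &x, const Field &src) { x = Zero(); }
    };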
|    | ||||
| template<class Field, class CoarseField, class Aggregation> | ||||
| class TwoLevelADEF2 : public TwoLevelCG<Field> | ||||
| { | ||||
|  public: | ||||
|   /////////////////////////////////////////////////////////////////////////////////// | ||||
|   // Need something that knows how to get from Coarse to fine and back again | ||||
|   //  void ProjectToSubspace(CoarseVector &CoarseVec,const FineField &FineVec){ | ||||
|   //  void PromoteFromSubspace(const CoarseVector &CoarseVec,FineField &FineVec){ | ||||
|   /////////////////////////////////////////////////////////////////////////////////// | ||||
|   GridBase *coarsegrid; | ||||
|   Aggregation &_Aggregates;                     | ||||
|   LinearFunction<CoarseField> &_CoarseSolver; | ||||
|   LinearFunction<CoarseField> &_CoarseSolverPrecise; | ||||
|   /////////////////////////////////////////////////////////////////////////////////// | ||||
|    | ||||
|   // Constructor | ||||
|   TwoLevelADEF2(RealD tol, | ||||
| 		Integer maxit, | ||||
| 		LinearOperatorBase<Field>    &FineLinop, | ||||
| 		LinearFunction<Field>        &Smoother, | ||||
| 		LinearFunction<CoarseField>  &CoarseSolver, | ||||
| 		LinearFunction<CoarseField>  &CoarseSolverPrecise, | ||||
| 		Aggregation &Aggregates | ||||
| 		) : | ||||
|       TwoLevelCG<Field>(tol,maxit,FineLinop,Smoother,Aggregates.FineGrid), | ||||
|       _CoarseSolver(CoarseSolver), | ||||
|       _CoarseSolverPrecise(CoarseSolverPrecise), | ||||
|       _Aggregates(Aggregates) | ||||
|   { | ||||
|     coarsegrid = Aggregates.CoarseGrid; | ||||
|   }; | ||||
|  | ||||
|   virtual void PcgM1(Field & in, Field & out) | ||||
|   { | ||||
|     GRID_TRACE("MultiGridPreconditioner "); | ||||
|     // [PTM+Q] in = [1 - Q A] M in + Q in = Min + Q [ in -A Min] | ||||
|     Field tmp(grid); | ||||
|     Field Min(grid); | ||||
|  | ||||
|     PcgM(in,Min); // Smoother call | ||||
|     Field tmp(this->grid); | ||||
|     Field Min(this->grid); | ||||
|     CoarseField PleftProj(this->coarsegrid); | ||||
|     CoarseField PleftMss_proj(this->coarsegrid); | ||||
|  | ||||
|     HermOp(Min,out); | ||||
|     GridStopWatch SmootherTimer; | ||||
|     GridStopWatch MatrixTimer; | ||||
|     SmootherTimer.Start(); | ||||
|     this->_Smoother(in,Min); | ||||
|     SmootherTimer.Stop(); | ||||
|  | ||||
|     MatrixTimer.Start(); | ||||
|     this->_FineLinop.HermOp(Min,out); | ||||
|     MatrixTimer.Stop(); | ||||
|     axpy(tmp,-1.0,out,in);          // tmp  = in - A Min | ||||
|  | ||||
|     ProjectToSubspace(tmp,PleftProj);      | ||||
|     ApplyInverse(PleftProj,PleftMss_proj); // Ass^{-1} [in - A Min]_s | ||||
|     PromoteFromSubspace(PleftMss_proj,tmp);// tmp = Q[in - A Min]   | ||||
|     GridStopWatch ProjTimer; | ||||
|     GridStopWatch CoarseTimer; | ||||
|     GridStopWatch PromTimer; | ||||
|     ProjTimer.Start(); | ||||
|     this->_Aggregates.ProjectToSubspace(PleftProj,tmp);      | ||||
|     ProjTimer.Stop(); | ||||
|     CoarseTimer.Start(); | ||||
|     this->_CoarseSolver(PleftProj,PleftMss_proj); // Ass^{-1} [in - A Min]_s | ||||
|     CoarseTimer.Stop(); | ||||
|     PromTimer.Start(); | ||||
|     this->_Aggregates.PromoteFromSubspace(PleftMss_proj,tmp);// tmp = Q[in - A Min]   | ||||
|     PromTimer.Stop(); | ||||
|     std::cout << GridLogPerformance << "PcgM1 breakdown "<<std::endl; | ||||
|     std::cout << GridLogPerformance << "\tSmoother   " << SmootherTimer.Elapsed() <<std::endl; | ||||
|     std::cout << GridLogPerformance << "\tMatrix     " << MatrixTimer.Elapsed() <<std::endl; | ||||
|     std::cout << GridLogPerformance << "\tProj       " << ProjTimer.Elapsed() <<std::endl; | ||||
|     std::cout << GridLogPerformance << "\tCoarse     " << CoarseTimer.Elapsed() <<std::endl; | ||||
|     std::cout << GridLogPerformance << "\tProm       " << PromTimer.Elapsed() <<std::endl; | ||||
|  | ||||
|     axpy(out,1.0,Min,tmp); // Min+tmp | ||||
|   } | ||||
|  | ||||
|   virtual void M2(const Field & in, Field & out) { | ||||
|     out=in; | ||||
|     // Must override for Def2 only | ||||
|     //  case PcgDef2: | ||||
|     //    Pright(in,out); | ||||
|     //    break; | ||||
|   } | ||||
|  | ||||
|   virtual RealD M3(const Field & p, Field & mmp){ | ||||
|     double d,dd; | ||||
|     HermOpAndNorm(p,mmp,d,dd); | ||||
|     return dd; | ||||
|     // Must override for Def1 only | ||||
|     //  case PcgDef1: | ||||
|     //    d=linop_d->Mprec(p,mmp,tmp,0,1);// Dag no | ||||
|     //      linop_d->Mprec(mmp,mp,tmp,1);// Dag yes | ||||
|     //    Pleft(mp,mmp); | ||||
|     //    d=real(linop_d->inner(p,mmp)); | ||||
|   } | ||||
|  | ||||
|   virtual void VstartDef2(Field & x, const Field & src){ | ||||
|     //case PcgDef2: | ||||
|     //case PcgAdef2:  | ||||
|     //case PcgAdef2f: | ||||
|     //case PcgV11f: | ||||
|   virtual void Vstart(Field & x,const Field & src) | ||||
|   { | ||||
|     std::cout << GridLogMessage<<"HDCG: fPcg Vstart "<<std::endl; | ||||
|     /////////////////////////////////// | ||||
|     // Choose x_0 such that  | ||||
|     // x_0 = guess +  (A_ss^inv) r_s = guess + Ass_inv [src -Aguess] | ||||
| @@ -256,142 +522,78 @@ class TwoLevelFlexiblePcg : public LinearFunction<Field> | ||||
|     //                   = src_s - (A guess)_s - src_s  + (A guess)_s  | ||||
|     //                   = 0  | ||||
|     /////////////////////////////////// | ||||
|     Field r(grid); | ||||
|     Field mmp(grid); | ||||
|      | ||||
|     HermOp(x,mmp); | ||||
|     axpy (r, -1.0, mmp, src);        // r_{-1} = src - A x | ||||
|     ProjectToSubspace(r,PleftProj);      | ||||
|     ApplyInverseCG(PleftProj,PleftMss_proj); // Ass^{-1} r_s | ||||
|     PromoteFromSubspace(PleftMss_proj,mmp);   | ||||
|     x=x+mmp; | ||||
|     Field r(this->grid); | ||||
|     Field mmp(this->grid); | ||||
|     CoarseField PleftProj(this->coarsegrid); | ||||
|     CoarseField PleftMss_proj(this->coarsegrid); | ||||
|  | ||||
|     std::cout << GridLogMessage<<"HDCG: fPcg Vstart projecting "<<std::endl; | ||||
|     this->_Aggregates.ProjectToSubspace(PleftProj,src);      | ||||
|     std::cout << GridLogMessage<<"HDCG: fPcg Vstart coarse solve "<<std::endl; | ||||
|     this->_CoarseSolverPrecise(PleftProj,PleftMss_proj); // Ass^{-1} r_s | ||||
|     std::cout << GridLogMessage<<"HDCG: fPcg Vstart promote "<<std::endl; | ||||
|     this->_Aggregates.PromoteFromSubspace(PleftMss_proj,x);   | ||||
|  | ||||
|   } | ||||
|  | ||||
| }; | ||||
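Illustrative wiring of the class above (every object name below is a placeholder, not something defined in this patch): given a fine Hermitian operator, a smoother, an Aggregation holding the near-null-space vectors, and two coarse solvers (a cheap one for the M1 correction and a precise one for Vstart), the ADEF-2 solver is assembled and then applied like any other LinearFunction.

    const int nbasis = 40;                                        // assumed block size
    typedef Aggregation<vSpinColourVector, vTComplex, nbasis> Aggregates;
    typedef Aggregates::CoarseVector CoarseVector;

    TwoLevelADEF2<LatticeFermion, CoarseVector, Aggregates>
      HDCG(1.0e-8, 1000, HermOp, Smoother, CoarseSolver, CoarseSolverPrecise, Agg);

    HDCG(src, sol);   // single right-hand side; the mrhs operator() is also available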
|  | ||||
|    | ||||
| template<class Field> | ||||
| class TwoLevelADEF1defl : public TwoLevelCG<Field> | ||||
| { | ||||
| public: | ||||
|   const std::vector<Field> &evec; | ||||
|   const std::vector<RealD> &eval; | ||||
|    | ||||
|   TwoLevelADEF1defl(RealD tol, | ||||
| 		   Integer maxit, | ||||
| 		   LinearOperatorBase<Field>   &FineLinop, | ||||
| 		   LinearFunction<Field>   &Smoother, | ||||
| 		   std::vector<Field> &_evec, | ||||
| 		   std::vector<RealD> &_eval) :  | ||||
|     TwoLevelCG<Field>(tol,maxit,FineLinop,Smoother,_evec[0].Grid()), | ||||
|     evec(_evec), | ||||
|     eval(_eval) | ||||
|   {}; | ||||
|  | ||||
|   // Can just inherit existing M2 | ||||
|   // Can just inherit existing M3 | ||||
|  | ||||
|   // Simple vstart - do nothing | ||||
|   virtual void Vstart(Field & x,const Field & src){ | ||||
|     return; | ||||
|     x=src; // Could apply Q | ||||
|   }; | ||||
|  | ||||
|   // Override PcgM1 | ||||
|   virtual void PcgM1(Field & in, Field & out) | ||||
|   { | ||||
|     GRID_TRACE("EvecPreconditioner "); | ||||
|     int N=evec.size(); | ||||
|     Field Pin(this->grid); | ||||
|     Field Qin(this->grid); | ||||
|  | ||||
|     //MP  + Q = M(1-AQ) + Q = M | ||||
|     // // If we are eigenvector deflating in coarse space | ||||
|     // // Q   = Sum_i |phi_i> 1/lambda_i <phi_i| | ||||
|     // // A Q = Sum_i |phi_i> <phi_i| | ||||
|     // // M(1-AQ) = M(1-proj) + Q | ||||
|     Qin.Checkerboard()=in.Checkerboard(); | ||||
|     Qin = Zero(); | ||||
|     Pin = in; | ||||
|     for (int i=0;i<N;i++) { | ||||
|       const Field& tmp = evec[i]; | ||||
|       auto ip = TensorRemove(innerProduct(tmp,in)); | ||||
|       axpy(Qin, ip / eval[i],tmp,Qin); | ||||
|       axpy(Pin, -ip ,tmp,Pin); | ||||
|     } | ||||
|  | ||||
|     this->_Smoother(Pin,out); | ||||
|  | ||||
|     out = out + Qin; | ||||
|   } | ||||
| }; | ||||
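For the deflated variant just above, with exact eigenpairs A phi_i = lambda_i phi_i as the in-code comment sketches,

    Q = sum_i phi_i (1/lambda_i) phi_i^dag ,    A Q = sum_i phi_i phi_i^dag   (the projector onto the deflation space),

so PcgM1 applies M_1 r = M (1 - sum_i phi_i phi_i^dag) r + Q r: the smoother only sees the component of r orthogonal to the eigenvectors (Pin), while the deflated component is solved exactly by Q (Qin).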
|  | ||||
|   ///////////////////////////////////////////////////////////////////// | ||||
|   // Only Def1 has non-trivial Vout. Override in Def1 | ||||
|   ///////////////////////////////////////////////////////////////////// | ||||
|   virtual void   Vout  (Field & in, Field & out,Field & src){ | ||||
|     out = in; | ||||
|     //case PcgDef1: | ||||
|     //    //Qb + PT x | ||||
|     //    ProjectToSubspace(src,PleftProj);      | ||||
|     //    ApplyInverse(PleftProj,PleftMss_proj); // Ass^{-1} r_s | ||||
|     //    PromoteFromSubspace(PleftMss_proj,tmp);   | ||||
|     //     | ||||
|     //    Pright(in,out); | ||||
|     //     | ||||
|     //    linop_d->axpy(out,tmp,out,1.0); | ||||
|     //    break; | ||||
|   } | ||||
| NAMESPACE_END(Grid); | ||||
|  | ||||
|   //////////////////////////////////////////////////////////////////////////////////////////////// | ||||
|   // Pright and Pleft are common to all implementations | ||||
|   //////////////////////////////////////////////////////////////////////////////////////////////// | ||||
|   virtual void Pright(Field & in,Field & out){ | ||||
|     // P_R  = [ 1              0 ]  | ||||
|     //        [ -Mss^-1 Msb    0 ]  | ||||
|     Field in_sbar(grid); | ||||
|  | ||||
|     ProjectToSubspace(in,PleftProj);      | ||||
|     PromoteFromSubspace(PleftProj,out);   | ||||
|     axpy(in_sbar,-1.0,out,in);       // in_sbar = in - in_s  | ||||
|  | ||||
|     HermOp(in_sbar,out); | ||||
|     ProjectToSubspace(out,PleftProj);           // Mssbar in_sbar  (project) | ||||
|  | ||||
|     ApplyInverse     (PleftProj,PleftMss_proj); // Mss^{-1} Mssbar  | ||||
|     PromoteFromSubspace(PleftMss_proj,out);     //  | ||||
|  | ||||
|     axpy(out,-1.0,out,in_sbar);     // in_sbar - Mss^{-1} Mssbar in_sbar | ||||
|   } | ||||
|   virtual void Pleft (Field & in,Field & out){ | ||||
|     // P_L  = [ 1  -Mbs Mss^-1]  | ||||
|     //        [ 0   0         ]  | ||||
|     Field in_sbar(grid); | ||||
|     Field    tmp2(grid); | ||||
|     Field    Mtmp(grid); | ||||
|  | ||||
|     ProjectToSubspace(in,PleftProj);      | ||||
|     PromoteFromSubspace(PleftProj,out);   | ||||
|     axpy(in_sbar,-1.0,out,in);      // in_sbar = in - in_s | ||||
|  | ||||
|     ApplyInverse(PleftProj,PleftMss_proj); // Mss^{-1} in_s | ||||
|     PromoteFromSubspace(PleftMss_proj,out); | ||||
|  | ||||
|     HermOp(out,Mtmp); | ||||
|  | ||||
|     ProjectToSubspace(Mtmp,PleftProj);      // Msbar s Mss^{-1} | ||||
|     PromoteFromSubspace(PleftProj,tmp2); | ||||
|  | ||||
|     axpy(out,-1.0,tmp2,Mtmp); | ||||
|     axpy(out,-1.0,out,in_sbar);     // in_sbar - Msbars Mss^{-1} in_s | ||||
|   } | ||||
| } | ||||
|  | ||||
| template<class Field> | ||||
| class TwoLevelFlexiblePcgADef2 : public TwoLevelFlexiblePcg<Field> { | ||||
|  public: | ||||
|   virtual void M(Field & in,Field & out,Field & tmp){ | ||||
|  | ||||
|   }  | ||||
|   virtual void M1(Field & in, Field & out,Field & tmp,Field & mp){ | ||||
|  | ||||
|   } | ||||
|   virtual void M2(Field & in, Field & out){ | ||||
|  | ||||
|   } | ||||
|   virtual RealD M3(Field & p, Field & mp,Field & mmp, Field & tmp){ | ||||
|  | ||||
|   } | ||||
|   virtual void Vstart(Field & in, Field & src, Field & r, Field & mp, Field & mmp, Field & tmp){ | ||||
|  | ||||
|   } | ||||
| } | ||||
| /* | ||||
| template<class Field> | ||||
| class TwoLevelFlexiblePcgAD : public TwoLevelFlexiblePcg<Field> { | ||||
|  public: | ||||
|   virtual void M(Field & in,Field & out,Field & tmp);  | ||||
|   virtual void M1(Field & in, Field & out,Field & tmp,Field & mp); | ||||
|   virtual void M2(Field & in, Field & out); | ||||
|   virtual RealD M3(Field & p, Field & mp,Field & mmp, Field & tmp); | ||||
|   virtual void Vstart(Field & in, Field & src, Field & r, Field & mp, Field & mmp, Field & tmp); | ||||
| } | ||||
|  | ||||
| template<class Field> | ||||
| class TwoLevelFlexiblePcgDef1 : public TwoLevelFlexiblePcg<Field> { | ||||
|  public: | ||||
|   virtual void M(Field & in,Field & out,Field & tmp);  | ||||
|   virtual void M1(Field & in, Field & out,Field & tmp,Field & mp); | ||||
|   virtual void M2(Field & in, Field & out); | ||||
|   virtual RealD M3(Field & p, Field & mp,Field & mmp, Field & tmp); | ||||
|   virtual void Vstart(Field & in, Field & src, Field & r, Field & mp, Field & mmp, Field & tmp); | ||||
|   virtual void   Vout  (Field & in, Field & out,Field & src,Field & tmp); | ||||
| } | ||||
|  | ||||
| template<class Field> | ||||
| class TwoLevelFlexiblePcgDef2 : public TwoLevelFlexiblePcg<Field> { | ||||
|  public: | ||||
|   virtual void M(Field & in,Field & out,Field & tmp);  | ||||
|   virtual void M1(Field & in, Field & out,Field & tmp,Field & mp); | ||||
|   virtual void M2(Field & in, Field & out); | ||||
|   virtual RealD M3(Field & p, Field & mp,Field & mmp, Field & tmp); | ||||
|   virtual void Vstart(Field & in, Field & src, Field & r, Field & mp, Field & mmp, Field & tmp); | ||||
| } | ||||
|  | ||||
| template<class Field> | ||||
| class TwoLevelFlexiblePcgV11: public TwoLevelFlexiblePcg<Field> { | ||||
|  public: | ||||
|   virtual void M(Field & in,Field & out,Field & tmp);  | ||||
|   virtual void M1(Field & in, Field & out,Field & tmp,Field & mp); | ||||
|   virtual void M2(Field & in, Field & out); | ||||
|   virtual RealD M3(Field & p, Field & mp,Field & mmp, Field & tmp); | ||||
|   virtual void Vstart(Field & in, Field & src, Field & r, Field & mp, Field & mmp, Field & tmp); | ||||
| } | ||||
| */ | ||||
| #endif | ||||
|   | ||||
							
								
								
									
Grid/algorithms/iterative/AdefMrhs.h (new file, 414 lines)
							| @@ -0,0 +1,414 @@ | ||||
|     /************************************************************************************* | ||||
|  | ||||
|     Grid physics library, www.github.com/paboyle/Grid  | ||||
|  | ||||
|     Source file: ./lib/algorithms/iterative/AdefGeneric.h | ||||
|  | ||||
|     Copyright (C) 2015 | ||||
|  | ||||
| Author: Peter Boyle <paboyle@ph.ed.ac.uk> | ||||
|  | ||||
|     This program is free software; you can redistribute it and/or modify | ||||
|     it under the terms of the GNU General Public License as published by | ||||
|     the Free Software Foundation; either version 2 of the License, or | ||||
|     (at your option) any later version. | ||||
|  | ||||
|     This program is distributed in the hope that it will be useful, | ||||
|     but WITHOUT ANY WARRANTY; without even the implied warranty of | ||||
|     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the | ||||
|     GNU General Public License for more details. | ||||
|  | ||||
|     You should have received a copy of the GNU General Public License along | ||||
|     with this program; if not, write to the Free Software Foundation, Inc., | ||||
|     51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. | ||||
|  | ||||
|     See the full license in the file "LICENSE" in the top level distribution directory | ||||
|     *************************************************************************************/ | ||||
|     /*  END LEGAL */ | ||||
| #pragma once | ||||
|  | ||||
|  | ||||
|   /* | ||||
|    * Compared to Tang-2009:  P=Pleft. P^T = PRight Q=MssInv.  | ||||
|    * Script A = SolverMatrix  | ||||
|    * Script P = Preconditioner | ||||
|    * | ||||
|    * Implement ADEF-2 | ||||
|    * | ||||
|    * Vstart = P^Tx + Qb | ||||
|    * M1 = P^TM + Q | ||||
|    * M2=M3=1 | ||||
|    */ | ||||
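Spelled out (same notation as the comment above, with Q the coarse-space inverse promoted back to the fine grid and P^T = 1 - Q A), the two non-trivial maps are

    Vstart :  x_0   = P^T guess + Q b
    M_1    :  M_1 r = P^T M r + Q r = M r + Q ( r - A M r ) ,

i.e. smooth, form the residual of the smoothed update, solve that residual on the coarse space, and add the correction back; this is the sequence implemented in PcgM1 of the classes below.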
| NAMESPACE_BEGIN(Grid); | ||||
|  | ||||
|  | ||||
| template<class Field> | ||||
| class TwoLevelCGmrhs | ||||
| { | ||||
|  public: | ||||
|   RealD   Tolerance; | ||||
|   Integer MaxIterations; | ||||
|   GridBase *grid; | ||||
|  | ||||
|   // Fine operator, Smoother, CoarseSolver | ||||
|   LinearOperatorBase<Field>   &_FineLinop; | ||||
|   LinearFunction<Field>   &_Smoother; | ||||
|  | ||||
|   GridStopWatch ProjectTimer; | ||||
|   GridStopWatch PromoteTimer; | ||||
|   GridStopWatch DeflateTimer; | ||||
|   GridStopWatch CoarseTimer; | ||||
|   GridStopWatch FineTimer; | ||||
|   GridStopWatch SmoothTimer; | ||||
|   GridStopWatch InsertTimer; | ||||
|  | ||||
|    | ||||
|   // Constructor | ||||
|   TwoLevelCGmrhs(RealD tol, | ||||
| 		 Integer maxit, | ||||
| 		 LinearOperatorBase<Field>   &FineLinop, | ||||
| 		 LinearFunction<Field>       &Smoother, | ||||
| 		 GridBase *fine) :  | ||||
|     Tolerance(tol),  | ||||
|     MaxIterations(maxit), | ||||
|     _FineLinop(FineLinop), | ||||
|     _Smoother(Smoother) | ||||
|   { | ||||
|     grid       = fine; | ||||
|   }; | ||||
|    | ||||
|   // Vector case | ||||
|   virtual void operator() (std::vector<Field> &src, std::vector<Field> &x) | ||||
|   { | ||||
|     std::cout << GridLogMessage<<"HDCG: mrhs fPcg starting"<<std::endl; | ||||
|     src[0].Grid()->Barrier(); | ||||
|     int nrhs = src.size(); | ||||
|     std::vector<RealD> f(nrhs); | ||||
|     std::vector<RealD> rtzp(nrhs); | ||||
|     std::vector<RealD> rtz(nrhs); | ||||
|     std::vector<RealD> a(nrhs); | ||||
|     std::vector<RealD> d(nrhs); | ||||
|     std::vector<RealD> b(nrhs); | ||||
|     std::vector<RealD> rptzp(nrhs); | ||||
|     ///////////////////////////// | ||||
|     // Set up history vectors | ||||
|     ///////////////////////////// | ||||
|     int mmax = 3; | ||||
|  | ||||
|     std::vector<std::vector<Field> > p(nrhs);   for(int r=0;r<nrhs;r++)  p[r].resize(mmax,grid); | ||||
|     std::vector<std::vector<Field> > mmp(nrhs); for(int r=0;r<nrhs;r++) mmp[r].resize(mmax,grid); | ||||
|     std::vector<std::vector<RealD> > pAp(nrhs); for(int r=0;r<nrhs;r++) pAp[r].resize(mmax); | ||||
|  | ||||
|     std::vector<Field> z(nrhs,grid); | ||||
|     std::vector<Field>  mp (nrhs,grid); | ||||
|     std::vector<Field>  r  (nrhs,grid); | ||||
|     std::vector<Field>  mu (nrhs,grid); | ||||
|  | ||||
|     //Initial residual computation & set up | ||||
|     std::vector<RealD> src_nrm(nrhs); | ||||
|     for(int rhs=0;rhs<nrhs;rhs++) { | ||||
|       src_nrm[rhs]=norm2(src[rhs]); | ||||
|       assert(src_nrm[rhs]!=0.0); | ||||
|     } | ||||
|     std::vector<RealD> tn(nrhs); | ||||
|  | ||||
|     GridStopWatch HDCGTimer; | ||||
|     ////////////////////////// | ||||
|     // x0 = Vstart -- possibly modify guess | ||||
|     ////////////////////////// | ||||
|     Vstart(x,src); | ||||
|  | ||||
|     for(int rhs=0;rhs<nrhs;rhs++){ | ||||
|       // r0 = b -A x0 | ||||
|       _FineLinop.HermOp(x[rhs],mmp[rhs][0]); | ||||
|       axpy (r[rhs], -1.0,mmp[rhs][0], src[rhs]);    // Recomputes r=src-Ax0 | ||||
|     } | ||||
|  | ||||
|     ////////////////////////////////// | ||||
|     // Compute z = M1 x | ||||
|     ////////////////////////////////// | ||||
|     // This needs a multiRHS version for acceleration | ||||
|     PcgM1(r,z); | ||||
|  | ||||
|     std::vector<RealD> ssq(nrhs); | ||||
|     std::vector<RealD> rsq(nrhs); | ||||
|     std::vector<Field> pp(nrhs,grid); | ||||
|  | ||||
|     for(int rhs=0;rhs<nrhs;rhs++){ | ||||
|       rtzp[rhs] =real(innerProduct(r[rhs],z[rhs])); | ||||
|       p[rhs][0]=z[rhs]; | ||||
|       ssq[rhs]=norm2(src[rhs]); | ||||
|       rsq[rhs]=  ssq[rhs]*Tolerance*Tolerance; | ||||
|       //      std::cout << GridLogMessage<<"mrhs HDCG: "<<rhs<<" k=0 residual "<<rtzp[rhs]<<" rsq "<<rsq[rhs]<<"\n"; | ||||
|     } | ||||
|  | ||||
|     ProjectTimer.Reset(); | ||||
|     PromoteTimer.Reset(); | ||||
|     DeflateTimer.Reset(); | ||||
|     CoarseTimer.Reset(); | ||||
|     SmoothTimer.Reset(); | ||||
|     FineTimer.Reset(); | ||||
|     InsertTimer.Reset(); | ||||
|  | ||||
|     GridStopWatch M1Timer; | ||||
|     GridStopWatch M2Timer; | ||||
|     GridStopWatch M3Timer; | ||||
|     GridStopWatch LinalgTimer; | ||||
|  | ||||
|     HDCGTimer.Start(); | ||||
|  | ||||
|     std::vector<RealD> rn(nrhs); | ||||
|     for (int k=0;k<=MaxIterations;k++){ | ||||
|      | ||||
|       int peri_k  = k % mmax; | ||||
|       int peri_kp = (k+1) % mmax; | ||||
|  | ||||
|       for(int rhs=0;rhs<nrhs;rhs++){ | ||||
| 	rtz[rhs]=rtzp[rhs]; | ||||
| 	M3Timer.Start(); | ||||
| 	d[rhs]= PcgM3(p[rhs][peri_k],mmp[rhs][peri_k]); | ||||
| 	M3Timer.Stop(); | ||||
| 	a[rhs] = rtz[rhs]/d[rhs]; | ||||
|  | ||||
| 	LinalgTimer.Start(); | ||||
| 	// Memorise this | ||||
| 	pAp[rhs][peri_k] = d[rhs]; | ||||
|  | ||||
| 	axpy(x[rhs],a[rhs],p[rhs][peri_k],x[rhs]); | ||||
| 	rn[rhs] = axpy_norm(r[rhs],-a[rhs],mmp[rhs][peri_k],r[rhs]); | ||||
| 	LinalgTimer.Stop(); | ||||
|       } | ||||
|  | ||||
|       // Compute z = M x (for *all* RHS) | ||||
|       M1Timer.Start(); | ||||
|       PcgM1(r,z); | ||||
|       M1Timer.Stop(); | ||||
|        | ||||
|       RealD max_rn=0.0; | ||||
|       LinalgTimer.Start(); | ||||
|       for(int rhs=0;rhs<nrhs;rhs++){ | ||||
|  | ||||
| 	rtzp[rhs] =real(innerProduct(r[rhs],z[rhs])); | ||||
|  | ||||
| 	//	std::cout << GridLogMessage<<"HDCG::fPcg rhs"<<rhs<<" iteration "<<k<<" : inner rtzp "<<rtzp[rhs]<<"\n"; | ||||
| 	mu[rhs]=z[rhs]; | ||||
|  | ||||
| 	p[rhs][peri_kp]=mu[rhs]; | ||||
|  | ||||
| 	// Standard search direction p == z + b p  | ||||
| 	b[rhs] = (rtzp[rhs])/rtz[rhs]; | ||||
|  | ||||
| 	int northog = (k>mmax-1)?(mmax-1):k;        // This is the fCG-Tr(mmax-1) algorithm | ||||
| 	for(int back=0; back < northog; back++){ | ||||
| 	  int peri_back = (k-back)%mmax; | ||||
| 	  RealD pbApk= real(innerProduct(mmp[rhs][peri_back],p[rhs][peri_kp])); | ||||
| 	  RealD beta = -pbApk/pAp[rhs][peri_back]; | ||||
| 	  axpy(p[rhs][peri_kp],beta,p[rhs][peri_back],p[rhs][peri_kp]); | ||||
| 	} | ||||
|  | ||||
| 	RealD rrn=sqrt(rn[rhs]/ssq[rhs]); | ||||
| 	RealD rtn=sqrt(rtz[rhs]/ssq[rhs]); | ||||
| 	RealD rtnp=sqrt(rtzp[rhs]/ssq[rhs]); | ||||
| 	 | ||||
| 	std::cout<<GridLogMessage<<"HDCG:fPcg rhs "<<rhs<<" k= "<<k<<" residual = "<<rrn<<"\n"; | ||||
| 	if ( rrn > max_rn ) max_rn = rrn; | ||||
|       } | ||||
|       LinalgTimer.Stop(); | ||||
|  | ||||
|       // Stopping condition based on worst case | ||||
|       if ( max_rn <= Tolerance ) {  | ||||
|  | ||||
| 	HDCGTimer.Stop(); | ||||
| 	std::cout<<GridLogMessage<<"HDCG: mrhs fPcg converged in "<<k<<" iterations and "<<HDCGTimer.Elapsed()<<std::endl;; | ||||
| 	std::cout<<GridLogMessage<<"HDCG: mrhs fPcg : Linalg  "<<LinalgTimer.Elapsed()<<std::endl;; | ||||
| 	std::cout<<GridLogMessage<<"HDCG: mrhs fPcg : fine M3 "<<M3Timer.Elapsed()<<std::endl;; | ||||
| 	std::cout<<GridLogMessage<<"HDCG: mrhs fPcg : prec M1 "<<M1Timer.Elapsed()<<std::endl;; | ||||
| 	std::cout<<GridLogMessage<<"**** M1 breakdown:"<<std::endl; | ||||
| 	std::cout<<GridLogMessage<<"HDCG: mrhs fPcg : Project "<<ProjectTimer.Elapsed()<<std::endl;; | ||||
| 	std::cout<<GridLogMessage<<"HDCG: mrhs fPcg : Promote "<<PromoteTimer.Elapsed()<<std::endl;; | ||||
| 	std::cout<<GridLogMessage<<"HDCG: mrhs fPcg : Deflate "<<DeflateTimer.Elapsed()<<std::endl;; | ||||
| 	std::cout<<GridLogMessage<<"HDCG: mrhs fPcg : Coarse  "<<CoarseTimer.Elapsed()<<std::endl;; | ||||
| 	std::cout<<GridLogMessage<<"HDCG: mrhs fPcg : Fine    "<<FineTimer.Elapsed()<<std::endl;; | ||||
| 	std::cout<<GridLogMessage<<"HDCG: mrhs fPcg : Smooth  "<<SmoothTimer.Elapsed()<<std::endl;; | ||||
| 	std::cout<<GridLogMessage<<"HDCG: mrhs fPcg : Insert  "<<InsertTimer.Elapsed()<<std::endl;; | ||||
|  | ||||
| 	for(int rhs=0;rhs<nrhs;rhs++){ | ||||
| 	  _FineLinop.HermOp(x[rhs],mmp[rhs][0]);			   | ||||
| 	  Field tmp(grid); | ||||
| 	  axpy(tmp,-1.0,src[rhs],mmp[rhs][0]); | ||||
|        | ||||
| 	  RealD  mmpnorm = sqrt(norm2(mmp[rhs][0])); | ||||
| 	  RealD  xnorm   = sqrt(norm2(x[rhs])); | ||||
| 	  RealD  srcnorm = sqrt(norm2(src[rhs])); | ||||
| 	  RealD  tmpnorm = sqrt(norm2(tmp)); | ||||
| 	  RealD  true_residual = tmpnorm/srcnorm; | ||||
| 	  std::cout<<GridLogMessage | ||||
| 		   <<"HDCG: true residual ["<<rhs<<"] is "<<true_residual | ||||
| 		   <<" solution "<<xnorm | ||||
| 		   <<" source "<<srcnorm | ||||
| 		   <<" mmp "<<mmpnorm	   | ||||
| 		   <<std::endl; | ||||
| 	} | ||||
| 	return; | ||||
|       } | ||||
|        | ||||
|     } | ||||
|     HDCGTimer.Stop(); | ||||
|     std::cout<<GridLogMessage<<"HDCG: not converged "<<HDCGTimer.Elapsed()<<std::endl; | ||||
|     for(int rhs=0;rhs<nrhs;rhs++){ | ||||
|       RealD  xnorm   = sqrt(norm2(x[rhs])); | ||||
|       RealD  srcnorm = sqrt(norm2(src[rhs])); | ||||
|       std::cout<<GridLogMessage<<"HDCG: non-converged solution "<<xnorm<<" source "<<srcnorm<<std::endl; | ||||
|     } | ||||
|   } | ||||
|    | ||||
|  | ||||
|  public: | ||||
|  | ||||
|   virtual void PcgM1(std::vector<Field> & in,std::vector<Field> & out) = 0; | ||||
|   virtual void Vstart(std::vector<Field> & x,std::vector<Field> & src) = 0; | ||||
|   virtual void PcgM2(const Field & in, Field & out) { | ||||
|     out=in; | ||||
|   } | ||||
|  | ||||
|   virtual RealD PcgM3(const Field & p, Field & mmp){ | ||||
|     RealD dd; | ||||
|     _FineLinop.HermOp(p,mmp); | ||||
|     ComplexD dot = innerProduct(p,mmp); | ||||
|     dd=real(dot); | ||||
|     return dd; | ||||
|   } | ||||
|  | ||||
| }; | ||||
|  | ||||
| template<class Field, class CoarseField> | ||||
| class TwoLevelADEF2mrhs : public TwoLevelCGmrhs<Field> | ||||
| { | ||||
| public: | ||||
|   GridBase *coarsegrid; | ||||
|   GridBase *coarsegridmrhs; | ||||
|   LinearFunction<CoarseField> &_CoarseSolverMrhs; | ||||
|   LinearFunction<CoarseField> &_CoarseSolverPreciseMrhs; | ||||
|   MultiRHSBlockProject<Field>    &_Projector; | ||||
|   MultiRHSDeflation<CoarseField> &_Deflator; | ||||
|  | ||||
|    | ||||
|   TwoLevelADEF2mrhs(RealD tol, | ||||
| 		    Integer maxit, | ||||
| 		    LinearOperatorBase<Field>    &FineLinop, | ||||
| 		    LinearFunction<Field>        &Smoother, | ||||
| 		    LinearFunction<CoarseField>  &CoarseSolverMrhs, | ||||
| 		    LinearFunction<CoarseField>  &CoarseSolverPreciseMrhs, | ||||
| 		    MultiRHSBlockProject<Field>    &Projector, | ||||
| 		    MultiRHSDeflation<CoarseField> &Deflator, | ||||
| 		    GridBase *_coarsemrhsgrid) : | ||||
|     TwoLevelCGmrhs<Field>(tol, maxit,FineLinop,Smoother,Projector.fine_grid), | ||||
|     _CoarseSolverMrhs(CoarseSolverMrhs), | ||||
|     _CoarseSolverPreciseMrhs(CoarseSolverPreciseMrhs), | ||||
|     _Projector(Projector), | ||||
|     _Deflator(Deflator) | ||||
|   { | ||||
|     coarsegrid = Projector.coarse_grid; | ||||
|     coarsegridmrhs = _coarsemrhsgrid; // This could be in the projector | ||||
|   }; | ||||
|  | ||||
|   // Override Vstart | ||||
|   virtual void Vstart(std::vector<Field> & x,std::vector<Field> & src) | ||||
|   { | ||||
|     int nrhs=x.size(); | ||||
|     /////////////////////////////////// | ||||
|     // Choose x_0 such that  | ||||
|     // x_0 = guess +  (A_ss^inv) r_s = guess + Ass_inv [src -Aguess] | ||||
|     //                               = [1 - Ass_inv A] Guess + Assinv src | ||||
|     //                               = P^T guess + Assinv src  | ||||
|     //                               = Vstart  [Tang notation] | ||||
|     // This gives: | ||||
|     // W^T (src - A x_0) = src_s - A guess_s - r_s | ||||
|     //                   = src_s - (A guess)_s - src_s  + (A guess)_s  | ||||
|     //                   = 0  | ||||
|     /////////////////////////////////// | ||||
|     std::vector<CoarseField> PleftProj(nrhs,this->coarsegrid); | ||||
|     std::vector<CoarseField> PleftMss_proj(nrhs,this->coarsegrid); | ||||
|     CoarseField PleftProjMrhs(this->coarsegridmrhs); | ||||
|     CoarseField PleftMss_projMrhs(this->coarsegridmrhs); | ||||
|  | ||||
|     this->_Projector.blockProject(src,PleftProj); | ||||
|     this->_Deflator.DeflateSources(PleftProj,PleftMss_proj); | ||||
|     for(int rhs=0;rhs<nrhs;rhs++) { | ||||
|       InsertSliceFast(PleftProj[rhs],PleftProjMrhs,rhs,0); | ||||
|       InsertSliceFast(PleftMss_proj[rhs],PleftMss_projMrhs,rhs,0); // the guess | ||||
|     } | ||||
|      | ||||
|     this->_CoarseSolverPreciseMrhs(PleftProjMrhs,PleftMss_projMrhs); // Ass^{-1} r_s | ||||
|  | ||||
|     for(int rhs=0;rhs<nrhs;rhs++) { | ||||
|       ExtractSliceFast(PleftMss_proj[rhs],PleftMss_projMrhs,rhs,0); | ||||
|     } | ||||
|     this->_Projector.blockPromote(x,PleftMss_proj); | ||||
|   } | ||||
|  | ||||
|   virtual void PcgM1(std::vector<Field> & in,std::vector<Field> & out){ | ||||
|  | ||||
|     int nrhs=in.size(); | ||||
|  | ||||
|     // [PTM+Q] in = [1 - Q A] M in + Q in = Min + Q [ in -A Min] | ||||
|     std::vector<Field> tmp(nrhs,this->grid); | ||||
|     std::vector<Field> Min(nrhs,this->grid); | ||||
|  | ||||
|     std::vector<CoarseField> PleftProj(nrhs,this->coarsegrid); | ||||
|     std::vector<CoarseField> PleftMss_proj(nrhs,this->coarsegrid); | ||||
|  | ||||
|     CoarseField PleftProjMrhs(this->coarsegridmrhs); | ||||
|     CoarseField PleftMss_projMrhs(this->coarsegridmrhs); | ||||
|  | ||||
|     for(int rhs=0;rhs<nrhs;rhs++) { | ||||
|  | ||||
|       this->SmoothTimer.Start(); | ||||
|       this->_Smoother(in[rhs],Min[rhs]); | ||||
|       this->SmoothTimer.Stop(); | ||||
|  | ||||
|       this->FineTimer.Start(); | ||||
|       this->_FineLinop.HermOp(Min[rhs],out[rhs]); | ||||
|  | ||||
|       axpy(tmp[rhs],-1.0,out[rhs],in[rhs]);          // resid  = in - A Min | ||||
|       this->FineTimer.Stop(); | ||||
|  | ||||
|     } | ||||
|  | ||||
|     this->ProjectTimer.Start(); | ||||
|     this->_Projector.blockProject(tmp,PleftProj); | ||||
|     this->ProjectTimer.Stop(); | ||||
|     this->DeflateTimer.Start(); | ||||
|     this->_Deflator.DeflateSources(PleftProj,PleftMss_proj); | ||||
|     this->DeflateTimer.Stop(); | ||||
|     this->InsertTimer.Start(); | ||||
|     for(int rhs=0;rhs<nrhs;rhs++) { | ||||
|       InsertSliceFast(PleftProj[rhs],PleftProjMrhs,rhs,0); | ||||
|       InsertSliceFast(PleftMss_proj[rhs],PleftMss_projMrhs,rhs,0); // the guess | ||||
|     } | ||||
|     this->InsertTimer.Stop(); | ||||
|  | ||||
|     this->CoarseTimer.Start(); | ||||
|     this->_CoarseSolverMrhs(PleftProjMrhs,PleftMss_projMrhs); // Ass^{-1} [in - A Min]_s | ||||
|     this->CoarseTimer.Stop(); | ||||
|  | ||||
|     this->InsertTimer.Start(); | ||||
|     for(int rhs=0;rhs<nrhs;rhs++) { | ||||
|       ExtractSliceFast(PleftMss_proj[rhs],PleftMss_projMrhs,rhs,0); | ||||
|     } | ||||
|     this->InsertTimer.Stop(); | ||||
|     this->PromoteTimer.Start(); | ||||
|     this->_Projector.blockPromote(tmp,PleftMss_proj);// tmp= Q[in - A Min]   | ||||
|     this->PromoteTimer.Stop(); | ||||
|     this->FineTimer.Start(); | ||||
|     for(int rhs=0;rhs<nrhs;rhs++) { | ||||
|       axpy(out[rhs],1.0,Min[rhs],tmp[rhs]); // Min+tmp | ||||
|     } | ||||
|     this->FineTimer.Stop(); | ||||
|   } | ||||
| }; | ||||
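A sketch of how this multi-RHS solver would be wired up (illustrative only; nrhs, FineGrid and every solver/projector object below are placeholders rather than things defined in this patch):

    TwoLevelADEF2mrhs<LatticeFermion, CoarseVector>
      HDCGmrhs(1.0e-8, 500,
               HermOp,                   // fine LinearOperatorBase
               Smoother,                 // fine LinearFunction
               CoarseCGmrhs,             // cheap batched coarse solve, used in PcgM1
               CoarseCGPreciseMrhs,      // accurate batched coarse solve, used in Vstart
               Projector,                // MultiRHSBlockProject<LatticeFermion>
               Deflator,                 // MultiRHSDeflation<CoarseVector>
               CoarseMrhsGrid);          // coarse grid with an extra dimension indexing the rhs batch

    std::vector<LatticeFermion> src(nrhs, FineGrid);
    std::vector<LatticeFermion> sol(nrhs, FineGrid);
    // ... fill src, initialise sol (e.g. to zero) ...
    HDCGmrhs(src, sol);                  // vector operator() inherited from TwoLevelCGmrhs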
|    | ||||
|  | ||||
| NAMESPACE_END(Grid); | ||||
|  | ||||
|  | ||||
| @@ -54,11 +54,14 @@ public: | ||||
|   ConjugateGradient(RealD tol, Integer maxit, bool err_on_no_conv = true) | ||||
|     : Tolerance(tol), | ||||
|       MaxIterations(maxit), | ||||
|       ErrorOnNoConverge(err_on_no_conv){}; | ||||
|       ErrorOnNoConverge(err_on_no_conv) | ||||
|   {}; | ||||
|  | ||||
|   void operator()(LinearOperatorBase<Field> &Linop, const Field &src, Field &psi) { | ||||
|  | ||||
|     GRID_TRACE("ConjugateGradient"); | ||||
|     GridStopWatch PreambleTimer; | ||||
|     PreambleTimer.Start(); | ||||
|     psi.Checkerboard() = src.Checkerboard(); | ||||
|  | ||||
|     conformable(psi, src); | ||||
| @@ -66,22 +69,26 @@ public: | ||||
|     RealD cp, c, a, d, b, ssq, qq; | ||||
|     //RealD b_pred; | ||||
|  | ||||
|     Field p(src); | ||||
|     Field mmp(src); | ||||
|     Field r(src); | ||||
|     // Was doing copies | ||||
|     Field p(src.Grid()); | ||||
|     Field mmp(src.Grid()); | ||||
|     Field r(src.Grid()); | ||||
|  | ||||
|     // Initial residual computation & set up | ||||
|     ssq = norm2(src); | ||||
|     RealD guess = norm2(psi); | ||||
|     assert(std::isnan(guess) == 0); | ||||
|      | ||||
|     Linop.HermOpAndNorm(psi, mmp, d, b); | ||||
|      | ||||
|     r = src - mmp; | ||||
|     p = r; | ||||
|  | ||||
|     a = norm2(p); | ||||
|     if ( guess == 0.0 ) { | ||||
|       r = src; | ||||
|       p = r; | ||||
|       a = ssq; | ||||
|     } else {  | ||||
|       Linop.HermOpAndNorm(psi, mmp, d, b); | ||||
|       r = src - mmp; | ||||
|       p = r; | ||||
|       a = norm2(p); | ||||
|     } | ||||
|     cp = a; | ||||
|     ssq = norm2(src); | ||||
|  | ||||
|     // Handle trivial case of zero src | ||||
|     if (ssq == 0.){ | ||||
| @@ -111,6 +118,7 @@ public: | ||||
|     std::cout << GridLogIterative << std::setprecision(8) | ||||
|               << "ConjugateGradient: k=0 residual " << cp << " target " << rsq << std::endl; | ||||
|  | ||||
|     PreambleTimer.Stop(); | ||||
|     GridStopWatch LinalgTimer; | ||||
|     GridStopWatch InnerTimer; | ||||
|     GridStopWatch AxpyNormTimer; | ||||
| @@ -183,13 +191,14 @@ public: | ||||
| 		  << "\tTrue residual " << true_residual | ||||
| 		  << "\tTarget " << Tolerance << std::endl; | ||||
|  | ||||
|         std::cout << GridLogMessage << "Time breakdown "<<std::endl; | ||||
| 	std::cout << GridLogMessage << "\tElapsed    " << SolverTimer.Elapsed() <<std::endl; | ||||
| 	std::cout << GridLogMessage << "\tMatrix     " << MatrixTimer.Elapsed() <<std::endl; | ||||
| 	std::cout << GridLogMessage << "\tLinalg     " << LinalgTimer.Elapsed() <<std::endl; | ||||
| 	std::cout << GridLogMessage << "\tInner      " << InnerTimer.Elapsed() <<std::endl; | ||||
| 	std::cout << GridLogMessage << "\tAxpyNorm   " << AxpyNormTimer.Elapsed() <<std::endl; | ||||
| 	std::cout << GridLogMessage << "\tLinearComb " << LinearCombTimer.Elapsed() <<std::endl; | ||||
| 	//	std::cout << GridLogMessage << "\tPreamble   " << PreambleTimer.Elapsed() <<std::endl; | ||||
| 	std::cout << GridLogMessage << "\tSolver Elapsed    " << SolverTimer.Elapsed() <<std::endl; | ||||
|         std::cout << GridLogPerformance << "Time breakdown "<<std::endl; | ||||
| 	std::cout << GridLogPerformance << "\tMatrix     " << MatrixTimer.Elapsed() <<std::endl; | ||||
| 	std::cout << GridLogPerformance << "\tLinalg     " << LinalgTimer.Elapsed() <<std::endl; | ||||
| 	std::cout << GridLogPerformance << "\t\tInner      " << InnerTimer.Elapsed() <<std::endl; | ||||
| 	std::cout << GridLogPerformance << "\t\tAxpyNorm   " << AxpyNormTimer.Elapsed() <<std::endl; | ||||
| 	std::cout << GridLogPerformance << "\t\tLinearComb " << LinearCombTimer.Elapsed() <<std::endl; | ||||
|  | ||||
| 	std::cout << GridLogDebug << "\tMobius flop rate " << DwfFlops/ usecs<< " Gflops " <<std::endl; | ||||
|  | ||||
| @@ -202,12 +211,22 @@ public: | ||||
|       } | ||||
|     } | ||||
|     // Failed. Calculate true residual before giving up                                                          | ||||
|     Linop.HermOpAndNorm(psi, mmp, d, qq); | ||||
|     p = mmp - src; | ||||
|     // Linop.HermOpAndNorm(psi, mmp, d, qq); | ||||
|     //    p = mmp - src; | ||||
|     //TrueResidual = sqrt(norm2(p)/ssq); | ||||
|     //    TrueResidual = 1; | ||||
|  | ||||
|     TrueResidual = sqrt(norm2(p)/ssq); | ||||
|  | ||||
|     std::cout << GridLogMessage << "ConjugateGradient did NOT converge "<<k<<" / "<< MaxIterations<< std::endl; | ||||
|     std::cout << GridLogMessage << "ConjugateGradient did NOT converge "<<k<<" / "<< MaxIterations | ||||
|     	      <<" residual "<< std::sqrt(cp / ssq)<< std::endl; | ||||
|     SolverTimer.Stop(); | ||||
|     std::cout << GridLogMessage << "\tPreamble   " << PreambleTimer.Elapsed() <<std::endl; | ||||
|     std::cout << GridLogMessage << "\tSolver     " << SolverTimer.Elapsed() <<std::endl; | ||||
|     std::cout << GridLogMessage << "Solver breakdown "<<std::endl; | ||||
|     std::cout << GridLogMessage << "\tMatrix     " << MatrixTimer.Elapsed() <<std::endl; | ||||
|     std::cout << GridLogMessage<< "\tLinalg     " << LinalgTimer.Elapsed() <<std::endl; | ||||
|     std::cout << GridLogPerformance << "\t\tInner      " << InnerTimer.Elapsed() <<std::endl; | ||||
|     std::cout << GridLogPerformance << "\t\tAxpyNorm   " << AxpyNormTimer.Elapsed() <<std::endl; | ||||
|     std::cout << GridLogPerformance << "\t\tLinearComb " << LinearCombTimer.Elapsed() <<std::endl; | ||||
|  | ||||
|     if (ErrorOnNoConverge) assert(0); | ||||
|     IterationsToComplete = k; | ||||
|   | ||||
| @@ -144,7 +144,7 @@ public: | ||||
|     for(int s=0;s<nshift;s++){ | ||||
|       rsq[s] = cp * mresidual[s] * mresidual[s]; | ||||
|       std::cout<<GridLogMessage<<"ConjugateGradientMultiShift: shift "<<s | ||||
| 	       <<" target resid "<<rsq[s]<<std::endl; | ||||
| 	       <<" target resid^2 "<<rsq[s]<<std::endl; | ||||
|       ps[s] = src; | ||||
|     } | ||||
|     // r and p for primary | ||||
|   | ||||
							
								
								
									
Grid/algorithms/iterative/ImplicitlyRestartedBlockLanczosCoarse.h (new file, 1212 lines; diff suppressed because it is too large)
							| @@ -79,14 +79,16 @@ template<class Field> class ImplicitlyRestartedLanczosHermOpTester  : public Imp | ||||
|     RealD vv = norm2(v) / ::pow(evalMaxApprox,2.0); | ||||
|  | ||||
|     std::cout.precision(13); | ||||
|     std::cout<<GridLogIRL  << "[" << std::setw(3)<<j<<"] " | ||||
| 	     <<"eval = "<<std::setw(25)<< eval << " (" << eval_poly << ")" | ||||
| 	     <<" |H B[i] - eval[i]B[i]|^2 / evalMaxApprox^2 " << std::setw(25) << vv | ||||
| 	     <<std::endl; | ||||
|  | ||||
|     int conv=0; | ||||
|     if( (vv<eresid*eresid) ) conv = 1; | ||||
|  | ||||
|     std::cout<<GridLogIRL  << "[" << std::setw(3)<<j<<"] " | ||||
| 	     <<"eval = "<<std::setw(25)<< eval << " (" << eval_poly << ")" | ||||
| 	     <<" |H B[i] - eval[i]B[i]|^2 / evalMaxApprox^2 " << std::setw(25) << vv | ||||
| 	     <<" target " << eresid*eresid << " conv " <<conv | ||||
| 	     <<std::endl; | ||||
|  | ||||
|     return conv; | ||||
|   } | ||||
| }; | ||||
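In words: the IRL convergence tester accepts a candidate pair (eval, B) when ||H B - eval B||^2 / evalMaxApprox^2 < eresid^2, i.e. the eigenvector residual is measured relative to the estimated spectral radius, and the change above simply prints the target eresid^2 and the conv flag on the same line as that residual.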
| @@ -457,7 +459,7 @@ until convergence | ||||
| 	    std::vector<Field>& evec, | ||||
| 	    Field& w,int Nm,int k) | ||||
|   { | ||||
|     std::cout<<GridLogIRL << "Lanczos step " <<k<<std::endl; | ||||
|     std::cout<<GridLogDebug << "Lanczos step " <<k<<std::endl; | ||||
|     const RealD tiny = 1.0e-20; | ||||
|     assert( k< Nm ); | ||||
|  | ||||
| @@ -465,7 +467,7 @@ until convergence | ||||
|  | ||||
|     Field& evec_k = evec[k]; | ||||
|  | ||||
|     _PolyOp(evec_k,w);    std::cout<<GridLogIRL << "PolyOp" <<std::endl; | ||||
|     _PolyOp(evec_k,w);    std::cout<<GridLogDebug << "PolyOp" <<std::endl; | ||||
|  | ||||
|     if(k>0) w -= lme[k-1] * evec[k-1]; | ||||
|  | ||||
| @@ -480,18 +482,18 @@ until convergence | ||||
|     lme[k] = beta; | ||||
|  | ||||
|     if ( (k>0) && ( (k % orth_period) == 0 )) { | ||||
|       std::cout<<GridLogIRL << "Orthogonalising " <<k<<std::endl; | ||||
|       std::cout<<GridLogDebug << "Orthogonalising " <<k<<std::endl; | ||||
|       orthogonalize(w,evec,k); // orthonormalise | ||||
|       std::cout<<GridLogIRL << "Orthogonalised " <<k<<std::endl; | ||||
|       std::cout<<GridLogDebug << "Orthogonalised " <<k<<std::endl; | ||||
|     } | ||||
|  | ||||
|     if(k < Nm-1) evec[k+1] = w; | ||||
|  | ||||
|     std::cout<<GridLogIRL << "alpha[" << k << "] = " << zalph << " beta[" << k << "] = "<<beta<<std::endl; | ||||
|     std::cout<<GridLogIRL << "Lanczos step alpha[" << k << "] = " << zalph << " beta[" << k << "] = "<<beta<<std::endl; | ||||
|     if ( beta < tiny )  | ||||
|       std::cout<<GridLogIRL << " beta is tiny "<<beta<<std::endl; | ||||
|  | ||||
|     std::cout<<GridLogIRL << "Lanczos step complete " <<k<<std::endl; | ||||
|     std::cout<<GridLogDebug << "Lanczos step complete " <<k<<std::endl; | ||||
|   } | ||||
|  | ||||
|   void diagonalize_Eigen(std::vector<RealD>& lmd, std::vector<RealD>& lme,  | ||||
|   | ||||
| @@ -33,7 +33,7 @@ NAMESPACE_BEGIN(Grid); | ||||
| /////////////////////////////////////////////////////////////////////////////////////////////////////// | ||||
| // Take a matrix and form an NE solver calling a Herm solver | ||||
| /////////////////////////////////////////////////////////////////////////////////////////////////////// | ||||
| template<class Field> class NormalEquations { | ||||
| template<class Field> class NormalEquations : public LinearFunction<Field>{ | ||||
| private: | ||||
|   SparseMatrixBase<Field> & _Matrix; | ||||
|   OperatorFunction<Field> & _HermitianSolver; | ||||
| @@ -60,7 +60,7 @@ public: | ||||
|   }      | ||||
| }; | ||||
|  | ||||
| template<class Field> class HPDSolver { | ||||
| template<class Field> class HPDSolver : public LinearFunction<Field> { | ||||
| private: | ||||
|   LinearOperatorBase<Field> & _Matrix; | ||||
|   OperatorFunction<Field> & _HermitianSolver; | ||||
| @@ -78,13 +78,13 @@ public: | ||||
|   void operator() (const Field &in, Field &out){ | ||||
|   | ||||
|     _Guess(in,out); | ||||
|     _HermitianSolver(_Matrix,in,out);  // Mdag M out = Mdag in | ||||
|     _HermitianSolver(_Matrix,in,out);  //M out = in | ||||
|  | ||||
|   }      | ||||
| }; | ||||
|  | ||||
|  | ||||
| template<class Field> class MdagMSolver { | ||||
| template<class Field> class MdagMSolver : public LinearFunction<Field> { | ||||
| private: | ||||
|   SparseMatrixBase<Field> & _Matrix; | ||||
|   OperatorFunction<Field> & _HermitianSolver; | ||||
|   | ||||
| @@ -20,7 +20,7 @@ template<class Field> class PowerMethod | ||||
|     RealD evalMaxApprox = 0.0;  | ||||
|     auto src_n = src;  | ||||
|     auto tmp = src;  | ||||
|     const int _MAX_ITER_EST_ = 50;  | ||||
|     const int _MAX_ITER_EST_ = 100;  | ||||
|  | ||||
|     for (int i=0;i<_MAX_ITER_EST_;i++) {  | ||||
|        | ||||
|   | ||||
							
								
								
									
Grid/algorithms/multigrid/Aggregates.h (new file, 478 lines)
							| @@ -0,0 +1,478 @@ | ||||
| /************************************************************************************* | ||||
|  | ||||
|     Grid physics library, www.github.com/paboyle/Grid  | ||||
|  | ||||
|     Source file: ./lib/algorithms/Aggregates.h | ||||
|  | ||||
|     Copyright (C) 2015 | ||||
|  | ||||
| Author: Azusa Yamaguchi <ayamaguc@staffmail.ed.ac.uk> | ||||
| Author: Peter Boyle <paboyle@ph.ed.ac.uk> | ||||
| Author: Peter Boyle <peterboyle@Peters-MacBook-Pro-2.local> | ||||
| Author: paboyle <paboyle@ph.ed.ac.uk> | ||||
|  | ||||
|     This program is free software; you can redistribute it and/or modify | ||||
|     it under the terms of the GNU General Public License as published by | ||||
|     the Free Software Foundation; either version 2 of the License, or | ||||
|     (at your option) any later version. | ||||
|  | ||||
|     This program is distributed in the hope that it will be useful, | ||||
|     but WITHOUT ANY WARRANTY; without even the implied warranty of | ||||
|     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the | ||||
|     GNU General Public License for more details. | ||||
|  | ||||
|     You should have received a copy of the GNU General Public License along | ||||
|     with this program; if not, write to the Free Software Foundation, Inc., | ||||
|     51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. | ||||
|  | ||||
|     See the full license in the file "LICENSE" in the top level distribution directory | ||||
| *************************************************************************************/ | ||||
| /*  END LEGAL */ | ||||
| #pragma once | ||||
|  | ||||
| NAMESPACE_BEGIN(Grid); | ||||
|  | ||||
| inline RealD AggregatePowerLaw(RealD x) | ||||
| { | ||||
|   //  return std::pow(x,-4); | ||||
|   //  return std::pow(x,-3); | ||||
|   return std::pow(x,-5); | ||||
| } | ||||
|  | ||||
| template<class Fobj,class CComplex,int nbasis> | ||||
| class Aggregation { | ||||
| public: | ||||
|   constexpr int Nbasis(void) { return nbasis; }; | ||||
|    | ||||
|   typedef iVector<CComplex,nbasis >             siteVector; | ||||
|   typedef Lattice<siteVector>                 CoarseVector; | ||||
|   typedef Lattice<iMatrix<CComplex,nbasis > > CoarseMatrix; | ||||
|  | ||||
|   typedef Lattice< CComplex >   CoarseScalar; // used for inner products on fine field | ||||
|   typedef Lattice<Fobj >        FineField; | ||||
|  | ||||
|   GridBase *CoarseGrid; | ||||
|   GridBase *FineGrid; | ||||
|   std::vector<Lattice<Fobj> > subspace; | ||||
|   int checkerboard; | ||||
|   int Checkerboard(void){return checkerboard;} | ||||
|   Aggregation(GridBase *_CoarseGrid,GridBase *_FineGrid,int _checkerboard) :  | ||||
|     CoarseGrid(_CoarseGrid), | ||||
|     FineGrid(_FineGrid), | ||||
|     subspace(nbasis,_FineGrid), | ||||
|     checkerboard(_checkerboard) | ||||
|   { | ||||
|   }; | ||||
|    | ||||
|    | ||||
|   void Orthogonalise(void){ | ||||
|     CoarseScalar InnerProd(CoarseGrid);  | ||||
|     //    std::cout << GridLogMessage <<" Block Gramm-Schmidt pass 1"<<std::endl; | ||||
|     blockOrthogonalise(InnerProd,subspace); | ||||
|   }  | ||||
|   void ProjectToSubspace(CoarseVector &CoarseVec,const FineField &FineVec){ | ||||
|     blockProject(CoarseVec,FineVec,subspace); | ||||
|   } | ||||
|   void PromoteFromSubspace(const CoarseVector &CoarseVec,FineField &FineVec){ | ||||
|     FineVec.Checkerboard() = subspace[0].Checkerboard(); | ||||
|     blockPromote(CoarseVec,FineVec,subspace); | ||||
|   } | ||||
|  | ||||
|   virtual void CreateSubspaceRandom(GridParallelRNG  &RNG) { | ||||
|     int nn=nbasis; | ||||
|     RealD scale; | ||||
|     FineField noise(FineGrid); | ||||
|     for(int b=0;b<nn;b++){ | ||||
|       subspace[b] = Zero(); | ||||
|       gaussian(RNG,noise); | ||||
|       scale = std::pow(norm2(noise),-0.5);  | ||||
|       noise=noise*scale; | ||||
|       subspace[b] = noise; | ||||
|     } | ||||
|   } | ||||
|   virtual void CreateSubspace(GridParallelRNG  &RNG,LinearOperatorBase<FineField> &hermop,int nn=nbasis) | ||||
|   { | ||||
|  | ||||
|     RealD scale; | ||||
|  | ||||
|     ConjugateGradient<FineField> CG(1.0e-2,100,false); | ||||
|     FineField noise(FineGrid); | ||||
|     FineField Mn(FineGrid); | ||||
|  | ||||
|     for(int b=0;b<nn;b++){ | ||||
|        | ||||
|       subspace[b] = Zero(); | ||||
|       gaussian(RNG,noise); | ||||
|       scale = std::pow(norm2(noise),-0.5);  | ||||
|       noise=noise*scale; | ||||
|        | ||||
|       hermop.Op(noise,Mn); std::cout<<GridLogMessage << "noise   ["<<b<<"] <n|MdagM|n> "<<norm2(Mn)<<std::endl; | ||||
|  | ||||
|       for(int i=0;i<1;i++){ | ||||
|  | ||||
| 	CG(hermop,noise,subspace[b]); | ||||
|  | ||||
| 	noise = subspace[b]; | ||||
| 	scale = std::pow(norm2(noise),-0.5);  | ||||
| 	noise=noise*scale; | ||||
|  | ||||
|       } | ||||
|  | ||||
|       hermop.Op(noise,Mn); std::cout<<GridLogMessage << "filtered["<<b<<"] <f|MdagM|f> "<<norm2(Mn)<<std::endl; | ||||
|       subspace[b]   = noise; | ||||
|  | ||||
|     } | ||||
|   } | ||||
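The loop above is effectively a crude inverse iteration: each basis vector starts as unit-normalised Gaussian noise and is pushed through a loose CG solve (tolerance 1e-2, at most 100 iterations) against the Hermitian operator, which weights every mode by roughly 1/lambda and so enriches the low end of the spectrum before the vector is renormalised and stored in the subspace.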
|  | ||||
|   //////////////////////////////////////////////////////////////////////////////////////////////// | ||||
|   // World of possibilities here. But have tried quite a lot of experiments (250+ jobs run on Summit) | ||||
|   // and this is the best I found | ||||
|   //////////////////////////////////////////////////////////////////////////////////////////////// | ||||
|  | ||||
|   virtual void CreateSubspaceChebyshev(GridParallelRNG  &RNG,LinearOperatorBase<FineField> &hermop, | ||||
| 				       int nn, | ||||
| 				       double hi, | ||||
| 				       double lo, | ||||
| 				       int orderfilter, | ||||
| 				       int ordermin, | ||||
| 				       int orderstep, | ||||
| 				       double filterlo | ||||
| 				       ) { | ||||
|  | ||||
|     RealD scale; | ||||
|  | ||||
|     FineField noise(FineGrid); | ||||
|     FineField Mn(FineGrid); | ||||
|     FineField tmp(FineGrid); | ||||
|  | ||||
|     // New normalised noise | ||||
|     gaussian(RNG,noise); | ||||
|     scale = std::pow(norm2(noise),-0.5);  | ||||
|     noise=noise*scale; | ||||
|  | ||||
|     std::cout << GridLogMessage<<" Chebyshev subspace pass-1 : ord "<<orderfilter<<" ["<<lo<<","<<hi<<"]"<<std::endl; | ||||
|     std::cout << GridLogMessage<<" Chebyshev subspace pass-2 : nbasis"<<nn<<" min " | ||||
| 	      <<ordermin<<" step "<<orderstep | ||||
| 	      <<" lo"<<filterlo<<std::endl; | ||||
|  | ||||
|     // Initial matrix element | ||||
|     hermop.Op(noise,Mn); std::cout<<GridLogMessage << "noise <n|MdagM|n> "<<norm2(Mn)<<std::endl; | ||||
|  | ||||
|     int b =0; | ||||
|     { | ||||
|       // Filter | ||||
|       Chebyshev<FineField> Cheb(lo,hi,orderfilter); | ||||
|       Cheb(hermop,noise,Mn); | ||||
|       // normalise | ||||
|       scale = std::pow(norm2(Mn),-0.5); 	Mn=Mn*scale; | ||||
|       subspace[b]   = Mn; | ||||
|       hermop.Op(Mn,tmp);  | ||||
|       std::cout<<GridLogMessage << "filt ["<<b<<"] <n|MdagM|n> "<<norm2(tmp)<<std::endl; | ||||
|       b++; | ||||
|     } | ||||
|  | ||||
|     // Generate a full sequence of Chebyshevs | ||||
|     { | ||||
|       lo=filterlo; | ||||
|       noise=Mn; | ||||
|  | ||||
|       FineField T0(FineGrid); T0 = noise;   | ||||
|       FineField T1(FineGrid);  | ||||
|       FineField T2(FineGrid); | ||||
|       FineField y(FineGrid); | ||||
|        | ||||
|       FineField *Tnm = &T0; | ||||
|       FineField *Tn  = &T1; | ||||
|       FineField *Tnp = &T2; | ||||
|  | ||||
|       // Tn=T1 = (xscale M + mscale)in | ||||
|       RealD xscale = 2.0/(hi-lo); | ||||
|       RealD mscale = -(hi+lo)/(hi-lo); | ||||
|       hermop.HermOp(T0,y); | ||||
|       T1=y*xscale+noise*mscale; | ||||
|  | ||||
|       for(int n=2;n<=ordermin+orderstep*(nn-2);n++){ | ||||
| 	 | ||||
| 	hermop.HermOp(*Tn,y); | ||||
|  | ||||
| 	autoView( y_v , y, AcceleratorWrite); | ||||
| 	autoView( Tn_v , (*Tn), AcceleratorWrite); | ||||
| 	autoView( Tnp_v , (*Tnp), AcceleratorWrite); | ||||
| 	autoView( Tnm_v , (*Tnm), AcceleratorWrite); | ||||
| 	const int Nsimd = CComplex::Nsimd(); | ||||
| 	accelerator_for(ss, FineGrid->oSites(), Nsimd, { | ||||
| 	  coalescedWrite(y_v[ss],xscale*y_v(ss)+mscale*Tn_v(ss)); | ||||
| 	  coalescedWrite(Tnp_v[ss],2.0*y_v(ss)-Tnm_v(ss)); | ||||
|         }); | ||||
|  | ||||
| 	// Possible more fine grained control is needed than a linear sweep, | ||||
| 	// but huge productivity gain if this is simple algorithm and not a tunable | ||||
| 	int m =1; | ||||
| 	if ( n>=ordermin ) m=n-ordermin; | ||||
| 	if ( (m%orderstep)==0 ) {  | ||||
| 	  Mn=*Tnp; | ||||
| 	  scale = std::pow(norm2(Mn),-0.5);         Mn=Mn*scale; | ||||
| 	  subspace[b] = Mn; | ||||
| 	  hermop.Op(Mn,tmp);  | ||||
| 	  std::cout<<GridLogMessage << n<<" filt ["<<b<<"] <n|MdagM|n> "<<norm2(tmp)<<std::endl; | ||||
| 	  b++; | ||||
| 	} | ||||
|  | ||||
| 	// Cycle pointers to avoid copies | ||||
| 	FineField *swizzle = Tnm; | ||||
| 	Tnm    =Tn; | ||||
| 	Tn     =Tnp; | ||||
| 	Tnp    =swizzle; | ||||
| 	   | ||||
|       } | ||||
|     } | ||||
|     assert(b==nn); | ||||
|   } | ||||
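
Pass 2 above walks the shifted three-term Chebyshev recurrence T_{n+1} = 2(xscale*M + mscale) T_n - T_{n-1} and keeps every orderstep-th polynomial beyond ordermin as an additional basis vector. For orientation, a minimal scalar sketch of the same recurrence (illustrative only, not Grid code; x stands in for the Hermitian operator, and T[0] is 1 here rather than the filtered noise vector used above):

#include <vector>

// Evaluate T_0..T_nmax in the variable xs = (2*x - (hi+lo))/(hi-lo),
// i.e. x mapped from the spectral window [lo,hi] onto [-1,1], using the
// same recurrence the accelerator kernel applies to MdagM.
std::vector<double> ChebyshevSequence(double x, double lo, double hi, int nmax)
{
  double xscale = 2.0/(hi-lo);
  double mscale = -(hi+lo)/(hi-lo);
  double xs     = xscale*x + mscale;

  std::vector<double> T(nmax+1);
  T[0] = 1.0;                       // T_0
  if (nmax >= 1) T[1] = xs;         // T_1 = xscale*x + mscale, cf. T1 above
  for(int n=1;n<nmax;n++){
    T[n+1] = 2.0*xs*T[n] - T[n-1];  // T_{n+1} = 2 xs T_n - T_{n-1}
  }
  return T;
}
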
|   virtual void CreateSubspaceChebyshev(GridParallelRNG  &RNG,LinearOperatorBase<FineField> &hermop, | ||||
| 				       int nn, | ||||
| 				       double hi, | ||||
| 				       double lo, | ||||
| 				       int orderfilter | ||||
| 				       ) { | ||||
|  | ||||
|     RealD scale; | ||||
|  | ||||
|     FineField noise(FineGrid); | ||||
|     FineField Mn(FineGrid); | ||||
|     FineField tmp(FineGrid); | ||||
|  | ||||
|     // New normalised noise | ||||
|     std::cout << GridLogMessage<<" Chebyshev subspace pure noise : ord "<<orderfilter<<" ["<<lo<<","<<hi<<"]"<<std::endl; | ||||
|     std::cout << GridLogMessage<<" Chebyshev subspace pure noise  : nbasis "<<nn<<std::endl; | ||||
|  | ||||
|  | ||||
|     for(int b =0;b<nbasis;b++) | ||||
|     { | ||||
|       gaussian(RNG,noise); | ||||
|       scale = std::pow(norm2(noise),-0.5);  | ||||
|       noise=noise*scale; | ||||
|  | ||||
|       // Initial matrix element | ||||
|       hermop.Op(noise,Mn); | ||||
|       if(b==0) std::cout<<GridLogMessage << "noise <n|MdagM|n> "<<norm2(Mn)<<std::endl; | ||||
|  | ||||
|       // Filter | ||||
|       Chebyshev<FineField> Cheb(lo,hi,orderfilter); | ||||
|       Cheb(hermop,noise,Mn); | ||||
|       scale = std::pow(norm2(Mn),-0.5); 	Mn=Mn*scale; | ||||
|  | ||||
|       // Refine | ||||
|       Chebyshev<FineField> PowerLaw(lo,hi,1000,AggregatePowerLaw); | ||||
|       noise = Mn; | ||||
|       PowerLaw(hermop,noise,Mn); | ||||
|       scale = std::pow(norm2(Mn),-0.5); 	Mn=Mn*scale; | ||||
|  | ||||
|       // normalise | ||||
|       subspace[b]   = Mn; | ||||
|       hermop.Op(Mn,tmp);  | ||||
|       std::cout<<GridLogMessage << "filt ["<<b<<"] <n|MdagM|n> "<<norm2(tmp)<<std::endl; | ||||
|     } | ||||
|  | ||||
|   } | ||||
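
The "Refine" step above constructs a 1000-order Chebyshev approximation of the function AggregatePowerLaw over [lo,hi] and applies it to the already-filtered vector. Assuming the four-argument constructor takes a real function of the eigenvalue to be approximated (its exact signature is not shown in this file), the usage pattern with a stand-in profile would look like:

// Stand-in spectral profile; AggregatePowerLaw's actual form is not shown
// here, so this shape is an assumed example only.
RealD ExamplePowerLaw(RealD x) { return std::pow(x + 1.0e-3, -0.5); }

// Approximate f(MdagM) acting on a vector via a high-order Chebyshev fit
// of f over the spectral window, then apply it (same call pattern as above).
Chebyshev<FineField> Refine(lo, hi, 1000, ExamplePowerLaw);
Refine(hermop, noise, Mn);    // Mn ~ f(MdagM) |noise>
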
|  | ||||
|   virtual void CreateSubspaceChebyshevPowerLaw(GridParallelRNG  &RNG,LinearOperatorBase<FineField> &hermop, | ||||
| 					       int nn, | ||||
| 					       double hi, | ||||
| 					       int orderfilter | ||||
| 					       ) { | ||||
|  | ||||
|     RealD scale; | ||||
|  | ||||
|     FineField noise(FineGrid); | ||||
|     FineField Mn(FineGrid); | ||||
|     FineField tmp(FineGrid); | ||||
|  | ||||
|     // New normalised noise | ||||
|     std::cout << GridLogMessage<<" Chebyshev subspace pure noise : ord "<<orderfilter<<" [0,"<<hi<<"]"<<std::endl; | ||||
|     std::cout << GridLogMessage<<" Chebyshev subspace pure noise  : nbasis "<<nn<<std::endl; | ||||
|  | ||||
|     for(int b =0;b<nbasis;b++) | ||||
|     { | ||||
|       gaussian(RNG,noise); | ||||
|       scale = std::pow(norm2(noise),-0.5);  | ||||
|       noise=noise*scale; | ||||
|  | ||||
|       // Initial matrix element | ||||
|       hermop.Op(noise,Mn); | ||||
|       if(b==0) std::cout<<GridLogMessage << "noise <n|MdagM|n> "<<norm2(Mn)<<std::endl; | ||||
|       // Filter | ||||
|       Chebyshev<FineField> Cheb(0.0,hi,orderfilter,AggregatePowerLaw); | ||||
|       Cheb(hermop,noise,Mn); | ||||
|       // normalise | ||||
|       scale = std::pow(norm2(Mn),-0.5); 	Mn=Mn*scale; | ||||
|       subspace[b]   = Mn; | ||||
|       hermop.Op(Mn,tmp);  | ||||
|       std::cout<<GridLogMessage << "filt ["<<b<<"] <n|MdagM|n> "<<norm2(tmp)<<std::endl; | ||||
|     } | ||||
|  | ||||
|   } | ||||
|   virtual void CreateSubspaceChebyshevNew(GridParallelRNG  &RNG,LinearOperatorBase<FineField> &hermop, | ||||
| 					  double hi | ||||
| 					  ) { | ||||
|  | ||||
|     RealD scale; | ||||
|  | ||||
|     FineField noise(FineGrid); | ||||
|     FineField Mn(FineGrid); | ||||
|     FineField tmp(FineGrid); | ||||
|  | ||||
|     // New normalised noise | ||||
|     for(int b =0;b<nbasis;b++) | ||||
|     { | ||||
|       gaussian(RNG,noise); | ||||
|       scale = std::pow(norm2(noise),-0.5);  | ||||
|       noise=noise*scale; | ||||
|  | ||||
|       // Initial matrix element | ||||
|       hermop.Op(noise,Mn); | ||||
|       if(b==0) std::cout<<GridLogMessage << "noise <n|MdagM|n> "<<norm2(Mn)<<std::endl; | ||||
|       // Filter | ||||
|       //#opt2(x) =  acheb(x,3,90,300)* acheb(x,1,90,50) * acheb(x,0.5,90,200) * acheb(x,0.05,90,400) * acheb(x,0.01,90,1500) | ||||
|       /*266 | ||||
|       Chebyshev<FineField> Cheb1(3.0,hi,300); | ||||
|       Chebyshev<FineField> Cheb2(1.0,hi,50); | ||||
|       Chebyshev<FineField> Cheb3(0.5,hi,300); | ||||
|       Chebyshev<FineField> Cheb4(0.05,hi,500); | ||||
|       Chebyshev<FineField> Cheb5(0.01,hi,2000); | ||||
|       */ | ||||
|       /* 242 */ | ||||
|       /* | ||||
|       Chebyshev<FineField> Cheb3(0.1,hi,300); | ||||
|       Chebyshev<FineField> Cheb2(0.02,hi,1000); | ||||
|       Chebyshev<FineField> Cheb1(0.003,hi,2000); | ||||
|       8? | ||||
|       */ | ||||
|       /* How many?? | ||||
|       */ | ||||
|       Chebyshev<FineField> Cheb2(0.001,hi,2500); // 169 iters on HDCG after refine | ||||
|       Chebyshev<FineField> Cheb1(0.02,hi,600); | ||||
|  | ||||
|       //      Chebyshev<FineField> Cheb2(0.001,hi,1500); | ||||
|       //      Chebyshev<FineField> Cheb1(0.02,hi,600); | ||||
|       Cheb1(hermop,noise,Mn); scale = std::pow(norm2(Mn),-0.5); 	noise=Mn*scale; | ||||
|       hermop.Op(noise,tmp); std::cout<<GridLogMessage << "Cheb1 <n|MdagM|n> "<<norm2(tmp)<<std::endl; | ||||
|       Cheb2(hermop,noise,Mn); scale = std::pow(norm2(Mn),-0.5); 	noise=Mn*scale; | ||||
|       hermop.Op(noise,tmp); std::cout<<GridLogMessage << "Cheb2 <n|MdagM|n> "<<norm2(tmp)<<std::endl; | ||||
|       //      Cheb3(hermop,noise,Mn); scale = std::pow(norm2(Mn),-0.5); 	noise=Mn*scale; | ||||
|       //      hermop.Op(noise,tmp); std::cout<<GridLogMessage << "Cheb3 <n|MdagM|n> "<<norm2(tmp)<<std::endl; | ||||
|       //      Cheb4(hermop,noise,Mn); scale = std::pow(norm2(Mn),-0.5); 	noise=Mn*scale; | ||||
|       //      hermop.Op(noise,tmp); std::cout<<GridLogMessage << "Cheb4 <n|MdagM|n> "<<norm2(tmp)<<std::endl; | ||||
|       //      Cheb5(hermop,noise,Mn); scale = std::pow(norm2(Mn),-0.5); 	noise=Mn*scale; | ||||
|       //      hermop.Op(noise,tmp); std::cout<<GridLogMessage << "Cheb5 <n|MdagM|n> "<<norm2(tmp)<<std::endl; | ||||
|       subspace[b]   = noise; | ||||
|       hermop.Op(subspace[b],tmp);  | ||||
|       std::cout<<GridLogMessage << "filt ["<<b<<"] <n|MdagM|n> "<<norm2(tmp)<< " norm " << norm2(noise)<<std::endl; | ||||
|     } | ||||
|  | ||||
|   } | ||||
|  | ||||
|   virtual void CreateSubspaceMultishift(GridParallelRNG  &RNG,LinearOperatorBase<FineField> &hermop, | ||||
| 					double Lo,double tol,int maxit) | ||||
|   { | ||||
|  | ||||
|     RealD scale; | ||||
|  | ||||
|     FineField noise(FineGrid); | ||||
|     FineField Mn(FineGrid); | ||||
|     FineField tmp(FineGrid); | ||||
|  | ||||
|     // New normalised noise | ||||
|     std::cout << GridLogMessage<<" Multishift subspace : Lo "<<Lo<<std::endl; | ||||
|  | ||||
|     // Filter: four-pole partial fraction (poles nominally spaced by Lo) | ||||
|     //   (1/6)/(x+Lo) - (1/2)/(x+2Lo) + (1/2)/(x+3Lo) - (1/6)/(x+4Lo)  =  Lo^3 / [ (x+Lo)(x+2Lo)(x+3Lo)(x+4Lo) ] | ||||
|     // cf. the simpler two-pole filter 1/(x+Lo) - 1/(x+2Lo). | ||||
|     // NB: the shifts below are actually spaced by epsilon = Lo/3, not by Lo. | ||||
|     double epsilon      = Lo/3; | ||||
|     std::vector<RealD> alpha({1.0/6.0,-1.0/2.0,1.0/2.0,-1.0/6.0}); | ||||
|     std::vector<RealD> shifts({Lo,Lo+epsilon,Lo+2*epsilon,Lo+3*epsilon}); | ||||
|     std::vector<RealD> tols({tol,tol,tol,tol}); | ||||
|     std::cout << "sizes "<<alpha.size()<<" "<<shifts.size()<<" "<<tols.size()<<std::endl; | ||||
|  | ||||
|     MultiShiftFunction msf(4,0.0,95.0); | ||||
|     std::cout << "msf constructed "<<std::endl; | ||||
|     msf.poles=shifts; | ||||
|     msf.residues=alpha; | ||||
|     msf.tolerances=tols; | ||||
|     msf.norm=0.0; | ||||
|     msf.order=alpha.size(); | ||||
|     ConjugateGradientMultiShift<FineField> MSCG(maxit,msf); | ||||
|      | ||||
|     for(int b =0;b<nbasis;b++) | ||||
|     { | ||||
|       gaussian(RNG,noise); | ||||
|       scale = std::pow(norm2(noise),-0.5);  | ||||
|       noise=noise*scale; | ||||
|  | ||||
|       // Initial matrix element | ||||
|       hermop.Op(noise,Mn); | ||||
|       if(b==0) std::cout<<GridLogMessage << "noise <n|MdagM|n> "<<norm2(Mn)<<std::endl; | ||||
|  | ||||
|       MSCG(hermop,noise,Mn); | ||||
|       scale = std::pow(norm2(Mn),-0.5); 	Mn=Mn*scale; | ||||
|       subspace[b]   = Mn; | ||||
|       hermop.Op(Mn,tmp);  | ||||
|       std::cout<<GridLogMessage << "filt ["<<b<<"] <n|MdagM|n> "<<norm2(tmp)<<std::endl; | ||||
|  | ||||
|     } | ||||
|  | ||||
|   } | ||||
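
The alpha/shift pairs above realise a short sum of shifted inverses of MdagM in a single multi-shift CG solve. A standalone numerical check of the four-pole identity quoted in the comment (poles at Lo, 2Lo, 3Lo, 4Lo; note that the code actually spaces its poles by epsilon = Lo/3) could read:

#include <cstdio>
#include <cmath>

int main()
{
  double Lo = 0.1;
  // Residues of Lo^3 / [(x+Lo)(x+2Lo)(x+3Lo)(x+4Lo)] at its four poles.
  double alpha[4] = { 1.0/6.0, -1.0/2.0, 1.0/2.0, -1.0/6.0 };
  for(double x : {0.5, 1.0, 10.0, 50.0}){
    double sum = 0.0;
    for(int k=0;k<4;k++) sum += alpha[k]/(x+(k+1)*Lo);
    double closed = std::pow(Lo,3)/((x+Lo)*(x+2*Lo)*(x+3*Lo)*(x+4*Lo));
    std::printf("x=%6.2f  sum=%.12e  closed=%.12e\n", x, sum, closed);
  }
  return 0;
}
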
|   virtual void RefineSubspace(LinearOperatorBase<FineField> &hermop, | ||||
| 			      double Lo,double tol,int maxit) | ||||
|   { | ||||
|     FineField tmp(FineGrid); | ||||
|     for(int b =0;b<nbasis;b++) | ||||
|     { | ||||
|       ConjugateGradient<FineField>  CGsloppy(tol,maxit,false); | ||||
|       ShiftedHermOpLinearOperator<FineField> ShiftedFineHermOp(hermop,Lo); | ||||
|       tmp=Zero(); | ||||
|       CGsloppy(hermop,subspace[b],tmp); | ||||
|       RealD scale = std::pow(norm2(tmp),-0.5); 	tmp=tmp*scale; | ||||
|       subspace[b]=tmp; | ||||
|       hermop.Op(subspace[b],tmp); | ||||
|       std::cout<<GridLogMessage << "filt ["<<b<<"] <n|MdagM|n> "<<norm2(tmp)<<std::endl; | ||||
|     } | ||||
|   } | ||||
|   virtual void RefineSubspaceHDCG(LinearOperatorBase<FineField> &hermop, | ||||
| 				  TwoLevelADEF2mrhs<FineField,CoarseVector> & theHDCG, | ||||
| 				  int nrhs) | ||||
|   { | ||||
|     std::vector<FineField> src_mrhs(nrhs,FineGrid); | ||||
|     std::vector<FineField> res_mrhs(nrhs,FineGrid); | ||||
|     FineField tmp(FineGrid); | ||||
|     for(int b =0;b<nbasis;b+=nrhs) | ||||
|     { | ||||
|       tmp = subspace[b]; | ||||
|       RealD scale = std::pow(norm2(tmp),-0.5); 	tmp=tmp*scale; | ||||
|       subspace[b] =tmp; | ||||
|       hermop.Op(subspace[b],tmp); | ||||
|       std::cout<<GridLogMessage << "before filt ["<<b<<"] <n|MdagM|n> "<<norm2(tmp)<<std::endl; | ||||
|  | ||||
|       for(int r=0;r<MIN(nbasis-b,nrhs);r++){ | ||||
| 	src_mrhs[r] = subspace[b+r]; | ||||
|       } | ||||
|       for(int r=0;r<nrhs;r++){ | ||||
| 	res_mrhs[r] = Zero(); | ||||
|       } | ||||
|       theHDCG(src_mrhs,res_mrhs); | ||||
|  | ||||
|       for(int r=0;r<MIN(nbasis-b,nrhs);r++){ | ||||
| 	tmp = res_mrhs[r]; | ||||
| 	RealD scale = std::pow(norm2(tmp),-0.5); tmp=tmp*scale; | ||||
| 	subspace[b+r]=tmp; | ||||
|       } | ||||
|       hermop.Op(subspace[b],tmp); | ||||
|       std::cout<<GridLogMessage << "after filt ["<<b<<"] <n|MdagM|n> "<<norm2(tmp)<<std::endl; | ||||
|     } | ||||
|   } | ||||
|  | ||||
|    | ||||
|    | ||||
| }; | ||||
| NAMESPACE_END(Grid); | ||||
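
For orientation, a hedged sketch of how these setup routines are typically wired together. The Aggregation constructor and Orthogonalise() are the ones visible in the removed code further down; the grid pointers, RNG and Hermitian operator are assumed to exist already, and every numerical parameter is a placeholder.

// Illustrative call sequence only; CoarseGrid, FineGrid, RNG and HermOp are
// assumed to be constructed elsewhere, and the numbers are placeholders.
const int nbasis = 40;
typedef Aggregation<vSpinColourVector,vTComplex,nbasis> Subspace;

Subspace Aggregates(CoarseGrid,FineGrid,0);   // coarse grid, fine grid, checkerboard
Aggregates.CreateSubspaceChebyshev(RNG,HermOp,
                                   nbasis,
                                   90.0,      // hi  : top of the spectrum
                                   0.1,       // lo  : pass-1 filter cutoff
                                   300,       // orderfilter
                                   150,       // ordermin
                                   10,        // orderstep
                                   0.01);     // filterlo : pass-2 lower edge
Aggregates.Orthogonalise();                   // block Gram-Schmidt over coarse cells
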
|  | ||||
| @@ -56,243 +56,6 @@ inline void blockMaskedInnerProduct(Lattice<CComplex> &CoarseInner, | ||||
|   blockSum(CoarseInner,fine_inner_msk); | ||||
| } | ||||
|  | ||||
|  | ||||
| class Geometry { | ||||
| public: | ||||
|   int npoint; | ||||
|   int base; | ||||
|   std::vector<int> directions   ; | ||||
|   std::vector<int> displacements; | ||||
|   std::vector<int> points_dagger; | ||||
|  | ||||
|   Geometry(int _d)  { | ||||
|      | ||||
|     base = (_d==5) ? 1:0; | ||||
|  | ||||
|     // make coarse grid stencil for 4d , not 5d | ||||
|     if ( _d==5 ) _d=4; | ||||
|  | ||||
|     npoint = 2*_d+1; | ||||
|     directions.resize(npoint); | ||||
|     displacements.resize(npoint); | ||||
|     points_dagger.resize(npoint); | ||||
|     for(int d=0;d<_d;d++){ | ||||
|       directions[d   ] = d+base; | ||||
|       directions[d+_d] = d+base; | ||||
|       displacements[d  ] = +1; | ||||
|       displacements[d+_d]= -1; | ||||
|       points_dagger[d   ] = d+_d; | ||||
|       points_dagger[d+_d] = d; | ||||
|     } | ||||
|     directions   [2*_d]=0; | ||||
|     displacements[2*_d]=0; | ||||
|     points_dagger[2*_d]=2*_d; | ||||
|   } | ||||
|  | ||||
|   int point(int dir, int disp) { | ||||
|     assert(disp == -1 || disp == 0 || disp == 1); | ||||
|     assert(base+0 <= dir && dir < base+4); | ||||
|  | ||||
|     // directions faster index = new indexing | ||||
|     // 4d (base = 0): | ||||
|     // point 0  1  2  3  4  5  6  7  8 | ||||
|     // dir   0  1  2  3  0  1  2  3  0 | ||||
|     // disp +1 +1 +1 +1 -1 -1 -1 -1  0 | ||||
|     // 5d (base = 1): | ||||
|     // point 0  1  2  3  4  5  6  7  8 | ||||
|     // dir   1  2  3  4  1  2  3  4  0 | ||||
|     // disp +1 +1 +1 +1 -1 -1 -1 -1  0 | ||||
|  | ||||
|     // displacements faster index = old indexing | ||||
|     // 4d (base = 0): | ||||
|     // point 0  1  2  3  4  5  6  7  8 | ||||
|     // dir   0  0  1  1  2  2  3  3  0 | ||||
|     // disp +1 -1 +1 -1 +1 -1 +1 -1  0 | ||||
|     // 5d (base = 1): | ||||
|     // point 0  1  2  3  4  5  6  7  8 | ||||
|     // dir   1  1  2  2  3  3  4  4  0 | ||||
|     // disp +1 -1 +1 -1 +1 -1 +1 -1  0 | ||||
|  | ||||
|     if(dir == 0 and disp == 0) | ||||
|       return 8; | ||||
|     else // New indexing | ||||
|       return (1 - disp) / 2 * 4 + dir - base; | ||||
|     // else // Old indexing | ||||
|     //   return (4 * (dir - base) + 1 - disp) / 2; | ||||
|   } | ||||
| }; | ||||
|    | ||||
| template<class Fobj,class CComplex,int nbasis> | ||||
| class Aggregation   { | ||||
| public: | ||||
|   typedef iVector<CComplex,nbasis >             siteVector; | ||||
|   typedef Lattice<siteVector>                 CoarseVector; | ||||
|   typedef Lattice<iMatrix<CComplex,nbasis > > CoarseMatrix; | ||||
|  | ||||
|   typedef Lattice< CComplex >   CoarseScalar; // used for inner products on fine field | ||||
|   typedef Lattice<Fobj >        FineField; | ||||
|  | ||||
|   GridBase *CoarseGrid; | ||||
|   GridBase *FineGrid; | ||||
|   std::vector<Lattice<Fobj> > subspace; | ||||
|   int checkerboard; | ||||
|   int Checkerboard(void){return checkerboard;} | ||||
|   Aggregation(GridBase *_CoarseGrid,GridBase *_FineGrid,int _checkerboard) :  | ||||
|     CoarseGrid(_CoarseGrid), | ||||
|     FineGrid(_FineGrid), | ||||
|     subspace(nbasis,_FineGrid), | ||||
|     checkerboard(_checkerboard) | ||||
|   { | ||||
|   }; | ||||
|    | ||||
|   void Orthogonalise(void){ | ||||
|     CoarseScalar InnerProd(CoarseGrid);  | ||||
|     std::cout << GridLogMessage <<" Block Gramm-Schmidt pass 1"<<std::endl; | ||||
|     blockOrthogonalise(InnerProd,subspace); | ||||
|   }  | ||||
|   void ProjectToSubspace(CoarseVector &CoarseVec,const FineField &FineVec){ | ||||
|     blockProject(CoarseVec,FineVec,subspace); | ||||
|   } | ||||
|   void PromoteFromSubspace(const CoarseVector &CoarseVec,FineField &FineVec){ | ||||
|     FineVec.Checkerboard() = subspace[0].Checkerboard(); | ||||
|     blockPromote(CoarseVec,FineVec,subspace); | ||||
|   } | ||||
|  | ||||
|   virtual void CreateSubspace(GridParallelRNG  &RNG,LinearOperatorBase<FineField> &hermop,int nn=nbasis) { | ||||
|  | ||||
|     RealD scale; | ||||
|  | ||||
|     ConjugateGradient<FineField> CG(1.0e-2,100,false); | ||||
|     FineField noise(FineGrid); | ||||
|     FineField Mn(FineGrid); | ||||
|  | ||||
|     for(int b=0;b<nn;b++){ | ||||
|        | ||||
|       subspace[b] = Zero(); | ||||
|       gaussian(RNG,noise); | ||||
|       scale = std::pow(norm2(noise),-0.5);  | ||||
|       noise=noise*scale; | ||||
|        | ||||
|       hermop.Op(noise,Mn); std::cout<<GridLogMessage << "noise   ["<<b<<"] <n|MdagM|n> "<<norm2(Mn)<<std::endl; | ||||
|  | ||||
|       for(int i=0;i<1;i++){ | ||||
|  | ||||
| 	CG(hermop,noise,subspace[b]); | ||||
|  | ||||
| 	noise = subspace[b]; | ||||
| 	scale = std::pow(norm2(noise),-0.5);  | ||||
| 	noise=noise*scale; | ||||
|  | ||||
|       } | ||||
|  | ||||
|       hermop.Op(noise,Mn); std::cout<<GridLogMessage << "filtered["<<b<<"] <f|MdagM|f> "<<norm2(Mn)<<std::endl; | ||||
|       subspace[b]   = noise; | ||||
|  | ||||
|     } | ||||
|   } | ||||
|  | ||||
|   //////////////////////////////////////////////////////////////////////////////////////////////// | ||||
|   // World of possibilities here. But have tried quite a lot of experiments (250+ jobs run on Summit) | ||||
|   // and this is the best I found | ||||
|   //////////////////////////////////////////////////////////////////////////////////////////////// | ||||
|  | ||||
|   virtual void CreateSubspaceChebyshev(GridParallelRNG  &RNG,LinearOperatorBase<FineField> &hermop, | ||||
| 				       int nn, | ||||
| 				       double hi, | ||||
| 				       double lo, | ||||
| 				       int orderfilter, | ||||
| 				       int ordermin, | ||||
| 				       int orderstep, | ||||
| 				       double filterlo | ||||
| 				       ) { | ||||
|  | ||||
|     RealD scale; | ||||
|  | ||||
|     FineField noise(FineGrid); | ||||
|     FineField Mn(FineGrid); | ||||
|     FineField tmp(FineGrid); | ||||
|  | ||||
|     // New normalised noise | ||||
|     gaussian(RNG,noise); | ||||
|     scale = std::pow(norm2(noise),-0.5);  | ||||
|     noise=noise*scale; | ||||
|  | ||||
|     // Initial matrix element | ||||
|     hermop.Op(noise,Mn); std::cout<<GridLogMessage << "noise <n|MdagM|n> "<<norm2(Mn)<<std::endl; | ||||
|  | ||||
|     int b =0; | ||||
|     { | ||||
|       // Filter | ||||
|       Chebyshev<FineField> Cheb(lo,hi,orderfilter); | ||||
|       Cheb(hermop,noise,Mn); | ||||
|       // normalise | ||||
|       scale = std::pow(norm2(Mn),-0.5); 	Mn=Mn*scale; | ||||
|       subspace[b]   = Mn; | ||||
|       hermop.Op(Mn,tmp);  | ||||
|       std::cout<<GridLogMessage << "filt ["<<b<<"] <n|MdagM|n> "<<norm2(tmp)<<std::endl; | ||||
|       b++; | ||||
|     } | ||||
|  | ||||
|     // Generate a full sequence of Chebyshevs | ||||
|     { | ||||
|       lo=filterlo; | ||||
|       noise=Mn; | ||||
|  | ||||
|       FineField T0(FineGrid); T0 = noise;   | ||||
|       FineField T1(FineGrid);  | ||||
|       FineField T2(FineGrid); | ||||
|       FineField y(FineGrid); | ||||
|        | ||||
|       FineField *Tnm = &T0; | ||||
|       FineField *Tn  = &T1; | ||||
|       FineField *Tnp = &T2; | ||||
|  | ||||
|       // Tn=T1 = (xscale M + mscale)in | ||||
|       RealD xscale = 2.0/(hi-lo); | ||||
|       RealD mscale = -(hi+lo)/(hi-lo); | ||||
|       hermop.HermOp(T0,y); | ||||
|       T1=y*xscale+noise*mscale; | ||||
|  | ||||
|       for(int n=2;n<=ordermin+orderstep*(nn-2);n++){ | ||||
| 	 | ||||
| 	hermop.HermOp(*Tn,y); | ||||
|  | ||||
| 	autoView( y_v , y, AcceleratorWrite); | ||||
| 	autoView( Tn_v , (*Tn), AcceleratorWrite); | ||||
| 	autoView( Tnp_v , (*Tnp), AcceleratorWrite); | ||||
| 	autoView( Tnm_v , (*Tnm), AcceleratorWrite); | ||||
| 	const int Nsimd = CComplex::Nsimd(); | ||||
| 	accelerator_for(ss, FineGrid->oSites(), Nsimd, { | ||||
| 	  coalescedWrite(y_v[ss],xscale*y_v(ss)+mscale*Tn_v(ss)); | ||||
| 	  coalescedWrite(Tnp_v[ss],2.0*y_v(ss)-Tnm_v(ss)); | ||||
|         }); | ||||
|  | ||||
| 	// Possible more fine grained control is needed than a linear sweep, | ||||
| 	// but huge productivity gain if this is simple algorithm and not a tunable | ||||
| 	int m =1; | ||||
| 	if ( n>=ordermin ) m=n-ordermin; | ||||
| 	if ( (m%orderstep)==0 ) {  | ||||
| 	  Mn=*Tnp; | ||||
| 	  scale = std::pow(norm2(Mn),-0.5);         Mn=Mn*scale; | ||||
| 	  subspace[b] = Mn; | ||||
| 	  hermop.Op(Mn,tmp);  | ||||
| 	  std::cout<<GridLogMessage << n<<" filt ["<<b<<"] <n|MdagM|n> "<<norm2(tmp)<<std::endl; | ||||
| 	  b++; | ||||
| 	} | ||||
|  | ||||
| 	// Cycle pointers to avoid copies | ||||
| 	FineField *swizzle = Tnm; | ||||
| 	Tnm    =Tn; | ||||
| 	Tn     =Tnp; | ||||
| 	Tnp    =swizzle; | ||||
| 	   | ||||
|       } | ||||
|     } | ||||
|     assert(b==nn); | ||||
|   } | ||||
|  | ||||
| }; | ||||
|  | ||||
| // Fine Object == (per site) type of fine field | ||||
| // nbasis      == number of deflation vectors | ||||
| template<class Fobj,class CComplex,int nbasis> | ||||
							
								
								
									
Grid/algorithms/multigrid/GeneralCoarsenedMatrix.h (new file, 619 lines)
							| @@ -0,0 +1,619 @@ | ||||
| /************************************************************************************* | ||||
|  | ||||
|     Grid physics library, www.github.com/paboyle/Grid  | ||||
|  | ||||
|     Source file: ./lib/algorithms/GeneralCoarsenedMatrix.h | ||||
|  | ||||
|     Copyright (C) 2015 | ||||
|  | ||||
| Author: Peter Boyle <pboyle@bnl.gov> | ||||
|  | ||||
|     This program is free software; you can redistribute it and/or modify | ||||
|     it under the terms of the GNU General Public License as published by | ||||
|     the Free Software Foundation; either version 2 of the License, or | ||||
|     (at your option) any later version. | ||||
|  | ||||
|     This program is distributed in the hope that it will be useful, | ||||
|     but WITHOUT ANY WARRANTY; without even the implied warranty of | ||||
|     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the | ||||
|     GNU General Public License for more details. | ||||
|  | ||||
|     You should have received a copy of the GNU General Public License along | ||||
|     with this program; if not, write to the Free Software Foundation, Inc., | ||||
|     51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. | ||||
|  | ||||
|     See the full license in the file "LICENSE" in the top level distribution directory | ||||
| *************************************************************************************/ | ||||
| /*  END LEGAL */ | ||||
| #pragma once | ||||
|  | ||||
| #include <Grid/qcd/QCD.h> // needed for Dagger(Yes|No), Inverse(Yes|No) | ||||
|  | ||||
| #include <Grid/lattice/PaddedCell.h> | ||||
| #include <Grid/stencil/GeneralLocalStencil.h> | ||||
|  | ||||
| NAMESPACE_BEGIN(Grid); | ||||
|  | ||||
| // Fine Object == (per site) type of fine field | ||||
| // nbasis      == number of deflation vectors | ||||
| template<class Fobj,class CComplex,int nbasis> | ||||
| class GeneralCoarsenedMatrix : public SparseMatrixBase<Lattice<iVector<CComplex,nbasis > > >  { | ||||
| public: | ||||
|  | ||||
|   typedef GeneralCoarsenedMatrix<Fobj,CComplex,nbasis> GeneralCoarseOp; | ||||
|   typedef iVector<CComplex,nbasis >           siteVector; | ||||
|   typedef iMatrix<CComplex,nbasis >           siteMatrix; | ||||
|   typedef Lattice<iScalar<CComplex> >         CoarseComplexField; | ||||
|   typedef Lattice<siteVector>                 CoarseVector; | ||||
|   typedef Lattice<iMatrix<CComplex,nbasis > > CoarseMatrix; | ||||
|   typedef iMatrix<CComplex,nbasis >  Cobj; | ||||
|   typedef iVector<CComplex,nbasis >  Cvec; | ||||
|   typedef Lattice< CComplex >   CoarseScalar; // used for inner products on fine field | ||||
|   typedef Lattice<Fobj >        FineField; | ||||
|   typedef Lattice<CComplex >    FineComplexField; | ||||
|   typedef CoarseVector Field; | ||||
|   //////////////////// | ||||
|   // Data members | ||||
|   //////////////////// | ||||
|   int hermitian; | ||||
|   GridBase      *       _FineGrid;  | ||||
|   GridCartesian *       _CoarseGrid;  | ||||
|   NonLocalStencilGeometry &geom; | ||||
|   PaddedCell Cell; | ||||
|   GeneralLocalStencil Stencil; | ||||
|    | ||||
|   std::vector<CoarseMatrix> _A; | ||||
|   std::vector<CoarseMatrix> _Adag; | ||||
|   std::vector<CoarseVector> MultTemporaries; | ||||
|  | ||||
|   /////////////////////// | ||||
|   // Interface | ||||
|   /////////////////////// | ||||
|   GridBase      * Grid(void)           { return _CoarseGrid; };   // this is all the linalg routines need to know | ||||
|   GridBase      * FineGrid(void)       { return _FineGrid; };   // this is all the linalg routines need to know | ||||
|   GridCartesian * CoarseGrid(void)     { return _CoarseGrid; };   // this is all the linalg routines need to know | ||||
|  | ||||
|   /*  void ShiftMatrix(RealD shift) | ||||
|   { | ||||
|     int Nd=_FineGrid->Nd();  | ||||
|     Coordinate zero_shift(Nd,0); | ||||
|     for(int p=0;p<geom.npoint;p++){ | ||||
|       if ( zero_shift==geom.shifts[p] ) { | ||||
| 	_A[p] = _A[p]+shift; | ||||
| 	//	_Adag[p] = _Adag[p]+shift; | ||||
|       } | ||||
|     }     | ||||
|   } | ||||
|   void ProjectNearestNeighbour(RealD shift, GeneralCoarseOp &CopyMe) | ||||
|   { | ||||
|     int nfound=0; | ||||
|     std::cout << GridLogMessage <<"GeneralCoarsenedMatrix::ProjectNearestNeighbour "<< CopyMe._A[0].Grid()<<std::endl; | ||||
|     for(int p=0;p<geom.npoint;p++){ | ||||
|       for(int pp=0;pp<CopyMe.geom.npoint;pp++){ | ||||
|  	// Search for the same relative shift | ||||
| 	// Avoids brutal handling of Grid pointers | ||||
| 	if ( CopyMe.geom.shifts[pp]==geom.shifts[p] ) { | ||||
| 	  _A[p] = CopyMe.Cell.Extract(CopyMe._A[pp]); | ||||
| 	  //	  _Adag[p] = CopyMe.Cell.Extract(CopyMe._Adag[pp]); | ||||
| 	  nfound++; | ||||
| 	} | ||||
|       } | ||||
|     } | ||||
|     assert(nfound==geom.npoint); | ||||
|     ExchangeCoarseLinks(); | ||||
|   } | ||||
|   */ | ||||
|    | ||||
|   GeneralCoarsenedMatrix(NonLocalStencilGeometry &_geom,GridBase *FineGrid, GridCartesian * CoarseGrid) | ||||
|     : geom(_geom), | ||||
|       _FineGrid(FineGrid), | ||||
|       _CoarseGrid(CoarseGrid), | ||||
|       hermitian(1), | ||||
|       Cell(_geom.Depth(),_CoarseGrid), | ||||
|       Stencil(Cell.grids.back(),geom.shifts) | ||||
|   { | ||||
|     { | ||||
|       int npoint = _geom.npoint; | ||||
|     } | ||||
|     _A.resize(geom.npoint,CoarseGrid); | ||||
|     //    _Adag.resize(geom.npoint,CoarseGrid); | ||||
|   } | ||||
|   void M (const CoarseVector &in, CoarseVector &out) | ||||
|   { | ||||
|     Mult(_A,in,out); | ||||
|   } | ||||
|   void Mdag (const CoarseVector &in, CoarseVector &out) | ||||
|   { | ||||
|     assert(hermitian); | ||||
|     Mult(_A,in,out); | ||||
|     //    if ( hermitian ) M(in,out); | ||||
|     //    else Mult(_Adag,in,out); | ||||
|   } | ||||
|   void Mult (std::vector<CoarseMatrix> &A,const CoarseVector &in, CoarseVector &out) | ||||
|   { | ||||
|     RealD tviews=0;    RealD ttot=0;    RealD tmult=0;   RealD texch=0;    RealD text=0; RealD ttemps=0; RealD tcopy=0; | ||||
|     RealD tmult2=0; | ||||
|  | ||||
|     ttot=-usecond(); | ||||
|     conformable(CoarseGrid(),in.Grid()); | ||||
|     conformable(in.Grid(),out.Grid()); | ||||
|     out.Checkerboard() = in.Checkerboard(); | ||||
|     CoarseVector tin=in; | ||||
|  | ||||
|     texch-=usecond(); | ||||
|     CoarseVector pin = Cell.ExchangePeriodic(tin); | ||||
|     texch+=usecond(); | ||||
|  | ||||
|     CoarseVector pout(pin.Grid()); | ||||
|  | ||||
|     int npoint = geom.npoint; | ||||
|     typedef LatticeView<Cobj> Aview; | ||||
|     typedef LatticeView<Cvec> Vview; | ||||
|        | ||||
|     const int Nsimd = CComplex::Nsimd(); | ||||
|      | ||||
|     int64_t osites=pin.Grid()->oSites(); | ||||
|  | ||||
|     RealD flops = 1.0* npoint * nbasis * nbasis * 8.0 * osites * CComplex::Nsimd(); | ||||
|     RealD bytes = 1.0*osites*sizeof(siteMatrix)*npoint | ||||
|                 + 2.0*osites*sizeof(siteVector)*npoint; | ||||
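|     // The factor 8 counts flops per complex multiply-add (6 for the multiply, | ||||
|     // 2 for the add), for each of the nbasis x nbasis entries, per stencil | ||||
|     // point, per site, per SIMD lane. | ||||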
|        | ||||
|     { | ||||
|       tviews-=usecond(); | ||||
|       autoView( in_v , pin, AcceleratorRead); | ||||
|       autoView( out_v , pout, AcceleratorWriteDiscard); | ||||
|       autoView( Stencil_v  , Stencil, AcceleratorRead); | ||||
|       tviews+=usecond(); | ||||
|  | ||||
|       // Static and prereserve to keep UVM region live and not resized across multiple calls | ||||
|       ttemps-=usecond(); | ||||
|       MultTemporaries.resize(npoint,pin.Grid());        | ||||
|       ttemps+=usecond(); | ||||
|       std::vector<Aview> AcceleratorViewContainer_h; | ||||
|       std::vector<Vview> AcceleratorVecViewContainer_h;  | ||||
|  | ||||
|       tviews-=usecond(); | ||||
|       for(int p=0;p<npoint;p++) { | ||||
| 	AcceleratorViewContainer_h.push_back(      A[p].View(AcceleratorRead)); | ||||
| 	AcceleratorVecViewContainer_h.push_back(MultTemporaries[p].View(AcceleratorWrite)); | ||||
|       } | ||||
|       tviews+=usecond(); | ||||
|  | ||||
|       static deviceVector<Aview> AcceleratorViewContainer; AcceleratorViewContainer.resize(npoint); | ||||
|       static deviceVector<Vview> AcceleratorVecViewContainer; AcceleratorVecViewContainer.resize(npoint);  | ||||
|        | ||||
|       auto Aview_p = &AcceleratorViewContainer[0]; | ||||
|       auto Vview_p = &AcceleratorVecViewContainer[0]; | ||||
|       tcopy-=usecond(); | ||||
|       acceleratorCopyToDevice(&AcceleratorViewContainer_h[0],&AcceleratorViewContainer[0],npoint *sizeof(Aview)); | ||||
|       acceleratorCopyToDevice(&AcceleratorVecViewContainer_h[0],&AcceleratorVecViewContainer[0],npoint *sizeof(Vview)); | ||||
|       tcopy+=usecond(); | ||||
|  | ||||
|       tmult-=usecond(); | ||||
|       accelerator_for(spb, osites*nbasis*npoint, Nsimd, { | ||||
| 	  typedef decltype(coalescedRead(in_v[0](0))) calcComplex; | ||||
| 	  int32_t ss   = spb/(nbasis*npoint); | ||||
| 	  int32_t bp   = spb%(nbasis*npoint); | ||||
| 	  int32_t point= bp/nbasis; | ||||
| 	  int32_t b    = bp%nbasis; | ||||
| 	  auto SE  = Stencil_v.GetEntry(point,ss); | ||||
| 	  auto nbr = coalescedReadGeneralPermute(in_v[SE->_offset],SE->_permute,Nd); | ||||
| 	  auto res = coalescedRead(Aview_p[point][ss](0,b))*nbr(0); | ||||
| 	  for(int bb=1;bb<nbasis;bb++) { | ||||
| 	    res = res + coalescedRead(Aview_p[point][ss](bb,b))*nbr(bb); | ||||
| 	  } | ||||
| 	  coalescedWrite(Vview_p[point][ss](b),res); | ||||
|       }); | ||||
|       tmult2-=usecond(); | ||||
|       accelerator_for(sb, osites*nbasis, Nsimd, { | ||||
| 	  int ss = sb/nbasis; | ||||
| 	  int b  = sb%nbasis; | ||||
| 	  auto res = coalescedRead(Vview_p[0][ss](b)); | ||||
| 	  for(int point=1;point<npoint;point++){ | ||||
| 	    res = res + coalescedRead(Vview_p[point][ss](b)); | ||||
| 	  } | ||||
| 	  coalescedWrite(out_v[ss](b),res); | ||||
|       }); | ||||
|       tmult2+=usecond(); | ||||
|       tmult+=usecond(); | ||||
|       for(int p=0;p<npoint;p++) { | ||||
| 	AcceleratorViewContainer_h[p].ViewClose(); | ||||
| 	AcceleratorVecViewContainer_h[p].ViewClose(); | ||||
|       } | ||||
|     } | ||||
|  | ||||
|     text-=usecond(); | ||||
|     out = Cell.Extract(pout); | ||||
|     text+=usecond(); | ||||
|     ttot+=usecond(); | ||||
|      | ||||
|     std::cout << GridLogPerformance<<"Coarse 1rhs Mult Aviews "<<tviews<<" us"<<std::endl; | ||||
|     std::cout << GridLogPerformance<<"Coarse Mult exch "<<texch<<" us"<<std::endl; | ||||
|     std::cout << GridLogPerformance<<"Coarse Mult mult "<<tmult<<" us"<<std::endl; | ||||
|     std::cout << GridLogPerformance<<" of which mult2  "<<tmult2<<" us"<<std::endl; | ||||
|     std::cout << GridLogPerformance<<"Coarse Mult ext  "<<text<<" us"<<std::endl; | ||||
|     std::cout << GridLogPerformance<<"Coarse Mult temps "<<ttemps<<" us"<<std::endl; | ||||
|     std::cout << GridLogPerformance<<"Coarse Mult copy  "<<tcopy<<" us"<<std::endl; | ||||
|     std::cout << GridLogPerformance<<"Coarse Mult tot  "<<ttot<<" us"<<std::endl; | ||||
|     //    std::cout << GridLogPerformance<<std::endl; | ||||
|     std::cout << GridLogPerformance<<"Coarse Kernel flops "<< flops<<std::endl; | ||||
|     std::cout << GridLogPerformance<<"Coarse Kernel flop/s "<< flops/tmult<<" mflop/s"<<std::endl; | ||||
|     std::cout << GridLogPerformance<<"Coarse Kernel bytes/s "<< bytes/tmult<<" MB/s"<<std::endl; | ||||
|     std::cout << GridLogPerformance<<"Coarse overall flops/s "<< flops/ttot<<" mflop/s"<<std::endl; | ||||
|     std::cout << GridLogPerformance<<"Coarse total bytes   "<< bytes/1e6<<" MB"<<std::endl; | ||||
|  | ||||
|   }; | ||||
|    | ||||
|   void PopulateAdag(void) | ||||
|   { | ||||
|     for(int64_t bidx=0;bidx<CoarseGrid()->gSites() ;bidx++){ | ||||
|       Coordinate bcoor; | ||||
|       CoarseGrid()->GlobalIndexToGlobalCoor(bidx,bcoor); | ||||
|        | ||||
|       for(int p=0;p<geom.npoint;p++){ | ||||
| 	Coordinate scoor = bcoor; | ||||
| 	for(int mu=0;mu<bcoor.size();mu++){ | ||||
| 	  int L = CoarseGrid()->GlobalDimensions()[mu]; | ||||
| 	  scoor[mu] = (bcoor[mu] - geom.shifts[p][mu] + L) % L; // Modulo arithmetic | ||||
| 	} | ||||
| 	// Flip to poke/peekLocalSite and not too bad | ||||
| 	auto link = peekSite(_A[p],scoor); | ||||
| 	int pp = geom.Reverse(p); | ||||
| 	pokeSite(adj(link),_Adag[pp],bcoor); | ||||
|       } | ||||
|     } | ||||
|   } | ||||
|   ///////////////////////////////////////////////////////////// | ||||
|   //  | ||||
|   // A) Only reduced flops option is to use a padded cell of depth 4 | ||||
|   // and apply MpcDagMpc in the padded cell. | ||||
|   // | ||||
|   // Makes for ONE application of MpcDagMpc per vector instead of 30 or 80. | ||||
|   // With depth-4 padding the effective cell is (B+8)^4; for blocking B=4 that is a 12^4/4^4 = 81x ratio. | ||||
|   // Cost is 81x more, the same as the stencil size (3^4 = 81 points). | ||||
|   // | ||||
|   // But: can eliminate comms and do as local dirichlet. | ||||
|   // | ||||
|   // Local exchange gauge field once. | ||||
|   // Apply to all vectors, local only computation. | ||||
|   // Must exchange ghost subcells in reverse process of PaddedCell to take inner products | ||||
|   // | ||||
|   // B) Can reduce cost: pad by 1, apply Deo      (4^4+6^4+8^4+8^4 )/ (4x 4^4) | ||||
|   //                     pad by 2, apply Doe | ||||
|   //                     pad by 3, apply Deo | ||||
|   //                     then break out 8x directions; cost is ~10x MpcDagMpc per vector | ||||
|   // | ||||
|   // => almost factor of 10 in setup cost, excluding data rearrangement | ||||
|   // | ||||
|   // Intermediates -- ignore the corner terms, leave approximate and force Hermitian | ||||
|   // Intermediates -- pad by 2 and apply 1+8+24 = 33 times. | ||||
|   ///////////////////////////////////////////////////////////// | ||||
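|   // Worked numbers for the estimates above: with blocking 4^4, depth-4 padding | ||||
|   // gives (4+8)^4 = 12^4, i.e. 12^4/4^4 = 81x the volume, matching the 3^4 = 81 | ||||
|   // point stencil; for option B, (4^4+6^4+8^4+8^4)/(4 x 4^4) = 9744/1024 ~ 9.5, | ||||
|   // consistent with the ~10x MpcDagMpc per vector quoted above. | ||||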
|  | ||||
|     ////////////////////////////////////////////////////////// | ||||
|     // BFM HDCG style approach: Solve a system of equations to get Aij | ||||
|     ////////////////////////////////////////////////////////// | ||||
|     /* | ||||
|      *     Here, k,l index which possible shift within the 3^Nd "ball" connected by MdagM. | ||||
|      * | ||||
|      *     conj(phases[block]) proj[k][ block*Nvec+j ] =  \sum_{l in ball}  e^{i q_k . delta_l} < phi_{block,j} | MdagM | phi_{(block+delta_l),i} >  | ||||
|      *                                                 =  \sum_{l in ball} e^{i q_k . delta_l} A_ji | ||||
|      * | ||||
|      *     Must invert the matrix M_k,l = e^{i q_k . delta_l} | ||||
|      * | ||||
|      *     Where q_k = delta_k . (2*M_PI/global_nb[mu]) | ||||
|      */ | ||||
| #if 0 | ||||
|   void CoarsenOperator(LinearOperatorBase<Lattice<Fobj> > &linop, | ||||
| 		       Aggregation<Fobj,CComplex,nbasis> & Subspace) | ||||
|   { | ||||
|     std::cout << GridLogMessage<< "GeneralCoarsenMatrix "<< std::endl; | ||||
|     GridBase *grid = FineGrid(); | ||||
|  | ||||
|     RealD tproj=0.0; | ||||
|     RealD teigen=0.0; | ||||
|     RealD tmat=0.0; | ||||
|     RealD tphase=0.0; | ||||
|     RealD tinv=0.0; | ||||
|  | ||||
|     ///////////////////////////////////////////////////////////// | ||||
|     // Orthogonalise the subblocks over the basis | ||||
|     ///////////////////////////////////////////////////////////// | ||||
|     CoarseScalar InnerProd(CoarseGrid());  | ||||
|     blockOrthogonalise(InnerProd,Subspace.subspace); | ||||
|  | ||||
|     const int npoint = geom.npoint; | ||||
|        | ||||
|     Coordinate clatt = CoarseGrid()->GlobalDimensions(); | ||||
|     int Nd = CoarseGrid()->Nd(); | ||||
|  | ||||
|       /* | ||||
|        *     Here, k,l index which possible momentum/shift within the N-points connected by MdagM. | ||||
|        *     Matrix index i is mapped to this shift via  | ||||
|        *               geom.shifts[i] | ||||
|        * | ||||
|        *     conj(pha[block]) proj[k (which mom)][j (basis vec cpt)][block]  | ||||
|        *       =  \sum_{l in ball}  e^{i q_k . delta_l} < phi_{block,j} | MdagM | phi_{(block+delta_l),i} >  | ||||
|        *       =  \sum_{l in ball} e^{iqk.delta_l} A_ji^{b.b+l} | ||||
|        *       = M_{kl} A_ji^{b.b+l} | ||||
|        * | ||||
|        *     Must assemble and invert matrix M_k,l = e^{i q_k . delta_l} | ||||
|        *   | ||||
|        *     Where q_k = delta_k . (2*M_PI/global_nb[mu]) | ||||
|        * | ||||
|        *     Then A{ji}^{b,b+l} = M^{-1}_{lm} ComputeProj_{m,b,i,j} | ||||
|        */ | ||||
|     teigen-=usecond(); | ||||
|     Eigen::MatrixXcd Mkl    = Eigen::MatrixXcd::Zero(npoint,npoint); | ||||
|     Eigen::MatrixXcd invMkl = Eigen::MatrixXcd::Zero(npoint,npoint); | ||||
|     ComplexD ci(0.0,1.0); | ||||
|     for(int k=0;k<npoint;k++){ // Loop over momenta | ||||
|  | ||||
|       for(int l=0;l<npoint;l++){ // Loop over nbr relative | ||||
| 	ComplexD phase(0.0,0.0); | ||||
| 	for(int mu=0;mu<Nd;mu++){ | ||||
| 	  RealD TwoPiL =  M_PI * 2.0/ clatt[mu]; | ||||
| 	  phase=phase+TwoPiL*geom.shifts[k][mu]*geom.shifts[l][mu]; | ||||
| 	} | ||||
| 	phase=exp(phase*ci); | ||||
| 	Mkl(k,l) = phase; | ||||
|       } | ||||
|     } | ||||
|     invMkl = Mkl.inverse(); | ||||
|     teigen+=usecond(); | ||||
|  | ||||
|     /////////////////////////////////////////////////////////////////////// | ||||
|     // Now compute the matrix elements of linop between the orthonormal | ||||
|     // set of vectors. | ||||
|     /////////////////////////////////////////////////////////////////////// | ||||
|     FineField phaV(grid); // Phased block basis vector | ||||
|     FineField MphaV(grid);// Matrix applied | ||||
|     CoarseVector coarseInner(CoarseGrid()); | ||||
|  | ||||
|     std::vector<CoarseVector> ComputeProj(npoint,CoarseGrid()); | ||||
|     std::vector<CoarseVector>          FT(npoint,CoarseGrid()); | ||||
|     for(int i=0;i<nbasis;i++){// Loop over basis vectors | ||||
|       std::cout << GridLogMessage<< "CoarsenMatrixColoured vec "<<i<<"/"<<nbasis<< std::endl; | ||||
|       for(int p=0;p<npoint;p++){ // Loop over momenta in npoint | ||||
| 	///////////////////////////////////////////////////// | ||||
| 	// Stick a phase on every block | ||||
| 	///////////////////////////////////////////////////// | ||||
| 	tphase-=usecond(); | ||||
| 	CoarseComplexField coor(CoarseGrid()); | ||||
| 	CoarseComplexField pha(CoarseGrid());	pha=Zero(); | ||||
| 	for(int mu=0;mu<Nd;mu++){ | ||||
| 	  LatticeCoordinate(coor,mu); | ||||
| 	  RealD TwoPiL =  M_PI * 2.0/ clatt[mu]; | ||||
| 	  pha = pha + (TwoPiL * geom.shifts[p][mu]) * coor; | ||||
| 	} | ||||
| 	pha  =exp(pha*ci); | ||||
| 	phaV=Zero(); | ||||
| 	blockZAXPY(phaV,pha,Subspace.subspace[i],phaV); | ||||
| 	tphase+=usecond(); | ||||
|  | ||||
| 	///////////////////////////////////////////////////////////////////// | ||||
| 	// Multiply phased subspace vector by matrix and project to subspace | ||||
| 	// Remove local bulk phase to leave relative phases | ||||
| 	///////////////////////////////////////////////////////////////////// | ||||
| 	tmat-=usecond(); | ||||
| 	linop.Op(phaV,MphaV); | ||||
| 	tmat+=usecond(); | ||||
|  | ||||
| 	tproj-=usecond(); | ||||
| 	blockProject(coarseInner,MphaV,Subspace.subspace); | ||||
| 	coarseInner = conjugate(pha) * coarseInner; | ||||
|  | ||||
| 	ComputeProj[p] = coarseInner; | ||||
| 	tproj+=usecond(); | ||||
|  | ||||
|       } | ||||
|  | ||||
|       tinv-=usecond(); | ||||
|       for(int k=0;k<npoint;k++){ | ||||
| 	FT[k] = Zero(); | ||||
| 	for(int l=0;l<npoint;l++){ | ||||
| 	  FT[k]= FT[k]+ invMkl(l,k)*ComputeProj[l]; | ||||
| 	} | ||||
|        | ||||
| 	int osites=CoarseGrid()->oSites(); | ||||
| 	autoView( A_v  , _A[k], AcceleratorWrite); | ||||
| 	autoView( FT_v  , FT[k], AcceleratorRead); | ||||
| 	accelerator_for(sss, osites, 1, { | ||||
| 	    for(int j=0;j<nbasis;j++){ | ||||
| 	      A_v[sss](i,j) = FT_v[sss](j); | ||||
| 	    } | ||||
|         }); | ||||
|       } | ||||
|       tinv+=usecond(); | ||||
|     } | ||||
|  | ||||
|     // Only needed if nonhermitian | ||||
|     if ( ! hermitian ) { | ||||
|       //      std::cout << GridLogMessage<<"PopulateAdag  "<<std::endl; | ||||
|       //      PopulateAdag(); | ||||
|     } | ||||
|  | ||||
|     // Need to write something to populate Adag from A | ||||
|     ExchangeCoarseLinks(); | ||||
|     std::cout << GridLogMessage<<"CoarsenOperator eigen  "<<teigen<<" us"<<std::endl; | ||||
|     std::cout << GridLogMessage<<"CoarsenOperator phase  "<<tphase<<" us"<<std::endl; | ||||
|     std::cout << GridLogMessage<<"CoarsenOperator mat    "<<tmat <<" us"<<std::endl; | ||||
|     std::cout << GridLogMessage<<"CoarsenOperator proj   "<<tproj<<" us"<<std::endl; | ||||
|     std::cout << GridLogMessage<<"CoarsenOperator inv    "<<tinv<<" us"<<std::endl; | ||||
|   } | ||||
| #else | ||||
|   void CoarsenOperator(LinearOperatorBase<Lattice<Fobj> > &linop, | ||||
| 		       Aggregation<Fobj,CComplex,nbasis> & Subspace) | ||||
|   { | ||||
|     std::cout << GridLogMessage<< "GeneralCoarsenMatrix "<< std::endl; | ||||
|     GridBase *grid = FineGrid(); | ||||
|  | ||||
|     RealD tproj=0.0; | ||||
|     RealD teigen=0.0; | ||||
|     RealD tmat=0.0; | ||||
|     RealD tphase=0.0; | ||||
|     RealD tphaseBZ=0.0; | ||||
|     RealD tinv=0.0; | ||||
|  | ||||
|     ///////////////////////////////////////////////////////////// | ||||
|     // Orthogonalise the subblocks over the basis | ||||
|     ///////////////////////////////////////////////////////////// | ||||
|     CoarseScalar InnerProd(CoarseGrid());  | ||||
|     blockOrthogonalise(InnerProd,Subspace.subspace); | ||||
|  | ||||
|     //    for(int s=0;s<Subspace.subspace.size();s++){ | ||||
|       //      std::cout << " subspace norm "<<norm2(Subspace.subspace[s])<<std::endl; | ||||
|     //    } | ||||
|     const int npoint = geom.npoint; | ||||
|        | ||||
|     Coordinate clatt = CoarseGrid()->GlobalDimensions(); | ||||
|     int Nd = CoarseGrid()->Nd(); | ||||
|  | ||||
|       /* | ||||
|        *     Here, k,l index which possible momentum/shift within the N-points connected by MdagM. | ||||
|        *     Matrix index i is mapped to this shift via  | ||||
|        *               geom.shifts[i] | ||||
|        * | ||||
|        *     conj(pha[block]) proj[k (which mom)][j (basis vec cpt)][block]  | ||||
|        *       =  \sum_{l in ball}  e^{i q_k . delta_l} < phi_{block,j} | MdagM | phi_{(block+delta_l),i} >  | ||||
|        *       =  \sum_{l in ball} e^{iqk.delta_l} A_ji^{b.b+l} | ||||
|        *       = M_{kl} A_ji^{b.b+l} | ||||
|        * | ||||
|        *     Must assemble and invert matrix M_k,l = e^{i q_k . delta_l} | ||||
|        *   | ||||
|        *     Where q_k = delta_k . (2*M_PI/global_nb[mu]) | ||||
|        * | ||||
|        *     Then A{ji}^{b,b+l} = M^{-1}_{lm} ComputeProj_{m,b,i,j} | ||||
|        */ | ||||
|     teigen-=usecond(); | ||||
|     Eigen::MatrixXcd Mkl    = Eigen::MatrixXcd::Zero(npoint,npoint); | ||||
|     Eigen::MatrixXcd invMkl = Eigen::MatrixXcd::Zero(npoint,npoint); | ||||
|     ComplexD ci(0.0,1.0); | ||||
|     for(int k=0;k<npoint;k++){ // Loop over momenta | ||||
|  | ||||
|       for(int l=0;l<npoint;l++){ // Loop over nbr relative | ||||
| 	ComplexD phase(0.0,0.0); | ||||
| 	for(int mu=0;mu<Nd;mu++){ | ||||
| 	  RealD TwoPiL =  M_PI * 2.0/ clatt[mu]; | ||||
| 	  phase=phase+TwoPiL*geom.shifts[k][mu]*geom.shifts[l][mu]; | ||||
| 	} | ||||
| 	phase=exp(phase*ci); | ||||
| 	Mkl(k,l) = phase; | ||||
|       } | ||||
|     } | ||||
|     invMkl = Mkl.inverse(); | ||||
|     teigen+=usecond(); | ||||
|  | ||||
|     /////////////////////////////////////////////////////////////////////// | ||||
|     // Now compute the matrix elements of linop between the orthonormal | ||||
|     // set of vectors. | ||||
|     /////////////////////////////////////////////////////////////////////// | ||||
|     FineField phaV(grid); // Phased block basis vector | ||||
|     FineField MphaV(grid);// Matrix applied | ||||
|     std::vector<FineComplexField> phaF(npoint,grid); | ||||
|     std::vector<CoarseComplexField> pha(npoint,CoarseGrid()); | ||||
|      | ||||
|     CoarseVector coarseInner(CoarseGrid()); | ||||
|      | ||||
|     typedef typename CComplex::scalar_type SComplex; | ||||
|     FineComplexField one(grid); one=SComplex(1.0); | ||||
|     FineComplexField zz(grid); zz = Zero(); | ||||
|     tphase=-usecond(); | ||||
|     for(int p=0;p<npoint;p++){ // Loop over momenta in npoint | ||||
|       ///////////////////////////////////////////////////// | ||||
|       // Stick a phase on every block | ||||
|       ///////////////////////////////////////////////////// | ||||
|       CoarseComplexField coor(CoarseGrid()); | ||||
|       pha[p]=Zero(); | ||||
|       for(int mu=0;mu<Nd;mu++){ | ||||
| 	LatticeCoordinate(coor,mu); | ||||
| 	RealD TwoPiL =  M_PI * 2.0/ clatt[mu]; | ||||
| 	pha[p] = pha[p] + (TwoPiL * geom.shifts[p][mu]) * coor; | ||||
|       } | ||||
|       pha[p]  =exp(pha[p]*ci); | ||||
|  | ||||
|       blockZAXPY(phaF[p],pha[p],one,zz); | ||||
|        | ||||
|     } | ||||
|     tphase+=usecond(); | ||||
|      | ||||
|     std::vector<CoarseVector> ComputeProj(npoint,CoarseGrid()); | ||||
|     std::vector<CoarseVector>          FT(npoint,CoarseGrid()); | ||||
|     for(int i=0;i<nbasis;i++){// Loop over basis vectors | ||||
|       std::cout << GridLogMessage<< "CoarsenMatrixColoured vec "<<i<<"/"<<nbasis<< std::endl; | ||||
|       for(int p=0;p<npoint;p++){ // Loop over momenta in npoint | ||||
| 	tphaseBZ-=usecond(); | ||||
| 	phaV = phaF[p]*Subspace.subspace[i]; | ||||
| 	tphaseBZ+=usecond(); | ||||
|  | ||||
| 	///////////////////////////////////////////////////////////////////// | ||||
| 	// Multiply phased subspace vector by matrix and project to subspace | ||||
| 	// Remove local bulk phase to leave relative phases | ||||
| 	///////////////////////////////////////////////////////////////////// | ||||
| 	tmat-=usecond(); | ||||
| 	linop.Op(phaV,MphaV); | ||||
| 	tmat+=usecond(); | ||||
| 	//	std::cout << i << " " <<p << " MphaV "<<norm2(MphaV)<<" "<<norm2(phaV)<<std::endl; | ||||
|  | ||||
| 	tproj-=usecond(); | ||||
| 	blockProject(coarseInner,MphaV,Subspace.subspace); | ||||
| 	coarseInner = conjugate(pha[p]) * coarseInner; | ||||
|  | ||||
| 	ComputeProj[p] = coarseInner; | ||||
| 	tproj+=usecond(); | ||||
| 	//	std::cout << i << " " <<p << " ComputeProj "<<norm2(ComputeProj[p])<<std::endl; | ||||
|  | ||||
|       } | ||||
|  | ||||
|       tinv-=usecond(); | ||||
|       for(int k=0;k<npoint;k++){ | ||||
| 	FT[k] = Zero(); | ||||
| 	for(int l=0;l<npoint;l++){ | ||||
| 	  FT[k]= FT[k]+ invMkl(l,k)*ComputeProj[l]; | ||||
| 	} | ||||
|        | ||||
| 	int osites=CoarseGrid()->oSites(); | ||||
| 	autoView( A_v  , _A[k], AcceleratorWrite); | ||||
| 	autoView( FT_v  , FT[k], AcceleratorRead); | ||||
| 	accelerator_for(sss, osites, 1, { | ||||
| 	    for(int j=0;j<nbasis;j++){ | ||||
| 	      A_v[sss](i,j) = FT_v[sss](j); | ||||
| 	    } | ||||
|         }); | ||||
|       } | ||||
|       tinv+=usecond(); | ||||
|     } | ||||
|  | ||||
|     // Only needed if nonhermitian | ||||
|     if ( ! hermitian ) { | ||||
|       //      std::cout << GridLogMessage<<"PopulateAdag  "<<std::endl; | ||||
|       //      PopulateAdag(); | ||||
|     } | ||||
|  | ||||
|     for(int p=0;p<geom.npoint;p++){ | ||||
|       std::cout << " _A["<<p<<"] "<<norm2(_A[p])<<std::endl; | ||||
|     } | ||||
|  | ||||
|     // Need to write something to populate Adag from A | ||||
|     ExchangeCoarseLinks(); | ||||
|     std::cout << GridLogMessage<<"CoarsenOperator eigen  "<<teigen<<" us"<<std::endl; | ||||
|     std::cout << GridLogMessage<<"CoarsenOperator phase  "<<tphase<<" us"<<std::endl; | ||||
|     std::cout << GridLogMessage<<"CoarsenOperator phaseBZ "<<tphaseBZ<<" us"<<std::endl; | ||||
|     std::cout << GridLogMessage<<"CoarsenOperator mat    "<<tmat <<" us"<<std::endl; | ||||
|     std::cout << GridLogMessage<<"CoarsenOperator proj   "<<tproj<<" us"<<std::endl; | ||||
|     std::cout << GridLogMessage<<"CoarsenOperator inv    "<<tinv<<" us"<<std::endl; | ||||
|   } | ||||
| #endif   | ||||
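
Both CoarsenOperator variants use the same plane-wave trick: apply the fine operator to phased copies of each basis vector, block-project, strip the local phase, and then unmix the contributions of the stencil ball by inverting the small npoint x npoint matrix M_kl = e^{i q_k . delta_l}. A toy standalone sketch of that last ingredient (one dimension, three displacements, Eigen only; not Grid code):

#include <Eigen/Dense>
#include <cmath>
#include <complex>

// Toy version of the momentum-matrix construction used above: one dimension,
// stencil displacements delta in {-1,0,+1}, coarse lattice extent L,
// q_k = 2*pi*delta_k/L and M_kl = exp(i q_k delta_l).
Eigen::MatrixXcd PhaseMatrixInverse(int L)
{
  const int npoint = 3;
  const int delta[npoint] = { -1, 0, +1 };
  const std::complex<double> ci(0.0,1.0);

  Eigen::MatrixXcd Mkl = Eigen::MatrixXcd::Zero(npoint,npoint);
  for(int k=0;k<npoint;k++){
    for(int l=0;l<npoint;l++){
      double phase = 2.0*M_PI*delta[k]*delta[l]/L;
      Mkl(k,l) = std::exp(ci*phase);
    }
  }
  // Applying this inverse to the phased, block-projected inner products
  // recovers the per-displacement coarse link matrices A^{b,b+delta_l}.
  return Mkl.inverse();
}
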
|   void ExchangeCoarseLinks(void){ | ||||
|     for(int p=0;p<geom.npoint;p++){ | ||||
|       _A[p] = Cell.ExchangePeriodic(_A[p]); | ||||
|       //      _Adag[p]= Cell.ExchangePeriodic(_Adag[p]); | ||||
|     } | ||||
|   } | ||||
|   virtual  void Mdiag    (const Field &in, Field &out){ assert(0);}; | ||||
|   virtual  void Mdir     (const Field &in, Field &out,int dir, int disp){assert(0);}; | ||||
|   virtual  void MdirAll  (const Field &in, std::vector<Field> &out){assert(0);}; | ||||
| }; | ||||
|  | ||||
|  | ||||
|    | ||||
| NAMESPACE_END(Grid); | ||||
							
								
								
									
Grid/algorithms/multigrid/GeneralCoarsenedMatrixMultiRHS.h (new file, 729 lines)
							| @@ -0,0 +1,729 @@ | ||||
| /************************************************************************************* | ||||
|  | ||||
|     Grid physics library, www.github.com/paboyle/Grid  | ||||
|  | ||||
|     Source file: ./lib/algorithms/GeneralCoarsenedMatrixMultiRHS.h | ||||
|  | ||||
|     Copyright (C) 2015 | ||||
|  | ||||
| Author: Peter Boyle <pboyle@bnl.gov> | ||||
|  | ||||
|     This program is free software; you can redistribute it and/or modify | ||||
|     it under the terms of the GNU General Public License as published by | ||||
|     the Free Software Foundation; either version 2 of the License, or | ||||
|     (at your option) any later version. | ||||
|  | ||||
|     This program is distributed in the hope that it will be useful, | ||||
|     but WITHOUT ANY WARRANTY; without even the implied warranty of | ||||
|     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the | ||||
|     GNU General Public License for more details. | ||||
|  | ||||
|     You should have received a copy of the GNU General Public License along | ||||
|     with this program; if not, write to the Free Software Foundation, Inc., | ||||
|     51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. | ||||
|  | ||||
|     See the full license in the file "LICENSE" in the top level distribution directory | ||||
| *************************************************************************************/ | ||||
| /*  END LEGAL */ | ||||
| #pragma once | ||||
|  | ||||
|  | ||||
| NAMESPACE_BEGIN(Grid); | ||||
|  | ||||
|  | ||||
| // Fine Object == (per site) type of fine field | ||||
| // nbasis      == number of deflation vectors | ||||
| template<class Fobj,class CComplex,int nbasis> | ||||
| class MultiGeneralCoarsenedMatrix : public SparseMatrixBase<Lattice<iVector<CComplex,nbasis > > >  { | ||||
| public: | ||||
|   typedef typename CComplex::scalar_object SComplex; | ||||
|   typedef GeneralCoarsenedMatrix<Fobj,CComplex,nbasis> GeneralCoarseOp; | ||||
|   typedef MultiGeneralCoarsenedMatrix<Fobj,CComplex,nbasis> MultiGeneralCoarseOp; | ||||
|  | ||||
|   typedef iVector<CComplex,nbasis >           siteVector; | ||||
|   typedef iMatrix<CComplex,nbasis >           siteMatrix; | ||||
|   typedef iVector<SComplex,nbasis >           calcVector; | ||||
|   typedef iMatrix<SComplex,nbasis >           calcMatrix; | ||||
|   typedef Lattice<iScalar<CComplex> >         CoarseComplexField; | ||||
|   typedef Lattice<siteVector>                 CoarseVector; | ||||
|   typedef Lattice<iMatrix<CComplex,nbasis > > CoarseMatrix; | ||||
|   typedef iMatrix<CComplex,nbasis >  Cobj; | ||||
|   typedef iVector<CComplex,nbasis >  Cvec; | ||||
|   typedef Lattice< CComplex >   CoarseScalar; // used for inner products on fine field | ||||
|   typedef Lattice<Fobj >        FineField; | ||||
|   typedef Lattice<CComplex >    FineComplexField; | ||||
|   typedef CoarseVector Field; | ||||
|  | ||||
|   //////////////////// | ||||
|   // Data members | ||||
|   //////////////////// | ||||
|   GridCartesian *       _CoarseGridMulti;  | ||||
|   NonLocalStencilGeometry geom; | ||||
|   NonLocalStencilGeometry geom_srhs; | ||||
|   PaddedCell Cell; | ||||
|   GeneralLocalStencil Stencil; | ||||
|  | ||||
|   deviceVector<calcVector> BLAS_B; | ||||
|   deviceVector<calcVector> BLAS_C; | ||||
|   std::vector<deviceVector<calcMatrix> > BLAS_A; | ||||
|  | ||||
|   std::vector<deviceVector<ComplexD *> > BLAS_AP; | ||||
|   std::vector<deviceVector<ComplexD *> > BLAS_BP; | ||||
|   deviceVector<ComplexD *>               BLAS_CP; | ||||
|  | ||||
|   /////////////////////// | ||||
|   // Interface | ||||
|   /////////////////////// | ||||
|   GridBase      * Grid(void)           { return _CoarseGridMulti; };   // this is all the linalg routines need to know | ||||
|   GridCartesian * CoarseGrid(void)     { return _CoarseGridMulti; };   // this is all the linalg routines need to know | ||||
|  | ||||
|   // Can be used to do I/O on the operator matrices externally | ||||
|   void SetMatrix (int p,CoarseMatrix & A) | ||||
|   { | ||||
|     assert(A.size()==geom_srhs.npoint); | ||||
|     GridtoBLAS(A[p],BLAS_A[p]); | ||||
|   } | ||||
|   void GetMatrix (int p,CoarseMatrix & A) | ||||
|   { | ||||
|     assert(A.size()==geom_srhs.npoint); | ||||
|     BLAStoGrid(A[p],BLAS_A[p]); | ||||
|   } | ||||
|   void CopyMatrix (GeneralCoarseOp &_Op) | ||||
|   { | ||||
|     for(int p=0;p<geom.npoint;p++){ | ||||
|       auto Aup = _Op.Cell.Extract(_Op._A[p]); | ||||
|       //Unpadded | ||||
|       GridtoBLAS(Aup,BLAS_A[p]); | ||||
|     } | ||||
|   } | ||||
|   /* | ||||
|   void CheckMatrix (GeneralCoarseOp &_Op) | ||||
|   { | ||||
|     std::cout <<"************* Checking the little direc operator mRHS"<<std::endl; | ||||
|     for(int p=0;p<geom.npoint;p++){ | ||||
|       //Unpadded | ||||
|       auto Aup = _Op.Cell.Extract(_Op._A[p]); | ||||
|       auto Ack = Aup; | ||||
|       BLAStoGrid(Ack,BLAS_A[p]); | ||||
|       std::cout << p<<" Ack "<<norm2(Ack)<<std::endl; | ||||
|       std::cout << p<<" Aup "<<norm2(Aup)<<std::endl; | ||||
|     } | ||||
|     std::cout <<"************* "<<std::endl; | ||||
|   } | ||||
|   */ | ||||
|    | ||||
|   MultiGeneralCoarsenedMatrix(NonLocalStencilGeometry &_geom,GridCartesian *CoarseGridMulti) : | ||||
|     _CoarseGridMulti(CoarseGridMulti), | ||||
|     geom_srhs(_geom), | ||||
|     geom(_CoarseGridMulti,_geom.hops,_geom.skip+1), | ||||
|     Cell(geom.Depth(),_CoarseGridMulti), | ||||
|     Stencil(Cell.grids.back(),geom.shifts) // padded cell stencil | ||||
|   { | ||||
|     int32_t padded_sites   = Cell.grids.back()->lSites(); | ||||
|     int32_t unpadded_sites = CoarseGridMulti->lSites(); | ||||
|      | ||||
|     int32_t nrhs  = CoarseGridMulti->FullDimensions()[0];  // # RHS | ||||
|     int32_t orhs  = nrhs/CComplex::Nsimd(); | ||||
|  | ||||
|     padded_sites   = padded_sites/nrhs; | ||||
|     unpadded_sites = unpadded_sites/nrhs; | ||||
|      | ||||
|     ///////////////////////////////////////////////// | ||||
|     // Device data vector storage | ||||
|     ///////////////////////////////////////////////// | ||||
|     BLAS_A.resize(geom.npoint); | ||||
|     for(int p=0;p<geom.npoint;p++){ | ||||
|       BLAS_A[p].resize (unpadded_sites); // no ghost zone, npoint elements | ||||
|     } | ||||
|      | ||||
|     BLAS_B.resize(nrhs *padded_sites);   // includes ghost zone | ||||
|     BLAS_C.resize(nrhs *unpadded_sites); // no ghost zone | ||||
|     BLAS_AP.resize(geom.npoint); | ||||
|     BLAS_BP.resize(geom.npoint); | ||||
|     for(int p=0;p<geom.npoint;p++){ | ||||
|       BLAS_AP[p].resize(unpadded_sites); | ||||
|       BLAS_BP[p].resize(unpadded_sites); | ||||
|     } | ||||
|     BLAS_CP.resize(unpadded_sites); | ||||
|  | ||||
|     ///////////////////////////////////////////////// | ||||
|     // Pointers to data | ||||
|     ///////////////////////////////////////////////// | ||||
|  | ||||
|     // Site identity mapping for A | ||||
|     for(int p=0;p<geom.npoint;p++){ | ||||
|       for(int ss=0;ss<unpadded_sites;ss++){ | ||||
| 	ComplexD *ptr = (ComplexD *)&BLAS_A[p][ss]; | ||||
| 	acceleratorPut(BLAS_AP[p][ss],ptr); | ||||
|       } | ||||
|     } | ||||
|     // Site identity mapping for C | ||||
|     for(int ss=0;ss<unpadded_sites;ss++){ | ||||
|       ComplexD *ptr = (ComplexD *)&BLAS_C[ss*nrhs]; | ||||
|       acceleratorPut(BLAS_CP[ss],ptr); | ||||
|     } | ||||
|  | ||||
|     // Neighbour table is more complicated | ||||
|     int32_t j=0; // Interior point counter (unpadded) | ||||
|     for(int32_t s=0;s<padded_sites;s++){ // 4 volume, padded | ||||
|       int ghost_zone=0; | ||||
|       for(int32_t point = 0 ; point < geom.npoint; point++){ | ||||
| 	int i=s*orhs*geom.npoint+point; | ||||
| 	if( Stencil._entries[i]._wrap ) { // stencil is indexed by the oSite of the CoarseGridMulti, hence orhs factor | ||||
| 	  ghost_zone=1; // If general stencil wrapped in any direction, wrap=1 | ||||
| 	} | ||||
|       } | ||||
|  | ||||
|       if( ghost_zone==0) { | ||||
| 	for(int32_t point = 0 ; point < geom.npoint; point++){ | ||||
| 	  int i=s*orhs*geom.npoint+point; | ||||
|  	  int32_t nbr = Stencil._entries[i]._offset*CComplex::Nsimd(); // oSite -> lSite | ||||
| 	  assert(nbr<BLAS_B.size()); | ||||
| 	  ComplexD * ptr = (ComplexD *)&BLAS_B[nbr]; | ||||
| 	  acceleratorPut(BLAS_BP[point][j],ptr); // neighbour indexing in ghost zone volume | ||||
| 	} | ||||
| 	j++; | ||||
|       } | ||||
|     } | ||||
|     assert(j==unpadded_sites); | ||||
|   } | ||||
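The constructor above only assembles pointer tables: for every unpadded coarse site, BLAS_AP records where the nbasis x nbasis stencil matrix lives, BLAS_BP where the neighbour block sits in the padded volume, and BLAS_CP where the output block goes, so that M() below can drive one batched GEMM per stencil point. A standalone scalar sketch of the arithmetic those tables describe, under illustrative assumptions only (dummy containers, row-major layout, hypothetical neighbour map; not the Grid implementation):

#include <complex>
#include <vector>

// Scalar model of one coarse-operator application: per unpadded site s,
//   C[s] = sum_p A_p[s] * B[nbr_p(s)],   A_p[s]: nbasis x nbasis,  B, C: nbasis x nrhs.
// The p==0 term overwrites C (beta = 0); later points accumulate, matching the
// gemmBatched calls in M() below.  Layout and indexing here are illustrative only.
using cplx = std::complex<double>;

void coarse_apply(int sites, int npoint, int nbasis, int nrhs,
                  const std::vector<std::vector<std::vector<cplx>>> &A,  // [p][s][i*nbasis+j]
                  const std::vector<std::vector<cplx>> &B,               // [padded site][j*nrhs+r]
                  const std::vector<std::vector<int>> &nbr,              // [p][s] -> padded site
                  std::vector<std::vector<cplx>> &C)                     // [s][i*nrhs+r]
{
  for (int s = 0; s < sites; s++) {
    for (int p = 0; p < npoint; p++) {
      const auto &Ap = A[p][s];
      const auto &Bn = B[nbr[p][s]];
      for (int i = 0; i < nbasis; i++)
        for (int r = 0; r < nrhs; r++) {
          cplx acc = (p == 0) ? cplx(0.0) : C[s][i*nrhs + r];  // beta = 0 on the first point
          for (int j = 0; j < nbasis; j++) acc += Ap[i*nbasis + j] * Bn[j*nrhs + r];
          C[s][i*nrhs + r] = acc;
        }
    }
  }
}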
|   template<class vobj> void GridtoBLAS(const Lattice<vobj> &from,deviceVector<typename vobj::scalar_object> &to) | ||||
|   { | ||||
|   typedef typename vobj::scalar_object sobj; | ||||
|   typedef typename vobj::scalar_type scalar_type; | ||||
|   typedef typename vobj::vector_type vector_type; | ||||
|  | ||||
|   GridBase *Fg = from.Grid(); | ||||
|   assert(!Fg->_isCheckerBoarded); | ||||
|   int nd = Fg->_ndimension; | ||||
|  | ||||
|   to.resize(Fg->lSites()); | ||||
|  | ||||
|   Coordinate LocalLatt = Fg->LocalDimensions(); | ||||
|   size_t nsite = 1; | ||||
|   for(int i=0;i<nd;i++) nsite *= LocalLatt[i]; | ||||
|  | ||||
|   //////////////////////////////////////////////////////////////////////////////////////////////// | ||||
|   // do the index calc on the GPU | ||||
|   //////////////////////////////////////////////////////////////////////////////////////////////// | ||||
|   Coordinate f_ostride = Fg->_ostride; | ||||
|   Coordinate f_istride = Fg->_istride; | ||||
|   Coordinate f_rdimensions = Fg->_rdimensions; | ||||
|  | ||||
|   autoView(from_v,from,AcceleratorRead); | ||||
|   auto to_v = &to[0]; | ||||
|  | ||||
|   const int words=sizeof(vobj)/sizeof(vector_type); | ||||
|   accelerator_for(idx,nsite,1,{ | ||||
|        | ||||
|       Coordinate from_coor, base; | ||||
|       Lexicographic::CoorFromIndex(base,idx,LocalLatt); | ||||
|       for(int i=0;i<nd;i++){ | ||||
| 	from_coor[i] = base[i]; | ||||
|       } | ||||
|       int from_oidx = 0; for(int d=0;d<nd;d++) from_oidx+=f_ostride[d]*(from_coor[d]%f_rdimensions[d]); | ||||
|       int from_lane = 0; for(int d=0;d<nd;d++) from_lane+=f_istride[d]*(from_coor[d]/f_rdimensions[d]); | ||||
|  | ||||
|       const vector_type* from = (const vector_type *)&from_v[from_oidx]; | ||||
|       scalar_type* to = (scalar_type *)&to_v[idx]; | ||||
|        | ||||
|       scalar_type stmp; | ||||
|       for(int w=0;w<words;w++){ | ||||
| 	stmp = getlane(from[w], from_lane); | ||||
| 	to[w] = stmp; | ||||
|       } | ||||
|     }); | ||||
|   }     | ||||
|   template<class vobj> void BLAStoGrid(Lattice<vobj> &grid,deviceVector<typename vobj::scalar_object> &in) | ||||
|   { | ||||
|   typedef typename vobj::scalar_object sobj; | ||||
|   typedef typename vobj::scalar_type scalar_type; | ||||
|   typedef typename vobj::vector_type vector_type; | ||||
|  | ||||
|   GridBase *Tg = grid.Grid(); | ||||
|   assert(!Tg->_isCheckerBoarded); | ||||
|   int nd = Tg->_ndimension; | ||||
|    | ||||
|   assert(in.size()==Tg->lSites()); | ||||
|  | ||||
|   Coordinate LocalLatt = Tg->LocalDimensions(); | ||||
|   size_t nsite = 1; | ||||
|   for(int i=0;i<nd;i++) nsite *= LocalLatt[i]; | ||||
|  | ||||
|   //////////////////////////////////////////////////////////////////////////////////////////////// | ||||
|   // do the index calc on the GPU | ||||
|   //////////////////////////////////////////////////////////////////////////////////////////////// | ||||
|   Coordinate t_ostride = Tg->_ostride; | ||||
|   Coordinate t_istride = Tg->_istride; | ||||
|   Coordinate t_rdimensions = Tg->_rdimensions; | ||||
|  | ||||
|   autoView(to_v,grid,AcceleratorWrite); | ||||
|   auto from_v = &in[0]; | ||||
|  | ||||
|   const int words=sizeof(vobj)/sizeof(vector_type); | ||||
|   accelerator_for(idx,nsite,1,{ | ||||
|        | ||||
|       Coordinate to_coor, base; | ||||
|       Lexicographic::CoorFromIndex(base,idx,LocalLatt); | ||||
|       for(int i=0;i<nd;i++){ | ||||
| 	to_coor[i] = base[i]; | ||||
|       } | ||||
|       int to_oidx = 0; for(int d=0;d<nd;d++) to_oidx+=t_ostride[d]*(to_coor[d]%t_rdimensions[d]); | ||||
|       int to_lane = 0; for(int d=0;d<nd;d++) to_lane+=t_istride[d]*(to_coor[d]/t_rdimensions[d]); | ||||
|  | ||||
|       vector_type* to = (vector_type *)&to_v[to_oidx]; | ||||
|       scalar_type* from = (scalar_type *)&from_v[idx]; | ||||
|        | ||||
|       scalar_type stmp; | ||||
|       for(int w=0;w<words;w++){ | ||||
| 	stmp=from[w]; | ||||
| 	putlane(to[w], stmp, to_lane); | ||||
|       } | ||||
|     }); | ||||
|   } | ||||
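GridtoBLAS above scatters each SIMD lane of a lattice object into plain lexicographic scalar storage for the BLAS, and BLAStoGrid is its inverse. A toy, self-contained sketch of the lane-packing idea, with std::array standing in for Grid's SIMD vector type and hand-written getlane/putlane (names reused for illustration only; this is a model, not the Grid API):

#include <array>
#include <cassert>
#include <complex>

// Toy model: a "vector_type" word holds Nsimd scalar lanes; GridtoBLAS reads a
// lane out to scalar storage, BLAStoGrid writes it back, so a round trip is exact.
using scalar_type = std::complex<double>;
constexpr int Nsimd = 4;
using vector_type  = std::array<scalar_type, Nsimd>;   // stand-in for Grid's SIMD vector

scalar_type getlane(const vector_type &v, int lane) { return v[lane]; }
void        putlane(vector_type &v, scalar_type s, int lane) { v[lane] = s; }

int main() {
  vector_type v{};                 // one "word" of a vectorised object
  scalar_type s(3.0, -1.0);
  putlane(v, s, 2);                // BLAStoGrid direction: scalar -> lane
  assert(getlane(v, 2) == s);      // GridtoBLAS direction: lane -> scalar
  return 0;
}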
|   void CoarsenOperator(LinearOperatorBase<Lattice<Fobj> > &linop, | ||||
| 		       Aggregation<Fobj,CComplex,nbasis> & Subspace, | ||||
| 		       GridBase *CoarseGrid) | ||||
|   { | ||||
| #if 0 | ||||
|     std::cout << GridLogMessage<< "GeneralCoarsenMatrixMrhs "<< std::endl; | ||||
|  | ||||
|     GridBase *grid = Subspace.FineGrid; | ||||
|  | ||||
|     ///////////////////////////////////////////////////////////// | ||||
|     // Orthogonalise the subblocks over the basis | ||||
|     ///////////////////////////////////////////////////////////// | ||||
|     CoarseScalar InnerProd(CoarseGrid);  | ||||
|     blockOrthogonalise(InnerProd,Subspace.subspace); | ||||
|  | ||||
|     const int npoint = geom_srhs.npoint; | ||||
|  | ||||
|     Coordinate clatt = CoarseGrid->GlobalDimensions(); | ||||
|     int Nd = CoarseGrid->Nd(); | ||||
|       /* | ||||
|        *     Here, k,l index which possible momentum/shift within the N-points connected by MdagM. | ||||
|        *     Matrix index i is mapped to this shift via  | ||||
|        *               geom.shifts[i] | ||||
|        * | ||||
|        *     conj(pha[block]) proj[k (which mom)][j (basis vec cpt)][block]  | ||||
|        *       =  \sum_{l in ball}  e^{i q_k . delta_l} < phi_{block,j} | MdagM | phi_{(block+delta_l),i} >  | ||||
|        *       =  \sum_{l in ball} e^{i q_k . delta_l} A_ji^{b,b+l} | ||||
|        *       = M_{kl} A_ji^{b,b+l} | ||||
|        * | ||||
|        *     Must assemble and invert the matrix M_{k,l} = e^{i q_k . delta_l} | ||||
|        *    | ||||
|        *     Where q_k = delta_k . (2*M_PI/global_nb[mu]) | ||||
|        * | ||||
|        *     Then A_{ji}^{b,b+l} = M^{-1}_{lm} ComputeProj_{m,b,i,j} | ||||
|        */ | ||||
|     Eigen::MatrixXcd Mkl    = Eigen::MatrixXcd::Zero(npoint,npoint); | ||||
|     Eigen::MatrixXcd invMkl = Eigen::MatrixXcd::Zero(npoint,npoint); | ||||
|     ComplexD ci(0.0,1.0); | ||||
|     for(int k=0;k<npoint;k++){ // Loop over momenta | ||||
|  | ||||
|       for(int l=0;l<npoint;l++){ // Loop over nbr relative | ||||
| 	ComplexD phase(0.0,0.0); | ||||
| 	for(int mu=0;mu<Nd;mu++){ | ||||
| 	  RealD TwoPiL =  M_PI * 2.0/ clatt[mu]; | ||||
| 	  phase=phase+TwoPiL*geom_srhs.shifts[k][mu]*geom_srhs.shifts[l][mu]; | ||||
| 	} | ||||
| 	phase=exp(phase*ci); | ||||
| 	Mkl(k,l) = phase; | ||||
|       } | ||||
|     } | ||||
|     invMkl = Mkl.inverse(); | ||||
|  | ||||
|     /////////////////////////////////////////////////////////////////////// | ||||
|     // Now compute the matrix elements of linop between the orthonormal | ||||
|     // set of vectors. | ||||
|     /////////////////////////////////////////////////////////////////////// | ||||
|     FineField phaV(grid); // Phased block basis vector | ||||
|     FineField MphaV(grid);// Matrix applied | ||||
|     std::vector<FineComplexField> phaF(npoint,grid); | ||||
|     std::vector<CoarseComplexField> pha(npoint,CoarseGrid); | ||||
|      | ||||
|     CoarseVector coarseInner(CoarseGrid); | ||||
|      | ||||
|     typedef typename CComplex::scalar_type SComplex; | ||||
|     FineComplexField one(grid); one=SComplex(1.0); | ||||
|     FineComplexField zz(grid); zz = Zero(); | ||||
|     for(int p=0;p<npoint;p++){ // Loop over momenta in npoint | ||||
|       ///////////////////////////////////////////////////// | ||||
|       // Stick a phase on every block | ||||
|       ///////////////////////////////////////////////////// | ||||
|       CoarseComplexField coor(CoarseGrid); | ||||
|       pha[p]=Zero(); | ||||
|       for(int mu=0;mu<Nd;mu++){ | ||||
| 	LatticeCoordinate(coor,mu); | ||||
| 	RealD TwoPiL =  M_PI * 2.0/ clatt[mu]; | ||||
| 	pha[p] = pha[p] + (TwoPiL * geom_srhs.shifts[p][mu]) * coor; | ||||
|       } | ||||
|       pha[p]  =exp(pha[p]*ci);	 | ||||
|  | ||||
|       blockZAXPY(phaF[p],pha[p],one,zz); | ||||
|     } | ||||
|  | ||||
|     // Could save on temporary storage here | ||||
|     std::vector<CoarseMatrix> _A; | ||||
|     _A.resize(geom_srhs.npoint,CoarseGrid); | ||||
|  | ||||
|     std::vector<CoarseVector> ComputeProj(npoint,CoarseGrid); | ||||
|     CoarseVector          FT(CoarseGrid); | ||||
|     for(int i=0;i<nbasis;i++){// Loop over basis vectors | ||||
|       std::cout << GridLogMessage<< "CoarsenMatrixColoured vec "<<i<<"/"<<nbasis<< std::endl; | ||||
|       for(int p=0;p<npoint;p++){ // Loop over momenta in npoint | ||||
|  | ||||
| 	phaV = phaF[p]*Subspace.subspace[i]; | ||||
|  | ||||
| 	///////////////////////////////////////////////////////////////////// | ||||
| 	// Multiply phased subspace vector by matrix and project to subspace | ||||
| 	// Remove local bulk phase to leave relative phases | ||||
| 	///////////////////////////////////////////////////////////////////// | ||||
| 	linop.Op(phaV,MphaV); | ||||
|  | ||||
| 	// Fixme, could use batched block projector here | ||||
| 	blockProject(coarseInner,MphaV,Subspace.subspace); | ||||
|  | ||||
| 	coarseInner = conjugate(pha[p]) * coarseInner; | ||||
|  | ||||
| 	ComputeProj[p] = coarseInner; | ||||
|       } | ||||
|  | ||||
|       // Could do this with a block promote or similar BLAS call via the MultiRHSBlockProjector with a const matrix. | ||||
|       for(int k=0;k<npoint;k++){ | ||||
|  | ||||
| 	FT = Zero(); | ||||
| 	for(int l=0;l<npoint;l++){ | ||||
| 	  FT= FT+ invMkl(l,k)*ComputeProj[l]; | ||||
| 	} | ||||
|        | ||||
| 	int osites=CoarseGrid->oSites(); | ||||
| 	autoView( A_v  , _A[k], AcceleratorWrite); | ||||
| 	autoView( FT_v  , FT, AcceleratorRead); | ||||
| 	accelerator_for(sss, osites, 1, { | ||||
| 	    for(int j=0;j<nbasis;j++){ | ||||
| 	      A_v[sss](i,j) = FT_v[sss](j); | ||||
| 	    } | ||||
|         }); | ||||
|       } | ||||
|     } | ||||
|  | ||||
|     // Only needed if nonhermitian | ||||
|     //    if ( ! hermitian ) { | ||||
|     //      std::cout << GridLogMessage<<"PopulateAdag  "<<std::endl; | ||||
|     //      PopulateAdag(); | ||||
|     //    } | ||||
|     // Need to write something to populate Adag from A | ||||
|  | ||||
|     for(int p=0;p<geom_srhs.npoint;p++){ | ||||
|       GridtoBLAS(_A[p],BLAS_A[p]); | ||||
|     } | ||||
|     /* | ||||
| Grid : Message : 11698.730546 s : CoarsenOperator eigen  1334 us | ||||
| Grid : Message : 11698.730563 s : CoarsenOperator phase  34729 us | ||||
| Grid : Message : 11698.730565 s : CoarsenOperator phaseBZ 2423814 us | ||||
| Grid : Message : 11698.730566 s : CoarsenOperator mat    127890998 us | ||||
| Grid : Message : 11698.730567 s : CoarsenOperator proj   515840840 us | ||||
| Grid : Message : 11698.730568 s : CoarsenOperator inv    103948313 us | ||||
| Takes 600s to compute matrix elements, DOMINATED by the block project. | ||||
| Easy to speed up with the batched block project. | ||||
| Store npoint vectors, get an npoint x Nbasis block projection, and run 81-fold faster. | ||||
|  | ||||
| // Block project below takes this to 240s | ||||
| Grid : Message : 328.193418 s : CoarsenOperator phase      38338 us | ||||
| Grid : Message : 328.193434 s : CoarsenOperator phaseBZ  1711226 us | ||||
| Grid : Message : 328.193436 s : CoarsenOperator mat    122213270 us | ||||
| //Grid : Message : 328.193438 s : CoarsenOperator proj   1181154 us <-- this is mistimed | ||||
| //Grid : Message : 11698.730568 s : CoarsenOperator inv  103948313 us <-- Cut this ~10x if lucky by loop fusion | ||||
|      */ | ||||
| #else | ||||
|     RealD tproj=0.0; | ||||
|     RealD tmat=0.0; | ||||
|     RealD tphase=0.0; | ||||
|     RealD tphaseBZ=0.0; | ||||
|     RealD tinv=0.0; | ||||
|  | ||||
|     std::cout << GridLogMessage<< "GeneralCoarsenMatrixMrhs "<< std::endl; | ||||
|  | ||||
|     GridBase *grid = Subspace.FineGrid; | ||||
|  | ||||
|     ///////////////////////////////////////////////////////////// | ||||
|     // Orthogonalise the subblocks over the basis | ||||
|     ///////////////////////////////////////////////////////////// | ||||
|     CoarseScalar InnerProd(CoarseGrid);  | ||||
|     blockOrthogonalise(InnerProd,Subspace.subspace); | ||||
|  | ||||
|  | ||||
|     MultiRHSBlockProject<Lattice<Fobj> >    Projector; | ||||
|     Projector.Allocate(nbasis,grid,CoarseGrid); | ||||
|     Projector.ImportBasis(Subspace.subspace); | ||||
|      | ||||
|     const int npoint = geom_srhs.npoint; | ||||
|  | ||||
|     Coordinate clatt = CoarseGrid->GlobalDimensions(); | ||||
|     int Nd = CoarseGrid->Nd(); | ||||
|       /* | ||||
|        *     Here, k,l index which possible momentum/shift within the N-points connected by MdagM. | ||||
|        *     Matrix index i is mapped to this shift via  | ||||
|        *               geom.shifts[i] | ||||
|        * | ||||
|        *     conj(pha[block]) proj[k (which mom)][j (basis vec cpt)][block]  | ||||
|        *       =  \sum_{l in ball}  e^{i q_k . delta_l} < phi_{block,j} | MdagM | phi_{(block+delta_l),i} >  | ||||
|        *       =  \sum_{l in ball} e^{i q_k . delta_l} A_ji^{b,b+l} | ||||
|        *       = M_{kl} A_ji^{b,b+l} | ||||
|        * | ||||
|        *     Must assemble and invert the matrix M_{k,l} = e^{i q_k . delta_l} | ||||
|        *    | ||||
|        *     Where q_k = delta_k . (2*M_PI/global_nb[mu]) | ||||
|        * | ||||
|        *     Then A_{ji}^{b,b+l} = M^{-1}_{lm} ComputeProj_{m,b,i,j} | ||||
|        */ | ||||
|     Eigen::MatrixXcd Mkl    = Eigen::MatrixXcd::Zero(npoint,npoint); | ||||
|     Eigen::MatrixXcd invMkl = Eigen::MatrixXcd::Zero(npoint,npoint); | ||||
|     ComplexD ci(0.0,1.0); | ||||
|     for(int k=0;k<npoint;k++){ // Loop over momenta | ||||
|  | ||||
|       for(int l=0;l<npoint;l++){ // Loop over nbr relative | ||||
| 	ComplexD phase(0.0,0.0); | ||||
| 	for(int mu=0;mu<Nd;mu++){ | ||||
| 	  RealD TwoPiL =  M_PI * 2.0/ clatt[mu]; | ||||
| 	  phase=phase+TwoPiL*geom_srhs.shifts[k][mu]*geom_srhs.shifts[l][mu]; | ||||
| 	} | ||||
| 	phase=exp(phase*ci); | ||||
| 	Mkl(k,l) = phase; | ||||
|       } | ||||
|     } | ||||
|     invMkl = Mkl.inverse(); | ||||
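Restating the relations from the comment block above in LaTeX (same content as the comment, no new assumptions):

M_{kl} = e^{\, i\, q_k \cdot \delta_l}, \qquad
q_{k,\mu} = \frac{2\pi}{L^{\mathrm{coarse}}_{\mu}}\, \delta_{k,\mu}, \qquad
A_{ji}^{\,b,\,b+l} = \sum_{m} M^{-1}_{lm}\, \mathrm{ComputeProj}_{m,b,i,j}.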
|  | ||||
|     /////////////////////////////////////////////////////////////////////// | ||||
|     // Now compute the matrix elements of linop between the orthonormal | ||||
|     // set of vectors. | ||||
|     /////////////////////////////////////////////////////////////////////// | ||||
|     FineField phaV(grid); // Phased block basis vector | ||||
|     FineField MphaV(grid);// Matrix applied | ||||
|     std::vector<FineComplexField> phaF(npoint,grid); | ||||
|     std::vector<CoarseComplexField> pha(npoint,CoarseGrid); | ||||
|      | ||||
|     CoarseVector coarseInner(CoarseGrid); | ||||
|      | ||||
|     tphase=-usecond(); | ||||
|     typedef typename CComplex::scalar_type SComplex; | ||||
|     FineComplexField one(grid); one=SComplex(1.0); | ||||
|     FineComplexField zz(grid); zz = Zero(); | ||||
|     for(int p=0;p<npoint;p++){ // Loop over momenta in npoint | ||||
|       ///////////////////////////////////////////////////// | ||||
|       // Stick a phase on every block | ||||
|       ///////////////////////////////////////////////////// | ||||
|       CoarseComplexField coor(CoarseGrid); | ||||
|       pha[p]=Zero(); | ||||
|       for(int mu=0;mu<Nd;mu++){ | ||||
| 	LatticeCoordinate(coor,mu); | ||||
| 	RealD TwoPiL =  M_PI * 2.0/ clatt[mu]; | ||||
| 	pha[p] = pha[p] + (TwoPiL * geom_srhs.shifts[p][mu]) * coor; | ||||
|       } | ||||
|       pha[p]  =exp(pha[p]*ci);	 | ||||
|  | ||||
|       blockZAXPY(phaF[p],pha[p],one,zz); | ||||
|     } | ||||
|     tphase+=usecond(); | ||||
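In formula form, the block phase constructed in the loop above is

\mathrm{pha}_p(x) \;=\; \exp\!\Big( i \sum_{\mu} \frac{2\pi}{L^{\mathrm{coarse}}_{\mu}}\,
        \delta_{p,\mu}\, x_{\mu} \Big),

with x the coarse-site coordinate; blockZAXPY then promotes this block-constant phase to the fine grid as phaF[p], so the same factor can be conjugated back out of the coarse projection after the operator has been applied.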
|  | ||||
|     // Could save on temporary storage here | ||||
|     std::vector<CoarseMatrix> _A; | ||||
|     _A.resize(geom_srhs.npoint,CoarseGrid); | ||||
|  | ||||
|     // Could use smaller chunks than npoint == 81 and save memory | ||||
|     int batch = 9; | ||||
|     std::vector<FineField>    _MphaV(batch,grid); | ||||
|     std::vector<CoarseVector> TmpProj(batch,CoarseGrid); | ||||
|  | ||||
|     std::vector<CoarseVector> ComputeProj(npoint,CoarseGrid); | ||||
|     CoarseVector          FT(CoarseGrid); | ||||
|     for(int i=0;i<nbasis;i++){// Loop over basis vectors | ||||
|       std::cout << GridLogMessage<< "CoarsenMatrixColoured vec "<<i<<"/"<<nbasis<< std::endl; | ||||
|  | ||||
|       //      std::cout << GridLogMessage << " phasing the fine vector "<<std::endl; | ||||
|       // Fixme : do this in batches | ||||
|       for(int p=0;p<npoint;p+=batch){ // Loop over momenta in npoint | ||||
|  | ||||
| 	for(int b=0;b<MIN(batch,npoint-p);b++){ | ||||
| 	  tphaseBZ-=usecond(); | ||||
| 	  phaV = phaF[p+b]*Subspace.subspace[i]; | ||||
| 	  tphaseBZ+=usecond(); | ||||
|  | ||||
| 	  ///////////////////////////////////////////////////////////////////// | ||||
| 	  // Multiply phased subspace vector by matrix and project to subspace | ||||
| 	  // Remove local bulk phase to leave relative phases | ||||
| 	  ///////////////////////////////////////////////////////////////////// | ||||
| 	  // Memory footprint was an issue | ||||
| 	  tmat-=usecond(); | ||||
| 	  linop.Op(phaV,MphaV); | ||||
| 	  _MphaV[b] = MphaV; | ||||
| 	  tmat+=usecond(); | ||||
| 	}       | ||||
|  | ||||
| 	//	std::cout << GridLogMessage << " Calling block project "<<std::endl; | ||||
| 	tproj-=usecond(); | ||||
| 	Projector.blockProject(_MphaV,TmpProj); | ||||
| 	tproj+=usecond(); | ||||
| 	 | ||||
| 	//	std::cout << GridLogMessage << " conj phasing the coarse vectors "<<std::endl; | ||||
| 	for(int b=0;b<MIN(batch,npoint-p);b++){ | ||||
| 	  ComputeProj[p+b] = conjugate(pha[p+b])*TmpProj[b]; | ||||
| 	} | ||||
|       } | ||||
|  | ||||
|       // Could do this with a block promote or similar BLAS call via the MultiRHSBlockProjector with a const matrix. | ||||
|        | ||||
|       // std::cout << GridLogMessage << " Starting FT inv "<<std::endl; | ||||
|       tinv-=usecond(); | ||||
|       for(int k=0;k<npoint;k++){ | ||||
| 	FT = Zero(); | ||||
| 	// 81 kernel calls, one per ComputeProj vector | ||||
| 	// Could fuse with a vector of views, but ugly | ||||
| 	// Could unroll the expression and run fewer kernels -- much more attractive | ||||
| 	// Could also make this non-blocking. | ||||
| #if 0	 | ||||
| 	for(int l=0;l<npoint;l++){ | ||||
| 	  FT= FT+ invMkl(l,k)*ComputeProj[l]; | ||||
| 	} | ||||
| #else | ||||
| 	const int radix = 9; | ||||
| 	int ll; | ||||
| 	for(ll=0;ll+radix-1<npoint;ll+=radix){ | ||||
| 	  // When ll = npoint-radix, ll+radix-1 = npoint-1, and we do it all. | ||||
| 	  FT = FT  | ||||
| 	    + invMkl(ll+0,k)*ComputeProj[ll+0] | ||||
| 	    + invMkl(ll+1,k)*ComputeProj[ll+1] | ||||
| 	    + invMkl(ll+2,k)*ComputeProj[ll+2] | ||||
| 	    + invMkl(ll+3,k)*ComputeProj[ll+3] | ||||
| 	    + invMkl(ll+4,k)*ComputeProj[ll+4] | ||||
| 	    + invMkl(ll+5,k)*ComputeProj[ll+5] | ||||
| 	    + invMkl(ll+6,k)*ComputeProj[ll+6] | ||||
| 	    + invMkl(ll+7,k)*ComputeProj[ll+7] | ||||
| 	    + invMkl(ll+8,k)*ComputeProj[ll+8]; | ||||
| 	} | ||||
| 	for(int l=ll;l<npoint;l++){ | ||||
| 	  FT= FT+ invMkl(l,k)*ComputeProj[l]; | ||||
| 	} | ||||
| #endif | ||||
|        | ||||
| 	// 1 kernel call -- must be cheaper | ||||
| 	int osites=CoarseGrid->oSites(); | ||||
| 	autoView( A_v  , _A[k], AcceleratorWrite); | ||||
| 	autoView( FT_v  , FT, AcceleratorRead); | ||||
| 	accelerator_for(sss, osites, 1, { | ||||
| 	    for(int j=0;j<nbasis;j++){ | ||||
| 	      A_v[sss](i,j) = FT_v[sss](j); | ||||
| 	    } | ||||
|         }); | ||||
|       } | ||||
|       tinv+=usecond(); | ||||
|     } | ||||
|  | ||||
|     // Only needed if nonhermitian | ||||
|     //    if ( ! hermitian ) { | ||||
|     //      std::cout << GridLogMessage<<"PopulateAdag  "<<std::endl; | ||||
|     //      PopulateAdag(); | ||||
|     //    } | ||||
|     // Need to write something to populate Adag from A | ||||
|     //    std::cout << GridLogMessage << " Calling GridtoBLAS "<<std::endl; | ||||
|     for(int p=0;p<geom_srhs.npoint;p++){ | ||||
|       GridtoBLAS(_A[p],BLAS_A[p]); | ||||
|     } | ||||
|     std::cout << GridLogMessage<<"CoarsenOperator phase  "<<tphase<<" us"<<std::endl; | ||||
|     std::cout << GridLogMessage<<"CoarsenOperator phaseBZ "<<tphaseBZ<<" us"<<std::endl; | ||||
|     std::cout << GridLogMessage<<"CoarsenOperator mat    "<<tmat <<" us"<<std::endl; | ||||
|     std::cout << GridLogMessage<<"CoarsenOperator proj   "<<tproj<<" us"<<std::endl; | ||||
|     std::cout << GridLogMessage<<"CoarsenOperator inv    "<<tinv<<" us"<<std::endl; | ||||
| #endif | ||||
|   } | ||||
|   void Mdag(const CoarseVector &in, CoarseVector &out) | ||||
|   { | ||||
|     this->M(in,out); | ||||
|   } | ||||
|   void M (const CoarseVector &in, CoarseVector &out) | ||||
|   { | ||||
|     //    std::cout << GridLogMessage << "New Mrhs coarse"<<std::endl; | ||||
|     conformable(CoarseGrid(),in.Grid()); | ||||
|     conformable(in.Grid(),out.Grid()); | ||||
|     out.Checkerboard() = in.Checkerboard(); | ||||
|  | ||||
|     RealD t_tot; | ||||
|     RealD t_exch; | ||||
|     RealD t_GtoB; | ||||
|     RealD t_BtoG; | ||||
|     RealD t_mult; | ||||
|  | ||||
|     t_tot=-usecond(); | ||||
|     CoarseVector tin=in; | ||||
|     t_exch=-usecond(); | ||||
|     CoarseVector pin = Cell.ExchangePeriodic(tin); //padded input | ||||
|     t_exch+=usecond(); | ||||
|  | ||||
|     CoarseVector pout(pin.Grid()); | ||||
|  | ||||
|     int npoint = geom.npoint; | ||||
|     typedef calcMatrix* Aview; | ||||
|     typedef LatticeView<Cvec> Vview; | ||||
|        | ||||
|     const int Nsimd = CComplex::Nsimd(); | ||||
|  | ||||
|     int64_t nrhs  =pin.Grid()->GlobalDimensions()[0]; | ||||
|     assert(nrhs>=1); | ||||
|  | ||||
|     RealD flops,bytes; | ||||
|     int64_t osites=in.Grid()->oSites(); // unpadded | ||||
|     int64_t unpadded_vol = CoarseGrid()->lSites()/nrhs; | ||||
|      | ||||
|     flops = 1.0* npoint * nbasis * nbasis * 8.0 * osites * CComplex::Nsimd(); | ||||
|     bytes = 1.0*osites*sizeof(siteMatrix)*npoint/pin.Grid()->GlobalDimensions()[0] | ||||
|           + 2.0*osites*sizeof(siteVector)*npoint; | ||||
|      | ||||
|  | ||||
|     t_GtoB=-usecond(); | ||||
|     GridtoBLAS(pin,BLAS_B); | ||||
|     t_GtoB+=usecond(); | ||||
|  | ||||
|     GridBLAS BLAS; | ||||
|  | ||||
|     t_mult=-usecond(); | ||||
|     for(int p=0;p<geom.npoint;p++){ | ||||
|       RealD c = 1.0; | ||||
|       if (p==0) c = 0.0; | ||||
|       ComplexD beta(c); | ||||
|  | ||||
|       BLAS.gemmBatched(nbasis,nrhs,nbasis, | ||||
| 		       ComplexD(1.0), | ||||
| 		       BLAS_AP[p],  | ||||
| 		       BLAS_BP[p],  | ||||
| 		       ComplexD(c),  | ||||
| 		       BLAS_CP); | ||||
|     } | ||||
|     BLAS.synchronise(); | ||||
|     t_mult+=usecond(); | ||||
|  | ||||
|     t_BtoG=-usecond(); | ||||
|     BLAStoGrid(out,BLAS_C); | ||||
|     t_BtoG+=usecond(); | ||||
|     t_tot+=usecond(); | ||||
|     /* | ||||
|     std::cout << GridLogMessage << "New Mrhs coarse DONE "<<std::endl; | ||||
|     std::cout << GridLogMessage<<"Coarse Mult exch "<<t_exch<<" us"<<std::endl; | ||||
|     std::cout << GridLogMessage<<"Coarse Mult mult "<<t_mult<<" us"<<std::endl; | ||||
|     std::cout << GridLogMessage<<"Coarse Mult GtoB  "<<t_GtoB<<" us"<<std::endl; | ||||
|     std::cout << GridLogMessage<<"Coarse Mult BtoG  "<<t_BtoG<<" us"<<std::endl; | ||||
|     std::cout << GridLogMessage<<"Coarse Mult tot  "<<t_tot<<" us"<<std::endl; | ||||
|     */ | ||||
|     //    std::cout << GridLogMessage<<std::endl; | ||||
|     //    std::cout << GridLogMessage<<"Coarse Kernel flops "<< flops<<std::endl; | ||||
|     //    std::cout << GridLogMessage<<"Coarse Kernel flop/s "<< flops/t_mult<<" mflop/s"<<std::endl; | ||||
|     //    std::cout << GridLogMessage<<"Coarse Kernel bytes/s "<< bytes/t_mult/1000<<" GB/s"<<std::endl; | ||||
|     //    std::cout << GridLogMessage<<"Coarse overall flops/s "<< flops/t_tot<<" mflop/s"<<std::endl; | ||||
|     //    std::cout << GridLogMessage<<"Coarse total bytes   "<< bytes/1e6<<" MB"<<std::endl; | ||||
|   }; | ||||
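M() above is therefore a fixed pipeline: halo exchange on the padded cell, GridtoBLAS repack of the padded input, one batched GEMM per stencil point into the unpadded output, BLAStoGrid unpack. The commented timers bracket exactly those four stages, which is why the flops/bytes estimates a few lines earlier are per multiplication and exclude the exchange.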
|   virtual  void Mdiag    (const Field &in, Field &out){ assert(0);}; | ||||
|   virtual  void Mdir     (const Field &in, Field &out,int dir, int disp){assert(0);}; | ||||
|   virtual  void MdirAll  (const Field &in, std::vector<Field> &out){assert(0);}; | ||||
| }; | ||||
|    | ||||
| NAMESPACE_END(Grid); | ||||
Grid/algorithms/multigrid/Geometry.h (new file, 238 lines)
							| @@ -0,0 +1,238 @@ | ||||
| /************************************************************************************* | ||||
|  | ||||
|     Grid physics library, www.github.com/paboyle/Grid  | ||||
|  | ||||
|     Source file: ./lib/algorithms/GeneralCoarsenedMatrix.h | ||||
|  | ||||
|     Copyright (C) 2015 | ||||
|  | ||||
| Author: Peter Boyle <pboyle@bnl.gov> | ||||
|  | ||||
|     This program is free software; you can redistribute it and/or modify | ||||
|     it under the terms of the GNU General Public License as published by | ||||
|     the Free Software Foundation; either version 2 of the License, or | ||||
|     (at your option) any later version. | ||||
|  | ||||
|     This program is distributed in the hope that it will be useful, | ||||
|     but WITHOUT ANY WARRANTY; without even the implied warranty of | ||||
|     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the | ||||
|     GNU General Public License for more details. | ||||
|  | ||||
|     You should have received a copy of the GNU General Public License along | ||||
|     with this program; if not, write to the Free Software Foundation, Inc., | ||||
|     51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. | ||||
|  | ||||
|     See the full license in the file "LICENSE" in the top level distribution directory | ||||
| *************************************************************************************/ | ||||
| /*  END LEGAL */ | ||||
| #pragma once | ||||
|  | ||||
| NAMESPACE_BEGIN(Grid); | ||||
|  | ||||
|  | ||||
| ///////////////////////////////////////////////////////////////// | ||||
| // Geometry class in cartesian case | ||||
| ///////////////////////////////////////////////////////////////// | ||||
|  | ||||
| class Geometry { | ||||
| public: | ||||
|   int npoint; | ||||
|   int base; | ||||
|   std::vector<int> directions   ; | ||||
|   std::vector<int> displacements; | ||||
|   std::vector<int> points_dagger; | ||||
|  | ||||
|   Geometry(int _d)  { | ||||
|      | ||||
|     base = (_d==5) ? 1:0; | ||||
|  | ||||
|     // make coarse grid stencil for 4d , not 5d | ||||
|     if ( _d==5 ) _d=4; | ||||
|  | ||||
|     npoint = 2*_d+1; | ||||
|     directions.resize(npoint); | ||||
|     displacements.resize(npoint); | ||||
|     points_dagger.resize(npoint); | ||||
|     for(int d=0;d<_d;d++){ | ||||
|       directions[d   ] = d+base; | ||||
|       directions[d+_d] = d+base; | ||||
|       displacements[d  ] = +1; | ||||
|       displacements[d+_d]= -1; | ||||
|       points_dagger[d   ] = d+_d; | ||||
|       points_dagger[d+_d] = d; | ||||
|     } | ||||
|     directions   [2*_d]=0; | ||||
|     displacements[2*_d]=0; | ||||
|     points_dagger[2*_d]=2*_d; | ||||
|   } | ||||
|  | ||||
|   int point(int dir, int disp) { | ||||
|     assert(disp == -1 || disp == 0 || disp == 1); | ||||
|     assert(base+0 <= dir && dir < base+4); | ||||
|  | ||||
|     // directions faster index = new indexing | ||||
|     // 4d (base = 0): | ||||
|     // point 0  1  2  3  4  5  6  7  8 | ||||
|     // dir   0  1  2  3  0  1  2  3  0 | ||||
|     // disp +1 +1 +1 +1 -1 -1 -1 -1  0 | ||||
|     // 5d (base = 1): | ||||
|     // point 0  1  2  3  4  5  6  7  8 | ||||
|     // dir   1  2  3  4  1  2  3  4  0 | ||||
|     // disp +1 +1 +1 +1 -1 -1 -1 -1  0 | ||||
|  | ||||
|     // displacements faster index = old indexing | ||||
|     // 4d (base = 0): | ||||
|     // point 0  1  2  3  4  5  6  7  8 | ||||
|     // dir   0  0  1  1  2  2  3  3  0 | ||||
|     // disp +1 -1 +1 -1 +1 -1 +1 -1  0 | ||||
|     // 5d (base = 1): | ||||
|     // point 0  1  2  3  4  5  6  7  8 | ||||
|     // dir   1  1  2  2  3  3  4  4  0 | ||||
|     // disp +1 -1 +1 -1 +1 -1 +1 -1  0 | ||||
|  | ||||
|     if(dir == 0 and disp == 0) | ||||
|       return 8; | ||||
|     else // New indexing | ||||
|       return (1 - disp) / 2 * 4 + dir - base; | ||||
|     // else // Old indexing | ||||
|     //   return (4 * (dir - base) + 1 - disp) / 2; | ||||
|   } | ||||
| }; | ||||
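A brief illustrative check of the point() indexing above, assuming the 4d (base = 0) case and the Grid headers; the values are read straight off the "new indexing" table in the comment:

Geometry geom(4);
assert(geom.point(1, +1) == 1);   // forward hop in direction 1
assert(geom.point(1, -1) == 5);   // backward hop in direction 1
assert(geom.point(0,  0) == 8);   // zero-displacement (self) point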
|  | ||||
| ///////////////////////////////////////////////////////////////// | ||||
| // Less local equivalent of Geometry class in cartesian case | ||||
| ///////////////////////////////////////////////////////////////// | ||||
| class NonLocalStencilGeometry { | ||||
| public: | ||||
|   //  int depth; | ||||
|   int skip; | ||||
|   int hops; | ||||
|   int npoint; | ||||
|   std::vector<Coordinate> shifts; | ||||
|   Coordinate stencil_size; | ||||
|   Coordinate stencil_lo; | ||||
|   Coordinate stencil_hi; | ||||
|   GridCartesian *grid; | ||||
|   GridCartesian *Grid() {return grid;}; | ||||
|   int Depth(void){return 1;};   // Ghost zone depth | ||||
|   int Hops(void){return hops;}; // # of hops => level of corner fill-in in the stencil | ||||
|   int DimSkip(void){return skip;}; | ||||
|  | ||||
|   virtual ~NonLocalStencilGeometry() {}; | ||||
|  | ||||
|   int  Reverse(int point) | ||||
|   { | ||||
|     int Nd = Grid()->Nd(); | ||||
|     Coordinate shft = shifts[point]; | ||||
|     Coordinate rev(Nd); | ||||
|     for(int mu=0;mu<Nd;mu++) rev[mu]= -shft[mu]; | ||||
|     for(int p=0;p<npoint;p++){ | ||||
|       if(rev==shifts[p]){ | ||||
| 	return p; | ||||
|       } | ||||
|     } | ||||
|     assert(0); | ||||
|     return -1; | ||||
|   } | ||||
|   void BuildShifts(void) | ||||
|   { | ||||
|     this->shifts.resize(0); | ||||
|     int Nd = this->grid->Nd(); | ||||
|  | ||||
|     int dd = this->DimSkip(); | ||||
|     for(int s0=this->stencil_lo[dd+0];s0<=this->stencil_hi[dd+0];s0++){ | ||||
|     for(int s1=this->stencil_lo[dd+1];s1<=this->stencil_hi[dd+1];s1++){ | ||||
|     for(int s2=this->stencil_lo[dd+2];s2<=this->stencil_hi[dd+2];s2++){ | ||||
|     for(int s3=this->stencil_lo[dd+3];s3<=this->stencil_hi[dd+3];s3++){ | ||||
|       Coordinate sft(Nd,0); | ||||
|       sft[dd+0] = s0; | ||||
|       sft[dd+1] = s1; | ||||
|       sft[dd+2] = s2; | ||||
|       sft[dd+3] = s3; | ||||
|       int nhops = abs(s0)+abs(s1)+abs(s2)+abs(s3); | ||||
|       if(nhops<=this->hops) this->shifts.push_back(sft); | ||||
|     }}}} | ||||
|     this->npoint = this->shifts.size(); | ||||
|     std::cout << GridLogMessage << "NonLocalStencilGeometry has "<< this->npoint << " terms in stencil "<<std::endl; | ||||
|   } | ||||
|    | ||||
|   NonLocalStencilGeometry(GridCartesian *_coarse_grid,int _hops,int _skip) : grid(_coarse_grid), hops(_hops), skip(_skip) | ||||
|   { | ||||
|     Coordinate latt = grid->GlobalDimensions(); | ||||
|     stencil_size.resize(grid->Nd()); | ||||
|     stencil_lo.resize(grid->Nd()); | ||||
|     stencil_hi.resize(grid->Nd()); | ||||
|     for(int d=0;d<grid->Nd();d++){ | ||||
|      if ( latt[d] == 1 ) { | ||||
|       stencil_lo[d] = 0; | ||||
|       stencil_hi[d] = 0; | ||||
|       stencil_size[d]= 1; | ||||
|      } else if ( latt[d] == 2 ) { | ||||
|       stencil_lo[d] = -1; | ||||
|       stencil_hi[d] = 0; | ||||
|       stencil_size[d]= 2; | ||||
|      } else if ( latt[d] > 2 ) { | ||||
|        stencil_lo[d] = -1; | ||||
|        stencil_hi[d] =  1; | ||||
|        stencil_size[d]= 3; | ||||
|      } | ||||
|     } | ||||
|     this->BuildShifts(); | ||||
|   }; | ||||
|  | ||||
| }; | ||||
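BuildShifts() keeps every shift in the stencil box whose 1-norm is at most hops. For the common case of a coarse grid with all dimensions above 2, so the box is {-1,0,1}^4, a standalone toy count, kept separate from the Grid code and included only to make the familiar point totals explicit:

#include <cstdlib>
#include <iostream>

// Count shifts in {-1,0,1}^4 with L1 norm <= hops, as BuildShifts does when
// every coarse dimension exceeds 2.
int count_shifts(int hops) {
  int n = 0;
  for (int s0 = -1; s0 <= 1; s0++)
  for (int s1 = -1; s1 <= 1; s1++)
  for (int s2 = -1; s2 <= 1; s2++)
  for (int s3 = -1; s3 <= 1; s3++)
    if (std::abs(s0)+std::abs(s1)+std::abs(s2)+std::abs(s3) <= hops) n++;
  return n;
}

int main() {
  std::cout << count_shifts(1) << " " << count_shifts(2) << " "
            << count_shifts(4) << std::endl;   // prints: 9 33 81
  return 0;
}

hops = 1 reproduces the 9-point nearest stencil, and hops = 4 the full 81-point ball quoted in the multi-RHS coarse operator comments above.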
|  | ||||
| // Need to worry about red-black now | ||||
| class NonLocalStencilGeometry4D : public NonLocalStencilGeometry { | ||||
| public: | ||||
|   virtual int DerivedDimSkip(void) { return 0;}; | ||||
|   NonLocalStencilGeometry4D(GridCartesian *Coarse,int _hops) : NonLocalStencilGeometry(Coarse,_hops,0) { }; | ||||
|   virtual ~NonLocalStencilGeometry4D() {}; | ||||
| }; | ||||
| class NonLocalStencilGeometry5D : public NonLocalStencilGeometry { | ||||
| public: | ||||
|   virtual int DerivedDimSkip(void) { return 1; };  | ||||
|   NonLocalStencilGeometry5D(GridCartesian *Coarse,int _hops) : NonLocalStencilGeometry(Coarse,_hops,1)  { }; | ||||
|   virtual ~NonLocalStencilGeometry5D() {}; | ||||
| }; | ||||
| /* | ||||
|  * Bunch of different options classes | ||||
|  */ | ||||
| class NextToNextToNextToNearestStencilGeometry4D : public NonLocalStencilGeometry4D { | ||||
| public: | ||||
|   NextToNextToNextToNearestStencilGeometry4D(GridCartesian *Coarse) :  NonLocalStencilGeometry4D(Coarse,4) | ||||
|   { | ||||
|   }; | ||||
| }; | ||||
| class NextToNextToNextToNearestStencilGeometry5D : public  NonLocalStencilGeometry5D { | ||||
| public: | ||||
|   NextToNextToNextToNearestStencilGeometry5D(GridCartesian *Coarse) :  NonLocalStencilGeometry5D(Coarse,4) | ||||
|   { | ||||
|   }; | ||||
| }; | ||||
| class NextToNearestStencilGeometry4D : public  NonLocalStencilGeometry4D { | ||||
| public: | ||||
|   NextToNearestStencilGeometry4D(GridCartesian *Coarse) :  NonLocalStencilGeometry4D(Coarse,2) | ||||
|   { | ||||
|   }; | ||||
| }; | ||||
| class NextToNearestStencilGeometry5D : public  NonLocalStencilGeometry5D { | ||||
| public: | ||||
|   NextToNearestStencilGeometry5D(GridCartesian *Coarse) :  NonLocalStencilGeometry5D(Coarse,2) | ||||
|   { | ||||
|   }; | ||||
| }; | ||||
| class NearestStencilGeometry4D : public  NonLocalStencilGeometry4D { | ||||
| public: | ||||
|   NearestStencilGeometry4D(GridCartesian *Coarse) :  NonLocalStencilGeometry4D(Coarse,1) | ||||
|   { | ||||
|   }; | ||||
| }; | ||||
| class NearestStencilGeometry5D : public  NonLocalStencilGeometry5D { | ||||
| public: | ||||
|   NearestStencilGeometry5D(GridCartesian *Coarse) :  NonLocalStencilGeometry5D(Coarse,1) | ||||
|   { | ||||
|   }; | ||||
| }; | ||||
|  | ||||
| NAMESPACE_END(Grid); | ||||
Grid/algorithms/multigrid/MultiGrid.h (new file, 34 lines)
							| @@ -0,0 +1,34 @@ | ||||
|     /************************************************************************************* | ||||
|  | ||||
|     Grid physics library, www.github.com/paboyle/Grid | ||||
|  | ||||
|     Source file: Grid/algorithms/multigrid/MultiGrid.h | ||||
|  | ||||
|     Copyright (C) 2023 | ||||
|  | ||||
| Author: Peter Boyle <pboyle@bnl.gov> | ||||
|  | ||||
|     This program is free software; you can redistribute it and/or modify | ||||
|     it under the terms of the GNU General Public License as published by | ||||
|     the Free Software Foundation; either version 2 of the License, or | ||||
|     (at your option) any later version. | ||||
|  | ||||
|     This program is distributed in the hope that it will be useful, | ||||
|     but WITHOUT ANY WARRANTY; without even the implied warranty of | ||||
|     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the | ||||
|     GNU General Public License for more details. | ||||
|  | ||||
|     You should have received a copy of the GNU General Public License along | ||||
|     with this program; if not, write to the Free Software Foundation, Inc., | ||||
|     51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. | ||||
|  | ||||
|     See the full license in the file "LICENSE" in the top level distribution directory | ||||
|     *************************************************************************************/ | ||||
|     /*  END LEGAL */ | ||||
| #pragma once | ||||
|  | ||||
| #include <Grid/algorithms/multigrid/Aggregates.h> | ||||
| #include <Grid/algorithms/multigrid/Geometry.h> | ||||
| #include <Grid/algorithms/multigrid/CoarsenedMatrix.h> | ||||
| #include <Grid/algorithms/multigrid/GeneralCoarsenedMatrix.h> | ||||
| #include <Grid/algorithms/multigrid/GeneralCoarsenedMatrixMultiRHS.h> | ||||
| @@ -175,9 +175,56 @@ template<class T> using cshiftAllocator = std::allocator<T>; | ||||
|  | ||||
| template<class T> using Vector        = std::vector<T,uvmAllocator<T> >;            | ||||
| template<class T> using stencilVector = std::vector<T,alignedAllocator<T> >;            | ||||
| template<class T> using commVector = std::vector<T,devAllocator<T> >; | ||||
| template<class T> using commVector    = std::vector<T,devAllocator<T> >; | ||||
| template<class T> using deviceVector  = std::vector<T,devAllocator<T> >; | ||||
| template<class T> using cshiftVector = std::vector<T,cshiftAllocator<T> >; | ||||
| template<class T> using cshiftVector  = std::vector<T,cshiftAllocator<T> >; | ||||
|  | ||||
| /* | ||||
| template<class T> class vecView | ||||
| { | ||||
|  protected: | ||||
|   T * data; | ||||
|   uint64_t size; | ||||
|   ViewMode mode; | ||||
|   void * cpu_ptr; | ||||
|  public: | ||||
|   accelerator_inline T & operator[](size_t i) const { return this->data[i]; }; | ||||
|   vecView(std::vector<T> &refer_to_me,ViewMode _mode) | ||||
|   { | ||||
|     cpu_ptr = &refer_to_me[0]; | ||||
|     size = refer_to_me.size(); | ||||
|     mode = _mode; | ||||
|     data =(T *) MemoryManager::ViewOpen(cpu_ptr, | ||||
| 					size*sizeof(T), | ||||
| 					mode, | ||||
| 					AdviseDefault); | ||||
|   } | ||||
|   void ViewClose(void) | ||||
|   { // Inform the manager | ||||
|     MemoryManager::ViewClose(this->cpu_ptr,this->mode);     | ||||
|   } | ||||
| }; | ||||
|  | ||||
| template<class T> vecView<T> VectorView(std::vector<T> &vec,ViewMode _mode) | ||||
| { | ||||
|   vecView<T> ret(vec,_mode); // does the open | ||||
|   return ret;                // must be closed | ||||
| } | ||||
|  | ||||
| // Little autoscope assister | ||||
| template<class View>  | ||||
| class VectorViewCloser | ||||
| { | ||||
|   View v;  // Take a copy of view and call view close when I go out of scope automatically | ||||
|  public: | ||||
|   VectorViewCloser(View &_v) : v(_v) {}; | ||||
|   ~VectorViewCloser() { auto ptr = v.cpu_ptr; v.ViewClose();  MemoryManager::NotifyDeletion(ptr);} | ||||
| }; | ||||
|  | ||||
| #define autoVecView(v_v,v,mode)					\ | ||||
|   auto v_v = VectorView(v,mode);				\ | ||||
|   VectorViewCloser<decltype(v_v)> _autoView##v_v(v_v); | ||||
| */ | ||||
|  | ||||
| NAMESPACE_END(Grid); | ||||
|  | ||||
|   | ||||
| @@ -209,9 +209,9 @@ private: | ||||
|   static void     CpuViewClose(uint64_t Ptr); | ||||
|   static uint64_t CpuViewOpen(uint64_t  CpuPtr,size_t bytes,ViewMode mode,ViewAdvise hint); | ||||
| #endif | ||||
|   static void NotifyDeletion(void * CpuPtr); | ||||
|  | ||||
|  public: | ||||
|   static void NotifyDeletion(void * CpuPtr); | ||||
|   static void Print(void); | ||||
|   static void PrintAll(void); | ||||
|   static void PrintState( void* CpuPtr); | ||||
|   | ||||
| @@ -8,7 +8,7 @@ NAMESPACE_BEGIN(Grid); | ||||
| static char print_buffer [ MAXLINE ]; | ||||
|  | ||||
| #define mprintf(...) snprintf (print_buffer,MAXLINE, __VA_ARGS__ ); std::cout << GridLogMemory << print_buffer; | ||||
| #define dprintf(...) snprintf (print_buffer,MAXLINE, __VA_ARGS__ ); std::cout << GridLogMemory << print_buffer; | ||||
| #define dprintf(...) snprintf (print_buffer,MAXLINE, __VA_ARGS__ ); std::cout << GridLogDebug << print_buffer; | ||||
| //#define dprintf(...)  | ||||
|  | ||||
|  | ||||
| @@ -111,7 +111,7 @@ void MemoryManager::AccDiscard(AcceleratorViewEntry &AccCache) | ||||
|   /////////////////////////////////////////////////////////// | ||||
|   assert(AccCache.state!=Empty); | ||||
|    | ||||
|   mprintf("MemoryManager: Discard(%lx) %lx\n",(uint64_t)AccCache.CpuPtr,(uint64_t)AccCache.AccPtr);  | ||||
|   dprintf("MemoryManager: Discard(%lx) %lx\n",(uint64_t)AccCache.CpuPtr,(uint64_t)AccCache.AccPtr);  | ||||
|   assert(AccCache.accLock==0); | ||||
|   assert(AccCache.cpuLock==0); | ||||
|   assert(AccCache.CpuPtr!=(uint64_t)NULL); | ||||
| @@ -141,7 +141,7 @@ void MemoryManager::Evict(AcceleratorViewEntry &AccCache) | ||||
|   /////////////////////////////////////////////////////////////////////////// | ||||
|   assert(AccCache.state!=Empty); | ||||
|    | ||||
|   mprintf("MemoryManager: Evict cpu %lx acc %lx cpuLock %ld accLock %ld\n", | ||||
|   mprintf("MemoryManager: Evict CpuPtr %lx AccPtr %lx cpuLock %ld accLock %ld\n", | ||||
| 	  (uint64_t)AccCache.CpuPtr,(uint64_t)AccCache.AccPtr, | ||||
| 	  (uint64_t)AccCache.cpuLock,(uint64_t)AccCache.accLock);  | ||||
|   if (AccCache.accLock!=0) return; | ||||
| @@ -155,7 +155,7 @@ void MemoryManager::Evict(AcceleratorViewEntry &AccCache) | ||||
|     AccCache.AccPtr=(uint64_t)NULL; | ||||
|     AccCache.state=CpuDirty; // CPU primary now | ||||
|     DeviceBytes   -=AccCache.bytes; | ||||
|     dprintf("MemoryManager: Free(%lx) footprint now %ld \n",(uint64_t)AccCache.AccPtr,DeviceBytes);   | ||||
|     dprintf("MemoryManager: Free(AccPtr %lx) footprint now %ld \n",(uint64_t)AccCache.AccPtr,DeviceBytes);   | ||||
|   } | ||||
|   //  uint64_t CpuPtr = AccCache.CpuPtr; | ||||
|   DeviceEvictions++; | ||||
| @@ -169,7 +169,7 @@ void MemoryManager::Flush(AcceleratorViewEntry &AccCache) | ||||
|   assert(AccCache.AccPtr!=(uint64_t)NULL); | ||||
|   assert(AccCache.CpuPtr!=(uint64_t)NULL); | ||||
|   acceleratorCopyFromDevice((void *)AccCache.AccPtr,(void *)AccCache.CpuPtr,AccCache.bytes); | ||||
|   mprintf("MemoryManager: Flush  %lx -> %lx\n",(uint64_t)AccCache.AccPtr,(uint64_t)AccCache.CpuPtr); fflush(stdout); | ||||
|   mprintf("MemoryManager: acceleratorCopyFromDevice Flush AccPtr %lx -> CpuPtr %lx\n",(uint64_t)AccCache.AccPtr,(uint64_t)AccCache.CpuPtr); fflush(stdout); | ||||
|   DeviceToHostBytes+=AccCache.bytes; | ||||
|   DeviceToHostXfer++; | ||||
|   AccCache.state=Consistent; | ||||
| @@ -184,7 +184,7 @@ void MemoryManager::Clone(AcceleratorViewEntry &AccCache) | ||||
|     AccCache.AccPtr=(uint64_t)AcceleratorAllocate(AccCache.bytes); | ||||
|     DeviceBytes+=AccCache.bytes; | ||||
|   } | ||||
|   mprintf("MemoryManager: Clone %lx <- %lx\n",(uint64_t)AccCache.AccPtr,(uint64_t)AccCache.CpuPtr); fflush(stdout); | ||||
|   mprintf("MemoryManager: acceleratorCopyToDevice   Clone AccPtr %lx <- CpuPtr %lx\n",(uint64_t)AccCache.AccPtr,(uint64_t)AccCache.CpuPtr); fflush(stdout); | ||||
|   acceleratorCopyToDevice((void *)AccCache.CpuPtr,(void *)AccCache.AccPtr,AccCache.bytes); | ||||
|   HostToDeviceBytes+=AccCache.bytes; | ||||
|   HostToDeviceXfer++; | ||||
| @@ -474,6 +474,7 @@ void  MemoryManager::Print(void) | ||||
|   std::cout << GridLogMessage << DeviceEvictions  << " Evictions from device " << std::endl; | ||||
|   std::cout << GridLogMessage << DeviceDestroy    << " Destroyed vectors on device " << std::endl; | ||||
|   std::cout << GridLogMessage << AccViewTable.size()<< " vectors " << LRU.size()<<" evictable"<< std::endl; | ||||
|   acceleratorMem(); | ||||
|   std::cout << GridLogMessage << "--------------------------------------------" << std::endl; | ||||
| } | ||||
| void  MemoryManager::PrintAll(void) | ||||
|   | ||||
| @@ -70,8 +70,8 @@ public: | ||||
|   Coordinate _istride;    // Inner stride i.e. within simd lane | ||||
|   int _osites;                  // _isites*_osites = product(dimensions). | ||||
|   int _isites; | ||||
|   int _fsites;                  // _isites*_osites = product(dimensions). | ||||
|   int _gsites; | ||||
|   int64_t _fsites;                  // _isites*_osites = product(dimensions). | ||||
|   int64_t _gsites; | ||||
|   Coordinate _slice_block;// subslice information | ||||
|   Coordinate _slice_stride; | ||||
|   Coordinate _slice_nblock; | ||||
| @@ -183,7 +183,7 @@ public: | ||||
|   inline int Nsimd(void)  const { return _isites; };// Synonymous with iSites | ||||
|   inline int oSites(void) const { return _osites; }; | ||||
|   inline int lSites(void) const { return _isites*_osites; };  | ||||
|   inline int gSites(void) const { return _isites*_osites*_Nprocessors; };  | ||||
|   inline int64_t gSites(void) const { return (int64_t)_isites*(int64_t)_osites*(int64_t)_Nprocessors; };  | ||||
|   inline int Nd    (void) const { return _ndimension;}; | ||||
|  | ||||
|   inline const Coordinate LocalStarts(void)             { return _lstart;    }; | ||||
| @@ -214,7 +214,7 @@ public: | ||||
|   //////////////////////////////////////////////////////////////// | ||||
|   // Global addressing | ||||
|   //////////////////////////////////////////////////////////////// | ||||
|   void GlobalIndexToGlobalCoor(int gidx,Coordinate &gcoor){ | ||||
|   void GlobalIndexToGlobalCoor(int64_t gidx,Coordinate &gcoor){ | ||||
|     assert(gidx< gSites()); | ||||
|     Lexicographic::CoorFromIndex(gcoor,gidx,_gdimensions); | ||||
|   } | ||||
| @@ -222,7 +222,7 @@ public: | ||||
|     assert(lidx<lSites()); | ||||
|     Lexicographic::CoorFromIndex(lcoor,lidx,_ldimensions); | ||||
|   } | ||||
|   void GlobalCoorToGlobalIndex(const Coordinate & gcoor,int & gidx){ | ||||
|   void GlobalCoorToGlobalIndex(const Coordinate & gcoor,int64_t & gidx){ | ||||
|     gidx=0; | ||||
|     int mult=1; | ||||
|     for(int mu=0;mu<_ndimension;mu++) { | ||||
|   | ||||
| @@ -138,6 +138,14 @@ public: | ||||
|   //////////////////////////////////////////////////////////// | ||||
|   // Face exchange, buffer swap in translational invariant way | ||||
|   //////////////////////////////////////////////////////////// | ||||
|   void CommsComplete(std::vector<CommsRequest_t> &list); | ||||
|   void SendToRecvFromBegin(std::vector<CommsRequest_t> &list, | ||||
| 			   void *xmit, | ||||
| 			   int dest, | ||||
| 			   void *recv, | ||||
| 			   int from, | ||||
| 			   int bytes,int dir); | ||||
|    | ||||
|   void SendToRecvFrom(void *xmit, | ||||
| 		      int xmit_to_rank, | ||||
| 		      void *recv, | ||||
|   | ||||
| @@ -306,6 +306,44 @@ void CartesianCommunicator::GlobalSumVector(double *d,int N) | ||||
|   int ierr = MPI_Allreduce(MPI_IN_PLACE,d,N,MPI_DOUBLE,MPI_SUM,communicator); | ||||
|   assert(ierr==0); | ||||
| } | ||||
|  | ||||
| void CartesianCommunicator::SendToRecvFromBegin(std::vector<CommsRequest_t> &list, | ||||
| 						void *xmit, | ||||
| 						int dest, | ||||
| 						void *recv, | ||||
| 						int from, | ||||
| 						int bytes,int dir) | ||||
| { | ||||
|   MPI_Request xrq; | ||||
|   MPI_Request rrq; | ||||
|  | ||||
|   assert(dest != _processor); | ||||
|   assert(from != _processor); | ||||
|  | ||||
|   int tag; | ||||
|  | ||||
|   tag= dir+from*32; | ||||
|   int ierr=MPI_Irecv(recv, bytes, MPI_CHAR,from,tag,communicator,&rrq); | ||||
|   assert(ierr==0); | ||||
|   list.push_back(rrq); | ||||
|    | ||||
|   tag= dir+_processor*32; | ||||
|   ierr =MPI_Isend(xmit, bytes, MPI_CHAR,dest,tag,communicator,&xrq); | ||||
|   assert(ierr==0); | ||||
|   list.push_back(xrq); | ||||
| } | ||||
| void CartesianCommunicator::CommsComplete(std::vector<CommsRequest_t> &list) | ||||
| { | ||||
|   int nreq=list.size(); | ||||
|  | ||||
|   if (nreq==0) return; | ||||
|  | ||||
|   std::vector<MPI_Status> status(nreq); | ||||
|   int ierr = MPI_Waitall(nreq,&list[0],&status[0]); | ||||
|   assert(ierr==0); | ||||
|   list.resize(0); | ||||
| } | ||||
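A minimal usage sketch of the two primitives added in this hunk; the communicator object, buffers, ranks and byte count are hypothetical, and only the call pattern (post the send/receive pair, overlap work, then wait) is the point:

std::vector<CommsRequest_t> reqs;
comm.SendToRecvFromBegin(reqs, send_buf, dest_rank,
                               recv_buf, from_rank, nbytes, dir);  // posts MPI_Isend / MPI_Irecv
// ... overlap independent computation here ...
comm.CommsComplete(reqs);   // MPI_Waitall on the list, then clears it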
|  | ||||
| // Basic Halo comms primitive | ||||
| void CartesianCommunicator::SendToRecvFrom(void *xmit, | ||||
| 					   int dest, | ||||
| @@ -348,6 +386,7 @@ double CartesianCommunicator::StencilSendToRecvFrom( void *xmit, | ||||
|   return offbytes; | ||||
| } | ||||
|  | ||||
| #undef NVLINK_GET // Define to use get instead of put DMA | ||||
| double CartesianCommunicator::StencilSendToRecvFromBegin(std::vector<CommsRequest_t> &list, | ||||
| 							 void *xmit, | ||||
| 							 int dest,int dox, | ||||
| @@ -380,9 +419,15 @@ double CartesianCommunicator::StencilSendToRecvFromBegin(std::vector<CommsReques | ||||
|       list.push_back(rrq); | ||||
|       off_node_bytes+=rbytes; | ||||
|     } | ||||
| #ifdef NVLINK_GET | ||||
|       void *shm = (void *) this->ShmBufferTranslate(from,xmit); | ||||
|       assert(shm!=NULL); | ||||
|       acceleratorCopyDeviceToDeviceAsynch(shm,recv,rbytes); | ||||
| #endif | ||||
|   } | ||||
|    | ||||
|   if (dox) { | ||||
|     //  rcrc = crc32(rcrc,(unsigned char *)recv,bytes); | ||||
|     if ( (gdest == MPI_UNDEFINED) || Stencil_force_mpi ) { | ||||
|       tag= dir+_processor*32; | ||||
|       ierr =MPI_Isend(xmit, xbytes, MPI_CHAR,dest,tag,communicator_halo[commdir],&xrq); | ||||
| @@ -390,9 +435,12 @@ double CartesianCommunicator::StencilSendToRecvFromBegin(std::vector<CommsReques | ||||
|       list.push_back(xrq); | ||||
|       off_node_bytes+=xbytes; | ||||
|     } else { | ||||
| #ifndef NVLINK_GET | ||||
|       void *shm = (void *) this->ShmBufferTranslate(dest,recv); | ||||
|       assert(shm!=NULL); | ||||
|       acceleratorCopyDeviceToDeviceAsynch(xmit,shm,xbytes); | ||||
| #endif | ||||
|        | ||||
|     } | ||||
|   } | ||||
|  | ||||
| @@ -402,6 +450,8 @@ void CartesianCommunicator::StencilSendToRecvFromComplete(std::vector<CommsReque | ||||
| { | ||||
|   int nreq=list.size(); | ||||
|  | ||||
|   acceleratorCopySynchronise(); | ||||
|  | ||||
|   if (nreq==0) return; | ||||
|  | ||||
|   std::vector<MPI_Status> status(nreq); | ||||
|   | ||||
| @@ -91,6 +91,17 @@ void CartesianCommunicator::SendToRecvFrom(void *xmit, | ||||
| { | ||||
|   assert(0); | ||||
| } | ||||
| void CartesianCommunicator::CommsComplete(std::vector<CommsRequest_t> &list){ assert(0);} | ||||
| void CartesianCommunicator::SendToRecvFromBegin(std::vector<CommsRequest_t> &list, | ||||
| 						void *xmit, | ||||
| 						int dest, | ||||
| 						void *recv, | ||||
| 						int from, | ||||
| 						int bytes,int dir) | ||||
| { | ||||
|   assert(0); | ||||
| } | ||||
|  | ||||
| void CartesianCommunicator::AllToAll(int dim,void  *in,void *out,uint64_t words,uint64_t bytes) | ||||
| { | ||||
|   bcopy(in,out,bytes*words); | ||||
|   | ||||
| @@ -40,6 +40,9 @@ int                 GlobalSharedMemory::_ShmAlloc; | ||||
| uint64_t            GlobalSharedMemory::_ShmAllocBytes; | ||||
|  | ||||
| std::vector<void *> GlobalSharedMemory::WorldShmCommBufs; | ||||
| #ifndef ACCELERATOR_AWARE_MPI | ||||
| void * GlobalSharedMemory::HostCommBuf; | ||||
| #endif | ||||
|  | ||||
| Grid_MPI_Comm       GlobalSharedMemory::WorldShmComm; | ||||
| int                 GlobalSharedMemory::WorldShmRank; | ||||
| @@ -66,6 +69,26 @@ void GlobalSharedMemory::SharedMemoryFree(void) | ||||
| ///////////////////////////////// | ||||
| // Alloc, free shmem region | ||||
| ///////////////////////////////// | ||||
| #ifndef ACCELERATOR_AWARE_MPI | ||||
| void *SharedMemory::HostBufferMalloc(size_t bytes){ | ||||
|   void *ptr = (void *)host_heap_top; | ||||
|   host_heap_top  += bytes; | ||||
|   host_heap_bytes+= bytes; | ||||
|   if (host_heap_bytes >= host_heap_size) { | ||||
|     std::cout<< " HostBufferMalloc exceeded heap size -- try increasing with --shm <MB> flag" <<std::endl; | ||||
|     std::cout<< " Parameter specified in units of MB (megabytes) " <<std::endl; | ||||
|     std::cout<< " Current alloc is " << (bytes/(1024*1024)) <<"MB"<<std::endl; | ||||
|     std::cout<< " Current bytes is " << (host_heap_bytes/(1024*1024)) <<"MB"<<std::endl; | ||||
|     std::cout<< " Current heap  is " << (host_heap_size/(1024*1024)) <<"MB"<<std::endl; | ||||
|     assert(host_heap_bytes<host_heap_size); | ||||
|   } | ||||
|   return ptr; | ||||
| } | ||||
| void SharedMemory::HostBufferFreeAll(void) {  | ||||
|   host_heap_top  =(size_t)HostCommBuf; | ||||
|   host_heap_bytes=0; | ||||
| } | ||||
| #endif | ||||
| void *SharedMemory::ShmBufferMalloc(size_t bytes){ | ||||
|   //  bytes = (bytes+sizeof(vRealD))&(~(sizeof(vRealD)-1));// align up bytes | ||||
|   void *ptr = (void *)heap_top; | ||||
|   | ||||
| @@ -75,7 +75,9 @@ public: | ||||
|   static int           Hugepages; | ||||
|  | ||||
|   static std::vector<void *> WorldShmCommBufs; | ||||
|  | ||||
| #ifndef ACCELERATOR_AWARE_MPI | ||||
|   static void *HostCommBuf; | ||||
| #endif | ||||
|   static Grid_MPI_Comm WorldComm; | ||||
|   static int           WorldRank; | ||||
|   static int           WorldSize; | ||||
| @@ -120,6 +122,13 @@ private: | ||||
|   size_t heap_bytes; | ||||
|   size_t heap_size; | ||||
|  | ||||
| #ifndef ACCELERATOR_AWARE_MPI | ||||
|   size_t host_heap_top;  // set in free all | ||||
|   size_t host_heap_bytes;// set in free all | ||||
|   void *HostCommBuf;     // set in SetCommunicator | ||||
|   size_t host_heap_size; // set in SetCommunicator | ||||
| #endif | ||||
|    | ||||
| protected: | ||||
|  | ||||
|   Grid_MPI_Comm    ShmComm; // for barriers | ||||
| @@ -151,7 +160,10 @@ public: | ||||
|   void *ShmBufferTranslate(int rank,void * local_p); | ||||
|   void *ShmBufferMalloc(size_t bytes); | ||||
|   void  ShmBufferFreeAll(void) ; | ||||
|    | ||||
| #ifndef ACCELERATOR_AWARE_MPI | ||||
|   void *HostBufferMalloc(size_t bytes); | ||||
|   void HostBufferFreeAll(void); | ||||
| #endif   | ||||
|   ////////////////////////////////////////////////////////////////////////// | ||||
|   // Make info on Nodes & ranks and Shared memory available | ||||
|   ////////////////////////////////////////////////////////////////////////// | ||||
|   | ||||
| @@ -39,9 +39,11 @@ Author: Christoph Lehner <christoph@lhnr.de> | ||||
| #include <hip/hip_runtime_api.h> | ||||
| #endif | ||||
| #ifdef GRID_SYCL | ||||
| #ifdef ACCELERATOR_AWARE_MPI | ||||
| #define GRID_SYCL_LEVEL_ZERO_IPC | ||||
| #define SHM_SOCKETS | ||||
| #endif  | ||||
| #include <syscall.h> | ||||
| #define SHM_SOCKETS  | ||||
| #endif | ||||
|  | ||||
| #include <sys/socket.h> | ||||
| @@ -512,46 +514,6 @@ void GlobalSharedMemory::SharedMemoryAllocate(uint64_t bytes, int flags) | ||||
| // Hugetlbfs mapping intended | ||||
| //////////////////////////////////////////////////////////////////////////////////////////// | ||||
| #if defined(GRID_CUDA) ||defined(GRID_HIP)  || defined(GRID_SYCL) | ||||
|  | ||||
| //if defined(GRID_SYCL) | ||||
| #if 0 | ||||
| void GlobalSharedMemory::SharedMemoryAllocate(uint64_t bytes, int flags) | ||||
| { | ||||
|   void * ShmCommBuf ;  | ||||
|   assert(_ShmSetup==1); | ||||
|   assert(_ShmAlloc==0); | ||||
|  | ||||
|   ////////////////////////////////////////////////////////////////////////////////////////////////////////// | ||||
|   // allocate the pointer array for shared windows for our group | ||||
|   ////////////////////////////////////////////////////////////////////////////////////////////////////////// | ||||
|   MPI_Barrier(WorldShmComm); | ||||
|   WorldShmCommBufs.resize(WorldShmSize); | ||||
|  | ||||
|   /////////////////////////////////////////////////////////////////////////////////////////////////////////// | ||||
|   // Each MPI rank should allocate its own buffer | ||||
|   /////////////////////////////////////////////////////////////////////////////////////////////////////////// | ||||
|   ShmCommBuf = acceleratorAllocDevice(bytes); | ||||
|  | ||||
|   if (ShmCommBuf == (void *)NULL ) { | ||||
|     std::cerr << " SharedMemoryMPI.cc acceleratorAllocDevice failed NULL pointer for " << bytes<<" bytes " << std::endl; | ||||
|     exit(EXIT_FAILURE);   | ||||
|   } | ||||
|  | ||||
|   std::cout << WorldRank << Mheader " SharedMemoryMPI.cc acceleratorAllocDevice "<< bytes  | ||||
| 	    << "bytes at "<< std::hex<< ShmCommBuf <<std::dec<<" for comms buffers " <<std::endl; | ||||
|  | ||||
|   SharedMemoryZero(ShmCommBuf,bytes); | ||||
|  | ||||
|   assert(WorldShmSize == 1); | ||||
|   for(int r=0;r<WorldShmSize;r++){ | ||||
|     WorldShmCommBufs[r] = ShmCommBuf; | ||||
|   } | ||||
|   _ShmAllocBytes=bytes; | ||||
|   _ShmAlloc=1; | ||||
| } | ||||
| #endif | ||||
|  | ||||
| #if defined(GRID_CUDA) ||defined(GRID_HIP) ||defined(GRID_SYCL)   | ||||
| void GlobalSharedMemory::SharedMemoryAllocate(uint64_t bytes, int flags) | ||||
| { | ||||
|   void * ShmCommBuf ;  | ||||
| @@ -574,6 +536,9 @@ void GlobalSharedMemory::SharedMemoryAllocate(uint64_t bytes, int flags) | ||||
|   /////////////////////////////////////////////////////////////////////////////////////////////////////////// | ||||
|   // Each MPI rank should allocate its own buffer | ||||
|   /////////////////////////////////////////////////////////////////////////////////////////////////////////// | ||||
| #ifndef ACCELERATOR_AWARE_MPI | ||||
|   HostCommBuf= malloc(bytes); | ||||
| #endif   | ||||
|   ShmCommBuf = acceleratorAllocDevice(bytes); | ||||
|   if (ShmCommBuf == (void *)NULL ) { | ||||
|     std::cerr << " SharedMemoryMPI.cc acceleratorAllocDevice failed NULL pointer for " << bytes<<" bytes " << std::endl; | ||||
| @@ -738,7 +703,6 @@ void GlobalSharedMemory::SharedMemoryAllocate(uint64_t bytes, int flags) | ||||
|   _ShmAllocBytes=bytes; | ||||
|   _ShmAlloc=1; | ||||
| } | ||||
| #endif | ||||
|  | ||||
| #else  | ||||
| #ifdef GRID_MPI3_SHMMMAP | ||||
| @@ -962,6 +926,12 @@ void SharedMemory::SetCommunicator(Grid_MPI_Comm comm) | ||||
|   } | ||||
|   ShmBufferFreeAll(); | ||||
|  | ||||
| #ifndef ACCELERATOR_AWARE_MPI | ||||
|   host_heap_size = heap_size; | ||||
|   HostCommBuf= GlobalSharedMemory::HostCommBuf; | ||||
|   HostBufferFreeAll(); | ||||
| #endif   | ||||
|  | ||||
|   ///////////////////////////////////////////////////////////////////// | ||||
|   // find comm ranks in our SHM group (i.e. which ranks are on our node) | ||||
|   ///////////////////////////////////////////////////////////////////// | ||||
|   | ||||
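With ACCELERATOR_AWARE_MPI undefined, SharedMemoryAllocate mallocs a host shadow of the device comms buffer and SetCommunicator wires it into each communicator as HostCommBuf; the evident intent is that message payloads are staged through host memory around the MPI calls, with HostBufferMalloc handing out pieces of that buffer. A hedged, self-contained sketch of the staging idea (memcpy stands in for the device<->host copies; the names here are illustrative, not Grid call sites):

#include <mpi.h>
#include <cstdio>
#include <cstring>
#include <vector>

// Stand-ins for device<->host copies so the example compiles anywhere.
static void d2h(void *host, const void *dev, size_t bytes) { std::memcpy(host, dev, bytes); }
static void h2d(void *dev, const void *host, size_t bytes) { std::memcpy(dev, host, bytes); }

// Stage a send/receive pair through host buffers so MPI never sees device pointers.
static void StagedSendRecv(void *dev_send, void *dev_recv, size_t bytes,
                           void *host_send, void *host_recv,
                           int to, int from, MPI_Comm comm)
{
  d2h(host_send, dev_send, bytes);                        // device -> host staging buffer
  MPI_Sendrecv(host_send, (int)bytes, MPI_CHAR, to,   0,
               host_recv, (int)bytes, MPI_CHAR, from, 0,
               comm, MPI_STATUS_IGNORE);                  // MPI only ever sees host pointers
  h2d(dev_recv, host_recv, bytes);                        // host staging buffer -> device
}

int main(int argc, char **argv) {
  MPI_Init(&argc, &argv);
  int rank; MPI_Comm_rank(MPI_COMM_WORLD, &rank);
  std::vector<char> dev_send(64, (char)rank), dev_recv(64), host_send(64), host_recv(64);
  StagedSendRecv(dev_send.data(), dev_recv.data(), 64,
                 host_send.data(), host_recv.data(), rank, rank, MPI_COMM_WORLD); // send to self
  if (!rank) printf("staged exchange done\n");
  MPI_Finalize();
  return 0;
}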
| @@ -234,10 +234,20 @@ public: | ||||
|   } | ||||
|  | ||||
|   template<class sobj> inline Lattice<vobj> & operator = (const sobj & r){ | ||||
|     vobj vtmp; | ||||
|     vtmp = r; | ||||
| #if 1 | ||||
|     auto me  = View(CpuWrite); | ||||
|     thread_for(ss,me.size(),{ | ||||
| 	me[ss]= r; | ||||
|        me[ss]= r; | ||||
|       }); | ||||
| #else     | ||||
|     auto me  = View(AcceleratorWrite); | ||||
|     accelerator_for(ss,me.size(),vobj::Nsimd(),{ | ||||
| 	auto stmp=coalescedRead(vtmp); | ||||
| 	coalescedWrite(me[ss],stmp); | ||||
|     }); | ||||
| #endif     | ||||
|     me.ViewClose(); | ||||
|     return *this; | ||||
|   } | ||||
| @@ -360,7 +370,7 @@ public: | ||||
|  | ||||
| template<class vobj> std::ostream& operator<< (std::ostream& stream, const Lattice<vobj> &o){ | ||||
|   typedef typename vobj::scalar_object sobj; | ||||
|   for(int g=0;g<o.Grid()->_gsites;g++){ | ||||
|   for(int64_t g=0;g<o.Grid()->_gsites;g++){ | ||||
|  | ||||
|     Coordinate gcoor; | ||||
|     o.Grid()->GlobalIndexToGlobalCoor(g,gcoor); | ||||
|   | ||||
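The loop index in operator<< is widened to int64_t because the global site count is the product of all global dimensions and overflows a 32-bit int on large volumes. A plain-arithmetic illustration:

#include <cstdint>
#include <iostream>
#include <limits>

int main() {
  // A 256^4 global lattice has more sites than a 32-bit int can index.
  int64_t L = 256;
  int64_t gsites = L*L*L*L;                  // 4,294,967,296
  std::cout << "gsites    = " << gsites << std::endl;
  std::cout << "INT32_MAX = " << std::numeric_limits<int32_t>::max() << std::endl;
  std::cout << "overflows 32-bit int: "
            << (gsites > std::numeric_limits<int32_t>::max()) << std::endl;
  return 0;
}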
| @@ -29,7 +29,7 @@ Author: Peter Boyle <paboyle@ph.ed.ac.uk> | ||||
|  | ||||
| NAMESPACE_BEGIN(Grid); | ||||
|  | ||||
| template<class vobj> void DumpSliceNorm(std::string s,Lattice<vobj> &f,int mu=-1) | ||||
| template<class vobj> void DumpSliceNorm(std::string s,const Lattice<vobj> &f,int mu=-1) | ||||
| { | ||||
|   auto ff = localNorm2(f); | ||||
|   if ( mu==-1 ) mu = f.Grid()->Nd()-1; | ||||
|   | ||||
| @@ -204,6 +204,27 @@ template<class vobj> inline RealD norm2(const Lattice<vobj> &arg){ | ||||
|   return real(nrm);  | ||||
| } | ||||
|  | ||||
|  | ||||
| template<class Op,class T1> | ||||
| inline auto norm2(const LatticeUnaryExpression<Op,T1> & expr)  ->RealD | ||||
| { | ||||
|   return norm2(closure(expr)); | ||||
| } | ||||
|  | ||||
| template<class Op,class T1,class T2> | ||||
| inline auto norm2(const LatticeBinaryExpression<Op,T1,T2> & expr)      ->RealD | ||||
| { | ||||
|   return norm2(closure(expr)); | ||||
| } | ||||
|  | ||||
|  | ||||
| template<class Op,class T1,class T2,class T3> | ||||
| inline auto norm2(const LatticeTrinaryExpression<Op,T1,T2,T3> & expr)      ->RealD | ||||
| { | ||||
|   return norm2(closure(expr)); | ||||
| } | ||||
|  | ||||
|  | ||||
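The three overloads above let norm2 be applied directly to an expression-template result by materialising it with closure() first. A standalone illustration of that forwarding pattern with a toy vector type (not Grid's expression machinery):

#include <cstddef>
#include <iostream>
#include <vector>

struct Vec { std::vector<double> d; };
struct AddExpr { const Vec &l; const Vec &r; };            // unevaluated a+b
AddExpr operator+(const Vec &l, const Vec &r) { return {l, r}; }

// Materialise the expression into a concrete container.
Vec closure(const AddExpr &e) {
  Vec out; out.d.resize(e.l.d.size());
  for (size_t i = 0; i < out.d.size(); i++) out.d[i] = e.l.d[i] + e.r.d[i];
  return out;
}
double norm2(const Vec &v) { double n = 0; for (double x : v.d) n += x*x; return n; }
double norm2(const AddExpr &e) { return norm2(closure(e)); }   // forwarding overload, as above

int main() {
  Vec a{{1,2,3}}, b{{4,5,6}};
  std::cout << norm2(a + b) << std::endl;   // 5^2+7^2+9^2 = 155, no named temporary needed
  return 0;
}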
| //The global maximum of the site norm2 | ||||
| template<class vobj> inline RealD maxLocalNorm2(const Lattice<vobj> &arg) | ||||
| { | ||||
| @@ -281,12 +302,29 @@ inline ComplexD rankInnerProduct(const Lattice<vobj> &left,const Lattice<vobj> & | ||||
|   return nrm; | ||||
| } | ||||
|  | ||||
|  | ||||
| template<class vobj> | ||||
| inline ComplexD innerProduct(const Lattice<vobj> &left,const Lattice<vobj> &right) { | ||||
|   GridBase *grid = left.Grid(); | ||||
|  | ||||
| #ifdef GRID_SYCL | ||||
|   uint64_t csum=0; | ||||
|   if ( FlightRecorder::LoggingMode != FlightRecorder::LoggingModeNone) | ||||
|   { | ||||
|     // Hack | ||||
|     // Fast integer xor checksum. Can also be used in comms now. | ||||
|     autoView(l_v,left,AcceleratorRead); | ||||
|     Integer words = left.Grid()->oSites()*sizeof(vobj)/sizeof(uint64_t); | ||||
|     uint64_t *base= (uint64_t *)&l_v[0]; | ||||
|     csum=svm_xor(base,words); | ||||
|   } | ||||
|   FlightRecorder::CsumLog(csum); | ||||
| #endif | ||||
|   ComplexD nrm = rankInnerProduct(left,right); | ||||
|   //  std::cerr<<"flight log " << std::hexfloat << nrm <<" "<<crc(left)<<std::endl; | ||||
|   RealD local = real(nrm); | ||||
|   FlightRecorder::NormLog(real(nrm));  | ||||
|   grid->GlobalSum(nrm); | ||||
|   FlightRecorder::ReductionLog(local,real(nrm));  | ||||
|   return nrm; | ||||
| } | ||||
|  | ||||
|   | ||||
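The GRID_SYCL branch of innerProduct logs an XOR checksum of the left operand's raw 64-bit words to the flight recorder before the reduction; XOR is associative and commutative with identity 0, so the checksum is independent of evaluation order and cheap to compute. A host-side sketch of the same checksum (hypothetical helper, not Grid's svm_xor):

#include <cstdint>
#include <cstdio>
#include <vector>

// Order-independent XOR checksum over a buffer reinterpreted as 64-bit words.
uint64_t xor_checksum(const void *buf, size_t bytes) {
  const uint64_t *w = (const uint64_t *)buf;
  size_t words = bytes / sizeof(uint64_t);
  uint64_t csum = 0;
  for (size_t i = 0; i < words; i++) csum ^= w[i];   // any summation order gives the same csum
  return csum;
}

int main() {
  std::vector<double> field(1024, 3.25);
  printf("csum %016lx\n",
         (unsigned long)xor_checksum(field.data(), field.size()*sizeof(double)));
  return 0;
}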
| @@ -9,14 +9,18 @@ inline typename vobj::scalar_objectD sumD_gpu_tensor(const vobj *lat, Integer os | ||||
| { | ||||
|   typedef typename vobj::scalar_object sobj; | ||||
|   typedef typename vobj::scalar_objectD sobjD; | ||||
|   sobj *mysum =(sobj *) malloc_shared(sizeof(sobj),*theGridAccelerator); | ||||
|   static Vector<sobj> mysum; | ||||
|   mysum.resize(1); | ||||
|   sobj *mysum_p = & mysum[0]; | ||||
|   sobj identity; zeroit(identity); | ||||
|   mysum[0] = identity; | ||||
|   sobj ret ;  | ||||
|  | ||||
|   Integer nsimd= vobj::Nsimd(); | ||||
|    | ||||
|  | ||||
|   const cl::sycl::property_list PropList ({ cl::sycl::property::reduction::initialize_to_identity() }); | ||||
|   theGridAccelerator->submit([&](cl::sycl::handler &cgh) { | ||||
|      auto Reduction = cl::sycl::reduction(mysum,identity,std::plus<>()); | ||||
|     auto Reduction = cl::sycl::reduction(mysum_p,identity,std::plus<>(),PropList); | ||||
|      cgh.parallel_for(cl::sycl::range<1>{osites}, | ||||
| 		      Reduction, | ||||
| 		      [=] (cl::sycl::id<1> item, auto &sum) { | ||||
| @@ -26,7 +30,7 @@ inline typename vobj::scalar_objectD sumD_gpu_tensor(const vobj *lat, Integer os | ||||
|    }); | ||||
|   theGridAccelerator->wait(); | ||||
|   ret = mysum[0]; | ||||
|   free(mysum,*theGridAccelerator); | ||||
|   //  free(mysum,*theGridAccelerator); | ||||
|   sobjD dret; convertType(dret,ret); | ||||
|   return dret; | ||||
| } | ||||
| @@ -69,29 +73,34 @@ inline typename vobj::scalar_object sum_gpu_large(const vobj *lat, Integer osite | ||||
|   return result; | ||||
| } | ||||
|  | ||||
| NAMESPACE_END(Grid); | ||||
|  | ||||
| /* | ||||
| template<class Double> Double svm_reduce(Double *vec,uint64_t L) | ||||
| template<class Word> Word svm_xor(Word *vec,uint64_t L) | ||||
| { | ||||
|   Double sumResult; zeroit(sumResult); | ||||
|   Double *d_sum =(Double *)cl::sycl::malloc_shared(sizeof(Double),*theGridAccelerator); | ||||
|   Double identity;  zeroit(identity); | ||||
|   Word xorResult; xorResult = 0; | ||||
|   static Vector<Word> d_sum; | ||||
|   d_sum.resize(1); | ||||
|   Word *d_sum_p=&d_sum[0]; | ||||
|   Word identity;  identity=0; | ||||
|   d_sum[0] = identity; | ||||
|   const cl::sycl::property_list PropList ({ cl::sycl::property::reduction::initialize_to_identity() }); | ||||
|   theGridAccelerator->submit([&](cl::sycl::handler &cgh) { | ||||
|      auto Reduction = cl::sycl::reduction(d_sum,identity,std::plus<>()); | ||||
|     auto Reduction = cl::sycl::reduction(d_sum_p,identity,std::bit_xor<>(),PropList); | ||||
|      cgh.parallel_for(cl::sycl::range<1>{L}, | ||||
| 		      Reduction, | ||||
| 		      [=] (cl::sycl::id<1> index, auto &sum) { | ||||
| 	 sum +=vec[index]; | ||||
| 	 sum^=vec[index]; | ||||
|      }); | ||||
|    }); | ||||
|   theGridAccelerator->wait(); | ||||
|   Double ret = d_sum[0]; | ||||
|   free(d_sum,*theGridAccelerator); | ||||
|   std::cout << " svm_reduce finished "<<L<<" sites sum = " << ret <<std::endl; | ||||
|   Word ret = d_sum[0]; | ||||
|   //  free(d_sum,*theGridAccelerator); | ||||
|   return ret; | ||||
| } | ||||
|  | ||||
| NAMESPACE_END(Grid); | ||||
|  | ||||
| /* | ||||
|  | ||||
| template <class vobj> | ||||
| inline typename vobj::scalar_objectD sumD_gpu_repack(const vobj *lat, Integer osites) | ||||
| { | ||||
|   | ||||
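Two details recur in the SYCL reduction changes above: the one-element result buffer becomes a function-local static that is reused across calls instead of being malloc_shared'd and freed every time, and it is explicitly set to the identity (with initialize_to_identity passed to the reduction) so stale contents cannot leak into the result. A plain C++ sketch of the same discipline, with the device reduction replaced by a serial loop:

#include <cstdio>
#include <vector>

// Reduce into a cached one-element scratch buffer. The static vector persists
// across calls (mirroring "static Vector<sobj> mysum") and is reset to the
// identity before accumulating, so a previous call's result can never
// contaminate this one.
double sum_with_cached_scratch(const std::vector<double> &data) {
  static std::vector<double> scratch;   // allocated once, reused; no per-call allocate/free
  scratch.resize(1);
  scratch[0] = 0.0;                     // identity for plus; cf. initialize_to_identity
  for (double x : data) scratch[0] += x;
  return scratch[0];
}

int main() {
  std::vector<double> v(100, 1.0);
  printf("%f\n", sum_with_cached_scratch(v));   // 100
  printf("%f\n", sum_with_cached_scratch(v));   // still 100, not 200
  return 0;
}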
| @@ -365,9 +365,14 @@ public: | ||||
|     _bernoulli.resize(_vol,std::discrete_distribution<int32_t>{1,1}); | ||||
|     _uid.resize(_vol,std::uniform_int_distribution<uint32_t>() ); | ||||
|   } | ||||
|  | ||||
|   template <class vobj,class distribution> inline void fill(Lattice<vobj> &l,std::vector<distribution> &dist){ | ||||
|  | ||||
|   template <class vobj,class distribution> inline void fill(Lattice<vobj> &l,std::vector<distribution> &dist) | ||||
|   { | ||||
|     if ( l.Grid()->_isCheckerBoarded ) { | ||||
|       Lattice<vobj> tmp(_grid); | ||||
|       fill(tmp,dist); | ||||
|       pickCheckerboard(l.Checkerboard(),l,tmp); | ||||
|       return; | ||||
|     } | ||||
|     typedef typename vobj::scalar_object scalar_object; | ||||
|     typedef typename vobj::scalar_type scalar_type; | ||||
|     typedef typename vobj::vector_type vector_type; | ||||
| @@ -411,7 +416,7 @@ public: | ||||
|       std::cout << GridLogMessage << "Seed SHA256: " << GridChecksum::sha256_string(seeds) << std::endl; | ||||
|       SeedFixedIntegers(seeds); | ||||
|     } | ||||
|   void SeedFixedIntegers(const std::vector<int> &seeds){ | ||||
|   void SeedFixedIntegers(const std::vector<int> &seeds, int britney=0){ | ||||
|  | ||||
|     // Everyone generates the same seed_seq based on input seeds | ||||
|     CartesianCommunicator::BroadcastWorld(0,(void *)&seeds[0],sizeof(int)*seeds.size()); | ||||
| @@ -428,10 +433,9 @@ public: | ||||
|     // MT implementation does not implement fast discard even though | ||||
|     // in principle this is possible | ||||
|     //////////////////////////////////////////////// | ||||
| #if 1 | ||||
|     thread_for( lidx, _grid->lSites(), { | ||||
|  | ||||
| 	int gidx; | ||||
| 	int64_t gidx; | ||||
| 	int o_idx; | ||||
| 	int i_idx; | ||||
| 	int rank; | ||||
| @@ -449,29 +453,12 @@ public: | ||||
| 	 | ||||
| 	int l_idx=generator_idx(o_idx,i_idx); | ||||
| 	_generators[l_idx] = master_engine; | ||||
| 	Skip(_generators[l_idx],gidx); // Skip to next RNG sequence | ||||
|     }); | ||||
| #else | ||||
|     // Everybody loops over global volume. | ||||
|     thread_for( gidx, _grid->_gsites, { | ||||
|  | ||||
| 	// Where is it? | ||||
| 	int rank; | ||||
| 	int o_idx; | ||||
| 	int i_idx; | ||||
|  | ||||
| 	Coordinate gcoor; | ||||
| 	_grid->GlobalIndexToGlobalCoor(gidx,gcoor); | ||||
| 	_grid->GlobalCoorToRankIndex(rank,o_idx,i_idx,gcoor); | ||||
| 	 | ||||
| 	// If this is one of mine we take it | ||||
| 	if( rank == _grid->ThisRank() ){ | ||||
| 	  int l_idx=generator_idx(o_idx,i_idx); | ||||
| 	  _generators[l_idx] = master_engine; | ||||
| 	if ( britney ) {  | ||||
| 	  Skip(_generators[l_idx],l_idx); // Skip to next RNG sequence | ||||
| 	} else { 	 | ||||
| 	  Skip(_generators[l_idx],gidx); // Skip to next RNG sequence | ||||
| 	} | ||||
|     }); | ||||
| #endif | ||||
| #else  | ||||
|     //////////////////////////////////////////////////////////////// | ||||
|     // Machine and thread decomposition dependent seeding is efficient | ||||
|   | ||||
| @@ -1,5 +1,5 @@ | ||||
| #pragma once | ||||
| #include <type_traits> | ||||
|  | ||||
| #if defined(GRID_CUDA) | ||||
|  | ||||
| #include <cub/cub.cuh> | ||||
| @@ -90,8 +90,61 @@ template<class vobj> inline void sliceSumReduction_cub_small(const vobj *Data, V | ||||
|    | ||||
|  | ||||
| } | ||||
| #endif  | ||||
|  | ||||
| template<class vobj> inline void sliceSumReduction_cub_large(const vobj *Data, Vector<vobj> &lvSum, const int rd, const int e1, const int e2, const int stride, const int ostride, const int Nsimd) { | ||||
|  | ||||
| #if defined(GRID_SYCL) | ||||
| template<class vobj> inline void sliceSumReduction_sycl_small(const vobj *Data, Vector <vobj> &lvSum, const int  &rd, const int &e1, const int &e2, const int &stride, const int &ostride, const int &Nsimd) | ||||
| { | ||||
|   size_t subvol_size = e1*e2; | ||||
|  | ||||
|   vobj *mysum = (vobj *) malloc_shared(rd*sizeof(vobj),*theGridAccelerator); | ||||
|   vobj vobj_zero; | ||||
|   zeroit(vobj_zero); | ||||
|   for (int r = 0; r<rd; r++) {  | ||||
|     mysum[r] = vobj_zero;  | ||||
|   } | ||||
|  | ||||
|   commVector<vobj> reduction_buffer(rd*subvol_size);     | ||||
|  | ||||
|   auto rb_p = &reduction_buffer[0]; | ||||
|  | ||||
|   // autoView(Data_v, Data, AcceleratorRead); | ||||
|  | ||||
|   //prepare reduction buffer  | ||||
|   accelerator_for2d( s,subvol_size, r,rd, (size_t)Nsimd,{  | ||||
|    | ||||
|       int n = s / e2; | ||||
|       int b = s % e2; | ||||
|       int so=r*ostride; // base offset for start of plane  | ||||
|       int ss= so+n*stride+b; | ||||
|  | ||||
|       coalescedWrite(rb_p[r*subvol_size+s], coalescedRead(Data[ss])); | ||||
|  | ||||
|   }); | ||||
|  | ||||
|   for (int r = 0; r < rd; r++) { | ||||
|       theGridAccelerator->submit([&](cl::sycl::handler &cgh) { | ||||
|           auto Reduction = cl::sycl::reduction(&mysum[r],std::plus<>()); | ||||
|           cgh.parallel_for(cl::sycl::range<1>{subvol_size}, | ||||
|           Reduction, | ||||
|           [=](cl::sycl::id<1> item, auto &sum) { | ||||
|               auto s = item[0]; | ||||
|               sum += rb_p[r*subvol_size+s]; | ||||
|           }); | ||||
|       }); | ||||
|        | ||||
|       | ||||
|   } | ||||
|   theGridAccelerator->wait(); | ||||
|   for (int r = 0; r < rd; r++) { | ||||
|     lvSum[r] = mysum[r]; | ||||
|   } | ||||
|   free(mysum,*theGridAccelerator); | ||||
| } | ||||
| #endif | ||||
|  | ||||
| template<class vobj> inline void sliceSumReduction_large(const vobj *Data, Vector<vobj> &lvSum, const int rd, const int e1, const int e2, const int stride, const int ostride, const int Nsimd) { | ||||
|   typedef typename vobj::vector_type vector; | ||||
|   const int words = sizeof(vobj)/sizeof(vector); | ||||
|   const int osites = rd*e1*e2; | ||||
| @@ -106,8 +159,12 @@ template<class vobj> inline void sliceSumReduction_cub_large(const vobj *Data, V | ||||
| 	    buf[ss] = dat[ss*words+w]; | ||||
|     }); | ||||
|  | ||||
|     sliceSumReduction_cub_small(buf,lvSum_small,rd,e1,e2,stride, ostride,Nsimd); | ||||
|        | ||||
|     #if defined(GRID_CUDA) || defined(GRID_HIP) | ||||
|       sliceSumReduction_cub_small(buf,lvSum_small,rd,e1,e2,stride, ostride,Nsimd); | ||||
|     #elif defined(GRID_SYCL) | ||||
|       sliceSumReduction_sycl_small(buf,lvSum_small,rd,e1,e2,stride, ostride,Nsimd); | ||||
|     #endif | ||||
|  | ||||
|     for (int r = 0; r < rd; r++) { | ||||
|       lvSum_ptr[w+words*r]=lvSum_small[r]; | ||||
|     } | ||||
| @@ -117,66 +174,24 @@ template<class vobj> inline void sliceSumReduction_cub_large(const vobj *Data, V | ||||
|    | ||||
| } | ||||
|  | ||||
| template<class vobj> inline void sliceSumReduction_cub(const Lattice<vobj> &Data, Vector<vobj> &lvSum, const int rd, const int e1, const int e2, const int stride, const int ostride, const int Nsimd) | ||||
| template<class vobj> inline void sliceSumReduction_gpu(const Lattice<vobj> &Data, Vector<vobj> &lvSum, const int rd, const int e1, const int e2, const int stride, const int ostride, const int Nsimd) | ||||
| { | ||||
|   autoView(Data_v, Data, AcceleratorRead); //hipcub/cub cannot deal with large vobjs so we split into small/large case. | ||||
|   autoView(Data_v, Data, AcceleratorRead); //reduction libraries cannot deal with large vobjs so we split into small/large case. | ||||
|     if constexpr (sizeof(vobj) <= 256) {  | ||||
|       sliceSumReduction_cub_small(&Data_v[0], lvSum, rd, e1, e2, stride, ostride, Nsimd); | ||||
|  | ||||
|       #if defined(GRID_CUDA) || defined(GRID_HIP) | ||||
|         sliceSumReduction_cub_small(&Data_v[0], lvSum, rd, e1, e2, stride, ostride, Nsimd); | ||||
|       #elif defined (GRID_SYCL) | ||||
|         sliceSumReduction_sycl_small(&Data_v[0], lvSum, rd, e1, e2, stride, ostride, Nsimd); | ||||
|       #endif | ||||
|  | ||||
|     } | ||||
|     else { | ||||
|       sliceSumReduction_cub_large(&Data_v[0], lvSum, rd, e1, e2, stride, ostride, Nsimd); | ||||
|       sliceSumReduction_large(&Data_v[0], lvSum, rd, e1, e2, stride, ostride, Nsimd); | ||||
|     } | ||||
| } | ||||
| #endif | ||||
|  | ||||
|  | ||||
| #if defined(GRID_SYCL) | ||||
| template<class vobj> inline void sliceSumReduction_sycl(const Lattice<vobj> &Data, Vector <vobj> &lvSum, const int  &rd, const int &e1, const int &e2, const int &stride, const int &ostride, const int &Nsimd) | ||||
| { | ||||
|   typedef typename vobj::scalar_object sobj; | ||||
|   size_t subvol_size = e1*e2; | ||||
|  | ||||
|   vobj *mysum = (vobj *) malloc_shared(sizeof(vobj),*theGridAccelerator); | ||||
|   vobj vobj_zero; | ||||
|   zeroit(vobj_zero); | ||||
|      | ||||
|   commVector<vobj> reduction_buffer(rd*subvol_size);     | ||||
|  | ||||
|   auto rb_p = &reduction_buffer[0]; | ||||
|  | ||||
|   autoView(Data_v, Data, AcceleratorRead); | ||||
|  | ||||
|   //prepare reduction buffer  | ||||
|   accelerator_for2d( s,subvol_size, r,rd, (size_t)Nsimd,{  | ||||
|    | ||||
|       int n = s / e2; | ||||
|       int b = s % e2; | ||||
|       int so=r*ostride; // base offset for start of plane  | ||||
|       int ss= so+n*stride+b; | ||||
|  | ||||
|       coalescedWrite(rb_p[r*subvol_size+s], coalescedRead(Data_v[ss])); | ||||
|  | ||||
|   }); | ||||
|  | ||||
|   for (int r = 0; r < rd; r++) { | ||||
|       mysum[0] = vobj_zero; //dirty hack: cannot pass vobj_zero as identity to sycl::reduction as it is not device_copyable | ||||
|       theGridAccelerator->submit([&](cl::sycl::handler &cgh) { | ||||
|           auto Reduction = cl::sycl::reduction(mysum,std::plus<>()); | ||||
|           cgh.parallel_for(cl::sycl::range<1>{subvol_size}, | ||||
|           Reduction, | ||||
|           [=](cl::sycl::id<1> item, auto &sum) { | ||||
|               auto s = item[0]; | ||||
|               sum += rb_p[r*subvol_size+s]; | ||||
|           }); | ||||
|       }); | ||||
|       theGridAccelerator->wait(); | ||||
|       lvSum[r] = mysum[0]; | ||||
|   } | ||||
|    | ||||
|   free(mysum,*theGridAccelerator); | ||||
| } | ||||
| #endif | ||||
|  | ||||
| template<class vobj> inline void sliceSumReduction_cpu(const Lattice<vobj> &Data, Vector<vobj> &lvSum, const int &rd, const int &e1, const int &e2, const int &stride, const int &ostride, const int &Nsimd) | ||||
| { | ||||
|   // sum over reduced dimension planes, breaking out orthog dir | ||||
| @@ -195,13 +210,9 @@ template<class vobj> inline void sliceSumReduction_cpu(const Lattice<vobj> &Data | ||||
|  | ||||
| template<class vobj> inline void sliceSumReduction(const Lattice<vobj> &Data, Vector<vobj> &lvSum, const int &rd, const int &e1, const int &e2, const int &stride, const int &ostride, const int &Nsimd)  | ||||
| { | ||||
|   #if defined(GRID_CUDA) || defined(GRID_HIP) | ||||
|   #if defined(GRID_CUDA) || defined(GRID_HIP) || defined(GRID_SYCL) | ||||
|    | ||||
|   sliceSumReduction_cub(Data, lvSum, rd, e1, e2, stride, ostride, Nsimd); | ||||
|    | ||||
|   #elif defined(GRID_SYCL) | ||||
|    | ||||
|   sliceSumReduction_sycl(Data, lvSum, rd, e1, e2, stride, ostride, Nsimd); | ||||
|   sliceSumReduction_gpu(Data, lvSum, rd, e1, e2, stride, ostride, Nsimd); | ||||
|    | ||||
|   #else | ||||
|   sliceSumReduction_cpu(Data, lvSum, rd, e1, e2, stride, ostride, Nsimd); | ||||
|   | ||||
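The dispatch above keeps one "small" reduction path for site objects up to 256 bytes and decomposes larger ones word by word: each scalar word of the big object is gathered into a contiguous scratch buffer, reduced with the small-object kernel, and written back into the corresponding word of the result. A standalone sketch of that decomposition (serial stand-in for the small reduction):

#include <cstdio>
#include <vector>

// "Large object" made of many words; reduce an array of them one word at a time.
static const int WORDS = 12;
struct Big { double w[WORDS]; };

// Stand-in for the small-object reduction kernel: sum a plain array of doubles.
double reduce_small(const std::vector<double> &buf) {
  double s = 0; for (double x : buf) s += x; return s;
}

Big reduce_large(const std::vector<Big> &data) {
  Big result;
  std::vector<double> buf(data.size());
  for (int w = 0; w < WORDS; w++) {                                    // loop over words of the big object
    for (size_t i = 0; i < data.size(); i++) buf[i] = data[i].w[w];    // gather word w into the scratch buffer
    result.w[w] = reduce_small(buf);                                   // reuse the small-object reduction
  }
  return result;
}

int main() {
  std::vector<Big> data(10);
  for (size_t i = 0; i < data.size(); i++)
    for (int w = 0; w < WORDS; w++) data[i].w[w] = w;
  Big r = reduce_large(data);
  printf("word 3 sum = %f\n", r.w[3]);          // 10 * 3 = 30
  return 0;
}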
| @@ -276,18 +276,33 @@ inline void blockProject(Lattice<iVector<CComplex,nbasis > > &coarseData, | ||||
|  | ||||
|   autoView( coarseData_ , coarseData, AcceleratorWrite); | ||||
|   autoView( ip_         , ip,         AcceleratorWrite); | ||||
|   RealD t_IP=0; | ||||
|   RealD t_co=0; | ||||
|   RealD t_za=0; | ||||
|   for(int v=0;v<nbasis;v++) { | ||||
|     t_IP-=usecond(); | ||||
|     blockInnerProductD(ip,Basis[v],fineDataRed); // ip = <basis|fine> | ||||
|     t_IP+=usecond(); | ||||
|     t_co-=usecond(); | ||||
|     accelerator_for( sc, coarse->oSites(), vobj::Nsimd(), { | ||||
| 	convertType(coarseData_[sc](v),ip_[sc]); | ||||
|     }); | ||||
|     t_co+=usecond(); | ||||
|  | ||||
|     // improve numerical stability of projection | ||||
|     // |fine> = |fine> - <basis|fine> |basis> | ||||
|     ip=-ip; | ||||
|     t_za-=usecond(); | ||||
|     blockZAXPY(fineDataRed,ip,Basis[v],fineDataRed);  | ||||
|     t_za+=usecond(); | ||||
|   } | ||||
|   //  std::cout << GridLogPerformance << " blockProject : blockInnerProduct :  "<<t_IP<<" us"<<std::endl; | ||||
|   //  std::cout << GridLogPerformance << " blockProject : conv              :  "<<t_co<<" us"<<std::endl; | ||||
|   //  std::cout << GridLogPerformance << " blockProject : blockZaxpy        :  "<<t_za<<" us"<<std::endl; | ||||
| } | ||||
| // This only minimises data motion from CPU to GPU | ||||
| // there is a chance of a better implementation that does a v x k loop of inner products to share data | ||||
| // at the GPU thread level | ||||
| template<class vobj,class CComplex,int nbasis,class VLattice> | ||||
| inline void batchBlockProject(std::vector<Lattice<iVector<CComplex,nbasis>>> &coarseData, | ||||
|                                const std::vector<Lattice<vobj>> &fineData, | ||||
| @@ -393,8 +408,15 @@ template<class vobj,class CComplex> | ||||
|   Lattice<dotp> coarse_inner(coarse); | ||||
|  | ||||
|   // Precision promotion | ||||
|   RealD t; | ||||
|   t=-usecond(); | ||||
|   fine_inner = localInnerProductD<vobj>(fineX,fineY); | ||||
|   //  t+=usecond(); std::cout << GridLogPerformance << " blockInnerProduct : localInnerProductD "<<t<<" us"<<std::endl; | ||||
|    | ||||
|   t=-usecond(); | ||||
|   blockSum(coarse_inner,fine_inner); | ||||
|   //  t+=usecond(); std::cout << GridLogPerformance << " blockInnerProduct : blockSum "<<t<<" us"<<std::endl; | ||||
|   t=-usecond(); | ||||
|   { | ||||
|     autoView( CoarseInner_  , CoarseInner,AcceleratorWrite); | ||||
|     autoView( coarse_inner_ , coarse_inner,AcceleratorRead); | ||||
| @@ -402,6 +424,7 @@ template<class vobj,class CComplex> | ||||
|       convertType(CoarseInner_[ss], TensorRemove(coarse_inner_[ss])); | ||||
|     }); | ||||
|   } | ||||
|   //  t+=usecond(); std::cout << GridLogPerformance << " blockInnerProduct : convertType "<<t<<" us"<<std::endl; | ||||
|   | ||||
| } | ||||
|  | ||||
| @@ -444,6 +467,9 @@ inline void blockNormalise(Lattice<CComplex> &ip,Lattice<vobj> &fineX) | ||||
| template<class vobj> | ||||
| inline void blockSum(Lattice<vobj> &coarseData,const Lattice<vobj> &fineData)  | ||||
| { | ||||
|   const int maxsubsec=256; | ||||
|   typedef iVector<vobj,maxsubsec> vSubsec; | ||||
|  | ||||
|   GridBase * fine  = fineData.Grid(); | ||||
|   GridBase * coarse= coarseData.Grid(); | ||||
|  | ||||
| @@ -463,35 +489,62 @@ inline void blockSum(Lattice<vobj> &coarseData,const Lattice<vobj> &fineData) | ||||
|   autoView( coarseData_ , coarseData, AcceleratorWrite); | ||||
|   autoView( fineData_   , fineData, AcceleratorRead); | ||||
|  | ||||
|   auto coarseData_p = &coarseData_[0]; | ||||
|   auto fineData_p = &fineData_[0]; | ||||
|   auto coarseData_p  = &coarseData_[0]; | ||||
|   auto fineData_p    = &fineData_[0]; | ||||
|    | ||||
|   Coordinate fine_rdimensions = fine->_rdimensions; | ||||
|   Coordinate coarse_rdimensions = coarse->_rdimensions; | ||||
|  | ||||
|   accelerator_for(sc,coarse->oSites(),1,{ | ||||
|   vobj zz = Zero(); | ||||
|  | ||||
|   // Somewhat lazy calculation | ||||
|   // Find the biggest power of two subsection divisor less than or equal to maxsubsec | ||||
|   int subsec=maxsubsec; | ||||
|   int subvol; | ||||
|   subvol=blockVol/subsec; | ||||
|   while(subvol*subsec!=blockVol){ | ||||
|     subsec = subsec/2; | ||||
|     subvol=blockVol/subsec; | ||||
|   }; | ||||
|  | ||||
|   Lattice<vSubsec> coarseTmp(coarse); | ||||
|   autoView( coarseTmp_, coarseTmp, AcceleratorWriteDiscard); | ||||
|   auto coarseTmp_p= &coarseTmp_[0]; | ||||
|    | ||||
|   // Sum within subsecs in a first kernel | ||||
|   accelerator_for(sce,subsec*coarse->oSites(),vobj::Nsimd(),{ | ||||
|  | ||||
|       int sc=sce/subsec; | ||||
|       int e=sce%subsec; | ||||
|        | ||||
|       // One thread per sub block | ||||
|       Coordinate coor_c(_ndimension); | ||||
|       Lexicographic::CoorFromIndex(coor_c,sc,coarse_rdimensions);  // Block coordinate | ||||
|  | ||||
|       vobj cd = Zero(); | ||||
|        | ||||
|       for(int sb=0;sb<blockVol;sb++){ | ||||
|  | ||||
|       auto cd = coalescedRead(zz); | ||||
|       for(int sb=e*subvol;sb<MIN((e+1)*subvol,blockVol);sb++){ | ||||
| 	int sf; | ||||
| 	Coordinate coor_b(_ndimension); | ||||
| 	Coordinate coor_f(_ndimension); | ||||
| 	Lexicographic::CoorFromIndex(coor_b,sb,block_r);               // Block sub coordinate | ||||
| 	for(int d=0;d<_ndimension;d++) coor_f[d]=coor_c[d]*block_r[d] + coor_b[d]; | ||||
| 	Lexicographic::IndexFromCoor(coor_f,sf,fine_rdimensions); | ||||
|  | ||||
| 	cd=cd+fineData_p[sf]; | ||||
| 	 | ||||
| 	cd=cd+coalescedRead(fineData_p[sf]); | ||||
|       } | ||||
|  | ||||
|       coarseData_p[sc] = cd; | ||||
|       coalescedWrite(coarseTmp_[sc](e),cd); | ||||
|  | ||||
|     }); | ||||
|    // Sum across subsecs in a second kernel | ||||
|    accelerator_for(sc,coarse->oSites(),vobj::Nsimd(),{ | ||||
|       auto cd = coalescedRead(coarseTmp_p[sc](0)); | ||||
|       for(int e=1;e<subsec;e++){ | ||||
| 	cd=cd+coalescedRead(coarseTmp_p[sc](e)); | ||||
|       } | ||||
|       coalescedWrite(coarseData_p[sc],cd); | ||||
|    }); | ||||
|  | ||||
|   return; | ||||
| } | ||||
|  | ||||
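The reworked blockSum exposes more parallelism by splitting each coarse block's sum over blockVol fine sites into subsec partial sums in a first kernel and combining the partials per coarse site in a second kernel; subsec is the largest power-of-two divisor of blockVol not exceeding maxsubsec, so the subsections tile the block exactly. A serial sketch of the same two-phase reduction:

#include <cstdio>
#include <vector>

int main() {
  const int blockVol  = 24;          // fine sites per coarse block
  const int maxsubsec = 16;          // cap on subsections (256 in the code above)

  // Largest power-of-two divisor of blockVol that is <= maxsubsec.
  int subsec = maxsubsec, subvol = blockVol / subsec;
  while (subvol * subsec != blockVol) { subsec /= 2; subvol = blockVol / subsec; }
  printf("subsec=%d subvol=%d\n", subsec, subvol);   // 16 does not divide 24 -> subsec=8, subvol=3

  std::vector<double> fine(blockVol, 1.0);

  // Phase 1: one partial sum per subsection (independent work items on the GPU).
  std::vector<double> partial(subsec, 0.0);
  for (int e = 0; e < subsec; e++)
    for (int sb = e*subvol; sb < (e+1)*subvol; sb++) partial[e] += fine[sb];

  // Phase 2: sum the partials into the coarse value.
  double coarse = 0.0;
  for (int e = 0; e < subsec; e++) coarse += partial[e];
  printf("coarse=%f (expect %d)\n", coarse, blockVol);
  return 0;
}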
| @@ -548,7 +601,7 @@ inline void blockOrthogonalise(Lattice<CComplex> &ip,std::vector<Lattice<vobj> > | ||||
|   blockOrthonormalize(ip,Basis); | ||||
| } | ||||
|  | ||||
| #if 0 | ||||
| #ifdef GRID_ACCELERATED | ||||
| // TODO: CPU optimized version here | ||||
| template<class vobj,class CComplex,int nbasis> | ||||
| inline void blockPromote(const Lattice<iVector<CComplex,nbasis > > &coarseData, | ||||
| @@ -574,26 +627,37 @@ inline void blockPromote(const Lattice<iVector<CComplex,nbasis > > &coarseData, | ||||
|   autoView( fineData_   , fineData, AcceleratorWrite); | ||||
|   autoView( coarseData_ , coarseData, AcceleratorRead); | ||||
|  | ||||
|   typedef LatticeView<vobj> Vview; | ||||
|   std::vector<Vview> AcceleratorVecViewContainer_h;  | ||||
|   for(int v=0;v<nbasis;v++) { | ||||
|     AcceleratorVecViewContainer_h.push_back(Basis[v].View(AcceleratorRead)); | ||||
|   } | ||||
|   static deviceVector<Vview> AcceleratorVecViewContainer; AcceleratorVecViewContainer.resize(nbasis);  | ||||
|   acceleratorCopyToDevice(&AcceleratorVecViewContainer_h[0],&AcceleratorVecViewContainer[0],nbasis *sizeof(Vview)); | ||||
|   auto Basis_p = &AcceleratorVecViewContainer[0]; | ||||
|   // Loop with a cache friendly loop ordering | ||||
|   accelerator_for(sf,fine->oSites(),1,{ | ||||
|   Coordinate frdimensions=fine->_rdimensions; | ||||
|   Coordinate crdimensions=coarse->_rdimensions; | ||||
|   accelerator_for(sf,fine->oSites(),vobj::Nsimd(),{ | ||||
|     int sc; | ||||
|     Coordinate coor_c(_ndimension); | ||||
|     Coordinate coor_f(_ndimension); | ||||
|  | ||||
|     Lexicographic::CoorFromIndex(coor_f,sf,fine->_rdimensions); | ||||
|     Lexicographic::CoorFromIndex(coor_f,sf,frdimensions); | ||||
|     for(int d=0;d<_ndimension;d++) coor_c[d]=coor_f[d]/block_r[d]; | ||||
|     Lexicographic::IndexFromCoor(coor_c,sc,coarse->_rdimensions); | ||||
|     Lexicographic::IndexFromCoor(coor_c,sc,crdimensions); | ||||
|  | ||||
|     for(int i=0;i<nbasis;i++) { | ||||
|       /*      auto basis_ = Basis[i],  );*/ | ||||
|       if(i==0) fineData_[sf]=coarseData_[sc](i) *basis_[sf]); | ||||
|       else     fineData_[sf]=fineData_[sf]+coarseData_[sc](i)*basis_[sf]); | ||||
|     } | ||||
|     auto sum= coarseData_(sc)(0) *Basis_p[0](sf); | ||||
|     for(int i=1;i<nbasis;i++) sum = sum + coarseData_(sc)(i)*Basis_p[i](sf); | ||||
|     coalescedWrite(fineData_[sf],sum); | ||||
|   }); | ||||
|   for(int v=0;v<nbasis;v++) { | ||||
|     AcceleratorVecViewContainer_h[v].ViewClose(); | ||||
|   } | ||||
|   return; | ||||
|    | ||||
| } | ||||
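The GPU blockPromote needs its kernel to index a run-time number of basis fields, so it opens a host-side vector of views, copies that array of views to the device, and hands the device pointer to the kernel; the views are closed again afterwards. A minimal standalone sketch of the "array of handles on the device" pattern, with memcpy standing in for the device copy (illustrative names, not Grid's API):

#include <cstdio>
#include <cstring>
#include <vector>

// A lightweight, trivially copyable "view" onto a field: just a pointer + size.
struct View { const double *data; int n; };

// Stand-in for the accelerator copy routine: host and "device" share memory here.
void copy_to_device(const void *from, void *to, size_t bytes) { std::memcpy(to, from, bytes); }

int main() {
  const int nbasis = 3, n = 4;
  std::vector<std::vector<double>> basis(nbasis, std::vector<double>(n, 1.0));

  // Host-side array of views, one per basis vector.
  std::vector<View> views_h;
  for (int v = 0; v < nbasis; v++) views_h.push_back({basis[v].data(), n});

  // "Device" copy of the array of views; a kernel could now loop i<nbasis over views_d[i].
  std::vector<View> views_d(nbasis);
  copy_to_device(views_h.data(), views_d.data(), nbasis*sizeof(View));

  double sum = 0;
  for (int i = 0; i < nbasis; i++)              // stands in for the per-site basis loop in the kernel
    for (int s = 0; s < views_d[i].n; s++) sum += views_d[i].data[s];
  printf("sum=%f\n", sum);                      // 12
  return 0;
}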
| #else | ||||
| // CPU version | ||||
| template<class vobj,class CComplex,int nbasis,class VLattice> | ||||
| inline void blockPromote(const Lattice<iVector<CComplex,nbasis > > &coarseData, | ||||
| 			 Lattice<vobj>   &fineData, | ||||
| @@ -680,7 +744,11 @@ void localCopyRegion(const Lattice<vobj> &From,Lattice<vobj> & To,Coordinate Fro | ||||
|   typedef typename vobj::scalar_type scalar_type; | ||||
|   typedef typename vobj::vector_type vector_type; | ||||
|  | ||||
|   static const int words=sizeof(vobj)/sizeof(vector_type); | ||||
|   const int words=sizeof(vobj)/sizeof(vector_type); | ||||
|  | ||||
|   ////////////////////////////////////////////////////////////////////////////////////////// | ||||
|   // checks should guarantee that the operations are local | ||||
|   ////////////////////////////////////////////////////////////////////////////////////////// | ||||
|  | ||||
|   GridBase *Fg = From.Grid(); | ||||
|   GridBase *Tg = To.Grid(); | ||||
| @@ -695,52 +763,38 @@ void localCopyRegion(const Lattice<vobj> &From,Lattice<vobj> & To,Coordinate Fro | ||||
|   for(int d=0;d<nd;d++){ | ||||
|     assert(Fg->_processors[d]  == Tg->_processors[d]); | ||||
|   } | ||||
|   // the above should guarantee that the operations are local | ||||
|    | ||||
| #if 1 | ||||
|  | ||||
|   /////////////////////////////////////////////////////////// | ||||
|   // do the index calc on the GPU | ||||
|   /////////////////////////////////////////////////////////// | ||||
|   Coordinate f_ostride = Fg->_ostride; | ||||
|   Coordinate f_istride = Fg->_istride; | ||||
|   Coordinate f_rdimensions = Fg->_rdimensions; | ||||
|   Coordinate t_ostride = Tg->_ostride; | ||||
|   Coordinate t_istride = Tg->_istride; | ||||
|   Coordinate t_rdimensions = Tg->_rdimensions; | ||||
|  | ||||
|   size_t nsite = 1; | ||||
|   for(int i=0;i<nd;i++) nsite *= RegionSize[i]; | ||||
|    | ||||
|   size_t tbytes = 4*nsite*sizeof(int); | ||||
|   int *table = (int*)malloc(tbytes); | ||||
|   | ||||
|   thread_for(idx, nsite, { | ||||
|       Coordinate from_coor, to_coor; | ||||
|       size_t rem = idx; | ||||
|       for(int i=0;i<nd;i++){ | ||||
| 	size_t base_i  = rem % RegionSize[i]; rem /= RegionSize[i]; | ||||
| 	from_coor[i] = base_i + FromLowerLeft[i]; | ||||
| 	to_coor[i] = base_i + ToLowerLeft[i]; | ||||
|       } | ||||
|        | ||||
|       int foidx = Fg->oIndex(from_coor); | ||||
|       int fiidx = Fg->iIndex(from_coor); | ||||
|       int toidx = Tg->oIndex(to_coor); | ||||
|       int tiidx = Tg->iIndex(to_coor); | ||||
|       int* tt = table + 4*idx; | ||||
|       tt[0] = foidx; | ||||
|       tt[1] = fiidx; | ||||
|       tt[2] = toidx; | ||||
|       tt[3] = tiidx; | ||||
|     }); | ||||
|    | ||||
|   int* table_d = (int*)acceleratorAllocDevice(tbytes); | ||||
|   acceleratorCopyToDevice(table,table_d,tbytes); | ||||
|  | ||||
|   typedef typename vobj::vector_type vector_type; | ||||
|   typedef typename vobj::scalar_type scalar_type; | ||||
|  | ||||
|   autoView(from_v,From,AcceleratorRead); | ||||
|   autoView(to_v,To,AcceleratorWrite); | ||||
|    | ||||
|  | ||||
|   accelerator_for(idx,nsite,1,{ | ||||
|       static const int words=sizeof(vobj)/sizeof(vector_type); | ||||
|       int* tt = table_d + 4*idx; | ||||
|       int from_oidx = *tt++; | ||||
|       int from_lane = *tt++; | ||||
|       int to_oidx = *tt++; | ||||
|       int to_lane = *tt; | ||||
|  | ||||
|       Coordinate from_coor, to_coor, base; | ||||
|       Lexicographic::CoorFromIndex(base,idx,RegionSize); | ||||
|       for(int i=0;i<nd;i++){ | ||||
| 	from_coor[i] = base[i] + FromLowerLeft[i]; | ||||
| 	to_coor[i] = base[i] + ToLowerLeft[i]; | ||||
|       } | ||||
|       int from_oidx = 0; for(int d=0;d<nd;d++) from_oidx+=f_ostride[d]*(from_coor[d]%f_rdimensions[d]); | ||||
|       int from_lane = 0; for(int d=0;d<nd;d++) from_lane+=f_istride[d]*(from_coor[d]/f_rdimensions[d]); | ||||
|       int to_oidx   = 0; for(int d=0;d<nd;d++) to_oidx+=t_ostride[d]*(to_coor[d]%t_rdimensions[d]); | ||||
|       int to_lane   = 0; for(int d=0;d<nd;d++) to_lane+=t_istride[d]*(to_coor[d]/t_rdimensions[d]); | ||||
|  | ||||
|       const vector_type* from = (const vector_type *)&from_v[from_oidx]; | ||||
|       vector_type* to = (vector_type *)&to_v[to_oidx]; | ||||
| @@ -750,56 +804,146 @@ void localCopyRegion(const Lattice<vobj> &From,Lattice<vobj> & To,Coordinate Fro | ||||
| 	stmp = getlane(from[w], from_lane); | ||||
| 	putlane(to[w], stmp, to_lane); | ||||
|       } | ||||
|     }); | ||||
|    | ||||
|   acceleratorFreeDevice(table_d);     | ||||
|   free(table); | ||||
|    | ||||
|  | ||||
| #else   | ||||
|   Coordinate ldf = Fg->_ldimensions; | ||||
|   Coordinate rdf = Fg->_rdimensions; | ||||
|   Coordinate isf = Fg->_istride; | ||||
|   Coordinate osf = Fg->_ostride; | ||||
|   Coordinate rdt = Tg->_rdimensions; | ||||
|   Coordinate ist = Tg->_istride; | ||||
|   Coordinate ost = Tg->_ostride; | ||||
|  | ||||
|   autoView( t_v , To, CpuWrite); | ||||
|   autoView( f_v , From, CpuRead); | ||||
|   thread_for(idx,Fg->lSites(),{ | ||||
|     sobj s; | ||||
|     Coordinate Fcoor(nd); | ||||
|     Coordinate Tcoor(nd); | ||||
|     Lexicographic::CoorFromIndex(Fcoor,idx,ldf); | ||||
|     int in_region=1; | ||||
|     for(int d=0;d<nd;d++){ | ||||
|       if ( (Fcoor[d] < FromLowerLeft[d]) || (Fcoor[d]>=FromLowerLeft[d]+RegionSize[d]) ){  | ||||
| 	in_region=0; | ||||
|       } | ||||
|       Tcoor[d] = ToLowerLeft[d]+ Fcoor[d]-FromLowerLeft[d]; | ||||
|     } | ||||
|     if (in_region) { | ||||
| #if 0       | ||||
|       Integer idx_f = 0; for(int d=0;d<nd;d++) idx_f+=isf[d]*(Fcoor[d]/rdf[d]); // inner index from | ||||
|       Integer idx_t = 0; for(int d=0;d<nd;d++) idx_t+=ist[d]*(Tcoor[d]/rdt[d]); // inner index to | ||||
|       Integer odx_f = 0; for(int d=0;d<nd;d++) odx_f+=osf[d]*(Fcoor[d]%rdf[d]); // outer index from | ||||
|       Integer odx_t = 0; for(int d=0;d<nd;d++) odx_t+=ost[d]*(Tcoor[d]%rdt[d]); // outer index to | ||||
|       scalar_type * fp = (scalar_type *)&f_v[odx_f]; | ||||
|       scalar_type * tp = (scalar_type *)&t_v[odx_t]; | ||||
|       for(int w=0;w<words;w++){ | ||||
| 	tp[w].putlane(fp[w].getlane(idx_f),idx_t); | ||||
|       } | ||||
| #else | ||||
|     peekLocalSite(s,f_v,Fcoor); | ||||
|     pokeLocalSite(s,t_v,Tcoor); | ||||
| #endif | ||||
|     } | ||||
|   }); | ||||
|  | ||||
| #endif | ||||
| } | ||||
|  | ||||
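Instead of building a host-side table of (outer index, SIMD lane) pairs and copying it to the device, the new localCopyRegion computes them in the kernel directly from the grid strides: the outer index accumulates ostride[d]*(coor[d] % rdim[d]) and the lane accumulates istride[d]*(coor[d] / rdim[d]). A tiny standalone sketch of that arithmetic for one coordinate (made-up 2-d strides):

#include <cstdio>
#include <vector>

// Decompose a local coordinate into (outer site index, SIMD lane) given the
// reduced dimensions, outer strides and inner (SIMD) strides of a grid.
void coor_to_osite_lane(const std::vector<int> &coor,
                        const std::vector<int> &rdim,
                        const std::vector<int> &ostride,
                        const std::vector<int> &istride,
                        int &osite, int &lane)
{
  osite = 0; lane = 0;
  for (size_t d = 0; d < coor.size(); d++) {
    osite += ostride[d] * (coor[d] % rdim[d]);   // position inside the SIMD-reduced volume
    lane  += istride[d] * (coor[d] / rdim[d]);   // which SIMD lane holds this site
  }
}

int main() {
  // Hypothetical 2-d example: local volume 8x8 with 2x2 SIMD layout -> reduced 4x4.
  std::vector<int> rdim    = {4, 4};
  std::vector<int> ostride = {1, 4};   // lexicographic strides over the reduced volume
  std::vector<int> istride = {1, 2};   // strides over the 2x2 SIMD lanes
  int osite, lane;
  coor_to_osite_lane({5, 6}, rdim, ostride, istride, osite, lane);
  printf("osite=%d lane=%d\n", osite, lane);     // osite = 1 + 4*2 = 9, lane = 1 + 2*1 = 3
  return 0;
}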
| template<class vobj> | ||||
| void InsertSliceFast(const Lattice<vobj> &From,Lattice<vobj> & To,int slice, int orthog) | ||||
| { | ||||
|   typedef typename vobj::scalar_object sobj; | ||||
|   typedef typename vobj::scalar_type scalar_type; | ||||
|   typedef typename vobj::vector_type vector_type; | ||||
|  | ||||
|   const int words=sizeof(vobj)/sizeof(vector_type); | ||||
|  | ||||
|   ////////////////////////////////////////////////////////////////////////////////////////// | ||||
|   // checks should guarantee that the operations are local | ||||
|   ////////////////////////////////////////////////////////////////////////////////////////// | ||||
|   GridBase *Fg = From.Grid(); | ||||
|   GridBase *Tg = To.Grid(); | ||||
|   assert(!Fg->_isCheckerBoarded); | ||||
|   assert(!Tg->_isCheckerBoarded); | ||||
|   int Nsimd = Fg->Nsimd(); | ||||
|   int nF = Fg->_ndimension; | ||||
|   int nT = Tg->_ndimension; | ||||
|   assert(nF+1 == nT); | ||||
|  | ||||
|   /////////////////////////////////////////////////////////// | ||||
|   // do the index calc on the GPU | ||||
|   /////////////////////////////////////////////////////////// | ||||
|   Coordinate f_ostride = Fg->_ostride; | ||||
|   Coordinate f_istride = Fg->_istride; | ||||
|   Coordinate f_rdimensions = Fg->_rdimensions; | ||||
|   Coordinate t_ostride = Tg->_ostride; | ||||
|   Coordinate t_istride = Tg->_istride; | ||||
|   Coordinate t_rdimensions = Tg->_rdimensions; | ||||
|   Coordinate RegionSize = Fg->_ldimensions; | ||||
|   size_t nsite = 1; | ||||
|   for(int i=0;i<nF;i++) nsite *= RegionSize[i]; // whole volume of lower dim grid | ||||
|  | ||||
|   typedef typename vobj::vector_type vector_type; | ||||
|   typedef typename vobj::scalar_type scalar_type; | ||||
|  | ||||
|   autoView(from_v,From,AcceleratorRead); | ||||
|   autoView(to_v,To,AcceleratorWrite); | ||||
|  | ||||
|   accelerator_for(idx,nsite,1,{ | ||||
|  | ||||
|       Coordinate from_coor(nF), to_coor(nT); | ||||
|       Lexicographic::CoorFromIndex(from_coor,idx,RegionSize); | ||||
|       int j=0; | ||||
|       for(int i=0;i<nT;i++){ | ||||
| 	if ( i!=orthog ) {  | ||||
| 	  to_coor[i] = from_coor[j]; | ||||
| 	  j++; | ||||
| 	} else { | ||||
| 	  to_coor[i] = slice; | ||||
| 	} | ||||
|       } | ||||
|       int from_oidx = 0; for(int d=0;d<nF;d++) from_oidx+=f_ostride[d]*(from_coor[d]%f_rdimensions[d]); | ||||
|       int from_lane = 0; for(int d=0;d<nF;d++) from_lane+=f_istride[d]*(from_coor[d]/f_rdimensions[d]); | ||||
|       int to_oidx   = 0; for(int d=0;d<nT;d++) to_oidx+=t_ostride[d]*(to_coor[d]%t_rdimensions[d]); | ||||
|       int to_lane   = 0; for(int d=0;d<nT;d++) to_lane+=t_istride[d]*(to_coor[d]/t_rdimensions[d]); | ||||
|  | ||||
|       const vector_type* from = (const vector_type *)&from_v[from_oidx]; | ||||
|       vector_type* to = (vector_type *)&to_v[to_oidx]; | ||||
|        | ||||
|       scalar_type stmp; | ||||
|       for(int w=0;w<words;w++){ | ||||
| 	stmp = getlane(from[w], from_lane); | ||||
| 	putlane(to[w], stmp, to_lane); | ||||
|       } | ||||
|   }); | ||||
| } | ||||
|  | ||||
| template<class vobj> | ||||
| void ExtractSliceFast(Lattice<vobj> &To,const Lattice<vobj> & From,int slice, int orthog) | ||||
| { | ||||
|   typedef typename vobj::scalar_object sobj; | ||||
|   typedef typename vobj::scalar_type scalar_type; | ||||
|   typedef typename vobj::vector_type vector_type; | ||||
|  | ||||
|   const int words=sizeof(vobj)/sizeof(vector_type); | ||||
|  | ||||
|   ////////////////////////////////////////////////////////////////////////////////////////// | ||||
|   // checks should guarantee that the operations are local | ||||
|   ////////////////////////////////////////////////////////////////////////////////////////// | ||||
|   GridBase *Fg = From.Grid(); | ||||
|   GridBase *Tg = To.Grid(); | ||||
|   assert(!Fg->_isCheckerBoarded); | ||||
|   assert(!Tg->_isCheckerBoarded); | ||||
|   int Nsimd = Fg->Nsimd(); | ||||
|   int nF = Fg->_ndimension; | ||||
|   int nT = Tg->_ndimension; | ||||
|   assert(nT+1 == nF); | ||||
|  | ||||
|   /////////////////////////////////////////////////////////// | ||||
|   // do the index calc on the GPU | ||||
|   /////////////////////////////////////////////////////////// | ||||
|   Coordinate f_ostride = Fg->_ostride; | ||||
|   Coordinate f_istride = Fg->_istride; | ||||
|   Coordinate f_rdimensions = Fg->_rdimensions; | ||||
|   Coordinate t_ostride = Tg->_ostride; | ||||
|   Coordinate t_istride = Tg->_istride; | ||||
|   Coordinate t_rdimensions = Tg->_rdimensions; | ||||
|   Coordinate RegionSize = Tg->_ldimensions; | ||||
|   size_t nsite = 1; | ||||
|   for(int i=0;i<nT;i++) nsite *= RegionSize[i]; // whole volume of lower dim grid | ||||
|  | ||||
|   typedef typename vobj::vector_type vector_type; | ||||
|   typedef typename vobj::scalar_type scalar_type; | ||||
|  | ||||
|   autoView(from_v,From,AcceleratorRead); | ||||
|   autoView(to_v,To,AcceleratorWrite); | ||||
|  | ||||
|   accelerator_for(idx,nsite,1,{ | ||||
|  | ||||
|       Coordinate from_coor(nF), to_coor(nT); | ||||
|       Lexicographic::CoorFromIndex(to_coor,idx,RegionSize); | ||||
|       int j=0; | ||||
|       for(int i=0;i<nF;i++){ | ||||
| 	if ( i!=orthog ) {  | ||||
| 	  from_coor[i] = to_coor[j]; | ||||
| 	  j++; | ||||
| 	} else { | ||||
| 	  from_coor[i] = slice; | ||||
| 	} | ||||
|       } | ||||
|       int from_oidx = 0; for(int d=0;d<nF;d++) from_oidx+=f_ostride[d]*(from_coor[d]%f_rdimensions[d]); | ||||
|       int from_lane = 0; for(int d=0;d<nF;d++) from_lane+=f_istride[d]*(from_coor[d]/f_rdimensions[d]); | ||||
|       int to_oidx   = 0; for(int d=0;d<nT;d++) to_oidx+=t_ostride[d]*(to_coor[d]%t_rdimensions[d]); | ||||
|       int to_lane   = 0; for(int d=0;d<nT;d++) to_lane+=t_istride[d]*(to_coor[d]/t_rdimensions[d]); | ||||
|  | ||||
|       const vector_type* from = (const vector_type *)&from_v[from_oidx]; | ||||
|       vector_type* to = (vector_type *)&to_v[to_oidx]; | ||||
|        | ||||
|       scalar_type stmp; | ||||
|       for(int w=0;w<words;w++){ | ||||
| 	stmp = getlane(from[w], from_lane); | ||||
| 	putlane(to[w], stmp, to_lane); | ||||
|       } | ||||
|   }); | ||||
| } | ||||
|  | ||||
| template<class vobj> | ||||
| void InsertSlice(const Lattice<vobj> &lowDim,Lattice<vobj> & higherDim,int slice, int orthog) | ||||
| @@ -889,9 +1033,7 @@ void ExtractSlice(Lattice<vobj> &lowDim,const Lattice<vobj> & higherDim,int slic | ||||
|  | ||||
| } | ||||
|  | ||||
|  | ||||
| //Insert subvolume orthogonal to direction 'orthog' with slice index 'slice_lo' from 'lowDim' onto slice index 'slice_hi' of higherDim | ||||
| //The local dimensions of both 'lowDim' and 'higherDim' orthogonal to 'orthog' should be the same | ||||
| //Can I implement with local copyregion?? | ||||
| template<class vobj> | ||||
| void InsertSliceLocal(const Lattice<vobj> &lowDim, Lattice<vobj> & higherDim,int slice_lo,int slice_hi, int orthog) | ||||
| { | ||||
| @@ -912,121 +1054,18 @@ void InsertSliceLocal(const Lattice<vobj> &lowDim, Lattice<vobj> & higherDim,int | ||||
|       assert(lg->_ldimensions[d] == hg->_ldimensions[d]); | ||||
|     } | ||||
|   } | ||||
|  | ||||
| #if 1 | ||||
|   size_t nsite = lg->lSites()/lg->LocalDimensions()[orthog]; | ||||
|   size_t tbytes = 4*nsite*sizeof(int); | ||||
|   int *table = (int*)malloc(tbytes); | ||||
|    | ||||
|   thread_for(idx,nsite,{ | ||||
|     Coordinate lcoor(nl); | ||||
|     Coordinate hcoor(nh); | ||||
|     lcoor[orthog] = slice_lo; | ||||
|     hcoor[orthog] = slice_hi; | ||||
|     size_t rem = idx; | ||||
|     for(int mu=0;mu<nl;mu++){ | ||||
|       if(mu != orthog){ | ||||
| 	int xmu = rem % lg->LocalDimensions()[mu];  rem /= lg->LocalDimensions()[mu]; | ||||
| 	lcoor[mu] = hcoor[mu] = xmu; | ||||
|       } | ||||
|     } | ||||
|     int loidx = lg->oIndex(lcoor); | ||||
|     int liidx = lg->iIndex(lcoor); | ||||
|     int hoidx = hg->oIndex(hcoor); | ||||
|     int hiidx = hg->iIndex(hcoor); | ||||
|     int* tt = table + 4*idx; | ||||
|     tt[0] = loidx; | ||||
|     tt[1] = liidx; | ||||
|     tt[2] = hoidx; | ||||
|     tt[3] = hiidx; | ||||
|     }); | ||||
|     | ||||
|   int* table_d = (int*)acceleratorAllocDevice(tbytes); | ||||
|   acceleratorCopyToDevice(table,table_d,tbytes); | ||||
|  | ||||
|   typedef typename vobj::vector_type vector_type; | ||||
|   typedef typename vobj::scalar_type scalar_type; | ||||
|  | ||||
|   autoView(lowDim_v,lowDim,AcceleratorRead); | ||||
|   autoView(higherDim_v,higherDim,AcceleratorWrite); | ||||
|    | ||||
|   accelerator_for(idx,nsite,1,{ | ||||
|       static const int words=sizeof(vobj)/sizeof(vector_type); | ||||
|       int* tt = table_d + 4*idx; | ||||
|       int from_oidx = *tt++; | ||||
|       int from_lane = *tt++; | ||||
|       int to_oidx = *tt++; | ||||
|       int to_lane = *tt; | ||||
|  | ||||
|       const vector_type* from = (const vector_type *)&lowDim_v[from_oidx]; | ||||
|       vector_type* to = (vector_type *)&higherDim_v[to_oidx]; | ||||
|        | ||||
|       scalar_type stmp; | ||||
|       for(int w=0;w<words;w++){ | ||||
| 	stmp = getlane(from[w], from_lane); | ||||
| 	putlane(to[w], stmp, to_lane); | ||||
|       } | ||||
|     }); | ||||
|    | ||||
|   acceleratorFreeDevice(table_d);     | ||||
|   free(table); | ||||
|    | ||||
| #else | ||||
|   // the above should guarantee that the operations are local | ||||
|   autoView(lowDimv,lowDim,CpuRead); | ||||
|   autoView(higherDimv,higherDim,CpuWrite); | ||||
|   thread_for(idx,lg->lSites(),{ | ||||
|     sobj s; | ||||
|     Coordinate lcoor(nl); | ||||
|     Coordinate hcoor(nh); | ||||
|     lg->LocalIndexToLocalCoor(idx,lcoor); | ||||
|     if( lcoor[orthog] == slice_lo ) {  | ||||
|       hcoor=lcoor; | ||||
|       hcoor[orthog] = slice_hi; | ||||
|       peekLocalSite(s,lowDimv,lcoor); | ||||
|       pokeLocalSite(s,higherDimv,hcoor); | ||||
|     } | ||||
|   }); | ||||
| #endif | ||||
|   Coordinate sz = lg->_ldimensions; | ||||
|   sz[orthog]=1; | ||||
|   Coordinate f_ll(nl,0); f_ll[orthog]=slice_lo; | ||||
|   Coordinate t_ll(nh,0); t_ll[orthog]=slice_hi; | ||||
|   localCopyRegion(lowDim,higherDim,f_ll,t_ll,sz); | ||||
| } | ||||
|  | ||||
|  | ||||
| template<class vobj> | ||||
| void ExtractSliceLocal(Lattice<vobj> &lowDim,const Lattice<vobj> & higherDim,int slice_lo,int slice_hi, int orthog) | ||||
| { | ||||
|   typedef typename vobj::scalar_object sobj; | ||||
|  | ||||
|   GridBase *lg = lowDim.Grid(); | ||||
|   GridBase *hg = higherDim.Grid(); | ||||
|   int nl = lg->_ndimension; | ||||
|   int nh = hg->_ndimension; | ||||
|  | ||||
|   assert(nl == nh); | ||||
|   assert(orthog<nh); | ||||
|   assert(orthog>=0); | ||||
|  | ||||
|   for(int d=0;d<nh;d++){ | ||||
|     if ( d!=orthog ) { | ||||
|     assert(lg->_processors[d]  == hg->_processors[d]); | ||||
|     assert(lg->_ldimensions[d] == hg->_ldimensions[d]); | ||||
|   } | ||||
|   } | ||||
|  | ||||
|   // the above should guarantee that the operations are local | ||||
|   autoView(lowDimv,lowDim,CpuWrite); | ||||
|   autoView(higherDimv,higherDim,CpuRead); | ||||
|   thread_for(idx,lg->lSites(),{ | ||||
|     sobj s; | ||||
|     Coordinate lcoor(nl); | ||||
|     Coordinate hcoor(nh); | ||||
|     lg->LocalIndexToLocalCoor(idx,lcoor); | ||||
|     if( lcoor[orthog] == slice_lo ) {  | ||||
|       hcoor=lcoor; | ||||
|       hcoor[orthog] = slice_hi; | ||||
|       peekLocalSite(s,higherDimv,hcoor); | ||||
|       pokeLocalSite(s,lowDimv,lcoor); | ||||
|     } | ||||
|   }); | ||||
|   InsertSliceLocal(higherDim,lowDim,slice_hi,slice_lo,orthog); | ||||
| } | ||||
|  | ||||
|  | ||||
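Both slice routines are now thin wrappers over localCopyRegion: the region size is the local volume collapsed to one site in the orthogonal direction, the lower-left corners select the source and destination slices, and ExtractSliceLocal is simply InsertSliceLocal with its arguments swapped. A sketch of how those region parameters are formed (hypothetical 4-d local dimensions, not Grid code):

#include <cstdio>
#include <vector>

int main() {
  // Local dimensions of the lattice and the slice bookkeeping used above.
  std::vector<int> ldims = {8, 8, 8, 16};
  int orthog   = 3;        // copy a time slice
  int slice_lo = 2;        // source slice in the low-dimensional field
  int slice_hi = 5;        // destination slice in the higher-dimensional field

  std::vector<int> RegionSize = ldims;  RegionSize[orthog] = 1;          // one slice thick
  std::vector<int> FromLowerLeft(4, 0); FromLowerLeft[orthog] = slice_lo;
  std::vector<int> ToLowerLeft(4, 0);   ToLowerLeft[orthog]   = slice_hi;

  // localCopyRegion(lowDim, higherDim, FromLowerLeft, ToLowerLeft, RegionSize)
  // then copies the 8x8x8x1 region; extracting a slice is the same call with
  // the from/to roles exchanged.
  printf("region = %dx%dx%dx%d\n",
         RegionSize[0], RegionSize[1], RegionSize[2], RegionSize[3]);
  return 0;
}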
| @@ -1052,7 +1091,7 @@ void Replicate(const Lattice<vobj> &coarse,Lattice<vobj> & fine) | ||||
|  | ||||
|   Coordinate fcoor(nd); | ||||
|   Coordinate ccoor(nd); | ||||
|   for(int g=0;g<fg->gSites();g++){ | ||||
|   for(int64_t g=0;g<fg->gSites();g++){ | ||||
|  | ||||
|     fg->GlobalIndexToGlobalCoor(g,fcoor); | ||||
|     for(int d=0;d<nd;d++){ | ||||
| @@ -1738,5 +1777,35 @@ void Grid_unsplit(std::vector<Lattice<Vobj> > & full,Lattice<Vobj>   & split) | ||||
|   } | ||||
| } | ||||
|  | ||||
| ////////////////////////////////////////////////////// | ||||
| // Faster but less accurate blockProject | ||||
| ////////////////////////////////////////////////////// | ||||
| template<class vobj,class CComplex,int nbasis,class VLattice> | ||||
| inline void blockProjectFast(Lattice<iVector<CComplex,nbasis > > &coarseData, | ||||
| 			     const             Lattice<vobj>   &fineData, | ||||
| 			     const VLattice &Basis) | ||||
| { | ||||
|   GridBase * fine  = fineData.Grid(); | ||||
|   GridBase * coarse= coarseData.Grid(); | ||||
|  | ||||
|   Lattice<iScalar<CComplex> > ip(coarse); | ||||
|  | ||||
|   autoView( coarseData_ , coarseData, AcceleratorWrite); | ||||
|   autoView( ip_         , ip,         AcceleratorWrite); | ||||
|   RealD t_IP=0; | ||||
|   RealD t_co=0; | ||||
|   for(int v=0;v<nbasis;v++) { | ||||
|     t_IP-=usecond(); | ||||
|     blockInnerProductD(ip,Basis[v],fineData);  | ||||
|     t_IP+=usecond(); | ||||
|     t_co-=usecond(); | ||||
|     accelerator_for( sc, coarse->oSites(), vobj::Nsimd(), { | ||||
| 	convertType(coarseData_[sc](v),ip_[sc]); | ||||
|       }); | ||||
|     t_co+=usecond(); | ||||
|   } | ||||
| } | ||||
|  | ||||
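blockProjectFast omits the subtraction |fine> = |fine> - <basis|fine>|basis> that blockProject performs after each inner product, trading numerical stability for fewer passes over the fine field; the trade-off is the same as classical versus modified Gram-Schmidt projection. A scalar toy comparison of the two variants (plain C++, not Grid code):

#include <cstdio>
#include <vector>

using Vec = std::vector<double>;
double dot(const Vec &a, const Vec &b){ double s=0; for(size_t i=0;i<a.size();i++) s+=a[i]*b[i]; return s; }

// "Fast"/classical projection: every coefficient comes from the original fine vector.
Vec project_fast(const std::vector<Vec> &basis, const Vec &fine){
  Vec c(basis.size());
  for(size_t v=0; v<basis.size(); v++) c[v] = dot(basis[v], fine);
  return c;
}

// Stable/modified projection: remove each component before taking the next
// inner product, as blockProject does with blockZAXPY.
Vec project_stable(const std::vector<Vec> &basis, Vec fine){
  Vec c(basis.size());
  for(size_t v=0; v<basis.size(); v++){
    c[v] = dot(basis[v], fine);
    for(size_t i=0;i<fine.size();i++) fine[i] -= c[v]*basis[v][i];  // subtract the projected part
  }
  return c;
}

int main(){
  std::vector<Vec> basis = {{1,0,0},{0,1,0}};
  Vec fine = {3,4,5};
  Vec a = project_fast(basis,fine), b = project_stable(basis,fine);
  printf("fast   : %g %g\n", a[0], a[1]);   // identical here because this basis is exactly orthonormal;
  printf("stable : %g %g\n", b[0], b[1]);   // they differ when the basis is only approximately orthonormal
  return 0;
}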
|  | ||||
| NAMESPACE_END(Grid); | ||||
|  | ||||
|   | ||||
| @@ -45,6 +45,188 @@ struct CshiftImplGauge: public CshiftImplBase<typename Gimpl::GaugeLinkField::ve | ||||
|   typename Gimpl::GaugeLinkField Cshift(const typename Gimpl::GaugeLinkField &in, int dir, int shift) const override{ return Gimpl::CshiftLink(in,dir,shift); } | ||||
| };   | ||||
|  | ||||
|  | ||||
| /* | ||||
|  * | ||||
|  * TODO:  | ||||
|  *  -- address elements of vobj via thread block in Scatter/Gather | ||||
|  *  -- overlap comms with motion in Face_exchange | ||||
|  * | ||||
|  */ | ||||
|  | ||||
| template<class vobj> inline void ScatterSlice(const cshiftVector<vobj> &buf, | ||||
| 					      Lattice<vobj> &lat, | ||||
| 					      int x, | ||||
| 					      int dim, | ||||
| 					      int offset=0) | ||||
| { | ||||
|   const int Nsimd=vobj::Nsimd(); | ||||
|   typedef typename vobj::scalar_object sobj; | ||||
|   typedef typename vobj::scalar_type scalar_type; | ||||
|   typedef typename vobj::vector_type vector_type; | ||||
|  | ||||
|   GridBase *grid = lat.Grid(); | ||||
|   Coordinate simd = grid->_simd_layout; | ||||
|   int Nd          = grid->Nd(); | ||||
|   int block       = grid->_slice_block[dim]; | ||||
|   int stride      = grid->_slice_stride[dim]; | ||||
|   int nblock      = grid->_slice_nblock[dim]; | ||||
|   int rd          = grid->_rdimensions[dim]; | ||||
|  | ||||
|   int ox = x%rd; | ||||
|   int ix = x/rd; | ||||
|  | ||||
|   int isites = 1; for(int d=0;d<Nd;d++) if( d!=dim) isites*=simd[d]; | ||||
|  | ||||
|   Coordinate rsimd= simd;  rsimd[dim]=1; // maybe reduce Nsimd | ||||
|  | ||||
|   int rNsimd = 1; for(int d=0;d<Nd;d++) rNsimd*=rsimd[d]; | ||||
|   int rNsimda= Nsimd/simd[dim]; // should be equal | ||||
|   assert(rNsimda==rNsimd); | ||||
|   int face_ovol=block*nblock; | ||||
|  | ||||
|   //  assert(buf.size()==face_ovol*rNsimd); | ||||
|  | ||||
|   /*This will work GPU ONLY unless rNsimd is put in the lexico index*/ | ||||
|   //Let's make it work on GPU and then make a special accelerator_for that | ||||
| //doesn't hide the SIMD direction and keeps it explicit in the threadIdx | ||||
|   //for cross platform | ||||
|   // FIXME -- can put internal indices into thread loop | ||||
|   auto buf_p = & buf[0]; | ||||
|   autoView(lat_v, lat, AcceleratorWrite); | ||||
|   accelerator_for(ss, face_ovol/simd[dim],Nsimd,{ | ||||
|  | ||||
|     // scalar layout won't coalesce | ||||
| #ifdef GRID_SIMT | ||||
|       { | ||||
| 	int blane=acceleratorSIMTlane(Nsimd); // buffer lane | ||||
| #else | ||||
|       for(int blane=0;blane<Nsimd;blane++) { | ||||
| #endif | ||||
| 	int olane=blane%rNsimd;               // reduced lattice lane | ||||
| 	int obit =blane/rNsimd; | ||||
|  | ||||
| 	/////////////////////////////////////////////////////////////// | ||||
| 	// osite -- potentially one bit from simd in the buffer: (ss<<1)|obit | ||||
| 	/////////////////////////////////////////////////////////////// | ||||
| 	int ssp = ss*simd[dim]+obit; | ||||
| 	int b    = ssp%block; | ||||
| 	int n    = ssp/block; | ||||
| 	int osite= b+n*stride + ox*block; | ||||
| 	 | ||||
| 	//////////////////////////////////////////// | ||||
| 	// isite -- map lane within buffer to lane within lattice | ||||
| 	//////////////////////////////////////////// | ||||
| 	Coordinate icoor; | ||||
| 	int lane; | ||||
| 	Lexicographic::CoorFromIndex(icoor,olane,rsimd); | ||||
| 	icoor[dim]=ix; | ||||
| 	Lexicographic::IndexFromCoor(icoor,lane,simd); | ||||
| 	 | ||||
| 	/////////////////////////////////////////// | ||||
| 	// Transfer into lattice - will coalesce | ||||
| 	/////////////////////////////////////////// | ||||
| 	//	sobj obj = extractLane(blane,buf_p[ss+offset]); | ||||
| 	//	insertLane(lane,lat_v[osite],obj); | ||||
| 	const int words=sizeof(vobj)/sizeof(vector_type); | ||||
| 	vector_type * from = (vector_type *)&buf_p[ss+offset]; | ||||
| 	vector_type * to   = (vector_type *)&lat_v[osite]; | ||||
| 	scalar_type stmp; | ||||
| 	for(int w=0;w<words;w++){ | ||||
| 	  stmp = getlane(from[w], blane); | ||||
| 	  putlane(to[w], stmp, lane); | ||||
| 	} | ||||
|       } | ||||
|   }); | ||||
| } | ||||
|  | ||||
| template<class vobj> inline void GatherSlice(cshiftVector<vobj> &buf, | ||||
| 					     const Lattice<vobj> &lat, | ||||
| 					     int x, | ||||
| 					     int dim, | ||||
| 					     int offset=0) | ||||
| { | ||||
|   const int Nsimd=vobj::Nsimd(); | ||||
|   typedef typename vobj::scalar_object sobj; | ||||
|   typedef typename vobj::scalar_type scalar_type; | ||||
|   typedef typename vobj::vector_type vector_type; | ||||
|  | ||||
|   autoView(lat_v, lat, AcceleratorRead); | ||||
|  | ||||
|   GridBase *grid = lat.Grid(); | ||||
|   Coordinate simd = grid->_simd_layout; | ||||
|   int Nd          = grid->Nd(); | ||||
|   int block       = grid->_slice_block[dim]; | ||||
|   int stride      = grid->_slice_stride[dim]; | ||||
|   int nblock      = grid->_slice_nblock[dim]; | ||||
|   int rd          = grid->_rdimensions[dim]; | ||||
|  | ||||
|   int ox = x%rd; | ||||
|   int ix = x/rd; | ||||
|  | ||||
|   int isites = 1; for(int d=0;d<Nd;d++) if( d!=dim) isites*=simd[d]; | ||||
|  | ||||
|   Coordinate rsimd= simd;  rsimd[dim]=1; // maybe reduce Nsimd | ||||
|  | ||||
|   int rNsimd = 1; for(int d=0;d<Nd;d++) rNsimd*=rsimd[d]; | ||||
|    | ||||
|   int face_ovol=block*nblock; | ||||
|  | ||||
|   //  assert(buf.size()==face_ovol*rNsimd); | ||||
|  | ||||
|   /*This will work GPU ONLY unless rNsimd is put in the lexico index*/ | ||||
|   //Let's make it work on GPU and then make a special accelerator_for that | ||||
| //doesn't hide the SIMD direction and keeps it explicit in the threadIdx | ||||
|   //for cross platform | ||||
|   //For CPU perhaps just run a loop over Nsimd | ||||
|   auto buf_p = & buf[0]; | ||||
|   accelerator_for(ss, face_ovol/simd[dim],Nsimd,{ | ||||
|  | ||||
|     // scalar layout won't coalesce | ||||
| #ifdef GRID_SIMT | ||||
|       { | ||||
| 	int blane=acceleratorSIMTlane(Nsimd); // buffer lane | ||||
| #else | ||||
|       for(int blane=0;blane<Nsimd;blane++) { | ||||
| #endif | ||||
| 	int olane=blane%rNsimd;               // reduced lattice lane | ||||
| 	int obit =blane/rNsimd; | ||||
| 	 | ||||
| 	//////////////////////////////////////////// | ||||
| 	// osite | ||||
| 	//////////////////////////////////////////// | ||||
| 	int ssp = ss*simd[dim]+obit; | ||||
| 	int b    = ssp%block; | ||||
| 	int n    = ssp/block; | ||||
| 	int osite= b+n*stride + ox*block; | ||||
|  | ||||
| 	//////////////////////////////////////////// | ||||
| 	// isite -- map lane within buffer to lane within lattice | ||||
| 	//////////////////////////////////////////// | ||||
| 	Coordinate icoor; | ||||
| 	int lane; | ||||
| 	Lexicographic::CoorFromIndex(icoor,olane,rsimd); | ||||
| 	icoor[dim]=ix; | ||||
| 	Lexicographic::IndexFromCoor(icoor,lane,simd); | ||||
| 	 | ||||
| 	/////////////////////////////////////////// | ||||
| 	// Take out of lattice | ||||
| 	/////////////////////////////////////////// | ||||
| 	//	sobj obj = extractLane(lane,lat_v[osite]); | ||||
| 	//	insertLane(blane,buf_p[ss+offset],obj); | ||||
| 	const int words=sizeof(vobj)/sizeof(vector_type); | ||||
| 	vector_type * to    = (vector_type *)&buf_p[ss+offset]; | ||||
| 	vector_type * from  = (vector_type *)&lat_v[osite]; | ||||
| 	scalar_type stmp; | ||||
| 	for(int w=0;w<words;w++){ | ||||
| 	  stmp = getlane(from[w], lane); | ||||
| 	  putlane(to[w], stmp, blane); | ||||
| 	} | ||||
|       } | ||||
|   }); | ||||
| } | ||||
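Editorial sketch (not part of the diff): GatherSlice packs one plane of a lattice into a contiguous cshiftVector and ScatterSlice unpacks such a buffer into a plane of a (possibly different) lattice, which is how Face_exchange below pairs them across MPI. The helper name CopyPlane and the variables src, dst, xsrc, xdst are illustrative; the buffer sizing follows the slice geometry asserted in Face_exchange.

// Sketch only: copy plane xsrc of 'src' into plane xdst of 'dst' along 'dim',
// assuming both grids share the same slice geometry in that dimension.
template<class vobj>
void CopyPlane(const Lattice<vobj> &src, Lattice<vobj> &dst,
               int xsrc, int xdst, int dim)
{
  GridBase *g = src.Grid();
  int plane_words = g->_slice_nblock[dim]*g->_slice_block[dim]/g->_simd_layout[dim];
  cshiftVector<vobj> face;
  face.resize(plane_words);              // one plane of vobj, SIMD-reduced in 'dim'
  GatherSlice (face, src, xsrc, dim, 0); // pack plane xsrc of src
  ScatterSlice(face, dst, xdst, dim, 0); // unpack into plane xdst of dst
}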
|  | ||||
|  | ||||
| class PaddedCell { | ||||
| public: | ||||
|   GridCartesian * unpadded_grid; | ||||
| @@ -63,14 +245,18 @@ public: | ||||
|     dims=_grid->Nd(); | ||||
|     AllocateGrids(); | ||||
|     Coordinate local     =unpadded_grid->LocalDimensions(); | ||||
|     Coordinate procs     =unpadded_grid->ProcessorGrid(); | ||||
|     for(int d=0;d<dims;d++){ | ||||
|       assert(local[d]>=depth); | ||||
|       if ( procs[d] > 1 ) assert(local[d]>=depth); | ||||
|     } | ||||
|   } | ||||
|   void DeleteGrids(void) | ||||
|   { | ||||
|     Coordinate processors=unpadded_grid->_processors; | ||||
|     for(int d=0;d<grids.size();d++){ | ||||
|       delete grids[d]; | ||||
|       if ( processors[d] > 1 ) {  | ||||
| 	delete grids[d]; | ||||
|       } | ||||
|     } | ||||
|     grids.resize(0); | ||||
|   }; | ||||
| @@ -81,27 +267,36 @@ public: | ||||
|     Coordinate processors=unpadded_grid->_processors; | ||||
|     Coordinate plocal    =unpadded_grid->LocalDimensions(); | ||||
|     Coordinate global(dims); | ||||
|  | ||||
|     GridCartesian *old_grid = unpadded_grid; | ||||
|     // expand up one dim at a time | ||||
|     for(int d=0;d<dims;d++){ | ||||
|  | ||||
|       plocal[d] += 2*depth;  | ||||
|       if ( processors[d] > 1 ) {  | ||||
| 	plocal[d] += 2*depth;  | ||||
|        | ||||
| 	for(int d=0;d<dims;d++){ | ||||
| 	  global[d] = plocal[d]*processors[d]; | ||||
| 	} | ||||
|  | ||||
|       for(int d=0;d<dims;d++){ | ||||
| 	global[d] = plocal[d]*processors[d]; | ||||
| 	old_grid = new GridCartesian(global,simd,processors); | ||||
|       } | ||||
|  | ||||
|       grids.push_back(new GridCartesian(global,simd,processors)); | ||||
|       grids.push_back(old_grid); | ||||
|     } | ||||
|   }; | ||||
|   template<class vobj> | ||||
|   inline Lattice<vobj> Extract(const Lattice<vobj> &in) const | ||||
|   { | ||||
|     Coordinate processors=unpadded_grid->_processors; | ||||
|  | ||||
|     Lattice<vobj> out(unpadded_grid); | ||||
|  | ||||
|     Coordinate local     =unpadded_grid->LocalDimensions(); | ||||
|     Coordinate fll(dims,depth); // depends on the MPI spread | ||||
|     // depends on the MPI spread       | ||||
|     Coordinate fll(dims,depth); | ||||
|     Coordinate tll(dims,0); // depends on the MPI spread | ||||
|     for(int d=0;d<dims;d++){ | ||||
|       if( processors[d]==1 ) fll[d]=0; | ||||
|     } | ||||
|     localCopyRegion(in,out,fll,tll,local); | ||||
|     return out; | ||||
|   } | ||||
| @@ -116,10 +311,22 @@ public: | ||||
|     } | ||||
|     return tmp; | ||||
|   } | ||||
|   template<class vobj> | ||||
|   inline Lattice<vobj> ExchangePeriodic(const Lattice<vobj> &in) const | ||||
|   { | ||||
|     GridBase *old_grid = in.Grid(); | ||||
|     int dims = old_grid->Nd(); | ||||
|     Lattice<vobj> tmp = in; | ||||
|     for(int d=0;d<dims;d++){ | ||||
|       tmp = ExpandPeriodic(d,tmp); // rvalue && assignment | ||||
|     } | ||||
|     return tmp; | ||||
|   } | ||||
|   // expand up one dim at a time | ||||
|   template<class vobj> | ||||
|   inline Lattice<vobj> Expand(int dim, const Lattice<vobj> &in, const CshiftImplBase<vobj> &cshift = CshiftImplDefault<vobj>()) const | ||||
|   { | ||||
|     Coordinate processors=unpadded_grid->_processors; | ||||
|     GridBase *old_grid = in.Grid(); | ||||
|     GridCartesian *new_grid = grids[dim];//These are new grids | ||||
|     Lattice<vobj>  padded(new_grid); | ||||
| @@ -129,46 +336,236 @@ public: | ||||
|     if(dim==0) conformable(old_grid,unpadded_grid); | ||||
|     else       conformable(old_grid,grids[dim-1]); | ||||
|  | ||||
|     std::cout << " dim "<<dim<<" local "<<local << " padding to "<<plocal<<std::endl; | ||||
|  | ||||
|     double tins=0, tshift=0; | ||||
|      | ||||
|     // Middle bit | ||||
|     double t = usecond(); | ||||
|     for(int x=0;x<local[dim];x++){ | ||||
|       InsertSliceLocal(in,padded,x,depth+x,dim); | ||||
|     } | ||||
|     tins += usecond() - t; | ||||
|      | ||||
|     // High bit | ||||
|     t = usecond(); | ||||
|     shifted = cshift.Cshift(in,dim,depth); | ||||
|     tshift += usecond() - t; | ||||
|  | ||||
|     t=usecond(); | ||||
|     for(int x=0;x<depth;x++){ | ||||
|       InsertSliceLocal(shifted,padded,local[dim]-depth+x,depth+local[dim]+x,dim); | ||||
|     } | ||||
|     tins += usecond() - t; | ||||
|      | ||||
|     // Low bit | ||||
|     t = usecond(); | ||||
|     shifted = cshift.Cshift(in,dim,-depth); | ||||
|     tshift += usecond() - t; | ||||
|      | ||||
|     t = usecond(); | ||||
|     for(int x=0;x<depth;x++){ | ||||
|       InsertSliceLocal(shifted,padded,x,x,dim); | ||||
|     } | ||||
|     tins += usecond() - t; | ||||
|     int islocal = 0 ; | ||||
|     if ( processors[dim] == 1 ) islocal = 1; | ||||
|  | ||||
|     if ( islocal ) { | ||||
|  | ||||
|       // could be replaced with a straight copy and maybe a grid swizzle; | ||||
|       // possibly even just return in? | ||||
|       double t = usecond(); | ||||
|       padded = in; | ||||
|       tins += usecond() - t; | ||||
|        | ||||
|     } else { | ||||
|  | ||||
|       ////////////////////////////////////////////// | ||||
|       // Replace sequence with | ||||
|       // --------------------- | ||||
|       // (i) Gather high face(s); start comms | ||||
|       // (ii) Gather low  face(s); start comms | ||||
|       // (iii) Copy middle bit with localCopyRegion | ||||
|       // (iv) Complete high face(s), insert slice(s) | ||||
|       // (v)  Complete low  face(s), insert slice(s) | ||||
|       ////////////////////////////////////////////// | ||||
|       // Middle bit | ||||
|       double t = usecond(); | ||||
|       for(int x=0;x<local[dim];x++){ | ||||
| 	InsertSliceLocal(in,padded,x,depth+x,dim); | ||||
|       } | ||||
|       tins += usecond() - t; | ||||
|      | ||||
|       // High bit | ||||
|       t = usecond(); | ||||
|       shifted = cshift.Cshift(in,dim,depth); | ||||
|       tshift += usecond() - t; | ||||
|  | ||||
|       t=usecond(); | ||||
|       for(int x=0;x<depth;x++){ | ||||
| 	InsertSliceLocal(shifted,padded,local[dim]-depth+x,depth+local[dim]+x,dim); | ||||
|       } | ||||
|       tins += usecond() - t; | ||||
|      | ||||
|       // Low bit | ||||
|       t = usecond(); | ||||
|       shifted = cshift.Cshift(in,dim,-depth); | ||||
|       tshift += usecond() - t; | ||||
|      | ||||
|       t = usecond(); | ||||
|       for(int x=0;x<depth;x++){ | ||||
| 	InsertSliceLocal(shifted,padded,x,x,dim); | ||||
|       } | ||||
|       tins += usecond() - t; | ||||
|  | ||||
|     } | ||||
|     std::cout << GridLogPerformance << "PaddedCell::Expand timings: cshift:" << tshift/1000 << "ms, insert-slice:" << tins/1000 << "ms" << std::endl; | ||||
|      | ||||
|     return padded; | ||||
|   } | ||||
|  | ||||
|   template<class vobj> | ||||
|   inline Lattice<vobj> ExpandPeriodic(int dim, const Lattice<vobj> &in) const | ||||
|   { | ||||
|     Coordinate processors=unpadded_grid->_processors; | ||||
|     GridBase *old_grid = in.Grid(); | ||||
|     GridCartesian *new_grid = grids[dim];//These are new grids | ||||
|     Lattice<vobj>  padded(new_grid); | ||||
|     //    Lattice<vobj> shifted(old_grid);     | ||||
|     Coordinate local     =old_grid->LocalDimensions(); | ||||
|     Coordinate plocal    =new_grid->LocalDimensions(); | ||||
|     if(dim==0) conformable(old_grid,unpadded_grid); | ||||
|     else       conformable(old_grid,grids[dim-1]); | ||||
|  | ||||
|     //    std::cout << " dim "<<dim<<" local "<<local << " padding to "<<plocal<<std::endl; | ||||
|     double tins=0, tshift=0; | ||||
|  | ||||
|     int islocal = 0 ; | ||||
|     if ( processors[dim] == 1 ) islocal = 1; | ||||
|  | ||||
|     if ( islocal ) { | ||||
|       padded=in; // slightly different interface could avoid a copy operation | ||||
|     } else { | ||||
|       Face_exchange(in,padded,dim,depth); | ||||
|       return padded; | ||||
|     } | ||||
|     return padded; | ||||
|   } | ||||
|   template<class vobj> | ||||
|   void Face_exchange(const Lattice<vobj> &from, | ||||
| 		     Lattice<vobj> &to, | ||||
| 		     int dimension,int depth) const | ||||
|   { | ||||
|     typedef typename vobj::vector_type vector_type; | ||||
|     typedef typename vobj::scalar_type scalar_type; | ||||
|     typedef typename vobj::scalar_object sobj; | ||||
|  | ||||
|     RealD t_gather=0.0; | ||||
|     RealD t_scatter=0.0; | ||||
|     RealD t_comms=0.0; | ||||
|     RealD t_copy=0.0; | ||||
|      | ||||
|     //    std::cout << GridLogMessage << "dimension " <<dimension<<std::endl; | ||||
|     //    DumpSliceNorm(std::string("Face_exchange from"),from,dimension); | ||||
|     GridBase *grid=from.Grid(); | ||||
|     GridBase *new_grid=to.Grid(); | ||||
|  | ||||
|     Coordinate lds = from.Grid()->_ldimensions; | ||||
|     Coordinate nlds=   to.Grid()->_ldimensions; | ||||
|     Coordinate simd= from.Grid()->_simd_layout; | ||||
|     int ld    = lds[dimension]; | ||||
|     int nld   = to.Grid()->_ldimensions[dimension]; | ||||
|     const int Nsimd = vobj::Nsimd(); | ||||
|  | ||||
|     assert(depth<=lds[dimension]); // halo depth must fit on the neighbouring node | ||||
|     assert(depth>0);               // zero depth is a caller bug | ||||
|     assert(ld+2*depth==nld); | ||||
|     //////////////////////////////////////////////////////////////////////////// | ||||
|     // Face size and byte calculations | ||||
|     //////////////////////////////////////////////////////////////////////////// | ||||
|     int buffer_size = 1; | ||||
|     for(int d=0;d<lds.size();d++){ | ||||
|       if ( d!= dimension) buffer_size=buffer_size*lds[d]; | ||||
|     } | ||||
|     buffer_size = buffer_size  / Nsimd; | ||||
|     int rNsimd = Nsimd / simd[dimension]; | ||||
|     assert( buffer_size == from.Grid()->_slice_nblock[dimension]*from.Grid()->_slice_block[dimension] / simd[dimension]); | ||||
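    // Illustrative arithmetic (editorial note, not part of the diff): for an 8^4
    // local volume with Nsimd=8 and simd layout {1,2,2,2}, exchanging in dimension 3
    // gives buffer_size = (8*8*8)/8 = 64 vobj per plane, i.e. 64*sizeof(vobj) bytes
    // per depth slice, matching _slice_nblock*_slice_block/simd = 128/2 = 64.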
|  | ||||
|     static cshiftVector<vobj> send_buf;  | ||||
|     static cshiftVector<vobj> recv_buf; | ||||
|     send_buf.resize(buffer_size*2*depth);     | ||||
|     recv_buf.resize(buffer_size*2*depth); | ||||
|  | ||||
|     std::vector<CommsRequest_t> fwd_req;    | ||||
|     std::vector<CommsRequest_t> bwd_req;    | ||||
|  | ||||
|     int words = buffer_size; | ||||
|     int bytes = words * sizeof(vobj); | ||||
|  | ||||
|     //////////////////////////////////////////////////////////////////////////// | ||||
|     // Communication coords | ||||
|     //////////////////////////////////////////////////////////////////////////// | ||||
|     int comm_proc = 1; | ||||
|     int xmit_to_rank; | ||||
|     int recv_from_rank; | ||||
|     grid->ShiftedRanks(dimension,comm_proc,xmit_to_rank,recv_from_rank); | ||||
|  | ||||
|     //////////////////////////////////////////////////////////////////////////// | ||||
|     // Gather all surface terms up to depth "d" | ||||
|     //////////////////////////////////////////////////////////////////////////// | ||||
|     RealD t; | ||||
|     RealD t_tot=-usecond(); | ||||
|     int plane=0; | ||||
|     for ( int d=0;d < depth ; d ++ ) { | ||||
|       int tag = d*1024 + dimension*2+0; | ||||
|  | ||||
|       t=usecond(); | ||||
|       GatherSlice(send_buf,from,d,dimension,plane*buffer_size); plane++; | ||||
|       t_gather+=usecond()-t; | ||||
|  | ||||
|       t=usecond(); | ||||
|       grid->SendToRecvFromBegin(fwd_req, | ||||
| 				(void *)&send_buf[d*buffer_size], xmit_to_rank, | ||||
| 				(void *)&recv_buf[d*buffer_size], recv_from_rank, bytes, tag); | ||||
|       t_comms+=usecond()-t; | ||||
|      } | ||||
|     for ( int d=0;d < depth ; d ++ ) { | ||||
|       int tag = d*1024 + dimension*2+1; | ||||
|  | ||||
|       t=usecond(); | ||||
|       GatherSlice(send_buf,from,ld-depth+d,dimension,plane*buffer_size); plane++; | ||||
|       t_gather+= usecond() - t; | ||||
|  | ||||
|       t=usecond(); | ||||
|       grid->SendToRecvFromBegin(bwd_req, | ||||
| 				(void *)&send_buf[(d+depth)*buffer_size], recv_from_rank, | ||||
| 				(void *)&recv_buf[(d+depth)*buffer_size], xmit_to_rank, bytes,tag); | ||||
|       t_comms+=usecond()-t; | ||||
|     } | ||||
|  | ||||
|     //////////////////////////////////////////////////////////////////////////// | ||||
|     // Copy interior -- overlap this with comms | ||||
|     //////////////////////////////////////////////////////////////////////////// | ||||
|     int Nd = new_grid->Nd(); | ||||
|     Coordinate LL(Nd,0); | ||||
|     Coordinate sz = grid->_ldimensions; | ||||
|     Coordinate toLL(Nd,0); | ||||
|     toLL[dimension]=depth; | ||||
|     t=usecond(); | ||||
|     localCopyRegion(from,to,LL,toLL,sz); | ||||
|     t_copy= usecond() - t; | ||||
|      | ||||
|     //////////////////////////////////////////////////////////////////////////// | ||||
|     // Scatter all faces | ||||
|     //////////////////////////////////////////////////////////////////////////// | ||||
|     plane=0; | ||||
|  | ||||
|     t=usecond(); | ||||
|     grid->CommsComplete(fwd_req); | ||||
|     t_comms+= usecond() - t; | ||||
|  | ||||
|     t=usecond(); | ||||
|     for ( int d=0;d < depth ; d ++ ) { | ||||
|       ScatterSlice(recv_buf,to,nld-depth+d,dimension,plane*buffer_size); plane++; | ||||
|     } | ||||
|     t_scatter= usecond() - t; | ||||
|  | ||||
|     t=usecond(); | ||||
|     grid->CommsComplete(bwd_req); | ||||
|     t_comms+= usecond() - t; | ||||
|      | ||||
|     t=usecond(); | ||||
|     for ( int d=0;d < depth ; d ++ ) { | ||||
|       ScatterSlice(recv_buf,to,d,dimension,plane*buffer_size); plane++; | ||||
|     } | ||||
|     t_scatter+= usecond() - t; | ||||
|     t_tot+=usecond(); | ||||
|  | ||||
|     std::cout << GridLogPerformance << "PaddedCell::Expand new timings: gather :" << t_gather/1000  << "ms"<<std::endl; | ||||
|     std::cout << GridLogPerformance << "PaddedCell::Expand new timings: scatter:" << t_scatter/1000   << "ms"<<std::endl; | ||||
|     std::cout << GridLogPerformance << "PaddedCell::Expand new timings: copy   :" << t_copy/1000      << "ms"<<std::endl; | ||||
|     std::cout << GridLogPerformance << "PaddedCell::Expand new timings: comms  :" << t_comms/1000     << "ms"<<std::endl; | ||||
|     std::cout << GridLogPerformance << "PaddedCell::Expand new timings: total  :" << t_tot/1000     << "ms"<<std::endl; | ||||
|     std::cout << GridLogPerformance << "PaddedCell::Expand new timings: gather :" << depth*4.0*bytes/t_gather << "MB/s"<<std::endl; | ||||
|     std::cout << GridLogPerformance << "PaddedCell::Expand new timings: scatter:" << depth*4.0*bytes/t_scatter<< "MB/s"<<std::endl; | ||||
|     std::cout << GridLogPerformance << "PaddedCell::Expand new timings: comms  :" << (RealD)4.0*bytes/t_comms   << "MB/s"<<std::endl; | ||||
|     std::cout << GridLogPerformance << "PaddedCell::Expand new timings: face bytes  :" << depth*bytes/1e6 << "MB"<<std::endl; | ||||
|   } | ||||
|    | ||||
| }; | ||||
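Editorial usage sketch for the class above (not part of the diff). The constructor argument order (depth, then the unpadded GridCartesian) follows upstream Grid and should be treated as an assumption, as are the names FineGrid, U and the use of the standard Grid helpers GridDefaultLatt/GridDefaultSimd/GridDefaultMpi.

// Sketch only: build a one-site halo in every dimension, work on the padded
// field, then pull the interior back onto the original grid.
void PaddedCellExample(void)
{
  int depth = 1;
  GridCartesian *FineGrid = SpaceTimeGrid::makeFourDimGrid(GridDefaultLatt(),
                                                           GridDefaultSimd(Nd,vComplex::Nsimd()),
                                                           GridDefaultMpi());
  PaddedCell Ghost(depth, FineGrid);
  LatticeColourMatrix U(FineGrid);                        // field on the unpadded grid
  LatticeColourMatrix Upad = Ghost.ExchangePeriodic(U);   // halo exchange onto the padded grid
  // ... stencil work that reaches up to 'depth' sites off-node ...
  LatticeColourMatrix Uout = Ghost.Extract(Upad);         // interior region, back on FineGrid
}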
|   | ||||
|  | ||||
| NAMESPACE_END(Grid); | ||||
|  | ||||
|  | ||||
|   | ||||
| @@ -165,7 +165,7 @@ class BinaryIO { | ||||
| 	 * FIXME -- 128^3 x 256 x 16 will overflow. | ||||
| 	 */ | ||||
| 	 | ||||
| 	int global_site; | ||||
| 	int64_t global_site; | ||||
|  | ||||
| 	Lexicographic::CoorFromIndex(coor,local_site,local_vol); | ||||
|  | ||||
| @@ -175,8 +175,8 @@ class BinaryIO { | ||||
|  | ||||
| 	Lexicographic::IndexFromCoor(coor,global_site,global_vol); | ||||
|  | ||||
| 	uint32_t gsite29   = global_site%29; | ||||
| 	uint32_t gsite31   = global_site%31; | ||||
| 	uint64_t gsite29   = global_site%29; | ||||
| 	uint64_t gsite31   = global_site%31; | ||||
| 	 | ||||
| 	site_crc = crc32(0,(unsigned char *)site_buf,sizeof(fobj)); | ||||
| 	//	std::cout << "Site "<<local_site << " crc "<<std::hex<<site_crc<<std::dec<<std::endl; | ||||
| @@ -545,7 +545,9 @@ class BinaryIO { | ||||
| 				       const std::string &format, | ||||
| 				       uint32_t &nersc_csum, | ||||
| 				       uint32_t &scidac_csuma, | ||||
| 				       uint32_t &scidac_csumb) | ||||
| 				       uint32_t &scidac_csumb, | ||||
| 				       int control=BINARYIO_LEXICOGRAPHIC | ||||
| 				       ) | ||||
|   { | ||||
|     typedef typename vobj::scalar_object sobj; | ||||
|     typedef typename vobj::Realified::scalar_type word;    word w=0; | ||||
| @@ -556,7 +558,7 @@ class BinaryIO { | ||||
|     std::vector<sobj> scalardata(lsites);  | ||||
|     std::vector<fobj>     iodata(lsites); // Munge, checksum, byte order in here | ||||
|      | ||||
|     IOobject(w,grid,iodata,file,offset,format,BINARYIO_READ|BINARYIO_LEXICOGRAPHIC, | ||||
|     IOobject(w,grid,iodata,file,offset,format,BINARYIO_READ|control, | ||||
| 	     nersc_csum,scidac_csuma,scidac_csumb); | ||||
|  | ||||
|     GridStopWatch timer;  | ||||
| @@ -582,7 +584,8 @@ class BinaryIO { | ||||
| 					  const std::string &format, | ||||
| 					  uint32_t &nersc_csum, | ||||
| 					  uint32_t &scidac_csuma, | ||||
| 					  uint32_t &scidac_csumb) | ||||
| 					  uint32_t &scidac_csumb, | ||||
| 					  int control=BINARYIO_LEXICOGRAPHIC) | ||||
|   { | ||||
|     typedef typename vobj::scalar_object sobj; | ||||
|     typedef typename vobj::Realified::scalar_type word;    word w=0; | ||||
| @@ -607,7 +610,7 @@ class BinaryIO { | ||||
|     while (attemptsLeft >= 0) | ||||
|     { | ||||
|       grid->Barrier(); | ||||
|       IOobject(w,grid,iodata,file,offset,format,BINARYIO_WRITE|BINARYIO_LEXICOGRAPHIC, | ||||
|       IOobject(w,grid,iodata,file,offset,format,BINARYIO_WRITE|control, | ||||
| 	             nersc_csum,scidac_csuma,scidac_csumb); | ||||
|       if (checkWrite) | ||||
|       { | ||||
| @@ -617,7 +620,7 @@ class BinaryIO { | ||||
|  | ||||
|         std::cout << GridLogMessage << "writeLatticeObject: read back object" << std::endl; | ||||
|         grid->Barrier(); | ||||
|         IOobject(w,grid,ckiodata,file,ckoffset,format,BINARYIO_READ|BINARYIO_LEXICOGRAPHIC, | ||||
|         IOobject(w,grid,ckiodata,file,ckoffset,format,BINARYIO_READ|control, | ||||
| 	               cknersc_csum,ckscidac_csuma,ckscidac_csumb); | ||||
|         if ((cknersc_csum != nersc_csum) or (ckscidac_csuma != scidac_csuma) or (ckscidac_csumb != scidac_csumb)) | ||||
|         { | ||||
|   | ||||
| @@ -162,8 +162,14 @@ template<class vobj> void ScidacMetaData(Lattice<vobj> & field, | ||||
|  { | ||||
|    uint32_t scidac_checksuma = stoull(scidacChecksum_.suma,0,16); | ||||
|    uint32_t scidac_checksumb = stoull(scidacChecksum_.sumb,0,16); | ||||
|    if ( scidac_csuma !=scidac_checksuma) return 0; | ||||
|    if ( scidac_csumb !=scidac_checksumb) return 0; | ||||
|    std::cout << GridLogMessage << " scidacChecksumVerify computed "<<scidac_csuma<<" expected "<<scidac_checksuma <<std::endl; | ||||
|    std::cout << GridLogMessage << " scidacChecksumVerify computed "<<scidac_csumb<<" expected "<<scidac_checksumb <<std::endl; | ||||
|    if ( scidac_csuma !=scidac_checksuma) { | ||||
|      return 0; | ||||
|    }; | ||||
|    if ( scidac_csumb !=scidac_checksumb) { | ||||
|      return 0; | ||||
|    }; | ||||
|    return 1; | ||||
|  } | ||||
|  | ||||
| @@ -206,7 +212,7 @@ class GridLimeReader : public BinaryIO { | ||||
|   // Read a generic lattice field and verify checksum | ||||
|   //////////////////////////////////////////// | ||||
|   template<class vobj> | ||||
|   void readLimeLatticeBinaryObject(Lattice<vobj> &field,std::string record_name) | ||||
|   void readLimeLatticeBinaryObject(Lattice<vobj> &field,std::string record_name,int control=BINARYIO_LEXICOGRAPHIC) | ||||
|   { | ||||
|     typedef typename vobj::scalar_object sobj; | ||||
|     scidacChecksum scidacChecksum_; | ||||
| @@ -238,7 +244,7 @@ class GridLimeReader : public BinaryIO { | ||||
| 	uint64_t offset= ftello(File); | ||||
| 	//	std::cout << " ReadLatticeObject from offset "<<offset << std::endl; | ||||
| 	BinarySimpleMunger<sobj,sobj> munge; | ||||
| 	BinaryIO::readLatticeObject< vobj, sobj >(field, filename, munge, offset, format,nersc_csum,scidac_csuma,scidac_csumb); | ||||
| 	BinaryIO::readLatticeObject< vobj, sobj >(field, filename, munge, offset, format,nersc_csum,scidac_csuma,scidac_csumb,control); | ||||
| 	std::cout << GridLogMessage << "SciDAC checksum A " << std::hex << scidac_csuma << std::dec << std::endl; | ||||
| 	std::cout << GridLogMessage << "SciDAC checksum B " << std::hex << scidac_csumb << std::dec << std::endl; | ||||
| 	///////////////////////////////////////////// | ||||
| @@ -408,7 +414,7 @@ class GridLimeWriter : public BinaryIO | ||||
|   // in communicator used by the field.Grid() | ||||
|   //////////////////////////////////////////////////// | ||||
|   template<class vobj> | ||||
|   void writeLimeLatticeBinaryObject(Lattice<vobj> &field,std::string record_name) | ||||
|   void writeLimeLatticeBinaryObject(Lattice<vobj> &field,std::string record_name,int control=BINARYIO_LEXICOGRAPHIC) | ||||
|   { | ||||
|     //////////////////////////////////////////////////////////////////// | ||||
|     // NB: FILE and iostream are jointly writing disjoint sequences in the | ||||
| @@ -459,7 +465,7 @@ class GridLimeWriter : public BinaryIO | ||||
|     /////////////////////////////////////////// | ||||
|     std::string format = getFormatString<vobj>(); | ||||
|     BinarySimpleMunger<sobj,sobj> munge; | ||||
|     BinaryIO::writeLatticeObject<vobj,sobj>(field, filename, munge, offset1, format,nersc_csum,scidac_csuma,scidac_csumb); | ||||
|     BinaryIO::writeLatticeObject<vobj,sobj>(field, filename, munge, offset1, format,nersc_csum,scidac_csuma,scidac_csumb,control); | ||||
|  | ||||
|     /////////////////////////////////////////// | ||||
|     // Wind forward and close the record | ||||
| @@ -512,7 +518,8 @@ class ScidacWriter : public GridLimeWriter { | ||||
|   //////////////////////////////////////////////// | ||||
|   template <class vobj, class userRecord> | ||||
|   void writeScidacFieldRecord(Lattice<vobj> &field,userRecord _userRecord, | ||||
|                               const unsigned int recordScientificPrec = 0)  | ||||
|                               const unsigned int recordScientificPrec = 0, | ||||
| 			      int control=BINARYIO_LEXICOGRAPHIC) | ||||
|   { | ||||
|     GridBase * grid = field.Grid(); | ||||
|  | ||||
| @@ -534,7 +541,7 @@ class ScidacWriter : public GridLimeWriter { | ||||
|       writeLimeObject(0,0,_scidacRecord,_scidacRecord.SerialisableClassName(),std::string(SCIDAC_PRIVATE_RECORD_XML)); | ||||
|     } | ||||
|     // Collective call | ||||
|     writeLimeLatticeBinaryObject(field,std::string(ILDG_BINARY_DATA));      // Closes message with checksum | ||||
|     writeLimeLatticeBinaryObject(field,std::string(ILDG_BINARY_DATA),control);      // Closes message with checksum | ||||
|   } | ||||
| }; | ||||
|  | ||||
| @@ -553,7 +560,8 @@ class ScidacReader : public GridLimeReader { | ||||
|   // Write generic lattice field in scidac format | ||||
|   //////////////////////////////////////////////// | ||||
|   template <class vobj, class userRecord> | ||||
|   void readScidacFieldRecord(Lattice<vobj> &field,userRecord &_userRecord)  | ||||
|   void readScidacFieldRecord(Lattice<vobj> &field,userRecord &_userRecord, | ||||
| 			     int control=BINARYIO_LEXICOGRAPHIC)  | ||||
|   { | ||||
|     typedef typename vobj::scalar_object sobj; | ||||
|     GridBase * grid = field.Grid(); | ||||
| @@ -571,7 +579,7 @@ class ScidacReader : public GridLimeReader { | ||||
|     readLimeObject(header ,std::string("FieldMetaData"),std::string(GRID_FORMAT)); // Open message  | ||||
|     readLimeObject(_userRecord,_userRecord.SerialisableClassName(),std::string(SCIDAC_RECORD_XML)); | ||||
|     readLimeObject(_scidacRecord,_scidacRecord.SerialisableClassName(),std::string(SCIDAC_PRIVATE_RECORD_XML)); | ||||
|     readLimeLatticeBinaryObject(field,std::string(ILDG_BINARY_DATA)); | ||||
|     readLimeLatticeBinaryObject(field,std::string(ILDG_BINARY_DATA),control); | ||||
|   } | ||||
|   void skipPastBinaryRecord(void) { | ||||
|     std::string rec_name(ILDG_BINARY_DATA); | ||||
|   | ||||
| @@ -462,6 +462,7 @@ void WilsonKernels<Impl>::DhopKernel(int Opt,StencilImpl &st,  DoubledGaugeField | ||||
|     autoView(st_v , st,AcceleratorRead); | ||||
|  | ||||
|    if( interior && exterior ) { | ||||
|      acceleratorFenceComputeStream(); | ||||
|      if (Opt == WilsonKernelsStatic::OptGeneric    ) { KERNEL_CALL(GenericDhopSite); return;} | ||||
|      if (Opt == WilsonKernelsStatic::OptHandUnroll ) { KERNEL_CALL(HandDhopSite);    return;} | ||||
| #ifndef GRID_CUDA | ||||
| @@ -495,6 +496,7 @@ void WilsonKernels<Impl>::DhopKernel(int Opt,StencilImpl &st,  DoubledGaugeField | ||||
|     autoView(st_v ,st,AcceleratorRead); | ||||
|  | ||||
|    if( interior && exterior ) { | ||||
|      acceleratorFenceComputeStream(); | ||||
|      if (Opt == WilsonKernelsStatic::OptGeneric    ) { KERNEL_CALL(GenericDhopSiteDag); return;} | ||||
|      if (Opt == WilsonKernelsStatic::OptHandUnroll ) { KERNEL_CALL(HandDhopSiteDag);    return;} | ||||
| #ifndef GRID_CUDA | ||||
|   | ||||
| @@ -170,7 +170,7 @@ public: | ||||
|             typedef decltype(coalescedReadGeneralPermute(U_v[0](0),gStencil.GetEntry(0,0)->_permute,Nd)) U3matrix; | ||||
|  | ||||
|             int Nsites = U_v.size(); | ||||
|             auto gStencil_v = gStencil.View();  | ||||
|             auto gStencil_v = gStencil.View(AcceleratorRead);  | ||||
|  | ||||
|             accelerator_for(site,Nsites,Simd::Nsimd(),{ // ----------- 3-link constructs | ||||
|                 stencilElement SE0, SE1, SE2, SE3, SE4, SE5; | ||||
| @@ -386,4 +386,4 @@ public: | ||||
| }; | ||||
|  | ||||
|  | ||||
| NAMESPACE_END(Grid); | ||||
| NAMESPACE_END(Grid); | ||||
|   | ||||
| @@ -488,7 +488,7 @@ public: | ||||
|     for(int mu=0;mu<Nd;mu++){ | ||||
|       { //view scope | ||||
| 	autoView( gStaple_v , gStaple, AcceleratorWrite); | ||||
| 	auto gStencil_v = gStencil.View(); | ||||
| 	auto gStencil_v = gStencil.View(AcceleratorRead); | ||||
| 	 | ||||
| 	accelerator_for(ss, ggrid->oSites(), (size_t)ggrid->Nsimd(), { | ||||
| 	    decltype(coalescedRead(Ug_dirs_v[0][0])) stencil_ss; | ||||
| @@ -1200,7 +1200,7 @@ public: | ||||
|  | ||||
|       { //view scope | ||||
| 	autoView( gStaple_v , gStaple, AcceleratorWrite); | ||||
| 	auto gStencil_v = gStencil.View(); | ||||
| 	auto gStencil_v = gStencil.View(AcceleratorRead); | ||||
|  | ||||
| 	accelerator_for(ss, ggrid->oSites(), (size_t)ggrid->Nsimd(), { | ||||
| 	    decltype(coalescedRead(Ug_dirs_v[0][0])) stencil_ss; | ||||
|   | ||||
| @@ -1130,6 +1130,14 @@ static_assert(sizeof(SIMD_Ftype) == sizeof(SIMD_Itype), "SIMD vector lengths inc | ||||
| #endif | ||||
| #endif | ||||
|  | ||||
| // Fixme need coalesced read gpermute | ||||
| template<class vobj> void gpermute(vobj & inout,int perm){ | ||||
|   vobj tmp=inout; | ||||
|   if (perm & 0x1 ) { permute(inout,tmp,0); tmp=inout;} | ||||
|   if (perm & 0x2 ) { permute(inout,tmp,1); tmp=inout;} | ||||
|   if (perm & 0x4 ) { permute(inout,tmp,2); tmp=inout;} | ||||
|   if (perm & 0x8 ) { permute(inout,tmp,3); tmp=inout;} | ||||
| } | ||||
|  | ||||
| NAMESPACE_END(Grid); | ||||
|  | ||||
|   | ||||
| @@ -99,6 +99,8 @@ using std::log; | ||||
| using std::exp; | ||||
| using std::sin; | ||||
| using std::cos; | ||||
| using std::asin; | ||||
| using std::acos; | ||||
|  | ||||
|  | ||||
| accelerator_inline RealF    conjugate(const RealF  & r){ return r; } | ||||
|   | ||||
| @@ -32,7 +32,12 @@ NAMESPACE_BEGIN(Grid); | ||||
| struct GeneralStencilEntry {  | ||||
|   uint64_t _offset;            // 4 bytes  | ||||
|   uint8_t _permute;            // 1 bytes // Horrible alignment properties | ||||
|   uint8_t _wrap;               // 1 bytes // Horrible alignment properties | ||||
| }; | ||||
| struct GeneralStencilEntryReordered : public GeneralStencilEntry { | ||||
|   uint64_t _input; | ||||
| }; | ||||
|  | ||||
| // Could pack to 8 + 4 + 4 = 128 bit and use  | ||||
|  | ||||
| class GeneralLocalStencilView { | ||||
| @@ -46,7 +51,7 @@ class GeneralLocalStencilView { | ||||
|   accelerator_inline GeneralStencilEntry * GetEntry(int point,int osite) const {  | ||||
|     return & this->_entries_p[point+this->_npoints*osite];  | ||||
|   } | ||||
|  | ||||
|   void ViewClose(void){}; | ||||
| }; | ||||
| //////////////////////////////////////// | ||||
| // The Stencil Class itself | ||||
| @@ -61,7 +66,7 @@ protected: | ||||
| public:  | ||||
|   GridBase *Grid(void) const { return _grid; } | ||||
|  | ||||
|   View_type View(void) const { | ||||
|   View_type View(int mode) const { | ||||
|     View_type accessor(*( (View_type *) this)); | ||||
|     return accessor; | ||||
|   } | ||||
| @@ -101,17 +106,23 @@ public: | ||||
| 	  // Simpler version using icoor calculation | ||||
| 	  //////////////////////////////////////////////// | ||||
| 	  SE._permute =0; | ||||
| 	  SE._wrap=0; | ||||
| 	  for(int d=0;d<Coor.size();d++){ | ||||
|  | ||||
| 	    int fd = grid->_fdimensions[d]; | ||||
| 	    int rd = grid->_rdimensions[d]; | ||||
| 	    int ld = grid->_ldimensions[d]; | ||||
| 	    int ly = grid->_simd_layout[d]; | ||||
|  | ||||
| 	    assert((ly==1)||(ly==2)); | ||||
| 	    assert((ly==1)||(ly==2)||(ly==grid->Nsimd())); | ||||
|  | ||||
| 	    int shift = (shifts[ii][d]+fd)%fd;  // make it strictly positive 0.. L-1 | ||||
| 	    int x = Coor[d];                // x in [0... rd-1] as an oSite  | ||||
|  | ||||
| 	    if ( (x + shift)%fd != (x+shift)%ld ){ | ||||
| 	      SE._wrap = 1; | ||||
| 	    } | ||||
| 	     | ||||
| 	    int permute_dim  = grid->PermuteDim(d); | ||||
| 	    int permute_slice=0; | ||||
| 	    if(permute_dim){     | ||||
|   | ||||
| @@ -70,57 +70,6 @@ struct DefaultImplParams { | ||||
| void Gather_plane_table_compute (GridBase *grid,int dimension,int plane,int cbmask, | ||||
| 				 int off,std::vector<std::pair<int,int> > & table); | ||||
|  | ||||
| /* | ||||
| template<class vobj,class cobj,class compressor> | ||||
| void Gather_plane_simple_table (commVector<std::pair<int,int> >& table,const Lattice<vobj> &rhs,cobj *buffer,compressor &compress, int off,int so)   __attribute__((noinline)); | ||||
|  | ||||
| template<class vobj,class cobj,class compressor> | ||||
| void Gather_plane_simple_table (commVector<std::pair<int,int> >& table,const Lattice<vobj> &rhs,cobj *buffer,compressor &compress, int off,int so) | ||||
| { | ||||
|   int num=table.size(); | ||||
|   std::pair<int,int> *table_v = & table[0]; | ||||
|  | ||||
|   auto rhs_v = rhs.View(AcceleratorRead); | ||||
|   accelerator_forNB( i,num, vobj::Nsimd(), { | ||||
|     compress.Compress(buffer[off+table_v[i].first],rhs_v[so+table_v[i].second]); | ||||
|   }); | ||||
|   rhs_v.ViewClose(); | ||||
| } | ||||
|  | ||||
| /////////////////////////////////////////////////////////////////// | ||||
| // Gather for when there *is* need to SIMD split with compression | ||||
| /////////////////////////////////////////////////////////////////// | ||||
| template<class cobj,class vobj,class compressor> | ||||
| void Gather_plane_exchange_table(const Lattice<vobj> &rhs, | ||||
| 				 commVector<cobj *> pointers, | ||||
| 				 int dimension,int plane, | ||||
| 				 int cbmask,compressor &compress,int type) __attribute__((noinline)); | ||||
|  | ||||
| template<class cobj,class vobj,class compressor> | ||||
| void Gather_plane_exchange_table(commVector<std::pair<int,int> >& table, | ||||
| 				 const Lattice<vobj> &rhs, | ||||
| 				 std::vector<cobj *> &pointers,int dimension,int plane,int cbmask, | ||||
| 				 compressor &compress,int type) | ||||
| { | ||||
|   assert( (table.size()&0x1)==0); | ||||
|   int num=table.size()/2; | ||||
|   int so  = plane*rhs.Grid()->_ostride[dimension]; // base offset for start of plane | ||||
|  | ||||
|   auto rhs_v = rhs.View(AcceleratorRead); | ||||
|   auto rhs_p = &rhs_v[0]; | ||||
|   auto p0=&pointers[0][0]; | ||||
|   auto p1=&pointers[1][0]; | ||||
|   auto tp=&table[0]; | ||||
|   accelerator_forNB(j, num, vobj::Nsimd(), { | ||||
|       compress.CompressExchange(p0,p1, rhs_p, j, | ||||
| 				so+tp[2*j  ].second, | ||||
| 				so+tp[2*j+1].second, | ||||
| 				type); | ||||
|   }); | ||||
|   rhs_v.ViewClose(); | ||||
| } | ||||
| */ | ||||
|  | ||||
| void DslashResetCounts(void); | ||||
| void DslashGetCounts(uint64_t &dirichlet,uint64_t &partial,uint64_t &full); | ||||
| void DslashLogFull(void); | ||||
| @@ -258,6 +207,10 @@ public: | ||||
|   struct Packet { | ||||
|     void * send_buf; | ||||
|     void * recv_buf; | ||||
| #ifndef ACCELERATOR_AWARE_MPI | ||||
|     void * host_send_buf; // Allocate this if not MPI_CUDA_AWARE | ||||
|     void * host_recv_buf; // Allocate this if not MPI_CUDA_AWARE | ||||
| #endif | ||||
|     Integer to_rank; | ||||
|     Integer from_rank; | ||||
|     Integer do_send; | ||||
| @@ -324,7 +277,7 @@ public: | ||||
|   Vector<int> surface_list; | ||||
|  | ||||
|   stencilVector<StencilEntry>  _entries; // Resident in managed memory | ||||
|   commVector<StencilEntry>     _entries_device; // Resident in managed memory | ||||
|   commVector<StencilEntry>     _entries_device; // Resident in device memory | ||||
|   std::vector<Packet> Packets; | ||||
|   std::vector<Merge> Mergers; | ||||
|   std::vector<Merge> MergersSHM; | ||||
| @@ -408,33 +361,16 @@ public: | ||||
|   // Use OpenMP Tasks for cleaner ??? | ||||
|   // must be called *inside* parallel region | ||||
|   ////////////////////////////////////////// | ||||
|   /* | ||||
|   void CommunicateThreaded() | ||||
|   { | ||||
| #ifdef GRID_OMP | ||||
|     int mythread = omp_get_thread_num(); | ||||
|     int nthreads = CartesianCommunicator::nCommThreads; | ||||
| #else | ||||
|     int mythread = 0; | ||||
|     int nthreads = 1; | ||||
| #endif | ||||
|     if (nthreads == -1) nthreads = 1; | ||||
|     if (mythread < nthreads) { | ||||
|       for (int i = mythread; i < Packets.size(); i += nthreads) { | ||||
| 	uint64_t bytes = _grid->StencilSendToRecvFrom(Packets[i].send_buf, | ||||
| 						      Packets[i].to_rank, | ||||
| 						      Packets[i].recv_buf, | ||||
| 						      Packets[i].from_rank, | ||||
| 						      Packets[i].bytes,i); | ||||
|       } | ||||
|     } | ||||
|   } | ||||
|   */ | ||||
|   //////////////////////////////////////////////////////////////////////// | ||||
|   // Non blocking send and receive. Necessarily parallel. | ||||
|   //////////////////////////////////////////////////////////////////////// | ||||
|   void CommunicateBegin(std::vector<std::vector<CommsRequest_t> > &reqs) | ||||
|   { | ||||
|     // All GPU kernel tasks must complete | ||||
|     //    accelerator_barrier();     // All kernels should ALREADY be complete | ||||
|     //    _grid->StencilBarrier();   // Everyone is here, so no one is running slow and still using the receive buffer | ||||
|                                // But the HaloGather had a barrier too. | ||||
| #ifdef ACCELERATOR_AWARE_MPI | ||||
|     for(int i=0;i<Packets.size();i++){ | ||||
|       _grid->StencilSendToRecvFromBegin(MpiReqs, | ||||
| 					Packets[i].send_buf, | ||||
| @@ -443,16 +379,54 @@ public: | ||||
| 					Packets[i].from_rank,Packets[i].do_recv, | ||||
| 					Packets[i].xbytes,Packets[i].rbytes,i); | ||||
|     } | ||||
| #else | ||||
| #warning "Using COPY VIA HOST BUFFERS IN STENCIL" | ||||
|     for(int i=0;i<Packets.size();i++){ | ||||
|       // Introduce a host buffer with a cheap slab allocator and zero cost wipe all | ||||
|       Packets[i].host_send_buf = _grid->HostBufferMalloc(Packets[i].xbytes); | ||||
|       Packets[i].host_recv_buf = _grid->HostBufferMalloc(Packets[i].rbytes); | ||||
|       if ( Packets[i].do_send ) { | ||||
| 	acceleratorCopyFromDevice(Packets[i].send_buf, Packets[i].host_send_buf,Packets[i].xbytes); | ||||
|       } | ||||
|       _grid->StencilSendToRecvFromBegin(MpiReqs, | ||||
| 					Packets[i].host_send_buf, | ||||
| 					Packets[i].to_rank,Packets[i].do_send, | ||||
| 					Packets[i].host_recv_buf, | ||||
| 					Packets[i].from_rank,Packets[i].do_recv, | ||||
| 					Packets[i].xbytes,Packets[i].rbytes,i); | ||||
|     } | ||||
| #endif | ||||
|     // Get comms started then run checksums | ||||
|     // Having this PRIOR to the dslash seems to make Sunspot work... (!) | ||||
|     for(int i=0;i<Packets.size();i++){ | ||||
|       if ( Packets[i].do_send ) | ||||
| 	FlightRecorder::xmitLog(Packets[i].send_buf,Packets[i].xbytes); | ||||
|     } | ||||
|   } | ||||
|  | ||||
|   void CommunicateComplete(std::vector<std::vector<CommsRequest_t> > &reqs) | ||||
|   { | ||||
|     _grid->StencilSendToRecvFromComplete(MpiReqs,0); | ||||
|     _grid->StencilSendToRecvFromComplete(MpiReqs,0); // MPI is done | ||||
|     if   ( this->partialDirichlet ) DslashLogPartial(); | ||||
|     else if ( this->fullDirichlet ) DslashLogDirichlet(); | ||||
|     else DslashLogFull(); | ||||
|     acceleratorCopySynchronise(); | ||||
|     // acceleratorCopySynchronise() is in the StencilSendToRecvFromComplete | ||||
|     //    accelerator_barrier();  | ||||
|     _grid->StencilBarrier();  | ||||
| #ifndef ACCELERATOR_AWARE_MPI | ||||
| #warning "Using COPY VIA HOST BUFFERS IN STENCIL" | ||||
|     for(int i=0;i<Packets.size();i++){ | ||||
|       if ( Packets[i].do_recv ) { | ||||
| 	acceleratorCopyToDevice(Packets[i].host_recv_buf, Packets[i].recv_buf,Packets[i].rbytes); | ||||
|       } | ||||
|     } | ||||
|     _grid->HostBufferFreeAll(); | ||||
| #endif | ||||
|     // run any checksums | ||||
|     for(int i=0;i<Packets.size();i++){ | ||||
|       if ( Packets[i].do_recv ) | ||||
| 	FlightRecorder::recvLog(Packets[i].recv_buf,Packets[i].rbytes,Packets[i].from_rank); | ||||
|     } | ||||
|   } | ||||
|   //////////////////////////////////////////////////////////////////////// | ||||
|   // Blocking send and receive. Either sequential or parallel. | ||||
| @@ -528,6 +502,7 @@ public: | ||||
|   template<class compressor> | ||||
|   void HaloGather(const Lattice<vobj> &source,compressor &compress) | ||||
|   { | ||||
|     //    accelerator_barrier(); | ||||
|     _grid->StencilBarrier();// Synch shared memory on a single nodes | ||||
|  | ||||
|     assert(source.Grid()==_grid); | ||||
| @@ -540,10 +515,9 @@ public: | ||||
|       compress.Point(point); | ||||
|       HaloGatherDir(source,compress,point,face_idx); | ||||
|     } | ||||
|     accelerator_barrier(); | ||||
|     accelerator_barrier(); // All my local gathers are complete | ||||
|     face_table_computed=1; | ||||
|     assert(u_comm_offset==_unified_buffer_size); | ||||
|  | ||||
|   } | ||||
|  | ||||
|   ///////////////////////// | ||||
| @@ -579,6 +553,7 @@ public: | ||||
|       accelerator_forNB(j, words, cobj::Nsimd(), { | ||||
| 	  coalescedWrite(to[j] ,coalescedRead(from [j])); | ||||
|       }); | ||||
|       acceleratorFenceComputeStream(); | ||||
|     } | ||||
|   } | ||||
|    | ||||
| @@ -669,6 +644,7 @@ public: | ||||
|     for(int i=0;i<dd.size();i++){ | ||||
|       decompressor::DecompressFace(decompress,dd[i]); | ||||
|     } | ||||
|     acceleratorFenceComputeStream(); // dependent kernels | ||||
|   } | ||||
|   //////////////////////////////////////// | ||||
|   // Set up routines | ||||
| @@ -1224,7 +1200,6 @@ public: | ||||
| 	  /////////////////////////////////////////////////////////// | ||||
| 	  int do_send = (comms_send|comms_partial_send) && (!shm_send ); | ||||
| 	  int do_recv = (comms_send|comms_partial_send) && (!shm_recv ); | ||||
| 	   | ||||
| 	  AddPacket((void *)&send_buf[comm_off], | ||||
| 		    (void *)&recv_buf[comm_off], | ||||
| 		    xmit_to_rank, do_send, | ||||
|   | ||||
| @@ -405,11 +405,4 @@ NAMESPACE_BEGIN(Grid); | ||||
| NAMESPACE_END(Grid); | ||||
|  | ||||
|  | ||||
| #ifdef GRID_SYCL | ||||
| template<typename T> struct | ||||
| sycl::is_device_copyable<T, typename std::enable_if< | ||||
| 			      Grid::isGridTensor<T>::value  && (!std::is_trivially_copyable<T>::value), | ||||
| 			      void>::type> | ||||
|   : public std::true_type {}; | ||||
| #endif | ||||
|  | ||||
|   | ||||
| @@ -122,7 +122,7 @@ hipStream_t computeStream; | ||||
| void acceleratorInit(void) | ||||
| { | ||||
|   int nDevices = 1; | ||||
|   hipGetDeviceCount(&nDevices); | ||||
|   auto discard = hipGetDeviceCount(&nDevices); | ||||
|   gpu_props = new hipDeviceProp_t[nDevices]; | ||||
|  | ||||
|   char * localRankStr = NULL; | ||||
| @@ -149,7 +149,7 @@ void acceleratorInit(void) | ||||
| #define GPU_PROP_FMT(canMapHostMemory,FMT)     printf("AcceleratorHipInit:   " #canMapHostMemory ": " FMT" \n",prop.canMapHostMemory); | ||||
| #define GPU_PROP(canMapHostMemory)             GPU_PROP_FMT(canMapHostMemory,"%d"); | ||||
|      | ||||
|     auto r=hipGetDeviceProperties(&gpu_props[i], i); | ||||
|     discard = hipGetDeviceProperties(&gpu_props[i], i); | ||||
|     hipDeviceProp_t prop;  | ||||
|     prop = gpu_props[i]; | ||||
|     totalDeviceMem = prop.totalGlobalMem; | ||||
| @@ -186,13 +186,13 @@ void acceleratorInit(void) | ||||
|   } | ||||
|   int device = rank; | ||||
| #endif | ||||
|   hipSetDevice(device); | ||||
|   hipStreamCreate(©Stream); | ||||
|   hipStreamCreate(&computeStream); | ||||
|   discard = hipSetDevice(device); | ||||
|   discard = hipStreamCreate(©Stream); | ||||
|   discard = hipStreamCreate(&computeStream); | ||||
|   const int len=64; | ||||
|   char busid[len]; | ||||
|   if( rank == world_rank ) {  | ||||
|     hipDeviceGetPCIBusId(busid, len, device); | ||||
|     discard = hipDeviceGetPCIBusId(busid, len, device); | ||||
|     printf("local rank %d device %d bus id: %s\n", rank, device, busid); | ||||
|   } | ||||
|   if ( world_rank == 0 )  printf("AcceleratorHipInit: ================================================\n"); | ||||
| @@ -210,8 +210,8 @@ void acceleratorInit(void) | ||||
|   cl::sycl::gpu_selector selector; | ||||
|   cl::sycl::device selectedDevice { selector }; | ||||
|   theGridAccelerator = new sycl::queue (selectedDevice); | ||||
|   //  theCopyAccelerator = new sycl::queue (selectedDevice); | ||||
|   theCopyAccelerator = theGridAccelerator; // Should proceed concurrently anyway. | ||||
|   theCopyAccelerator = new sycl::queue (selectedDevice); | ||||
|   //  theCopyAccelerator = theGridAccelerator; // Should proceed concurrently anyway. | ||||
|  | ||||
| #ifdef GRID_SYCL_LEVEL_ZERO_IPC | ||||
|   zeInit(0); | ||||
|   | ||||
| @@ -117,7 +117,7 @@ accelerator_inline int acceleratorSIMTlane(int Nsimd) { | ||||
| #endif | ||||
| } // CUDA specific | ||||
|  | ||||
| inline void cuda_mem(void) | ||||
| inline void acceleratorMem(void) | ||||
| { | ||||
|   size_t free_t,total_t,used_t; | ||||
|   cudaMemGetInfo(&free_t,&total_t); | ||||
| @@ -125,6 +125,11 @@ inline void cuda_mem(void) | ||||
|   std::cout << " MemoryManager : GPU used "<<used_t<<" free "<<free_t<< " total "<<total_t<<std::endl; | ||||
| } | ||||
|  | ||||
| inline void cuda_mem(void) | ||||
| { | ||||
|   acceleratorMem(); | ||||
| } | ||||
|  | ||||
| #define accelerator_for2dNB( iter1, num1, iter2, num2, nsimd, ... )	\ | ||||
|   {									\ | ||||
|     int nt=acceleratorThreads();					\ | ||||
| @@ -137,6 +142,18 @@ inline void cuda_mem(void) | ||||
|     dim3 cu_blocks ((num1+nt-1)/nt,num2,1);				\ | ||||
|     LambdaApply<<<cu_blocks,cu_threads,0,computeStream>>>(num1,num2,nsimd,lambda);	\ | ||||
|   } | ||||
| #define prof_accelerator_for2dNB( iter1, num1, iter2, num2, nsimd, ... )	\ | ||||
|   {									\ | ||||
|     int nt=acceleratorThreads();					\ | ||||
|     typedef uint64_t Iterator;						\ | ||||
|     auto lambda = [=] accelerator					\ | ||||
|       (Iterator iter1,Iterator iter2,Iterator lane) mutable {		\ | ||||
|       __VA_ARGS__;							\ | ||||
|     };									\ | ||||
|     dim3 cu_threads(nsimd,acceleratorThreads(),1);			\ | ||||
|     dim3 cu_blocks ((num1+nt-1)/nt,num2,1);				\ | ||||
|     ProfileLambdaApply<<<cu_blocks,cu_threads,0,computeStream>>>(num1,num2,nsimd,lambda); \ | ||||
|   } | ||||
|  | ||||
| #define accelerator_for6dNB(iter1, num1,				\ | ||||
|                             iter2, num2,				\ | ||||
| @@ -157,6 +174,20 @@ inline void cuda_mem(void) | ||||
|     Lambda6Apply<<<cu_blocks,cu_threads,0,computeStream>>>(num1,num2,num3,num4,num5,num6,lambda); \ | ||||
|   } | ||||
|  | ||||
|  | ||||
| #define accelerator_for2dNB( iter1, num1, iter2, num2, nsimd, ... )	\ | ||||
|   {									\ | ||||
|     int nt=acceleratorThreads();					\ | ||||
|     typedef uint64_t Iterator;						\ | ||||
|     auto lambda = [=] accelerator					\ | ||||
|       (Iterator iter1,Iterator iter2,Iterator lane) mutable {		\ | ||||
|       __VA_ARGS__;							\ | ||||
|     };									\ | ||||
|     dim3 cu_threads(nsimd,acceleratorThreads(),1);			\ | ||||
|     dim3 cu_blocks ((num1+nt-1)/nt,num2,1);				\ | ||||
|     LambdaApply<<<cu_blocks,cu_threads,0,computeStream>>>(num1,num2,nsimd,lambda);	\ | ||||
|   } | ||||
|  | ||||
| template<typename lambda>  __global__ | ||||
| void LambdaApply(uint64_t num1, uint64_t num2, uint64_t num3, lambda Lambda) | ||||
| { | ||||
| @@ -168,6 +199,17 @@ void LambdaApply(uint64_t num1, uint64_t num2, uint64_t num3, lambda Lambda) | ||||
|     Lambda(x,y,z); | ||||
|   } | ||||
| } | ||||
| template<typename lambda>  __global__ | ||||
| void ProfileLambdaApply(uint64_t num1, uint64_t num2, uint64_t num3, lambda Lambda) | ||||
| { | ||||
|   // Weird permute is to make lanes coalesce for large blocks | ||||
|   uint64_t x = threadIdx.y + blockDim.y*blockIdx.x; | ||||
|   uint64_t y = threadIdx.z + blockDim.z*blockIdx.y; | ||||
|   uint64_t z = threadIdx.x; | ||||
|   if ( (x < num1) && (y<num2) && (z<num3) ) { | ||||
|     Lambda(x,y,z); | ||||
|   } | ||||
| } | ||||
|  | ||||
| template<typename lambda>  __global__ | ||||
| void Lambda6Apply(uint64_t num1, uint64_t num2, uint64_t num3, | ||||
| @@ -208,6 +250,7 @@ inline void *acceleratorAllocShared(size_t bytes) | ||||
|   if( err != cudaSuccess ) { | ||||
|     ptr = (void *) NULL; | ||||
|     printf(" cudaMallocManaged failed for %d %s \n",bytes,cudaGetErrorString(err)); | ||||
|     assert(0); | ||||
|   } | ||||
|   return ptr; | ||||
| }; | ||||
| @@ -234,6 +277,7 @@ inline void acceleratorCopyDeviceToDeviceAsynch(void *from,void *to,size_t bytes | ||||
| } | ||||
| inline void acceleratorCopySynchronise(void) { cudaStreamSynchronize(copyStream); }; | ||||
|  | ||||
|  | ||||
| inline int  acceleratorIsCommunicable(void *ptr) | ||||
| { | ||||
|   //  int uvm=0; | ||||
| @@ -265,6 +309,11 @@ NAMESPACE_END(Grid); | ||||
|  | ||||
| NAMESPACE_BEGIN(Grid); | ||||
|  | ||||
| inline void acceleratorMem(void) | ||||
| { | ||||
|   std::cout <<" SYCL acceleratorMem not implemented"<<std::endl; | ||||
| } | ||||
|  | ||||
| extern cl::sycl::queue *theGridAccelerator; | ||||
| extern cl::sycl::queue *theCopyAccelerator; | ||||
|  | ||||
| @@ -344,6 +393,15 @@ NAMESPACE_BEGIN(Grid); | ||||
| #define accelerator        __host__ __device__ | ||||
| #define accelerator_inline __host__ __device__ inline | ||||
|  | ||||
| inline void acceleratorMem(void) | ||||
| { | ||||
|   size_t free_t,total_t,used_t; | ||||
|   auto discard = hipMemGetInfo(&free_t,&total_t); | ||||
|   used_t=total_t-free_t; | ||||
|   std::cout << " MemoryManager : GPU used "<<used_t<<" free "<<free_t<< " total "<<total_t<<std::endl; | ||||
| } | ||||
|  | ||||
|  | ||||
| extern hipStream_t copyStream; | ||||
| extern hipStream_t computeStream; | ||||
| /*These routines define mapping from thread grid to loop & vector lane indexing */ | ||||
| @@ -404,7 +462,7 @@ void LambdaApply(uint64_t numx, uint64_t numy, uint64_t numz, lambda Lambda) | ||||
|  | ||||
| #define accelerator_barrier(dummy)				\ | ||||
|   {								\ | ||||
|     auto r=hipStreamSynchronize(computeStream);			\ | ||||
|     auto tmp=hipStreamSynchronize(computeStream);		\ | ||||
|     auto err = hipGetLastError();				\ | ||||
|     if ( err != hipSuccess ) {					\ | ||||
|       printf("After hipDeviceSynchronize() : HIP error %s \n", hipGetErrorString( err )); \ | ||||
| @@ -420,7 +478,7 @@ inline void *acceleratorAllocShared(size_t bytes) | ||||
|   auto err = hipMallocManaged((void **)&ptr,bytes); | ||||
|   if( err != hipSuccess ) { | ||||
|     ptr = (void *) NULL; | ||||
|     printf(" hipMallocManaged failed for %ld %s \n",bytes,hipGetErrorString(err)); | ||||
|     fprintf(stderr," hipMallocManaged failed for %ld %s \n",bytes,hipGetErrorString(err)); fflush(stderr); | ||||
|   } | ||||
|   return ptr; | ||||
| }; | ||||
| @@ -432,26 +490,30 @@ inline void *acceleratorAllocDevice(size_t bytes) | ||||
|   auto err = hipMalloc((void **)&ptr,bytes); | ||||
|   if( err != hipSuccess ) { | ||||
|     ptr = (void *) NULL; | ||||
|     printf(" hipMalloc failed for %ld %s \n",bytes,hipGetErrorString(err)); | ||||
|     fprintf(stderr," hipMalloc failed for %ld %s \n",bytes,hipGetErrorString(err)); fflush(stderr); | ||||
|   } | ||||
|   return ptr; | ||||
| }; | ||||
|  | ||||
| inline void acceleratorFreeShared(void *ptr){ auto r=hipFree(ptr);}; | ||||
| inline void acceleratorFreeDevice(void *ptr){ auto r=hipFree(ptr);}; | ||||
| inline void acceleratorCopyToDevice(void *from,void *to,size_t bytes)  { auto r=hipMemcpy(to,from,bytes, hipMemcpyHostToDevice);} | ||||
| inline void acceleratorCopyFromDevice(void *from,void *to,size_t bytes){ auto r=hipMemcpy(to,from,bytes, hipMemcpyDeviceToHost);} | ||||
| inline void acceleratorCopyToDeviceAsync(void *from, void *to, size_t bytes, hipStream_t stream = copyStream) { auto r = hipMemcpyAsync(to,from,bytes, hipMemcpyHostToDevice, stream);} | ||||
| inline void acceleratorCopyFromDeviceAsync(void *from, void *to, size_t bytes, hipStream_t stream = copyStream) { auto r = hipMemcpyAsync(to,from,bytes, hipMemcpyDeviceToHost, stream);} | ||||
| inline void acceleratorFreeShared(void *ptr){ auto discard=hipFree(ptr);}; | ||||
| inline void acceleratorFreeDevice(void *ptr){ auto discard=hipFree(ptr);}; | ||||
| inline void acceleratorCopyToDevice(void *from,void *to,size_t bytes)  { auto discard=hipMemcpy(to,from,bytes, hipMemcpyHostToDevice);} | ||||
| inline void acceleratorCopyFromDevice(void *from,void *to,size_t bytes){ auto discard=hipMemcpy(to,from,bytes, hipMemcpyDeviceToHost);} | ||||
| //inline void acceleratorCopyDeviceToDeviceAsynch(void *from,void *to,size_t bytes)  { hipMemcpy(to,from,bytes, hipMemcpyDeviceToDevice);} | ||||
| //inline void acceleratorCopySynchronise(void) {  } | ||||
| inline void acceleratorMemSet(void *base,int value,size_t bytes) { auto r=hipMemset(base,value,bytes);} | ||||
| inline void acceleratorMemSet(void *base,int value,size_t bytes) { auto discard=hipMemset(base,value,bytes);} | ||||
|  | ||||
| inline void acceleratorCopyDeviceToDeviceAsynch(void *from,void *to,size_t bytes) // Asynch | ||||
| { | ||||
|   auto r=hipMemcpyDtoDAsync(to,from,bytes, copyStream); | ||||
|   auto discard=hipMemcpyDtoDAsync(to,from,bytes, copyStream); | ||||
| } | ||||
| inline void acceleratorCopySynchronise(void) { auto r=hipStreamSynchronize(copyStream); }; | ||||
| inline void acceleratorCopyToDeviceAsync(void *from, void *to, size_t bytes, hipStream_t stream = copyStream) { | ||||
|   auto r = hipMemcpyAsync(to,from,bytes, hipMemcpyHostToDevice, stream); | ||||
| } | ||||
| inline void acceleratorCopyFromDeviceAsync(void *from, void *to, size_t bytes, hipStream_t stream = copyStream) { | ||||
|   auto r = hipMemcpyAsync(to,from,bytes, hipMemcpyDeviceToHost, stream); | ||||
| } | ||||
| inline void acceleratorCopySynchronise(void) { auto discard=hipStreamSynchronize(copyStream); }; | ||||
|  | ||||
| #endif | ||||
|  | ||||
| @@ -461,6 +523,9 @@ inline void acceleratorCopySynchronise(void) { auto r=hipStreamSynchronize(copyS | ||||
| #if defined(GRID_SYCL) || defined(GRID_CUDA) || defined(GRID_HIP) | ||||
| // FIXME -- the non-blocking nature got broken March 30 2023 by PAB | ||||
| #define accelerator_forNB( iter1, num1, nsimd, ... ) accelerator_for2dNB( iter1, num1, iter2, 1, nsimd, {__VA_ARGS__} );   | ||||
| #define prof_accelerator_for( iter1, num1, nsimd, ... ) \ | ||||
|   prof_accelerator_for2dNB( iter1, num1, iter2, 1, nsimd, {__VA_ARGS__} );\ | ||||
|   accelerator_barrier(dummy); | ||||
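Editorial usage sketch for the profiling macro (not part of the diff); the views in_v and out_v and the type vobj are assumed to be in scope, opened with AcceleratorRead/AcceleratorWrite as elsewhere in Grid.

// Sketch only: identical call shape to accelerator_for, but the launch goes
// through ProfileLambdaApply and then blocks on accelerator_barrier.
prof_accelerator_for(ss, in_v.size(), vobj::Nsimd(), {
    coalescedWrite(out_v[ss], coalescedRead(in_v[ss]));
});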
|  | ||||
| #define accelerator_for( iter, num, nsimd, ... )		\ | ||||
|   accelerator_forNB(iter, num, nsimd, { __VA_ARGS__ } );	\ | ||||
| @@ -482,7 +547,15 @@ inline void acceleratorCopySynchronise(void) { auto r=hipStreamSynchronize(copyS | ||||
|  | ||||
| #undef GRID_SIMT | ||||
|  | ||||
|  | ||||
| inline void acceleratorMem(void) | ||||
| { | ||||
|   /* | ||||
|     struct rusage rusage; | ||||
|     getrusage( RUSAGE_SELF, &rusage ); | ||||
|     return (size_t)rusage.ru_maxrss; | ||||
|   */ | ||||
|   std::cout <<" system acceleratorMem not implemented"<<std::endl; | ||||
| } | ||||
|  | ||||
| #define accelerator  | ||||
| #define accelerator_inline strong_inline | ||||
| @@ -582,5 +655,18 @@ inline void acceleratorCopyDeviceToDevice(void *from,void *to,size_t bytes) | ||||
|   acceleratorCopySynchronise(); | ||||
| } | ||||
|  | ||||
| template<class T> void acceleratorPut(T& dev,T&host) | ||||
| { | ||||
|   acceleratorCopyToDevice(&host,&dev,sizeof(T)); | ||||
| } | ||||
| template<class T> T acceleratorGet(T& dev) | ||||
| { | ||||
|   T host; | ||||
|   acceleratorCopyFromDevice(&dev,&host,sizeof(T)); | ||||
|   return host; | ||||
| } | ||||
|  | ||||
|  | ||||
|  | ||||
|  | ||||
| NAMESPACE_END(Grid); | ||||
|   | ||||
| @@ -94,6 +94,13 @@ static constexpr int MaxDims = GRID_MAX_LATTICE_DIMENSION; | ||||
|  | ||||
| typedef AcceleratorVector<int,MaxDims> Coordinate; | ||||
|  | ||||
| template<class T,int _ndim> | ||||
| inline bool operator==(const AcceleratorVector<T,_ndim> &v,const AcceleratorVector<T,_ndim> &w) | ||||
| { | ||||
|   if (v.size()!=w.size()) return false; | ||||
|   for(int i=0;i<v.size();i++) if ( v[i]!=w[i] ) return false; | ||||
|   return true; | ||||
| } | ||||
| template<class T,int _ndim> | ||||
| inline std::ostream & operator<<(std::ostream &os, const AcceleratorVector<T,_ndim> &v) | ||||
| { | ||||
|   | ||||
							
								
								
									
Grid/util/FlightRecorder.cc (new file, 336 lines)
							| @@ -0,0 +1,336 @@ | ||||
| /************************************************************************************* | ||||
|  | ||||
|     Grid physics library, www.github.com/paboyle/Grid | ||||
|  | ||||
|     Source file: ./lib/Init.cc | ||||
|  | ||||
|     Copyright (C) 2015 | ||||
|  | ||||
| Author: Azusa Yamaguchi <ayamaguc@staffmail.ed.ac.uk> | ||||
| Author: Peter Boyle <paboyle@ph.ed.ac.uk> | ||||
| Author: Peter Boyle <peterboyle@MacBook-Pro.local> | ||||
| Author: paboyle <paboyle@ph.ed.ac.uk> | ||||
|  | ||||
|     This program is free software; you can redistribute it and/or modify | ||||
|     it under the terms of the GNU General Public License as published by | ||||
|     the Free Software Foundation; either version 2 of the License, or | ||||
|     (at your option) any later version. | ||||
|  | ||||
|     This program is distributed in the hope that it will be useful, | ||||
|     but WITHOUT ANY WARRANTY; without even the implied warranty of | ||||
|     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the | ||||
|     GNU General Public License for more details. | ||||
|  | ||||
|     You should have received a copy of the GNU General Public License along | ||||
|     with this program; if not, write to the Free Software Foundation, Inc., | ||||
|     51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. | ||||
|  | ||||
|     See the full license in the file "LICENSE" in the top level distribution directory | ||||
| *************************************************************************************/ | ||||
| /*  END LEGAL */ | ||||
| #include <Grid/Grid.h> | ||||
|  | ||||
| NAMESPACE_BEGIN(Grid); | ||||
| /////////////////////////////////////////////////////// | ||||
| // Grid Norm logging for repro testing | ||||
| /////////////////////////////////////////////////////// | ||||
| int FlightRecorder::PrintEntireLog; | ||||
| int FlightRecorder::ContinueOnFail; | ||||
| int FlightRecorder::LoggingMode; | ||||
| int FlightRecorder::ChecksumComms; | ||||
| int FlightRecorder::ChecksumCommsSend; | ||||
| int32_t  FlightRecorder::XmitLoggingCounter; | ||||
| int32_t  FlightRecorder::RecvLoggingCounter; | ||||
| int32_t  FlightRecorder::CsumLoggingCounter; | ||||
| int32_t  FlightRecorder::NormLoggingCounter; | ||||
| int32_t  FlightRecorder::ReductionLoggingCounter; | ||||
| uint64_t FlightRecorder::ErrorCounter; | ||||
| std::vector<double> FlightRecorder::NormLogVector; | ||||
| std::vector<double> FlightRecorder::ReductionLogVector; | ||||
| std::vector<uint64_t> FlightRecorder::CsumLogVector; | ||||
| std::vector<uint64_t> FlightRecorder::XmitLogVector; | ||||
| std::vector<uint64_t> FlightRecorder::RecvLogVector; | ||||
|  | ||||
| void FlightRecorder::ResetCounters(void) | ||||
| { | ||||
|   XmitLoggingCounter=0; | ||||
|   RecvLoggingCounter=0; | ||||
|   CsumLoggingCounter=0; | ||||
|   NormLoggingCounter=0; | ||||
|   ReductionLoggingCounter=0; | ||||
| } | ||||
| void FlightRecorder::Truncate(void) | ||||
| { | ||||
|   ResetCounters(); | ||||
|   XmitLogVector.resize(0); | ||||
|   RecvLogVector.resize(0); | ||||
|   NormLogVector.resize(0); | ||||
|   CsumLogVector.resize(0); | ||||
|   ReductionLogVector.resize(0); | ||||
| } | ||||
| void FlightRecorder::SetLoggingMode(FlightRecorder::LoggingMode_t mode) | ||||
| { | ||||
|   switch ( mode ) { | ||||
|   case LoggingModePrint: | ||||
|     SetLoggingModePrint(); | ||||
|     break; | ||||
|   case LoggingModeRecord: | ||||
|     SetLoggingModeRecord(); | ||||
|     break; | ||||
|   case LoggingModeVerify: | ||||
|     SetLoggingModeVerify(); | ||||
|     break; | ||||
|   case LoggingModeNone: | ||||
|     LoggingMode = mode; | ||||
|     Truncate(); | ||||
|     break; | ||||
|   default: | ||||
|     assert(0); | ||||
|   } | ||||
| } | ||||
|  | ||||
| void FlightRecorder::SetLoggingModePrint(void) | ||||
| { | ||||
|   std::cout << " FlightRecorder: set to print output " <<std::endl; | ||||
|   Truncate(); | ||||
|   LoggingMode = LoggingModePrint; | ||||
| } | ||||
| void FlightRecorder::SetLoggingModeRecord(void) | ||||
| { | ||||
|   std::cout << " FlightRecorder: set to RECORD " <<std::endl; | ||||
|   Truncate(); | ||||
|   LoggingMode = LoggingModeRecord; | ||||
| } | ||||
| void FlightRecorder::SetLoggingModeVerify(void) | ||||
| { | ||||
|   std::cout << " FlightRecorder: set to VERIFY " << NormLogVector.size()<< " log entries "<<std::endl; | ||||
|   ResetCounters(); | ||||
|   LoggingMode = LoggingModeVerify; | ||||
| } | ||||
| uint64_t FlightRecorder::ErrorCount(void) | ||||
| { | ||||
|   return ErrorCounter; | ||||
| } | ||||
| void FlightRecorder::NormLog(double value) | ||||
| { | ||||
|   uint64_t hex = * ( (uint64_t *)&value ); | ||||
|   if(LoggingMode == LoggingModePrint) { | ||||
|     std::cerr<<"FlightRecorder::NormLog : "<< NormLoggingCounter <<" "<<std::hex<< hex<<std::dec <<std::endl; | ||||
|     NormLoggingCounter++; | ||||
|   } | ||||
|   if(LoggingMode == LoggingModeRecord) { | ||||
|     std::cerr<<"FlightRecorder::NormLog RECORDING : "<< NormLoggingCounter <<" "<<std::hex<< hex<<std::dec <<std::endl; | ||||
|     NormLogVector.push_back(value); | ||||
|     NormLoggingCounter++; | ||||
|   } | ||||
|   if(LoggingMode == LoggingModeVerify) { | ||||
|  | ||||
|     if(NormLoggingCounter < NormLogVector.size()){ | ||||
|       uint64_t hexref  = * ( (uint64_t *)&NormLogVector[NormLoggingCounter] ); | ||||
|  | ||||
|       if ( (value != NormLogVector[NormLoggingCounter]) || std::isnan(value) ) { | ||||
|  | ||||
| 	std::cerr<<"FlightRecorder::NormLog Oops, I did it again "<< NormLoggingCounter | ||||
| 		 <<std::hex<<" "<<hex<<" "<<hexref<<std::dec<<" " | ||||
| 		 <<std::hexfloat<<value<<" "<< NormLogVector[NormLoggingCounter]<<std::endl; | ||||
|  | ||||
| 	std::cerr << " Oops got norm "<< std::hexfloat<<value<<" expect "<<NormLogVector[NormLoggingCounter] <<std::endl; | ||||
|  | ||||
| 	fprintf(stderr,"%s:%d Oops, I did it again! Reproduce failure for norm %d/%zu %.16e expect %.16e\n", | ||||
| 		GridHostname(), | ||||
| 		GlobalSharedMemory::WorldShmRank, | ||||
| 		NormLoggingCounter,NormLogVector.size(), | ||||
| 		value, NormLogVector[NormLoggingCounter]); fflush(stderr); | ||||
|  | ||||
| 	if(!ContinueOnFail)assert(0); // Force takedown of job | ||||
| 	   | ||||
| 	ErrorCounter++; | ||||
|       } else { | ||||
| 	if ( PrintEntireLog ) {  | ||||
| 	  std::cerr<<"FlightRecorder::NormLog VALID "<< NormLoggingCounter << std::hex | ||||
| 		   <<" "<<hex<<" "<<hexref | ||||
| 		   <<" "<<std::hexfloat<<value<<" "<< NormLogVector[NormLoggingCounter]<<std::dec<<std::endl; | ||||
| 	} | ||||
|       } | ||||
|         | ||||
|     } | ||||
|     if ( NormLogVector.size()==NormLoggingCounter ) { | ||||
|       std::cout << "FlightRecorder:: Verified entire sequence of "<<NormLoggingCounter<<" norms "<<std::endl; | ||||
|     } | ||||
|     NormLoggingCounter++; | ||||
|   } | ||||
| } | ||||
| void FlightRecorder::CsumLog(uint64_t hex) | ||||
| { | ||||
|   if(LoggingMode == LoggingModePrint) { | ||||
|     std::cerr<<"FlightRecorder::CsumLog : "<< CsumLoggingCounter <<" "<<std::hex<< hex<<std::dec <<std::endl; | ||||
|     CsumLoggingCounter++; | ||||
|   } | ||||
|  | ||||
|   if(LoggingMode == LoggingModeRecord) { | ||||
|     std::cerr<<"FlightRecorder::CsumLog RECORDING : "<< CsumLoggingCounter <<" "<<std::hex<< hex<<std::dec <<std::endl; | ||||
|     CsumLogVector.push_back(hex); | ||||
|     CsumLoggingCounter++; | ||||
|   } | ||||
|  | ||||
|   if(LoggingMode == LoggingModeVerify) { | ||||
|      | ||||
|     if(CsumLoggingCounter < CsumLogVector.size()) { | ||||
|  | ||||
|       uint64_t hexref  = CsumLogVector[CsumLoggingCounter] ; | ||||
|  | ||||
|       if ( hex != hexref ) { | ||||
|  | ||||
|         std::cerr<<"FlightRecorder::CsumLog Oops, I did it again "<< CsumLoggingCounter | ||||
| 		 <<std::hex<<" "<<hex<<" "<<hexref<<std::dec<<std::endl; | ||||
|  | ||||
| 	fprintf(stderr,"%s:%d Oops, I did it again! Reproduce failure for csum %d %lx expect %lx\n", | ||||
| 		GridHostname(), | ||||
| 		GlobalSharedMemory::WorldShmRank, | ||||
| 		CsumLoggingCounter,hex, hexref); | ||||
| 	fflush(stderr); | ||||
|  | ||||
| 	if(!ContinueOnFail) assert(0); // Force takedown of job | ||||
| 	   | ||||
| 	ErrorCounter++; | ||||
|  | ||||
|       } else { | ||||
|  | ||||
| 	if ( PrintEntireLog ) {  | ||||
| 	  std::cerr<<"FlightRecorder::CsumLog VALID "<< CsumLoggingCounter << std::hex | ||||
| 		   <<" "<<hex<<" "<<hexref<<std::dec<<std::endl; | ||||
| 	} | ||||
|       } | ||||
|     }   | ||||
|     if ( CsumLogVector.size()==CsumLoggingCounter ) { | ||||
|       std::cout << "FlightRecorder:: Verified entire sequence of "<<CsumLoggingCounter<<" checksums "<<std::endl; | ||||
|     } | ||||
|     CsumLoggingCounter++; | ||||
|   } | ||||
| } | ||||
| void FlightRecorder::ReductionLog(double local,double global) | ||||
| { | ||||
|   uint64_t hex_l = * ( (uint64_t *)&local ); | ||||
|   uint64_t hex_g = * ( (uint64_t *)&global ); | ||||
|   if(LoggingMode == LoggingModePrint) { | ||||
|     std::cerr<<"FlightRecorder::ReductionLog : "<< ReductionLoggingCounter <<" "<< std::hex << hex_l << " -> " <<hex_g<<std::dec <<std::endl; | ||||
|     ReductionLoggingCounter++; | ||||
|   } | ||||
|   if(LoggingMode == LoggingModeRecord) { | ||||
|     std::cerr<<"FlightRecorder::ReductionLog RECORDING : "<< ReductionLoggingCounter <<" "<< std::hex << hex_l << " -> " <<hex_g<<std::dec <<std::endl; | ||||
|     ReductionLogVector.push_back(global); | ||||
|     ReductionLoggingCounter++; | ||||
|   } | ||||
|   if(LoggingMode == LoggingModeVerify) { | ||||
|     if(ReductionLoggingCounter < ReductionLogVector.size()){ | ||||
|       if ( global != ReductionLogVector[ReductionLoggingCounter] ) { | ||||
| 	fprintf(stderr,"%s:%d Oops, MPI_Allreduce did it again! Reproduce failure for norm %d/%zu glb %.16e lcl %.16e expect glb %.16e\n", | ||||
| 		GridHostname(), | ||||
| 		GlobalSharedMemory::WorldShmRank, | ||||
| 		ReductionLoggingCounter,ReductionLogVector.size(), | ||||
| 		global, local, ReductionLogVector[ReductionLoggingCounter]); fflush(stderr); | ||||
| 	 | ||||
| 	if ( !ContinueOnFail ) assert(0); | ||||
|  | ||||
| 	ErrorCounter++; | ||||
|       } else { | ||||
| 	if ( PrintEntireLog ) {  | ||||
| 	  std::cerr<<"FlightRecorder::ReductionLog : VALID "<< ReductionLoggingCounter <<" "<< std::hexfloat << local << "-> "<< global <<std::endl; | ||||
| 	} | ||||
|       } | ||||
|     } | ||||
|     if ( ReductionLogVector.size()==ReductionLoggingCounter ) { | ||||
|       std::cout << "FlightRecorder::ReductionLog : Verified entire sequence of "<<ReductionLoggingCounter<<" norms "<<std::endl; | ||||
|     } | ||||
|     ReductionLoggingCounter++; | ||||
|   } | ||||
| } | ||||
| void FlightRecorder::xmitLog(void *buf,uint64_t bytes) | ||||
| { | ||||
|   if(LoggingMode == LoggingModeNone) return; | ||||
|  | ||||
|   if ( ChecksumCommsSend ){ | ||||
|   uint64_t *ubuf = (uint64_t *)buf; | ||||
|   if(LoggingMode == LoggingModeNone) return; | ||||
|    | ||||
| #ifdef GRID_SYCL | ||||
|   uint64_t _xor = svm_xor(ubuf,bytes/sizeof(uint64_t)); | ||||
|   if(LoggingMode == LoggingModePrint) { | ||||
|     std::cerr<<"FlightRecorder::xmitLog : "<< XmitLoggingCounter <<" "<< std::hex << _xor <<std::dec <<std::endl; | ||||
|     XmitLoggingCounter++; | ||||
|   } | ||||
|   if(LoggingMode == LoggingModeRecord) { | ||||
|     std::cerr<<"FlightRecorder::xmitLog RECORD : "<< XmitLoggingCounter <<" "<< std::hex << _xor <<std::dec <<std::endl; | ||||
|     XmitLogVector.push_back(_xor); | ||||
|     XmitLoggingCounter++; | ||||
|   } | ||||
|   if(LoggingMode == LoggingModeVerify) { | ||||
|     if(XmitLoggingCounter < XmitLogVector.size()){ | ||||
|       if ( _xor != XmitLogVector[XmitLoggingCounter] ) { | ||||
| 	fprintf(stderr,"%s:%d Oops, send buf difference! Reproduce failure for xmit %d/%zu  %lx expect glb %lx\n", | ||||
| 		GridHostname(), | ||||
| 		GlobalSharedMemory::WorldShmRank, | ||||
| 		XmitLoggingCounter,XmitLogVector.size(), | ||||
| 		_xor, XmitLogVector[XmitLoggingCounter]); fflush(stderr); | ||||
| 	 | ||||
| 	if ( !ContinueOnFail ) assert(0); | ||||
|  | ||||
| 	ErrorCounter++; | ||||
|       } else { | ||||
| 	if ( PrintEntireLog ) {  | ||||
| 	  std::cerr<<"FlightRecorder::XmitLog : VALID "<< XmitLoggingCounter <<" "<< std::hexfloat << _xor << " "<<  XmitLogVector[XmitLoggingCounter] <<std::endl; | ||||
| 	} | ||||
|       } | ||||
|     } | ||||
|     if ( XmitLogVector.size()==XmitLoggingCounter ) { | ||||
|       std::cout << "FlightRecorder::xmitLog : Verified entire sequence of "<<XmitLoggingCounter<<" sends "<<std::endl; | ||||
|     } | ||||
|     XmitLoggingCounter++; | ||||
|   } | ||||
| #endif | ||||
|   } | ||||
| } | ||||
| void FlightRecorder::recvLog(void *buf,uint64_t bytes,int rank) | ||||
| { | ||||
|   if ( ChecksumComms ){ | ||||
|   uint64_t *ubuf = (uint64_t *)buf; | ||||
|   if(LoggingMode == LoggingModeNone) return; | ||||
| #ifdef GRID_SYCL | ||||
|   uint64_t _xor = svm_xor(ubuf,bytes/sizeof(uint64_t)); | ||||
|   if(LoggingMode == LoggingModePrint) { | ||||
|     std::cerr<<"FlightRecorder::recvLog : "<< RecvLoggingCounter <<" "<< std::hex << _xor <<std::dec <<std::endl; | ||||
|     RecvLoggingCounter++; | ||||
|   } | ||||
|   if(LoggingMode == LoggingModeRecord) { | ||||
|     std::cerr<<"FlightRecorder::recvLog RECORD : "<< RecvLoggingCounter <<" "<< std::hex << _xor <<std::dec <<std::endl; | ||||
|     RecvLogVector.push_back(_xor); | ||||
|     RecvLoggingCounter++; | ||||
|   } | ||||
|   if(LoggingMode == LoggingModeVerify) { | ||||
|     if(RecvLoggingCounter < RecvLogVector.size()){ | ||||
|       if ( _xor != RecvLogVector[RecvLoggingCounter] ) { | ||||
| 	fprintf(stderr,"%s:%d Oops, recv buf difference! Reproduce failure for recv %d/%zu  %lx expect glb %lx from MPI rank %d\n", | ||||
| 		GridHostname(), | ||||
| 		GlobalSharedMemory::WorldShmRank, | ||||
| 		RecvLoggingCounter,RecvLogVector.size(), | ||||
| 		_xor, RecvLogVector[RecvLoggingCounter],rank); fflush(stderr); | ||||
| 	 | ||||
| 	if ( !ContinueOnFail ) assert(0); | ||||
|  | ||||
| 	ErrorCounter++; | ||||
|       } else { | ||||
| 	if ( PrintEntireLog ) {  | ||||
| 	  std::cerr<<"FlightRecorder::RecvLog : VALID "<< RecvLoggingCounter <<" "<< std::hexfloat << _xor << " "<<  RecvLogVector[RecvLoggingCounter] <<std::endl; | ||||
| 	} | ||||
|       } | ||||
|     } | ||||
|     if ( RecvLogVector.size()==RecvLoggingCounter ) { | ||||
|       std::cout << "FlightRecorder::recvLog : Verified entire sequence of "<<RecvLoggingCounter<<" receives "<<std::endl; | ||||
|     } | ||||
|     RecvLoggingCounter++; | ||||
|   } | ||||
| #endif | ||||
|   } | ||||
| } | ||||
|  | ||||
| NAMESPACE_END(Grid); | ||||
							
								
								
									
Grid/util/FlightRecorder.h (new file, 43 lines added)
									
								
							| @@ -0,0 +1,43 @@ | ||||
| #pragma once | ||||
|  | ||||
| NAMESPACE_BEGIN(Grid); | ||||
| class FlightRecorder { | ||||
|  public: | ||||
|   enum LoggingMode_t { | ||||
|     LoggingModeNone, | ||||
|     LoggingModePrint, | ||||
|     LoggingModeRecord, | ||||
|     LoggingModeVerify | ||||
|   }; | ||||
|    | ||||
|   static int                   LoggingMode; | ||||
|   static uint64_t              ErrorCounter; | ||||
|   static int32_t               XmitLoggingCounter; | ||||
|   static int32_t               RecvLoggingCounter; | ||||
|   static int32_t               CsumLoggingCounter; | ||||
|   static int32_t               NormLoggingCounter; | ||||
|   static int32_t               ReductionLoggingCounter; | ||||
|   static std::vector<uint64_t> XmitLogVector; | ||||
|   static std::vector<uint64_t> RecvLogVector; | ||||
|   static std::vector<uint64_t> CsumLogVector; | ||||
|   static std::vector<double>   NormLogVector; | ||||
|   static std::vector<double>   ReductionLogVector; | ||||
|   static int ContinueOnFail; | ||||
|   static int PrintEntireLog; | ||||
|   static int ChecksumComms; | ||||
|   static int ChecksumCommsSend; | ||||
|   static void SetLoggingModePrint(void); | ||||
|   static void SetLoggingModeRecord(void); | ||||
|   static void SetLoggingModeVerify(void); | ||||
|   static void SetLoggingMode(LoggingMode_t mode); | ||||
|   static void NormLog(double value); | ||||
|   static void CsumLog(uint64_t csum); | ||||
|   static void ReductionLog(double lcl, double glbl); | ||||
|   static void Truncate(void); | ||||
|   static void ResetCounters(void); | ||||
|   static uint64_t ErrorCount(void); | ||||
|   static void xmitLog(void *,uint64_t bytes); | ||||
|   static void recvLog(void *,uint64_t bytes,int rank); | ||||
| }; | ||||
| NAMESPACE_END(Grid); | ||||
|  | ||||
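For orientation, a record/verify round trip with this interface would look roughly like the sketch below. RunSolve() is a hypothetical placeholder for any workload whose internals call NormLog/CsumLog/ReductionLog; it is not part of the patch.

    // Pass 1: record the reference sequence of norms/checksums.
    FlightRecorder::SetLoggingMode(FlightRecorder::LoggingModeRecord);
    RunSolve();                                // hypothetical instrumented workload

    // Pass 2: replay and compare against the recorded log.
    FlightRecorder::ContinueOnFail = 1;        // count mismatches instead of asserting out
    FlightRecorder::SetLoggingMode(FlightRecorder::LoggingModeVerify);
    RunSolve();
    std::cout << "FlightRecorder mismatches: " << FlightRecorder::ErrorCount() << std::endl;

Note that SetLoggingModeVerify only resets the counters and keeps the recorded vectors, which is what lets the second pass compare entry by entry.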
| @@ -94,7 +94,12 @@ int GridThread::_threads =1; | ||||
| int GridThread::_hyperthreads=1; | ||||
| int GridThread::_cores=1; | ||||
|  | ||||
| char hostname[HOST_NAME_MAX+1]; | ||||
|  | ||||
| char *GridHostname(void) | ||||
| { | ||||
|   return hostname; | ||||
| } | ||||
| const Coordinate &GridDefaultLatt(void)     {return Grid_default_latt;}; | ||||
| const Coordinate &GridDefaultMpi(void)      {return Grid_default_mpi;}; | ||||
| const Coordinate GridDefaultSimd(int dims,int nsimd) | ||||
| @@ -287,6 +292,7 @@ void GridBanner(void) | ||||
|     std::cout << "Build " << GRID_BUILD_STR(GRID_BUILD_REF) << std::endl; | ||||
| #endif | ||||
|     std::cout << std::endl; | ||||
|     std::cout << std::setprecision(9); | ||||
| } | ||||
|  | ||||
| void Grid_init(int *argc,char ***argv) | ||||
| @@ -397,7 +403,6 @@ void Grid_init(int *argc,char ***argv) | ||||
|   std::cout << GridLogMessage << "MPI is initialised and logging filters activated "<<std::endl; | ||||
|   std::cout << GridLogMessage << "================================================ "<<std::endl; | ||||
|  | ||||
|   char hostname[HOST_NAME_MAX+1]; | ||||
|   gethostname(hostname, HOST_NAME_MAX+1); | ||||
|   std::cout << GridLogMessage << "This rank is running on host "<< hostname<<std::endl; | ||||
|  | ||||
| @@ -420,7 +425,7 @@ void Grid_init(int *argc,char ***argv) | ||||
|   // Logging | ||||
|   //////////////////////////////////// | ||||
|   std::vector<std::string> logstreams; | ||||
|   std::string defaultLog("Error,Warning,Message,Performance"); | ||||
|   std::string defaultLog("Error,Warning,Message"); | ||||
|   GridCmdOptionCSL(defaultLog,logstreams); | ||||
|   GridLogConfigure(logstreams); | ||||
|  | ||||
| @@ -544,6 +549,10 @@ void Grid_init(int *argc,char ***argv) | ||||
|  | ||||
| void Grid_finalize(void) | ||||
| { | ||||
|   std::cout<<GridLogMessage<<"*******************************************"<<std::endl; | ||||
|   std::cout<<GridLogMessage<<"******* Grid Finalize                ******"<<std::endl; | ||||
|   std::cout<<GridLogMessage<<"*******************************************"<<std::endl; | ||||
|  | ||||
| #if defined (GRID_COMMS_MPI) || defined (GRID_COMMS_MPI3) || defined (GRID_COMMS_MPIT) | ||||
|   MPI_Barrier(MPI_COMM_WORLD); | ||||
|   MPI_Finalize(); | ||||
|   | ||||
| @@ -34,6 +34,8 @@ NAMESPACE_BEGIN(Grid); | ||||
| void Grid_init(int *argc,char ***argv); | ||||
| void Grid_finalize(void); | ||||
|  | ||||
| char * GridHostname(void); | ||||
|  | ||||
| // internal, controlled with --handle | ||||
| void Grid_sa_signal_handler(int sig,siginfo_t *si,void * ptr); | ||||
| void Grid_debug_handler_init(void); | ||||
| @@ -68,5 +70,6 @@ void GridParseLayout(char **argv,int argc, | ||||
| void printHash(void); | ||||
|  | ||||
|  | ||||
|  | ||||
| NAMESPACE_END(Grid); | ||||
|  | ||||
|   | ||||
| @@ -8,7 +8,7 @@ namespace Grid{ | ||||
|   public: | ||||
|  | ||||
|     template<class coor_t> | ||||
|     static accelerator_inline void CoorFromIndex (coor_t& coor,int index,const coor_t &dims){ | ||||
|     static accelerator_inline void CoorFromIndex (coor_t& coor,int64_t index,const coor_t &dims){ | ||||
|       int nd= dims.size(); | ||||
|       coor.resize(nd); | ||||
|       for(int d=0;d<nd;d++){ | ||||
| @@ -18,28 +18,45 @@ namespace Grid{ | ||||
|     } | ||||
|  | ||||
|     template<class coor_t> | ||||
|     static accelerator_inline void IndexFromCoor (const coor_t& coor,int &index,const coor_t &dims){ | ||||
|     static accelerator_inline void IndexFromCoor (const coor_t& coor,int64_t &index,const coor_t &dims){ | ||||
|       int nd=dims.size(); | ||||
|       int stride=1; | ||||
|       index=0; | ||||
|       for(int d=0;d<nd;d++){ | ||||
| 	index = index+stride*coor[d]; | ||||
| 	index = index+(int64_t)stride*coor[d]; | ||||
| 	stride=stride*dims[d]; | ||||
|       } | ||||
|     } | ||||
|     template<class coor_t> | ||||
|     static accelerator_inline void IndexFromCoor (const coor_t& coor,int &index,const coor_t &dims){ | ||||
|       int64_t index64; | ||||
|       IndexFromCoor(coor,index64,dims); | ||||
|       assert(index64<2*1024*1024*1024LL); | ||||
|       index = (int) index64; | ||||
|     } | ||||
|  | ||||
|     template<class coor_t> | ||||
|     static inline void IndexFromCoorReversed (const coor_t& coor,int &index,const coor_t &dims){ | ||||
|     static inline void IndexFromCoorReversed (const coor_t& coor,int64_t &index,const coor_t &dims){ | ||||
|       int nd=dims.size(); | ||||
|       int stride=1; | ||||
|       index=0; | ||||
|       for(int d=nd-1;d>=0;d--){ | ||||
| 	index = index+stride*coor[d]; | ||||
| 	index = index+(int64_t)stride*coor[d]; | ||||
| 	stride=stride*dims[d]; | ||||
|       } | ||||
|     } | ||||
|     template<class coor_t> | ||||
|     static inline void CoorFromIndexReversed (coor_t& coor,int index,const coor_t &dims){ | ||||
|     static inline void IndexFromCoorReversed (const coor_t& coor,int &index,const coor_t &dims){ | ||||
|       int64_t index64; | ||||
|       IndexFromCoorReversed(coor,index64,dims); | ||||
|       if ( index64>=2*1024*1024*1024LL ){ | ||||
| 	std::cout << " IndexFromCoorReversed " << coor<<" index " << index64<< " dims "<<dims<<std::endl; | ||||
|       } | ||||
|       assert(index64<2*1024*1024*1024LL); | ||||
|       index = (int) index64; | ||||
|     } | ||||
|     template<class coor_t> | ||||
|     static inline void CoorFromIndexReversed (coor_t& coor,int64_t index,const coor_t &dims){ | ||||
|       int nd= dims.size(); | ||||
|       coor.resize(nd); | ||||
|       for(int d=nd-1;d>=0;d--){ | ||||
|   | ||||
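The net effect of these changes is that the index accumulation is done in 64-bit, with the legacy int overloads asserting that the result still fits below 2^31. A small worked example of the row-major arithmetic these routines implement (std::vector<int> is used as the coordinate type purely for illustration; any type with size/resize/operator[] satisfies the template, and the call assumes the Grid namespace):

    // index = c[0] + d[0]*( c[1] + d[1]*( c[2] + d[2]*c[3] ) ), accumulated as int64_t
    std::vector<int> dims = {64,64,64,96};
    std::vector<int> coor = {1,2,3,4};
    int64_t index;
    Lexicographic::IndexFromCoor(coor, index, dims);   // 1 + 2*64 + 3*64*64 + 4*64*64*64 = 1060993

    std::vector<int> back;
    Lexicographic::CoorFromIndex(back, index, dims);   // recovers {1,2,3,4}

On a 256^3 x 512 volume the site count (about 8.6e9) exceeds INT_MAX, which is exactly the overflow the int64_t path avoids.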
| @@ -1,6 +1,6 @@ | ||||
| #ifndef GRID_UTIL_H | ||||
| #define GRID_UTIL_H | ||||
| #pragma once | ||||
| #include <Grid/util/Coordinate.h> | ||||
| #include <Grid/util/Lexicographic.h> | ||||
| #include <Grid/util/Init.h> | ||||
| #endif | ||||
| #include <Grid/util/FlightRecorder.h> | ||||
|  | ||||
|   | ||||
							
								
								
									
HMC/ComputeWilsonFlow.cc (new file, 238 lines added)
									
								
							| @@ -0,0 +1,238 @@ | ||||
| /************************************************************************************* | ||||
|  | ||||
| Grid physics library, www.github.com/paboyle/Grid | ||||
|  | ||||
| Source file: HMC/ComputeWilsonFlow.cc | ||||
|  | ||||
| Copyright (C) 2017 | ||||
|  | ||||
| Author: Guido Cossu <guido.cossu@ed.ac.uk> | ||||
| Author: Shuhei Yamamoto <syamamoto@bnl.gov> | ||||
|  | ||||
| This program is free software; you can redistribute it and/or modify | ||||
| it under the terms of the GNU General Public License as published by | ||||
| the Free Software Foundation; either version 2 of the License, or | ||||
| (at your option) any later version. | ||||
|  | ||||
| This program is distributed in the hope that it will be useful, | ||||
| but WITHOUT ANY WARRANTY; without even the implied warranty of | ||||
| MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the | ||||
| GNU General Public License for more details. | ||||
|  | ||||
| You should have received a copy of the GNU General Public License along | ||||
| with this program; if not, write to the Free Software Foundation, Inc., | ||||
| 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. | ||||
|  | ||||
| See the full license in the file "LICENSE" in the top level distribution | ||||
| directory | ||||
| *************************************************************************************/ | ||||
| /*  END LEGAL */ | ||||
| #include <Grid/Grid.h> | ||||
| #include <string> | ||||
|  | ||||
| namespace Grid{ | ||||
|   struct WFParameters: Serializable { | ||||
|     GRID_SERIALIZABLE_CLASS_MEMBERS(WFParameters, | ||||
|             int, steps, | ||||
|             double, step_size, | ||||
|             int, meas_interval, | ||||
| 	    double, maxTau, // for the adaptive algorithm | ||||
| 	    int, meas_interval_density, | ||||
| 	    std::string, path);  | ||||
|         | ||||
|  | ||||
|     template <class ReaderClass > | ||||
|     WFParameters(Reader<ReaderClass>& Reader){ | ||||
|       read(Reader, "WilsonFlow", *this); | ||||
|     } | ||||
|  | ||||
|   }; | ||||
|  | ||||
|   struct ConfParameters: Serializable { | ||||
|     GRID_SERIALIZABLE_CLASS_MEMBERS(ConfParameters, | ||||
| 	   std::string, conf_path, | ||||
|            std::string, conf_prefix, | ||||
| 	   std::string, conf_smr_prefix, | ||||
|            std::string, rng_prefix, | ||||
| 	   int, StartConfiguration, | ||||
| 	   int, EndConfiguration, | ||||
|            int, Skip); | ||||
|    | ||||
|     template <class ReaderClass > | ||||
|     ConfParameters(Reader<ReaderClass>& Reader){ | ||||
|       read(Reader, "Configurations", *this); | ||||
|     } | ||||
|  | ||||
|   }; | ||||
| } | ||||
|  | ||||
| template <class T> void writeFile(T& in, std::string const fname){   | ||||
| #ifdef HAVE_LIME | ||||
|   // Ref: https://github.com/paboyle/Grid/blob/feature/scidac-wp1/tests/debug/Test_general_coarse_hdcg_phys48.cc#L111 | ||||
|   std::cout << Grid::GridLogMessage << "Writes to: " << fname << std::endl; | ||||
|   Grid::emptyUserRecord record; | ||||
|   Grid::ScidacWriter WR(in.Grid()->IsBoss()); | ||||
|   WR.open(fname); | ||||
|   WR.writeScidacFieldRecord(in,record,0); | ||||
|   WR.close(); | ||||
| #endif | ||||
|   // What is the appropriate way to throw error? | ||||
| } | ||||
|  | ||||
|  | ||||
| int main(int argc, char **argv) { | ||||
|   using namespace Grid; | ||||
|    | ||||
|   Grid_init(&argc, &argv); | ||||
|   GridLogLayout(); | ||||
|  | ||||
|   auto latt_size   = GridDefaultLatt(); | ||||
|   auto simd_layout = GridDefaultSimd(Nd, vComplex::Nsimd()); | ||||
|   auto mpi_layout  = GridDefaultMpi(); | ||||
|   GridCartesian               Grid(latt_size, simd_layout, mpi_layout); | ||||
|    | ||||
|   std::vector<int> seeds({1, 2, 3, 4, 5}); | ||||
|   GridSerialRNG sRNG; | ||||
|   GridParallelRNG pRNG(&Grid); | ||||
|   pRNG.SeedFixedIntegers(seeds); | ||||
|  | ||||
|   LatticeGaugeField Umu(&Grid), Uflow(&Grid); | ||||
|    | ||||
|   typedef Grid::XmlReader       Serialiser; | ||||
|   Serialiser Reader("input.xml", false, "root"); | ||||
|   WFParameters WFPar(Reader); | ||||
|   ConfParameters CPar(Reader); | ||||
|   CheckpointerParameters CPPar(CPar.conf_path+CPar.conf_prefix, CPar.conf_path+CPar.conf_smr_prefix, CPar.conf_path+CPar.rng_prefix); | ||||
|   NerscHmcCheckpointer<PeriodicGimplR> CPNersc(CPPar); | ||||
|  | ||||
|   for (int conf = CPar.StartConfiguration; conf <= CPar.EndConfiguration; conf+= CPar.Skip){ | ||||
|  | ||||
|   CPNersc.CheckpointRestore(conf, Umu, sRNG, pRNG); | ||||
|  | ||||
|   std::cout << std::setprecision(15); | ||||
|   std::cout << GridLogMessage << "Initial plaquette: "<< WilsonLoops<PeriodicGimplR>::avgPlaquette(Umu) << std::endl; | ||||
|    | ||||
|   std::string file_pre  = WFPar.path; | ||||
|   std::string file_post = CPar.conf_prefix + "." + std::to_string(conf); | ||||
|  | ||||
|   WilsonFlow<PeriodicGimplR> WF(WFPar.step_size,WFPar.steps,WFPar.meas_interval); | ||||
|   WF.addMeasurement(WFPar.meas_interval_density, [&file_pre,&file_post,&conf](int step, RealD t, const typename PeriodicGimplR::GaugeField &U){ | ||||
|      | ||||
|     typedef typename PeriodicGimplR::GaugeLinkField GaugeMat; | ||||
|     typedef typename PeriodicGimplR::ComplexField ComplexField; | ||||
|      | ||||
|     assert(Nd == 4); | ||||
|  | ||||
|     // NOTE: | ||||
|     // Ideally, turn the following into methods of the appropriate class | ||||
|     /////////////   Compute Energy Density via Clover Leaf    ///////////////////////////////////////////////// | ||||
|     ///// Taken from qcd/smearing/WilsonFlow.h | ||||
|     //         For plq, use static sitePlaquette from class WilsonLoops in Grid/qcd/utils/WilsonLoops.h and divide it by #faces=(1.0 * Nd * (Nd - 1)) / 2.0, ncol=3 | ||||
|     //E = 1/2 tr( F_munu F_munu ) | ||||
|     //However as  F_numu = -F_munu, only need to sum the trace of the squares of the following 6 field strengths: | ||||
|     //F_01 F_02 F_03   F_12 F_13  F_23 | ||||
|     GaugeMat F(U.Grid()); | ||||
|     //LatticeComplexD R(U.Grid()); | ||||
|     ComplexField R(U.Grid()); | ||||
|     R = Zero(); | ||||
|    | ||||
|     for(int mu=0;mu<3;mu++){ | ||||
|       for(int nu=mu+1;nu<4;nu++){ | ||||
| 	WilsonLoops<PeriodicGimplR>::FieldStrength(F, U, mu, nu); | ||||
| 	R = R + trace(F*F); | ||||
|       } | ||||
|     } | ||||
|     R = (-1.0) * R; | ||||
|      | ||||
|     //// Taken from qcd/utils/WilsonLoops.h | ||||
|      | ||||
|     // Bx = -iF(y,z), By = -iF(z,x), Bz = -iF(x,y) | ||||
|     GaugeMat Bx(U.Grid()), By(U.Grid()), Bz(U.Grid()); | ||||
|     WilsonLoops<PeriodicGimplR>::FieldStrength(Bx, U, Ydir, Zdir); | ||||
|     WilsonLoops<PeriodicGimplR>::FieldStrength(By, U, Zdir, Xdir); | ||||
|     WilsonLoops<PeriodicGimplR>::FieldStrength(Bz, U, Xdir, Ydir); | ||||
|  | ||||
|     // Ex = -iF(t,x), Ey = -iF(t,y), Ez = -iF(t,z) | ||||
|     GaugeMat Ex(U.Grid()), Ey(U.Grid()), Ez(U.Grid()); | ||||
|     WilsonLoops<PeriodicGimplR>::FieldStrength(Ex, U, Tdir, Xdir); | ||||
|     WilsonLoops<PeriodicGimplR>::FieldStrength(Ey, U, Tdir, Ydir); | ||||
|     WilsonLoops<PeriodicGimplR>::FieldStrength(Ez, U, Tdir, Zdir); | ||||
|  | ||||
|     double coeff = 8.0/(32.0*M_PI*M_PI); | ||||
|     ComplexField qfield = coeff*trace(Bx*Ex + By*Ey + Bz*Ez); | ||||
|     //ComplexField qfield Plq(U.Grid()); | ||||
|     //WilsonLoops<PeriodicGimplR>::sitePlaquette(Plq, U); | ||||
|     //double coeff = 2.0 / (1.0 * Nd * (Nd - 1)) / 3.0; | ||||
|     //Plq = coeff * Plq; | ||||
|  | ||||
|     int tau = std::round(t); | ||||
|     std::string efile = file_pre + "E_dnsty_" + std::to_string(tau) + "_" + file_post; | ||||
|     writeFile(R,efile); | ||||
|     std::string tfile = file_pre + "Top_dnsty_" + std::to_string(tau) + "_" + file_post; | ||||
|     writeFile(qfield,tfile); | ||||
|  | ||||
|     RealD E = real(sum(R))/ RealD(U.Grid()->gSites()); | ||||
|     RealD T = real( sum(qfield) ); | ||||
|     Coordinate scoor; for (int mu=0; mu < Nd; mu++) scoor[mu] = 0; | ||||
|     RealD E0 = real(peekSite(R,scoor)); | ||||
|     RealD T0 = real(peekSite(qfield,scoor)); | ||||
|     std::cout << GridLogMessage << "[WilsonFlow] Saved energy density (clover) & topo. charge density: "  << conf << " " << step << "  " << tau << "  " | ||||
| 	      << "(E_avg,T_sum) " << E << " " << T << " (E, T at origin) " << E0 << " " << T0 << std::endl; | ||||
|      | ||||
|   }); | ||||
|    | ||||
|   int t=WFPar.maxTau; | ||||
|   WF.smear(Uflow, Umu); | ||||
|  | ||||
|   RealD WFlow_plaq = WilsonLoops<PeriodicGimplR>::avgPlaquette(Uflow); | ||||
|   RealD WFlow_TC   = WilsonLoops<PeriodicGimplR>::TopologicalCharge(Uflow); | ||||
|   RealD WFlow_T0   = WF.energyDensityPlaquette(t,Uflow); // t | ||||
|   RealD WFlow_EC   = WF.energyDensityCloverleaf(t,Uflow); | ||||
|   std::cout << GridLogMessage << "Plaquette          "<< conf << "   " << WFlow_plaq << std::endl; | ||||
|   std::cout << GridLogMessage << "T0                 "<< conf << "   " << WFlow_T0 << std::endl; | ||||
|   std::cout << GridLogMessage << "TC0                 "<< conf << "   " << WFlow_EC << std::endl; | ||||
|   std::cout << GridLogMessage << "TopologicalCharge  "<< conf << "   " << WFlow_TC   << std::endl; | ||||
|  | ||||
|   std::cout<< GridLogMessage << " Admissibility check:\n"; | ||||
|   const double sp_adm = 0.067;                // admissible threshold | ||||
|   const double pl_adm = 1.0-sp_adm/Nc; | ||||
|   std::cout << GridLogMessage << "   (pl_adm =" << pl_adm << ")\n"; | ||||
|  | ||||
|   // Need min and reduce min for this function | ||||
|   //double sp_max = NC_*(1.0-stpl.plaq_min(U,pl_adm)); | ||||
|   double sp_ave = Nc*(1.0-WFlow_plaq); | ||||
|  | ||||
|   //std::cout<< GridLogMessage << "   sp_max = "        << sp_max <<"\n"; | ||||
|   std::cout<< GridLogMessage << "   sp_ave = "        << sp_ave <<"\n"; | ||||
|   std::cout<< GridLogMessage << "   (sp_admissible = "<< sp_adm <<")\n"; | ||||
|   //std::cout<< GridLogMessage << "   sp_admissible - sp_max = "<<sp_adm-sp_max <<"\n"; | ||||
|   std::cout<< GridLogMessage << "   sp_admissible - sp_ave = "<<sp_adm-sp_ave <<"\n"; | ||||
|   } | ||||
|   Grid_finalize(); | ||||
| }  // main | ||||
|  | ||||
|  | ||||
| /* | ||||
| Input file example | ||||
|  | ||||
|  | ||||
| JSON | ||||
|  | ||||
| { | ||||
|     "WilsonFlow":{ | ||||
| 	"steps": 200, | ||||
| 	"step_size": 0.01, | ||||
| 	"meas_interval": 50, | ||||
|   "maxTau": 2.0 | ||||
|     }, | ||||
|     "Configurations":{ | ||||
| 	"conf_prefix": "ckpoint_lat", | ||||
| 	"rng_prefix": "ckpoint_rng", | ||||
| 	"StartConfiguration": 3000, | ||||
| 	"EndConfiguration": 3000, | ||||
| 	"Skip": 5 | ||||
|     } | ||||
| } | ||||
|  | ||||
|  | ||||
| */ | ||||
							
								
								
									
HMC/site_autocorrelation.cc (new file, 92 lines added)
									
								
							| @@ -0,0 +1,92 @@ | ||||
| /************************************************************************************* | ||||
|  | ||||
| Grid physics library, www.github.com/paboyle/Grid | ||||
|  | ||||
| Source file:  | ||||
|  | ||||
| Copyright (C) 2017 | ||||
|  | ||||
| Author: Peter Boyle | ||||
|  | ||||
| This program is free software; you can redistribute it and/or modify | ||||
| it under the terms of the GNU General Public License as published by | ||||
| the Free Software Foundation; either version 2 of the License, or | ||||
| (at your option) any later version. | ||||
|  | ||||
| This program is distributed in the hope that it will be useful, | ||||
| but WITHOUT ANY WARRANTY; without even the implied warranty of | ||||
| MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the | ||||
| GNU General Public License for more details. | ||||
|  | ||||
| You should have received a copy of the GNU General Public License along | ||||
| with this program; if not, write to the Free Software Foundation, Inc., | ||||
| 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. | ||||
|  | ||||
| See the full license in the file "LICENSE" in the top level distribution | ||||
| directory | ||||
| *************************************************************************************/ | ||||
| /*  END LEGAL */ | ||||
| #include <Grid/Grid.h> | ||||
| #include <string> | ||||
|  | ||||
| template <class T> void readFile(T& out, std::string const fname){ | ||||
| #ifdef HAVE_LIME | ||||
|   Grid::emptyUserRecord record; | ||||
|   Grid::ScidacReader RD; | ||||
|   RD.open(fname); | ||||
|   RD.readScidacFieldRecord(out,record); | ||||
|   RD.close(); | ||||
| #endif | ||||
| } | ||||
|  | ||||
|  | ||||
| int main(int argc, char **argv) { | ||||
|   using namespace Grid; | ||||
|    | ||||
|   Grid_init(&argc, &argv); | ||||
|   GridLogLayout(); | ||||
|  | ||||
|   auto latt_size   = GridDefaultLatt(); | ||||
|   auto simd_layout = GridDefaultSimd(Nd, vComplex::Nsimd()); | ||||
|   auto mpi_layout  = GridDefaultMpi(); | ||||
|   GridCartesian               Grid(latt_size, simd_layout, mpi_layout); | ||||
|  | ||||
|   LatticeComplexD plaq1(&Grid), plaq2(&Grid); | ||||
|  | ||||
|   FieldMetaData header; | ||||
|  | ||||
|   double vol = plaq1.Grid()->gSites(); | ||||
|    | ||||
|   std::string file1(argv[1]); | ||||
|   std::cout << "Reading "<<file1<<std::endl; | ||||
|   readFile(plaq1,file1); | ||||
|   std::string file2(argv[2]); | ||||
|   std::cout << "Reading "<<file2<<std::endl; | ||||
|   readFile(plaq2,file2); | ||||
|    | ||||
|   auto p1bar = TensorRemove(sum(plaq1)); | ||||
|   auto p2bar = TensorRemove(sum(plaq2)); | ||||
|  | ||||
|   p1bar = p1bar / vol; | ||||
|   p2bar = p2bar / vol; | ||||
|  | ||||
|   std::cout<< GridLogMessage << "p1bar = "<<p1bar<<std::endl; | ||||
|   std::cout<< GridLogMessage << "p2bar = "<<p2bar<<std::endl; | ||||
|  | ||||
|   auto corr_site = plaq1 * plaq2 - p1bar * p2bar; | ||||
|   auto corr_bar  = TensorRemove(sum(corr_site))/vol; | ||||
|  | ||||
|   auto cov1_site = plaq1 * plaq1 - p1bar * p1bar; | ||||
|   auto cov1_bar  = TensorRemove(sum(cov1_site))/vol; | ||||
|  | ||||
|   auto cov2_site = plaq2 * plaq2 - p2bar * p2bar; | ||||
|   auto cov2_bar  = TensorRemove(sum(cov2_site))/vol; | ||||
|  | ||||
|   std::cout<< GridLogMessage << "cov_bar = "<<corr_bar<<std::endl; | ||||
|  | ||||
|   std::cout<< GridLogMessage << "corr_bar = "<<corr_bar/sqrt(cov1_bar*cov2_bar)<<std::endl; | ||||
|    | ||||
|   Grid_finalize(); | ||||
| }  // main | ||||
|  | ||||
|  | ||||
							
								
								
									
HMC/site_plaquette.cc (new file, 81 lines added)
									
								
							| @@ -0,0 +1,81 @@ | ||||
| /************************************************************************************* | ||||
|  | ||||
| Grid physics library, www.github.com/paboyle/Grid | ||||
|  | ||||
| Source file:  | ||||
|  | ||||
| Copyright (C) 2017 | ||||
|  | ||||
| Author: Peter Boyle | ||||
|  | ||||
| This program is free software; you can redistribute it and/or modify | ||||
| it under the terms of the GNU General Public License as published by | ||||
| the Free Software Foundation; either version 2 of the License, or | ||||
| (at your option) any later version. | ||||
|  | ||||
| This program is distributed in the hope that it will be useful, | ||||
| but WITHOUT ANY WARRANTY; without even the implied warranty of | ||||
| MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the | ||||
| GNU General Public License for more details. | ||||
|  | ||||
| You should have received a copy of the GNU General Public License along | ||||
| with this program; if not, write to the Free Software Foundation, Inc., | ||||
| 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. | ||||
|  | ||||
| See the full license in the file "LICENSE" in the top level distribution | ||||
| directory | ||||
| *************************************************************************************/ | ||||
| /*  END LEGAL */ | ||||
| #include <Grid/Grid.h> | ||||
| #include <string> | ||||
|  | ||||
| NAMESPACE_BEGIN(Grid); | ||||
| template <class T> void writeFile(T& out, std::string const fname){ | ||||
| #ifdef HAVE_LIME | ||||
|   emptyUserRecord record; | ||||
|   ScidacWriter WR(out.Grid()->IsBoss()); | ||||
|   WR.open(fname); | ||||
|   WR.writeScidacFieldRecord(out,record,0,Grid::BinaryIO::BINARYIO_LEXICOGRAPHIC); | ||||
|   WR.close(); | ||||
| #endif | ||||
| } | ||||
| NAMESPACE_END(Grid); | ||||
| int main(int argc, char **argv) { | ||||
|   using namespace Grid; | ||||
|    | ||||
|   Grid_init(&argc, &argv); | ||||
|   GridLogLayout(); | ||||
|  | ||||
|   auto latt_size   = GridDefaultLatt(); | ||||
|   auto simd_layout = GridDefaultSimd(Nd, vComplex::Nsimd()); | ||||
|   auto mpi_layout  = GridDefaultMpi(); | ||||
|   GridCartesian               Grid(latt_size, simd_layout, mpi_layout); | ||||
|  | ||||
|   LatticeGaugeField Umu(&Grid); | ||||
|   std::vector<LatticeColourMatrix> U(4,&Grid); | ||||
|   LatticeComplexD plaq(&Grid); | ||||
|  | ||||
|   FieldMetaData header; | ||||
|  | ||||
|   double vol = Umu.Grid()->gSites(); | ||||
|   double faces = (1.0 * Nd * (Nd - 1)) / 2.0; | ||||
|   double Ncdiv = 1.0/Nc; | ||||
|    | ||||
|   std::string file1(argv[1]); | ||||
|   std::string file2(argv[2]); | ||||
|   std::cout << "Reading "<<file1<<std::endl; | ||||
|   NerscIO::readConfiguration(Umu,header,file1); | ||||
|   for(int mu=0;mu<Nd;mu++){ | ||||
|     U[mu] = PeekIndex<LorentzIndex>(Umu,mu); | ||||
|   } | ||||
|   SU3WilsonLoops::sitePlaquette(plaq,U); | ||||
|  | ||||
|   plaq = plaq *(Ncdiv/faces); | ||||
|    | ||||
|   std::cout << "Writing "<<file2<<std::endl; | ||||
|   writeFile(plaq,file2); | ||||
|    | ||||
|   Grid_finalize(); | ||||
| }  // main | ||||
|  | ||||
|  | ||||
							
								
								
									
TODO (49 lines changed)
									
									
									
									
									
								
							| @@ -1,6 +1,50 @@ | ||||
| - - Slice sum optimisation & A2A - atomic addition | ||||
| i)    Refine subspace with HDCG & recompute | ||||
| ii)   Block Lanczos in coarse space | ||||
| iii)  Batched block project in the operator computation | ||||
|  | ||||
| ------- | ||||
|  | ||||
| i) Clean up CoarsenedMatrix, GeneralCoarsenedMatrix, GeneralCoarsenedMatrixMultiRHS | ||||
|  | ||||
|  -- Ideally want a SINGLE implementation that does MultiRHS **AND** works with one RHS. | ||||
|  | ||||
|  -- -- Getting there. One RHS is hard due to vectorisation & hardwired coarse5d layout | ||||
|  -- Compromise: Wrap it in a copy in/out for a slice. | ||||
|   | ||||
|  -- Bad for Lanczos: need to do a BLOCK Lanczos instead. Longer term. | ||||
|  | ||||
|  -- **** Make the test do ONLY the single RHS. **** | ||||
|  -- I/O for the matrix elements required. | ||||
|  -- Make the Adef2 build an eigenvector deflater and a block projector | ||||
|  --  | ||||
|   | ||||
|  -- Work with Regensburg on tests. | ||||
|  -- Plan interface preserving the coarsened matrix interface (??) | ||||
|  | ||||
| -- Move functionality from GeneralCoarsenedMatrix INTO GeneralCoarsenedMatrixMultiRHS -- DONE | ||||
|    -- Don't immediately delete original | ||||
|    -- Instead make the new one self contained, then delete. | ||||
|    -- New DWF inverter test. | ||||
|  | ||||
|   // void PopulateAdag(void) | ||||
|   void CoarsenOperator(LinearOperatorBase<Lattice<Fobj> > &linop, Aggregation<Fobj,CComplex,nbasis> & Subspace) -- DONE | ||||
|   ExchangeCoarseLinks(); | ||||
|  | ||||
| iii) Aurora -- christoph's problem -- DONE | ||||
|      Aurora -- Carleton's problem staggered. | ||||
|  | ||||
| iv) Dennis merge and test Aurora -- DONE (save test) | ||||
|  | ||||
| v) Merge Ed Bennet's request --DONE  | ||||
|  | ||||
| vi) Repro CG  -- get down to the level of single node testing via split grid test  | ||||
|  | ||||
|  | ||||
| ========================= | ||||
|  | ||||
| =============== | ||||
| - - Slice sum optimisation & A2A - atomic addition -- Dennis | ||||
| - - Also faster non-atomic reduction | ||||
| - - Remaining PRs | ||||
| - - DDHMC | ||||
|   - - MixedPrec is the action eval, high precision | ||||
|   - - MixedPrecCleanup is the force eval, low precision | ||||
| @@ -17,7 +61,6 @@ DDHMC | ||||
| -- Multishift Mixed Precision - DONE | ||||
| -- Pole dependent residual  - DONE | ||||
|  | ||||
|  | ||||
| ======= | ||||
| -- comms threads issue?? | ||||
| -- Part done: Staggered kernel performance on GPU | ||||
|   | ||||
							
								
								
									
configure.ac (21 lines changed)
									
									
									
									
									
								
							| @@ -226,23 +226,14 @@ case ${ac_SFW_FP16} in | ||||
| esac | ||||
|  | ||||
| ############### Default to accelerator cshift, but revert to host if UCX is buggy or other reasons | ||||
| AC_ARG_ENABLE([accelerator-cshift], | ||||
|     [AS_HELP_STRING([--enable-accelerator-cshift=yes|no],[run cshift on the device])], | ||||
|     [ac_ACC_CSHIFT=${enable_accelerator_cshift}], [ac_ACC_CSHIFT=yes]) | ||||
| AC_ARG_ENABLE([accelerator-aware-mpi], | ||||
|     [AS_HELP_STRING([--enable-accelerator-aware-mpi=yes|no],[run mpi transfers from device])], | ||||
|     [ac_ACCELERATOR_AWARE_MPI=${enable_accelerator_aware_mpi}], [ac_ACCELERATOR_AWARE_MPI=yes]) | ||||
|  | ||||
| AC_ARG_ENABLE([ucx-buggy], | ||||
|     [AS_HELP_STRING([--enable-ucx-buggy=yes|no],[enable workaround for UCX device buffer bugs])], | ||||
|     [ac_UCXBUGGY=${enable_ucx_buggy}], [ac_UCXBUGGY=no]) | ||||
|  | ||||
| case ${ac_UCXBUGGY} in | ||||
| case ${ac_ACCELERATOR_AWARE_MPI} in | ||||
|     yes) | ||||
|     ac_ACC_CSHIFT=no;; | ||||
|     *);; | ||||
| esac | ||||
|  | ||||
| case ${ac_ACC_CSHIFT} in | ||||
|     yes) | ||||
|       AC_DEFINE([ACCELERATOR_CSHIFT],[1],[ UCX device buffer bugs are not present]);; | ||||
|       AC_DEFINE([ACCELERATOR_CSHIFT],[1],[ Cshift runs on the device]) | ||||
|       AC_DEFINE([ACCELERATOR_AWARE_MPI],[1],[ Stencil can use device pointers]);; | ||||
|     *);; | ||||
| esac | ||||
|  | ||||
|   | ||||
							
								
								
									
scripts/prequisites.sh (new executable file, 44 lines added)
									
								
							| @@ -0,0 +1,44 @@ | ||||
| #!/bin/bash | ||||
|  | ||||
| if [ "$1" = "install" ] | ||||
| then | ||||
|     dir=`pwd` | ||||
|     cd $HOME | ||||
|     git clone -c feature.manyFiles=true https://github.com/spack/spack.git | ||||
|     source $HOME/spack/share/spack/setup-env.sh | ||||
|  | ||||
|     spack install autoconf | ||||
|     spack install automake | ||||
|     spack install c-lime cppflags=-fPIE | ||||
|     spack install fftw | ||||
|     spack install llvm | ||||
|     spack install gmp | ||||
|     spack install mpfr | ||||
|     spack install cuda@11.8 | ||||
|     spack install openmpi | ||||
|     spack install openssl | ||||
|     spack install hdf5 | ||||
| else | ||||
|     source $HOME/spack/share/spack/setup-env.sh | ||||
| fi | ||||
|  | ||||
| spack load autoconf | ||||
| spack load automake | ||||
| spack load c-lime | ||||
| spack load fftw | ||||
| spack load llvm | ||||
| spack load gmp | ||||
| spack load mpfr | ||||
| spack load cuda@11.8 | ||||
| spack load openmpi | ||||
| spack load openssl | ||||
| spack load hdf5 | ||||
|  | ||||
| export FFTW=`spack find --paths fftw    | grep ^fftw   | awk '{print $2}' ` | ||||
| export HDF5=`spack find --paths hdf5    | grep ^hdf5   | awk '{print $2}' ` | ||||
| export CLIME=`spack find --paths c-lime | grep ^c-lime | awk '{print $2}' ` | ||||
| export MPFR=`spack find --paths mpfr    | grep ^mpfr  | awk '{print $2}' ` | ||||
| export GMP=`spack find --paths gmp      | grep ^gmp | awk '{print $2}' ` | ||||
| export NVIDIA=$CUDA_HOME | ||||
| export NVIDIALIB=$NVIDIA/targets/x86_64-linux/lib/ | ||||
| export LD_LIBRARY_PATH=$NVIDIALIB:$FFTW/lib/:$MPFR/lib:$LD_LIBRARY_PATH | ||||
							
								
								
									
systems/Aurora/benchmarks/bench1.pbs (new file, 67 lines added)
									
								
							| @@ -0,0 +1,67 @@ | ||||
| #!/bin/bash | ||||
|  | ||||
| #PBS -q debug | ||||
| #PBS -l select=1 | ||||
| #PBS -l walltime=00:20:00 | ||||
| #PBS -A LatticeQCD_aesp_CNDA | ||||
|  | ||||
| #export OMP_PROC_BIND=spread | ||||
| #unset OMP_PLACES | ||||
|  | ||||
| cd $PBS_O_WORKDIR | ||||
|  | ||||
| source ../sourceme.sh | ||||
| module load pti-gpu | ||||
|  | ||||
| #cat $PBS_NODEFILE | ||||
|  | ||||
| export OMP_NUM_THREADS=4 | ||||
| export MPIR_CVAR_CH4_OFI_ENABLE_GPU_PIPELINE=1 | ||||
|  | ||||
| #unset MPIR_CVAR_CH4_OFI_GPU_PIPELINE_D2H_ENGINE_TYPE | ||||
| #unset MPIR_CVAR_CH4_OFI_GPU_PIPELINE_H2D_ENGINE_TYPE | ||||
| #unset MPIR_CVAR_GPU_USE_IMMEDIATE_COMMAND_LIST | ||||
|  | ||||
| #export MPIR_CVAR_CH4_OFI_GPU_PIPELINE_D2H_ENGINE_TYPE=0 | ||||
| #export MPIR_CVAR_CH4_OFI_GPU_PIPELINE_H2D_ENGINE_TYPE=0 | ||||
| #export MPIR_CVAR_GPU_USE_IMMEDIATE_COMMAND_LIST=1 | ||||
| #export MPIR_CVAR_CH4_OFI_GPU_PIPELINE_BUFFER_SZ=1048576 | ||||
| #export MPIR_CVAR_CH4_OFI_GPU_PIPELINE_THRESHOLD=131072 | ||||
| #export MPIR_CVAR_CH4_OFI_GPU_PIPELINE_NUM_BUFFERS_PER_CHUNK=16 | ||||
| #export MPIR_CVAR_CH4_OFI_GPU_PIPELINE_MAX_NUM_BUFFERS=16 | ||||
| export MPICH_OFI_NIC_POLICY=GPU | ||||
|  | ||||
| # 12 ppn, 2 nodes, 24 ranks | ||||
| # | ||||
| CMD="mpiexec -np 12 -ppn 12  -envall \ | ||||
| 	     ./gpu_tile_compact.sh \ | ||||
| 	     ./Benchmark_comms_host_device --mpi 2.2.1.3 --grid 24.32.32.24 \ | ||||
| 		--shm-mpi 0 --shm 2048 --device-mem 32000 --accelerator-threads 32"  | ||||
| #$CMD | tee 1node.comms | ||||
|  | ||||
|  | ||||
| CMD="mpiexec -np 1 -ppn 1  -envall \ | ||||
| 	     ./gpu_tile_compact.sh \ | ||||
| 	     ./Benchmark_dwf_fp32 --mpi 1.1.1.1 --grid 16.32.32.32 \ | ||||
| 		--shm-mpi 0 --shm 2048 --device-mem 32000 --accelerator-threads 32 " | ||||
| #$CMD | tee 1tile.dwf | ||||
|  | ||||
| CMD="mpiexec -np 12 -ppn 12  -envall \ | ||||
| 	     ./gpu_tile_compact.sh \ | ||||
| 	     ./Benchmark_dwf_fp32 --mpi 2.2.1.3 --grid 32.32.32.48 \ | ||||
| 		--shm-mpi 0 --shm 2048 --device-mem 32000 --accelerator-threads 32 --comms-overlap" | ||||
| $CMD | tee 1node.32.32.32.48.dwf | ||||
|  | ||||
|  | ||||
| CMD="mpiexec -np 12 -ppn 12  -envall \ | ||||
| 	     ./gpu_tile_compact.sh \ | ||||
| 	     ./Benchmark_dwf_fp32 --mpi 2.2.1.3 --grid 64.64.32.96 \ | ||||
| 		--shm-mpi 0 --shm 2048 --device-mem 32000 --accelerator-threads 32 --comms-overlap" | ||||
| #$CMD | tee 1node.64.64.32.96.dwf | ||||
|  | ||||
| CMD="mpiexec -np 12 -ppn 12  -envall \ | ||||
| 	     ./gpu_tile_compact.sh \ | ||||
| 	     ./Benchmark_dwf_fp32 --mpi 2.2.1.3 --grid 64.32.32.48 \ | ||||
| 		--shm-mpi 0 --shm 2048 --device-mem 32000 --accelerator-threads 32 --comms-overlap" | ||||
| #$CMD | tee 1node.64.32.32.48.dwf | ||||
|  | ||||
| @@ -1,10 +1,8 @@ | ||||
| #!/bin/bash | ||||
|  | ||||
| ## qsub -q EarlyAppAccess -A Aurora_Deployment -I -l select=1 -l walltime=60:00 | ||||
|  | ||||
| #PBS -q EarlyAppAccess | ||||
| #PBS -q workq | ||||
| #PBS -l select=2 | ||||
| #PBS -l walltime=01:00:00 | ||||
| #PBS -l walltime=00:20:00 | ||||
| #PBS -A LatticeQCD_aesp_CNDA | ||||
|  | ||||
| #export OMP_PROC_BIND=spread | ||||
| @@ -13,11 +11,13 @@ | ||||
| cd $PBS_O_WORKDIR | ||||
|  | ||||
| source ../sourceme.sh | ||||
| module load pti-gpu | ||||
|  | ||||
| export OMP_NUM_THREADS=3 | ||||
| #cat $PBS_NODEFILE | ||||
|  | ||||
| export OMP_NUM_THREADS=4 | ||||
| export MPIR_CVAR_CH4_OFI_ENABLE_GPU_PIPELINE=1 | ||||
|  | ||||
|  | ||||
| #unset MPIR_CVAR_CH4_OFI_GPU_PIPELINE_D2H_ENGINE_TYPE | ||||
| #unset MPIR_CVAR_CH4_OFI_GPU_PIPELINE_H2D_ENGINE_TYPE | ||||
| #unset MPIR_CVAR_GPU_USE_IMMEDIATE_COMMAND_LIST | ||||
| @@ -31,30 +31,25 @@ export MPIR_CVAR_CH4_OFI_GPU_PIPELINE_NUM_BUFFERS_PER_CHUNK=16 | ||||
| export MPIR_CVAR_CH4_OFI_GPU_PIPELINE_MAX_NUM_BUFFERS=16 | ||||
| export MPICH_OFI_NIC_POLICY=GPU | ||||
|  | ||||
| # 12 ppn, 2 nodes, 24 ranks | ||||
| # | ||||
| CMD="mpiexec -np 24 -ppn 12  -envall \ | ||||
| 	     ./gpu_tile_compact.sh \ | ||||
| 	     ./Benchmark_comms_host_device --mpi 2.3.2.2 --grid 32.24.32.192 \ | ||||
| 		--shm-mpi 1 --shm 2048 --device-mem 32000 --accelerator-threads 32" | ||||
| 	     ./Benchmark_comms_host_device --mpi 2.2.2.3 --grid 24.32.32.24 \ | ||||
| 		--shm-mpi 0 --shm 2048 --device-mem 32000 --accelerator-threads 32"  | ||||
| $CMD | tee 2node.comms | ||||
|  | ||||
| #$CMD  | ||||
|  | ||||
| CMD="mpiexec -np 24 -ppn 12  -envall \ | ||||
| 	     ./gpu_tile_compact.sh \ | ||||
| 	     ./Benchmark_dwf_fp32 --mpi 2.3.2.2 --grid 64.96.64.64 --comms-overlap \ | ||||
| 		--shm-mpi 1 --shm 2048 --device-mem 32000 --accelerator-threads 32" | ||||
| 	     ./Benchmark_dwf_fp32 --mpi 2.2.2.3 --grid 32.32.64.48 \ | ||||
| 		--shm-mpi 0 --shm 2048 --device-mem 32000 --accelerator-threads 32 --comms-overlap" | ||||
| $CMD | tee 2node.32.32.64.48.dwf | ||||
|  | ||||
| #$CMD  | ||||
|  | ||||
| CMD="mpiexec -np 1 -ppn 1  -envall \ | ||||
| CMD="mpiexec -np 24 -ppn 12  -envall \ | ||||
| 	     ./gpu_tile_compact.sh \ | ||||
| 	     ./Benchmark_dwf --mpi 1.1.1.1 --grid 16.32.32.32 --comms-sequential \ | ||||
| 		--shm-mpi 1 --shm 2048 --device-mem 32000 --accelerator-threads 32" | ||||
| 	     ./Benchmark_dwf_fp32 --mpi 2.2.2.3 --grid 64.64.64.96 \ | ||||
| 		--shm-mpi 0 --shm 2048 --device-mem 32000 --accelerator-threads 32 --comms-overlap" | ||||
| $CMD | tee 2node.64.64.64.96.dwf | ||||
|  | ||||
| $CMD  | ||||
|  | ||||
| CMD="mpiexec -np 1 -ppn 1  -envall \ | ||||
| 	     ./gpu_tile_compact.sh \ | ||||
| 	     ./Benchmark_dwf_fp32 --mpi 1.1.1.1 --grid 16.32.32.32 --comms-sequential \ | ||||
| 		--shm-mpi 1 --shm 2048 --device-mem 32000 --accelerator-threads 32" | ||||
|  | ||||
| $CMD  | ||||
| @@ -1,33 +1,34 @@ | ||||
| #!/bin/bash | ||||
|  | ||||
| export NUMA_MAP=(2 2 2 3 3 3 2 2 2 3 3 3 ) | ||||
| #export NUMA_MAP=(0 0 0 1 1 1 0 0 0 1 1 1 ) | ||||
| export NUMA_PMAP=(0 0 0 1 1 1 0 0 0 1 1 1 ) | ||||
| export  NIC_MAP=(0 1 2 4 5 6 0 1 2 4 5 6 ) | ||||
| export  GPU_MAP=(0 1 2 3 4 5 0 1 2 3 4 5 ) | ||||
| export TILE_MAP=(0 0 0 0 0 0 1 1 1 1 1 1 ) | ||||
| #export NUMA_MAP=(2 2 2 3 3 3 2 2 2 3 3 3 ) | ||||
| #export NUMA_MAP=(0 0 1 1 0 0 1 1 0 0 1 1); | ||||
| #export  GPU_MAP=(0.0 0.1 3.0 3.1 1.0 1.1 4.0 4.1 2.0 2.1 5.0 5.1) | ||||
|  | ||||
| export NUMA_MAP=(0 0 0 0 0 0 1 1 1 1 1 1 ); | ||||
| export  GPU_MAP=(0.0 1.0 2.0 3.0 4.0 5.0 0.1 1.1 2.1 3.1 4.1 5.1 ) | ||||
|  | ||||
| export NUMA=${NUMA_MAP[$PALS_LOCAL_RANKID]} | ||||
| export NUMAP=${NUMA_PMAP[$PALS_LOCAL_RANKID]} | ||||
| export NIC=${NIC_MAP[$PALS_LOCAL_RANKID]} | ||||
| export gpu_id=${GPU_MAP[$PALS_LOCAL_RANKID]} | ||||
| export tile_id=${TILE_MAP[$PALS_LOCAL_RANKID]} | ||||
|    | ||||
| #export GRID_MPICH_NIC_BIND=$NIC | ||||
| #export ONEAPI_DEVICE_SELECTOR=level_zero:$gpu_id.$tile_id | ||||
|  | ||||
| unset EnableWalkerPartition | ||||
| export EnableImplicitScaling=0 | ||||
| export ZE_AFFINITY_MASK=$gpu_id.$tile_id | ||||
| export ZE_AFFINITY_MASK=$gpu_id | ||||
| export ONEAPI_DEVICE_FILTER=gpu,level_zero | ||||
|  | ||||
| #export ZE_ENABLE_PCI_ID_DEVICE_ORDER=1 | ||||
| #export SYCL_PI_LEVEL_ZERO_DEVICE_SCOPE_EVENTS=0 | ||||
| #export SYCL_PI_LEVEL_ZERO_USE_IMMEDIATE_COMMANDLISTS=1 | ||||
| export SYCL_PI_LEVEL_ZERO_DEVICE_SCOPE_EVENTS=0 | ||||
| export SYCL_PI_LEVEL_ZERO_USE_IMMEDIATE_COMMANDLISTS=1 | ||||
| export SYCL_PI_LEVEL_ZERO_USE_COPY_ENGINE=0:5 | ||||
| #export SYCL_PI_LEVEL_ZERO_USE_COPY_ENGINE=0:2 | ||||
| #export SYCL_PI_LEVEL_ZERO_USE_COPY_ENGINE_FOR_D2D_COPY=1 | ||||
| export SYCL_PI_LEVEL_ZERO_USE_COPY_ENGINE_FOR_D2D_COPY=1 | ||||
| #export SYCL_PI_LEVEL_ZERO_USM_RESIDENT=1 | ||||
|  | ||||
| #echo "rank $PALS_RANKID ; local rank $PALS_LOCAL_RANKID ; ZE_AFFINITY_MASK=$ZE_AFFINITY_MASK ; NUMA $NUMA " | ||||
| echo "rank $PALS_RANKID ; local rank $PALS_LOCAL_RANKID ; ZE_AFFINITY_MASK=$ZE_AFFINITY_MASK ; NUMA $NUMA " | ||||
|  | ||||
| numactl -m $NUMA -N $NUMAP  "$@" | ||||
| if [ $PALS_RANKID = "0" ] | ||||
| then | ||||
| #    numactl -m $NUMA -N $NUMA onetrace --chrome-device-timeline  "$@" | ||||
| #    numactl -m $NUMA -N $NUMA unitrace --chrome-kernel-logging --chrome-mpi-logging --chrome-sycl-logging --demangle "$@" | ||||
|     numactl -m $NUMA -N $NUMA  "$@" | ||||
| else  | ||||
|     numactl -m $NUMA -N $NUMA  "$@" | ||||
| fi | ||||
|   | ||||
| @@ -1,16 +1,16 @@ | ||||
| TOOLS=$HOME/tools | ||||
|  | ||||
| ../../configure \ | ||||
| 	--enable-simd=GPU \ | ||||
| 	--enable-gen-simd-width=64 \ | ||||
| 	--enable-comms=mpi-auto \ | ||||
| 	--enable-accelerator-cshift \ | ||||
| 	--disable-gparity \ | ||||
| 	--disable-fermion-reps \ | ||||
| 	--enable-shm=nvlink \ | ||||
| 	--enable-accelerator=sycl \ | ||||
| 	--enable-accelerator-aware-mpi=yes\ | ||||
| 	--enable-unified=no \ | ||||
| 	MPICXX=mpicxx \ | ||||
| 	CXX=icpx \ | ||||
| 	LDFLAGS="-fiopenmp -fsycl -fsycl-device-code-split=per_kernel -fsycl-device-lib=all -lze_loader -L$TOOLS/lib64/ -L${MKLROOT}/lib -qmkl=parallel " \ | ||||
| 	CXXFLAGS="-fiopenmp -fsycl-unnamed-lambda -fsycl -I$INSTALL/include -Wno-tautological-compare -I$HOME/ -I$TOOLS/include -qmkl=parallel" | ||||
| 	LDFLAGS="-fiopenmp -fsycl -fsycl-device-code-split=per_kernel -fsycl-device-lib=all -lze_loader -L${MKLROOT}/lib -qmkl=parallel -lsycl" \ | ||||
| 	CXXFLAGS="-fiopenmp -fsycl-unnamed-lambda -fsycl -I$INSTALL/include -Wno-tautological-compare -I$HOME/ -qmkl=parallel" | ||||
|  | ||||
|   | ||||
							
								
								
									
systems/Aurora/sourceme-sunspot-deterministic.sh (new file, 2 lines added)
									
								
							| @@ -0,0 +1,2 @@ | ||||
| module load oneapi/eng-compiler/2023.05.15.003 | ||||
| module load mpich/51.2/icc-all-deterministic-pmix-gpu | ||||
| @@ -1,7 +1,9 @@ | ||||
| #export ONEAPI_DEVICE_SELECTOR=level_zero:0.0 | ||||
|  | ||||
| module use /soft/modulefiles | ||||
| module load intel_compute_runtime/release/agama-devel-682.22 | ||||
| module load oneapi/release/2023.12.15.001 | ||||
|  | ||||
| #module use /soft/modulefiles | ||||
| #module load intel_compute_runtime/release/agama-devel-682.22 | ||||
|  | ||||
| export FI_CXI_DEFAULT_CQ_SIZE=131072 | ||||
| export FI_CXI_CQ_FILL_PERCENT=20 | ||||
|   | ||||
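These libfabric CXI settings have to reach the launched ranks, not just the submitting shell; the Aurora job scripts in this diff forward them with mpiexec -envall. A hedged one-liner to confirm the values arrive on the ranks (the flags are the ones already used in these scripts):

    # Hedged sanity check: confirm the CXI completion-queue settings are visible to the ranks.
    export FI_CXI_DEFAULT_CQ_SIZE=131072
    export FI_CXI_CQ_FILL_PERCENT=20
    mpiexec -np 2 -ppn 2 -envall \
        bash -c 'echo "rank $PALS_RANKID: CQ=$FI_CXI_DEFAULT_CQ_SIZE fill=$FI_CXI_CQ_FILL_PERCENT"'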
							
								
								
									
systems/Aurora/tests/repro128.pbs (new file, +41 lines)
							| @@ -0,0 +1,41 @@ | ||||
| #!/bin/bash | ||||
|  | ||||
| ## qsub -q EarlyAppAccess -A Aurora_Deployment -I -l select=1 -l walltime=60:00 | ||||
|  | ||||
| #PBS -q EarlyAppAccess | ||||
| #PBS -l select=128 | ||||
| #PBS -l walltime=02:00:00 | ||||
| #PBS -A LatticeQCD_aesp_CNDA | ||||
|  | ||||
| #export OMP_PROC_BIND=spread | ||||
| #unset OMP_PLACES | ||||
|  | ||||
| cd $PBS_O_WORKDIR | ||||
|  | ||||
| source ../sourceme.sh | ||||
|  | ||||
| cat $PBS_NODEFILE | ||||
|  | ||||
| export OMP_NUM_THREADS=3 | ||||
| export MPIR_CVAR_CH4_OFI_ENABLE_GPU_PIPELINE=1 | ||||
|  | ||||
| #unset MPIR_CVAR_CH4_OFI_GPU_PIPELINE_D2H_ENGINE_TYPE | ||||
| #unset MPIR_CVAR_CH4_OFI_GPU_PIPELINE_H2D_ENGINE_TYPE | ||||
| #unset MPIR_CVAR_GPU_USE_IMMEDIATE_COMMAND_LIST | ||||
|  | ||||
| export MPIR_CVAR_CH4_OFI_GPU_PIPELINE_D2H_ENGINE_TYPE=0 | ||||
| export MPIR_CVAR_CH4_OFI_GPU_PIPELINE_H2D_ENGINE_TYPE=0 | ||||
| export MPIR_CVAR_GPU_USE_IMMEDIATE_COMMAND_LIST=1 | ||||
| export MPIR_CVAR_CH4_OFI_GPU_PIPELINE_BUFFER_SZ=1048576 | ||||
| export MPIR_CVAR_CH4_OFI_GPU_PIPELINE_THRESHOLD=131072 | ||||
| export MPIR_CVAR_CH4_OFI_GPU_PIPELINE_NUM_BUFFERS_PER_CHUNK=16 | ||||
| export MPIR_CVAR_CH4_OFI_GPU_PIPELINE_MAX_NUM_BUFFERS=16 | ||||
| export MPICH_OFI_NIC_POLICY=GPU | ||||
|  | ||||
| # 12 ppn, 16 nodes, 192 ranks | ||||
| # 12 ppn, 128 nodes, 1536 ranks | ||||
| CMD="mpiexec -np 1536 -ppn 12  -envall \ | ||||
| 	     ./gpu_tile_compact.sh \ | ||||
| 	     ./Test_dwf_mixedcg_prec --mpi 4.4.4.24 --grid 128.128.128.384 \ | ||||
| 		--shm-mpi 1 --shm 4096 --device-mem 32000 --accelerator-threads 32 --seconds 7000 --comms-overlap " | ||||
| $CMD  | ||||
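As a cross-check on the repro128.pbs launch line above: --mpi 4.4.4.24 gives 4x4x4x24 = 1536 ranks, matching 128 nodes at 12 ranks per node, and dividing the 128.128.128.384 global grid by that layout leaves a 32.32.32.16 local volume per rank. A small bash sketch of the same arithmetic:

    # Arithmetic check for the repro128.pbs launch line (values copied from the script above).
    nodes=128; ppn=12
    mpi=(4 4 4 24)            # --mpi 4.4.4.24
    grid=(128 128 128 384)    # --grid 128.128.128.384
    echo "ranks from --mpi : $(( mpi[0]*mpi[1]*mpi[2]*mpi[3] ))"   # 1536
    echo "ranks from nodes : $(( nodes*ppn ))"                     # 1536
    echo -n "local volume     :"
    for d in 0 1 2 3; do echo -n " $(( grid[d]/mpi[d] ))"; done; echo   # 32 32 32 16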
| @@ -2,26 +2,39 @@ | ||||
|  | ||||
| ## qsub -q EarlyAppAccess -A Aurora_Deployment -I -l select=1 -l walltime=60:00 | ||||
|  | ||||
| #PBS -q EarlyAppAccess | ||||
| #PBS -l select=16 | ||||
| #PBS -l walltime=01:00:00 | ||||
| #PBS -l select=16:system=sunspot,place=scatter | ||||
| #PBS -A LatticeQCD_aesp_CNDA | ||||
| #PBS -l walltime=01:00:00 | ||||
| #PBS -N dwf | ||||
| #PBS -k doe | ||||
|  | ||||
| #export OMP_PROC_BIND=spread | ||||
| #unset OMP_PLACES | ||||
|  | ||||
| cd $PBS_O_WORKDIR | ||||
|  | ||||
| source ../sourceme.sh | ||||
| #source ../sourceme.sh | ||||
|  | ||||
| cat $PBS_NODEFILE | ||||
|  | ||||
| #export MPICH_COLL_SYNC=1 | ||||
| #export MPICH_ENV_DISPLAY=1 | ||||
| export MPICH_ | ||||
| export OMP_NUM_THREADS=3 | ||||
| export MPIR_CVAR_CH4_OFI_ENABLE_GPU_PIPELINE=1 | ||||
| module load oneapi/eng-compiler/2023.05.15.003 | ||||
| module load mpich/51.2/icc-all-deterministic-pmix-gpu | ||||
| #export LD_LIBRARY_PATH=/soft/restricted/CNDA/updates/2023.05.15.001/oneapi/compiler/eng-20230512/compiler/linux/lib/:$LD_LIBRARY_PATH | ||||
|  | ||||
| #unset MPIR_CVAR_CH4_OFI_GPU_PIPELINE_D2H_ENGINE_TYPE | ||||
| #unset MPIR_CVAR_CH4_OFI_GPU_PIPELINE_H2D_ENGINE_TYPE | ||||
| #unset MPIR_CVAR_GPU_USE_IMMEDIATE_COMMAND_LIST | ||||
| export MPIR_CVAR_ALLREDUCE_DEVICE_COLLECTIVE=0 | ||||
| export MPIR_CVAR_REDUCE_DEVICE_COLLECTIVE=0 | ||||
| export MPIR_CVAR_ALLREDUCE_INTRA_ALGORITHM=recursive_doubling | ||||
| unset MPIR_CVAR_CH4_COLL_SELECTION_TUNING_JSON_FILE | ||||
| unset MPIR_CVAR_COLL_SELECTION_TUNING_JSON_FILE | ||||
| unset MPIR_CVAR_CH4_POSIX_COLL_SELECTION_TUNING_JSON_FILE | ||||
|  | ||||
| export MPIR_CVAR_CH4_OFI_GPU_PIPELINE_D2H_ENGINE_TYPE=0 | ||||
| export MPIR_CVAR_CH4_OFI_GPU_PIPELINE_H2D_ENGINE_TYPE=0 | ||||
| @@ -32,9 +45,17 @@ export MPIR_CVAR_CH4_OFI_GPU_PIPELINE_NUM_BUFFERS_PER_CHUNK=16 | ||||
| export MPIR_CVAR_CH4_OFI_GPU_PIPELINE_MAX_NUM_BUFFERS=16 | ||||
| export MPICH_OFI_NIC_POLICY=GPU | ||||
|  | ||||
| # 12 ppn, 16 nodes, 192 ranks | ||||
| DIR=repro.$PBS_JOBID | ||||
| mkdir $DIR | ||||
| cd $DIR | ||||
|  | ||||
| CMD="mpiexec -np 192 -ppn 12  -envall \ | ||||
| 	     ./gpu_tile_compact.sh \ | ||||
| 	     ./Test_dwf_mixedcg_prec --mpi 2.4.4.6 --grid 64.128.128.192 \ | ||||
| 		--shm-mpi 1 --shm 4096 --device-mem 32000 --accelerator-threads 32 --seconds 3000" | ||||
| 	     ../gpu_tile_compact.sh \ | ||||
| 	     ../Test_dwf_mixedcg_prec --mpi 2.4.4.6 --grid 64.128.128.192 \ | ||||
| 		--shm-mpi 1 --shm 4096 --device-mem 32000 --accelerator-threads 32 --seconds 3000 --debug-stdout --log Message,Iterative" | ||||
| #--comms-overlap | ||||
| $CMD  | ||||
|  | ||||
| grep Oops Grid.stderr.* > failures.$PBS_JOBID | ||||
| rm core.* | ||||
|  | ||||
|   | ||||
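The reproducibility jobs in this diff share one pattern: run Test_dwf_mixedcg_prec in a per-job (or per-node, per-GPU) directory, then grep the Grid.stderr.* streams for "Oops" into failures.$PBS_JOBID. A hedged post-processing sketch that summarises all such runs afterwards (the globs follow the DIR naming used in these scripts):

    # Summarise "Oops" reproducibility failures across all repro*/reproN* job directories.
    for f in repro*/failures.* repro*/*/failures.*; do
        [ -e "$f" ] || continue                 # skip unexpanded globs
        echo "$f : $(wc -l < "$f") failure lines"
    done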
							
								
								
									
systems/Aurora/tests/repro1gpu.pbs (new file, +82 lines)
							| @@ -0,0 +1,82 @@ | ||||
| #!/bin/bash | ||||
|  | ||||
| #PBS -l select=16:system=sunspot,place=scatter | ||||
| #PBS -A LatticeQCD_aesp_CNDA | ||||
| #PBS -l walltime=02:00:00 | ||||
| #PBS -N repro1gpu | ||||
| #PBS -k doe | ||||
|  | ||||
| #export OMP_PROC_BIND=spread | ||||
| #unset OMP_PLACES | ||||
|  | ||||
| module load oneapi/eng-compiler/2023.05.15.003 | ||||
| module load mpich/51.2/icc-all-deterministic-pmix-gpu | ||||
|  | ||||
| # 56 cores / 6 threads ~9 | ||||
| export OMP_NUM_THREADS=6 | ||||
| export MPIR_CVAR_CH4_OFI_ENABLE_GPU_PIPELINE=1 | ||||
| export MPIR_CVAR_CH4_OFI_GPU_PIPELINE_D2H_ENGINE_TYPE=0 | ||||
| export MPIR_CVAR_CH4_OFI_GPU_PIPELINE_H2D_ENGINE_TYPE=0 | ||||
| export MPIR_CVAR_GPU_USE_IMMEDIATE_COMMAND_LIST=1 | ||||
| export MPIR_CVAR_CH4_OFI_GPU_PIPELINE_BUFFER_SZ=1048576 | ||||
| export MPIR_CVAR_CH4_OFI_GPU_PIPELINE_THRESHOLD=131072 | ||||
| export MPIR_CVAR_CH4_OFI_GPU_PIPELINE_NUM_BUFFERS_PER_CHUNK=16 | ||||
| export MPIR_CVAR_CH4_OFI_GPU_PIPELINE_MAX_NUM_BUFFERS=16 | ||||
| export MPICH_OFI_NIC_POLICY=GPU | ||||
|  | ||||
| export MPIR_CVAR_ALLREDUCE_DEVICE_COLLECTIVE=0 | ||||
| export MPIR_CVAR_REDUCE_DEVICE_COLLECTIVE=0 | ||||
| export MPIR_CVAR_ALLREDUCE_INTRA_ALGORITHM=recursive_doubling | ||||
| unset MPIR_CVAR_CH4_COLL_SELECTION_TUNING_JSON_FILE | ||||
| unset MPIR_CVAR_COLL_SELECTION_TUNING_JSON_FILE | ||||
| unset MPIR_CVAR_CH4_POSIX_COLL_SELECTION_TUNING_JSON_FILE | ||||
| export SYCL_PROGRAM_COMPILE_OPTIONS="-ze-opt-large-register-file" | ||||
|  | ||||
| cd $PBS_O_WORKDIR | ||||
|  | ||||
| NN=`cat $PBS_NODEFILE | wc -l` | ||||
| echo $PBS_NODEFILE | ||||
| cat $PBS_NODEFILE | ||||
|  | ||||
| echo $NN nodes in node file | ||||
| for n in `eval echo {1..$NN}` | ||||
| do | ||||
|  | ||||
| THIS_NODE=`head -n$n $PBS_NODEFILE | tail -n1 ` | ||||
| echo Node $n is $THIS_NODE | ||||
|  | ||||
|  | ||||
| for g in {0..11} | ||||
| do | ||||
| export NUMA_MAP=(0 0 0 1 1 1 0 0 0 1 1 1 ) | ||||
| export TILE_MAP=(0 0 0 0 0 0 1 1 1 1 1 1 ) | ||||
| export  GPU_MAP=(0 1 2 3 4 5 0 1 2 3 4 5 ) | ||||
|  | ||||
| export numa=${NUMA_MAP[$g]} | ||||
| export gpu_id=${GPU_MAP[$g]} | ||||
| export tile_id=${TILE_MAP[$g]} | ||||
| export gpu=$gpu_id.$tile_id | ||||
|  | ||||
| cd $PBS_O_WORKDIR | ||||
|  | ||||
| DIR=repro.1gpu.$PBS_JOBID/node-$n-$THIS_NODE-GPU-$gpu | ||||
| mkdir -p $DIR | ||||
| cd $DIR | ||||
|  | ||||
| echo $THIS_NODE > nodefile | ||||
| echo $gpu > gpu | ||||
|  | ||||
| export ZE_AFFINITY_MASK=$gpu | ||||
| export ONEAPI_DEVICE_FILTER=gpu,level_zero | ||||
|  | ||||
| CMD="mpiexec -np 1 -ppn 1  -envall --hostfile nodefile \ | ||||
| 	     numactl -N $numa -m $numa ../../Test_dwf_mixedcg_prec --mpi 1.1.1.1 --grid 16.16.32.32 \ | ||||
| 		--shm-mpi 0 --shm 4096 --device-mem 32000 --accelerator-threads 32 --seconds 6000 --debug-stdout --log Message" | ||||
| echo $CMD | ||||
| $CMD & | ||||
|  | ||||
| done | ||||
| done | ||||
|  | ||||
| wait | ||||
|  | ||||
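For orientation, the nested loops above launch one independent single-rank run per GPU tile per node: with select=16 and 12 tiles per node (6 GPUs x 2 tiles, per the GPU_MAP/TILE_MAP tables) that is 192 concurrent runs, each in its own repro.1gpu.$PBS_JOBID/node-*-GPU-* directory. A hedged bookkeeping sketch:

    # Bookkeeping sketch for the per-tile fan-out in repro1gpu.pbs.
    nodes=16           # PBS -l select=16
    tiles_per_node=12  # 6 GPUs x 2 tiles, per GPU_MAP/TILE_MAP above
    echo "independent 1-rank runs: $(( nodes * tiles_per_node ))"   # 192
    # each run writes into e.g. repro.1gpu.$PBS_JOBID/node-3-<hostname>-GPU-4.1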
							
								
								
									
systems/Aurora/tests/reproN.pbs (new file, +98 lines)
							| @@ -0,0 +1,98 @@ | ||||
| #!/bin/bash | ||||
|  | ||||
| #PBS -l select=32:system=sunspot,place=scatter | ||||
| #PBS -A LatticeQCD_aesp_CNDA | ||||
| #PBS -l walltime=02:00:00 | ||||
| #PBS -N reproN | ||||
| #PBS -k doe | ||||
|  | ||||
| #export OMP_PROC_BIND=spread | ||||
| #unset OMP_PLACES | ||||
|  | ||||
| module load oneapi/eng-compiler/2023.05.15.003 | ||||
| module load mpich/51.2/icc-all-deterministic-pmix-gpu | ||||
|  | ||||
| # 56 cores / 6 threads ~9 | ||||
| export OMP_NUM_THREADS=6 | ||||
| export MPIR_CVAR_CH4_OFI_ENABLE_GPU_PIPELINE=1 | ||||
| #export MPIR_CVAR_CH4_OFI_GPU_PIPELINE_D2H_ENGINE_TYPE=0 | ||||
| #export MPIR_CVAR_CH4_OFI_GPU_PIPELINE_H2D_ENGINE_TYPE=0 | ||||
| #export MPIR_CVAR_CH4_OFI_GPU_PIPELINE_BUFFER_SZ=1048576 | ||||
| #export MPIR_CVAR_CH4_OFI_GPU_PIPELINE_THRESHOLD=131072 | ||||
| #export MPIR_CVAR_CH4_OFI_GPU_PIPELINE_NUM_BUFFERS_PER_CHUNK=16 | ||||
| #export MPIR_CVAR_CH4_OFI_GPU_PIPELINE_MAX_NUM_BUFFERS=16 | ||||
| #export MPIR_CVAR_GPU_USE_IMMEDIATE_COMMAND_LIST=1 | ||||
|  | ||||
| export SYCL_PI_LEVEL_ZERO_USE_IMMEDIATE_COMMANDLISTS=1 | ||||
| export SYCL_PI_LEVEL_ZERO_USE_COPY_ENGINE=1 | ||||
| export SYCL_PI_LEVEL_ZERO_USE_COPY_ENGINE_FOR_D2D_COPY=1 | ||||
| export SYCL_PROGRAM_COMPILE_OPTIONS="-ze-opt-large-register-file" | ||||
|  | ||||
| export GRID_PRINT_ENTIRE_LOG=0 | ||||
| export GRID_CHECKSUM_RECV_BUF=0 | ||||
| export GRID_CHECKSUM_SEND_BUF=0 | ||||
|  | ||||
| export MPICH_OFI_NIC_POLICY=GPU | ||||
|  | ||||
| export MPIR_CVAR_ALLREDUCE_DEVICE_COLLECTIVE=0 | ||||
| export MPIR_CVAR_REDUCE_DEVICE_COLLECTIVE=0 | ||||
| export MPIR_CVAR_ALLREDUCE_INTRA_ALGORITHM=recursive_doubling | ||||
| unset MPIR_CVAR_CH4_COLL_SELECTION_TUNING_JSON_FILE | ||||
| unset MPIR_CVAR_COLL_SELECTION_TUNING_JSON_FILE | ||||
| unset MPIR_CVAR_CH4_POSIX_COLL_SELECTION_TUNING_JSON_FILE | ||||
|  | ||||
| cd $PBS_O_WORKDIR | ||||
|  | ||||
| NN=`cat $PBS_NODEFILE | wc -l` | ||||
| echo $PBS_NODEFILE | ||||
| cat $PBS_NODEFILE | ||||
|  | ||||
| echo $NN nodes in node file | ||||
| for n in `eval echo {1..$NN}` | ||||
| do | ||||
|  | ||||
| cd $PBS_O_WORKDIR | ||||
|  | ||||
| THIS_NODE=`head -n$n $PBS_NODEFILE | tail -n1 ` | ||||
| echo Node $n is $THIS_NODE | ||||
|  | ||||
| DIR=reproN.$PBS_JOBID/node-$n-$THIS_NODE | ||||
|  | ||||
| mkdir -p $DIR | ||||
| cd $DIR | ||||
|  | ||||
| echo $THIS_NODE > nodefile | ||||
|  | ||||
| #CMD="mpiexec -np 12 -ppn 12  -envall --hostfile nodefile \ | ||||
| #	     ../../gpu_tile_compact.sh \ | ||||
| #	     ../../Test_dwf_mixedcg_prec --mpi 1.2.2.3 --grid 32.64.64.96 \ | ||||
| #		--shm-mpi 0 --shm 4096 --device-mem 32000 --accelerator-threads 32 --seconds 6000 --debug-stdout --log Message --comms-overlap" | ||||
|  | ||||
| CMD="mpiexec -np 12 -ppn 12  -envall --hostfile nodefile \ | ||||
| 	     ../../gpu_tile_compact.sh \ | ||||
| 	     ../../Test_dwf_mixedcg_prec --mpi 1.2.2.3 --grid 32.64.64.96 \ | ||||
| 		--shm-mpi 1 --shm 4096 --device-mem 32000 --accelerator-threads 32 --seconds 6000 --debug-stdout --log Message --comms-overlap" | ||||
|  | ||||
| echo $CMD > command-line | ||||
| env > environment | ||||
| $CMD & | ||||
|  | ||||
| done | ||||
|  | ||||
| # Suspicious: "wait" here was allowing jobs to collide and knock each other out | ||||
| #wait | ||||
|  | ||||
| sleep 6500 | ||||
|  | ||||
| for n in ` eval echo {1..$NN} ` | ||||
| do | ||||
|  | ||||
| THIS_NODE=`head -n$n $PBS_NODEFILE | tail -n1 ` | ||||
| DIR=reproN.$PBS_JOBID/node-$n-$THIS_NODE | ||||
|  | ||||
| cd $DIR | ||||
|  | ||||
| grep Oops Grid.stderr.* > failures.$PBS_JOBID | ||||
| rm core.* | ||||
|  | ||||
| done | ||||
| @@ -36,5 +36,5 @@ export MPICH_OFI_NIC_POLICY=GPU | ||||
| CMD="mpiexec -np 192 -ppn 12  -envall \ | ||||
| 	     ./gpu_tile_compact.sh \ | ||||
| 	     ./Test_staggered_cg_prec --mpi 2.4.4.6 --grid 128.128.128.192 \ | ||||
| 	     --shm-mpi 1 --shm 4096 --device-mem 32000 --accelerator-threads 32 --seconds 3000" | ||||
| 	     --shm-mpi 1 --shm 4096 --device-mem 32000 --accelerator-threads 32 --seconds 3000 --comms-overlap" | ||||
| $CMD  | ||||
|   | ||||
							
								
								
									
systems/Frontier/benchmarks/Benchmark_usqcd.csv (new file, +76 lines)
							| @@ -0,0 +1,76 @@ | ||||
| Memory Bandwidth | ||||
|  | ||||
| Bytes, GB/s per node | ||||
| 6291456, 379.297050 | ||||
| 100663296, 3754.674992 | ||||
| 509607936, 6521.472413 | ||||
| 1610612736, 8513.456479 | ||||
| 3932160000, 9018.901766 | ||||
|  | ||||
|  | ||||
| GEMM | ||||
|  | ||||
|  M, N, K, BATCH, GF/s per rank | ||||
| 16, 8, 16, 256, 0.564958 | ||||
| 16, 16, 16, 256, 243.148058 | ||||
| 16, 32, 16, 256, 440.346877 | ||||
| 32, 8, 32, 256, 439.194136 | ||||
| 32, 16, 32, 256, 847.334141 | ||||
| 32, 32, 32, 256, 1430.892623 | ||||
| 64, 8, 64, 256, 1242.756741 | ||||
| 64, 16, 64, 256, 2196.689493 | ||||
| 64, 32, 64, 256, 3697.458072 | ||||
| 16, 8, 256, 256, 899.582627 | ||||
| 16, 16, 256, 256, 1673.537756 | ||||
| 16, 32, 256, 256, 2959.597089 | ||||
| 32, 8, 256, 256, 1558.858630 | ||||
| 32, 16, 256, 256, 2864.839445 | ||||
| 32, 32, 256, 256, 4810.671254 | ||||
| 64, 8, 256, 256, 2386.092942 | ||||
| 64, 16, 256, 256, 4451.665937 | ||||
| 64, 32, 256, 256, 5942.124095 | ||||
| 8, 256, 16, 256, 799.867271 | ||||
| 16, 256, 16, 256, 1584.624888 | ||||
| 32, 256, 16, 256, 1949.422338 | ||||
| 8, 256, 32, 256, 1389.417474 | ||||
| 16, 256, 32, 256, 2668.344493 | ||||
| 32, 256, 32, 256, 3234.162120 | ||||
| 8, 256, 64, 256, 2150.925128 | ||||
| 16, 256, 64, 256, 4012.488132 | ||||
| 32, 256, 64, 256, 5154.785521 | ||||
|  | ||||
|  | ||||
|  | ||||
| Communications | ||||
|  | ||||
| Packet bytes, direction, GB/s per node | ||||
| 4718592, 1, 245.026198 | ||||
| 4718592, 2, 251.180996 | ||||
| 4718592, 3, 361.110977 | ||||
| 4718592, 5, 247.898447 | ||||
| 4718592, 6, 249.867523 | ||||
| 4718592, 7, 359.033061 | ||||
| 15925248, 1, 255.030946 | ||||
| 15925248, 2, 264.453890 | ||||
| 15925248, 3, 392.949183 | ||||
| 15925248, 5, 256.040644 | ||||
| 15925248, 6, 264.681896 | ||||
| 15925248, 7, 392.102622 | ||||
| 37748736, 1, 258.823333 | ||||
| 37748736, 2, 268.181577 | ||||
| 37748736, 3, 401.478191 | ||||
| 37748736, 5, 258.995363 | ||||
| 37748736, 6, 268.206586 | ||||
| 37748736, 7, 400.397611 | ||||
|  | ||||
|  | ||||
| Per node summary table | ||||
|  | ||||
| L , Wilson, DWF4, Staggered, GF/s per node | ||||
|  | ||||
| 8 , 155, 1386, 50 | ||||
| 12 , 694, 4208, 230 | ||||
| 16 , 1841, 6675, 609 | ||||
| 24 , 3934, 8573, 1641 | ||||
| 32 , 5083, 9771, 3086 | ||||
|  | ||||
| 
systems/Frontier/benchmarks/Benchmark_usqcd.log (new file, +702 lines)
							| @@ -0,0 +1,702 @@ | ||||
| RANK 1 using GPU 1 | ||||
| RANK 5 using GPU 6 | ||||
| RANK 0 using GPU 0 | ||||
| RANK 2 using GPU 2 | ||||
| RANK 3 using GPU 3 | ||||
| RANK 6 using GPU 5 | ||||
| RANK 7 using GPU 4 | ||||
| RANK 4 using GPU 7 | ||||
| world_rank 0 has 1 devices | ||||
| AcceleratorHipInit: ======================== | ||||
| AcceleratorHipInit: Device Number    : 0 | ||||
| AcceleratorHipInit: ======================== | ||||
| AcceleratorHipInit: Device identifier: AMD Instinct MI250X | ||||
| AcceleratorHipInit:   totalGlobalMem: 68702699520  | ||||
| AcceleratorHipInit:   isMultiGpuBoard: 0  | ||||
| AcceleratorHipInit:   warpSize: 64  | ||||
| AcceleratorHipInit: using default device  | ||||
| AcceleratorHipInit: assume user or srun sets ROCR_VISIBLE_DEVICES and numa binding  | ||||
| AcceleratorHipInit: Configure options --enable-setdevice=no  | ||||
| local rank 0 device 0 bus id: 0000:c1:00.0 | ||||
| AcceleratorHipInit: ================================================ | ||||
| SharedMemoryMpi:  World communicator of size 8 | ||||
| SharedMemoryMpi:  Node  communicator of size 8 | ||||
| 0SharedMemoryMpi:  SharedMemoryMPI.cc acceleratorAllocDevice 4294967296bytes at 0x7ff651800000 - 7ff7517fffff for comms buffers  | ||||
| Setting up IPC | ||||
|  | ||||
| __|__|__|__|__|__|__|__|__|__|__|__|__|__|__ | ||||
| __|__|__|__|__|__|__|__|__|__|__|__|__|__|__ | ||||
| __|_ |  |  |  |  |  |  |  |  |  |  |  | _|__ | ||||
| __|_                                    _|__ | ||||
| __|_   GGGG    RRRR    III    DDDD      _|__ | ||||
| __|_  G        R   R    I     D   D     _|__ | ||||
| __|_  G        R   R    I     D    D    _|__ | ||||
| __|_  G  GG    RRRR     I     D    D    _|__ | ||||
| __|_  G   G    R  R     I     D   D     _|__ | ||||
| __|_   GGGG    R   R   III    DDDD      _|__ | ||||
| __|_                                    _|__ | ||||
| __|__|__|__|__|__|__|__|__|__|__|__|__|__|__ | ||||
| __|__|__|__|__|__|__|__|__|__|__|__|__|__|__ | ||||
|   |  |  |  |  |  |  |  |  |  |  |  |  |  |   | ||||
|  | ||||
|  | ||||
| Copyright (C) 2015 Peter Boyle, Azusa Yamaguchi, Guido Cossu, Antonin Portelli and other authors | ||||
|  | ||||
| This program is free software; you can redistribute it and/or modify | ||||
| it under the terms of the GNU General Public License as published by | ||||
| the Free Software Foundation; either version 2 of the License, or | ||||
| (at your option) any later version. | ||||
|  | ||||
| This program is distributed in the hope that it will be useful, | ||||
| but WITHOUT ANY WARRANTY; without even the implied warranty of | ||||
| MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the | ||||
| GNU General Public License for more details. | ||||
| Current Grid git commit hash=9a1ad6a5eb29a369d74784e7483c60e578323d76: (HEAD -> develop, origin/develop, origin/HEAD) clean | ||||
|  | ||||
| Grid : Message : ================================================  | ||||
| Grid : Message : MPI is initialised and logging filters activated  | ||||
| Grid : Message : ================================================  | ||||
| Grid : Message : This rank is running on host frontier01320 | ||||
| Grid : Message : Requested 4294967296 byte stencil comms buffers  | ||||
| Grid : Message : MemoryManager Cache 54962159616 bytes  | ||||
| Grid : Message : MemoryManager::Init() setting up | ||||
| Grid : Message : MemoryManager::Init() cache pool for recent host   allocations: SMALL 8 LARGE 2 HUGE 0 | ||||
| Grid : Message : MemoryManager::Init() cache pool for recent device allocations: SMALL 16 LARGE 8 Huge 0 | ||||
| Grid : Message : MemoryManager::Init() cache pool for recent shared allocations: SMALL 16 LARGE 8 Huge 0 | ||||
| Grid : Message : MemoryManager::Init() Non unified: Caching accelerator data in dedicated memory | ||||
| Grid : Message : MemoryManager::Init() Using hipMalloc | ||||
| Grid : Message : 0.293720 s : ================================================================================== | ||||
| Grid : Message : 0.293790 s : = Grid is setup to use 1 threads | ||||
| Grid : Message : 0.293800 s : ================================================================================== | ||||
| Grid : Message : 0.293810 s : Grid Default Decomposition patterns | ||||
| Grid : Message : 0.293810 s : 	OpenMP threads : 1 | ||||
| Grid : Message : 0.293820 s : 	MPI tasks      : 1 2 2 2  | ||||
| Grid : Message : 0.293870 s : 	vReal          : 512bits ; 1 2 2 2  | ||||
| Grid : Message : 0.293890 s : 	vRealF         : 512bits ; 2 2 2 2  | ||||
| Grid : Message : 0.293910 s : 	vRealD         : 512bits ; 1 2 2 2  | ||||
| Grid : Message : 0.293920 s : 	vComplex       : 512bits ; 1 1 2 2  | ||||
| Grid : Message : 0.293930 s : 	vComplexF      : 512bits ; 1 2 2 2  | ||||
| Grid : Message : 0.293960 s : 	vComplexD      : 512bits ; 1 1 2 2  | ||||
| Grid : Message : 0.293970 s : ================================================================================== | ||||
| Grid : Message : 0.293980 s : ================================================================================== | ||||
| Grid : Message : 0.293990 s :  Clover dslash 4D vectorised (temporarily Wilson) | ||||
| Grid : Message : 0.294000 s : ================================================================================== | ||||
| Grid : Message : 0.301330 s : ================================================================================== | ||||
| Grid : Message : 0.301360 s : Benchmark DWF on 8^4 local volume  | ||||
| Grid : Message : 0.301370 s : * Nc             : 3 | ||||
| Grid : Message : 0.301380 s : * Global volume  : 8 16 16 16  | ||||
| Grid : Message : 0.301410 s : * Ls             : 1 | ||||
| Grid : Message : 0.301420 s : * ranks          : 8 | ||||
| Grid : Message : 0.301430 s : * nodes          : 1 | ||||
| Grid : Message : 0.301440 s : * ranks/node     : 8 | ||||
| Grid : Message : 0.301450 s : * ranks geom     : 1 2 2 2  | ||||
| Grid : Message : 0.301460 s : * Using 1 threads | ||||
| Grid : Message : 0.301470 s : ================================================================================== | ||||
| Grid : Message : 0.345030 s : Initialised RNGs | ||||
| Grid : Message : 0.158302 s : ================================================================================== | ||||
| Grid : Message : 0.158310 s : * Using GENERIC Nc WilsonKernels | ||||
| Grid : Message : 0.158311 s : * Using Overlapped Comms/Compute | ||||
| Grid : Message : 0.158312 s : * SINGLE precision  | ||||
| Grid : Message : 0.158313 s : ================================================================================== | ||||
| Grid : Message : 0.240681 s : Deo FlopsPerSite is 1344 | ||||
| Grid : Message : 0.240711 s : Deo mflop/s =   154914.0 (130.8) 139367.7-159565.9 | ||||
| Grid : Message : 0.240715 s : Deo mflop/s per rank   19364.3 | ||||
| Grid : Message : 0.240716 s : Deo mflop/s per node   154914.0 | ||||
| Grid : Message : 0.240718 s : ================================================================================== | ||||
| Grid : Message : 0.240719 s : * Using UNROLLED WilsonKernels | ||||
| Grid : Message : 0.240719 s : * Using Overlapped Comms/Compute | ||||
| Grid : Message : 0.240719 s : * SINGLE precision  | ||||
| Grid : Message : 0.240719 s : ================================================================================== | ||||
| Grid : Message : 0.315028 s : Deo FlopsPerSite is 1344.0 | ||||
| Grid : Message : 0.315033 s : Deo mflop/s =   151459.5 (142.0) 131856.9-157286.4 | ||||
| Grid : Message : 0.315036 s : Deo mflop/s per rank   18932.4 | ||||
| Grid : Message : 0.315037 s : Deo mflop/s per node   151459.5 | ||||
| Grid : Message : 0.315038 s : ================================================================================== | ||||
| Grid : Message : 0.315040 s : 8^4 x 1 Deo Best  mflop/s        =   154914.0 ; 154914.0 per node  | ||||
| Grid : Message : 0.315042 s : 8^4 x 1 Deo Worst mflop/s        =   151459.5 ; 151459.5 per node  | ||||
| Grid : Message : 0.315043 s : G/S/C ; G/O/C ; G/S/S ; G/O/S  | ||||
| Grid : Message : 0.315043 s : 154914.0 ; 151459.5 ;  | ||||
| Grid : Message : 0.315044 s : ================================================================================== | ||||
| Grid : Message : 0.316507 s : ================================================================================== | ||||
| Grid : Message : 0.316510 s : Benchmark DWF on 12^4 local volume  | ||||
| Grid : Message : 0.316511 s : * Nc             : 3 | ||||
| Grid : Message : 0.316512 s : * Global volume  : 12 24 24 24  | ||||
| Grid : Message : 0.316515 s : * Ls             : 1 | ||||
| Grid : Message : 0.316516 s : * ranks          : 8 | ||||
| Grid : Message : 0.316517 s : * nodes          : 1 | ||||
| Grid : Message : 0.316518 s : * ranks/node     : 8 | ||||
| Grid : Message : 0.316518 s : * ranks geom     : 1 2 2 2  | ||||
| Grid : Message : 0.316519 s : * Using 1 threads | ||||
| Grid : Message : 0.316520 s : ================================================================================== | ||||
| Grid : Message : 0.327883 s : Initialised RNGs | ||||
| Grid : Message : 0.786395 s : ================================================================================== | ||||
| Grid : Message : 0.786404 s : * Using GENERIC Nc WilsonKernels | ||||
| Grid : Message : 0.786405 s : * Using Overlapped Comms/Compute | ||||
| Grid : Message : 0.786406 s : * SINGLE precision  | ||||
| Grid : Message : 0.786406 s : ================================================================================== | ||||
| Grid : Message : 0.871646 s : Deo FlopsPerSite is 1344.0 | ||||
| Grid : Message : 0.871659 s : Deo mflop/s =   684982.2 (632.4) 609162.5-714594.5 | ||||
| Grid : Message : 0.871663 s : Deo mflop/s per rank   85622.8 | ||||
| Grid : Message : 0.871664 s : Deo mflop/s per node   684982.2 | ||||
| Grid : Message : 0.871665 s : ================================================================================== | ||||
| Grid : Message : 0.871665 s : * Using UNROLLED WilsonKernels | ||||
| Grid : Message : 0.871665 s : * Using Overlapped Comms/Compute | ||||
| Grid : Message : 0.871665 s : * SINGLE precision  | ||||
| Grid : Message : 0.871665 s : ================================================================================== | ||||
| Grid : Message : 0.953697 s : Deo FlopsPerSite is 1344.0 | ||||
| Grid : Message : 0.953702 s : Deo mflop/s =   693556.6 (576.5) 663552.0-719204.7 | ||||
| Grid : Message : 0.953705 s : Deo mflop/s per rank   86694.6 | ||||
| Grid : Message : 0.953706 s : Deo mflop/s per node   693556.6 | ||||
| Grid : Message : 0.953707 s : ================================================================================== | ||||
| Grid : Message : 0.953708 s : 12^4 x 1 Deo Best  mflop/s        =   693556.6 ; 693556.6 per node  | ||||
| Grid : Message : 0.953710 s : 12^4 x 1 Deo Worst mflop/s        =   684982.2 ; 684982.2 per node  | ||||
| Grid : Message : 0.953712 s : G/S/C ; G/O/C ; G/S/S ; G/O/S  | ||||
| Grid : Message : 0.953712 s : 684982.2 ; 693556.6 ;  | ||||
| Grid : Message : 0.953713 s : ================================================================================== | ||||
| Grid : Message : 0.957609 s : ================================================================================== | ||||
| Grid : Message : 0.957613 s : Benchmark DWF on 16^4 local volume  | ||||
| Grid : Message : 0.957614 s : * Nc             : 3 | ||||
| Grid : Message : 0.957615 s : * Global volume  : 16 32 32 32  | ||||
| Grid : Message : 0.957620 s : * Ls             : 1 | ||||
| Grid : Message : 0.957621 s : * ranks          : 8 | ||||
| Grid : Message : 0.957622 s : * nodes          : 1 | ||||
| Grid : Message : 0.957623 s : * ranks/node     : 8 | ||||
| Grid : Message : 0.957623 s : * ranks geom     : 1 2 2 2  | ||||
| Grid : Message : 0.957624 s : * Using 1 threads | ||||
| Grid : Message : 0.957625 s : ================================================================================== | ||||
| Grid : Message : 0.985828 s : Initialised RNGs | ||||
| Grid : Message : 2.379761 s : ================================================================================== | ||||
| Grid : Message : 2.379772 s : * Using GENERIC Nc WilsonKernels | ||||
| Grid : Message : 2.379773 s : * Using Overlapped Comms/Compute | ||||
| Grid : Message : 2.379774 s : * SINGLE precision  | ||||
| Grid : Message : 2.379775 s : ================================================================================== | ||||
| Grid : Message : 2.486712 s : Deo FlopsPerSite is 1344.0 | ||||
| Grid : Message : 2.486725 s : Deo mflop/s =   1803226.1 (1139.4) 1646362.3-1864135.1 | ||||
| Grid : Message : 2.486729 s : Deo mflop/s per rank   225403.3 | ||||
| Grid : Message : 2.486731 s : Deo mflop/s per node   1803226.1 | ||||
| Grid : Message : 2.486732 s : ================================================================================== | ||||
| Grid : Message : 2.486732 s : * Using UNROLLED WilsonKernels | ||||
| Grid : Message : 2.486732 s : * Using Overlapped Comms/Compute | ||||
| Grid : Message : 2.486732 s : * SINGLE precision  | ||||
| Grid : Message : 2.486732 s : ================================================================================== | ||||
| Grid : Message : 2.584407 s : Deo FlopsPerSite is 1344.0 | ||||
| Grid : Message : 2.584412 s : Deo mflop/s =   1840587.3 (1119.6) 1779401.7-1914791.0 | ||||
| Grid : Message : 2.584415 s : Deo mflop/s per rank   230073.4 | ||||
| Grid : Message : 2.584416 s : Deo mflop/s per node   1840587.3 | ||||
| Grid : Message : 2.584417 s : ================================================================================== | ||||
| Grid : Message : 2.584418 s : 16^4 x 1 Deo Best  mflop/s        =   1840587.3 ; 1840587.3 per node  | ||||
| Grid : Message : 2.584420 s : 16^4 x 1 Deo Worst mflop/s        =   1803226.1 ; 1803226.1 per node  | ||||
| Grid : Message : 2.584422 s : G/S/C ; G/O/C ; G/S/S ; G/O/S  | ||||
| Grid : Message : 2.584422 s : 1803226.1 ; 1840587.3 ;  | ||||
| Grid : Message : 2.584423 s : ================================================================================== | ||||
| Grid : Message : 2.592858 s : ================================================================================== | ||||
| Grid : Message : 2.592862 s : Benchmark DWF on 24^4 local volume  | ||||
| Grid : Message : 2.592863 s : * Nc             : 3 | ||||
| Grid : Message : 2.592864 s : * Global volume  : 24 48 48 48  | ||||
| Grid : Message : 2.592869 s : * Ls             : 1 | ||||
| Grid : Message : 2.592870 s : * ranks          : 8 | ||||
| Grid : Message : 2.592871 s : * nodes          : 1 | ||||
| Grid : Message : 2.592872 s : * ranks/node     : 8 | ||||
| Grid : Message : 2.592872 s : * ranks geom     : 1 2 2 2  | ||||
| Grid : Message : 2.592873 s : * Using 1 threads | ||||
| Grid : Message : 2.592874 s : ================================================================================== | ||||
| Grid : Message : 2.715623 s : Initialised RNGs | ||||
| Grid : Message : 9.608838 s : ================================================================================== | ||||
| Grid : Message : 9.608852 s : * Using GENERIC Nc WilsonKernels | ||||
| Grid : Message : 9.608853 s : * Using Overlapped Comms/Compute | ||||
| Grid : Message : 9.608854 s : * SINGLE precision  | ||||
| Grid : Message : 9.608855 s : ================================================================================== | ||||
| Grid : Message : 9.870294 s : Deo FlopsPerSite is 1344.0 | ||||
| Grid : Message : 9.870309 s : Deo mflop/s =   3861903.3 (1708.9) 3511078.3-3937368.2 | ||||
| Grid : Message : 9.870313 s : Deo mflop/s per rank   482737.9 | ||||
| Grid : Message : 9.870314 s : Deo mflop/s per node   3861903.3 | ||||
| Grid : Message : 9.870315 s : ================================================================================== | ||||
| Grid : Message : 9.870316 s : * Using UNROLLED WilsonKernels | ||||
| Grid : Message : 9.870316 s : * Using Overlapped Comms/Compute | ||||
| Grid : Message : 9.870317 s : * SINGLE precision  | ||||
| Grid : Message : 9.870317 s : ================================================================================== | ||||
| Grid : Message : 10.101619 s : Deo FlopsPerSite is 1344.0 | ||||
| Grid : Message : 10.101624 s : Deo mflop/s =   3933599.5 (1412.7) 3835758.7-4008152.3 | ||||
| Grid : Message : 10.101627 s : Deo mflop/s per rank   491699.9 | ||||
| Grid : Message : 10.101628 s : Deo mflop/s per node   3933599.5 | ||||
| Grid : Message : 10.101629 s : ================================================================================== | ||||
| Grid : Message : 10.101629 s : 24^4 x 1 Deo Best  mflop/s        =   3933599.5 ; 3933599.5 per node  | ||||
| Grid : Message : 10.101631 s : 24^4 x 1 Deo Worst mflop/s        =   3861903.3 ; 3861903.3 per node  | ||||
| Grid : Message : 10.101633 s : G/S/C ; G/O/C ; G/S/S ; G/O/S  | ||||
| Grid : Message : 10.101633 s : 3861903.3 ; 3933599.5 ;  | ||||
| Grid : Message : 10.101634 s : ================================================================================== | ||||
| Grid : Message : 10.139642 s : ================================================================================== | ||||
| Grid : Message : 10.139652 s : Benchmark DWF on 32^4 local volume  | ||||
| Grid : Message : 10.139653 s : * Nc             : 3 | ||||
| Grid : Message : 10.139654 s : * Global volume  : 32 64 64 64  | ||||
| Grid : Message : 10.139661 s : * Ls             : 1 | ||||
| Grid : Message : 10.139661 s : * ranks          : 8 | ||||
| Grid : Message : 10.139662 s : * nodes          : 1 | ||||
| Grid : Message : 10.139662 s : * ranks/node     : 8 | ||||
| Grid : Message : 10.139662 s : * ranks geom     : 1 2 2 2  | ||||
| Grid : Message : 10.139663 s : * Using 1 threads | ||||
| Grid : Message : 10.139663 s : ================================================================================== | ||||
| Grid : Message : 10.502161 s : Initialised RNGs | ||||
| Grid : Message : 32.211092 s : ================================================================================== | ||||
| Grid : Message : 32.211107 s : * Using GENERIC Nc WilsonKernels | ||||
| Grid : Message : 32.211108 s : * Using Overlapped Comms/Compute | ||||
| Grid : Message : 32.211109 s : * SINGLE precision  | ||||
| Grid : Message : 32.211110 s : ================================================================================== | ||||
| Grid : Message : 32.841718 s : Deo FlopsPerSite is 1344.0 | ||||
| Grid : Message : 32.841732 s : Deo mflop/s =   4988499.9 (2722.5) 4244837.8-5120022.3 | ||||
| Grid : Message : 32.841736 s : Deo mflop/s per rank   623562.5 | ||||
| Grid : Message : 32.841737 s : Deo mflop/s per node   4988499.9 | ||||
| Grid : Message : 32.841738 s : ================================================================================== | ||||
| Grid : Message : 32.841739 s : * Using UNROLLED WilsonKernels | ||||
| Grid : Message : 32.841739 s : * Using Overlapped Comms/Compute | ||||
| Grid : Message : 32.841740 s : * SINGLE precision  | ||||
| Grid : Message : 32.841740 s : ================================================================================== | ||||
| Grid : Message : 33.407434 s : Deo FlopsPerSite is 1344.0 | ||||
| Grid : Message : 33.407442 s : Deo mflop/s =   5082758.0 (1883.1) 4971027.0-5205119.6 | ||||
| Grid : Message : 33.407446 s : Deo mflop/s per rank   635344.7 | ||||
| Grid : Message : 33.407447 s : Deo mflop/s per node   5082758.0 | ||||
| Grid : Message : 33.407448 s : ================================================================================== | ||||
| Grid : Message : 33.407448 s : 32^4 x 1 Deo Best  mflop/s        =   5082758.0 ; 5082758.0 per node  | ||||
| Grid : Message : 33.407450 s : 32^4 x 1 Deo Worst mflop/s        =   4988499.9 ; 4988499.9 per node  | ||||
| Grid : Message : 33.407452 s : G/S/C ; G/O/C ; G/S/S ; G/O/S  | ||||
| Grid : Message : 33.407452 s : 4988499.9 ; 5082758.0 ;  | ||||
| Grid : Message : 33.407453 s : ================================================================================== | ||||
| Grid : Message : 33.506785 s : ================================================================================== | ||||
| Grid : Message : 33.506798 s :  Domain wall dslash 4D vectorised | ||||
| Grid : Message : 33.506799 s : ================================================================================== | ||||
| Grid : Message : 33.530686 s : ================================================================================== | ||||
| Grid : Message : 33.530689 s : Benchmark DWF on 8^4 local volume  | ||||
| Grid : Message : 33.530690 s : * Nc             : 3 | ||||
| Grid : Message : 33.530691 s : * Global volume  : 8 16 16 16  | ||||
| Grid : Message : 33.530698 s : * Ls             : 12 | ||||
| Grid : Message : 33.530699 s : * ranks          : 8 | ||||
| Grid : Message : 33.530700 s : * nodes          : 1 | ||||
| Grid : Message : 33.530701 s : * ranks/node     : 8 | ||||
| Grid : Message : 33.530702 s : * ranks geom     : 1 2 2 2  | ||||
| Grid : Message : 33.530703 s : * Using 1 threads | ||||
| Grid : Message : 33.530704 s : ================================================================================== | ||||
| Grid : Message : 33.545465 s : Initialised RNGs | ||||
| Grid : Message : 33.752384 s : ================================================================================== | ||||
| Grid : Message : 33.752397 s : * Using GENERIC Nc WilsonKernels | ||||
| Grid : Message : 33.752398 s : * Using Overlapped Comms/Compute | ||||
| Grid : Message : 33.752399 s : * SINGLE precision  | ||||
| Grid : Message : 33.752400 s : ================================================================================== | ||||
| Grid : Message : 33.851964 s : Deo FlopsPerSite is 1344.0 | ||||
| Grid : Message : 33.851977 s : Deo mflop/s =   1383287.7 (849.8) 1321205.8-1420651.4 | ||||
| Grid : Message : 33.851981 s : Deo mflop/s per rank   172911.0 | ||||
| Grid : Message : 33.851983 s : Deo mflop/s per node   1383287.7 | ||||
| Grid : Message : 33.851984 s : ================================================================================== | ||||
| Grid : Message : 33.851984 s : * Using UNROLLED WilsonKernels | ||||
| Grid : Message : 33.851984 s : * Using Overlapped Comms/Compute | ||||
| Grid : Message : 33.851984 s : * SINGLE precision  | ||||
| Grid : Message : 33.851984 s : ================================================================================== | ||||
| Grid : Message : 33.949235 s : Deo FlopsPerSite is 1344.0 | ||||
| Grid : Message : 33.949240 s : Deo mflop/s =   1386335.8 (734.6) 1341325.6-1428330.6 | ||||
| Grid : Message : 33.949243 s : Deo mflop/s per rank   173292.0 | ||||
| Grid : Message : 33.949244 s : Deo mflop/s per node   1386335.8 | ||||
| Grid : Message : 33.949245 s : ================================================================================== | ||||
| Grid : Message : 33.949245 s : 8^4 x 12 Deo Best  mflop/s        =   1386335.8 ; 1386335.8 per node  | ||||
| Grid : Message : 33.949247 s : 8^4 x 12 Deo Worst mflop/s        =   1383287.7 ; 1383287.7 per node  | ||||
| Grid : Message : 33.949249 s : G/S/C ; G/O/C ; G/S/S ; G/O/S  | ||||
| Grid : Message : 33.949249 s : 1383287.7 ; 1386335.8 ;  | ||||
| Grid : Message : 33.949250 s : ================================================================================== | ||||
| Grid : Message : 33.952789 s : ================================================================================== | ||||
| Grid : Message : 33.952793 s : Benchmark DWF on 12^4 local volume  | ||||
| Grid : Message : 33.952794 s : * Nc             : 3 | ||||
| Grid : Message : 33.952795 s : * Global volume  : 12 24 24 24  | ||||
| Grid : Message : 33.952800 s : * Ls             : 12 | ||||
| Grid : Message : 33.952801 s : * ranks          : 8 | ||||
| Grid : Message : 33.952802 s : * nodes          : 1 | ||||
| Grid : Message : 33.952803 s : * ranks/node     : 8 | ||||
| Grid : Message : 33.952803 s : * ranks geom     : 1 2 2 2  | ||||
| Grid : Message : 33.952804 s : * Using 1 threads | ||||
| Grid : Message : 33.952805 s : ================================================================================== | ||||
| Grid : Message : 34.362200 s : Initialised RNGs | ||||
| Grid : Message : 34.969821 s : ================================================================================== | ||||
| Grid : Message : 34.969832 s : * Using GENERIC Nc WilsonKernels | ||||
| Grid : Message : 34.969833 s : * Using Overlapped Comms/Compute | ||||
| Grid : Message : 34.969834 s : * SINGLE precision  | ||||
| Grid : Message : 34.969835 s : ================================================================================== | ||||
| Grid : Message : 35.135545 s : Deo FlopsPerSite is 1344.0 | ||||
| Grid : Message : 35.135558 s : Deo mflop/s =   4208495.6 (2165.0) 4053699.5-4315228.5 | ||||
| Grid : Message : 35.135562 s : Deo mflop/s per rank   526062.0 | ||||
| Grid : Message : 35.135563 s : Deo mflop/s per node   4208495.6 | ||||
| Grid : Message : 35.135564 s : ================================================================================== | ||||
| Grid : Message : 35.135565 s : * Using UNROLLED WilsonKernels | ||||
| Grid : Message : 35.135565 s : * Using Overlapped Comms/Compute | ||||
| Grid : Message : 35.135565 s : * SINGLE precision  | ||||
| Grid : Message : 35.135565 s : ================================================================================== | ||||
| Grid : Message : 35.299710 s : Deo FlopsPerSite is 1344.0 | ||||
| Grid : Message : 35.299715 s : Deo mflop/s =   4156968.7 (1450.2) 4053699.5-4219939.5 | ||||
| Grid : Message : 35.299718 s : Deo mflop/s per rank   519621.1 | ||||
| Grid : Message : 35.299719 s : Deo mflop/s per node   4156968.7 | ||||
| Grid : Message : 35.299721 s : ================================================================================== | ||||
| Grid : Message : 35.299721 s : 12^4 x 12 Deo Best  mflop/s        =   4208495.6 ; 4208495.6 per node  | ||||
| Grid : Message : 35.299723 s : 12^4 x 12 Deo Worst mflop/s        =   4156968.7 ; 4156968.7 per node  | ||||
| Grid : Message : 35.299725 s : G/S/C ; G/O/C ; G/S/S ; G/O/S  | ||||
| Grid : Message : 35.299725 s : 4208495.6 ; 4156968.7 ;  | ||||
| Grid : Message : 35.299726 s : ================================================================================== | ||||
| Grid : Message : 35.309687 s : ================================================================================== | ||||
| Grid : Message : 35.309693 s : Benchmark DWF on 16^4 local volume  | ||||
| Grid : Message : 35.309694 s : * Nc             : 3 | ||||
| Grid : Message : 35.309695 s : * Global volume  : 16 32 32 32  | ||||
| Grid : Message : 35.309701 s : * Ls             : 12 | ||||
| Grid : Message : 35.309702 s : * ranks          : 8 | ||||
| Grid : Message : 35.309703 s : * nodes          : 1 | ||||
| Grid : Message : 35.309704 s : * ranks/node     : 8 | ||||
| Grid : Message : 35.309704 s : * ranks geom     : 1 2 2 2  | ||||
| Grid : Message : 35.309705 s : * Using 1 threads | ||||
| Grid : Message : 35.309706 s : ================================================================================== | ||||
| Grid : Message : 35.448780 s : Initialised RNGs | ||||
| Grid : Message : 38.468764 s : ================================================================================== | ||||
| Grid : Message : 38.468777 s : * Using GENERIC Nc WilsonKernels | ||||
| Grid : Message : 38.468778 s : * Using Overlapped Comms/Compute | ||||
| Grid : Message : 38.468779 s : * SINGLE precision  | ||||
| Grid : Message : 38.468780 s : ================================================================================== | ||||
| Grid : Message : 38.801024 s : Deo FlopsPerSite is 1344.0 | ||||
| Grid : Message : 38.801040 s : Deo mflop/s =   6674673.6 (2168.6) 6484445.4-6797200.1 | ||||
| Grid : Message : 38.801044 s : Deo mflop/s per rank   834334.2 | ||||
| Grid : Message : 38.801045 s : Deo mflop/s per node   6674673.6 | ||||
| Grid : Message : 38.801046 s : ================================================================================== | ||||
| Grid : Message : 38.801047 s : * Using UNROLLED WilsonKernels | ||||
| Grid : Message : 38.801048 s : * Using Overlapped Comms/Compute | ||||
| Grid : Message : 38.801049 s : * SINGLE precision  | ||||
| Grid : Message : 38.801049 s : ================================================================================== | ||||
| Grid : Message : 39.129777 s : Deo FlopsPerSite is 1344.0 | ||||
| Grid : Message : 39.129783 s : Deo mflop/s =   6560128.4 (2117.4) 6405846.1-6679081.3 | ||||
| Grid : Message : 39.129786 s : Deo mflop/s per rank   820016.1 | ||||
| Grid : Message : 39.129787 s : Deo mflop/s per node   6560128.4 | ||||
| Grid : Message : 39.129788 s : ================================================================================== | ||||
| Grid : Message : 39.129788 s : 16^4 x 12 Deo Best  mflop/s        =   6674673.6 ; 6674673.6 per node  | ||||
| Grid : Message : 39.129790 s : 16^4 x 12 Deo Worst mflop/s        =   6560128.4 ; 6560128.4 per node  | ||||
| Grid : Message : 39.129792 s : G/S/C ; G/O/C ; G/S/S ; G/O/S  | ||||
| Grid : Message : 39.129793 s : 6674673.6 ; 6560128.4 ;  | ||||
| Grid : Message : 39.129795 s : ================================================================================== | ||||
| Grid : Message : 39.161251 s : ================================================================================== | ||||
| Grid : Message : 39.161265 s : Benchmark DWF on 24^4 local volume  | ||||
| Grid : Message : 39.161266 s : * Nc             : 3 | ||||
| Grid : Message : 39.161267 s : * Global volume  : 24 48 48 48  | ||||
| Grid : Message : 39.161274 s : * Ls             : 12 | ||||
| Grid : Message : 39.161275 s : * ranks          : 8 | ||||
| Grid : Message : 39.161276 s : * nodes          : 1 | ||||
| Grid : Message : 39.161277 s : * ranks/node     : 8 | ||||
| Grid : Message : 39.161277 s : * ranks geom     : 1 2 2 2  | ||||
| Grid : Message : 39.161278 s : * Using 1 threads | ||||
| Grid : Message : 39.161279 s : ================================================================================== | ||||
| Grid : Message : 39.911996 s : Initialised RNGs | ||||
| Grid : Message : 54.971914 s : ================================================================================== | ||||
| Grid : Message : 54.971928 s : * Using GENERIC Nc WilsonKernels | ||||
| Grid : Message : 54.971929 s : * Using Overlapped Comms/Compute | ||||
| Grid : Message : 54.971930 s : * SINGLE precision  | ||||
| Grid : Message : 54.971931 s : ================================================================================== | ||||
| Grid : Message : 56.309445 s : Deo FlopsPerSite is 1344.0 | ||||
| Grid : Message : 56.309462 s : Deo mflop/s =   8572660.7 (1374.9) 8483366.4-8644399.6 | ||||
| Grid : Message : 56.309467 s : Deo mflop/s per rank   1071582.6 | ||||
| Grid : Message : 56.309468 s : Deo mflop/s per node   8572660.7 | ||||
| Grid : Message : 56.309469 s : ================================================================================== | ||||
| Grid : Message : 56.309471 s : * Using UNROLLED WilsonKernels | ||||
| Grid : Message : 56.309472 s : * Using Overlapped Comms/Compute | ||||
| Grid : Message : 56.309473 s : * SINGLE precision  | ||||
| Grid : Message : 56.309474 s : ================================================================================== | ||||
| Grid : Message : 57.640707 s : Deo FlopsPerSite is 1344.0 | ||||
| Grid : Message : 57.640714 s : Deo mflop/s =   8200141.3 (1445.8) 8113545.6-8286307.9 | ||||
| Grid : Message : 57.640717 s : Deo mflop/s per rank   1025017.7 | ||||
| Grid : Message : 57.640718 s : Deo mflop/s per node   8200141.3 | ||||
| Grid : Message : 57.640719 s : ================================================================================== | ||||
| Grid : Message : 57.640720 s : 24^4 x 12 Deo Best  mflop/s        =   8572660.7 ; 8572660.7 per node  | ||||
| Grid : Message : 57.640723 s : 24^4 x 12 Deo Worst mflop/s        =   8200141.3 ; 8200141.3 per node  | ||||
| Grid : Message : 57.640725 s : G/S/C ; G/O/C ; G/S/S ; G/O/S  | ||||
| Grid : Message : 57.640725 s : 8572660.7 ; 8200141.3 ;  | ||||
| Grid : Message : 57.640727 s : ================================================================================== | ||||
| Grid : Message : 57.806175 s : ================================================================================== | ||||
| Grid : Message : 57.806190 s : Benchmark DWF on 32^4 local volume  | ||||
| Grid : Message : 57.806191 s : * Nc             : 3 | ||||
| Grid : Message : 57.806192 s : * Global volume  : 32 64 64 64  | ||||
| Grid : Message : 57.806200 s : * Ls             : 12 | ||||
| Grid : Message : 57.806200 s : * ranks          : 8 | ||||
| Grid : Message : 57.806200 s : * nodes          : 1 | ||||
| Grid : Message : 57.806200 s : * ranks/node     : 8 | ||||
| Grid : Message : 57.806200 s : * ranks geom     : 1 2 2 2  | ||||
| Grid : Message : 57.806201 s : * Using 1 threads | ||||
| Grid : Message : 57.806201 s : ================================================================================== | ||||
| Grid : Message : 60.313153 s : Initialised RNGs | ||||
| Grid : Message : 107.830286 s : ================================================================================== | ||||
| Grid : Message : 107.830306 s : * Using GENERIC Nc WilsonKernels | ||||
| Grid : Message : 107.830307 s : * Using Overlapped Comms/Compute | ||||
| Grid : Message : 107.830308 s : * SINGLE precision  | ||||
| Grid : Message : 107.830309 s : ================================================================================== | ||||
| Grid : Message : 111.479603 s : Deo FlopsPerSite is 1344.0 | ||||
| Grid : Message : 111.479625 s : Deo mflop/s =   9771387.8 (1000.8) 9688589.9-9830800.0 | ||||
| Grid : Message : 111.479629 s : Deo mflop/s per rank   1221423.5 | ||||
| Grid : Message : 111.479630 s : Deo mflop/s per node   9771387.8 | ||||
| Grid : Message : 111.479631 s : ================================================================================== | ||||
| Grid : Message : 111.479631 s : * Using UNROLLED WilsonKernels | ||||
| Grid : Message : 111.479631 s : * Using Overlapped Comms/Compute | ||||
| Grid : Message : 111.479631 s : * SINGLE precision  | ||||
| Grid : Message : 111.479631 s : ================================================================================== | ||||
| Grid : Message : 115.406559 s : Deo FlopsPerSite is 1344.0 | ||||
| Grid : Message : 115.406573 s : Deo mflop/s =   8785297.3 (1739.6) 8628282.5-8911307.5 | ||||
| Grid : Message : 115.406576 s : Deo mflop/s per rank   1098162.2 | ||||
| Grid : Message : 115.406577 s : Deo mflop/s per node   8785297.3 | ||||
| Grid : Message : 115.406578 s : ================================================================================== | ||||
| Grid : Message : 115.406578 s : 32^4 x 12 Deo Best  mflop/s        =   9771387.8 ; 9771387.8 per node  | ||||
| Grid : Message : 115.406580 s : 32^4 x 12 Deo Worst mflop/s        =   8785297.3 ; 8785297.3 per node  | ||||
| Grid : Message : 115.406581 s : G/S/C ; G/O/C ; G/S/S ; G/O/S  | ||||
| Grid : Message : 115.406581 s : 9771387.8 ; 8785297.3 ;  | ||||
| Grid : Message : 115.406582 s : ================================================================================== | ||||
| Grid : Message : 115.918888 s : ================================================================================== | ||||
| Grid : Message : 115.918902 s :  Improved Staggered dslash 4D vectorised | ||||
| Grid : Message : 115.918903 s : ================================================================================== | ||||
| Grid : Message : 115.920344 s : ================================================================================== | ||||
| Grid : Message : 115.920346 s : Benchmark ImprovedStaggered on 8^4 local volume  | ||||
| Grid : Message : 115.920347 s : * Global volume  : 8 16 16 16  | ||||
| Grid : Message : 115.920354 s : * ranks          : 8 | ||||
| Grid : Message : 115.920355 s : * nodes          : 1 | ||||
| Grid : Message : 115.920356 s : * ranks/node     : 8 | ||||
| Grid : Message : 115.920357 s : * ranks geom     : 1 2 2 2  | ||||
| Grid : Message : 115.920376 s : * Using 1 threads | ||||
| Grid : Message : 115.920377 s : ================================================================================== | ||||
| Grid : Message : 115.923522 s : Initialised RNGs | ||||
| Grid : Message : 116.904870 s : ================================================================================== | ||||
| Grid : Message : 116.904950 s : * Using GENERIC Nc StaggeredKernels | ||||
| Grid : Message : 116.904960 s : * SINGLE precision  | ||||
| Grid : Message : 116.904970 s : ================================================================================== | ||||
| Grid : Message : 116.288979 s : Deo mflop/s =   49708.9 (22.9) 44075.3-50609.3 | ||||
| Grid : Message : 116.289000 s : Deo mflop/s per rank   6213.6 | ||||
| Grid : Message : 116.289002 s : Deo mflop/s per node   49708.9 | ||||
| Grid : Message : 116.289003 s : ================================================================================== | ||||
| Grid : Message : 116.289004 s : * SINGLE precision  | ||||
| Grid : Message : 116.289005 s : ================================================================================== | ||||
| Grid : Message : 116.481632 s : Deo mflop/s =   49737.1 (13.5) 48517.0-50338.0 | ||||
| Grid : Message : 116.481639 s : Deo mflop/s per rank   6217.1 | ||||
| Grid : Message : 116.481640 s : Deo mflop/s per node   49737.1 | ||||
| Grid : Message : 116.481641 s : ================================================================================== | ||||
| Grid : Message : 116.481642 s : 8^4  Deo Best  mflop/s        =   49737.1 ; 49737.1 per node  | ||||
| Grid : Message : 116.481644 s : 8^4  Deo Worst mflop/s        =   49708.9 ; 49708.9 per node  | ||||
| Grid : Message : 116.481646 s : G/S/C ; G/O/C ; G/S/S ; G/O/S  | ||||
| Grid : Message : 116.481646 s : 49708.9 ; 49737.1 ;  | ||||
| Grid : Message : 116.481647 s : ================================================================================== | ||||
| Grid : Message : 116.483458 s : ================================================================================== | ||||
| Grid : Message : 116.483461 s : Benchmark ImprovedStaggered on 12^4 local volume  | ||||
| Grid : Message : 116.483462 s : * Global volume  : 12 24 24 24  | ||||
| Grid : Message : 116.483465 s : * ranks          : 8 | ||||
| Grid : Message : 116.483466 s : * nodes          : 1 | ||||
| Grid : Message : 116.483466 s : * ranks/node     : 8 | ||||
| Grid : Message : 116.483466 s : * ranks geom     : 1 2 2 2  | ||||
| Grid : Message : 116.483467 s : * Using 1 threads | ||||
| Grid : Message : 116.483468 s : ================================================================================== | ||||
| Grid : Message : 116.489279 s : Initialised RNGs | ||||
| Grid : Message : 116.945016 s : ================================================================================== | ||||
| Grid : Message : 116.945025 s : * Using GENERIC Nc StaggeredKernels | ||||
| Grid : Message : 116.945026 s : * SINGLE precision  | ||||
| Grid : Message : 116.945027 s : ================================================================================== | ||||
| Grid : Message : 117.159821 s : Deo mflop/s =   229778.4 (89.5) 223656.1-233547.5 | ||||
| Grid : Message : 117.159835 s : Deo mflop/s per rank   28722.3 | ||||
| Grid : Message : 117.159837 s : Deo mflop/s per node   229778.4 | ||||
| Grid : Message : 117.159838 s : ================================================================================== | ||||
| Grid : Message : 117.159838 s : * SINGLE precision  | ||||
| Grid : Message : 117.159838 s : ================================================================================== | ||||
| Grid : Message : 117.371102 s : Deo mflop/s =   229516.6 (61.8) 225781.1-233547.5 | ||||
| Grid : Message : 117.371109 s : Deo mflop/s per rank   28689.6 | ||||
| Grid : Message : 117.371110 s : Deo mflop/s per node   229516.6 | ||||
| Grid : Message : 117.371111 s : ================================================================================== | ||||
| Grid : Message : 117.371111 s : 12^4  Deo Best  mflop/s        =   229778.4 ; 229778.4 per node  | ||||
| Grid : Message : 117.371113 s : 12^4  Deo Worst mflop/s        =   229516.6 ; 229516.6 per node  | ||||
| Grid : Message : 117.371115 s : G/S/C ; G/O/C ; G/S/S ; G/O/S  | ||||
| Grid : Message : 117.371115 s : 229778.4 ; 229516.6 ;  | ||||
| Grid : Message : 117.371116 s : ================================================================================== | ||||
| Grid : Message : 117.373669 s : ================================================================================== | ||||
| Grid : Message : 117.373673 s : Benchmark ImprovedStaggered on 16^4 local volume  | ||||
| Grid : Message : 117.373674 s : * Global volume  : 16 32 32 32  | ||||
| Grid : Message : 117.373678 s : * ranks          : 8 | ||||
| Grid : Message : 117.373679 s : * nodes          : 1 | ||||
| Grid : Message : 117.373679 s : * ranks/node     : 8 | ||||
| Grid : Message : 117.373679 s : * ranks geom     : 1 2 2 2  | ||||
| Grid : Message : 117.373680 s : * Using 1 threads | ||||
| Grid : Message : 117.373681 s : ================================================================================== | ||||
| Grid : Message : 117.386495 s : Initialised RNGs | ||||
| Grid : Message : 118.755695 s : ================================================================================== | ||||
| Grid : Message : 118.755706 s : * Using GENERIC Nc StaggeredKernels | ||||
| Grid : Message : 118.755707 s : * SINGLE precision  | ||||
| Grid : Message : 118.755708 s : ================================================================================== | ||||
| Grid : Message : 119.178990 s : Deo mflop/s =   608844.0 (126.1) 596065.5-615608.7 | ||||
| Grid : Message : 119.179160 s : Deo mflop/s per rank   76105.5 | ||||
| Grid : Message : 119.179180 s : Deo mflop/s per node   608844.0 | ||||
| Grid : Message : 119.179190 s : ================================================================================== | ||||
| Grid : Message : 119.179200 s : * SINGLE precision  | ||||
| Grid : Message : 119.179200 s : ================================================================================== | ||||
| Grid : Message : 119.271093 s : Deo mflop/s =   605259.7 (188.7) 591372.1-614349.7 | ||||
| Grid : Message : 119.271101 s : Deo mflop/s per rank   75657.5 | ||||
| Grid : Message : 119.271103 s : Deo mflop/s per node   605259.7 | ||||
| Grid : Message : 119.271104 s : ================================================================================== | ||||
| Grid : Message : 119.271105 s : 16^4  Deo Best  mflop/s        =   608844.0 ; 608844.0 per node  | ||||
| Grid : Message : 119.271107 s : 16^4  Deo Worst mflop/s        =   605259.7 ; 605259.7 per node  | ||||
| Grid : Message : 119.271109 s : G/S/C ; G/O/C ; G/S/S ; G/O/S  | ||||
| Grid : Message : 119.271109 s : 608844.0 ; 605259.7 ;  | ||||
| Grid : Message : 119.271110 s : ================================================================================== | ||||
| Grid : Message : 119.275303 s : ================================================================================== | ||||
| Grid : Message : 119.275308 s : Benchmark ImprovedStaggered on 24^4 local volume  | ||||
| Grid : Message : 119.275309 s : * Global volume  : 24 48 48 48  | ||||
| Grid : Message : 119.275315 s : * ranks          : 8 | ||||
| Grid : Message : 119.275316 s : * nodes          : 1 | ||||
| Grid : Message : 119.275317 s : * ranks/node     : 8 | ||||
| Grid : Message : 119.275317 s : * ranks geom     : 1 2 2 2  | ||||
| Grid : Message : 119.275318 s : * Using 1 threads | ||||
| Grid : Message : 119.275319 s : ================================================================================== | ||||
| Grid : Message : 119.328765 s : Initialised RNGs | ||||
| Grid : Message : 126.866160 s : ================================================================================== | ||||
| Grid : Message : 126.866270 s : * Using GENERIC Nc StaggeredKernels | ||||
| Grid : Message : 126.866280 s : * SINGLE precision  | ||||
| Grid : Message : 126.866290 s : ================================================================================== | ||||
| Grid : Message : 126.604376 s : Deo mflop/s =   1641161.6 (335.5) 1619660.5-1663961.9 | ||||
| Grid : Message : 126.604392 s : Deo mflop/s per rank   205145.2 | ||||
| Grid : Message : 126.604394 s : Deo mflop/s per node   1641161.6 | ||||
| Grid : Message : 126.604395 s : ================================================================================== | ||||
| Grid : Message : 126.604396 s : * SINGLE precision  | ||||
| Grid : Message : 126.604396 s : ================================================================================== | ||||
| Grid : Message : 127.829420 s : Deo mflop/s =   1620972.4 (344.9) 1602593.4-1644174.3 | ||||
| Grid : Message : 127.829520 s : Deo mflop/s per rank   202621.6 | ||||
| Grid : Message : 127.829530 s : Deo mflop/s per node   1620972.4 | ||||
| Grid : Message : 127.829540 s : ================================================================================== | ||||
| Grid : Message : 127.829550 s : 24^4  Deo Best  mflop/s        =   1641161.6 ; 1641161.6 per node  | ||||
| Grid : Message : 127.829570 s : 24^4  Deo Worst mflop/s        =   1620972.4 ; 1620972.4 per node  | ||||
| Grid : Message : 127.829590 s : G/S/C ; G/O/C ; G/S/S ; G/O/S  | ||||
| Grid : Message : 127.829590 s : 1641161.6 ; 1620972.4 ;  | ||||
| Grid : Message : 127.829600 s : ================================================================================== | ||||
| Grid : Message : 127.107891 s : ================================================================================== | ||||
| Grid : Message : 127.107903 s : Benchmark ImprovedStaggered on 32^4 local volume  | ||||
| Grid : Message : 127.107904 s : * Global volume  : 32 64 64 64  | ||||
| Grid : Message : 127.107912 s : * ranks          : 8 | ||||
| Grid : Message : 127.107913 s : * nodes          : 1 | ||||
| Grid : Message : 127.107914 s : * ranks/node     : 8 | ||||
| Grid : Message : 127.107914 s : * ranks geom     : 1 2 2 2  | ||||
| Grid : Message : 127.107915 s : * Using 1 threads | ||||
| Grid : Message : 127.107916 s : ================================================================================== | ||||
| Grid : Message : 127.257116 s : Initialised RNGs | ||||
| Grid : Message : 148.527930 s : ================================================================================== | ||||
| Grid : Message : 148.527941 s : * Using GENERIC Nc StaggeredKernels | ||||
| Grid : Message : 148.527942 s : * SINGLE precision  | ||||
| Grid : Message : 148.527943 s : ================================================================================== | ||||
| Grid : Message : 149.401625 s : Deo mflop/s =   3085543.7 (956.0) 2934476.4-3115147.4 | ||||
| Grid : Message : 149.401643 s : Deo mflop/s per rank   385693.0 | ||||
| Grid : Message : 149.401645 s : Deo mflop/s per node   3085543.7 | ||||
| Grid : Message : 149.401646 s : ================================================================================== | ||||
| Grid : Message : 149.401647 s : * SINGLE precision  | ||||
| Grid : Message : 149.401648 s : ================================================================================== | ||||
| Grid : Message : 150.204533 s : Deo mflop/s =   3053468.5 (343.9) 3030688.8-3077255.0 | ||||
| Grid : Message : 150.204540 s : Deo mflop/s per rank   381683.6 | ||||
| Grid : Message : 150.204541 s : Deo mflop/s per node   3053468.5 | ||||
| Grid : Message : 150.204542 s : ================================================================================== | ||||
| Grid : Message : 150.204543 s : 32^4  Deo Best  mflop/s        =   3085543.7 ; 3085543.7 per node  | ||||
| Grid : Message : 150.204545 s : 32^4  Deo Worst mflop/s        =   3053468.5 ; 3053468.5 per node  | ||||
| Grid : Message : 150.204547 s : G/S/C ; G/O/C ; G/S/S ; G/O/S  | ||||
| Grid : Message : 150.204547 s : 3085543.7 ; 3053468.5 ;  | ||||
| Grid : Message : 150.204548 s : ================================================================================== | ||||
| Grid : Message : 150.292848 s : ================================================================================== | ||||
| Grid : Message : 150.292864 s :  Summary table Ls=12 | ||||
| Grid : Message : 150.292866 s : ================================================================================== | ||||
| Grid : Message : 150.292866 s : L 		 Clover 		 DWF4 		 Staggered | ||||
| Grid : Message : 150.292867 s : 8 		 154914.0 		 1386335.8 		 49737.1 | ||||
| Grid : Message : 150.292880 s : 12 		 693556.6 		 4208495.6 		 229778.4 | ||||
| Grid : Message : 150.292882 s : 16 		 1840587.3 		 6674673.6 		 608844.0 | ||||
| Grid : Message : 150.292884 s : 24 		 3933599.5 		 8572660.7 		 1641161.6 | ||||
| Grid : Message : 150.292886 s : 32 		 5082758.0 		 9771387.8 		 3085543.7 | ||||
| Grid : Message : 150.292888 s : ================================================================================== | ||||
| Grid : Message : 150.292888 s : ================================================================================== | ||||
| Grid : Message : 150.292888 s :  Memory benchmark  | ||||
| Grid : Message : 150.292888 s : ================================================================================== | ||||
| Grid : Message : 150.295495 s : ================================================================================== | ||||
| Grid : Message : 150.295497 s : = Benchmarking a*x + y bandwidth | ||||
| Grid : Message : 150.295498 s : ================================================================================== | ||||
| Grid : Message : 150.295499 s :   L  		bytes			GB/s		Gflop/s		 seconds		GB/s / node | ||||
| Grid : Message : 150.295500 s : ---------------------------------------------------------- | ||||
| Grid : Message : 160.682233 s : 8		6291456.000   		379.297		31.608		10.367		379.297 | ||||
| Grid : Message : 161.851979 s : 16		100663296.000   		3754.675		312.890		1.047		3754.675 | ||||
| Grid : Message : 162.458098 s : 24		509607936.000   		6521.472		543.456		0.603		6521.472 | ||||
| Grid : Message : 162.924116 s : 32		1610612736.000   		8513.456		709.455		0.462		8513.456 | ||||
| Grid : Message : 163.363877 s : 40		3932160000.000   		9018.902		751.575		0.436		9018.902 | ||||
| Grid : Message : 163.363976 s : ================================================================================== | ||||
| Grid : Message : 163.363978 s :  Batched BLAS benchmark  | ||||
| Grid : Message : 163.363979 s : ================================================================================== | ||||
| hipblasCreate | ||||
| Grid : Message : 163.364046 s : ================================================================================== | ||||
| Grid : Message : 163.364048 s : = batched GEMM (double precision)  | ||||
| Grid : Message : 163.364048 s : ================================================================================== | ||||
| Grid : Message : 163.364048 s :   M  		N			K		Gflop/s / rank (coarse mrhs) | ||||
| Grid : Message : 163.364049 s : ---------------------------------------------------------- | ||||
| Grid : Message : 163.438476 s : 16		8		16		256		0.565 | ||||
| Grid : Message : 163.438944 s : 16		16		16		256		243.148 | ||||
| Grid : Message : 163.439501 s : 16		32		16		256		440.347 | ||||
| Grid : Message : 163.440003 s : 32		8		32		256		439.194 | ||||
| Grid : Message : 163.440463 s : 32		16		32		256		847.334 | ||||
| Grid : Message : 163.441051 s : 32		32		32		256		1430.893 | ||||
| Grid : Message : 163.441679 s : 64		8		64		256		1242.757 | ||||
| Grid : Message : 163.442354 s : 64		16		64		256		2196.689 | ||||
| Grid : Message : 163.443196 s : 64		32		64		256		3697.458 | ||||
| Grid : Message : 163.443200 s : ---------------------------------------------------------- | ||||
| Grid : Message : 163.443201 s :   M  		N			K		Gflop/s / rank (block project) | ||||
| Grid : Message : 163.443202 s : ---------------------------------------------------------- | ||||
| Grid : Message : 163.444013 s : 16		8		256		256		899.583 | ||||
| Grid : Message : 163.444933 s : 16		16		256		256		1673.538 | ||||
| Grid : Message : 163.446013 s : 16		32		256		256		2959.597 | ||||
| Grid : Message : 163.446951 s : 32		8		256		256		1558.859 | ||||
| Grid : Message : 163.447970 s : 32		16		256		256		2864.839 | ||||
| Grid : Message : 163.449240 s : 32		32		256		256		4810.671 | ||||
| Grid : Message : 163.450524 s : 64		8		256		256		2386.093 | ||||
| Grid : Message : 163.451877 s : 64		16		256		256		4451.666 | ||||
| Grid : Message : 163.453806 s : 64		32		256		256		5942.124 | ||||
| Grid : Message : 163.453809 s : ---------------------------------------------------------- | ||||
| Grid : Message : 163.453810 s :   M  		N			K		Gflop/s / rank (block promote) | ||||
| Grid : Message : 163.453811 s : ---------------------------------------------------------- | ||||
| Grid : Message : 163.454716 s : 8		256		16		256		799.867 | ||||
| Grid : Message : 163.455690 s : 16		256		16		256		1584.625 | ||||
| Grid : Message : 163.457209 s : 32		256		16		256		1949.422 | ||||
| Grid : Message : 163.458254 s : 8		256		32		256		1389.417 | ||||
| Grid : Message : 163.459339 s : 16		256		32		256		2668.344 | ||||
| Grid : Message : 163.461158 s : 32		256		32		256		3234.162 | ||||
| Grid : Message : 163.462566 s : 8		256		64		256		2150.925 | ||||
| Grid : Message : 163.464066 s : 16		256		64		256		4012.488 | ||||
| Grid : Message : 163.466272 s : 32		256		64		256		5154.786 | ||||
| Grid : Message : 163.466276 s : ================================================================================== | ||||
| Grid : Message : 163.466277 s : ================================================================================== | ||||
| Grid : Message : 163.466278 s :  Communications benchmark  | ||||
| Grid : Message : 163.466279 s : ================================================================================== | ||||
| Grid : Message : 163.466280 s : ==================================================================================================== | ||||
| Grid : Message : 163.466280 s : = Benchmarking threaded STENCIL halo exchange in 3 dimensions | ||||
| Grid : Message : 163.466281 s : ==================================================================================================== | ||||
| Grid : Message : 163.466281 s :  L  	 Ls  	bytes	 MB/s uni  		 MB/s bidi  | ||||
| Grid : Message : 163.521339 s : 16	12	 4718592 	 122513.099		245026.198 | ||||
| Grid : Message : 163.551417 s : 16	12	 4718592 	 125590.498		251180.996 | ||||
| Grid : Message : 163.572339 s : 16	12	 4718592 	 180555.489		361110.977 | ||||
| Grid : Message : 163.602810 s : 16	12	 4718592 	 123949.223		247898.447 | ||||
| Grid : Message : 163.633041 s : 16	12	 4718592 	 124933.761		249867.523 | ||||
| Grid : Message : 163.654084 s : 16	12	 4718592 	 179516.530		359033.061 | ||||
| Grid : Message : 163.756280 s : 24	12	 15925248 	 127515.473		255030.946 | ||||
| Grid : Message : 163.852651 s : 24	12	 15925248 	 132226.945		264453.890 | ||||
| Grid : Message : 163.917510 s : 24	12	 15925248 	 196474.591		392949.183 | ||||
| Grid : Message : 164.170390 s : 24	12	 15925248 	 128020.322		256040.644 | ||||
| Grid : Message : 164.113321 s : 24	12	 15925248 	 132340.948		264681.896 | ||||
| Grid : Message : 164.178314 s : 24	12	 15925248 	 196051.311		392102.622 | ||||
| Grid : Message : 164.413983 s : 32	12	 37748736 	 129411.666		258823.333 | ||||
| Grid : Message : 164.639218 s : 32	12	 37748736 	 134090.789		268181.577 | ||||
| Grid : Message : 164.789675 s : 32	12	 37748736 	 200739.096		401478.191 | ||||
| Grid : Message : 165.228910 s : 32	12	 37748736 	 129497.681		258995.363 | ||||
| Grid : Message : 165.248096 s : 32	12	 37748736 	 134103.293		268206.586 | ||||
| Grid : Message : 165.398958 s : 32	12	 37748736 	 200198.805		400397.611 | ||||
| Grid : Message : 165.399411 s : ================================================================================== | ||||
| Grid : Message : 165.399413 s :  Per Node Summary table Ls=12 | ||||
| Grid : Message : 165.399414 s : ================================================================================== | ||||
| Grid : Message : 165.399414 s :  L 		 Clover		 DWF4		 Staggered (GF/s per node) | ||||
| Grid : Message : 165.399417 s : 8 		 154914.003 	 1386335.817 	 49737.127 | ||||
| Grid : Message : 165.399423 s : 12 		 693556.579 	 4208495.611 	 229778.435 | ||||
| Grid : Message : 165.399426 s : 16 		 1840587.280 	 6674673.647 	 608844.000 | ||||
| Grid : Message : 165.399429 s : 24 		 3933599.545 	 8572660.656 	 1641161.613 | ||||
| Grid : Message : 165.399432 s : 32 		 5082757.996 	 9771387.820 	 3085543.742 | ||||
| Grid : Message : 165.399435 s : ================================================================================== | ||||
| Grid : Message : 165.399435 s : ================================================================================== | ||||
| Grid : Message : 165.399435 s :  Comparison point     result: 9172024.238 Mflop/s per node | ||||
| Grid : Message : 165.399436 s :  Comparison point is 0.5*(9771387.820+8572660.656)  | ||||
| Grid : Message : 165.399438 s : ================================================================================== | ||||
| Grid : Message : 165.399438 s : ******************************************* | ||||
| Grid : Message : 165.399438 s : ******* Grid Finalize                ****** | ||||
| Grid : Message : 165.399438 s : ******************************************* | ||||
43	systems/Frontier/benchmarks/bench2.slurm	Executable file
							| @@ -0,0 +1,43 @@ | ||||
| #!/bin/bash -l | ||||
| #SBATCH --job-name=bench | ||||
| ##SBATCH --partition=small-g | ||||
| #SBATCH --nodes=2 | ||||
| #SBATCH --ntasks-per-node=8 | ||||
| #SBATCH --cpus-per-task=7 | ||||
| #SBATCH --gpus-per-node=8 | ||||
| #SBATCH --time=00:10:00 | ||||
| #SBATCH --account=phy157_dwf | ||||
| #SBATCH --gpu-bind=none | ||||
| #SBATCH --exclusive | ||||
| #SBATCH --mem=0 | ||||
|  | ||||
| cat << EOF > select_gpu | ||||
| #!/bin/bash | ||||
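| # Pick one GPU and the matching NUMA domain for this local rank, expose only that GPU, and bind CPU/memory with numactl | ||||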
| export GPU_MAP=(0 1 2 3 7 6 5 4) | ||||
| export NUMA_MAP=(3 3 1 1 2 2 0 0) | ||||
| export GPU=\${GPU_MAP[\$SLURM_LOCALID]} | ||||
| export NUMA=\${NUMA_MAP[\$SLURM_LOCALID]} | ||||
| export HIP_VISIBLE_DEVICES=\$GPU | ||||
| unset ROCR_VISIBLE_DEVICES | ||||
| echo RANK \$SLURM_LOCALID using GPU \$GPU     | ||||
| exec numactl -m \$NUMA -N \$NUMA \$* | ||||
| EOF | ||||
|  | ||||
| chmod +x ./select_gpu | ||||
|  | ||||
| root=$HOME/Frontier/Grid/systems/Frontier/ | ||||
| source ${root}/sourceme.sh | ||||
|  | ||||
| export OMP_NUM_THREADS=7 | ||||
| export MPICH_GPU_SUPPORT_ENABLED=1 | ||||
| export MPICH_SMP_SINGLE_COPY_MODE=XPMEM | ||||
|  | ||||
| for vol in 32.32.32.64 | ||||
| do | ||||
| srun ./select_gpu ./Benchmark_dwf_fp32 --mpi 2.2.2.2 --accelerator-threads 8 --comms-overlap --shm 2048 --shm-mpi 0 --grid $vol  > log.shm0.ov.$vol | ||||
| srun ./select_gpu ./Benchmark_dwf_fp32 --mpi 2.2.2.2 --accelerator-threads 8 --comms-overlap --shm 2048 --shm-mpi 1 --grid $vol  > log.shm1.ov.$vol | ||||
|  | ||||
| srun ./select_gpu ./Benchmark_dwf_fp32 --mpi 2.2.2.2 --accelerator-threads 8 --comms-sequential --shm 2048 --shm-mpi 0 --grid $vol  > log.shm0.seq.$vol | ||||
| srun ./select_gpu ./Benchmark_dwf_fp32 --mpi 2.2.2.2 --accelerator-threads 8 --comms-sequential --shm 2048 --shm-mpi 1 --grid $vol > log.shm1.seq.$vol | ||||
| done | ||||
|  | ||||
38	systems/Frontier/benchmarks/benchusqcd.slurm	Executable file
							| @@ -0,0 +1,38 @@ | ||||
| #!/bin/bash -l | ||||
| #SBATCH --job-name=bench | ||||
| ##SBATCH --partition=small-g | ||||
| ##SBATCH -q debug | ||||
| #SBATCH --nodes=1 | ||||
| #SBATCH --ntasks-per-node=8 | ||||
| #SBATCH --cpus-per-task=7 | ||||
| #SBATCH --gpus-per-node=8 | ||||
| #SBATCH --time=00:30:00 | ||||
| #SBATCH --account=phy157_dwf | ||||
| #SBATCH --gpu-bind=none | ||||
| #SBATCH --exclusive | ||||
| #SBATCH --mem=0 | ||||
|  | ||||
| cat << EOF > select_gpu | ||||
| #!/bin/bash | ||||
| export GPU_MAP=(0 1 2 3 7 6 5 4) | ||||
| export NUMA_MAP=(3 3 1 1 2 2 0 0) | ||||
| export GPU=\${GPU_MAP[\$SLURM_LOCALID]} | ||||
| export NUMA=\${NUMA_MAP[\$SLURM_LOCALID]} | ||||
| export HIP_VISIBLE_DEVICES=\$GPU | ||||
| unset ROCR_VISIBLE_DEVICES | ||||
| echo RANK \$SLURM_LOCALID using GPU \$GPU     | ||||
| exec numactl -m \$NUMA -N \$NUMA \$* | ||||
| EOF | ||||
|  | ||||
| chmod +x ./select_gpu | ||||
|  | ||||
| root=$HOME/Frontier/Grid/systems/Frontier/ | ||||
| source ${root}/sourceme.sh | ||||
|  | ||||
| export OMP_NUM_THREADS=7 | ||||
| export MPICH_GPU_SUPPORT_ENABLED=1 | ||||
| #export MPICH_SMP_SINGLE_COPY_MODE=XPMEM | ||||
|  | ||||
| srun ./select_gpu ./Benchmark_usqcd --grid 32.32.32.32 --mpi 1.2.2.2 --accelerator-threads 8 --comms-overlap --shm 4096 --shm-mpi 0  > Benchmark_usqcd.log | ||||
|  | ||||
|  | ||||
| @@ -15,8 +15,8 @@ CLIME=`spack find --paths c-lime@2-3-9 | grep c-lime| cut -c 15-` | ||||
| --with-mpfr=/opt/cray/pe/gcc/mpfr/3.1.4/ \ | ||||
| --disable-fermion-reps \ | ||||
| CXX=hipcc MPICXX=mpicxx \ | ||||
| CXXFLAGS="-fPIC -I{$ROCM_PATH}/include/ -I${MPICH_DIR}/include -L/lib64 -fgpu-sanitize" \ | ||||
|  LDFLAGS="-L/lib64 -L${MPICH_DIR}/lib -lmpi -L${CRAY_MPICH_ROOTDIR}/gtl/lib -lmpi_gtl_hsa -lamdhip64  -lhipblas -lrocblas" | ||||
| CXXFLAGS="-fPIC -I{$ROCM_PATH}/include/ -I${MPICH_DIR}/include -L/lib64 " \ | ||||
|  LDFLAGS="-L/lib64 -L${MPICH_DIR}/lib -lmpi -L${CRAY_MPICH_ROOTDIR}/gtl/lib -lmpi_gtl_hsa -lamdhip64 -lhipblas -lrocblas" | ||||
|  | ||||
|  | ||||
|  | ||||
|   | ||||
13	systems/Frontier/mpiwrapper.sh	Executable file
							| @@ -0,0 +1,13 @@ | ||||
| #!/bin/bash | ||||
|  | ||||
| lrank=$SLURM_LOCALID | ||||
| lgpu=(0 1 2 3 7 6 5 4) | ||||
|  | ||||
| export ROCR_VISIBLE_DEVICES=${lgpu[$lrank]} | ||||
|  | ||||
| echo "`hostname` - $lrank device=$ROCR_VISIBLE_DEVICES " | ||||
|  | ||||
| $* | ||||
|  | ||||
|  | ||||
|  | ||||
| @@ -1,6 +1,5 @@ | ||||
| . /autofs/nccs-svm1_home1/paboyle/Crusher/Grid/spack/share/spack/setup-env.sh | ||||
| spack load c-lime | ||||
| #export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/sw/crusher/spack-envs/base/opt/cray-sles15-zen3/gcc-11.2.0/gperftools-2.9.1-72ubwtuc5wcz2meqltbfdb76epufgzo2/lib | ||||
| module load emacs  | ||||
| module load PrgEnv-gnu | ||||
| module load rocm | ||||
|   | ||||
9	systems/Frontier/wrap.sh	Executable file
							| @@ -0,0 +1,9 @@ | ||||
| #!/bin/sh | ||||
|  | ||||
| export HIP_VISIBLE_DEVICES=$ROCR_VISIBLE_DEVICES | ||||
| unset ROCR_VISIBLE_DEVICES | ||||
|  | ||||
| #rank=$SLURM_PROCID | ||||
| #rocprof -d rocprof.$rank -o rocprof.$rank/results.rank$SLURM_PROCID.csv --sys-trace $@ | ||||
|  | ||||
| $@ | ||||
| @@ -1,4 +1,4 @@ | ||||
| TOOLS=$HOME/tools | ||||
|  | ||||
| ../../configure \ | ||||
| 	--enable-simd=GPU \ | ||||
| 	--enable-gen-simd-width=64 \ | ||||
| @@ -11,6 +11,6 @@ TOOLS=$HOME/tools | ||||
| 	--enable-unified=no \ | ||||
| 	MPICXX=mpicxx \ | ||||
| 	CXX=icpx \ | ||||
| 	LDFLAGS="-fiopenmp -fsycl -fsycl-device-code-split=per_kernel -fsycl-device-lib=all -lze_loader -L$TOOLS/lib64/" \ | ||||
| 	CXXFLAGS="-fiopenmp -fsycl-unnamed-lambda -fsycl -I$INSTALL/include -Wno-tautological-compare -I$HOME/ -I$TOOLS/include" | ||||
| 	LDFLAGS="-fiopenmp -fsycl -fsycl-device-code-split=per_kernel -fsycl-device-lib=all -lze_loader -L${MKLROOT}/lib -qmkl=parallel -lsycl" \ | ||||
| 	CXXFLAGS="-fiopenmp -fsycl-unnamed-lambda -fsycl -I$INSTALL/include -Wno-tautological-compare -I$HOME/ -qmkl=parallel" | ||||
|  | ||||
|   | ||||
2	systems/Sunspot/sourceme.sh	Normal file
							| @@ -0,0 +1,2 @@ | ||||
| module load oneapi/eng-compiler/2023.05.15.003 | ||||
| module load mpich/51.2/icc-all-deterministic-pmix-gpu | ||||
81	systems/Sunspot/tests/repro1gpu.pbs	Normal file
							| @@ -0,0 +1,81 @@ | ||||
| #!/bin/bash | ||||
|  | ||||
| #PBS -l select=16:system=sunspot,place=scatter | ||||
| #PBS -A LatticeQCD_aesp_CNDA | ||||
| #PBS -l walltime=02:00:00 | ||||
| #PBS -N repro1gpu | ||||
| #PBS -k doe | ||||
|  | ||||
| #export OMP_PROC_BIND=spread | ||||
| #unset OMP_PLACES | ||||
|  | ||||
| module load oneapi/eng-compiler/2023.05.15.003 | ||||
| module load mpich/51.2/icc-all-deterministic-pmix-gpu | ||||
|  | ||||
| # 56 cores / 6 threads ~9 | ||||
| export OMP_NUM_THREADS=6 | ||||
| export MPIR_CVAR_CH4_OFI_ENABLE_GPU_PIPELINE=1 | ||||
| export MPIR_CVAR_CH4_OFI_GPU_PIPELINE_D2H_ENGINE_TYPE=0 | ||||
| export MPIR_CVAR_CH4_OFI_GPU_PIPELINE_H2D_ENGINE_TYPE=0 | ||||
| export MPIR_CVAR_GPU_USE_IMMEDIATE_COMMAND_LIST=1 | ||||
| export MPIR_CVAR_CH4_OFI_GPU_PIPELINE_BUFFER_SZ=1048576 | ||||
| export MPIR_CVAR_CH4_OFI_GPU_PIPELINE_THRESHOLD=131072 | ||||
| export MPIR_CVAR_CH4_OFI_GPU_PIPELINE_NUM_BUFFERS_PER_CHUNK=16 | ||||
| export MPIR_CVAR_CH4_OFI_GPU_PIPELINE_MAX_NUM_BUFFERS=16 | ||||
| export MPICH_OFI_NIC_POLICY=GPU | ||||
|  | ||||
| export MPIR_CVAR_ALLREDUCE_DEVICE_COLLECTIVE=0 | ||||
| export MPIR_CVAR_REDUCE_DEVICE_COLLECTIVE=0 | ||||
| export MPIR_CVAR_ALLREDUCE_INTRA_ALGORITHM=recursive_doubling | ||||
| unset MPIR_CVAR_CH4_COLL_SELECTION_TUNING_JSON_FILE | ||||
| unset MPIR_CVAR_COLL_SELECTION_TUNING_JSON_FILE | ||||
| unset MPIR_CVAR_CH4_POSIX_COLL_SELECTION_TUNING_JSON_FILE | ||||
|  | ||||
| cd $PBS_O_WORKDIR | ||||
|  | ||||
| NN=`cat $PBS_NODEFILE | wc -l` | ||||
| echo $PBS_NODEFILE | ||||
| cat $PBS_NODEFILE | ||||
|  | ||||
| echo $NN nodes in node file | ||||
| for n in `eval echo {1..$NN}` | ||||
| do | ||||
|  | ||||
| THIS_NODE=`head -n$n $PBS_NODEFILE | tail -n1 ` | ||||
| echo Node $n is $THIS_NODE | ||||
|  | ||||
|  | ||||
| for g in {0..11} | ||||
| do | ||||
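| # 12 single-GPU runs per node: GPU_MAP/TILE_MAP pick one of 6 GPUs x 2 tiles, NUMA_MAP the memory domain to bind to | ||||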
| export NUMA_MAP=(0 0 0 1 1 1 0 0 0 1 1 1 ) | ||||
| export TILE_MAP=(0 0 0 0 0 0 1 1 1 1 1 1 ) | ||||
| export  GPU_MAP=(0 1 2 3 4 5 0 1 2 3 4 5 ) | ||||
|  | ||||
| export numa=${NUMA_MAP[$g]} | ||||
| export gpu_id=${GPU_MAP[$g]} | ||||
| export tile_id=${TILE_MAP[$g]} | ||||
| export gpu=$gpu_id.$tile_id | ||||
|  | ||||
| cd $PBS_O_WORKDIR | ||||
|  | ||||
| DIR=repro.1gpu.$PBS_JOBID/node-$n-$THIS_NODE-GPU-$gpu | ||||
| mkdir -p $DIR | ||||
| cd $DIR | ||||
|  | ||||
| echo $THIS_NODE > nodefile | ||||
| echo $gpu > gpu | ||||
|  | ||||
| export ZE_AFFINITY_MASK=$gpu | ||||
| export ONEAPI_DEVICE_FILTER=gpu,level_zero | ||||
|  | ||||
| CMD="mpiexec -np 1 -ppn 1  -envall --hostfile nodefile \ | ||||
| 	     numactl -N $numa -m $numa ../../Test_dwf_mixedcg_prec --mpi 1.1.1.1 --grid 16.16.32.32 \ | ||||
| 		--shm-mpi 0 --shm 4096 --device-mem 32000 --accelerator-threads 32 --seconds 6000 --debug-stdout --log Message" | ||||
| echo $CMD | ||||
| $CMD & | ||||
|  | ||||
| done | ||||
| done | ||||
|  | ||||
| wait | ||||
|  | ||||
97	systems/Sunspot/tests/reproN.pbs	Normal file
							| @@ -0,0 +1,97 @@ | ||||
| #!/bin/bash | ||||
|  | ||||
| #PBS -l select=32:system=sunspot,place=scatter | ||||
| #PBS -A LatticeQCD_aesp_CNDA | ||||
| #PBS -l walltime=02:00:00 | ||||
| #PBS -N reproN | ||||
| #PBS -k doe | ||||
|  | ||||
| #export OMP_PROC_BIND=spread | ||||
| #unset OMP_PLACES | ||||
|  | ||||
| module load oneapi/eng-compiler/2023.05.15.003 | ||||
| module load mpich/51.2/icc-all-deterministic-pmix-gpu | ||||
|  | ||||
| # 56 cores / 6 threads ~9 | ||||
| export OMP_NUM_THREADS=6 | ||||
| export MPIR_CVAR_CH4_OFI_ENABLE_GPU_PIPELINE=1 | ||||
| #export MPIR_CVAR_CH4_OFI_GPU_PIPELINE_D2H_ENGINE_TYPE=0 | ||||
| #export MPIR_CVAR_CH4_OFI_GPU_PIPELINE_H2D_ENGINE_TYPE=0 | ||||
| #export MPIR_CVAR_CH4_OFI_GPU_PIPELINE_BUFFER_SZ=1048576 | ||||
| #export MPIR_CVAR_CH4_OFI_GPU_PIPELINE_THRESHOLD=131072 | ||||
| #export MPIR_CVAR_CH4_OFI_GPU_PIPELINE_NUM_BUFFERS_PER_CHUNK=16 | ||||
| #export MPIR_CVAR_CH4_OFI_GPU_PIPELINE_MAX_NUM_BUFFERS=16 | ||||
| #export MPIR_CVAR_GPU_USE_IMMEDIATE_COMMAND_LIST=1 | ||||
|  | ||||
| export SYCL_PI_LEVEL_ZERO_USE_IMMEDIATE_COMMANDLISTS=1 | ||||
| export SYCL_PI_LEVEL_ZERO_USE_COPY_ENGINE=1 | ||||
| export SYCL_PI_LEVEL_ZERO_USE_COPY_ENGINE_FOR_D2D_COPY=1 | ||||
|  | ||||
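| # FlightRecorder controls: the GRID_* variables below are read via getenv by the test binary to set log verbosity and comms checksumming | ||||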
| export GRID_PRINT_ENTIRE_LOG=0 | ||||
| export GRID_CHECKSUM_RECV_BUF=1 | ||||
| export GRID_CHECKSUM_SEND_BUF=0 | ||||
|  | ||||
| export MPICH_OFI_NIC_POLICY=GPU | ||||
|  | ||||
| export MPIR_CVAR_ALLREDUCE_DEVICE_COLLECTIVE=0 | ||||
| export MPIR_CVAR_REDUCE_DEVICE_COLLECTIVE=0 | ||||
| export MPIR_CVAR_ALLREDUCE_INTRA_ALGORITHM=recursive_doubling | ||||
| unset MPIR_CVAR_CH4_COLL_SELECTION_TUNING_JSON_FILE | ||||
| unset MPIR_CVAR_COLL_SELECTION_TUNING_JSON_FILE | ||||
| unset MPIR_CVAR_CH4_POSIX_COLL_SELECTION_TUNING_JSON_FILE | ||||
|  | ||||
| cd $PBS_O_WORKDIR | ||||
|  | ||||
| NN=`cat $PBS_NODEFILE | wc -l` | ||||
| echo $PBS_NODEFILE | ||||
| cat $PBS_NODEFILE | ||||
|  | ||||
| echo $NN nodes in node file | ||||
| for n in `eval echo {1..$NN}` | ||||
| do | ||||
|  | ||||
| cd $PBS_O_WORKDIR | ||||
|  | ||||
| THIS_NODE=`head -n$n $PBS_NODEFILE | tail -n1 ` | ||||
| echo Node $n is $THIS_NODE | ||||
|  | ||||
| DIR=reproN.$PBS_JOBID/node-$n-$THIS_NODE | ||||
|  | ||||
| mkdir -p $DIR | ||||
| cd $DIR | ||||
|  | ||||
| echo $THIS_NODE > nodefile | ||||
|  | ||||
| #CMD="mpiexec -np 12 -ppn 12  -envall --hostfile nodefile \ | ||||
| #	     ../../gpu_tile_compact.sh \ | ||||
| #	     ../../Test_dwf_mixedcg_prec --mpi 1.2.2.3 --grid 32.64.64.96 \ | ||||
| #		--shm-mpi 0 --shm 4096 --device-mem 32000 --accelerator-threads 32 --seconds 6000 --debug-stdout --log Message --comms-overlap" | ||||
|  | ||||
| CMD="mpiexec -np 12 -ppn 12  -envall --hostfile nodefile \ | ||||
| 	     ../../gpu_tile_compact.sh \ | ||||
| 	     ../../Test_dwf_mixedcg_prec --mpi 1.2.2.3 --grid 32.64.64.96 \ | ||||
| 		--shm-mpi 1 --shm 4096 --device-mem 32000 --accelerator-threads 32 --seconds 6000 --debug-stdout --log Message --comms-overlap" | ||||
|  | ||||
| echo $CMD > command-line | ||||
| env > environment | ||||
| $CMD & | ||||
|  | ||||
| done | ||||
|  | ||||
| # Suspicious: 'wait' was allowing jobs to collide and knock one another out, so sleep for the run length instead | ||||
| #wait | ||||
|  | ||||
| sleep 6500 | ||||
|  | ||||
| for n in ` eval echo {1..$NN} ` | ||||
| do | ||||
|  | ||||
| THIS_NODE=`head -n$n $PBS_NODEFILE | tail -n1 ` | ||||
| DIR=reproN.$PBS_JOBID/node-$n-$THIS_NODE | ||||
|  | ||||
| cd $DIR | ||||
|  | ||||
| grep Oops Grid.stderr.* > failures.$PBS_JOBID | ||||
| rm core.* | ||||
|  | ||||
| done | ||||
| @@ -2,11 +2,11 @@ | ||||
|     --enable-comms=mpi \ | ||||
|     --enable-simd=GPU \ | ||||
|     --enable-shm=nvlink \ | ||||
|     --enable-gen-simd-width=64 \ | ||||
|     --enable-accelerator=cuda \ | ||||
|     --enable-gen-simd-width=64 \ | ||||
|     --disable-gparity \ | ||||
|     --with-lime=/mnt/lustre/tursafs1/home/tc002/tc002/dc-boyl1/spack/spack/opt/spack/linux-rhel8-zen/gcc-8.4.1/c-lime-2-3-9-e6wxqrid6rqmd45z7n32dxkvkykpvyez \ | ||||
|     --enable-accelerator-cshift \ | ||||
|     --disable-unified \ | ||||
|     CXX=nvcc \ | ||||
|     LDFLAGS="-cudart shared " \ | ||||
|     CXXFLAGS="-ccbin mpicxx -gencode arch=compute_80,code=sm_80 -std=c++14 -cudart shared" | ||||
|     LDFLAGS="-cudart shared -lcublas " \ | ||||
|     CXXFLAGS="-ccbin mpicxx -gencode arch=compute_80,code=sm_80 -std=c++17 -cudart shared --diag-suppress 177,550,611" | ||||
|   | ||||
| @@ -1,6 +1,7 @@ | ||||
| module load cuda/11.4.1  openmpi/4.1.1-cuda11.4.1  ucx/1.12.0-cuda11.4.1   | ||||
| #module load cuda/11.4.1 openmpi/4.1.1 ucx/1.10.1 | ||||
| export PREFIX=/home/tc002/tc002/shared/env/prefix/ | ||||
| export LD_LIBRARY_PATH=$PREFIX/lib/:$LD_LIBRARY_PATH | ||||
| module load cuda/12.3  | ||||
| module load ucx/1.15.0-cuda12.3   | ||||
| module load openmpi/4.1.5-cuda12.3 | ||||
| source /home/dp207/dp207/shared/env/production/env-base.sh  | ||||
| source /home/dp207/dp207/shared/env/production/env-gpu.sh  | ||||
| unset SBATCH_EXPORT | ||||
|  | ||||
|   | ||||
| @@ -1,3 +1,2 @@ | ||||
| CXXFLAGS=-I/opt/local/include LDFLAGS=-L/opt/local/lib/ CXX=c++-13 MPICXX=mpicxx ../../configure --enable-simd=GEN --enable-comms=mpi-auto --enable-unified=yes --prefix $HOME/QCD/GridInstall --with-lime=/Users/peterboyle/QCD/SciDAC/install/ --with-openssl=$BREW --disable-fermion-reps --disable-gparity --disable-debug  | ||||
|  | ||||
|  | ||||
|   | ||||
| @@ -34,6 +34,46 @@ using namespace Grid; | ||||
| #define HOST_NAME_MAX _POSIX_HOST_NAME_MAX | ||||
| #endif | ||||
|  | ||||
|  | ||||
| NAMESPACE_BEGIN(Grid); | ||||
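| // Debug variant of SchurDiagMooeeOperator: takes norm2 of every intermediate field so each step can be checked (diagnostic prints left commented out) | ||||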
| template<class Matrix,class Field> | ||||
|   class SchurDiagMooeeOperatorParanoid :  public SchurOperatorBase<Field> { | ||||
|  public: | ||||
|     Matrix &_Mat; | ||||
|     SchurDiagMooeeOperatorParanoid (Matrix &Mat): _Mat(Mat){}; | ||||
|     virtual  void Mpc      (const Field &in, Field &out) { | ||||
|       Field tmp(in.Grid()); | ||||
|       tmp.Checkerboard() = !in.Checkerboard(); | ||||
|       //      std::cout <<" Mpc starting"<<std::endl; | ||||
|  | ||||
|       RealD nn = norm2(in); // std::cout <<" Mpc Prior to dslash norm is "<<nn<<std::endl; | ||||
|       _Mat.Meooe(in,tmp); | ||||
|       nn = norm2(tmp); //std::cout <<" Mpc Prior to Mooeinv "<<nn<<std::endl; | ||||
|       _Mat.MooeeInv(tmp,out); | ||||
|       nn = norm2(out); //std::cout <<" Mpc Prior to dslash norm is "<<nn<<std::endl; | ||||
|       _Mat.Meooe(out,tmp); | ||||
|       nn = norm2(tmp); //std::cout <<" Mpc Prior to Mooee "<<nn<<std::endl; | ||||
|       _Mat.Mooee(in,out); | ||||
|       nn = norm2(out); //std::cout <<" Mpc Prior to axpy "<<nn<<std::endl; | ||||
|       axpy(out,-1.0,tmp,out); | ||||
|     } | ||||
|     virtual void MpcDag   (const Field &in, Field &out){ | ||||
|       Field tmp(in.Grid()); | ||||
|       //      std::cout <<" MpcDag starting"<<std::endl; | ||||
|       RealD nn = norm2(in);// std::cout <<" MpcDag Prior to dslash norm is "<<nn<<std::endl; | ||||
|       _Mat.MeooeDag(in,tmp); | ||||
|       _Mat.MooeeInvDag(tmp,out); | ||||
|       nn = norm2(out);// std::cout <<" MpcDag Prior to dslash norm is "<<nn<<std::endl; | ||||
|       _Mat.MeooeDag(out,tmp); | ||||
|       nn = norm2(tmp);// std::cout <<" MpcDag Prior to Mooee "<<nn<<std::endl; | ||||
|       _Mat.MooeeDag(in,out); | ||||
|       nn = norm2(out);// std::cout <<" MpcDag Prior to axpy "<<nn<<std::endl; | ||||
|       axpy(out,-1.0,tmp,out); | ||||
|     } | ||||
| }; | ||||
|  | ||||
| NAMESPACE_END(Grid); | ||||
|  | ||||
| int main (int argc, char ** argv) | ||||
| { | ||||
|   char hostname[HOST_NAME_MAX+1]; | ||||
| @@ -82,8 +122,8 @@ int main (int argc, char ** argv) | ||||
|   result_o_2.Checkerboard() = Odd; | ||||
|   result_o_2 = Zero(); | ||||
|  | ||||
|   SchurDiagMooeeOperator<DomainWallFermionD,LatticeFermionD> HermOpEO(Ddwf); | ||||
|   SchurDiagMooeeOperator<DomainWallFermionF,LatticeFermionF> HermOpEO_f(Ddwf_f); | ||||
|   SchurDiagMooeeOperatorParanoid<DomainWallFermionD,LatticeFermionD> HermOpEO(Ddwf); | ||||
|   SchurDiagMooeeOperatorParanoid<DomainWallFermionF,LatticeFermionF> HermOpEO_f(Ddwf_f); | ||||
|  | ||||
|   int nsecs=600; | ||||
|   if( GridCmdOptionExists(argv,argv+argc,"--seconds") ){ | ||||
| @@ -102,46 +142,57 @@ int main (int argc, char ** argv) | ||||
|   std:: cout << " CG    site flops = "<< CGsiteflops <<std::endl; | ||||
|   int iters; | ||||
|  | ||||
|   time_t now; | ||||
|   time_t start = time(NULL); | ||||
|   UGrid->Broadcast(0,(void *)&start,sizeof(start)); | ||||
|  | ||||
|   FlightRecorder::ContinueOnFail = 0; | ||||
|   FlightRecorder::PrintEntireLog = 0; | ||||
|   FlightRecorder::ChecksumComms  = 1; | ||||
|   FlightRecorder::ChecksumCommsSend=0; | ||||
|  | ||||
|   if(char *s=getenv("GRID_PRINT_ENTIRE_LOG"))  FlightRecorder::PrintEntireLog     = atoi(s); | ||||
|   if(char *s=getenv("GRID_CHECKSUM_RECV_BUF")) FlightRecorder::ChecksumComms      = atoi(s); | ||||
|   if(char *s=getenv("GRID_CHECKSUM_SEND_BUF")) FlightRecorder::ChecksumCommsSend  = atoi(s); | ||||
|  | ||||
|   uint32_t csum, csumref; | ||||
|   csumref=0; | ||||
|   int iter=0; | ||||
|   do { | ||||
|     if ( iter == 0 ) { | ||||
|       FlightRecorder::SetLoggingMode(FlightRecorder::LoggingModeRecord); | ||||
|     } else { | ||||
|       FlightRecorder::SetLoggingMode(FlightRecorder::LoggingModeVerify); | ||||
|     } | ||||
|     std::cerr << "******************* SINGLE PRECISION SOLVE "<<iter<<std::endl; | ||||
|     result_o = Zero(); | ||||
|     t1=usecond(); | ||||
|     t1=usecond();  | ||||
|     mCG(src_o,result_o); | ||||
|     t2=usecond(); | ||||
|     t2=usecond();  | ||||
|     iters = mCG.TotalInnerIterations; //Number of inner CG iterations | ||||
|     flops = MdagMsiteflops*4*FrbGrid->gSites()*iters; | ||||
|     flops+= CGsiteflops*FrbGrid->gSites()*iters; | ||||
|     std::cout << " SinglePrecision iterations/sec "<< iters/(t2-t1)*1000.*1000.<<std::endl; | ||||
|     std::cout << " SinglePrecision GF/s "<< flops/(t2-t1)/1000.<<std::endl; | ||||
|     std::cout << " SinglePrecision error count "<< FlightRecorder::ErrorCount()<<std::endl; | ||||
|  | ||||
|     csum = crc(result_o); | ||||
|     assert(FlightRecorder::ErrorCount()==0); | ||||
|  | ||||
|     if ( csumref == 0 ) { | ||||
|       csumref = csum; | ||||
|     } else { | ||||
|       if ( csum != csumref ) {  | ||||
| 	std::cerr << host<<" FAILURE " <<iter <<" csum "<<std::hex<<csum<< " != "<<csumref <<std::dec<<std::endl; | ||||
| 	assert(0); | ||||
|       } else { | ||||
| 	std::cout << host <<" OK " <<iter <<" csum "<<std::hex<<csum<<std::dec<<" -- OK! "<<std::endl; | ||||
|       } | ||||
|     } | ||||
|     std::cout << " FlightRecorder is OK! "<<std::endl; | ||||
|     iter ++; | ||||
|   } while (time(NULL) < (start + nsecs/2) ); | ||||
|     now = time(NULL); UGrid->Broadcast(0,(void *)&now,sizeof(now)); | ||||
|   } while (now < (start + nsecs/10) ); | ||||
|      | ||||
|   std::cout << GridLogMessage << "::::::::::::: Starting double precision CG" << std::endl; | ||||
|   ConjugateGradient<LatticeFermionD> CG(1.0e-8,10000); | ||||
|   csumref=0; | ||||
|   int i=0; | ||||
|   do {  | ||||
|     if ( i == 0 ) { | ||||
|       FlightRecorder::SetLoggingMode(FlightRecorder::LoggingModeRecord); | ||||
|     } else { | ||||
|       FlightRecorder::SetLoggingMode(FlightRecorder::LoggingModeVerify); | ||||
|     } | ||||
|     std::cerr << "******************* DOUBLE PRECISION SOLVE "<<i<<std::endl; | ||||
|     result_o_2 = Zero(); | ||||
|     t1=usecond(); | ||||
|     t1=usecond();  | ||||
|     CG(HermOpEO,src_o,result_o_2); | ||||
|     t2=usecond(); | ||||
|     iters = CG.IterationsToComplete; | ||||
| @@ -150,21 +201,12 @@ int main (int argc, char ** argv) | ||||
|  | ||||
|     std::cout << " DoublePrecision iterations/sec "<< iters/(t2-t1)*1000.*1000.<<std::endl; | ||||
|     std::cout << " DoublePrecision GF/s "<< flops/(t2-t1)/1000.<<std::endl; | ||||
|  | ||||
|     csum = crc(result_o); | ||||
|  | ||||
|     if ( csumref == 0 ) { | ||||
|       csumref = csum; | ||||
|     } else { | ||||
|       if ( csum != csumref ) {  | ||||
| 	std::cerr << i <<" csum "<<std::hex<<csum<< " != "<<csumref <<std::dec<<std::endl; | ||||
| 	assert(0); | ||||
|       } else { | ||||
| 	std::cout << i <<" csum "<<std::hex<<csum<<std::dec<<" -- OK! "<<std::endl; | ||||
|       } | ||||
|     } | ||||
|     std::cout << " DoublePrecision error count "<< FlightRecorder::ErrorCount()<<std::endl; | ||||
|     assert(FlightRecorder::ErrorCount()==0); | ||||
|     std::cout << " FlightRecorder is OK! "<<std::endl; | ||||
|     now = time(NULL); UGrid->Broadcast(0,(void *)&now,sizeof(now)); | ||||
|     i++; | ||||
|   } while (time(NULL) < (start + nsecs) ); | ||||
|   } while (now < (start + nsecs) ); | ||||
|  | ||||
|   LatticeFermionD diff_o(FrbGrid); | ||||
|   RealD diff = axpy_norm(diff_o, -1.0, result_o, result_o_2); | ||||
|   | ||||
319	tests/debug/Test_general_coarse.cc	Normal file
							| @@ -0,0 +1,319 @@ | ||||
|     /************************************************************************************* | ||||
|  | ||||
|     Grid physics library, www.github.com/paboyle/Grid  | ||||
|  | ||||
|     Source file: ./tests/Test_padded_cell.cc | ||||
|  | ||||
|     Copyright (C) 2023 | ||||
|  | ||||
| Author: Peter Boyle <paboyle@ph.ed.ac.uk> | ||||
|  | ||||
|     This program is free software; you can redistribute it and/or modify | ||||
|     it under the terms of the GNU General Public License as published by | ||||
|     the Free Software Foundation; either version 2 of the License, or | ||||
|     (at your option) any later version. | ||||
|  | ||||
|     This program is distributed in the hope that it will be useful, | ||||
|     but WITHOUT ANY WARRANTY; without even the implied warranty of | ||||
|     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the | ||||
|     GNU General Public License for more details. | ||||
|  | ||||
|     You should have received a copy of the GNU General Public License along | ||||
|     with this program; if not, write to the Free Software Foundation, Inc., | ||||
|     51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. | ||||
|  | ||||
|     See the full license in the file "LICENSE" in the top level distribution directory | ||||
|     *************************************************************************************/ | ||||
|     /*  END LEGAL */ | ||||
| #include <Grid/Grid.h> | ||||
| #include <Grid/lattice/PaddedCell.h> | ||||
| #include <Grid/stencil/GeneralLocalStencil.h> | ||||
|  | ||||
| #include <Grid/algorithms/iterative/PrecGeneralisedConjugateResidual.h> | ||||
| #include <Grid/algorithms/iterative/PrecGeneralisedConjugateResidualNonHermitian.h> | ||||
| #include <Grid/algorithms/iterative/BiCGSTAB.h> | ||||
|  | ||||
| using namespace std; | ||||
| using namespace Grid; | ||||
|  | ||||
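| // Out-of-class definitions of the GridBLAS static members, required exactly once in the executable | ||||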
| gridblasHandle_t GridBLAS::gridblasHandle; | ||||
| int            GridBLAS::gridblasInit; | ||||
|  | ||||
| /////////////////////// | ||||
| // Tells little dirac op to use MdagM as the .Op() | ||||
| /////////////////////// | ||||
| template<class Field> | ||||
| class HermOpAdaptor : public LinearOperatorBase<Field> | ||||
| { | ||||
|   LinearOperatorBase<Field> & wrapped; | ||||
| public: | ||||
|   HermOpAdaptor(LinearOperatorBase<Field> &wrapme) : wrapped(wrapme)  {}; | ||||
|   void OpDiag (const Field &in, Field &out) {    assert(0);  } | ||||
|   void OpDir  (const Field &in, Field &out,int dir,int disp) {    assert(0);  } | ||||
|   void OpDirAll  (const Field &in, std::vector<Field> &out){    assert(0);  }; | ||||
|   void Op     (const Field &in, Field &out){ | ||||
|     wrapped.HermOp(in,out); | ||||
|   } | ||||
|   void AdjOp     (const Field &in, Field &out){ | ||||
|     wrapped.HermOp(in,out); | ||||
|   } | ||||
|   void HermOpAndNorm(const Field &in, Field &out,RealD &n1,RealD &n2){    assert(0);  } | ||||
|   void HermOp(const Field &in, Field &out){ | ||||
|     wrapped.HermOp(in,out); | ||||
|   } | ||||
| }; | ||||
|  | ||||
|  | ||||
| int main (int argc, char ** argv) | ||||
| { | ||||
|   Grid_init(&argc,&argv); | ||||
|  | ||||
|   const int Ls=4; | ||||
|  | ||||
|   GridCartesian         * UGrid   = SpaceTimeGrid::makeFourDimGrid(GridDefaultLatt(), | ||||
| 								   GridDefaultSimd(Nd,vComplex::Nsimd()), | ||||
| 								   GridDefaultMpi()); | ||||
|   GridRedBlackCartesian * UrbGrid = SpaceTimeGrid::makeFourDimRedBlackGrid(UGrid); | ||||
|  | ||||
|   GridCartesian         * FGrid   = SpaceTimeGrid::makeFiveDimGrid(Ls,UGrid); | ||||
|   GridRedBlackCartesian * FrbGrid = SpaceTimeGrid::makeFiveDimRedBlackGrid(Ls,UGrid); | ||||
|  | ||||
|   // Construct a coarsened grid | ||||
|   Coordinate clatt = GridDefaultLatt(); | ||||
|   for(int d=0;d<clatt.size();d++){ | ||||
|     clatt[d] = clatt[d]/4; | ||||
|   } | ||||
|  | ||||
|   GridCartesian *Coarse4d =  SpaceTimeGrid::makeFourDimGrid(clatt, | ||||
| 							    GridDefaultSimd(Nd,vComplex::Nsimd()), | ||||
| 							    GridDefaultMpi());; | ||||
|   GridCartesian *Coarse5d =  SpaceTimeGrid::makeFiveDimGrid(1,Coarse4d); | ||||
|  | ||||
|   std::vector<int> seeds4({1,2,3,4}); | ||||
|   std::vector<int> seeds5({5,6,7,8}); | ||||
|   std::vector<int> cseeds({5,6,7,8}); | ||||
|   GridParallelRNG          RNG5(FGrid);   RNG5.SeedFixedIntegers(seeds5); | ||||
|   GridParallelRNG          RNG4(UGrid);   RNG4.SeedFixedIntegers(seeds4); | ||||
|   GridParallelRNG          CRNG(Coarse5d);CRNG.SeedFixedIntegers(cseeds); | ||||
|  | ||||
|   LatticeFermion    src(FGrid); random(RNG5,src); | ||||
|   LatticeFermion result(FGrid); result=Zero(); | ||||
|   LatticeFermion    ref(FGrid); ref=Zero(); | ||||
|   LatticeFermion    tmp(FGrid); | ||||
|   LatticeFermion    err(FGrid); | ||||
|   LatticeGaugeField Umu(UGrid); | ||||
|   SU<Nc>::HotConfiguration(RNG4,Umu); | ||||
|   //  Umu=Zero(); | ||||
|    | ||||
|   RealD mass=0.1; | ||||
|   RealD M5=1.8; | ||||
|  | ||||
|   DomainWallFermionD Ddwf(Umu,*FGrid,*FrbGrid,*UGrid,*UrbGrid,mass,M5); | ||||
|  | ||||
|   const int nbasis = 62; | ||||
|   const int cb = 0 ; | ||||
|   LatticeFermion prom(FGrid); | ||||
|  | ||||
|   std::vector<LatticeFermion> subspace(nbasis,FGrid); | ||||
|  | ||||
|   std::cout<<GridLogMessage<<"Calling Aggregation class" <<std::endl; | ||||
|  | ||||
|   /////////////////////////////////////////////////////////// | ||||
|   // Squared operator is in HermOp | ||||
|   /////////////////////////////////////////////////////////// | ||||
|   MdagMLinearOperator<DomainWallFermionD,LatticeFermion> HermDefOp(Ddwf); | ||||
|  | ||||
|   /////////////////////////////////////////////////// | ||||
|   // Random aggregation space | ||||
|   /////////////////////////////////////////////////// | ||||
|   std::cout<<GridLogMessage << "Building random aggregation class"<< std::endl; | ||||
|   typedef Aggregation<vSpinColourVector,vTComplex,nbasis> Subspace; | ||||
|   Subspace Aggregates(Coarse5d,FGrid,cb); | ||||
|   Aggregates.CreateSubspaceRandom(RNG5); | ||||
|  | ||||
|   /////////////////////////////////////////////////// | ||||
|   // Build little dirac op | ||||
|   /////////////////////////////////////////////////// | ||||
|   std::cout<<GridLogMessage << "Building little Dirac operator"<< std::endl; | ||||
|  | ||||
|   typedef GeneralCoarsenedMatrix<vSpinColourVector,vTComplex,nbasis> LittleDiracOperator; | ||||
|   typedef LittleDiracOperator::CoarseVector CoarseVector; | ||||
|  | ||||
|   NextToNextToNextToNearestStencilGeometry5D geom(Coarse5d); | ||||
|   LittleDiracOperator LittleDiracOp(geom,FGrid,Coarse5d); | ||||
|   LittleDiracOperator LittleDiracOpCol(geom,FGrid,Coarse5d); | ||||
|  | ||||
|   HermOpAdaptor<LatticeFermionD> HOA(HermDefOp); | ||||
|  | ||||
|   LittleDiracOp.CoarsenOperator(HOA,Aggregates); | ||||
|    | ||||
|   /////////////////////////////////////////////////// | ||||
|   // Test the operator | ||||
|   /////////////////////////////////////////////////// | ||||
|   CoarseVector c_src (Coarse5d); | ||||
|   CoarseVector c_res (Coarse5d); | ||||
|   CoarseVector c_res_dag(Coarse5d); | ||||
|   CoarseVector c_proj(Coarse5d); | ||||
|  | ||||
|   subspace=Aggregates.subspace; | ||||
|  | ||||
|   //  random(CRNG,c_src); | ||||
|   c_src = 1.0; | ||||
|  | ||||
|   blockPromote(c_src,err,subspace); | ||||
|  | ||||
|   prom=Zero(); | ||||
|   for(int b=0;b<nbasis;b++){ | ||||
|     prom=prom+subspace[b]; | ||||
|   } | ||||
|   err=err-prom;  | ||||
|   std::cout<<GridLogMessage<<"Promoted back from subspace: err "<<norm2(err)<<std::endl; | ||||
|   std::cout<<GridLogMessage<<"c_src "<<norm2(c_src)<<std::endl; | ||||
|   std::cout<<GridLogMessage<<"prom  "<<norm2(prom)<<std::endl; | ||||
|  | ||||
|   HermDefOp.HermOp(prom,tmp); | ||||
|  | ||||
|   blockProject(c_proj,tmp,subspace); | ||||
|   std::cout<<GridLogMessage<<" Called Big Dirac Op "<<norm2(tmp)<<std::endl; | ||||
|  | ||||
|   std::cout<<GridLogMessage<<" Calling little Dirac Op "<<std::endl; | ||||
|   LittleDiracOp.M(c_src,c_res); | ||||
|   LittleDiracOp.Mdag(c_src,c_res_dag); | ||||
|  | ||||
|   std::cout<<GridLogMessage<<"Little dop : "<<norm2(c_res)<<std::endl; | ||||
|   std::cout<<GridLogMessage<<"Little dop dag : "<<norm2(c_res_dag)<<std::endl; | ||||
|   std::cout<<GridLogMessage<<"Big dop in subspace : "<<norm2(c_proj)<<std::endl; | ||||
|  | ||||
|   c_proj = c_proj - c_res; | ||||
|   std::cout<<GridLogMessage<<" ldop error: "<<norm2(c_proj)<<std::endl; | ||||
|  | ||||
|   c_res_dag = c_res_dag - c_res; | ||||
|   std::cout<<GridLogMessage<<"Little dopDag - dop: "<<norm2(c_res_dag)<<std::endl; | ||||
|  | ||||
|   std::cout<<GridLogMessage << "Testing Hermiticity stochastically "<< std::endl; | ||||
|   CoarseVector phi(Coarse5d); | ||||
|   CoarseVector chi(Coarse5d); | ||||
|   CoarseVector Aphi(Coarse5d); | ||||
|   CoarseVector Achi(Coarse5d); | ||||
|  | ||||
|   random(CRNG,phi); | ||||
|   random(CRNG,chi); | ||||
|  | ||||
|   std::cout<<GridLogMessage<<"Made randoms "<<norm2(phi)<<" " << norm2(chi)<<std::endl; | ||||
|  | ||||
|   LittleDiracOp.M(phi,Aphi); | ||||
|  | ||||
|   LittleDiracOp.Mdag(chi,Achi); | ||||
|  | ||||
|   std::cout<<GridLogMessage<<"Aphi "<<norm2(Aphi)<<" A chi" << norm2(Achi)<<std::endl; | ||||
|  | ||||
|   ComplexD pAc = innerProduct(chi,Aphi); | ||||
|   ComplexD cAp = innerProduct(phi,Achi); | ||||
|   ComplexD cAc = innerProduct(chi,Achi); | ||||
|   ComplexD pAp = innerProduct(phi,Aphi); | ||||
|  | ||||
|   std::cout<<GridLogMessage<< "pAc "<<pAc<<" cAp "<< cAp<< " diff "<<pAc-adj(cAp)<<std::endl; | ||||
|   std::cout<<GridLogMessage<< "pAp "<<pAp<<" cAc "<< cAc<<"Should be real"<< std::endl; | ||||
|  | ||||
|   std::cout<<GridLogMessage<<"Testing linearity"<<std::endl; | ||||
|   CoarseVector PhiPlusChi(Coarse5d); | ||||
|   CoarseVector APhiPlusChi(Coarse5d); | ||||
|   CoarseVector linerr(Coarse5d); | ||||
|   PhiPlusChi = phi+chi; | ||||
|   LittleDiracOp.M(PhiPlusChi,APhiPlusChi); | ||||
|  | ||||
|   linerr= APhiPlusChi-Aphi; | ||||
|   linerr= linerr-Achi; | ||||
|   std::cout<<GridLogMessage<<"**Diff "<<norm2(linerr)<<std::endl; | ||||
|  | ||||
|   std::cout<<GridLogMessage<<std::endl; | ||||
|   std::cout<<GridLogMessage<<std::endl; | ||||
|   std::cout<<GridLogMessage<<"*******************************************"<<std::endl; | ||||
|   std::cout<<GridLogMessage<<"*******************************************"<<std::endl; | ||||
|   std::cout<<GridLogMessage<<"*******************************************"<<std::endl; | ||||
|   ////////////////////////////////////////////////////////////////////////////////////// | ||||
|   //  Create a higher dim coarse grid | ||||
|   ////////////////////////////////////////////////////////////////////////////////////// | ||||
|  | ||||
|   const int nrhs=vComplex::Nsimd()*3; | ||||
|  | ||||
|   Coordinate mpi=GridDefaultMpi(); | ||||
|   Coordinate rhMpi ({1,1,mpi[0],mpi[1],mpi[2],mpi[3]}); | ||||
|   Coordinate rhLatt({nrhs,1,clatt[0],clatt[1],clatt[2],clatt[3]}); | ||||
|   Coordinate rhSimd({vComplex::Nsimd(),1, 1,1,1,1}); | ||||
|  | ||||
|   GridCartesian *CoarseMrhs = new GridCartesian(rhLatt,rhSimd,rhMpi);  | ||||
|  | ||||
|    | ||||
|   MultiGeneralCoarsenedMatrix mrhs(LittleDiracOp,CoarseMrhs); | ||||
|   typedef decltype(mrhs) MultiGeneralCoarsenedMatrix_t; | ||||
|    | ||||
|   ////////////////////////////////////////// | ||||
|   // Test against single RHS | ||||
|   ////////////////////////////////////////// | ||||
|   { | ||||
|     GridParallelRNG          rh_CRNG(CoarseMrhs);rh_CRNG.SeedFixedIntegers(cseeds); | ||||
|     CoarseVector rh_phi(CoarseMrhs); | ||||
|     CoarseVector rh_res(CoarseMrhs); | ||||
|     random(rh_CRNG,rh_phi); | ||||
|  | ||||
|     std::cout << "Warmup"<<std::endl; | ||||
|     mrhs.M(rh_phi,rh_res); | ||||
|     const int ncall=5; | ||||
|     RealD t0=-usecond(); | ||||
|     for(int i=0;i<ncall;i++){ | ||||
|       std::cout << "Call "<<i<<"/"<<ncall<<std::endl; | ||||
|       mrhs.M(rh_phi,rh_res); | ||||
|     } | ||||
|     t0+=usecond(); | ||||
|     RealD t1=0; | ||||
|     for(int r=0;r<nrhs;r++){ | ||||
|       std::cout << " compare to single RHS "<<r<<"/"<<nrhs<<std::endl; | ||||
|       ExtractSlice(phi,rh_phi,r,0); | ||||
|       ExtractSlice(chi,rh_res,r,0); | ||||
|       LittleDiracOp.M(phi,Aphi); | ||||
|       t1-=usecond(); | ||||
|       for(int i=0;i<ncall;i++){ | ||||
| 	std::cout << "Call "<<i<<"/"<<ncall<<std::endl; | ||||
| 	LittleDiracOp.M(phi,Aphi); | ||||
|       } | ||||
|       t1+=usecond(); | ||||
|       Coordinate site({0,0,0,0,0}); | ||||
|       auto  bad = peekSite(chi,site); | ||||
|       auto good = peekSite(Aphi,site); | ||||
|       std::cout << " mrhs [" <<r <<"] "<< norm2(chi)<<std::endl; | ||||
|       std::cout << " srhs [" <<r <<"] "<< norm2(Aphi)<<std::endl; | ||||
|       chi=chi-Aphi; | ||||
|       RealD diff =norm2(chi); | ||||
|       std::cout << r << " diff " << diff<<std::endl; | ||||
|       assert(diff < 1.0e-10); | ||||
|     } | ||||
|     std::cout << nrhs<< " mrhs " << t0/ncall/nrhs <<" us"<<std::endl; | ||||
|     std::cout << nrhs<< " srhs " << t1/ncall/nrhs <<" us"<<std::endl; | ||||
|   } | ||||
|  | ||||
|   ////////////////////////////////////////// | ||||
|   // Run mrhs CG on the coarse operator | ||||
|   ////////////////////////////////////////// | ||||
|   { | ||||
|     typedef HermitianLinearOperator<MultiGeneralCoarsenedMatrix_t,CoarseVector> HermMatrix; | ||||
|     HermMatrix MrhsCoarseOp     (mrhs); | ||||
|  | ||||
|     GridParallelRNG          rh_CRNG(CoarseMrhs);rh_CRNG.SeedFixedIntegers(cseeds); | ||||
|     ConjugateGradient<CoarseVector>  mrhsCG(1.0e-8,2000,true); | ||||
|     CoarseVector rh_res(CoarseMrhs); | ||||
|     CoarseVector rh_src(CoarseMrhs); | ||||
|     random(rh_CRNG,rh_src); | ||||
|     rh_res= Zero(); | ||||
|     mrhsCG(MrhsCoarseOp,rh_src,rh_res); | ||||
|   } | ||||
|    | ||||
|   std::cout<<GridLogMessage<<std::endl; | ||||
|   std::cout<<GridLogMessage<<std::endl; | ||||
|   std::cout<<GridLogMessage<<"*******************************************"<<std::endl; | ||||
|   std::cout<<GridLogMessage<<"*******************************************"<<std::endl; | ||||
|   std::cout<<GridLogMessage<<"*******************************************"<<std::endl; | ||||
|  | ||||
|   Grid_finalize(); | ||||
|   return 0; | ||||
| } | ||||
							
								
								
									
tests/debug/Test_general_coarse_hdcg.cc | 304 lines | Normal file
							| @@ -0,0 +1,304 @@ | ||||
| /************************************************************************************* | ||||
|  | ||||
|     Grid physics library, www.github.com/paboyle/Grid  | ||||
|  | ||||
|     Source file: ./tests/Test_general_coarse_hdcg.cc | ||||
|  | ||||
|     Copyright (C) 2023 | ||||
|  | ||||
| Author: Peter Boyle <pboyle@bnl.gov> | ||||
|  | ||||
|     This program is free software; you can redistribute it and/or modify | ||||
|     it under the terms of the GNU General Public License as published by | ||||
|     the Free Software Foundation; either version 2 of the License, or | ||||
|     (at your option) any later version. | ||||
|  | ||||
|     This program is distributed in the hope that it will be useful, | ||||
|     but WITHOUT ANY WARRANTY; without even the implied warranty of | ||||
|     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the | ||||
|     GNU General Public License for more details. | ||||
|  | ||||
|     You should have received a copy of the GNU General Public License along | ||||
|     with this program; if not, write to the Free Software Foundation, Inc., | ||||
|     51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. | ||||
|  | ||||
|     See the full license in the file "LICENSE" in the top level distribution directory | ||||
|     *************************************************************************************/ | ||||
|     /*  END LEGAL */ | ||||
| #include <Grid/Grid.h> | ||||
| #include <Grid/algorithms/iterative/ImplicitlyRestartedBlockLanczos.h> | ||||
| #include <Grid/algorithms/iterative/ImplicitlyRestartedBlockLanczosCoarse.h> | ||||
| #include <Grid/algorithms/iterative/AdefMrhs.h> | ||||
|  | ||||
| using namespace std; | ||||
| using namespace Grid; | ||||
|  | ||||
| // Want Op in CoarsenOp to call MatPcDagMatPc | ||||
| template<class Field> | ||||
| class HermOpAdaptor : public LinearOperatorBase<Field> | ||||
| { | ||||
|   LinearOperatorBase<Field> & wrapped; | ||||
| public: | ||||
|   HermOpAdaptor(LinearOperatorBase<Field> &wrapme) : wrapped(wrapme)  {}; | ||||
|   void Op     (const Field &in, Field &out)   { wrapped.HermOp(in,out);  } | ||||
|   void HermOp(const Field &in, Field &out)    { wrapped.HermOp(in,out); } | ||||
|   void AdjOp     (const Field &in, Field &out){ wrapped.HermOp(in,out);  } | ||||
|   void OpDiag (const Field &in, Field &out)                  {    assert(0);  } | ||||
|   void OpDir  (const Field &in, Field &out,int dir,int disp) {    assert(0);  } | ||||
|   void OpDirAll  (const Field &in, std::vector<Field> &out)  {    assert(0);  }; | ||||
|   void HermOpAndNorm(const Field &in, Field &out,RealD &n1,RealD &n2){    assert(0);  } | ||||
| }; | ||||
|  | ||||
| template<class Field> class CGSmoother : public LinearFunction<Field> | ||||
| { | ||||
| public: | ||||
|   using LinearFunction<Field>::operator(); | ||||
|   typedef LinearOperatorBase<Field> FineOperator; | ||||
|   FineOperator   & _SmootherOperator; | ||||
|   int iters; | ||||
|   CGSmoother(int _iters, FineOperator &SmootherOperator) : | ||||
|     _SmootherOperator(SmootherOperator), | ||||
|     iters(_iters) | ||||
|   { | ||||
|     std::cout << GridLogMessage<<" Mirs smoother order "<<iters<<std::endl; | ||||
|   }; | ||||
|   void operator() (const Field &in, Field &out)  | ||||
|   { | ||||
|     ConjugateGradient<Field>  CG(0.0,iters,false); // non-convergence is fine in a smoother | ||||
|  | ||||
|     out=Zero(); | ||||
|  | ||||
|     CG(_SmootherOperator,in,out); | ||||
|   } | ||||
| }; | ||||
|  | ||||
|  | ||||
| int main (int argc, char ** argv) | ||||
| { | ||||
|   Grid_init(&argc,&argv); | ||||
|  | ||||
|   const int Ls=24; | ||||
|   const int nbasis = 60; | ||||
|   const int cb = 0 ; | ||||
|   RealD mass=0.00078; | ||||
|   RealD M5=1.8; | ||||
|   RealD b=1.5; | ||||
|   RealD c=0.5; | ||||
|  | ||||
|   GridCartesian         * UGrid   = SpaceTimeGrid::makeFourDimGrid(GridDefaultLatt(), | ||||
| 								   GridDefaultSimd(Nd,vComplex::Nsimd()), | ||||
| 								   GridDefaultMpi()); | ||||
|   GridRedBlackCartesian * UrbGrid = SpaceTimeGrid::makeFourDimRedBlackGrid(UGrid); | ||||
|   GridCartesian         * FGrid   = SpaceTimeGrid::makeFiveDimGrid(Ls,UGrid); | ||||
|   GridRedBlackCartesian * FrbGrid = SpaceTimeGrid::makeFiveDimRedBlackGrid(Ls,UGrid); | ||||
|  | ||||
|   // Construct a coarsened grid with 4^4 cell | ||||
|   Coordinate Block({4,4,4,4}); | ||||
|   Coordinate clatt = GridDefaultLatt(); | ||||
|   for(int d=0;d<clatt.size();d++){ | ||||
|     clatt[d] = clatt[d]/Block[d]; | ||||
|   } | ||||
|  | ||||
|   GridCartesian *Coarse4d =  SpaceTimeGrid::makeFourDimGrid(clatt, | ||||
| 							    GridDefaultSimd(Nd,vComplex::Nsimd()), | ||||
| 							    GridDefaultMpi()); | ||||
|   GridCartesian *Coarse5d =  SpaceTimeGrid::makeFiveDimGrid(1,Coarse4d); | ||||
|  | ||||
|   ///////////////////////// RNGs ///////////////////////////////// | ||||
|   std::vector<int> seeds4({1,2,3,4}); | ||||
|   std::vector<int> seeds5({5,6,7,8}); | ||||
|   std::vector<int> cseeds({5,6,7,8}); | ||||
|  | ||||
|   GridParallelRNG          RNG5(FGrid);   RNG5.SeedFixedIntegers(seeds5); | ||||
|   GridParallelRNG          RNG4(UGrid);   RNG4.SeedFixedIntegers(seeds4); | ||||
|   GridParallelRNG          CRNG(Coarse5d);CRNG.SeedFixedIntegers(cseeds); | ||||
|  | ||||
|   ///////////////////////// Configuration ///////////////////////////////// | ||||
|   LatticeGaugeField Umu(UGrid); | ||||
|  | ||||
|   FieldMetaData header; | ||||
|   std::string file("ckpoint_EODWF_lat.125"); | ||||
|   NerscIO::readConfiguration(Umu,header,file); | ||||
|  | ||||
|   //////////////////////// Fermion action ////////////////////////////////// | ||||
|   MobiusFermionD Ddwf(Umu,*FGrid,*FrbGrid,*UGrid,*UrbGrid,mass,M5,b,c); | ||||
|  | ||||
|   SchurDiagMooeeOperator<MobiusFermionD, LatticeFermion> HermOpEO(Ddwf); | ||||
|  | ||||
|   typedef HermOpAdaptor<LatticeFermionD> HermFineMatrix; | ||||
|   HermFineMatrix FineHermOp(HermOpEO); | ||||
|  | ||||
|   //////////////////////////////////////////////////////////// | ||||
|   ///////////// Coarse basis and Little Dirac Operator /////// | ||||
|   //////////////////////////////////////////////////////////// | ||||
|   typedef GeneralCoarsenedMatrix<vSpinColourVector,vTComplex,nbasis> LittleDiracOperator; | ||||
|   typedef LittleDiracOperator::CoarseVector CoarseVector; | ||||
|  | ||||
|   NextToNextToNextToNearestStencilGeometry5D geom(Coarse5d); | ||||
|  | ||||
|   typedef Aggregation<vSpinColourVector,vTComplex,nbasis> Subspace; | ||||
|   Subspace Aggregates(Coarse5d,FrbGrid,cb); | ||||
|  | ||||
|   //////////////////////////////////////////////////////////// | ||||
|   // Need to check about red-black grid coarsening | ||||
|   //////////////////////////////////////////////////////////// | ||||
|  | ||||
|   int refine=1; | ||||
|     //    Aggregates.CreateSubspaceMultishift(RNG5,HermOpEO, | ||||
|     //    					0.0003,1.0e-5,2000); // Lo, tol, maxit | ||||
|     //    Aggregates.CreateSubspaceChebyshev(RNG5,HermOpEO,nbasis,95.,0.01,1500);// <== last run | ||||
|   std::cout << "**************************************"<<std::endl; | ||||
|   std::cout << "Create Subspace"<<std::endl; | ||||
|   std::cout << "**************************************"<<std::endl; | ||||
|   Aggregates.CreateSubspaceChebyshevNew(RNG5,HermOpEO,95.);  | ||||
|  | ||||
|   std::cout << "**************************************"<<std::endl; | ||||
|   std::cout << "Refine Subspace"<<std::endl; | ||||
|   std::cout << "**************************************"<<std::endl; | ||||
|   Aggregates.RefineSubspace(HermOpEO,0.001,1.0e-3,3000); // 172 iters | ||||
|    | ||||
|   std::cout << "**************************************"<<std::endl; | ||||
|   std::cout << "Coarsen after refine"<<std::endl; | ||||
|   std::cout << "**************************************"<<std::endl; | ||||
|   Aggregates.Orthogonalise(); | ||||
|  | ||||
|   std::cout << "**************************************"<<std::endl; | ||||
|   std::cout << "Building MultiRHS Coarse operator"<<std::endl; | ||||
|   std::cout << "**************************************"<<std::endl; | ||||
|   ConjugateGradient<CoarseVector>  coarseCG(4.0e-2,20000,true); | ||||
|      | ||||
|   const int nrhs=12; | ||||
|      | ||||
|   Coordinate mpi=GridDefaultMpi(); | ||||
|   Coordinate rhMpi ({1,1,mpi[0],mpi[1],mpi[2],mpi[3]}); | ||||
|   Coordinate rhLatt({nrhs,1,clatt[0],clatt[1],clatt[2],clatt[3]}); | ||||
|   Coordinate rhSimd({vComplex::Nsimd(),1, 1,1,1,1}); | ||||
|      | ||||
|   GridCartesian *CoarseMrhs = new GridCartesian(rhLatt,rhSimd,rhMpi);  | ||||
|   typedef MultiGeneralCoarsenedMatrix<vSpinColourVector,vTComplex,nbasis> MultiGeneralCoarsenedMatrix_t; | ||||
|   MultiGeneralCoarsenedMatrix_t mrhs(geom,CoarseMrhs); | ||||
|  | ||||
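|   // CoarsenOperator projects the fine Hermitian operator onto the aggregation subspace to build the coarse stencil | ||||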
|   mrhs.CoarsenOperator(FineHermOp,Aggregates,Coarse5d); | ||||
|    | ||||
|   std::cout << "**************************************"<<std::endl; | ||||
|   std::cout << "         Coarse Lanczos               "<<std::endl; | ||||
|   std::cout << "**************************************"<<std::endl; | ||||
|  | ||||
|   typedef HermitianLinearOperator<MultiGeneralCoarsenedMatrix_t,CoarseVector> MrhsHermMatrix; | ||||
|   Chebyshev<CoarseVector>      IRLCheby(0.01,42.0,301);  // 1 iter | ||||
|   MrhsHermMatrix MrhsCoarseOp     (mrhs); | ||||
|  | ||||
|   CoarseVector pm_src(CoarseMrhs); | ||||
|   pm_src = ComplexD(1.0); | ||||
|   PowerMethod<CoarseVector>       cPM; | ||||
|   cPM(MrhsCoarseOp,pm_src); | ||||
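|   // Power method estimates the spectral radius of the coarse operator; a sanity check on the Chebyshev upper bound chosen above | ||||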
|  | ||||
|   int Nk=192; | ||||
|   int Nm=384; | ||||
|   int Nstop=Nk; | ||||
|   int Nconv_test_interval=1; | ||||
|    | ||||
|   ImplicitlyRestartedBlockLanczosCoarse<CoarseVector> IRL(MrhsCoarseOp, | ||||
| 							  Coarse5d, | ||||
| 							  CoarseMrhs, | ||||
| 							  nrhs, | ||||
| 							  IRLCheby, | ||||
| 							  Nstop, | ||||
| 							  Nconv_test_interval, | ||||
| 							  nrhs, | ||||
| 							  Nk, | ||||
| 							  Nm, | ||||
| 							  1e-5,10); | ||||
|  | ||||
|   int Nconv; | ||||
|   std::vector<RealD>            eval(Nm); | ||||
|   std::vector<CoarseVector>     evec(Nm,Coarse5d); | ||||
|   std::vector<CoarseVector>     c_src(nrhs,Coarse5d); | ||||
|  | ||||
|   ////////////////////////////////////////// | ||||
|   // Block projector for coarse/fine | ||||
|   ////////////////////////////////////////// | ||||
|  | ||||
|   std::cout << "**************************************"<<std::endl; | ||||
|   std::cout << "Calling mRHS HDCG"<<std::endl; | ||||
|   std::cout << "**************************************"<<std::endl; | ||||
|   MultiRHSBlockProject<LatticeFermionD> MrhsProjector; | ||||
|   MrhsProjector.Allocate(nbasis,FrbGrid,Coarse5d); | ||||
|   MrhsProjector.ImportBasis(Aggregates.subspace); | ||||
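|   // The block projector maps between the fine red-black grid and the coarse multi-RHS space using the aggregated subspace vectors | ||||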
|  | ||||
|   std::cout << "**************************************"<<std::endl; | ||||
|   std::cout << " Recompute coarse evecs  "<<std::endl; | ||||
|   std::cout << "**************************************"<<std::endl; | ||||
|   evec.resize(Nm,Coarse5d); | ||||
|   eval.resize(Nm); | ||||
|   for(int r=0;r<nrhs;r++){ | ||||
|     random(CRNG,c_src[r]); | ||||
|   } | ||||
|  | ||||
|   IRL.calc(eval,evec,c_src,Nconv,LanczosType::irbl); | ||||
|  | ||||
|   /////////////////////// | ||||
|   // Deflation guesser object | ||||
|   /////////////////////// | ||||
|   std::cout << "**************************************"<<std::endl; | ||||
|   std::cout << " Reimport coarse evecs  "<<std::endl; | ||||
|   std::cout << "**************************************"<<std::endl; | ||||
|   MultiRHSDeflation<CoarseVector> MrhsGuesser; | ||||
|   MrhsGuesser.ImportEigenBasis(evec,eval); | ||||
|        | ||||
|   ////////////////////////// | ||||
|   // Extra HDCG parameters | ||||
|   ////////////////////////// | ||||
|   int maxit=3000; | ||||
|   ConjugateGradient<CoarseVector>  CG(5.0e-2,maxit,false); | ||||
|   RealD lo=2.0; | ||||
|   int ord = 7; | ||||
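|   // lo is reused below as the MIRS smoother shift; ord is the fixed CG iteration count of that smoother | ||||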
|  | ||||
|   DoNothingGuesser<CoarseVector> DoNothing; | ||||
|   HPDSolver<CoarseVector> HPDSolveMrhs(MrhsCoarseOp,CG,DoNothing); | ||||
|  | ||||
|   ///////////////////////////////////////////////// | ||||
|   // Mirs smoother | ||||
|   ///////////////////////////////////////////////// | ||||
|   RealD MirsShift = lo; | ||||
|   ShiftedHermOpLinearOperator<LatticeFermionD> ShiftedFineHermOp(HermOpEO,MirsShift); | ||||
|   CGSmoother<LatticeFermionD> CGsmooth(ord,ShiftedFineHermOp) ; | ||||
|  | ||||
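|   // Two-level ADEF2 multi-RHS solver: CGsmooth smooths on the fine grid, the coarse HPD solve provides both the M1 preconditioner and the Vstart deflation, and MrhsProjector/MrhsGuesser handle coarse-space transfer and eigenvector deflation | ||||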
|   TwoLevelADEF2mrhs<LatticeFermion,CoarseVector> | ||||
|     HDCGmrhs(1.0e-8, 500, | ||||
| 	     FineHermOp, | ||||
| 	     CGsmooth, | ||||
| 	     HPDSolveMrhs,    // Used in M1 | ||||
| 	     HPDSolveMrhs,          // Used in Vstart | ||||
| 	     MrhsProjector, | ||||
| 	     MrhsGuesser, | ||||
| 	     CoarseMrhs); | ||||
|      | ||||
|   std::vector<LatticeFermionD> src_mrhs(nrhs,FrbGrid); | ||||
|   std::vector<LatticeFermionD> res_mrhs(nrhs,FrbGrid); | ||||
|    | ||||
|   for(int r=0;r<nrhs;r++){ | ||||
|     random(RNG5,src_mrhs[r]); | ||||
|     res_mrhs[r]=Zero(); | ||||
|   } | ||||
|    | ||||
|   HDCGmrhs(src_mrhs,res_mrhs); | ||||
|  | ||||
|   // Standard CG | ||||
| #if 1 | ||||
|   { | ||||
|   std::cout << "**************************************"<<std::endl; | ||||
|   std::cout << "Calling red black CG"<<std::endl; | ||||
|   std::cout << "**************************************"<<std::endl; | ||||
|        | ||||
|     LatticeFermion result(FrbGrid); result=Zero(); | ||||
|     LatticeFermion    src(FrbGrid); random(RNG5,src); | ||||
|     result=Zero(); | ||||
|  | ||||
|     ConjugateGradient<LatticeFermionD>  CGfine(1.0e-8,30000,false); | ||||
|     CGfine(HermOpEO, src, result); | ||||
|   } | ||||
| #endif   | ||||
|   Grid_finalize(); | ||||
|   return 0; | ||||
| } | ||||
							
								
								
									
tests/debug/Test_general_coarse_hdcg_phys.cc | 444 lines | Normal file
							| @@ -0,0 +1,444 @@ | ||||
| /************************************************************************************* | ||||
|  | ||||
|     Grid physics library, www.github.com/paboyle/Grid  | ||||
|  | ||||
|     Source file: ./tests/Test_general_coarse_hdcg.cc | ||||
|  | ||||
|     Copyright (C) 2023 | ||||
|  | ||||
| Author: Peter Boyle <pboyle@bnl.gov> | ||||
|  | ||||
|     This program is free software; you can redistribute it and/or modify | ||||
|     it under the terms of the GNU General Public License as published by | ||||
|     the Free Software Foundation; either version 2 of the License, or | ||||
|     (at your option) any later version. | ||||
|  | ||||
|     This program is distributed in the hope that it will be useful, | ||||
|     but WITHOUT ANY WARRANTY; without even the implied warranty of | ||||
|     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the | ||||
|     GNU General Public License for more details. | ||||
|  | ||||
|     You should have received a copy of the GNU General Public License along | ||||
|     with this program; if not, write to the Free Software Foundation, Inc., | ||||
|     51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. | ||||
|  | ||||
|     See the full license in the file "LICENSE" in the top level distribution directory | ||||
|     *************************************************************************************/ | ||||
|     /*  END LEGAL */ | ||||
| #include <Grid/Grid.h> | ||||
| #include <Grid/lattice/PaddedCell.h> | ||||
| #include <Grid/stencil/GeneralLocalStencil.h> | ||||
| //#include <Grid/algorithms/GeneralCoarsenedMatrix.h> | ||||
| #include <Grid/algorithms/iterative/AdefGeneric.h> | ||||
|  | ||||
| using namespace std; | ||||
| using namespace Grid; | ||||
|  | ||||
| template<class Coarsened> | ||||
| void SaveOperator(Coarsened &Operator,std::string file) | ||||
| { | ||||
| #ifdef HAVE_LIME | ||||
|   emptyUserRecord record; | ||||
|   ScidacWriter WR(Operator.Grid()->IsBoss()); | ||||
|   assert(Operator._A.size()==Operator.geom.npoint); | ||||
|   WR.open(file); | ||||
|   for(int p=0;p<Operator._A.size();p++){ | ||||
|     auto tmp = Operator.Cell.Extract(Operator._A[p]); | ||||
|     WR.writeScidacFieldRecord(tmp,record,0,0); | ||||
|     //    WR.writeScidacFieldRecord(tmp,record,0,BINARYIO_LEXICOGRAPHIC); | ||||
|   } | ||||
|   WR.close(); | ||||
| #endif | ||||
| } | ||||
| template<class Coarsened> | ||||
| void LoadOperator(Coarsened &Operator,std::string file) | ||||
| { | ||||
| #ifdef HAVE_LIME | ||||
|   emptyUserRecord record; | ||||
|   Grid::ScidacReader RD ; | ||||
|   RD.open(file); | ||||
|   assert(Operator._A.size()==Operator.geom.npoint); | ||||
|   for(int p=0;p<Operator.geom.npoint;p++){ | ||||
|     conformable(Operator._A[p].Grid(),Operator.CoarseGrid()); | ||||
|     //    RD.readScidacFieldRecord(Operator._A[p],record,BINARYIO_LEXICOGRAPHIC); | ||||
|     RD.readScidacFieldRecord(Operator._A[p],record,0); | ||||
|   }     | ||||
|   RD.close(); | ||||
|   Operator.ExchangeCoarseLinks(); | ||||
| #endif | ||||
| } | ||||
| template<class Coarsened> | ||||
| void ReLoadOperator(Coarsened &Operator,std::string file) | ||||
| { | ||||
| #ifdef HAVE_LIME | ||||
|   emptyUserRecord record; | ||||
|   Grid::ScidacReader RD ; | ||||
|   RD.open(file); | ||||
|   assert(Operator._A.size()==Operator.geom.npoint); | ||||
|   for(int p=0;p<Operator.geom.npoint;p++){ | ||||
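|     // Unlike LoadOperator, read the unpadded interior field and rebuild the halo directly via the padded-cell exchange | ||||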
|     auto tmp=Operator.Cell.Extract(Operator._A[p]); | ||||
|     RD.readScidacFieldRecord(tmp,record,0); | ||||
|     Operator._A[p] = Operator.Cell.ExchangePeriodic(tmp); | ||||
|   }     | ||||
|   RD.close(); | ||||
| #endif | ||||
| } | ||||
| template<class aggregation> | ||||
| void SaveBasis(aggregation &Agg,std::string file) | ||||
| { | ||||
| #ifdef HAVE_LIME | ||||
|   emptyUserRecord record; | ||||
|   ScidacWriter WR(Agg.FineGrid->IsBoss()); | ||||
|   WR.open(file); | ||||
|   for(int b=0;b<Agg.subspace.size();b++){ | ||||
|     //WR.writeScidacFieldRecord(Agg.subspace[b],record,0,BINARYIO_LEXICOGRAPHIC); | ||||
|     WR.writeScidacFieldRecord(Agg.subspace[b],record,0,0); | ||||
|   } | ||||
|   WR.close(); | ||||
| #endif | ||||
| } | ||||
| template<class aggregation> | ||||
| void LoadBasis(aggregation &Agg, std::string file) | ||||
| { | ||||
| #ifdef HAVE_LIME | ||||
|   emptyUserRecord record; | ||||
|   ScidacReader RD ; | ||||
|   RD.open(file); | ||||
|   for(int b=0;b<Agg.subspace.size();b++){ | ||||
|     //    RD.readScidacFieldRecord(Agg.subspace[b],record,BINARYIO_LEXICOGRAPHIC); | ||||
|     RD.readScidacFieldRecord(Agg.subspace[b],record,0); | ||||
|   }     | ||||
|   RD.close(); | ||||
| #endif | ||||
| } | ||||
|  | ||||
| RealD InverseApproximation(RealD x){ | ||||
|   return 1.0/x; | ||||
| } | ||||
|  | ||||
| // Want Op in CoarsenOp to call MatPcDagMatPc | ||||
| template<class Field> | ||||
| class HermOpAdaptor : public LinearOperatorBase<Field> | ||||
| { | ||||
|   LinearOperatorBase<Field> & wrapped; | ||||
| public: | ||||
|   HermOpAdaptor(LinearOperatorBase<Field> &wrapme) : wrapped(wrapme)  {}; | ||||
|   void Op     (const Field &in, Field &out)   { wrapped.HermOp(in,out);  } | ||||
|   void HermOp(const Field &in, Field &out)    { wrapped.HermOp(in,out); } | ||||
|   void AdjOp     (const Field &in, Field &out){ wrapped.HermOp(in,out);  } | ||||
|   void OpDiag (const Field &in, Field &out)                  {    assert(0);  } | ||||
|   void OpDir  (const Field &in, Field &out,int dir,int disp) {    assert(0);  } | ||||
|   void OpDirAll  (const Field &in, std::vector<Field> &out)  {    assert(0);  }; | ||||
|   void HermOpAndNorm(const Field &in, Field &out,RealD &n1,RealD &n2){    assert(0);  } | ||||
| }; | ||||
| /* | ||||
| template<class Field> class ChebyshevSmoother : public LinearFunction<Field> | ||||
| { | ||||
| public: | ||||
|   using LinearFunction<Field>::operator(); | ||||
|   typedef LinearOperatorBase<Field> FineOperator; | ||||
|   FineOperator   & _SmootherOperator; | ||||
|   Chebyshev<Field> Cheby; | ||||
|   ChebyshevSmoother(RealD _lo,RealD _hi,int _ord, FineOperator &SmootherOperator) : | ||||
|     _SmootherOperator(SmootherOperator), | ||||
|     Cheby(_lo,_hi,_ord,InverseApproximation) | ||||
|   { | ||||
|     std::cout << GridLogMessage<<" Chebyshev smoother order "<<_ord<<" ["<<_lo<<","<<_hi<<"]"<<std::endl; | ||||
|   }; | ||||
|   void operator() (const Field &in, Field &out)  | ||||
|   { | ||||
|     Field tmp(in.Grid()); | ||||
|     tmp = in; | ||||
|     Cheby(_SmootherOperator,tmp,out); | ||||
|   } | ||||
| }; | ||||
| */ | ||||
| template<class Field> class CGSmoother : public LinearFunction<Field> | ||||
| { | ||||
| public: | ||||
|   using LinearFunction<Field>::operator(); | ||||
|   typedef LinearOperatorBase<Field> FineOperator; | ||||
|   FineOperator   & _SmootherOperator; | ||||
|   int iters; | ||||
|   CGSmoother(int _iters, FineOperator &SmootherOperator) : | ||||
|     _SmootherOperator(SmootherOperator), | ||||
|     iters(_iters) | ||||
|   { | ||||
|     std::cout << GridLogMessage<<" Mirs smoother order "<<iters<<std::endl; | ||||
|   }; | ||||
|   void operator() (const Field &in, Field &out)  | ||||
|   { | ||||
|     ConjugateGradient<Field>  CG(0.0,iters,false); // non-convergence is fine in a smoother | ||||
|     CG(_SmootherOperator,in,out); | ||||
|   } | ||||
| }; | ||||
|  | ||||
|  | ||||
| int main (int argc, char ** argv) | ||||
| { | ||||
|   Grid_init(&argc,&argv); | ||||
|  | ||||
|   const int Ls=24; | ||||
|   const int nbasis = 62; | ||||
|   const int cb = 0 ; | ||||
|   RealD mass=0.00078; | ||||
|   RealD M5=1.8; | ||||
|   RealD b=1.5; | ||||
|   RealD c=0.5; | ||||
|  | ||||
|   GridCartesian         * UGrid   = SpaceTimeGrid::makeFourDimGrid(GridDefaultLatt(), | ||||
| 								   GridDefaultSimd(Nd,vComplex::Nsimd()), | ||||
| 								   GridDefaultMpi()); | ||||
|   GridRedBlackCartesian * UrbGrid = SpaceTimeGrid::makeFourDimRedBlackGrid(UGrid); | ||||
|   GridCartesian         * FGrid   = SpaceTimeGrid::makeFiveDimGrid(Ls,UGrid); | ||||
|   GridRedBlackCartesian * FrbGrid = SpaceTimeGrid::makeFiveDimRedBlackGrid(Ls,UGrid); | ||||
|  | ||||
|   // Construct a coarsened grid with a 4x4x6x4 blocking | ||||
|   Coordinate Block({4,4,6,4}); | ||||
|   Coordinate clatt = GridDefaultLatt(); | ||||
|   for(int d=0;d<clatt.size();d++){ | ||||
|     clatt[d] = clatt[d]/Block[d]; | ||||
|   } | ||||
|  | ||||
|   GridCartesian *Coarse4d =  SpaceTimeGrid::makeFourDimGrid(clatt, | ||||
| 							    GridDefaultSimd(Nd,vComplex::Nsimd()), | ||||
| 							    GridDefaultMpi()); | ||||
|   GridCartesian *Coarse5d =  SpaceTimeGrid::makeFiveDimGrid(1,Coarse4d); | ||||
|  | ||||
|   ///////////////////////// RNGs ///////////////////////////////// | ||||
|   std::vector<int> seeds4({1,2,3,4}); | ||||
|   std::vector<int> seeds5({5,6,7,8}); | ||||
|   std::vector<int> cseeds({5,6,7,8}); | ||||
|  | ||||
|   GridParallelRNG          RNG5(FGrid);   RNG5.SeedFixedIntegers(seeds5); | ||||
|   GridParallelRNG          RNG4(UGrid);   RNG4.SeedFixedIntegers(seeds4); | ||||
|   GridParallelRNG          CRNG(Coarse5d);CRNG.SeedFixedIntegers(cseeds); | ||||
|  | ||||
|   ///////////////////////// Configuration ///////////////////////////////// | ||||
|   LatticeGaugeField Umu(UGrid); | ||||
|  | ||||
|   FieldMetaData header; | ||||
|   std::string file("ckpoint_lat.1000"); | ||||
|   NerscIO::readConfiguration(Umu,header,file); | ||||
|  | ||||
|   //////////////////////// Fermion action ////////////////////////////////// | ||||
|   MobiusFermionD Ddwf(Umu,*FGrid,*FrbGrid,*UGrid,*UrbGrid,mass,M5,b,c); | ||||
|  | ||||
|   SchurDiagMooeeOperator<MobiusFermionD, LatticeFermion> HermOpEO(Ddwf); | ||||
|  | ||||
|   typedef HermOpAdaptor<LatticeFermionD> HermFineMatrix; | ||||
|   HermFineMatrix FineHermOp(HermOpEO); | ||||
|  | ||||
|   LatticeFermion result(FrbGrid); result=Zero(); | ||||
|  | ||||
|   LatticeFermion    src(FrbGrid); random(RNG5,src); | ||||
|  | ||||
|   // Run power method on FineHermOp | ||||
|   PowerMethod<LatticeFermion>       PM;   PM(HermOpEO,src); | ||||
|   | ||||
|   //////////////////////////////////////////////////////////// | ||||
|   ///////////// Coarse basis and Little Dirac Operator /////// | ||||
|   //////////////////////////////////////////////////////////// | ||||
|   typedef GeneralCoarsenedMatrix<vSpinColourVector,vTComplex,nbasis> LittleDiracOperator; | ||||
|   typedef LittleDiracOperator::CoarseVector CoarseVector; | ||||
|  | ||||
|   NextToNextToNextToNearestStencilGeometry5D geom(Coarse5d); | ||||
|   NearestStencilGeometry5D geom_nn(Coarse5d); | ||||
|    | ||||
|   // Warning: This routine calls PVdagM.Op, not PVdagM.HermOp | ||||
|   typedef Aggregation<vSpinColourVector,vTComplex,nbasis> Subspace; | ||||
|   Subspace Aggregates(Coarse5d,FrbGrid,cb); | ||||
|  | ||||
|   //////////////////////////////////////////////////////////// | ||||
|   // Need to check about red-black grid coarsening | ||||
|   //////////////////////////////////////////////////////////// | ||||
|   LittleDiracOperator LittleDiracOp(geom,FrbGrid,Coarse5d); | ||||
|  | ||||
|   std::string subspace_file("/lustre/orion/phy157/proj-shared/phy157_dwf/paboyle/Subspace.phys48.rat.scidac.62"); | ||||
|   std::string refine_file("/lustre/orion/phy157/proj-shared/phy157_dwf/paboyle/Refine.phys48.rat.scidac.62"); | ||||
|   std::string ldop_file("/lustre/orion/phy157/proj-shared/phy157_dwf/paboyle/LittleDiracOp.phys48.rat.scidac.62"); | ||||
|   bool load_agg=true; | ||||
|   bool load_refine=true; | ||||
|   bool load_mat=true; | ||||
|   if ( load_agg ) { | ||||
|     LoadBasis(Aggregates,subspace_file); | ||||
|   } else { | ||||
|  | ||||
|     // NBASIS=40 | ||||
|     // Best so far: ord 2000 [0.01,95], 500,500  -- 466 iters | ||||
|     // slurm-398626.out:Grid : Message : 141.295253 s : 500 filt [1] <n|MdagM|n> 0.000103622063 | ||||
|  | ||||
|  | ||||
|     //Grid : Message : 33.870465 s :  Chebyshev subspace pass-1 : ord 2000 [0.001,95] | ||||
|     //Grid : Message : 33.870485 s :  Chebyshev subspace pass-2 : nbasis40 min 1000 step 1000 lo0 | ||||
|     //slurm-1482200.out : filt ~ 0.004 -- not as low mode projecting -- took 626 iters | ||||
|  | ||||
|     // To try: 2000 [0.1,95]  ,2000,500,500 -- slurm-1482213.out 586 iterations | ||||
|  | ||||
|     // To try: 2000 [0.01,95] ,2000,500,500 -- 469 (think I bumped 92 to 95) (??) | ||||
|     // To try: 2000 [0.025,95],2000,500,500 | ||||
|     // To try: 2000 [0.005,95],2000,500,500 | ||||
|  | ||||
|     // NBASIS=44 -- HDCG paper was 64 vectors; AMD compiler craps out at 48 | ||||
|     // To try: 2000 [0.01,95] ,2000,500,500 -- 419 lowest slurm-1482355.out | ||||
|     // To try: 2000 [0.025,95] ,2000,500,500 -- 487  | ||||
|     // To try: 2000 [0.005,95] ,2000,500,500 | ||||
|     /* | ||||
|       Smoother [3,92] order 16 | ||||
| slurm-1482355.out:Grid : Message : 35.239686 s :  Chebyshev subspace pass-1 : ord 2000 [0.01,95] | ||||
| slurm-1482355.out:Grid : Message : 35.239714 s :  Chebyshev subspace pass-2 : nbasis44 min 500 step 500 lo0 | ||||
| slurm-1482355.out:Grid : Message : 5561.305552 s : HDCG: Pcg converged in 419 iterations and 2616.202598 s | ||||
|  | ||||
| slurm-1482367.out:Grid : Message : 43.157235 s :  Chebyshev subspace pass-1 : ord 2000 [0.025,95] | ||||
| slurm-1482367.out:Grid : Message : 43.157257 s :  Chebyshev subspace pass-2 : nbasis44 min 500 step 500 lo0 | ||||
| slurm-1482367.out:Grid : Message : 6169.469330 s : HDCG: Pcg converged in 487 iterations and 3131.185821 s | ||||
|     */ | ||||
| 		 /* | ||||
| 		   Aggregates.CreateSubspaceChebyshev(RNG5,HermOpEO,nbasis, | ||||
| 				       95.0,0.0075, | ||||
| 				       2500, | ||||
| 				       500, | ||||
| 				       500, | ||||
| 				       0.0); | ||||
| 		 */ | ||||
|  | ||||
| 		 /* | ||||
| 		   Aggregates.CreateSubspaceChebyshevPowerLaw(RNG5,HermOpEO,nbasis, | ||||
| 							      95.0, | ||||
| 							      2000); | ||||
| 		 */ | ||||
|  | ||||
|     Aggregates.CreateSubspaceMultishift(RNG5,HermOpEO, | ||||
| 					0.0003,1.0e-5,2000); // Lo, tol, maxit | ||||
|   /* | ||||
|     Aggregates.CreateSubspaceChebyshev(RNG5,HermOpEO,nbasis, | ||||
| 				       95.0,0.05, | ||||
| 				       2000, | ||||
| 				       500, | ||||
| 				       500, | ||||
| 				       0.0); | ||||
|  */ | ||||
|     /* | ||||
|       Aggregates.CreateSubspaceChebyshev(RNG5,HermOpEO,nbasis, | ||||
| 				       95.0,0.01, | ||||
| 				       2000, | ||||
| 				       500, | ||||
| 				       500, | ||||
| 				       0.0); | ||||
|     */ | ||||
|     //    Aggregates.CreateSubspaceChebyshev(RNG5,HermOpEO,nbasis,95.,0.01,1500); -- running slurm-1484934.out nbasis 56 | ||||
|  | ||||
|     //    Aggregates.CreateSubspaceChebyshev(RNG5,HermOpEO,nbasis,95.,0.01,1500); <== last run | ||||
|     SaveBasis(Aggregates,subspace_file); | ||||
|   } | ||||
|  | ||||
|   int refine=1; | ||||
|   if(refine){ | ||||
|     if ( load_refine ) { | ||||
|       LoadBasis(Aggregates,refine_file); | ||||
|     } else { | ||||
|       // HDCG used Pcg to refine | ||||
|       Aggregates.RefineSubspace(HermOpEO,0.001,1.0e-3,3000); | ||||
|       SaveBasis(Aggregates,refine_file); | ||||
|     } | ||||
|   } | ||||
|  | ||||
|   Aggregates.Orthogonalise(); | ||||
|   if ( load_mat ) { | ||||
|     LoadOperator(LittleDiracOp,ldop_file); | ||||
|   } else { | ||||
|     LittleDiracOp.CoarsenOperator(FineHermOp,Aggregates); | ||||
|     SaveOperator(LittleDiracOp,ldop_file); | ||||
|   } | ||||
|  | ||||
|   // I/O test: | ||||
|   CoarseVector c_src(Coarse5d);   random(CRNG,c_src); | ||||
|   CoarseVector c_res(Coarse5d);  | ||||
|   CoarseVector c_ref(Coarse5d); | ||||
|  | ||||
|   ////////////////////////////////////////// | ||||
|   // Build a coarse lanczos | ||||
|   ////////////////////////////////////////// | ||||
|   typedef HermitianLinearOperator<LittleDiracOperator,CoarseVector> HermMatrix; | ||||
|   HermMatrix CoarseOp     (LittleDiracOp); | ||||
|    | ||||
|   int Nk=192; | ||||
|   int Nm=256; | ||||
|   int Nstop=Nk; | ||||
|    | ||||
|   Chebyshev<CoarseVector>      IRLCheby(0.005,40.0,201); | ||||
|   //  Chebyshev<CoarseVector>      IRLCheby(0.010,45.0,201);  // 1 iter | ||||
|   FunctionHermOp<CoarseVector> IRLOpCheby(IRLCheby,CoarseOp); | ||||
|   PlainHermOp<CoarseVector>    IRLOp    (CoarseOp); | ||||
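|   // The Chebyshev-filtered operator drives the Lanczos iteration; the plain operator is kept for testing eigenvalues and residuals (assumed reading of the two-operator IRL interface) | ||||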
|    | ||||
|   ImplicitlyRestartedLanczos<CoarseVector> IRL(IRLOpCheby,IRLOp,Nstop,Nk,Nm,1e-5,10); | ||||
|  | ||||
|   int Nconv; | ||||
|   std::vector<RealD>            eval(Nm); | ||||
|   std::vector<CoarseVector>     evec(Nm,Coarse5d); | ||||
|  | ||||
|   PowerMethod<CoarseVector>       cPM;   cPM(CoarseOp,c_src); | ||||
|  | ||||
|   IRL.calc(eval,evec,c_src,Nconv); | ||||
|  | ||||
|   ////////////////////////////////////////// | ||||
|   // Deflated guesser | ||||
|   ////////////////////////////////////////// | ||||
|   DeflatedGuesser<CoarseVector> DeflCoarseGuesser(evec,eval); | ||||
|  | ||||
|   int maxit=30000; | ||||
|   ConjugateGradient<CoarseVector>  CG(1.0e-10,maxit,false); | ||||
|   ConjugateGradient<LatticeFermionD>  CGfine(1.0e-8,30000,false); | ||||
|  | ||||
|   ////////////////////////////////////////// | ||||
|   // HDCG | ||||
|   ////////////////////////////////////////// | ||||
|    | ||||
|   std::vector<RealD> los({2.0,2.5}); // Nbasis 40 == 36,36 iters | ||||
|   std::vector<int> ords({9}); // Nbasis 40 == 40 iters (320 mults)   | ||||
|  | ||||
|   for(int l=0;l<los.size();l++){ | ||||
|  | ||||
|     RealD lo = los[l]; | ||||
|  | ||||
|     for(int o=0;o<ords.size();o++){ | ||||
|  | ||||
|       ////////////////////////////////////////// | ||||
|       // Sloppy coarse solve | ||||
|       ////////////////////////////////////////// | ||||
|        | ||||
|       ConjugateGradient<CoarseVector>  CGsloppy(4.0e-2,maxit,false); | ||||
|       HPDSolver<CoarseVector> HPDSolveSloppy(CoarseOp,CGsloppy,DeflCoarseGuesser); | ||||
|       HPDSolver<CoarseVector> HPDSolve(CoarseOp,CG,DeflCoarseGuesser); | ||||
|  | ||||
|       ////////////////////////////////////////// | ||||
|       // IRS shifted smoother based on CG | ||||
|       ////////////////////////////////////////// | ||||
|       RealD MirsShift = lo; | ||||
|       ShiftedHermOpLinearOperator<LatticeFermionD> ShiftedFineHermOp(HermOpEO,MirsShift); | ||||
|       CGSmoother<LatticeFermionD> CGsmooth(ords[o],ShiftedFineHermOp) ; | ||||
|    | ||||
|       ////////////////////////////////////////// | ||||
|       // Build a HDCG solver | ||||
|       ////////////////////////////////////////// | ||||
|       TwoLevelADEF2<LatticeFermion,CoarseVector,Subspace> | ||||
| 	HDCG(1.0e-8, 700, | ||||
| 	     FineHermOp, | ||||
| 	     CGsmooth, | ||||
| 	     HPDSolveSloppy, | ||||
| 	     HPDSolve, | ||||
| 	     Aggregates); | ||||
|  | ||||
|       result=Zero(); | ||||
|       HDCG(src,result); | ||||
|        | ||||
|     } | ||||
|   } | ||||
|  | ||||
|   // Standard CG | ||||
|   result=Zero(); | ||||
|   CGfine(HermOpEO, src, result); | ||||
|    | ||||
|   Grid_finalize(); | ||||
|   return 0; | ||||
| } | ||||
							
								
								
									
tests/debug/Test_general_coarse_hdcg_phys48.cc | 513 lines | Normal file
							| @@ -0,0 +1,513 @@ | ||||
| /************************************************************************************* | ||||
|  | ||||
|     Grid physics library, www.github.com/paboyle/Grid  | ||||
|  | ||||
|     Source file: ./tests/Test_general_coarse_hdcg.cc | ||||
|  | ||||
|     Copyright (C) 2023 | ||||
|  | ||||
| Author: Peter Boyle <pboyle@bnl.gov> | ||||
|  | ||||
|     This program is free software; you can redistribute it and/or modify | ||||
|     it under the terms of the GNU General Public License as published by | ||||
|     the Free Software Foundation; either version 2 of the License, or | ||||
|     (at your option) any later version. | ||||
|  | ||||
|     This program is distributed in the hope that it will be useful, | ||||
|     but WITHOUT ANY WARRANTY; without even the implied warranty of | ||||
|     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the | ||||
|     GNU General Public License for more details. | ||||
|  | ||||
|     You should have received a copy of the GNU General Public License along | ||||
|     with this program; if not, write to the Free Software Foundation, Inc., | ||||
|     51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. | ||||
|  | ||||
|     See the full license in the file "LICENSE" in the top level distribution directory | ||||
|     *************************************************************************************/ | ||||
|     /*  END LEGAL */ | ||||
| #include <Grid/Grid.h> | ||||
| #include <Grid/algorithms/iterative/ImplicitlyRestartedBlockLanczos.h> | ||||
| #include <Grid/algorithms/iterative/ImplicitlyRestartedBlockLanczosCoarse.h> | ||||
| #include <Grid/algorithms/iterative/AdefMrhs.h> | ||||
|  | ||||
| using namespace std; | ||||
| using namespace Grid; | ||||
|  | ||||
| class HDCGwrapper { | ||||
|  | ||||
| }; | ||||
|  | ||||
| /* | ||||
| template<class Coarsened> | ||||
| void SaveOperator(Coarsened &Operator,std::string file) | ||||
| { | ||||
| #ifdef HAVE_LIME | ||||
|   emptyUserRecord record; | ||||
|   ScidacWriter WR(Operator.Grid()->IsBoss()); | ||||
|   assert(Operator._A.size()==Operator.geom.npoint); | ||||
|   WR.open(file); | ||||
|   for(int p=0;p<Operator._A.size();p++){ | ||||
|     auto tmp = Operator.Cell.Extract(Operator._A[p]); | ||||
|     WR.writeScidacFieldRecord(tmp,record,0,0); | ||||
|     //    WR.writeScidacFieldRecord(tmp,record,0,BINARYIO_LEXICOGRAPHIC); | ||||
|   } | ||||
|   WR.close(); | ||||
| #endif | ||||
| } | ||||
| template<class Coarsened> | ||||
| void LoadOperator(Coarsened &Operator,std::string file) | ||||
| { | ||||
| #ifdef HAVE_LIME | ||||
|   emptyUserRecord record; | ||||
|   Grid::ScidacReader RD ; | ||||
|   RD.open(file); | ||||
|   assert(Operator._A.size()==Operator.geom.npoint); | ||||
|   for(int p=0;p<Operator.geom.npoint;p++){ | ||||
|     conformable(Operator._A[p].Grid(),Operator.CoarseGrid()); | ||||
|     //    RD.readScidacFieldRecord(Operator._A[p],record,BINARYIO_LEXICOGRAPHIC); | ||||
|     RD.readScidacFieldRecord(Operator._A[p],record,0); | ||||
|   }     | ||||
|   RD.close(); | ||||
|   Operator.ExchangeCoarseLinks(); | ||||
| #endif | ||||
| } | ||||
| template<class Coarsened> | ||||
| void ReLoadOperator(Coarsened &Operator,std::string file) | ||||
| { | ||||
| #ifdef HAVE_LIME | ||||
|   emptyUserRecord record; | ||||
|   Grid::ScidacReader RD ; | ||||
|   RD.open(file); | ||||
|   assert(Operator._A.size()==Operator.geom.npoint); | ||||
|   for(int p=0;p<Operator.geom.npoint;p++){ | ||||
|     auto tmp=Operator.Cell.Extract(Operator._A[p]); | ||||
|     RD.readScidacFieldRecord(tmp,record,0); | ||||
|     Operator._A[p] = Operator.Cell.ExchangePeriodic(tmp); | ||||
|   }     | ||||
|   RD.close(); | ||||
| #endif | ||||
| } | ||||
| */ | ||||
| template<class aggregation> | ||||
| void SaveBasis(aggregation &Agg,std::string file) | ||||
| { | ||||
| #ifdef HAVE_LIME | ||||
|   emptyUserRecord record; | ||||
|   ScidacWriter WR(Agg.FineGrid->IsBoss()); | ||||
|   WR.open(file); | ||||
|   for(int b=0;b<Agg.subspace.size();b++){ | ||||
|     //WR.writeScidacFieldRecord(Agg.subspace[b],record,0,BINARYIO_LEXICOGRAPHIC); | ||||
|     WR.writeScidacFieldRecord(Agg.subspace[b],record,0,0); | ||||
|   } | ||||
|   WR.close(); | ||||
| #endif | ||||
| } | ||||
| template<class aggregation> | ||||
| void LoadBasis(aggregation &Agg, std::string file) | ||||
| { | ||||
| #ifdef HAVE_LIME | ||||
|   emptyUserRecord record; | ||||
|   ScidacReader RD ; | ||||
|   RD.open(file); | ||||
|   for(int b=0;b<Agg.subspace.size();b++){ | ||||
|     //    RD.readScidacFieldRecord(Agg.subspace[b],record,BINARYIO_LEXICOGRAPHIC); | ||||
|     RD.readScidacFieldRecord(Agg.subspace[b],record,0); | ||||
|   }     | ||||
|   RD.close(); | ||||
| #endif | ||||
| } | ||||
| template<class CoarseVector> | ||||
| void SaveEigenvectors(std::vector<RealD>            &eval, | ||||
| 		      std::vector<CoarseVector>     &evec, | ||||
| 		      std::string evec_file, | ||||
| 		      std::string eval_file) | ||||
| { | ||||
| #ifdef HAVE_LIME | ||||
|   emptyUserRecord record; | ||||
|   ScidacWriter WR(evec[0].Grid()->IsBoss()); | ||||
|   WR.open(evec_file); | ||||
|   for(int b=0;b<evec.size();b++){ | ||||
|     WR.writeScidacFieldRecord(evec[b],record,0,0); | ||||
|   } | ||||
|   WR.close(); | ||||
|   XmlWriter WRx(eval_file); | ||||
|   write(WRx,"evals",eval); | ||||
| #endif | ||||
| } | ||||
| template<class CoarseVector> | ||||
| void LoadEigenvectors(std::vector<RealD>            &eval, | ||||
| 		      std::vector<CoarseVector>     &evec, | ||||
| 		      std::string evec_file, | ||||
| 		      std::string eval_file) | ||||
| { | ||||
| #ifdef HAVE_LIME | ||||
|     XmlReader RDx(eval_file); | ||||
|     read(RDx,"evals",eval); | ||||
|     emptyUserRecord record; | ||||
|  | ||||
|     Grid::ScidacReader RD ; | ||||
|     RD.open(evec_file); | ||||
|     assert(evec.size()==eval.size()); | ||||
|     for(int k=0;k<eval.size();k++) { | ||||
|       RD.readScidacFieldRecord(evec[k],record); | ||||
|     } | ||||
|     RD.close(); | ||||
| #endif | ||||
| } | ||||
|  | ||||
| // Want Op in CoarsenOp to call MatPcDagMatPc | ||||
| template<class Field> | ||||
| class HermOpAdaptor : public LinearOperatorBase<Field> | ||||
| { | ||||
|   LinearOperatorBase<Field> & wrapped; | ||||
| public: | ||||
|   HermOpAdaptor(LinearOperatorBase<Field> &wrapme) : wrapped(wrapme)  {}; | ||||
|   void Op     (const Field &in, Field &out)   { wrapped.HermOp(in,out);  } | ||||
|   void HermOp(const Field &in, Field &out)    { wrapped.HermOp(in,out); } | ||||
|   void AdjOp     (const Field &in, Field &out){ wrapped.HermOp(in,out);  } | ||||
|   void OpDiag (const Field &in, Field &out)                  {    assert(0);  } | ||||
|   void OpDir  (const Field &in, Field &out,int dir,int disp) {    assert(0);  } | ||||
|   void OpDirAll  (const Field &in, std::vector<Field> &out)  {    assert(0);  }; | ||||
|   void HermOpAndNorm(const Field &in, Field &out,RealD &n1,RealD &n2){    assert(0);  } | ||||
| }; | ||||
|  | ||||
| template<class Field> class CGSmoother : public LinearFunction<Field> | ||||
| { | ||||
| public: | ||||
|   using LinearFunction<Field>::operator(); | ||||
|   typedef LinearOperatorBase<Field> FineOperator; | ||||
|   FineOperator   & _SmootherOperator; | ||||
|   int iters; | ||||
|   CGSmoother(int _iters, FineOperator &SmootherOperator) : | ||||
|     _SmootherOperator(SmootherOperator), | ||||
|     iters(_iters) | ||||
|   { | ||||
|     std::cout << GridLogMessage<<" Mirs smoother order "<<iters<<std::endl; | ||||
|   }; | ||||
|   void operator() (const Field &in, Field &out)  | ||||
|   { | ||||
|     ConjugateGradient<Field>  CG(0.0,iters,false); // non-convergence is fine in a smoother | ||||
|  | ||||
|     out=Zero(); | ||||
|  | ||||
|     CG(_SmootherOperator,in,out); | ||||
|   } | ||||
| }; | ||||
|  | ||||
|  | ||||
| int main (int argc, char ** argv) | ||||
| { | ||||
|   Grid_init(&argc,&argv); | ||||
|  | ||||
|   const int Ls=24; | ||||
|   const int nbasis = 62; | ||||
|   const int cb = 0 ; | ||||
|   RealD mass=0.00078; | ||||
|   RealD M5=1.8; | ||||
|   RealD b=1.5; | ||||
|   RealD c=0.5; | ||||
|  | ||||
|   GridCartesian         * UGrid   = SpaceTimeGrid::makeFourDimGrid(GridDefaultLatt(), | ||||
| 								   GridDefaultSimd(Nd,vComplex::Nsimd()), | ||||
| 								   GridDefaultMpi()); | ||||
|   GridRedBlackCartesian * UrbGrid = SpaceTimeGrid::makeFourDimRedBlackGrid(UGrid); | ||||
|   GridCartesian         * FGrid   = SpaceTimeGrid::makeFiveDimGrid(Ls,UGrid); | ||||
|   GridRedBlackCartesian * FrbGrid = SpaceTimeGrid::makeFiveDimRedBlackGrid(Ls,UGrid); | ||||
|  | ||||
|   // Construct a coarsened grid with a 4x4x6x4 blocking | ||||
|   Coordinate Block({4,4,6,4}); | ||||
|   Coordinate clatt = GridDefaultLatt(); | ||||
|   for(int d=0;d<clatt.size();d++){ | ||||
|     clatt[d] = clatt[d]/Block[d]; | ||||
|   } | ||||
|  | ||||
|   GridCartesian *Coarse4d =  SpaceTimeGrid::makeFourDimGrid(clatt, | ||||
| 							    GridDefaultSimd(Nd,vComplex::Nsimd()), | ||||
| 							    GridDefaultMpi()); | ||||
|   GridCartesian *Coarse5d =  SpaceTimeGrid::makeFiveDimGrid(1,Coarse4d); | ||||
|  | ||||
|   ///////////////////////// RNGs ///////////////////////////////// | ||||
|   std::vector<int> seeds4({1,2,3,4}); | ||||
|   std::vector<int> seeds5({5,6,7,8}); | ||||
|   std::vector<int> cseeds({5,6,7,8}); | ||||
|  | ||||
|   GridParallelRNG          RNG5(FGrid);   RNG5.SeedFixedIntegers(seeds5); | ||||
|   GridParallelRNG          RNG4(UGrid);   RNG4.SeedFixedIntegers(seeds4); | ||||
|   GridParallelRNG          CRNG(Coarse5d);CRNG.SeedFixedIntegers(cseeds); | ||||
|  | ||||
|   ///////////////////////// Configuration ///////////////////////////////// | ||||
|   LatticeGaugeField Umu(UGrid); | ||||
|  | ||||
|   FieldMetaData header; | ||||
|   std::string file("ckpoint_lat.1000"); | ||||
|   NerscIO::readConfiguration(Umu,header,file); | ||||
|  | ||||
|   //////////////////////// Fermion action ////////////////////////////////// | ||||
|   MobiusFermionD Ddwf(Umu,*FGrid,*FrbGrid,*UGrid,*UrbGrid,mass,M5,b,c); | ||||
|  | ||||
|   SchurDiagMooeeOperator<MobiusFermionD, LatticeFermion> HermOpEO(Ddwf); | ||||
|  | ||||
|   typedef HermOpAdaptor<LatticeFermionD> HermFineMatrix; | ||||
|   HermFineMatrix FineHermOp(HermOpEO); | ||||
|  | ||||
|   // Run power method on FineHermOp | ||||
|   // PowerMethod<LatticeFermion>       PM;   PM(HermOpEO,src); | ||||
|   //////////////////////////////////////////////////////////// | ||||
|   ///////////// Coarse basis and Little Dirac Operator /////// | ||||
|   //////////////////////////////////////////////////////////// | ||||
|   typedef GeneralCoarsenedMatrix<vSpinColourVector,vTComplex,nbasis> LittleDiracOperator; | ||||
|   typedef LittleDiracOperator::CoarseVector CoarseVector; | ||||
|  | ||||
|   NextToNextToNextToNearestStencilGeometry5D geom(Coarse5d); | ||||
|    | ||||
|   // Warning: This routine calls PVdagM.Op, not PVdagM.HermOp | ||||
|   typedef Aggregation<vSpinColourVector,vTComplex,nbasis> Subspace; | ||||
|   Subspace Aggregates(Coarse5d,FrbGrid,cb); | ||||
|  | ||||
|   //////////////////////////////////////////////////////////// | ||||
|   // Need to check about red-black grid coarsening | ||||
|   //////////////////////////////////////////////////////////// | ||||
|   //  LittleDiracOperator LittleDiracOp(geom,FrbGrid,Coarse5d); | ||||
|  | ||||
|   std::string subspace_file("/lustre/orion/phy157/proj-shared/phy157_dwf/paboyle/Subspace.phys48.new.62"); | ||||
|   std::string refine_file("/lustre/orion/phy157/proj-shared/phy157_dwf/paboyle/Refine.phys48.hdcg.62"); | ||||
|   std::string ldop_file("/lustre/orion/phy157/proj-shared/phy157_dwf/paboyle/LittleDiracOp.phys48.new.62"); | ||||
|   std::string evec_file("/lustre/orion/phy157/proj-shared/phy157_dwf/paboyle/evecs.scidac"); | ||||
|   std::string eval_file("/lustre/orion/phy157/proj-shared/phy157_dwf/paboyle/eval.xml"); | ||||
|   bool load_agg=false; | ||||
|   bool load_refine=false; | ||||
|   bool load_mat=false; | ||||
|   bool load_evec=false; | ||||
|   std::cout << GridLogMessage <<" Restoring from checkpoint "<<std::endl; | ||||
|   int refine=1; | ||||
|   if ( load_agg ) { | ||||
|     if ( !(refine) || (!load_refine) ) {  | ||||
|       LoadBasis(Aggregates,subspace_file); | ||||
|     } | ||||
|   } else { | ||||
|     //    Aggregates.CreateSubspaceMultishift(RNG5,HermOpEO, | ||||
|     //					0.0003,1.0e-5,2000); // Lo, tol, maxit | ||||
|     Aggregates.CreateSubspaceChebyshev(RNG5,HermOpEO,nbasis,95.,0.01,1500); // <== last run | ||||
|     //    Aggregates.CreateSubspaceChebyshevNew(RNG5,HermOpEO,95.); // 176 with refinement | ||||
|     //    Aggregates.CreateSubspaceChebyshev(RNG5,HermOpEO,nbasis,95.,0.001,3000,1500,200,0.0); // Attempt to resurrect | ||||
|     SaveBasis(Aggregates,subspace_file); | ||||
|   } | ||||
|  | ||||
|   std::cout << "**************************************"<<std::endl; | ||||
|   std::cout << "Building MultiRHS Coarse operator"<<std::endl; | ||||
|   std::cout << "**************************************"<<std::endl; | ||||
|   ConjugateGradient<CoarseVector>  coarseCG(4.0e-2,20000,true); | ||||
|      | ||||
|   const int nrhs=vComplex::Nsimd()*3; // 12 | ||||
|      | ||||
|   Coordinate mpi=GridDefaultMpi(); | ||||
|   Coordinate rhMpi ({1,1,mpi[0],mpi[1],mpi[2],mpi[3]}); | ||||
|   Coordinate rhLatt({nrhs,1,clatt[0],clatt[1],clatt[2],clatt[3]}); | ||||
|   Coordinate rhSimd({vComplex::Nsimd(),1, 1,1,1,1}); | ||||
|      | ||||
|   GridCartesian *CoarseMrhs = new GridCartesian(rhLatt,rhSimd,rhMpi);  | ||||
|   typedef MultiGeneralCoarsenedMatrix<vSpinColourVector,vTComplex,nbasis> MultiGeneralCoarsenedMatrix_t; | ||||
|   MultiGeneralCoarsenedMatrix_t mrhs(geom,CoarseMrhs); | ||||
|  | ||||
|   std::cout << "**************************************"<<std::endl; | ||||
|   std::cout << "         Coarse Lanczos               "<<std::endl; | ||||
|   std::cout << "**************************************"<<std::endl; | ||||
|  | ||||
|   typedef HermitianLinearOperator<MultiGeneralCoarsenedMatrix_t,CoarseVector> MrhsHermMatrix; | ||||
|   //  FunctionHermOp<CoarseVector> IRLOpCheby(IRLCheby,CoarseOp); | ||||
|   //  PlainHermOp<CoarseVector>    IRLOp    (CoarseOp); | ||||
|   Chebyshev<CoarseVector>      IRLCheby(0.006,42.0,301);  // 1 iter | ||||
|   MrhsHermMatrix MrhsCoarseOp     (mrhs); | ||||
|  | ||||
|   CoarseVector pm_src(CoarseMrhs); | ||||
|   pm_src = ComplexD(1.0); | ||||
|   PowerMethod<CoarseVector>       cPM;   cPM(MrhsCoarseOp,pm_src); | ||||
|  | ||||
|   int Nk=192; | ||||
|   int Nm=384; | ||||
|   int Nstop=Nk; | ||||
|   int Nconv_test_interval=1; | ||||
|    | ||||
|   ImplicitlyRestartedBlockLanczosCoarse<CoarseVector> IRL(MrhsCoarseOp, | ||||
| 							  Coarse5d, | ||||
| 							  CoarseMrhs, | ||||
| 							  nrhs, | ||||
| 							  IRLCheby, | ||||
| 							  Nstop, | ||||
| 							  Nconv_test_interval, | ||||
| 							  nrhs, | ||||
| 							  Nk, | ||||
| 							  Nm, | ||||
| 							  1e-5,10); | ||||
|  | ||||
|   int Nconv; | ||||
|   std::vector<RealD>            eval(Nm); | ||||
|   std::vector<CoarseVector>     evec(Nm,Coarse5d); | ||||
|   std::vector<CoarseVector>     c_src(nrhs,Coarse5d); | ||||
|  | ||||
|   /////////////////////// | ||||
|   // Deflation guesser object | ||||
|   /////////////////////// | ||||
|   MultiRHSDeflation<CoarseVector> MrhsGuesser; | ||||
|  | ||||
|   ////////////////////////////////////////// | ||||
|   // Block projector for coarse/fine | ||||
|   ////////////////////////////////////////// | ||||
|   MultiRHSBlockProject<LatticeFermionD> MrhsProjector; | ||||
|  | ||||
|   ////////////////////////// | ||||
|   // Extra HDCG parameters | ||||
|   ////////////////////////// | ||||
|   int maxit=3000; | ||||
|   ConjugateGradient<CoarseVector>  CG(5.0e-2,maxit,false); | ||||
|   RealD lo=2.0; | ||||
|   int ord = 7; | ||||
|  | ||||
|   DoNothingGuesser<CoarseVector> DoNothing; | ||||
|   HPDSolver<CoarseVector> HPDSolveMrhs(MrhsCoarseOp,CG,DoNothing); | ||||
|   HPDSolver<CoarseVector> HPDSolveMrhsRefine(MrhsCoarseOp,CG,DoNothing); | ||||
|  | ||||
|   ///////////////////////////////////////////////// | ||||
|   // Mirs smoother | ||||
|   ///////////////////////////////////////////////// | ||||
|   RealD MirsShift = lo; | ||||
|   ShiftedHermOpLinearOperator<LatticeFermionD> ShiftedFineHermOp(HermOpEO,MirsShift); | ||||
|   CGSmoother<LatticeFermionD> CGsmooth(ord,ShiftedFineHermOp) ; | ||||
|  | ||||
|    | ||||
|   if ( load_refine ) { | ||||
|     LoadBasis(Aggregates,refine_file); | ||||
|   } else { | ||||
| #if 1 | ||||
|     // Make a copy as subspace gets block orthogonalised | ||||
|     // HDCG used Pcg to refine | ||||
|     int Refineord = 11; | ||||
|     // Not as good as refining with shifted CG (169 iters), but 10% | ||||
|     // Datapoints | ||||
|     //- refining to 0.001 and shift 0.0 is expensive, but gets to 180 outer iterations | ||||
|     //- refining to 0.001 and shift 0.001 is cheap, but gets to 240 outer iterations   | ||||
|     //- refining to 0.0005 and shift 0.0005 is cheap, but gets to 230 outer iterations   | ||||
|     //- refining to 0.001 and shift 0.0001 220 iterations | ||||
|     //- refining to 0.001 and shift 0.00003  | ||||
|     RealD RefineShift = 0.00003; | ||||
|     RealD RefineTol   = 0.001; | ||||
|     ShiftedHermOpLinearOperator<LatticeFermionD> RefineFineHermOp(HermOpEO,RefineShift); | ||||
|      | ||||
|     mrhs.CoarsenOperator(RefineFineHermOp,Aggregates,Coarse5d); | ||||
|  | ||||
|     MrhsProjector.Allocate(nbasis,FrbGrid,Coarse5d); | ||||
|  | ||||
|     MrhsProjector.ImportBasis(Aggregates.subspace); | ||||
|  | ||||
|     // Lanczos with random start | ||||
|     for(int r=0;r<nrhs;r++){ | ||||
|       random(CRNG,c_src[r]); | ||||
|     } | ||||
|     IRL.calc(eval,evec,c_src,Nconv,LanczosType::irbl); | ||||
|  | ||||
|     MrhsGuesser.ImportEigenBasis(evec,eval); | ||||
|  | ||||
|     CGSmoother<LatticeFermionD> CGsmooth(Refineord,ShiftedFineHermOp) ; | ||||
|     TwoLevelADEF2mrhs<LatticeFermion,CoarseVector> | ||||
|       HDCGmrhsRefine(RefineTol, 500, | ||||
| 		     RefineFineHermOp, | ||||
| 		     CGsmooth, | ||||
| 		     HPDSolveMrhs,    // Used in M1 | ||||
| 		     HPDSolveMrhs,    // Used in Vstart | ||||
| 		     MrhsProjector, | ||||
| 		     MrhsGuesser, | ||||
| 		     CoarseMrhs); | ||||
|  | ||||
|     // Reload the first pass aggregates, because we orthogonalised them | ||||
|     LoadBasis(Aggregates,subspace_file); | ||||
|  | ||||
|     Aggregates.RefineSubspaceHDCG(HermOpEO, | ||||
| 				  HDCGmrhsRefine, | ||||
| 				  nrhs); | ||||
|  | ||||
| #else      | ||||
|     Aggregates.RefineSubspace(HermOpEO,0.001,1.0e-3,3000); // 172 iters | ||||
| #endif | ||||
|  | ||||
|     SaveBasis(Aggregates,refine_file); | ||||
|   } | ||||
|   Aggregates.Orthogonalise(); | ||||
|  | ||||
|   /* | ||||
|   if ( load_mat ) { | ||||
|     LoadOperator(LittleDiracOp,ldop_file); | ||||
|   } else { | ||||
|     LittleDiracOp.CoarsenOperator(FineHermOp,Aggregates); | ||||
|     SaveOperator(LittleDiracOp,ldop_file); | ||||
|   } | ||||
|   */ | ||||
|  | ||||
|   std::cout << "**************************************"<<std::endl; | ||||
|   std::cout << "Coarsen after refine"<<std::endl; | ||||
|   std::cout << "**************************************"<<std::endl; | ||||
|  | ||||
|   mrhs.CoarsenOperator(FineHermOp,Aggregates,Coarse5d); | ||||
|  | ||||
|   std::cout << "**************************************"<<std::endl; | ||||
|   std::cout << " Recompute coarse evecs ; use old evecs as source  "<<std::endl; | ||||
|   std::cout << "**************************************"<<std::endl; | ||||
|   evec.resize(Nm,Coarse5d); | ||||
|   eval.resize(Nm); | ||||
|   for(int r=0;r<nrhs;r++){ | ||||
|     //    c_src[r]=Zero(); | ||||
|     random(CRNG,c_src[r]); | ||||
|   } | ||||
|   for(int e=0;e<evec.size();e++){ | ||||
|     //    int r = e%nrhs; | ||||
|     //    c_src[r] = c_src[r]+evec[r];   // seeding the restart from the old evecs is currently disabled; a random start is used | ||||
|   } | ||||
|   IRL.calc(eval,evec,c_src,Nconv,LanczosType::irbl); | ||||
|  | ||||
|   std::cout << "**************************************"<<std::endl; | ||||
|   std::cout << " Reimport coarse evecs  "<<std::endl; | ||||
|   std::cout << "**************************************"<<std::endl; | ||||
|   MrhsGuesser.ImportEigenBasis(evec,eval); | ||||
|  | ||||
|   std::cout << "**************************************"<<std::endl; | ||||
|   std::cout << "Calling mRHS HDCG"<<std::endl; | ||||
|   std::cout << "**************************************"<<std::endl; | ||||
|   MrhsProjector.Allocate(nbasis,FrbGrid,Coarse5d); | ||||
|   MrhsProjector.ImportBasis(Aggregates.subspace); | ||||
|        | ||||
|   TwoLevelADEF2mrhs<LatticeFermion,CoarseVector> | ||||
|     HDCGmrhs(1.0e-8, 500, | ||||
| 	     FineHermOp, | ||||
| 	     CGsmooth, | ||||
| 	     HPDSolveMrhs,    // Used in M1 | ||||
| 	     HPDSolveMrhs,          // Used in Vstart | ||||
| 	     MrhsProjector, | ||||
| 	     MrhsGuesser, | ||||
| 	     CoarseMrhs); | ||||
|      | ||||
|   std::vector<LatticeFermionD> src_mrhs(nrhs,FrbGrid); | ||||
|   std::vector<LatticeFermionD> res_mrhs(nrhs,FrbGrid); | ||||
|    | ||||
|   for(int r=0;r<nrhs;r++){ | ||||
|     random(RNG5,src_mrhs[r]); | ||||
|     res_mrhs[r]=Zero(); | ||||
|   } | ||||
|    | ||||
|   HDCGmrhs(src_mrhs,res_mrhs); | ||||
|  | ||||
|   // Standard CG | ||||
| #if 0 | ||||
|   { | ||||
|   std::cout << "**************************************"<<std::endl; | ||||
|   std::cout << "Calling red black CG"<<std::endl; | ||||
|   std::cout << "**************************************"<<std::endl; | ||||
|        | ||||
|     LatticeFermion result(FrbGrid); result=Zero(); | ||||
|     LatticeFermion    src(FrbGrid); random(RNG5,src); | ||||
|     result=Zero(); | ||||
|  | ||||
|     CGfine(HermOpEO, src, result); | ||||
|   } | ||||
| #endif   | ||||
|   Grid_finalize(); | ||||
|   return 0; | ||||
| } | ||||
							
								
								
									
tests/debug/Test_general_coarse_hdcg_phys48_mixed.cc | 388 lines | Normal file
							| @@ -0,0 +1,388 @@ | ||||
| /************************************************************************************* | ||||
|  | ||||
|     Grid physics library, www.github.com/paboyle/Grid  | ||||
|  | ||||
|     Source file: ./tests/Test_general_coarse_hdcg.cc | ||||
|  | ||||
|     Copyright (C) 2023 | ||||
|  | ||||
| Author: Peter Boyle <pboyle@bnl.gov> | ||||
|  | ||||
|     This program is free software; you can redistribute it and/or modify | ||||
|     it under the terms of the GNU General Public License as published by | ||||
|     the Free Software Foundation; either version 2 of the License, or | ||||
|     (at your option) any later version. | ||||
|  | ||||
|     This program is distributed in the hope that it will be useful, | ||||
|     but WITHOUT ANY WARRANTY; without even the implied warranty of | ||||
|     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the | ||||
|     GNU General Public License for more details. | ||||
|  | ||||
|     You should have received a copy of the GNU General Public License along | ||||
|     with this program; if not, write to the Free Software Foundation, Inc., | ||||
|     51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. | ||||
|  | ||||
|     See the full license in the file "LICENSE" in the top level distribution directory | ||||
|     *************************************************************************************/ | ||||
|     /*  END LEGAL */ | ||||
| #include <Grid/Grid.h> | ||||
| #include <Grid/algorithms/iterative/ImplicitlyRestartedBlockLanczos.h> | ||||
| #include <Grid/algorithms/iterative/ImplicitlyRestartedBlockLanczosCoarse.h> | ||||
| #include <Grid/algorithms/iterative/AdefMrhs.h> | ||||
|  | ||||
| using namespace std; | ||||
| using namespace Grid; | ||||
|  | ||||
| template<class aggregation> | ||||
| void SaveBasis(aggregation &Agg,std::string file) | ||||
| { | ||||
| #ifdef HAVE_LIME | ||||
|   emptyUserRecord record; | ||||
|   ScidacWriter WR(Agg.FineGrid->IsBoss()); | ||||
|   WR.open(file); | ||||
|   for(int b=0;b<Agg.subspace.size();b++){ | ||||
|     WR.writeScidacFieldRecord(Agg.subspace[b],record,0,Grid::BinaryIO::BINARYIO_LEXICOGRAPHIC); | ||||
|     //    WR.writeScidacFieldRecord(Agg.subspace[b],record); | ||||
|   } | ||||
|   WR.close(); | ||||
| #endif | ||||
| } | ||||
| template<class aggregation> | ||||
| void LoadBasis(aggregation &Agg, std::string file) | ||||
| { | ||||
| #ifdef HAVE_LIME | ||||
|   emptyUserRecord record; | ||||
|   ScidacReader RD ; | ||||
|   RD.open(file); | ||||
|   for(int b=0;b<Agg.subspace.size();b++){ | ||||
|     RD.readScidacFieldRecord(Agg.subspace[b],record,Grid::BinaryIO::BINARYIO_LEXICOGRAPHIC); | ||||
|     //    RD.readScidacFieldRecord(Agg.subspace[b],record,0); | ||||
|   }     | ||||
|   RD.close(); | ||||
| #endif | ||||
| } | ||||
| template<class CoarseVector> | ||||
| void SaveEigenvectors(std::vector<RealD>            &eval, | ||||
| 		      std::vector<CoarseVector>     &evec, | ||||
| 		      std::string evec_file, | ||||
| 		      std::string eval_file) | ||||
| { | ||||
| #ifdef HAVE_LIME | ||||
|   emptyUserRecord record; | ||||
|   ScidacWriter WR(evec[0].Grid()->IsBoss()); | ||||
|   WR.open(evec_file); | ||||
|   for(int b=0;b<evec.size();b++){ | ||||
|     WR.writeScidacFieldRecord(evec[b],record,0,0); | ||||
|   } | ||||
|   WR.close(); | ||||
|   XmlWriter WRx(eval_file); | ||||
|   write(WRx,"evals",eval); | ||||
| #endif | ||||
| } | ||||
| template<class CoarseVector> | ||||
| void LoadEigenvectors(std::vector<RealD>            &eval, | ||||
| 		      std::vector<CoarseVector>     &evec, | ||||
| 		      std::string evec_file, | ||||
| 		      std::string eval_file) | ||||
| { | ||||
| #ifdef HAVE_LIME | ||||
|     XmlReader RDx(eval_file); | ||||
|     read(RDx,"evals",eval); | ||||
|     emptyUserRecord record; | ||||
|  | ||||
|     Grid::ScidacReader RD ; | ||||
|     RD.open(evec_file); | ||||
|     assert(evec.size()==eval.size()); | ||||
|     for(int k=0;k<eval.size();k++) { | ||||
|       RD.readScidacFieldRecord(evec[k],record); | ||||
|     } | ||||
|     RD.close(); | ||||
| #endif | ||||
| } | ||||
|  | ||||
| // Want Op in CoarsenOp to call MatPcDagMatPc | ||||
| template<class Field> | ||||
| class HermOpAdaptor : public LinearOperatorBase<Field> | ||||
| { | ||||
|   LinearOperatorBase<Field> & wrapped; | ||||
| public: | ||||
|   HermOpAdaptor(LinearOperatorBase<Field> &wrapme) : wrapped(wrapme)  {}; | ||||
|   void Op     (const Field &in, Field &out)   { wrapped.HermOp(in,out);  } | ||||
|   void HermOp(const Field &in, Field &out)    { wrapped.HermOp(in,out); } | ||||
|   void AdjOp     (const Field &in, Field &out){ wrapped.HermOp(in,out);  } | ||||
|   void OpDiag (const Field &in, Field &out)                  {    assert(0);  } | ||||
|   void OpDir  (const Field &in, Field &out,int dir,int disp) {    assert(0);  } | ||||
|   void OpDirAll  (const Field &in, std::vector<Field> &out)  {    assert(0);  }; | ||||
|   void HermOpAndNorm(const Field &in, Field &out,RealD &n1,RealD &n2){    assert(0);  } | ||||
| }; | ||||
|  | ||||
| template<class Field> class CGSmoother : public LinearFunction<Field> | ||||
| { | ||||
| public: | ||||
|   using LinearFunction<Field>::operator(); | ||||
|   typedef LinearOperatorBase<Field> FineOperator; | ||||
|   FineOperator   & _SmootherOperator; | ||||
|   int iters; | ||||
|   CGSmoother(int _iters, FineOperator &SmootherOperator) : | ||||
|     _SmootherOperator(SmootherOperator), | ||||
|     iters(_iters) | ||||
|   { | ||||
|     std::cout << GridLogMessage<<" Mirs smoother order "<<iters<<std::endl; | ||||
|   }; | ||||
|   void operator() (const Field &in, Field &out)  | ||||
|   { | ||||
|     ConjugateGradient<Field>  CG(0.0,iters,false); // non-convergence is fine in a smoother | ||||
|  | ||||
|     out=Zero(); | ||||
|  | ||||
|     CG(_SmootherOperator,in,out); | ||||
|   } | ||||
| }; | ||||
|  | ||||
|  | ||||
| int main (int argc, char ** argv) | ||||
| { | ||||
|   Grid_init(&argc,&argv); | ||||
|  | ||||
|   const int Ls=24; | ||||
|   const int nbasis = 60; | ||||
|   const int cb = 0 ; | ||||
|   RealD mass=0.00078; | ||||
|   RealD M5=1.8; | ||||
|   RealD b=1.5; | ||||
|   RealD c=0.5; | ||||
|  | ||||
|   GridCartesian         * UGrid   = SpaceTimeGrid::makeFourDimGrid(GridDefaultLatt(), | ||||
| 								   GridDefaultSimd(Nd,vComplex::Nsimd()), | ||||
| 								   GridDefaultMpi()); | ||||
|   GridRedBlackCartesian * UrbGrid = SpaceTimeGrid::makeFourDimRedBlackGrid(UGrid); | ||||
|   GridCartesian         * FGrid   = SpaceTimeGrid::makeFiveDimGrid(Ls,UGrid); | ||||
|   GridRedBlackCartesian * FrbGrid = SpaceTimeGrid::makeFiveDimRedBlackGrid(Ls,UGrid); | ||||
|  | ||||
|   // Construct a coarsened grid with 4^4 blocks | ||||
|   Coordinate Block({4,4,4,4}); | ||||
|   Coordinate clatt = GridDefaultLatt(); | ||||
|   for(int d=0;d<clatt.size();d++){ | ||||
|     clatt[d] = clatt[d]/Block[d]; | ||||
|   } | ||||
|  | ||||
|   GridCartesian *Coarse4d =  SpaceTimeGrid::makeFourDimGrid(clatt, | ||||
| 							    GridDefaultSimd(Nd,vComplex::Nsimd()), | ||||
| 							    GridDefaultMpi()); | ||||
|   GridCartesian *Coarse5d =  SpaceTimeGrid::makeFiveDimGrid(1,Coarse4d); | ||||
|  | ||||
|   ///////////////////////// RNGs ///////////////////////////////// | ||||
|   std::vector<int> seeds4({1,2,3,4}); | ||||
|   std::vector<int> seeds5({5,6,7,8}); | ||||
|   std::vector<int> cseeds({5,6,7,8}); | ||||
|  | ||||
|   GridParallelRNG          RNG5(FGrid);   RNG5.SeedFixedIntegers(seeds5); | ||||
|   GridParallelRNG          RNG4(UGrid);   RNG4.SeedFixedIntegers(seeds4); | ||||
|   GridParallelRNG          CRNG(Coarse5d);CRNG.SeedFixedIntegers(cseeds); | ||||
|  | ||||
|   ///////////////////////// Configuration ///////////////////////////////// | ||||
|   LatticeGaugeField Umu(UGrid); | ||||
|  | ||||
|   FieldMetaData header; | ||||
|   std::string file("ckpoint_lat.1000"); | ||||
|   NerscIO::readConfiguration(Umu,header,file); | ||||
|  | ||||
|   //////////////////////// Fermion action ////////////////////////////////// | ||||
|   MobiusFermionD Ddwf(Umu,*FGrid,*FrbGrid,*UGrid,*UrbGrid,mass,M5,b,c); | ||||
|  | ||||
|   SchurDiagMooeeOperator<MobiusFermionD, LatticeFermion> HermOpEO(Ddwf); | ||||
|  | ||||
|   typedef HermOpAdaptor<LatticeFermionD> HermFineMatrix; | ||||
|   HermFineMatrix FineHermOp(HermOpEO); | ||||
|  | ||||
|   //////////////////////////////////////////////////////////// | ||||
|   ///////////// Coarse basis and Little Dirac Operator /////// | ||||
|   //////////////////////////////////////////////////////////// | ||||
|   typedef GeneralCoarsenedMatrix<vSpinColourVector,vTComplex,nbasis> LittleDiracOperator; | ||||
|   typedef LittleDiracOperator::CoarseVector CoarseVector; | ||||
|  | ||||
|   NextToNextToNextToNearestStencilGeometry5D geom(Coarse5d); | ||||
|  | ||||
|   typedef Aggregation<vSpinColourVector,vTComplex,nbasis> Subspace; | ||||
|   Subspace Aggregates(Coarse5d,FrbGrid,cb); | ||||
|  | ||||
|   //////////////////////////////////////////////////////////// | ||||
|   // Need to check about red-black grid coarsening | ||||
|   //////////////////////////////////////////////////////////// | ||||
|  | ||||
|   std::string subspace_file("/lustre/orion/phy157/proj-shared/phy157_dwf/paboyle/Subspace.phys48.mixed.2500.60"); | ||||
|   //  std::string subspace_file("/lustre/orion/phy157/proj-shared/phy157_dwf/paboyle/Subspace.phys48.new.62"); | ||||
|   std::string refine_file("/lustre/orion/phy157/proj-shared/phy157_dwf/paboyle/Refine.phys48.mixed.2500.60"); | ||||
|   std::string ldop_file("/lustre/orion/phy157/proj-shared/phy157_dwf/paboyle/LittleDiracOp.phys48.mixed.60"); | ||||
|   std::string evec_file("/lustre/orion/phy157/proj-shared/phy157_dwf/paboyle/evecs.scidac"); | ||||
|   std::string eval_file("/lustre/orion/phy157/proj-shared/phy157_dwf/paboyle/eval.xml"); | ||||
|   bool load_agg=true; | ||||
|   bool load_refine=true; | ||||
|   bool load_mat=false; | ||||
|   bool load_evec=false; | ||||
|  | ||||
|   int refine=1; | ||||
|   if ( load_agg ) { | ||||
|     if ( !(refine) || (!load_refine) ) {  | ||||
|       LoadBasis(Aggregates,subspace_file); | ||||
|     } | ||||
|   } else { | ||||
|     //    Aggregates.CreateSubspaceMultishift(RNG5,HermOpEO, | ||||
|     //    					0.0003,1.0e-5,2000); // Lo, tol, maxit | ||||
|     //    Aggregates.CreateSubspaceChebyshev(RNG5,HermOpEO,nbasis,95.,0.01,1500);// <== last run | ||||
|     Aggregates.CreateSubspaceChebyshevNew(RNG5,HermOpEO,95.);  | ||||
|     SaveBasis(Aggregates,subspace_file); | ||||
|   } | ||||
|  | ||||
|   std::cout << "**************************************"<<std::endl; | ||||
|   std::cout << "Building MultiRHS Coarse operator"<<std::endl; | ||||
|   std::cout << "**************************************"<<std::endl; | ||||
|   ConjugateGradient<CoarseVector>  coarseCG(4.0e-2,20000,true); | ||||
|      | ||||
|   const int nrhs=12; | ||||
|      | ||||
|   Coordinate mpi=GridDefaultMpi(); | ||||
|   Coordinate rhMpi ({1,1,mpi[0],mpi[1],mpi[2],mpi[3]}); | ||||
|   Coordinate rhLatt({nrhs,1,clatt[0],clatt[1],clatt[2],clatt[3]}); | ||||
|   Coordinate rhSimd({vComplex::Nsimd(),1, 1,1,1,1}); | ||||
|      | ||||
|   GridCartesian *CoarseMrhs = new GridCartesian(rhLatt,rhSimd,rhMpi);  | ||||
|   typedef MultiGeneralCoarsenedMatrix<vSpinColourVector,vTComplex,nbasis> MultiGeneralCoarsenedMatrix_t; | ||||
|   MultiGeneralCoarsenedMatrix_t mrhs(geom,CoarseMrhs); | ||||
|  | ||||
|   std::cout << "**************************************"<<std::endl; | ||||
|   std::cout << "         Coarse Lanczos               "<<std::endl; | ||||
|   std::cout << "**************************************"<<std::endl; | ||||
|  | ||||
|   typedef HermitianLinearOperator<MultiGeneralCoarsenedMatrix_t,CoarseVector> MrhsHermMatrix; | ||||
|   Chebyshev<CoarseVector>      IRLCheby(0.005,42.0,301);  // 1 iter | ||||
|   MrhsHermMatrix MrhsCoarseOp     (mrhs); | ||||
|  | ||||
|   CoarseVector pm_src(CoarseMrhs); | ||||
|   pm_src = ComplexD(1.0); | ||||
|   PowerMethod<CoarseVector>       cPM;   cPM(MrhsCoarseOp,pm_src); | ||||
|  | ||||
|   int Nk=192; | ||||
|   int Nm=384; | ||||
|   int Nstop=Nk; | ||||
|   int Nconv_test_interval=1; | ||||
|    | ||||
|   ImplicitlyRestartedBlockLanczosCoarse<CoarseVector> IRL(MrhsCoarseOp, | ||||
| 							  Coarse5d, | ||||
| 							  CoarseMrhs, | ||||
| 							  nrhs, | ||||
| 							  IRLCheby, | ||||
| 							  Nstop, | ||||
| 							  Nconv_test_interval, | ||||
| 							  nrhs, | ||||
| 							  Nk, | ||||
| 							  Nm, | ||||
| 							  1e-5,10); | ||||
|  | ||||
|   int Nconv; | ||||
|   std::vector<RealD>            eval(Nm); | ||||
|   std::vector<CoarseVector>     evec(Nm,Coarse5d); | ||||
|   std::vector<CoarseVector>     c_src(nrhs,Coarse5d); | ||||
|  | ||||
|   /////////////////////// | ||||
|   // Deflation guesser object | ||||
|   /////////////////////// | ||||
|   MultiRHSDeflation<CoarseVector> MrhsGuesser; | ||||
|  | ||||
|   ////////////////////////////////////////// | ||||
|   // Block projector for coarse/fine | ||||
|   ////////////////////////////////////////// | ||||
|   MultiRHSBlockProject<LatticeFermionD> MrhsProjector; | ||||
|  | ||||
|   ////////////////////////// | ||||
|   // Extra HDCG parameters | ||||
|   ////////////////////////// | ||||
|   int maxit=3000; | ||||
|   ConjugateGradient<CoarseVector>  CG(5.0e-2,maxit,false); | ||||
|   RealD lo=2.0; | ||||
|   int ord = 7; | ||||
|  | ||||
|   DoNothingGuesser<CoarseVector> DoNothing; | ||||
|   HPDSolver<CoarseVector> HPDSolveMrhs(MrhsCoarseOp,CG,DoNothing); | ||||
|   HPDSolver<CoarseVector> HPDSolveMrhsRefine(MrhsCoarseOp,CG,DoNothing); | ||||
|  | ||||
|   ///////////////////////////////////////////////// | ||||
|   // Mirs smoother | ||||
|   ///////////////////////////////////////////////// | ||||
|   RealD MirsShift = lo; | ||||
|   ShiftedHermOpLinearOperator<LatticeFermionD> ShiftedFineHermOp(HermOpEO,MirsShift); | ||||
|   CGSmoother<LatticeFermionD> CGsmooth(ord,ShiftedFineHermOp) ; | ||||
|  | ||||
|    | ||||
|   if ( load_refine ) { | ||||
|     LoadBasis(Aggregates,refine_file); | ||||
|   } else { | ||||
|     Aggregates.RefineSubspace(HermOpEO,0.001,1.0e-3,3000); // 172 iters | ||||
|     SaveBasis(Aggregates,refine_file); | ||||
|   } | ||||
|   Aggregates.Orthogonalise(); | ||||
|  | ||||
|   std::cout << "**************************************"<<std::endl; | ||||
|   std::cout << "Coarsen after refine"<<std::endl; | ||||
|   std::cout << "**************************************"<<std::endl; | ||||
|   mrhs.CoarsenOperator(FineHermOp,Aggregates,Coarse5d); | ||||
|  | ||||
|   std::cout << "**************************************"<<std::endl; | ||||
|   std::cout << " Recompute coarse evecs  "<<std::endl; | ||||
|   std::cout << "**************************************"<<std::endl; | ||||
|   evec.resize(Nm,Coarse5d); | ||||
|   eval.resize(Nm); | ||||
|   for(int r=0;r<nrhs;r++){ | ||||
|     random(CRNG,c_src[r]); | ||||
|   } | ||||
|   IRL.calc(eval,evec,c_src,Nconv,LanczosType::irbl); | ||||
|  | ||||
|   std::cout << "**************************************"<<std::endl; | ||||
|   std::cout << " Reimport coarse evecs  "<<std::endl; | ||||
|   std::cout << "**************************************"<<std::endl; | ||||
|   MrhsGuesser.ImportEigenBasis(evec,eval); | ||||
|  | ||||
|   std::cout << "**************************************"<<std::endl; | ||||
|   std::cout << "Calling mRHS HDCG"<<std::endl; | ||||
|   std::cout << "**************************************"<<std::endl; | ||||
|   MrhsProjector.Allocate(nbasis,FrbGrid,Coarse5d); | ||||
|   MrhsProjector.ImportBasis(Aggregates.subspace); | ||||
|        | ||||
|   TwoLevelADEF2mrhs<LatticeFermion,CoarseVector> | ||||
|     HDCGmrhs(1.0e-8, 500, | ||||
| 	     FineHermOp, | ||||
| 	     CGsmooth, | ||||
| 	     HPDSolveMrhs,    // Used in M1 | ||||
| 	     HPDSolveMrhs,          // Used in Vstart | ||||
| 	     MrhsProjector, | ||||
| 	     MrhsGuesser, | ||||
| 	     CoarseMrhs); | ||||
|      | ||||
|   std::vector<LatticeFermionD> src_mrhs(nrhs,FrbGrid); | ||||
|   std::vector<LatticeFermionD> res_mrhs(nrhs,FrbGrid); | ||||
|    | ||||
|   for(int r=0;r<nrhs;r++){ | ||||
|     random(RNG5,src_mrhs[r]); | ||||
|     res_mrhs[r]=Zero(); | ||||
|   } | ||||
|    | ||||
|   HDCGmrhs(src_mrhs,res_mrhs); | ||||
|  | ||||
|   // Standard CG | ||||
| #if 1 | ||||
|   { | ||||
|   std::cout << "**************************************"<<std::endl; | ||||
|   std::cout << "Calling red black CG"<<std::endl; | ||||
|   std::cout << "**************************************"<<std::endl; | ||||
|        | ||||
|     LatticeFermion result(FrbGrid); result=Zero(); | ||||
|     LatticeFermion    src(FrbGrid); random(RNG5,src); | ||||
|  | ||||
|     ConjugateGradient<LatticeFermionD>  CGfine(1.0e-8,30000,false); | ||||
|     CGfine(HermOpEO, src, result); | ||||
|   } | ||||
| #endif   | ||||
|   Grid_finalize(); | ||||
|   return 0; | ||||
| } | ||||
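Aside: the coarse grid and the multi-RHS layout in the test above come from simple integer arithmetic: clatt[d] = latt[d]/Block[d], and the mRHS coarse grid prepends an RHS dimension of size nrhs and a length-1 fifth dimension. A minimal standalone sketch of that arithmetic follows, in plain C++ with no Grid dependency; the 48^3 x 96 fine lattice is an assumed example, since in the test the size comes from the command line via GridDefaultLatt().

#include <cstdio>
#include <vector>

int main() {
  // Assumed example fine lattice; the test reads the real size from the command line.
  std::vector<int> latt  = {48, 48, 48, 96};
  std::vector<int> Block = { 4,  4,  4,  4};   // coarsening block, as in the test
  const int nrhs = 12;                         // number of right-hand sides

  std::vector<int> clatt(4);
  for (int d = 0; d < 4; d++) clatt[d] = latt[d] / Block[d];

  // The mRHS coarse grid prepends the RHS index and a length-1 fifth
  // dimension, so one coarse solve handles nrhs vectors at once.
  std::vector<int> rhLatt = {nrhs, 1, clatt[0], clatt[1], clatt[2], clatt[3]};

  std::printf("coarse lattice : %d %d %d %d\n",
              clatt[0], clatt[1], clatt[2], clatt[3]);
  std::printf("mRHS layout    :");
  for (int x : rhLatt) std::printf(" %d", x);
  std::printf("\n");
  return 0;
}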
							
								
								
									
tests/debug/Test_general_coarse_hdcg_phys96_mixed.cc (new file, 387 lines)
							| @@ -0,0 +1,387 @@ | ||||
| /************************************************************************************* | ||||
|  | ||||
|     Grid physics library, www.github.com/paboyle/Grid  | ||||
|  | ||||
|     Source file: ./tests/Test_general_coarse_hdcg.cc | ||||
|  | ||||
|     Copyright (C) 2023 | ||||
|  | ||||
| Author: Peter Boyle <pboyle@bnl.gov> | ||||
|  | ||||
|     This program is free software; you can redistribute it and/or modify | ||||
|     it under the terms of the GNU General Public License as published by | ||||
|     the Free Software Foundation; either version 2 of the License, or | ||||
|     (at your option) any later version. | ||||
|  | ||||
|     This program is distributed in the hope that it will be useful, | ||||
|     but WITHOUT ANY WARRANTY; without even the implied warranty of | ||||
|     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the | ||||
|     GNU General Public License for more details. | ||||
|  | ||||
|     You should have received a copy of the GNU General Public License along | ||||
|     with this program; if not, write to the Free Software Foundation, Inc., | ||||
|     51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. | ||||
|  | ||||
|     See the full license in the file "LICENSE" in the top level distribution directory | ||||
|     *************************************************************************************/ | ||||
|     /*  END LEGAL */ | ||||
| #include <Grid/Grid.h> | ||||
| #include <Grid/algorithms/iterative/ImplicitlyRestartedBlockLanczos.h> | ||||
| #include <Grid/algorithms/iterative/ImplicitlyRestartedBlockLanczosCoarse.h> | ||||
| #include <Grid/algorithms/iterative/AdefMrhs.h> | ||||
|  | ||||
| using namespace std; | ||||
| using namespace Grid; | ||||
|  | ||||
| template<class aggregation> | ||||
| void SaveBasis(aggregation &Agg,std::string file) | ||||
| { | ||||
| #ifdef HAVE_LIME | ||||
|   emptyUserRecord record; | ||||
|   ScidacWriter WR(Agg.FineGrid->IsBoss()); | ||||
|   WR.open(file); | ||||
|   for(int b=0;b<Agg.subspace.size();b++){ | ||||
|     WR.writeScidacFieldRecord(Agg.subspace[b],record,0,Grid::BinaryIO::BINARYIO_LEXICOGRAPHIC); | ||||
|     //    WR.writeScidacFieldRecord(Agg.subspace[b],record); | ||||
|   } | ||||
|   WR.close(); | ||||
| #endif | ||||
| } | ||||
| template<class aggregation> | ||||
| void LoadBasis(aggregation &Agg, std::string file) | ||||
| { | ||||
| #ifdef HAVE_LIME | ||||
|   emptyUserRecord record; | ||||
|   ScidacReader RD ; | ||||
|   RD.open(file); | ||||
|   for(int b=0;b<Agg.subspace.size();b++){ | ||||
|     RD.readScidacFieldRecord(Agg.subspace[b],record,Grid::BinaryIO::BINARYIO_LEXICOGRAPHIC); | ||||
|     //    RD.readScidacFieldRecord(Agg.subspace[b],record,0); | ||||
|   }     | ||||
|   RD.close(); | ||||
| #endif | ||||
| } | ||||
| template<class CoarseVector> | ||||
| void SaveEigenvectors(std::vector<RealD>            &eval, | ||||
| 		      std::vector<CoarseVector>     &evec, | ||||
| 		      std::string evec_file, | ||||
| 		      std::string eval_file) | ||||
| { | ||||
| #ifdef HAVE_LIME | ||||
|   emptyUserRecord record; | ||||
|   ScidacWriter WR(evec[0].Grid()->IsBoss()); | ||||
|   WR.open(evec_file); | ||||
|   for(int b=0;b<evec.size();b++){ | ||||
|     WR.writeScidacFieldRecord(evec[b],record,0,0); | ||||
|   } | ||||
|   WR.close(); | ||||
|   XmlWriter WRx(eval_file); | ||||
|   write(WRx,"evals",eval); | ||||
| #endif | ||||
| } | ||||
| template<class CoarseVector> | ||||
| void LoadEigenvectors(std::vector<RealD>            &eval, | ||||
| 		      std::vector<CoarseVector>     &evec, | ||||
| 		      std::string evec_file, | ||||
| 		      std::string eval_file) | ||||
| { | ||||
| #ifdef HAVE_LIME | ||||
|     XmlReader RDx(eval_file); | ||||
|     read(RDx,"evals",eval); | ||||
|     emptyUserRecord record; | ||||
|  | ||||
|     Grid::ScidacReader RD ; | ||||
|     RD.open(evec_file); | ||||
|     assert(evec.size()==eval.size()); | ||||
|     for(int k=0;k<eval.size();k++) { | ||||
|       RD.readScidacFieldRecord(evec[k],record); | ||||
|     } | ||||
|     RD.close(); | ||||
| #endif | ||||
| } | ||||
|  | ||||
| // Want Op in CoarsenOp to call MatPcDagMatPc | ||||
| template<class Field> | ||||
| class HermOpAdaptor : public LinearOperatorBase<Field> | ||||
| { | ||||
|   LinearOperatorBase<Field> & wrapped; | ||||
| public: | ||||
|   HermOpAdaptor(LinearOperatorBase<Field> &wrapme) : wrapped(wrapme)  {}; | ||||
|   void Op     (const Field &in, Field &out)   { wrapped.HermOp(in,out);  } | ||||
|   void HermOp(const Field &in, Field &out)    { wrapped.HermOp(in,out); } | ||||
|   void AdjOp     (const Field &in, Field &out){ wrapped.HermOp(in,out);  } | ||||
|   void OpDiag (const Field &in, Field &out)                  {    assert(0);  } | ||||
|   void OpDir  (const Field &in, Field &out,int dir,int disp) {    assert(0);  } | ||||
|   void OpDirAll  (const Field &in, std::vector<Field> &out)  {    assert(0);  }; | ||||
|   void HermOpAndNorm(const Field &in, Field &out,RealD &n1,RealD &n2){    assert(0);  } | ||||
| }; | ||||
|  | ||||
| template<class Field> class CGSmoother : public LinearFunction<Field> | ||||
| { | ||||
| public: | ||||
|   using LinearFunction<Field>::operator(); | ||||
|   typedef LinearOperatorBase<Field> FineOperator; | ||||
|   FineOperator   & _SmootherOperator; | ||||
|   int iters; | ||||
|   CGSmoother(int _iters, FineOperator &SmootherOperator) : | ||||
|     _SmootherOperator(SmootherOperator), | ||||
|     iters(_iters) | ||||
|   { | ||||
|     std::cout << GridLogMessage<<" Mirs smoother order "<<iters<<std::endl; | ||||
|   }; | ||||
|   void operator() (const Field &in, Field &out)  | ||||
|   { | ||||
|     ConjugateGradient<Field>  CG(0.0,iters,false); // non-convergence is fine in a smoother | ||||
|  | ||||
|     out=Zero(); | ||||
|  | ||||
|     CG(_SmootherOperator,in,out); | ||||
|   } | ||||
| }; | ||||
|  | ||||
|  | ||||
| int main (int argc, char ** argv) | ||||
| { | ||||
|   Grid_init(&argc,&argv); | ||||
|  | ||||
|   const int Ls=24; | ||||
|   const int nbasis = 60; | ||||
|   const int cb = 0 ; | ||||
|   RealD mass=0.00078; | ||||
|   RealD M5=1.8; | ||||
|   RealD b=1.5; | ||||
|   RealD c=0.5; | ||||
|  | ||||
|   GridCartesian         * UGrid   = SpaceTimeGrid::makeFourDimGrid(GridDefaultLatt(), | ||||
| 								   GridDefaultSimd(Nd,vComplex::Nsimd()), | ||||
| 								   GridDefaultMpi()); | ||||
|   GridRedBlackCartesian * UrbGrid = SpaceTimeGrid::makeFourDimRedBlackGrid(UGrid); | ||||
|   GridCartesian         * FGrid   = SpaceTimeGrid::makeFiveDimGrid(Ls,UGrid); | ||||
|   GridRedBlackCartesian * FrbGrid = SpaceTimeGrid::makeFiveDimRedBlackGrid(Ls,UGrid); | ||||
|  | ||||
|   // Construct a coarsened grid with 4x4x6x6 blocks | ||||
|   Coordinate Block({4,4,6,6}); | ||||
|   Coordinate clatt = GridDefaultLatt(); | ||||
|   for(int d=0;d<clatt.size();d++){ | ||||
|     clatt[d] = clatt[d]/Block[d]; | ||||
|   } | ||||
|  | ||||
|   GridCartesian *Coarse4d =  SpaceTimeGrid::makeFourDimGrid(clatt, | ||||
| 							    GridDefaultSimd(Nd,vComplex::Nsimd()), | ||||
| 							    GridDefaultMpi()); | ||||
|   GridCartesian *Coarse5d =  SpaceTimeGrid::makeFiveDimGrid(1,Coarse4d); | ||||
|  | ||||
|   ///////////////////////// RNGs ///////////////////////////////// | ||||
|   std::vector<int> seeds4({1,2,3,4}); | ||||
|   std::vector<int> seeds5({5,6,7,8}); | ||||
|   std::vector<int> cseeds({5,6,7,8}); | ||||
|  | ||||
|   GridParallelRNG          RNG5(FGrid);   RNG5.SeedFixedIntegers(seeds5); | ||||
|   GridParallelRNG          RNG4(UGrid);   RNG4.SeedFixedIntegers(seeds4); | ||||
|   GridParallelRNG          CRNG(Coarse5d);CRNG.SeedFixedIntegers(cseeds); | ||||
|  | ||||
|   ///////////////////////// Configuration ///////////////////////////////// | ||||
|   LatticeGaugeField Umu(UGrid); | ||||
|  | ||||
|   FieldMetaData header; | ||||
|   std::string file("/lustre/orion/phy157/proj-shared/phy157_dwf/lehner/ensemble-Ha/ckpoint_lat.2250"); | ||||
|   NerscIO::readConfiguration(Umu,header,file); | ||||
|  | ||||
|   //////////////////////// Fermion action ////////////////////////////////// | ||||
|   MobiusFermionD Ddwf(Umu,*FGrid,*FrbGrid,*UGrid,*UrbGrid,mass,M5,b,c); | ||||
|  | ||||
|   SchurDiagMooeeOperator<MobiusFermionD, LatticeFermion> HermOpEO(Ddwf); | ||||
|  | ||||
|   typedef HermOpAdaptor<LatticeFermionD> HermFineMatrix; | ||||
|   HermFineMatrix FineHermOp(HermOpEO); | ||||
|  | ||||
|   //////////////////////////////////////////////////////////// | ||||
|   ///////////// Coarse basis and Little Dirac Operator /////// | ||||
|   //////////////////////////////////////////////////////////// | ||||
|   typedef GeneralCoarsenedMatrix<vSpinColourVector,vTComplex,nbasis> LittleDiracOperator; | ||||
|   typedef LittleDiracOperator::CoarseVector CoarseVector; | ||||
|  | ||||
|   NextToNextToNextToNearestStencilGeometry5D geom(Coarse5d); | ||||
|  | ||||
|   typedef Aggregation<vSpinColourVector,vTComplex,nbasis> Subspace; | ||||
|   Subspace Aggregates(Coarse5d,FrbGrid,cb); | ||||
|  | ||||
|   //////////////////////////////////////////////////////////// | ||||
|   // Need to check about red-black grid coarsening | ||||
|   //////////////////////////////////////////////////////////// | ||||
|  | ||||
|   //  std::string subspace_file("/lustre/orion/phy157/proj-shared/phy157_dwf/paboyle/Subspace.phys96.mixed.2500.60"); | ||||
|   std::string subspace_file("/lustre/orion/phy157/proj-shared/phy157_dwf/paboyle/Refine.phys96.mixed.2500.60"); | ||||
|   std::string refine_file("/lustre/orion/phy157/proj-shared/phy157_dwf/paboyle/Refine.phys96.mixed.2500.60_v2"); | ||||
|   std::string ldop_file("/lustre/orion/phy157/proj-shared/phy157_dwf/paboyle/LittleDiracOp.phys96.mixed.60"); | ||||
|   std::string evec_file("/lustre/orion/phy157/proj-shared/phy157_dwf/paboyle/evecs.scidac"); | ||||
|   std::string eval_file("/lustre/orion/phy157/proj-shared/phy157_dwf/paboyle/eval.xml"); | ||||
|   bool load_agg=true; | ||||
|   bool load_refine=false; | ||||
|   bool load_mat=false; | ||||
|   bool load_evec=false; | ||||
|  | ||||
|   int refine=1; | ||||
|   if ( load_agg ) { | ||||
|     if ( !(refine) || (!load_refine) ) {  | ||||
|       LoadBasis(Aggregates,subspace_file); | ||||
|     } | ||||
|   } else { | ||||
|     Aggregates.CreateSubspaceChebyshevNew(RNG5,HermOpEO,95.);  | ||||
|     SaveBasis(Aggregates,subspace_file); | ||||
|   } | ||||
|  | ||||
|   if ( load_refine ) { | ||||
|     std::cout << " Load Refine "<< refine_file <<std::endl; | ||||
|     LoadBasis(Aggregates,refine_file); | ||||
|   } else { | ||||
|     Aggregates.RefineSubspace(HermOpEO,0.001,3.0e-4,3000); // 172 iters | ||||
|     //    Aggregates.RefineSubspace(HermOpEO,0.001,3.0e-4,2500); // 172 iters | ||||
|     SaveBasis(Aggregates,refine_file); | ||||
|   } | ||||
|   Aggregates.Orthogonalise(); | ||||
|    | ||||
|   std::cout << "**************************************"<<std::endl; | ||||
|   std::cout << "Building MultiRHS Coarse operator"<<std::endl; | ||||
|   std::cout << "**************************************"<<std::endl; | ||||
|      | ||||
|   const int nrhs=12; | ||||
|      | ||||
|   Coordinate mpi=GridDefaultMpi(); | ||||
|   Coordinate rhMpi ({1,1,mpi[0],mpi[1],mpi[2],mpi[3]}); | ||||
|   Coordinate rhLatt({nrhs,1,clatt[0],clatt[1],clatt[2],clatt[3]}); | ||||
|   Coordinate rhSimd({vComplex::Nsimd(),1, 1,1,1,1}); | ||||
|      | ||||
|   GridCartesian *CoarseMrhs = new GridCartesian(rhLatt,rhSimd,rhMpi);  | ||||
|   typedef MultiGeneralCoarsenedMatrix<vSpinColourVector,vTComplex,nbasis> MultiGeneralCoarsenedMatrix_t; | ||||
|   MultiGeneralCoarsenedMatrix_t mrhs(geom,CoarseMrhs); | ||||
|  | ||||
|   /////////////////////// | ||||
|   // Deflation guesser object | ||||
|   /////////////////////// | ||||
|   MultiRHSDeflation<CoarseVector> MrhsGuesser; | ||||
|  | ||||
|   ////////////////////////////////////////// | ||||
|   // Block projector for coarse/fine | ||||
|   ////////////////////////////////////////// | ||||
|   MultiRHSBlockProject<LatticeFermionD> MrhsProjector; | ||||
|  | ||||
|   std::cout << "**************************************"<<std::endl; | ||||
|   std::cout << "Coarsen after refine"<<std::endl; | ||||
|   std::cout << "**************************************"<<std::endl; | ||||
|   mrhs.CoarsenOperator(FineHermOp,Aggregates,Coarse5d); | ||||
|  | ||||
|   std::cout << "**************************************"<<std::endl; | ||||
|   std::cout << "         Coarse Lanczos               "<<std::endl; | ||||
|   std::cout << "**************************************"<<std::endl; | ||||
|  | ||||
|   typedef HermitianLinearOperator<MultiGeneralCoarsenedMatrix_t,CoarseVector> MrhsHermMatrix; | ||||
|   Chebyshev<CoarseVector>      IRLCheby(0.0012,42.0,301);  // 1 iter | ||||
|   MrhsHermMatrix MrhsCoarseOp     (mrhs); | ||||
|  | ||||
|   CoarseVector pm_src(CoarseMrhs); | ||||
|   pm_src = ComplexD(1.0); | ||||
|   PowerMethod<CoarseVector>       cPM;   cPM(MrhsCoarseOp,pm_src); | ||||
|  | ||||
|   int Nk=nrhs*30; | ||||
|   //  int Nk=nrhs*80; | ||||
|   int Nm=Nk*4; | ||||
|   int Nstop=Nk; | ||||
|   int Nconv_test_interval=1; | ||||
|    | ||||
|   ImplicitlyRestartedBlockLanczosCoarse<CoarseVector> IRL(MrhsCoarseOp, | ||||
| 							  Coarse5d, | ||||
| 							  CoarseMrhs, | ||||
| 							  nrhs, | ||||
| 							  IRLCheby, | ||||
| 							  Nstop, | ||||
| 							  Nconv_test_interval, | ||||
| 							  nrhs, | ||||
| 							  Nk, | ||||
| 							  Nm, | ||||
| 							  1e-4,20); | ||||
|  | ||||
|   std::vector<RealD>            eval(Nm); | ||||
|   std::vector<CoarseVector>     evec(Nm,Coarse5d); | ||||
|   std::vector<CoarseVector>     c_src(nrhs,Coarse5d); | ||||
|  | ||||
|   std::cout << "**************************************"<<std::endl; | ||||
|   std::cout << " Recompute coarse evecs  "<<std::endl; | ||||
|   std::cout << "**************************************"<<std::endl; | ||||
|   evec.resize(Nm,Coarse5d); | ||||
|   eval.resize(Nm); | ||||
|   for(int r=0;r<nrhs;r++){ | ||||
|     random(CRNG,c_src[r]); | ||||
|   } | ||||
|   int Nconv; | ||||
|   IRL.calc(eval,evec,c_src,Nconv,LanczosType::rbl); | ||||
|   Nconv = eval.size(); | ||||
|   std::cout << "**************************************"<<std::endl; | ||||
|   std::cout << " import coarse evecs  "<<std::endl; | ||||
|   std::cout << "**************************************"<<std::endl; | ||||
|   MrhsGuesser.ImportEigenBasis(evec,eval); | ||||
|  | ||||
|   std::cout << "**************************************"<<std::endl; | ||||
|   std::cout << "Calling mRHS HDCG"<<std::endl; | ||||
|   std::cout << "**************************************"<<std::endl; | ||||
|   MrhsProjector.Allocate(nbasis,FrbGrid,Coarse5d); | ||||
|   MrhsProjector.ImportBasis(Aggregates.subspace); | ||||
|  | ||||
|   ////////////////////////// | ||||
|   // Extra HDCG parameters | ||||
|   ////////////////////////// | ||||
|   int maxit=3000; | ||||
|   ConjugateGradient<CoarseVector>  CG(5.0e-2,maxit,false); | ||||
|   RealD lo=2.0; | ||||
|   int ord = 7; | ||||
|  | ||||
|   DoNothingGuesser<CoarseVector> DoNothing; | ||||
|   HPDSolver<CoarseVector> HPDSolveMrhs(MrhsCoarseOp,CG,DoNothing); | ||||
|   HPDSolver<CoarseVector> HPDSolveMrhsRefine(MrhsCoarseOp,CG,DoNothing); | ||||
|  | ||||
|   ///////////////////////////////////////////////// | ||||
|   // Mirs smoother | ||||
|   ///////////////////////////////////////////////// | ||||
|   RealD MirsShift = lo; | ||||
|   ShiftedHermOpLinearOperator<LatticeFermionD> ShiftedFineHermOp(HermOpEO,MirsShift); | ||||
|   CGSmoother<LatticeFermionD> CGsmooth(ord,ShiftedFineHermOp) ; | ||||
|    | ||||
|    | ||||
|   TwoLevelADEF2mrhs<LatticeFermion,CoarseVector> | ||||
|     HDCGmrhs(1.0e-8, 500, | ||||
| 	     FineHermOp, | ||||
| 	     CGsmooth, | ||||
| 	     HPDSolveMrhs,    // Used in M1 | ||||
| 	     HPDSolveMrhs,          // Used in Vstart | ||||
| 	     MrhsProjector, | ||||
| 	     MrhsGuesser, | ||||
| 	     CoarseMrhs); | ||||
|      | ||||
|   std::vector<LatticeFermionD> src_mrhs(nrhs,FrbGrid); | ||||
|   std::vector<LatticeFermionD> res_mrhs(nrhs,FrbGrid); | ||||
|    | ||||
|   for(int r=0;r<nrhs;r++){ | ||||
|     random(RNG5,src_mrhs[r]); | ||||
|     res_mrhs[r]=Zero(); | ||||
|   } | ||||
|    | ||||
|   HDCGmrhs(src_mrhs,res_mrhs); | ||||
|  | ||||
|   // Standard CG | ||||
| #if 0 | ||||
|   { | ||||
|   std::cout << "**************************************"<<std::endl; | ||||
|   std::cout << "Calling red black CG"<<std::endl; | ||||
|   std::cout << "**************************************"<<std::endl; | ||||
|        | ||||
|     LatticeFermion result(FrbGrid); result=Zero(); | ||||
|     LatticeFermion    src(FrbGrid); random(RNG5,src); | ||||
|  | ||||
|     ConjugateGradient<LatticeFermionD>  CGfine(1.0e-8,30000,false); | ||||
|     CGfine(HermOpEO, src, result); | ||||
|   } | ||||
| #endif   | ||||
|   Grid_finalize(); | ||||
|   return 0; | ||||
| } | ||||
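Aside: the Chebyshev window handed to the block Lanczos, IRLCheby(0.0012, 42.0, 301) in the phys96 test above, acts as a spectral filter: eigenvalues inside [lo,hi] map into [-1,1], where |T_n| <= 1, while eigenvalues below lo are amplified, so the restarted Lanczos resolves the low end of the coarse spectrum first. The scalar sketch below evaluates that mapping in plain C++; the sample eigenvalues are illustrative guesses, not measured values.

#include <cmath>
#include <cstdio>

// Chebyshev polynomial T_n(y) by the three-term recurrence.
static double ChebyT(int n, double y) {
  double Tm = 1.0, T = y;                    // T_0, T_1
  if (n == 0) return Tm;
  for (int k = 1; k < n; k++) {
    double Tp = 2.0 * y * T - Tm;            // T_{k+1} = 2 y T_k - T_{k-1}
    Tm = T; T = Tp;
  }
  return T;
}

int main() {
  const double lo = 0.0012, hi = 42.0;       // window from IRLCheby in the test
  const int order = 301;
  const double lambda[] = {0.0002, 0.0008, 0.0012, 0.01, 1.0, 42.0};
  for (double l : lambda) {
    double y = (2.0 * l - hi - lo) / (hi - lo);   // map [lo,hi] onto [-1,1]
    std::printf("lambda = %-8g  |T_%d| = %g\n", l, order, std::fabs(ChebyT(order, y)));
  }
  return 0;
}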
Some files were not shown because too many files have changed in this diff.