1
0
mirror of https://github.com/paboyle/Grid.git synced 2026-03-20 19:26:09 +00:00

wqMerge branch 'develop' of https://github.com/paboyle/Grid into KS_shifted

This commit is contained in:
Chulwoo Jung
2026-03-11 21:49:26 -04:00
351 changed files with 6909 additions and 2629 deletions

View File

@@ -103,7 +103,7 @@ template<typename FieldType>
void readFieldArray(std::vector<FieldType> &data, const std::string &file){
typedef typename FieldType::vector_object vobj;
typedef typename FieldType::scalar_object sobj;
assert(data.size() > 0);
GRID_ASSERT(data.size() > 0);
GridBase* grid = data[0].Grid(); //assume all fields have the same Grid
BinarySimpleUnmunger<sobj, sobj> munge; //straight copy
@@ -113,7 +113,7 @@ void readFieldArray(std::vector<FieldType> &data, const std::string &file){
std::cout << "Data offset read " << offset << std::endl;
std::cout << "Data size read " << hdr_size << std::endl;
assert(data.size() == hdr_size);
GRID_ASSERT(data.size() == hdr_size);
uint64_t field_size = uint64_t(grid->gSites()) * sizeof(sobj);
@@ -132,7 +132,7 @@ void readFieldArray(std::vector<FieldType> &data, const std::string &file){
std::cout << "Read checksum " << checksum << std::endl;
assert( hdr_checksum == checksum );
GRID_ASSERT( hdr_checksum == checksum );
}

View File

@@ -61,7 +61,7 @@ int main(int argc, char** argv) {
if(GridCmdOptionExists(argv, argv + argc, "--config")) {
file = GridCmdOptionPayload(argv, argv + argc, "--config");
std::cout << "file: " << file << std::endl;
assert(!file.empty());
GRID_ASSERT(!file.empty());
}
OpenQcdIOChromaReference::readConfiguration(Umu_ref, header_ref, file);

View File

@@ -25,6 +25,9 @@ Author: Peter Boyle <paboyle@ph.ed.ac.uk>
See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */
#include "disable_tests_without_instantiations.h"
#ifdef ENABLE_FERMION_INSTANTIATIONS
#include <Grid/Grid.h>
using namespace std;
@@ -273,8 +276,6 @@ void TestWhat(What & Ddwf,
err = phi-chi;
std::cout<<GridLogMessage << "norm diff "<< norm2(err)<< std::endl;
}
#endif

View File

@@ -30,6 +30,9 @@ Author: Peter Boyle <paboyle@ph.ed.ac.uk>
* Reimplement the badly named "multigrid" lanczos as compressed Lanczos using the features
* in Grid that were intended to be used to support blocked Aggregates, from
*/
#include "disable_tests_without_instantiations.h"
#ifdef ENABLE_FERMION_INSTANTIATIONS
#include <Grid/Grid.h>
#include <Grid/algorithms/iterative/ImplicitlyRestartedLanczos.h>
#include <Grid/algorithms/iterative/LocalCoherenceLanczos.h>
@@ -56,7 +59,7 @@ public:
void checkpointFine(std::string evecs_file,std::string evals_file)
{
#ifdef HAVE_LIME
assert(this->subspace.size()==nbasis);
GRID_ASSERT(this->subspace.size()==nbasis);
emptyUserRecord record;
Grid::ScidacWriter WR(this->_FineGrid->IsBoss());
WR.open(evecs_file);
@@ -68,7 +71,7 @@ public:
XmlWriter WRx(evals_file);
write(WRx,"evals",this->evals_fine);
#else
assert(0);
GRID_ASSERT(0);
#endif
}
@@ -82,7 +85,7 @@ public:
XmlReader RDx(evals_file);
read(RDx,"evals",this->evals_fine);
assert(this->evals_fine.size()==nbasis);
GRID_ASSERT(this->evals_fine.size()==nbasis);
std::cout << GridLogIRL<< "checkpointFineRestore: Reading evecs from "<<evecs_file<<std::endl;
emptyUserRecord record;
@@ -95,7 +98,7 @@ public:
}
RD.close();
#else
assert(0);
GRID_ASSERT(0);
#endif
}
@@ -114,7 +117,7 @@ public:
XmlWriter WRx(evals_file);
write(WRx,"evals",this->evals_coarse);
#else
assert(0);
GRID_ASSERT(0);
#endif
}
@@ -128,7 +131,7 @@ public:
XmlReader RDx(evals_file);
read(RDx,"evals",this->evals_coarse);
assert(this->evals_coarse.size()==nvec);
GRID_ASSERT(this->evals_coarse.size()==nvec);
emptyUserRecord record;
std::cout << GridLogIRL<< "checkpointCoarseRestore: Reading evecs from "<<evecs_file<<std::endl;
Grid::ScidacReader RD ;
@@ -138,7 +141,7 @@ public:
}
RD.close();
#else
assert(0);
GRID_ASSERT(0);
#endif
}
};
@@ -179,19 +182,19 @@ int main (int argc, char ** argv) {
std::vector<int> fineLatt = latt;
int dims=fineLatt.size();
assert(blockSize.size()==dims+1);
GRID_ASSERT(blockSize.size()==dims+1);
std::vector<int> coarseLatt(dims);
std::vector<int> coarseLatt5d ;
for (int d=0;d<coarseLatt.size();d++){
coarseLatt[d] = fineLatt[d]/blockSize[d]; assert(coarseLatt[d]*blockSize[d]==fineLatt[d]);
coarseLatt[d] = fineLatt[d]/blockSize[d]; GRID_ASSERT(coarseLatt[d]*blockSize[d]==fineLatt[d]);
}
std::cout << GridLogMessage<< " 5d coarse lattice is ";
for (int i=0;i<coarseLatt.size();i++){
std::cout << coarseLatt[i]<<"x";
}
int cLs = Ls/blockSize[dims]; assert(cLs*blockSize[dims]==Ls);
int cLs = Ls/blockSize[dims]; GRID_ASSERT(cLs*blockSize[dims]==Ls);
std::cout << cLs<<std::endl;
GridCartesian * CoarseGrid4 = SpaceTimeGrid::makeFourDimGrid(coarseLatt, GridDefaultSimd(Nd,vComplex::Nsimd()),GridDefaultMpi());
@@ -222,14 +225,14 @@ int main (int argc, char ** argv) {
std::cout << GridLogMessage << "Keep " << fine.Nstop << " fine vectors" << std::endl;
std::cout << GridLogMessage << "Keep " << coarse.Nstop << " coarse vectors" << std::endl;
assert(Nm2 >= Nm1);
GRID_ASSERT(Nm2 >= Nm1);
const int nbasis= 60;
assert(nbasis==Ns1);
GRID_ASSERT(nbasis==Ns1);
LocalCoherenceLanczosScidac<vSpinColourVector,vTComplex,nbasis> _LocalCoherenceLanczos(FrbGrid,CoarseGrid5,HermOp,Odd);
std::cout << GridLogMessage << "Constructed LocalCoherenceLanczos" << std::endl;
assert( (Params.doFine)||(Params.doFineRead));
GRID_ASSERT( (Params.doFine)||(Params.doFineRead));
if ( Params.doFine ) {
std::cout << GridLogMessage << "Performing fine grid IRL Nstop "<< Ns1 << " Nk "<<Nk1<<" Nm "<<Nm1<< std::endl;
@@ -256,3 +259,4 @@ int main (int argc, char ** argv) {
Grid_finalize();
}
#endif

View File

@@ -25,6 +25,9 @@ Author: Peter Boyle <paboyle@ph.ed.ac.uk>
See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */
#include "disable_tests_without_instantiations.h"
#ifdef ENABLE_FERMION_INSTANTIATIONS
#include <Grid/Grid.h>
using namespace std;
@@ -237,3 +240,5 @@ int main (int argc, char ** argv)
Grid_finalize();
}
#endif

View File

@@ -25,6 +25,9 @@ Author: Peter Boyle <paboyle@ph.ed.ac.uk>
See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */
#include "disable_tests_without_instantiations.h"
#ifdef ENABLE_FERMION_INSTANTIATIONS
#include <Grid/Grid.h>
using namespace std;
@@ -180,7 +183,7 @@ int main (int argc, char ** argv)
std::cout << " SinglePrecision GF/s "<< flops/(t2-t1)/1000.<<std::endl;
std::cout << " SinglePrecision error count "<< FlightRecorder::ErrorCount()<<std::endl;
assert(FlightRecorder::ErrorCount()==0);
GRID_ASSERT(FlightRecorder::ErrorCount()==0);
std::cout << " FlightRecorder is OK! "<<std::endl;
iter ++;
@@ -208,7 +211,7 @@ int main (int argc, char ** argv)
std::cout << " DoublePrecision iterations/sec "<< iters/(t2-t1)*1000.*1000.<<std::endl;
std::cout << " DoublePrecision GF/s "<< flops/(t2-t1)/1000.<<std::endl;
std::cout << " DoublePrecision error count "<< FlightRecorder::ErrorCount()<<std::endl;
assert(FlightRecorder::ErrorCount()==0);
GRID_ASSERT(FlightRecorder::ErrorCount()==0);
std::cout << " FlightRecorder is OK! "<<std::endl;
now = time(NULL); UGrid->Broadcast(0,(void *)&now,sizeof(now));
i++;
@@ -218,7 +221,9 @@ int main (int argc, char ** argv)
RealD diff = axpy_norm(diff_o, -1.0, result_o, result_o_2);
std::cout << GridLogMessage << "::::::::::::: Diff between mixed and regular CG: " << diff << std::endl;
assert(diff < 1e-4);
GRID_ASSERT(diff < 1e-4);
Grid_finalize();
}
#endif

View File

@@ -25,6 +25,9 @@ Author: Peter Boyle <paboyle@ph.ed.ac.uk>
See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */
#include "disable_tests_without_instantiations.h"
#ifdef ENABLE_FERMION_INSTANTIATIONS
#include <Grid/Grid.h>
using namespace std;
@@ -118,3 +121,4 @@ int main (int argc, char ** argv)
Grid_finalize();
}
#endif
#endif

View File

@@ -84,8 +84,8 @@ int main(int argc, char** argv) {
std::cout << GridLogMessage << "Double: time_ref = " << sw_ref.Elapsed() << " time_res = " << sw_res.Elapsed() << std::endl;
// clang-format on
assert(diff_ip_d == 0.);
assert(diff_norm2_d == 0.);
GRID_ASSERT(diff_ip_d == 0.);
GRID_ASSERT(diff_norm2_d == 0.);
std::cout << GridLogMessage << "Double: all checks passed" << std::endl;
}
@@ -116,8 +116,8 @@ int main(int argc, char** argv) {
std::cout << GridLogMessage << "Single: time_ref = " << sw_ref.Elapsed() << " time_res = " << sw_res.Elapsed() << std::endl;
// clang-format on
assert(diff_ip_f == 0.);
assert(diff_norm2_f == 0.);
GRID_ASSERT(diff_ip_f == 0.);
GRID_ASSERT(diff_norm2_f == 0.);
std::cout << GridLogMessage << "Single: all checks passed" << std::endl;
}

View File

@@ -24,6 +24,8 @@ with this program; if not, write to the Free Software Foundation, Inc.,
See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
#include "disable_tests_without_instantiations.h"
#ifdef ENABLE_FERMION_INSTANTIATIONS
#include <Grid/Grid.h>
#include <Grid/qcd/utils/A2Autils.h>
@@ -157,3 +159,5 @@ int main(int argc, char *argv[])
return EXIT_SUCCESS;
}
#endif

View File

@@ -175,7 +175,7 @@ void Tester(const functor &func)
} else {
std::cout<<GridLogMessage << " wrong!" <<std::endl;
}
assert(ok==0);
GRID_ASSERT(ok==0);
}
template<class functor>
@@ -234,7 +234,7 @@ void IntTester(const functor &func)
} else {
std::cout<<GridLogMessage << " wrong!" <<std::endl;
}
assert(ok==0);
GRID_ASSERT(ok==0);
}
@@ -285,7 +285,7 @@ void ReductionTester(const functor &func)
} else {
std::cout<<GridLogMessage << " wrong!" <<std::endl;
}
assert(ok==0);
GRID_ASSERT(ok==0);
}
@@ -332,7 +332,7 @@ void IntReductionTester(const functor &func)
} else {
std::cout<<GridLogMessage << " wrong!" <<std::endl;
}
assert(ok==0);
GRID_ASSERT(ok==0);
}
@@ -456,7 +456,7 @@ void PermTester(const functor &func)
} else {
std::cout<<GridLogMessage << " wrong!" <<std::endl;
}
assert(ok==0);
GRID_ASSERT(ok==0);
}
@@ -520,8 +520,8 @@ void ExchangeTester(const functor &func)
// std::cout << " i "<<i<<" j "<<j<<" "<<reference1[j]<<" "<<result1[i]<<std::endl;
}
}
// assert(found==1);
assert(found==1||found==0);
// GRID_ASSERT(found==1);
GRID_ASSERT(found==1||found==0);
}
for(int i=0;i<Nsimd;i++){
int found=0;
@@ -531,8 +531,8 @@ void ExchangeTester(const functor &func)
// std::cout << " i "<<i<<" j "<<j<<" "<<reference2[j]<<" "<<result2[i]<<std::endl;
}
}
// assert(found==1);
assert(found==1||found==0);
// GRID_ASSERT(found==1);
GRID_ASSERT(found==1||found==0);
}
/*
@@ -547,8 +547,8 @@ void ExchangeTester(const functor &func)
}
*/
for(int i=0;i<Nsimd;i++){
assert(test1[i]==input1[i]);
assert(test2[i]==input2[i]);
GRID_ASSERT(test1[i]==input1[i]);
GRID_ASSERT(test2[i]==input2[i]);
}
}
@@ -789,7 +789,7 @@ int main (int argc, char ** argv)
nrm = innerProduct(DD[i],DD[i]);
auto tmp = Reduce(nrm);
// std::cout << tmp << std::endl;
assert( tmp < 1.0e-14 );
GRID_ASSERT( tmp < 1.0e-14 );
}
std::cout <<" OK ! "<<std::endl;
@@ -805,7 +805,7 @@ int main (int argc, char ** argv)
nrm = innerProduct(DD[i],DD[i]);
auto tmp = Reduce(nrm);
// std::cout << tmp << std::endl;
assert( tmp < 1.0e-3 );
GRID_ASSERT( tmp < 1.0e-3 );
}
std::cout <<" OK ! "<<std::endl;
@@ -820,7 +820,7 @@ int main (int argc, char ** argv)
nrm = innerProduct(FF[i],FF[i]);
auto tmp = Reduce(nrm);
// std::cout << tmp << std::endl;
assert( tmp < 1.0e-3 );
GRID_ASSERT( tmp < 1.0e-3 );
}
std::cout <<" OK ! "<<std::endl;
#endif

View File

@@ -54,7 +54,7 @@ int main (int argc, char ** argv)
{
Grid_init(&argc,&argv);
assert(argc >= 5);
GRID_ASSERT(argc >= 5);
Coordinate latt(4,0);
latt[0] = toint(argv[1]);

View File

@@ -205,7 +205,7 @@ void runBenchmark(int* argc, char*** argv) {
double secs_res = (t5-t4)/1e6; \
grid_printf_msg("Performance(%35s, %s): %2.4f s, %6.0f GFlop/s, %6.0f GByte/s, speedup vs ref = %.2f, fraction of hop = %.2f\n", \
"compact_"#KERNEL, precision.c_str(), secs_res, clov_gflop_total/secs_res, clov_gbyte_total/secs_res, secs_ref/secs_res, secs_res/secs_hop); \
assert(resultsAgree(ref, res, #KERNEL)); \
GRID_ASSERT(resultsAgree(ref, res, #KERNEL)); \
}
BENCH_CLOVER_KERNEL(Mooee);

View File

@@ -29,7 +29,6 @@ Author: Peter Boyle <paboyle@ph.ed.ac.uk>
#include <Grid/Grid.h>
using namespace Grid;
;
int main (int argc, char ** argv)
{
@@ -116,10 +115,10 @@ int main (int argc, char ** argv)
Stilde=S;
std::cout<<" Benchmarking FFT of LatticeSpinMatrix "<<std::endl;
theFFT.FFT_dim(Stilde,S,0,FFT::forward); std::cout << theFFT.MFlops()<<" mflops "<<std::endl;
theFFT.FFT_dim(Stilde,S,1,FFT::forward); std::cout << theFFT.MFlops()<<" mflops "<<std::endl;
theFFT.FFT_dim(Stilde,S,2,FFT::forward); std::cout << theFFT.MFlops()<<" mflops "<<std::endl;
theFFT.FFT_dim(Stilde,S,3,FFT::forward); std::cout << theFFT.MFlops()<<" mflops "<<std::endl;
theFFT.FFT_dim(Stilde,Stilde,0,FFT::forward); std::cout << theFFT.MFlops()<<" mflops "<<std::endl;
theFFT.FFT_dim(Stilde,Stilde,1,FFT::forward); std::cout << theFFT.MFlops()<<" mflops "<<std::endl;
theFFT.FFT_dim(Stilde,Stilde,2,FFT::forward); std::cout << theFFT.MFlops()<<" mflops "<<std::endl;
theFFT.FFT_dim(Stilde,Stilde,3,FFT::forward); std::cout << theFFT.MFlops()<<" mflops "<<std::endl;
SpinMatrixD Sp;
Sp = Zero(); Sp = Sp+cVol;
@@ -202,11 +201,16 @@ int main (int argc, char ** argv)
FFT theFFT5(FGrid);
theFFT5.FFT_dim(result5,tmp5,1,FFT::forward); tmp5 = result5;
std::cout<<"Fourier xformed Ddwf 1 "<<norm2(result5)<<std::endl;
theFFT5.FFT_dim(result5,tmp5,2,FFT::forward); tmp5 = result5;
std::cout<<"Fourier xformed Ddwf 2 "<<norm2(result5)<<std::endl;
theFFT5.FFT_dim(result5,tmp5,3,FFT::forward); tmp5 = result5;
theFFT5.FFT_dim(result5,tmp5,4,FFT::forward); result5 = result5*ComplexD(::sqrt(1.0/vol),0.0);
std::cout<<"Fourier xformed Ddwf 3 "<<norm2(result5)<<std::endl;
theFFT5.FFT_dim(result5,tmp5,4,FFT::forward);
std::cout<<"Fourier xformed Ddwf 4 "<<norm2(result5)<<std::endl;
result5 = result5*ComplexD(::sqrt(1.0/vol),0.0);
std::cout<<"Fourier xformed Ddwf"<<std::endl;
std::cout<<"Fourier xformed Ddwf "<<norm2(result5)<<std::endl;
tmp5 = src5;
theFFT5.FFT_dim(src5_p,tmp5,1,FFT::forward); tmp5 = src5_p;
@@ -214,7 +218,7 @@ int main (int argc, char ** argv)
theFFT5.FFT_dim(src5_p,tmp5,3,FFT::forward); tmp5 = src5_p;
theFFT5.FFT_dim(src5_p,tmp5,4,FFT::forward); src5_p = src5_p*ComplexD(::sqrt(1.0/vol),0.0);
std::cout<<"Fourier xformed src5"<<std::endl;
std::cout<<"Fourier xformed src5"<< norm2(src5)<<" -> "<<norm2(src5_p)<<std::endl;
/////////////////////////////////////////////////////////////////
// work out the predicted from Fourier
@@ -251,7 +255,8 @@ int main (int argc, char ** argv)
Kinetic = Kinetic + sin(kmu)*ci*(Gamma(Gmu[mu])*src5_p);
}
std::cout << " src5 "<<norm2(src5_p)<<std::endl;
std::cout << " Kinetic "<<norm2(Kinetic)<<std::endl;
// NB implicit sum over mu
//
// 1-1/2 Dw = 1 - 1/2 ( eip+emip)
@@ -260,18 +265,23 @@ int main (int argc, char ** argv)
// = 2 sink/2 ink/2 = sk2
W = one - M5 + sk2;
std::cout << " W "<<norm2(W)<<std::endl;
Kinetic = Kinetic + W * src5_p;
std::cout << " Kinetic "<<norm2(Kinetic)<<std::endl;
LatticeCoordinate(scoor,sdir);
tmp5 = Cshift(src5_p,sdir,+1);
tmp5 = (tmp5 - G5*tmp5)*0.5;
tmp5 = where(scoor==Integer(Ls-1),mass*tmp5,-tmp5);
std::cout << " tmp5 "<<norm2(tmp5)<<std::endl;
Kinetic = Kinetic + tmp5;
tmp5 = Cshift(src5_p,sdir,-1);
tmp5 = (tmp5 + G5*tmp5)*0.5;
tmp5 = where(scoor==Integer(0),mass*tmp5,-tmp5);
std::cout << " tmp5 "<<norm2(tmp5)<<std::endl;
Kinetic = Kinetic + tmp5;
std::cout<<"Momentum space Ddwf "<< norm2(Kinetic)<<std::endl;
@@ -279,7 +289,7 @@ int main (int argc, char ** argv)
result5 = result5 - Kinetic;
std::cout<<"diff "<< norm2(result5)<<std::endl;
assert(norm2(result5)<1.0e-4);
GRID_ASSERT(norm2(result5)<1.0e-4);
}
@@ -339,7 +349,7 @@ int main (int argc, char ** argv)
Ddwf.Mdag(src5,tmp5);
src5=tmp5;
MdagMLinearOperator<DomainWallFermionD,LatticeFermionD> HermOp(Ddwf);
ConjugateGradient<LatticeFermionD> CG(1.0e-16,10000);
ConjugateGradient<LatticeFermionD> CG(1.0e-8,10000);
CG(HermOp,src5,result5);
////////////////////////////////////////////////////////////////////////
@@ -358,7 +368,7 @@ int main (int argc, char ** argv)
diff = ref - result4;
std::cout << "result - ref "<<norm2(diff)<<std::endl;
assert(norm2(diff)<1.0e-4);
GRID_ASSERT(norm2(diff)<1.0e-4);
}
@@ -423,7 +433,7 @@ int main (int argc, char ** argv)
Dov.Mdag(src5,tmp5);
src5=tmp5;
MdagMLinearOperator<OverlapWilsonCayleyTanhFermionD,LatticeFermionD> HermOp(Dov);
ConjugateGradient<LatticeFermionD> CG(1.0e-16,10000);
ConjugateGradient<LatticeFermionD> CG(1.0e-8,10000);
CG(HermOp,src5,result5);
////////////////////////////////////////////////////////////////////////
@@ -442,7 +452,7 @@ int main (int argc, char ** argv)
diff = ref - result4;
std::cout << "result - ref "<<norm2(diff)<<std::endl;
assert(norm2(diff)<1.0e-4);
GRID_ASSERT(norm2(diff)<1.0e-4);
}

View File

@@ -196,7 +196,7 @@ int main (int argc, char ** argv)
for(int i=1;i<argc;i++){
std::string sarg(argv[i]);
if(sarg == "--gimpl"){
assert(i<argc-1 && "--gimpl option requires an argument");
GRID_ASSERT(i<argc-1 && "--gimpl option requires an argument");
gimpl = argv[i+1];
if(gimpl != "periodic" && gimpl != "conjugate")
assert(0 && "Invalid gimpl");
@@ -206,7 +206,7 @@ int main (int argc, char ** argv)
std::cout << "Not doing the Fourier accelerated gauge fixing tests" << std::endl;
do_fft_gfix = false;
}else if(sarg == "--alpha"){
assert(i<argc-1 && "--alpha option requires an argument");
GRID_ASSERT(i<argc-1 && "--alpha option requires an argument");
std::istringstream ss(argv[i+1]); ss >> alpha;
}
}

101
tests/core/Test_fft_prop.cc Normal file
View File

@@ -0,0 +1,101 @@
/*************************************************************************************
grid` physics library, www.github.com/paboyle/Grid
Source file: ./tests/Test_cshift.cc
Copyright (C) 2015
Author: Azusa Yamaguchi <ayamaguc@staffmail.ed.ac.uk>
Author: Peter Boyle <paboyle@ph.ed.ac.uk>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */
#include <Grid/Grid.h>
using namespace Grid;
template<class LatticeObject>
void bench(GridCartesian *grid, std::string name)
{
LatticeComplexD C(grid);
LatticeComplexD coor(grid);
ComplexD ci(0.0,1.0);
Coordinate p({1,2,3,4});
Coordinate latt_size = grid->_fdimensions;
std::cout<<"*************************************************"<<std::endl;
std::cout<<" Benchmarking FFT of "<<name<<" on plane wave "<<std::endl;
std::cout<<"*************************************************"<<std::endl;
C=Zero();
for(int mu=0;mu<4;mu++){
RealD TwoPiL = M_PI * 2.0/ latt_size[mu];
LatticeCoordinate(coor,mu);
C = C + (TwoPiL * p[mu]) * coor;
}
C = exp(C*ci);
LatticeObject S(grid);
LatticeObject Stilde(grid);
S=Zero();
S = S+C;
FFT theFFT(grid);
Stilde=S;
std::cout << " norm2(s) "<<norm2(Stilde)<<std::endl;
double tt= -usecond();
theFFT.FFT_dim(Stilde,Stilde,0,FFT::forward); std::cout << theFFT.MFlops()<<" mflops "<<norm2(Stilde)<<std::endl;
theFFT.FFT_dim(Stilde,Stilde,1,FFT::forward); std::cout << theFFT.MFlops()<<" mflops "<<norm2(Stilde)<<std::endl;
theFFT.FFT_dim(Stilde,Stilde,2,FFT::forward); std::cout << theFFT.MFlops()<<" mflops "<<norm2(Stilde)<<std::endl;
theFFT.FFT_dim(Stilde,Stilde,3,FFT::forward); std::cout << theFFT.MFlops()<<" mflops "<<norm2(Stilde)<<std::endl;
tt+= usecond();
std::cout<<"*************************************************"<<std::endl;
std::cout<<" FFT of "<<latt_size <<" "<<name<<" took "<<tt/1.e6<<" s"<<std::endl;
std::cout<<"*************************************************"<<std::endl;
}
int main (int argc, char ** argv)
{
Grid_init(&argc,&argv);
int threads = GridThread::GetThreads();
std::cout<<GridLogMessage << "Grid is setup to use "<<threads<<" threads"<<std::endl;
Coordinate latt_size = GridDefaultLatt();
Coordinate simd_layout = GridDefaultSimd(Nd,vComplexD::Nsimd());
Coordinate mpi_layout = GridDefaultMpi();
int vol = 1;
for(int d=0;d<latt_size.size();d++){
vol = vol * latt_size[d];
}
GridCartesian GRID(latt_size,simd_layout,mpi_layout);
bench<LatticeComplexD>(&GRID,std::string("LatticeComplexD"));
bench<LatticeColourMatrixD>(&GRID,std::string("LatticeColourMatrixD"));
bench<LatticePropagatorD>(&GRID,std::string("LatticePropagatorD"));
Grid_finalize();
}

View File

@@ -171,7 +171,7 @@ int main(int argc, char** argv) {
Complex tr2 = TRa()()(b,c) * Complex(0,1);
std::cout << " 2 Tr( Tc[Ta,Tb]) " << 2.0*tr1<<std::endl;
std::cout << " - TRa_bc " << tr2<<std::endl;
assert(abs( (2.0*tr1-tr2) ) < 1.0e-7);
GRID_ASSERT(abs( (2.0*tr1-tr2) ) < 1.0e-7);
std::cout << "------------------"<<std::endl;
}}}
#endif

View File

@@ -93,13 +93,13 @@ void MemoryTest(GridCartesian * FGrid, int N)
if ( dev ) {
autoView(A_v,A[v],AcceleratorRead);
accelerator_for(ss,FGrid->oSites(),1,{
// assert(B[v]==A_v[ss]()()().getlane(0));
// GRID_ASSERT(B[v]==A_v[ss]()()().getlane(0));
});
// std::cout << "["<<v<<"] checked on GPU"<<B[v]<<std::endl;
} else {
autoView(A_v,A[v],CpuRead);
thread_for(ss,FGrid->oSites(),{
assert(B[v]==A_v[ss]()()().getlane(0));
GRID_ASSERT(B[v]==A_v[ss]()()().getlane(0));
});
// std::cout << "["<<v<<"] checked on CPU"<<B[v]<<std::endl;
}

View File

@@ -11,13 +11,13 @@ template<class vobj> inline void sliceSumCPU(const Grid::Lattice<vobj> &Data,std
typedef typename vobj::scalar_object sobj;
typedef typename vobj::scalar_object::scalar_type scalar_type;
GridBase *grid = Data.Grid();
assert(grid!=NULL);
GRID_ASSERT(grid!=NULL);
const int Nd = grid->_ndimension;
const int Nsimd = grid->Nsimd();
assert(orthogdim >= 0);
assert(orthogdim < Nd);
GRID_ASSERT(orthogdim >= 0);
GRID_ASSERT(orthogdim < Nd);
int fd=grid->_fdimensions[orthogdim];
int ld=grid->_ldimensions[orthogdim];
@@ -134,7 +134,7 @@ int main (int argc, char ** argv) {
for(int t=0;t<reduction_reference.size();t++) {
auto diff = reduction_reference[t]-reduction_result[t];
assert(abs(TensorRemove(diff)) < 1e-8 );
GRID_ASSERT(abs(TensorRemove(diff)) < 1e-8 );
}
@@ -184,10 +184,10 @@ int main (int argc, char ** argv) {
for(int t=0;t<reduction_reference_cv.size();t++) {
auto diff = reduction_reference_cv[t]-reduction_result_cv[t];
assert(abs(diff()(0)()) < 1e-8 );
assert(abs(diff()(1)()) < 1e-8 );
assert(abs(diff()(2)()) < 1e-8 );
assert(abs(diff()(3)()) < 1e-8 );
GRID_ASSERT(abs(diff()(0)()) < 1e-8 );
GRID_ASSERT(abs(diff()(1)()) < 1e-8 );
GRID_ASSERT(abs(diff()(2)()) < 1e-8 );
GRID_ASSERT(abs(diff()(3)()) < 1e-8 );
}
@@ -238,18 +238,18 @@ int main (int argc, char ** argv) {
auto diff = reduction_reference_scv[t]-reduction_result_scv[t];
// std::cout << diff <<std::endl;
assert(abs(diff()(0)(0)) < 1e-8 );
assert(abs(diff()(0)(1)) < 1e-8 );
assert(abs(diff()(0)(2)) < 1e-8 );
assert(abs(diff()(1)(0)) < 1e-8 );
assert(abs(diff()(1)(1)) < 1e-8 );
assert(abs(diff()(1)(2)) < 1e-8 );
assert(abs(diff()(2)(0)) < 1e-8 );
assert(abs(diff()(2)(1)) < 1e-8 );
assert(abs(diff()(2)(2)) < 1e-8 );
assert(abs(diff()(3)(0)) < 1e-8 );
assert(abs(diff()(3)(1)) < 1e-8 );
assert(abs(diff()(3)(2)) < 1e-8 );
GRID_ASSERT(abs(diff()(0)(0)) < 1e-8 );
GRID_ASSERT(abs(diff()(0)(1)) < 1e-8 );
GRID_ASSERT(abs(diff()(0)(2)) < 1e-8 );
GRID_ASSERT(abs(diff()(1)(0)) < 1e-8 );
GRID_ASSERT(abs(diff()(1)(1)) < 1e-8 );
GRID_ASSERT(abs(diff()(1)(2)) < 1e-8 );
GRID_ASSERT(abs(diff()(2)(0)) < 1e-8 );
GRID_ASSERT(abs(diff()(2)(1)) < 1e-8 );
GRID_ASSERT(abs(diff()(2)(2)) < 1e-8 );
GRID_ASSERT(abs(diff()(3)(0)) < 1e-8 );
GRID_ASSERT(abs(diff()(3)(1)) < 1e-8 );
GRID_ASSERT(abs(diff()(3)(2)) < 1e-8 );
}
@@ -304,7 +304,7 @@ int main (int argc, char ** argv) {
for (int js = 0; js < Ns; js++) {
for (int ic = 0; ic < Nc; ic++) {
for (int jc = 0; jc < Nc; jc++) {
assert(abs(diff()(is,js)(ic,jc)) < 1e-8);
GRID_ASSERT(abs(diff()(is,js)(ic,jc)) < 1e-8);
}
}
}

View File

@@ -77,11 +77,11 @@ public:
ComplexD ref = B[v][p*PageWords];
std::cout << "Device compare "<<B[v][p*PageWords]<<std::endl;
accelerator_for(ss,1,1,{
assert(ref==A_v[p*PageWords]);
GRID_ASSERT(ref==A_v[p*PageWords]);
});
} else {
std::cout << "Host compare "<<B[v][p*PageWords]<<std::endl;
assert(B[v][p*PageWords]==A[v][p*PageWords]);
GRID_ASSERT(B[v][p*PageWords]==A[v][p*PageWords]);
}
}
}

View File

@@ -77,7 +77,7 @@ int main (int argc, char ** argv)
ns=ns+norm2(sl);
}
std::cout <<GridLogMessage <<" sliceNorm" <<mu<<" "<< nn <<" "<<ns<<" err " << nn-ns<<std::endl;
assert(abs(nn-ns) < 1.0e-10);
GRID_ASSERT(abs(nn-ns) < 1.0e-10);
}
}
@@ -105,7 +105,7 @@ int main (int argc, char ** argv)
ns=ns+norm2(sl);
}
std::cout <<GridLogMessage <<" sliceNorm" <<mu<<" "<< nn <<" "<<ns<<" err " << nn-ns<<std::endl;
assert(abs(nn-ns) < 1.0e-10);
GRID_ASSERT(abs(nn-ns) < 1.0e-10);
}
}
@@ -135,7 +135,7 @@ int main (int argc, char ** argv)
ns=ns+norm2(sl);
}
std::cout <<GridLogMessage <<" sliceNorm" <<mu<<" "<< nn <<" "<<ns<<" err " << nn-ns<<std::endl;
assert(abs(nn-ns) < 1.0e-10);
GRID_ASSERT(abs(nn-ns) < 1.0e-10);
}
}

View File

@@ -114,7 +114,7 @@ int main(int argc, char **argv)
err = ref - r_eo;
std::cout << GridLogMessage << "EO norm diff\t" << norm2(err) << " (" << norm2(ref) << " - " << norm2(r_eo) << ")" << std::endl;
assert(fabs(norm2(err)) < tolerance);
GRID_ASSERT(fabs(norm2(err)) < tolerance);
@@ -129,7 +129,7 @@ int main(int argc, char **argv)
err = ref - r_eo;
std::cout << GridLogMessage << "EO norm diff compact\t" << norm2(err) << " (" << norm2(ref) << " - " << norm2(r_eo) << ")" << std::endl;
assert(fabs(norm2(err)) < tolerance);
GRID_ASSERT(fabs(norm2(err)) < tolerance);
std::cout << GridLogMessage << "==============================================================" << std::endl;
@@ -204,7 +204,7 @@ int main(int argc, char **argv)
err = phi - chi;
std::cout << GridLogMessage << "norm diff " << norm2(err) << std::endl;
assert(fabs(norm2(err)) < tolerance);
GRID_ASSERT(fabs(norm2(err)) < tolerance);
Dwc_compact.Mooee(chi_e, src_e);
Dwc_compact.MooeeInv(src_e, phi_e);
@@ -217,7 +217,7 @@ int main(int argc, char **argv)
err = phi - chi;
std::cout << GridLogMessage << "norm diff compact " << norm2(err) << std::endl;
assert(fabs(norm2(err)) < tolerance);
GRID_ASSERT(fabs(norm2(err)) < tolerance);
std::cout << GridLogMessage << "==============================================================" << std::endl;
std::cout << GridLogMessage << "= Test MeeDag MeeInvDag = 1 (if csw!=0) " << std::endl;
@@ -237,7 +237,7 @@ int main(int argc, char **argv)
err = phi - chi;
std::cout << GridLogMessage << "norm diff " << norm2(err) << std::endl;
assert(fabs(norm2(err)) < tolerance);
GRID_ASSERT(fabs(norm2(err)) < tolerance);
Dwc_compact.MooeeDag(chi_e, src_e);
Dwc_compact.MooeeInvDag(src_e, phi_e);
@@ -250,7 +250,7 @@ int main(int argc, char **argv)
err = phi - chi;
std::cout << GridLogMessage << "norm diff compact " << norm2(err) << std::endl;
assert(fabs(norm2(err)) < tolerance);
GRID_ASSERT(fabs(norm2(err)) < tolerance);
std::cout << GridLogMessage << "==============================================================" << std::endl;
std::cout << GridLogMessage << "= Test MeeInv MeeDag = 1 (if csw!=0) " << std::endl;
@@ -270,7 +270,7 @@ int main(int argc, char **argv)
err = phi - chi;
std::cout << GridLogMessage << "norm diff " << norm2(err) << std::endl;
assert(fabs(norm2(err)) < tolerance);
GRID_ASSERT(fabs(norm2(err)) < tolerance);
Dwc_compact.MooeeDag(chi_e, src_e);
Dwc_compact.MooeeInv(src_e, phi_e);
@@ -283,7 +283,7 @@ int main(int argc, char **argv)
err = phi - chi;
std::cout << GridLogMessage << "norm diff compact " << norm2(err) << std::endl;
assert(fabs(norm2(err)) < tolerance);
GRID_ASSERT(fabs(norm2(err)) < tolerance);
std::cout << GridLogMessage << "================================================================" << std::endl;
std::cout << GridLogMessage << "= Testing gauge covariance Clover term with EO preconditioning " << std::endl;
@@ -339,7 +339,7 @@ int main(int argc, char **argv)
err = chi - adj(Omega) * phi;
std::cout << GridLogMessage << "norm diff " << norm2(err) << std::endl;
assert(fabs(norm2(err)) < tolerance);
GRID_ASSERT(fabs(norm2(err)) < tolerance);
chi = Zero();
phi = Zero();
@@ -368,7 +368,7 @@ int main(int argc, char **argv)
err = chi - adj(Omega) * phi;
std::cout << GridLogMessage << "norm diff compact " << norm2(err) << std::endl;
assert(fabs(norm2(err)) < tolerance);
GRID_ASSERT(fabs(norm2(err)) < tolerance);
std::cout << GridLogMessage << "=================================================================" << std::endl;
std::cout << GridLogMessage << "= Testing gauge covariance Clover term w/o EO preconditioning " << std::endl;
@@ -389,10 +389,10 @@ int main(int argc, char **argv)
err = result - adj(Omega) * result2;
std::cout << GridLogMessage << "norm diff Wilson " << norm2(err) << std::endl;
assert(fabs(norm2(err)) < tolerance);
GRID_ASSERT(fabs(norm2(err)) < tolerance);
err = chi - adj(Omega) * phi;
std::cout << GridLogMessage << "norm diff WilsonClover " << norm2(err) << std::endl;
assert(fabs(norm2(err)) < tolerance);
GRID_ASSERT(fabs(norm2(err)) < tolerance);
chi = Zero();
phi = Zero();
@@ -402,7 +402,7 @@ int main(int argc, char **argv)
err = chi - adj(Omega) * phi;
std::cout << GridLogMessage << "norm diff CompactWilsonClover " << norm2(err) << std::endl;
assert(fabs(norm2(err)) < tolerance);
GRID_ASSERT(fabs(norm2(err)) < tolerance);
std::cout << GridLogMessage << "==========================================================" << std::endl;
std::cout << GridLogMessage << "= Testing Mooee(csw=0) Clover to reproduce Mooee Wilson " << std::endl;
@@ -432,7 +432,7 @@ int main(int argc, char **argv)
err = chi - phi;
std::cout << GridLogMessage << "norm diff " << norm2(err) << std::endl;
assert(fabs(norm2(err)) < tolerance);
GRID_ASSERT(fabs(norm2(err)) < tolerance);
chi = Zero();
phi = Zero();
@@ -458,7 +458,7 @@ int main(int argc, char **argv)
err = chi - phi;
std::cout << GridLogMessage << "norm diff compact " << norm2(err) << std::endl;
assert(fabs(norm2(err)) < tolerance);
GRID_ASSERT(fabs(norm2(err)) < tolerance);
std::cout << GridLogMessage << "==========================================================" << std::endl;
std::cout << GridLogMessage << "= Testing EO operator is equal to the unprec " << std::endl;
@@ -493,7 +493,7 @@ int main(int argc, char **argv)
std::cout << GridLogMessage << "ref (unpreconditioned operator) diff : " << norm2(ref) << std::endl;
std::cout << GridLogMessage << "phi (EO decomposition) diff : " << norm2(phi) << std::endl;
std::cout << GridLogMessage << "norm diff : " << norm2(err) << std::endl;
assert(fabs(norm2(err)) < tolerance);
GRID_ASSERT(fabs(norm2(err)) < tolerance);
chi = Zero();
phi = Zero();
@@ -524,7 +524,7 @@ int main(int argc, char **argv)
std::cout << GridLogMessage << "ref (unpreconditioned operator) diff compact : " << norm2(ref) << std::endl;
std::cout << GridLogMessage << "phi (EO decomposition) diff compact : " << norm2(phi) << std::endl;
std::cout << GridLogMessage << "norm diff compact : " << norm2(err) << std::endl;
assert(fabs(norm2(err)) < tolerance);
GRID_ASSERT(fabs(norm2(err)) < tolerance);
Grid_finalize();
}

View File

@@ -198,8 +198,8 @@ void TestConserved(Action & Dw,
std::cout<<GridLogMessage<<"Vector Ward identity by timeslice (~ 0)"<<std::endl;
for(int t=0;t<Nt;t++){
std::cout<<GridLogMessage <<" t "<<t<<" SV "<<real(TensorRemove(sumSV[t]))<<" VV "<<real(TensorRemove(sumVV[t]))<<std::endl;
assert(abs(real(TensorRemove(sumSV[t]))) < 1e-10);
assert(abs(real(TensorRemove(sumVV[t]))) < 1e-2);
GRID_ASSERT(abs(real(TensorRemove(sumSV[t]))) < 1e-10);
GRID_ASSERT(abs(real(TensorRemove(sumVV[t]))) < 1e-2);
}
///////////////////////////////
@@ -245,9 +245,9 @@ void TestConserved(Action & Dw,
std::cout<<GridLogMessage << "Consistency check for sequential conserved " <<std::endl;
std::cout<<GridLogMessage << "Diff S = " << abs(check_S) << std::endl;
assert(abs(check_S) < 1e-8);
GRID_ASSERT(abs(check_S) < 1e-8);
std::cout<<GridLogMessage << "Diff V = " << abs(check_V) << std::endl;
assert(abs(check_V) < 1e-8);
GRID_ASSERT(abs(check_V) < 1e-8);
}
}

View File

@@ -114,7 +114,7 @@ int main(int argc, char **argv)
err = ref - r_eo;
std::cout << GridLogMessage << "EO norm diff\t" << norm2(err) << " (" << norm2(ref) << " - " << norm2(r_eo) << ")" << std::endl;
assert(fabs(norm2(err)) < tolerance);
GRID_ASSERT(fabs(norm2(err)) < tolerance);
@@ -129,7 +129,7 @@ int main(int argc, char **argv)
err = ref - r_eo;
std::cout << GridLogMessage << "EO norm diff compact\t" << norm2(err) << " (" << norm2(ref) << " - " << norm2(r_eo) << ")" << std::endl;
assert(fabs(norm2(err)) < tolerance);
GRID_ASSERT(fabs(norm2(err)) < tolerance);
std::cout << GridLogMessage << "==============================================================" << std::endl;
@@ -204,7 +204,7 @@ int main(int argc, char **argv)
err = phi - chi;
std::cout << GridLogMessage << "norm diff " << norm2(err) << std::endl;
assert(fabs(norm2(err)) < tolerance);
GRID_ASSERT(fabs(norm2(err)) < tolerance);
Dwc_compact.Mooee(chi_e, src_e);
Dwc_compact.MooeeInv(src_e, phi_e);
@@ -217,7 +217,7 @@ int main(int argc, char **argv)
err = phi - chi;
std::cout << GridLogMessage << "norm diff compact " << norm2(err) << std::endl;
assert(fabs(norm2(err)) < tolerance);
GRID_ASSERT(fabs(norm2(err)) < tolerance);
std::cout << GridLogMessage << "==============================================================" << std::endl;
std::cout << GridLogMessage << "= Test MeeDag MeeInvDag = 1 (if csw!=0) " << std::endl;
@@ -237,7 +237,7 @@ int main(int argc, char **argv)
err = phi - chi;
std::cout << GridLogMessage << "norm diff " << norm2(err) << std::endl;
assert(fabs(norm2(err)) < tolerance);
GRID_ASSERT(fabs(norm2(err)) < tolerance);
Dwc_compact.MooeeDag(chi_e, src_e);
Dwc_compact.MooeeInvDag(src_e, phi_e);
@@ -250,7 +250,7 @@ int main(int argc, char **argv)
err = phi - chi;
std::cout << GridLogMessage << "norm diff compact " << norm2(err) << std::endl;
assert(fabs(norm2(err)) < tolerance);
GRID_ASSERT(fabs(norm2(err)) < tolerance);
std::cout << GridLogMessage << "==============================================================" << std::endl;
std::cout << GridLogMessage << "= Test MeeInv MeeDag = 1 (if csw!=0) " << std::endl;
@@ -270,7 +270,7 @@ int main(int argc, char **argv)
err = phi - chi;
std::cout << GridLogMessage << "norm diff " << norm2(err) << std::endl;
assert(fabs(norm2(err)) < tolerance);
GRID_ASSERT(fabs(norm2(err)) < tolerance);
Dwc_compact.MooeeDag(chi_e, src_e);
Dwc_compact.MooeeInv(src_e, phi_e);
@@ -283,7 +283,7 @@ int main(int argc, char **argv)
err = phi - chi;
std::cout << GridLogMessage << "norm diff compact " << norm2(err) << std::endl;
assert(fabs(norm2(err)) < tolerance);
GRID_ASSERT(fabs(norm2(err)) < tolerance);
std::cout << GridLogMessage << "================================================================" << std::endl;
std::cout << GridLogMessage << "= Testing gauge covariance Clover term with EO preconditioning " << std::endl;
@@ -339,7 +339,7 @@ int main(int argc, char **argv)
err = chi - adj(Omega) * phi;
std::cout << GridLogMessage << "norm diff " << norm2(err) << std::endl;
assert(fabs(norm2(err)) < tolerance);
GRID_ASSERT(fabs(norm2(err)) < tolerance);
chi = Zero();
phi = Zero();
@@ -368,7 +368,7 @@ int main(int argc, char **argv)
err = chi - adj(Omega) * phi;
std::cout << GridLogMessage << "norm diff compact " << norm2(err) << std::endl;
assert(fabs(norm2(err)) < tolerance);
GRID_ASSERT(fabs(norm2(err)) < tolerance);
std::cout << GridLogMessage << "=================================================================" << std::endl;
std::cout << GridLogMessage << "= Testing gauge covariance Clover term w/o EO preconditioning " << std::endl;
@@ -389,10 +389,10 @@ int main(int argc, char **argv)
err = result - adj(Omega) * result2;
std::cout << GridLogMessage << "norm diff Wilson " << norm2(err) << std::endl;
assert(fabs(norm2(err)) < tolerance);
GRID_ASSERT(fabs(norm2(err)) < tolerance);
err = chi - adj(Omega) * phi;
std::cout << GridLogMessage << "norm diff WilsonExpClover " << norm2(err) << std::endl;
assert(fabs(norm2(err)) < tolerance);
GRID_ASSERT(fabs(norm2(err)) < tolerance);
chi = Zero();
phi = Zero();
@@ -402,7 +402,7 @@ int main(int argc, char **argv)
err = chi - adj(Omega) * phi;
std::cout << GridLogMessage << "norm diff CompactWilsonExpClover " << norm2(err) << std::endl;
assert(fabs(norm2(err)) < tolerance);
GRID_ASSERT(fabs(norm2(err)) < tolerance);
std::cout << GridLogMessage << "==========================================================" << std::endl;
std::cout << GridLogMessage << "= Testing Mooee(csw=0) Clover to reproduce Mooee Wilson " << std::endl;
@@ -432,7 +432,7 @@ int main(int argc, char **argv)
err = chi - phi;
std::cout << GridLogMessage << "norm diff " << norm2(err) << std::endl;
assert(fabs(norm2(err)) < tolerance);
GRID_ASSERT(fabs(norm2(err)) < tolerance);
chi = Zero();
phi = Zero();
@@ -458,7 +458,7 @@ int main(int argc, char **argv)
err = chi - phi;
std::cout << GridLogMessage << "norm diff compact " << norm2(err) << std::endl;
assert(fabs(norm2(err)) < tolerance);
GRID_ASSERT(fabs(norm2(err)) < tolerance);
std::cout << GridLogMessage << "==========================================================" << std::endl;
std::cout << GridLogMessage << "= Testing EO operator is equal to the unprec " << std::endl;
@@ -493,7 +493,7 @@ int main(int argc, char **argv)
std::cout << GridLogMessage << "ref (unpreconditioned operator) diff : " << norm2(ref) << std::endl;
std::cout << GridLogMessage << "phi (EO decomposition) diff : " << norm2(phi) << std::endl;
std::cout << GridLogMessage << "norm diff : " << norm2(err) << std::endl;
assert(fabs(norm2(err)) < tolerance);
GRID_ASSERT(fabs(norm2(err)) < tolerance);
chi = Zero();
phi = Zero();
@@ -524,7 +524,7 @@ int main(int argc, char **argv)
std::cout << GridLogMessage << "ref (unpreconditioned operator) diff compact : " << norm2(ref) << std::endl;
std::cout << GridLogMessage << "phi (EO decomposition) diff compact : " << norm2(phi) << std::endl;
std::cout << GridLogMessage << "norm diff compact : " << norm2(err) << std::endl;
assert(fabs(norm2(err)) < tolerance);
GRID_ASSERT(fabs(norm2(err)) < tolerance);
Grid_finalize();
}

View File

@@ -471,8 +471,8 @@ void TestConserved1(Action & Ddwf, Action & Ddwfrev,
// Mobius parameters
auto b=Ddwf.bs[s];
auto c=Ddwf.cs[s];
assert(Ddwfrev.bs[sr]==Ddwf.bs[s]);
assert(Ddwfrev.cs[sr]==Ddwf.cs[s]);
GRID_ASSERT(Ddwfrev.bs[sr]==Ddwf.bs[s]);
GRID_ASSERT(Ddwfrev.cs[sr]==Ddwf.cs[s]);
LatticePropagator tmp(UGrid);

View File

@@ -48,16 +48,16 @@ class HermOpAdaptor : public LinearOperatorBase<Field>
LinearOperatorBase<Field> & wrapped;
public:
HermOpAdaptor(LinearOperatorBase<Field> &wrapme) : wrapped(wrapme) {};
void OpDiag (const Field &in, Field &out) { assert(0); }
void OpDir (const Field &in, Field &out,int dir,int disp) { assert(0); }
void OpDirAll (const Field &in, std::vector<Field> &out){ assert(0); };
void OpDiag (const Field &in, Field &out) { GRID_ASSERT(0); }
void OpDir (const Field &in, Field &out,int dir,int disp) { GRID_ASSERT(0); }
void OpDirAll (const Field &in, std::vector<Field> &out){ GRID_ASSERT(0); };
void Op (const Field &in, Field &out){
wrapped.HermOp(in,out);
}
void AdjOp (const Field &in, Field &out){
wrapped.HermOp(in,out);
}
void HermOpAndNorm(const Field &in, Field &out,RealD &n1,RealD &n2){ assert(0); }
void HermOpAndNorm(const Field &in, Field &out,RealD &n1,RealD &n2){ GRID_ASSERT(0); }
void HermOp(const Field &in, Field &out){
wrapped.HermOp(in,out);
}
@@ -286,7 +286,7 @@ int main (int argc, char ** argv)
chi=chi-Aphi;
RealD diff =norm2(chi);
std::cout << r << " diff " << diff<<std::endl;
assert(diff < 1.0e-10);
GRID_ASSERT(diff < 1.0e-10);
}
std::cout << nrhs<< " mrhs " << t0/ncall/nrhs <<" us"<<std::endl;
std::cout << nrhs<< " srhs " << t1/ncall/nrhs <<" us"<<std::endl;

View File

@@ -43,10 +43,10 @@ public:
void Op (const Field &in, Field &out) { wrapped.HermOp(in,out); }
void HermOp(const Field &in, Field &out) { wrapped.HermOp(in,out); }
void AdjOp (const Field &in, Field &out){ wrapped.HermOp(in,out); }
void OpDiag (const Field &in, Field &out) { assert(0); }
void OpDir (const Field &in, Field &out,int dir,int disp) { assert(0); }
void OpDirAll (const Field &in, std::vector<Field> &out) { assert(0); };
void HermOpAndNorm(const Field &in, Field &out,RealD &n1,RealD &n2){ assert(0); }
void OpDiag (const Field &in, Field &out) { GRID_ASSERT(0); }
void OpDir (const Field &in, Field &out,int dir,int disp) { GRID_ASSERT(0); }
void OpDirAll (const Field &in, std::vector<Field> &out) { GRID_ASSERT(0); };
void HermOpAndNorm(const Field &in, Field &out,RealD &n1,RealD &n2){ GRID_ASSERT(0); }
};
template<class Field> class CGSmoother : public LinearFunction<Field>
@@ -128,6 +128,10 @@ int main (int argc, char ** argv)
typedef HermOpAdaptor<LatticeFermionD> HermFineMatrix;
HermFineMatrix FineHermOp(HermOpEO);
LatticeFermionD src(FrbGrid);
src = ComplexD(1.0);
PowerMethod<LatticeFermionD> PM; PM(HermOpEO,src);
////////////////////////////////////////////////////////////
///////////// Coarse basis and Little Dirac Operator ///////
////////////////////////////////////////////////////////////
@@ -150,7 +154,7 @@ int main (int argc, char ** argv)
std::cout << "**************************************"<<std::endl;
std::cout << "Create Subspace"<<std::endl;
std::cout << "**************************************"<<std::endl;
Aggregates.CreateSubspaceChebyshevNew(RNG5,HermOpEO,95.);
Aggregates.CreateSubspaceChebyshev(RNG5,HermOpEO,nbasis,35.,0.01,500);// <== last run
std::cout << "**************************************"<<std::endl;
std::cout << "Refine Subspace"<<std::endl;
@@ -185,7 +189,7 @@ int main (int argc, char ** argv)
std::cout << "**************************************"<<std::endl;
typedef HermitianLinearOperator<MultiGeneralCoarsenedMatrix_t,CoarseVector> MrhsHermMatrix;
Chebyshev<CoarseVector> IRLCheby(0.05,40.0,101); // 1 iter
Chebyshev<CoarseVector> IRLCheby(0.01,16.0,201); // 1 iter
MrhsHermMatrix MrhsCoarseOp (mrhs);
CoarseVector pm_src(CoarseMrhs);
@@ -193,10 +197,10 @@ int main (int argc, char ** argv)
PowerMethod<CoarseVector> cPM;
cPM(MrhsCoarseOp,pm_src);
int Nk=nrhs;
int Nm=Nk*3;
// int Nk=36;
// int Nm=144;
// int Nk=16;
// int Nm=Nk*3;
int Nk=32;
int Nm=128;
int Nstop=Nk;
int Nconv_test_interval=1;
@@ -210,7 +214,7 @@ int main (int argc, char ** argv)
nrhs,
Nk,
Nm,
1e-4,10);
1e-4,100);
int Nconv;
std::vector<RealD> eval(Nm);
@@ -231,8 +235,6 @@ int main (int argc, char ** argv)
std::cout << "**************************************"<<std::endl;
std::cout << " Recompute coarse evecs "<<std::endl;
std::cout << "**************************************"<<std::endl;
evec.resize(Nm,Coarse5d);
eval.resize(Nm);
for(int r=0;r<nrhs;r++){
random(CRNG,c_src[r]);
}
@@ -243,7 +245,7 @@ int main (int argc, char ** argv)
// Deflation guesser object
///////////////////////
std::cout << "**************************************"<<std::endl;
std::cout << " Reimport coarse evecs "<<std::endl;
std::cout << " Reimport coarse evecs "<<evec.size()<<" "<<eval.size()<<std::endl;
std::cout << "**************************************"<<std::endl;
MultiRHSDeflation<CoarseVector> MrhsGuesser;
MrhsGuesser.ImportEigenBasis(evec,eval);
@@ -252,9 +254,11 @@ int main (int argc, char ** argv)
// Extra HDCG parameters
//////////////////////////
int maxit=3000;
ConjugateGradient<CoarseVector> CG(2.0e-1,maxit,false);
RealD lo=2.0;
int ord = 9;
// ConjugateGradient<CoarseVector> CG(2.0e-1,maxit,false);
// ConjugateGradient<CoarseVector> CG(1.0e-2,maxit,false);
ConjugateGradient<CoarseVector> CG(5.0e-2,maxit,false);
RealD lo=0.2;
int ord = 7;
DoNothingGuesser<CoarseVector> DoNothing;
HPDSolver<CoarseVector> HPDSolveMrhs(MrhsCoarseOp,CG,DoNothing);
@@ -300,6 +304,19 @@ int main (int argc, char ** argv)
ConjugateGradient<LatticeFermionD> CGfine(1.0e-8,30000,false);
CGfine(HermOpEO, src, result);
}
{
std::cout << "**************************************"<<std::endl;
std::cout << "Calling MdagM CG"<<std::endl;
std::cout << "**************************************"<<std::endl;
LatticeFermion result(FGrid); result=Zero();
LatticeFermion src(FGrid); random(RNG5,src);
result=Zero();
MdagMLinearOperator<MobiusFermionD, LatticeFermionD> HermOp(Ddwf);
ConjugateGradient<LatticeFermionD> CGfine(1.0e-8,30000,false);
CGfine(HermOp, src, result);
}
#endif
Grid_finalize();
return 0;

View File

@@ -40,7 +40,7 @@ void SaveOperator(Coarsened &Operator,std::string file)
#ifdef HAVE_LIME
emptyUserRecord record;
ScidacWriter WR(Operator.Grid()->IsBoss());
assert(Operator._A.size()==Operator.geom.npoint);
GRID_ASSERT(Operator._A.size()==Operator.geom.npoint);
WR.open(file);
for(int p=0;p<Operator._A.size();p++){
auto tmp = Operator.Cell.Extract(Operator._A[p]);
@@ -57,7 +57,7 @@ void LoadOperator(Coarsened &Operator,std::string file)
emptyUserRecord record;
Grid::ScidacReader RD ;
RD.open(file);
assert(Operator._A.size()==Operator.geom.npoint);
GRID_ASSERT(Operator._A.size()==Operator.geom.npoint);
for(int p=0;p<Operator.geom.npoint;p++){
conformable(Operator._A[p].Grid(),Operator.CoarseGrid());
// RD.readScidacFieldRecord(Operator._A[p],record,BINARYIO_LEXICOGRAPHIC);
@@ -74,7 +74,7 @@ void ReLoadOperator(Coarsened &Operator,std::string file)
emptyUserRecord record;
Grid::ScidacReader RD ;
RD.open(file);
assert(Operator._A.size()==Operator.geom.npoint);
GRID_ASSERT(Operator._A.size()==Operator.geom.npoint);
for(int p=0;p<Operator.geom.npoint;p++){
auto tmp=Operator.Cell.Extract(Operator._A[p]);
RD.readScidacFieldRecord(tmp,record,0);
@@ -126,10 +126,10 @@ public:
void Op (const Field &in, Field &out) { wrapped.HermOp(in,out); }
void HermOp(const Field &in, Field &out) { wrapped.HermOp(in,out); }
void AdjOp (const Field &in, Field &out){ wrapped.HermOp(in,out); }
void OpDiag (const Field &in, Field &out) { assert(0); }
void OpDir (const Field &in, Field &out,int dir,int disp) { assert(0); }
void OpDirAll (const Field &in, std::vector<Field> &out) { assert(0); };
void HermOpAndNorm(const Field &in, Field &out,RealD &n1,RealD &n2){ assert(0); }
void OpDiag (const Field &in, Field &out) { GRID_ASSERT(0); }
void OpDir (const Field &in, Field &out,int dir,int disp) { GRID_ASSERT(0); }
void OpDirAll (const Field &in, std::vector<Field> &out) { GRID_ASSERT(0); };
void HermOpAndNorm(const Field &in, Field &out,RealD &n1,RealD &n2){ GRID_ASSERT(0); }
};
/*
template<class Field> class ChebyshevSmoother : public LinearFunction<Field>

View File

@@ -44,7 +44,7 @@ void SaveOperator(Coarsened &Operator,std::string file)
#ifdef HAVE_LIME
emptyUserRecord record;
ScidacWriter WR(Operator.Grid()->IsBoss());
assert(Operator._A.size()==Operator.geom.npoint);
GRID_ASSERT(Operator._A.size()==Operator.geom.npoint);
WR.open(file);
for(int p=0;p<Operator._A.size();p++){
auto tmp = Operator.Cell.Extract(Operator._A[p]);
@@ -61,7 +61,7 @@ void LoadOperator(Coarsened &Operator,std::string file)
emptyUserRecord record;
Grid::ScidacReader RD ;
RD.open(file);
assert(Operator._A.size()==Operator.geom.npoint);
GRID_ASSERT(Operator._A.size()==Operator.geom.npoint);
for(int p=0;p<Operator.geom.npoint;p++){
conformable(Operator._A[p].Grid(),Operator.CoarseGrid());
// RD.readScidacFieldRecord(Operator._A[p],record,BINARYIO_LEXICOGRAPHIC);
@@ -78,7 +78,7 @@ void ReLoadOperator(Coarsened &Operator,std::string file)
emptyUserRecord record;
Grid::ScidacReader RD ;
RD.open(file);
assert(Operator._A.size()==Operator.geom.npoint);
GRID_ASSERT(Operator._A.size()==Operator.geom.npoint);
for(int p=0;p<Operator.geom.npoint;p++){
auto tmp=Operator.Cell.Extract(Operator._A[p]);
RD.readScidacFieldRecord(tmp,record,0);
@@ -147,7 +147,7 @@ void LoadEigenvectors(std::vector<RealD> &eval,
Grid::ScidacReader RD ;
RD.open(evec_file);
assert(evec.size()==eval.size());
GRID_ASSERT(evec.size()==eval.size());
for(int k=0;k<eval.size();k++) {
RD.readScidacFieldRecord(evec[k],record);
}
@@ -165,10 +165,10 @@ public:
void Op (const Field &in, Field &out) { wrapped.HermOp(in,out); }
void HermOp(const Field &in, Field &out) { wrapped.HermOp(in,out); }
void AdjOp (const Field &in, Field &out){ wrapped.HermOp(in,out); }
void OpDiag (const Field &in, Field &out) { assert(0); }
void OpDir (const Field &in, Field &out,int dir,int disp) { assert(0); }
void OpDirAll (const Field &in, std::vector<Field> &out) { assert(0); };
void HermOpAndNorm(const Field &in, Field &out,RealD &n1,RealD &n2){ assert(0); }
void OpDiag (const Field &in, Field &out) { GRID_ASSERT(0); }
void OpDir (const Field &in, Field &out,int dir,int disp) { GRID_ASSERT(0); }
void OpDirAll (const Field &in, std::vector<Field> &out) { GRID_ASSERT(0); };
void HermOpAndNorm(const Field &in, Field &out,RealD &n1,RealD &n2){ GRID_ASSERT(0); }
};
template<class Field> class CGSmoother : public LinearFunction<Field>

View File

@@ -148,7 +148,7 @@ void LoadEigenvectors(std::vector<RealD> &eval,
Grid::ScidacReader RD ;
RD.open(evec_file);
assert(evec.size()==eval.size());
GRID_ASSERT(evec.size()==eval.size());
for(int k=0;k<eval.size();k++) {
RD.readScidacFieldRecord(evec[k],record);
}
@@ -166,10 +166,10 @@ public:
void Op (const Field &in, Field &out) { wrapped.HermOp(in,out); }
void HermOp(const Field &in, Field &out) { wrapped.HermOp(in,out); }
void AdjOp (const Field &in, Field &out){ wrapped.HermOp(in,out); }
void OpDiag (const Field &in, Field &out) { assert(0); }
void OpDir (const Field &in, Field &out,int dir,int disp) { assert(0); }
void OpDirAll (const Field &in, std::vector<Field> &out) { assert(0); };
void HermOpAndNorm(const Field &in, Field &out,RealD &n1,RealD &n2){ assert(0); }
void OpDiag (const Field &in, Field &out) { GRID_ASSERT(0); }
void OpDir (const Field &in, Field &out,int dir,int disp) { GRID_ASSERT(0); }
void OpDirAll (const Field &in, std::vector<Field> &out) { GRID_ASSERT(0); };
void HermOpAndNorm(const Field &in, Field &out,RealD &n1,RealD &n2){ GRID_ASSERT(0); }
};
template<class Field> class FixedCGPolynomial : public LinearFunction<Field>

View File

@@ -148,7 +148,7 @@ void LoadEigenvectors(std::vector<RealD> &eval,
Grid::ScidacReader RD ;
RD.open(evec_file);
assert(evec.size()==eval.size());
GRID_ASSERT(evec.size()==eval.size());
for(int k=0;k<eval.size();k++) {
RD.readScidacFieldRecord(evec[k],record);
}
@@ -166,10 +166,10 @@ public:
void Op (const Field &in, Field &out) { wrapped.HermOp(in,out); }
void HermOp(const Field &in, Field &out) { wrapped.HermOp(in,out); }
void AdjOp (const Field &in, Field &out){ wrapped.HermOp(in,out); }
void OpDiag (const Field &in, Field &out) { assert(0); }
void OpDir (const Field &in, Field &out,int dir,int disp) { assert(0); }
void OpDirAll (const Field &in, std::vector<Field> &out) { assert(0); };
void HermOpAndNorm(const Field &in, Field &out,RealD &n1,RealD &n2){ assert(0); }
void OpDiag (const Field &in, Field &out) { GRID_ASSERT(0); }
void OpDir (const Field &in, Field &out,int dir,int disp) { GRID_ASSERT(0); }
void OpDirAll (const Field &in, std::vector<Field> &out) { GRID_ASSERT(0); };
void HermOpAndNorm(const Field &in, Field &out,RealD &n1,RealD &n2){ GRID_ASSERT(0); }
};
template<class Field> class FixedCGPolynomial : public LinearFunction<Field>

View File

@@ -119,7 +119,7 @@ void LoadEigenvectors(std::vector<RealD> &eval,
Grid::ScidacReader RD ;
RD.open(evec_file);
assert(evec.size()==eval.size());
GRID_ASSERT(evec.size()==eval.size());
for(int k=0;k<eval.size();k++) {
RD.readScidacFieldRecord(evec[k],record);
}
@@ -137,10 +137,10 @@ public:
void Op (const Field &in, Field &out) { wrapped.HermOp(in,out); }
void HermOp(const Field &in, Field &out) { wrapped.HermOp(in,out); }
void AdjOp (const Field &in, Field &out){ wrapped.HermOp(in,out); }
void OpDiag (const Field &in, Field &out) { assert(0); }
void OpDir (const Field &in, Field &out,int dir,int disp) { assert(0); }
void OpDirAll (const Field &in, std::vector<Field> &out) { assert(0); };
void HermOpAndNorm(const Field &in, Field &out,RealD &n1,RealD &n2){ assert(0); }
void OpDiag (const Field &in, Field &out) { GRID_ASSERT(0); }
void OpDir (const Field &in, Field &out,int dir,int disp) { GRID_ASSERT(0); }
void OpDirAll (const Field &in, std::vector<Field> &out) { GRID_ASSERT(0); };
void HermOpAndNorm(const Field &in, Field &out,RealD &n1,RealD &n2){ GRID_ASSERT(0); }
};
template<class Field> class CGSmoother : public LinearFunction<Field>

View File

@@ -92,7 +92,7 @@ void LoadEigenvectors(std::vector<RealD> &eval,
Grid::ScidacReader RD ;
RD.open(evec_file);
assert(evec.size()==eval.size());
GRID_ASSERT(evec.size()==eval.size());
for(int k=0;k<eval.size();k++) {
RD.readScidacFieldRecord(evec[k],record);
}
@@ -110,10 +110,10 @@ public:
void Op (const Field &in, Field &out) { wrapped.HermOp(in,out); }
void HermOp(const Field &in, Field &out) { wrapped.HermOp(in,out); }
void AdjOp (const Field &in, Field &out){ wrapped.HermOp(in,out); }
void OpDiag (const Field &in, Field &out) { assert(0); }
void OpDir (const Field &in, Field &out,int dir,int disp) { assert(0); }
void OpDirAll (const Field &in, std::vector<Field> &out) { assert(0); };
void HermOpAndNorm(const Field &in, Field &out,RealD &n1,RealD &n2){ assert(0); }
void OpDiag (const Field &in, Field &out) { GRID_ASSERT(0); }
void OpDir (const Field &in, Field &out,int dir,int disp) { GRID_ASSERT(0); }
void OpDirAll (const Field &in, std::vector<Field> &out) { GRID_ASSERT(0); };
void HermOpAndNorm(const Field &in, Field &out,RealD &n1,RealD &n2){ GRID_ASSERT(0); }
};
template<class Field> class CGSmoother : public LinearFunction<Field>

View File

@@ -92,7 +92,7 @@ void LoadEigenvectors(std::vector<RealD> &eval,
Grid::ScidacReader RD ;
RD.open(evec_file);
assert(evec.size()==eval.size());
GRID_ASSERT(evec.size()==eval.size());
for(int k=0;k<eval.size();k++) {
RD.readScidacFieldRecord(evec[k],record);
}
@@ -110,10 +110,10 @@ public:
void Op (const Field &in, Field &out) { wrapped.HermOp(in,out); }
void HermOp(const Field &in, Field &out) { wrapped.HermOp(in,out); }
void AdjOp (const Field &in, Field &out){ wrapped.HermOp(in,out); }
void OpDiag (const Field &in, Field &out) { assert(0); }
void OpDir (const Field &in, Field &out,int dir,int disp) { assert(0); }
void OpDirAll (const Field &in, std::vector<Field> &out) { assert(0); };
void HermOpAndNorm(const Field &in, Field &out,RealD &n1,RealD &n2){ assert(0); }
void OpDiag (const Field &in, Field &out) { GRID_ASSERT(0); }
void OpDir (const Field &in, Field &out,int dir,int disp) { GRID_ASSERT(0); }
void OpDirAll (const Field &in, std::vector<Field> &out) { GRID_ASSERT(0); };
void HermOpAndNorm(const Field &in, Field &out,RealD &n1,RealD &n2){ GRID_ASSERT(0); }
};
template<class Field> class CGSmoother : public LinearFunction<Field>

View File

@@ -43,9 +43,9 @@ class PVdagMLinearOperator : public LinearOperatorBase<Field> {
public:
PVdagMLinearOperator(Matrix &Mat,Matrix &PV): _Mat(Mat),_PV(PV){};
void OpDiag (const Field &in, Field &out) { assert(0); }
void OpDir (const Field &in, Field &out,int dir,int disp) { assert(0); }
void OpDirAll (const Field &in, std::vector<Field> &out){ assert(0); };
void OpDiag (const Field &in, Field &out) { GRID_ASSERT(0); }
void OpDir (const Field &in, Field &out,int dir,int disp) { GRID_ASSERT(0); }
void OpDirAll (const Field &in, std::vector<Field> &out){ GRID_ASSERT(0); };
void Op (const Field &in, Field &out){
// std::cout << "Op: PVdag M "<<std::endl;
Field tmp(in.Grid());
@@ -58,7 +58,7 @@ public:
_PV.M(in,tmp);
_Mat.Mdag(tmp,out);
}
void HermOpAndNorm(const Field &in, Field &out,RealD &n1,RealD &n2){ assert(0); }
void HermOpAndNorm(const Field &in, Field &out,RealD &n1,RealD &n2){ GRID_ASSERT(0); }
void HermOp(const Field &in, Field &out){
// std::cout << "HermOp: Mdag PV PVdag M"<<std::endl;
Field tmp(in.Grid());
@@ -79,9 +79,9 @@ class ShiftedPVdagMLinearOperator : public LinearOperatorBase<Field> {
public:
ShiftedPVdagMLinearOperator(RealD _shift,Matrix &Mat,Matrix &PV): shift(_shift),_Mat(Mat),_PV(PV){};
void OpDiag (const Field &in, Field &out) { assert(0); }
void OpDir (const Field &in, Field &out,int dir,int disp) { assert(0); }
void OpDirAll (const Field &in, std::vector<Field> &out){ assert(0); };
void OpDiag (const Field &in, Field &out) { GRID_ASSERT(0); }
void OpDir (const Field &in, Field &out,int dir,int disp) { GRID_ASSERT(0); }
void OpDirAll (const Field &in, std::vector<Field> &out){ GRID_ASSERT(0); };
void Op (const Field &in, Field &out){
// std::cout << "Op: PVdag M "<<std::endl;
Field tmp(in.Grid());
@@ -96,7 +96,7 @@ public:
_Mat.Mdag(in,tmp);
out = out + shift * in;
}
void HermOpAndNorm(const Field &in, Field &out,RealD &n1,RealD &n2){ assert(0); }
void HermOpAndNorm(const Field &in, Field &out,RealD &n1,RealD &n2){ GRID_ASSERT(0); }
void HermOp(const Field &in, Field &out){
// std::cout << "HermOp: Mdag PV PVdag M"<<std::endl;
Field tmp(in.Grid());
@@ -368,7 +368,10 @@ int main (int argc, char ** argv)
TrivialPrecon<CoarseVector> simple;
NonHermitianLinearOperator<LittleDiracOperator,CoarseVector> LinOpCoarse(LittleDiracOpPV);
// PrecGeneralisedConjugateResidualNonHermitian<CoarseVector> L2PGCR(1.0e-4, 100, LinOpCoarse,simple,10,10);
PrecGeneralisedConjugateResidualNonHermitian<CoarseVector> L2PGCR(3.0e-2, 100, LinOpCoarse,simple,10,10);
// PrecGeneralisedConjugateResidualNonHermitian<CoarseVector> L2PGCR(3.0e-2, 100, LinOpCoarse,simple,12,12); // 35 outer
// PrecGeneralisedConjugateResidualNonHermitian<CoarseVector> L2PGCR(5.0e-2, 100, LinOpCoarse,simple,12,12); // 36 outer, 12s
// PrecGeneralisedConjugateResidualNonHermitian<CoarseVector> L2PGCR(1.0e-1, 100, LinOpCoarse,simple,12,12); // 36 ; 11s
PrecGeneralisedConjugateResidualNonHermitian<CoarseVector> L2PGCR(3.0e-1, 100, LinOpCoarse,simple,12,12);
L2PGCR.Level(3);
c_res=Zero();
L2PGCR(c_src,c_res);
@@ -400,7 +403,7 @@ int main (int argc, char ** argv)
LinOpCoarse,
L2PGCR);
PrecGeneralisedConjugateResidualNonHermitian<LatticeFermion> L1PGCR(1.0e-8,1000,PVdagM,TwoLevelPrecon,16,16);
PrecGeneralisedConjugateResidualNonHermitian<LatticeFermion> L1PGCR(1.0e-8,100,PVdagM,TwoLevelPrecon,10,10);
L1PGCR.Level(1);
f_res=Zero();

View File

@@ -0,0 +1,493 @@
/*************************************************************************************
Grid physics library, www.github.com/paboyle/Grid
Source file: ./tests/Test_padded_cell.cc
Copyright (C) 2023
Author: Peter Boyle <paboyle@ph.ed.ac.uk>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */
#include <Grid/Grid.h>
#include <Grid/lattice/PaddedCell.h>
#include <Grid/stencil/GeneralLocalStencil.h>
#include <Grid/algorithms/iterative/PrecGeneralisedConjugateResidual.h>
#include <Grid/algorithms/iterative/PrecGeneralisedConjugateResidualNonHermitian.h>
#include <Grid/algorithms/iterative/BiCGSTAB.h>
using namespace std;
using namespace Grid;
template<class Matrix,class Field>
class PVdagMLinearOperator : public LinearOperatorBase<Field> {
  Matrix &_Mat; // Dirac operator M
  Matrix &_PV;  // Pauli-Villars operator PV (heavy-mass regulator)
public:
  PVdagMLinearOperator(Matrix &Mat,Matrix &PV): _Mat(Mat),_PV(PV){};
  // Directional/diagonal decompositions are not needed for this test operator.
  // GRID_ASSERT used for consistency with the rest of the codebase.
  void OpDiag (const Field &in, Field &out) { GRID_ASSERT(0); }
  void OpDir (const Field &in, Field &out,int dir,int disp) { GRID_ASSERT(0); }
  void OpDirAll (const Field &in, std::vector<Field> &out){ GRID_ASSERT(0); };
  // out = PVdag M in
  void Op (const Field &in, Field &out){
    Field tmp(in.Grid());
    _Mat.M(in,tmp);
    _PV.Mdag(tmp,out);
  }
  // out = Mdag PV in   (adjoint of Op, since (PVdag M)^dag = Mdag PV)
  void AdjOp (const Field &in, Field &out){
    Field tmp(in.Grid());
    _PV.M(in,tmp);
    _Mat.Mdag(tmp,out);
  }
  void HermOpAndNorm(const Field &in, Field &out,RealD &n1,RealD &n2){
    GRID_ASSERT(0);
  }
  // out = Mdag PV PVdag M in  -- manifestly Hermitian positive (semi-)definite
  void HermOp(const Field &in, Field &out){
    Field tmp(in.Grid());
    Op(in,tmp);
    AdjOp(tmp,out);
  }
};
template<class Matrix,class Field>
class MdagPVLinearOperator : public LinearOperatorBase<Field> {
  Matrix &_Mat; // Dirac operator M
  Matrix &_PV;  // Pauli-Villars operator PV (heavy-mass regulator)
public:
  MdagPVLinearOperator(Matrix &Mat,Matrix &PV): _Mat(Mat),_PV(PV){};
  // Directional/diagonal decompositions are not needed for this test operator.
  // GRID_ASSERT used for consistency with the rest of the codebase.
  void OpDiag (const Field &in, Field &out) { GRID_ASSERT(0); }
  void OpDir (const Field &in, Field &out,int dir,int disp) { GRID_ASSERT(0); }
  void OpDirAll (const Field &in, std::vector<Field> &out){ GRID_ASSERT(0); };
  // out = Mdag PV in
  void Op (const Field &in, Field &out){
    Field tmp(in.Grid());
    _PV.M(in,tmp);
    _Mat.Mdag(tmp,out);
  }
  // out = PVdag M in   (adjoint of Op, since (Mdag PV)^dag = PVdag M)
  void AdjOp (const Field &in, Field &out){
    Field tmp(in.Grid());
    _Mat.M(in,tmp);
    _PV.Mdag(tmp,out);
  }
  void HermOpAndNorm(const Field &in, Field &out,RealD &n1,RealD &n2){
    GRID_ASSERT(0);
  }
  // out = PVdag M Mdag PV in  -- manifestly Hermitian positive (semi-)definite
  void HermOp(const Field &in, Field &out){
    Field tmp(in.Grid());
    Op(in,tmp);
    AdjOp(tmp,out);
  }
};
template<class Matrix,class Field>
class ShiftedPVdagMLinearOperator : public LinearOperatorBase<Field> {
  Matrix &_Mat; // Dirac operator M
  Matrix &_PV;  // Pauli-Villars operator PV (heavy-mass regulator)
  RealD shift;  // real shift added to the operator: PVdag M + shift
public:
  ShiftedPVdagMLinearOperator(RealD _shift,Matrix &Mat,Matrix &PV): shift(_shift),_Mat(Mat),_PV(PV){};
  // Directional/diagonal decompositions are not needed for this test operator.
  // GRID_ASSERT used for consistency with the rest of the codebase.
  void OpDiag (const Field &in, Field &out) { GRID_ASSERT(0); }
  void OpDir (const Field &in, Field &out,int dir,int disp) { GRID_ASSERT(0); }
  void OpDirAll (const Field &in, std::vector<Field> &out){ GRID_ASSERT(0); };
  // out = (PVdag M + shift) in
  void Op (const Field &in, Field &out){
    Field tmp(in.Grid());
    _Mat.M(in,tmp);
    _PV.Mdag(tmp,out);
    out = out + shift * in;
  }
  // out = (Mdag PV + shift) in   (adjoint of Op; shift is real so it is self-adjoint)
  // BUG FIX: the original applied _PV.M(tmp,out) BEFORE filling tmp via
  // _Mat.Mdag(in,tmp), reading tmp uninitialized. Corrected to apply
  // (PVdag M)^dag = Mdag PV, matching PVdagMLinearOperator::AdjOp.
  void AdjOp (const Field &in, Field &out){
    Field tmp(in.Grid());
    _PV.M(in,tmp);
    _Mat.Mdag(tmp,out);
    out = out + shift * in;
  }
  void HermOpAndNorm(const Field &in, Field &out,RealD &n1,RealD &n2){ GRID_ASSERT(0); }
  // out = AdjOp(Op(in)) = (Mdag PV + shift)(PVdag M + shift) in
  void HermOp(const Field &in, Field &out){
    Field tmp(in.Grid());
    Op(in,tmp);
    AdjOp(tmp,out);
  }
};
// Two-level multigrid preconditioner with distinct restriction (_FineToCoarse)
// and prolongation (_CoarseToFine) aggregates, intended for SVD-style setups
// where left and right singular-vector spaces may differ.
// One application = pre-smooth -> coarse-grid correction -> post-smooth.
template<class Fobj,class CComplex,int nbasis>
class MGPreconditionerSVD : public LinearFunction< Lattice<Fobj> > {
public:
using LinearFunction<Lattice<Fobj> >::operator();
typedef Aggregation<Fobj,CComplex,nbasis> Aggregates;
typedef typename Aggregation<Fobj,CComplex,nbasis>::FineField FineField;
typedef typename Aggregation<Fobj,CComplex,nbasis>::CoarseVector CoarseVector;
typedef typename Aggregation<Fobj,CComplex,nbasis>::CoarseMatrix CoarseMatrix;
typedef LinearOperatorBase<FineField> FineOperator;
typedef LinearFunction <FineField> FineSmoother;
typedef LinearOperatorBase<CoarseVector> CoarseOperator;
typedef LinearFunction <CoarseVector> CoarseSolver;
Aggregates & _FineToCoarse; // basis used to project the fine residual to the coarse grid
Aggregates & _CoarseToFine; // basis used to promote the coarse correction back
FineOperator & _FineOperator; // fine-grid operator; used to form residuals
FineSmoother & _PreSmoother;  // smoother applied before the coarse correction
FineSmoother & _PostSmoother; // smoother applied after the coarse correction
CoarseOperator & _CoarseOperator; // coarse operator (not used directly in operator(); solve goes through _CoarseSolve)
CoarseSolver & _CoarseSolve;  // approximate coarse-grid solver
int level; void Level(int lv) {level = lv; }; // nesting-level label for log output
// All collaborators are held by reference; the caller must keep them alive.
MGPreconditionerSVD(Aggregates &FtoC,
Aggregates &CtoF,
FineOperator &Fine,
FineSmoother &PreSmoother,
FineSmoother &PostSmoother,
CoarseOperator &CoarseOperator_,
CoarseSolver &CoarseSolve_)
: _FineToCoarse(FtoC),
_CoarseToFine(CtoF),
_FineOperator(Fine),
_PreSmoother(PreSmoother),
_PostSmoother(PostSmoother),
_CoarseOperator(CoarseOperator_),
_CoarseSolve(CoarseSolve_),
level(1) { }
// Apply one preconditioner cycle: out approximates A^{-1} in.
virtual void operator()(const FineField &in, FineField & out)
{
GridBase *CoarseGrid = _FineToCoarse.CoarseGrid;
// auto CoarseGrid = _CoarseOperator.Grid();
CoarseVector Csrc(CoarseGrid);
CoarseVector Csol(CoarseGrid);
FineField vec1(in.Grid());
FineField vec2(in.Grid());
std::cout<<GridLogMessage << "Calling PreSmoother " <<std::endl;
// std::cout<<GridLogMessage << "Calling PreSmoother input residual "<<norm2(in) <<std::endl;
double t;
// Fine Smoother
// out = in;
out = Zero();
t=-usecond();
_PreSmoother(in,out);
t+=usecond();
std::cout<<GridLogMessage << "PreSmoother took "<< t/1000.0<< "ms" <<std::endl;
// Update the residual: vec1 = in - A out
_FineOperator.Op(out,vec1); sub(vec1, in ,vec1);
// std::cout<<GridLogMessage <<"Residual-1 now " <<norm2(vec1)<<std::endl;
// Fine to Coarse: restrict the residual
t=-usecond();
_FineToCoarse.ProjectToSubspace (Csrc,vec1);
t+=usecond();
std::cout<<GridLogMessage << "Project to coarse took "<< t/1000.0<< "ms" <<std::endl;
// Coarse correction
t=-usecond();
Csol = Zero();
_CoarseSolve(Csrc,Csol);
//Csol=Zero();
t+=usecond();
std::cout<<GridLogMessage << "Coarse solve took "<< t/1000.0<< "ms" <<std::endl;
// Coarse to Fine: prolong the correction and add it to the iterate
t=-usecond();
// _CoarseOperator.PromoteFromSubspace(_Aggregates,Csol,vec1);
_CoarseToFine.PromoteFromSubspace(Csol,vec1);
add(out,out,vec1);
t+=usecond();
std::cout<<GridLogMessage << "Promote to this level took "<< t/1000.0<< "ms" <<std::endl;
// Residual after the coarse correction: vec1 = in - A out
_FineOperator.Op(out,vec1); sub(vec1 ,in , vec1);
// std::cout<<GridLogMessage <<"Residual-2 now " <<norm2(vec1)<<std::endl;
// Fine Smoother on the remaining residual; correction added to out
t=-usecond();
// vec2=vec1;
vec2=Zero();
_PostSmoother(vec1,vec2);
t+=usecond();
std::cout<<GridLogMessage << "PostSmoother took "<< t/1000.0<< "ms" <<std::endl;
add( out,out,vec2);
std::cout<<GridLogMessage << "Done " <<std::endl;
}
};
// Driver: builds and tests a two-level multigrid preconditioner for the
// Pauli-Villars preconditioned domain-wall operator PV^dag M, using separate
// left (U) and right (V) Chebyshev-bred bases, then solves with an outer PGCR.
// NOTE(review): reads the gauge field "ckpoint_lat.4000" from the working
// directory -- the run location matters.
int main (int argc, char ** argv)
{
Grid_init(&argc,&argv);
const int Ls=16;
// Four- and five-dimensional fine grids (full and red-black)
GridCartesian * UGrid = SpaceTimeGrid::makeFourDimGrid(GridDefaultLatt(), GridDefaultSimd(Nd,vComplex::Nsimd()),GridDefaultMpi());
GridRedBlackCartesian * UrbGrid = SpaceTimeGrid::makeFourDimRedBlackGrid(UGrid);
GridCartesian * FGrid = SpaceTimeGrid::makeFiveDimGrid(Ls,UGrid);
GridRedBlackCartesian * FrbGrid = SpaceTimeGrid::makeFiveDimRedBlackGrid(Ls,UGrid);
// Construct a coarsened grid (factor-2 blocking; coarse 5d grid has one s-slice)
Coordinate clatt = GridDefaultLatt();
for(int d=0;d<clatt.size();d++){
clatt[d] = clatt[d]/2;
// clatt[d] = clatt[d]/4;
}
GridCartesian *Coarse4d = SpaceTimeGrid::makeFourDimGrid(clatt, GridDefaultSimd(Nd,vComplex::Nsimd()),GridDefaultMpi());;
GridCartesian *Coarse5d = SpaceTimeGrid::makeFiveDimGrid(1,Coarse4d);
// Fixed seeds for reproducible fine/coarse random fields
std::vector<int> seeds4({1,2,3,4});
std::vector<int> seeds5({5,6,7,8});
std::vector<int> cseeds({5,6,7,8});
GridParallelRNG RNG5(FGrid); RNG5.SeedFixedIntegers(seeds5);
GridParallelRNG RNG4(UGrid); RNG4.SeedFixedIntegers(seeds4);
GridParallelRNG CRNG(Coarse5d);CRNG.SeedFixedIntegers(cseeds);
LatticeFermion src(FGrid); random(RNG5,src);
LatticeFermion result(FGrid); result=Zero();
LatticeFermion ref(FGrid); ref=Zero();
LatticeFermion tmp(FGrid);
LatticeFermion err(FGrid);
// Gauge configuration read from disk (NERSC format)
LatticeGaugeField Umu(UGrid);
FieldMetaData header;
std::string file("ckpoint_lat.4000");
NerscIO::readConfiguration(Umu,header,file);
// Light-quark DWF operator and unit-mass Pauli-Villars operator
RealD mass=0.01;
RealD M5=1.8;
DomainWallFermionD Ddwf(Umu,*FGrid,*FrbGrid,*UGrid,*UrbGrid,mass,M5);
DomainWallFermionD Dpv(Umu,*FGrid,*FrbGrid,*UGrid,*UrbGrid,1.0,M5);
const int nbasis = 30; // vectors per subspace; combined U+V basis has 2*nbasis
const int cb = 0 ;
NextToNearestStencilGeometry5D geom(Coarse5d);
std::cout<<GridLogMessage<<std::endl;
std::cout<<GridLogMessage<<"*******************************************"<<std::endl;
std::cout<<GridLogMessage<<std::endl;
typedef PVdagMLinearOperator<DomainWallFermionD,LatticeFermionD> PVdagM_t;
typedef MdagPVLinearOperator<DomainWallFermionD,LatticeFermionD> MdagPV_t;
typedef ShiftedPVdagMLinearOperator<DomainWallFermionD,LatticeFermionD> ShiftedPVdagM_t;
PVdagM_t PVdagM(Ddwf,Dpv);
MdagPV_t MdagPV(Ddwf,Dpv);
// History of shift/setup experiments (outer iteration counts in the comments):
// ShiftedPVdagM_t ShiftedPVdagM(2.0,Ddwf,Dpv); // 355
// ShiftedPVdagM_t ShiftedPVdagM(1.0,Ddwf,Dpv); // 246
// ShiftedPVdagM_t ShiftedPVdagM(0.5,Ddwf,Dpv); // 183
// ShiftedPVdagM_t ShiftedPVdagM(0.25,Ddwf,Dpv); // 145
// ShiftedPVdagM_t ShiftedPVdagM(0.1,Ddwf,Dpv); // 134
// ShiftedPVdagM_t ShiftedPVdagM(0.1,Ddwf,Dpv); // 127 -- NULL space via inverse iteration
// ShiftedPVdagM_t ShiftedPVdagM(0.1,Ddwf,Dpv); // 57 -- NULL space via inverse iteration; 3 iterations
// ShiftedPVdagM_t ShiftedPVdagM(0.25,Ddwf,Dpv); // 57 , tighter inversion
// ShiftedPVdagM_t ShiftedPVdagM(0.25,Ddwf,Dpv); // nbasis 20 -- 49 iters
// ShiftedPVdagM_t ShiftedPVdagM(0.25,Ddwf,Dpv); // nbasis 20 -- 70 iters; asymmetric
// ShiftedPVdagM_t ShiftedPVdagM(0.25,Ddwf,Dpv); // 58; Loosen coarse, tighten fine
// ShiftedPVdagM_t ShiftedPVdagM(0.1,Ddwf,Dpv); // 56 ...
// ShiftedPVdagM_t ShiftedPVdagM(0.1,Ddwf,Dpv); // 51 ... with 24 vecs
// ShiftedPVdagM_t ShiftedPVdagM(0.1,Ddwf,Dpv); // 31 ... with 24 vecs and 2^4 blocking
// ShiftedPVdagM_t ShiftedPVdagM(0.1,Ddwf,Dpv); // 43 ... with 16 vecs and 2^4 blocking, sloppier
// ShiftedPVdagM_t ShiftedPVdagM(0.1,Ddwf,Dpv); // 35 ... with 20 vecs and 2^4 blocking
// ShiftedPVdagM_t ShiftedPVdagM(0.1,Ddwf,Dpv); // 35 ... with 20 vecs and 2^4 blocking, looser coarse
// ShiftedPVdagM_t ShiftedPVdagM(0.1,Ddwf,Dpv); // 64 ... with 20 vecs, Christoph setup, and 2^4 blocking, looser coarse
ShiftedPVdagM_t ShiftedPVdagM(0.01,Ddwf,Dpv); //
// Run power method on HOA??
PowerMethod<LatticeFermion> PM;
// PM(PVdagM,src);
// PM(MdagPV,src);
// Warning: This routine calls PVdagM.Op, not PVdagM.HermOp
typedef Aggregation<vSpinColourVector,vTComplex,nbasis> Subspace;
Subspace V(Coarse5d,FGrid,cb);
Subspace U(Coarse5d,FGrid,cb);
// Breeds right singular vectors with call to HermOp (V)
// args: presumably (rng, op, nvec, hi, lo, order) for the Chebyshev filter -- confirm against CreateSubspaceChebyshev
V.CreateSubspaceChebyshev(RNG5,PVdagM,
nbasis,
4000.0,0.003,
500);
// Breeds left singular vectors with call to HermOp (U)
// U.CreateSubspaceChebyshev(RNG5,PVdagM,
U.CreateSubspaceChebyshev(RNG5,MdagPV,
nbasis,
4000.0,0.003,
500);
// Stack V (first nbasis) and U (second nbasis) into one 2*nbasis aggregate,
// used as both restriction and prolongation basis below.
typedef Aggregation<vSpinColourVector,vTComplex,2*nbasis> CombinedSubspace;
CombinedSubspace CombinedUV(Coarse5d,FGrid,cb);
for(int b=0;b<nbasis;b++){
CombinedUV.subspace[b] = V.subspace[b];
CombinedUV.subspace[b+nbasis] = U.subspace[b];
}
// Diagnostic dumps of the operator's matrix elements in the U/V bases
// (note: 'src' is reused as scratch from here on).
int bl, br;
std::cout <<" <V| PVdagM| V> " <<std::endl;
for(bl=0;bl<nbasis;bl++){
for(br=0;br<nbasis;br++){
PVdagM.Op(V.subspace[br],src);
std::cout <<bl<<" "<<br<<"\t"<<innerProduct(V.subspace[bl],src)<<std::endl;
}}
std::cout <<" <V| PVdagM| U> " <<std::endl;
for(bl=0;bl<nbasis;bl++){
for(br=0;br<nbasis;br++){
PVdagM.Op(U.subspace[br],src);
std::cout <<bl<<" "<<br<<"\t"<<innerProduct(V.subspace[bl],src)<<std::endl;
}}
std::cout <<" <U| PVdagM| V> " <<std::endl;
for(bl=0;bl<nbasis;bl++){
for(br=0;br<nbasis;br++){
PVdagM.Op(V.subspace[br],src);
std::cout <<bl<<" "<<br<<"\t"<<innerProduct(U.subspace[bl],src)<<std::endl;
}}
std::cout <<" <U| PVdagM| U> " <<std::endl;
for(bl=0;bl<nbasis;bl++){
for(br=0;br<nbasis;br++){
PVdagM.Op(U.subspace[br],src);
std::cout <<bl<<" "<<br<<"\t"<<innerProduct(U.subspace[bl],src)<<std::endl;
}}
typedef GeneralCoarsenedMatrix<vSpinColourVector,vTComplex,nbasis> LittleDiracOperatorV;
typedef LittleDiracOperatorV::CoarseVector CoarseVectorV;
typedef GeneralCoarsenedMatrix<vSpinColourVector,vTComplex,2*nbasis> LittleDiracOperator;
typedef LittleDiracOperator::CoarseVector CoarseVector;
// Measure how much of U already lies in span(V) (overwrites 'src' as scratch)
V.Orthogonalise();
for(int b =0 ; b<nbasis;b++){
CoarseVectorV c_src (Coarse5d);
V.ProjectToSubspace (c_src,U.subspace[b]);
V.PromoteFromSubspace(c_src,src);
std::cout << " Completeness of U in V ["<< b<<"] "<< std::sqrt(norm2(src)/norm2(U.subspace[b]))<<std::endl;
}
CoarseVector c_src (Coarse5d);
CoarseVector c_res (Coarse5d);
CoarseVector c_proj(Coarse5d);
// Coarsen PV^dag M over the combined 2*nbasis basis
LittleDiracOperator LittleDiracOpPV(geom,FGrid,Coarse5d);
LittleDiracOpPV.CoarsenOperator(PVdagM,CombinedUV,CombinedUV);
std::cout<<GridLogMessage<<std::endl;
std::cout<<GridLogMessage<<"*******************************************"<<std::endl;
std::cout<<GridLogMessage<<std::endl;
std::cout<<GridLogMessage<<"Testing coarsened operator "<<std::endl;
// Consistency check: coarse op on an all-ones coarse vector must agree with
// projecting (fine op applied to the promoted fine vector).
Complex one(1.0);
c_src = one; // 1 in every element for vector 1.
blockPromote(c_src,err,CombinedUV.subspace);
LatticeFermion prom(FGrid);
prom=Zero();
for(int b=0;b<nbasis*2;b++){
prom=prom+CombinedUV.subspace[b];
}
std::cout<<GridLogMessage<<"c_src "<<norm2(c_src)<<std::endl;
std::cout<<GridLogMessage<<"prom "<<norm2(prom)<<std::endl;
PVdagM.Op(prom,tmp);
blockProject(c_proj,tmp,CombinedUV.subspace);
std::cout<<GridLogMessage<<" Called Big Dirac Op "<<norm2(tmp)<<std::endl;
LittleDiracOpPV.M(c_src,c_res);
std::cout<<GridLogMessage<<" Called Little Dirac Op c_src "<< norm2(c_src) << " c_res "<< norm2(c_res) <<std::endl;
std::cout<<GridLogMessage<<"Little dop : "<<norm2(c_res)<<std::endl;
std::cout<<GridLogMessage<<"Big dop in subspace : "<<norm2(c_proj)<<std::endl;
c_proj = c_proj - c_res;
std::cout<<GridLogMessage<<" ldop error: "<<norm2(c_proj)<<std::endl;
/**********
* Some solvers
**********
*/
///////////////////////////////////////
// Coarse grid solver test
///////////////////////////////////////
std::cout<<GridLogMessage<<"******************* "<<std::endl;
std::cout<<GridLogMessage<<" Coarse Grid Solve -- Level 3 "<<std::endl;
std::cout<<GridLogMessage<<"******************* "<<std::endl;
TrivialPrecon<CoarseVector> simple;
NonHermitianLinearOperator<LittleDiracOperator,CoarseVector> LinOpCoarse(LittleDiracOpPV);
// PrecGeneralisedConjugateResidualNonHermitian<CoarseVector> L2PGCR(1.0e-4, 100, LinOpCoarse,simple,10,10);
PrecGeneralisedConjugateResidualNonHermitian<CoarseVector> L2PGCR(1.0e-2, 10, LinOpCoarse,simple,20,20);
L2PGCR.Level(3);
c_res=Zero();
L2PGCR(c_src,c_res);
////////////////////////////////////////
// Fine grid smoother
////////////////////////////////////////
std::cout<<GridLogMessage<<"******************* "<<std::endl;
std::cout<<GridLogMessage<<" Fine Grid Smoother -- Level 2 "<<std::endl;
std::cout<<GridLogMessage<<"******************* "<<std::endl;
TrivialPrecon<LatticeFermionD> simple_fine;
// NonHermitianLinearOperator<PVdagM_t,LatticeFermionD> LinOpSmooth(PVdagM);
// A single loose PGCR iteration on the shifted operator acts as the smoother
PrecGeneralisedConjugateResidualNonHermitian<LatticeFermionD> SmootherGCR(0.01,1,ShiftedPVdagM,simple_fine,16,16);
SmootherGCR.Level(2);
LatticeFermionD f_src(FGrid);
LatticeFermionD f_res(FGrid);
f_src = one; // 1 in every element for vector 1.
f_res=Zero();
SmootherGCR(f_src,f_res);
// Assemble the two-level preconditioner and run the outer solve
typedef MGPreconditionerSVD<vSpinColourVector, vTComplex,nbasis*2> TwoLevelMG;
TwoLevelMG TwoLevelPrecon(CombinedUV,CombinedUV,
PVdagM,
simple_fine,
SmootherGCR,
LinOpCoarse,
L2PGCR);
PrecGeneralisedConjugateResidualNonHermitian<LatticeFermion> L1PGCR(1.0e-8,1000,PVdagM,TwoLevelPrecon,20,20);
L1PGCR.Level(1);
f_res=Zero();
L1PGCR(f_src,f_res);
std::cout<<GridLogMessage<<std::endl;
std::cout<<GridLogMessage<<"*******************************************"<<std::endl;
std::cout<<GridLogMessage<<std::endl;
std::cout<<GridLogMessage << "Done "<< std::endl;
Grid_finalize();
return 0;
}

View File

@@ -0,0 +1,492 @@
/*************************************************************************************
Grid physics library, www.github.com/paboyle/Grid
Source file: ./tests/Test_padded_cell.cc
Copyright (C) 2023
Author: Peter Boyle <paboyle@ph.ed.ac.uk>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */
#include <Grid/Grid.h>
#include <Grid/lattice/PaddedCell.h>
#include <Grid/stencil/GeneralLocalStencil.h>
#include <Grid/algorithms/iterative/PrecGeneralisedConjugateResidual.h>
#include <Grid/algorithms/iterative/PrecGeneralisedConjugateResidualNonHermitian.h>
#include <Grid/algorithms/iterative/BiCGSTAB.h>
using namespace std;
using namespace Grid;
// Non-hermitian wrapper for the Pauli-Villars preconditioned operator PV^dag M.
template<class Matrix,class Field>
class PVdagMLinearOperator : public LinearOperatorBase<Field> {
  Matrix &_Mat; // Dirac operator M
  Matrix &_PV;  // Pauli-Villars regulator (unit mass)
public:
  PVdagMLinearOperator(Matrix &Mat,Matrix &PV): _Mat(Mat),_PV(PV){};
  void OpDiag (const Field &in, Field &out) { assert(0); } // sparse-direction interface unsupported
  void OpDir (const Field &in, Field &out,int dir,int disp) { assert(0); }
  void OpDirAll (const Field &in, std::vector<Field> &out){ assert(0); };
  // out = PV^dag (M in)
  void Op (const Field &in, Field &out){
    Field stage(in.Grid());
    _Mat.M(in,stage);
    _PV.Mdag(stage,out);
  }
  // out = M^dag (PV in)
  void AdjOp (const Field &in, Field &out){
    Field stage(in.Grid());
    _PV.M(in,stage);
    _Mat.Mdag(stage,out);
  }
  // out = HermOp(in); also returns n1 = Re<in,out> and n2 = |out|^2.
  void HermOpAndNorm(const Field &in, Field &out,RealD &n1,RealD &n2){
    HermOp(in,out);
    ComplexD ip = innerProduct(in,out);
    n1=real(ip);
    n2=norm2(out);
  }
  // Hermitian composition: out = (M^dag PV)(PV^dag M) in.
  void HermOp(const Field &in, Field &out){
    Field stage(in.Grid());
    Op(in,stage);
    AdjOp(stage,out);
  }
};
// Non-hermitian wrapper for the transposed product M^dag PV (left singular
// vector breeding companion of PVdagMLinearOperator).
template<class Matrix,class Field>
class MdagPVLinearOperator : public LinearOperatorBase<Field> {
  Matrix &_Mat; // Dirac operator M
  Matrix &_PV;  // Pauli-Villars regulator (unit mass)
public:
  MdagPVLinearOperator(Matrix &Mat,Matrix &PV): _Mat(Mat),_PV(PV){};
  void OpDiag (const Field &in, Field &out) { assert(0); } // sparse-direction interface unsupported
  void OpDir (const Field &in, Field &out,int dir,int disp) { assert(0); }
  void OpDirAll (const Field &in, std::vector<Field> &out){ assert(0); };
  // out = M^dag (PV in)
  void Op (const Field &in, Field &out){
    Field tmp(in.Grid());
    _PV.M(in,tmp);
    _Mat.Mdag(tmp,out);
  }
  // out = PV^dag (M in)
  void AdjOp (const Field &in, Field &out){
    Field tmp(in.Grid());
    _Mat.M(in,tmp);
    _PV.Mdag(tmp,out);
  }
  // out = HermOp(in); n1 = Re<in,out>, n2 = |out|^2.
  // BUG FIX: previously the inner product and norm were taken on 'out' without
  // ever computing it; now applies HermOp first, matching
  // PVdagMLinearOperator::HermOpAndNorm.
  void HermOpAndNorm(const Field &in, Field &out,RealD &n1,RealD &n2){
    HermOp(in,out);
    ComplexD dot = innerProduct(in,out);
    n1=real(dot);
    n2=norm2(out);
  }
  // Hermitian composition: out = (PV^dag M)(M^dag PV) in.
  void HermOp(const Field &in, Field &out){
    Field tmp(in.Grid());
    Op(in,tmp);
    AdjOp(tmp,out);
  }
};
// Shifted non-hermitian operator (PV^dag M + shift), used as the target of the
// fine-grid smoother. The real shift regulates the low modes of PV^dag M.
template<class Matrix,class Field>
class ShiftedPVdagMLinearOperator : public LinearOperatorBase<Field> {
  Matrix &_Mat; // Dirac operator M
  Matrix &_PV;  // Pauli-Villars regulator (unit mass)
  RealD shift;  // real shift added to the operator
public:
  ShiftedPVdagMLinearOperator(RealD _shift,Matrix &Mat,Matrix &PV): shift(_shift),_Mat(Mat),_PV(PV){};
  void OpDiag (const Field &in, Field &out) { assert(0); } // sparse-direction interface unsupported
  void OpDir (const Field &in, Field &out,int dir,int disp) { assert(0); }
  void OpDirAll (const Field &in, std::vector<Field> &out){ assert(0); };
  // out = (PV^dag M + shift) in
  void Op (const Field &in, Field &out){
    Field tmp(in.Grid());
    _Mat.M(in,tmp);      // tmp = M in
    _PV.Mdag(tmp,out);   // out = PV^dag tmp
    out = out + shift * in;
  }
  // out = (M^dag PV + shift) in -- the adjoint of Op for a real shift.
  // BUG FIX: the previous code ran _PV.M(tmp,out) before tmp was assigned,
  // reading an uninitialised field; order now matches PVdagMLinearOperator::AdjOp.
  void AdjOp (const Field &in, Field &out){
    Field tmp(in.Grid());
    _PV.M(in,tmp);       // tmp = PV in
    _Mat.Mdag(tmp,out);  // out = M^dag tmp
    out = out + shift * in;
  }
  void HermOpAndNorm(const Field &in, Field &out,RealD &n1,RealD &n2){ assert(0); } // not implemented
  // out = AdjOp(Op(in)) : hermitian combination of the shifted operator.
  void HermOp(const Field &in, Field &out){
    Field tmp(in.Grid());
    Op(in,tmp);
    AdjOp(tmp,out);
  }
};
// Two-level multigrid preconditioner with distinct restriction (_FineToCoarse)
// and prolongation (_CoarseToFine) aggregates, intended for SVD-style setups
// where left and right singular-vector spaces may differ.
// One application = pre-smooth -> coarse-grid correction -> post-smooth.
template<class Fobj,class CComplex,int nbasis>
class MGPreconditionerSVD : public LinearFunction< Lattice<Fobj> > {
public:
using LinearFunction<Lattice<Fobj> >::operator();
typedef Aggregation<Fobj,CComplex,nbasis> Aggregates;
typedef typename Aggregation<Fobj,CComplex,nbasis>::FineField FineField;
typedef typename Aggregation<Fobj,CComplex,nbasis>::CoarseVector CoarseVector;
typedef typename Aggregation<Fobj,CComplex,nbasis>::CoarseMatrix CoarseMatrix;
typedef LinearOperatorBase<FineField> FineOperator;
typedef LinearFunction <FineField> FineSmoother;
typedef LinearOperatorBase<CoarseVector> CoarseOperator;
typedef LinearFunction <CoarseVector> CoarseSolver;
Aggregates & _FineToCoarse; // basis used to project the fine residual to the coarse grid
Aggregates & _CoarseToFine; // basis used to promote the coarse correction back
FineOperator & _FineOperator; // fine-grid operator; used to form residuals
FineSmoother & _PreSmoother;  // smoother applied before the coarse correction
FineSmoother & _PostSmoother; // smoother applied after the coarse correction
CoarseOperator & _CoarseOperator; // coarse operator (not used directly in operator(); solve goes through _CoarseSolve)
CoarseSolver & _CoarseSolve;  // approximate coarse-grid solver
int level; void Level(int lv) {level = lv; }; // nesting-level label for log output
// All collaborators are held by reference; the caller must keep them alive.
MGPreconditionerSVD(Aggregates &FtoC,
Aggregates &CtoF,
FineOperator &Fine,
FineSmoother &PreSmoother,
FineSmoother &PostSmoother,
CoarseOperator &CoarseOperator_,
CoarseSolver &CoarseSolve_)
: _FineToCoarse(FtoC),
_CoarseToFine(CtoF),
_FineOperator(Fine),
_PreSmoother(PreSmoother),
_PostSmoother(PostSmoother),
_CoarseOperator(CoarseOperator_),
_CoarseSolve(CoarseSolve_),
level(1) { }
// Apply one preconditioner cycle: out approximates A^{-1} in.
virtual void operator()(const FineField &in, FineField & out)
{
GridBase *CoarseGrid = _FineToCoarse.CoarseGrid;
// auto CoarseGrid = _CoarseOperator.Grid();
CoarseVector Csrc(CoarseGrid);
CoarseVector Csol(CoarseGrid);
FineField vec1(in.Grid());
FineField vec2(in.Grid());
std::cout<<GridLogMessage << "Calling PreSmoother " <<std::endl;
// std::cout<<GridLogMessage << "Calling PreSmoother input residual "<<norm2(in) <<std::endl;
double t;
// Fine Smoother
// out = in;
out = Zero();
t=-usecond();
_PreSmoother(in,out);
t+=usecond();
std::cout<<GridLogMessage << "PreSmoother took "<< t/1000.0<< "ms" <<std::endl;
// Update the residual: vec1 = in - A out
_FineOperator.Op(out,vec1); sub(vec1, in ,vec1);
// std::cout<<GridLogMessage <<"Residual-1 now " <<norm2(vec1)<<std::endl;
// Fine to Coarse: restrict the residual
t=-usecond();
_FineToCoarse.ProjectToSubspace (Csrc,vec1);
t+=usecond();
std::cout<<GridLogMessage << "Project to coarse took "<< t/1000.0<< "ms" <<std::endl;
// Coarse correction
t=-usecond();
Csol = Zero();
_CoarseSolve(Csrc,Csol);
//Csol=Zero();
t+=usecond();
std::cout<<GridLogMessage << "Coarse solve took "<< t/1000.0<< "ms" <<std::endl;
// Coarse to Fine: prolong the correction and add it to the iterate
t=-usecond();
// _CoarseOperator.PromoteFromSubspace(_Aggregates,Csol,vec1);
_CoarseToFine.PromoteFromSubspace(Csol,vec1);
add(out,out,vec1);
t+=usecond();
std::cout<<GridLogMessage << "Promote to this level took "<< t/1000.0<< "ms" <<std::endl;
// Residual after the coarse correction: vec1 = in - A out
_FineOperator.Op(out,vec1); sub(vec1 ,in , vec1);
// std::cout<<GridLogMessage <<"Residual-2 now " <<norm2(vec1)<<std::endl;
// Fine Smoother on the remaining residual; correction added to out
t=-usecond();
// vec2=vec1;
vec2=Zero();
_PostSmoother(vec1,vec2);
t+=usecond();
std::cout<<GridLogMessage << "PostSmoother took "<< t/1000.0<< "ms" <<std::endl;
add( out,out,vec2);
std::cout<<GridLogMessage << "Done " <<std::endl;
}
};
// Driver variant: two-level multigrid for PV^dag M with nbasis=20 and plain
// (inverse-iteration) CreateSubspace setup instead of Chebyshev filtering.
// NOTE(review): reads the gauge field "ckpoint_lat.4000" from the working
// directory -- the run location matters.
int main (int argc, char ** argv)
{
Grid_init(&argc,&argv);
const int Ls=16;
// Four- and five-dimensional fine grids (full and red-black)
GridCartesian * UGrid = SpaceTimeGrid::makeFourDimGrid(GridDefaultLatt(), GridDefaultSimd(Nd,vComplex::Nsimd()),GridDefaultMpi());
GridRedBlackCartesian * UrbGrid = SpaceTimeGrid::makeFourDimRedBlackGrid(UGrid);
GridCartesian * FGrid = SpaceTimeGrid::makeFiveDimGrid(Ls,UGrid);
GridRedBlackCartesian * FrbGrid = SpaceTimeGrid::makeFiveDimRedBlackGrid(Ls,UGrid);
// Construct a coarsened grid (factor-2 blocking; coarse 5d grid has one s-slice)
Coordinate clatt = GridDefaultLatt();
for(int d=0;d<clatt.size();d++){
clatt[d] = clatt[d]/2;
// clatt[d] = clatt[d]/4;
}
GridCartesian *Coarse4d = SpaceTimeGrid::makeFourDimGrid(clatt, GridDefaultSimd(Nd,vComplex::Nsimd()),GridDefaultMpi());;
GridCartesian *Coarse5d = SpaceTimeGrid::makeFiveDimGrid(1,Coarse4d);
// Fixed seeds for reproducible fine/coarse random fields
std::vector<int> seeds4({1,2,3,4});
std::vector<int> seeds5({5,6,7,8});
std::vector<int> cseeds({5,6,7,8});
GridParallelRNG RNG5(FGrid); RNG5.SeedFixedIntegers(seeds5);
GridParallelRNG RNG4(UGrid); RNG4.SeedFixedIntegers(seeds4);
GridParallelRNG CRNG(Coarse5d);CRNG.SeedFixedIntegers(cseeds);
LatticeFermion src(FGrid); random(RNG5,src);
LatticeFermion result(FGrid); result=Zero();
LatticeFermion ref(FGrid); ref=Zero();
LatticeFermion tmp(FGrid);
LatticeFermion err(FGrid);
// Gauge configuration read from disk (NERSC format)
LatticeGaugeField Umu(UGrid);
FieldMetaData header;
std::string file("ckpoint_lat.4000");
NerscIO::readConfiguration(Umu,header,file);
// Light-quark DWF operator and unit-mass Pauli-Villars operator
RealD mass=0.01;
RealD M5=1.8;
DomainWallFermionD Ddwf(Umu,*FGrid,*FrbGrid,*UGrid,*UrbGrid,mass,M5);
DomainWallFermionD Dpv(Umu,*FGrid,*FrbGrid,*UGrid,*UrbGrid,1.0,M5);
const int nbasis = 20; // vectors per subspace; combined U+V basis has 2*nbasis
const int cb = 0 ;
NextToNearestStencilGeometry5D geom(Coarse5d);
std::cout<<GridLogMessage<<std::endl;
std::cout<<GridLogMessage<<"*******************************************"<<std::endl;
std::cout<<GridLogMessage<<std::endl;
typedef PVdagMLinearOperator<DomainWallFermionD,LatticeFermionD> PVdagM_t;
typedef MdagPVLinearOperator<DomainWallFermionD,LatticeFermionD> MdagPV_t;
typedef ShiftedPVdagMLinearOperator<DomainWallFermionD,LatticeFermionD> ShiftedPVdagM_t;
PVdagM_t PVdagM(Ddwf,Dpv);
MdagPV_t MdagPV(Ddwf,Dpv);
// History of shift/setup experiments (outer iteration counts in the comments):
// ShiftedPVdagM_t ShiftedPVdagM(2.0,Ddwf,Dpv); // 355
// ShiftedPVdagM_t ShiftedPVdagM(1.0,Ddwf,Dpv); // 246
// ShiftedPVdagM_t ShiftedPVdagM(0.5,Ddwf,Dpv); // 183
// ShiftedPVdagM_t ShiftedPVdagM(0.25,Ddwf,Dpv); // 145
// ShiftedPVdagM_t ShiftedPVdagM(0.1,Ddwf,Dpv); // 134
// ShiftedPVdagM_t ShiftedPVdagM(0.1,Ddwf,Dpv); // 127 -- NULL space via inverse iteration
// ShiftedPVdagM_t ShiftedPVdagM(0.1,Ddwf,Dpv); // 57 -- NULL space via inverse iteration; 3 iterations
// ShiftedPVdagM_t ShiftedPVdagM(0.25,Ddwf,Dpv); // 57 , tighter inversion
// ShiftedPVdagM_t ShiftedPVdagM(0.25,Ddwf,Dpv); // nbasis 20 -- 49 iters
// ShiftedPVdagM_t ShiftedPVdagM(0.25,Ddwf,Dpv); // nbasis 20 -- 70 iters; asymmetric
// ShiftedPVdagM_t ShiftedPVdagM(0.25,Ddwf,Dpv); // 58; Loosen coarse, tighten fine
// ShiftedPVdagM_t ShiftedPVdagM(0.1,Ddwf,Dpv); // 56 ...
// ShiftedPVdagM_t ShiftedPVdagM(0.1,Ddwf,Dpv); // 51 ... with 24 vecs
// ShiftedPVdagM_t ShiftedPVdagM(0.1,Ddwf,Dpv); // 31 ... with 24 vecs and 2^4 blocking
// ShiftedPVdagM_t ShiftedPVdagM(0.1,Ddwf,Dpv); // 43 ... with 16 vecs and 2^4 blocking, sloppier
// ShiftedPVdagM_t ShiftedPVdagM(0.1,Ddwf,Dpv); // 35 ... with 20 vecs and 2^4 blocking
// ShiftedPVdagM_t ShiftedPVdagM(0.1,Ddwf,Dpv); // 35 ... with 20 vecs and 2^4 blocking, looser coarse
// ShiftedPVdagM_t ShiftedPVdagM(0.1,Ddwf,Dpv); // 64 ... with 20 vecs, Christoph setup, and 2^4 blocking, looser coarse
ShiftedPVdagM_t ShiftedPVdagM(0.01,Ddwf,Dpv); //
// Run power method on HOA??
PowerMethod<LatticeFermion> PM;
// PM(PVdagM,src);
// PM(MdagPV,src);
// Warning: This routine calls PVdagM.Op, not PVdagM.HermOp
typedef Aggregation<vSpinColourVector,vTComplex,nbasis> Subspace;
Subspace V(Coarse5d,FGrid,cb);
Subspace U(Coarse5d,FGrid,cb);
// Breeds right singular vectors with call to HermOp (V)
V.CreateSubspace(RNG5,PVdagM,nbasis);
// Breeds left singular vectors with call to HermOp (U)
// U.CreateSubspaceChebyshev(RNG5,MdagPV,
// NOTE(review): U is bred with PVdagM here, same as V, despite the "left
// singular vectors" comment; the MdagPV path is commented out -- confirm intent.
U.CreateSubspace(RNG5,PVdagM,nbasis);
// Stack V (first nbasis) and U (second nbasis) into one 2*nbasis aggregate,
// used as both restriction and prolongation basis below.
typedef Aggregation<vSpinColourVector,vTComplex,2*nbasis> CombinedSubspace;
CombinedSubspace CombinedUV(Coarse5d,FGrid,cb);
for(int b=0;b<nbasis;b++){
CombinedUV.subspace[b] = V.subspace[b];
CombinedUV.subspace[b+nbasis] = U.subspace[b];
}
// Diagnostic dumps of the operator's matrix elements in the U/V bases
// (note: 'src' is reused as scratch from here on).
int bl, br;
std::cout <<" <V| PVdagM| V> " <<std::endl;
for(bl=0;bl<nbasis;bl++){
for(br=0;br<nbasis;br++){
PVdagM.Op(V.subspace[br],src);
std::cout <<bl<<" "<<br<<"\t"<<innerProduct(V.subspace[bl],src)<<std::endl;
}}
std::cout <<" <V| PVdagM| U> " <<std::endl;
for(bl=0;bl<nbasis;bl++){
for(br=0;br<nbasis;br++){
PVdagM.Op(U.subspace[br],src);
std::cout <<bl<<" "<<br<<"\t"<<innerProduct(V.subspace[bl],src)<<std::endl;
}}
std::cout <<" <U| PVdagM| V> " <<std::endl;
for(bl=0;bl<nbasis;bl++){
for(br=0;br<nbasis;br++){
PVdagM.Op(V.subspace[br],src);
std::cout <<bl<<" "<<br<<"\t"<<innerProduct(U.subspace[bl],src)<<std::endl;
}}
std::cout <<" <U| PVdagM| U> " <<std::endl;
for(bl=0;bl<nbasis;bl++){
for(br=0;br<nbasis;br++){
PVdagM.Op(U.subspace[br],src);
std::cout <<bl<<" "<<br<<"\t"<<innerProduct(U.subspace[bl],src)<<std::endl;
}}
typedef GeneralCoarsenedMatrix<vSpinColourVector,vTComplex,nbasis> LittleDiracOperatorV;
typedef LittleDiracOperatorV::CoarseVector CoarseVectorV;
typedef GeneralCoarsenedMatrix<vSpinColourVector,vTComplex,2*nbasis> LittleDiracOperator;
typedef LittleDiracOperator::CoarseVector CoarseVector;
// Measure how much of U already lies in span(V) (overwrites 'src' as scratch)
V.Orthogonalise();
for(int b =0 ; b<nbasis;b++){
CoarseVectorV c_src (Coarse5d);
V.ProjectToSubspace (c_src,U.subspace[b]);
V.PromoteFromSubspace(c_src,src);
std::cout << " Completeness of U in V ["<< b<<"] "<< std::sqrt(norm2(src)/norm2(U.subspace[b]))<<std::endl;
}
CoarseVector c_src (Coarse5d);
CoarseVector c_res (Coarse5d);
CoarseVector c_proj(Coarse5d);
// Coarsen PV^dag M over the combined 2*nbasis basis
LittleDiracOperator LittleDiracOpPV(geom,FGrid,Coarse5d);
LittleDiracOpPV.CoarsenOperator(PVdagM,CombinedUV,CombinedUV);
std::cout<<GridLogMessage<<std::endl;
std::cout<<GridLogMessage<<"*******************************************"<<std::endl;
std::cout<<GridLogMessage<<std::endl;
std::cout<<GridLogMessage<<"Testing coarsened operator "<<std::endl;
// Consistency check: coarse op on an all-ones coarse vector must agree with
// projecting (fine op applied to the promoted fine vector).
Complex one(1.0);
c_src = one; // 1 in every element for vector 1.
blockPromote(c_src,err,CombinedUV.subspace);
LatticeFermion prom(FGrid);
prom=Zero();
for(int b=0;b<nbasis*2;b++){
prom=prom+CombinedUV.subspace[b];
}
std::cout<<GridLogMessage<<"c_src "<<norm2(c_src)<<std::endl;
std::cout<<GridLogMessage<<"prom "<<norm2(prom)<<std::endl;
PVdagM.Op(prom,tmp);
blockProject(c_proj,tmp,CombinedUV.subspace);
std::cout<<GridLogMessage<<" Called Big Dirac Op "<<norm2(tmp)<<std::endl;
LittleDiracOpPV.M(c_src,c_res);
std::cout<<GridLogMessage<<" Called Little Dirac Op c_src "<< norm2(c_src) << " c_res "<< norm2(c_res) <<std::endl;
std::cout<<GridLogMessage<<"Little dop : "<<norm2(c_res)<<std::endl;
std::cout<<GridLogMessage<<"Big dop in subspace : "<<norm2(c_proj)<<std::endl;
c_proj = c_proj - c_res;
std::cout<<GridLogMessage<<" ldop error: "<<norm2(c_proj)<<std::endl;
/**********
* Some solvers
**********
*/
///////////////////////////////////////
// Coarse grid solver test
///////////////////////////////////////
std::cout<<GridLogMessage<<"******************* "<<std::endl;
std::cout<<GridLogMessage<<" Coarse Grid Solve -- Level 3 "<<std::endl;
std::cout<<GridLogMessage<<"******************* "<<std::endl;
TrivialPrecon<CoarseVector> simple;
NonHermitianLinearOperator<LittleDiracOperator,CoarseVector> LinOpCoarse(LittleDiracOpPV);
// PrecGeneralisedConjugateResidualNonHermitian<CoarseVector> L2PGCR(1.0e-4, 100, LinOpCoarse,simple,10,10);
PrecGeneralisedConjugateResidualNonHermitian<CoarseVector> L2PGCR(1.0e-2, 10, LinOpCoarse,simple,20,20);
L2PGCR.Level(3);
c_res=Zero();
L2PGCR(c_src,c_res);
////////////////////////////////////////
// Fine grid smoother
////////////////////////////////////////
std::cout<<GridLogMessage<<"******************* "<<std::endl;
std::cout<<GridLogMessage<<" Fine Grid Smoother -- Level 2 "<<std::endl;
std::cout<<GridLogMessage<<"******************* "<<std::endl;
TrivialPrecon<LatticeFermionD> simple_fine;
// NonHermitianLinearOperator<PVdagM_t,LatticeFermionD> LinOpSmooth(PVdagM);
// A single loose PGCR iteration on the shifted operator acts as the smoother
PrecGeneralisedConjugateResidualNonHermitian<LatticeFermionD> SmootherGCR(0.01,1,ShiftedPVdagM,simple_fine,16,16);
SmootherGCR.Level(2);
LatticeFermionD f_src(FGrid);
LatticeFermionD f_res(FGrid);
f_src = one; // 1 in every element for vector 1.
f_res=Zero();
SmootherGCR(f_src,f_res);
// Assemble the two-level preconditioner and run the outer solve
typedef MGPreconditionerSVD<vSpinColourVector, vTComplex,nbasis*2> TwoLevelMG;
TwoLevelMG TwoLevelPrecon(CombinedUV,CombinedUV,
PVdagM,
simple_fine,
SmootherGCR,
LinOpCoarse,
L2PGCR);
PrecGeneralisedConjugateResidualNonHermitian<LatticeFermion> L1PGCR(1.0e-8,1000,PVdagM,TwoLevelPrecon,20,20);
L1PGCR.Level(1);
f_res=Zero();
L1PGCR(f_src,f_res);
std::cout<<GridLogMessage<<std::endl;
std::cout<<GridLogMessage<<"*******************************************"<<std::endl;
std::cout<<GridLogMessage<<std::endl;
std::cout<<GridLogMessage << "Done "<< std::endl;
Grid_finalize();
return 0;
}

View File

@@ -0,0 +1,479 @@
/*************************************************************************************
Grid physics library, www.github.com/paboyle/Grid
Source file: ./tests/Test_padded_cell.cc
Copyright (C) 2023
Author: Peter Boyle <paboyle@ph.ed.ac.uk>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */
#include <Grid/Grid.h>
#include <Grid/lattice/PaddedCell.h>
#include <Grid/stencil/GeneralLocalStencil.h>
#include <Grid/algorithms/iterative/PrecGeneralisedConjugateResidual.h>
#include <Grid/algorithms/iterative/PrecGeneralisedConjugateResidualNonHermitian.h>
#include <Grid/algorithms/iterative/BiCGSTAB.h>
using namespace std;
using namespace Grid;
// Non-hermitian wrapper for the Pauli-Villars preconditioned operator PV^dag M.
template<class Matrix,class Field>
class PVdagMLinearOperator : public LinearOperatorBase<Field> {
  Matrix &_Mat; // Dirac operator M
  Matrix &_PV;  // Pauli-Villars regulator (unit mass)
public:
  PVdagMLinearOperator(Matrix &Mat,Matrix &PV): _Mat(Mat),_PV(PV){};
  void OpDiag (const Field &in, Field &out) { assert(0); } // sparse-direction interface unsupported
  void OpDir (const Field &in, Field &out,int dir,int disp) { assert(0); }
  void OpDirAll (const Field &in, std::vector<Field> &out){ assert(0); };
  // out = PV^dag (M in)
  void Op (const Field &in, Field &out){
    Field stage(in.Grid());
    _Mat.M(in,stage);
    _PV.Mdag(stage,out);
  }
  // out = M^dag (PV in)
  void AdjOp (const Field &in, Field &out){
    Field stage(in.Grid());
    _PV.M(in,stage);
    _Mat.Mdag(stage,out);
  }
  // Not implemented in this variant; use HermOp() directly.
  void HermOpAndNorm(const Field &in, Field &out,RealD &n1,RealD &n2){
    assert(0);
  }
  // Hermitian composition: out = (M^dag PV)(PV^dag M) in.
  void HermOp(const Field &in, Field &out){
    Field stage(in.Grid());
    Op(in,stage);
    AdjOp(stage,out);
  }
};
// Non-hermitian wrapper for the transposed product M^dag PV.
template<class Matrix,class Field>
class MdagPVLinearOperator : public LinearOperatorBase<Field> {
  Matrix &_Mat; // Dirac operator M
  Matrix &_PV;  // Pauli-Villars regulator (unit mass)
public:
  MdagPVLinearOperator(Matrix &Mat,Matrix &PV): _Mat(Mat),_PV(PV){};
  void OpDiag (const Field &in, Field &out) { assert(0); } // sparse-direction interface unsupported
  void OpDir (const Field &in, Field &out,int dir,int disp) { assert(0); }
  void OpDirAll (const Field &in, std::vector<Field> &out){ assert(0); };
  // out = M^dag (PV in)
  void Op (const Field &in, Field &out){
    Field stage(in.Grid());
    _PV.M(in,stage);
    _Mat.Mdag(stage,out);
  }
  // out = PV^dag (M in)
  void AdjOp (const Field &in, Field &out){
    Field stage(in.Grid());
    _Mat.M(in,stage);
    _PV.Mdag(stage,out);
  }
  // Not implemented in this variant; use HermOp() directly.
  void HermOpAndNorm(const Field &in, Field &out,RealD &n1,RealD &n2){
    assert(0);
  }
  // Hermitian composition: out = (PV^dag M)(M^dag PV) in.
  void HermOp(const Field &in, Field &out){
    Field stage(in.Grid());
    Op(in,stage);
    AdjOp(stage,out);
  }
};
// Shifted non-hermitian operator G_s = PV^dag M + shift, used as a
// regulated smoother target. Its adjoint (for real shift) is
// G_s^dag = M^dag PV + shift.
template<class Matrix,class Field>
class ShiftedPVdagMLinearOperator : public LinearOperatorBase<Field> {
  Matrix &_Mat;  // Dirac operator M
  Matrix &_PV;   // Pauli-Villars operator (unit mass)
  RealD shift;   // real shift regulating the low-mode response
public:
  ShiftedPVdagMLinearOperator(RealD _shift,Matrix &Mat,Matrix &PV): shift(_shift),_Mat(Mat),_PV(PV){};
  // Directional / diagonal decompositions are not needed by this test.
  void OpDiag (const Field &in, Field &out) { assert(0); }
  void OpDir (const Field &in, Field &out,int dir,int disp) { assert(0); }
  void OpDirAll (const Field &in, std::vector<Field> &out){ assert(0); };
  // out = PV^dag M in + shift * in
  void Op (const Field &in, Field &out){
    Field tmp(in.Grid());
    _Mat.M(in,tmp);
    _PV.Mdag(tmp,out);
    out = out + shift * in;
  }
  // out = M^dag PV in + shift * in
  void AdjOp (const Field &in, Field &out){
    Field tmp(in.Grid());
    // Bug fix: the previous code applied _PV.M to the *uninitialised* tmp and
    // then discarded the _Mat.Mdag result. The adjoint must apply PV first,
    // then M^dag, mirroring PVdagMLinearOperator::AdjOp.
    _PV.M(in,tmp);
    _Mat.Mdag(tmp,out);
    out = out + shift * in;
  }
  void HermOpAndNorm(const Field &in, Field &out,RealD &n1,RealD &n2){ assert(0); }
  // out = G_s^dag G_s in
  void HermOp(const Field &in, Field &out){
    Field tmp(in.Grid());
    Op(in,tmp);
    AdjOp(tmp,out);
  }
};
// Two-level multigrid V-cycle preconditioner with *asymmetric* coarsening:
// the residual is restricted with the left aggregates (U) and the correction
// is prolongated with the right aggregates (V), following an approximate SVD
// of the fine operator (see comment block below).
template<class Fobj,class CComplex,int nbasis>
class MGPreconditionerSVD : public LinearFunction< Lattice<Fobj> > {
public:
  using LinearFunction<Lattice<Fobj> >::operator();
  typedef Aggregation<Fobj,CComplex,nbasis> Aggregates;
  typedef typename Aggregation<Fobj,CComplex,nbasis>::FineField    FineField;
  typedef typename Aggregation<Fobj,CComplex,nbasis>::CoarseVector CoarseVector;
  typedef typename Aggregation<Fobj,CComplex,nbasis>::CoarseMatrix CoarseMatrix;
  typedef LinearOperatorBase<FineField>    FineOperator;
  typedef LinearFunction    <FineField>    FineSmoother;
  typedef LinearOperatorBase<CoarseVector> CoarseOperator;
  typedef LinearFunction    <CoarseVector> CoarseSolver;
  ///////////////////////////////
  // SVD is M = U S Vdag
  //
  // Define a subset of Vc and Uc in Complex_f,c matrix
  //  - these are the coarsening, non-square matrices
  //
  // Solve a coarse approx to
  //
  //   M psi = eta
  //
  // via
  //
  //   Uc^dag U S Vdag Vc Vc^dag psi = Uc^dag eta
  //
  //   M_coarse Vc^dag psi = M_coarse psi_c = eta_c
  //
  ///////////////////////////////
  Aggregates     & _U;              // left aggregates: restriction (Uc^dag)
  Aggregates     & _V;              // right aggregates: prolongation (Vc)
  FineOperator   & _FineOperator;   // fine-grid operator, used to form residuals
  FineSmoother   & _PreSmoother;    // smoother applied before the coarse correction
  FineSmoother   & _PostSmoother;   // smoother applied after the coarse correction
  CoarseOperator & _CoarseOperator; // coarsened operator (not applied directly here)
  CoarseSolver   & _CoarseSolve;    // approximate coarse-grid solver
  int level; void Level(int lv) {level = lv; };  // level tag for callers' logging
  MGPreconditionerSVD(Aggregates &U,
		      Aggregates &V,
		      FineOperator &Fine,
		      FineSmoother &PreSmoother,
		      FineSmoother &PostSmoother,
		      CoarseOperator &CoarseOperator_,
		      CoarseSolver &CoarseSolve_)
    : _U(U),
      _V(V),
      _FineOperator(Fine),
      _PreSmoother(PreSmoother),
      _PostSmoother(PostSmoother),
      _CoarseOperator(CoarseOperator_),
      _CoarseSolve(CoarseSolve_),
      level(1) { }
  // One V-cycle: pre-smooth, coarse correction, post-smooth.
  // On exit out approximates A^{-1} in.
  virtual void operator()(const FineField &in, FineField & out)
  {
    GridBase *CoarseGrid = _U.CoarseGrid;
    //    auto CoarseGrid = _CoarseOperator.Grid();
    CoarseVector Csrc(CoarseGrid);
    CoarseVector Csol(CoarseGrid);
    FineField vec1(in.Grid());
    FineField vec2(in.Grid());
    std::cout<<GridLogMessage << "Calling PreSmoother " <<std::endl;
    //    std::cout<<GridLogMessage << "Calling PreSmoother input residual "<<norm2(in) <<std::endl;
    double t;
    // Fine Smoother: smooth from a zero initial guess.
    //  out = in;
    out = Zero();
    t=-usecond();
    _PreSmoother(in,out);
    t+=usecond();
    std::cout<<GridLogMessage << "PreSmoother took "<< t/1000.0<< "ms" <<std::endl;
    // Update the residual: vec1 = in - A out
    _FineOperator.Op(out,vec1); sub(vec1, in ,vec1);
    //    std::cout<<GridLogMessage <<"Residual-1 now " <<norm2(vec1)<<std::endl;
    //  Uc^dag U S Vdag Vc Vc^dag psi = Uc^dag eta
    // Fine to Coarse: restrict the residual with the left (U) aggregates.
    t=-usecond();
    _U.ProjectToSubspace (Csrc,vec1);
    t+=usecond();
    std::cout<<GridLogMessage << "Project to coarse took "<< t/1000.0<< "ms" <<std::endl;
    // Coarse correction: approximately solve A_c Csol = Csrc.
    t=-usecond();
    Csol = Zero();
    _CoarseSolve(Csrc,Csol);
    //Csol=Zero();
    t+=usecond();
    std::cout<<GridLogMessage << "Coarse solve took "<< t/1000.0<< "ms" <<std::endl;
    // Coarse to Fine: prolongate with the right (V) aggregates and accumulate.
    t=-usecond();
    //    _CoarseOperator.PromoteFromSubspace(_Aggregates,Csol,vec1);
    _V.PromoteFromSubspace(Csol,vec1);
    add(out,out,vec1);
    t+=usecond();
    std::cout<<GridLogMessage << "Promote to this level took "<< t/1000.0<< "ms" <<std::endl;
    // Residual after the coarse correction: vec1 = in - A out
    _FineOperator.Op(out,vec1); sub(vec1 ,in , vec1);
    //    std::cout<<GridLogMessage <<"Residual-2 now " <<norm2(vec1)<<std::endl;
    // Fine Smoother: post-smooth the residual, add the resulting correction.
    t=-usecond();
    //    vec2=vec1;
    vec2=Zero();
    _PostSmoother(vec1,vec2);
    t+=usecond();
    std::cout<<GridLogMessage << "PostSmoother took "<< t/1000.0<< "ms" <<std::endl;
    add( out,out,vec2);
    std::cout<<GridLogMessage << "Done " <<std::endl;
  }
};
// Test driver: two-level multigrid for the PV^dag M (Pauli-Villars
// preconditioned domain wall) system, checking the coarsened operator and
// exercising coarse solver, fine smoother and the full preconditioned PGCR.
int main (int argc, char ** argv)
{
  Grid_init(&argc,&argv);

  const int Ls=16;  // fifth-dimension extent of the domain wall operator

  // Fine 4d gauge grids and 5d fermion grids (full + red-black).
  GridCartesian * UGrid = SpaceTimeGrid::makeFourDimGrid(GridDefaultLatt(), GridDefaultSimd(Nd,vComplex::Nsimd()),GridDefaultMpi());
  GridRedBlackCartesian * UrbGrid = SpaceTimeGrid::makeFourDimRedBlackGrid(UGrid);
  GridCartesian * FGrid = SpaceTimeGrid::makeFiveDimGrid(Ls,UGrid);
  GridRedBlackCartesian * FrbGrid = SpaceTimeGrid::makeFiveDimRedBlackGrid(Ls,UGrid);

  // Construct a coarsened grid: block each 4d direction by a factor of 2.
  Coordinate clatt = GridDefaultLatt();
  for(int d=0;d<clatt.size();d++){
    clatt[d] = clatt[d]/2;
    //    clatt[d] = clatt[d]/4;
  }
  GridCartesian *Coarse4d = SpaceTimeGrid::makeFourDimGrid(clatt, GridDefaultSimd(Nd,vComplex::Nsimd()),GridDefaultMpi());;
  GridCartesian *Coarse5d = SpaceTimeGrid::makeFiveDimGrid(1,Coarse4d);

  // Fixed seeds so runs are reproducible.
  std::vector<int> seeds4({1,2,3,4});
  std::vector<int> seeds5({5,6,7,8});
  std::vector<int> cseeds({5,6,7,8});
  GridParallelRNG RNG5(FGrid);   RNG5.SeedFixedIntegers(seeds5);
  GridParallelRNG RNG4(UGrid);   RNG4.SeedFixedIntegers(seeds4);
  GridParallelRNG CRNG(Coarse5d);CRNG.SeedFixedIntegers(cseeds);

  // Work vectors on the fine 5d grid.
  LatticeFermion src(FGrid); random(RNG5,src);
  LatticeFermion result(FGrid); result=Zero();
  LatticeFermion ref(FGrid); ref=Zero();
  LatticeFermion tmp(FGrid);
  LatticeFermion err(FGrid);

  // Read the gauge configuration (NERSC format) from disk.
  LatticeGaugeField Umu(UGrid);
  FieldMetaData header;
  std::string file("ckpoint_lat.4000");
  NerscIO::readConfiguration(Umu,header,file);

  // Light-quark domain wall operator and its Pauli-Villars partner (mass=1).
  RealD mass=0.01;
  RealD M5=1.8;
  DomainWallFermionD Ddwf(Umu,*FGrid,*FrbGrid,*UGrid,*UrbGrid,mass,M5);
  DomainWallFermionD Dpv(Umu,*FGrid,*FrbGrid,*UGrid,*UrbGrid,1.0,M5);

  const int nbasis = 60;  // number of near-null basis vectors for the coarse space
  const int cb = 0 ;      // checkerboard passed to the aggregation

  NextToNearestStencilGeometry5D geom(Coarse5d);

  std::cout<<GridLogMessage<<std::endl;
  std::cout<<GridLogMessage<<"*******************************************"<<std::endl;
  std::cout<<GridLogMessage<<std::endl;

  typedef PVdagMLinearOperator<DomainWallFermionD,LatticeFermionD> PVdagM_t;
  typedef MdagPVLinearOperator<DomainWallFermionD,LatticeFermionD> MdagPV_t;
  typedef ShiftedPVdagMLinearOperator<DomainWallFermionD,LatticeFermionD> ShiftedPVdagM_t;
  PVdagM_t PVdagM(Ddwf,Dpv);
  MdagPV_t MdagPV(Ddwf,Dpv);

  // Shift-parameter tuning history (outer-solve iteration counts):
  //  ShiftedPVdagM_t  ShiftedPVdagM(2.0,Ddwf,Dpv); // 355
  //  ShiftedPVdagM_t  ShiftedPVdagM(1.0,Ddwf,Dpv); // 246
  //  ShiftedPVdagM_t  ShiftedPVdagM(0.5,Ddwf,Dpv); // 183
  //  ShiftedPVdagM_t  ShiftedPVdagM(0.25,Ddwf,Dpv); // 145
  //  ShiftedPVdagM_t  ShiftedPVdagM(0.1,Ddwf,Dpv); // 134
  //  ShiftedPVdagM_t  ShiftedPVdagM(0.1,Ddwf,Dpv); // 127 -- NULL space via inverse iteration
  //  ShiftedPVdagM_t  ShiftedPVdagM(0.1,Ddwf,Dpv); // 57 -- NULL space via inverse iteration; 3 iterations
  //  ShiftedPVdagM_t  ShiftedPVdagM(0.25,Ddwf,Dpv); // 57 , tighter inversion
  //  ShiftedPVdagM_t  ShiftedPVdagM(0.25,Ddwf,Dpv); // nbasis 20 -- 49 iters
  //  ShiftedPVdagM_t  ShiftedPVdagM(0.25,Ddwf,Dpv); // nbasis 20 -- 70 iters; asymmetric
  //  ShiftedPVdagM_t  ShiftedPVdagM(0.25,Ddwf,Dpv); // 58; Loosen coarse, tighten fine
  //  ShiftedPVdagM_t  ShiftedPVdagM(0.1,Ddwf,Dpv); // 56 ...
  //  ShiftedPVdagM_t  ShiftedPVdagM(0.1,Ddwf,Dpv); // 51 ... with 24 vecs
  //  ShiftedPVdagM_t  ShiftedPVdagM(0.1,Ddwf,Dpv); // 31 ... with 24 vecs and 2^4 blocking
  //  ShiftedPVdagM_t  ShiftedPVdagM(0.1,Ddwf,Dpv); // 43 ... with 16 vecs and 2^4 blocking, sloppier
  //  ShiftedPVdagM_t  ShiftedPVdagM(0.1,Ddwf,Dpv); // 35 ... with 20 vecs and 2^4 blocking
  //  ShiftedPVdagM_t  ShiftedPVdagM(0.1,Ddwf,Dpv); // 35 ... with 20 vecs and 2^4 blocking, looser coarse
  //  ShiftedPVdagM_t  ShiftedPVdagM(0.1,Ddwf,Dpv); // 64 ... with 20 vecs, Christoph setup, and 2^4 blocking, looser coarse
  ShiftedPVdagM_t ShiftedPVdagM(0.01,Ddwf,Dpv); //

  // Power method: estimate the spectral radius of both hermitian forms.
  // Run power method on HOA??
  PowerMethod<LatticeFermion> PM;
  PM(PVdagM,src);
  PM(MdagPV,src);

  // Warning: This routine calls PVdagM.Op, not PVdagM.HermOp
  typedef Aggregation<vSpinColourVector,vTComplex,nbasis> Subspace;
  Subspace V(Coarse5d,FGrid,cb);
  //  Subspace U(Coarse5d,FGrid,cb);

  // Breeds right singular vectors with call to HermOp
  V.CreateSubspaceChebyshev(RNG5,PVdagM,
			    nbasis,
			    4000.0,0.003,
			    300);
  // Breeds left singular vectors with call to HermOp
  //  U.CreateSubspaceChebyshev(RNG5,MdagPV,
  //			    nbasis,
  //			    4000.0,0.003,
  //			    300);
  //  U.subspace=V.subspace;
  //  typedef Aggregation<vSpinColourVector,vTComplex,2*nbasis> CombinedSubspace;
  //  CombinedSubspace CombinedUV(Coarse5d,FGrid,cb);
  //  for(int b=0;b<nbasis;b++){
  //    CombinedUV.subspace[b]        = V.subspace[b];
  //    CombinedUV.subspace[b+nbasis] = U.subspace[b];
  //  }

  // Coarsen PV^dag M on the aggregated subspace (V used on both sides here).
  //  typedef GeneralCoarsenedMatrix<vSpinColourVector,vTComplex,2*nbasis> LittleDiracOperator;
  typedef GeneralCoarsenedMatrix<vSpinColourVector,vTComplex,nbasis> LittleDiracOperator;
  typedef LittleDiracOperator::CoarseVector CoarseVector;
  LittleDiracOperator LittleDiracOpPV(geom,FGrid,Coarse5d);
  LittleDiracOpPV.CoarsenOperator(PVdagM,V,V);

  std::cout<<GridLogMessage<<std::endl;
  std::cout<<GridLogMessage<<"*******************************************"<<std::endl;
  std::cout<<GridLogMessage<<std::endl;
  std::cout<<GridLogMessage<<"Testing coarsened operator "<<std::endl;

  // Galerkin check: project(A*prom) should agree with A_coarse*c_src when
  // c_src has unit coefficients and prom is the matching fine-grid vector.
  CoarseVector c_src (Coarse5d);
  CoarseVector c_res (Coarse5d);
  CoarseVector c_proj(Coarse5d);

  Complex one(1.0);
  c_src = one;  // 1 in every element for vector 1.

  //  blockPromote(c_src,err,CoarseToFine.subspace);
  LatticeFermion prom(FGrid);
  prom=Zero();
  for(int b=0;b<nbasis;b++){
    prom=prom+V.subspace[b];
  }
  std::cout<<GridLogMessage<<"c_src "<<norm2(c_src)<<std::endl;
  std::cout<<GridLogMessage<<"prom "<<norm2(prom)<<std::endl;
  PVdagM.Op(prom,tmp);
  blockProject(c_proj,tmp,V.subspace);
  std::cout<<GridLogMessage<<" Called Big Dirac Op "<<norm2(tmp)<<std::endl;
  LittleDiracOpPV.M(c_src,c_res);
  std::cout<<GridLogMessage<<" Called Little Dirac Op c_src "<< norm2(c_src) << " c_res "<< norm2(c_res) <<std::endl;
  std::cout<<GridLogMessage<<"Little dop : "<<norm2(c_res)<<std::endl;
  std::cout<<GridLogMessage<<"Big dop in subspace : "<<norm2(c_proj)<<std::endl;
  c_proj = c_proj - c_res;
  std::cout<<GridLogMessage<<" ldop error: "<<norm2(c_proj)<<std::endl;

  /**********
   * Some solvers
   **********
   */

  ///////////////////////////////////////
  // Coarse grid solver test
  ///////////////////////////////////////
  std::cout<<GridLogMessage<<"******************* "<<std::endl;
  std::cout<<GridLogMessage<<" Coarse Grid Solve -- Level 3 "<<std::endl;
  std::cout<<GridLogMessage<<"******************* "<<std::endl;
  TrivialPrecon<CoarseVector> simple;
  NonHermitianLinearOperator<LittleDiracOperator,CoarseVector> LinOpCoarse(LittleDiracOpPV);
  //  PrecGeneralisedConjugateResidualNonHermitian<CoarseVector> L2PGCR(1.0e-4, 100, LinOpCoarse,simple,10,10);
  PrecGeneralisedConjugateResidualNonHermitian<CoarseVector> L3PGCR(1.0e-4, 10, LinOpCoarse,simple,20,20);
  L3PGCR.Level(3);
  c_res=Zero();
  L3PGCR(c_src,c_res);

  ////////////////////////////////////////
  // Fine grid smoother
  ////////////////////////////////////////
  std::cout<<GridLogMessage<<"******************* "<<std::endl;
  std::cout<<GridLogMessage<<" Fine Grid Smoother -- Level 2 "<<std::endl;
  std::cout<<GridLogMessage<<"******************* "<<std::endl;
  TrivialPrecon<LatticeFermionD> simple_fine;
  //  NonHermitianLinearOperator<PVdagM_t,LatticeFermionD> LinOpSmooth(PVdagM);
  // One GCR iteration on the shifted operator is used as the smoother.
  PrecGeneralisedConjugateResidualNonHermitian<LatticeFermionD> SmootherGCR(0.01,1,ShiftedPVdagM,simple_fine,16,16);
  SmootherGCR.Level(2);

  LatticeFermionD f_src(FGrid);
  LatticeFermionD f_res(FGrid);

  f_src = one;  // 1 in every element for vector 1.
  f_res=Zero();
  SmootherGCR(f_src,f_res);

  // Outer solve: PGCR on PV^dag M preconditioned by the two-level V-cycle.
  //  typedef MGPreconditionerSVD<vSpinColourVector,  vTComplex,nbasis*2> TwoLevelMG;
  typedef MGPreconditionerSVD<vSpinColourVector, vTComplex,nbasis> TwoLevelMG;
  //  TwoLevelMG TwoLevelPrecon(CombinedUV,CombinedUV,
  TwoLevelMG TwoLevelPrecon(V,V,
			    PVdagM,
			    simple_fine,
			    SmootherGCR,
			    LinOpCoarse,
			    L3PGCR);
  PrecGeneralisedConjugateResidualNonHermitian<LatticeFermion> L1PGCR(1.0e-8,1000,PVdagM,TwoLevelPrecon,16,16);
  L1PGCR.Level(1);
  f_res=Zero();
  L1PGCR(f_src,f_res);

  std::cout<<GridLogMessage<<std::endl;
  std::cout<<GridLogMessage<<"*******************************************"<<std::endl;
  std::cout<<GridLogMessage<<std::endl;
  std::cout<<GridLogMessage << "Done "<< std::endl;

  Grid_finalize();
  return 0;
}

View File

@@ -0,0 +1,333 @@
/*************************************************************************************
Grid physics library, www.github.com/paboyle/Grid
Source file: ./tests/Test_padded_cell.cc
Copyright (C) 2023
Author: Peter Boyle <paboyle@ph.ed.ac.uk>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */
#include <Grid/Grid.h>
#include <Grid/lattice/PaddedCell.h>
#include <Grid/stencil/GeneralLocalStencil.h>
#include <Grid/algorithms/iterative/PrecGeneralisedConjugateResidual.h>
#include <Grid/algorithms/iterative/PrecGeneralisedConjugateResidualNonHermitian.h>
#include <Grid/algorithms/iterative/BiCGSTAB.h>
using namespace std;
using namespace Grid;
// Two-level multigrid V-cycle used as a preconditioner:
//   1) pre-smooth on the fine grid,
//   2) restrict the residual to the coarse grid and solve approximately,
//   3) prolongate the correction back and accumulate it,
//   4) post-smooth the remaining residual.
// Restriction and prolongation both use the same aggregates.
template<class Fobj,class CComplex,int nbasis>
class MGPreconditioner : public LinearFunction< Lattice<Fobj> > {
public:
  using LinearFunction<Lattice<Fobj> >::operator();
  typedef Aggregation<Fobj,CComplex,nbasis> Aggregates;
  typedef typename Aggregation<Fobj,CComplex,nbasis>::FineField    FineField;
  typedef typename Aggregation<Fobj,CComplex,nbasis>::CoarseVector CoarseVector;
  typedef typename Aggregation<Fobj,CComplex,nbasis>::CoarseMatrix CoarseMatrix;
  typedef LinearOperatorBase<FineField>    FineOperator;
  typedef LinearFunction    <FineField>    FineSmoother;
  typedef LinearOperatorBase<CoarseVector> CoarseOperator;
  typedef LinearFunction    <CoarseVector> CoarseSolver;
  Aggregates     & _Aggregates;     // subspace defining restriction/prolongation
  FineOperator   & _FineOperator;   // fine-grid operator, used to form residuals
  FineSmoother   & _PreSmoother;    // smoother applied before the coarse correction
  FineSmoother   & _PostSmoother;   // smoother applied after the coarse correction
  CoarseOperator & _CoarseOperator; // coarsened operator (not applied directly here)
  CoarseSolver   & _CoarseSolve;    // approximate coarse-grid solver
  int level; void Level(int lv) {level = lv; };  // level tag for callers' logging
  MGPreconditioner(Aggregates &Agg,
		   FineOperator &Fine,
		   FineSmoother &PreSmoother,
		   FineSmoother &PostSmoother,
		   CoarseOperator &CoarseOperator_,
		   CoarseSolver &CoarseSolve_)
    : _Aggregates(Agg),
      _FineOperator(Fine),
      _PreSmoother(PreSmoother),
      _PostSmoother(PostSmoother),
      _CoarseOperator(CoarseOperator_),
      _CoarseSolve(CoarseSolve_),
      level(1) { }
  // One V-cycle: on exit out approximates A^{-1} in.
  virtual void operator()(const FineField &in, FineField & out)
  {
    GridBase *CoarseGrid = _Aggregates.CoarseGrid;
    //    auto CoarseGrid = _CoarseOperator.Grid();
    CoarseVector Csrc(CoarseGrid);
    CoarseVector Csol(CoarseGrid);
    FineField vec1(in.Grid());
    FineField vec2(in.Grid());
    std::cout<<GridLogMessage << "Calling PreSmoother " <<std::endl;
    //    std::cout<<GridLogMessage << "Calling PreSmoother input residual "<<norm2(in) <<std::endl;
    double t;
    // Fine Smoother: smooth from a zero initial guess.
    //  out = in;
    out = Zero();
    t=-usecond();
    _PreSmoother(in,out);
    t+=usecond();
    std::cout<<GridLogMessage << "PreSmoother took "<< t/1000.0<< "ms" <<std::endl;
    // Update the residual: vec1 = in - A out
    _FineOperator.Op(out,vec1); sub(vec1, in ,vec1);
    //    std::cout<<GridLogMessage <<"Residual-1 now " <<norm2(vec1)<<std::endl;
    // Fine to Coarse: restrict the residual to the coarse grid.
    t=-usecond();
    _Aggregates.ProjectToSubspace (Csrc,vec1);
    t+=usecond();
    std::cout<<GridLogMessage << "Project to coarse took "<< t/1000.0<< "ms" <<std::endl;
    // Coarse correction: approximately solve A_c Csol = Csrc.
    t=-usecond();
    Csol = Zero();
    _CoarseSolve(Csrc,Csol);
    //Csol=Zero();
    t+=usecond();
    std::cout<<GridLogMessage << "Coarse solve took "<< t/1000.0<< "ms" <<std::endl;
    // Coarse to Fine: prolongate the correction and accumulate into out.
    t=-usecond();
    //    _CoarseOperator.PromoteFromSubspace(_Aggregates,Csol,vec1);
    _Aggregates.PromoteFromSubspace(Csol,vec1);
    add(out,out,vec1);
    t+=usecond();
    std::cout<<GridLogMessage << "Promote to this level took "<< t/1000.0<< "ms" <<std::endl;
    // Residual after the coarse correction: vec1 = in - A out
    _FineOperator.Op(out,vec1); sub(vec1 ,in , vec1);
    //    std::cout<<GridLogMessage <<"Residual-2 now " <<norm2(vec1)<<std::endl;
    // Fine Smoother: post-smooth the residual, add the resulting correction.
    t=-usecond();
    //    vec2=vec1;
    vec2=Zero();
    _PostSmoother(vec1,vec2);
    t+=usecond();
    std::cout<<GridLogMessage << "PostSmoother took "<< t/1000.0<< "ms" <<std::endl;
    add( out,out,vec2);
    std::cout<<GridLogMessage << "Done " <<std::endl;
  }
};
// Test driver: two-level multigrid for a 4d Wilson-clover operator, with the
// coarse space doubled by appending the gamma5 images of the near-null vectors.
int main (int argc, char ** argv)
{
  Grid_init(&argc,&argv);

  const int Ls=16;  // NOTE(review): unused in this 4d test; left over from the 5d variant

  // 4d grids only: the "fine" fermion grid aliases the gauge grid.
  GridCartesian * UGrid = SpaceTimeGrid::makeFourDimGrid(GridDefaultLatt(), GridDefaultSimd(Nd,vComplex::Nsimd()),GridDefaultMpi());
  GridRedBlackCartesian * UrbGrid = SpaceTimeGrid::makeFourDimRedBlackGrid(UGrid);
  GridCartesian * FGrid = UGrid;
  GridRedBlackCartesian * FrbGrid = UrbGrid;

  // Construct a coarsened grid: block each direction by a factor of 2.
  Coordinate clatt = GridDefaultLatt();
  for(int d=0;d<clatt.size();d++){
    clatt[d] = clatt[d]/2;
    //    clatt[d] = clatt[d]/4;
  }
  GridCartesian *Coarse4d = SpaceTimeGrid::makeFourDimGrid(clatt, GridDefaultSimd(Nd,vComplex::Nsimd()),GridDefaultMpi());;

  // Fixed seeds so runs are reproducible.
  std::vector<int> seeds4({1,2,3,4});
  std::vector<int> cseeds({5,6,7,8});
  GridParallelRNG RNG4(UGrid);   RNG4.SeedFixedIntegers(seeds4);
  GridParallelRNG CRNG(Coarse4d);CRNG.SeedFixedIntegers(cseeds);

  Complex one(1.0);

  // Work vectors on the fine grid.
  LatticeFermion src(FGrid); src=one;
  LatticeFermion result(FGrid); result=Zero();
  LatticeFermion ref(FGrid); ref=Zero();
  LatticeFermion tmp(FGrid);
  LatticeFermion err(FGrid);
  LatticeFermion precsrc(FGrid);

  // Read the gauge configuration (NERSC format) from disk.
  LatticeGaugeField Umu(UGrid);
  FieldMetaData header;
  std::string file("ckpoint_lat");
  NerscIO::readConfiguration(Umu,header,file);

  // Wilson-clover operator; csw=0 here, so the clover term vanishes.
  RealD csw =0.0;
  RealD mass=-0.92;
  WilsonCloverFermionD Dw(Umu,*UGrid,*UrbGrid,mass,csw,csw);

  const int nbasis = 20;  // near-null vectors; doubled below with gamma5 partners
  const int cb = 0 ;      // checkerboard passed to the aggregation

  LatticeFermion prom(FGrid);

  // The coarse operator carries a 2*nbasis space: subspace plus g5 images.
  typedef GeneralCoarsenedMatrix<vSpinColourVector,vTComplex,2*nbasis> LittleDiracOperator;
  typedef LittleDiracOperator::CoarseVector CoarseVector;

  NearestStencilGeometry4D geom(Coarse4d);

  std::cout<<GridLogMessage<<std::endl;
  std::cout<<GridLogMessage<<"*******************************************"<<std::endl;
  std::cout<<GridLogMessage<<std::endl;

  // Warning: This routine calls Linop.Op, not LinOpo.HermOp
  typedef Aggregation<vSpinColourVector,vTComplex,nbasis> Subspace;
  Subspace Aggregates(Coarse4d,FGrid,cb);

  NonHermitianLinearOperator<WilsonCloverFermionD,LatticeFermion> LinOpDw(Dw);
  ShiftedNonHermitianLinearOperator<WilsonCloverFermionD,LatticeFermion> ShiftedLinOpDw(Dw,0.01);

  // Generate nbasis near-null vectors via GCR-based inverse iteration.
  Aggregates.CreateSubspaceGCR(RNG4,
			       LinOpDw,
			       nbasis);

  // Double the subspace by appending the gamma5 partner of each vector.
  typedef Aggregation<vSpinColourVector,vTComplex,2*nbasis> CombinedSubspace;
  CombinedSubspace CombinedUV(Coarse4d,UGrid,cb);
  for(int b=0;b<nbasis;b++){
    Gamma G5(Gamma::Algebra::Gamma5);
    CombinedUV.subspace[b]        = Aggregates.subspace[b];
    CombinedUV.subspace[b+nbasis] = G5*Aggregates.subspace[b];
  }

  LittleDiracOperator LittleDiracOp(geom,FGrid,Coarse4d);
  LittleDiracOp.CoarsenOperator(LinOpDw,CombinedUV);

  std::cout<<GridLogMessage<<std::endl;
  std::cout<<GridLogMessage<<"*******************************************"<<std::endl;
  std::cout<<GridLogMessage<<std::endl;
  std::cout<<GridLogMessage<<"Testing coarsened operator "<<std::endl;

  // Galerkin check: project(A*prom) should agree with A_coarse*c_src for a
  // unit-coefficient coarse vector and its promoted fine counterpart.
  CoarseVector c_src (Coarse4d);
  CoarseVector c_res (Coarse4d);
  CoarseVector c_proj(Coarse4d);

  std::vector<LatticeFermion> subspace(2*nbasis,FGrid);
  subspace=CombinedUV.subspace;

  c_src = one;  // 1 in every element for vector 1.
  blockPromote(c_src,err,subspace);

  prom=Zero();
  for(int b=0;b<2*nbasis;b++){
    prom=prom+subspace[b];
  }
  err=err-prom;
  std::cout<<GridLogMessage<<"Promoted back from subspace: err "<<norm2(err)<<std::endl;
  std::cout<<GridLogMessage<<"c_src "<<norm2(c_src)<<std::endl;
  std::cout<<GridLogMessage<<"prom "<<norm2(prom)<<std::endl;

  LinOpDw.Op(prom,tmp);
  blockProject(c_proj,tmp,subspace);
  std::cout<<GridLogMessage<<" Called Big Dirac Op "<<norm2(tmp)<<std::endl;

  LittleDiracOp.M(c_src,c_res);
  std::cout<<GridLogMessage<<" Called Little Dirac Op c_src "<< norm2(c_src) << " c_res "<< norm2(c_res) <<std::endl;

  std::cout<<GridLogMessage<<"Little dop : "<<norm2(c_res)<<std::endl;
  //  std::cout<<GridLogMessage<<" Little "<< c_res<<std::endl;
  std::cout<<GridLogMessage<<"Big dop in subspace : "<<norm2(c_proj)<<std::endl;
  //  std::cout<<GridLogMessage<<" Big "<< c_proj<<std::endl;
  c_proj = c_proj - c_res;
  std::cout<<GridLogMessage<<" ldop error: "<<norm2(c_proj)<<std::endl;
  //  std::cout<<GridLogMessage<<" error "<< c_proj<<std::endl;

  /**********
   * Some solvers
   **********
   */

  // CG reference solve on the normal equations M^dag M.
  // NOTE(review): HermOp is instantiated with WilsonFermionD while Dw is a
  // WilsonCloverFermionD -- presumably relies on the clover operator deriving
  // from the Wilson one; confirm against the fermion class hierarchy.
  {
    MdagMLinearOperator<WilsonFermionD,LatticeFermion> HermOp(Dw);
    ConjugateGradient<LatticeFermion> CG(1.0e-8,10000);
    Dw.Mdag(src,precsrc);
    CG(HermOp,precsrc,result);
    result=Zero();
  }

  ///////////////////////////////////////
  // Coarse grid solver test
  ///////////////////////////////////////
  std::cout<<GridLogMessage<<"******************* "<<std::endl;
  std::cout<<GridLogMessage<<" Coarse Grid Solve -- Level 3 "<<std::endl;
  std::cout<<GridLogMessage<<"******************* "<<std::endl;
  TrivialPrecon<CoarseVector> simple;
  NonHermitianLinearOperator<LittleDiracOperator,CoarseVector> LinOpCoarse(LittleDiracOp);
  ShiftedNonHermitianLinearOperator<LittleDiracOperator,CoarseVector> ShiftedLinOpCoarse(LittleDiracOp,0.001);
  //  ShiftedNonHermitianLinearOperator<LittleDiracOperator,CoarseVector> ShiftedLinOpCoarse(LittleDiracOp,0.01);
  //  ShiftedNonHermitianLinearOperator<LittleDiracOperator,CoarseVector> ShiftedLinOpCoarse(LinOpCoarse,0.001);
  //  PrecGeneralisedConjugateResidualNonHermitian<CoarseVector> L2PGCR(1.0e-4, 100, LinOpCoarse,simple,10,10);
  //  PrecGeneralisedConjugateResidualNonHermitian<CoarseVector> L2PGCR(1.0e-1, 100, LinOpCoarse,simple,30,30);
  PrecGeneralisedConjugateResidualNonHermitian<CoarseVector> L2PGCR(2.0e-1, 50, ShiftedLinOpCoarse,simple,50,50);
  L2PGCR.Level(3);
  c_res=Zero();
  L2PGCR(c_src,c_res);

  ////////////////////////////////////////
  // Fine grid smoother
  ////////////////////////////////////////
  std::cout<<GridLogMessage<<"******************* "<<std::endl;
  std::cout<<GridLogMessage<<" Fine Grid Smoother -- Level 2 "<<std::endl;
  std::cout<<GridLogMessage<<"******************* "<<std::endl;
  TrivialPrecon<LatticeFermionD> simple_fine;
  // A single GCR iteration on the shifted fine operator acts as the smoother.
  PrecGeneralisedConjugateResidualNonHermitian<LatticeFermionD> SmootherGCR(0.1,1,ShiftedLinOpDw,simple_fine,4,4);
  SmootherGCR.Level(2);

  LatticeFermionD f_src(FGrid);
  LatticeFermionD f_res(FGrid);

  f_src = one;  // 1 in every element for vector 1.
  f_res=Zero();
  SmootherGCR(f_src,f_res);

  // Outer PGCR solve preconditioned with the two-level MG cycle.
  typedef MGPreconditioner<vSpinColourVector, vTComplex,2*nbasis> TwoLevelMG;
  TwoLevelMG TwoLevelPrecon(CombinedUV,
			    LinOpDw,
			    simple_fine,
			    SmootherGCR,
			    LinOpCoarse,
			    L2PGCR);
  PrecGeneralisedConjugateResidualNonHermitian<LatticeFermion> L1PGCR(1.0e-8,1000,LinOpDw,TwoLevelPrecon,16,16);
  L1PGCR.Level(1);
  f_res=Zero();
  L1PGCR(f_src,f_res);

  std::cout<<GridLogMessage<<std::endl;
  std::cout<<GridLogMessage<<"*******************************************"<<std::endl;
  std::cout<<GridLogMessage<<std::endl;
  std::cout<<GridLogMessage << "Done "<< std::endl;

  Grid_finalize();
  return 0;
}

View File

@@ -0,0 +1,326 @@
/*************************************************************************************
Grid physics library, www.github.com/paboyle/Grid
Source file: ./tests/Test_padded_cell.cc
Copyright (C) 2023
Author: Peter Boyle <paboyle@ph.ed.ac.uk>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */
#include <Grid/Grid.h>
#include <Grid/lattice/PaddedCell.h>
#include <Grid/stencil/GeneralLocalStencil.h>
#include <Grid/algorithms/iterative/PrecGeneralisedConjugateResidual.h>
#include <Grid/algorithms/iterative/PrecGeneralisedConjugateResidualNonHermitian.h>
#include <Grid/algorithms/iterative/BiCGSTAB.h>
using namespace std;
using namespace Grid;
// Two-level multigrid V-cycle used as a preconditioner:
//   1) pre-smooth on the fine grid,
//   2) restrict the residual to the coarse grid and solve approximately,
//   3) prolongate the correction back and accumulate it,
//   4) post-smooth the remaining residual.
// Restriction and prolongation both use the same aggregates.
template<class Fobj,class CComplex,int nbasis>
class MGPreconditioner : public LinearFunction< Lattice<Fobj> > {
public:
  using LinearFunction<Lattice<Fobj> >::operator();
  typedef Aggregation<Fobj,CComplex,nbasis> Aggregates;
  typedef typename Aggregation<Fobj,CComplex,nbasis>::FineField    FineField;
  typedef typename Aggregation<Fobj,CComplex,nbasis>::CoarseVector CoarseVector;
  typedef typename Aggregation<Fobj,CComplex,nbasis>::CoarseMatrix CoarseMatrix;
  typedef LinearOperatorBase<FineField>    FineOperator;
  typedef LinearFunction    <FineField>    FineSmoother;
  typedef LinearOperatorBase<CoarseVector> CoarseOperator;
  typedef LinearFunction    <CoarseVector> CoarseSolver;
  Aggregates     & _Aggregates;     // subspace defining restriction/prolongation
  FineOperator   & _FineOperator;   // fine-grid operator, used to form residuals
  FineSmoother   & _PreSmoother;    // smoother applied before the coarse correction
  FineSmoother   & _PostSmoother;   // smoother applied after the coarse correction
  CoarseOperator & _CoarseOperator; // coarsened operator (not applied directly here)
  CoarseSolver   & _CoarseSolve;    // approximate coarse-grid solver
  int level; void Level(int lv) {level = lv; };  // level tag for callers' logging
  MGPreconditioner(Aggregates &Agg,
		   FineOperator &Fine,
		   FineSmoother &PreSmoother,
		   FineSmoother &PostSmoother,
		   CoarseOperator &CoarseOperator_,
		   CoarseSolver &CoarseSolve_)
    : _Aggregates(Agg),
      _FineOperator(Fine),
      _PreSmoother(PreSmoother),
      _PostSmoother(PostSmoother),
      _CoarseOperator(CoarseOperator_),
      _CoarseSolve(CoarseSolve_),
      level(1) { }
  // One V-cycle: on exit out approximates A^{-1} in.
  virtual void operator()(const FineField &in, FineField & out)
  {
    GridBase *CoarseGrid = _Aggregates.CoarseGrid;
    //    auto CoarseGrid = _CoarseOperator.Grid();
    CoarseVector Csrc(CoarseGrid);
    CoarseVector Csol(CoarseGrid);
    FineField vec1(in.Grid());
    FineField vec2(in.Grid());
    std::cout<<GridLogMessage << "Calling PreSmoother " <<std::endl;
    //    std::cout<<GridLogMessage << "Calling PreSmoother input residual "<<norm2(in) <<std::endl;
    double t;
    // Fine Smoother: smooth from a zero initial guess.
    //  out = in;
    out = Zero();
    t=-usecond();
    _PreSmoother(in,out);
    t+=usecond();
    std::cout<<GridLogMessage << "PreSmoother took "<< t/1000.0<< "ms" <<std::endl;
    // Update the residual: vec1 = in - A out
    _FineOperator.Op(out,vec1); sub(vec1, in ,vec1);
    //    std::cout<<GridLogMessage <<"Residual-1 now " <<norm2(vec1)<<std::endl;
    // Fine to Coarse: restrict the residual to the coarse grid.
    t=-usecond();
    _Aggregates.ProjectToSubspace (Csrc,vec1);
    t+=usecond();
    std::cout<<GridLogMessage << "Project to coarse took "<< t/1000.0<< "ms" <<std::endl;
    // Coarse correction: approximately solve A_c Csol = Csrc.
    t=-usecond();
    Csol = Zero();
    _CoarseSolve(Csrc,Csol);
    //Csol=Zero();
    t+=usecond();
    std::cout<<GridLogMessage << "Coarse solve took "<< t/1000.0<< "ms" <<std::endl;
    // Coarse to Fine: prolongate the correction and accumulate into out.
    t=-usecond();
    //    _CoarseOperator.PromoteFromSubspace(_Aggregates,Csol,vec1);
    _Aggregates.PromoteFromSubspace(Csol,vec1);
    add(out,out,vec1);
    t+=usecond();
    std::cout<<GridLogMessage << "Promote to this level took "<< t/1000.0<< "ms" <<std::endl;
    // Residual after the coarse correction: vec1 = in - A out
    _FineOperator.Op(out,vec1); sub(vec1 ,in , vec1);
    //    std::cout<<GridLogMessage <<"Residual-2 now " <<norm2(vec1)<<std::endl;
    // Fine Smoother: post-smooth the residual, add the resulting correction.
    t=-usecond();
    //    vec2=vec1;
    vec2=Zero();
    _PostSmoother(vec1,vec2);
    t+=usecond();
    std::cout<<GridLogMessage << "PostSmoother took "<< t/1000.0<< "ms" <<std::endl;
    add( out,out,vec2);
    std::cout<<GridLogMessage << "Done " <<std::endl;
  }
};
int main (int argc, char ** argv)
{
Grid_init(&argc,&argv);
const int Ls=16;
GridCartesian * UGrid = SpaceTimeGrid::makeFourDimGrid(GridDefaultLatt(), GridDefaultSimd(Nd,vComplex::Nsimd()),GridDefaultMpi());
GridRedBlackCartesian * UrbGrid = SpaceTimeGrid::makeFourDimRedBlackGrid(UGrid);
GridCartesian * FGrid = UGrid;
GridRedBlackCartesian * FrbGrid = UrbGrid;
// Construct a coarsened grid
Coordinate clatt = GridDefaultLatt();
for(int d=0;d<clatt.size();d++){
clatt[d] = clatt[d]/2;
// clatt[d] = clatt[d]/4;
}
GridCartesian *Coarse4d = SpaceTimeGrid::makeFourDimGrid(clatt, GridDefaultSimd(Nd,vComplex::Nsimd()),GridDefaultMpi());;
std::vector<int> seeds4({1,2,3,4});
std::vector<int> cseeds({5,6,7,8});
GridParallelRNG RNG4(UGrid); RNG4.SeedFixedIntegers(seeds4);
GridParallelRNG CRNG(Coarse4d);CRNG.SeedFixedIntegers(cseeds);
Complex one(1.0);
LatticeFermion src(FGrid); src=one;
LatticeFermion result(FGrid); result=Zero();
LatticeFermion ref(FGrid); ref=Zero();
LatticeFermion tmp(FGrid);
LatticeFermion err(FGrid);
LatticeFermion precsrc(FGrid);
LatticeGaugeField Umu(UGrid);
FieldMetaData header;
std::string file("ckpoint_lat");
NerscIO::readConfiguration(Umu,header,file);
RealD csw =0.0;
RealD mass=-0.92;
WilsonCloverFermionD Dw(Umu,*UGrid,*UrbGrid,mass,csw,csw);
const int nbasis = 40;
const int cb = 0 ;
LatticeFermion prom(FGrid);
typedef GeneralCoarsenedMatrix<vSpinColourVector,vTComplex,nbasis> LittleDiracOperator;
typedef LittleDiracOperator::CoarseVector CoarseVector;
NearestStencilGeometry4D geom(Coarse4d);
std::cout<<GridLogMessage<<std::endl;
std::cout<<GridLogMessage<<"*******************************************"<<std::endl;
std::cout<<GridLogMessage<<std::endl;
// Warning: This routine calls Linop.Op, not LinOpo.HermOp
typedef Aggregation<vSpinColourVector,vTComplex,nbasis> Subspace;
Subspace Aggregates(Coarse4d,FGrid,cb);
NonHermitianLinearOperator<WilsonCloverFermionD,LatticeFermion> LinOpDw(Dw);
ShiftedNonHermitianLinearOperator<WilsonCloverFermionD,LatticeFermion> ShiftedLinOpDw(Dw,0.01);
Aggregates.CreateSubspaceGCR(RNG4,
LinOpDw,
nbasis);
LittleDiracOperator LittleDiracOp(geom,FGrid,Coarse4d);
LittleDiracOp.CoarsenOperator(LinOpDw,Aggregates);
std::cout<<GridLogMessage<<std::endl;
std::cout<<GridLogMessage<<"*******************************************"<<std::endl;
std::cout<<GridLogMessage<<std::endl;
std::cout<<GridLogMessage<<"Testing coarsened operator "<<std::endl;
CoarseVector c_src (Coarse4d);
CoarseVector c_res (Coarse4d);
CoarseVector c_proj(Coarse4d);
std::vector<LatticeFermion> subspace(nbasis,FGrid);
subspace=Aggregates.subspace;
c_src = one; // 1 in every element for vector 1.
blockPromote(c_src,err,subspace);
prom=Zero();
for(int b=0;b<nbasis;b++){
prom=prom+subspace[b];
}
err=err-prom;
std::cout<<GridLogMessage<<"Promoted back from subspace: err "<<norm2(err)<<std::endl;
std::cout<<GridLogMessage<<"c_src "<<norm2(c_src)<<std::endl;
std::cout<<GridLogMessage<<"prom "<<norm2(prom)<<std::endl;
LinOpDw.Op(prom,tmp);
blockProject(c_proj,tmp,subspace);
std::cout<<GridLogMessage<<" Called Big Dirac Op "<<norm2(tmp)<<std::endl;
LittleDiracOp.M(c_src,c_res);
std::cout<<GridLogMessage<<" Called Little Dirac Op c_src "<< norm2(c_src) << " c_res "<< norm2(c_res) <<std::endl;
std::cout<<GridLogMessage<<"Little dop : "<<norm2(c_res)<<std::endl;
// std::cout<<GridLogMessage<<" Little "<< c_res<<std::endl;
std::cout<<GridLogMessage<<"Big dop in subspace : "<<norm2(c_proj)<<std::endl;
// std::cout<<GridLogMessage<<" Big "<< c_proj<<std::endl;
c_proj = c_proj - c_res;
std::cout<<GridLogMessage<<" ldop error: "<<norm2(c_proj)<<std::endl;
// std::cout<<GridLogMessage<<" error "<< c_proj<<std::endl;
/**********
* Some solvers
**********
*/
// CG
{
MdagMLinearOperator<WilsonFermionD,LatticeFermion> HermOp(Dw);
ConjugateGradient<LatticeFermion> CG(1.0e-8,10000);
Dw.Mdag(src,precsrc);
CG(HermOp,precsrc,result);
result=Zero();
}
///////////////////////////////////////
// Coarse grid solver test
///////////////////////////////////////
std::cout<<GridLogMessage<<"******************* "<<std::endl;
std::cout<<GridLogMessage<<" Coarse Grid Solve -- Level 3 "<<std::endl;
std::cout<<GridLogMessage<<"******************* "<<std::endl;
TrivialPrecon<CoarseVector> simple;
NonHermitianLinearOperator<LittleDiracOperator,CoarseVector> LinOpCoarse(LittleDiracOp);
ShiftedNonHermitianLinearOperator<LittleDiracOperator,CoarseVector> ShiftedLinOpCoarse(LittleDiracOp,0.001);
// ShiftedNonHermitianLinearOperator<LittleDiracOperator,CoarseVector> ShiftedLinOpCoarse(LittleDiracOp,0.01);
// ShiftedNonHermitianLinearOperator<LittleDiracOperator,CoarseVector> ShiftedLinOpCoarse(LinOpCoarse,0.001);
// PrecGeneralisedConjugateResidualNonHermitian<CoarseVector> L2PGCR(1.0e-4, 100, LinOpCoarse,simple,10,10);
PrecGeneralisedConjugateResidualNonHermitian<CoarseVector> L2PGCR(1.0e-1, 100, LinOpCoarse,simple,30,30);
// PrecGeneralisedConjugateResidualNonHermitian<CoarseVector> L2PGCR(2.0e-1, 50, ShiftedLinOpCoarse,simple,50,50);
L2PGCR.Level(3);
c_res=Zero();
L2PGCR(c_src,c_res);
////////////////////////////////////////
// Fine grid smoother
////////////////////////////////////////
std::cout<<GridLogMessage<<"******************* "<<std::endl;
std::cout<<GridLogMessage<<" Fine Grid Smoother -- Level 2 "<<std::endl;
std::cout<<GridLogMessage<<"******************* "<<std::endl;
TrivialPrecon<LatticeFermionD> simple_fine;
PrecGeneralisedConjugateResidualNonHermitian<LatticeFermionD> SmootherGCR(0.1,1,ShiftedLinOpDw,simple_fine,6,6);
SmootherGCR.Level(2);
LatticeFermionD f_src(FGrid);
LatticeFermionD f_res(FGrid);
f_src = one; // 1 in every element for vector 1.
f_res=Zero();
SmootherGCR(f_src,f_res);
typedef MGPreconditioner<vSpinColourVector, vTComplex,nbasis> TwoLevelMG;
TwoLevelMG TwoLevelPrecon(Aggregates,
LinOpDw,
simple_fine,
SmootherGCR,
LinOpCoarse,
L2PGCR);
PrecGeneralisedConjugateResidualNonHermitian<LatticeFermion> L1PGCR(1.0e-8,1000,LinOpDw,TwoLevelPrecon,16,16);
L1PGCR.Level(1);
f_res=Zero();
L1PGCR(f_src,f_res);
std::cout<<GridLogMessage<<std::endl;
std::cout<<GridLogMessage<<"*******************************************"<<std::endl;
std::cout<<GridLogMessage<<std::endl;
std::cout<<GridLogMessage << "Done "<< std::endl;
Grid_finalize();
return 0;
}

View File

@@ -0,0 +1,320 @@
/*************************************************************************************
Grid physics library, www.github.com/paboyle/Grid
Source file: ./tests/Test_padded_cell.cc
Copyright (C) 2023
Author: Peter Boyle <paboyle@ph.ed.ac.uk>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */
#include <Grid/Grid.h>
#include <Grid/lattice/PaddedCell.h>
#include <Grid/stencil/GeneralLocalStencil.h>
#include <Grid/algorithms/iterative/PrecGeneralisedConjugateResidual.h>
#include <Grid/algorithms/iterative/PrecGeneralisedConjugateResidualNonHermitian.h>
#include <Grid/algorithms/iterative/BiCGSTAB.h>
using namespace std;
using namespace Grid;
// Two-level multigrid preconditioner implementing a single V-cycle:
//   1) pre-smooth on the fine grid,
//   2) coarse-grid correction through the aggregation subspace,
//   3) post-smooth on the fine grid.
// Intended for use as the preconditioner of an outer flexible GCR solve.
// All components are held by reference; the caller owns them and must keep
// them alive for the lifetime of this object.
template<class Fobj,class CComplex,int nbasis>
class MGPreconditioner : public LinearFunction< Lattice<Fobj> > {
public:
using LinearFunction<Lattice<Fobj> >::operator();
typedef Aggregation<Fobj,CComplex,nbasis> Aggregates;
typedef typename Aggregation<Fobj,CComplex,nbasis>::FineField FineField;
typedef typename Aggregation<Fobj,CComplex,nbasis>::CoarseVector CoarseVector;
typedef typename Aggregation<Fobj,CComplex,nbasis>::CoarseMatrix CoarseMatrix;
typedef LinearOperatorBase<FineField> FineOperator;
typedef LinearFunction <FineField> FineSmoother;
typedef LinearOperatorBase<CoarseVector> CoarseOperator;
typedef LinearFunction <CoarseVector> CoarseSolver;
Aggregates & _Aggregates; // subspace vectors + project/promote between fine and coarse grids
FineOperator & _FineOperator; // fine-grid operator, used to form residuals
FineSmoother & _PreSmoother; // smoother applied before the coarse correction
FineSmoother & _PostSmoother; // smoother applied after the coarse correction
CoarseOperator & _CoarseOperator; // coarse operator; not referenced in operator() (kept for interface symmetry)
CoarseSolver & _CoarseSolve; // approximate solver on the coarse grid
int level; void Level(int lv) {level = lv; }; // multigrid level label, used for log/solver nesting
// Wire up the preconditioner from externally constructed components.
MGPreconditioner(Aggregates &Agg,
FineOperator &Fine,
FineSmoother &PreSmoother,
FineSmoother &PostSmoother,
CoarseOperator &CoarseOperator_,
CoarseSolver &CoarseSolve_)
: _Aggregates(Agg),
_FineOperator(Fine),
_PreSmoother(PreSmoother),
_PostSmoother(PostSmoother),
_CoarseOperator(CoarseOperator_),
_CoarseSolve(CoarseSolve_),
level(1) { }
// Apply one V-cycle: out <- approximate solution of FineOperator * out = in.
// 'out' is fully overwritten (initialised to zero below); 'in' is untouched.
virtual void operator()(const FineField &in, FineField & out)
{
GridBase *CoarseGrid = _Aggregates.CoarseGrid;
// auto CoarseGrid = _CoarseOperator.Grid();
CoarseVector Csrc(CoarseGrid);
CoarseVector Csol(CoarseGrid);
FineField vec1(in.Grid());
FineField vec2(in.Grid());
std::cout<<GridLogMessage << "Calling PreSmoother " <<std::endl;
// std::cout<<GridLogMessage << "Calling PreSmoother input residual "<<norm2(in) <<std::endl;
double t;
// Fine Smoother: out <- PreSmoother(in), starting from zero
// out = in;
out = Zero();
t=-usecond();
_PreSmoother(in,out);
t+=usecond();
std::cout<<GridLogMessage << "PreSmoother took "<< t/1000.0<< "ms" <<std::endl;
// Update the residual: vec1 = in - A*out
_FineOperator.Op(out,vec1); sub(vec1, in ,vec1);
// std::cout<<GridLogMessage <<"Residual-1 now " <<norm2(vec1)<<std::endl;
// Fine to Coarse: restrict the residual onto the aggregation subspace
t=-usecond();
_Aggregates.ProjectToSubspace (Csrc,vec1);
t+=usecond();
std::cout<<GridLogMessage << "Project to coarse took "<< t/1000.0<< "ms" <<std::endl;
// Coarse correction: approximately solve the coarse system for the error
t=-usecond();
Csol = Zero();
_CoarseSolve(Csrc,Csol);
//Csol=Zero();
t+=usecond();
std::cout<<GridLogMessage << "Coarse solve took "<< t/1000.0<< "ms" <<std::endl;
// Coarse to Fine: prolong the coarse error and add it to the iterate
t=-usecond();
// _CoarseOperator.PromoteFromSubspace(_Aggregates,Csol,vec1);
_Aggregates.PromoteFromSubspace(Csol,vec1);
add(out,out,vec1);
t+=usecond();
std::cout<<GridLogMessage << "Promote to this level took "<< t/1000.0<< "ms" <<std::endl;
// Residual after the coarse correction: vec1 = in - A*out
_FineOperator.Op(out,vec1); sub(vec1 ,in , vec1);
// std::cout<<GridLogMessage <<"Residual-2 now " <<norm2(vec1)<<std::endl;
// Fine Smoother on the remaining residual; its correction is added to out
t=-usecond();
// vec2=vec1;
vec2=Zero();
_PostSmoother(vec1,vec2);
t+=usecond();
std::cout<<GridLogMessage << "PostSmoother took "<< t/1000.0<< "ms" <<std::endl;
add( out,out,vec2);
std::cout<<GridLogMessage << "Done " <<std::endl;
}
};
// Test driver: build a gamma5-doubled coarse space for the Wilson-clover
// operator (2*nbasis vectors: {v_b} together with {g5*v_b}), coarsen the
// operator onto it, verify Galerkin consistency, then exercise a two-level
// multigrid-preconditioned GCR solve.
// Requires a NERSC gauge configuration "ckpoint_lat" in the working directory.
int main (int argc, char ** argv)
{
Grid_init(&argc,&argv);
const int Ls=16; // NOTE(review): unused in this 4d test; likely left over from a 5d variant
GridCartesian * UGrid = SpaceTimeGrid::makeFourDimGrid(GridDefaultLatt(), GridDefaultSimd(Nd,vComplex::Nsimd()),GridDefaultMpi());
GridRedBlackCartesian * UrbGrid = SpaceTimeGrid::makeFourDimRedBlackGrid(UGrid);
GridCartesian * FGrid = UGrid;
GridRedBlackCartesian * FrbGrid = UrbGrid;
// Construct a coarsened grid with half the extent in every direction
Coordinate clatt = GridDefaultLatt();
for(int d=0;d<clatt.size();d++){
clatt[d] = clatt[d]/2;
// clatt[d] = clatt[d]/4;
}
GridCartesian *Coarse4d = SpaceTimeGrid::makeFourDimGrid(clatt, GridDefaultSimd(Nd,vComplex::Nsimd()),GridDefaultMpi());;
// Fixed RNG seeds so runs are reproducible
std::vector<int> seeds4({1,2,3,4});
std::vector<int> cseeds({5,6,7,8});
GridParallelRNG RNG4(UGrid); RNG4.SeedFixedIntegers(seeds4);
GridParallelRNG CRNG(Coarse4d);CRNG.SeedFixedIntegers(cseeds);
LatticeFermion src(FGrid); random(RNG4,src);
LatticeFermion result(FGrid); result=Zero();
LatticeFermion ref(FGrid); ref=Zero();
LatticeFermion tmp(FGrid);
LatticeFermion err(FGrid);
// Gauge configuration read from disk (NERSC format)
LatticeGaugeField Umu(UGrid);
FieldMetaData header;
std::string file("ckpoint_lat");
NerscIO::readConfiguration(Umu,header,file);
RealD csw =0.0; // both clover coefficients set to zero
RealD mass=-0.92;
WilsonCloverFermionD Dw(Umu,*UGrid,*UrbGrid,mass,csw,csw);
const int nbasis = 20; // coarse space is doubled to 2*nbasis below
const int cb = 0 ;
LatticeFermion prom(FGrid);
typedef GeneralCoarsenedMatrix<vSpinColourVector,vTComplex,2*nbasis> LittleDiracOperator;
typedef LittleDiracOperator::CoarseVector CoarseVector;
NearestStencilGeometry4D geom(Coarse4d);
std::cout<<GridLogMessage<<std::endl;
std::cout<<GridLogMessage<<"*******************************************"<<std::endl;
std::cout<<GridLogMessage<<std::endl;
// Warning: this setup routine calls Linop.Op, not Linop.HermOp
typedef Aggregation<vSpinColourVector,vTComplex,nbasis> Subspace;
Subspace Aggregates(Coarse4d,FGrid,cb);
MdagMLinearOperator<WilsonCloverFermionD,LatticeFermion> MdagMOpDw(Dw);
NonHermitianLinearOperator<WilsonCloverFermionD,LatticeFermion> LinOpDw(Dw);
ShiftedNonHermitianLinearOperator<WilsonCloverFermionD,LatticeFermion> ShiftedLinOpDw(Dw,0.5);
// Aggregates.CreateSubspaceGCR(RNG4,
// LinOpDw,
// nbasis);
Aggregates.CreateSubspace(RNG4,MdagMOpDw,nbasis);
// Double the coarse space: gamma5 partner for every basis vector
typedef Aggregation<vSpinColourVector,vTComplex,2*nbasis> CombinedSubspace;
CombinedSubspace CombinedUV(Coarse4d,UGrid,cb);
for(int b=0;b<nbasis;b++){
Gamma G5(Gamma::Algebra::Gamma5);
CombinedUV.subspace[b] = Aggregates.subspace[b];
CombinedUV.subspace[b+nbasis] = G5*Aggregates.subspace[b];
}
// Coarsen the fine operator onto the doubled 2*nbasis coarse space
LittleDiracOperator LittleDiracOp(geom,FGrid,Coarse4d);
LittleDiracOp.CoarsenOperator(LinOpDw,CombinedUV);
std::cout<<GridLogMessage<<std::endl;
std::cout<<GridLogMessage<<"*******************************************"<<std::endl;
std::cout<<GridLogMessage<<std::endl;
std::cout<<GridLogMessage<<"Testing coarsened operator "<<std::endl;
CoarseVector c_src (Coarse4d);
CoarseVector c_res (Coarse4d);
CoarseVector c_proj(Coarse4d);
std::vector<LatticeFermion> subspace(2*nbasis,FGrid);
subspace=CombinedUV.subspace;
Complex one(1.0);
c_src = one; // 1 in every element for vector 1.
// Round trip: promote, then compare against the explicit sum of basis
// vectors; err should be (numerically) zero
blockPromote(c_src,err,subspace);
prom=Zero();
for(int b=0;b<2*nbasis;b++){
prom=prom+subspace[b];
}
err=err-prom;
std::cout<<GridLogMessage<<"Promoted back from subspace: err "<<norm2(err)<<std::endl;
std::cout<<GridLogMessage<<"c_src "<<norm2(c_src)<<std::endl;
std::cout<<GridLogMessage<<"prom "<<norm2(prom)<<std::endl;
// Galerkin consistency check: project(A*prom) should equal LittleDiracOp*c_src
LinOpDw.Op(prom,tmp);
blockProject(c_proj,tmp,subspace);
std::cout<<GridLogMessage<<" Called Big Dirac Op "<<norm2(tmp)<<std::endl;
LittleDiracOp.M(c_src,c_res);
std::cout<<GridLogMessage<<" Called Little Dirac Op c_src "<< norm2(c_src) << " c_res "<< norm2(c_res) <<std::endl;
std::cout<<GridLogMessage<<"Little dop : "<<norm2(c_res)<<std::endl;
// std::cout<<GridLogMessage<<" Little "<< c_res<<std::endl;
std::cout<<GridLogMessage<<"Big dop in subspace : "<<norm2(c_proj)<<std::endl;
// std::cout<<GridLogMessage<<" Big "<< c_proj<<std::endl;
c_proj = c_proj - c_res;
std::cout<<GridLogMessage<<" ldop error: "<<norm2(c_proj)<<std::endl;
// std::cout<<GridLogMessage<<" error "<< c_proj<<std::endl;
/**********
* Some solvers
**********
*/
///////////////////////////////////////
// Coarse grid solver test
///////////////////////////////////////
std::cout<<GridLogMessage<<"******************* "<<std::endl;
std::cout<<GridLogMessage<<" Coarse Grid Solve -- Level 3 "<<std::endl;
std::cout<<GridLogMessage<<"******************* "<<std::endl;
TrivialPrecon<CoarseVector> simple;
NonHermitianLinearOperator<LittleDiracOperator,CoarseVector> LinOpCoarse(LittleDiracOp);
// PrecGeneralisedConjugateResidualNonHermitian<CoarseVector> L2PGCR(1.0e-4, 100, LinOpCoarse,simple,10,10);
PrecGeneralisedConjugateResidualNonHermitian<CoarseVector> L2PGCR(1.0e-2, 100, LinOpCoarse,simple,30,30);
L2PGCR.Level(3);
c_res=Zero();
L2PGCR(c_src,c_res);
////////////////////////////////////////
// Fine grid smoother
////////////////////////////////////////
std::cout<<GridLogMessage<<"******************* "<<std::endl;
std::cout<<GridLogMessage<<" Fine Grid Smoother -- Level 2 "<<std::endl;
std::cout<<GridLogMessage<<"******************* "<<std::endl;
TrivialPrecon<LatticeFermionD> simple_fine;
// Loosely converged GCR on the shifted operator serves as the smoother
PrecGeneralisedConjugateResidualNonHermitian<LatticeFermionD> SmootherGCR(0.01,1,ShiftedLinOpDw,simple_fine,4,4);
SmootherGCR.Level(2);
LatticeFermionD f_src(FGrid);
LatticeFermionD f_res(FGrid);
f_src = one; // 1 in every element for vector 1.
f_res=Zero();
SmootherGCR(f_src,f_res);
// Assemble the two-level preconditioner and run the outer fine-grid solve
typedef MGPreconditioner<vSpinColourVector, vTComplex,2*nbasis> TwoLevelMG;
TwoLevelMG TwoLevelPrecon(CombinedUV,
LinOpDw,
simple_fine,
SmootherGCR,
LinOpCoarse,
L2PGCR);
PrecGeneralisedConjugateResidualNonHermitian<LatticeFermion> L1PGCR(1.0e-8,1000,LinOpDw,TwoLevelPrecon,32,32);
L1PGCR.Level(1);
f_res=Zero();
L1PGCR(f_src,f_res);
std::cout<<GridLogMessage<<std::endl;
std::cout<<GridLogMessage<<"*******************************************"<<std::endl;
std::cout<<GridLogMessage<<std::endl;
std::cout<<GridLogMessage << "Done "<< std::endl;
Grid_finalize();
return 0;
}

View File

@@ -0,0 +1,312 @@
/*************************************************************************************
Grid physics library, www.github.com/paboyle/Grid
Source file: ./tests/Test_padded_cell.cc
Copyright (C) 2023
Author: Peter Boyle <paboyle@ph.ed.ac.uk>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */
#include <Grid/Grid.h>
#include <Grid/lattice/PaddedCell.h>
#include <Grid/stencil/GeneralLocalStencil.h>
#include <Grid/algorithms/iterative/PrecGeneralisedConjugateResidual.h>
#include <Grid/algorithms/iterative/PrecGeneralisedConjugateResidualNonHermitian.h>
#include <Grid/algorithms/iterative/BiCGSTAB.h>
using namespace std;
using namespace Grid;
// Two-level multigrid preconditioner implementing a single V-cycle:
//   1) pre-smooth on the fine grid,
//   2) coarse-grid correction through the aggregation subspace,
//   3) post-smooth on the fine grid.
// Intended for use as the preconditioner of an outer flexible GCR solve.
// All components are held by reference; the caller owns them and must keep
// them alive for the lifetime of this object.
template<class Fobj,class CComplex,int nbasis>
class MGPreconditioner : public LinearFunction< Lattice<Fobj> > {
public:
using LinearFunction<Lattice<Fobj> >::operator();
typedef Aggregation<Fobj,CComplex,nbasis> Aggregates;
typedef typename Aggregation<Fobj,CComplex,nbasis>::FineField FineField;
typedef typename Aggregation<Fobj,CComplex,nbasis>::CoarseVector CoarseVector;
typedef typename Aggregation<Fobj,CComplex,nbasis>::CoarseMatrix CoarseMatrix;
typedef LinearOperatorBase<FineField> FineOperator;
typedef LinearFunction <FineField> FineSmoother;
typedef LinearOperatorBase<CoarseVector> CoarseOperator;
typedef LinearFunction <CoarseVector> CoarseSolver;
Aggregates & _Aggregates; // subspace vectors + project/promote between fine and coarse grids
FineOperator & _FineOperator; // fine-grid operator, used to form residuals
FineSmoother & _PreSmoother; // smoother applied before the coarse correction
FineSmoother & _PostSmoother; // smoother applied after the coarse correction
CoarseOperator & _CoarseOperator; // coarse operator; not referenced in operator() (kept for interface symmetry)
CoarseSolver & _CoarseSolve; // approximate solver on the coarse grid
int level; void Level(int lv) {level = lv; }; // multigrid level label, used for log/solver nesting
// Wire up the preconditioner from externally constructed components.
MGPreconditioner(Aggregates &Agg,
FineOperator &Fine,
FineSmoother &PreSmoother,
FineSmoother &PostSmoother,
CoarseOperator &CoarseOperator_,
CoarseSolver &CoarseSolve_)
: _Aggregates(Agg),
_FineOperator(Fine),
_PreSmoother(PreSmoother),
_PostSmoother(PostSmoother),
_CoarseOperator(CoarseOperator_),
_CoarseSolve(CoarseSolve_),
level(1) { }
// Apply one V-cycle: out <- approximate solution of FineOperator * out = in.
// 'out' is fully overwritten (initialised to zero below); 'in' is untouched.
virtual void operator()(const FineField &in, FineField & out)
{
GridBase *CoarseGrid = _Aggregates.CoarseGrid;
// auto CoarseGrid = _CoarseOperator.Grid();
CoarseVector Csrc(CoarseGrid);
CoarseVector Csol(CoarseGrid);
FineField vec1(in.Grid());
FineField vec2(in.Grid());
std::cout<<GridLogMessage << "Calling PreSmoother " <<std::endl;
// std::cout<<GridLogMessage << "Calling PreSmoother input residual "<<norm2(in) <<std::endl;
double t;
// Fine Smoother: out <- PreSmoother(in), starting from zero
// out = in;
out = Zero();
t=-usecond();
_PreSmoother(in,out);
t+=usecond();
std::cout<<GridLogMessage << "PreSmoother took "<< t/1000.0<< "ms" <<std::endl;
// Update the residual: vec1 = in - A*out
_FineOperator.Op(out,vec1); sub(vec1, in ,vec1);
// std::cout<<GridLogMessage <<"Residual-1 now " <<norm2(vec1)<<std::endl;
// Fine to Coarse: restrict the residual onto the aggregation subspace
t=-usecond();
_Aggregates.ProjectToSubspace (Csrc,vec1);
t+=usecond();
std::cout<<GridLogMessage << "Project to coarse took "<< t/1000.0<< "ms" <<std::endl;
// Coarse correction: approximately solve the coarse system for the error
t=-usecond();
Csol = Zero();
_CoarseSolve(Csrc,Csol);
//Csol=Zero();
t+=usecond();
std::cout<<GridLogMessage << "Coarse solve took "<< t/1000.0<< "ms" <<std::endl;
// Coarse to Fine: prolong the coarse error and add it to the iterate
t=-usecond();
// _CoarseOperator.PromoteFromSubspace(_Aggregates,Csol,vec1);
_Aggregates.PromoteFromSubspace(Csol,vec1);
add(out,out,vec1);
t+=usecond();
std::cout<<GridLogMessage << "Promote to this level took "<< t/1000.0<< "ms" <<std::endl;
// Residual after the coarse correction: vec1 = in - A*out
_FineOperator.Op(out,vec1); sub(vec1 ,in , vec1);
// std::cout<<GridLogMessage <<"Residual-2 now " <<norm2(vec1)<<std::endl;
// Fine Smoother on the remaining residual; its correction is added to out
t=-usecond();
// vec2=vec1;
vec2=Zero();
_PostSmoother(vec1,vec2);
t+=usecond();
std::cout<<GridLogMessage << "PostSmoother took "<< t/1000.0<< "ms" <<std::endl;
add( out,out,vec2);
std::cout<<GridLogMessage << "Done " <<std::endl;
}
};
// Test driver: build an nbasis=40 coarse space for the Wilson-clover operator
// (inverse-iteration subspace, no gamma5 doubling), coarsen the operator onto
// it, verify Galerkin consistency, then exercise a two-level
// multigrid-preconditioned GCR solve.
// Requires a NERSC gauge configuration "ckpoint_lat" in the working directory.
int main (int argc, char ** argv)
{
Grid_init(&argc,&argv);
const int Ls=16; // NOTE(review): unused in this 4d test; likely left over from a 5d variant
GridCartesian * UGrid = SpaceTimeGrid::makeFourDimGrid(GridDefaultLatt(), GridDefaultSimd(Nd,vComplex::Nsimd()),GridDefaultMpi());
GridRedBlackCartesian * UrbGrid = SpaceTimeGrid::makeFourDimRedBlackGrid(UGrid);
GridCartesian * FGrid = UGrid;
GridRedBlackCartesian * FrbGrid = UrbGrid;
// Construct a coarsened grid with half the extent in every direction
Coordinate clatt = GridDefaultLatt();
for(int d=0;d<clatt.size();d++){
clatt[d] = clatt[d]/2;
// clatt[d] = clatt[d]/4;
}
GridCartesian *Coarse4d = SpaceTimeGrid::makeFourDimGrid(clatt, GridDefaultSimd(Nd,vComplex::Nsimd()),GridDefaultMpi());;
// Fixed RNG seeds so runs are reproducible
std::vector<int> seeds4({1,2,3,4});
std::vector<int> cseeds({5,6,7,8});
GridParallelRNG RNG4(UGrid); RNG4.SeedFixedIntegers(seeds4);
GridParallelRNG CRNG(Coarse4d);CRNG.SeedFixedIntegers(cseeds);
LatticeFermion src(FGrid); random(RNG4,src);
LatticeFermion result(FGrid); result=Zero();
LatticeFermion ref(FGrid); ref=Zero();
LatticeFermion tmp(FGrid);
LatticeFermion err(FGrid);
// Gauge configuration read from disk (NERSC format)
LatticeGaugeField Umu(UGrid);
FieldMetaData header;
std::string file("ckpoint_lat");
NerscIO::readConfiguration(Umu,header,file);
RealD csw =0.0; // both clover coefficients set to zero
RealD mass=-0.92;
WilsonCloverFermionD Dw(Umu,*UGrid,*UrbGrid,mass,csw,csw);
const int nbasis = 40;
const int cb = 0 ;
LatticeFermion prom(FGrid);
typedef GeneralCoarsenedMatrix<vSpinColourVector,vTComplex,nbasis> LittleDiracOperator;
typedef LittleDiracOperator::CoarseVector CoarseVector;
NearestStencilGeometry4D geom(Coarse4d);
std::cout<<GridLogMessage<<std::endl;
std::cout<<GridLogMessage<<"*******************************************"<<std::endl;
std::cout<<GridLogMessage<<std::endl;
// Warning: this setup routine calls Linop.Op, not Linop.HermOp
typedef Aggregation<vSpinColourVector,vTComplex,nbasis> Subspace;
Subspace Aggregates(Coarse4d,FGrid,cb);
MdagMLinearOperator<WilsonCloverFermionD,LatticeFermion> MdagMOpDw(Dw);
NonHermitianLinearOperator<WilsonCloverFermionD,LatticeFermion> LinOpDw(Dw);
ShiftedNonHermitianLinearOperator<WilsonCloverFermionD,LatticeFermion> ShiftedLinOpDw(Dw,0.5);
// Aggregates.CreateSubspaceGCR(RNG4,
// LinOpDw,
// nbasis);
Aggregates.CreateSubspace(RNG4,MdagMOpDw,nbasis);
// Coarsen the fine operator onto the aggregated coarse space
LittleDiracOperator LittleDiracOp(geom,FGrid,Coarse4d);
LittleDiracOp.CoarsenOperator(LinOpDw,Aggregates);
std::cout<<GridLogMessage<<std::endl;
std::cout<<GridLogMessage<<"*******************************************"<<std::endl;
std::cout<<GridLogMessage<<std::endl;
std::cout<<GridLogMessage<<"Testing coarsened operator "<<std::endl;
CoarseVector c_src (Coarse4d);
CoarseVector c_res (Coarse4d);
CoarseVector c_proj(Coarse4d);
std::vector<LatticeFermion> subspace(nbasis,FGrid);
subspace=Aggregates.subspace;
Complex one(1.0);
c_src = one; // 1 in every element for vector 1.
// Round trip: promote, then compare against the explicit sum of basis
// vectors; err should be (numerically) zero
blockPromote(c_src,err,subspace);
prom=Zero();
for(int b=0;b<nbasis;b++){
prom=prom+subspace[b];
}
err=err-prom;
std::cout<<GridLogMessage<<"Promoted back from subspace: err "<<norm2(err)<<std::endl;
std::cout<<GridLogMessage<<"c_src "<<norm2(c_src)<<std::endl;
std::cout<<GridLogMessage<<"prom "<<norm2(prom)<<std::endl;
// Galerkin consistency check: project(A*prom) should equal LittleDiracOp*c_src
LinOpDw.Op(prom,tmp);
blockProject(c_proj,tmp,subspace);
std::cout<<GridLogMessage<<" Called Big Dirac Op "<<norm2(tmp)<<std::endl;
LittleDiracOp.M(c_src,c_res);
std::cout<<GridLogMessage<<" Called Little Dirac Op c_src "<< norm2(c_src) << " c_res "<< norm2(c_res) <<std::endl;
std::cout<<GridLogMessage<<"Little dop : "<<norm2(c_res)<<std::endl;
// std::cout<<GridLogMessage<<" Little "<< c_res<<std::endl;
std::cout<<GridLogMessage<<"Big dop in subspace : "<<norm2(c_proj)<<std::endl;
// std::cout<<GridLogMessage<<" Big "<< c_proj<<std::endl;
c_proj = c_proj - c_res;
std::cout<<GridLogMessage<<" ldop error: "<<norm2(c_proj)<<std::endl;
// std::cout<<GridLogMessage<<" error "<< c_proj<<std::endl;
/**********
* Some solvers
**********
*/
///////////////////////////////////////
// Coarse grid solver test
///////////////////////////////////////
std::cout<<GridLogMessage<<"******************* "<<std::endl;
std::cout<<GridLogMessage<<" Coarse Grid Solve -- Level 3 "<<std::endl;
std::cout<<GridLogMessage<<"******************* "<<std::endl;
TrivialPrecon<CoarseVector> simple;
NonHermitianLinearOperator<LittleDiracOperator,CoarseVector> LinOpCoarse(LittleDiracOp);
// PrecGeneralisedConjugateResidualNonHermitian<CoarseVector> L2PGCR(1.0e-4, 100, LinOpCoarse,simple,10,10);
PrecGeneralisedConjugateResidualNonHermitian<CoarseVector> L2PGCR(1.0e-2, 100, LinOpCoarse,simple,30,30);
L2PGCR.Level(3);
c_res=Zero();
L2PGCR(c_src,c_res);
////////////////////////////////////////
// Fine grid smoother
////////////////////////////////////////
std::cout<<GridLogMessage<<"******************* "<<std::endl;
std::cout<<GridLogMessage<<" Fine Grid Smoother -- Level 2 "<<std::endl;
std::cout<<GridLogMessage<<"******************* "<<std::endl;
TrivialPrecon<LatticeFermionD> simple_fine;
// Loosely converged GCR on the shifted operator serves as the smoother
PrecGeneralisedConjugateResidualNonHermitian<LatticeFermionD> SmootherGCR(0.01,1,ShiftedLinOpDw,simple_fine,6,6);
SmootherGCR.Level(2);
LatticeFermionD f_src(FGrid);
LatticeFermionD f_res(FGrid);
f_src = one; // 1 in every element for vector 1.
f_res=Zero();
SmootherGCR(f_src,f_res);
// Assemble the two-level preconditioner and run the outer fine-grid solve
typedef MGPreconditioner<vSpinColourVector, vTComplex,nbasis> TwoLevelMG;
TwoLevelMG TwoLevelPrecon(Aggregates,
LinOpDw,
simple_fine,
SmootherGCR,
LinOpCoarse,
L2PGCR);
PrecGeneralisedConjugateResidualNonHermitian<LatticeFermion> L1PGCR(1.0e-8,1000,LinOpDw,TwoLevelPrecon,32,32);
L1PGCR.Level(1);
f_res=Zero();
L1PGCR(f_src,f_res);
std::cout<<GridLogMessage<<std::endl;
std::cout<<GridLogMessage<<"*******************************************"<<std::endl;
std::cout<<GridLogMessage<<std::endl;
std::cout<<GridLogMessage << "Done "<< std::endl;
Grid_finalize();
return 0;
}

View File

@@ -177,7 +177,7 @@ int main (int argc, char ** argv)
GaugeLorentz diff = derivOrig - derivNew;
double n = norm2(diff);
std::cout << GridLogMessage << "Difference " << n << " (expect 0)" << std::endl;
assert(n<1e-10);
GRID_ASSERT(n<1e-10);
std::cout << GridLogMessage << "Timings orig: " << (t1-t0)/1000 << "ms, new: " << (t2-t1)/1000 << "ms" << std::endl;
torig += (t1-t0)/1000; tnew += (t2-t1)/1000;

View File

@@ -86,7 +86,7 @@ int main (int argc, char ** argv)
GaugeMat diff = staple_orig - staple_opt;
double n = norm2(diff);
std::cout << GridLogMessage << mu << " " << n << std::endl;
assert(n<1e-10);
GRID_ASSERT(n<1e-10);
}
std::cout << GridLogMessage << "RectStaple timings orig: " << torig/1000/count << "ms, optimized: " << topt/1000/count << "ms" << std::endl;

View File

@@ -106,7 +106,7 @@ int main (int argc, char ** argv)
peekLocalSite(g,Ug_v,gcoor);
peekLocalSite(l,Ul_v,lcoor);
g=g-l;
assert(norm2(g)==0);
GRID_ASSERT(norm2(g)==0);
diff = diff + norm2(g);
n = n + norm2(l);
}}}}
@@ -198,6 +198,6 @@ int main (int argc, char ** argv)
std::cout << GridLogMessage << " Average plaquette via padded cell "<<result<<std::endl;
std::cout << GridLogMessage << " Diff "<<result-plaq<<std::endl;
assert(fabs(result-plaq)<1.0e-8);
GRID_ASSERT(fabs(result-plaq)<1.0e-8);
Grid_finalize();
}

View File

@@ -572,7 +572,7 @@ int main (int argc, char ** argv)
GaugeMat diff = staple_orig - staple_padded;
double n = norm2(diff);
std::cout << GridLogMessage << mu << " " << n << std::endl;
assert(n<1e-10);
GRID_ASSERT(n<1e-10);
}
std::cout << GridLogMessage << "RectStaple timings orig: " << torig/1000/count << "ms, padded: " << tpadded/1000/count << "ms" << std::endl;

View File

@@ -0,0 +1,14 @@
#pragma once
// Stub used by fermion tests: when the build excludes fermion instantiations,
// this header supplies a trivial main() that explains why the test cannot run
// and exits with a failure status, so the real test body can be compiled out.
#ifndef BUILD_FERMION_INSTANTIATIONS
// NOTE(review): callers gate the real test on ENABLE_FERMION_INSTANTIATIONS
// while this guard checks BUILD_FERMION_INSTANTIATIONS -- confirm the build
// system defines both together, otherwise two (or zero) main()s result.
#include <iostream>
int main(void) {
  std::cout << "This build of Grid was configured to exclude fermion instantiations, "
            << "which this test relies on. "
            << "Please reconfigure and rebuild Grid with --enable-fermion-instantiations "
            << "to run this test."
            << std::endl;
  return 1;
}
#endif

View File

@@ -179,7 +179,7 @@ void ForceTest(Action<LatticeGaugeField> &action,LatticeGaugeField & U,MomentumF
std::cout<< GridLogMessage << "dSpred : "<< dSpred.real() <<std::endl;
std::cout<< GridLogMessage << "diff : "<< diff<<std::endl;
std::cout<< GridLogMessage << "*********************************************************"<<std::endl;
// assert(diff<1.0);
// GRID_ASSERT(diff<1.0);
std::cout<< GridLogMessage << "Done" <<std::endl;
std::cout << GridLogMessage << "*********************************************************"<<std::endl;
}

View File

@@ -146,7 +146,7 @@ int main (int argc, char ** argv)
std::cout << GridLogMessage << " Sprime "<<Sprime<<std::endl;
std::cout << GridLogMessage << "dS "<<Sprime-S<<std::endl;
std::cout << GridLogMessage << "predict dS "<< dSpred <<std::endl;
assert( fabs(real(Sprime-S-dSpred)) < 1.0e-2 ) ;
GRID_ASSERT( fabs(real(Sprime-S-dSpred)) < 1.0e-2 ) ;
std::cout<< GridLogMessage << "Done" <<std::endl;
Grid_finalize();

View File

@@ -376,7 +376,7 @@ void ForceTest(Action<LatticeGaugeField> &action,LatticeGaugeField & U,MomentumF
std::cout<< GridLogMessage << "dSpred : "<< dSpred.real() <<std::endl;
std::cout<< GridLogMessage << "diff : "<< diff<<std::endl;
std::cout<< GridLogMessage << "*********************************************************"<<std::endl;
// assert(diff<1.0);
// GRID_ASSERT(diff<1.0);
std::cout<< GridLogMessage << "Done" <<std::endl;
std::cout << GridLogMessage << "*********************************************************"<<std::endl;
}

View File

@@ -150,7 +150,7 @@ int main (int argc, char ** argv)
std::cout << GridLogMessage << "dS "<<Sprime-S<<std::endl;
std::cout << GridLogMessage << "predict dS "<< dSpred <<std::endl;
assert( fabs(real(Sprime-S-dSpred)) < 1.0 ) ;
GRID_ASSERT( fabs(real(Sprime-S-dSpred)) < 1.0 ) ;
std::cout<< GridLogMessage << "Done" <<std::endl;
Grid_finalize();

View File

@@ -166,7 +166,7 @@ int main (int argc, char** argv)
printf("real(dS_predict) = %1.15e\n", dSpred.real());
printf("imag(dS_predict) = %1.15e\n\n", dSpred.imag());
assert( fabs(real(Sprime-S-dSpred)) < 1.0 ) ;
GRID_ASSERT( fabs(real(Sprime-S-dSpred)) < 1.0 ) ;
std::cout << GridLogMessage << "Done" << std::endl;
Grid_finalize();

View File

@@ -241,7 +241,7 @@ int main (int argc, char ** argv)
std::cout << GridLogMessage << "dS - dt^2 term "<< Hmomprime - Hmom + Sprime - S - dSm2 <<std::endl;
assert( fabs(real(Sprime-S-dSpred)) < 5.0 ) ;
GRID_ASSERT( fabs(real(Sprime-S-dSpred)) < 5.0 ) ;
std::cout<< GridLogMessage << "Done" <<std::endl;
Grid_finalize();

View File

@@ -170,7 +170,7 @@ int main (int argc, char** argv)
printf("real(dS_predict) = %1.15e\n", dSpred.real());
printf("imag(dS_predict) = %1.15e\n\n", dSpred.imag());
assert( fabs(real(Sprime-S-dSpred)) < 1.0 ) ;
GRID_ASSERT( fabs(real(Sprime-S-dSpred)) < 1.0 ) ;
std::cout << GridLogMessage << "Done" << std::endl;
Grid_finalize();

View File

@@ -62,6 +62,7 @@ void ForceTest(Action<LatticeGaugeField> &action,ConfigurationBase<LatticeGaugeF
Gimpl::generate_momenta(P,sRNG,RNG4);
// Filter.applyFilter(P);
std::cout << GridLogMessage << "Initial momenta " << norm2(P) << std::endl;
action.refresh(smU,sRNG,RNG4);
@@ -70,6 +71,8 @@ void ForceTest(Action<LatticeGaugeField> &action,ConfigurationBase<LatticeGaugeF
std::cout << GridLogMessage << "+++++++++++++++++++++++++++++++++++++++++++++++++++++++++"<<std::endl;
RealD S1 = action.S(smU);
std::cout << GridLogMessage << "Initial action " << S1 << std::endl;
Gimpl::update_field(P,U,eps);
smU.set_Field(U);
@@ -80,6 +83,7 @@ void ForceTest(Action<LatticeGaugeField> &action,ConfigurationBase<LatticeGaugeF
action.deriv(smU,UdSdU);
UdSdU = Ta(UdSdU);
// Filter.applyFilter(UdSdU);
std::cout << GridLogMessage << "Derivative " << norm2(UdSdU) << std::endl;
DumpSliceNorm("Force",UdSdU,Nd-1);
@@ -91,6 +95,7 @@ void ForceTest(Action<LatticeGaugeField> &action,ConfigurationBase<LatticeGaugeF
std::cout << GridLogMessage << "+++++++++++++++++++++++++++++++++++++++++++++++++++++++++"<<std::endl;
RealD S2 = action.S(smU);
std::cout << GridLogMessage << "Final action " << S1 << std::endl;
// Use the derivative
LatticeComplex dS(UGrid); dS = Zero();
@@ -109,7 +114,7 @@ void ForceTest(Action<LatticeGaugeField> &action,ConfigurationBase<LatticeGaugeF
std::cout<< GridLogMessage << "dSpred : "<< dSpred.real() <<std::endl;
std::cout<< GridLogMessage << "diff : "<< diff<<std::endl;
std::cout<< GridLogMessage << "*********************************************************"<<std::endl;
// assert(diff<1.0);
// GRID_ASSERT(diff<1.0);
std::cout<< GridLogMessage << "Done" <<std::endl;
std::cout << GridLogMessage << "*********************************************************"<<std::endl;
}
@@ -145,6 +150,8 @@ int main (int argc, char ** argv)
GridParallelRNG RNG4(UGrid); RNG4.SeedFixedIntegers(seeds);
SU<Nc>::HotConfiguration(RNG4,U);
#endif
std::cout << GridLogMessage << "Initial plaquette: " << WilsonLoops<PeriodicGimplR>::avgPlaquette(U) << std::endl;
WilsonGaugeActionR PlaqAction(6.0);

View File

@@ -120,7 +120,7 @@ int main (int argc, char ** argv)
std::cout << GridLogMessage << " Sprime "<<Sprime<<std::endl;
std::cout << GridLogMessage << "dS "<<Sprime-S<<std::endl;
std::cout << GridLogMessage << "pred dS "<< dSpred <<std::endl;
assert( fabs(real(Sprime-S-dSpred)) < 1.0e-2 ) ;
GRID_ASSERT( fabs(real(Sprime-S-dSpred)) < 1.0e-2 ) ;
std::cout<< GridLogMessage << "Done" <<std::endl;
Grid_finalize();
}

View File

@@ -125,7 +125,7 @@ int main (int argc, char ** argv)
std::cout << GridLogMessage << " Sprime "<<Sprime<<std::endl;
std::cout << GridLogMessage << "dS "<<Sprime-S<<std::endl;
std::cout << GridLogMessage << "pred dS "<< dSpred <<std::endl;
assert( fabs(real(Sprime-S-dSpred)) < 1.0e-1 ) ;
GRID_ASSERT( fabs(real(Sprime-S-dSpred)) < 1.0e-1 ) ;
std::cout<< GridLogMessage << "Done" <<std::endl;
Grid_finalize();
}

View File

@@ -202,7 +202,7 @@ int main (int argc, char ** argv)
std::cout << GridLogMessage << " Sprime "<<Sprime<<std::endl;
std::cout << GridLogMessage << "dS "<<Sprime-S<<std::endl;
std::cout << GridLogMessage << "predict dS "<< dSpred <<std::endl;
assert( fabs(real(Sprime-S-dSpred)) < 1.0 ) ;
GRID_ASSERT( fabs(real(Sprime-S-dSpred)) < 1.0 ) ;
std::cout<< GridLogMessage << "Done" <<std::endl;
Grid_finalize();
#endif

View File

@@ -43,7 +43,7 @@ void copyConjGauge(LatticeGaugeFieldD &Umu_1f, const LatticeGaugeFieldD &Umu_2f,
int L_2f = UGrid_2f->FullDimensions()[nu];
int L_1f = UGrid_1f->FullDimensions()[nu];
assert(L_1f == 2 * L_2f);
GRID_ASSERT(L_1f == 2 * L_2f);
//Coordinate grid for reference
LatticeInteger xcoor_1f(UGrid_1f);
@@ -73,7 +73,7 @@ void convertFermion1f_from_2f(FermionField1f &out_1f, const FermionField2f &in_2
Integer L_2f = FGrid_2f->FullDimensions()[nu+nuoff];
Integer L_1f = FGrid_1f->FullDimensions()[nu+nuoff];
assert(L_1f == 2 * L_2f);
GRID_ASSERT(L_1f == 2 * L_2f);
auto in_f0_2fgrid = PeekIndex<GparityFlavourIndex>(in_2f,0); //flavor 0 on 2f Grid
FermionField1f in_f0_1fgrid(FGrid_1f);
@@ -442,7 +442,7 @@ int main (int argc, char ** argv)
}else if(action == "DSDR"){
runTest<GparityWilsonTMFermionD, WilsonTMFermionD>(argc,argv);
}else{
assert(0);
GRID_ASSERT(0);
}
}

View File

@@ -152,7 +152,7 @@ int main (int argc, char ** argv)
std::cout << GridLogMessage << "dS "<<Sprime-S<<std::endl;
std::cout << GridLogMessage << "predict dS "<< dSpred <<std::endl;
assert( fabs(real(Sprime-S-dSpred)) < 2.0 ) ;
GRID_ASSERT( fabs(real(Sprime-S-dSpred)) < 2.0 ) ;
std::cout<< GridLogMessage << "Done" <<std::endl;
Grid_finalize();

View File

@@ -166,7 +166,7 @@ int main (int argc, char ** argv)
std::cout << GridLogMessage << "dS "<<Sprime-S<<std::endl;
std::cout << GridLogMessage << "pred dS "<< dSpred <<std::endl;
assert( fabs(real(Sprime-S-dSpred)) < 1.0 ) ;
GRID_ASSERT( fabs(real(Sprime-S-dSpred)) < 1.0 ) ;
std::cout<< GridLogMessage << "Done" <<std::endl;
Grid_finalize();

View File

@@ -230,7 +230,7 @@ int main (int argc, char ** argv)
std::cout << GridLogMessage << "dS - dt^2 term "<< Hmomprime - Hmom + Sprime - S - dSm2 <<std::endl;
assert( fabs(real(Sprime-S-dSpred)) < 1.0 ) ;
GRID_ASSERT( fabs(real(Sprime-S-dSpred)) < 1.0 ) ;

View File

@@ -111,8 +111,8 @@ int main (int argc, char** argv)
std::cout << "(phi, Mphi) - (eta,eta): " << test << " expect 0" << std::endl;
assert(test.real() < 1e-8);
assert(test.imag() < 1e-8);
GRID_ASSERT(test.real() < 1e-8);
GRID_ASSERT(test.imag() < 1e-8);
//Another test is to use heatbath twice to apply M^{-1/2} to Phi then apply M
// M Phi'
@@ -126,7 +126,7 @@ int main (int argc, char** argv)
test2 = test2 - eta;
RealD test2_norm = norm2(test2);
std::cout << "|M M^{-1/2} M^{-1/2} eta - eta|^2 = " << test2_norm << " expect 0" << std::endl;
assert( test2_norm < 1e-8 );
GRID_ASSERT( test2_norm < 1e-8 );
}
@@ -209,7 +209,7 @@ int main (int argc, char** argv)
printf("real(dS_predict) = %1.15e\n", dSpred.real());
printf("imag(dS_predict) = %1.15e\n\n", dSpred.imag());
assert( fabs(real(Sprime-S-dSpred)) < 1.0 ) ;
GRID_ASSERT( fabs(real(Sprime-S-dSpred)) < 1.0 ) ;
std::cout << GridLogMessage << "Done" << std::endl;
Grid_finalize();

View File

@@ -98,7 +98,7 @@ NAMESPACE_BEGIN(Grid);
std::cout << GridLogMessage << " Mixed precision CG wrapper operator() "<<std::endl;
SchurOperatorD * SchurOpU = static_cast<SchurOperatorD *>(&LinOpU);
assert(&(SchurOpU->_Mat)==&(LinOpD._Mat));
GRID_ASSERT(&(SchurOpU->_Mat)==&(LinOpD._Mat));
precisionChange(FermOpF.Umu, FermOpD.Umu);
@@ -210,14 +210,14 @@ int main (int argc, char** argv)
std::cout << GridLogMessage << "Phi(double)=" << norm2(MeofaD.getPhi()) << " Phi(mixed)=" << norm2(MeofaMx.getPhi()) << " diff=" << n << std::endl;
assert(n < 1e-8);
GRID_ASSERT(n < 1e-8);
RealD Sd = MeofaD.S(Ud);
RealD Smx = MeofaMx.S(Ud);
std::cout << GridLogMessage << "Initial action double=" << Sd << " mixed=" << Smx << " diff=" << Sd-Smx << std::endl;
assert(fabs(Sd-Smx) < 1e-6);
GRID_ASSERT(fabs(Sd-Smx) < 1e-6);
SU<Nc>::HotConfiguration(RNG4,Ud);
precisionChange(Uf, Ud);
@@ -227,7 +227,7 @@ int main (int argc, char** argv)
std::cout << GridLogMessage << "After randomizing U, action double=" << Sd << " mixed=" << Smx << " diff=" << Sd-Smx << std::endl;
assert(fabs(Sd-Smx) < 1e-6);
GRID_ASSERT(fabs(Sd-Smx) < 1e-6);
std::cout << GridLogMessage << "Done" << std::endl;
Grid_finalize();

View File

@@ -167,7 +167,7 @@ int main (int argc, char** argv)
printf("real(dS_predict) = %1.15e\n", dSpred.real());
printf("imag(dS_predict) = %1.15e\n\n", dSpred.imag());
assert( fabs(real(Sprime-S-dSpred)) < 1.0 ) ;
GRID_ASSERT( fabs(real(Sprime-S-dSpred)) < 1.0 ) ;
std::cout << GridLogMessage << "Done" << std::endl;
Grid_finalize();

View File

@@ -149,7 +149,7 @@ int main (int argc, char ** argv)
std::cout << GridLogMessage << "dS "<<Sprime-S<<std::endl;
std::cout << GridLogMessage << "predict dS "<< dSpred <<std::endl;
assert( fabs(real(Sprime-S-dSpred)) < 1.0e-2 ) ;
GRID_ASSERT( fabs(real(Sprime-S-dSpred)) < 1.0e-2 ) ;
std::cout<< GridLogMessage << "Done" <<std::endl;
Grid_finalize();

View File

@@ -123,7 +123,7 @@ int main (int argc, char ** argv)
std::cout << GridLogMessage << "dS "<<Sprime-S<<std::endl;
std::cout << GridLogMessage << "pred dS "<< dSpred <<std::endl;
assert( fabs(real(Sprime-S-dSpred)) < 1.0e-2 ) ;
GRID_ASSERT( fabs(real(Sprime-S-dSpred)) < 1.0e-2 ) ;
std::cout<< GridLogMessage << "Done" <<std::endl;
Grid_finalize();

View File

@@ -205,7 +205,7 @@ int main (int argc, char ** argv)
std::cout << GridLogMessage << "Total dS "<< Hmomprime - Hmom + Sprime - S <<std::endl;
assert( fabs(real(Sprime-S-dSpred)) < 1.0 ) ;
GRID_ASSERT( fabs(real(Sprime-S-dSpred)) < 1.0 ) ;
std::cout<< GridLogMessage << "Done" <<std::endl;
Grid_finalize();

View File

@@ -188,7 +188,7 @@ int main(int argc, char **argv)
std::cout << GridLogMessage << "Total dS " << Hmomprime - Hmom + Sprime - S << std::endl;
assert(fabs(real(Sprime - S - dSpred)) < 1.0);
GRID_ASSERT(fabs(real(Sprime - S - dSpred)) < 1.0);
std::cout << GridLogMessage << "Done" << std::endl;
Grid_finalize();

View File

@@ -163,7 +163,7 @@ int main (int argc, char ** argv)
std::cout << GridLogMessage << "dS "<<Sprime-S<<std::endl;
std::cout << GridLogMessage << "predict dS "<< dSpred <<std::endl;
assert( fabs(real(Sprime-S-dSpred)) < 3.0 ) ;
GRID_ASSERT( fabs(real(Sprime-S-dSpred)) < 3.0 ) ;
std::cout<< GridLogMessage << "Done" <<std::endl;
Grid_finalize();

View File

@@ -54,7 +54,7 @@ void copy2fTo1fFermionField(FermionField1f &out, const FermionField2f &in, int g
std::cout << "dim_2f " << dim_2f << std::endl;
std::cout << "dim_1f " << dim_1f << std::endl;
assert(dim_1f[gpdir] == 2*dim_2f[gpdir]);
GRID_ASSERT(dim_1f[gpdir] == 2*dim_2f[gpdir]);
LatticeInteger xcoor_1f(out.Grid()); //5d lattice integer
LatticeCoordinate(xcoor_1f,gpdir);

View File

@@ -139,7 +139,7 @@ int main (int argc, char ** argv)
// for(int n=0;n<poles.size();n++){
// a = a + residues[n]/(x+poles[n]);
// }
assert(Sqrt.order==degree);
GRID_ASSERT(Sqrt.order==degree);
combined = Sqrt.norm*src;
for(int i=0;i<degree;i++){

View File

@@ -110,10 +110,10 @@ int main (int argc, char ** argv)
std::cout<<GridLogMessage << "x^(1/4) : "<<ssx<<" "<<assx<<std::endl;
std::cout<<GridLogMessage << "x^(-1/2): "<<isx<<" "<<aisx<<std::endl;
std::cout<<GridLogMessage << "x^(-1/4): "<<issx<<" "<<aissx<<std::endl;
assert(fabs(sx-asx)<1.0e-6);
assert(fabs(ssx-assx)<1.0e-6);
assert(fabs(isx-aisx)<1.0e-6);
assert(fabs(issx-aissx)<1.0e-6);
GRID_ASSERT(fabs(sx-asx)<1.0e-6);
GRID_ASSERT(fabs(ssx-assx)<1.0e-6);
GRID_ASSERT(fabs(isx-aisx)<1.0e-6);
GRID_ASSERT(fabs(issx-aissx)<1.0e-6);
Grid_finalize();
}

View File

@@ -57,7 +57,7 @@ public:
void checkpointFine(std::string evecs_file,std::string evals_file)
{
assert(this->subspace.size()==nbasis);
GRID_ASSERT(this->subspace.size()==nbasis);
emptyUserRecord record;
Grid::ScidacWriter WR(this->_FineGrid->IsBoss());
WR.open(evecs_file);
@@ -79,7 +79,7 @@ public:
XmlReader RDx(evals_file);
read(RDx,"evals",this->evals_fine);
assert(this->evals_fine.size()==nbasis);
GRID_ASSERT(this->evals_fine.size()==nbasis);
std::cout << GridLogIRL<< "checkpointFineRestore: Reading evecs from "<<evecs_file<<std::endl;
emptyUserRecord record;
@@ -117,7 +117,7 @@ public:
XmlReader RDx(evals_file);
read(RDx,"evals",this->evals_coarse);
assert(this->evals_coarse.size()==nvec);
GRID_ASSERT(this->evals_coarse.size()==nvec);
emptyUserRecord record;
std::cout << GridLogIRL<< "checkpointCoarseRestore: Reading evecs from "<<evecs_file<<std::endl;
Grid::ScidacReader RD ;
@@ -163,18 +163,18 @@ int main (int argc, char ** argv) {
auto fineLatt = GridDefaultLatt();
int dims=fineLatt.size();
assert(blockSize.size()==dims+1);
GRID_ASSERT(blockSize.size()==dims+1);
Coordinate coarseLatt(dims);
for (int d=0;d<coarseLatt.size();d++){
coarseLatt[d] = fineLatt[d]/blockSize[d]; assert(coarseLatt[d]*blockSize[d]==fineLatt[d]);
coarseLatt[d] = fineLatt[d]/blockSize[d]; GRID_ASSERT(coarseLatt[d]*blockSize[d]==fineLatt[d]);
}
std::cout << GridLogMessage<< " 5d coarse lattice is ";
for (int i=0;i<coarseLatt.size();i++){
std::cout << coarseLatt[i]<<"x";
}
int cLs = Ls/blockSize[dims]; assert(cLs*blockSize[dims]==Ls);
int cLs = Ls/blockSize[dims]; GRID_ASSERT(cLs*blockSize[dims]==Ls);
std::cout << cLs<<std::endl;
GridCartesian * CoarseGrid4 = SpaceTimeGrid::makeFourDimGrid(coarseLatt, GridDefaultSimd(Nd,vComplex::Nsimd()),GridDefaultMpi());
@@ -201,14 +201,14 @@ int main (int argc, char ** argv) {
std::cout << GridLogMessage << "Keep " << fine.Nstop << " fine vectors" << std::endl;
std::cout << GridLogMessage << "Keep " << coarse.Nstop << " coarse vectors" << std::endl;
assert(Nm2 >= Nm1);
GRID_ASSERT(Nm2 >= Nm1);
const int nbasis= 60;
assert(nbasis==Ns1);
GRID_ASSERT(nbasis==Ns1);
LocalCoherenceLanczosScidac<vSpinColourVector,vTComplex,nbasis> _LocalCoherenceLanczos(FrbGrid,CoarseGrid5,HermOp,Odd);
std::cout << GridLogMessage << "Constructed LocalCoherenceLanczos" << std::endl;
assert( (Params.doFine)||(Params.doFineRead));
GRID_ASSERT( (Params.doFine)||(Params.doFineRead));
if ( Params.doFine ) {
std::cout << GridLogMessage << "Performing fine grid IRL Nstop "<< Ns1 << " Nk "<<Nk1<<" Nm "<<Nm1<< std::endl;

View File

@@ -100,7 +100,7 @@ public:
void checkpointFine(std::string evecs_file,std::string evals_file)
{
assert(this->subspace.size()==nbasis);
GRID_ASSERT(this->subspace.size()==nbasis);
emptyUserRecord record;
Grid::ScidacWriter WR(this->_FineGrid->IsBoss());
WR.open(evecs_file);
@@ -122,7 +122,7 @@ public:
XmlReader RDx(evals_file);
read(RDx,"evals",this->evals_fine);
if(this->evals_fine.size() < nbasis) assert(0 && "Not enough fine evals to complete basis");
if(this->evals_fine.size() < nbasis) GRID_ASSERT(0 && "Not enough fine evals to complete basis");
if(this->evals_fine.size() > nbasis){ //allow the use of precomputed evecs with a larger #evecs
std::cout << GridLogMessage << "Truncating " << this->evals_fine.size() << " evals to basis size " << nbasis << std::endl;
this->evals_fine.resize(nbasis);
@@ -164,7 +164,7 @@ public:
XmlReader RDx(evals_file);
read(RDx,"evals",this->evals_coarse);
assert(this->evals_coarse.size()==nvec);
GRID_ASSERT(this->evals_coarse.size()==nvec);
emptyUserRecord record;
std::cout << GridLogIRL<< "checkpointCoarseRestore: Reading evecs from "<<evecs_file<<std::endl;
Grid::ScidacReader RD ;
@@ -252,7 +252,7 @@ void runTest(const Options &opt){
GridRedBlackCartesian * FrbGrid = SpaceTimeGrid::makeFiveDimRedBlackGrid(opt.Ls,UGrid);
//Setup G-parity BCs
assert(Nd == 4);
GRID_ASSERT(Nd == 4);
std::vector<int> dirs4(4);
for(int i=0;i<3;i++) dirs4[i] = opt.GparityDirs[i];
dirs4[3] = 0; //periodic gauge BC in time
@@ -273,14 +273,14 @@ void runTest(const Options &opt){
auto fineLatt = GridDefaultLatt();
Coordinate coarseLatt(4);
for (int d=0;d<4;d++){
coarseLatt[d] = fineLatt[d]/opt.blockSize[d]; assert(coarseLatt[d]*opt.blockSize[d]==fineLatt[d]);
coarseLatt[d] = fineLatt[d]/opt.blockSize[d]; GRID_ASSERT(coarseLatt[d]*opt.blockSize[d]==fineLatt[d]);
}
std::cout << GridLogMessage<< " 5d coarse lattice is ";
for (int i=0;i<4;i++){
std::cout << coarseLatt[i]<<"x";
}
int cLs = opt.Ls/opt.blockSize[4]; assert(cLs*opt.blockSize[4]==opt.Ls);
int cLs = opt.Ls/opt.blockSize[4]; GRID_ASSERT(cLs*opt.blockSize[4]==opt.Ls);
std::cout << cLs<<std::endl;
GridCartesian * CoarseGrid4 = SpaceTimeGrid::makeFourDimGrid(coarseLatt, GridDefaultSimd(Nd,vComplex::Nsimd()),GridDefaultMpi());
@@ -304,9 +304,9 @@ void runTest(const Options &opt){
std::cout << GridLogMessage << "Keep " << fine.N_true_get << " fine vectors" << std::endl;
std::cout << GridLogMessage << "Keep " << coarse.N_true_get << " coarse vectors" << std::endl;
assert(coarse.N_true_get >= fine.N_true_get);
GRID_ASSERT(coarse.N_true_get >= fine.N_true_get);
assert(nbasis<=fine.N_true_get);
GRID_ASSERT(nbasis<=fine.N_true_get);
LocalCoherenceLanczosScidac<SiteSpinor,vTComplex,nbasis> _LocalCoherenceLanczos(FrbGrid,CoarseGrid5,SchurOp,Odd);
std::cout << GridLogMessage << "Constructed LocalCoherenceLanczos" << std::endl;
@@ -411,7 +411,7 @@ int main (int argc, char ** argv)
}
opt.config = argv[1];
GridCmdOptionIntVector(argv[2], opt.GparityDirs);
assert(opt.GparityDirs.size() == 3);
GRID_ASSERT(opt.GparityDirs.size() == 3);
for(int i=3;i<argc;i++){
std::string sarg = argv[i];
@@ -423,7 +423,7 @@ int main (int argc, char ** argv)
std::cout << GridLogMessage << "Set quark mass to " << opt.mass << std::endl;
}else if(sarg == "--block"){
GridCmdOptionIntVector(argv[i+1], opt.blockSize);
assert(opt.blockSize.size() == 5);
GRID_ASSERT(opt.blockSize.size() == 5);
std::cout << GridLogMessage << "Set block size to ";
for(int q=0;q<5;q++) std::cout << opt.blockSize[q] << " ";
std::cout << std::endl;
@@ -480,7 +480,7 @@ int main (int argc, char ** argv)
runTest<350>(opt); break;
default:
std::cout << GridLogMessage << "Unsupported basis size " << basis_size << std::endl;
assert(0);
GRID_ASSERT(0);
}
Grid_finalize();

View File

@@ -392,7 +392,7 @@ int main(int argc, char** argv) {
}
}
FILE *fp = fopen("lego-plot.py","w"); assert(fp!=NULL);
FILE *fp = fopen("lego-plot.py","w"); GRID_ASSERT(fp!=NULL);
#define PYTHON_LINE(A) fprintf(fp,A"\n");
PYTHON_LINE("import matplotlib.pyplot as plt");
PYTHON_LINE("import numpy as np");

View File

@@ -95,13 +95,13 @@ void CmdJobParams::Parse(char **argv,int argc)
if( GridCmdOptionExists(argv,argv+argc,"--phase") ){
arg = GridCmdOptionPayload(argv,argv+argc,"--phase");
pfile.open(arg);
assert(pfile);
GRID_ASSERT(pfile);
expect = 0;
while( pfile >> vstr ) {
if ( vstr.compare("boundary_phase") == 0 ) {
pfile >> vstr;
GridCmdOptionInt(vstr,idx);
assert(expect==idx);
GRID_ASSERT(expect==idx);
pfile >> vstr;
GridCmdOptionFloat(vstr,re);
pfile >> vstr;
@@ -118,13 +118,13 @@ void CmdJobParams::Parse(char **argv,int argc)
if( GridCmdOptionExists(argv,argv+argc,"--omega") ){
arg = GridCmdOptionPayload(argv,argv+argc,"--omega");
pfile.open(arg);
assert(pfile);
GRID_ASSERT(pfile);
Ls = 0;
while( pfile >> vstr ) {
if ( vstr.compare("omega") == 0 ) {
pfile >> vstr;
GridCmdOptionInt(vstr,idx);
assert(Ls==idx);
GRID_ASSERT(Ls==idx);
pfile >> vstr;
GridCmdOptionFloat(vstr,re);
pfile >> vstr;
@@ -324,7 +324,7 @@ int main (int argc, char ** argv)
std::cout << GridLogMessage << "mpi_layout= " << mpi_layout << std::endl;
std::cout << GridLogMessage << "mpi_split= " << mpi_split << std::endl;
std::cout << GridLogMessage << "mrhs= " << mrhs << std::endl;
// assert(JP.Nu==tmp);
// GRID_ASSERT(JP.Nu==tmp);
/////////////////////////////////////////////
// Split into 1^4 mpi communicators, keeping it explicitly single

View File

@@ -57,7 +57,7 @@ public:
void checkpointFine(std::string evecs_file,std::string evals_file)
{
assert(this->subspace.size()==nbasis);
GRID_ASSERT(this->subspace.size()==nbasis);
emptyUserRecord record;
Grid::ScidacWriter WR(this->_FineGrid->IsBoss());
WR.open(evecs_file);
@@ -79,7 +79,7 @@ public:
XmlReader RDx(evals_file);
read(RDx,"evals",this->evals_fine);
assert(this->evals_fine.size()==nbasis);
GRID_ASSERT(this->evals_fine.size()==nbasis);
std::cout << GridLogIRL<< "checkpointFineRestore: Reading evecs from "<<evecs_file<<std::endl;
emptyUserRecord record;
@@ -116,7 +116,7 @@ public:
XmlReader RDx(evals_file);
read(RDx,"evals",this->evals_coarse);
assert(this->evals_coarse.size()==nvec);
GRID_ASSERT(this->evals_coarse.size()==nvec);
emptyUserRecord record;
std::cout << GridLogIRL<< "checkpointCoarseRestore: Reading evecs from "<<evecs_file<<std::endl;
Grid::ScidacReader RD ;
@@ -162,19 +162,19 @@ int main (int argc, char ** argv) {
Coordinate fineLatt = GridDefaultLatt();
int dims=fineLatt.size();
assert(blockSize.size()==dims+1);
GRID_ASSERT(blockSize.size()==dims+1);
Coordinate coarseLatt(dims);
Coordinate coarseLatt5d ;
for (int d=0;d<coarseLatt.size();d++){
coarseLatt[d] = fineLatt[d]/blockSize[d]; assert(coarseLatt[d]*blockSize[d]==fineLatt[d]);
coarseLatt[d] = fineLatt[d]/blockSize[d]; GRID_ASSERT(coarseLatt[d]*blockSize[d]==fineLatt[d]);
}
std::cout << GridLogMessage<< " 5d coarse lattice is ";
for (int i=0;i<coarseLatt.size();i++){
std::cout << coarseLatt[i]<<"x";
}
int cLs = Ls/blockSize[dims]; assert(cLs*blockSize[dims]==Ls);
int cLs = Ls/blockSize[dims]; GRID_ASSERT(cLs*blockSize[dims]==Ls);
std::cout << cLs<<std::endl;
GridCartesian * CoarseGrid4 = SpaceTimeGrid::makeFourDimGrid(coarseLatt, GridDefaultSimd(Nd,vComplex::Nsimd()),GridDefaultMpi());
@@ -201,14 +201,14 @@ int main (int argc, char ** argv) {
std::cout << GridLogMessage << "Keep " << fine.Nstop << " fine vectors" << std::endl;
std::cout << GridLogMessage << "Keep " << coarse.Nstop << " coarse vectors" << std::endl;
assert(Nm2 >= Nm1);
GRID_ASSERT(Nm2 >= Nm1);
const int nbasis= 60;
assert(nbasis==Ns1);
GRID_ASSERT(nbasis==Ns1);
LocalCoherenceLanczosScidac<vSpinColourVector,vTComplex,nbasis> _LocalCoherenceLanczos(FrbGrid,CoarseGrid5,HermOp,Odd);
std::cout << GridLogMessage << "Constructed LocalCoherenceLanczos" << std::endl;
assert( (Params.doFine)||(Params.doFineRead));
GRID_ASSERT( (Params.doFine)||(Params.doFineRead));
if ( Params.doFine ) {
std::cout << GridLogMessage << "Performing fine grid IRL Nstop "<< Ns1 << " Nk "<<Nk1<<" Nm "<<Nm1<< std::endl;

View File

@@ -159,7 +159,7 @@ public:
void calcFine(RealD alpha, RealD beta,int Npoly,int Nm,RealD resid,
RealD MaxIt, RealD betastp, int MinRes)
{
assert(nbasis<=Nm);
GRID_ASSERT(nbasis<=Nm);
Chebyshev<FineField> Cheby(alpha,beta,Npoly);
FunctionHermOp<FineField> ChebyOp(Cheby,_FineOp);
PlainHermOp<FineField> Op(_FineOp);
@@ -269,19 +269,19 @@ int main (int argc, char ** argv) {
Coordinate fineLatt = GridDefaultLatt();
int dims=fineLatt.size();
assert(blockSize.size()==dims+1);
GRID_ASSERT(blockSize.size()==dims+1);
Coordinate coarseLatt(dims);
Coordinate coarseLatt5d ;
for (int d=0;d<coarseLatt.size();d++){
coarseLatt[d] = fineLatt[d]/blockSize[d]; assert(coarseLatt[d]*blockSize[d]==fineLatt[d]);
coarseLatt[d] = fineLatt[d]/blockSize[d]; GRID_ASSERT(coarseLatt[d]*blockSize[d]==fineLatt[d]);
}
std::cout << GridLogMessage<< " 5d coarse lattice is ";
for (int i=0;i<coarseLatt.size();i++){
std::cout << coarseLatt[i]<<"x";
}
int cLs = Ls/blockSize[dims]; assert(cLs*blockSize[dims]==Ls);
int cLs = Ls/blockSize[dims]; GRID_ASSERT(cLs*blockSize[dims]==Ls);
std::cout << cLs<<std::endl;
GridCartesian * CoarseGrid4 = SpaceTimeGrid::makeFourDimGrid(coarseLatt, GridDefaultSimd(Nd,vComplex::Nsimd()),GridDefaultMpi());
@@ -312,7 +312,7 @@ int main (int argc, char ** argv) {
std::cout << GridLogMessage << "Keep " << fine.Nk << " full vectors" << std::endl;
std::cout << GridLogMessage << "Keep " << coarse.Nk << " total vectors" << std::endl;
assert(Nm2 >= Nm1);
GRID_ASSERT(Nm2 >= Nm1);
const int nbasis= 32;
CoarseFineIRL<vSpinColourVector,vTComplex,nbasis> IRL(FrbGrid,CoarseGrid5rb,HermOp,Odd);

View File

@@ -96,7 +96,7 @@ public:
GridBase *FineGrid,
GridBase *CoarseGrid){
int nevecs = evecs_in.size();
assert(nevecs > nbasis);
GRID_ASSERT(nevecs > nbasis);
//Construct the basis
basis.resize(nbasis, FineGrid);
@@ -273,7 +273,7 @@ struct Args{
GparityWilsonImplD::ImplParams setupGparityParams(const std::vector<int> &GparityDirs){
//Setup G-parity BCs
assert(Nd == 4);
GRID_ASSERT(Nd == 4);
std::vector<int> dirs4(4);
for(int i=0;i<3;i++) dirs4[i] = GparityDirs[i];
dirs4[3] = 0; //periodic gauge BC in time
@@ -309,14 +309,14 @@ void run_b(ActionType &action, const std::string &config, const Args &args){
auto fineLatt = GridDefaultLatt();
Coordinate coarseLatt(4);
for (int d=0;d<4;d++){
coarseLatt[d] = fineLatt[d]/args.blockSize[d]; assert(coarseLatt[d]*args.blockSize[d]==fineLatt[d]);
coarseLatt[d] = fineLatt[d]/args.blockSize[d]; GRID_ASSERT(coarseLatt[d]*args.blockSize[d]==fineLatt[d]);
}
std::cout << GridLogMessage<< " 5d coarse lattice is ";
for (int i=0;i<4;i++){
std::cout << coarseLatt[i]<<"x";
}
int cLs = args.Ls/args.blockSize[4]; assert(cLs*args.blockSize[4]==args.Ls);
int cLs = args.Ls/args.blockSize[4]; GRID_ASSERT(cLs*args.blockSize[4]==args.Ls);
std::cout << cLs<<std::endl;
GridCartesian * CoarseGrid4 = SpaceTimeGrid::makeFourDimGrid(coarseLatt, GridDefaultSimd(Nd,vComplex::Nsimd()),GridDefaultMpi());
@@ -350,7 +350,7 @@ void run_b(ActionType &action, const std::string &config, const Args &args){
XmlReader RDx(evals_file);
read(RDx,"evals",evals);
assert(evals.size()==fine.N_true_get);
GRID_ASSERT(evals.size()==fine.N_true_get);
std::cout << GridLogIRL<< "Reading evecs from "<<evecs_file<<std::endl;
emptyUserRecord record;
@@ -369,7 +369,7 @@ void run_b(ActionType &action, const std::string &config, const Args &args){
RealD resid = fine.stop_rsd;
int MaxIt = fine.maxits;
assert(nbasis<=Nm);
GRID_ASSERT(nbasis<=Nm);
Chebyshev<FermionField> Cheby(fine.getChebyParams());
FunctionHermOp<FermionField> ChebyOp(Cheby,SchurOp);
PlainHermOp<FermionField> Op(SchurOp);
@@ -386,7 +386,7 @@ void run_b(ActionType &action, const std::string &config, const Args &args){
int Nconv;
IRL.calc(evals, evecs,src,Nconv,false);
if(Nconv < Nstop) assert(0 && "Fine lanczos failed to converge the required number of evecs"); //algorithm doesn't consider this a failure
if(Nconv < Nstop) GRID_ASSERT(0 && "Fine lanczos failed to converge the required number of evecs"); //algorithm doesn't consider this a failure
if(Nconv > Nstop){
//Yes this potentially throws away some evecs but it is better than having a random number of evecs between Nstop and Nm!
evals.resize(Nstop);
@@ -430,7 +430,7 @@ void run_b(ActionType &action, const std::string &config, const Args &args){
Chebyshev<FermionField> smoother(fine.getChebyParams());
//Test the quality of the uncompressed evecs
assert( compressor.testCompression(SchurOp, smoother, basis, compressed_evecs, evals, fine.stop_rsd, args.coarse_relax_tol) );
GRID_ASSERT( compressor.testCompression(SchurOp, smoother, basis, compressed_evecs, evals, fine.stop_rsd, args.coarse_relax_tol) );
}
template<typename ActionType>
@@ -453,7 +453,7 @@ void run(ActionType &action, const std::string &config, const Args &args){
case 400:
return run_b<400>(action,config,args);
default:
assert(0 && "Unsupported basis size: allowed values are 50,100,200,250,300,350,400");
GRID_ASSERT(0 && "Unsupported basis size: allowed values are 50,100,200,250,300,350,400");
}
}
@@ -489,7 +489,7 @@ int main (int argc, char ** argv) {
Args args;
GridCmdOptionIntVector(argv[2], args.GparityDirs);
assert(args.GparityDirs.size() == 3);
GRID_ASSERT(args.GparityDirs.size() == 3);
std::string action_s = "Mobius";
@@ -503,7 +503,7 @@ int main (int argc, char ** argv) {
std::cout << GridLogMessage << "Set quark mass to " << args.mass << std::endl;
}else if(sarg == "--block"){
GridCmdOptionIntVector(argv[i+1], args.blockSize);
assert(args.blockSize.size() == 5);
GRID_ASSERT(args.blockSize.size() == 5);
std::cout << GridLogMessage << "Set block size to ";
for(int q=0;q<5;q++) std::cout << args.blockSize[q] << " ";
std::cout << std::endl;
@@ -567,7 +567,7 @@ int main (int argc, char ** argv) {
run(action, config, args);
}
#else
assert(0);
GRID_ASSERT(0);
#endif
}else{
WilsonImplD::ImplParams Params = setupParams();

View File

@@ -71,23 +71,23 @@ public:
// Support for coarsening to a multigrid
void OpDiag (const Field &in, Field &out) {
assert(0);
GRID_ASSERT(0);
_Mat.Mdiag(in,out);
}
void OpDir (const Field &in, Field &out,int dir,int disp) {
assert(0);
GRID_ASSERT(0);
_Mat.Mdir(in,out,dir,disp);
}
void OpDirAll (const Field &in, std::vector<Field> &out){
assert(0);
GRID_ASSERT(0);
_Mat.MdirAll(in,out);
};
void Op (const Field &in, Field &out){
assert(0);
GRID_ASSERT(0);
_Mat.M(in,out);
}
void AdjOp (const Field &in, Field &out){
assert(0);
GRID_ASSERT(0);
_Mat.Mdag(in,out);
}
void HermOpAndNorm(const Field &in, Field &out,RealD &n1,RealD &n2){

View File

@@ -0,0 +1,371 @@
/*************************************************************************************
Grid physics library, www.github.com/paboyle/Grid
Source file: ./tests/Test_padded_cell.cc
Copyright (C) 2023
Author: Peter Boyle <paboyle@ph.ed.ac.uk>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */
// copied here from Test_general_coarse_pvdagm.cc
#include <cstdlib>
#include <Grid/Grid.h>
#include <Grid/lattice/PaddedCell.h>
#include <Grid/stencil/GeneralLocalStencil.h>
#include <Grid/algorithms/iterative/PrecGeneralisedConjugateResidual.h>
#include <Grid/algorithms/iterative/PrecGeneralisedConjugateResidualNonHermitian.h>
#include <Grid/algorithms/iterative/BiCGSTAB.h>
using namespace std;
using namespace Grid;
namespace Grid {
// Serializable parameter pack for the Lanczos/Arnoldi eigensolver driver in
// this test.  Populated from XML via the Grid serialisation layer
// (main() reads it from "LanParams.xml" under the tag "LanczosParameters").
// Members (see GRID_SERIALIZABLE_CLASS_MEMBERS below):
//   mass/mstep         : quark mass and (presumably) mass step — only `mass`
//                        is read by the visible main(); confirm the rest.
//   Nstop/Nk/Np        : presumably Lanczos stopping/Krylov/padding counts —
//                        TODO confirm, main() currently hard-codes its own.
//   ReadEvec, maxIter, resid, ChebyLow/ChebyHigh/ChebyOrder :
//                        restart/convergence/Chebyshev settings (unused in
//                        the visible portion of this file).
struct LanczosParameters: Serializable {
GRID_SERIALIZABLE_CLASS_MEMBERS(LanczosParameters,
RealD, mass ,
RealD, mstep ,
Integer, Nstop,
Integer, Nk,
Integer, Np,
Integer, ReadEvec,
Integer, maxIter,
RealD, resid,
RealD, ChebyLow,
RealD, ChebyHigh,
Integer, ChebyOrder)
// Default constructor: only `mass` gets an explicit default; the other
// members are left to be filled in by deserialisation.
LanczosParameters() {
////////////////////////////// Default values
mass = 0;
/////////////////////////////////
}
// Construct directly from a serialisation reader (XML, JSON, ...).
template <class ReaderClass >
LanczosParameters(Reader<ReaderClass> & TheReader){
initialize(TheReader);
}
// NOTE(review): this reads under the tag "HMC", but main() reads the same
// struct under "LanczosParameters" without calling initialize() — the tag
// here looks like a leftover; confirm before relying on this path.
template < class ReaderClass >
void initialize(Reader<ReaderClass> &TheReader){
// std::cout << GridLogMessage << "Reading HMC\n";
read(TheReader, "HMC", *this);
}
// Intentionally empty: parameter printing is commented out.
void print_parameters() const {
// std::cout << GridLogMessage << "[HMC parameters] Trajectories : " << Trajectories << "\n";
// std::cout << GridLogMessage << "[HMC parameters] Start trajectory : " << StartTrajectory << "\n";
// std::cout << GridLogMessage << "[HMC parameters] Metropolis test (on/off): " << std::boolalpha << MetropolisTest << "\n";
// std::cout << GridLogMessage << "[HMC parameters] Thermalization trajs : " << NoMetropolisUntil << "\n";
// std::cout << GridLogMessage << "[HMC parameters] Starting type : " << StartingType << "\n";
// MD.print_parameters();
}
};
}
template <class T> void writeFile(T& in, std::string const fname){
#if 1
// Ref: https://github.com/paboyle/Grid/blob/feature/scidac-wp1/tests/debug/Test_general_coarse_hdcg_phys48.cc#L111
std::cout << Grid::GridLogMessage << "Writes to: " << fname << std::endl;
Grid::emptyUserRecord record;
Grid::ScidacWriter WR(in.Grid()->IsBoss());
WR.open(fname);
WR.writeScidacFieldRecord(in,record,0);
WR.close();
#endif
// What is the appropriate way to throw error?
}
// Operator under test: the plain 4d Wilson fermion operator, and the fermion
// field type it acts on.
typedef WilsonFermionD WilsonOp;
typedef typename WilsonFermionD::FermionField FermionField;
// Linear-operator wrapper whose Op() applies the INVERSE of a non-Hermitian
// matrix M:  out = M^{-1} in, computed by solving the normal equations
// (M^dag M) out = M^dag in with Conjugate Gradient.
// Only Op() and AdjOp() are implemented; the multigrid-coarsening hooks and
// the Hermitian entry points abort (consistent with the tree-wide
// assert -> GRID_ASSERT migration in this commit).
template<class Matrix,class Field>
class InvertNonHermitianLinearOperator : public LinearOperatorBase<Field> {
  Matrix &_Mat;  // underlying operator M, held by reference (caller owns it)
  RealD _stp;    // residual tolerance for the inner CG solve
public:
  // stp: CG stopping residual used when Op() applies M^{-1}.
  InvertNonHermitianLinearOperator(Matrix &Mat,RealD stp=1e-8): _Mat(Mat),_stp(stp){};

  // Multigrid coarsening hooks: not supported by this wrapper.
  void OpDiag (const Field &in, Field &out) {
    // _Mat.Mdiag(in,out);
    GRID_ASSERT(0);
  }
  void OpDir (const Field &in, Field &out,int dir,int disp) {
    // _Mat.Mdir(in,out,dir,disp);
    GRID_ASSERT(0);
  }
  void OpDirAll (const Field &in, std::vector<Field> &out){
    // _Mat.MdirAll(in,out);
    GRID_ASSERT(0);
  };

  // out = M^{-1} in : form tmp = M^dag in, then CG-solve (M^dag M) out = tmp.
  void Op (const Field &in, Field &out){
    Field tmp(in.Grid());
    _Mat.Mdag(in,tmp);
    MdagMLinearOperator<Matrix,Field> HermOp(_Mat);
    ConjugateGradient<Field> CG(_stp,10000);
    CG(HermOp,tmp,out);
  }

  // NOTE(review): this applies M^dag, the adjoint of M itself — NOT the
  // adjoint of Op() (which would be M^{-dag}). Confirm the asymmetry between
  // Op() and AdjOp() is intended by the callers of this wrapper.
  void AdjOp (const Field &in, Field &out){
    _Mat.Mdag(in,out);
    // out = out + shift * in;
  }

  // Hermitian entry points: intentionally unsupported.
  void HermOpAndNorm(const Field &in, Field &out,RealD &n1,RealD &n2){
    GRID_ASSERT(0);
  }
  void HermOp(const Field &in, Field &out){
    GRID_ASSERT(0);
  }
};
// Exercise eigenvalue swapping and reordering on the complex Schur form of an
// Arnoldi-generated Hessenberg matrix, verifying the decomposition after each
// manipulation.
//   Arn    : Arnoldi process (run for one iteration to produce a Hessenberg)
//   src    : starting vector for the Arnoldi sweep
//   Nlarge : Arnoldi size parameters forwarded to Arn()
//   Nm, Nk : reported sizes; Nk bounds the swap/reorder sweeps below
template<class Field>
void testSchurFromHess(Arnoldi<Field>& Arn, Field& src, int Nlarge, int Nm, int Nk) {
  std::cout << GridLogMessage << "*******************************************" << std::endl;
  std::cout << GridLogMessage << "Testing Schur reordering, Nm = " << Nm << ", Nk = " << Nk << std::endl;
  std::cout << GridLogMessage << "*******************************************" << std::endl;

  // One Arnoldi sweep is enough to obtain a Hessenberg matrix to decompose.
  std::cout << GridLogMessage << "Running Arnoldi for 1 iteration to get a Hessenberg." << std::endl;
  Arn(src, 1, Nlarge, Nm, Nlarge);
  Eigen::MatrixXcd hess = Arn.getHessenbergMat();
  std::cout << GridLogMessage << "Hessenberg for use: " << std::endl << hess << std::endl;

  ComplexSchurDecomposition decomp(hess, true);
  std::cout << "Schur decomp holds? " << decomp.checkDecomposition() << std::endl;
  std::cout << GridLogMessage << "S = " << std::endl << decomp.getMatrixS() << std::endl;

  // Exchange one adjacent diagonal pair and re-verify the decomposition.
  std::cout << GridLogMessage << "Swapping S(3, 3) with S(4, 4)" << std::endl;
  decomp.swapEvals(3);
  std::cout << GridLogMessage << "S after swap = " << std::endl << decomp.getMatrixS() << std::endl;
  std::cout << "Schur decomp still holds? " << decomp.checkDecomposition() << std::endl;

  // Bubble the last diagonal entry to the front via successive adjacent
  // swaps at indices Nk-2, Nk-3, ..., 0, checking after every step.
  std::cout << GridLogMessage << "Moving last eval to front. S at start = " << std::endl << decomp.getMatrixS() << std::endl;
  for (int idx = Nk - 2; idx >= 0; --idx) {
    decomp.swapEvals(idx);
    std::cout << GridLogMessage << "S after swap of index " << idx << " = " << std::endl << decomp.getMatrixS() << std::endl;
    std::cout << "Schur decomp still holds? " << decomp.checkDecomposition() << std::endl;
  }

  // Finally, run the library's own reorder over the leading Nk eigenvalues.
  std::cout << GridLogMessage << "Testing Schur reorder" << std::endl;
  decomp.schurReorder(Nk);
  std::cout << GridLogMessage << "S after reorder = " << std::endl << decomp.getMatrixS() << std::endl;
  std::cout << "Schur decomp still holds? " << decomp.checkDecomposition() << std::endl;
}
// Driver: read a gauge configuration and Lanczos parameters, build a Wilson
// operator, run a Krylov-Schur eigensolver on it, and write out eigenvector
// densities. Parameters (mass, resid, Nk/Np/Nstop, maxIter) are taken from
// LanParams.xml, overriding the hard-coded defaults set below.
int main (int argc, char ** argv)
{
Grid_init(&argc,&argv);
// Ls is declared for the (commented-out) 5D domain-wall setup; the active
// code path below runs 4D Wilson only, so Ls is currently unused.
const int Ls=16;
// GridCartesian * UGrid = SpaceTimeGrid::makeFourDimGrid(GridDefaultLatt(), GridDefaultSimd(Nd,vComplex::Nsimd()),GridDefaultMpi());
// std::vector<int> lat_size {32, 32, 32, 32};
// std::cout << "Lattice size: " << lat_size << std::endl;
// 4D gauge grid and its red-black (even/odd checkerboard) companion.
GridCartesian * UGrid = SpaceTimeGrid::makeFourDimGrid(GridDefaultLatt(),
GridDefaultSimd(Nd,vComplex::Nsimd()),
GridDefaultMpi());
GridRedBlackCartesian * UrbGrid = SpaceTimeGrid::makeFourDimRedBlackGrid(UGrid);
// GridCartesian * FGrid = SpaceTimeGrid::makeFiveDimGrid(Ls,UGrid);
// GridRedBlackCartesian * FrbGrid = SpaceTimeGrid::makeFiveDimRedBlackGrid(Ls,UGrid);
// 4D Wilson case: the fermion grid aliases the gauge grid (no 5th dimension).
GridCartesian * FGrid = UGrid;
GridRedBlackCartesian * FrbGrid = UrbGrid;
// Construct a coarsened grid
// poare TODO: replace this with the following line?
Coordinate clatt = GridDefaultLatt();
// Coordinate clatt = GridDefaultLatt(); // [PO] initial line before I edited it
// Halve the lattice extent in every dimension to define the coarse grid
// (blocking factor 2 per direction).
for(int d=0;d<clatt.size();d++){
std::cout << GridLogMessage<< clatt[d] <<std::endl;
clatt[d] = clatt[d]/2;
// clatt[d] = clatt[d]/4;
}
GridCartesian *Coarse4d = SpaceTimeGrid::makeFourDimGrid(clatt, GridDefaultSimd(Nd,vComplex::Nsimd()),GridDefaultMpi());;
GridCartesian *Coarse5d = SpaceTimeGrid::makeFiveDimGrid(1,Coarse4d);
// Fixed seeds so runs are reproducible across invocations.
std::vector<int> seeds4({1,2,3,4});
std::vector<int> seeds5({5,6,7,8});
std::vector<int> cseeds({5,6,7,8});
GridParallelRNG RNG5(FGrid); RNG5.SeedFixedIntegers(seeds5);
GridParallelRNG RNG4(UGrid); RNG4.SeedFixedIntegers(seeds4);
GridParallelRNG CRNG(Coarse5d);CRNG.SeedFixedIntegers(cseeds);
// Workspace fields; result/ref are zero-initialized, tmp/err left as scratch.
LatticeFermion result(FGrid); result=Zero();
LatticeFermion ref(FGrid); ref=Zero();
LatticeFermion tmp(FGrid);
LatticeFermion err(FGrid);
// Load the gauge field from the NERSC-format file "config" in the CWD.
LatticeGaugeField Umu(UGrid);
FieldMetaData header;
std::string file("config");
// std::string file("Users/patrickoare/libraries/PETSc-Grid/ckpoint_lat.4000");
NerscIO::readConfiguration(Umu,header,file);
// Read the solver parameters from LanParams.xml, then echo them back out to
// LanParams.xml.out (round-trip check / record of what was actually used).
LanczosParameters LanParams;
{
XmlReader HMCrd("LanParams.xml");
read(HMCrd,"LanczosParameters",LanParams);
}
std::cout << GridLogMessage<< LanParams <<std::endl;
{
XmlWriter HMCwr("LanParams.xml.out");
write(HMCwr,"LanczosParameters",LanParams);
}
// Defaults below are placeholders; mass/resid/Nk/Np/Nstop/maxIter are all
// overwritten from LanParams further down before they are used.
RealD mass=0.01;
RealD M5=1.8;
// PowerMethod<LatticeFermion> PM; PM(PVdagM, src);
int Nm = 50;
int Nk = 12;
int Np = 38;
// int Nk = Nm+1; // if just running once
int maxIter = 10000;
int Nstop = 10;
RealD resid = 1.0e-5;
// Antiperiodic boundary condition in the time direction only.
std::vector<Complex> boundary = {1,1,1,-1};
WilsonOp::ImplParams Params(boundary);
// DomainWallFermionD Ddwf(Umu,*FGrid,*FrbGrid,*UGrid,*UrbGrid,mass,M5);
// DomainWallFermionD Dpv(Umu,*FGrid,*FrbGrid,*UGrid,*UrbGrid,1.0,M5);
mass=LanParams.mass;
std::cout << GridLogIRL<< "mass "<<mass<<std::endl;
WilsonOp WilsonOperator(Umu,*UGrid,*UrbGrid,mass,Params);
// const int nbasis = 20; // size of approximate basis for low-mode space
const int nbasis = 3; // size of approximate basis for low-mode space
const int cb = 0 ;
LatticeFermion prom(FGrid);
typedef GeneralCoarsenedMatrix<vSpinColourVector,vTComplex,nbasis> LittleDiracOperator;
typedef LittleDiracOperator::CoarseVector CoarseVector;
NextToNearestStencilGeometry5D geom(Coarse5d);
std::cout<<GridLogMessage<<std::endl;
std::cout<<GridLogMessage<<"*******************************************"<<std::endl;
std::cout<<GridLogMessage<<std::endl;
// typedef PVdagMLinearOperator<DomainWallFermionD,LatticeFermionD> PVdagM_t;
// typedef ShiftedPVdagMLinearOperator<DomainWallFermionD,LatticeFermionD> ShiftedPVdagM_t;
// typedef ShiftedComplexPVdagMLinearOperator<DomainWallFermionD,LatticeFermionD> ShiftedComplexPVdagM_t;
// PVdagM_t PVdagM(Ddwf, Dpv);
// ShiftedPVdagM_t ShiftedPVdagM(0.1,Ddwf,Dpv);
// SquaredLinearOperator<DomainWallFermionD, LatticeFermionD> Dsq (Ddwf);
// NonHermitianLinearOperator<DomainWallFermionD, LatticeFermionD> DLinOp (Ddwf);
// Linear-operator wrappers around the Wilson operator: non-Hermitian D,
// Hermitian D^dag D, and gamma5-Hermitian gamma5*D.
NonHermitianLinearOperator<WilsonOp,FermionField> Dwilson(WilsonOperator); /// <-----
// InvertNonHermitianLinearOperator<WilsonOp,FermionField> Iwilson(WilsonOperator); /// <-----
MdagMLinearOperator<WilsonOp,FermionField> HermOp(WilsonOperator); /// <-----
Gamma5HermitianLinearOperator <WilsonOp,LatticeFermion> HermOp2(WilsonOperator); /// <----
// PowerMethod<LatticeFermion> PM; PM(PVdagM, src);
// Override the hard-coded defaults with the XML-supplied values.
resid=LanParams.resid;
Nstop=LanParams.Nstop;
Nk=LanParams.Nk;
Np=LanParams.Np;
maxIter=LanParams.maxIter;
Nm = Nk + Np;
// Nu random sources; only src[0] is actually used below.
int Nu=16;
std::vector<LatticeFermion> src(Nu,FGrid);
for(int i=0;i<Nu;i++) random(RNG5,src[i]);
// Optionally replace the random starting vector with a previously saved
// eigenvector read from "evec_in" (SciDAC format).
if(LanParams.ReadEvec) {
std::string evecs_file="evec_in";
std::cout << GridLogIRL<< "Reading evecs from "<<evecs_file<<std::endl;
emptyUserRecord record;
Grid::ScidacReader RD;
RD.open(evecs_file);
RD.readScidacFieldRecord(src[0],record);
RD.close();
}
Coordinate origin ({0,0,0,0});
auto tmpSrc = peekSite(src[0], origin);
std::cout << "[DEBUG] Source at origin = " << tmpSrc << std::endl;
LatticeFermion src2 = src[0];
// Run KrylovSchur and Arnoldi on a Hermitian matrix
std::cout << GridLogMessage << "Running Krylov Schur" << std::endl;
#if 1
// Shifted Krylov-Schur on the non-Hermitian Wilson operator, sorting by
// EvalImNormSmall (presumably smallest |Im|-based ordering — confirm
// against the KrylovSchur sorter definitions elsewhere in this file).
RealD shift=1.5;
KrylovSchur KrySchur (Dwilson, UGrid, resid,EvalImNormSmall);
KrySchur(src[0], maxIter, Nm, Nk, Nstop,&shift);
#else
// NOTE(review): this branch uses Iwilson, whose declaration above is
// commented out — it will not compile if the #if is flipped to 0.
KrylovSchur KrySchur (Iwilson, UGrid, resid,EvalImNormSmall);
KrySchur(src[0], maxIter, Nm, Nk, Nstop);
#endif
// std::cout << GridLogMessage << "evec.size= " << KrySchur.evecs.size()<< std::endl;
// NOTE(review): `Field` and `tol` are not declared anywhere in this scope;
// these two lines look like a work-in-progress leftover and will not
// compile unless Field/tol are defined elsewhere in this file — confirm.
LanczosBidiagonalization<Field> LB(Dwilson, UGrid);
LB.run(src[0], Nm, tol);
// Overwrite src[0] with the sum of the first Nstop converged eigenvectors.
src[0]=KrySchur.evecs[0];
for (int i=1;i<Nstop;i++) src[0]+=KrySchur.evecs[i];
// Write the site-local density |v_i(x)|^2 of each eigenvector to
// ./evec_<mass>_<i>.
for (int i=0;i<Nstop;i++)
{
std::string evfile ("./evec_"+std::to_string(mass)+"_"+std::to_string(i));
auto evdensity = localInnerProduct(KrySchur.evecs[i],KrySchur.evecs[i] );
writeFile(evdensity,evfile);
}
// Also write the summed eigenvector field itself to ./evec_<mass>_sum.
{
std::string evfile ("./evec_"+std::to_string(mass)+"_sum");
// auto evdensity = localInnerProduct(evec[i],evec[i] );
writeFile(src[0],evfile);
}
/*
std::cout << GridLogMessage << "Running Arnoldi" << std::endl;
// Arnoldi Arn (Dsq, FGrid, 1e-8);
Arnoldi Arn (DLinOp, FGrid, 1e-8);
testSchurFromHess<LatticeFermion>(Arn, src, 10, 6, 4);
Arnoldi Arn2 (DLinOp, FGrid, 1e-8);
testSchurFromHess<LatticeFermion>(Arn2, src, 16, 12, 8);
*/
std::cout<<GridLogMessage<<std::endl;
std::cout<<GridLogMessage<<"*******************************************"<<std::endl;
std::cout<<GridLogMessage<<std::endl;
std::cout<<GridLogMessage << "Done "<< std::endl;
Grid_finalize();
return 0;
}

View File

@@ -387,7 +387,7 @@ public:
Handle< LinearOperatorArray<T4> > M(S_f.linOp(fs));
return M;
}
assert(0);
GRID_ASSERT(0);
}
static Chroma::Handle< Chroma::SystemSolver<QDP::LatticeFermion> > GetSolver(QDP::multi1d<QDP::LatticeColorMatrix> &u, ChromaAction parms)
@@ -745,7 +745,7 @@ void calc_grid(ChromaAction action,Grid::LatticeGaugeField & Umu, Grid::LatticeF
return;
}
assert(0);
GRID_ASSERT(0);
}

View File

@@ -440,7 +440,7 @@ void calc_grid(ChromaAction action, Grid::LatticeGaugeField &Umu, Grid::LatticeF
return;
}
assert(0);
GRID_ASSERT(0);
}
int main(int argc, char **argv)

View File

@@ -47,7 +47,7 @@ RealD interpolate(const RealD t_int, const std::vector<std::pair<RealD,RealD> >
}
else if(diff < tdiff2){ tdiff2 = diff; t2_idx = i; }
}
assert(t1_idx != -1 && t2_idx != -1);
GRID_ASSERT(t1_idx != -1 && t2_idx != -1);
RealD t2 = data[t2_idx].first, v2 = data[t2_idx].second;
RealD t1 = data[t1_idx].first, v1 = data[t1_idx].second;

View File

@@ -169,7 +169,7 @@ int main(int argc, char** argv) {
std::cout << GridLogMessage << "norm2(Munprec), norm2(Dhop + Mdiag), abs. deviation, rel. deviation: "
<< norm2(ref) << " " << norm2(res) << " " << absDev << " " << relDev << " -> check "
<< ((relDev < checkTolerance) ? "passed" : "failed") << std::endl;
assert(relDev <= checkTolerance);
GRID_ASSERT(relDev <= checkTolerance);
}
{
@@ -200,7 +200,7 @@ int main(int argc, char** argv) {
std::cout << GridLogMessage << "norm2(Dhop), norm2(Meo + Moe), abs. deviation, rel. deviation: "
<< norm2(ref) << " " << norm2(res) << " " << absDev << " " << relDev
<< " -> check " << ((relDev < checkTolerance) ? "passed" : "failed") << std::endl;
assert(relDev <= checkTolerance);
GRID_ASSERT(relDev <= checkTolerance);
}
{
@@ -222,7 +222,7 @@ int main(int argc, char** argv) {
std::cout << GridLogMessage << "Re(v^dag M^dag M v), Im(v^dag M^dag M v), rel.deviation: "
<< real(dot) << " " << imag(dot) << " " << relDev
<< " -> check " << ((relDev < checkTolerance) ? "passed" : "failed") << std::endl;
assert(relDev <= checkTolerance);
GRID_ASSERT(relDev <= checkTolerance);
}
{
@@ -242,7 +242,7 @@ int main(int argc, char** argv) {
std::cout << GridLogMessage << "Re(v^dag Mooee^dag Mooee v), Im(v^dag Mooee^dag Mooee v), rel.deviation: "
<< real(dot) << " " << imag(dot) << " " << relDev
<< " -> check " << ((relDev < checkTolerance) ? "passed" : "failed") << std::endl;
assert(relDev <= checkTolerance);
GRID_ASSERT(relDev <= checkTolerance);
}
{
@@ -262,7 +262,7 @@ int main(int argc, char** argv) {
std::cout << GridLogMessage << "norm2(src), norm2(MooeeInv Mooee src), abs. deviation, rel. deviation: "
<< norm2(src) << " " << norm2(phi) << " " << absDev << " " << relDev
<< " -> check " << ((relDev < checkTolerance) ? "passed" : "failed") << std::endl;
assert(relDev <= checkTolerance);
GRID_ASSERT(relDev <= checkTolerance);
}
{
@@ -343,7 +343,7 @@ int main(int argc, char** argv) {
std::cout << GridLogMessage << "norm2(chi), norm2(MeeInv Mee chi), abs. deviation, rel. deviation: "
<< norm2(chi) << " " << norm2(phi) << " " << absDev << " " << relDev
<< " -> check " << ((relDev < checkTolerance) ? "passed" : "failed") << std::endl;
assert(relDev <= checkTolerance);
GRID_ASSERT(relDev <= checkTolerance);
}
{
@@ -380,7 +380,7 @@ int main(int argc, char** argv) {
std::cout << GridLogMessage << "norm2(chi), norm2(MeeDag MeeInvDag chi), abs. deviation, rel. deviation: "
<< norm2(chi) << " " << norm2(phi) << " " << absDev << " " << relDev
<< " -> check " << ((relDev < checkTolerance) ? "passed" : "failed") << std::endl;
assert(relDev <= checkTolerance);
GRID_ASSERT(relDev <= checkTolerance);
}
{
@@ -429,7 +429,7 @@ int main(int argc, char** argv) {
std::cout << GridLogMessage << "norm2(Dunprec), norm2(Deoprec), abs. deviation, rel. deviation: "
<< norm2(ref) << " " << norm2(phi) << " " << absDev << " " << relDev
<< " -> check " << ((relDev < checkTolerance) ? "passed" : "failed") << std::endl;
assert(relDev <= checkTolerance);
GRID_ASSERT(relDev <= checkTolerance);
}
{

View File

@@ -95,7 +95,7 @@ int main(int argc, char** argv) {
GridStopWatch CGTimer;
SchurDiagMooeeOperator<DomainWallFermionD, LatticeFermion> HermOpEO(Ddwf);
ConjugateGradient<LatticeFermion> CG(1.0e-5, 10000, 0);// switch off the assert
ConjugateGradient<LatticeFermion> CG(1.0e-5, 10000, 0);// switch off the GRID_ASSERT
CGTimer.Start();
CG(HermOpEO, src_o, result_o);

View File

@@ -267,7 +267,7 @@ int main (int argc, char ** argv)
Subspace Aggregates(Coarse5d,FGrid,0);
assert ( (nbasis & 0x1)==0);
GRID_ASSERT ( (nbasis & 0x1)==0);
{
int nb=nbasis/2;
Aggregates.CreateSubspaceChebyshev(RNG5,HermDefOp,nb,60.0,0.02,500,100,100,0.0);

View File

@@ -281,7 +281,7 @@ int main (int argc, char ** argv)
Subspace Aggregates(Coarse5d,FGrid,0);
assert ( (nbasis & 0x1)==0);
GRID_ASSERT ( (nbasis & 0x1)==0);
{
int nb=nbasis/2;
LatticeFermion A(FGrid);

View File

@@ -263,7 +263,7 @@ int main (int argc, char ** argv)
Subspace Aggregates(Coarse5d,FGrid,0);
assert ( (nbasis & 0x1)==0);
GRID_ASSERT ( (nbasis & 0x1)==0);
{
int nb=nbasis/2;
LatticeFermion A(FGrid);

View File

@@ -301,7 +301,7 @@ int main (int argc, char ** argv)
Subspace Aggregates(Coarse5d,FGrid,0);
assert ( (nbasis & 0x1)==0);
GRID_ASSERT ( (nbasis & 0x1)==0);
{
int nb=nbasis/2;
// Aggregates.CreateSubspaceChebyshev(RNG5,HermDefOp,nb,60.0,0.05,500,200,100,0.0);// 18s

View File

@@ -277,7 +277,7 @@ int main (int argc, char ** argv)
Subspace Aggregates(Coarse5d,FGrid,0);
assert ( (nbasis & 0x1)==0);
GRID_ASSERT ( (nbasis & 0x1)==0);
{
int nb=nbasis/2;
LatticeFermion A(FGrid);

View File

@@ -261,7 +261,7 @@ int main (int argc, char ** argv)
Subspace Aggregates(Coarse5d,FGrid,0);
assert ( (nbasis & 0x1)==0);
GRID_ASSERT ( (nbasis & 0x1)==0);
{
int nb=nbasis/2;
LatticeFermion A(FGrid);

View File

@@ -85,13 +85,13 @@ public:
PVdagMLinearOperator(Matrix &Mat,Matrix &PV): _Mat(Mat),_PV(PV){};
void OpDiag (const Field &in, Field &out) {
assert(0);
GRID_ASSERT(0);
}
void OpDir (const Field &in, Field &out,int dir,int disp) {
assert(0);
GRID_ASSERT(0);
}
void OpDirAll (const Field &in, std::vector<Field> &out){
assert(0);
GRID_ASSERT(0);
};
void Op (const Field &in, Field &out){
Field tmp(in.Grid());
@@ -104,10 +104,10 @@ public:
_Mat.Mdag(in,tmp);
}
void HermOpAndNorm(const Field &in, Field &out,RealD &n1,RealD &n2){
assert(0);
GRID_ASSERT(0);
}
void HermOp(const Field &in, Field &out){
assert(0);
GRID_ASSERT(0);
}
};
@@ -411,7 +411,7 @@ int main (int argc, char ** argv)
Subspace Aggregates(Coarse5d,FGrid,0);
assert ( (nbasis & 0x1)==0);
GRID_ASSERT ( (nbasis & 0x1)==0);
{
int nb=nbasis/2;
Aggregates.CreateSubspaceChebyshev(RNG5,HermDefOp,nb,60.0,0.02,500,100,100,0.0);

View File

@@ -69,7 +69,7 @@ void run_test(int argc, char ** argv, const typename SpeciesD::ImplParams &param
bool cfg_loaded=false;
for(int i=1;i<argc;i++){
if(std::string(argv[i]) == "--load_config"){
assert(i != argc-1);
GRID_ASSERT(i != argc-1);
std::string file = argv[i+1];
NerscIO io;
FieldMetaData metadata;
@@ -158,9 +158,9 @@ int main (int argc, char ** argv)
for(int i=1;i<argc;i++){
std::string arg(argv[i]);
if(arg == "--Gparity"){
assert(i!=argc-1);
GRID_ASSERT(i!=argc-1);
gpdir = std::stoi(argv[i+1]);
assert(gpdir >= 0 && gpdir <= 2); //spatial!
GRID_ASSERT(gpdir >= 0 && gpdir <= 2); //spatial!
gparity = true;
}
}

View File

@@ -106,7 +106,7 @@ int main (int argc, char** argv)
LatticeFermion diff = MinvMeta - eta;
std::cout << GridLogMessage << "eta: " << norm2(eta) << " M*eta: " << norm2(Meta) << " M^{-1}*M*eta: " << norm2(MinvMeta) << " M^{-1}*M*eta - eta: " << norm2(diff) << " (expect 0)" << std::endl;
assert(norm2(diff) < 1e-8);
GRID_ASSERT(norm2(diff) < 1e-8);
//Check right inverse
LatticeFermion MinvEta(FGrid);
@@ -118,7 +118,7 @@ int main (int argc, char** argv)
diff = MMinvEta - eta;
std::cout << GridLogMessage << "eta: " << norm2(eta) << " M^{-1}*eta: " << norm2(MinvEta) << " M*M^{-1}*eta: " << norm2(MMinvEta) << " M*M^{-1}*eta - eta: " << norm2(diff) << " (expect 0)" << std::endl;
assert(norm2(diff) < 1e-8);
GRID_ASSERT(norm2(diff) < 1e-8);
std::cout << GridLogMessage << "Done" << std::endl;
Grid_finalize();

View File

@@ -82,13 +82,13 @@ public:
PVdagMLinearOperator(Matrix &Mat,Matrix &PV): _Mat(Mat),_PV(PV){};
void OpDiag (const Field &in, Field &out) {
assert(0);
GRID_ASSERT(0);
}
void OpDir (const Field &in, Field &out,int dir,int disp) {
assert(0);
GRID_ASSERT(0);
}
void OpDirAll (const Field &in, std::vector<Field> &out){
assert(0);
GRID_ASSERT(0);
};
void Op (const Field &in, Field &out){
Field tmp(in.Grid());
@@ -101,10 +101,10 @@ public:
_Mat.Mdag(in,tmp);
}
void HermOpAndNorm(const Field &in, Field &out,RealD &n1,RealD &n2){
assert(0);
GRID_ASSERT(0);
}
void HermOp(const Field &in, Field &out){
assert(0);
GRID_ASSERT(0);
}
};
@@ -309,7 +309,7 @@ int main (int argc, char ** argv)
Subspace Aggregates4D(Coarse4d,UGrid,0);
Subspace Aggregates5D(Coarse5d,FGrid,0);
assert ( (nbasis & 0x1)==0);
GRID_ASSERT ( (nbasis & 0x1)==0);
std::cout<<GridLogMessage << "**************************************************"<< std::endl;
std::cout<<GridLogMessage << " 4D subspace build " <<std::endl;
std::cout<<GridLogMessage << "**************************************************"<< std::endl;

View File

@@ -109,7 +109,7 @@ public:
///////////////////////////////////////////////////////////
// The Cayley coeffs (unprec)
///////////////////////////////////////////////////////////
assert(gamma.size()==Ls);
GRID_ASSERT(gamma.size()==Ls);
omega.resize(Ls);
bs.resize(Ls);
@@ -125,7 +125,7 @@ public:
for(int i=0; i < Ls; i++){
as[i] = 1.0;
omega[i] = _gamma[i]*_zolo_hi; //NB reciprocal relative to Chroma NEF code
assert(omega[i]!=Coeff_t(0.0));
GRID_ASSERT(omega[i]!=Coeff_t(0.0));
bs[i] = 0.5*(bpc/omega[i] + bmc);
cs[i] = 0.5*(bpc/omega[i] - bmc);
}
@@ -140,7 +140,7 @@ public:
for(int i=0;i<Ls;i++){
bee[i]=as[i]*(bs[i]*(4.0-this->M5) +1.0);
assert(bee[i]!=Coeff_t(0.0));
GRID_ASSERT(bee[i]!=Coeff_t(0.0));
cee[i]=as[i]*(1.0-cs[i]*(4.0-this->M5));
beo[i]=as[i]*bs[i];
ceo[i]=-as[i]*cs[i];
@@ -174,7 +174,7 @@ public:
leem[i]=mass*cee[Ls-1]/bee[0];
for(int j=0;j<i;j++) {
assert(bee[j+1]!=Coeff_t(0.0));
GRID_ASSERT(bee[j+1]!=Coeff_t(0.0));
leem[i]*= aee[j]/bee[j+1];
}
@@ -316,7 +316,7 @@ public:
autoView(psi , psi_i,AcceleratorRead);
autoView(phi , phi_i,AcceleratorRead);
autoView(chi , chi_i,AcceleratorWrite);
assert(phi.Checkerboard() == psi.Checkerboard());
GRID_ASSERT(phi.Checkerboard() == psi.Checkerboard());
auto pdiag = &diag[0];
auto pupper = &upper[0];
@@ -354,7 +354,7 @@ public:
autoView(psi , psi_i,AcceleratorRead);
autoView(phi , phi_i,AcceleratorRead);
autoView(chi , chi_i,AcceleratorWrite);
assert(phi.Checkerboard() == psi.Checkerboard());
GRID_ASSERT(phi.Checkerboard() == psi.Checkerboard());
auto pdiag = &diag[0];
auto pupper = &upper[0];
@@ -438,7 +438,7 @@ public:
}
virtual void Mdir (const CoarseVector &in, CoarseVector &out,int dir, int disp)
{
assert(0);
GRID_ASSERT(0);
}
virtual void MdirAll (const CoarseVector &in, std::vector<CoarseVector> &out)
{
@@ -679,13 +679,13 @@ public:
virtual std::vector<int> Displacements(void){ return _Mat.Displacements();};
void OpDiag (const Field &in, Field &out) {
assert(0);
GRID_ASSERT(0);
}
void OpDir (const Field &in, Field &out,int dir,int disp) {
assert(0);
GRID_ASSERT(0);
}
void OpDirAll (const Field &in, std::vector<Field> &out){
assert(0);
GRID_ASSERT(0);
};
void Op (const Field &in, Field &out){
Field tmp(in.Grid());
@@ -698,10 +698,10 @@ public:
_Mat.Mdag(in,tmp);
}
void HermOpAndNorm(const Field &in, Field &out,RealD &n1,RealD &n2){
assert(0);
GRID_ASSERT(0);
}
void HermOp(const Field &in, Field &out){
assert(0);
GRID_ASSERT(0);
}
};
@@ -1024,7 +1024,7 @@ int main (int argc, char ** argv)
std::cout<<GridLogMessage << " 4D subspace build " <<std::endl;
std::cout<<GridLogMessage << "**************************************************"<< std::endl;
Subspace Aggregates4D(Coarse4d,UGrid,0);
assert ( (nbasis & 0x1)==0);
GRID_ASSERT ( (nbasis & 0x1)==0);
int nb=nbasis/2;
Gamma g5(Gamma::Algebra::Gamma5);

View File

@@ -109,7 +109,7 @@ public:
///////////////////////////////////////////////////////////
// The Cayley coeffs (unprec)
///////////////////////////////////////////////////////////
assert(gamma.size()==Ls);
GRID_ASSERT(gamma.size()==Ls);
omega.resize(Ls);
bs.resize(Ls);
@@ -125,7 +125,7 @@ public:
for(int i=0; i < Ls; i++){
as[i] = 1.0;
omega[i] = _gamma[i]*_zolo_hi; //NB reciprocal relative to Chroma NEF code
assert(omega[i]!=Coeff_t(0.0));
GRID_ASSERT(omega[i]!=Coeff_t(0.0));
bs[i] = 0.5*(bpc/omega[i] + bmc);
cs[i] = 0.5*(bpc/omega[i] - bmc);
}
@@ -140,7 +140,7 @@ public:
for(int i=0;i<Ls;i++){
bee[i]=as[i]*(bs[i]*(4.0-this->M5) +1.0);
assert(bee[i]!=Coeff_t(0.0));
GRID_ASSERT(bee[i]!=Coeff_t(0.0));
cee[i]=as[i]*(1.0-cs[i]*(4.0-this->M5));
beo[i]=as[i]*bs[i];
ceo[i]=-as[i]*cs[i];
@@ -174,7 +174,7 @@ public:
leem[i]=mass*cee[Ls-1]/bee[0];
for(int j=0;j<i;j++) {
assert(bee[j+1]!=Coeff_t(0.0));
GRID_ASSERT(bee[j+1]!=Coeff_t(0.0));
leem[i]*= aee[j]/bee[j+1];
}
@@ -316,7 +316,7 @@ public:
autoView(psi , psi_i,AcceleratorRead);
autoView(phi , phi_i,AcceleratorRead);
autoView(chi , chi_i,AcceleratorWrite);
assert(phi.Checkerboard() == psi.Checkerboard());
GRID_ASSERT(phi.Checkerboard() == psi.Checkerboard());
auto pdiag = &diag[0];
auto pupper = &upper[0];
@@ -354,7 +354,7 @@ public:
autoView(psi , psi_i,AcceleratorRead);
autoView(phi , phi_i,AcceleratorRead);
autoView(chi , chi_i,AcceleratorWrite);
assert(phi.Checkerboard() == psi.Checkerboard());
GRID_ASSERT(phi.Checkerboard() == psi.Checkerboard());
auto pdiag = &diag[0];
auto pupper = &upper[0];
@@ -438,7 +438,7 @@ public:
}
virtual void Mdir (const CoarseVector &in, CoarseVector &out,int dir, int disp)
{
assert(0);
GRID_ASSERT(0);
}
virtual void MdirAll (const CoarseVector &in, std::vector<CoarseVector> &out)
{
@@ -699,13 +699,13 @@ public:
virtual std::vector<int> Displacements(void){ return _Mat.Displacements();};
void OpDiag (const Field &in, Field &out) {
assert(0);
GRID_ASSERT(0);
}
void OpDir (const Field &in, Field &out,int dir,int disp) {
assert(0);
GRID_ASSERT(0);
}
void OpDirAll (const Field &in, std::vector<Field> &out){
assert(0);
GRID_ASSERT(0);
};
void Op (const Field &in, Field &out){
Field tmp(in.Grid());
@@ -718,10 +718,10 @@ public:
_Mat.Mdag(in,tmp);
}
void HermOpAndNorm(const Field &in, Field &out,RealD &n1,RealD &n2){
assert(0);
GRID_ASSERT(0);
}
void HermOp(const Field &in, Field &out){
assert(0);
GRID_ASSERT(0);
}
};
@@ -1048,7 +1048,7 @@ int main (int argc, char ** argv)
std::cout<<GridLogMessage << " 4D subspace build " <<std::endl;
std::cout<<GridLogMessage << "**************************************************"<< std::endl;
Subspace Aggregates4D(Coarse4d,UGrid,0);
assert ( (nbasis & 0x1)==0);
GRID_ASSERT ( (nbasis & 0x1)==0);
int nb=nbasis/2;
Gamma g5(Gamma::Algebra::Gamma5);

View File

@@ -86,13 +86,13 @@ void checkParameterValidity(MultiGridParams const &params) {
auto correctSize = params.nLevels - 1;
assert(correctSize == params.blockSizes.size());
assert(correctSize == params.smootherTol.size());
assert(correctSize == params.smootherMaxOuterIter.size());
assert(correctSize == params.smootherMaxInnerIter.size());
assert(correctSize == params.kCycleTol.size());
assert(correctSize == params.kCycleMaxOuterIter.size());
assert(correctSize == params.kCycleMaxInnerIter.size());
GRID_ASSERT(correctSize == params.blockSizes.size());
GRID_ASSERT(correctSize == params.smootherTol.size());
GRID_ASSERT(correctSize == params.smootherMaxOuterIter.size());
GRID_ASSERT(correctSize == params.smootherMaxInnerIter.size());
GRID_ASSERT(correctSize == params.kCycleTol.size());
GRID_ASSERT(correctSize == params.kCycleMaxOuterIter.size());
GRID_ASSERT(correctSize == params.kCycleMaxInnerIter.size());
}
struct LevelInfo {
@@ -105,7 +105,7 @@ public:
auto nCoarseLevels = mgParams.blockSizes.size();
assert(nCoarseLevels == mgParams.nLevels - 1);
GRID_ASSERT(nCoarseLevels == mgParams.nLevels - 1);
// set up values for finest grid
Grids.push_back(FineGrid);
@@ -117,7 +117,7 @@ public:
for(int level = 1; level < mgParams.nLevels; ++level) {
auto Nd = Grids[level - 1]->_ndimension;
auto tmp = Grids[level - 1]->_fdimensions;
assert(tmp.size() == Nd);
GRID_ASSERT(tmp.size() == Nd);
Seeds.push_back(std::vector<int>(Nd));

View File

@@ -56,7 +56,7 @@ int main(int argc, char **argv) {
if(GridCmdOptionExists(argv, argv + argc, "--inputxml")) {
inputXml = GridCmdOptionPayload(argv, argv + argc, "--inputxml");
assert(inputXml.length() != 0);
GRID_ASSERT(inputXml.length() != 0);
}
{

Some files were not shown because too many files have changed in this diff Show More