mirror of https://github.com/paboyle/Grid.git synced 2024-11-09 23:45:36 +00:00

Merge branch 'feature/dirichlet' of https://github.com/paboyle/Grid into feature/dirichlet

This commit is contained in:
Peter Boyle 2022-12-17 20:17:09 -05:00
commit 472ed2dd5c
174 changed files with 1966 additions and 802 deletions

View File

@ -44,10 +44,10 @@ Author: paboyle <paboyle@ph.ed.ac.uk>
#include <Grid/GridStd.h>
#include <Grid/threads/Pragmas.h>
#include <Grid/perfmon/Timer.h>
#include <Grid/perfmon/Tracing.h>
//#include <Grid/perfmon/PerfCount.h>
#include <Grid/util/Util.h>
#include <Grid/log/Log.h>
#include <Grid/perfmon/Tracing.h>
#include <Grid/allocator/Allocator.h>
#include <Grid/simd/Simd.h>
#include <Grid/threads/ThreadReduction.h>

View File

@ -324,9 +324,9 @@ public:
GridBase* _cbgrid;
int hermitian;
CartesianStencil<siteVector,siteVector,int> Stencil;
CartesianStencil<siteVector,siteVector,int> StencilEven;
CartesianStencil<siteVector,siteVector,int> StencilOdd;
CartesianStencil<siteVector,siteVector,DefaultImplParams> Stencil;
CartesianStencil<siteVector,siteVector,DefaultImplParams> StencilEven;
CartesianStencil<siteVector,siteVector,DefaultImplParams> StencilOdd;
std::vector<CoarseMatrix> A;
std::vector<CoarseMatrix> Aeven;
@ -631,7 +631,7 @@ public:
assert(Aself != nullptr);
}
void DselfInternal(CartesianStencil<siteVector,siteVector,int> &st, CoarseMatrix &a,
void DselfInternal(CartesianStencil<siteVector,siteVector,DefaultImplParams> &st, CoarseMatrix &a,
const CoarseVector &in, CoarseVector &out, int dag) {
int point = geom.npoint-1;
autoView( out_v, out, AcceleratorWrite);
@ -694,7 +694,7 @@ public:
}
}
void DhopInternal(CartesianStencil<siteVector,siteVector,int> &st, std::vector<CoarseMatrix> &a,
void DhopInternal(CartesianStencil<siteVector,siteVector,DefaultImplParams> &st, std::vector<CoarseMatrix> &a,
const CoarseVector &in, CoarseVector &out, int dag) {
SimpleCompressor<siteVector> compressor;
@ -784,9 +784,9 @@ public:
_cbgrid(new GridRedBlackCartesian(&CoarseGrid)),
geom(CoarseGrid._ndimension),
hermitian(hermitian_),
Stencil(&CoarseGrid,geom.npoint,Even,geom.directions,geom.displacements,0),
StencilEven(_cbgrid,geom.npoint,Even,geom.directions,geom.displacements,0),
StencilOdd(_cbgrid,geom.npoint,Odd,geom.directions,geom.displacements,0),
Stencil(&CoarseGrid,geom.npoint,Even,geom.directions,geom.displacements),
StencilEven(_cbgrid,geom.npoint,Even,geom.directions,geom.displacements),
StencilOdd(_cbgrid,geom.npoint,Odd,geom.directions,geom.displacements),
A(geom.npoint,&CoarseGrid),
Aeven(geom.npoint,_cbgrid),
Aodd(geom.npoint,_cbgrid),
@ -804,9 +804,9 @@ public:
_cbgrid(&CoarseRBGrid),
geom(CoarseGrid._ndimension),
hermitian(hermitian_),
Stencil(&CoarseGrid,geom.npoint,Even,geom.directions,geom.displacements,0),
StencilEven(&CoarseRBGrid,geom.npoint,Even,geom.directions,geom.displacements,0),
StencilOdd(&CoarseRBGrid,geom.npoint,Odd,geom.directions,geom.displacements,0),
Stencil(&CoarseGrid,geom.npoint,Even,geom.directions,geom.displacements),
StencilEven(&CoarseRBGrid,geom.npoint,Even,geom.directions,geom.displacements),
StencilOdd(&CoarseRBGrid,geom.npoint,Odd,geom.directions,geom.displacements),
A(geom.npoint,&CoarseGrid),
Aeven(geom.npoint,&CoarseRBGrid),
Aodd(geom.npoint,&CoarseRBGrid),

View File

@ -526,6 +526,7 @@ public:
(*this)(Linop,in[k],out[k]);
}
};
virtual ~OperatorFunction(){};
};
template<class Field> class LinearFunction {

View File

@ -258,26 +258,12 @@ public:
for(int n=2;n<order;n++){
Linop.HermOp(*Tn,y);
#if 0
auto y_v = y.View();
auto Tn_v = Tn->View();
auto Tnp_v = Tnp->View();
auto Tnm_v = Tnm->View();
constexpr int Nsimd = vector_type::Nsimd();
accelerator_for(ss, in.Grid()->oSites(), Nsimd, {
coalescedWrite(y_v[ss],xscale*y_v(ss)+mscale*Tn_v(ss));
coalescedWrite(Tnp_v[ss],2.0*y_v(ss)-Tnm_v(ss));
});
if ( Coeffs[n] != 0.0) {
axpy(out,Coeffs[n],*Tnp,out);
}
#else
axpby(y,xscale,mscale,y,(*Tn));
axpby(*Tnp,2.0,-1.0,y,(*Tnm));
if ( Coeffs[n] != 0.0) {
axpy(out,Coeffs[n],*Tnp,out);
}
#endif
// Cycle pointers to avoid copies
Field *swizzle = Tnm;
Tnm =Tn;

View File

@ -36,6 +36,11 @@ NAMESPACE_BEGIN(Grid);
#define GRID_ALLOC_SMALL_LIMIT (4096)
#define STRINGIFY(x) #x
#define TOSTRING(x) STRINGIFY(x)
#define FILE_LINE __FILE__ ":" TOSTRING(__LINE__)
#define AUDIT(a) MemoryManager::Audit(FILE_LINE)
/*Pinning pages is costly*/
////////////////////////////////////////////////////////////////////////////
// Advise the LatticeAccelerator class
@ -94,6 +99,7 @@ private:
static void PrintBytes(void);
public:
static void Audit(std::string s);
static void Init(void);
static void InitMessage(void);
static void *AcceleratorAllocate(size_t bytes);
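For orientation, here is a minimal standalone sketch, not part of this diff, of how the new AUDIT macro expands: FILE_LINE stringifies the call site, so every audit is tagged with the file and line that triggered it. Names are simplified (a free Audit function stands in for MemoryManager::Audit).

#include <iostream>
#include <string>

#define STRINGIFY(x) #x
#define TOSTRING(x) STRINGIFY(x)
#define FILE_LINE __FILE__ ":" TOSTRING(__LINE__)
#define AUDIT(a) Audit(FILE_LINE)   // Grid's macro forwards to MemoryManager::Audit instead

void Audit(std::string s) { std::cout << "Audit called from " << s << std::endl; }

int main() {
  AUDIT();   // prints e.g. "Audit called from demo.cc:12"
  return 0;
}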

View File

@ -8,9 +8,8 @@ NAMESPACE_BEGIN(Grid);
static char print_buffer [ MAXLINE ];
#define mprintf(...) snprintf (print_buffer,MAXLINE, __VA_ARGS__ ); std::cout << GridLogMemory << print_buffer;
//#define dprintf(...) printf (__VA_ARGS__ ); fflush(stdout);
#define dprintf(...)
#define dprintf(...) snprintf (print_buffer,MAXLINE, __VA_ARGS__ ); std::cout << GridLogMemory << print_buffer;
//#define dprintf(...)
////////////////////////////////////////////////////////////
@ -132,9 +131,11 @@ void MemoryManager::Evict(AcceleratorViewEntry &AccCache)
///////////////////////////////////////////////////////////////////////////
assert(AccCache.state!=Empty);
mprintf("MemoryManager: Evict(%lx) %lx\n",(uint64_t)AccCache.CpuPtr,(uint64_t)AccCache.AccPtr);
assert(AccCache.accLock==0);
assert(AccCache.cpuLock==0);
mprintf("MemoryManager: Evict cpu %lx acc %lx cpuLock %ld accLock %ld\n",
(uint64_t)AccCache.CpuPtr,(uint64_t)AccCache.AccPtr,
(uint64_t)AccCache.cpuLock,(uint64_t)AccCache.accLock);
if (AccCache.accLock!=0) return;
if (AccCache.cpuLock!=0) return;
if(AccCache.state==AccDirty) {
Flush(AccCache);
}
@ -197,6 +198,7 @@ void MemoryManager::CpuDiscard(AcceleratorViewEntry &AccCache)
void MemoryManager::ViewClose(void* Ptr,ViewMode mode)
{
if( (mode==AcceleratorRead)||(mode==AcceleratorWrite)||(mode==AcceleratorWriteDiscard) ){
dprintf("AcceleratorViewClose %lx\n",(uint64_t)Ptr);
AcceleratorViewClose((uint64_t)Ptr);
} else if( (mode==CpuRead)||(mode==CpuWrite)){
CpuViewClose((uint64_t)Ptr);
@ -208,6 +210,7 @@ void *MemoryManager::ViewOpen(void* _CpuPtr,size_t bytes,ViewMode mode,ViewAdvis
{
uint64_t CpuPtr = (uint64_t)_CpuPtr;
if( (mode==AcceleratorRead)||(mode==AcceleratorWrite)||(mode==AcceleratorWriteDiscard) ){
dprintf("AcceleratorViewOpen %lx\n",(uint64_t)CpuPtr);
return (void *) AcceleratorViewOpen(CpuPtr,bytes,mode,hint);
} else if( (mode==CpuRead)||(mode==CpuWrite)){
return (void *)CpuViewOpen(CpuPtr,bytes,mode,hint);
@ -247,11 +250,12 @@ uint64_t MemoryManager::AcceleratorViewOpen(uint64_t CpuPtr,size_t bytes,ViewMod
assert(AccCache.cpuLock==0); // Programming error
if(AccCache.state!=Empty) {
dprintf("ViewOpen found entry %lx %lx : %ld %ld\n",
dprintf("ViewOpen found entry %lx %lx : %ld %ld accLock %ld\n",
(uint64_t)AccCache.CpuPtr,
(uint64_t)CpuPtr,
(uint64_t)AccCache.bytes,
(uint64_t)bytes);
(uint64_t)bytes,
(uint64_t)AccCache.accLock);
assert(AccCache.CpuPtr == CpuPtr);
assert(AccCache.bytes ==bytes);
}
@ -286,6 +290,7 @@ uint64_t MemoryManager::AcceleratorViewOpen(uint64_t CpuPtr,size_t bytes,ViewMod
AccCache.state = Consistent; // Empty + AccRead => Consistent
}
AccCache.accLock= 1;
dprintf("Copied Empty entry into device accLock= %d\n",AccCache.accLock);
} else if(AccCache.state==CpuDirty ){
if(mode==AcceleratorWriteDiscard) {
CpuDiscard(AccCache);
@ -298,21 +303,21 @@ uint64_t MemoryManager::AcceleratorViewOpen(uint64_t CpuPtr,size_t bytes,ViewMod
AccCache.state = Consistent; // CpuDirty + AccRead => Consistent
}
AccCache.accLock++;
dprintf("Copied CpuDirty entry into device accLock %d\n",AccCache.accLock);
dprintf("CpuDirty entry into device ++accLock= %d\n",AccCache.accLock);
} else if(AccCache.state==Consistent) {
if((mode==AcceleratorWrite)||(mode==AcceleratorWriteDiscard))
AccCache.state = AccDirty; // Consistent + AcceleratorWrite=> AccDirty
else
AccCache.state = Consistent; // Consistent + AccRead => Consistent
AccCache.accLock++;
dprintf("Consistent entry into device accLock %d\n",AccCache.accLock);
dprintf("Consistent entry into device ++accLock= %d\n",AccCache.accLock);
} else if(AccCache.state==AccDirty) {
if((mode==AcceleratorWrite)||(mode==AcceleratorWriteDiscard))
AccCache.state = AccDirty; // AccDirty + AcceleratorWrite=> AccDirty
else
AccCache.state = AccDirty; // AccDirty + AccRead => AccDirty
AccCache.accLock++;
dprintf("AccDirty entry into device accLock %d\n",AccCache.accLock);
dprintf("AccDirty entry ++accLock= %d\n",AccCache.accLock);
} else {
assert(0);
}
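The comments in this hunk spell out the view state machine (Empty, CpuDirty, AccDirty, Consistent). The following is a rough standalone sketch of those transitions for accelerator-side opens, not part of the commit, assuming the same four states and treating AcceleratorWriteDiscard like a write.

enum ViewState { Empty, CpuDirty, AccDirty, Consistent };
enum AccMode   { AccRead, AccWrite };   // AcceleratorWriteDiscard behaves like AccWrite here

// State after opening an accelerator view, following the comments above.
ViewState nextState(ViewState s, AccMode m) {
  if (m == AccWrite) return AccDirty;   // any device write leaves the device copy dirty
  switch (s) {
    case Empty:      return Consistent; // Empty      + AccRead => Consistent
    case CpuDirty:   return Consistent; // CpuDirty   + AccRead => Consistent (after copy-in)
    case AccDirty:   return AccDirty;   // AccDirty   + AccRead => AccDirty
    case Consistent: return Consistent; // Consistent + AccRead => Consistent
  }
  return s;
}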
@ -320,6 +325,7 @@ uint64_t MemoryManager::AcceleratorViewOpen(uint64_t CpuPtr,size_t bytes,ViewMod
// If view is opened on device remove from LRU
if(AccCache.LRU_valid==1){
// must possibly remove from LRU as now locked on GPU
dprintf("AccCache entry removed from LRU \n");
LRUremove(AccCache);
}
@ -340,10 +346,12 @@ void MemoryManager::AcceleratorViewClose(uint64_t CpuPtr)
assert(AccCache.accLock>0);
AccCache.accLock--;
// Move to LRU queue if not locked and close on device
if(AccCache.accLock==0) {
dprintf("AccleratorViewClose %lx AccLock decremented to %ld move to LRU queue\n",(uint64_t)CpuPtr,(uint64_t)AccCache.accLock);
LRUinsert(AccCache);
} else {
dprintf("AccleratorViewClose %lx AccLock decremented to %ld\n",(uint64_t)CpuPtr,(uint64_t)AccCache.accLock);
}
}
void MemoryManager::CpuViewClose(uint64_t CpuPtr)
@ -479,6 +487,29 @@ int MemoryManager::isOpen (void* _CpuPtr)
return 0;
}
}
void MemoryManager::Audit(std::string s)
{
for(auto it=AccViewTable.begin();it!=AccViewTable.end();it++){
auto &AccCache = it->second;
std::string str;
if ( AccCache.state==Empty ) str = std::string("Empty");
if ( AccCache.state==CpuDirty ) str = std::string("CpuDirty");
if ( AccCache.state==AccDirty ) str = std::string("AccDirty");
if ( AccCache.state==Consistent)str = std::string("Consistent");
if ( AccCache.cpuLock || AccCache.accLock ) {
std::cout << GridLogError << s<< "\n\t 0x"<<std::hex<<AccCache.CpuPtr<<std::dec
<< "\t0x"<<std::hex<<AccCache.AccPtr<<std::dec<<"\t" <<str
<< "\t cpuLock " << AccCache.cpuLock
<< "\t accLock " << AccCache.accLock
<< "\t LRUvalid " << AccCache.LRU_valid<<std::endl;
}
assert( AccCache.cpuLock== 0 ) ;
assert( AccCache.accLock== 0 ) ;
}
}
void MemoryManager::PrintState(void* _CpuPtr)
{

View File

@ -13,6 +13,7 @@ uint64_t MemoryManager::DeviceToHostBytes;
uint64_t MemoryManager::HostToDeviceXfer;
uint64_t MemoryManager::DeviceToHostXfer;
void MemoryManager::Audit(void){};
void MemoryManager::ViewClose(void* AccPtr,ViewMode mode){};
void *MemoryManager::ViewOpen(void* CpuPtr,size_t bytes,ViewMode mode,ViewAdvise hint){ return CpuPtr; };
int MemoryManager::isOpen (void* CpuPtr) { return 0;}

View File

@ -66,6 +66,7 @@ GridLogger GridLogError (1, "Error" , GridLogColours, "RED");
GridLogger GridLogWarning(1, "Warning", GridLogColours, "YELLOW");
GridLogger GridLogMessage(1, "Message", GridLogColours, "NORMAL");
GridLogger GridLogMemory (1, "Memory", GridLogColours, "NORMAL");
GridLogger GridLogTracing(1, "Tracing", GridLogColours, "NORMAL");
GridLogger GridLogDebug (1, "Debug", GridLogColours, "PURPLE");
GridLogger GridLogPerformance(1, "Performance", GridLogColours, "GREEN");
GridLogger GridLogDslash (1, "Dslash", GridLogColours, "BLUE");
@ -77,7 +78,8 @@ void GridLogConfigure(std::vector<std::string> &logstreams) {
GridLogError.Active(1);
GridLogWarning.Active(0);
GridLogMessage.Active(1); // at least the messages should be always on
GridLogMemory.Active(0); // at least the messages should be always on
GridLogMemory.Active(0);
GridLogTracing.Active(0);
GridLogIterative.Active(0);
GridLogDebug.Active(0);
GridLogPerformance.Active(0);
@ -87,6 +89,7 @@ void GridLogConfigure(std::vector<std::string> &logstreams) {
GridLogHMC.Active(1);
for (int i = 0; i < logstreams.size(); i++) {
if (logstreams[i] == std::string("Tracing")) GridLogTracing.Active(1);
if (logstreams[i] == std::string("Memory")) GridLogMemory.Active(1);
if (logstreams[i] == std::string("Warning")) GridLogWarning.Active(1);
if (logstreams[i] == std::string("NoMessage")) GridLogMessage.Active(0);
@ -94,8 +97,8 @@ void GridLogConfigure(std::vector<std::string> &logstreams) {
if (logstreams[i] == std::string("Debug")) GridLogDebug.Active(1);
if (logstreams[i] == std::string("Performance")) GridLogPerformance.Active(1);
if (logstreams[i] == std::string("Dslash")) GridLogDslash.Active(1);
if (logstreams[i] == std::string("NoIntegrator")) GridLogIntegrator.Active(0);
if (logstreams[i] == std::string("NoHMC")) GridLogHMC.Active(0);
if (logstreams[i] == std::string("NoIntegrator"))GridLogIntegrator.Active(0);
if (logstreams[i] == std::string("NoHMC")) GridLogHMC.Active(0);
if (logstreams[i] == std::string("Colours")) GridLogColours.Active(1);
}
}

View File

@ -186,6 +186,7 @@ extern GridLogger GridLogIterative ;
extern GridLogger GridLogIntegrator ;
extern GridLogger GridLogHMC;
extern GridLogger GridLogMemory;
extern GridLogger GridLogTracing;
extern Colours GridLogColours;
std::string demangle(const char* name) ;

View File

@ -1,4 +1,7 @@
#pragma once
NAMESPACE_BEGIN(Grid);
#ifdef GRID_TRACING_NVTX
#include <nvToolsExt.h>
class GridTracer {
@ -64,3 +67,4 @@ inline void traceStop(int ID) { }
#else
#define GRID_TRACE(name) GridTracer uniq_name_using_macros##__COUNTER__(name);
#endif
NAMESPACE_END(Grid);

View File

@ -55,14 +55,18 @@ public:
deriv_num=0;
}
void deriv_log(RealD nrm, RealD max,RealD Fdt_nrm,RealD Fdt_max) {
deriv_max_sum+=max;
if ( max > deriv_max_sum ) {
deriv_max_sum=max;
}
deriv_norm_sum+=nrm;
Fdt_max_sum+=Fdt_max;
if ( Fdt_max > Fdt_max_sum ) {
Fdt_max_sum=Fdt_max;
}
Fdt_norm_sum+=Fdt_nrm; deriv_num++;
}
RealD deriv_max_average(void) { return deriv_max_sum/deriv_num; };
RealD deriv_max_average(void) { return deriv_max_sum; };
RealD deriv_norm_average(void) { return deriv_norm_sum/deriv_num; };
RealD Fdt_max_average(void) { return Fdt_max_sum/deriv_num; };
RealD Fdt_max_average(void) { return Fdt_max_sum; };
RealD Fdt_norm_average(void) { return Fdt_norm_sum/deriv_num; };
RealD deriv_timer(void) { return deriv_us; };
RealD S_timer(void) { return S_us; };
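In words, the change above makes deriv_max_sum and Fdt_max_sum track the running maximum of the force over the trajectory rather than an accumulated sum, so the *_max_average accessors now report the peak value while the *_norm_average accessors remain true averages. A simplified standalone illustration, not part of the commit, with RealD replaced by double:

#include <algorithm>
#include <cstdio>

struct ForceLog {
  double deriv_max_sum = 0, deriv_norm_sum = 0;
  int    deriv_num = 0;
  void deriv_log(double nrm, double max) {
    deriv_max_sum   = std::max(deriv_max_sum, max); // running maximum, not a sum
    deriv_norm_sum += nrm;                          // the norm is still accumulated
    deriv_num++;
  }
  double deriv_max_average()  const { return deriv_max_sum; }              // peak force
  double deriv_norm_average() const { return deriv_norm_sum / deriv_num; } // mean norm
};

int main() {
  ForceLog log;
  log.deriv_log(1.0, 2.0);
  log.deriv_log(3.0, 1.5);
  std::printf("max %.1f mean %.1f\n", log.deriv_max_average(), log.deriv_norm_average()); // max 2.0 mean 2.0
  return 0;
}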

View File

@ -34,7 +34,7 @@ directory
NAMESPACE_BEGIN(Grid);
// These can move into a params header and be given MacroMagic serialisation
struct GparityWilsonImplParams {
Coordinate twists;
//mu=Nd-1 is assumed to be the time direction and a twist value of 1 indicates antiperiodic BCs
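As a hedged illustration of that convention, not part of this diff and assuming the usual Grid context with Nd = 4 (so mu = 3 is the time direction), a twist vector that is antiperiodic only in time could be set up as:

// Illustrative values only.
GparityWilsonImplParams gp;
gp.twists = Coordinate(Nd, 0);   // periodic in every direction...
gp.twists[Nd-1] = 1;             // ...except a twist of 1 in time => antiperiodic temporal BC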

View File

@ -36,11 +36,16 @@ NAMESPACE_BEGIN(Grid);
// Wilson compressor will need FaceGather policies for:
// Periodic, Dirichlet, and partial Dirichlet for DWF
///////////////////////////////////////////////////////////////
const int dwf_compressor_depth=1;
#define DWF_COMPRESS
class FaceGatherPartialDWF
{
public:
static int PartialCompressionFactor(GridBase *grid) {return grid->_fdimensions[0]/2;};
// static int PartialCompressionFactor(GridBase *grid) { return 1;}
#ifdef DWF_COMPRESS
static int PartialCompressionFactor(GridBase *grid) {return grid->_fdimensions[0]/(2*dwf_compressor_depth);};
#else
static int PartialCompressionFactor(GridBase *grid) { return 1;}
#endif
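The factor above quantifies how much the partial-Dirichlet compressor shrinks the comms buffers: with local fifth-dimension extent Ls and compressor depth d, only the outermost d s-slices at each end are communicated, so the face buffer shrinks by Ls/(2d). A standalone arithmetic sketch, not part of the commit:

#include <cstdio>

// Ls = local 5th-dimension extent, depth = dwf_compressor_depth.
int PartialCompressionFactor(int Ls, int depth) { return Ls / (2 * depth); }

int main() {
  // e.g. Ls = 12, depth = 1: only 2 of the 12 s-slices cross the face => buffers shrink 6x
  std::printf("factor = %d\n", PartialCompressionFactor(12, 1));
  return 0;
}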
template<class vobj,class cobj,class compressor>
static void Gather_plane_simple (commVector<std::pair<int,int> >& table,
const Lattice<vobj> &rhs,
@ -52,20 +57,32 @@ public:
// Shrinks local and remote comms buffers
GridBase *Grid = rhs.Grid();
int Ls = Grid->_rdimensions[0];
#ifdef DWF_COMPRESS
int depth=dwf_compressor_depth;
#else
int depth=Ls/2;
#endif
std::pair<int,int> *table_v = & table[0];
auto rhs_v = rhs.View(AcceleratorRead);
int vol=table.size()/Ls;
accelerator_forNB( idx,table.size(), vobj::Nsimd(), {
Integer i=idx/Ls;
Integer s=idx%Ls;
if(s==0) compress.Compress(buffer[off+i ],rhs_v[so+table_v[idx].second]);
if(s==Ls-1) compress.Compress(buffer[off+i+vol],rhs_v[so+table_v[idx].second]);
Integer sc=depth+s-(Ls-depth);
if(s<depth) compress.Compress(buffer[off+i+s*vol],rhs_v[so+table_v[idx].second]);
if(s>=Ls-depth) compress.Compress(buffer[off+i+sc*vol],rhs_v[so+table_v[idx].second]);
});
rhs_v.ViewClose();
}
template<class decompressor,class Decompression>
static void DecompressFace(decompressor decompress,Decompression &dd)
{
auto Ls = dd.dims[0];
#ifdef DWF_COMPRESS
int depth=dwf_compressor_depth;
#else
int depth=Ls/2;
#endif
// Just pass in the Grid
auto kp = dd.kernel_p;
auto mp = dd.mpi_p;
@ -74,11 +91,12 @@ public:
accelerator_forNB(o,size,1,{
int idx=o/Ls;
int s=o%Ls;
if ( s == 0 ) {
int oo=idx;
if ( s < depth ) {
int oo=s*vol+idx;
kp[o]=mp[oo];
} else if ( s == Ls-1 ) {
int oo=vol+idx;
} else if ( s >= Ls-depth ) {
int sc = depth + s - (Ls-depth);
int oo=sc*vol+idx;
kp[o]=mp[oo];
} else {
kp[o] = Zero();//fill rest with zero if partial dirichlet
@ -97,7 +115,12 @@ public:
{
GridBase *Grid = rhs.Grid();
int Ls = Grid->_rdimensions[0];
#ifdef DWF_COMPRESS
int depth=dwf_compressor_depth;
#else
int depth = Ls/2;
#endif
// insertion of zeroes...
assert( (table.size()&0x1)==0);
int num=table.size()/2;
@ -112,7 +135,7 @@ public:
// Reorders both local and remote comms buffers
//
int s = j % Ls;
int sp1 = (s+1)%Ls; // peri incremented s slice
int sp1 = (s+depth)%Ls; // peri incremented s slice
int hxyz= j/Ls;
@ -135,20 +158,25 @@ public:
static void MergeFace(decompressor decompress,Merger &mm)
{
auto Ls = mm.dims[0];
#ifdef DWF_COMPRESS
int depth=dwf_compressor_depth;
#else
int depth = Ls/2;
#endif
int num= mm.buffer_size/2; // relate vol and Ls to buffer size
auto mp = &mm.mpointer[0];
auto vp0= &mm.vpointers[0][0]; // First arg is exchange first
auto vp1= &mm.vpointers[1][0];
auto type= mm.type;
int nnum = num/Ls;
accelerator_forNB(o,num,vobj::Nsimd(),{
accelerator_forNB(o,num,Merger::Nsimd,{
int s=o%Ls;
int hxyz=o/Ls; // xyzt related component
int xyz0=hxyz*2;
int xyz1=hxyz*2+1;
int sp = (s+1)%Ls;
int sp = (s+depth)%Ls;
int jj= hxyz + sp*nnum ; // 0,1,2,3 -> Ls-1 slice , 0-slice, 1-slice ....
int oo0= s+xyz0*Ls;
@ -162,7 +190,11 @@ public:
class FaceGatherDWFMixedBCs
{
public:
static int PartialCompressionFactor(GridBase *grid) {return grid->_fdimensions[0]/2;};
#ifdef DWF_COMPRESS
static int PartialCompressionFactor(GridBase *grid) {return grid->_fdimensions[0]/(2*dwf_compressor_depth);};
#else
static int PartialCompressionFactor(GridBase *grid) {return 1;}
#endif
template<class vobj,class cobj,class compressor>
static void Gather_plane_simple (commVector<std::pair<int,int> >& table,
@ -171,6 +203,7 @@ public:
compressor &compress,
int off,int so,int partial)
{
// std::cout << " face gather simple DWF partial "<<partial <<std::endl;
if(partial) FaceGatherPartialDWF::Gather_plane_simple(table,rhs,buffer,compress,off,so,partial);
else FaceGatherSimple::Gather_plane_simple(table,rhs,buffer,compress,off,so,partial);
}
@ -179,6 +212,7 @@ public:
Vector<cobj *> pointers,int dimension,int plane,int cbmask,
compressor &compress,int type,int partial)
{
// std::cout << " face gather exch DWF partial "<<partial <<std::endl;
if(partial) FaceGatherPartialDWF::Gather_plane_exchange(table,rhs,pointers,dimension, plane,cbmask,compress,type,partial);
else FaceGatherSimple::Gather_plane_exchange (table,rhs,pointers,dimension, plane,cbmask,compress,type,partial);
}
@ -186,6 +220,7 @@ public:
static void MergeFace(decompressor decompress,Merger &mm)
{
int partial = mm.partial;
// std::cout << " merge DWF partial "<<partial <<std::endl;
if ( partial ) FaceGatherPartialDWF::MergeFace(decompress,mm);
else FaceGatherSimple::MergeFace(decompress,mm);
}
@ -194,6 +229,7 @@ public:
static void DecompressFace(decompressor decompress,Decompression &dd)
{
int partial = dd.partial;
// std::cout << " decompress DWF partial "<<partial <<std::endl;
if ( partial ) FaceGatherPartialDWF::DecompressFace(decompress,dd);
else FaceGatherSimple::DecompressFace(decompress,dd);
}

View File

@ -96,6 +96,8 @@ WilsonFermion5D<Impl>::WilsonFermion5D(GaugeField &_Umu,
Coordinate block = p.dirichlet;
if ( block[0] || block[1] || block[2] || block[3] || block[4] ){
Dirichlet = 1;
std::cout << GridLogMessage << " WilsonFermion: non-trivial Dirichlet condition "<< block << std::endl;
std::cout << GridLogMessage << " WilsonFermion: partial Dirichlet "<< p.partialDirichlet << std::endl;
Block = block;
}
} else {
@ -137,9 +139,6 @@ WilsonFermion5D<Impl>::WilsonFermion5D(GaugeField &_Umu,
StencilEven.BuildSurfaceList(LLs,vol4);
StencilOdd.BuildSurfaceList(LLs,vol4);
// std::cout << GridLogMessage << " SurfaceLists "<< Stencil.surface_list.size()
// <<" " << StencilEven.surface_list.size()<<std::endl;
}
template<class Impl>
@ -148,21 +147,29 @@ void WilsonFermion5D<Impl>::ImportGauge(const GaugeField &_Umu)
GaugeField HUmu(_Umu.Grid());
HUmu = _Umu*(-0.5);
if ( Dirichlet ) {
if ( this->Params.partialDirichlet ) {
std::cout << GridLogMessage << " partialDirichlet BCs " <<Block<<std::endl;
} else {
std::cout << GridLogMessage << " FULL Dirichlet BCs " <<Block<<std::endl;
}
std:: cout << GridLogMessage << "Checking block size multiple of rank boundaries for Dirichlet"<<std::endl;
for(int d=0;d<Nd;d++) {
int GaugeBlock = Block[d+1];
int ldim=GaugeGrid()->LocalDimensions()[d];
if (GaugeBlock) assert( (GaugeBlock%ldim)==0);
}
}
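Concretely, the check above requires each non-zero Dirichlet block size to be a multiple of the local (per-rank) extent in that direction, so the block boundary coincides with an MPI rank boundary. A standalone sketch with made-up numbers, not part of the commit:

#include <cassert>

int main() {
  int ldim       = 16;   // local lattice extent on this rank in direction d
  int GaugeBlock = 32;   // Dirichlet block size in direction d (0 would skip the check)
  if (GaugeBlock) assert((GaugeBlock % ldim) == 0);   // 32 % 16 == 0: block edge sits on a rank boundary
  return 0;
}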
if ( Dirichlet && (!this->Params.partialDirichlet) ) {
std::cout << GridLogMessage << " Dirichlet filtering gauge field BCs block " <<Block<<std::endl;
Coordinate GaugeBlock(Nd);
for(int d=0;d<Nd;d++) GaugeBlock[d] = Block[d+1];
DirichletFilter<GaugeField> Filter(GaugeBlock);
Filter.applyFilter(HUmu);
} else {
std::cout << GridLogMessage << " Dirichlet "<< Dirichlet << " not filtered gauge field" <<std::endl;
if (!this->Params.partialDirichlet) {
std::cout << GridLogMessage << " Dirichlet filtering gauge field BCs block " <<Block<<std::endl;
Coordinate GaugeBlock(Nd);
for(int d=0;d<Nd;d++) GaugeBlock[d] = Block[d+1];
DirichletFilter<GaugeField> Filter(GaugeBlock);
Filter.applyFilter(HUmu);
} else {
std::cout << GridLogMessage << " Dirichlet "<< Dirichlet << " NOT filtered gauge field" <<std::endl;
}
}
Impl::DoubleStore(GaugeGrid(),Umu,HUmu);
pickCheckerboard(Even,UmuEven,Umu);

View File

@ -279,6 +279,7 @@ NAMESPACE_BEGIN(Grid);
// S_f = chi^dag* P(V^dag*V)/Q(V^dag*V)* N(M^dag*M)/D(M^dag*M)* P(V^dag*V)/Q(V^dag*V)* chi
//////////////////////////////////////////////////////
virtual RealD Sinitial(const GaugeField &U) {
std::cout << GridLogMessage << "Returning stored two flavour refresh action "<<RefreshAction<<std::endl;
return RefreshAction;
}

View File

@ -38,7 +38,7 @@ NAMESPACE_BEGIN(Grid);
class TwoFlavourEvenOddRatioPseudoFermionAction : public Action<typename Impl::GaugeField> {
public:
INHERIT_IMPL_TYPES(Impl);
private:
FermionOperator<Impl> & NumOp;// the basic operator
FermionOperator<Impl> & DenOp;// the basic operator
@ -112,28 +112,48 @@ NAMESPACE_BEGIN(Grid);
// NumOp == V
// DenOp == M
//
AUDIT();
FermionField etaOdd (NumOp.FermionRedBlackGrid());
FermionField etaEven(NumOp.FermionRedBlackGrid());
FermionField tmp (NumOp.FermionRedBlackGrid());
AUDIT();
pickCheckerboard(Even,etaEven,eta);
AUDIT();
pickCheckerboard(Odd,etaOdd,eta);
AUDIT();
NumOp.ImportGauge(U);
AUDIT();
DenOp.ImportGauge(U);
std::cout << " TwoFlavourRefresh: Imported gauge "<<std::endl;
AUDIT();
SchurDifferentiableOperator<Impl> Mpc(DenOp);
AUDIT();
SchurDifferentiableOperator<Impl> Vpc(NumOp);
AUDIT();
std::cout << " TwoFlavourRefresh: Diff ops "<<std::endl;
AUDIT();
// Odd det factors
Mpc.MpcDag(etaOdd,PhiOdd);
AUDIT();
std::cout << " TwoFlavourRefresh: MpcDag "<<std::endl;
tmp=Zero();
AUDIT();
std::cout << " TwoFlavourRefresh: Zero() guess "<<std::endl;
AUDIT();
HeatbathSolver(Vpc,PhiOdd,tmp);
AUDIT();
std::cout << " TwoFlavourRefresh: Heatbath solver "<<std::endl;
Vpc.Mpc(tmp,PhiOdd);
std::cout << " TwoFlavourRefresh: Mpc "<<std::endl;
// Even det factors
DenOp.MooeeDag(etaEven,tmp);
NumOp.MooeeInvDag(tmp,PhiEven);
std::cout << " TwoFlavourRefresh: Mee "<<std::endl;
RefreshAction = norm2(etaEven)+norm2(etaOdd);
std::cout << " refresh " <<action_name()<< " action "<<RefreshAction<<std::endl;
@ -142,6 +162,10 @@ NAMESPACE_BEGIN(Grid);
//////////////////////////////////////////////////////
// S = phi^dag V (Mdag M)^-1 Vdag phi
//////////////////////////////////////////////////////
virtual RealD Sinitial(const GaugeField &U) {
std::cout << GridLogMessage << "Returning stored two flavour refresh action "<<RefreshAction<<std::endl;
return RefreshAction;
}
virtual RealD S(const GaugeField &U) {
NumOp.ImportGauge(U);

View File

@ -47,7 +47,7 @@ private:
const unsigned int N = Impl::Group::Dimension;
typedef typename Field::vector_object vobj;
typedef CartesianStencil<vobj, vobj,int> Stencil;
typedef CartesianStencil<vobj, vobj,DefaultImplParams> Stencil;
SimpleCompressor<vobj> compressor;
int npoint = 2 * Ndim;
@ -82,7 +82,7 @@ public:
virtual RealD S(const Field &p)
{
assert(p.Grid()->Nd() == Ndim);
static Stencil phiStencil(p.Grid(), npoint, 0, directions, displacements,0);
static Stencil phiStencil(p.Grid(), npoint, 0, directions, displacements);
phiStencil.HaloExchange(p, compressor);
Field action(p.Grid()), pshift(p.Grid()), phisquared(p.Grid());
phisquared = p * p;
@ -133,7 +133,7 @@ public:
double interm_t = usecond();
// move this outside
static Stencil phiStencil(p.Grid(), npoint, 0, directions, displacements,0);
static Stencil phiStencil(p.Grid(), npoint, 0, directions, displacements);
phiStencil.HaloExchange(p, compressor);
double halo_t = usecond();

View File

@ -143,6 +143,7 @@ private:
GridBase *Grid = U.Grid();
if(Params.PerformRandomShift){
#if 0
//////////////////////////////////////////////////////////////////////////////////////////////////////
// Mainly for DDHMC perform a random translation of U modulo volume
//////////////////////////////////////////////////////////////////////////////////////////////////////
@ -167,11 +168,11 @@ private:
//shift all fields together in a way that respects the gauge BCs
for(int mu=0; mu < Grid->Nd(); mu++)
Umu[mu] = FieldImplementation::CshiftLink(Umu[mu],d,shift);
}
for(int mu=0;mu<Grid->Nd();mu++) PokeIndex<LorentzIndex>(U,Umu[mu],mu);
for(int mu=0;mu<Grid->Nd();mu++) PokeIndex<LorentzIndex>(U,Umu[mu],mu);
}
std::cout << GridLogMessage << "--------------------------------------------------\n";
#endif
}
TheIntegrator.reset_timer();

View File

@ -132,10 +132,17 @@ protected:
Field& Us = Smearer.get_U(as[level].actions.at(a)->is_smeared);
double start_force = usecond();
std::cout << GridLogMessage << "AuditForce["<<level<<"]["<<a<<"] before"<<std::endl;
AUDIT();
as[level].actions.at(a)->deriv_timer_start();
as[level].actions.at(a)->deriv(Us, force); // deriv should NOT include Ta
as[level].actions.at(a)->deriv_timer_stop();
std::cout << GridLogMessage << "AuditForce["<<level<<"]["<<a<<"] after"<<std::endl;
AUDIT();
std::cout << GridLogIntegrator << "Smearing (on/off): " << as[level].actions.at(a)->is_smeared << std::endl;
auto name = as[level].actions.at(a)->action_name();
if (as[level].actions.at(a)->is_smeared) Smearer.smeared_force(force);
@ -284,7 +291,7 @@ public:
for (int actionID = 0; actionID < as[level].actions.size(); ++actionID) {
std::cout << GridLogMessage
<< as[level].actions.at(actionID)->action_name()
<<"["<<level<<"]["<< actionID<<"] : "
<<"["<<level<<"]["<< actionID<<"] :\n\t\t "
<<" force max " << as[level].actions.at(actionID)->deriv_max_average()
<<" norm " << as[level].actions.at(actionID)->deriv_norm_average()
<<" Fdt max " << as[level].actions.at(actionID)->Fdt_max_average()
@ -364,9 +371,14 @@ public:
std::cout << GridLogMessage << "refresh [" << level << "][" << actionID << "] "<<name << std::endl;
Field& Us = Smearer.get_U(as[level].actions.at(actionID)->is_smeared);
std::cout << GridLogMessage << "AuditRefresh["<<level<<"]["<<actionID<<"] before"<<std::endl;
AUDIT();
as[level].actions.at(actionID)->refresh_timer_start();
as[level].actions.at(actionID)->refresh(Us, sRNG, pRNG);
as[level].actions.at(actionID)->refresh_timer_stop();
std::cout << GridLogMessage << "AuditRefresh["<<level<<"]["<<actionID<<"] after"<<std::endl;
AUDIT();
}
// Refresh the higher representation actions
@ -403,6 +415,7 @@ public:
// Actions
for (int level = 0; level < as.size(); ++level) {
for (int actionID = 0; actionID < as[level].actions.size(); ++actionID) {
AUDIT();
// get gauge field from the SmearingPolicy and
// based on the boolean is_smeared in actionID
Field& Us = Smearer.get_U(as[level].actions.at(actionID)->is_smeared);
@ -412,6 +425,7 @@ public:
as[level].actions.at(actionID)->S_timer_stop();
std::cout << GridLogMessage << "S [" << level << "][" << actionID << "] H = " << Hterm << std::endl;
H += Hterm;
AUDIT();
}
as[level].apply(S_hireps, Representations, level, H);
}
@ -424,7 +438,9 @@ public:
void operator()(std::vector<Action<FieldType>*> repr_set, Repr& Rep, int level, RealD& H) {
for (int a = 0; a < repr_set.size(); ++a) {
AUDIT();
RealD Hterm = repr_set.at(a)->Sinitial(Rep.U);
AUDIT();
std::cout << GridLogMessage << "Sinitial Level " << level << " term " << a << " H Hirep = " << Hterm << std::endl;
H += Hterm;
@ -449,8 +465,10 @@ public:
Field& Us = Smearer.get_U(as[level].actions.at(actionID)->is_smeared);
std::cout << GridLogMessage << "S [" << level << "][" << actionID << "] action eval " << std::endl;
as[level].actions.at(actionID)->S_timer_start();
AUDIT();
Hterm = as[level].actions.at(actionID)->Sinitial(Us);
as[level].actions.at(actionID)->S_timer_stop();
AUDIT();
std::cout << GridLogMessage << "S [" << level << "][" << actionID << "] H = " << Hterm << std::endl;
H += Hterm;
}
@ -463,6 +481,7 @@ public:
void integrate(Field& U)
{
AUDIT();
// reset the clocks
t_U = 0;
for (int level = 0; level < as.size(); ++level) {
@ -480,8 +499,10 @@ public:
assert(fabs(t_U - t_P[level]) < 1.0e-6); // must be the same
std::cout << GridLogIntegrator << " times[" << level << "]= " << t_P[level] << " " << t_U << std::endl;
}
AUDIT();
FieldImplementation::Project(U);
AUDIT();
// and that we indeed got to the end of the trajectory
assert(fabs(t_U - Params.trajL) < 1.0e-6);

View File

@ -78,13 +78,13 @@ static Registrar<OneFlavourRatioEOFModule<FermionImplementationPolicy>,
// Now a specific registration with a fermion field
// here must instantiate CG and CR for every new fermion field type (macro!!)
static Registrar< ConjugateGradientModule<WilsonFermionR::FermionField>,
HMC_SolverModuleFactory<solver_string, WilsonFermionR::FermionField, Serialiser> > __CGWFmodXMLInit("ConjugateGradient");
static Registrar< ConjugateGradientModule<WilsonFermionD::FermionField>,
HMC_SolverModuleFactory<solver_string, WilsonFermionD::FermionField, Serialiser> > __CGWFmodXMLInit("ConjugateGradient");
static Registrar< BiCGSTABModule<WilsonFermionR::FermionField>,
HMC_SolverModuleFactory<solver_string, WilsonFermionR::FermionField, Serialiser> > __BiCGWFmodXMLInit("BiCGSTAB");
static Registrar< ConjugateResidualModule<WilsonFermionR::FermionField>,
HMC_SolverModuleFactory<solver_string, WilsonFermionR::FermionField, Serialiser> > __CRWFmodXMLInit("ConjugateResidual");
static Registrar< BiCGSTABModule<WilsonFermionD::FermionField>,
HMC_SolverModuleFactory<solver_string, WilsonFermionD::FermionField, Serialiser> > __BiCGWFmodXMLInit("BiCGSTAB");
static Registrar< ConjugateResidualModule<WilsonFermionD::FermionField>,
HMC_SolverModuleFactory<solver_string, WilsonFermionD::FermionField, Serialiser> > __CRWFmodXMLInit("ConjugateResidual");
// add the staggered, scalar versions here

View File

@ -73,7 +73,7 @@ public:
auto vp0= &mm.vpointers[0][0];
auto vp1= &mm.vpointers[1][0];
auto type= mm.type;
accelerator_forNB(o,mm.buffer_size/2,Merger::Nsimd(),{
accelerator_forNB(o,mm.buffer_size/2,Merger::Nsimd,{
decompress.Exchange(mp[2*o],mp[2*o+1],vp0[o],vp1[o],type);
});
}

View File

@ -52,6 +52,16 @@
NAMESPACE_BEGIN(Grid);
// These can move into a params header and be given MacroMagic serialisation
struct DefaultImplParams {
Coordinate dirichlet; // Blocksize of dirichlet BCs
int partialDirichlet;
DefaultImplParams() {
dirichlet.resize(0);
partialDirichlet=0;
};
};
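The new struct gives every stencil a set of implementation parameters with safe defaults (no Dirichlet blocks, no partial Dirichlet), and further down the CartesianStencil constructor gains Parameters p=Parameters(), so call sites that used to pass a trailing 0 simply drop it. A simplified standalone sketch of that pattern, not part of the commit and with illustrative names rather than Grid's:

#include <iostream>
#include <vector>

struct Params {                      // stands in for DefaultImplParams
  std::vector<int> dirichlet;        // Dirichlet block sizes; empty => no Dirichlet BCs
  int partialDirichlet = 0;
};

struct Stencil {
  Stencil(int npoint, Params p = Params())          // defaulted trailing parameter
  { std::cout << "npoint=" << npoint << " partial=" << p.partialDirichlet << "\n"; }
};

int main() {
  Stencil s(9);   // old call sites passed an extra ",0" here; now the default is used
  return 0;
}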
///////////////////////////////////////////////////////////////////
// Gather for when there *is* need to SIMD split with compression
///////////////////////////////////////////////////////////////////
@ -193,7 +203,7 @@ class CartesianStencilAccelerator {
template<class vobj,class cobj,class Parameters>
class CartesianStencilView : public CartesianStencilAccelerator<vobj,cobj,Parameters>
{
private:
public:
int *closed;
StencilEntry *cpu_ptr;
ViewMode mode;
@ -666,6 +676,8 @@ public:
int block = dirichlet_block[dimension];
this->_comms_send[ii] = comm_dim;
this->_comms_recv[ii] = comm_dim;
this->_comms_partial_send[ii] = 0;
this->_comms_partial_recv[ii] = 0;
if ( block && comm_dim ) {
assert(abs(displacement) < ld );
// Quiesce communication across block boundaries
@ -698,7 +710,7 @@ public:
int checkerboard,
const std::vector<int> &directions,
const std::vector<int> &distances,
Parameters p)
Parameters p=Parameters())
{
face_table_computed=0;
_grid = grid;
@ -1121,6 +1133,7 @@ public:
send_buf = this->u_send_buf_p; // Gather locally, must send
assert(send_buf!=NULL);
// std::cout << " GatherPlaneSimple partial send "<< comms_partial_send<<std::endl;
compressor::Gather_plane_simple(face_table[face_idx],rhs,send_buf,compress,comm_off,so,comms_partial_send);
int duplicate = CheckForDuplicate(dimension,sx,comm_proc,(void *)&recv_buf[comm_off],0,xbytes,rbytes,cbmask);

View File

@ -143,7 +143,7 @@ NAMESPACE_BEGIN(Grid);
typedef ComplexD DoublePrecision2;
};
#ifdef GRID_CUDA
#if defined(GRID_CUDA) || defined(GRID_HIP)
template<> struct GridTypeMapper<std::complex<float> > : public GridTypeMapper_Base {
typedef std::complex<float> scalar_type;
typedef std::complex<double> scalar_typeD;

View File

@ -179,8 +179,11 @@ int main(int argc, char **argv) {
MD.name = std::string("Force Gradient");
//typedef GenericHMCRunner<MinimumNorm2> HMCWrapper;
// MD.name = std::string("MinimumNorm2");
// MD.MDsteps = 4;
MD.MDsteps = 4;
// TrajL = 2
// 4/2 => 0.6 dH
// 3/3 => 0.8 dH .. depth 3, slower
//MD.MDsteps = 4;
MD.MDsteps = 3;
MD.trajL = 0.5;
HMCparameters HMCparams;
@ -220,13 +223,15 @@ int main(int argc, char **argv) {
RealD c = 0.5;
Real beta = 2.13;
// Real light_mass = 5.4e-4;
Real light_mass = 7.8e-4;
Real light_mass = 7.8e-4;
Real light_mass_dir = 0.01;
Real strange_mass = 0.0362;
Real pv_mass = 1.0;
std::vector<Real> hasenbusch({ light_mass, 3.8e-3, 0.0145, 0.045, 0.108, 0.25, 0.51 , pv_mass });
std::vector<Real> hasenbusch({ 0.01, 0.045, 0.108, 0.25, 0.51 , pv_mass });
// std::vector<Real> hasenbusch({ light_mass, 0.01, 0.045, 0.108, 0.25, 0.51 , pv_mass });
// std::vector<Real> hasenbusch({ light_mass, 0.005, 0.0145, 0.045, 0.108, 0.25, 0.51 , pv_mass }); // Updated
// std::vector<Real> hasenbusch({ light_mass, 0.0145, 0.045, 0.108, 0.25, 0.51 , 0.75 , pv_mass });
OneFlavourRationalParams OFRp; // Up/down
OFRp.lo = 4.0e-5;
OFRp.hi = 90.0;
@ -235,18 +240,24 @@ int main(int argc, char **argv) {
OFRp.mdtolerance= 1.0e-3;
// OFRp.degree = 20; converges
// OFRp.degree = 16;
OFRp.degree = 12;
OFRp.degree = 18;
OFRp.precision= 80;
OFRp.BoundsCheckFreq=0;
std::vector<RealD> ActionTolByPole({
1.0e-8,1.0e-8,1.0e-8,1.0e-8,
1.0e-8,1.0e-8,1.0e-8,1.0e-8,
1.0e-8,1.0e-8,1.0e-8,1.0e-8
1.0e-8,1.0e-8,1.0e-8,1.0e-8,
1.0e-8,1.0e-8,1.0e-8,1.0e-8,
1.0e-8,1.0e-8
});
std::vector<RealD> MDTolByPole({
1.0e-6,3.0e-7,1.0e-7,1.0e-7,
1.0e-5,5.0e-6,1.0e-6,1.0e-7, // soften convergence more
// 1.0e-6,3.0e-7,1.0e-7,1.0e-7,
// 3.0e-6,1.0e-6,1.0e-7,1.0e-7, // soften convergence
1.0e-8,1.0e-8,1.0e-8,1.0e-8,
1.0e-8,1.0e-8,1.0e-8,1.0e-8
1.0e-8,1.0e-8,1.0e-8,1.0e-8,
1.0e-8,1.0e-8,1.0e-8,1.0e-8,
1.0e-8,1.0e-8
});
auto GridPtr = TheHMC.Resources.GetCartesian();
@ -327,6 +338,8 @@ int main(int argc, char **argv) {
ParamsF.dirichlet=NonDirichlet;
ParamsDir.dirichlet=Dirichlet;
ParamsDirF.dirichlet=Dirichlet;
ParamsDir.partialDirichlet=1;
ParamsDirF.partialDirichlet=1;
// double StoppingCondition = 1e-14;
// double MDStoppingCondition = 1e-9;
@ -342,8 +355,8 @@ int main(int argc, char **argv) {
// Collect actions
////////////////////////////////////
ActionLevel<HMCWrapper::Field> Level1(1);
ActionLevel<HMCWrapper::Field> Level2(2);
ActionLevel<HMCWrapper::Field> Level3(30);
ActionLevel<HMCWrapper::Field> Level2(3);
ActionLevel<HMCWrapper::Field> Level3(15);
////////////////////////////////////
// Strange action
@ -474,13 +487,21 @@ int main(int argc, char **argv) {
if ( dirichlet_den[h]==1) ParamsDen.dirichlet = Dirichlet;
else ParamsDen.dirichlet = NonDirichlet;
if ( dirichlet_num[h]==1) ParamsNum.partialDirichlet = 1;
else ParamsNum.partialDirichlet = 0;
if ( dirichlet_den[h]==1) ParamsDen.partialDirichlet = 1;
else ParamsDen.partialDirichlet = 0;
Numerators.push_back (new FermionAction(U,*FGrid,*FrbGrid,*GridPtr,*GridRBPtr,light_num[h],M5,b,c, ParamsNum));
Denominators.push_back(new FermionAction(U,*FGrid,*FrbGrid,*GridPtr,*GridRBPtr,light_den[h],M5,b,c, ParamsDen));
ParamsDenF.dirichlet = ParamsDen.dirichlet;
ParamsDenF.partialDirichlet = ParamsDen.partialDirichlet;
DenominatorsF.push_back(new FermionActionF(UF,*FGridF,*FrbGridF,*GridPtrF,*GridRBPtrF,light_den[h],M5,b,c, ParamsDenF));
ParamsNumF.dirichlet = ParamsNum.dirichlet;
ParamsNumF.partialDirichlet = ParamsNum.partialDirichlet;
NumeratorsF.push_back (new FermionActionF(UF,*FGridF,*FrbGridF,*GridPtrF,*GridRBPtrF,light_num[h],M5,b,c, ParamsNumF));
LinOpD.push_back(new LinearOperatorD(*Denominators[h]));
@ -516,21 +537,23 @@ int main(int argc, char **argv) {
FermionActionD2::ImplParams ParamsNumD2(boundary);
ParamsDenD2.dirichlet = ParamsDen.dirichlet;
ParamsDenD2.partialDirichlet = ParamsDen.partialDirichlet;
DenominatorsD2.push_back(new FermionActionD2(UD2,*FGridF,*FrbGridF,*GridPtrF,*GridRBPtrF,light_den[h],M5,b,c, ParamsDenD2));
ParamsNumD2.dirichlet = ParamsNum.dirichlet;
ParamsNumD2.partialDirichlet = ParamsNum.partialDirichlet;
NumeratorsD2.push_back (new FermionActionD2(UD2,*FGridF,*FrbGridF,*GridPtrF,*GridRBPtrF,light_num[h],M5,b,c, ParamsNumD2));
Bdys.push_back( new OneFlavourEvenOddRatioRationalMixedPrecPseudoFermionAction<FermionImplPolicy,FermionImplPolicyF,FermionImplPolicyD2>(
*Numerators[h],*Denominators[h],
*NumeratorsF[h],*DenominatorsF[h],
*NumeratorsD2[h],*DenominatorsD2[h],
OFRp, 200) );
OFRp, 400) );
Bdys.push_back( new OneFlavourEvenOddRatioRationalMixedPrecPseudoFermionAction<FermionImplPolicy,FermionImplPolicyF,FermionImplPolicyD2>(
*Numerators[h],*Denominators[h],
*NumeratorsF[h],*DenominatorsF[h],
*NumeratorsD2[h],*DenominatorsD2[h],
OFRp, 200) );
OFRp, 400) );
#else
Bdys.push_back( new OneFlavourEvenOddRatioRationalPseudoFermionAction<FermionImplPolicy>(*Numerators[h],*Denominators[h],OFRp));
Bdys.push_back( new OneFlavourEvenOddRatioRationalPseudoFermionAction<FermionImplPolicy>(*Numerators[h],*Denominators[h],OFRp));
@ -543,7 +566,8 @@ int main(int argc, char **argv) {
int nquo=Quotients.size();
Level1.push_back(Bdys[0]);
Level1.push_back(Bdys[1]);
for(int h=0;h<nquo-1;h++){
Level2.push_back(Quotients[0]);
for(int h=1;h<nquo-1;h++){
Level2.push_back(Quotients[h]);
}
Level2.push_back(Quotients[nquo-1]);

View File

@ -0,0 +1,492 @@
/*************************************************************************************
Grid physics library, www.github.com/paboyle/Grid
Source file: ./tests/Test_hmc_EODWFRatio.cc
Copyright (C) 2015-2016
Author: Peter Boyle <pabobyle@ph.ed.ac.uk>
Author: Guido Cossu <guido.cossu@ed.ac.uk>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
See the full license in the file "LICENSE" in the top level distribution
directory
*************************************************************************************/
/* END LEGAL */
#include <Grid/Grid.h>
NAMESPACE_BEGIN(Grid);
template<class FermionOperatorD, class FermionOperatorF, class SchurOperatorD, class SchurOperatorF>
class MixedPrecisionConjugateGradientOperatorFunction : public OperatorFunction<typename FermionOperatorD::FermionField> {
public:
typedef typename FermionOperatorD::FermionField FieldD;
typedef typename FermionOperatorF::FermionField FieldF;
using OperatorFunction<FieldD>::operator();
RealD Tolerance;
RealD InnerTolerance; //Initial tolerance for inner CG. Defaults to Tolerance but can be changed
Integer MaxInnerIterations;
Integer MaxOuterIterations;
GridBase* SinglePrecGrid4; //Grid for single-precision fields
GridBase* SinglePrecGrid5; //Grid for single-precision fields
RealD OuterLoopNormMult; //Stop the outer loop and move to a final double prec solve when the residual is OuterLoopNormMult * Tolerance
FermionOperatorF &FermOpF;
FermionOperatorD &FermOpD;;
SchurOperatorF &LinOpF;
SchurOperatorD &LinOpD;
Integer TotalInnerIterations; //Number of inner CG iterations
Integer TotalOuterIterations; //Number of restarts
Integer TotalFinalStepIterations; //Number of CG iterations in final patch-up step
MixedPrecisionConjugateGradientOperatorFunction(RealD tol,
Integer maxinnerit,
Integer maxouterit,
GridBase* _sp_grid4,
GridBase* _sp_grid5,
FermionOperatorF &_FermOpF,
FermionOperatorD &_FermOpD,
SchurOperatorF &_LinOpF,
SchurOperatorD &_LinOpD):
LinOpF(_LinOpF),
LinOpD(_LinOpD),
FermOpF(_FermOpF),
FermOpD(_FermOpD),
Tolerance(tol),
InnerTolerance(tol),
MaxInnerIterations(maxinnerit),
MaxOuterIterations(maxouterit),
SinglePrecGrid4(_sp_grid4),
SinglePrecGrid5(_sp_grid5),
OuterLoopNormMult(100.)
{
/* Debugging instances of objects; references are stored
std::cout << GridLogMessage << " Mixed precision CG wrapper LinOpF " <<std::hex<< &LinOpF<<std::dec <<std::endl;
std::cout << GridLogMessage << " Mixed precision CG wrapper LinOpD " <<std::hex<< &LinOpD<<std::dec <<std::endl;
std::cout << GridLogMessage << " Mixed precision CG wrapper FermOpF " <<std::hex<< &FermOpF<<std::dec <<std::endl;
std::cout << GridLogMessage << " Mixed precision CG wrapper FermOpD " <<std::hex<< &FermOpD<<std::dec <<std::endl;
*/
};
void operator()(LinearOperatorBase<FieldD> &LinOpU, const FieldD &src, FieldD &psi) {
std::cout << GridLogMessage << " Mixed precision CG wrapper operator() "<<std::endl;
SchurOperatorD * SchurOpU = static_cast<SchurOperatorD *>(&LinOpU);
// std::cout << GridLogMessage << " Mixed precision CG wrapper operator() FermOpU " <<std::hex<< &(SchurOpU->_Mat)<<std::dec <<std::endl;
// std::cout << GridLogMessage << " Mixed precision CG wrapper operator() FermOpD " <<std::hex<< &(LinOpD._Mat) <<std::dec <<std::endl;
// Assumption made in code to extract gauge field
// We could avoid storing the LinOpD reference altogether?
assert(&(SchurOpU->_Mat)==&(LinOpD._Mat));
////////////////////////////////////////////////////////////////////////////////////
// Must snarf a single precision copy of the gauge field in Linop_d argument
////////////////////////////////////////////////////////////////////////////////////
typedef typename FermionOperatorF::GaugeField GaugeFieldF;
typedef typename FermionOperatorF::GaugeLinkField GaugeLinkFieldF;
typedef typename FermionOperatorD::GaugeField GaugeFieldD;
typedef typename FermionOperatorD::GaugeLinkField GaugeLinkFieldD;
GridBase * GridPtrF = SinglePrecGrid4;
GridBase * GridPtrD = FermOpD.Umu.Grid();
GaugeFieldF U_f (GridPtrF);
GaugeLinkFieldF Umu_f(GridPtrF);
// std::cout << " Dim gauge field "<<GridPtrF->Nd()<<std::endl; // 4d
// std::cout << " Dim gauge field "<<GridPtrD->Nd()<<std::endl; // 4d
////////////////////////////////////////////////////////////////////////////////////
// Moving this to a Clone method of fermion operator would allow to duplicate the
// physics parameters and decrease gauge field copies
////////////////////////////////////////////////////////////////////////////////////
GaugeLinkFieldD Umu_d(GridPtrD);
for(int mu=0;mu<Nd*2;mu++){
Umu_d = PeekIndex<LorentzIndex>(FermOpD.Umu, mu);
precisionChange(Umu_f,Umu_d);
PokeIndex<LorentzIndex>(FermOpF.Umu, Umu_f, mu);
}
pickCheckerboard(Even,FermOpF.UmuEven,FermOpF.Umu);
pickCheckerboard(Odd ,FermOpF.UmuOdd ,FermOpF.Umu);
////////////////////////////////////////////////////////////////////////////////////
// Make a mixed precision conjugate gradient
////////////////////////////////////////////////////////////////////////////////////
#if 1
RealD delta=1.e-4;
std::cout << GridLogMessage << "Calling reliable update Conjugate Gradient" <<std::endl;
ConjugateGradientReliableUpdate<FieldD,FieldF> MPCG(Tolerance,MaxInnerIterations*MaxOuterIterations,delta,SinglePrecGrid5,LinOpF,LinOpD);
#else
std::cout << GridLogMessage << "Calling mixed precision Conjugate Gradient" <<std::endl;
MixedPrecisionConjugateGradient<FieldD,FieldF> MPCG(Tolerance,MaxInnerIterations,MaxOuterIterations,SinglePrecGrid5,LinOpF,LinOpD);
#endif
MPCG(src,psi);
}
};
NAMESPACE_END(Grid);
int main(int argc, char **argv) {
using namespace Grid;
Grid_init(&argc, &argv);
CartesianCommunicator::BarrierWorld();
std::cout << GridLogMessage << " Clock skew check" <<std::endl;
int threads = GridThread::GetThreads();
// Typedefs to simplify notation
typedef WilsonImplD FermionImplPolicy;
typedef MobiusFermionD FermionAction;
typedef MobiusEOFAFermionD FermionEOFAAction;
typedef typename FermionAction::FermionField FermionField;
typedef WilsonImplF FermionImplPolicyF;
typedef MobiusFermionF FermionActionF;
typedef MobiusEOFAFermionF FermionEOFAActionF;
typedef typename FermionActionF::FermionField FermionFieldF;
typedef WilsonImplD2 FermionImplPolicyD2;
typedef MobiusFermionD2 FermionActionD2;
typedef MobiusEOFAFermionD2 FermionEOFAActionD2;
typedef typename FermionActionD2::FermionField FermionFieldD2;
typedef Grid::XmlReader Serialiser;
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
IntegratorParameters MD;
// typedef GenericHMCRunner<LeapFrog> HMCWrapper;
// MD.name = std::string("Leap Frog");
typedef GenericHMCRunner<ForceGradient> HMCWrapper;
MD.name = std::string("Force Gradient");
//typedef GenericHMCRunner<MinimumNorm2> HMCWrapper;
// MD.name = std::string("MinimumNorm2");
// TrajL = 2
// 4/2 => 0.6 dH
// 3/3 => 0.8 dH .. depth 3, slower
//MD.MDsteps = 4;
MD.MDsteps = 3;
MD.trajL = 0.5;
HMCparameters HMCparams;
HMCparams.StartTrajectory = 1077;
HMCparams.Trajectories = 1;
HMCparams.NoMetropolisUntil= 0;
// "[HotStart, ColdStart, TepidStart, CheckpointStart]\n";
// HMCparams.StartingType =std::string("ColdStart");
HMCparams.StartingType =std::string("CheckpointStart");
HMCparams.MD = MD;
HMCWrapper TheHMC(HMCparams);
// Grid from the command line arguments --grid and --mpi
TheHMC.Resources.AddFourDimGrid("gauge"); // use default simd lanes decomposition
CheckpointerParameters CPparams;
CPparams.config_prefix = "ckpoint_DDHMC_lat";
CPparams.rng_prefix = "ckpoint_DDHMC_rng";
CPparams.saveInterval = 1;
CPparams.format = "IEEE64BIG";
TheHMC.Resources.LoadNerscCheckpointer(CPparams);
std::cout << "loaded NERSC checpointer"<<std::endl;
RNGModuleParameters RNGpar;
RNGpar.serial_seeds = "1 2 3 4 5";
RNGpar.parallel_seeds = "6 7 8 9 10";
TheHMC.Resources.SetRNGSeeds(RNGpar);
// Construct observables
// here there is too much indirection
typedef PlaquetteMod<HMCWrapper::ImplPolicy> PlaqObs;
TheHMC.Resources.AddObservable<PlaqObs>();
//////////////////////////////////////////////
const int Ls = 12;
RealD M5 = 1.8;
RealD b = 1.5;
RealD c = 0.5;
Real beta = 2.13;
// Real light_mass = 5.4e-4;
Real light_mass = 7.8e-4;
Real strange_mass = 0.0362;
Real pv_mass = 1.0;
// std::vector<Real> hasenbusch({ 0.01, 0.045, 0.108, 0.25, 0.51 , pv_mass });
// std::vector<Real> hasenbusch({ light_mass, 0.01, 0.045, 0.108, 0.25, 0.51 , pv_mass });
std::vector<Real> hasenbusch({ light_mass, 0.005, 0.0145, 0.045, 0.108, 0.25, 0.51 , pv_mass }); // Updated
// std::vector<Real> hasenbusch({ light_mass, 0.0145, 0.045, 0.108, 0.25, 0.51 , 0.75 , pv_mass });
auto GridPtr = TheHMC.Resources.GetCartesian();
auto GridRBPtr = TheHMC.Resources.GetRBCartesian();
typedef SchurDiagMooeeOperator<FermionActionF,FermionFieldF> LinearOperatorF;
typedef SchurDiagMooeeOperator<FermionAction ,FermionField > LinearOperatorD;
typedef SchurDiagMooeeOperator<FermionEOFAActionF,FermionFieldF> LinearOperatorEOFAF;
typedef SchurDiagMooeeOperator<FermionEOFAAction ,FermionField > LinearOperatorEOFAD;
typedef MixedPrecisionConjugateGradientOperatorFunction<MobiusFermionD,MobiusFermionF,LinearOperatorD,LinearOperatorF> MxPCG;
typedef MixedPrecisionConjugateGradientOperatorFunction<MobiusEOFAFermionD,MobiusEOFAFermionF,LinearOperatorEOFAD,LinearOperatorEOFAF> MxPCG_EOFA;
////////////////////////////////////////////////////////////////
// Domain decomposed
////////////////////////////////////////////////////////////////
Coordinate latt4 = GridPtr->GlobalDimensions();
Coordinate mpi = GridPtr->ProcessorGrid();
Coordinate shm;
GlobalSharedMemory::GetShmDims(mpi,shm);
Coordinate CommDim(Nd);
for(int d=0;d<Nd;d++) CommDim[d]= (mpi[d]/shm[d])>1 ? 1 : 0;
Coordinate NonDirichlet(Nd+1,0);
//////////////////////////
// Fermion Grids
//////////////////////////
auto FGrid = SpaceTimeGrid::makeFiveDimGrid(Ls,GridPtr);
auto FrbGrid = SpaceTimeGrid::makeFiveDimRedBlackGrid(Ls,GridPtr);
Coordinate simdF = GridDefaultSimd(Nd,vComplexF::Nsimd());
auto GridPtrF = SpaceTimeGrid::makeFourDimGrid(latt4,simdF,mpi);
auto GridRBPtrF = SpaceTimeGrid::makeFourDimRedBlackGrid(GridPtrF);
auto FGridF = SpaceTimeGrid::makeFiveDimGrid(Ls,GridPtrF);
auto FrbGridF = SpaceTimeGrid::makeFiveDimRedBlackGrid(Ls,GridPtrF);
IwasakiGaugeActionR GaugeAction(beta);
// temporarily need a gauge field
LatticeGaugeFieldD U(GridPtr); U=Zero();
LatticeGaugeFieldF UF(GridPtrF); UF=Zero();
LatticeGaugeFieldD2 UD2(GridPtrF); UD2=Zero();
std::cout << GridLogMessage << " Running the HMC "<< std::endl;
TheHMC.ReadCommandLine(argc,argv); // params on CML or from param file
TheHMC.initializeGaugeFieldAndRNGs(U);
std::cout << "loaded NERSC gauge field"<<std::endl;
// These lines are unnecessary if BC are all periodic
std::vector<Complex> boundary = {1,1,1,-1};
FermionAction::ImplParams Params(boundary);
FermionActionF::ImplParams ParamsF(boundary);
Params.dirichlet=NonDirichlet;
ParamsF.dirichlet=NonDirichlet;
// double StoppingCondition = 1e-14;
// double MDStoppingCondition = 1e-9;
double StoppingCondition = 1e-8;
double MDStoppingCondition = 1e-7;
double MDStoppingConditionLoose = 1e-7;
double MDStoppingConditionStrange = 1e-7;
double MaxCGIterations = 300000;
ConjugateGradient<FermionField> CG(StoppingCondition,MaxCGIterations);
ConjugateGradient<FermionField> MDCG(MDStoppingCondition,MaxCGIterations);
////////////////////////////////////
// Collect actions
////////////////////////////////////
ActionLevel<HMCWrapper::Field> Level1(1);
ActionLevel<HMCWrapper::Field> Level2(3);
ActionLevel<HMCWrapper::Field> Level3(15);
////////////////////////////////////
// Strange action
////////////////////////////////////
FermionAction StrangeOp (U,*FGrid,*FrbGrid,*GridPtr,*GridRBPtr,strange_mass,M5,b,c, Params);
FermionAction StrangePauliVillarsOp(U,*FGrid,*FrbGrid,*GridPtr,*GridRBPtr,pv_mass, M5,b,c, Params);
// Probably dominates the force - back to EOFA.
OneFlavourRationalParams SFRp;
SFRp.lo = 0.25;
SFRp.hi = 25.0;
SFRp.MaxIter = 10000;
SFRp.tolerance= 1.0e-5;
SFRp.mdtolerance= 2.0e-4;
SFRp.degree = 8;
SFRp.precision= 50;
MobiusEOFAFermionD Strange_Op_L (U , *FGrid , *FrbGrid , *GridPtr , *GridRBPtr , strange_mass, strange_mass, pv_mass, 0.0, -1, M5, b, c);
MobiusEOFAFermionF Strange_Op_LF(UF, *FGridF, *FrbGridF, *GridPtrF, *GridRBPtrF, strange_mass, strange_mass, pv_mass, 0.0, -1, M5, b, c);
MobiusEOFAFermionD Strange_Op_R (U , *FGrid , *FrbGrid , *GridPtr , *GridRBPtr , pv_mass, strange_mass, pv_mass, -1.0, 1, M5, b, c);
MobiusEOFAFermionF Strange_Op_RF(UF, *FGridF, *FrbGridF, *GridPtrF, *GridRBPtrF, pv_mass, strange_mass, pv_mass, -1.0, 1, M5, b, c);
ConjugateGradient<FermionField> ActionCG(StoppingCondition,MaxCGIterations);
ConjugateGradient<FermionField> DerivativeCG(MDStoppingCondition,MaxCGIterations);
LinearOperatorEOFAD Strange_LinOp_L (Strange_Op_L);
LinearOperatorEOFAD Strange_LinOp_R (Strange_Op_R);
LinearOperatorEOFAF Strange_LinOp_LF(Strange_Op_LF);
LinearOperatorEOFAF Strange_LinOp_RF(Strange_Op_RF);
const int MX_inner = 1000;
MxPCG_EOFA ActionCGL(StoppingCondition,
MX_inner,
MaxCGIterations,
GridPtrF,
FrbGridF,
Strange_Op_LF,Strange_Op_L,
Strange_LinOp_LF,Strange_LinOp_L);
MxPCG_EOFA DerivativeCGL(MDStoppingConditionStrange,
MX_inner,
MaxCGIterations,
GridPtrF,
FrbGridF,
Strange_Op_LF,Strange_Op_L,
Strange_LinOp_LF,Strange_LinOp_L);
MxPCG_EOFA ActionCGR(StoppingCondition,
MX_inner,
MaxCGIterations,
GridPtrF,
FrbGridF,
Strange_Op_RF,Strange_Op_R,
Strange_LinOp_RF,Strange_LinOp_R);
MxPCG_EOFA DerivativeCGR(MDStoppingConditionStrange,
MX_inner,
MaxCGIterations,
GridPtrF,
FrbGridF,
Strange_Op_RF,Strange_Op_R,
Strange_LinOp_RF,Strange_LinOp_R);
ExactOneFlavourRatioPseudoFermionAction<FermionImplPolicy>
EOFA(Strange_Op_L, Strange_Op_R,
ActionCG,
ActionCGL, ActionCGR,
DerivativeCGL, DerivativeCGR,
SFRp, true);
// Level2.push_back(&EOFA);
////////////////////////////////////
// up down action
////////////////////////////////////
std::vector<Real> light_den;
std::vector<Real> light_num;
std::vector<int> dirichlet_den;
std::vector<int> dirichlet_num;
int n_hasenbusch = hasenbusch.size();
light_den.push_back(light_mass); dirichlet_den.push_back(0);
for(int h=0;h<n_hasenbusch;h++){
light_den.push_back(hasenbusch[h]); dirichlet_den.push_back(0);
}
for(int h=0;h<n_hasenbusch;h++){
light_num.push_back(hasenbusch[h]); dirichlet_num.push_back(0);
}
light_num.push_back(pv_mass); dirichlet_num.push_back(0);
std::vector<FermionAction *> Numerators;
std::vector<FermionAction *> Denominators;
std::vector<FermionActionF *> NumeratorsF;
std::vector<FermionActionF *> DenominatorsF;
std::vector<FermionActionD2 *> NumeratorsD2;
std::vector<FermionActionD2 *> DenominatorsD2;
std::vector<TwoFlavourEvenOddRatioPseudoFermionAction<FermionImplPolicy> *> Quotients;
std::vector<MxPCG *> ActionMPCG;
std::vector<MxPCG *> MPCG;
#define MIXED_PRECISION
#ifdef MIXED_PRECISION
std::vector<OneFlavourEvenOddRatioRationalMixedPrecPseudoFermionAction<FermionImplPolicy,FermionImplPolicyF,FermionImplPolicyD2> *> Bdys;
#else
std::vector<OneFlavourEvenOddRatioRationalPseudoFermionAction<FermionImplPolicy> *> Bdys;
#endif
typedef SchurDiagMooeeOperator<FermionActionF,FermionFieldF> LinearOperatorF;
typedef SchurDiagMooeeOperator<FermionAction ,FermionField > LinearOperatorD;
std::vector<LinearOperatorD *> LinOpD;
std::vector<LinearOperatorF *> LinOpF;
for(int h=0;h<n_hasenbusch+1;h++){
std::cout << GridLogMessage
<< " 2f quotient Action ";
std::cout << "det D("<<light_den[h]<<")";
if ( dirichlet_den[h] ) std::cout << "^dirichlet ";
std::cout << "/ det D("<<light_num[h]<<")";
if ( dirichlet_num[h] ) std::cout << "^dirichlet ";
std::cout << std::endl;
FermionAction::ImplParams ParamsNum(boundary);
FermionAction::ImplParams ParamsDen(boundary);
FermionActionF::ImplParams ParamsDenF(boundary);
FermionActionF::ImplParams ParamsNumF(boundary);
ParamsNum.dirichlet = NonDirichlet;
ParamsDen.dirichlet = NonDirichlet;
ParamsNum.partialDirichlet = 0;
ParamsDen.partialDirichlet = 0;
Numerators.push_back (new FermionAction(U,*FGrid,*FrbGrid,*GridPtr,*GridRBPtr,light_num[h],M5,b,c, ParamsNum));
Denominators.push_back(new FermionAction(U,*FGrid,*FrbGrid,*GridPtr,*GridRBPtr,light_den[h],M5,b,c, ParamsDen));
ParamsDenF.dirichlet = ParamsDen.dirichlet;
ParamsDenF.partialDirichlet = ParamsDen.partialDirichlet;
DenominatorsF.push_back(new FermionActionF(UF,*FGridF,*FrbGridF,*GridPtrF,*GridRBPtrF,light_den[h],M5,b,c, ParamsDenF));
ParamsNumF.dirichlet = ParamsNum.dirichlet;
ParamsNumF.partialDirichlet = ParamsNum.partialDirichlet;
NumeratorsF.push_back (new FermionActionF(UF,*FGridF,*FrbGridF,*GridPtrF,*GridRBPtrF,light_num[h],M5,b,c, ParamsNumF));
LinOpD.push_back(new LinearOperatorD(*Denominators[h]));
LinOpF.push_back(new LinearOperatorF(*DenominatorsF[h]));
double conv = MDStoppingCondition;
if (h<3) conv= MDStoppingConditionLoose; // Relax on the first three hasenbusch factors
const int MX_inner = 5000;
MPCG.push_back(new MxPCG(conv,
MX_inner,
MaxCGIterations,
GridPtrF,
FrbGridF,
*DenominatorsF[h],*Denominators[h],
*LinOpF[h], *LinOpD[h]) );
ActionMPCG.push_back(new MxPCG(StoppingCondition,
MX_inner,
MaxCGIterations,
GridPtrF,
FrbGridF,
*DenominatorsF[h],*Denominators[h],
*LinOpF[h], *LinOpD[h]) );
Quotients.push_back (new TwoFlavourEvenOddRatioPseudoFermionAction<FermionImplPolicy>(*Numerators[h],*Denominators[h],*MPCG[h],*ActionMPCG[h],CG));
}
int nquo=Quotients.size();
for(int h=0;h<nquo;h++){
Level2.push_back(Quotients[h]);
}
/////////////////////////////////////////////////////////////
// Gauge action
/////////////////////////////////////////////////////////////
Level3.push_back(&GaugeAction);
TheHMC.TheAction.push_back(Level1);
TheHMC.TheAction.push_back(Level2);
TheHMC.TheAction.push_back(Level3);
std::cout << GridLogMessage << " Action complete "<< std::endl;
/////////////////////////////////////////////////////////////
TheHMC.Run(); // no smearing
Grid_finalize();
} // main

TODO
View File

@ -1,3 +1,6 @@
Lattice_basis.h -- > HIP and SYCL GPU code
======
DDHMC
======

View File

@ -88,6 +88,7 @@ int main (int argc, char ** argv)
// Node level
//////////////////////
for(int d=0;d<Nd;d++) CommDim[d]= (mpi[d]/shm[d])>1 ? 1 : 0;
// for(int d=0;d<Nd;d++) CommDim[d]= 1;
Dirichlet[0] = 0;
Dirichlet[1] = CommDim[0]*latt4[0]/mpi[0] * shm[0];
Dirichlet[2] = CommDim[1]*latt4[1]/mpi[1] * shm[1];
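// The Dirichlet coordinate in direction mu is (local extent per rank) x (shm ranks per node),
// i.e. the node-local sub-lattice extent, and is zeroed when that direction stays on-node,
// so the Dirichlet boundary is placed on the inter-node surfaces only.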
@ -222,7 +223,7 @@ void Benchmark(int Ls, Coordinate Dirichlet, int partial)
{
ref = Zero();
for(int mu=0;mu<Nd;mu++){
int depth=dwf_compressor_depth;
tmp = Cshift(src,mu+1,1);
{
autoView( tmp_v , tmp , CpuWrite);
@ -230,7 +231,7 @@ void Benchmark(int Ls, Coordinate Dirichlet, int partial)
autoView( Ucopy_v, Ucopy[mu] , CpuRead);
for(int ss=0;ss<U[mu].Grid()->oSites();ss++){
for(int s=0;s<Ls;s++){
if ( (s==0) || (s==Ls-1)){
if ( (s<depth) || (s>=Ls-depth)){
tmp_v[Ls*ss+s] = Ucopy_v[ss]*tmp_v[Ls*ss+s];
} else {
tmp_v[Ls*ss+s] = U_v[ss]*tmp_v[Ls*ss+s];
@ -246,7 +247,7 @@ void Benchmark(int Ls, Coordinate Dirichlet, int partial)
autoView( src_v, src , CpuRead);
for(int ss=0;ss<U[mu].Grid()->oSites();ss++){
for(int s=0;s<Ls;s++){
if ( (s==0) || (s==Ls-1) ){
if ( (s<depth) || (s>=Ls-depth)){
tmp_v[Ls*ss+s] = adj(Ucopy_v[ss])*src_v[Ls*ss+s];
} else {
tmp_v[Ls*ss+s] = adj(U_v[ss])*src_v[Ls*ss+s];
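// With partial Dirichlet the copied boundary links (Ucopy) are now applied on the
// outermost `depth = dwf_compressor_depth` s-slices at each wall, generalising the
// previous hard-coded s==0 / s==Ls-1 (depth 1) case in this cshift reference check.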
@ -342,6 +343,7 @@ void Benchmark(int Ls, Coordinate Dirichlet, int partial)
ref = Zero();
for(int mu=0;mu<Nd;mu++){
int depth=dwf_compressor_depth;
tmp = Cshift(src,mu+1,1);
{
autoView( tmp_v , tmp , CpuWrite);
@ -349,7 +351,7 @@ void Benchmark(int Ls, Coordinate Dirichlet, int partial)
autoView( Ucopy_v, Ucopy[mu] , CpuRead);
for(int ss=0;ss<U[mu].Grid()->oSites();ss++){
for(int s=0;s<Ls;s++){
if ( (s==0) || (s==Ls-1)){
if ( (s<depth) || (s>=Ls-depth)){
tmp_v[Ls*ss+s] = Ucopy_v[ss]*tmp_v[Ls*ss+s];
} else {
tmp_v[Ls*ss+s] = U_v[ss]*tmp_v[Ls*ss+s];
@ -365,7 +367,7 @@ void Benchmark(int Ls, Coordinate Dirichlet, int partial)
autoView( src_v, src , CpuRead);
for(int ss=0;ss<U[mu].Grid()->oSites();ss++){
for(int s=0;s<Ls;s++){
if ( (s==0) || (s==Ls-1) ){
if ( (s<depth) || (s>=Ls-depth)){
tmp_v[Ls*ss+s] = adj(Ucopy_v[ss])*src_v[Ls*ss+s];
} else {
tmp_v[Ls*ss+s] = adj(U_v[ss])*src_v[Ls*ss+s];

View File

@ -1,14 +1,16 @@
CLIME=`spack find --paths c-lime@2-3-9 | grep c-lime| cut -c 15-`
../../configure --enable-comms=mpi-auto \
--with-lime=$CLIME \
--enable-unified=no \
--enable-shm=nvlink \
--enable-tracing=timer \
--enable-accelerator=hip \
--enable-gen-simd-width=64 \
--enable-simd=GPU \
--disable-fermion-reps \
--with-gmp=$OLCF_GMP_ROOT \
--with-fftw=$FFTW_DIR/.. \
--with-mpfr=/opt/cray/pe/gcc/mpfr/3.1.4/ \
--enable-gparity \
--disable-fermion-reps \
CXX=hipcc MPICXX=mpicxx \
CXXFLAGS="-fPIC -I{$ROCM_PATH}/include/ -std=c++14 -I${MPICH_DIR}/include -L/lib64 " \
LDFLAGS="-L/lib64 -L/opt/rocm-5.2.0/lib/ -L${MPICH_DIR}/lib -lmpi -L${CRAY_MPICH_ROOTDIR}/gtl/lib -lmpi_gtl_hsa -lamdhip64 "

View File

@ -1,3 +1,5 @@
. /autofs/nccs-svm1_home1/paboyle/Crusher/Grid/spack/share/spack/setup-env.sh
spack load c-lime
export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/sw/crusher/spack-envs/base/opt/cray-sles15-zen3/gcc-11.2.0/gperftools-2.9.1-72ubwtuc5wcz2meqltbfdb76epufgzo2/lib
module load emacs
#module load gperftools

View File

@ -75,8 +75,8 @@ int main (int argc, char ** argv)
RealD M5=1.8;
{
OverlapWilsonContFracTanhFermionR Dcf(Umu,*FGrid,*FrbGrid,*UGrid,*UrbGrid,mass,M5,1.0);
HermitianLinearOperator<OverlapWilsonContFracTanhFermionR,LatticeFermion> HermIndefOp(Dcf);
OverlapWilsonContFracTanhFermionD Dcf(Umu,*FGrid,*FrbGrid,*UGrid,*UrbGrid,mass,M5,1.0);
HermitianLinearOperator<OverlapWilsonContFracTanhFermionD,LatticeFermion> HermIndefOp(Dcf);
HermIndefOp.Op(src,ref);
HermIndefOp.OpDiag(src,result);
@ -92,8 +92,8 @@ int main (int argc, char ** argv)
}
{
OverlapWilsonPartialFractionTanhFermionR Dpf(Umu,*FGrid,*FrbGrid,*UGrid,*UrbGrid,mass,M5,1.0);
HermitianLinearOperator<OverlapWilsonPartialFractionTanhFermionR,LatticeFermion> HermIndefOp(Dpf);
OverlapWilsonPartialFractionTanhFermionD Dpf(Umu,*FGrid,*FrbGrid,*UGrid,*UrbGrid,mass,M5,1.0);
HermitianLinearOperator<OverlapWilsonPartialFractionTanhFermionD,LatticeFermion> HermIndefOp(Dpf);
HermIndefOp.Op(src,ref);
HermIndefOp.OpDiag(src,result);

View File

@ -140,14 +140,14 @@ int main (int argc, char ** argv)
// RealD mass=0.1;
// RealD M5=1.8;
// DomainWallFermionR Ddwf(Umu,*FGrid,*FrbGrid,*UGrid,*UrbGrid,mass,M5);
// DomainWallFermionD Ddwf(Umu,*FGrid,*FrbGrid,*UGrid,*UrbGrid,mass,M5);
// LatticeFermion src_o(FrbGrid);
// LatticeFermion result_o(FrbGrid);
// pickCheckerboard(Odd,src_o,src);
// result_o=Zero();
// SchurDiagMooeeOperator<DomainWallFermionR,LatticeFermion> HermOpEO(Ddwf);
// SchurDiagMooeeOperator<DomainWallFermionD,LatticeFermion> HermOpEO(Ddwf);
// ConjugateGradient<LatticeFermion> CG(1.0e-8,10000);
// CG(HermOpEO,src_o,result_o);

View File

@ -76,20 +76,20 @@ int main (int argc, char ** argv)
RealD M5 =1.8;
std::cout<<GridLogMessage <<"OverlapWilsonContFracTanhFermion test"<<std::endl;
OverlapWilsonContFracTanhFermionR Dcf(Umu,*FGrid,*FrbGrid,*UGrid,*UrbGrid,mass,M5,1.0);
TestWhat<OverlapWilsonContFracTanhFermionR>(Dcf,FGrid,FrbGrid,UGrid,UrbGrid,mass,M5,&RNG4,&RNG5);
OverlapWilsonContFracTanhFermionD Dcf(Umu,*FGrid,*FrbGrid,*UGrid,*UrbGrid,mass,M5,1.0);
TestWhat<OverlapWilsonContFracTanhFermionD>(Dcf,FGrid,FrbGrid,UGrid,UrbGrid,mass,M5,&RNG4,&RNG5);
std::cout<<GridLogMessage <<"OverlapWilsonContFracZolotarevFermion test"<<std::endl;
OverlapWilsonContFracZolotarevFermionR Dcfz(Umu,*FGrid,*FrbGrid,*UGrid,*UrbGrid,mass,M5,0.1,6.0);
TestWhat<OverlapWilsonContFracZolotarevFermionR>(Dcfz,FGrid,FrbGrid,UGrid,UrbGrid,mass,M5,&RNG4,&RNG5);
OverlapWilsonContFracZolotarevFermionD Dcfz(Umu,*FGrid,*FrbGrid,*UGrid,*UrbGrid,mass,M5,0.1,6.0);
TestWhat<OverlapWilsonContFracZolotarevFermionD>(Dcfz,FGrid,FrbGrid,UGrid,UrbGrid,mass,M5,&RNG4,&RNG5);
std::cout<<GridLogMessage <<"OverlapWilsonPartialFractionTanhFermion test"<<std::endl;
OverlapWilsonPartialFractionTanhFermionR Dpf(Umu,*FGrid,*FrbGrid,*UGrid,*UrbGrid,mass,M5,1.0);
TestWhat<OverlapWilsonPartialFractionTanhFermionR>(Dpf,FGrid,FrbGrid,UGrid,UrbGrid,mass,M5,&RNG4,&RNG5);
OverlapWilsonPartialFractionTanhFermionD Dpf(Umu,*FGrid,*FrbGrid,*UGrid,*UrbGrid,mass,M5,1.0);
TestWhat<OverlapWilsonPartialFractionTanhFermionD>(Dpf,FGrid,FrbGrid,UGrid,UrbGrid,mass,M5,&RNG4,&RNG5);
std::cout<<GridLogMessage <<"OverlapWilsonPartialFractionZolotarevFermion test"<<std::endl;
OverlapWilsonPartialFractionZolotarevFermionR Dpfz(Umu,*FGrid,*FrbGrid,*UGrid,*UrbGrid,mass,M5,0.1,6.0);
TestWhat<OverlapWilsonPartialFractionZolotarevFermionR>(Dpfz,FGrid,FrbGrid,UGrid,UrbGrid,mass,M5,&RNG4,&RNG5);
OverlapWilsonPartialFractionZolotarevFermionD Dpfz(Umu,*FGrid,*FrbGrid,*UGrid,*UrbGrid,mass,M5,0.1,6.0);
TestWhat<OverlapWilsonPartialFractionZolotarevFermionD>(Dpfz,FGrid,FrbGrid,UGrid,UrbGrid,mass,M5,&RNG4,&RNG5);
Grid_finalize();
}

View File

@ -90,7 +90,7 @@ int main (int argc, char ** argv)
RealD shift = 0.1234;
RealD M5 = 1.8;
int pm = 1;
DomainWallEOFAFermionR Ddwf(Umu, *FGrid, *FrbGrid, *UGrid, *UrbGrid, mq1, mq2, mq3, shift, pm, M5);
DomainWallEOFAFermionD Ddwf(Umu, *FGrid, *FrbGrid, *UGrid, *UrbGrid, mq1, mq2, mq3, shift, pm, M5);
LatticeFermion src_e (FrbGrid);
LatticeFermion src_o (FrbGrid);
@ -216,7 +216,7 @@ int main (int argc, char ** argv)
pickCheckerboard(Even, phi_e, phi);
pickCheckerboard(Odd , phi_o, phi);
SchurDiagMooeeOperator<DomainWallEOFAFermionR,LatticeFermion> HermOpEO(Ddwf);
SchurDiagMooeeOperator<DomainWallEOFAFermionD,LatticeFermion> HermOpEO(Ddwf);
HermOpEO.MpcDagMpc(chi_e, dchi_e);
HermOpEO.MpcDagMpc(chi_o, dchi_o);

View File

@ -86,7 +86,7 @@ int main (int argc, char ** argv)
RealD mass=0.1;
RealD M5 =1.8;
DomainWallFermionR Ddwf(Umu,*FGrid,*FrbGrid,*UGrid,*UrbGrid,mass,M5);
DomainWallFermionD Ddwf(Umu,*FGrid,*FrbGrid,*UGrid,*UrbGrid,mass,M5);
LatticeFermion src_e (FrbGrid);
LatticeFermion src_o (FrbGrid);
@ -213,7 +213,7 @@ int main (int argc, char ** argv)
pickCheckerboard(Even,phi_e,phi);
pickCheckerboard(Odd ,phi_o,phi);
SchurDiagMooeeOperator<DomainWallFermionR,LatticeFermion> HermOpEO(Ddwf);
SchurDiagMooeeOperator<DomainWallFermionD,LatticeFermion> HermOpEO(Ddwf);
HermOpEO.MpcDagMpc(chi_e,dchi_e);
HermOpEO.MpcDagMpc(chi_o,dchi_o);

View File

@ -52,7 +52,7 @@ int main (int argc, char ** argv)
// pRNG.SeedFixedIntegers(seeds);
pRNG.SeedFixedIntegers(std::vector<int>({45,12,81,9}));
typedef typename GparityWilsonFermionR::FermionField FermionField;
typedef typename GparityWilsonFermionD::FermionField FermionField;
FermionField src (&Grid); random(pRNG,src);
FermionField phi (&Grid); random(pRNG,phi);
@ -80,10 +80,10 @@ int main (int argc, char ** argv)
RealD mass=0.1;
GparityWilsonFermionR::ImplParams params;
GparityWilsonFermionD::ImplParams params;
std::vector<int> twists(Nd,0); twists[1] = 1;
params.twists = twists;
GparityWilsonFermionR Dw(Umu,Grid,RBGrid,mass,params);
GparityWilsonFermionD Dw(Umu,Grid,RBGrid,mass,params);
FermionField src_e (&RBGrid);
FermionField src_o (&RBGrid);
@ -199,7 +199,7 @@ int main (int argc, char ** argv)
pickCheckerboard(Even,phi_e,phi);
pickCheckerboard(Odd ,phi_o,phi);
SchurDiagMooeeOperator<GparityWilsonFermionR,FermionField> HermOpEO(Dw);
SchurDiagMooeeOperator<GparityWilsonFermionD,FermionField> HermOpEO(Dw);
HermOpEO.MpcDagMpc(chi_e,dchi_e);
HermOpEO.MpcDagMpc(chi_o,dchi_o);

View File

@ -92,7 +92,7 @@ int main (int argc, char ** argv)
RealD shift = 0.1234;
RealD M5 = 1.8;
int pm = 1;
MobiusEOFAFermionR Ddwf(Umu, *FGrid, *FrbGrid, *UGrid, *UrbGrid, mq1, mq2, mq3, shift, pm, M5, b, c);
MobiusEOFAFermionD Ddwf(Umu, *FGrid, *FrbGrid, *UGrid, *UrbGrid, mq1, mq2, mq3, shift, pm, M5, b, c);
LatticeFermion src_e (FrbGrid);
LatticeFermion src_o (FrbGrid);
@ -218,7 +218,7 @@ int main (int argc, char ** argv)
pickCheckerboard(Even, phi_e, phi);
pickCheckerboard(Odd , phi_o, phi);
SchurDiagMooeeOperator<MobiusEOFAFermionR,LatticeFermion> HermOpEO(Ddwf);
SchurDiagMooeeOperator<MobiusEOFAFermionD,LatticeFermion> HermOpEO(Ddwf);
HermOpEO.MpcDagMpc(chi_e, dchi_e);
HermOpEO.MpcDagMpc(chi_o, dchi_o);

View File

@ -108,8 +108,8 @@ int main (int argc, char ** argv)
omegas.push_back( std::complex<double>(0.0686324988446592,-0.0550658530827402) );
#endif
MobiusFermionR Ddwf(Umu, *FGrid, *FrbGrid, *UGrid, *UrbGrid, mass, M5, 0.5,0.5);
// DomainWallFermionR Ddwf(Umu,*FGrid,*FrbGrid,*UGrid,*UrbGrid,mass,M5);
MobiusFermionD Ddwf(Umu, *FGrid, *FrbGrid, *UGrid, *UrbGrid, mass, M5, 0.5,0.5);
// DomainWallFermionD Ddwf(Umu,*FGrid,*FrbGrid,*UGrid,*UrbGrid,mass,M5);
LatticeFermion src_e (FrbGrid);
LatticeFermion src_o (FrbGrid);
@ -264,7 +264,7 @@ int main (int argc, char ** argv)
pickCheckerboard(Odd ,phi_o,phi);
SchurDiagMooeeOperator<MobiusFermionR,LatticeFermion> HermOpEO(Ddwf);
SchurDiagMooeeOperator<MobiusFermionD,LatticeFermion> HermOpEO(Ddwf);
HermOpEO.MpcDagMpc(chi_e,dchi_e);
HermOpEO.MpcDagMpc(chi_o,dchi_o);

View File

@ -53,9 +53,9 @@ int main (int argc, char ** argv)
pRNG.SeedFixedIntegers(seeds);
// pRNG.SeedFixedIntegers(std::vector<int>({45,12,81,9});
typedef typename ImprovedStaggeredFermionR::FermionField FermionField;
typedef typename ImprovedStaggeredFermionR::ComplexField ComplexField;
typename ImprovedStaggeredFermionR::ImplParams params;
typedef typename ImprovedStaggeredFermionD::FermionField FermionField;
typedef typename ImprovedStaggeredFermionD::ComplexField ComplexField;
typename ImprovedStaggeredFermionD::ImplParams params;
FermionField src (&Grid); random(pRNG,src);
FermionField result(&Grid); result=Zero();
@ -130,7 +130,7 @@ int main (int argc, char ** argv)
// ref = ref + mass * src;
}
ImprovedStaggeredFermionR Ds(Umu,Umu,Grid,RBGrid,mass,c1,c2,u0,params);
ImprovedStaggeredFermionD Ds(Umu,Umu,Grid,RBGrid,mass,c1,c2,u0,params);
std::cout<<GridLogMessage<<"=========================================================="<<std::endl;
@ -269,7 +269,7 @@ int main (int argc, char ** argv)
pickCheckerboard(Even,phi_e,phi);
pickCheckerboard(Odd ,phi_o,phi);
SchurDiagMooeeOperator<ImprovedStaggeredFermionR,FermionField> HermOpEO(Ds);
SchurDiagMooeeOperator<ImprovedStaggeredFermionD,FermionField> HermOpEO(Ds);
HermOpEO.MpcDagMpc(chi_e,dchi_e);
HermOpEO.MpcDagMpc(chi_o,dchi_o);

View File

@ -60,9 +60,9 @@ int main (int argc, char ** argv)
pRNG4.SeedFixedIntegers(seeds);
pRNG5.SeedFixedIntegers(seeds);
typedef typename ImprovedStaggeredFermion5DR::FermionField FermionField;
typedef typename ImprovedStaggeredFermion5DR::ComplexField ComplexField;
typename ImprovedStaggeredFermion5DR::ImplParams params;
typedef typename ImprovedStaggeredFermion5DD::FermionField FermionField;
typedef typename ImprovedStaggeredFermion5DD::ComplexField ComplexField;
typename ImprovedStaggeredFermion5DD::ImplParams params;
FermionField src (FGrid);
@ -148,7 +148,7 @@ int main (int argc, char ** argv)
}
}
ImprovedStaggeredFermion5DR Ds(Umu,Umu,*FGrid,*FrbGrid,*UGrid,*UrbGrid,mass,c1,c2,u0,params);
ImprovedStaggeredFermion5DD Ds(Umu,Umu,*FGrid,*FrbGrid,*UGrid,*UrbGrid,mass,c1,c2,u0,params);
std::cout<<GridLogMessage<<"=========================================================="<<std::endl;
std::cout<<GridLogMessage<<"= Testing Dhop against cshift implementation "<<std::endl;
@ -288,7 +288,7 @@ int main (int argc, char ** argv)
pickCheckerboard(Even,phi_e,phi);
pickCheckerboard(Odd ,phi_o,phi);
SchurDiagMooeeOperator<ImprovedStaggeredFermion5DR,FermionField> HermOpEO(Ds);
SchurDiagMooeeOperator<ImprovedStaggeredFermion5DD,FermionField> HermOpEO(Ds);
HermOpEO.MpcDagMpc(chi_e,dchi_e);
HermOpEO.MpcDagMpc(chi_o,dchi_o);

View File

@ -52,9 +52,9 @@ int main (int argc, char ** argv)
pRNG.SeedFixedIntegers(seeds);
// pRNG.SeedFixedIntegers(std::vector<int>({45,12,81,9});
typedef typename NaiveStaggeredFermionR::FermionField FermionField;
typedef typename NaiveStaggeredFermionR::ComplexField ComplexField;
typename NaiveStaggeredFermionR::ImplParams params;
typedef typename NaiveStaggeredFermionD::FermionField FermionField;
typedef typename NaiveStaggeredFermionD::ComplexField ComplexField;
typename NaiveStaggeredFermionD::ImplParams params;
FermionField src (&Grid); random(pRNG,src);
FermionField result(&Grid); result=Zero();
@ -120,7 +120,7 @@ int main (int argc, char ** argv)
// ref = ref + mass * src;
}
NaiveStaggeredFermionR Ds(Umu,Grid,RBGrid,mass,c1,u0,params);
NaiveStaggeredFermionD Ds(Umu,Grid,RBGrid,mass,c1,u0,params);
std::cout<<GridLogMessage<<"=========================================================="<<std::endl;
@ -258,7 +258,7 @@ int main (int argc, char ** argv)
pickCheckerboard(Even,phi_e,phi);
pickCheckerboard(Odd ,phi_o,phi);
SchurDiagMooeeOperator<NaiveStaggeredFermionR,FermionField> HermOpEO(Ds);
SchurDiagMooeeOperator<NaiveStaggeredFermionD,FermionField> HermOpEO(Ds);
HermOpEO.MpcDagMpc(chi_e,dchi_e);
HermOpEO.MpcDagMpc(chi_o,dchi_o);

View File

@ -52,8 +52,8 @@ int main(int argc, char **argv)
pRNG.SeedFixedIntegers(seeds);
// pRNG.SeedFixedIntegers(std::vector<int>({45,12,81,9});
typedef typename WilsonCloverFermionR::FermionField FermionField;
typename WilsonCloverFermionR::ImplParams params;
typedef typename WilsonCloverFermionD::FermionField FermionField;
typename WilsonCloverFermionD::ImplParams params;
WilsonAnisotropyCoefficients anis;
FermionField src(&Grid);
@ -88,8 +88,8 @@ int main(int argc, char **argv)
RealD csw_r = 1.0;
RealD csw_t = 1.0;
WilsonCloverFermionR Dwc(Umu, Grid, RBGrid, mass, csw_r, csw_t, anis, params);
CompactWilsonCloverFermionR Dwc_compact(Umu, Grid, RBGrid, mass, csw_r, csw_t, 1.0, anis, params);
WilsonCloverFermionD Dwc(Umu, Grid, RBGrid, mass, csw_r, csw_t, anis, params);
CompactWilsonCloverFermionD Dwc_compact(Umu, Grid, RBGrid, mass, csw_r, csw_t, 1.0, anis, params);
std::cout << GridLogMessage << "==========================================================" << std::endl;
std::cout << GridLogMessage << "= Testing that Deo + Doe = Dunprec " << std::endl;
@ -324,8 +324,8 @@ int main(int argc, char **argv)
}
/////////////////
WilsonCloverFermionR Dwc_prime(U_prime, Grid, RBGrid, mass, csw_r, csw_t, anis, params);
CompactWilsonCloverFermionR Dwc_compact_prime(U_prime, Grid, RBGrid, mass, csw_r, csw_t, 1.0, anis, params);
WilsonCloverFermionD Dwc_prime(U_prime, Grid, RBGrid, mass, csw_r, csw_t, anis, params);
CompactWilsonCloverFermionD Dwc_compact_prime(U_prime, Grid, RBGrid, mass, csw_r, csw_t, 1.0, anis, params);
tmp = Omega * src;
pickCheckerboard(Even, src_e, tmp);
@ -377,14 +377,14 @@ int main(int argc, char **argv)
chi = Zero();
phi = Zero();
WilsonFermionR Dw(Umu, Grid, RBGrid, mass, params);
WilsonFermionD Dw(Umu, Grid, RBGrid, mass, params);
Dw.M(src, result);
Dwc.M(src, chi);
Dwc_prime.M(Omega * src, phi);
WilsonFermionR Dw_prime(U_prime, Grid, RBGrid, mass, params);
WilsonFermionD Dw_prime(U_prime, Grid, RBGrid, mass, params);
Dw_prime.M(Omega * src, result2);
err = result - adj(Omega) * result2;
@ -411,7 +411,7 @@ int main(int argc, char **argv)
chi = Zero();
phi = Zero();
err = Zero();
WilsonCloverFermionR Dwc_csw0(Umu, Grid, RBGrid, mass, 0.0, 0.0, anis, params); // <-- Notice: csw=0
WilsonCloverFermionD Dwc_csw0(Umu, Grid, RBGrid, mass, 0.0, 0.0, anis, params); // <-- Notice: csw=0
pickCheckerboard(Even, phi_e, phi);
pickCheckerboard(Odd, phi_o, phi);
@ -437,7 +437,7 @@ int main(int argc, char **argv)
chi = Zero();
phi = Zero();
err = Zero();
CompactWilsonCloverFermionR Dwc_compact_csw0(Umu, Grid, RBGrid, mass, 0.0, 0.0, 1.0, anis, params); // <-- Notice: csw=0
CompactWilsonCloverFermionD Dwc_compact_csw0(Umu, Grid, RBGrid, mass, 0.0, 0.0, 1.0, anis, params); // <-- Notice: csw=0
pickCheckerboard(Even, phi_e, phi);
pickCheckerboard(Odd, phi_o, phi);

View File

@ -74,7 +74,7 @@ int main (int argc, char ** argv)
SU<Nc>::HotConfiguration(RNG4,Umu);
}
typename WilsonCloverFermionR::ImplParams params;
typename WilsonCloverFermionD::ImplParams params;
WilsonAnisotropyCoefficients anis;
RealD mass = 0.1;
RealD csw_r = 1.0;
@ -83,32 +83,32 @@ int main (int argc, char ** argv)
std::cout<<GridLogMessage <<"=================================="<<std::endl;
std::cout<<GridLogMessage <<"WilsonFermion test"<<std::endl;
std::cout<<GridLogMessage <<"=================================="<<std::endl;
WilsonFermionR Dw(Umu,*UGrid,*UrbGrid,mass,params);
TestConserved<WilsonFermionR>(Dw,Umu,UGrid,UrbGrid,&RNG4);
WilsonFermionD Dw(Umu,*UGrid,*UrbGrid,mass,params);
TestConserved<WilsonFermionD>(Dw,Umu,UGrid,UrbGrid,&RNG4);
std::cout<<GridLogMessage <<"=================================="<<std::endl;
std::cout<<GridLogMessage <<"WilsonCloverFermion test"<<std::endl;
std::cout<<GridLogMessage <<"=================================="<<std::endl;
WilsonCloverFermionR Dwc(Umu, *UGrid, *UrbGrid, mass, csw_r, csw_t, anis, params);
TestConserved<WilsonCloverFermionR>(Dwc,Umu,UGrid,UrbGrid,&RNG4);
WilsonCloverFermionD Dwc(Umu, *UGrid, *UrbGrid, mass, csw_r, csw_t, anis, params);
TestConserved<WilsonCloverFermionD>(Dwc,Umu,UGrid,UrbGrid,&RNG4);
std::cout<<GridLogMessage <<"=================================="<<std::endl;
std::cout<<GridLogMessage <<"CompactWilsonCloverFermion test"<<std::endl;
std::cout<<GridLogMessage <<"=================================="<<std::endl;
CompactWilsonCloverFermionR Dwcc(Umu, *UGrid, *UrbGrid, mass, csw_r, csw_t, 1.0, anis, params);
TestConserved<CompactWilsonCloverFermionR>(Dwcc,Umu,UGrid,UrbGrid,&RNG4);
CompactWilsonCloverFermionD Dwcc(Umu, *UGrid, *UrbGrid, mass, csw_r, csw_t, 1.0, anis, params);
TestConserved<CompactWilsonCloverFermionD>(Dwcc,Umu,UGrid,UrbGrid,&RNG4);
std::cout<<GridLogMessage <<"=================================="<<std::endl;
std::cout<<GridLogMessage <<"WilsonExpCloverFermion test"<<std::endl;
std::cout<<GridLogMessage <<"=================================="<<std::endl;
WilsonExpCloverFermionR Dewc(Umu, *UGrid, *UrbGrid, mass, csw_r, csw_t, anis, params);
TestConserved<WilsonExpCloverFermionR>(Dewc,Umu,UGrid,UrbGrid,&RNG4);
WilsonExpCloverFermionD Dewc(Umu, *UGrid, *UrbGrid, mass, csw_r, csw_t, anis, params);
TestConserved<WilsonExpCloverFermionD>(Dewc,Umu,UGrid,UrbGrid,&RNG4);
std::cout<<GridLogMessage <<"=================================="<<std::endl;
std::cout<<GridLogMessage <<"CompactWilsonExpCloverFermion test"<<std::endl;
std::cout<<GridLogMessage <<"=================================="<<std::endl;
CompactWilsonExpCloverFermionR Dewcc(Umu, *UGrid, *UrbGrid, mass, csw_r, csw_t, 1.0, anis, params);
TestConserved<CompactWilsonExpCloverFermionR>(Dewcc,Umu,UGrid,UrbGrid,&RNG4);
CompactWilsonExpCloverFermionD Dewcc(Umu, *UGrid, *UrbGrid, mass, csw_r, csw_t, 1.0, anis, params);
TestConserved<CompactWilsonExpCloverFermionD>(Dewcc,Umu,UGrid,UrbGrid,&RNG4);
Grid_finalize();
}

View File

@ -89,7 +89,7 @@ int main (int argc, char ** argv)
RealD mass=0.1;
WilsonFermionR Dw(Umu,Grid,RBGrid,mass);
WilsonFermionD Dw(Umu,Grid,RBGrid,mass);
LatticeFermion src_e (&RBGrid);
LatticeFermion src_o (&RBGrid);
@ -205,7 +205,7 @@ int main (int argc, char ** argv)
pickCheckerboard(Even,phi_e,phi);
pickCheckerboard(Odd ,phi_o,phi);
SchurDiagMooeeOperator<WilsonFermionR,LatticeFermion> HermOpEO(Dw);
SchurDiagMooeeOperator<WilsonFermionD,LatticeFermion> HermOpEO(Dw);
HermOpEO.MpcDagMpc(chi_e,dchi_e);
HermOpEO.MpcDagMpc(chi_o,dchi_o);

View File

@ -52,8 +52,8 @@ int main(int argc, char **argv)
pRNG.SeedFixedIntegers(seeds);
// pRNG.SeedFixedIntegers(std::vector<int>({45,12,81,9});
typedef typename WilsonExpCloverFermionR::FermionField FermionField;
typename WilsonExpCloverFermionR::ImplParams params;
typedef typename WilsonExpCloverFermionD::FermionField FermionField;
typename WilsonExpCloverFermionD::ImplParams params;
WilsonAnisotropyCoefficients anis;
FermionField src(&Grid);
@ -88,8 +88,8 @@ int main(int argc, char **argv)
RealD csw_r = 1.0;
RealD csw_t = 1.0;
WilsonExpCloverFermionR Dwc(Umu, Grid, RBGrid, mass, csw_r, csw_t, anis, params);
CompactWilsonExpCloverFermionR Dwc_compact(Umu, Grid, RBGrid, mass, csw_r, csw_t, 1.0, anis, params);
WilsonExpCloverFermionD Dwc(Umu, Grid, RBGrid, mass, csw_r, csw_t, anis, params);
CompactWilsonExpCloverFermionD Dwc_compact(Umu, Grid, RBGrid, mass, csw_r, csw_t, 1.0, anis, params);
std::cout << GridLogMessage << "==========================================================" << std::endl;
std::cout << GridLogMessage << "= Testing that Deo + Doe = Dunprec " << std::endl;
@ -324,8 +324,8 @@ int main(int argc, char **argv)
}
/////////////////
WilsonExpCloverFermionR Dwc_prime(U_prime, Grid, RBGrid, mass, csw_r, csw_t, anis, params);
CompactWilsonExpCloverFermionR Dwc_compact_prime(U_prime, Grid, RBGrid, mass, csw_r, csw_t, 1.0, anis, params);
WilsonExpCloverFermionD Dwc_prime(U_prime, Grid, RBGrid, mass, csw_r, csw_t, anis, params);
CompactWilsonExpCloverFermionD Dwc_compact_prime(U_prime, Grid, RBGrid, mass, csw_r, csw_t, 1.0, anis, params);
tmp = Omega * src;
pickCheckerboard(Even, src_e, tmp);
@ -377,14 +377,14 @@ int main(int argc, char **argv)
chi = Zero();
phi = Zero();
WilsonFermionR Dw(Umu, Grid, RBGrid, mass, params);
WilsonFermionD Dw(Umu, Grid, RBGrid, mass, params);
Dw.M(src, result);
Dwc.M(src, chi);
Dwc_prime.M(Omega * src, phi);
WilsonFermionR Dw_prime(U_prime, Grid, RBGrid, mass, params);
WilsonFermionD Dw_prime(U_prime, Grid, RBGrid, mass, params);
Dw_prime.M(Omega * src, result2);
err = result - adj(Omega) * result2;
@ -411,7 +411,7 @@ int main(int argc, char **argv)
chi = Zero();
phi = Zero();
err = Zero();
WilsonExpCloverFermionR Dwc_csw0(Umu, Grid, RBGrid, mass, 0.0, 0.0, anis, params); // <-- Notice: csw=0
WilsonExpCloverFermionD Dwc_csw0(Umu, Grid, RBGrid, mass, 0.0, 0.0, anis, params); // <-- Notice: csw=0
pickCheckerboard(Even, phi_e, phi);
pickCheckerboard(Odd, phi_o, phi);
@ -437,7 +437,7 @@ int main(int argc, char **argv)
chi = Zero();
phi = Zero();
err = Zero();
CompactWilsonExpCloverFermionR Dwc_compact_csw0(Umu, Grid, RBGrid, mass, 0.0, 0.0, 1.0, anis, params); // <-- Notice: csw=0
CompactWilsonExpCloverFermionD Dwc_compact_csw0(Umu, Grid, RBGrid, mass, 0.0, 0.0, 1.0, anis, params); // <-- Notice: csw=0
pickCheckerboard(Even, phi_e, phi);
pickCheckerboard(Odd, phi_o, phi);

View File

@ -90,7 +90,7 @@ int main (int argc, char ** argv)
RealD mass=0.1;
RealD mu = 0.1;
WilsonTMFermionR Dw(Umu,Grid,RBGrid,mass,mu);
WilsonTMFermionD Dw(Umu,Grid,RBGrid,mass,mu);
LatticeFermion src_e (&RBGrid);
LatticeFermion src_o (&RBGrid);
@ -206,7 +206,7 @@ int main (int argc, char ** argv)
pickCheckerboard(Even,phi_e,phi);
pickCheckerboard(Odd ,phi_o,phi);
SchurDiagMooeeOperator<WilsonTMFermionR,LatticeFermion> HermOpEO(Dw);
SchurDiagMooeeOperator<WilsonTMFermionD,LatticeFermion> HermOpEO(Dw);
HermOpEO.MpcDagMpc(chi_e,dchi_e);
HermOpEO.MpcDagMpc(chi_o,dchi_o);

View File

@ -123,7 +123,7 @@ int main (int argc, char ** argv)
RealD _mass,RealD _M5,
std::vector<ComplexD> &gamma, RealD b,RealD c,const ImplParams &p= ImplParams()) :
*/
ZMobiusFermionR Ddwf(Umu, *FGrid, *FrbGrid, *UGrid, *UrbGrid, mass, M5, omegas,RealD(1.),RealD(0.));
ZMobiusFermionD Ddwf(Umu, *FGrid, *FrbGrid, *UGrid, *UrbGrid, mass, M5, omegas,RealD(1.),RealD(0.));
LatticeFermion src_e (FrbGrid);
LatticeFermion src_o (FrbGrid);
@ -278,7 +278,7 @@ int main (int argc, char ** argv)
pickCheckerboard(Odd ,phi_o,phi);
SchurDiagMooeeOperator<ZMobiusFermionR,LatticeFermion> HermOpEO(Ddwf);
SchurDiagMooeeOperator<ZMobiusFermionD,LatticeFermion> HermOpEO(Ddwf);
HermOpEO.MpcDagMpc(chi_e,dchi_e);
HermOpEO.MpcDagMpc(chi_o,dchi_o);

View File

@ -125,10 +125,10 @@ int main (int argc, char ** argv)
std::cout<<GridLogMessage <<"======================"<<std::endl;
std::cout<<GridLogMessage <<"DomainWallFermion test"<<std::endl;
std::cout<<GridLogMessage <<"======================"<<std::endl;
DomainWallFermionR Ddwf(Umu,*FGrid,*FrbGrid,*UGrid,*UrbGrid,mass,M5);
DomainWallFermionD Ddwf(Umu,*FGrid,*FrbGrid,*UGrid,*UrbGrid,mass,M5);
DomainWallFermionF DdwfF(UmuF,*FGridF,*FrbGridF,*UGridF,*UrbGridF,mass,M5);
TestCGinversions<DomainWallFermionR>(Ddwf,Umu,FGrid,FrbGrid,UGrid,UrbGrid,mass,M5,&RNG4,&RNG5);
TestReconstruct5DFA<DomainWallFermionR,DomainWallFermionF>(Ddwf,DdwfF,Umu,FGrid,FrbGrid,UGrid,UrbGrid,mass,M5,&RNG4,&RNG5);
TestCGinversions<DomainWallFermionD>(Ddwf,Umu,FGrid,FrbGrid,UGrid,UrbGrid,mass,M5,&RNG4,&RNG5);
TestReconstruct5DFA<DomainWallFermionD,DomainWallFermionF>(Ddwf,DdwfF,Umu,FGrid,FrbGrid,UGrid,UrbGrid,mass,M5,&RNG4,&RNG5);
RealD b=1.5;// Scale factor b+c=2, b-c=1
RealD c=0.5;
@ -137,54 +137,54 @@ int main (int argc, char ** argv)
std::cout<<GridLogMessage <<"======================"<<std::endl;
std::cout<<GridLogMessage <<"MobiusFermion test"<<std::endl;
std::cout<<GridLogMessage <<"======================"<<std::endl;
MobiusFermionR Dmob(Umu,*FGrid,*FrbGrid,*UGrid,*UrbGrid,mass,M5,b,c);
MobiusFermionD Dmob(Umu,*FGrid,*FrbGrid,*UGrid,*UrbGrid,mass,M5,b,c);
MobiusFermionF DmobF(UmuF,*FGridF,*FrbGridF,*UGridF,*UrbGridF,mass,M5,b,c);
TestCGinversions<MobiusFermionR>(Dmob,Umu,FGrid,FrbGrid,UGrid,UrbGrid,mass,M5,&RNG4,&RNG5);
TestReconstruct5DFA<MobiusFermionR,MobiusFermionF>(Dmob,DmobF,Umu,FGrid,FrbGrid,UGrid,UrbGrid,mass,M5,&RNG4,&RNG5);
TestCGinversions<MobiusFermionD>(Dmob,Umu,FGrid,FrbGrid,UGrid,UrbGrid,mass,M5,&RNG4,&RNG5);
TestReconstruct5DFA<MobiusFermionD,MobiusFermionF>(Dmob,DmobF,Umu,FGrid,FrbGrid,UGrid,UrbGrid,mass,M5,&RNG4,&RNG5);
std::cout<<GridLogMessage <<"======================"<<std::endl;
std::cout<<GridLogMessage <<"ZMobiusFermion test"<<std::endl;
std::cout<<GridLogMessage <<"======================"<<std::endl;
ZMobiusFermionR ZDmob(Umu,*FGrid,*FrbGrid,*UGrid,*UrbGrid,mass,M5,gamma,b,c);
TestCGinversions<ZMobiusFermionR>(ZDmob,Umu,FGrid,FrbGrid,UGrid,UrbGrid,mass,M5,&RNG4,&RNG5);
TestReconstruct5D<ZMobiusFermionR>(ZDmob,Umu,FGrid,FrbGrid,UGrid,UrbGrid,mass,M5,&RNG4,&RNG5);
ZMobiusFermionD ZDmob(Umu,*FGrid,*FrbGrid,*UGrid,*UrbGrid,mass,M5,gamma,b,c);
TestCGinversions<ZMobiusFermionD>(ZDmob,Umu,FGrid,FrbGrid,UGrid,UrbGrid,mass,M5,&RNG4,&RNG5);
TestReconstruct5D<ZMobiusFermionD>(ZDmob,Umu,FGrid,FrbGrid,UGrid,UrbGrid,mass,M5,&RNG4,&RNG5);
std::cout<<GridLogMessage <<"======================"<<std::endl;
std::cout<<GridLogMessage <<"MobiusZolotarevFermion test"<<std::endl;
std::cout<<GridLogMessage <<"======================"<<std::endl;
MobiusZolotarevFermionR Dzolo(Umu,*FGrid,*FrbGrid,*UGrid,*UrbGrid,mass,M5,b,c,0.1,2.0);
TestCGinversions<MobiusZolotarevFermionR>(Dzolo,Umu,FGrid,FrbGrid,UGrid,UrbGrid,mass,M5,&RNG4,&RNG5);
TestReconstruct5D<MobiusZolotarevFermionR>(Dzolo,Umu,FGrid,FrbGrid,UGrid,UrbGrid,mass,M5,&RNG4,&RNG5);
MobiusZolotarevFermionD Dzolo(Umu,*FGrid,*FrbGrid,*UGrid,*UrbGrid,mass,M5,b,c,0.1,2.0);
TestCGinversions<MobiusZolotarevFermionD>(Dzolo,Umu,FGrid,FrbGrid,UGrid,UrbGrid,mass,M5,&RNG4,&RNG5);
TestReconstruct5D<MobiusZolotarevFermionD>(Dzolo,Umu,FGrid,FrbGrid,UGrid,UrbGrid,mass,M5,&RNG4,&RNG5);
std::cout<<GridLogMessage <<"======================"<<std::endl;
std::cout<<GridLogMessage <<"ScaledShamirFermion test"<<std::endl;
std::cout<<GridLogMessage <<"======================"<<std::endl;
ScaledShamirFermionR Dsham(Umu,*FGrid,*FrbGrid,*UGrid,*UrbGrid,mass,M5,2.0);
ScaledShamirFermionD Dsham(Umu,*FGrid,*FrbGrid,*UGrid,*UrbGrid,mass,M5,2.0);
ScaledShamirFermionF DshamF(UmuF,*FGridF,*FrbGridF,*UGridF,*UrbGridF,mass,M5,2.0);
TestCGinversions<ScaledShamirFermionR>(Dsham,Umu,FGrid,FrbGrid,UGrid,UrbGrid,mass,M5,&RNG4,&RNG5);
TestReconstruct5DFA<ScaledShamirFermionR,ScaledShamirFermionF>(Dsham,DshamF,Umu,FGrid,FrbGrid,UGrid,UrbGrid,mass,M5,&RNG4,&RNG5);
TestCGinversions<ScaledShamirFermionD>(Dsham,Umu,FGrid,FrbGrid,UGrid,UrbGrid,mass,M5,&RNG4,&RNG5);
TestReconstruct5DFA<ScaledShamirFermionD,ScaledShamirFermionF>(Dsham,DshamF,Umu,FGrid,FrbGrid,UGrid,UrbGrid,mass,M5,&RNG4,&RNG5);
std::cout<<GridLogMessage <<"======================"<<std::endl;
std::cout<<GridLogMessage <<"ShamirZolotarevFermion test"<<std::endl;
std::cout<<GridLogMessage <<"======================"<<std::endl;
ShamirZolotarevFermionR Dshamz(Umu,*FGrid,*FrbGrid,*UGrid,*UrbGrid,mass,M5,0.1,2.0);
TestCGinversions<ShamirZolotarevFermionR>(Dshamz,Umu,FGrid,FrbGrid,UGrid,UrbGrid,mass,M5,&RNG4,&RNG5);
TestReconstruct5D<ShamirZolotarevFermionR>(Dshamz,Umu,FGrid,FrbGrid,UGrid,UrbGrid,mass,M5,&RNG4,&RNG5);
ShamirZolotarevFermionD Dshamz(Umu,*FGrid,*FrbGrid,*UGrid,*UrbGrid,mass,M5,0.1,2.0);
TestCGinversions<ShamirZolotarevFermionD>(Dshamz,Umu,FGrid,FrbGrid,UGrid,UrbGrid,mass,M5,&RNG4,&RNG5);
TestReconstruct5D<ShamirZolotarevFermionD>(Dshamz,Umu,FGrid,FrbGrid,UGrid,UrbGrid,mass,M5,&RNG4,&RNG5);
std::cout<<GridLogMessage <<"======================"<<std::endl;
std::cout<<GridLogMessage <<"OverlapWilsonCayleyTanhFermion test"<<std::endl;
std::cout<<GridLogMessage <<"======================"<<std::endl;
OverlapWilsonCayleyTanhFermionR Dov(Umu,*FGrid,*FrbGrid,*UGrid,*UrbGrid,mass,M5,1.0);
OverlapWilsonCayleyTanhFermionD Dov(Umu,*FGrid,*FrbGrid,*UGrid,*UrbGrid,mass,M5,1.0);
OverlapWilsonCayleyTanhFermionF DovF(UmuF,*FGridF,*FrbGridF,*UGridF,*UrbGridF,mass,M5,1.0);
TestCGinversions<OverlapWilsonCayleyTanhFermionR>(Dov,Umu,FGrid,FrbGrid,UGrid,UrbGrid,mass,M5,&RNG4,&RNG5);
TestReconstruct5DFA<OverlapWilsonCayleyTanhFermionR,OverlapWilsonCayleyTanhFermionF>(Dov,DovF,Umu,FGrid,FrbGrid,UGrid,UrbGrid,mass,M5,&RNG4,&RNG5);
TestCGinversions<OverlapWilsonCayleyTanhFermionD>(Dov,Umu,FGrid,FrbGrid,UGrid,UrbGrid,mass,M5,&RNG4,&RNG5);
TestReconstruct5DFA<OverlapWilsonCayleyTanhFermionD,OverlapWilsonCayleyTanhFermionF>(Dov,DovF,Umu,FGrid,FrbGrid,UGrid,UrbGrid,mass,M5,&RNG4,&RNG5);
std::cout<<GridLogMessage <<"======================"<<std::endl;
std::cout<<GridLogMessage <<"OverlapWilsonCayleyZolotarevFermion test"<<std::endl;
std::cout<<GridLogMessage <<"======================"<<std::endl;
OverlapWilsonCayleyZolotarevFermionR Dovz(Umu,*FGrid,*FrbGrid,*UGrid,*UrbGrid,mass,M5,0.1,2.0);
TestCGinversions<OverlapWilsonCayleyZolotarevFermionR>(Dovz,Umu,FGrid,FrbGrid,UGrid,UrbGrid,mass,M5,&RNG4,&RNG5);
TestReconstruct5D<OverlapWilsonCayleyZolotarevFermionR>(Dovz,Umu,FGrid,FrbGrid,UGrid,UrbGrid,mass,M5,&RNG4,&RNG5);
OverlapWilsonCayleyZolotarevFermionD Dovz(Umu,*FGrid,*FrbGrid,*UGrid,*UrbGrid,mass,M5,0.1,2.0);
TestCGinversions<OverlapWilsonCayleyZolotarevFermionD>(Dovz,Umu,FGrid,FrbGrid,UGrid,UrbGrid,mass,M5,&RNG4,&RNG5);
TestReconstruct5D<OverlapWilsonCayleyZolotarevFermionD>(Dovz,Umu,FGrid,FrbGrid,UGrid,UrbGrid,mass,M5,&RNG4,&RNG5);
Grid_finalize();
}

View File

@ -95,8 +95,8 @@ int main (int argc, char ** argv)
RealD mass=0.5;
RealD M5=1.8;
DomainWallFermionR Ddwf(Umu,*FGrid,*FrbGrid,*UGrid,*UrbGrid,mass,M5);
Gamma5R5HermitianLinearOperator<DomainWallFermionR,LatticeFermion> HermIndefOp(Ddwf);
DomainWallFermionD Ddwf(Umu,*FGrid,*FrbGrid,*UGrid,*UrbGrid,mass,M5);
Gamma5R5HermitianLinearOperator<DomainWallFermionD,LatticeFermion> HermIndefOp(Ddwf);
HermIndefOp.Op(src,ref);
HermIndefOp.OpDiag(src,result);
@ -118,7 +118,7 @@ int main (int argc, char ** argv)
std::cout<<GridLogMessage<<"Calling Aggregation class" <<std::endl;
MdagMLinearOperator<DomainWallFermionR,LatticeFermion> HermDefOp(Ddwf);
MdagMLinearOperator<DomainWallFermionD,LatticeFermion> HermDefOp(Ddwf);
typedef Aggregation<vSpinColourVector,vTComplex,nbasis> Subspace;
Subspace Aggregates(Coarse5d,FGrid,cb);

View File

@ -76,41 +76,41 @@ int main (int argc, char ** argv)
RealD mass=0.1;
RealD M5 =1.8;
std::cout<<GridLogMessage <<"DomainWallFermion test"<<std::endl;
DomainWallFermionR Ddwf(Umu,*FGrid,*FrbGrid,*UGrid,*UrbGrid,mass,M5);
TestWhat<DomainWallFermionR>(Ddwf,FGrid,FrbGrid,UGrid,UrbGrid,mass,M5,&RNG4,&RNG5);
DomainWallFermionD Ddwf(Umu,*FGrid,*FrbGrid,*UGrid,*UrbGrid,mass,M5);
TestWhat<DomainWallFermionD>(Ddwf,FGrid,FrbGrid,UGrid,UrbGrid,mass,M5,&RNG4,&RNG5);
RealD b=1.5;// Scale factor b+c=2, b-c=1
RealD c=0.5;
std::vector<ComplexD> gamma(Ls,ComplexD(1.0,0.1));
std::cout<<GridLogMessage <<"MobiusFermion test"<<std::endl;
MobiusFermionR Dmob(Umu,*FGrid,*FrbGrid,*UGrid,*UrbGrid,mass,M5,b,c);
TestWhat<MobiusFermionR>(Dmob,FGrid,FrbGrid,UGrid,UrbGrid,mass,M5,&RNG4,&RNG5);
MobiusFermionD Dmob(Umu,*FGrid,*FrbGrid,*UGrid,*UrbGrid,mass,M5,b,c);
TestWhat<MobiusFermionD>(Dmob,FGrid,FrbGrid,UGrid,UrbGrid,mass,M5,&RNG4,&RNG5);
std::cout<<GridLogMessage <<"ZMobiusFermion test"<<std::endl;
ZMobiusFermionR ZDmob(Umu,*FGrid,*FrbGrid,*UGrid,*UrbGrid,mass,M5,gamma,b,c);
TestWhat<ZMobiusFermionR>(ZDmob,FGrid,FrbGrid,UGrid,UrbGrid,mass,M5,&RNG4,&RNG5);
ZMobiusFermionD ZDmob(Umu,*FGrid,*FrbGrid,*UGrid,*UrbGrid,mass,M5,gamma,b,c);
TestWhat<ZMobiusFermionD>(ZDmob,FGrid,FrbGrid,UGrid,UrbGrid,mass,M5,&RNG4,&RNG5);
std::cout<<GridLogMessage <<"MobiusZolotarevFermion test"<<std::endl;
MobiusZolotarevFermionR Dzolo(Umu,*FGrid,*FrbGrid,*UGrid,*UrbGrid,mass,M5,b,c,0.1,2.0);
TestWhat<MobiusZolotarevFermionR>(Dzolo,FGrid,FrbGrid,UGrid,UrbGrid,mass,M5,&RNG4,&RNG5);
MobiusZolotarevFermionD Dzolo(Umu,*FGrid,*FrbGrid,*UGrid,*UrbGrid,mass,M5,b,c,0.1,2.0);
TestWhat<MobiusZolotarevFermionD>(Dzolo,FGrid,FrbGrid,UGrid,UrbGrid,mass,M5,&RNG4,&RNG5);
std::cout<<GridLogMessage <<"ScaledShamirFermion test"<<std::endl;
ScaledShamirFermionR Dsham(Umu,*FGrid,*FrbGrid,*UGrid,*UrbGrid,mass,M5,2.0);
TestWhat<ScaledShamirFermionR>(Dsham,FGrid,FrbGrid,UGrid,UrbGrid,mass,M5,&RNG4,&RNG5);
ScaledShamirFermionD Dsham(Umu,*FGrid,*FrbGrid,*UGrid,*UrbGrid,mass,M5,2.0);
TestWhat<ScaledShamirFermionD>(Dsham,FGrid,FrbGrid,UGrid,UrbGrid,mass,M5,&RNG4,&RNG5);
std::cout<<GridLogMessage <<"ShamirZolotarevFermion test"<<std::endl;
ShamirZolotarevFermionR Dshamz(Umu,*FGrid,*FrbGrid,*UGrid,*UrbGrid,mass,M5,0.1,2.0);
TestWhat<ShamirZolotarevFermionR>(Dshamz,FGrid,FrbGrid,UGrid,UrbGrid,mass,M5,&RNG4,&RNG5);
ShamirZolotarevFermionD Dshamz(Umu,*FGrid,*FrbGrid,*UGrid,*UrbGrid,mass,M5,0.1,2.0);
TestWhat<ShamirZolotarevFermionD>(Dshamz,FGrid,FrbGrid,UGrid,UrbGrid,mass,M5,&RNG4,&RNG5);
std::cout<<GridLogMessage <<"OverlapWilsonCayleyTanhFermion test"<<std::endl;
OverlapWilsonCayleyTanhFermionR Dov(Umu,*FGrid,*FrbGrid,*UGrid,*UrbGrid,mass,M5,1.0);
TestWhat<OverlapWilsonCayleyTanhFermionR>(Dov,FGrid,FrbGrid,UGrid,UrbGrid,mass,M5,&RNG4,&RNG5);
OverlapWilsonCayleyTanhFermionD Dov(Umu,*FGrid,*FrbGrid,*UGrid,*UrbGrid,mass,M5,1.0);
TestWhat<OverlapWilsonCayleyTanhFermionD>(Dov,FGrid,FrbGrid,UGrid,UrbGrid,mass,M5,&RNG4,&RNG5);
std::cout<<GridLogMessage <<"OverlapWilsonCayleyZolotarevFermion test"<<std::endl;
OverlapWilsonCayleyZolotarevFermionR Dovz(Umu,*FGrid,*FrbGrid,*UGrid,*UrbGrid,mass,M5,0.1,2.0);
TestWhat<OverlapWilsonCayleyZolotarevFermionR>(Dovz,FGrid,FrbGrid,UGrid,UrbGrid,mass,M5,&RNG4,&RNG5);
OverlapWilsonCayleyZolotarevFermionD Dovz(Umu,*FGrid,*FrbGrid,*UGrid,*UrbGrid,mass,M5,0.1,2.0);
TestWhat<OverlapWilsonCayleyZolotarevFermionD>(Dovz,FGrid,FrbGrid,UGrid,UrbGrid,mass,M5,&RNG4,&RNG5);
Grid_finalize();
}

View File

@ -83,8 +83,8 @@ int main (int argc, char ** argv)
std::cout<<GridLogMessage << "**************************************************"<< std::endl;
std::cout<<GridLogMessage << "Building g5R5 hermitian DWF operator" <<std::endl;
std::cout<<GridLogMessage << "**************************************************"<< std::endl;
DomainWallFermionR Ddwf(Umu,*FGrid,*FrbGrid,*UGrid,*UrbGrid,mass,M5);
Gamma5R5HermitianLinearOperator<DomainWallFermionR,LatticeFermion> HermIndefOp(Ddwf);
DomainWallFermionD Ddwf(Umu,*FGrid,*FrbGrid,*UGrid,*UrbGrid,mass,M5);
Gamma5R5HermitianLinearOperator<DomainWallFermionD,LatticeFermion> HermIndefOp(Ddwf);
const int nbasis = 8;
@ -95,7 +95,7 @@ int main (int argc, char ** argv)
std::cout<<GridLogMessage << "**************************************************"<< std::endl;
std::cout<<GridLogMessage << "Calling Aggregation class to build subspace" <<std::endl;
std::cout<<GridLogMessage << "**************************************************"<< std::endl;
MdagMLinearOperator<DomainWallFermionR,LatticeFermion> HermDefOp(Ddwf);
MdagMLinearOperator<DomainWallFermionD,LatticeFermion> HermDefOp(Ddwf);
Subspace Aggregates(Coarse5d,FGrid,cb);
Aggregates.CreateSubspace(RNG5,HermDefOp);

View File

@ -128,8 +128,8 @@ int main (int argc, char ** argv)
std::cout<<GridLogMessage <<"======================"<<std::endl;
std::cout<<GridLogMessage <<"DomainWallFermion test"<<std::endl;
std::cout<<GridLogMessage <<"======================"<<std::endl;
DomainWallFermionR Ddwf(Umu,*FGrid,*FrbGrid,*UGrid,*UrbGrid,mass,M5);
TestConserved<DomainWallFermionR>(Ddwf,Umu,FGrid,FrbGrid,UGrid,UrbGrid,mass,M5,&RNG4,&RNG5);
DomainWallFermionD Ddwf(Umu,*FGrid,*FrbGrid,*UGrid,*UrbGrid,mass,M5);
TestConserved<DomainWallFermionD>(Ddwf,Umu,FGrid,FrbGrid,UGrid,UrbGrid,mass,M5,&RNG4,&RNG5);
RealD b=1.5;// Scale factor b+c=2, b-c=1
RealD c=0.5;
@ -138,23 +138,23 @@ int main (int argc, char ** argv)
std::cout<<GridLogMessage <<"======================"<<std::endl;
std::cout<<GridLogMessage <<"MobiusFermion test"<<std::endl;
std::cout<<GridLogMessage <<"======================"<<std::endl;
MobiusFermionR Dmob(Umu,*FGrid,*FrbGrid,*UGrid,*UrbGrid,mass,M5,b,c);
TestConserved<MobiusFermionR>(Dmob,Umu,FGrid,FrbGrid,UGrid,UrbGrid,mass,M5,&RNG4,&RNG5);
MobiusFermionD Dmob(Umu,*FGrid,*FrbGrid,*UGrid,*UrbGrid,mass,M5,b,c);
TestConserved<MobiusFermionD>(Dmob,Umu,FGrid,FrbGrid,UGrid,UrbGrid,mass,M5,&RNG4,&RNG5);
std::cout<<GridLogMessage <<"======================"<<std::endl;
std::cout<<GridLogMessage <<"ScaledShamirFermion test"<<std::endl;
std::cout<<GridLogMessage <<"======================"<<std::endl;
ScaledShamirFermionR Dsham(Umu,*FGrid,*FrbGrid,*UGrid,*UrbGrid,mass,M5,2.0);
TestConserved<ScaledShamirFermionR>(Dsham,Umu,FGrid,FrbGrid,UGrid,UrbGrid,mass,M5,&RNG4,&RNG5);
ScaledShamirFermionD Dsham(Umu,*FGrid,*FrbGrid,*UGrid,*UrbGrid,mass,M5,2.0);
TestConserved<ScaledShamirFermionD>(Dsham,Umu,FGrid,FrbGrid,UGrid,UrbGrid,mass,M5,&RNG4,&RNG5);
std::cout<<GridLogMessage <<"======================"<<std::endl;
std::cout<<GridLogMessage <<"ZMobiusFermion test"<<std::endl;
std::cout<<GridLogMessage <<"======================"<<std::endl;
for(int s=0;s<Ls;s++) omegasrev[s]=conjugate(omegas[Ls-1-s]);
// for(int s=0;s<Ls;s++) omegasrev[s]=omegas[Ls-1-s];
ZMobiusFermionR ZDmob(Umu,*FGrid,*FrbGrid,*UGrid,*UrbGrid,mass,M5,omegas,b,c);
ZMobiusFermionR ZDmobrev(Umu,*FGrid,*FrbGrid,*UGrid,*UrbGrid,mass,M5,omegasrev,b,c);
TestConserved<ZMobiusFermionR>(ZDmob,Umu,FGrid,FrbGrid,UGrid,UrbGrid,mass,M5,&RNG4,&RNG5,&ZDmobrev);
ZMobiusFermionD ZDmob(Umu,*FGrid,*FrbGrid,*UGrid,*UrbGrid,mass,M5,omegas,b,c);
ZMobiusFermionD ZDmobrev(Umu,*FGrid,*FrbGrid,*UGrid,*UrbGrid,mass,M5,omegasrev,b,c);
TestConserved<ZMobiusFermionD>(ZDmob,Umu,FGrid,FrbGrid,UGrid,UrbGrid,mass,M5,&RNG4,&RNG5,&ZDmobrev);
Grid_finalize();
}
@ -290,7 +290,7 @@ void TestConserved(Action & Ddwf,
const RealD DmuPAmu{real(TensorRemove(sumPA[t]-sumPA[(t-1+Nt)%Nt]))};
std::cout<<GridLogMessage<<" t "<<t<<" DmuPAmu "<<DmuPAmu
<<" PP "<<real(TensorRemove(sumPP[t]))<<" PJ5q "<<real(TensorRemove(sumPJ5q[t]))
<<" Ward Identity defect " <<(DmuPAmu - 2.*real(TensorRemove(Ddwf.mass*sumPP[t] + sumPJ5q[t])))<<std::endl;
<<" Ward Identity defect " <<(DmuPAmu - 2.*real(TensorRemove(Ddwf.Mass()*sumPP[t] + sumPJ5q[t])))<<std::endl;
}
///////////////////////////////
@ -539,7 +539,7 @@ void TestConserved1(Action & Ddwf, Action & Ddwfrev,
PA = trace(g5*Axial_mu);
PP = trace(adj(prop4)*prop4);
Defect = Defect - 2.0*Ddwf.mass* PP;
Defect = Defect - 2.0*Ddwf.Mass()* PP;
Defect = Defect - 2.0*PJ5q;
std::vector<TComplex> sumPAref;
@ -565,8 +565,8 @@ void TestConserved1(Action & Ddwf, Action & Ddwfrev,
std::cout <<" PAc action "<<real(TensorRemove(sumPA[t]));
std::cout <<" PJ5q ref "<<real(TensorRemove(sumPJ5qref[t]));
std::cout <<" PJ5q action "<<real(TensorRemove(sumPJ5q[t]));
std::cout <<"WTI defects "<<real(TensorRemove(sumPAref[t]-sumPAref[(t-1+Nt)%Nt] - 2.0*(Ddwf.mass*sumPP[t] + sumPJ5q[t]) ))<<",";
std::cout <<real(TensorRemove(sumPA[t]-sumPA[(t-1+Nt)%Nt] - 2.0*(Ddwf.mass*sumPP[t] + sumPJ5q[t]) ))<<"\n";
std::cout <<"WTI defects "<<real(TensorRemove(sumPAref[t]-sumPAref[(t-1+Nt)%Nt] - 2.0*(Ddwf.Mass()*sumPP[t] + sumPJ5q[t]) ))<<",";
std::cout <<real(TensorRemove(sumPA[t]-sumPA[(t-1+Nt)%Nt] - 2.0*(Ddwf.Mass()*sumPP[t] + sumPJ5q[t]) ))<<"\n";
}
}
#endif
@ -600,7 +600,7 @@ void TestConserved1(Action & Ddwf, Action & Ddwfrev,
// Dperp
{
RealD diag = 5.0 - Ddwf.M5;
mass = Ddwf.mass;
mass = Ddwf.Mass();
autoView( psi,result5,CpuRead);
autoView( chi,tmp, CpuWrite);
thread_for(sss,UGrid->oSites(),{

View File

@ -77,8 +77,8 @@ int main(int argc, char** argv)
LatticeGaugeField Umu(UGrid);
SU<Nc>::HotConfiguration(RNG4, Umu);
DomainWallEOFAFermionR Lop(Umu, *FGrid, *FrbGrid, *UGrid, *UrbGrid, mf, mf, mpv, 0.0, -1, M5);
DomainWallEOFAFermionR Rop(Umu, *FGrid, *FrbGrid, *UGrid, *UrbGrid, mpv, mf, mpv, -1.0, 1, M5);
DomainWallEOFAFermionD Lop(Umu, *FGrid, *FrbGrid, *UGrid, *UrbGrid, mf, mf, mpv, 0.0, -1, M5);
DomainWallEOFAFermionD Rop(Umu, *FGrid, *FrbGrid, *UGrid, *UrbGrid, mpv, mf, mpv, -1.0, 1, M5);
// Construct the action and test the heatbath (zero initial guess)
{

View File

@ -41,7 +41,7 @@ using namespace Grid;
;
typedef GparityWilsonImplR FermionImplPolicy;
typedef GparityDomainWallEOFAFermionR FermionAction;
typedef GparityDomainWallEOFAFermionD FermionAction;
typedef typename FermionAction::FermionField FermionField;
// Parameters for test
@ -82,7 +82,7 @@ int main(int argc, char** argv)
LatticeGaugeField Umu(UGrid);
SU<Nc>::HotConfiguration(RNG4, Umu);
// GparityDomainWallFermionR::ImplParams params;
// GparityDomainWallFermionD::ImplParams params;
FermionAction::ImplParams params;
FermionAction Lop(Umu, *FGrid, *FrbGrid, *UGrid, *UrbGrid, mf, mf, mpv, 0.0, -1, M5, params);
FermionAction Rop(Umu, *FGrid, *FrbGrid, *UGrid, *UrbGrid, mpv, mf, mpv, -1.0, 1, M5, params);

View File

@ -79,8 +79,8 @@ int main(int argc, char** argv)
LatticeGaugeField Umu(UGrid);
SU<Nc>::HotConfiguration(RNG4, Umu);
MobiusEOFAFermionR Lop(Umu, *FGrid, *FrbGrid, *UGrid, *UrbGrid, mf, mf, mpv, 0.0, -1, M5, b, c);
MobiusEOFAFermionR Rop(Umu, *FGrid, *FrbGrid, *UGrid, *UrbGrid, mpv, mf, mpv, -1.0, 1, M5, b, c);
MobiusEOFAFermionD Lop(Umu, *FGrid, *FrbGrid, *UGrid, *UrbGrid, mf, mf, mpv, 0.0, -1, M5, b, c);
MobiusEOFAFermionD Rop(Umu, *FGrid, *FrbGrid, *UGrid, *UrbGrid, mpv, mf, mpv, -1.0, 1, M5, b, c);
// Construct the action and test the heatbath (zero initial guess)
{

View File

@ -41,7 +41,7 @@ using namespace Grid;
;
typedef GparityWilsonImplR FermionImplPolicy;
typedef GparityMobiusEOFAFermionR FermionAction;
typedef GparityMobiusEOFAFermionD FermionAction;
typedef typename FermionAction::FermionField FermionField;
// Parameters for test

View File

@ -105,10 +105,10 @@ int main(int argc, char **argv)
SU<Nc>::HotConfiguration(RNG4, Umu);
// Initialize RHMC fermion operators
DomainWallFermionR Ddwf_f(Umu, *FGrid, *FrbGrid, *UGrid, *UrbGrid, mf, M5);
DomainWallFermionR Ddwf_b(Umu, *FGrid, *FrbGrid, *UGrid, *UrbGrid, mb, M5);
SchurDiagMooeeOperator<DomainWallFermionR, LatticeFermion> MdagM(Ddwf_f);
SchurDiagMooeeOperator<DomainWallFermionR, LatticeFermion> VdagV(Ddwf_b);
DomainWallFermionD Ddwf_f(Umu, *FGrid, *FrbGrid, *UGrid, *UrbGrid, mf, M5);
DomainWallFermionD Ddwf_b(Umu, *FGrid, *FrbGrid, *UGrid, *UrbGrid, mb, M5);
SchurDiagMooeeOperator<DomainWallFermionD, LatticeFermion> MdagM(Ddwf_f);
SchurDiagMooeeOperator<DomainWallFermionD, LatticeFermion> VdagV(Ddwf_b);
// Degree 12 rational approximations to x^(1/4) and x^(-1/4)
double lo = 0.0001;
@ -153,10 +153,10 @@ int main(int argc, char **argv)
RealD shift_L = 0.0;
RealD shift_R = -1.0;
int pm = 1;
DomainWallEOFAFermionR Deofa_L(Umu, *FGrid, *FrbGrid, *UGrid, *UrbGrid, mf, mf, mb, shift_L, pm, M5);
DomainWallEOFAFermionR Deofa_R(Umu, *FGrid, *FrbGrid, *UGrid, *UrbGrid, mb, mf, mb, shift_R, pm, M5);
MdagMLinearOperator<DomainWallEOFAFermionR, LatticeFermion> LdagL(Deofa_L);
MdagMLinearOperator<DomainWallEOFAFermionR, LatticeFermion> RdagR(Deofa_R);
DomainWallEOFAFermionD Deofa_L(Umu, *FGrid, *FrbGrid, *UGrid, *UrbGrid, mf, mf, mb, shift_L, pm, M5);
DomainWallEOFAFermionD Deofa_R(Umu, *FGrid, *FrbGrid, *UGrid, *UrbGrid, mb, mf, mb, shift_R, pm, M5);
MdagMLinearOperator<DomainWallEOFAFermionD, LatticeFermion> LdagL(Deofa_L);
MdagMLinearOperator<DomainWallEOFAFermionD, LatticeFermion> RdagR(Deofa_R);
// Stochastically estimate reweighting factor via EOFA
RealD k = Deofa_L.k;

View File

@ -33,7 +33,7 @@ using namespace std;
using namespace Grid;
;
typedef typename GparityDomainWallFermionR::FermionField FermionField;
typedef typename GparityDomainWallFermionD::FermionField FermionField;
// parameters for test
const std::vector<int> grid_dim = { 8, 8, 8, 8 };
@ -107,11 +107,11 @@ int main(int argc, char **argv)
SU<Nc>::HotConfiguration(RNG4, Umu);
// Initialize RHMC fermion operators
GparityDomainWallFermionR::ImplParams params;
GparityDomainWallFermionR Ddwf_f(Umu, *FGrid, *FrbGrid, *UGrid, *UrbGrid, mf, M5, params);
GparityDomainWallFermionR Ddwf_b(Umu, *FGrid, *FrbGrid, *UGrid, *UrbGrid, mb, M5, params);
SchurDiagMooeeOperator<GparityDomainWallFermionR, FermionField> MdagM(Ddwf_f);
SchurDiagMooeeOperator<GparityDomainWallFermionR, FermionField> VdagV(Ddwf_b);
GparityDomainWallFermionD::ImplParams params;
GparityDomainWallFermionD Ddwf_f(Umu, *FGrid, *FrbGrid, *UGrid, *UrbGrid, mf, M5, params);
GparityDomainWallFermionD Ddwf_b(Umu, *FGrid, *FrbGrid, *UGrid, *UrbGrid, mb, M5, params);
SchurDiagMooeeOperator<GparityDomainWallFermionD, FermionField> MdagM(Ddwf_f);
SchurDiagMooeeOperator<GparityDomainWallFermionD, FermionField> VdagV(Ddwf_b);
// Degree 12 rational approximations to x^(1/4) and x^(-1/4)
double lo = 0.0001;
@ -156,10 +156,10 @@ int main(int argc, char **argv)
RealD shift_L = 0.0;
RealD shift_R = -1.0;
int pm = 1;
GparityDomainWallEOFAFermionR Deofa_L(Umu, *FGrid, *FrbGrid, *UGrid, *UrbGrid, mf, mf, mb, shift_L, pm, M5, params);
GparityDomainWallEOFAFermionR Deofa_R(Umu, *FGrid, *FrbGrid, *UGrid, *UrbGrid, mb, mf, mb, shift_R, pm, M5, params);
MdagMLinearOperator<GparityDomainWallEOFAFermionR, FermionField> LdagL(Deofa_L);
MdagMLinearOperator<GparityDomainWallEOFAFermionR, FermionField> RdagR(Deofa_R);
GparityDomainWallEOFAFermionD Deofa_L(Umu, *FGrid, *FrbGrid, *UGrid, *UrbGrid, mf, mf, mb, shift_L, pm, M5, params);
GparityDomainWallEOFAFermionD Deofa_R(Umu, *FGrid, *FrbGrid, *UGrid, *UrbGrid, mb, mf, mb, shift_R, pm, M5, params);
MdagMLinearOperator<GparityDomainWallEOFAFermionD, FermionField> LdagL(Deofa_L);
MdagMLinearOperator<GparityDomainWallEOFAFermionD, FermionField> RdagR(Deofa_R);
// Stochastically estimate reweighting factor via EOFA
RealD k = Deofa_L.k;

View File

@ -107,10 +107,10 @@ int main(int argc, char **argv)
SU<Nc>::HotConfiguration(RNG4, Umu);
// Initialize RHMC fermion operators
MobiusFermionR Ddwf_f(Umu, *FGrid, *FrbGrid, *UGrid, *UrbGrid, mf, M5, b, c);
MobiusFermionR Ddwf_b(Umu, *FGrid, *FrbGrid, *UGrid, *UrbGrid, mb, M5, b, c);
SchurDiagMooeeOperator<MobiusFermionR, LatticeFermion> MdagM(Ddwf_f);
SchurDiagMooeeOperator<MobiusFermionR, LatticeFermion> VdagV(Ddwf_b);
MobiusFermionD Ddwf_f(Umu, *FGrid, *FrbGrid, *UGrid, *UrbGrid, mf, M5, b, c);
MobiusFermionD Ddwf_b(Umu, *FGrid, *FrbGrid, *UGrid, *UrbGrid, mb, M5, b, c);
SchurDiagMooeeOperator<MobiusFermionD, LatticeFermion> MdagM(Ddwf_f);
SchurDiagMooeeOperator<MobiusFermionD, LatticeFermion> VdagV(Ddwf_b);
// Degree 12 rational approximations to x^(1/4) and x^(-1/4)
double lo = 0.0001;
@ -155,10 +155,10 @@ int main(int argc, char **argv)
RealD shift_L = 0.0;
RealD shift_R = -1.0;
int pm = 1;
MobiusEOFAFermionR Deofa_L(Umu, *FGrid, *FrbGrid, *UGrid, *UrbGrid, mf, mf, mb, shift_L, pm, M5, b, c);
MobiusEOFAFermionR Deofa_R(Umu, *FGrid, *FrbGrid, *UGrid, *UrbGrid, mb, mf, mb, shift_R, pm, M5, b, c);
MdagMLinearOperator<MobiusEOFAFermionR, LatticeFermion> LdagL(Deofa_L);
MdagMLinearOperator<MobiusEOFAFermionR, LatticeFermion> RdagR(Deofa_R);
MobiusEOFAFermionD Deofa_L(Umu, *FGrid, *FrbGrid, *UGrid, *UrbGrid, mf, mf, mb, shift_L, pm, M5, b, c);
MobiusEOFAFermionD Deofa_R(Umu, *FGrid, *FrbGrid, *UGrid, *UrbGrid, mb, mf, mb, shift_R, pm, M5, b, c);
MdagMLinearOperator<MobiusEOFAFermionD, LatticeFermion> LdagL(Deofa_L);
MdagMLinearOperator<MobiusEOFAFermionD, LatticeFermion> RdagR(Deofa_R);
// Stochastically estimate reweighting factor via EOFA
RealD k = Deofa_L.k;

View File

@ -33,7 +33,7 @@ using namespace std;
using namespace Grid;
;
typedef typename GparityDomainWallFermionR::FermionField FermionField;
typedef typename GparityDomainWallFermionD::FermionField FermionField;
// parameters for test
const std::vector<int> grid_dim = { 8, 8, 8, 8 };
@ -109,11 +109,11 @@ int main(int argc, char **argv)
SU<Nc>::HotConfiguration(RNG4, Umu);
// Initialize RHMC fermion operators
GparityDomainWallFermionR::ImplParams params;
GparityMobiusFermionR Ddwf_f(Umu, *FGrid, *FrbGrid, *UGrid, *UrbGrid, mf, M5, b, c, params);
GparityMobiusFermionR Ddwf_b(Umu, *FGrid, *FrbGrid, *UGrid, *UrbGrid, mb, M5, b, c, params);
SchurDiagMooeeOperator<GparityMobiusFermionR, FermionField> MdagM(Ddwf_f);
SchurDiagMooeeOperator<GparityMobiusFermionR, FermionField> VdagV(Ddwf_b);
GparityDomainWallFermionD::ImplParams params;
GparityMobiusFermionD Ddwf_f(Umu, *FGrid, *FrbGrid, *UGrid, *UrbGrid, mf, M5, b, c, params);
GparityMobiusFermionD Ddwf_b(Umu, *FGrid, *FrbGrid, *UGrid, *UrbGrid, mb, M5, b, c, params);
SchurDiagMooeeOperator<GparityMobiusFermionD, FermionField> MdagM(Ddwf_f);
SchurDiagMooeeOperator<GparityMobiusFermionD, FermionField> VdagV(Ddwf_b);
// Degree 12 rational approximations to x^(1/4) and x^(-1/4)
double lo = 0.0001;
@ -158,10 +158,10 @@ int main(int argc, char **argv)
RealD shift_L = 0.0;
RealD shift_R = -1.0;
int pm = 1;
GparityMobiusEOFAFermionR Deofa_L(Umu, *FGrid, *FrbGrid, *UGrid, *UrbGrid, mf, mf, mb, shift_L, pm, M5, b, c, params);
GparityMobiusEOFAFermionR Deofa_R(Umu, *FGrid, *FrbGrid, *UGrid, *UrbGrid, mb, mf, mb, shift_R, pm, M5, b, c, params);
MdagMLinearOperator<GparityMobiusEOFAFermionR, FermionField> LdagL(Deofa_L);
MdagMLinearOperator<GparityMobiusEOFAFermionR, FermionField> RdagR(Deofa_R);
GparityMobiusEOFAFermionD Deofa_L(Umu, *FGrid, *FrbGrid, *UGrid, *UrbGrid, mf, mf, mb, shift_L, pm, M5, b, c, params);
GparityMobiusEOFAFermionD Deofa_R(Umu, *FGrid, *FrbGrid, *UGrid, *UrbGrid, mb, mf, mb, shift_R, pm, M5, b, c, params);
MdagMLinearOperator<GparityMobiusEOFAFermionD, FermionField> LdagL(Deofa_L);
MdagMLinearOperator<GparityMobiusEOFAFermionD, FermionField> RdagR(Deofa_R);
// Stochastically estimate reweighting factor via EOFA
RealD k = Deofa_L.k;

View File

@ -66,7 +66,7 @@ int main (int argc, char ** argv)
////////////////////////////////////
RealD mass=0.01;
RealD M5=1.8;
OverlapWilsonContFracTanhFermionR Dcf(U,*FGrid,*FrbGrid,*UGrid,*UrbGrid,mass,M5,1.0);
OverlapWilsonContFracTanhFermionD Dcf(U,*FGrid,*FrbGrid,*UGrid,*UrbGrid,mass,M5,1.0);
Dcf.M (phi,Mphi);
ComplexD S = innerProduct(Mphi,Mphi); // pdag MdagM p

View File

@ -0,0 +1,510 @@
/*
2f Full det MdagM 10^6 force ~ 1.3e7
Grid : Message : 1767.283471 s : +++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Grid : Message : 1767.283476 s : S1 : 1.52885e+09
Grid : Message : 1767.283480 s : S2 : 1.52886e+09
Grid : Message : 1767.283482 s : dS : 8877.34
Grid : Message : 1767.283483 s : dSpred : 8877.7
Grid : Message : 1767.283484 s : diff : -0.360484
Grid : Message : 1767.283485 s : *********************************************************
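(Reading these blocks: S1/S2 are the action before/after the trajectory, dS = S2 - S1,
dSpred is the change predicted by integrating the computed force, and diff = dS - dSpred,
so diff is the force/step-size error being tracked in the runs below.)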
2f Full det MpcdagMpc 10^6 force ~ 1.8e6
Grid : Message : 2399.576962 s : +++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Grid : Message : 2399.576968 s : S1 : 1.52885e+09
Grid : Message : 2399.576972 s : S2 : 1.52886e+09
Grid : Message : 2399.576974 s : dS : 9728.49
Grid : Message : 2399.576975 s : dSpred : 9726.58
Grid : Message : 2399.576976 s : diff : 1.90683
Grid : Message : 2399.576977 s : *********************************************************
2f bdy MdagM 1500 force Force ~ 2800
Grid : Message : 4622.385061 s : +++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Grid : Message : 4622.385067 s : S1 : 1.52885e+09
Grid : Message : 4622.385071 s : S2 : 1.52885e+09
Grid : Message : 4622.385072 s : dS : 25.4944
Grid : Message : 4622.385073 s : dSpred : 25.4672
Grid : Message : 4622.385074 s : diff : 0.0271414
Grid : Message : 4622.385075 s : *********************************************************
2f bdy MpcdagMpc 10^6 force Force ~ 2200
Grid : Message : 4622.385061 s : +++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Grid : Message : 4622.385067 s : S1 : 1.52885e+09
Grid : Message : 4622.385071 s : S2 : 1.52885e+09
Grid : Message : 4622.385072 s : dS : 25.4944
Grid : Message : 4622.385073 s : dSpred : 25.4672
Grid : Message : 4622.385074 s : diff : 0.0271414
Grid : Message : 4622.385075 s : *********************************************************
1f Bdy Det
//
// These all had tol set by OFRp, not through MDpoles
// So the assumption it was Remez might be wrong.
//
Optimisation log: looser rational AND MD tolerances sloppy
MobiusForce.221179 -- same as HMC. dS is mispredicted, Force ~2.8
Grid : Message : 6582.258991 s : dS : 0.024478
Grid : Message : 6582.258992 s : dSpred : 0.00791876
Grid : Message : 6582.258994 s : diff : 0.0165592
MobiusForce.221193 -- tight rational AND MD tolerances to 1e-8 ~ 2.8 same
Grid : Message : 1964.939209 s : S1 : 7.64404e+08
Grid : Message : 1964.939213 s : S2 : 7.64404e+08
Grid : Message : 1964.939215 s : dS : -0.00775838 <--- too loose even on action
Grid : Message : 1964.939216 s : dSpred : -0.00416793
Grid : Message : 1964.939217 s : diff : -0.00359045
MobiusForce.221394 -- tight rational, MD tol sloppy Force ~ 2.8
Grid : Message : 2376.921950 s : S1 : 764404436.44069
Grid : Message : 2376.921954 s : S2 : 764404436.43299
Grid : Message : 2376.921956 s : dS : -0.0076971054077148
Grid : Message : 2376.921958 s : dSpred : -0.0041610472282526
Grid : Message : 2376.921959 s : diff : -0.0035360581794623
MobiusForce.221587 -- slightly sloppier action, coming from tol array
-- much sloppier force
-- degree 18
1.0e-8,1.0e-8,1.0e-8,1.0e-8,
1.0e-6,3.0e-7,1.0e-7,1.0e-7, // Orig sloppy
Grid : Message : 2438.875507 s : S1 : 764404436.42251
Grid : Message : 2438.875512 s : S2 : 764404436.4148
Grid : Message : 2438.875514 s : dS : -0.0077102184295654
Grid : Message : 2438.875516 s : dSpred : -0.0075684496959103
Grid : Message : 2438.875517 s : diff : -0.00014176873365508
MobiusForce.221639 3.0e-6,1.0e-6,1.0e-7,1.0e-7, // soften convergence more
Grid : Message : 2373.927550 s : +++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Grid : Message : 2373.927600 s : S1 : 764404436.42251
Grid : Message : 2373.927640 s : S2 : 764404436.4148
Grid : Message : 2373.927660 s : dS : -0.0077102184295654
Grid : Message : 2373.927680 s : dSpred : -0.0075993463919849
Grid : Message : 2373.927690 s : diff : -0.00011087203758051
Grid : Message : 2373.927700 s : *********************************************************
Grid : Message : 69.269319 s : ApproxPowerMD shift[0] pole 9.5166866092503e-06 residue -2.0047722631555e-08 tol 3e-06
Grid : Message : 69.269321 s : ApproxPowerMD shift[1] pole 4.7123486192778e-05 residue -1.316766030683e-07 tol 1e-06
Grid : Message : 69.269323 s : ApproxPowerMD shift[2] pole 0.00014860967743736 residue -6.109883117444e-07 tol 1e-07
Grid : Message : 69.269325 s : ApproxPowerMD shift[3] pole 0.00041055696132763 residue -2.6088717433891e-06 tol 1e-07
Grid : Message : 69.269327 s : ApproxPowerMD shift[4] pole 0.0010822555692906 residue -1.0853799412802e-05 tol 1e-08
Grid : Message : 69.269329 s : ApproxPowerMD shift[5] pole 0.0028029613512087 residue -4.4741734470158e-05 tol 1e-08
Grid : Message : 69.269331 s : ApproxPowerMD shift[6] pole 0.0072103567378527 residue -0.00018380499193253 tol 1e-08
MobiusForce.221887
1.0e-5,3.0e-6,3.0e-7,1.0e-7, // soften convergence even more
// <-- this is the Dirichlet solve; why is it poorer conditioned?
Grid : Message : 1627.226206 s : ConjugateGradientMultiShift k=3643 Shift 3 has converged
Grid : Message : 1667.373045 s : ConjugateGradientMultiShift k=5381 Shift 2 has converged
Grid : Message : 1705.236992 s : ConjugateGradientMultiShift k=7063 Shift 1 has converged
Grid : Message : 1752.493182 s : ConjugateGradientMultiShift k=9220 Shift 0 has converged
//
//Grid : Message : 1414.837250 s : OneFlavourEvenOddRatioRationalPseudoFermionAction deriv: doing (M^dag M)^{-1/2} ( (V^dag V)^{1/4} Phi)
Grid : Message : 1523.416680 s : ConjugateGradientMultiShift k=3846 Shift 2 has converged
Grid : Message : 1530.798503 s : ConjugateGradientMultiShift k=4143 Shift 1 has converged
Grid : Message : 1536.153421 s : ConjugateGradientMultiShift k=4353 Shift 0 has converged <-- this is the non-Dirichlet solve
Grid : Message : 2339.927565 s : +++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Grid : Message : 2339.927571 s : S1 : 764404436.42251
Grid : Message : 2339.927575 s : S2 : 764404436.4148
Grid : Message : 2339.927577 s : dS : -0.0077102184295654
Grid : Message : 2339.927579 s : dSpred : -0.0068752425267964
Grid : Message : 2339.927580 s : diff : -0.00083497590276901
Grid : Message : 2339.927581 s : *********************************************************
Grid : Message : 2339.927582 s : Done
Grid : Message : 2339.927582 s : *********************************************************
Force 76 S {S {S {(9.0175185326468,-3.5764415623768e-36)}}}
Force 77 S {S {S {(4.1289977678493,-4.3364721285803e-37)}}}
Force 78 S {S {S {(3.2299269465841,6.0391022273495e-37)}}}
Force 79 S {S {S {(3.0051199649288,-9.6243599973575e-37)}}}
Force 80 S {S {S {(2.8924316727872,-1.3371248240604e-37)}}}
Force 81 S {S {S {(2.8270868791781,1.792628885004e-37)}}}
Force 82 S {S {S {(2.8676819960087,-1.3518185034456e-36)}}}
Force 83 S {S {S {(2.7724152154523,1.4950818774521e-37)}}}
Force 84 S {S {S {(3.0204624534964,-9.6475025423893e-36)}}}
Force 85 S {S {S {(2.8631304063459,2.2426228161781e-37)}}}
Force 86 S {S {S {(2.9025673908905,-1.3942465026706e-36)}}}
Force 87 S {S {S {(2.8553405232646,-2.0938493124022e-38)}}}
Force 88 S {S {S {(3.2820184381375,-1.422348164495e-36)}}}
Force 89 S {S {S {(3.8974980085791,1.1682209795266e-35)}}}
Force 90 S {S {S {(4.660053618223,-1.4399805797573e-37)}}}
Force 91 S {S {S {(6.7993872372366,1.4524702072348e-36)}}}
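(The "Force <t> ..." lines are DumpSliceNorm output from ForceTest(): the norm of the filtered force on each
time slice t; presumably only the band of slices around the block boundary carries a non-zero filtered force,
and only those slices are reproduced here.)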
Full solve
Grid : Message : 1523.416680 s : ConjugateGradientMultiShift k=3846 Shift 2 has converged
Grid : Message : 1530.798503 s : ConjugateGradientMultiShift k=4143 Shift 1 has converged
Grid : Message : 1536.153421 s : ConjugateGradientMultiShift k=4353 Shift 0 has converged
PV solve depth 3
Grid : Message : 1667.373045 s : ConjugateGradientMultiShift k=5381 Shift 2 has converged
Grid : Message : 1705.236992 s : ConjugateGradientMultiShift k=7063 Shift 1 has converged
Grid : Message : 1752.493182 s : ConjugateGradientMultiShift k=9220 Shift 0 has converged
MobiusForce.222490 depth 1
Grid : Message : 2155.595070 s : +++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Grid : Message : 2155.595076 s : S1 : 764404436.37475
Grid : Message : 2155.595080 s : S2 : 764404436.21131
Grid : Message : 2155.595082 s : dS : -0.16344606876373
Grid : Message : 2155.595084 s : dSpred : -0.16235663327375
Grid : Message : 2155.595085 s : diff : -0.0010894354899788
Force 4 S {S {S {(24.512489110423,-7.4203080895657e-36)}}}
Force 5 S {S {S {(14.442663101577,7.3909207307951e-37)}}}
Force 6 S {S {S {(12.298567945213,2.1989091200069e-36)}}}
Force 7 S {S {S {(11.582362859271,-2.2540104177017e-36)}}}
Force 8 S {S {S {(11.465725500906,-2.9512255045332e-36)}}}
Force 9 S {S {S {(10.869067954412,-2.8388188572358e-36)}}}
Force 10 S {S {S {(10.937111429576,-3.3530976357206e-36)}}}
Force 11 S {S {S {(11.23500117508,-1.4487967873885e-36)}}}
Force 12 S {S {S {(10.900736551834,5.1427877848475e-36)}}} Force is bigger
Force 13 S {S {S {(10.951921323651,-1.2098775605838e-35)}}}
Force 14 S {S {S {(10.676529230575,-2.50527233519e-36)}}}
Force 15 S {S {S {(10.98568474467,3.2193851533145e-36)}}}
Force 16 S {S {S {(11.931707726568,-8.5223340434616e-37)}}}
Force 17 S {S {S {(13.751904678482,7.6337337826369e-36)}}}
Force 18 S {S {S {(17.518955473833,1.8073225643893e-36)}}}
Force 19 S {S {S {(20.36519304598,-2.5184966466368e-36)}}}
Full solve
Grid : Message : 1441.297575 s : ConjugateGradientMultiShift k=3846 Shift 2 has converged
Grid : Message : 1449.206520 s : ConjugateGradientMultiShift k=4143 Shift 1 has converged
Grid : Message : 1454.352909 s : ConjugateGradientMultiShift k=4353 Shift 0 has converged
Dirichlet solve -- why so expensive??
Spectral radius worse?
Grid : Message : 1571.887003 s : ConjugateGradientMultiShift k=5195 Shift 2 has converged
Grid : Message : 1599.543760 s : ConjugateGradientMultiShift k=6508 Shift 1 has converged
Grid : Message : 1625.368198 s : ConjugateGradientMultiShift k=7819 Shift 0 has converged
dS is much bigger.
MobiusForce.223606
Grid : Message : 1123.276405 s : ConjugateGradientMultiShift k=3273 Shift 0 has converged
Grid : Message : 1125.945359 s : ConjugateGradientMultiShift k=3407 Shift 1 has converged
Grid : Message : 1127.896580 s : ConjugateGradientMultiShift k=3508 Shift 2 has converged <-- shift 2 takes longer
First (bdy) Hasenbusch mass raised to 0.005 -- reduces the Dirichlet solve cost
Force still looks OK
Grid : Message : 1510.884960 s : OneFlavourEvenOddRatioRationalPseudoFermionAction compute action: complete
Grid : Message : 1510.969380 s : +++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Grid : Message : 1510.969440 s : S1 : 764404436.37475
Grid : Message : 1510.969480 s : S2 : 764404436.17379
Grid : Message : 1510.969500 s : dS : -0.20095825195312
Grid : Message : 1510.969520 s : dSpred : -0.20025674631954
Grid : Message : 1510.969530 s : diff : -0.00070150563358654
Force 76 S {S {S {(24.161229317675,2.0147973173094e-35)}}}
Force 77 S {S {S {(15.841085162729,3.983456481349e-36)}}}
Force 78 S {S {S {(11.031761776856,9.0394046210295e-35)}}}
Force 79 S {S {S {(12.177830066719,1.583978637733e-36)}}}
Force 80 S {S {S {(9.8372072482222,6.4284847310594e-37)}}}
Force 81 S {S {S {(9.6588863493149,1.0501572656659e-35)}}}
Force 82 S {S {S {(10.623076227724,-4.4161853392455e-35)}}}
Force 83 S {S {S {(8.9477003784221,-7.067659784319e-37)}}}
Force 84 S {S {S {(9.7663166497594,-2.1014900256825e-35)}}}
Force 85 S {S {S {(8.9992648919057,-4.7107936109203e-36)}}}
Force 86 S {S {S {(9.0399987268337,6.4652189295226e-37)}}}
Force 87 S {S {S {(9.1319052497073,7.9566273871284e-37)}}}
Force 88 S {S {S {(10.094569606113,-1.263656427134e-37)}}}
Force 89 S {S {S {(11.563679905523,-1.2777623593438e-35)}}}
Force 90 S {S {S {(13.653150474463,2.9093485182852e-37)}}}
Force 91 S {S {S {(16.303719912019,2.9857556510886e-36)}}}
MobiusForce.223749
First (bdy) Hasenbusch mass raised to 0.01 -- reduces the Dirichlet solve cost
Grid : Message : 1374.472462 s : OneFlavourEvenOddRatioRationalPseudoFermionAction compute action: complete
Grid : Message : 1374.479206 s : +++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Grid : Message : 1374.479211 s : S1 : 764404436.37428
Grid : Message : 1374.479215 s : S2 : 764404436.20009
Grid : Message : 1374.479217 s : dS : -0.17418932914734
Grid : Message : 1374.479219 s : dSpred : -0.17358090105485
Grid : Message : 1374.479220 s : diff : -0.00060842809248995
Force 76 S {S {S {(27.006858541753,4.2141472476979e-36)}}}
Force 77 S {S {S {(19.388701462694,-5.1620365048422e-35)}}}
Force 78 S {S {S {(13.502424539662,-2.4038859474316e-35)}}}
Force 79 S {S {S {(15.555776987064,6.0567346426118e-36)}}}
Force 80 S {S {S {(12.752116522904,-2.3720006631655e-35)}}}
Force 81 S {S {S {(12.656857824233,1.6912424972456e-35)}}}
Force 82 S {S {S {(15.159284452724,5.0898905390605e-36)}}}
Force 83 S {S {S {(12.222695136014,-2.2061824913027e-35)}}}
Force 84 S {S {S {(12.92077598466,9.6287681011731e-36)}}}
Force 85 S {S {S {(11.884630495484,2.822655809912e-36)}}}
Force 86 S {S {S {(11.896353116174,1.0926219990893e-35)}}}
Force 87 S {S {S {(11.557019282287,2.1532117771187e-35)}}}
Force 88 S {S {S {(11.945108384613,-3.0210204816133e-36)}}}
Force 89 S {S {S {(13.295373801078,7.3115748621146e-36)}}}
Force 90 S {S {S {(15.373728471417,-7.4923071185536e-36)}}}
Force 91 S {S {S {(17.348173714234,1.0344350287236e-36)}}}
MobiusForce.223829
1.0e-5,5.0e-6,1.0e-6,1.0e-7, // soften convergence even more
Grid : Message : 1000.951387 s : ConjugateGradientMultiShift k=1881 Shift 0 has converged
Grid : Message : 1002.619542 s : ConjugateGradientMultiShift k=1960 Shift 1 has converged
Grid : Message : 1003.726982 s : ConjugateGradientMultiShift k=2014 Shift 4 has converged
Grid : Message : 1005.698741 s : ConjugateGradientMultiShift k=2113 Shift 2 has converged
Grid : Message : 1007.320875 s : ConjugateGradientMultiShift k=2197 Shift 3 has converged
Grid : Message : 1351.171259 s : S1 : 764404436.37428
Grid : Message : 1351.171263 s : S2 : 764404436.20009
Grid : Message : 1351.171265 s : dS : -0.17418932914734
Grid : Message : 1351.171266 s : dSpred : -0.1743248065338
Grid : Message : 1351.171267 s : diff : 0.00013547738646566
Force 76 S {S {S {(27.004288088317,6.035575744297e-35)}}}
Force 77 S {S {S {(19.388023720604,-6.9736202362532e-36)}}}
Force 78 S {S {S {(13.502663916173,6.4067380855692e-35)}}}
Force 79 S {S {S {(15.55135748152,1.7219522871608e-35)}}}
Force 80 S {S {S {(12.75135802213,-1.1303847551095e-35)}}}
Force 81 S {S {S {(12.655732786276,1.689773129307e-36)}}}
Force 82 S {S {S {(15.158469055699,-6.7205950772387e-35)}}}
Force 83 S {S {S {(12.222907191126,-1.6775773754173e-35)}}}
Force 84 S {S {S {(12.916025368247,-1.9641041234302e-35)}}}
Force 85 S {S {S {(11.881879452577,-2.3054382955502e-36)}}}
Force 86 S {S {S {(11.897253557199,-3.3617669065579e-35)}}}
Force 87 S {S {S {(11.55717723524,-1.8690360178074e-36)}}}
Force 88 S {S {S {(11.945590605851,-6.7208889508264e-36)}}}
Force 89 S {S {S {(13.298173932749,-1.0322309768158e-35)}}}
Force 90 S {S {S {(15.373845416836,7.4158999857501e-36)}}}
Force 91 S {S {S {(17.348058307158,-1.8514036025451e-36)}}}
-- Could make the stopping condition mandatory once shift 0 has converged:
-- would save ~20% of the iterations and leave a single tunable tolerance.
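--
-- A rough sketch of that idea (hypothetical helper names, not the current
-- ConjugateGradientMultiShift interface): iterate as usual, but treat the shift-0 tolerance
-- as the single stopping condition and freeze the other shifts wherever they have got to:
--
--   for(int k=0; k<MaxIterations; k++){
--     MultiShiftUpdate(k);          // standard multishift CG update of every shift
--     if( ResidualBelowTol(0) ){    // one tunable: the shift-0 target residual
--       break;                      // remaining shifts keep their current residuals
--     }
--   }
--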
*/
//
/*************************************************************************************
Grid physics library, www.github.com/paboyle/Grid
Source file: ./tests/Test_double_ratio.cc
Copyright (C) 2022
Author: Peter Boyle <pboyle@bnl.gov>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */
#include <Grid/Grid.h>
using namespace std;
using namespace Grid;
typedef MobiusFermionD FermionAction;
typedef WilsonImplD FimplD;
typedef WilsonImplD FermionImplPolicy;
template<class Gimpl>
void ForceTest(Action<LatticeGaugeField> &action,LatticeGaugeField & U,MomentumFilterBase<LatticeGaugeField> &Filter)
{
GridBase *UGrid = U.Grid();
std::vector<int> seeds({1,2,3,5});
GridSerialRNG sRNG; sRNG.SeedFixedIntegers(seeds);
GridParallelRNG RNG4(UGrid); RNG4.SeedFixedIntegers(seeds);
LatticeColourMatrix Pmu(UGrid);
LatticeGaugeField P(UGrid);
LatticeGaugeField UdSdU(UGrid);
std::cout << GridLogMessage << "*********************************************************"<<std::endl;
std::cout << GridLogMessage << " Force test for "<<action.action_name()<<std::endl;
std::cout << GridLogMessage << "*********************************************************"<<std::endl;
RealD eps=0.005;
std::cout << GridLogMessage << "+++++++++++++++++++++++++++++++++++++++++++++++++++++++++"<<std::endl;
std::cout << GridLogMessage << " Refresh "<<action.action_name()<<std::endl;
std::cout << GridLogMessage << "+++++++++++++++++++++++++++++++++++++++++++++++++++++++++"<<std::endl;
Gimpl::generate_momenta(P,sRNG,RNG4);
Filter.applyFilter(P);
FieldMetaData header;
std::string file("./ckpoint_lat.2000");
NerscIO::readConfiguration(U,header,file);
action.refresh(U,sRNG,RNG4);
std::cout << GridLogMessage << "+++++++++++++++++++++++++++++++++++++++++++++++++++++++++"<<std::endl;
std::cout << GridLogMessage << " Action "<<action.action_name()<<std::endl;
std::cout << GridLogMessage << "+++++++++++++++++++++++++++++++++++++++++++++++++++++++++"<<std::endl;
RealD S1 = action.S(U);
Gimpl::update_field(P,U,eps);
std::cout << GridLogMessage << "+++++++++++++++++++++++++++++++++++++++++++++++++++++++++"<<std::endl;
std::cout << GridLogMessage << " Derivative "<<action.action_name()<<std::endl;
std::cout << GridLogMessage << "+++++++++++++++++++++++++++++++++++++++++++++++++++++++++"<<std::endl;
action.deriv(U,UdSdU);
UdSdU = Ta(UdSdU);
Filter.applyFilter(UdSdU);
DumpSliceNorm("Force",UdSdU,Nd-1);
Gimpl::update_field(P,U,eps);
std::cout << GridLogMessage << "+++++++++++++++++++++++++++++++++++++++++++++++++++++++++"<<std::endl;
std::cout << GridLogMessage << " Action "<<action.action_name()<<std::endl;
std::cout << GridLogMessage << "+++++++++++++++++++++++++++++++++++++++++++++++++++++++++"<<std::endl;
RealD S2 = action.S(U);
// Use the derivative
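// Predicted change: dSpred = -2*2*eps * sum_{x,mu} tr( P_mu(x) * Ta(U dS/dU)_mu(x) ).
// One factor of 2 accounts for the two update_field(P,U,eps) calls above (the derivative is
// sampled between them, i.e. at the midpoint of the total 2*eps update); the other is presumed
// to come from the Ta()/trace normalisation of the force.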
LatticeComplex dS(UGrid); dS = Zero();
for(int mu=0;mu<Nd;mu++){
auto UdSdUmu = PeekIndex<LorentzIndex>(UdSdU,mu);
Pmu= PeekIndex<LorentzIndex>(P,mu);
dS = dS - trace(Pmu*UdSdUmu)*eps*2.0*2.0;
}
ComplexD dSpred = sum(dS);
RealD diff = S2-S1-dSpred.real();
std::cout<< GridLogMessage << "+++++++++++++++++++++++++++++++++++++++++++++++++++++++++"<<std::endl;
std::cout<< GridLogMessage << "S1 : "<< S1 <<std::endl;
std::cout<< GridLogMessage << "S2 : "<< S2 <<std::endl;
std::cout<< GridLogMessage << "dS : "<< S2-S1 <<std::endl;
std::cout<< GridLogMessage << "dSpred : "<< dSpred.real() <<std::endl;
std::cout<< GridLogMessage << "diff : "<< diff<<std::endl;
std::cout<< GridLogMessage << "*********************************************************"<<std::endl;
// assert(diff<1.0);
std::cout<< GridLogMessage << "Done" <<std::endl;
std::cout << GridLogMessage << "*********************************************************"<<std::endl;
}
int main (int argc, char ** argv)
{
Grid_init(&argc,&argv);
std::cout << std::setprecision(14);
Coordinate latt_size = GridDefaultLatt();
Coordinate mpi_layout = GridDefaultMpi();
Coordinate simd_layout = GridDefaultSimd(Nd,vComplex::Nsimd());
Coordinate shm;
GlobalSharedMemory::GetShmDims(mpi_layout,shm);
const int Ls=12;
const int Nt = latt_size[3];
GridCartesian * UGrid = SpaceTimeGrid::makeFourDimGrid(GridDefaultLatt(), GridDefaultSimd(Nd,vComplex::Nsimd()),GridDefaultMpi());
GridRedBlackCartesian * UrbGrid = SpaceTimeGrid::makeFourDimRedBlackGrid(UGrid);
GridCartesian * FGrid = SpaceTimeGrid::makeFiveDimGrid(Ls,UGrid);
GridRedBlackCartesian * FrbGrid = SpaceTimeGrid::makeFiveDimRedBlackGrid(Ls,UGrid);
////////////////////////////////////////////////////////////////
// Domain decomposed operator
////////////////////////////////////////////////////////////////
Coordinate CommDim(Nd);
for(int d=0;d<Nd;d++) CommDim[d]= (mpi_layout[d]/shm[d])>1 ? 1 : 0;
Coordinate NonDirichlet(Nd+1,0);
Coordinate Dirichlet(Nd+1,0);
Dirichlet[1] = CommDim[0]*latt_size[0]/mpi_layout[0] * shm[0];
Dirichlet[2] = CommDim[1]*latt_size[1]/mpi_layout[1] * shm[1];
Dirichlet[3] = CommDim[2]*latt_size[2]/mpi_layout[2] * shm[2];
Dirichlet[4] = CommDim[3]*latt_size[3]/mpi_layout[3] * shm[3];
Coordinate Block4(Nd);
Block4[0] = Dirichlet[1];
Block4[1] = Dirichlet[2];
Block4[2] = Dirichlet[3];
Block4[3] = Dirichlet[4];
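// Each Dirichlet[d+1] is the sub-lattice extent covered by one shared-memory node group in
// direction d (local extent times shm[d]), or zero when there is no inter-node communication
// in that direction; Block4 copies these extents to define the 4d block used by the
// domain-decomposed (boundary) determinants below.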
std::vector<Complex> boundary = {1,1,1,-1};
FermionAction::ImplParams Params(boundary);
FermionAction::ImplParams ParamsDir(boundary);
Params.dirichlet=NonDirichlet;
ParamsDir.dirichlet=Dirichlet;
ParamsDir.partialDirichlet=1;
///////////////////// Gauge Field and Gauge Forces ////////////////////////////
LatticeGaugeField U(UGrid);
RealD beta=6.0;
WilsonGaugeActionR PlaqAction(beta);
IwasakiGaugeActionR RectAction(beta);
MomentumFilterNone<LatticeGaugeField> FilterNone;
ForceTest<GimplTypesR>(PlaqAction,U,FilterNone);
ForceTest<GimplTypesR>(RectAction,U,FilterNone);
////////////////////////////////////
// Action
////////////////////////////////////
RealD mass=0.00078;
RealD dmass=0.01;
RealD pvmass=1.0;
RealD M5=1.8;
RealD b=1.5;
RealD c=0.5;
// Double versions
FermionAction DdwfPeriodic(U,*FGrid,*FrbGrid,*UGrid,*UrbGrid,mass,M5,b,c,Params);
FermionAction PVPeriodic (U,*FGrid,*FrbGrid,*UGrid,*UrbGrid,pvmass,M5,b,c,Params);
FermionAction DdwfDirichlet(U,*FGrid,*FrbGrid,*UGrid,*UrbGrid,dmass,M5,b,c,ParamsDir);
double StoppingCondition = 1.0e-8;
double MaxCGIterations = 50000;
ConjugateGradient<LatticeFermion> CG(StoppingCondition,MaxCGIterations);
//////////////////// Two Flavour Determinant Ratio ///////////////////////////////
TwoFlavourRatioPseudoFermionAction<FimplD> Nf2(PVPeriodic, DdwfPeriodic,CG,CG);
// ForceTest<GimplTypesR>(Nf2,U,FilterNone);
//////////////////// Two Flavour Determinant force test Even Odd ///////////////////////////////
TwoFlavourEvenOddRatioPseudoFermionAction<FimplD> Nf2eo(PVPeriodic, DdwfPeriodic,CG,CG);
// ForceTest<GimplTypesR>(Nf2eo,U,FilterNone);
//////////////////// Domain forces ////////////////////
int Width=4;
DDHMCFilter<WilsonImplD::Field> DDHMCFilter(Block4,Width);
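// The DDHMC filter applies the Block4 mask with boundary width 4 to both the momenta and the
// force (Filter.applyFilter(P) and Filter.applyFilter(UdSdU) in ForceTest), so only the degrees
// of freedom kept by the domain-decomposed update enter the boundary-determinant force tests.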
//////////////////// Two flavour boundary det ////////////////////
TwoFlavourRatioPseudoFermionAction<FimplD> BdyNf2(DdwfDirichlet, DdwfPeriodic,CG,CG);
// ForceTest<GimplTypesR>(BdyNf2,U,DDHMCFilter);
//////////////////// Two flavour eo boundary det ////////////////////
TwoFlavourEvenOddRatioPseudoFermionAction<FimplD> BdyNf2eo(DdwfDirichlet, DdwfPeriodic,CG,CG);
// ForceTest<GimplTypesR>(BdyNf2eo,U,DDHMCFilter);
//////////////////// One flavour boundary det ////////////////////
OneFlavourRationalParams OFRp; // Up/down
OFRp.lo = 4.0e-5;
OFRp.hi = 90.0;
OFRp.MaxIter = 60000;
OFRp.tolerance= 1.0e-9;
OFRp.mdtolerance= 1.0e-8;
OFRp.degree = 18;
OFRp.precision= 80;
OFRp.BoundsCheckFreq=0;
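// Rational approximation setup: [lo,hi] is the assumed spectral window, approximated by a
// degree-18 Remez fit generated at precision=80 (the working precision of the Remez
// construction); tolerance and mdtolerance are the default CG targets for the action and MD
// evaluations, overridden pole by pole through SetTolerances() below.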
std::vector<RealD> ActionTolByPole({
1.0e-8,1.0e-8,1.0e-8,1.0e-8,
1.0e-8,1.0e-8,1.0e-8,1.0e-8,
1.0e-8,1.0e-8,1.0e-8,1.0e-8,
1.0e-8,1.0e-8,1.0e-8,1.0e-8,
1.0e-8,1.0e-8
});
std::vector<RealD> MDTolByPole({
1.0e-5,5.0e-6,1.0e-6,1.0e-7, // soften convergence even more
// 3.0e-6,1.0e-6,1.0e-7,1.0e-7, // soften convergence more
// 1.0e-6,3.0e-7,1.0e-7,1.0e-7, // Orig sloppy
// 1.0e-8,1.0e-8,1.0e-8,1.0e-8,
1.0e-8,1.0e-8,1.0e-8,1.0e-8,
1.0e-8,1.0e-8,1.0e-8,1.0e-8,
1.0e-8,1.0e-8,1.0e-8,1.0e-8,
1.0e-8,1.0e-8
});
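// Only the first four poles (the smallest shifts -- compare the ApproxPowerMD tolerances logged
// in the notes above) are loosened for the MD force; the remaining poles keep the tight 1.0e-8
// target.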
OneFlavourEvenOddRatioRationalPseudoFermionAction<FermionImplPolicy> BdySqrt(DdwfDirichlet,DdwfPeriodic,OFRp);
BdySqrt.SetTolerances(ActionTolByPole,MDTolByPole);
ForceTest<GimplTypesR>(BdySqrt,U,DDHMCFilter);
Grid_finalize();
}
View File
@ -67,7 +67,7 @@ int main (int argc, char ** argv)
////////////////////////////////////
RealD mass=0.01;
RealD M5=1.8;
DomainWallFermionR Ddwf(U,*FGrid,*FrbGrid,*UGrid,*UrbGrid,mass,M5);
DomainWallFermionD Ddwf(U,*FGrid,*FrbGrid,*UGrid,*UrbGrid,mass,M5);
Ddwf.M (phi,Mphi);
ComplexD S = innerProduct(Mphi,Mphi); // pdag MdagM p
View File
@ -80,8 +80,8 @@ int main (int argc, char** argv)
RealD mf = 0.01;
RealD mb = 1.0;
RealD M5 = 1.8;
DomainWallEOFAFermionR Lop(U, *FGrid, *FrbGrid, *UGrid, *UrbGrid, mf, mf, mb, 0.0, -1, M5);
DomainWallEOFAFermionR Rop(U, *FGrid, *FrbGrid, *UGrid, *UrbGrid, mb, mf, mb, -1.0, 1, M5);
DomainWallEOFAFermionD Lop(U, *FGrid, *FrbGrid, *UGrid, *UrbGrid, mf, mf, mb, 0.0, -1, M5);
DomainWallEOFAFermionD Rop(U, *FGrid, *FrbGrid, *UGrid, *UrbGrid, mb, mf, mb, -1.0, 1, M5);
OneFlavourRationalParams Params(0.95, 100.0, 5000, 1.0e-12, 12);
ConjugateGradient<LatticeFermion> CG(1.0e-12, 5000);
ExactOneFlavourRatioPseudoFermionAction<WilsonImplR> Meofa(Lop, Rop, CG, CG, CG, CG, CG, Params, true);
View File
@ -47,7 +47,7 @@ int main (int argc, char ** argv)
GridCartesian * FGrid = SpaceTimeGrid::makeFiveDimGrid(Ls,UGrid);
GridRedBlackCartesian * FrbGrid = SpaceTimeGrid::makeFiveDimRedBlackGrid(Ls,UGrid);
typedef typename GparityDomainWallFermionR::FermionField FermionField;
typedef typename GparityDomainWallFermionD::FermionField FermionField;
int threads = GridThread::GetThreads();
std::cout<<GridLogMessage << "Grid is setup to use "<<threads<<" threads"<<std::endl;
@ -76,10 +76,10 @@ int main (int argc, char ** argv)
std::vector<int> twists(Nd,0);
twists[nu] = 1;
twists[Nd-1] = 1; //antiperiodic in time
GparityDomainWallFermionR::ImplParams params;
GparityDomainWallFermionD::ImplParams params;
params.twists = twists;
GparityDomainWallFermionR Dw(U,*FGrid,*FrbGrid,*UGrid,*UrbGrid,mass,M5,params);
GparityDomainWallFermionD Dw(U,*FGrid,*FrbGrid,*UGrid,*UrbGrid,mass,M5,params);
Dw.M (phi,Mphi);
View File
@ -33,7 +33,7 @@ using namespace std;
using namespace Grid;
typedef GparityWilsonImplR FermionImplPolicy;
typedef GparityDomainWallEOFAFermionR FermionAction;
typedef GparityDomainWallEOFAFermionD FermionAction;
typedef typename FermionAction::FermionField FermionField;
int main (int argc, char** argv)
View File
@ -56,7 +56,7 @@ int main (int argc, char ** argv)
int threads = GridThread::GetThreads();
std::cout<<GridLogMessage << "Grid is setup to use "<<threads<<" threads"<<std::endl;
typedef typename GparityDomainWallFermionR::FermionField FermionField;
typedef typename GparityDomainWallFermionD::FermionField FermionField;
FermionField phi (FGrid); gaussian(RNG5,phi);
FermionField Mphi (FGrid);
FermionField MphiPrime (FGrid);
@ -75,8 +75,8 @@ int main (int argc, char ** argv)
std::vector<int> twists(Nd,0);
twists[nu] = 1;
twists[3] = 1;
GparityDomainWallFermionR::ImplParams params; params.twists = twists;
GparityDomainWallFermionR Ddwf(U,*FGrid,*FrbGrid,*UGrid,*UrbGrid,mass,M5,params);
GparityDomainWallFermionD::ImplParams params; params.twists = twists;
GparityDomainWallFermionD Ddwf(U,*FGrid,*FrbGrid,*UGrid,*UrbGrid,mass,M5,params);
Ddwf.M (phi,Mphi);
ComplexD S = innerProduct(Mphi,Mphi); // pdag MdagM p
View File
@ -70,8 +70,8 @@ void convertFermion1f_from_2f(FermionField1f &out_1f, const FermionField2f &in_2
int nuoff = is_4d ? 0 : 1; //s in 0 direction
int L_2f = FGrid_2f->FullDimensions()[nu+nuoff];
int L_1f = FGrid_1f->FullDimensions()[nu+nuoff];
Integer L_2f = FGrid_2f->FullDimensions()[nu+nuoff];
Integer L_1f = FGrid_1f->FullDimensions()[nu+nuoff];
assert(L_1f == 2 * L_2f);
auto in_f0_2fgrid = PeekIndex<GparityFlavourIndex>(in_2f,0); //flavor 0 on 2f Grid
View File
@ -50,7 +50,7 @@ int main (int argc, char ** argv)
int threads = GridThread::GetThreads();
std::cout<<GridLogMessage << "Grid is setup to use "<<threads<<" threads"<<std::endl;
typedef typename GparityDomainWallFermionR::FermionField FermionField;
typedef typename GparityDomainWallFermionD::FermionField FermionField;
FermionField phi (FGrid); gaussian(RNG4,phi);
FermionField Mphi (FGrid);
FermionField MphiPrime (FGrid);
@ -70,8 +70,8 @@ int main (int argc, char ** argv)
std::vector<int> twists(Nd,0);
twists[nu] = 1;
twists[3]=1;
GparityWilsonFermionR::ImplParams params; params.twists = twists;
GparityWilsonFermionR Wil(U,*UGrid,*UrbGrid,mass,params);
GparityWilsonFermionD::ImplParams params; params.twists = twists;
GparityWilsonFermionD Wil(U,*UGrid,*UrbGrid,mass,params);
Wil.M (phi,Mphi);
ComplexD S = innerProduct(Mphi,Mphi); // pdag MdagM p
View File
@ -76,7 +76,7 @@ int main (int argc, char ** argv)
p.boundary_phases[2] = 1.0;
p.boundary_phases[3] =- 1.0;
MobiusFermionR Ddwf(U,*FGrid,*FrbGrid,*UGrid,*UrbGrid,mass,M5,b,c,p);
MobiusFermionD Ddwf(U,*FGrid,*FrbGrid,*UGrid,*UrbGrid,mass,M5,b,c,p);
Ddwf.M (phi,Mphi);
ComplexD S = innerProduct(Mphi,Mphi); // pdag MdagM p
View File
@ -82,8 +82,8 @@ int main (int argc, char** argv)
RealD mf = 0.01;
RealD mb = 1.0;
RealD M5 = 1.8;
MobiusEOFAFermionR Lop(U, *FGrid, *FrbGrid, *UGrid, *UrbGrid, mf, mf, mb, 0.0, -1, M5, b, c);
MobiusEOFAFermionR Rop(U, *FGrid, *FrbGrid, *UGrid, *UrbGrid, mb, mf, mb, -1.0, 1, M5, b, c);
MobiusEOFAFermionD Lop(U, *FGrid, *FrbGrid, *UGrid, *UrbGrid, mf, mf, mb, 0.0, -1, M5, b, c);
MobiusEOFAFermionD Rop(U, *FGrid, *FrbGrid, *UGrid, *UrbGrid, mb, mf, mb, -1.0, 1, M5, b, c);
OneFlavourRationalParams Params(0.95, 100.0, 5000, 1.0e-12, 12);
ConjugateGradient<LatticeFermion> CG(1.0e-12, 5000);
ExactOneFlavourRatioPseudoFermionAction<WilsonImplR> Meofa(Lop, Rop, CG, CG, CG, CG, CG, Params, false);
View File
@ -34,7 +34,7 @@ using namespace Grid;
;
typedef GparityWilsonImplR FermionImplPolicy;
typedef GparityMobiusEOFAFermionR FermionAction;
typedef GparityMobiusEOFAFermionD FermionAction;
typedef typename FermionAction::FermionField FermionField;
int main (int argc, char** argv)
View File
@ -69,7 +69,7 @@ int main (int argc, char ** argv)
////////////////////////////////////
RealD mass=0.01;
RealD M5=1.8;
OverlapWilsonPartialFractionTanhFermionR Dpf(U,*FGrid,*FrbGrid,*UGrid,*UrbGrid,mass,M5,1.0);
OverlapWilsonPartialFractionTanhFermionD Dpf(U,*FGrid,*FrbGrid,*UGrid,*UrbGrid,mass,M5,1.0);
Dpf.M (phi,Mphi);
ComplexD S = innerProduct(Mphi,Mphi); // pdag MdagM p
View File
@ -67,7 +67,7 @@ int main (int argc, char ** argv)
// Unmodified matrix element
////////////////////////////////////
RealD mass=-4.0; //kills the diagonal term
WilsonFermionR Dw (U, Grid,RBGrid,mass);
WilsonFermionD Dw (U, Grid,RBGrid,mass);
Dw.M (phi,Mphi);
ComplexD S = innerProduct(Mphi,Mphi); // pdag MdagM p
View File
@ -70,7 +70,7 @@ int main(int argc, char **argv)
////////////////////////////////////
RealD mass = 0.1;
Real csw = 1.0;
WilsonCloverFermionR Dw(U, Grid, RBGrid, mass, csw, csw);
WilsonCloverFermionD Dw(U, Grid, RBGrid, mass, csw, csw);
Dw.ImportGauge(U);
Dw.M(phi, Mphi);
ComplexD S = innerProduct(Mphi, Mphi); // Action : pdag MdagM p
View File
@ -81,7 +81,7 @@ int main (int argc, char ** argv)
omegas.push_back( std::complex<double>(0.0686324988446592,0.0550658530827402) );
omegas.push_back( std::complex<double>(0.0686324988446592,-0.0550658530827402) );
ZMobiusFermionR Ddwf(U, *FGrid, *FrbGrid, *UGrid, *UrbGrid, mass, M5, omegas,b,c);
ZMobiusFermionD Ddwf(U, *FGrid, *FrbGrid, *UGrid, *UrbGrid, mass, M5, omegas,b,c);
Ddwf.M (phi,Mphi);
View File
@ -59,7 +59,7 @@ void copy2fTo1fFermionField(FermionField1f &out, const FermionField2f &in, int g
LatticeInteger xcoor_1f(out.Grid()); //5d lattice integer
LatticeCoordinate(xcoor_1f,gpdir);
int L = dim_2f[gpdir];
Integer L = dim_2f[gpdir];
out = where(xcoor_1f < L, f0_fullgrid_dbl, f1_fullgrid_dbl);
}
@ -76,7 +76,7 @@ void copy2fTo1fGaugeField(LatticeGaugeField &out, const LatticeGaugeField &in, i
LatticeInteger xcoor_1f(out.Grid());
LatticeCoordinate(xcoor_1f,gpdir);
int L = dim_2f[gpdir];
Integer L = dim_2f[gpdir];
out = where(xcoor_1f < L, U_dbl, Uconj_dbl);
}
@ -140,11 +140,11 @@ int main(int argc, char **argv) {
copy2fTo1fGaugeField(Umu_1f, Umu_2f, mu);
typedef GparityWilsonImplR FermionImplPolicy2f;
typedef GparityDomainWallFermionR FermionAction2f;
typedef GparityDomainWallFermionD FermionAction2f;
typedef typename FermionAction2f::FermionField FermionField2f;
typedef WilsonImplR FermionImplPolicy1f;
typedef DomainWallFermionR FermionAction1f;
typedef DomainWallFermionD FermionAction1f;
typedef typename FermionAction1f::FermionField FermionField1f;
std::cout << "Generating eta 2f" << std::endl;
View File
@ -43,7 +43,7 @@ int main(int argc, char **argv) {
// Typedefs to simplify notation
typedef GenericHMCRunner<MinimumNorm2> HMCWrapper; // Uses the default minimum norm
typedef WilsonImplR FermionImplPolicy;
typedef DomainWallFermionR FermionAction;
typedef DomainWallFermionD FermionAction;
typedef typename FermionAction::FermionField FermionField;
@ -136,16 +136,9 @@ int main(int argc, char **argv) {
TheHMC.ReadCommandLine(argc, argv); // these can be parameters from file
// Reset performance counters
NumOp.ZeroCounters();
DenOp.ZeroCounters();
TheHMC.Run(); // no smearing
// TheHMC.Run(SmearingPolicy); // for smearing
std::cout << GridLogMessage << "Numerator report, Pauli-Villars term : " << std::endl;
NumOp.Report();
std::cout << GridLogMessage << "Denominator report, Dw(m) term (includes CG) : " << std::endl;
DenOp.Report();
Grid_finalize();
} // main
View File
@ -42,7 +42,7 @@ int main(int argc, char **argv) {
typedef ConjugateHMCRunner<MinimumNorm2> HMCWrapper; // Uses the default minimum norm
typedef GparityWilsonImplR FermionImplPolicy;
typedef GparityDomainWallFermionR FermionAction;
typedef GparityDomainWallFermionD FermionAction;
typedef typename FermionAction::FermionField FermionField;
@ -132,15 +132,9 @@ int main(int argc, char **argv) {
TheHMC.ReadCommandLine(argc, argv); // these can be parameters from file
// Reset performance counters
NumOp.ZeroCounters();
DenOp.ZeroCounters();
TheHMC.Run(); // no smearing
// TheHMC.Run(SmearingPolicy); // for smearing
std::cout << GridLogMessage << "Numerator report, Pauli-Villars term : " << std::endl;
NumOp.Report();
std::cout << GridLogMessage << "Denominator report, Dw(m) term (includes CG) : " << std::endl;
DenOp.Report();
Grid_finalize();
View File
@ -83,7 +83,7 @@ int main(int argc, char **argv) {
// Typedefs to simplify notation
typedef GenericHMCRunner<MinimumNorm2> HMCWrapper; // Uses the default minimum norm
typedef WilsonImplR FermionImplPolicy;
typedef MobiusFermionR FermionAction;
typedef MobiusFermionD FermionAction;
typedef typename FermionAction::FermionField FermionField;
// Serialiser
typedef Grid::XmlReader Serialiser;
@ -211,8 +211,6 @@ int main(int argc, char **argv) {
*/
// Reset performance counters
NumOp.ZeroCounters();
DenOp.ZeroCounters();
if (ApplySmearing){
SmearingParameters SmPar(Reader);
@ -225,11 +223,6 @@ int main(int argc, char **argv) {
TheHMC.Run(); // no smearing
}
std::cout << GridLogMessage << "Numerator report, Pauli-Villars term : " << std::endl;
NumOp.Report();
std::cout << GridLogMessage << "Denominator report, Dw(m) term (includes CG) : " << std::endl;
DenOp.Report();
Grid_finalize();
} // main
View File
@ -89,7 +89,7 @@ int main(int argc, char **argv) {
// Typedefs to simplify notation
typedef GenericHMCRunner<MinimumNorm2> HMCWrapper; // Uses the default minimum norm
typedef WilsonImplR FermionImplPolicy;
typedef MobiusFermionR FermionAction;
typedef MobiusFermionD FermionAction;
typedef typename FermionAction::FermionField FermionField;
// Serialiser
typedef Grid::XmlReader Serialiser;
@ -226,8 +226,6 @@ int main(int argc, char **argv) {
*/
// Reset performance counters
NumOp.ZeroCounters();
DenOp.ZeroCounters();
if (ApplySmearing){
SmearingParameters SmPar(Reader);
@ -240,10 +238,6 @@ int main(int argc, char **argv) {
TheHMC.Run(); // no smearing
}
std::cout << GridLogMessage << "Numerator report, Pauli-Villars term : " << std::endl;
NumOp.Report();
std::cout << GridLogMessage << "Denominator report, Dw(m) term (includes CG) : " << std::endl;
DenOp.Report();
Grid_finalize();
} // main
View File
@ -39,7 +39,7 @@ int main(int argc, char **argv) {
// Typedefs to simplify notation
typedef GenericHMCRunner<MinimumNorm2> HMCWrapper; // Uses the default minimum norm
typedef WilsonImplR FermionImplPolicy;
typedef WilsonCloverFermionR FermionAction;
typedef WilsonCloverFermionD FermionAction;
typedef typename FermionAction::FermionField FermionField;
View File
@ -40,7 +40,7 @@ int main(int argc, char **argv) {
// Typedefs to simplify notation
typedef GenericHMCRunner<MinimumNorm2> HMCWrapper; // Uses the default minimum norm
typedef WilsonImplR FermionImplPolicy;
typedef WilsonFermionR FermionAction;
typedef WilsonFermionD FermionAction;
typedef typename FermionAction::FermionField FermionField;
View File
@ -42,7 +42,7 @@ int main(int argc, char **argv) {
// Typedefs to simplify notation
typedef GenericHMCRunner<MinimumNorm2> HMCWrapper; // Uses the default minimum norm
typedef WilsonImplR FermionImplPolicy;
typedef WilsonFermionR FermionAction;
typedef WilsonFermionD FermionAction;
typedef typename FermionAction::FermionField FermionField;
View File
@ -41,7 +41,7 @@ int main(int argc, char **argv) {
typedef ConjugateHMCRunner<MinimumNorm2> HMCWrapper; // Uses the default minimum norm
typedef GparityWilsonImplR FermionImplPolicy;
typedef GparityDomainWallFermionR FermionAction;
typedef GparityDomainWallFermionD FermionAction;
typedef typename FermionAction::FermionField FermionField;
View File
@ -42,7 +42,7 @@ int main(int argc, char **argv) {
typedef ConjugateHMCRunner<MinimumNorm2> HMCWrapper; // Uses the default minimum norm
typedef GparityWilsonImplR FermionImplPolicy;
typedef GparityDomainWallFermionR FermionAction;
typedef GparityDomainWallFermionD FermionAction;
typedef typename FermionAction::FermionField FermionField;
View File
@ -39,7 +39,7 @@ int main(int argc, char **argv) {
// Typedefs to simplify notation
typedef WilsonImplR FermionImplPolicy;
typedef MobiusFermionR FermionAction;
typedef MobiusFermionD FermionAction;
typedef typename FermionAction::FermionField FermionField;
typedef Grid::XmlReader Serialiser;
@ -148,14 +148,14 @@ int main(int argc, char **argv) {
// Level1.push_back(&StrangePseudoFermion);
// DJM: setup for EOFA ratio (Shamir)
// DomainWallEOFAFermionR Strange_Op_L(U, *FGrid, *FrbGrid, *GridPtr, *GridRBPtr, strange_mass, strange_mass, pv_mass, 0.0, -1, M5);
// DomainWallEOFAFermionR Strange_Op_R(U, *FGrid, *FrbGrid, *GridPtr, *GridRBPtr, pv_mass, strange_mass, pv_mass, -1.0, 1, M5);
// DomainWallEOFAFermionD Strange_Op_L(U, *FGrid, *FrbGrid, *GridPtr, *GridRBPtr, strange_mass, strange_mass, pv_mass, 0.0, -1, M5);
// DomainWallEOFAFermionD Strange_Op_R(U, *FGrid, *FrbGrid, *GridPtr, *GridRBPtr, pv_mass, strange_mass, pv_mass, -1.0, 1, M5);
// ExactOneFlavourRatioPseudoFermionAction<FermionImplPolicy> EOFA(Strange_Op_L, Strange_Op_R, CG, OFRp, true);
// Level1.push_back(&EOFA);
// DJM: setup for EOFA ratio (Mobius)
MobiusEOFAFermionR Strange_Op_L(U, *FGrid, *FrbGrid, *GridPtr, *GridRBPtr, strange_mass, strange_mass, pv_mass, 0.0, -1, M5, b, c);
MobiusEOFAFermionR Strange_Op_R(U, *FGrid, *FrbGrid, *GridPtr, *GridRBPtr, pv_mass, strange_mass, pv_mass, -1.0, 1, M5, b, c);
MobiusEOFAFermionD Strange_Op_L(U, *FGrid, *FrbGrid, *GridPtr, *GridRBPtr, strange_mass, strange_mass, pv_mass, 0.0, -1, M5, b, c);
MobiusEOFAFermionD Strange_Op_R(U, *FGrid, *FrbGrid, *GridPtr, *GridRBPtr, pv_mass, strange_mass, pv_mass, -1.0, 1, M5, b, c);
ExactOneFlavourRatioPseudoFermionAction<FermionImplPolicy> EOFA(Strange_Op_L, Strange_Op_R, CG, OFRp, true);
Level1.push_back(&EOFA);
View File
@ -34,7 +34,7 @@ class ScalarActionParameters : Serializable {
double, lambda,
double, g);
ScalarActionParameters() = default;
ScalarActionParameters() {};
template <class ReaderClass >
ScalarActionParameters(Reader<ReaderClass>& Reader){
@ -45,7 +45,6 @@ class ScalarActionParameters : Serializable {
}
using namespace Grid;
;
template <class Impl>
class MagMeas : public HmcObservable<typename Impl::Field> {
View File
@ -28,7 +28,7 @@ directory
/* END LEGAL */
#include <Grid/Grid.h>
#ifdef ENABLE_FERMION_REPS
namespace Grid{
struct FermionParameters: Serializable {
GRID_SERIALIZABLE_CLASS_MEMBERS(FermionParameters,
@ -80,7 +80,7 @@ int main(int argc, char **argv)
// Typedefs to simplify notation
typedef GenericHMCRunnerHirep<TheRepresentations, MinimumNorm2> HMCWrapper; // Uses the default minimum norm
typedef WilsonTwoIndexAntiSymmetricImplR FermionImplPolicy; // gauge field implemetation for the pseudofermions
typedef WilsonCloverTwoIndexAntiSymmetricFermionR FermionAction; // type of lattice fermions (Wilson, DW, ...)
typedef WilsonCloverTwoIndexAntiSymmetricFermionD FermionAction; // type of lattice fermions (Wilson, DW, ...)
typedef typename FermionAction::FermionField FermionField;
//typedef Grid::JSONReader Serialiser;
typedef Grid::XmlReader Serialiser;
@ -210,4 +210,6 @@ int main(int argc, char **argv)
Grid_finalize();
} // main
#else
int main(int argc, char **argv){}
#endif
View File
@ -29,6 +29,7 @@ directory
#include <Grid/Grid.h>
#ifdef ENABLE_FERMION_REPS
namespace Grid{
struct FermionParameters: Serializable {
GRID_SERIALIZABLE_CLASS_MEMBERS(FermionParameters,
@ -81,7 +82,7 @@ int main(int argc, char **argv)
// Typedefs to simplify notation
typedef GenericHMCRunnerHirep<TheRepresentations, MinimumNorm2> HMCWrapper; // Uses the default minimum norm
typedef WilsonTwoIndexSymmetricImplR FermionImplPolicy; // gauge field implemetation for the pseudofermions
typedef WilsonCloverTwoIndexSymmetricFermionR FermionAction; // type of lattice fermions (Wilson, DW, ...)
typedef WilsonCloverTwoIndexSymmetricFermionD FermionAction; // type of lattice fermions (Wilson, DW, ...)
typedef typename FermionAction::FermionField FermionField;
//typedef Grid::JSONReader Serialiser;
typedef Grid::XmlReader Serialiser;
@ -211,3 +212,6 @@ int main(int argc, char **argv)
Grid_finalize();
} // main
#else
int main(int argc, char **argv){}
#endif
View File
@ -79,7 +79,7 @@ int main(int argc, char **argv)
// Typedefs to simplify notation
typedef GenericHMCRunner<MinimumNorm2> HMCWrapper; // Uses the default minimum norm
typedef WilsonImplR FermionImplPolicy;
typedef WilsonCloverFermionR FermionAction;
typedef WilsonCloverFermionD FermionAction;
typedef typename FermionAction::FermionField FermionField;
typedef Grid::XmlReader Serialiser;
View File
@ -32,6 +32,7 @@ directory
#include "Grid/Grid.h"
#ifdef ENABLE_FERMION_REPS
namespace Grid{
struct FermionParameters: Serializable {
GRID_SERIALIZABLE_CLASS_MEMBERS(FermionParameters,
@ -84,11 +85,11 @@ int main(int argc, char **argv) {
typedef GenericHMCRunnerHirep<TheRepresentations, MinimumNorm2> HMCWrapper;
typedef WilsonImplR FundImplPolicy;
typedef WilsonCloverFermionR FundFermionAction;
typedef WilsonCloverFermionD FundFermionAction;
typedef typename FundFermionAction::FermionField FundFermionField;
typedef WilsonTwoIndexAntiSymmetricImplR ASymmImplPolicy;
typedef WilsonCloverTwoIndexAntiSymmetricFermionR ASymmFermionAction;
typedef WilsonCloverTwoIndexAntiSymmetricFermionD ASymmFermionAction;
typedef typename ASymmFermionAction::FermionField ASymmFermionField;
typedef Grid::XmlReader Serialiser;
@ -222,3 +223,6 @@ int main(int argc, char **argv) {
Grid_finalize();
} // main
#else
int main(int argc, char **argv){}
#endif
View File
@ -29,6 +29,7 @@ directory
#include <Grid/Grid.h>
#ifdef ENABLE_FERMION_REPS
namespace Grid{
struct FermionParameters: Serializable {
GRID_SERIALIZABLE_CLASS_MEMBERS(FermionParameters,
@ -81,7 +82,7 @@ int main(int argc, char **argv)
// Typedefs to simplify notation
typedef GenericHMCRunnerHirep<TheRepresentations, MinimumNorm2> HMCWrapper; // Uses the default minimum norm
typedef WilsonAdjImplR FermionImplPolicy; // gauge field implemetation for the pseudofermions
typedef WilsonCloverAdjFermionR FermionAction; // type of lattice fermions (Wilson, DW, ...)
typedef WilsonCloverAdjFermionD FermionAction; // type of lattice fermions (Wilson, DW, ...)
typedef typename FermionAction::FermionField FermionField;
typedef Grid::XmlReader Serialiser;
@ -211,3 +212,6 @@ int main(int argc, char **argv)
} // main
#else
int main(int argc, char **argv){}
#endif
View File
@ -31,9 +31,10 @@ directory
/* END LEGAL */
#include "Grid/Grid.h"
#ifdef ENABLE_FERMION_REPS
int main(int argc, char **argv) {
using namespace Grid;
;
// Here change the allowed (higher) representations
typedef Representations< FundamentalRepresentation, AdjointRepresentation > TheRepresentations;
@ -46,7 +47,7 @@ int main(int argc, char **argv) {
// Typedefs to simplify notation
typedef GenericHMCRunnerHirep<TheRepresentations, MinimumNorm2> HMCWrapper;
typedef WilsonAdjImplR FermionImplPolicy; // gauge field implemetation for the pseudofermions
typedef WilsonAdjFermionR FermionAction; // type of lattice fermions (Wilson, DW, ...)
typedef WilsonAdjFermionD FermionAction; // type of lattice fermions (Wilson, DW, ...)
typedef typename FermionAction::FermionField FermionField;
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
@ -127,3 +128,6 @@ int main(int argc, char **argv) {
} // main
#else
int main(int argc, char **argv){}
#endif
View File
@ -41,7 +41,7 @@ int main(int argc, char **argv)
// Typedefs to simplify notation
typedef GenericHMCRunner<MinimumNorm2> HMCWrapper; // Uses the default minimum norm
typedef WilsonImplR FermionImplPolicy;
typedef WilsonCloverFermionR FermionAction;
typedef WilsonCloverFermionD FermionAction;
typedef typename FermionAction::FermionField FermionField;
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
View File
@ -42,7 +42,7 @@ int main(int argc, char **argv) {
// Typedefs to simplify notation
typedef GenericHMCRunner<MinimumNorm2> HMCWrapper; // Uses the default minimum norm
typedef WilsonImplR FermionImplPolicy;
typedef WilsonFermionR FermionAction;
typedef WilsonFermionD FermionAction;
typedef typename FermionAction::FermionField FermionField;
View File
@ -69,8 +69,10 @@ int main(int argc, char **argv)
TopologyObsParameters TopParams;
TopParams.interval = 5;
TopParams.do_smearing = true;
TopParams.Smearing.steps = 200;
TopParams.Smearing.step_size = 0.01;
TopParams.Smearing.init_step_size = 0.01;
TopParams.Smearing.tolerance = 1e-5;
// TopParams.Smearing.steps = 200;
// TopParams.Smearing.step_size = 0.01;
TopParams.Smearing.meas_interval = 50;
TopParams.Smearing.maxTau = 2.0;
TheHMC.Resources.AddObservable<QObs>(TopParams);
View File
@ -33,6 +33,7 @@ directory
#ifdef ENABLE_FERMION_REPS
int main(int argc, char **argv) {
#ifndef GRID_CUDA
@ -51,9 +52,9 @@ int main(int argc, char **argv) {
typedef GenericHMCRunnerHirep<TheRepresentations, MinimumNorm2> HMCWrapper;
typedef WilsonAdjImplR AdjImplPolicy; // gauge field implemetation for the pseudofermions
typedef WilsonAdjFermionR AdjFermionAction; // type of lattice fermions (Wilson, DW, ...)
typedef WilsonAdjFermionD AdjFermionAction; // type of lattice fermions (Wilson, DW, ...)
typedef WilsonTwoIndexSymmetricImplR SymmImplPolicy;
typedef WilsonTwoIndexSymmetricFermionR SymmFermionAction;
typedef WilsonTwoIndexSymmetricFermionD SymmFermionAction;
typedef typename AdjFermionAction::FermionField AdjFermionField;
@ -138,3 +139,6 @@ int main(int argc, char **argv) {
} // main
#else
int main(int argc, char **argv){}
#endif
View File
@ -41,7 +41,7 @@ int main(int argc, char **argv) {
// Typedefs to simplify notation
typedef GenericHMCRunner<MinimumNorm2> HMCWrapper; // Uses the default minimum norm
typedef WilsonImplR FermionImplPolicy;
typedef WilsonFermionR FermionAction;
typedef WilsonFermionD FermionAction;
typedef typename FermionAction::FermionField FermionField;
View File
@ -42,7 +42,7 @@ int main(int argc, char **argv) {
// Typedefs to simplify notation
typedef GenericHMCRunner<MinimumNorm2> HMCWrapper; // Uses the default minimum norm
typedef WilsonImplR FermionImplPolicy;
typedef WilsonTMFermionR FermionAction;
typedef WilsonTMFermionD FermionAction;
typedef typename FermionAction::FermionField FermionField;
View File
@ -29,6 +29,7 @@ directory
/* END LEGAL */
#include "Grid/Grid.h"
#ifdef ENABLE_FERMION_REPS
int main(int argc, char **argv) {
using namespace Grid;
;
@ -45,7 +46,7 @@ int main(int argc, char **argv) {
typedef GenericHMCRunnerHirep<TheRepresentations, MinimumNorm2> HMCWrapper;
typedef WilsonTwoIndexSymmetricImplR FermionImplPolicy; // gauge field implemetation for the pseudofermions
typedef WilsonTwoIndexSymmetricFermionR FermionAction; // type of lattice fermions (Wilson, DW, ...)
typedef WilsonTwoIndexSymmetricFermionD FermionAction; // type of lattice fermions (Wilson, DW, ...)
typedef typename FermionAction::FermionField FermionField;
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
@ -127,3 +128,6 @@ int main(int argc, char **argv) {
} // main
#else
int main(int argc, char **argv){}
#endif
Some files were not shown because too many files have changed in this diff.