mirror of https://github.com/paboyle/Grid.git
synced 2025-06-22 17:52:02 +01:00

Compare commits: branch feature/gp ... d3496d2fe0 (37 commits)
SHA1
d3496d2fe0
6121397587
0417b96896
81fe4c937e
f77f3a6598
239afb18fb
ef820a26cd
65abe4d0d3
5012adfebf
b808d48fa1
83f818a99d
387397374a
605cf401e1
f99c3660d2
92a83a9eb3
b615fa0f35
bb5c16b97f
0d80eeb545
d1decee4cc
d4ae71b880
b0f4eee78b
5340e50427
e16fc5b2e4
694306f202
9aac1e6d64
3e882f555d
0f1c5b08a1
70988e43d2
aab3bcb46f
da06d15f73
e8b1251b8c
fad5a74a4b
e83f6a6ae9
6283d11d50
6616d5d090
42d56ea6b6
0b905a72dd
@ -34,9 +34,6 @@ directory

#if defined __GNUC__ && __GNUC__>=6
#pragma GCC diagnostic ignored "-Wignored-attributes"
#endif
#if defined __GNUC__
#pragma GCC diagnostic ignored "-Wpsabi"
#endif

//disables an Intel compiler specific warning (in json.hpp)
@ -16,6 +16,7 @@

#include <functional>
#include <stdio.h>
#include <stdlib.h>
#include <strings.h>
#include <stdio.h>
#include <signal.h>
+#include <ctime>
@ -292,7 +292,6 @@ public:

template<class Field>
class ChebyshevLanczos : public Chebyshev<Field> {
private:

  std::vector<RealD> Coeffs;
  int order;
  RealD alpha;
@ -82,11 +82,6 @@ NAMESPACE_BEGIN(Grid);

RealD stop = src_norm * Tolerance*Tolerance;

GridBase* DoublePrecGrid = src_d_in.Grid();

-//Generate precision change workspaces
-precisionChangeWorkspace wk_dp_from_sp(DoublePrecGrid, SinglePrecGrid);
-precisionChangeWorkspace wk_sp_from_dp(SinglePrecGrid, DoublePrecGrid);

FieldD tmp_d(DoublePrecGrid);
tmp_d.Checkerboard() = cb;
@ -128,7 +123,7 @@ NAMESPACE_BEGIN(Grid);

while(norm * inner_tol * inner_tol < stop) inner_tol *= 2;  // inner_tol = sqrt(stop/norm) ??

PrecChangeTimer.Start();
-precisionChange(src_f, src_d, wk_sp_from_dp);
+precisionChange(src_f, src_d);
PrecChangeTimer.Stop();

sol_f = Zero();
@ -147,7 +142,7 @@ NAMESPACE_BEGIN(Grid);

//Convert sol back to double and add to double prec solution
PrecChangeTimer.Start();
-precisionChange(tmp_d, sol_f, wk_dp_from_sp);
+precisionChange(tmp_d, sol_f);
PrecChangeTimer.Stop();

axpy(sol_d, 1.0, tmp_d, sol_d);
@ -128,8 +128,6 @@ public:

void operator() (LinearOperatorBase<FieldD> &Linop_d, const FieldD &src_d, std::vector<FieldD> &psi_d)
{
GridBase *DoublePrecGrid = src_d.Grid();
-precisionChangeWorkspace wk_f_from_d(SinglePrecGrid, DoublePrecGrid);
-precisionChangeWorkspace wk_d_from_f(DoublePrecGrid, SinglePrecGrid);

////////////////////////////////////////////////////////////////////////
// Convenience references to the info stored in "MultiShiftFunction"
@ -170,7 +168,7 @@ public:

FieldF tmp_f(SinglePrecGrid);
FieldF mmp_f(SinglePrecGrid);
FieldF src_f(SinglePrecGrid);
-precisionChange(src_f, src_d, wk_f_from_d);
+precisionChange(src_f, src_d);

// Check lightest mass
for(int s=0;s<nshift;s++){
@ -245,7 +243,7 @@ public:

//Update double precision search direction by residual
PrecChangeTimer.Start();
-precisionChange(r_d, r_f, wk_d_from_f);
+precisionChange(r_d, r_f);
PrecChangeTimer.Stop();

AXPYTimer.Start();
@ -264,7 +262,7 @@ public:

AXPYTimer.Stop();

PrecChangeTimer.Start();
-precisionChange(p_f, p_d, wk_f_from_d); //get back single prec search direction for linop
+precisionChange(p_f, p_d); //get back single prec search direction for linop
PrecChangeTimer.Stop();

cp=c;
@ -327,7 +325,7 @@ public:

std::cout<<GridLogMessage<<"ConjugateGradientMultiShiftMixedPrec k="<<k<< ", replaced |r|^2 = "<<c_f <<" with |r|^2 = "<<c_d<<std::endl;

PrecChangeTimer.Start();
-precisionChange(r_f, r_d, wk_f_from_d);
+precisionChange(r_f, r_d);
PrecChangeTimer.Stop();
c = c_d;
}
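All of the solver hunks above make the same substitution: the cached-workspace overload of precisionChange gives way to the two-argument form, which re-derives the site/lane mapping on every call. A minimal sketch of the two calling conventions, with illustrative grid and field names that are not part of this diff:

// Sketch only; f_grid/d_grid are pre-existing single/double precision grids.
LatticeFermionF src_f(f_grid);
LatticeFermionD src_d(d_grid);

// Workspace form (removed on this branch): build the out/in site map once,
// reuse it across the many precision changes of a restarted mixed-prec solve.
//   precisionChangeWorkspace wk(f_grid, d_grid);
//   precisionChange(src_f, src_d, wk);

// Two-argument form (used after this change): simpler call site, but the
// mapping is rebuilt on every invocation.
precisionChange(src_f, src_d);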
@ -53,10 +53,11 @@ public:

// Communicator should know nothing of the physics grid, only processor grid.
////////////////////////////////////////////
int _Nprocessors;            // How many in all
-Coordinate _processors;      // Which dimensions get relayed out over processors lanes.
int _processor;              // linear processor rank
-Coordinate _processor_coor;  // linear processor coordinate
unsigned long _ndimension;
+Coordinate _shm_processors;  // Which dimensions get relayed out over processors lanes.
+Coordinate _processors;      // Which dimensions get relayed out over processors lanes.
+Coordinate _processor_coor;  // linear processor coordinate
static Grid_MPI_Comm communicator_world;
Grid_MPI_Comm communicator;
std::vector<Grid_MPI_Comm> communicator_halo;
@ -97,8 +98,9 @@ public:

int BossRank(void) ;
int ThisRank(void) ;
const Coordinate & ThisProcessorCoor(void) ;
+const Coordinate & ShmGrid(void)  { return _shm_processors; }  ;
const Coordinate & ProcessorGrid(void) ;
int ProcessorCount(void) ;

////////////////////////////////////////////////////////////////////////////////
// very VERY rarely (Log, serial RNG) we need world without a grid
@ -142,16 +144,16 @@ public:

int bytes);

double StencilSendToRecvFrom(void *xmit,
-                             int xmit_to_rank,
+                             int xmit_to_rank,int do_xmit,
                             void *recv,
-                             int recv_from_rank,
+                             int recv_from_rank,int do_recv,
                             int bytes,int dir);

double StencilSendToRecvFromBegin(std::vector<CommsRequest_t> &list,
                             void *xmit,
-                             int xmit_to_rank,
+                             int xmit_to_rank,int do_xmit,
                             void *recv,
-                             int recv_from_rank,
+                             int recv_from_rank,int do_recv,
                             int bytes,int dir);
@ -106,7 +106,7 @@ CartesianCommunicator::CartesianCommunicator(const Coordinate &processors)

// Remap using the shared memory optimising routine
// The remap creates a comm which must be freed
////////////////////////////////////////////////////
-GlobalSharedMemory::OptimalCommunicator (processors,optimal_comm);
+GlobalSharedMemory::OptimalCommunicator (processors,optimal_comm,_shm_processors);
InitFromMPICommunicator(processors,optimal_comm);
SetCommunicator(optimal_comm);
///////////////////////////////////////////////////
@ -124,12 +124,13 @@ CartesianCommunicator::CartesianCommunicator(const Coordinate &processors,const

int parent_ndimension = parent._ndimension; assert(_ndimension >= parent._ndimension);
Coordinate parent_processor_coor(_ndimension,0);
Coordinate parent_processors    (_ndimension,1);

+Coordinate shm_processors       (_ndimension,1);
// Can make 5d grid from 4d etc...
int pad = _ndimension-parent_ndimension;
for(int d=0;d<parent_ndimension;d++){
  parent_processor_coor[pad+d]=parent._processor_coor[d];
  parent_processors    [pad+d]=parent._processors[d];
+  shm_processors       [pad+d]=parent._shm_processors[d];
}
@ -154,6 +155,7 @@ CartesianCommunicator::CartesianCommunicator(const Coordinate &processors,const

  ccoor[d] = parent_processor_coor[d] % processors[d];
  scoor[d] = parent_processor_coor[d] / processors[d];
  ssize[d] = parent_processors[d]     / processors[d];
+  if ( processors[d] < shm_processors[d] ) shm_processors[d] = processors[d]; // subnode splitting.
}

// rank within subcomm ; srank is rank of subcomm within blocks of subcomms
@ -335,22 +337,22 @@ void CartesianCommunicator::SendToRecvFrom(void *xmit,

}
// Basic Halo comms primitive
double CartesianCommunicator::StencilSendToRecvFrom( void *xmit,
-                                                    int dest,
+                                                    int dest, int dox,
                                                    void *recv,
-                                                    int from,
+                                                    int from, int dor,
                                                    int bytes,int dir)
{
  std::vector<CommsRequest_t> list;
-  double offbytes = StencilSendToRecvFromBegin(list,xmit,dest,recv,from,bytes,dir);
+  double offbytes = StencilSendToRecvFromBegin(list,xmit,dest,dox,recv,from,dor,bytes,dir);
  StencilSendToRecvFromComplete(list,dir);
  return offbytes;
}

double CartesianCommunicator::StencilSendToRecvFromBegin(std::vector<CommsRequest_t> &list,
                                                    void *xmit,
-                                                    int dest,
+                                                    int dest,int dox,
                                                    void *recv,
-                                                    int from,
+                                                    int from,int dor,
                                                    int bytes,int dir)
{
  int ncomm =communicator_halo.size();
@ -370,28 +372,32 @@ double CartesianCommunicator::StencilSendToRecvFromBegin(std::vector<CommsReques

double off_node_bytes=0.0;
int tag;

-if ( (gfrom ==MPI_UNDEFINED) || Stencil_force_mpi ) {
-  tag= dir+from*32;
-  ierr=MPI_Irecv(recv, bytes, MPI_CHAR,from,tag,communicator_halo[commdir],&rrq);
-  assert(ierr==0);
-  list.push_back(rrq);
-  off_node_bytes+=bytes;
+if ( dox ) {
+  if ( (gfrom ==MPI_UNDEFINED) || Stencil_force_mpi ) {
+    tag= dir+from*32;
+    ierr=MPI_Irecv(recv, bytes, MPI_CHAR,from,tag,communicator_halo[commdir],&rrq);
+    assert(ierr==0);
+    list.push_back(rrq);
+    off_node_bytes+=bytes;
+  }
}

-if ( (gdest == MPI_UNDEFINED) || Stencil_force_mpi ) {
-  tag= dir+_processor*32;
-  ierr =MPI_Isend(xmit, bytes, MPI_CHAR,dest,tag,communicator_halo[commdir],&xrq);
-  assert(ierr==0);
-  list.push_back(xrq);
-  off_node_bytes+=bytes;
-} else {
-  // TODO : make an OMP loop on CPU, call threaded bcopy
-  void *shm = (void *) this->ShmBufferTranslate(dest,recv);
-  assert(shm!=NULL);
-  // std::cout <<"acceleratorCopyDeviceToDeviceAsynch"<< std::endl;
-  acceleratorCopyDeviceToDeviceAsynch(xmit,shm,bytes);
+if (dor) {
+  if ( (gdest == MPI_UNDEFINED) || Stencil_force_mpi ) {
+    tag= dir+_processor*32;
+    ierr =MPI_Isend(xmit, bytes, MPI_CHAR,dest,tag,communicator_halo[commdir],&xrq);
+    assert(ierr==0);
+    list.push_back(xrq);
+    off_node_bytes+=bytes;
+  } else {
+    // TODO : make an OMP loop on CPU, call threaded bcopy
+    void *shm = (void *) this->ShmBufferTranslate(dest,recv);
+    assert(shm!=NULL);
+    // std::cout <<"acceleratorCopyDeviceToDeviceAsynch"<< std::endl;
+    acceleratorCopyDeviceToDeviceAsynch(xmit,shm,bytes);
+  }
}

if ( CommunicatorPolicy == CommunicatorPolicySequential ) {
  this->StencilSendToRecvFromComplete(list,dir);
}
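The new do-send/do-receive flags let a stencil suppress one leg of a halo exchange entirely, which is what domain-decomposed (Dirichlet) boundaries need: at a domain wall there is nothing to send or receive. Note that as displayed above the MPI_Irecv block is gated by dox (declared as do_xmit in the header) and the MPI_Isend block by dor (do_recv), the reverse of what the parameter names suggest; this reads like a naming slip in the commit. A hedged caller sketch, with illustrative buffer and rank variables:

// Sketch: skip the receive leg across a Dirichlet cut (values illustrative).
std::vector<CommsRequest_t> reqs;
int do_xmit = 1, do_recv = 0;
double off_node = comm.StencilSendToRecvFromBegin(reqs, xmit_buf, dest_rank, do_xmit,
                                                  recv_buf, from_rank, do_recv,
                                                  nbytes, dir);
comm.StencilSendToRecvFromComplete(reqs, dir);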
@ -45,12 +45,14 @@ void CartesianCommunicator::Init(int *argc, char *** arv)

CartesianCommunicator::CartesianCommunicator(const Coordinate &processors,const CartesianCommunicator &parent,int &srank)
  : CartesianCommunicator(processors)
{
+  _shm_processors = Coordinate(processors.size(),1);
  srank=0;
  SetCommunicator(communicator_world);
}

CartesianCommunicator::CartesianCommunicator(const Coordinate &processors)
{
+  _shm_processors = Coordinate(processors.size(),1);
  _processors = processors;
  _ndimension = processors.size();  assert(_ndimension>=1);
  _processor_coor.resize(_ndimension);
@ -111,18 +113,18 @@ void CartesianCommunicator::ShiftedRanks(int dim,int shift,int &source,int &dest

}

double CartesianCommunicator::StencilSendToRecvFrom( void *xmit,
-                                                    int xmit_to_rank,
+                                                    int xmit_to_rank,int dox,
                                                    void *recv,
-                                                    int recv_from_rank,
+                                                    int recv_from_rank,int dor,
                                                    int bytes, int dir)
{
  return 2.0*bytes;
}
double CartesianCommunicator::StencilSendToRecvFromBegin(std::vector<CommsRequest_t> &list,
                                                    void *xmit,
-                                                    int xmit_to_rank,
+                                                    int xmit_to_rank,int dox,
                                                    void *recv,
-                                                    int recv_from_rank,
+                                                    int recv_from_rank,int dor,
                                                    int bytes, int dir)
{
  return 2.0*bytes;
@ -93,9 +93,10 @@ public:

// Create an optimal reordered communicator that makes MPI_Cart_create get it right
//////////////////////////////////////////////////////////////////////////////////////
static void Init(Grid_MPI_Comm comm); // Typically MPI_COMM_WORLD
-static void OptimalCommunicator            (const Coordinate &processors,Grid_MPI_Comm & optimal_comm); // Turns MPI_COMM_WORLD into right layout for Cartesian
-static void OptimalCommunicatorHypercube   (const Coordinate &processors,Grid_MPI_Comm & optimal_comm); // Turns MPI_COMM_WORLD into right layout for Cartesian
-static void OptimalCommunicatorSharedMemory(const Coordinate &processors,Grid_MPI_Comm & optimal_comm); // Turns MPI_COMM_WORLD into right layout for Cartesian
+// Turns MPI_COMM_WORLD into right layout for Cartesian
+static void OptimalCommunicator            (const Coordinate &processors,Grid_MPI_Comm & optimal_comm,Coordinate &ShmDims);
+static void OptimalCommunicatorHypercube   (const Coordinate &processors,Grid_MPI_Comm & optimal_comm,Coordinate &ShmDims);
+static void OptimalCommunicatorSharedMemory(const Coordinate &processors,Grid_MPI_Comm & optimal_comm,Coordinate &ShmDims);
static void GetShmDims(const Coordinate &WorldDims,Coordinate &ShmDims);
///////////////////////////////////////////////////
// Provide shared memory facilities off comm world
@ -152,7 +152,7 @@ int Log2Size(int TwoToPower,int MAXLOG2)

  }
  return log2size;
}
-void GlobalSharedMemory::OptimalCommunicator(const Coordinate &processors,Grid_MPI_Comm & optimal_comm)
+void GlobalSharedMemory::OptimalCommunicator(const Coordinate &processors,Grid_MPI_Comm & optimal_comm,Coordinate &SHM)
{
  //////////////////////////////////////////////////////////////////////////////
  // Look and see if it looks like an HPE 8600 based on hostname conventions
@ -165,8 +165,8 @@ void GlobalSharedMemory::OptimalCommunicator(const Coordinate &processors,Grid_M

  gethostname(name,namelen);
  int nscan = sscanf(name,"r%di%dn%d",&R,&I,&N) ;

-  if(nscan==3 && HPEhypercube ) OptimalCommunicatorHypercube(processors,optimal_comm);
-  else                          OptimalCommunicatorSharedMemory(processors,optimal_comm);
+  if(nscan==3 && HPEhypercube ) OptimalCommunicatorHypercube(processors,optimal_comm,SHM);
+  else                          OptimalCommunicatorSharedMemory(processors,optimal_comm,SHM);
}
static inline int divides(int a,int b)
{
@ -221,7 +221,7 @@ void GlobalSharedMemory::GetShmDims(const Coordinate &WorldDims,Coordinate &ShmD

    dim=(dim+1) %ndimension;
  }
}
-void GlobalSharedMemory::OptimalCommunicatorHypercube(const Coordinate &processors,Grid_MPI_Comm & optimal_comm)
+void GlobalSharedMemory::OptimalCommunicatorHypercube(const Coordinate &processors,Grid_MPI_Comm & optimal_comm,Coordinate &SHM)
{
  ////////////////////////////////////////////////////////////////
  // Assert power of two shm_size.
@ -294,7 +294,8 @@ void GlobalSharedMemory::OptimalCommunicatorHypercube(const Coordinate &processo

  Coordinate HyperCoor(ndimension);

  GetShmDims(WorldDims,ShmDims);
+  SHM = ShmDims;

  ////////////////////////////////////////////////////////////////
  // Establish torus of processes and nodes with sub-blockings
  ////////////////////////////////////////////////////////////////
@ -341,7 +342,7 @@ void GlobalSharedMemory::OptimalCommunicatorHypercube(const Coordinate &processo

  int ierr= MPI_Comm_split(WorldComm,0,rank,&optimal_comm);
  assert(ierr==0);
}
-void GlobalSharedMemory::OptimalCommunicatorSharedMemory(const Coordinate &processors,Grid_MPI_Comm & optimal_comm)
+void GlobalSharedMemory::OptimalCommunicatorSharedMemory(const Coordinate &processors,Grid_MPI_Comm & optimal_comm,Coordinate &SHM)
{
  ////////////////////////////////////////////////////////////////
  // Identify subblock of ranks on node spreading across dims
@ -353,6 +354,8 @@ void GlobalSharedMemory::OptimalCommunicatorSharedMemory(const Coordinate &proce

  Coordinate ShmCoor(ndimension); Coordinate NodeCoor(ndimension); Coordinate WorldCoor(ndimension);

  GetShmDims(WorldDims,ShmDims);
+  SHM=ShmDims;
+
  ////////////////////////////////////////////////////////////////
  // Establish torus of processes and nodes with sub-blockings
  ////////////////////////////////////////////////////////////////
@ -48,9 +48,10 @@ void GlobalSharedMemory::Init(Grid_MPI_Comm comm)

  _ShmSetup=1;
}

-void GlobalSharedMemory::OptimalCommunicator(const Coordinate &processors,Grid_MPI_Comm & optimal_comm)
+void GlobalSharedMemory::OptimalCommunicator(const Coordinate &processors,Grid_MPI_Comm & optimal_comm,Coordinate &SHM)
{
  optimal_comm = WorldComm;
+  SHM = Coordinate(processors.size(),1);
}

////////////////////////////////////////////////////////////////////////////////////////////
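Taken together, these hunks thread the node-local (shared-memory) process decomposition out of communicator setup: each OptimalCommunicator* variant now reports it through the new Coordinate &SHM out-parameter, CartesianCommunicator stores it in _shm_processors, and ShmGrid() exposes it. A caller-side sketch, with illustrative variable names:

// Sketch: recover the intra-node process grid after communicator setup.
Coordinate shm;                 // filled by OptimalCommunicator
Grid_MPI_Comm optimal_comm;
GlobalSharedMemory::OptimalCommunicator(processors, optimal_comm, shm);
// ...or, from an existing communicator object:
//   const Coordinate &shm_grid = comm.ShmGrid();   // == _shm_processors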
@ -29,6 +29,19 @@ Author: Peter Boyle <paboyle@ph.ed.ac.uk>

NAMESPACE_BEGIN(Grid);

+template<class vobj> void DumpSliceNorm(std::string s,Lattice<vobj> &f,int mu=-1)
+{
+  auto ff = localNorm2(f);
+  if ( mu==-1 ) mu = f.Grid()->Nd()-1;
+  typedef typename vobj::tensor_reduced normtype;
+  typedef typename normtype::scalar_object scalar;
+  std::vector<scalar> sff;
+  sliceSum(ff,sff,mu);
+  for(int t=0;t<sff.size();t++){
+    std::cout << s<<" "<<t<<" "<<sff[t]<<std::endl;
+  }
+}

template<class vobj> uint32_t crc(Lattice<vobj> & buf)
{
  autoView( buf_v , buf, CpuRead);
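DumpSliceNorm is a debugging aid: it computes the site-local norm |f(x)|^2, sums it over each slice perpendicular to direction mu (defaulting to the last, i.e. time, direction), and prints one tagged line per slice. A hedged usage sketch with an illustrative field:

// Sketch only; grid and psi are pre-existing.
LatticeFermion psi(grid);
DumpSliceNorm("psi", psi);       // slices along mu = Nd-1 (time) by default
DumpSliceNorm("psi-x", psi, 0);  // or along the x direction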
@ -142,6 +142,15 @@ inline typename vobj::scalar_objectD sumD(const vobj *arg, Integer osites)

  return sumD_cpu(arg,osites);
#endif
}
+template<class vobj>
+inline typename vobj::scalar_objectD sumD_large(const vobj *arg, Integer osites)
+{
+#if defined(GRID_CUDA)||defined(GRID_HIP)
+  return sumD_gpu_large(arg,osites);
+#else
+  return sumD_cpu(arg,osites);
+#endif
+}

template<class vobj>
inline typename vobj::scalar_object sum(const Lattice<vobj> &arg)
@ -159,6 +168,22 @@ inline typename vobj::scalar_object sum(const Lattice<vobj> &arg)

  return ssum;
}

+template<class vobj>
+inline typename vobj::scalar_object sum_large(const Lattice<vobj> &arg)
+{
+#if defined(GRID_CUDA)||defined(GRID_HIP)
+  autoView( arg_v, arg, AcceleratorRead);
+  Integer osites = arg.Grid()->oSites();
+  auto ssum= sum_gpu_large(&arg_v[0],osites);
+#else
+  autoView(arg_v, arg, CpuRead);
+  Integer osites = arg.Grid()->oSites();
+  auto ssum= sum_cpu(&arg_v[0],osites);
+#endif
+  arg.Grid()->GlobalSum(ssum);
+  return ssum;
+}

////////////////////////////////////////////////////////////////////////////////////////////////////
// Deterministic Reduction operations
////////////////////////////////////////////////////////////////////////////////////////////////////
@ -23,7 +23,7 @@ unsigned int nextPow2(Iterator x) {

}

template <class Iterator>
-void getNumBlocksAndThreads(const Iterator n, const size_t sizeofsobj, Iterator &threads, Iterator &blocks) {
+int getNumBlocksAndThreads(const Iterator n, const size_t sizeofsobj, Iterator &threads, Iterator &blocks) {

  int device;
#ifdef GRID_CUDA
@ -37,13 +37,13 @@ void getNumBlocksAndThreads(const Iterator n, const size_t sizeofsobj, Iterator

  Iterator sharedMemPerBlock   = gpu_props[device].sharedMemPerBlock;
  Iterator maxThreadsPerBlock  = gpu_props[device].maxThreadsPerBlock;
  Iterator multiProcessorCount = gpu_props[device].multiProcessorCount;
+  /*
  std::cout << GridLogDebug << "GPU has:" << std::endl;
  std::cout << GridLogDebug << "\twarpSize            = " << warpSize << std::endl;
  std::cout << GridLogDebug << "\tsharedMemPerBlock   = " << sharedMemPerBlock << std::endl;
  std::cout << GridLogDebug << "\tmaxThreadsPerBlock  = " << maxThreadsPerBlock << std::endl;
  std::cout << GridLogDebug << "\tmultiProcessorCount = " << multiProcessorCount << std::endl;
+  */
  if (warpSize != WARP_SIZE) {
    std::cout << GridLogError << "The warp size of the GPU in use does not match the warp size set when compiling Grid." << std::endl;
    exit(EXIT_FAILURE);
@ -53,12 +53,12 @@ void getNumBlocksAndThreads(const Iterator n, const size_t sizeofsobj, Iterator

  threads = warpSize;
  if ( threads*sizeofsobj > sharedMemPerBlock ) {
    std::cout << GridLogError << "The object is too large for the shared memory." << std::endl;
-    exit(EXIT_FAILURE);
+    return 0;
  }
  while( 2*threads*sizeofsobj < sharedMemPerBlock && 2*threads <= maxThreadsPerBlock ) threads *= 2;
  // keep all the streaming multiprocessors busy
  blocks = nextPow2(multiProcessorCount);
+  return 1;
}

template <class sobj, class Iterator>
@ -198,7 +198,7 @@ __global__ void reduceKernel(const vobj *lat, sobj *buffer, Iterator n) {

// Possibly promote to double and sum
/////////////////////////////////////////////////////////////////////////////////////////////////////////
template <class vobj>
-inline typename vobj::scalar_objectD sumD_gpu(const vobj *lat, Integer osites)
+inline typename vobj::scalar_objectD sumD_gpu_small(const vobj *lat, Integer osites)
{
  typedef typename vobj::scalar_objectD sobj;
  typedef decltype(lat) Iterator;
@ -207,7 +207,9 @@ inline typename vobj::scalar_objectD sumD_gpu(const vobj *lat, Integer osites)

  Integer size = osites*nsimd;

  Integer numThreads, numBlocks;
-  getNumBlocksAndThreads(size, sizeof(sobj), numThreads, numBlocks);
+  int ok = getNumBlocksAndThreads(size, sizeof(sobj), numThreads, numBlocks);
+  assert(ok);

  Integer smemSize = numThreads * sizeof(sobj);

  Vector<sobj> buffer(numBlocks);
@ -218,6 +220,54 @@ inline typename vobj::scalar_objectD sumD_gpu(const vobj *lat, Integer osites)

  auto result = buffer_v[0];
  return result;
}

+template <class vobj>
+inline typename vobj::scalar_objectD sumD_gpu_large(const vobj *lat, Integer osites)
+{
+  typedef typename vobj::vector_type  vector;
+  typedef typename vobj::scalar_typeD scalarD;
+  typedef typename vobj::scalar_objectD sobj;
+  sobj ret;
+  scalarD *ret_p = (scalarD *)&ret;
+
+  const int words = sizeof(vobj)/sizeof(vector);
+
+  Vector<vector> buffer(osites);
+  vector *dat = (vector *)lat;
+  vector *buf = &buffer[0];
+  iScalar<vector> *tbuf =(iScalar<vector> *) &buffer[0];
+  for(int w=0;w<words;w++) {
+
+    accelerator_for(ss,osites,1,{
+      buf[ss] = dat[ss*words+w];
+    });
+
+    ret_p[w] = sumD_gpu_small(tbuf,osites);
+  }
+  return ret;
+}
+
+template <class vobj>
+inline typename vobj::scalar_objectD sumD_gpu(const vobj *lat, Integer osites)
+{
+  typedef typename vobj::vector_type  vector;
+  typedef typename vobj::scalar_typeD scalarD;
+  typedef typename vobj::scalar_objectD sobj;
+  sobj ret;
+
+  Integer nsimd= vobj::Nsimd();
+  Integer size = osites*nsimd;
+  Integer numThreads, numBlocks;
+  int ok = getNumBlocksAndThreads(size, sizeof(sobj), numThreads, numBlocks);
+
+  if ( ok ) {
+    ret = sumD_gpu_small(lat,osites);
+  } else {
+    ret = sumD_gpu_large(lat,osites);
+  }
+  return ret;
+}

/////////////////////////////////////////////////////////////////////////////////////////////////////////
// Return as same precision as input performing reduction in double precision though
/////////////////////////////////////////////////////////////////////////////////////////////////////////
@ -230,6 +280,13 @@ inline typename vobj::scalar_object sum_gpu(const vobj *lat, Integer osites)

  return result;
}

+template <class vobj>
+inline typename vobj::scalar_object sum_gpu_large(const vobj *lat, Integer osites)
+{
+  typedef typename vobj::scalar_object sobj;
+  sobj result;
+  result = sumD_gpu_large(lat,osites);
+  return result;
+}

NAMESPACE_END(Grid);
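The motivation for the _large variants: the small-object GPU reduction stages one scalar_objectD per thread in shared memory, so it needs threads*sizeof(sobj) <= sharedMemPerBlock. As a worked example (sizes illustrative): with 32 threads per warp and a 64x64 complex-double matrix per site, sizeof(sobj) = 64*64*16 = 65536 bytes, and 32*65536 bytes is far beyond a typical 48-164 kB shared-memory budget. getNumBlocksAndThreads therefore now returns 0 instead of aborting, and sumD_gpu falls back to sumD_gpu_large, which peels the per-site object into its sizeof(vobj)/sizeof(vector) SIMD words and reduces one word at a time. A hedged caller sketch:

// Sketch only; field type illustrative.
typedef iMatrix<vComplexD,64> BigObj;   // 64x64 complex doubles per site
Lattice<BigObj> bigField(grid);
auto total = sum_large(bigField);       // explicit word-by-word reduction
// Plain sum() also works after this change: sumD_gpu dispatches to
// sumD_gpu_large automatically when the object will not fit.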
@ -32,9 +32,8 @@

#include <random>

#ifdef RNG_SITMO
-#include <Grid/random/sitmo_prng_engine.hpp>
+#include <Grid/sitmo_rng/sitmo_prng_engine.hpp>
#endif
-#include <Grid/random/gaussian.h>

#if defined(RNG_SITMO)
#define RNG_FAST_DISCARD
@ -143,8 +142,8 @@ public:

  std::vector<RngEngine>                              _generators;
  std::vector<std::uniform_real_distribution<RealD> > _uniform;
-  std::vector<Grid::gaussian_distribution<RealD> >    _gaussian;
-  // std::vector<std::discrete_distribution<int32_t> > _bernoulli;
+  std::vector<std::normal_distribution<RealD> >       _gaussian;
+  std::vector<std::discrete_distribution<int32_t> >   _bernoulli;
  std::vector<std::uniform_int_distribution<uint32_t> > _uid;

  ///////////////////////
@ -244,8 +243,8 @@ public:

  GridSerialRNG() : GridRNGbase() {
    _generators.resize(1);
    _uniform.resize(1,std::uniform_real_distribution<RealD>{0,1});
-    _gaussian.resize(1,gaussian_distribution<RealD>(0.0,1.0) );
-    // _bernoulli.resize(1,std::discrete_distribution<int32_t>{1,1});
+    _gaussian.resize(1,std::normal_distribution<RealD>(0.0,1.0) );
+    _bernoulli.resize(1,std::discrete_distribution<int32_t>{1,1});
    _uid.resize(1,std::uniform_int_distribution<uint32_t>() );
  }
@ -358,8 +357,8 @@ public:

  _generators.resize(_vol);
  _uniform.resize(_vol,std::uniform_real_distribution<RealD>{0,1});
-  _gaussian.resize(_vol,gaussian_distribution<RealD>(0.0,1.0) );
-  // _bernoulli.resize(_vol,std::discrete_distribution<int32_t>{1,1});
+  _gaussian.resize(_vol,std::normal_distribution<RealD>(0.0,1.0) );
+  _bernoulli.resize(_vol,std::discrete_distribution<int32_t>{1,1});
  _uid.resize(_vol,std::uniform_int_distribution<uint32_t>() );
}
@ -516,11 +515,11 @@ public:

template <class vobj> inline void random(GridParallelRNG &rng,Lattice<vobj> &l)   { rng.fill(l,rng._uniform); }
template <class vobj> inline void gaussian(GridParallelRNG &rng,Lattice<vobj> &l) { rng.fill(l,rng._gaussian); }
-//template <class vobj> inline void bernoulli(GridParallelRNG &rng,Lattice<vobj> &l){ rng.fill(l,rng._bernoulli);}
+template <class vobj> inline void bernoulli(GridParallelRNG &rng,Lattice<vobj> &l){ rng.fill(l,rng._bernoulli);}

template <class sobj> inline void random(GridSerialRNG &rng,sobj &l)   { rng.fill(l,rng._uniform ); }
template <class sobj> inline void gaussian(GridSerialRNG &rng,sobj &l) { rng.fill(l,rng._gaussian ); }
-//template <class sobj> inline void bernoulli(GridSerialRNG &rng,sobj &l){ rng.fill(l,rng._bernoulli); }
+template <class sobj> inline void bernoulli(GridSerialRNG &rng,sobj &l){ rng.fill(l,rng._bernoulli); }

NAMESPACE_END(Grid);
#endif
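These RNG hunks revert the custom Grid::gaussian_distribution back to std::normal_distribution and re-enable the bernoulli fill, which draws 0 or 1 with equal weight via std::discrete_distribution<int32_t>{1,1}. A hedged usage sketch, with illustrative grid and seeds:

// Sketch: fill a lattice with 0/1 bernoulli noise.
GridParallelRNG pRNG(grid);
pRNG.SeedFixedIntegers(std::vector<int>{1,2,3,4});
LatticeComplex eta(grid);
bernoulli(pRNG, eta);   // each site becomes 0 or 1, equal probability
gaussian(pRNG, eta);    // unit-variance normal per component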
@ -855,7 +855,7 @@ void ExtractSliceLocal(Lattice<vobj> &lowDim,const Lattice<vobj> & higherDim,int

template<class vobj>
-void Replicate(const Lattice<vobj> &coarse,Lattice<vobj> & fine)
+void Replicate(Lattice<vobj> &coarse,Lattice<vobj> & fine)
{
  typedef typename vobj::scalar_object sobj;
@ -1080,95 +1080,53 @@ vectorizeFromRevLexOrdArray( std::vector<sobj> &in, Lattice<vobj> &out)

  });
}

-//The workspace for a precision change operation allowing for the reuse of the mapping to save time on subsequent calls
-class precisionChangeWorkspace{
-  std::pair<Integer,Integer>* fmap_device; //device pointer
-public:
-  precisionChangeWorkspace(GridBase *out_grid, GridBase *in_grid){
-    //Build a map between the sites and lanes of the output field and the input field as we cannot use the Grids on the device
-    assert(out_grid->Nd() == in_grid->Nd());
-    for(int d=0;d<out_grid->Nd();d++){
-      assert(out_grid->FullDimensions()[d] == in_grid->FullDimensions()[d]);
-    }
-    int Nsimd_out = out_grid->Nsimd();
-
-    std::vector<Coordinate> out_icorrs(out_grid->Nsimd()); //reuse these
-    for(int lane=0; lane < out_grid->Nsimd(); lane++)
-      out_grid->iCoorFromIindex(out_icorrs[lane], lane);
-
-    std::vector<std::pair<Integer,Integer> > fmap_host(out_grid->lSites()); //lsites = osites*Nsimd
-    thread_for(out_oidx,out_grid->oSites(),{
-      Coordinate out_ocorr;
-      out_grid->oCoorFromOindex(out_ocorr, out_oidx);
-
-      Coordinate lcorr; //the local coordinate (common to both in and out as full coordinate)
-      for(int out_lane=0; out_lane < Nsimd_out; out_lane++){
-        out_grid->InOutCoorToLocalCoor(out_ocorr, out_icorrs[out_lane], lcorr);
-
-        //int in_oidx = in_grid->oIndex(lcorr), in_lane = in_grid->iIndex(lcorr);
-        //Note oIndex and OcorrFromOindex (and same for iIndex) are not inverse for checkerboarded lattice, the former coordinates being defined on the full lattice and the latter on the reduced lattice
-        //Until this is fixed we need to circumvent the problem locally. Here I will use the coordinates defined on the reduced lattice for simplicity
-        int in_oidx = 0, in_lane = 0;
-        for(int d=0;d<in_grid->_ndimension;d++){
-          in_oidx += in_grid->_ostride[d] * ( lcorr[d] % in_grid->_rdimensions[d] );
-          in_lane += in_grid->_istride[d] * ( lcorr[d] / in_grid->_rdimensions[d] );
-        }
-        fmap_host[out_lane + Nsimd_out*out_oidx] = std::pair<Integer,Integer>( in_oidx, in_lane );
-      }
-    });
-
-    //Copy the map to the device (if we had a way to tell if an accelerator is in use we could avoid this copy for CPU-only machines)
-    size_t fmap_bytes = out_grid->lSites() * sizeof(std::pair<Integer,Integer>);
-    fmap_device = (std::pair<Integer,Integer>*)acceleratorAllocDevice(fmap_bytes);
-    acceleratorCopyToDevice(fmap_host.data(), fmap_device, fmap_bytes);
-  }
-
-  //Prevent moving or copying
-  precisionChangeWorkspace(const precisionChangeWorkspace &r) = delete;
-  precisionChangeWorkspace(precisionChangeWorkspace &&r) = delete;
-  precisionChangeWorkspace &operator=(const precisionChangeWorkspace &r) = delete;
-  precisionChangeWorkspace &operator=(precisionChangeWorkspace &&r) = delete;
-
-  std::pair<Integer,Integer> const* getMap() const{ return fmap_device; }
-
-  ~precisionChangeWorkspace(){
-    acceleratorFreeDevice(fmap_device);
-  }
-};
-
-//Convert a lattice of one precision to another. The input workspace contains the mapping data.
-template<class VobjOut, class VobjIn>
-void precisionChange(Lattice<VobjOut> &out, const Lattice<VobjIn> &in, const precisionChangeWorkspace &workspace){
-  static_assert( std::is_same<typename VobjOut::DoublePrecision, typename VobjIn::DoublePrecision>::value == 1, "copyLane: tensor types must be the same" ); //if tensor types are same the DoublePrecision type must be the same
-
-  out.Checkerboard() = in.Checkerboard();
-  constexpr int Nsimd_out = VobjOut::Nsimd();
-
-  std::pair<Integer,Integer> const* fmap_device = workspace.getMap();
-
-  //Do the copy/precision change
-  autoView( out_v , out, AcceleratorWrite);
-  autoView( in_v  , in,  AcceleratorRead);
-
-  accelerator_for(out_oidx, out.Grid()->oSites(), 1,{
-    std::pair<Integer,Integer> const* fmap_osite = fmap_device + out_oidx*Nsimd_out;
-    for(int out_lane=0; out_lane < Nsimd_out; out_lane++){
-      int in_oidx = fmap_osite[out_lane].first;
-      int in_lane = fmap_osite[out_lane].second;
-      copyLane(out_v[out_oidx], out_lane, in_v[in_oidx], in_lane);
-    }
-  });
-}
-
//Convert a Lattice from one precision to another
-//Generate the workspace in place; if multiple calls with the same mapping are performed, consider pregenerating the workspace and reusing
template<class VobjOut, class VobjIn>
-void precisionChange(Lattice<VobjOut> &out, const Lattice<VobjIn> &in){
-  precisionChangeWorkspace workspace(out.Grid(), in.Grid());
-  precisionChange(out, in, workspace);
-}
+void precisionChange(Lattice<VobjOut> &out, const Lattice<VobjIn> &in)
+{
+  assert(out.Grid()->Nd() == in.Grid()->Nd());
+  for(int d=0;d<out.Grid()->Nd();d++){
+    assert(out.Grid()->FullDimensions()[d] == in.Grid()->FullDimensions()[d]);
+  }
+  out.Checkerboard() = in.Checkerboard();
+  GridBase *in_grid=in.Grid();
+  GridBase *out_grid = out.Grid();
+
+  typedef typename VobjOut::scalar_object SobjOut;
+  typedef typename VobjIn::scalar_object SobjIn;
+
+  int ndim = out.Grid()->Nd();
+  int out_nsimd = out_grid->Nsimd();
+
+  std::vector<Coordinate > out_icoor(out_nsimd);
+
+  for(int lane=0; lane < out_nsimd; lane++){
+    out_icoor[lane].resize(ndim);
+    out_grid->iCoorFromIindex(out_icoor[lane], lane);
+  }
+
+  std::vector<SobjOut> in_slex_conv(in_grid->lSites());
+  unvectorizeToLexOrdArray(in_slex_conv, in);
+
+  autoView( out_v , out, CpuWrite);
+  thread_for(out_oidx,out_grid->oSites(),{
+    Coordinate out_ocoor(ndim);
+    out_grid->oCoorFromOindex(out_ocoor, out_oidx);
+
+    ExtractPointerArray<SobjOut> ptrs(out_nsimd);
+
+    Coordinate lcoor(out_grid->Nd());
+
+    for(int lane=0; lane < out_nsimd; lane++){
+      for(int mu=0;mu<ndim;mu++)
+        lcoor[mu] = out_ocoor[mu] + out_grid->_rdimensions[mu]*out_icoor[lane][mu];
+
+      int llex; Lexicographic::IndexFromCoor(lcoor, llex, out_grid->_ldimensions);
+      ptrs[lane] = &in_slex_conv[llex];
+    }
+    merge(out_v[out_oidx], ptrs, 0);
+  });
+}

////////////////////////////////////////////////////////////////////////////////
// Communicate between grids
@ -40,6 +40,29 @@ class Action

public:
  bool is_smeared = false;
+  RealD deriv_norm_sum;
+  RealD deriv_max_sum;
+  int   deriv_num;
+  RealD deriv_us;
+  RealD S_us;
+  RealD refresh_us;
+  void  reset_timer(void) {
+    deriv_us = S_us = refresh_us = 0.0;
+    deriv_num=0;
+    deriv_norm_sum = deriv_max_sum=0.0;
+  }
+  void  deriv_log(RealD nrm, RealD max) { deriv_max_sum+=max; deriv_norm_sum+=nrm; deriv_num++;}
+  RealD deriv_max_average(void)  { return deriv_max_sum/deriv_num; };
+  RealD deriv_norm_average(void) { return deriv_norm_sum/deriv_num; };
+  RealD deriv_timer(void)   { return deriv_us; };
+  RealD S_timer(void)       { return deriv_us; };
+  RealD refresh_timer(void) { return deriv_us; };
+  void deriv_timer_start(void)   { deriv_us-=usecond(); }
+  void deriv_timer_stop(void)    { deriv_us+=usecond(); }
+  void refresh_timer_start(void) { refresh_us-=usecond(); }
+  void refresh_timer_stop(void)  { refresh_us+=usecond(); }
+  void S_timer_start(void)       { S_us-=usecond(); }
+  void S_timer_stop(void)        { S_us+=usecond(); }
  // Heatbath?
  virtual void refresh(const GaugeField& U, GridSerialRNG &sRNG, GridParallelRNG& pRNG) = 0; // refresh pseudofermions
  virtual RealD S(const GaugeField& U) = 0; // evaluate the action
@ -37,6 +37,10 @@ NAMESPACE_CHECK(ActionSet);

#include <Grid/qcd/action/ActionParams.h>
NAMESPACE_CHECK(ActionParams);

+#include <Grid/qcd/action/filters/MomentumFilter.h>
+#include <Grid/qcd/action/filters/DirichletFilter.h>
+#include <Grid/qcd/action/filters/DDHMCFilter.h>

////////////////////////////////////////////
// Gauge Actions
////////////////////////////////////////////
@ -36,8 +36,7 @@ NAMESPACE_BEGIN(Grid);

// These can move into a params header and be given MacroMagic serialisation
struct GparityWilsonImplParams {
-  Coordinate twists; //Here the first Nd-1 directions are treated as "spatial", and a twist value of 1 indicates G-parity BCs in that direction.
-  //mu=Nd-1 is assumed to be the time direction and a twist value of 1 indicates antiperiodic BCs
+  Coordinate twists;
  GparityWilsonImplParams() : twists(Nd, 0) {};
};
@ -64,10 +63,10 @@ struct StaggeredImplParams {

  RealD, hi,
  int,   MaxIter,
  RealD, tolerance,
+  RealD, mdtolerance,
  int,   degree,
  int,   precision,
-  int,   BoundsCheckFreq,
-  RealD, BoundsCheckTol);
+  int,   BoundsCheckFreq);

// MaxIter and tolerance, vectors??
@ -79,60 +78,16 @@ struct StaggeredImplParams {

  int _degree    = 10,
  int _precision = 64,
  int _BoundsCheckFreq=20,
-  double _BoundsCheckTol=1e-6)
+  RealD mdtol    = 1.0e-6)
  : lo(_lo),
    hi(_hi),
    MaxIter(_maxit),
    tolerance(tol),
+   mdtolerance(mdtol),
    degree(_degree),
    precision(_precision),
-   BoundsCheckFreq(_BoundsCheckFreq),
-   BoundsCheckTol(_BoundsCheckTol){};
+   BoundsCheckFreq(_BoundsCheckFreq){};
};

-/*Action parameters for the generalized rational action
-  The approximation is for (M^dag M)^{1/inv_pow}
-  where inv_pow is the denominator of the fractional power.
-  Default inv_pow=2 for square root, making this equivalent to
-  the OneFlavourRational action
-*/
-struct RationalActionParams : Serializable {
-  GRID_SERIALIZABLE_CLASS_MEMBERS(RationalActionParams,
-    int,   inv_pow,
-    RealD, lo,               //low eigenvalue bound of rational approx
-    RealD, hi,               //high eigenvalue bound of rational approx
-    int,   MaxIter,          //maximum iterations in msCG
-    RealD, action_tolerance, //msCG tolerance in action evaluation
-    int,   action_degree,    //rational approx tolerance in action evaluation
-    RealD, md_tolerance,     //msCG tolerance in MD integration
-    int,   md_degree,        //rational approx tolerance in MD integration
-    int,   precision,        //precision of floating point arithmetic
-    int,   BoundsCheckFreq); //frequency the approximation is tested (with Metropolis degree/tolerance); 0 disables the check
-  // constructor
-  RationalActionParams(int _inv_pow = 2,
-                       RealD _lo = 0.0,
-                       RealD _hi = 1.0,
-                       int _maxit = 1000,
-                       RealD _action_tolerance = 1.0e-8,
-                       int _action_degree = 10,
-                       RealD _md_tolerance = 1.0e-8,
-                       int _md_degree = 10,
-                       int _precision = 64,
-                       int _BoundsCheckFreq=20)
-    : inv_pow(_inv_pow),
-      lo(_lo),
-      hi(_hi),
-      MaxIter(_maxit),
-      action_tolerance(_action_tolerance),
-      action_degree(_action_degree),
-      md_tolerance(_md_tolerance),
-      md_degree(_md_degree),
-      precision(_precision),
-      BoundsCheckFreq(_BoundsCheckFreq){};
-};

NAMESPACE_END(Grid);
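Relative to the comparison base, this branch keeps a single molecular-dynamics tolerance (mdtolerance) on the rational parameters, drops the separate BoundsCheckTol, and does not carry the generalized RationalActionParams struct at all. A hedged construction sketch, assuming the surrounding struct is Grid's OneFlavourRationalParams (the struct name is not visible in the excerpt; the field list matches it):

// Sketch; argument values illustrative.
OneFlavourRationalParams p(/*lo*/ 0.1, /*hi*/ 64.0, /*maxit*/ 1000,
                           /*tol*/ 1.0e-8, /*degree*/ 10, /*precision*/ 64,
                           /*BoundsCheckFreq*/ 20, /*mdtol*/ 1.0e-6);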
@ -68,7 +68,7 @@ public:

///////////////////////////////////////////////////////////////
// Support for MADWF tricks
///////////////////////////////////////////////////////////////
-RealD Mass(void) { return mass; };
+virtual RealD Mass(void) { return mass; };
void SetMass(RealD _mass) {
  mass=_mass;
  SetCoefficientsInternal(_zolo_hi,_gamma,_b,_c); // Reset coeffs
@ -49,6 +49,8 @@ public:

virtual FermionField &tmp(void) = 0;

+virtual void DirichletBlock(Coordinate & _Block) { assert(0); };
+
GridBase * Grid(void)         { return FermionGrid(); };   // this is all the linalg routines need to know
GridBase * RedBlackGrid(void) { return FermionRedBlackGrid(); };
@ -75,6 +75,10 @@ public:

FermionField _tmp;
FermionField &tmp(void) { return _tmp; }

+int Dirichlet;
+Coordinate Block;
+
/********** Deprecate timers **********/
void Report(void);
void ZeroCounters(void);
double DhopCalls;
@ -173,7 +177,18 @@ public:

GridCartesian &FourDimGrid,
GridRedBlackCartesian &FourDimRedBlackGrid,
double _M5,const ImplParams &p= ImplParams());

+virtual void DirichletBlock(Coordinate & block)
+{
+  assert(block.size()==Nd+1);
+  if ( block[0] || block[1] || block[2] || block[3] || block[4] ){
+    Dirichlet = 1;
+    Block = block;
+    Stencil.DirichletBlock(block);
+    StencilEven.DirichletBlock(block);
+    StencilOdd.DirichletBlock(block);
+  }
+}
// Constructors
/*
WilsonFermion5D(int simd,
@ -60,7 +60,8 @@ WilsonFermion5D<Impl>::WilsonFermion5D(GaugeField &_Umu,

  UmuOdd (_FourDimRedBlackGrid),
  Lebesgue(_FourDimGrid),
  LebesgueEvenOdd(_FourDimRedBlackGrid),
-  _tmp(&FiveDimRedBlackGrid)
+  _tmp(&FiveDimRedBlackGrid),
+  Dirichlet(0)
{
  // some assertions
  assert(FiveDimGrid._ndimension==5);
@ -218,6 +219,14 @@ void WilsonFermion5D<Impl>::ImportGauge(const GaugeField &_Umu)

{
  GaugeField HUmu(_Umu.Grid());
  HUmu = _Umu*(-0.5);
+  if ( Dirichlet ) {
+    std::cout << GridLogMessage << " Dirichlet BCs 5d " <<Block<<std::endl;
+    Coordinate GaugeBlock(Nd);
+    for(int d=0;d<Nd;d++) GaugeBlock[d] = Block[d+1];
+    std::cout << GridLogMessage << " Dirichlet BCs 4d " <<GaugeBlock<<std::endl;
+    DirichletFilter<GaugeField> Filter(GaugeBlock);
+    Filter.applyFilter(HUmu);
+  }
  Impl::DoubleStore(GaugeGrid(),Umu,HUmu);
  pickCheckerboard(Even,UmuEven,Umu);
  pickCheckerboard(Odd ,UmuOdd,Umu);
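Wiring summary: FermionOperator gains a default no-op DirichletBlock hook, WilsonFermion5D overrides it to record the block and flag its stencils, and ImportGauge then zeroes the gauge links crossing block boundaries. Block has Nd+1 entries; entry 0 is the fifth (s) direction and entries 1..4 map to the 4d gauge block. A hedged sketch of enabling this on an existing action, names illustrative:

// Sketch: turn on Dirichlet sub-blocks for a 5d fermion operator Dw.
Coordinate block({0,16,16,16,16});  // entry 0: s-direction; 1..4: x,y,z,t
Dw.DirichletBlock(block);           // records Block, flags the stencils
Dw.ImportGauge(Umu);                // re-import: boundary links get filtered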
Grid/qcd/action/filters/DDHMCFilter.h (new file, 102 lines)
@ -0,0 +1,102 @@

/*************************************************************************************

    Grid physics library, www.github.com/paboyle/Grid

    Source file: ./lib/qcd/hmc/integrators/DirichletFilter.h

    Copyright (C) 2015

    Author: Peter Boyle <paboyle@ph.ed.ac.uk>

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License along
    with this program; if not, write to the Free Software Foundation, Inc.,
    51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.

    See the full license in the file "LICENSE" in the top level distribution
    directory
*************************************************************************************/
/*  END LEGAL */
//--------------------------------------------------------------------
#pragma once

NAMESPACE_BEGIN(Grid);
////////////////////////////////////////////////////
// DDHMC filter with sub-block size B[mu]
////////////////////////////////////////////////////

template<typename GaugeField>
struct DDHMCFilter: public MomentumFilterBase<GaugeField>
{
  Coordinate Block;
  int Width;

  DDHMCFilter(const Coordinate &_Block,int _Width=2): Block(_Block) { Width=_Width; }

  void applyFilter(GaugeField &U) const override
  {
    GridBase *grid = U.Grid();
    Coordinate Global=grid->GlobalDimensions();
    GaugeField zzz(grid); zzz = Zero();
    LatticeInteger coor(grid);

    auto zzz_mu = PeekIndex<LorentzIndex>(zzz,0);
    ////////////////////////////////////////////////////
    // Zero BDY layers
    ////////////////////////////////////////////////////
    std::cout<<GridLogMessage<<" DDHMC Force Filter Block "<<Block<<" width " <<Width<<std::endl;
    for(int mu=0;mu<Nd;mu++) {

      Integer B1 = Block[mu];
      if ( B1 && (B1 <= Global[mu]) ) {
        LatticeCoordinate(coor,mu);

        ////////////////////////////////
        // OmegaBar - zero all links contained in slice B-1,0 and
        // mu links connecting to Omega
        ////////////////////////////////
        if ( Width==1) {
          U = where(mod(coor,B1)==Integer(B1-1),zzz,U);
          U = where(mod(coor,B1)==Integer(0)   ,zzz,U);
          auto U_mu = PeekIndex<LorentzIndex>(U,mu);
          U_mu = where(mod(coor,B1)==Integer(B1-2),zzz_mu,U_mu);
          PokeIndex<LorentzIndex>(U, U_mu, mu);
        }
        if ( Width==2) {
          U = where(mod(coor,B1)==Integer(B1-2),zzz,U);
          U = where(mod(coor,B1)==Integer(B1-1),zzz,U);
          U = where(mod(coor,B1)==Integer(0)   ,zzz,U);
          U = where(mod(coor,B1)==Integer(1)   ,zzz,U);
          auto U_mu = PeekIndex<LorentzIndex>(U,mu);
          U_mu = where(mod(coor,B1)==Integer(B1-3),zzz_mu,U_mu);
          PokeIndex<LorentzIndex>(U, U_mu, mu);
        }
        if ( Width==3) {
          U = where(mod(coor,B1)==Integer(B1-3),zzz,U);
          U = where(mod(coor,B1)==Integer(B1-2),zzz,U);
          U = where(mod(coor,B1)==Integer(B1-1),zzz,U);
          U = where(mod(coor,B1)==Integer(0)   ,zzz,U);
          U = where(mod(coor,B1)==Integer(1)   ,zzz,U);
          U = where(mod(coor,B1)==Integer(2)   ,zzz,U);
          auto U_mu = PeekIndex<LorentzIndex>(U,mu);
          U_mu = where(mod(coor,B1)==Integer(B1-4),zzz_mu,U_mu);
          PokeIndex<LorentzIndex>(U, U_mu, mu);
        }
      }

    }

  }
};

NAMESPACE_END(Grid);
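Usage sketch for the new filter: it is applied to the HMC force or links to freeze a boundary layer of width Width around each Block[mu]-sized sub-block. Names and values illustrative:

// Sketch only; U is an existing LatticeGaugeField.
Coordinate block({16,16,16,16});
DDHMCFilter<LatticeGaugeField> ddhmc(block, /*Width*/2);
ddhmc.applyFilter(U);   // zeroes links in the boundary layers of each block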
Grid/qcd/action/filters/DirichletFilter.h (new file, 71 lines)
@ -0,0 +1,71 @@

/*************************************************************************************

    Grid physics library, www.github.com/paboyle/Grid

    Source file: ./lib/qcd/hmc/integrators/DirichletFilter.h

    Copyright (C) 2015

    Author: Peter Boyle <paboyle@ph.ed.ac.uk>

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License along
    with this program; if not, write to the Free Software Foundation, Inc.,
    51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.

    See the full license in the file "LICENSE" in the top level distribution
    directory
*************************************************************************************/
/*  END LEGAL */
//--------------------------------------------------------------------
#pragma once

NAMESPACE_BEGIN(Grid);

template<typename MomentaField>
struct DirichletFilter: public MomentumFilterBase<MomentaField>
{
  typedef typename MomentaField::vector_type vector_type; //SIMD-vectorized complex type
  typedef typename MomentaField::scalar_type scalar_type; //scalar complex type

  typedef iScalar<iScalar<iScalar<vector_type> > > ScalarType; //complex phase for each site

  Coordinate Block;

  DirichletFilter(const Coordinate &_Block): Block(_Block){}

  void applyFilter(MomentaField &P) const override
  {
    GridBase *grid = P.Grid();
    typedef decltype(PeekIndex<LorentzIndex>(P, 0)) LatCM;
    ////////////////////////////////////////////////////
    // Zero strictly links crossing between domains
    ////////////////////////////////////////////////////
    LatticeInteger coor(grid);
    LatCM zz(grid); zz = Zero();
    for(int mu=0;mu<Nd;mu++) {
      if ( (Block[mu]) && (Block[mu] < grid->GlobalDimensions()[mu] ) ) {
        // If costly could provide Grid earlier and precompute masks
        std::cout << " Dirichlet in mu="<<mu<<std::endl;
        LatticeCoordinate(coor,mu);
        auto P_mu = PeekIndex<LorentzIndex>(P, mu);
        P_mu = where(mod(coor,Block[mu])==Integer(Block[mu]-1),zz,P_mu);
        PokeIndex<LorentzIndex>(P, P_mu, mu);
      }
    }
  }
};

NAMESPACE_END(Grid);
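DirichletFilter zeroes only the mu-component of the momentum field on the last slice of each block, i.e. exactly the links that cross a domain boundary. A hedged usage sketch:

// Sketch only; P is an existing momentum field (e.g. LatticeGaugeField).
Coordinate block({16,16,16,16});
DirichletFilter<LatticeGaugeField> dirichlet(block);
dirichlet.applyFilter(P);  // zero P_mu on sites with (x_mu mod 16) == 15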
@ -37,7 +37,7 @@ NAMESPACE_BEGIN(Grid);

template<typename MomentaField>
struct MomentumFilterBase{
-  virtual void applyFilter(MomentaField &P) const;
+  virtual void applyFilter(MomentaField &P) const = 0;
};

//Do nothing
@ -69,11 +69,6 @@ public:

  return PeriodicBC::ShiftStaple(Link,mu);
}

-//Same as Cshift for periodic BCs
-static inline GaugeLinkField CshiftLink(const GaugeLinkField &Link, int mu, int shift){
-  return PeriodicBC::CshiftLink(Link,mu,shift);
-}

static inline bool isPeriodicGaugeField(void) { return true; }
};
@ -115,11 +110,6 @@ public:

  return PeriodicBC::CovShiftBackward(Link, mu, field);
}

-//If mu is a conjugate BC direction
-//Out(x) = U^dag_\mu(x-mu)  | x_\mu != 0
-//       = U^T_\mu(L-1)     | x_\mu == 0
-//else
-//Out(x) = U^dag_\mu(x-mu mod L)
static inline GaugeLinkField
CovShiftIdentityBackward(const GaugeLinkField &Link, int mu)
{
@ -139,13 +129,6 @@

  return PeriodicBC::CovShiftIdentityForward(Link,mu);
}

-//If mu is a conjugate BC direction
-//Out(x) = S_\mu(x+mu)  | x_\mu != L-1
-//       = S*_\mu(x+mu) | x_\mu == L-1
-//else
-//Out(x) = S_\mu(x+mu mod L)
-//Note: While this is used for Staples it is also applicable for shifting gauge links or gauge transformation matrices
static inline GaugeLinkField ShiftStaple(const GaugeLinkField &Link, int mu)
{
  assert(_conjDirs.size() == Nd);
@ -155,27 +138,6 @@ public:

  return PeriodicBC::ShiftStaple(Link,mu);
}

-//Boundary-aware C-shift of gauge links / gauge transformation matrices
-//For conjugate BC direction
-//shift = 1
-//Out(x) = U_\mu(x+\hat\mu)  | x_\mu != L-1
-//       = U*_\mu(0)         | x_\mu == L-1
-//shift = -1
-//Out(x) = U_\mu(x-mu)       | x_\mu != 0
-//       = U*_\mu(L-1)       | x_\mu == 0
-//else
-//shift = 1
-//Out(x) = U_\mu(x+\hat\mu mod L)
-//shift = -1
-//Out(x) = U_\mu(x-\hat\mu mod L)
-static inline GaugeLinkField CshiftLink(const GaugeLinkField &Link, int mu, int shift){
-  assert(_conjDirs.size() == Nd);
-  if(_conjDirs[mu])
-    return ConjugateBC::CshiftLink(Link,mu,shift);
-  else
-    return PeriodicBC::CshiftLink(Link,mu,shift);
-}

static inline void setDirections(std::vector<int> &conjDirs) { _conjDirs=conjDirs; }
static inline std::vector<int> getDirections(void) { return _conjDirs; }
static inline bool isPeriodicGaugeField(void) { return false; }
@ -13,6 +13,31 @@ NAMESPACE_BEGIN(Grid);

  std::cout << GridLogMessage << "Pseudofermion action lambda_max "<<lambda_max<<" (bound "<<hi<<")"<<std::endl;
  assert( (lambda_max < hi) && " High Bounds Check on operator failed" );
}

+template<class Field> void ChebyBoundsCheck(LinearOperatorBase<Field> &HermOp,
+                                            Field &GaussNoise,
+                                            RealD lo,RealD hi)
+{
+  int orderfilter = 1000;
+  Chebyshev<Field> Cheb(lo,hi,orderfilter);
+
+  GridBase *FermionGrid = GaussNoise.Grid();
+
+  Field X(FermionGrid);
+  Field Z(FermionGrid);
+
+  X=GaussNoise;
+  RealD Nx = norm2(X);
+  Cheb(HermOp,X,Z);
+  RealD Nz = norm2(Z);
+
+  std::cout << "************************* "<<std::endl;
+  std::cout << " noise        = "<<Nx<<std::endl;
+  std::cout << " Cheb x noise = "<<Nz<<std::endl;
+  std::cout << " Ratio        = "<<Nz/Nx<<std::endl;
+  std::cout << "************************* "<<std::endl;
+  assert( ((Nz/Nx)<1.0) && " ChebyBoundsCheck ");
+}

template<class Field> void InverseSqrtBoundsCheck(int MaxIter,double tol,
                                                  LinearOperatorBase<Field> &HermOp,
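Why the Nz/Nx < 1 test works: a degree-n Chebyshev filter built on [lo,hi] maps the operator spectrum inside that window to values bounded by 1, while an eigenvalue a small relative distance eps outside grows roughly like

|T_n(x)| <= 1                      for x inside [lo,hi] (mapped to [-1,1])
|T_n(1+eps)| ~ exp(n*sqrt(2*eps))  for small eps > 0

so with orderfilter = 1000 even a tiny spectral leakage outside the window makes the filtered noise norm Nz explode relative to Nx; the ratio staying below 1 is therefore a sharp containment check on the operator's assumed spectral bounds.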
@ -40,66 +65,13 @@ NAMESPACE_BEGIN(Grid);
|
||||
X=X-Y;
|
||||
RealD Nd = norm2(X);
|
||||
std::cout << "************************* "<<std::endl;
|
||||
std::cout << " | noise |^2 = "<<Nx<<std::endl;
|
||||
std::cout << " | (MdagM^-1/2)^2 noise |^2 = "<<Nz<<std::endl;
|
||||
std::cout << " | MdagM (MdagM^-1/2)^2 noise |^2 = "<<Ny<<std::endl;
|
||||
std::cout << " | noise - MdagM (MdagM^-1/2)^2 noise |^2 = "<<Nd<<std::endl;
|
||||
std::cout << " | noise - MdagM (MdagM^-1/2)^2 noise|/|noise| = " << std::sqrt(Nd/Nx) << std::endl;
|
||||
std::cout << " noise = "<<Nx<<std::endl;
|
||||
std::cout << " (MdagM^-1/2)^2 noise = "<<Nz<<std::endl;
|
||||
std::cout << " MdagM (MdagM^-1/2)^2 noise = "<<Ny<<std::endl;
|
||||
std::cout << " noise - MdagM (MdagM^-1/2)^2 noise = "<<Nd<<std::endl;
|
||||
std::cout << "************************* "<<std::endl;
|
||||
assert( (std::sqrt(Nd/Nx)<tol) && " InverseSqrtBoundsCheck ");
|
||||
}
|
||||
|
||||
/* For a HermOp = M^dag M, check the approximation of HermOp^{-1/inv_pow}
   by computing  |X - HermOp * [ HermOp^{-1/inv_pow} ]^{inv_pow} X| < tol
   for noise X (aka GaussNoise).
   ApproxNegPow should be the rational approximation for X^{-1/inv_pow}
*/
template<class Field> void InversePowerBoundsCheck(int inv_pow,
						   int MaxIter,double tol,
						   LinearOperatorBase<Field> &HermOp,
						   Field &GaussNoise,
						   MultiShiftFunction &ApproxNegPow)
{
  GridBase *FermionGrid = GaussNoise.Grid();

  Field X(FermionGrid);
  Field Y(FermionGrid);
  Field Z(FermionGrid);

  Field tmp1(FermionGrid), tmp2(FermionGrid);

  X=GaussNoise;
  RealD Nx = norm2(X);

  ConjugateGradientMultiShift<Field> msCG(MaxIter,ApproxNegPow);

  tmp1 = X;

  Field* in  = &tmp1;
  Field* out = &tmp2;
  for(int i=0;i<inv_pow;i++){ //apply  [ HermOp^{-1/inv_pow} ]^{inv_pow}  X =  HermOp^{-1} X
    msCG(HermOp, *in, *out); //backwards conventions!
    if(i!=inv_pow-1) std::swap(in, out);
  }
  Z = *out;

  RealD Nz = norm2(Z);

  HermOp.HermOp(Z,Y);
  RealD Ny = norm2(Y);

  X=X-Y;
  RealD Nd = norm2(X);
  std::cout << "************************* "<<std::endl;
  std::cout << " | noise |^2                                                 = "<<Nx<<std::endl;
  std::cout << " | (MdagM^-1/" << inv_pow << ")^" << inv_pow << " noise |^2                = "<<Nz<<std::endl;
  std::cout << " | MdagM (MdagM^-1/" << inv_pow << ")^" << inv_pow << " noise |^2          = "<<Ny<<std::endl;
  std::cout << " | noise - MdagM (MdagM^-1/" << inv_pow << ")^" << inv_pow << " noise |^2  = "<<Nd<<std::endl;
  std::cout << " | noise - MdagM (MdagM^-1/" << inv_pow << ")^" << inv_pow << " noise |/| noise | = "<<std::sqrt(Nd/Nx)<<std::endl;
  std::cout << "************************* "<<std::endl;
  assert( (std::sqrt(Nd/Nx)<tol) && " InversePowerBoundsCheck ");
}
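
// Illustrative sketch (not part of the original header): constructing the rational
// approximation for x^{-1/inv_pow} with AlgRemez and feeding it to the check above.
// The degree, Remez working precision and tolerances below are placeholder values.
template<class Field>
void RunInversePowerBoundsCheck(LinearOperatorBase<Field> &HermOp, Field &noise,
				int inv_pow, RealD lo, RealD hi)
{
  AlgRemez remez(lo, hi, 64);             // 64-bit Remez working precision (placeholder)
  remez.generateApprox(12, 1, inv_pow);   // degree-12 approximation to x^{1/inv_pow}
  MultiShiftFunction ApproxNegPow;
  ApproxNegPow.Init(remez, 1.0e-8, true); // inverse=true selects x^{-1/inv_pow}
  InversePowerBoundsCheck(inv_pow, 10000, 1.0e-6, HermOp, noise, ApproxNegPow);
}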


NAMESPACE_END(Grid);

@ -0,0 +1,163 @@
/*************************************************************************************

    Grid physics library, www.github.com/paboyle/Grid

    Source file: ./lib/qcd/action/pseudofermion/DomainDecomposedTwoFlavourBoundaryBoson.h

    Copyright (C) 2021

    Author: Peter Boyle <paboyle@ph.ed.ac.uk>

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License along
    with this program; if not, write to the Free Software Foundation, Inc.,
    51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.

    See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */
#pragma once

NAMESPACE_BEGIN(Grid);

///////////////////////////////////////
// Two flavour boson
///////////////////////////////////////
template<class ImplD,class ImplF>
class DomainDecomposedBoundaryTwoFlavourBosonPseudoFermion : public Action<typename ImplD::GaugeField> {
public:
  INHERIT_IMPL_TYPES(ImplD);

private:
  SchurFactoredFermionOperator<ImplD,ImplF> & NumOp;// the basic operator
  RealD InnerStoppingCondition;
  RealD ActionStoppingCondition;
  RealD DerivativeStoppingCondition;
  FermionField Phi; // the pseudo fermion field for this trajectory
public:
  DomainDecomposedBoundaryTwoFlavourBosonPseudoFermion(SchurFactoredFermionOperator<ImplD,ImplF> &_NumOp,RealD _DerivativeTol, RealD _ActionTol, RealD _InnerTol=1.0e-6)
    : NumOp(_NumOp),
      DerivativeStoppingCondition(_DerivativeTol),
      ActionStoppingCondition(_ActionTol),
      InnerStoppingCondition(_InnerTol),
      Phi(_NumOp.FermionGrid()) {};

  virtual std::string action_name(){return "DomainDecomposedBoundaryTwoFlavourBosonPseudoFermion";}

  virtual std::string LogParameters(){
    std::stringstream sstream;
    return sstream.str();
  }

  virtual void refresh(const GaugeField &U, GridSerialRNG& sRNG, GridParallelRNG& pRNG)
  {
    // P(phi) = e^{- phi^dag P^dag P phi}
    //
    // NumOp == P
    //
    // Take phi = P^{-1} eta  ;  eta = P Phi
    //
    // P(eta) = e^{- eta^dag eta}
    //
    // e^{-x^2/(2 sig^2)} => sig^2 = 0.5.
    //
    // So eta should be of width sig = 1/sqrt(2), i.e. we must multiply by 0.707....
    //
    RealD scale = std::sqrt(0.5);

    NumOp.tolinner=InnerStoppingCondition;
    NumOp.tol=ActionStoppingCondition;
    NumOp.ImportGauge(U);

    FermionField eta(NumOp.FermionGrid());

    gaussian(pRNG,eta); eta=eta*scale;

    NumOp.ProjectBoundaryBar(eta);
    //DumpSliceNorm("eta",eta);
    NumOp.RInv(eta,Phi);

//DumpSliceNorm("Phi",Phi);
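    // Optional consistency check (not in the original source, commented out like
    // the DumpSliceNorm debug lines above): since Phi = P^{-1} eta, re-applying P
    // must reproduce eta to solver tolerance. Could be enabled as:
    //   FermionField check(NumOp.FermionGrid());
    //   NumOp.R(Phi,check);  check = check - eta;
    //   std::cout << GridLogMessage << " |P Phi - eta|^2 = " << norm2(check) << std::endl;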

  };

  //////////////////////////////////////////////////////
  // S = phi^dag Pdag P phi
  //////////////////////////////////////////////////////
  virtual RealD S(const GaugeField &U) {

    NumOp.tolinner=InnerStoppingCondition;
    NumOp.tol=ActionStoppingCondition;
    NumOp.ImportGauge(U);

    FermionField Y(NumOp.FermionGrid());

    NumOp.R(Phi,Y);

    RealD action = norm2(Y);

    return action;
  };

  virtual void deriv(const GaugeField &U,GaugeField & dSdU)
  {
    NumOp.tolinner=InnerStoppingCondition;
    NumOp.tol=DerivativeStoppingCondition;
    NumOp.ImportGauge(U);

    GridBase *fgrid = NumOp.FermionGrid();
    GridBase *ugrid = NumOp.GaugeGrid();

    FermionField X(fgrid);
    FermionField Y(fgrid);
    FermionField tmp(fgrid);

    GaugeField force(ugrid);

    FermionField DobiDdbPhi(fgrid);        // Vector A in my notes
    FermionField DoiDdDobiDdbPhi(fgrid);   // Vector B in my notes
    FermionField DoidP_Phi(fgrid);         // Vector E in my notes
    FermionField DobidDddDoidP_Phi(fgrid); // Vector F in my notes

    FermionField P_Phi(fgrid);

    // P term
    NumOp.dBoundaryBar(Phi,tmp);
    NumOp.dOmegaBarInv(tmp,DobiDdbPhi);           // Vector A
    NumOp.dBoundary(DobiDdbPhi,tmp);
    NumOp.dOmegaInv(tmp,DoiDdDobiDdbPhi);         // Vector B
    P_Phi = Phi - DoiDdDobiDdbPhi;
    NumOp.ProjectBoundaryBar(P_Phi);

    // P^dag P term
    NumOp.dOmegaDagInv(P_Phi,DoidP_Phi);          // Vector E
    NumOp.dBoundaryDag(DoidP_Phi,tmp);
    NumOp.dOmegaBarDagInv(tmp,DobidDddDoidP_Phi); // Vector F
    NumOp.dBoundaryBarDag(DobidDddDoidP_Phi,tmp);

    X = DobiDdbPhi;
    Y = DobidDddDoidP_Phi;
    NumOp.DirichletFermOpD.MDeriv(force,Y,X,DaggerNo);  dSdU=force;
    NumOp.DirichletFermOpD.MDeriv(force,X,Y,DaggerYes); dSdU=dSdU+force;

    X = DoiDdDobiDdbPhi;
    Y = DoidP_Phi;
    NumOp.DirichletFermOpD.MDeriv(force,Y,X,DaggerNo);  dSdU=dSdU+force;
    NumOp.DirichletFermOpD.MDeriv(force,X,Y,DaggerYes); dSdU=dSdU+force;

    dSdU *= -1.0;

  };
};
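
// Illustrative note (not part of the original source): each MDeriv pair above is an
// instance of the generic pseudofermion force pattern for a bilinear S = Y^dag M X,
//   dS/dU = Y^dag (dM/dU) X + X^dag (dM^dag/dU) Y,
// which is why every (X,Y) pair appears once with DaggerNo and once with DaggerYes.
// A minimal sketch of the accumulation, with "Op" standing for any fermion operator
// exposing Grid's MDeriv interface:
//
//   GaugeField force(ugrid);
//   Op.MDeriv(force, Y, X, DaggerNo);  dSdU = dSdU + force; // Y^dag dM X
//   Op.MDeriv(force, X, Y, DaggerYes); dSdU = dSdU + force; // X^dag dM^dag Y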

NAMESPACE_END(Grid);
@ -0,0 +1,158 @@
/*************************************************************************************

    Grid physics library, www.github.com/paboyle/Grid

    Source file: ./lib/qcd/action/pseudofermion/DomainDecomposedTwoFlavourBoundary.h

    Copyright (C) 2021

    Author: Peter Boyle <paboyle@ph.ed.ac.uk>

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License along
    with this program; if not, write to the Free Software Foundation, Inc.,
    51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.

    See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */
#pragma once

NAMESPACE_BEGIN(Grid);

///////////////////////////////////////
// Two flavour
///////////////////////////////////////
template<class ImplD,class ImplF>
class DomainDecomposedBoundaryTwoFlavourPseudoFermion : public Action<typename ImplD::GaugeField> {
public:
  INHERIT_IMPL_TYPES(ImplD);

private:
  SchurFactoredFermionOperator<ImplD,ImplF> & DenOp;// the basic operator
  RealD ActionStoppingCondition;
  RealD DerivativeStoppingCondition;
  RealD InnerStoppingCondition;

  FermionField Phi; // the pseudo fermion field for this trajectory

  RealD refresh_action;
public:
  DomainDecomposedBoundaryTwoFlavourPseudoFermion(SchurFactoredFermionOperator<ImplD,ImplF> &_DenOp,RealD _DerivativeTol, RealD _ActionTol, RealD _InnerTol = 1.0e-6 )
    : DenOp(_DenOp),
      DerivativeStoppingCondition(_DerivativeTol),
      ActionStoppingCondition(_ActionTol),
      InnerStoppingCondition(_InnerTol),
      Phi(_DenOp.FermionGrid()) {};

  virtual std::string action_name(){return "DomainDecomposedBoundaryTwoFlavourPseudoFermion";}


  virtual std::string LogParameters(){
    std::stringstream sstream;
    return sstream.str();
  }

  virtual void refresh(const GaugeField &U, GridSerialRNG& sRNG, GridParallelRNG& pRNG)
  {
    // P(phi) = e^{- phi^dag Rdag^-1 R^-1 phi}
    //
    // DenOp == R
    //
    // Take phi = R eta  ;  eta = R^-1 Phi
    //
    // P(eta) = e^{- eta^dag eta}
    //
    // e^{-x^2/(2 sig^2)} => sig^2 = 0.5.
    //
    // So eta should be of width sig = 1/sqrt(2), i.e. we must multiply by 0.707....
    //
    RealD scale = std::sqrt(0.5);

    DenOp.tolinner=InnerStoppingCondition;
    DenOp.tol    =ActionStoppingCondition;
    DenOp.ImportGauge(U);

    FermionField eta(DenOp.FermionGrid());

    gaussian(pRNG,eta); eta=eta*scale;

    DenOp.ProjectBoundaryBar(eta);
    DenOp.R(eta,Phi);
    //DumpSliceNorm("Phi",Phi);
refresh_action = norm2(eta);
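    // Optional consistency check (not in the original source): since Phi = R eta,
    // the inverse application must recover eta, and the first call to S() should
    // reproduce refresh_action = |eta|^2. Could be enabled as:
    //   FermionField check(DenOp.FermionGrid());
    //   DenOp.RInv(Phi,check);  check = check - eta;
    //   std::cout << GridLogMessage << " |R^-1 Phi - eta|^2 = " << norm2(check) << std::endl;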
  };

  //////////////////////////////////////////////////////
  // S = phi^dag Rdag^-1 R^-1 phi
  //////////////////////////////////////////////////////
  virtual RealD S(const GaugeField &U) {

    DenOp.tolinner=InnerStoppingCondition;
    DenOp.tol=ActionStoppingCondition;
    DenOp.ImportGauge(U);

    FermionField X(DenOp.FermionGrid());

    DenOp.RInv(Phi,X);

    RealD action = norm2(X);

    return action;
  };

  virtual void deriv(const GaugeField &U,GaugeField & dSdU)
  {
    DenOp.tolinner=InnerStoppingCondition;
    DenOp.tol=DerivativeStoppingCondition;
    DenOp.ImportGauge(U);

    GridBase *fgrid = DenOp.FermionGrid();
    GridBase *ugrid = DenOp.GaugeGrid();

    FermionField X(fgrid);
    FermionField Y(fgrid);
    FermionField tmp(fgrid);

    GaugeField force(ugrid);

    FermionField DiDdb_Phi(fgrid);   // Vector C in my notes
    FermionField DidRinv_Phi(fgrid); // Vector D in my notes
    FermionField Rinv_Phi(fgrid);

    //    FermionField RinvDagRinv_Phi(fgrid);
    //    FermionField DdbdDidRinv_Phi(fgrid);

    // R^-1 term
    DenOp.dBoundaryBar(Phi,tmp);
    DenOp.Dinverse(tmp,DiDdb_Phi);   // Vector C
    Rinv_Phi = Phi - DiDdb_Phi;
    DenOp.ProjectBoundaryBar(Rinv_Phi);

    // R^-dagger R^-1 term
    DenOp.DinverseDag(Rinv_Phi,DidRinv_Phi); // Vector D
    /*
    DenOp.dBoundaryBarDag(DidRinv_Phi,DdbdDidRinv_Phi);
    RinvDagRinv_Phi = Rinv_Phi - DdbdDidRinv_Phi;
    DenOp.ProjectBoundaryBar(RinvDagRinv_Phi);
    */
    X = DiDdb_Phi;
    Y = DidRinv_Phi;
    DenOp.PeriodicFermOpD.MDeriv(force,Y,X,DaggerNo);  dSdU=force;
    DenOp.PeriodicFermOpD.MDeriv(force,X,Y,DaggerYes); dSdU=dSdU+force;
    DumpSliceNorm("force",dSdU);
    dSdU *= -1.0;
  };
};

NAMESPACE_END(Grid);
@ -0,0 +1,237 @@
/*************************************************************************************

    Grid physics library, www.github.com/paboyle/Grid

    Source file: ./lib/qcd/action/pseudofermion/DomainDecomposedTwoFlavourBoundary.h

    Copyright (C) 2021

    Author: Peter Boyle <paboyle@ph.ed.ac.uk>

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License along
    with this program; if not, write to the Free Software Foundation, Inc.,
    51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.

    See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */
#pragma once

NAMESPACE_BEGIN(Grid);

///////////////////////////////////////
// Two flavour ratio
///////////////////////////////////////
template<class ImplD,class ImplF>
class DomainDecomposedBoundaryTwoFlavourRatioPseudoFermion : public Action<typename ImplD::GaugeField> {
public:
  INHERIT_IMPL_TYPES(ImplD);

private:
  SchurFactoredFermionOperator<ImplD,ImplF> & NumOp;// the basic operator
  SchurFactoredFermionOperator<ImplD,ImplF> & DenOp;// the basic operator

  RealD InnerStoppingCondition;
  RealD ActionStoppingCondition;
  RealD DerivativeStoppingCondition;

  FermionField Phi; // the pseudo fermion field for this trajectory

public:
  DomainDecomposedBoundaryTwoFlavourRatioPseudoFermion(SchurFactoredFermionOperator<ImplD,ImplF> &_NumOp,
						       SchurFactoredFermionOperator<ImplD,ImplF> &_DenOp,
						       RealD _DerivativeTol, RealD _ActionTol, RealD _InnerTol=1.0e-6)
    : NumOp(_NumOp), DenOp(_DenOp),
      Phi(_NumOp.PeriodicFermOpD.FermionGrid()),
      InnerStoppingCondition(_InnerTol),
      DerivativeStoppingCondition(_DerivativeTol),
      ActionStoppingCondition(_ActionTol)
  {};

  virtual std::string action_name(){return "DomainDecomposedBoundaryTwoFlavourRatioPseudoFermion";}

  virtual std::string LogParameters(){
    std::stringstream sstream;
    return sstream.str();
  }

  virtual void refresh(const GaugeField &U, GridSerialRNG& sRNG, GridParallelRNG& pRNG)
  {
    NumOp.ImportGauge(U);
    DenOp.ImportGauge(U);

    FermionField eta(NumOp.PeriodicFermOpD.FermionGrid());
    FermionField tmp(NumOp.PeriodicFermOpD.FermionGrid());

    // P(phi) = e^{- phi^dag P^dag Rdag^-1 R^-1 P phi}
    //
    // NumOp == P
    // DenOp == R
    //
    // Take phi = P^{-1} R eta  ;  eta = R^-1 P Phi
    //
    // P(eta) = e^{- eta^dag eta}
    //
    // e^{-x^2/(2 sig^2)} => sig^2 = 0.5.
    //
    // So eta should be of width sig = 1/sqrt(2), i.e. we must multiply by 0.707....
    //
    RealD scale = std::sqrt(0.5);

    gaussian(pRNG,eta); eta=eta*scale;

    NumOp.ProjectBoundaryBar(eta);
    NumOp.tolinner=InnerStoppingCondition;
    DenOp.tolinner=InnerStoppingCondition;
    DenOp.tol = ActionStoppingCondition;
    NumOp.tol = ActionStoppingCondition;
    DenOp.R(eta,tmp);
    NumOp.RInv(tmp,Phi);
DumpSliceNorm("Phi",Phi);
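    // Optional consistency check (not in the original source): with
    // Phi = P^{-1} R eta, applying R^-1 P must return eta up to solver
    // tolerance. Could be enabled as:
    //   FermionField check(NumOp.PeriodicFermOpD.FermionGrid());
    //   NumOp.R(Phi,tmp);  DenOp.RInv(tmp,check);  check = check - eta;
    //   std::cout << GridLogMessage << " |R^-1 P Phi - eta|^2 = " << norm2(check) << std::endl;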

  };

  //////////////////////////////////////////////////////
  // S = phi^dag Pdag Rdag^-1 R^-1 P phi
  //////////////////////////////////////////////////////
  virtual RealD S(const GaugeField &U) {

    NumOp.ImportGauge(U);
    DenOp.ImportGauge(U);

    FermionField X(NumOp.PeriodicFermOpD.FermionGrid());
    FermionField Y(NumOp.PeriodicFermOpD.FermionGrid());

    NumOp.tolinner=InnerStoppingCondition;
    DenOp.tolinner=InnerStoppingCondition;
    DenOp.tol = ActionStoppingCondition;
    NumOp.tol = ActionStoppingCondition;
    NumOp.R(Phi,Y);
    DenOp.RInv(Y,X);

    RealD action = norm2(X);
    //    std::cout << " DD boundary action is " <<action<<std::endl;

    return action;
  };

  virtual void deriv(const GaugeField &U,GaugeField & dSdU)
  {
    NumOp.ImportGauge(U);
    DenOp.ImportGauge(U);

    GridBase *fgrid = NumOp.PeriodicFermOpD.FermionGrid();
    GridBase *ugrid = NumOp.PeriodicFermOpD.GaugeGrid();

    FermionField X(fgrid);
    FermionField Y(fgrid);
    FermionField tmp(fgrid);

    GaugeField force(ugrid);

    FermionField DobiDdbPhi(fgrid);                   // Vector A in my notes
    FermionField DoiDdDobiDdbPhi(fgrid);              // Vector B in my notes
    FermionField DiDdbP_Phi(fgrid);                   // Vector C in my notes
    FermionField DidRinvP_Phi(fgrid);                 // Vector D in my notes
    FermionField DdbdDidRinvP_Phi(fgrid);
    FermionField DoidRinvDagRinvP_Phi(fgrid);         // Vector E in my notes
    FermionField DobidDddDoidRinvDagRinvP_Phi(fgrid); // Vector F in my notes

    FermionField P_Phi(fgrid);
    FermionField RinvP_Phi(fgrid);
    FermionField RinvDagRinvP_Phi(fgrid);
    FermionField PdagRinvDagRinvP_Phi(fgrid);

    //    RealD action = S(U);
    NumOp.tolinner=InnerStoppingCondition;
    DenOp.tolinner=InnerStoppingCondition;
    DenOp.tol = DerivativeStoppingCondition;
    NumOp.tol = DerivativeStoppingCondition;

    // P term
    NumOp.dBoundaryBar(Phi,tmp);
    NumOp.dOmegaBarInv(tmp,DobiDdbPhi);        // Vector A
    NumOp.dBoundary(DobiDdbPhi,tmp);
    NumOp.dOmegaInv(tmp,DoiDdDobiDdbPhi);      // Vector B
    P_Phi = Phi - DoiDdDobiDdbPhi;
    NumOp.ProjectBoundaryBar(P_Phi);

    // R^-1 P term
    DenOp.dBoundaryBar(P_Phi,tmp);
    DenOp.Dinverse(tmp,DiDdbP_Phi);            // Vector C
    RinvP_Phi = P_Phi - DiDdbP_Phi;
    DenOp.ProjectBoundaryBar(RinvP_Phi);       // Correct to here


    // R^-dagger R^-1 P term
    DenOp.DinverseDag(RinvP_Phi,DidRinvP_Phi); // Vector D
    DenOp.dBoundaryBarDag(DidRinvP_Phi,DdbdDidRinvP_Phi);
    RinvDagRinvP_Phi = RinvP_Phi - DdbdDidRinvP_Phi;
    DenOp.ProjectBoundaryBar(RinvDagRinvP_Phi);


    // P^dag R^-dagger R^-1 P term
    NumOp.dOmegaDagInv(RinvDagRinvP_Phi,DoidRinvDagRinvP_Phi); // Vector E
    NumOp.dBoundaryDag(DoidRinvDagRinvP_Phi,tmp);
    NumOp.dOmegaBarDagInv(tmp,DobidDddDoidRinvDagRinvP_Phi);   // Vector F
    NumOp.dBoundaryBarDag(DobidDddDoidRinvDagRinvP_Phi,tmp);
    PdagRinvDagRinvP_Phi = RinvDagRinvP_Phi - tmp;
    NumOp.ProjectBoundaryBar(PdagRinvDagRinvP_Phi);

    /*
    std::cout << "S eval "<< action << std::endl;
    std::cout << "S - IP1 "<< innerProduct(Phi,PdagRinvDagRinvP_Phi) << std::endl;
    std::cout << "S - IP2 "<< norm2(RinvP_Phi) << std::endl;

    NumOp.R(Phi,tmp);
    tmp = tmp - P_Phi;
    std::cout << "diff1 "<<norm2(tmp) <<std::endl;


    DenOp.RInv(P_Phi,tmp);
    tmp = tmp - RinvP_Phi;
    std::cout << "diff2 "<<norm2(tmp) <<std::endl;

    DenOp.RDagInv(RinvP_Phi,tmp);
    tmp = tmp - RinvDagRinvP_Phi;
    std::cout << "diff3 "<<norm2(tmp) <<std::endl;

    DenOp.RDag(RinvDagRinvP_Phi,tmp);
    tmp = tmp - PdagRinvDagRinvP_Phi;
    std::cout << "diff4 "<<norm2(tmp) <<std::endl;
    */

    dSdU=Zero();

    X = DobiDdbPhi;
    Y = DobidDddDoidRinvDagRinvP_Phi;
    NumOp.DirichletFermOpD.MDeriv(force,Y,X,DaggerNo);  dSdU=dSdU+force;
    NumOp.DirichletFermOpD.MDeriv(force,X,Y,DaggerYes); dSdU=dSdU+force;

    X = DoiDdDobiDdbPhi;
    Y = DoidRinvDagRinvP_Phi;
    NumOp.DirichletFermOpD.MDeriv(force,Y,X,DaggerNo);  dSdU=dSdU+force;
    NumOp.DirichletFermOpD.MDeriv(force,X,Y,DaggerYes); dSdU=dSdU+force;

    X = DiDdbP_Phi;
    Y = DidRinvP_Phi;
    DenOp.PeriodicFermOpD.MDeriv(force,Y,X,DaggerNo);  dSdU=dSdU+force;
    DenOp.PeriodicFermOpD.MDeriv(force,X,Y,DaggerYes); dSdU=dSdU+force;

    dSdU *= -1.0;

  };
};

NAMESPACE_END(Grid);
@ -44,10 +44,6 @@ NAMESPACE_BEGIN(Grid);
// Exact one flavour implementation of DWF determinant ratio //
///////////////////////////////////////////////////////////////

//Note: using mixed prec CG for the heatbath solver in this action class will not work
// because the L, R operators must have their shift coefficients updated throughout the heatbath step.
// You will find that the heatbath solver simply won't converge.
// To use mixed precision here, use the ExactOneFlavourRatioMixedPrecHeatbathPseudoFermionAction variant below.
template<class Impl>
class ExactOneFlavourRatioPseudoFermionAction : public Action<typename Impl::GaugeField>
{
@ -61,60 +57,37 @@ NAMESPACE_BEGIN(Grid);
      bool use_heatbath_forecasting;
      AbstractEOFAFermion<Impl>& Lop; // the basic LH operator
      AbstractEOFAFermion<Impl>& Rop; // the basic RH operator
      SchurRedBlackDiagMooeeSolve<FermionField> SolverHBL;
      SchurRedBlackDiagMooeeSolve<FermionField> SolverHBR;
      SchurRedBlackDiagMooeeSolve<FermionField> SolverHB;
      SchurRedBlackDiagMooeeSolve<FermionField> SolverL;
      SchurRedBlackDiagMooeeSolve<FermionField> SolverR;
      SchurRedBlackDiagMooeeSolve<FermionField> DerivativeSolverL;
      SchurRedBlackDiagMooeeSolve<FermionField> DerivativeSolverR;
      FermionField Phi; // the pseudofermion field for this trajectory

      RealD norm2_eta; //|eta|^2 where eta is the random gaussian field used to generate the pseudofermion field
      bool initial_action; //true for the first call to S after refresh, for which the identity S = |eta|^2 holds provided the rational approx is good
    public:

      //Used in the heatbath, refresh the shift coefficients of the L (LorR=0) or R (LorR=1) operator
      virtual void heatbathRefreshShiftCoefficients(int LorR, RealD to){
	AbstractEOFAFermion<Impl>&op = LorR == 0 ? Lop : Rop;
	op.RefreshShiftCoefficients(to);
}
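      // Illustrative note (not part of the original source): this hook is virtual so
      // that the mixed-precision variant at the bottom of this file can refresh the
      // shifts of its single-precision L/R operators in lock-step with the double
      // precision ones; see ExactOneFlavourRatioMixedPrecHeatbathPseudoFermionAction.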


      //Use the same solver for L,R in all cases
      ExactOneFlavourRatioPseudoFermionAction(AbstractEOFAFermion<Impl>& _Lop,
					      AbstractEOFAFermion<Impl>& _Rop,
					      OperatorFunction<FermionField>& CG,
					      Params& p,
					      bool use_fc=false)
	: ExactOneFlavourRatioPseudoFermionAction(_Lop,_Rop,CG,CG,CG,CG,CG,CG,p,use_fc) {};

      //Use the same solver for L,R in the heatbath but different solvers elsewhere
	: ExactOneFlavourRatioPseudoFermionAction(_Lop,_Rop,CG,CG,CG,CG,CG,p,use_fc) {};

      ExactOneFlavourRatioPseudoFermionAction(AbstractEOFAFermion<Impl>& _Lop,
					      AbstractEOFAFermion<Impl>& _Rop,
					      OperatorFunction<FermionField>& HeatbathCG,
					      OperatorFunction<FermionField>& ActionCGL, OperatorFunction<FermionField>& ActionCGR,
					      OperatorFunction<FermionField>& DerivCGL , OperatorFunction<FermionField>& DerivCGR,
					      Params& p,
					      bool use_fc=false)
	: ExactOneFlavourRatioPseudoFermionAction(_Lop,_Rop,HeatbathCG,HeatbathCG, ActionCGL, ActionCGR, DerivCGL,DerivCGR,p,use_fc) {};

      //Use different solvers for L,R in all cases
      ExactOneFlavourRatioPseudoFermionAction(AbstractEOFAFermion<Impl>& _Lop,
					      AbstractEOFAFermion<Impl>& _Rop,
					      OperatorFunction<FermionField>& HeatbathCGL, OperatorFunction<FermionField>& HeatbathCGR,
					      OperatorFunction<FermionField>& HeatbathCG,
					      OperatorFunction<FermionField>& ActionCGL, OperatorFunction<FermionField>& ActionCGR,
					      OperatorFunction<FermionField>& DerivCGL , OperatorFunction<FermionField>& DerivCGR,
					      Params& p,
					      bool use_fc=false) :
	Lop(_Lop),
	Rop(_Rop),
	SolverHBL(HeatbathCGL,false,true), SolverHBR(HeatbathCGR,false,true),
	SolverHB(HeatbathCG,false,true),
	SolverL(ActionCGL, false, true), SolverR(ActionCGR, false, true),
	DerivativeSolverL(DerivCGL, false, true), DerivativeSolverR(DerivCGR, false, true),
	Phi(_Lop.FermionGrid()),
	param(p),
	use_heatbath_forecasting(use_fc),
	initial_action(false)
	use_heatbath_forecasting(use_fc)
      {
	AlgRemez remez(param.lo, param.hi, param.precision);

@ -124,8 +97,6 @@ NAMESPACE_BEGIN(Grid);
	PowerNegHalf.Init(remez, param.tolerance, true);
      };

      const FermionField &getPhi() const{ return Phi; }

      virtual std::string action_name() { return "ExactOneFlavourRatioPseudoFermionAction"; }

      virtual std::string LogParameters() {
@ -146,19 +117,6 @@ NAMESPACE_BEGIN(Grid);
	else{ for(int s=0; s<Ls; ++s){ axpby_ssp_pminus(out, 0.0, in, 1.0, in, s, s); } }
      }

      virtual void refresh(const GaugeField &U, GridSerialRNG &sRNG, GridParallelRNG& pRNG) {
	// P(eta_o) = e^{- eta_o^dag eta_o}
	//
	// e^{-x^2/(2 sig^2)} => sig^2 = 0.5.
	//
	RealD scale = std::sqrt(0.5);

	FermionField eta (Lop.FermionGrid());
	gaussian(pRNG,eta); eta = eta * scale;

	refresh(U,eta);
      }

      // EOFA heatbath: see Eqn. (29) of arXiv:1706.05843
      // We generate a Gaussian noise vector \eta, and then compute
      //  \Phi = M_{\rm EOFA}^{-1/2} * \eta
@ -166,10 +124,12 @@ NAMESPACE_BEGIN(Grid);
      //
      // As a check of rational require \Phi^dag M_{EOFA} \Phi == eta^dag M^-1/2^dag M M^-1/2 eta = eta^dag eta
      //
      void refresh(const GaugeField &U, const FermionField &eta) {
      virtual void refresh(const GaugeField& U, GridSerialRNG &sRNG, GridParallelRNG& pRNG)
      {
	Lop.ImportGauge(U);
	Rop.ImportGauge(U);

	FermionField eta         (Lop.FermionGrid());
	FermionField CG_src      (Lop.FermionGrid());
	FermionField CG_soln     (Lop.FermionGrid());
	FermionField Forecast_src(Lop.FermionGrid());
@ -180,6 +140,11 @@ NAMESPACE_BEGIN(Grid);
	if(use_heatbath_forecasting){ prev_solns.reserve(param.degree); }
	ChronoForecast<AbstractEOFAFermion<Impl>, FermionField> Forecast;

	// Seed with Gaussian noise vector (var = 0.5)
	RealD scale = std::sqrt(0.5);
	gaussian(pRNG,eta);
	eta = eta * scale;

	// \Phi = ( \alpha_{0} + \sum_{k=1}^{N_{p}} \alpha_{l} * \gamma_{l} ) * \eta
	RealD N(PowerNegHalf.norm);
	for(int k=0; k<param.degree; ++k){ N += PowerNegHalf.residues[k] / ( 1.0 + PowerNegHalf.poles[k] ); }
@ -195,16 +160,15 @@ NAMESPACE_BEGIN(Grid);
	tmp[1] = Zero();
	for(int k=0; k<param.degree; ++k){
	  gamma_l = 1.0 / ( 1.0 + PowerNegHalf.poles[k] );
	  heatbathRefreshShiftCoefficients(0, -gamma_l);
	  //Lop.RefreshShiftCoefficients(-gamma_l);
	  Lop.RefreshShiftCoefficients(-gamma_l);
	  if(use_heatbath_forecasting){ // Forecast CG guess using solutions from previous poles
	    Lop.Mdag(CG_src, Forecast_src);
	    CG_soln = Forecast(Lop, Forecast_src, prev_solns);
	    SolverHBL(Lop, CG_src, CG_soln);
	    SolverHB(Lop, CG_src, CG_soln);
	    prev_solns.push_back(CG_soln);
	  } else {
	    CG_soln = Zero(); // Just use zero as the initial guess
	    SolverHBL(Lop, CG_src, CG_soln);
	    SolverHB(Lop, CG_src, CG_soln);
	  }
	  Lop.Dtilde(CG_soln, tmp[0]); // We actually solved Cayley preconditioned system: transform back
	  tmp[1] = tmp[1] + ( PowerNegHalf.residues[k]*gamma_l*gamma_l*Lop.k ) * tmp[0];
@ -223,16 +187,15 @@ NAMESPACE_BEGIN(Grid);
	if(use_heatbath_forecasting){ prev_solns.clear(); } // empirically, LH solns don't help for RH solves
	for(int k=0; k<param.degree; ++k){
	  gamma_l = 1.0 / ( 1.0 + PowerNegHalf.poles[k] );
	  heatbathRefreshShiftCoefficients(1, -gamma_l*PowerNegHalf.poles[k]);
	  //Rop.RefreshShiftCoefficients(-gamma_l*PowerNegHalf.poles[k]);
	  Rop.RefreshShiftCoefficients(-gamma_l*PowerNegHalf.poles[k]);
	  if(use_heatbath_forecasting){
	    Rop.Mdag(CG_src, Forecast_src);
	    CG_soln = Forecast(Rop, Forecast_src, prev_solns);
	    SolverHBR(Rop, CG_src, CG_soln);
	    SolverHB(Rop, CG_src, CG_soln);
	    prev_solns.push_back(CG_soln);
	  } else {
	    CG_soln = Zero();
	    SolverHBR(Rop, CG_src, CG_soln);
	    SolverHB(Rop, CG_src, CG_soln);
	  }
	  Rop.Dtilde(CG_soln, tmp[0]); // We actually solved Cayley preconditioned system: transform back
	  tmp[1] = tmp[1] - ( PowerNegHalf.residues[k]*gamma_l*gamma_l*Rop.k ) * tmp[0];
@ -242,119 +205,49 @@ NAMESPACE_BEGIN(Grid);
	Phi = Phi + tmp[1];

	// Reset shift coefficients for energy and force evals
	//Lop.RefreshShiftCoefficients(0.0);
	//Rop.RefreshShiftCoefficients(-1.0);
	heatbathRefreshShiftCoefficients(0, 0.0);
	heatbathRefreshShiftCoefficients(1, -1.0);

	//Mark that the next call to S is the first after refresh
	initial_action = true;

	Lop.RefreshShiftCoefficients(0.0);
	Rop.RefreshShiftCoefficients(-1.0);

	// Bounds check
	RealD EtaDagEta = norm2(eta);
	norm2_eta = EtaDagEta;

	// RealD PhiDagMPhi= norm2(eta);

      };

      void Meofa(const GaugeField& U,const FermionField &in, FermionField & out)
      void Meofa(const GaugeField& U,const FermionField &phi, FermionField & Mphi)
      {
#if 0
	Lop.ImportGauge(U);
	Rop.ImportGauge(U);

	FermionField spProj_in(Lop.FermionGrid());
	FermionField spProj_Phi(Lop.FermionGrid());
	FermionField mPhi(Lop.FermionGrid());
	std::vector<FermionField> tmp(2, Lop.FermionGrid());
	out = in;
	mPhi = phi;

	// LH term: S = S - k <\Phi| P_{-} \Omega_{-}^{\dagger} H(mf)^{-1} \Omega_{-} P_{-} |\Phi>
	spProj(in, spProj_in, -1, Lop.Ls);
	Lop.Omega(spProj_in, tmp[0], -1, 0);
	spProj(Phi, spProj_Phi, -1, Lop.Ls);
	Lop.Omega(spProj_Phi, tmp[0], -1, 0);
	G5R5(tmp[1], tmp[0]);
	tmp[0] = Zero();
	SolverL(Lop, tmp[1], tmp[0]);
	Lop.Dtilde(tmp[0], tmp[1]); // We actually solved Cayley preconditioned system: transform back
	Lop.Omega(tmp[1], tmp[0], -1, 1);
	spProj(tmp[0], tmp[1], -1, Lop.Ls);

	out = out - Lop.k * tmp[1];
	mPhi = mPhi - Lop.k * innerProduct(spProj_Phi, tmp[0]).real();

	// RH term: S = S + k <\Phi| P_{+} \Omega_{+}^{\dagger} ( H(mb)
	//                          - \Delta_{+}(mf,mb) P_{+} )^{-1} \Omega_{+} P_{+} |\Phi>
	spProj(in, spProj_in, 1, Rop.Ls);
	Rop.Omega(spProj_in, tmp[0], 1, 0);
	//                          - \Delta_{+}(mf,mb) P_{+} )^{-1} \Omega_{-} P_{-} |\Phi>
	spProj(Phi, spProj_Phi, 1, Rop.Ls);
	Rop.Omega(spProj_Phi, tmp[0], 1, 0);
	G5R5(tmp[1], tmp[0]);
	tmp[0] = Zero();
	SolverR(Rop, tmp[1], tmp[0]);
	Rop.Dtilde(tmp[0], tmp[1]);
	Rop.Omega(tmp[1], tmp[0], 1, 1);
	spProj(tmp[0], tmp[1], 1, Rop.Ls);

	out = out + Rop.k * tmp[1];
	action += Rop.k * innerProduct(spProj_Phi, tmp[0]).real();
#endif
      }

      //Due to the structure of EOFA, it is no more expensive to compute the inverse of Meofa
      //To ensure correctness we can simply reuse the heatbath code but use the rational approx
      //f(x) = 1/x which corresponds to alpha_0=0, alpha_1=1, beta_1=0 => gamma_1=1
      void MeofaInv(const GaugeField &U, const FermionField &in, FermionField &out) {
	Lop.ImportGauge(U);
	Rop.ImportGauge(U);

	FermionField CG_src (Lop.FermionGrid());
	FermionField CG_soln(Lop.FermionGrid());
	std::vector<FermionField> tmp(2, Lop.FermionGrid());

	// \Phi = ( \alpha_{0} + \sum_{k=1}^{N_{p}} \alpha_{l} * \gamma_{l} ) * \eta
	//      = 1 * \eta
	out = in;

	// LH terms:
	// \Phi = \Phi + k \sum_{k=1}^{N_{p}} P_{-} \Omega_{-}^{\dagger} ( H(mf)
	//          - \gamma_{l} \Delta_{-}(mf,mb) P_{-} )^{-1} \Omega_{-} P_{-} \eta
	spProj(in, tmp[0], -1, Lop.Ls);
	Lop.Omega(tmp[0], tmp[1], -1, 0);
	G5R5(CG_src, tmp[1]);
	{
	  heatbathRefreshShiftCoefficients(0, -1.); //-gamma_1 = -1.

	  CG_soln = Zero(); // Just use zero as the initial guess
	  SolverHBL(Lop, CG_src, CG_soln);

	  Lop.Dtilde(CG_soln, tmp[0]); // We actually solved Cayley preconditioned system: transform back
	  tmp[1] = Lop.k * tmp[0];
	}
	Lop.Omega(tmp[1], tmp[0], -1, 1);
	spProj(tmp[0], tmp[1], -1, Lop.Ls);
	out = out + tmp[1];

	// RH terms:
	// \Phi = \Phi - k \sum_{k=1}^{N_{p}} P_{+} \Omega_{+}^{\dagger} ( H(mb)
	//          - \beta_l\gamma_{l} \Delta_{+}(mf,mb) P_{+} )^{-1} \Omega_{+} P_{+} \eta
	spProj(in, tmp[0], 1, Rop.Ls);
	Rop.Omega(tmp[0], tmp[1], 1, 0);
	G5R5(CG_src, tmp[1]);
	{
	  heatbathRefreshShiftCoefficients(1, 0.); //-gamma_1 * beta_1 = 0

	  CG_soln = Zero();
	  SolverHBR(Rop, CG_src, CG_soln);

	  Rop.Dtilde(CG_soln, tmp[0]); // We actually solved Cayley preconditioned system: transform back
	  tmp[1] = - Rop.k * tmp[0];
	}
	Rop.Omega(tmp[1], tmp[0], 1, 1);
	spProj(tmp[0], tmp[1], 1, Rop.Ls);
	out = out + tmp[1];

	// Reset shift coefficients for energy and force evals
	heatbathRefreshShiftCoefficients(0, 0.0);
	heatbathRefreshShiftCoefficients(1, -1.0);
};
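
      // Worked note (not part of the original source): in the heatbath expansion
      //   \Phi = ( \alpha_0 + \sum_l \alpha_l \gamma_l [shifted solves] ) \eta,
      //   \gamma_l = 1/(1+\beta_l),
      // the exact function f(x) = 1/x is the single-pole case
      //   \alpha_0 = 0, \alpha_1 = 1, \beta_1 = 0  =>  \gamma_1 = 1,
      // so the LH shift is -\gamma_1 = -1 and the RH shift is -\gamma_1*\beta_1 = 0,
      // exactly the values passed to heatbathRefreshShiftCoefficients above.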




      // EOFA action: see Eqn. (10) of arXiv:1706.05843
      virtual RealD S(const GaugeField& U)
      {
@ -378,7 +271,7 @@ NAMESPACE_BEGIN(Grid);
	action -= Lop.k * innerProduct(spProj_Phi, tmp[0]).real();

	// RH term: S = S + k <\Phi| P_{+} \Omega_{+}^{\dagger} ( H(mb)
	//                          - \Delta_{+}(mf,mb) P_{+} )^{-1} \Omega_{+} P_{+} |\Phi>
	//                          - \Delta_{+}(mf,mb) P_{+} )^{-1} \Omega_{-} P_{-} |\Phi>
	spProj(Phi, spProj_Phi, 1, Rop.Ls);
	Rop.Omega(spProj_Phi, tmp[0], 1, 0);
	G5R5(tmp[1], tmp[0]);
@ -388,26 +281,6 @@ NAMESPACE_BEGIN(Grid);
	Rop.Omega(tmp[1], tmp[0], 1, 1);
	action += Rop.k * innerProduct(spProj_Phi, tmp[0]).real();

	if(initial_action){
	  //For the first call to S after refresh, S = |eta|^2. We can use this to ensure the rational approx is good
	  RealD diff = action - norm2_eta;

	  //S_init = eta^dag M^{-1/2} M M^{-1/2} eta
	  //S_init - eta^dag eta = eta^dag ( M^{-1/2} M M^{-1/2} - 1 ) eta

	  //If approximate solution
	  //S_init - eta^dag eta = eta^dag ( [M^{-1/2}+\delta M^{-1/2}] M [M^{-1/2}+\delta M^{-1/2}] - 1 ) eta
	  //                     \approx eta^dag ( \delta M^{-1/2} M^{1/2} + M^{1/2}\delta M^{-1/2} ) eta
	  // We divide out |eta|^2 to remove source scaling but the tolerance on this check should still be somewhat higher than the actual approx tolerance
	  RealD test = fabs(diff)/norm2_eta; //test the quality of the rational approx

	  std::cout << GridLogMessage << action_name() << " initial action " << action << " expect " << norm2_eta << "; diff " << diff << std::endl;
	  std::cout << GridLogMessage << action_name() << "[ eta^dag ( M^{-1/2} M M^{-1/2} - 1 ) eta ]/|eta^2| = " << test << " expect 0 (tol " << param.BoundsCheckTol << ")" << std::endl;

	  assert( ( test < param.BoundsCheckTol ) && " Initial action check failed" );
	  initial_action = false;
	}

	return action;
      };

@ -456,40 +329,6 @@ NAMESPACE_BEGIN(Grid);
      };
    };

    template<class ImplD, class ImplF>
    class ExactOneFlavourRatioMixedPrecHeatbathPseudoFermionAction : public ExactOneFlavourRatioPseudoFermionAction<ImplD>{
    public:
      INHERIT_IMPL_TYPES(ImplD);
      typedef OneFlavourRationalParams Params;

    private:
      AbstractEOFAFermion<ImplF>& LopF; // the basic LH operator
      AbstractEOFAFermion<ImplF>& RopF; // the basic RH operator

    public:

      virtual std::string action_name() { return "ExactOneFlavourRatioMixedPrecHeatbathPseudoFermionAction"; }

      //Used in the heatbath, refresh the shift coefficients of the L (LorR=0) or R (LorR=1) operator
      virtual void heatbathRefreshShiftCoefficients(int LorR, RealD to){
	AbstractEOFAFermion<ImplF> &op = LorR == 0 ? LopF : RopF;
	op.RefreshShiftCoefficients(to);
	this->ExactOneFlavourRatioPseudoFermionAction<ImplD>::heatbathRefreshShiftCoefficients(LorR,to);
      }

      ExactOneFlavourRatioMixedPrecHeatbathPseudoFermionAction(AbstractEOFAFermion<ImplF>& _LopF,
							       AbstractEOFAFermion<ImplF>& _RopF,
							       AbstractEOFAFermion<ImplD>& _LopD,
							       AbstractEOFAFermion<ImplD>& _RopD,
							       OperatorFunction<FermionField>& HeatbathCGL, OperatorFunction<FermionField>& HeatbathCGR,
							       OperatorFunction<FermionField>& ActionCGL, OperatorFunction<FermionField>& ActionCGR,
							       OperatorFunction<FermionField>& DerivCGL , OperatorFunction<FermionField>& DerivCGR,
							       Params& p,
							       bool use_fc=false) :
	LopF(_LopF), RopF(_RopF), ExactOneFlavourRatioPseudoFermionAction<ImplD>(_LopD, _RopD, HeatbathCGL, HeatbathCGR, ActionCGL, ActionCGR, DerivCGL, DerivCGR, p, use_fc){}
    };


NAMESPACE_END(Grid);

#endif
@ -1,372 +0,0 @@
/*************************************************************************************

    Grid physics library, www.github.com/paboyle/Grid

    Source file: ./lib/qcd/action/pseudofermion/GeneralEvenOddRationalRatio.h

    Copyright (C) 2015

    Author: Christopher Kelly <ckelly@bnl.gov>
    Author: Peter Boyle <paboyle@ph.ed.ac.uk>

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License along
    with this program; if not, write to the Free Software Foundation, Inc.,
    51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.

    See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */
#ifndef QCD_PSEUDOFERMION_GENERAL_EVEN_ODD_RATIONAL_RATIO_H
#define QCD_PSEUDOFERMION_GENERAL_EVEN_ODD_RATIONAL_RATIO_H

NAMESPACE_BEGIN(Grid);

/////////////////////////////////////////////////////////
// Generic rational approximation for ratios of operators
/////////////////////////////////////////////////////////

/* S_f = -log( det( [M^dag M]/[V^dag V] )^{1/inv_pow} )
       = chi^dag ( [M^dag M]/[V^dag V] )^{-1/inv_pow} chi
       = chi^dag ( [V^dag V]^{-1/2} [M^dag M] [V^dag V]^{-1/2} )^{-1/inv_pow} chi
       = chi^dag [V^dag V]^{1/(2*inv_pow)} [M^dag M]^{-1/inv_pow} [V^dag V]^{1/(2*inv_pow)} chi

   S_f = chi^dag* P(V^dag*V)/Q(V^dag*V)* N(M^dag*M)/D(M^dag*M)* P(V^dag*V)/Q(V^dag*V)* chi

   BIG WARNING:
   Here V^dag V is referred to in this code as the "numerator" operator and M^dag M is the *denominator* operator.
   This refers to their position in the pseudofermion action, which is the *inverse* of what appears in the determinant.
   Thus for DWF the numerator operator is the Pauli-Villars operator.

   Here P/Q \sim R_{1/(2*inv_pow)}  ~ (V^dagV)^{1/(2*inv_pow)}
   Here N/D \sim R_{-1/inv_pow} ~ (M^dagM)^{-1/inv_pow}
*/

template<class Impl>
class GeneralEvenOddRatioRationalPseudoFermionAction : public Action<typename Impl::GaugeField> {
public:

  INHERIT_IMPL_TYPES(Impl);

  typedef RationalActionParams Params;
  Params param;

  //For action evaluation
  MultiShiftFunction ApproxPowerAction   ;     //rational approx for X^{1/inv_pow}
  MultiShiftFunction ApproxNegPowerAction;     //rational approx for X^{-1/inv_pow}
  MultiShiftFunction ApproxHalfPowerAction;    //rational approx for X^{1/(2*inv_pow)}
  MultiShiftFunction ApproxNegHalfPowerAction; //rational approx for X^{-1/(2*inv_pow)}

  //For the MD integration
  MultiShiftFunction ApproxPowerMD   ;     //rational approx for X^{1/inv_pow}
  MultiShiftFunction ApproxNegPowerMD;     //rational approx for X^{-1/inv_pow}
  MultiShiftFunction ApproxHalfPowerMD;    //rational approx for X^{1/(2*inv_pow)}
  MultiShiftFunction ApproxNegHalfPowerMD; //rational approx for X^{-1/(2*inv_pow)}

private:

  FermionOperator<Impl> & NumOp;// the basic operator
  FermionOperator<Impl> & DenOp;// the basic operator
  FermionField PhiEven; // the pseudo fermion field for this trajectory
  FermionField PhiOdd;  // the pseudo fermion field for this trajectory

  //Generate the approximation to x^{1/inv_pow} (->approx) and x^{-1/inv_pow} (-> approx_inv) by an approx_degree degree rational approximation
  //CG_tolerance is used to issue a warning if the approximation error is larger than the tolerance of the CG and is otherwise just stored in the MultiShiftFunction for use by the multi-shift
  static void generateApprox(MultiShiftFunction &approx, MultiShiftFunction &approx_inv, int inv_pow, int approx_degree, double CG_tolerance, AlgRemez &remez){
    std::cout<<GridLogMessage << "Generating degree "<< approx_degree<<" approximation for x^(1/" << inv_pow << ")"<<std::endl;
    double error = remez.generateApprox(approx_degree,1,inv_pow);
    if(error > CG_tolerance)
      std::cout<<GridLogMessage << "WARNING: Remez approximation has a larger error " << error << " than the CG tolerance " << CG_tolerance << "! Try increasing the number of poles" << std::endl;

    approx.Init(remez, CG_tolerance,false);
    approx_inv.Init(remez, CG_tolerance,true);
  }


protected:
  static constexpr bool Numerator = true;
  static constexpr bool Denominator = false;

  //Allow derived classes to override the multishift CG
  virtual void multiShiftInverse(bool numerator, const MultiShiftFunction &approx, const Integer MaxIter, const FermionField &in, FermionField &out){
    SchurDifferentiableOperator<Impl> schurOp(numerator ? NumOp : DenOp);
    ConjugateGradientMultiShift<FermionField> msCG(MaxIter, approx);
    msCG(schurOp,in, out);
  }
  virtual void multiShiftInverse(bool numerator, const MultiShiftFunction &approx, const Integer MaxIter, const FermionField &in, std::vector<FermionField> &out_elems, FermionField &out){
    SchurDifferentiableOperator<Impl> schurOp(numerator ? NumOp : DenOp);
    ConjugateGradientMultiShift<FermionField> msCG(MaxIter, approx);
    msCG(schurOp,in, out_elems, out);
  }
  //Allow derived classes to override the gauge import
  virtual void ImportGauge(const GaugeField &U){
    NumOp.ImportGauge(U);
    DenOp.ImportGauge(U);
  }

public:

  GeneralEvenOddRatioRationalPseudoFermionAction(FermionOperator<Impl>  &_NumOp,
						 FermionOperator<Impl>  &_DenOp,
						 const Params & p
						 ) :
    NumOp(_NumOp),
    DenOp(_DenOp),
    PhiOdd (_NumOp.FermionRedBlackGrid()),
    PhiEven(_NumOp.FermionRedBlackGrid()),
    param(p)
  {
    std::cout<<GridLogMessage << action_name() << " initialize: starting" << std::endl;
    AlgRemez remez(param.lo,param.hi,param.precision);

    //Generate approximations for action eval
    generateApprox(ApproxPowerAction, ApproxNegPowerAction, param.inv_pow, param.action_degree, param.action_tolerance, remez);
    generateApprox(ApproxHalfPowerAction, ApproxNegHalfPowerAction, 2*param.inv_pow, param.action_degree, param.action_tolerance, remez);

    //Generate approximations for MD
    if(param.md_degree != param.action_degree){ //note the CG tolerance is unrelated to the stopping condition of the Remez algorithm
      generateApprox(ApproxPowerMD, ApproxNegPowerMD, param.inv_pow, param.md_degree, param.md_tolerance, remez);
      generateApprox(ApproxHalfPowerMD, ApproxNegHalfPowerMD, 2*param.inv_pow, param.md_degree, param.md_tolerance, remez);
    }else{
      std::cout<<GridLogMessage << "Using same rational approximations for MD as for action evaluation" << std::endl;
      ApproxPowerMD = ApproxPowerAction;
      ApproxNegPowerMD = ApproxNegPowerAction;
      for(int i=0;i<ApproxPowerMD.tolerances.size();i++)
	ApproxNegPowerMD.tolerances[i] = ApproxPowerMD.tolerances[i] = param.md_tolerance; //used for multishift

      ApproxHalfPowerMD = ApproxHalfPowerAction;
      ApproxNegHalfPowerMD = ApproxNegHalfPowerAction;
      for(int i=0;i<ApproxPowerMD.tolerances.size();i++)
	ApproxNegHalfPowerMD.tolerances[i] = ApproxHalfPowerMD.tolerances[i] = param.md_tolerance;
    }

    std::cout<<GridLogMessage << action_name() << " initialize: complete" << std::endl;
  };

  virtual std::string action_name(){return "GeneralEvenOddRatioRationalPseudoFermionAction";}

  virtual std::string LogParameters(){
    std::stringstream sstream;
    sstream << GridLogMessage << "["<<action_name()<<"] Power              : 1/" << param.inv_pow <<  std::endl;
    sstream << GridLogMessage << "["<<action_name()<<"] Low                :" << param.lo <<  std::endl;
    sstream << GridLogMessage << "["<<action_name()<<"] High               :" << param.hi <<  std::endl;
    sstream << GridLogMessage << "["<<action_name()<<"] Max iterations     :" << param.MaxIter <<  std::endl;
    sstream << GridLogMessage << "["<<action_name()<<"] Tolerance (Action) :" << param.action_tolerance <<  std::endl;
    sstream << GridLogMessage << "["<<action_name()<<"] Degree (Action)    :" << param.action_degree <<  std::endl;
    sstream << GridLogMessage << "["<<action_name()<<"] Tolerance (MD)     :" << param.md_tolerance <<  std::endl;
    sstream << GridLogMessage << "["<<action_name()<<"] Degree (MD)        :" << param.md_degree <<  std::endl;
    sstream << GridLogMessage << "["<<action_name()<<"] Precision          :" << param.precision <<  std::endl;
    return sstream.str();
  }

  //Access the fermion field
  const FermionField &getPhiOdd() const{ return PhiOdd; }

  virtual void refresh(const GaugeField &U, GridSerialRNG &sRNG, GridParallelRNG& pRNG) {
    std::cout<<GridLogMessage << action_name() << " refresh: starting" << std::endl;
    FermionField eta(NumOp.FermionGrid());

    // P(eta) \propto e^{- eta^dag eta}
    //
    // The gaussian function draws from  P(x) \propto e^{- x^2 / 2 }    [i.e. sigma=1]
    // Thus eta = x/sqrt{2} = x * sqrt(1/2)
    RealD scale = std::sqrt(0.5);
    gaussian(pRNG,eta); eta=eta*scale;

    refresh(U,eta);
  }

  //Allow for manual specification of random field for testing
  void refresh(const GaugeField &U, const FermionField &eta) {

    // S_f = chi^dag* P(V^dag*V)/Q(V^dag*V)* N(M^dag*M)/D(M^dag*M)* P(V^dag*V)/Q(V^dag*V)* chi
    //
    // P(phi) = e^{- phi^dag (VdagV)^1/(2*inv_pow) (MdagM)^-1/inv_pow (VdagV)^1/(2*inv_pow) phi}
    //        = e^{- phi^dag (VdagV)^1/(2*inv_pow) (MdagM)^-1/(2*inv_pow) (MdagM)^-1/(2*inv_pow) (VdagV)^1/(2*inv_pow) phi}
    //
    // Phi = (VdagV)^-1/(2*inv_pow) (MdagM)^{1/(2*inv_pow)} eta

    std::cout<<GridLogMessage << action_name() << " refresh: starting" << std::endl;

    FermionField etaOdd (NumOp.FermionRedBlackGrid());
    FermionField etaEven(NumOp.FermionRedBlackGrid());
    FermionField tmp(NumOp.FermionRedBlackGrid());

    pickCheckerboard(Even,etaEven,eta);
    pickCheckerboard(Odd,etaOdd,eta);

    ImportGauge(U);

    // MdagM^1/(2*inv_pow) eta
    std::cout<<GridLogMessage << action_name() << " refresh: doing (M^dag M)^{1/" << 2*param.inv_pow << "} eta" << std::endl;
    multiShiftInverse(Denominator, ApproxHalfPowerAction, param.MaxIter, etaOdd, tmp);

    // VdagV^-1/(2*inv_pow) MdagM^1/(2*inv_pow) eta
    std::cout<<GridLogMessage << action_name() << " refresh: doing (V^dag V)^{-1/" << 2*param.inv_pow << "} ( (M^dag M)^{1/" << 2*param.inv_pow << "} eta)" << std::endl;
    multiShiftInverse(Numerator, ApproxNegHalfPowerAction, param.MaxIter, tmp, PhiOdd);

    assert(NumOp.ConstEE() == 1);
    assert(DenOp.ConstEE() == 1);
    PhiEven = Zero();
    std::cout<<GridLogMessage << action_name() << " refresh: complete" << std::endl;
  };

  //////////////////////////////////////////////////////
  // S_f = chi^dag* P(V^dag*V)/Q(V^dag*V)* N(M^dag*M)/D(M^dag*M)* P(V^dag*V)/Q(V^dag*V)* chi
  //////////////////////////////////////////////////////
  virtual RealD S(const GaugeField &U) {
    std::cout<<GridLogMessage << action_name() << " compute action: starting" << std::endl;
    ImportGauge(U);

    FermionField X(NumOp.FermionRedBlackGrid());
    FermionField Y(NumOp.FermionRedBlackGrid());

    // VdagV^1/(2*inv_pow) Phi
    std::cout<<GridLogMessage << action_name() << " compute action: doing (V^dag V)^{1/" << 2*param.inv_pow << "} Phi" << std::endl;
    multiShiftInverse(Numerator, ApproxHalfPowerAction, param.MaxIter, PhiOdd,X);

    // MdagM^-1/(2*inv_pow) VdagV^1/(2*inv_pow) Phi
    std::cout<<GridLogMessage << action_name() << " compute action: doing (M^dag M)^{-1/" << 2*param.inv_pow << "} ( (V^dag V)^{1/" << 2*param.inv_pow << "} Phi)" << std::endl;
    multiShiftInverse(Denominator, ApproxNegHalfPowerAction, param.MaxIter, X,Y);

    // Randomly apply rational bounds checks.
    int rcheck = rand();
    auto grid = NumOp.FermionGrid();
    auto r=rand();
    grid->Broadcast(0,r);

    if ( param.BoundsCheckFreq != 0 && (r % param.BoundsCheckFreq)==0 ) {
      std::cout<<GridLogMessage << action_name() << " compute action: doing bounds check" << std::endl;
      FermionField gauss(NumOp.FermionRedBlackGrid());
      gauss = PhiOdd;
      SchurDifferentiableOperator<Impl> MdagM(DenOp);
      std::cout<<GridLogMessage << action_name() << " compute action: checking high bounds" << std::endl;
      HighBoundCheck(MdagM,gauss,param.hi);
      std::cout<<GridLogMessage << action_name() << " compute action: full approximation" << std::endl;
      InversePowerBoundsCheck(param.inv_pow,param.MaxIter,param.action_tolerance*100,MdagM,gauss,ApproxNegPowerAction);
      std::cout<<GridLogMessage << action_name() << " compute action: bounds check complete" << std::endl;
    }

    // Phidag VdagV^1/(2*inv_pow) MdagM^-1/(2*inv_pow)  MdagM^-1/(2*inv_pow)  VdagV^1/(2*inv_pow) Phi
    RealD action = norm2(Y);
    std::cout<<GridLogMessage << action_name() << " compute action: complete" << std::endl;

    return action;
  };

  // S_f = chi^dag* P(V^dag*V)/Q(V^dag*V)* N(M^dag*M)/D(M^dag*M)* P(V^dag*V)/Q(V^dag*V)* chi
  //
  // Here, M is some 5D operator and V is the Pauli-Villars field
  // N and D make up the rat. poly of the M term and P and Q make up the rat. poly of the denom term
  //
  // Need
  // dS_f/dU =  chi^dag d[P/Q]  N/D   P/Q  chi
  //         +  chi^dag   P/Q d[N/D]  P/Q  chi
  //         +  chi^dag   P/Q   N/D d[P/Q] chi
  //
  // P/Q is expressed as partial fraction expansion:
  //
  //           a0 + \sum_k ak/(V^dagV + bk)
  //
  // d[P/Q] is then
  //
  //           \sum_k -ak [V^dagV+bk]^{-1}  [ dV^dag V + V^dag dV ] [V^dag V + bk]^{-1}
  //
  // and similar for N/D.
  //
  // Need
  //       MpvPhi_k   = [Vdag V + bk]^{-1} chi
  //       MpvPhi     = {a0 +  \sum_k ak [Vdag V + bk]^{-1} }chi
  //
  //       MfMpvPhi_k = [MdagM+bk]^{-1} MpvPhi
  //       MfMpvPhi   = {a0 +  \sum_k ak [Mdag M + bk]^{-1} } MpvPhi
  //
  //       MpvMfMpvPhi_k = [Vdag V + bk]^{-1} MfMpvchi
//
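  // Illustrative note (not part of the original source): the a0/ak/bk of the
  // partial fraction expansion above live in the MultiShiftFunction as
  // norm/residues/poles, so evaluating P/Q on a vector is schematically
  //
  //   out = approx.norm * chi;                      // a0 chi
  //   for(int k=0;k<approx.poles.size();k++)
  //     out = out + approx.residues[k] * Phi_k[k];  // ak [V^dag V + bk]^{-1} chi
  //
  // where Phi_k are the shifted solves returned by ConjugateGradientMultiShift
  // (the "out" argument of the vector multiShiftInverse overload above).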

  virtual void deriv(const GaugeField &U,GaugeField & dSdU) {
    std::cout<<GridLogMessage << action_name() << " deriv: starting" << std::endl;
    const int n_f  = ApproxNegPowerMD.poles.size();
    const int n_pv = ApproxHalfPowerMD.poles.size();

    std::vector<FermionField> MpvPhi_k     (n_pv,NumOp.FermionRedBlackGrid());
    std::vector<FermionField> MpvMfMpvPhi_k(n_pv,NumOp.FermionRedBlackGrid());
    std::vector<FermionField> MfMpvPhi_k   (n_f ,NumOp.FermionRedBlackGrid());

    FermionField      MpvPhi(NumOp.FermionRedBlackGrid());
    FermionField    MfMpvPhi(NumOp.FermionRedBlackGrid());
    FermionField MpvMfMpvPhi(NumOp.FermionRedBlackGrid());
    FermionField           Y(NumOp.FermionRedBlackGrid());

    GaugeField   tmp(NumOp.GaugeGrid());

    ImportGauge(U);

    std::cout<<GridLogMessage << action_name() << " deriv: doing (V^dag V)^{1/" << 2*param.inv_pow << "} Phi" << std::endl;
    multiShiftInverse(Numerator, ApproxHalfPowerMD, param.MaxIter, PhiOdd,MpvPhi_k,MpvPhi);

    std::cout<<GridLogMessage << action_name() << " deriv: doing (M^dag M)^{-1/" << param.inv_pow << "} ( (V^dag V)^{1/" << 2*param.inv_pow << "} Phi)" << std::endl;
    multiShiftInverse(Denominator, ApproxNegPowerMD, param.MaxIter, MpvPhi,MfMpvPhi_k,MfMpvPhi);

    std::cout<<GridLogMessage << action_name() << " deriv: doing (V^dag V)^{1/" << 2*param.inv_pow << "} ( (M^dag M)^{-1/" << param.inv_pow << "} (V^dag V)^{1/" << 2*param.inv_pow << "} Phi)" << std::endl;
    multiShiftInverse(Numerator, ApproxHalfPowerMD, param.MaxIter, MfMpvPhi,MpvMfMpvPhi_k,MpvMfMpvPhi);


    SchurDifferentiableOperator<Impl> MdagM(DenOp);
    SchurDifferentiableOperator<Impl> VdagV(NumOp);


    RealD ak;

    dSdU = Zero();

    // With these building blocks
    //
    //       dS/dU =
    //                 \sum_k -ak MfMpvPhi_k^dag      [ dM^dag M + M^dag dM ] MfMpvPhi_k         (1)
    //             +   \sum_k -ak MpvMfMpvPhi_k^\dag  [ dV^dag V + V^dag dV ] MpvPhi_k           (2)
    //                        -ak MpvPhi_k^dag        [ dV^dag V + V^dag dV ] MpvMfMpvPhi_k      (3)

    //(1)
    std::cout<<GridLogMessage << action_name() << " deriv: doing dS/dU part (1)" << std::endl;
    for(int k=0;k<n_f;k++){
      ak = ApproxNegPowerMD.residues[k];
      MdagM.Mpc(MfMpvPhi_k[k],Y);
      MdagM.MpcDagDeriv(tmp , MfMpvPhi_k[k], Y );  dSdU=dSdU+ak*tmp;
      MdagM.MpcDeriv(tmp , Y, MfMpvPhi_k[k] );  dSdU=dSdU+ak*tmp;
    }

    //(2)
    //(3)
    std::cout<<GridLogMessage << action_name() << " deriv: doing dS/dU part (2)+(3)" << std::endl;
    for(int k=0;k<n_pv;k++){

      ak = ApproxHalfPowerMD.residues[k];

      VdagV.Mpc(MpvPhi_k[k],Y);
      VdagV.MpcDagDeriv(tmp,MpvMfMpvPhi_k[k],Y); dSdU=dSdU+ak*tmp;
      VdagV.MpcDeriv   (tmp,Y,MpvMfMpvPhi_k[k]); dSdU=dSdU+ak*tmp;

      VdagV.Mpc(MpvMfMpvPhi_k[k],Y);             // V as we take Ydag
      VdagV.MpcDeriv   (tmp,Y, MpvPhi_k[k]); dSdU=dSdU+ak*tmp;
      VdagV.MpcDagDeriv(tmp,MpvPhi_k[k], Y); dSdU=dSdU+ak*tmp;

    }

    //dSdU = Ta(dSdU);
    std::cout<<GridLogMessage << action_name() << " deriv: complete" << std::endl;
  };
};

NAMESPACE_END(Grid);

#endif
@ -1,93 +0,0 @@
/*************************************************************************************

    Grid physics library, www.github.com/paboyle/Grid

    Source file: ./lib/qcd/action/pseudofermion/GeneralEvenOddRationalRatioMixedPrec.h

    Copyright (C) 2015

    Author: Christopher Kelly <ckelly@bnl.gov>
    Author: Peter Boyle <paboyle@ph.ed.ac.uk>

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License along
    with this program; if not, write to the Free Software Foundation, Inc.,
    51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.

    See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */
#ifndef QCD_PSEUDOFERMION_GENERAL_EVEN_ODD_RATIONAL_RATIO_MIXED_PREC_H
#define QCD_PSEUDOFERMION_GENERAL_EVEN_ODD_RATIONAL_RATIO_MIXED_PREC_H

NAMESPACE_BEGIN(Grid);

/////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Generic rational approximation for ratios of operators utilizing the mixed precision multishift algorithm
// cf. GeneralEvenOddRational.h for details
/////////////////////////////////////////////////////////////////////////////////////////////////////////////

template<class ImplD, class ImplF>
class GeneralEvenOddRatioRationalMixedPrecPseudoFermionAction : public GeneralEvenOddRatioRationalPseudoFermionAction<ImplD> {
private:
  typedef typename ImplD::FermionField FermionFieldD;
  typedef typename ImplF::FermionField FermionFieldF;

  FermionOperator<ImplD> & NumOpD;
  FermionOperator<ImplD> & DenOpD;

  FermionOperator<ImplF> & NumOpF;
  FermionOperator<ImplF> & DenOpF;

  Integer ReliableUpdateFreq;
protected:

  //Allow derived classes to override the multishift CG
  virtual void multiShiftInverse(bool numerator, const MultiShiftFunction &approx, const Integer MaxIter, const FermionFieldD &in, FermionFieldD &out){
    SchurDifferentiableOperator<ImplD> schurOpD(numerator ? NumOpD : DenOpD);
    SchurDifferentiableOperator<ImplF> schurOpF(numerator ? NumOpF : DenOpF);

    ConjugateGradientMultiShiftMixedPrec<FermionFieldD, FermionFieldF> msCG(MaxIter, approx, NumOpF.FermionRedBlackGrid(), schurOpF, ReliableUpdateFreq);
    msCG(schurOpD, in, out);
  }
  virtual void multiShiftInverse(bool numerator, const MultiShiftFunction &approx, const Integer MaxIter, const FermionFieldD &in, std::vector<FermionFieldD> &out_elems, FermionFieldD &out){
    SchurDifferentiableOperator<ImplD> schurOpD(numerator ? NumOpD : DenOpD);
    SchurDifferentiableOperator<ImplF> schurOpF(numerator ? NumOpF : DenOpF);

    ConjugateGradientMultiShiftMixedPrec<FermionFieldD, FermionFieldF> msCG(MaxIter, approx, NumOpF.FermionRedBlackGrid(), schurOpF, ReliableUpdateFreq);
    msCG(schurOpD, in, out_elems, out);
  }
}
|
||||
//Allow derived classes to override the gauge import
|
||||
virtual void ImportGauge(const typename ImplD::GaugeField &Ud){
|
||||
typename ImplF::GaugeField Uf(NumOpF.GaugeGrid());
|
||||
precisionChange(Uf, Ud);
|
||||
|
||||
NumOpD.ImportGauge(Ud);
|
||||
DenOpD.ImportGauge(Ud);
|
||||
|
||||
NumOpF.ImportGauge(Uf);
|
||||
DenOpF.ImportGauge(Uf);
|
||||
}
|
||||
|
||||
public:
|
||||
GeneralEvenOddRatioRationalMixedPrecPseudoFermionAction(FermionOperator<ImplD> &_NumOpD, FermionOperator<ImplD> &_DenOpD,
|
||||
FermionOperator<ImplF> &_NumOpF, FermionOperator<ImplF> &_DenOpF,
|
||||
const RationalActionParams & p, Integer _ReliableUpdateFreq
|
||||
) : GeneralEvenOddRatioRationalPseudoFermionAction<ImplD>(_NumOpD, _DenOpD, p),
|
||||
ReliableUpdateFreq(_ReliableUpdateFreq), NumOpD(_NumOpD), DenOpD(_DenOpD), NumOpF(_NumOpF), DenOpF(_DenOpF){}
|
||||
|
||||
virtual std::string action_name(){return "GeneralEvenOddRatioRationalMixedPrecPseudoFermionAction";}
|
||||
};
|
||||
|
||||
NAMESPACE_END(Grid);
|
||||
|
||||
#endif
|
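The class removed above exposed multiShiftInverse and ImportGauge as protected virtual extension points. A minimal sketch of how a further-derived action could have intercepted the multishift solve, for example to add logging; everything except the base class and the signature of its protected virtual is an illustrative assumption:

    // Sketch only: VerboseMixedPrecAction is a hypothetical name, not part of Grid.
    template<class ImplD, class ImplF>
    class VerboseMixedPrecAction
      : public GeneralEvenOddRatioRationalMixedPrecPseudoFermionAction<ImplD, ImplF> {
      typedef GeneralEvenOddRatioRationalMixedPrecPseudoFermionAction<ImplD, ImplF> Base;
      typedef typename ImplD::FermionField FermionFieldD;
    public:
      using Base::Base;  // inherit the base constructor
      virtual void multiShiftInverse(bool numerator, const MultiShiftFunction &approx,
                                     const Integer MaxIter, const FermionFieldD &in,
                                     FermionFieldD &out) {
        std::cout << GridLogMessage << "multishift solve, numerator=" << numerator << std::endl;
        Base::multiShiftInverse(numerator, approx, MaxIter, in, out);  // delegate to the mixed-prec CG
      }
    };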
@ -40,31 +40,257 @@ NAMESPACE_BEGIN(Grid);
    // Here N/D \sim R_{-1/2} ~ (M^dagM)^{-1/2}

    template<class Impl>
    class OneFlavourEvenOddRatioRationalPseudoFermionAction : public GeneralEvenOddRatioRationalPseudoFermionAction<Impl> {
    class OneFlavourEvenOddRatioRationalPseudoFermionAction : public Action<typename Impl::GaugeField> {
    public:

      INHERIT_IMPL_TYPES(Impl);

      typedef OneFlavourRationalParams Params;
      Params param;

      MultiShiftFunction PowerHalf   ;
      MultiShiftFunction PowerNegHalf;
      MultiShiftFunction PowerQuarter;
      MultiShiftFunction PowerNegQuarter;

    private:
      static RationalActionParams transcribe(const Params &in){
        RationalActionParams out;
        out.inv_pow = 2;
        out.lo = in.lo;
        out.hi = in.hi;
        out.MaxIter = in.MaxIter;
        out.action_tolerance = out.md_tolerance = in.tolerance;
        out.action_degree = out.md_degree = in.degree;
        out.precision = in.precision;
        out.BoundsCheckFreq = in.BoundsCheckFreq;
        return out;
      }

      FermionOperator<Impl> & NumOp;// the basic operator
      FermionOperator<Impl> & DenOp;// the basic operator
      FermionField PhiEven; // the pseudo fermion field for this trajectory
      FermionField PhiOdd; // the pseudo fermion field for this trajectory
      FermionField Noise; // spare noise field for bounds check

    public:

      OneFlavourEvenOddRatioRationalPseudoFermionAction(FermionOperator<Impl>  &_NumOp,
                                                        FermionOperator<Impl>  &_DenOp,
                                                        Params & p
                                                        ) :
        NumOp(_NumOp),
        DenOp(_DenOp),
        PhiOdd (_NumOp.FermionRedBlackGrid()),
        PhiEven(_NumOp.FermionRedBlackGrid()),
        Noise(_NumOp.FermionRedBlackGrid()),
        param(p)
      {
        AlgRemez remez(param.lo,param.hi,param.precision);

        // MdagM^(+- 1/2)
        std::cout<<GridLogMessage << "Generating degree "<<param.degree<<" for x^(1/2)"<<std::endl;
        remez.generateApprox(param.degree,1,2);
        PowerHalf.Init(remez,param.tolerance,false);
        PowerNegHalf.Init(remez,param.tolerance,true);

        // MdagM^(+- 1/4)
        std::cout<<GridLogMessage << "Generating degree "<<param.degree<<" for x^(1/4)"<<std::endl;
        remez.generateApprox(param.degree,1,4);
        PowerQuarter.Init(remez,param.tolerance,false);
        PowerNegQuarter.Init(remez,param.tolerance,true);
      };

      virtual std::string action_name(){
        std::stringstream sstream;
        sstream<< "OneFlavourEvenOddRatioRationalPseudoFermionAction det("<< DenOp.Mass() << ") / det("<<NumOp.Mass()<<")";
        return sstream.str();
      }

    public:
      OneFlavourEvenOddRatioRationalPseudoFermionAction(FermionOperator<Impl>  &_NumOp,
                                                        FermionOperator<Impl>  &_DenOp,
                                                        const Params & p
                                                        ) :
        GeneralEvenOddRatioRationalPseudoFermionAction<Impl>(_NumOp, _DenOp, transcribe(p)){}
      virtual std::string LogParameters(){
        std::stringstream sstream;
        sstream << GridLogMessage << "["<<action_name()<<"] Low :" << param.lo << std::endl;
        sstream << GridLogMessage << "["<<action_name()<<"] High :" << param.hi << std::endl;
        sstream << GridLogMessage << "["<<action_name()<<"] Max iterations :" << param.MaxIter << std::endl;
        sstream << GridLogMessage << "["<<action_name()<<"] Tolerance :" << param.tolerance << std::endl;
        sstream << GridLogMessage << "["<<action_name()<<"] Degree :" << param.degree << std::endl;
        sstream << GridLogMessage << "["<<action_name()<<"] Precision :" << param.precision << std::endl;
        return sstream.str();
      }

      virtual void refresh(const GaugeField &U, GridSerialRNG &sRNG, GridParallelRNG& pRNG) {

      virtual std::string action_name(){return "OneFlavourEvenOddRatioRationalPseudoFermionAction";}
        // S_f = chi^dag* P(V^dag*V)/Q(V^dag*V)* N(M^dag*M)/D(M^dag*M)* P(V^dag*V)/Q(V^dag*V)* chi
        //
        // P(phi) = e^{- phi^dag (VdagV)^1/4 (MdagM)^-1/2 (VdagV)^1/4 phi}
        //        = e^{- phi^dag (VdagV)^1/4 (MdagM)^-1/4 (MdagM)^-1/4 (VdagV)^1/4 phi}
        //
        // Phi = (VdagV)^{-1/4} (MdagM)^{1/4} eta
        //
        // P(eta) = e^{- eta^dag eta}
        //
        // e^{-x^2/(2 sig^2)} => sig^2 = 0.5.
        //
        // So eta should be of width sig = 1/sqrt(2).

        RealD scale = std::sqrt(0.5);

        FermionField eta(NumOp.FermionGrid());
        FermionField etaOdd (NumOp.FermionRedBlackGrid());
        FermionField etaEven(NumOp.FermionRedBlackGrid());
        FermionField tmp(NumOp.FermionRedBlackGrid());

        gaussian(pRNG,eta); eta=eta*scale;

        pickCheckerboard(Even,etaEven,eta);
        pickCheckerboard(Odd,etaOdd,eta);

        Noise = etaOdd;
        NumOp.ImportGauge(U);
        DenOp.ImportGauge(U);

        // MdagM^1/4 eta
        SchurDifferentiableOperator<Impl> MdagM(DenOp);
        ConjugateGradientMultiShift<FermionField> msCG_M(param.MaxIter,PowerQuarter);
        msCG_M(MdagM,etaOdd,tmp);

        // VdagV^-1/4 MdagM^1/4 eta
        SchurDifferentiableOperator<Impl> VdagV(NumOp);
        ConjugateGradientMultiShift<FermionField> msCG_V(param.MaxIter,PowerNegQuarter);
        msCG_V(VdagV,tmp,PhiOdd);

        assert(NumOp.ConstEE() == 1);
        assert(DenOp.ConstEE() == 1);
        PhiEven = Zero();

      };

      //////////////////////////////////////////////////////
      // S_f = chi^dag* P(V^dag*V)/Q(V^dag*V)* N(M^dag*M)/D(M^dag*M)* P(V^dag*V)/Q(V^dag*V)* chi
      //////////////////////////////////////////////////////
      virtual RealD S(const GaugeField &U) {

        NumOp.ImportGauge(U);
        DenOp.ImportGauge(U);

        FermionField X(NumOp.FermionRedBlackGrid());
        FermionField Y(NumOp.FermionRedBlackGrid());

        // VdagV^1/4 Phi
        SchurDifferentiableOperator<Impl> VdagV(NumOp);
        ConjugateGradientMultiShift<FermionField> msCG_V(param.MaxIter,PowerQuarter);
        msCG_V(VdagV,PhiOdd,X);

        // MdagM^-1/4 VdagV^1/4 Phi
        SchurDifferentiableOperator<Impl> MdagM(DenOp);
        ConjugateGradientMultiShift<FermionField> msCG_M(param.MaxIter,PowerNegQuarter);
        msCG_M(MdagM,X,Y);

        // Randomly apply rational bounds checks.
        auto grid = NumOp.FermionGrid();
        auto r=rand();
        grid->Broadcast(0,r);
        if ( (r%param.BoundsCheckFreq)==0 ) {
          FermionField gauss(NumOp.FermionRedBlackGrid());
          gauss = Noise;
          HighBoundCheck(MdagM,gauss,param.hi);
          InverseSqrtBoundsCheck(param.MaxIter,param.tolerance*100,MdagM,gauss,PowerNegHalf);
          ChebyBoundsCheck(MdagM,Noise,param.lo,param.hi);
        }

        // Phidag VdagV^1/4 MdagM^-1/4 MdagM^-1/4 VdagV^1/4 Phi
        RealD action = norm2(Y);

        return action;
      };

      // S_f = chi^dag* P(V^dag*V)/Q(V^dag*V)* N(M^dag*M)/D(M^dag*M)* P(V^dag*V)/Q(V^dag*V)* chi
      //
      // Here, M is some 5D operator and V is the Pauli-Villars field
      // N and D make up the rational polynomial of the M term, and P and Q make up the rational polynomial of the denominator term
      //
      // Need
      // dS_f/dU =  chi^dag d[P/Q]  N/D   P/Q  chi
      //         +  chi^dag  P/Q  d[N/D]  P/Q  chi
      //         +  chi^dag  P/Q   N/D  d[P/Q] chi
      //
      // P/Q is expressed as partial fraction expansion:
      //
      //           a0 + \sum_k ak/(V^dagV + bk)
      //
      // d[P/Q] is then
      //
      //           \sum_k -ak [V^dagV+bk]^{-1} [ dV^dag V + V^dag dV ] [V^dag V + bk]^{-1}
      //
      // and similar for N/D.
      //
      // Need
      //       MpvPhi_k   = [Vdag V + bk]^{-1} chi
      //       MpvPhi     = {a0 +  \sum_k ak [Vdag V + bk]^{-1} }chi
      //
      //       MfMpvPhi_k = [MdagM+bk]^{-1} MpvPhi
      //       MfMpvPhi   = {a0 +  \sum_k ak [Mdag M + bk]^{-1} } MpvPhi
      //
      //       MpvMfMpvPhi_k = [Vdag V + bk]^{-1} MfMpvchi
      //

      virtual void deriv(const GaugeField &U,GaugeField & dSdU) {

        const int n_f  = PowerNegHalf.poles.size();
        const int n_pv = PowerQuarter.poles.size();

        std::vector<FermionField> MpvPhi_k     (n_pv,NumOp.FermionRedBlackGrid());
        std::vector<FermionField> MpvMfMpvPhi_k(n_pv,NumOp.FermionRedBlackGrid());
        std::vector<FermionField> MfMpvPhi_k   (n_f ,NumOp.FermionRedBlackGrid());

        FermionField MpvPhi(NumOp.FermionRedBlackGrid());
        FermionField MfMpvPhi(NumOp.FermionRedBlackGrid());
        FermionField MpvMfMpvPhi(NumOp.FermionRedBlackGrid());
        FermionField Y(NumOp.FermionRedBlackGrid());

        GaugeField tmp(NumOp.GaugeGrid());

        NumOp.ImportGauge(U);
        DenOp.ImportGauge(U);

        SchurDifferentiableOperator<Impl> VdagV(NumOp);
        SchurDifferentiableOperator<Impl> MdagM(DenOp);

        ConjugateGradientMultiShift<FermionField> msCG_V(param.MaxIter,PowerQuarter);
        ConjugateGradientMultiShift<FermionField> msCG_M(param.MaxIter,PowerNegHalf);

        msCG_V(VdagV,PhiOdd,MpvPhi_k,MpvPhi);
        msCG_M(MdagM,MpvPhi,MfMpvPhi_k,MfMpvPhi);
        msCG_V(VdagV,MfMpvPhi,MpvMfMpvPhi_k,MpvMfMpvPhi);

        RealD ak;

        dSdU = Zero();

        // With these building blocks
        //
        // dS/dU =
        //  \sum_k -ak MfMpvPhi_k^dag      [ dM^dag M + M^dag dM ] MfMpvPhi_k         (1)
        // +\sum_k -ak MpvMfMpvPhi_k^\dag  [ dV^dag V + V^dag dV ] MpvPhi_k           (2)
        //         -ak MpvPhi_k^dag        [ dV^dag V + V^dag dV ] MpvMfMpvPhi_k      (3)

        //(1)
        for(int k=0;k<n_f;k++){
          ak = PowerNegHalf.residues[k];
          MdagM.Mpc(MfMpvPhi_k[k],Y);
          MdagM.MpcDagDeriv(tmp , MfMpvPhi_k[k], Y );  dSdU=dSdU+ak*tmp;
          MdagM.MpcDeriv(tmp , Y, MfMpvPhi_k[k] );  dSdU=dSdU+ak*tmp;
        }

        //(2)
        //(3)
        for(int k=0;k<n_pv;k++){

          ak = PowerQuarter.residues[k];

          VdagV.Mpc(MpvPhi_k[k],Y);
          VdagV.MpcDagDeriv(tmp,MpvMfMpvPhi_k[k],Y); dSdU=dSdU+ak*tmp;
          VdagV.MpcDeriv (tmp,Y,MpvMfMpvPhi_k[k]); dSdU=dSdU+ak*tmp;

          VdagV.Mpc(MpvMfMpvPhi_k[k],Y);  // V as we take Ydag
          VdagV.MpcDeriv (tmp,Y, MpvPhi_k[k]); dSdU=dSdU+ak*tmp;
          VdagV.MpcDagDeriv(tmp,MpvPhi_k[k], Y); dSdU=dSdU+ak*tmp;

        }

        //dSdU = Ta(dSdU);

      };
    };

NAMESPACE_END(Grid);

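In LaTeX, the partial-fraction manipulation described in the comments above reads (a direct transcription of the comment, with b_k the poles and a_k the residues of the rational approximation):

    \frac{P}{Q}(V^\dagger V) = a_0 + \sum_k \frac{a_k}{V^\dagger V + b_k},
    \qquad
    d\!\left[\frac{P}{Q}\right] = -\sum_k a_k\,(V^\dagger V + b_k)^{-1}
        \left[ (dV)^\dagger V + V^\dagger dV \right] (V^\dagger V + b_k)^{-1},

and identically for N/D with V replaced by M. The shifted inverses applied to chi are exactly the MpvPhi_k, MfMpvPhi_k and MpvMfMpvPhi_k vectors returned by the multishift CG.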
@ -49,10 +49,12 @@ NAMESPACE_BEGIN(Grid);
    Params param;

    MultiShiftFunction PowerHalf   ;
    MultiShiftFunction PowerNegHalf;
    MultiShiftFunction PowerQuarter;
    MultiShiftFunction PowerNegHalf;
    MultiShiftFunction PowerNegQuarter;

    MultiShiftFunction MDPowerQuarter;
    MultiShiftFunction MDPowerNegHalf;
  private:

    FermionOperator<Impl> & NumOp;// the basic operator
@ -79,6 +81,10 @@ NAMESPACE_BEGIN(Grid);
      remez.generateApprox(param.degree,1,4);
      PowerQuarter.Init(remez,param.tolerance,false);
      PowerNegQuarter.Init(remez,param.tolerance,true);

      // Deriv solves use a different tolerance
      MDPowerQuarter.Init(remez,param.mdtolerance,false);
      MDPowerNegHalf.Init(remez,param.mdtolerance,true);
    };

    virtual std::string action_name(){return "OneFlavourRatioRationalPseudoFermionAction";}
@ -204,8 +210,8 @@ NAMESPACE_BEGIN(Grid);

    virtual void deriv(const GaugeField &U,GaugeField & dSdU) {

      const int n_f  = PowerNegHalf.poles.size();
      const int n_pv = PowerQuarter.poles.size();
      const int n_f  = MDPowerNegHalf.poles.size();
      const int n_pv = MDPowerQuarter.poles.size();

      std::vector<FermionField> MpvPhi_k     (n_pv,NumOp.FermionGrid());
      std::vector<FermionField> MpvMfMpvPhi_k(n_pv,NumOp.FermionGrid());
@ -224,8 +230,8 @@ NAMESPACE_BEGIN(Grid);
      MdagMLinearOperator<FermionOperator<Impl> ,FermionField> MdagM(DenOp);
      MdagMLinearOperator<FermionOperator<Impl> ,FermionField> VdagV(NumOp);

      ConjugateGradientMultiShift<FermionField> msCG_V(param.MaxIter,PowerQuarter);
      ConjugateGradientMultiShift<FermionField> msCG_M(param.MaxIter,PowerNegHalf);
      ConjugateGradientMultiShift<FermionField> msCG_V(param.MaxIter,MDPowerQuarter);
      ConjugateGradientMultiShift<FermionField> msCG_M(param.MaxIter,MDPowerNegHalf);

      msCG_V(VdagV,Phi,MpvPhi_k,MpvPhi);
      msCG_M(MdagM,MpvPhi,MfMpvPhi_k,MfMpvPhi);
@ -244,7 +250,7 @@ NAMESPACE_BEGIN(Grid);

      //(1)
      for(int k=0;k<n_f;k++){
        ak = PowerNegHalf.residues[k];
        ak = MDPowerNegHalf.residues[k];
        DenOp.M(MfMpvPhi_k[k],Y);
        DenOp.MDeriv(tmp , MfMpvPhi_k[k], Y,DaggerYes );  dSdU=dSdU+ak*tmp;
        DenOp.MDeriv(tmp , Y, MfMpvPhi_k[k], DaggerNo );  dSdU=dSdU+ak*tmp;
@ -254,7 +260,7 @@ NAMESPACE_BEGIN(Grid);
      //(3)
      for(int k=0;k<n_pv;k++){

        ak = PowerQuarter.residues[k];
        ak = MDPowerQuarter.residues[k];

        NumOp.M(MpvPhi_k[k],Y);
        NumOp.MDeriv(tmp,MpvMfMpvPhi_k[k],Y,DaggerYes); dSdU=dSdU+ak*tmp;

@ -40,8 +40,6 @@ directory
#include <Grid/qcd/action/pseudofermion/OneFlavourRational.h>
#include <Grid/qcd/action/pseudofermion/OneFlavourRationalRatio.h>
#include <Grid/qcd/action/pseudofermion/OneFlavourEvenOddRational.h>
#include <Grid/qcd/action/pseudofermion/GeneralEvenOddRationalRatio.h>
#include <Grid/qcd/action/pseudofermion/GeneralEvenOddRationalRatioMixedPrec.h>
#include <Grid/qcd/action/pseudofermion/OneFlavourEvenOddRationalRatio.h>
#include <Grid/qcd/action/pseudofermion/ExactOneFlavourRatio.h>

@ -75,18 +75,28 @@ NAMESPACE_BEGIN(Grid);
      conformable(_NumOp.GaugeRedBlackGrid(), _DenOp.GaugeRedBlackGrid());
    };

    virtual std::string action_name(){return "TwoFlavourEvenOddRatioPseudoFermionAction";}
    virtual std::string action_name(){
      std::stringstream sstream;
      sstream<<"TwoFlavourEvenOddRatioPseudoFermionAction det("<<DenOp.Mass()<<") / det("<<NumOp.Mass()<<")";
      return sstream.str();
    }

    virtual std::string LogParameters(){
      std::stringstream sstream;
      sstream << GridLogMessage << "["<<action_name()<<"] has no parameters" << std::endl;
      sstream<< GridLogMessage << "["<<action_name()<<"] -- No further parameters "<<std::endl;
      return sstream.str();
    }

    //Access the fermion field
    const FermionField &getPhiOdd() const{ return PhiOdd; }

    virtual void refresh(const GaugeField &U, GridSerialRNG &sRNG, GridParallelRNG& pRNG) {

      // P(phi) = e^{- phi^dag Vpc (MpcdagMpc)^-1 Vpcdag phi}
      //
      // NumOp == V
      // DenOp == M
      //
      // Take phi_o = Vpcdag^{-1} Mpcdag eta_o ; eta_o = Mpcdag^{-1} Vpcdag Phi
      //
      // P(eta_o) = e^{- eta_o^dag eta_o}
      //
      // e^{-x^2/(2 sig^2)} => sig^2 = 0.5.
@ -94,22 +104,12 @@ NAMESPACE_BEGIN(Grid);
      RealD scale = std::sqrt(0.5);

      FermionField eta    (NumOp.FermionGrid());
      gaussian(pRNG,eta); eta = eta * scale;

      refresh(U,eta);
    }

    void refresh(const GaugeField &U, const FermionField &eta) {
      // P(phi) = e^{- phi^dag Vpc (MpcdagMpc)^-1 Vpcdag phi}
      //
      // NumOp == V
      // DenOp == M
      //
      // Take phi_o = Vpcdag^{-1} Mpcdag eta_o ; eta_o = Mpcdag^{-1} Vpcdag Phi
      FermionField etaOdd (NumOp.FermionRedBlackGrid());
      FermionField etaEven(NumOp.FermionRedBlackGrid());
      FermionField tmp    (NumOp.FermionRedBlackGrid());

      gaussian(pRNG,eta);

      pickCheckerboard(Even,etaEven,eta);
      pickCheckerboard(Odd,etaOdd,eta);

@ -129,8 +129,8 @@ NAMESPACE_BEGIN(Grid);
      DenOp.MooeeDag(etaEven,tmp);
      NumOp.MooeeInvDag(tmp,PhiEven);

      //PhiOdd =PhiOdd*scale;
      //PhiEven=PhiEven*scale;
      PhiOdd =PhiOdd*scale;
      PhiEven=PhiEven*scale;

    };

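The refresh has been split so the Gaussian draw and the map from eta to Phi are now separable. A sketch of driving the new deterministic overload with externally supplied noise, for example to make regression tests reproducible; `action` is an assumed instance of TwoFlavourEvenOddRatioPseudoFermionAction, and the scale placement follows the RNG-driven wrapper shown in the diff above:

    FermionField eta(NumOp.FermionGrid());
    gaussian(pRNG, eta);          // or load a stored eta to replay a trajectory
    eta = eta * std::sqrt(0.5);   // width sig = 1/sqrt(2), as in refresh(U, sRNG, pRNG)
    action.refresh(U, eta);       // same code path the RNG-driven refresh delegates to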
203	Grid/qcd/action/pseudofermion/TwoFlavourRatioEO4DPseudoFermion.h	Normal file
@ -0,0 +1,203 @@
/*************************************************************************************

    Grid physics library, www.github.com/paboyle/Grid

    Source file: ./lib/qcd/action/pseudofermion/TwoFlavourRatio.h

    Copyright (C) 2015

    Author: Peter Boyle <paboyle@ph.ed.ac.uk>
    Author: Peter Boyle <peterboyle@Peters-MacBook-Pro-2.local>
    Author: paboyle <paboyle@ph.ed.ac.uk>

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License along
    with this program; if not, write to the Free Software Foundation, Inc.,
    51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.

    See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */
#pragma once

NAMESPACE_BEGIN(Grid);

///////////////////////////////////////
// Two flavour ratio
///////////////////////////////////////
template<class Impl>
class TwoFlavourRatioEO4DPseudoFermionAction : public Action<typename Impl::GaugeField> {
public:
  INHERIT_IMPL_TYPES(Impl);

private:
  typedef FermionOperator<Impl> FermOp;
  FermionOperator<Impl> & NumOp;// the basic operator
  FermionOperator<Impl> & DenOp;// the basic operator

  OperatorFunction<FermionField> &DerivativeSolver;
  OperatorFunction<FermionField> &DerivativeDagSolver;
  OperatorFunction<FermionField> &ActionSolver;
  OperatorFunction<FermionField> &HeatbathSolver;

  FermionField phi4; // the pseudo fermion field for this trajectory

public:
  TwoFlavourRatioEO4DPseudoFermionAction(FermionOperator<Impl>  &_NumOp,
                                         FermionOperator<Impl>  &_DenOp,
                                         OperatorFunction<FermionField> & DS,
                                         OperatorFunction<FermionField> & AS ) :
    TwoFlavourRatioEO4DPseudoFermionAction(_NumOp,_DenOp, DS,DS,AS,AS) {};
  TwoFlavourRatioEO4DPseudoFermionAction(FermionOperator<Impl>  &_NumOp,
                                         FermionOperator<Impl>  &_DenOp,
                                         OperatorFunction<FermionField> & DS,
                                         OperatorFunction<FermionField> & DDS,
                                         OperatorFunction<FermionField> & AS,
                                         OperatorFunction<FermionField> & HS
                                         ) : NumOp(_NumOp),
                                             DenOp(_DenOp),
                                             DerivativeSolver(DS),
                                             DerivativeDagSolver(DDS),
                                             ActionSolver(AS),
                                             HeatbathSolver(HS),
                                             phi4(_NumOp.GaugeGrid())
  {};

  virtual std::string action_name(){return "TwoFlavourRatioEO4DPseudoFermionAction";}

  virtual std::string LogParameters(){
    std::stringstream sstream;
    sstream << GridLogMessage << "["<<action_name()<<"] has no parameters" << std::endl;
    return sstream.str();
  }

  virtual void refresh(const GaugeField &U, GridSerialRNG &sRNG, GridParallelRNG& pRNG) {

    // P(phi) = e^{- phi^dag (V^dag M^-dag)_11 (M^-1 V)_11 phi}
    //
    // NumOp == V
    // DenOp == M
    //
    // Take phi = (V^{-1} M)_11 eta  ; eta = (M^{-1} V)_11 Phi
    //
    // P(eta) = e^{- eta^dag eta}
    //
    // e^{-x^2/(2 sig^2)} => sig^2 = 0.5.
    //
    // So eta should be of width sig = 1/sqrt(2), i.e. multiplied by 0.707...
    //
    RealD scale = std::sqrt(0.5);

    FermionField eta4(NumOp.GaugeGrid());
    FermionField eta5(NumOp.FermionGrid());
    FermionField tmp(NumOp.FermionGrid());
    FermionField phi5(NumOp.FermionGrid());

    gaussian(pRNG,eta4);
    NumOp.ImportFourDimPseudoFermion(eta4,eta5);
    NumOp.ImportGauge(U);
    DenOp.ImportGauge(U);

    SchurRedBlackDiagMooeeSolve<FermionField> PrecSolve(HeatbathSolver);

    DenOp.M(eta5,tmp);              // M eta
    PrecSolve(NumOp,tmp,phi5);      // phi = V^-1 M eta
    phi5=phi5*scale;
    std::cout << GridLogMessage << "4d pf refresh "<< norm2(phi5)<<"\n";
    // Project to 4d
    NumOp.ExportFourDimPseudoFermion(phi5,phi4);

  };

  //////////////////////////////////////////////////////
  // S = phi^dag (V^dag M^-dag)_11 (M^-1 V)_11 phi
  //////////////////////////////////////////////////////
  virtual RealD S(const GaugeField &U) {

    NumOp.ImportGauge(U);
    DenOp.ImportGauge(U);

    FermionField Y4(NumOp.GaugeGrid());
    FermionField X(NumOp.FermionGrid());
    FermionField Y(NumOp.FermionGrid());
    FermionField phi5(NumOp.FermionGrid());

    MdagMLinearOperator<FermionOperator<Impl> ,FermionField> MdagMOp(DenOp);
    SchurRedBlackDiagMooeeSolve<FermionField> PrecSolve(ActionSolver);

    NumOp.ImportFourDimPseudoFermion(phi4,phi5);
    NumOp.M(phi5,X);              // X = V phi
    PrecSolve(DenOp,X,Y);         // Y = (MdagM)^-1 Mdag V phi = M^-1 V phi
    NumOp.ExportFourDimPseudoFermion(Y,Y4);

    RealD action = norm2(Y4);

    return action;
  };

  //////////////////////////////////////////////////////
  // dS/du =   2 Re phi^dag (V^dag M^-dag)_11 (M^-1 dV)_11 phi
  //         - 2 Re phi^dag (V^dag M^-dag)_11 (M^-1 dM M^-1 V)_11 phi
  //////////////////////////////////////////////////////
  virtual void deriv(const GaugeField &U,GaugeField & dSdU) {

    NumOp.ImportGauge(U);
    DenOp.ImportGauge(U);

    FermionField  X(NumOp.FermionGrid());
    FermionField  Y(NumOp.FermionGrid());
    FermionField  phi(NumOp.FermionGrid());
    FermionField  Vphi(NumOp.FermionGrid());
    FermionField  MinvVphi(NumOp.FermionGrid());
    FermionField  tmp4(NumOp.GaugeGrid());
    FermionField  MdagInvMinvVphi(NumOp.FermionGrid());

    GaugeField   force(NumOp.GaugeGrid());

    //Y = V phi
    //X = Mdag V phi
    //Y = (Mdag M)^-1 Mdag V phi = M^-1 V Phi
    NumOp.ImportFourDimPseudoFermion(phi4,phi);
    NumOp.M(phi,Vphi);               // V phi
    SchurRedBlackDiagMooeeSolve<FermionField> PrecSolve(DerivativeSolver);
    PrecSolve(DenOp,Vphi,MinvVphi);  // M^-1 V phi
    std::cout << GridLogMessage << "4d deriv solve "<< norm2(MinvVphi)<<"\n";

    // Projects onto the physical space and back
    NumOp.ExportFourDimPseudoFermion(MinvVphi,tmp4);
    NumOp.ImportFourDimPseudoFermion(tmp4,Y);

    SchurRedBlackDiagMooeeDagSolve<FermionField> PrecDagSolve(DerivativeDagSolver);
    // X = proj M^-dag V phi
    // Need an adjoint solve
    PrecDagSolve(DenOp,Y,MdagInvMinvVphi);
    std::cout << GridLogMessage << "4d deriv solve dag "<< norm2(MdagInvMinvVphi)<<"\n";

    // phi^dag (Vdag Mdag^-1) (M^-1 dV) phi
    NumOp.MDeriv(force ,MdagInvMinvVphi , phi, DaggerNo );  dSdU=force;

    // phi^dag (dVdag Mdag^-1) (M^-1 V) phi
    NumOp.MDeriv(force , phi, MdagInvMinvVphi ,DaggerYes );  dSdU=dSdU+force;

    // - 2 Re phi^dag (V^dag M^-dag)_11 (M^-1 dM M^-1 V)_11 phi
    DenOp.MDeriv(force,MdagInvMinvVphi,MinvVphi,DaggerNo);  dSdU=dSdU-force;
    DenOp.MDeriv(force,MinvVphi,MdagInvMinvVphi,DaggerYes); dSdU=dSdU-force;

    dSdU *= -1.0;
    //dSdU = - Ta(dSdU);

  };
};

NAMESPACE_END(Grid);

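In LaTeX, the action and force documented in the comments of this new file are (a transcription of the comments; the subscript 11 denotes the projection onto the physical four-dimensional component):

    S = \phi^\dagger\,(V^\dagger M^{-\dagger})_{11}\,(M^{-1} V)_{11}\,\phi,

    \frac{dS}{du} = 2\,\mathrm{Re}\left[\phi^\dagger (V^\dagger M^{-\dagger})_{11}\,(M^{-1}\,dV)_{11}\,\phi\right]
                  - 2\,\mathrm{Re}\left[\phi^\dagger (V^\dagger M^{-\dagger})_{11}\,(M^{-1}\,dM\,M^{-1} V)_{11}\,\phi\right],

which is what the four MDeriv calls and the final `dSdU *= -1.0;` assemble.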
@ -151,22 +151,12 @@ public:
      Resources.GetCheckPointer()->CheckpointRestore(Parameters.StartTrajectory, U,
                                                     Resources.GetSerialRNG(),
                                                     Resources.GetParallelRNG());
    } else if (Parameters.StartingType == "CheckpointStartReseed") {
      // Same as CheckpointStart but reseed the RNGs using the fixed integer seeding used for ColdStart and HotStart
      // Useful for creating new evolution streams from an existing stream

      // WARNING: Unfortunately because the checkpointer doesn't presently allow us to separately restore the RNG and gauge fields we have to load
      // an existing RNG checkpoint first; make sure one is available and named correctly
      Resources.GetCheckPointer()->CheckpointRestore(Parameters.StartTrajectory, U,
                                                     Resources.GetSerialRNG(),
                                                     Resources.GetParallelRNG());
      Resources.SeedFixedIntegers();
    } else {
      // others
      std::cout << GridLogError << "Unrecognized StartingType\n";
      std::cout
          << GridLogError
          << "Valid [HotStart, ColdStart, TepidStart, CheckpointStart, CheckpointStartReseed]\n";
          << "Valid [HotStart, ColdStart, TepidStart, CheckpointStart]\n";
      exit(1);
    }
  }
@ -186,6 +176,9 @@ private:
    typedef IntegratorType<SmearingPolicy> TheIntegrator;
    TheIntegrator MDynamics(UGrid, Parameters.MD, TheAction, Smearing);

    // Sets the momentum filter
    MDynamics.setMomentumFilter(*(Resources.GetMomentumFilter()));

    Smearing.set_Field(U);

    HybridMonteCarlo<TheIntegrator> HMC(Parameters, MDynamics,
@ -34,6 +34,7 @@ directory
 * @brief Classes for Hybrid Monte Carlo update
 *
 * @author Guido Cossu
 * @author Peter Boyle
 */
//--------------------------------------------------------------------
#pragma once
@ -115,22 +116,17 @@ private:

    random(sRNG, rn_test);

    std::cout << GridLogHMC
              << "--------------------------------------------------\n";
    std::cout << GridLogHMC << "exp(-dH) = " << prob
              << " Random = " << rn_test << "\n";
    std::cout << GridLogHMC
              << "Acc. Probability = " << ((prob < 1.0) ? prob : 1.0) << "\n";
    std::cout << GridLogHMC << "--------------------------------------------------\n";
    std::cout << GridLogHMC << "exp(-dH) = " << prob << " Random = " << rn_test << "\n";
    std::cout << GridLogHMC << "Acc. Probability = " << ((prob < 1.0) ? prob : 1.0) << "\n";

    if ((prob > 1.0) || (rn_test <= prob)) {  // accepted
      std::cout << GridLogHMC << "Metropolis_test -- ACCEPTED\n";
      std::cout << GridLogHMC
                << "--------------------------------------------------\n";
      std::cout << GridLogHMC << "--------------------------------------------------\n";
      return true;
    } else {  // rejected
      std::cout << GridLogHMC << "Metropolis_test -- REJECTED\n";
      std::cout << GridLogHMC
                << "--------------------------------------------------\n";
      std::cout << GridLogHMC << "--------------------------------------------------\n";
      return false;
    }
  }
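The accept/reject logic printed above is a standard Metropolis step on the energy change dH. A self-contained sketch of the same decision rule outside Grid, in plain C++, for illustration only:

    #include <cmath>
    #include <random>

    // Accept with probability min(1, exp(-dH)); downhill moves are always accepted.
    bool metropolis_accept(double dH, std::mt19937 &rng) {
      std::uniform_real_distribution<double> uniform(0.0, 1.0);
      double prob    = std::exp(-dH);   // exp(-dH), as logged above
      double rn_test = uniform(rng);    // uniform draw in [0,1)
      return (prob > 1.0) || (rn_test <= prob);
    }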
@ -139,19 +135,68 @@ private:
  // Evolution
  /////////////////////////////////////////////////////////
  RealD evolve_hmc_step(Field &U) {
    TheIntegrator.refresh(U, sRNG, pRNG);  // set U and initialize P and phi's

    RealD H0 = TheIntegrator.S(U);  // initial state action
    GridBase *Grid = U.Grid();

    //////////////////////////////////////////////////////////////////////////////////////////////////////
    // Mainly for DDHMC perform a random translation of U modulo volume
    //////////////////////////////////////////////////////////////////////////////////////////////////////
    std::cout << GridLogMessage << "--------------------------------------------------\n";
    std::cout << GridLogMessage << "Random shifting gauge field by [";
    for(int d=0;d<Grid->Nd();d++) {

      int L = Grid->GlobalDimensions()[d];

      RealD rn_uniform; random(sRNG, rn_uniform);

      int shift = (int) (rn_uniform*L);

      std::cout << shift;
      if(d<Grid->Nd()-1) std::cout <<",";
      else               std::cout <<"]\n";

      U = Cshift(U,d,shift);
    }
    std::cout << GridLogMessage << "--------------------------------------------------\n";

    TheIntegrator.reset_timer();

    //////////////////////////////////////////////////////////////////////////////////////////////////////
    // set U and initialize P and phi's
    //////////////////////////////////////////////////////////////////////////////////////////////////////
    std::cout << GridLogMessage << "--------------------------------------------------\n";
    std::cout << GridLogMessage << "Refresh momenta and pseudofermions";
    TheIntegrator.refresh(U, sRNG, pRNG);
    std::cout << GridLogMessage << "--------------------------------------------------\n";

    //////////////////////////////////////////////////////////////////////////////////////////////////////
    // initial state action
    //////////////////////////////////////////////////////////////////////////////////////////////////////
    std::cout << GridLogMessage << "--------------------------------------------------\n";
    std::cout << GridLogMessage << "Compute initial action";
    RealD H0 = TheIntegrator.S(U);
    std::cout << GridLogMessage << "--------------------------------------------------\n";

    std::streamsize current_precision = std::cout.precision();
    std::cout.precision(15);
    std::cout << GridLogHMC << "Total H before trajectory = " << H0 << "\n";
    std::cout.precision(current_precision);

    std::cout << GridLogMessage << "--------------------------------------------------\n";
    std::cout << GridLogMessage << " Molecular Dynamics evolution ";
    TheIntegrator.integrate(U);
    std::cout << GridLogMessage << "--------------------------------------------------\n";

    RealD H1 = TheIntegrator.S(U);  // updated state action
    //////////////////////////////////////////////////////////////////////////////////////////////////////
    // updated state action
    //////////////////////////////////////////////////////////////////////////////////////////////////////
    std::cout << GridLogMessage << "--------------------------------------------------\n";
    std::cout << GridLogMessage << "Compute final action";
    RealD H1 = TheIntegrator.S(U);
    std::cout << GridLogMessage << "--------------------------------------------------\n";

    ///////////////////////////////////////////////////////////
    if(0){
      std::cout << "------------------------- Reversibility test" << std::endl;
@ -163,17 +208,16 @@ private:
    }
    ///////////////////////////////////////////////////////////

    std::cout.precision(15);
    std::cout << GridLogHMC << "Total H after trajectory  = " << H1
              << "  dH = " << H1 - H0 << "\n";

    std::cout << GridLogHMC << "--------------------------------------------------\n";
    std::cout << GridLogHMC << "Total H after trajectory  = " << H1 << "  dH = " << H1 - H0 << "\n";
    std::cout << GridLogHMC << "--------------------------------------------------\n";

    std::cout.precision(current_precision);

    return (H1 - H0);
  }

public:
  /////////////////////////////////////////
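The shift applied above translates the gauge field by a random lattice vector, one Cshift per dimension:

    U_\mu(x) \;\longrightarrow\; U_\mu(x+s), \qquad s_d = \lfloor r_d L_d \rfloor,\quad r_d \sim \mathrm{U}[0,1).

By translation invariance of the action this leaves the Markov chain measure unchanged; as the comment notes it is mainly for DDHMC, presumably so that the decomposition boundaries move between trajectories.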
@ -195,8 +239,11 @@ public:

    // Actual updates (evolve a copy Ucopy then copy back eventually)
    unsigned int FinalTrajectory = Params.Trajectories + Params.NoMetropolisUntil + Params.StartTrajectory;

    for (int traj = Params.StartTrajectory; traj < FinalTrajectory; ++traj) {

      std::cout << GridLogHMC << "-- # Trajectory = " << traj << "\n";

      if (traj < Params.StartTrajectory + Params.NoMetropolisUntil) {
        std::cout << GridLogHMC << "-- Thermalization" << std::endl;
      }
@ -216,11 +263,10 @@ public:
      if (accept)
        Ucur = Ucopy;

      double t1=usecond();
      std::cout << GridLogHMC << "Total time for trajectory (s): " << (t1-t0)/1e6 << std::endl;

      TheIntegrator.print_timer();

      for (int obs = 0; obs < Observables.size(); obs++) {
        std::cout << GridLogDebug << "Observables # " << obs << std::endl;
@ -80,9 +80,7 @@ public:
      std::cout << GridLogError << "Seeds not initialized" << std::endl;
      exit(1);
    }
    std::cout << GridLogMessage << "Reseeding serial RNG with seed vector " << SerialSeeds << std::endl;
    sRNG_.SeedFixedIntegers(SerialSeeds);
    std::cout << GridLogMessage << "Reseeding parallel RNG with seed vector " << ParallelSeeds << std::endl;
    pRNG_->SeedFixedIntegers(ParallelSeeds);
  }
};
@ -72,6 +72,8 @@ class HMCResourceManager {
  typedef HMCModuleBase< BaseHmcCheckpointer<ImplementationPolicy> > CheckpointerBaseModule;
  typedef HMCModuleBase< HmcObservable<typename ImplementationPolicy::Field> > ObservableBaseModule;
  typedef ActionModuleBase< Action<typename ImplementationPolicy::Field>, GridModule > ActionBaseModule;
  typedef typename ImplementationPolicy::Field MomentaField;
  typedef typename ImplementationPolicy::Field Field;

  // Named storage for grid pairs (std + red-black)
  std::unordered_map<std::string, GridModule> Grids;
@ -80,6 +82,9 @@ class HMCResourceManager {
  // SmearingModule<ImplementationPolicy> Smearing;
  std::unique_ptr<CheckpointerBaseModule> CP;

  // Momentum filter
  std::unique_ptr<MomentumFilterBase<typename ImplementationPolicy::Field> > Filter;

  // A vector of HmcObservable modules
  std::vector<std::unique_ptr<ObservableBaseModule> > ObservablesList;

@ -90,6 +95,7 @@ class HMCResourceManager {

  bool have_RNG;
  bool have_CheckPointer;
  bool have_Filter;

  // NOTE: operator << is not overloaded for std::vector<string>
  // so this function is necessary
@ -101,7 +107,7 @@ class HMCResourceManager {

public:
  HMCResourceManager() : have_RNG(false), have_CheckPointer(false) {}
  HMCResourceManager() : have_RNG(false), have_CheckPointer(false), have_Filter(false) {}

  template <class ReaderClass, class vector_type = vComplex >
  void initialize(ReaderClass &Read){
@ -129,6 +135,7 @@ public:
    RNGModuleParameters RNGpar(Read);
    SetRNGSeeds(RNGpar);

    // Observables
    auto &ObsFactory = HMC_ObservablesModuleFactory<observable_string, typename ImplementationPolicy::Field, ReaderClass>::getInstance();
    Read.push(observable_string);// here must check if existing...
@ -208,6 +215,16 @@ public:
    AddGrid(s, Mod);
  }

  void SetMomentumFilter( MomentumFilterBase<typename ImplementationPolicy::Field> * MomFilter) {
    assert(have_Filter==false);
    Filter = std::unique_ptr<MomentumFilterBase<typename ImplementationPolicy::Field> >(MomFilter);
    have_Filter = true;
  }
  MomentumFilterBase<typename ImplementationPolicy::Field> *GetMomentumFilter(void) {
    if ( !have_Filter)
      SetMomentumFilter(new MomentumFilterNone<typename ImplementationPolicy::Field>());
    return Filter.get();
  }

  GridCartesian* GetCartesian(std::string s = "") {
    if (s.empty()) s = Grids.begin()->first;
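A sketch of how a user installs a filter through this hook. The resource manager takes ownership of the heap-allocated object, and GetMomentumFilter() falls back to the no-op MomentumFilterNone if nothing was set; here `Field` abbreviates `typename ImplementationPolicy::Field` as in the class above:

    // Explicitly install the default no-op filter; any MomentumFilterBase<Field>
    // subclass (e.g. a mask-based filter for DDHMC) could be passed instead.
    Resources.SetMomentumFilter(new MomentumFilterNone<Field>());

    // The runner then wires it into the integrator, as shown earlier:
    //   MDynamics.setMomentumFilter(*(Resources.GetMomentumFilter()));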
@ -33,7 +33,6 @@ directory
#define INTEGRATOR_INCLUDED

#include <memory>
#include "MomentumFilter.h"

NAMESPACE_BEGIN(Grid);

@ -67,6 +66,7 @@ public:
template <class FieldImplementation, class SmearingPolicy, class RepresentationPolicy>
class Integrator {
protected:

  typedef typename FieldImplementation::Field MomentaField;  //for readability
  typedef typename FieldImplementation::Field Field;

@ -119,42 +119,58 @@ protected:
    }
  } update_P_hireps{};

  void update_P(MomentaField& Mom, Field& U, int level, double ep) {
    // input U actually not used in the fundamental case
    // Fundamental updates, include smearing

    for (int a = 0; a < as[level].actions.size(); ++a) {

      double start_full = usecond();
      Field force(U.Grid());
      conformable(U.Grid(), Mom.Grid());

      Field& Us = Smearer.get_U(as[level].actions.at(a)->is_smeared);
      double start_force = usecond();
      as[level].actions.at(a)->deriv_timer_start();
      as[level].actions.at(a)->deriv(Us, force);  // deriv should NOT include Ta
      as[level].actions.at(a)->deriv_timer_stop();

      std::cout << GridLogIntegrator << "Smearing (on/off): " << as[level].actions.at(a)->is_smeared << std::endl;
      auto name = as[level].actions.at(a)->action_name();
      if (as[level].actions.at(a)->is_smeared) Smearer.smeared_force(force);

      force = FieldImplementation::projectForce(force);  // Ta for gauge fields
      double end_force = usecond();

      Real force_abs = std::sqrt(norm2(force)/U.Grid()->gSites()); //average per-site norm.  nb. norm2(latt) = \sum_x norm2(latt[x])
      MomFilter->applyFilter(force);
      std::cout << GridLogIntegrator << " update_P : Level [" << level <<"]["<<a <<"] "<<name<< std::endl;
      // DumpSliceNorm("force ",force,Nd-1);

      Real force_abs   = std::sqrt(norm2(force)/U.Grid()->gSites()); //average per-site norm.  nb. norm2(latt) = \sum_x norm2(latt[x])
      Real impulse_abs = force_abs * ep * HMC_MOMENTUM_DENOMINATOR;

      Real max_force_abs = std::sqrt(maxLocalNorm2(force));
      Real max_impulse_abs = max_force_abs * ep * HMC_MOMENTUM_DENOMINATOR;
      Real force_max   = std::sqrt(maxLocalNorm2(force));
      Real impulse_max = force_max * ep * HMC_MOMENTUM_DENOMINATOR;

      as[level].actions.at(a)->deriv_log(force_abs,force_max);

      std::cout << GridLogIntegrator<< "["<<level<<"]["<<a<<"] Force average: " << force_abs <<" "<<name<<std::endl;
      std::cout << GridLogIntegrator<< "["<<level<<"]["<<a<<"] Force max    : " << force_max <<" "<<name<<std::endl;
      std::cout << GridLogIntegrator<< "["<<level<<"]["<<a<<"] Fdt average  : " << impulse_abs <<" "<<name<<std::endl;
      std::cout << GridLogIntegrator<< "["<<level<<"]["<<a<<"] Fdt max      : " << impulse_max <<" "<<name<<std::endl;

      std::cout << GridLogIntegrator << "["<<level<<"]["<<a<<"] Force average: " << force_abs << " Max force: " << max_force_abs << " Time step: " << ep << " Impulse average: " << impulse_abs << " Max impulse: " << max_impulse_abs << std::endl;
      Mom -= force * ep * HMC_MOMENTUM_DENOMINATOR;
      double end_full = usecond();
      double time_full = (end_full - start_full) / 1e3;
      double time_force = (end_force - start_force) / 1e3;
      std::cout << GridLogMessage << "["<<level<<"]["<<a<<"] P update elapsed time: " << time_full << " ms (force: " << time_force << " ms)" << std::endl;

    }

    // Force from the other representations
    as[level].apply(update_P_hireps, Representations, Mom, U, ep);

    MomFilter->applyFilter(Mom);
  }

  void update_U(Field& U, double ep)
@ -168,8 +184,12 @@ protected:

  void update_U(MomentaField& Mom, Field& U, double ep)
  {
    MomentaField MomFiltered(Mom.Grid());
    MomFiltered = Mom;
    MomFilter->applyFilter(MomFiltered);

    // exponential of Mom*U in the gauge fields case
    FieldImplementation::update_field(Mom, U, ep);
    FieldImplementation::update_field(MomFiltered, U, ep);

    // Update the smeared fields, can be implemented as observer
    Smearer.set_Field(U);
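The monitoring quantities logged in update_P are, in the notation of the code (with V the global site count and \epsilon the step size):

    F_{\mathrm{avg}} = \sqrt{\tfrac{1}{V}\textstyle\sum_x \|F(x)\|^2}, \qquad
    F_{\mathrm{max}} = \max_x \|F(x)\|, \qquad
    \mathrm{Fdt} = F \cdot \epsilon \cdot \mathrm{HMC\_MOMENTUM\_DENOMINATOR},

so "Fdt" is the per-step momentum impulse; the same \epsilon-scaled combination enters the actual update `Mom -= force * ep * HMC_MOMENTUM_DENOMINATOR;`.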
@ -212,6 +232,66 @@ public:
  const MomentaField & getMomentum() const{ return P; }

  void reset_timer(void)
  {
    for (int level = 0; level < as.size(); ++level) {
      for (int actionID = 0; actionID < as[level].actions.size(); ++actionID) {
        as[level].actions.at(actionID)->reset_timer();
      }
    }
  }
  void print_timer(void)
  {
    std::cout << GridLogMessage << ":::::::::::::::::::::::::::::::::::::::::" << std::endl;
    std::cout << GridLogMessage << " Refresh cumulative timings "<<std::endl;
    std::cout << GridLogMessage << "--------------------------- "<<std::endl;
    for (int level = 0; level < as.size(); ++level) {
      for (int actionID = 0; actionID < as[level].actions.size(); ++actionID) {
        std::cout << GridLogMessage
                  << as[level].actions.at(actionID)->action_name()
                  <<"["<<level<<"]["<< actionID<<"] "
                  << as[level].actions.at(actionID)->refresh_us*1.0e-6<<" s"<< std::endl;
      }
    }
    std::cout << GridLogMessage << "--------------------------- "<<std::endl;
    std::cout << GridLogMessage << " Action cumulative timings "<<std::endl;
    std::cout << GridLogMessage << "--------------------------- "<<std::endl;
    for (int level = 0; level < as.size(); ++level) {
      for (int actionID = 0; actionID < as[level].actions.size(); ++actionID) {
        std::cout << GridLogMessage
                  << as[level].actions.at(actionID)->action_name()
                  <<"["<<level<<"]["<< actionID<<"] "
                  << as[level].actions.at(actionID)->S_us*1.0e-6<<" s"<< std::endl;
      }
    }
    std::cout << GridLogMessage << "--------------------------- "<<std::endl;
    std::cout << GridLogMessage << " Force cumulative timings "<<std::endl;
    std::cout << GridLogMessage << "------------------------- "<<std::endl;
    for (int level = 0; level < as.size(); ++level) {
      for (int actionID = 0; actionID < as[level].actions.size(); ++actionID) {
        std::cout << GridLogMessage
                  << as[level].actions.at(actionID)->action_name()
                  <<"["<<level<<"]["<< actionID<<"] "
                  << as[level].actions.at(actionID)->deriv_us*1.0e-6<<" s"<< std::endl;
      }
    }
    std::cout << GridLogMessage << "--------------------------- "<<std::endl;
    std::cout << GridLogMessage << " Force average size "<<std::endl;
    std::cout << GridLogMessage << "------------------------- "<<std::endl;
    for (int level = 0; level < as.size(); ++level) {
      for (int actionID = 0; actionID < as[level].actions.size(); ++actionID) {
        std::cout << GridLogMessage
                  << as[level].actions.at(actionID)->action_name()
                  <<"["<<level<<"]["<< actionID<<"] : "
                  <<" force max " << as[level].actions.at(actionID)->deriv_max_average()
                  <<" norm "      << as[level].actions.at(actionID)->deriv_norm_average()
                  <<" calls "     << as[level].actions.at(actionID)->deriv_num
                  << std::endl;
      }
    }
    std::cout << GridLogMessage << ":::::::::::::::::::::::::::::::::::::::::"<< std::endl;
  }

  void print_parameters()
  {
    std::cout << GridLogMessage << "[Integrator] Name : "<< integrator_name() << std::endl;
@ -230,7 +310,6 @@ public:
      }
    }
    std::cout << GridLogMessage << ":::::::::::::::::::::::::::::::::::::::::"<< std::endl;

  }

  void reverse_momenta()
@ -255,19 +334,15 @@ public:
  void refresh(Field& U, GridSerialRNG & sRNG, GridParallelRNG& pRNG)
  {
    assert(P.Grid() == U.Grid());
    std::cout << GridLogIntegrator << "Integrator refresh" << std::endl;
    std::cout << GridLogIntegrator << "Integrator refresh\n";

    std::cout << GridLogIntegrator << "Generating momentum" << std::endl;
    FieldImplementation::generate_momenta(P, sRNG, pRNG);

    // Update the smeared fields, can be implemented as observer
    // necessary to keep the fields updated even after a reject
    // of the Metropolis
    std::cout << GridLogIntegrator << "Updating smeared fields" << std::endl;
    Smearer.set_Field(U);
    // Set the (eventual) representations gauge fields

    std::cout << GridLogIntegrator << "Updating representations" << std::endl;
    Representations.update(U);

    // The Smearer is attached to a pointer of the gauge field
@ -277,16 +352,19 @@ public:
      for (int actionID = 0; actionID < as[level].actions.size(); ++actionID) {
        // get gauge field from the SmearingPolicy and
        // based on the boolean is_smeared in actionID
        std::cout << GridLogIntegrator << "Refreshing integrator level " << level << " index " << actionID << std::endl;
        auto name = as[level].actions.at(actionID)->action_name();
        std::cout << GridLogMessage << "refresh [" << level << "][" << actionID << "] "<<name << std::endl;

        Field& Us = Smearer.get_U(as[level].actions.at(actionID)->is_smeared);
        as[level].actions.at(actionID)->refresh_timer_start();
        as[level].actions.at(actionID)->refresh(Us, sRNG, pRNG);
        as[level].actions.at(actionID)->refresh_timer_stop();
      }

      // Refresh the higher representation actions
      as[level].apply(refresh_hireps, Representations, sRNG, pRNG);
    }

    MomFilter->applyFilter(P);
  }

  // to be used by the actionlevel class to iterate
@ -321,7 +399,9 @@ public:
      // based on the boolean is_smeared in actionID
      Field& Us = Smearer.get_U(as[level].actions.at(actionID)->is_smeared);
      std::cout << GridLogMessage << "S [" << level << "][" << actionID << "] action eval " << std::endl;
      as[level].actions.at(actionID)->S_timer_start();
      Hterm = as[level].actions.at(actionID)->S(Us);
      as[level].actions.at(actionID)->S_timer_stop();
      std::cout << GridLogMessage << "S [" << level << "][" << actionID << "] H = " << Hterm << std::endl;
      H += Hterm;
    }
@ -99,7 +99,7 @@ public:
      // using wilson flow by default here
      WilsonFlow<PeriodicGimplR> WF(Pars.Smearing.steps, Pars.Smearing.step_size, Pars.Smearing.meas_interval);
      WF.smear_adaptive(Usmear, U, Pars.Smearing.maxTau);
      Real T0   = WF.energyDensityPlaquette(Pars.Smearing.maxTau, Usmear);
      Real T0   = WF.energyDensityPlaquette(Usmear);
      std::cout << GridLogMessage << std::setprecision(std::numeric_limits<Real>::digits10 + 1)
                << "T0                : [ " << traj << " ] "<< T0 << std::endl;
    }
@ -7,7 +7,6 @@ Source file: ./lib/qcd/modules/plaquette.h
Copyright (C) 2017

Author: Guido Cossu <guido.cossu@ed.ac.uk>
Author: Christopher Kelly <ckelly@bnl.gov>

This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@ -34,44 +33,28 @@ NAMESPACE_BEGIN(Grid);

template <class Gimpl>
class WilsonFlow: public Smear<Gimpl>{
public:
  //Store generic measurements to take during smearing process using std::function
  typedef std::function<void(int, RealD, const typename Gimpl::GaugeField &)> FunctionType;  //int: step, RealD: flow time, GaugeField : the gauge field

private:
  unsigned int Nstep;
  RealD epsilon;  //for regular smearing this is the time step, for adaptive it is the initial time step

  std::vector< std::pair<int, FunctionType> > functions;  //The int maps to the measurement frequency
  unsigned int measure_interval;
  mutable RealD epsilon, taus;

  mutable WilsonGaugeAction<Gimpl> SG;

  //Evolve the gauge field by 1 step and update tau
  void evolve_step(typename Gimpl::GaugeField &U, RealD &tau) const;
  //Evolve the gauge field by 1 step and update tau and the current time step eps
  void evolve_step_adaptive(typename Gimpl::GaugeField&U, RealD &tau, RealD &eps, RealD maxTau) const;
  void evolve_step(typename Gimpl::GaugeField&) const;
  void evolve_step_adaptive(typename Gimpl::GaugeField&, RealD);
  RealD tau(unsigned int t)const {return epsilon*(t+1.0); }

public:
  INHERIT_GIMPL_TYPES(Gimpl)

  void resetActions(){ functions.clear(); }

  void addMeasurement(int meas_interval, FunctionType meas){ functions.push_back({meas_interval, meas}); }

  //Set the class to perform the default measurements:
  //the plaquette energy density every step
  //the plaquette topological charge every 'topq_meas_interval' steps
  //and output to stdout
  void setDefaultMeasurements(int topq_meas_interval = 1);

  explicit WilsonFlow(unsigned int Nstep, RealD epsilon, unsigned int interval = 1):
    Nstep(Nstep),
    epsilon(epsilon),
    measure_interval(interval),
    SG(WilsonGaugeAction<Gimpl>(3.0)) {
    // WilsonGaugeAction with beta 3.0
    assert(epsilon > 0.0);
    LogMessage();
    setDefaultMeasurements(interval);
  }

  void LogMessage() {
@ -90,29 +73,9 @@ public:
    // undefined for WilsonFlow
  }

  void smear_adaptive(GaugeField&, const GaugeField&, RealD maxTau) const;

  //Compute t^2 <E(t)> for time t from the plaquette
  static RealD energyDensityPlaquette(const RealD t, const GaugeField& U);

  //Compute t^2 <E(t)> for time t from the 1x1 cloverleaf form
  //t is the Wilson flow time
  static RealD energyDensityCloverleaf(const RealD t, const GaugeField& U);

  //Evolve the gauge field by Nstep steps of epsilon and return the energy density computed every interval steps
  //The smeared field is output as V
  std::vector<RealD> flowMeasureEnergyDensityPlaquette(GaugeField &V, const GaugeField& U, int measure_interval = 1);

  //Version that does not return the smeared field
  std::vector<RealD> flowMeasureEnergyDensityPlaquette(const GaugeField& U, int measure_interval = 1);

  //Evolve the gauge field by Nstep steps of epsilon and return the Cloverleaf energy density computed every interval steps
  //The smeared field is output as V
  std::vector<RealD> flowMeasureEnergyDensityCloverleaf(GaugeField &V, const GaugeField& U, int measure_interval = 1);

  //Version that does not return the smeared field
  std::vector<RealD> flowMeasureEnergyDensityCloverleaf(const GaugeField& U, int measure_interval = 1);
  void smear_adaptive(GaugeField&, const GaugeField&, RealD maxTau);
  RealD energyDensityPlaquette(unsigned int step, const GaugeField& U) const;
  RealD energyDensityPlaquette(const GaugeField& U) const;
};

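A sketch of the new std::function measurement hook in use. The WilsonFlow<PeriodicGimplR> instantiation is taken from the observable code elsewhere in this changeset; the parameter values and the lambda body are illustrative assumptions:

    int Nstep = 100; RealD eps = 0.01; int meas_interval = 10;  // assumed values
    WilsonFlow<PeriodicGimplR> WF(Nstep, eps, meas_interval);
    WF.resetActions();  // drop the default measurements installed by the constructor
    WF.addMeasurement(5, [](int step, RealD t, const PeriodicGimplR::GaugeField &U){
      // called every 5 steps with the current flow time and flowed field
      std::cout << GridLogMessage << "flow step " << step << " at flow time " << t << std::endl;
    });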
@ -120,7 +83,7 @@ public:
|
||||
// Implementations
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
template <class Gimpl>
|
||||
void WilsonFlow<Gimpl>::evolve_step(typename Gimpl::GaugeField &U, RealD &tau) const{
|
||||
void WilsonFlow<Gimpl>::evolve_step(typename Gimpl::GaugeField &U) const{
|
||||
GaugeField Z(U.Grid());
|
||||
GaugeField tmp(U.Grid());
|
||||
SG.deriv(U, Z);
|
||||
@ -136,13 +99,12 @@ void WilsonFlow<Gimpl>::evolve_step(typename Gimpl::GaugeField &U, RealD &tau) c
|
||||
SG.deriv(U, tmp); Z += tmp; // 4/3*(17/36*Z0 -8/9*Z1) +Z2
|
||||
Z *= 3.0/4.0; // Z = 17/36*Z0 -8/9*Z1 +3/4*Z2
|
||||
Gimpl::update_field(Z, U, -2.0*epsilon); // V(t+e) = exp(ep*Z)*W2
|
||||
tau += epsilon;
|
||||
}

template <class Gimpl>
void WilsonFlow<Gimpl>::evolve_step_adaptive(typename Gimpl::GaugeField &U, RealD &tau, RealD &eps, RealD maxTau) const{
if (maxTau - tau < eps){
eps = maxTau-tau;
void WilsonFlow<Gimpl>::evolve_step_adaptive(typename Gimpl::GaugeField &U, RealD maxTau) {
if (maxTau - taus < epsilon){
epsilon = maxTau-taus;
}
//std::cout << GridLogMessage << "Integration epsilon : " << epsilon << std::endl;
GaugeField Z(U.Grid());
@ -152,151 +114,95 @@ void WilsonFlow<Gimpl>::evolve_step_adaptive(typename Gimpl::GaugeField &U, Real
SG.deriv(U, Z);
Zprime = -Z;
Z *= 0.25; // Z0 = 1/4 * F(U)
Gimpl::update_field(Z, U, -2.0*eps); // U = W1 = exp(ep*Z0)*W0
Gimpl::update_field(Z, U, -2.0*epsilon); // U = W1 = exp(ep*Z0)*W0

Z *= -17.0/8.0;
SG.deriv(U, tmp); Z += tmp; // -17/32*Z0 +Z1
Zprime += 2.0*tmp;
Z *= 8.0/9.0; // Z = -17/36*Z0 +8/9*Z1
Gimpl::update_field(Z, U, -2.0*eps); // U_= W2 = exp(ep*Z)*W1
Gimpl::update_field(Z, U, -2.0*epsilon); // U_= W2 = exp(ep*Z)*W1


Z *= -4.0/3.0;
SG.deriv(U, tmp); Z += tmp; // 4/3*(17/36*Z0 -8/9*Z1) +Z2
Z *= 3.0/4.0; // Z = 17/36*Z0 -8/9*Z1 +3/4*Z2
Gimpl::update_field(Z, U, -2.0*eps); // V(t+e) = exp(ep*Z)*W2
Gimpl::update_field(Z, U, -2.0*epsilon); // V(t+e) = exp(ep*Z)*W2

// Ramos
Gimpl::update_field(Zprime, Uprime, -2.0*eps); // V'(t+e) = exp(ep*Z')*W0
Gimpl::update_field(Zprime, Uprime, -2.0*epsilon); // V'(t+e) = exp(ep*Z')*W0
// Compute distance as norm^2 of the difference
GaugeField diffU = U - Uprime;
RealD diff = norm2(diffU);
// adjust integration step

tau += eps;
taus += epsilon;
//std::cout << GridLogMessage << "Adjusting integration step with distance: " << diff << std::endl;

eps = eps*0.95*std::pow(1e-4/diff,1./3.);
epsilon = epsilon*0.95*std::pow(1e-4/diff,1./3.);
//std::cout << GridLogMessage << "New epsilon : " << epsilon << std::endl;

}
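The final line of the body above is a standard third-order step-size controller: with d = ||V(t+eps) - V'(t+eps)||^2 the distance between the full RK3 step and the lower-order Ramos estimate, the next step is

\[
\epsilon \;\leftarrow\; 0.95\,\epsilon\,\Big(\frac{10^{-4}}{d}\Big)^{1/3},
\]

so the step grows while the two estimates agree to better than the hard-coded 1e-4 tolerance and shrinks otherwise.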


template <class Gimpl>
RealD WilsonFlow<Gimpl>::energyDensityPlaquette(const RealD t, const GaugeField& U){
static WilsonGaugeAction<Gimpl> SG(3.0);
return 2.0 * t * t * SG.S(U)/U.Grid()->gSites();
}

//Compute t^2 <E(t)> for time t from the 1x1 cloverleaf form
template <class Gimpl>
RealD WilsonFlow<Gimpl>::energyDensityCloverleaf(const RealD t, const GaugeField& U){
typedef typename Gimpl::GaugeLinkField GaugeMat;
typedef typename Gimpl::GaugeField GaugeLorentz;

assert(Nd == 4);
//E = 1/2 tr( F_munu F_munu )
//However as F_numu = -F_munu, only need to sum the trace of the squares of the following 6 field strengths:
//F_01 F_02 F_03 F_12 F_13 F_23
GaugeMat F(U.Grid());
LatticeComplexD R(U.Grid());
R = Zero();

for(int mu=0;mu<3;mu++){
for(int nu=mu+1;nu<4;nu++){
WilsonLoops<Gimpl>::FieldStrength(F, U, mu, nu);
R = R + trace(F*F);
}
}
ComplexD out = sum(R);
out = t*t*out / RealD(U.Grid()->gSites());
return -real(out); //minus sign necessary for +ve energy
}
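The sum over the six planes above is the clover definition of the energy density; since F_numu = -F_munu and F is anti-Hermitian (so tr F^2 is real and non-positive), the routine computes

\[
t^2\langle E(t)\rangle \;=\; -\,\frac{t^2}{V}\sum_x \sum_{\mu<\nu} \mathrm{tr}\,F_{\mu\nu}(x)F_{\mu\nu}(x),
\qquad E=\tfrac{1}{2}\,\mathrm{tr}\,F_{\mu\nu}F_{\mu\nu},
\]

with the explicit minus sign making the returned value positive.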


template <class Gimpl>
std::vector<RealD> WilsonFlow<Gimpl>::flowMeasureEnergyDensityPlaquette(GaugeField &V, const GaugeField& U, int measure_interval){
std::vector<RealD> out;
resetActions();
addMeasurement(measure_interval, [&out](int step, RealD t, const typename Gimpl::GaugeField &U){
std::cout << GridLogMessage << "[WilsonFlow] Computing plaquette energy density for step " << step << std::endl;
out.push_back( energyDensityPlaquette(t,U) );
});
smear(V,U);
return out;
RealD WilsonFlow<Gimpl>::energyDensityPlaquette(unsigned int step, const GaugeField& U) const {
RealD td = tau(step);
return 2.0 * td * td * SG.S(U)/U.Grid()->gSites();
}

template <class Gimpl>
std::vector<RealD> WilsonFlow<Gimpl>::flowMeasureEnergyDensityPlaquette(const GaugeField& U, int measure_interval){
GaugeField V(U);
return flowMeasureEnergyDensityPlaquette(V,U, measure_interval);
RealD WilsonFlow<Gimpl>::energyDensityPlaquette(const GaugeField& U) const {
return 2.0 * taus * taus * SG.S(U)/U.Grid()->gSites();
}

template <class Gimpl>
std::vector<RealD> WilsonFlow<Gimpl>::flowMeasureEnergyDensityCloverleaf(GaugeField &V, const GaugeField& U, int measure_interval){
std::vector<RealD> out;
resetActions();
addMeasurement(measure_interval, [&out](int step, RealD t, const typename Gimpl::GaugeField &U){
std::cout << GridLogMessage << "[WilsonFlow] Computing Cloverleaf energy density for step " << step << std::endl;
out.push_back( energyDensityCloverleaf(t,U) );
});
smear(V,U);
return out;
}

template <class Gimpl>
std::vector<RealD> WilsonFlow<Gimpl>::flowMeasureEnergyDensityCloverleaf(const GaugeField& U, int measure_interval){
GaugeField V(U);
return flowMeasureEnergyDensityCloverleaf(V,U, measure_interval);
}
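A minimal usage sketch of the measurement interface declared above. The gauge field Umu, its grid set-up and the choice of PeriodicGimplR are assumptions for illustration; the step count, step size and interval are arbitrary:

// Flow for 200 steps of epsilon = 0.01, recording t^2 <E(t)> every 10 steps.
WilsonFlow<PeriodicGimplR> wflow(200, 0.01, 10);
LatticeGaugeField V(Umu.Grid());   // receives the flowed field
std::vector<RealD> t2E = wflow.flowMeasureEnergyDensityPlaquette(V, Umu, 10);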



//#define WF_TIMING



template <class Gimpl>
void WilsonFlow<Gimpl>::smear(GaugeField& out, const GaugeField& in) const{
void WilsonFlow<Gimpl>::smear(GaugeField& out, const GaugeField& in) const {
out = in;
RealD taus = 0.;
for (unsigned int step = 1; step <= Nstep; step++) { //step indicates the number of smearing steps applied at the time of measurement
for (unsigned int step = 1; step <= Nstep; step++) {
auto start = std::chrono::high_resolution_clock::now();
evolve_step(out, taus);
evolve_step(out);
auto end = std::chrono::high_resolution_clock::now();
std::chrono::duration<double> diff = end - start;
#ifdef WF_TIMING
std::cout << "Time to evolve " << diff.count() << " s\n";
#endif
//Perform measurements
for(auto const &meas : functions)
if( step % meas.first == 0 ) meas.second(step,taus,out);
std::cout << GridLogMessage << "[WilsonFlow] Energy density (plaq) : "
<< step << " " << tau(step) << " "
<< energyDensityPlaquette(step,out) << std::endl;
if( step % measure_interval == 0){
std::cout << GridLogMessage << "[WilsonFlow] Top. charge : "
<< step << " "
<< WilsonLoops<PeriodicGimplR>::TopologicalCharge(out) << std::endl;
}
}
}

template <class Gimpl>
void WilsonFlow<Gimpl>::smear_adaptive(GaugeField& out, const GaugeField& in, RealD maxTau) const{
void WilsonFlow<Gimpl>::smear_adaptive(GaugeField& out, const GaugeField& in, RealD maxTau){
out = in;
RealD taus = 0.;
RealD eps = epsilon;
taus = epsilon;
unsigned int step = 0;
do{
step++;
//std::cout << GridLogMessage << "Evolution time :"<< taus << std::endl;
evolve_step_adaptive(out, taus, eps, maxTau);
//Perform measurements
for(auto const &meas : functions)
if( step % meas.first == 0 ) meas.second(step,taus,out);
evolve_step_adaptive(out, maxTau);
std::cout << GridLogMessage << "[WilsonFlow] Energy density (plaq) : "
<< step << " " << taus << " "
<< energyDensityPlaquette(out) << std::endl;
if( step % measure_interval == 0){
std::cout << GridLogMessage << "[WilsonFlow] Top. charge : "
<< step << " "
<< WilsonLoops<PeriodicGimplR>::TopologicalCharge(out) << std::endl;
}
} while (taus < maxTau);
}

template <class Gimpl>
void WilsonFlow<Gimpl>::setDefaultMeasurements(int topq_meas_interval){
addMeasurement(1, [](int step, RealD t, const typename Gimpl::GaugeField &U){
std::cout << GridLogMessage << "[WilsonFlow] Energy density (plaq) : " << step << " " << t << " " << energyDensityPlaquette(t,U) << std::endl;
});
addMeasurement(topq_meas_interval, [](int step, RealD t, const typename Gimpl::GaugeField &U){
std::cout << GridLogMessage << "[WilsonFlow] Top. charge : " << step << " " << WilsonLoops<Gimpl>::TopologicalCharge(U) << std::endl;
});
}


}

NAMESPACE_END(Grid);


@ -88,12 +88,6 @@ namespace PeriodicBC {
return CovShiftBackward(Link,mu,arg);
}

//Boundary-aware C-shift of gauge links / gauge transformation matrices
template<class gauge> Lattice<gauge>
CshiftLink(const Lattice<gauge> &Link, int mu, int shift)
{
return Cshift(Link, mu, shift);
}

}

@ -164,9 +158,6 @@ namespace ConjugateBC {
// std::cout<<"Gparity::CovCshiftBackward mu="<<mu<<std::endl;
return Cshift(tmp,mu,-1);// moves towards positive mu
}

//Out(x) = U^dag_\mu(x-mu) | x_\mu != 0
//       = U^T_\mu(L-1)    | x_\mu == 0
template<class gauge> Lattice<gauge>
CovShiftIdentityBackward(const Lattice<gauge> &Link, int mu) {
GridBase *grid = Link.Grid();
@ -185,9 +176,6 @@ namespace ConjugateBC {
return Link;
}

//Out(x) = S_\mu(x+\hat\mu) | x_\mu != L-1
//       = S*_\mu(0)        | x_\mu == L-1
//Note: While this is used for Staples it is also applicable for shifting gauge links or gauge transformation matrices
template<class gauge> Lattice<gauge>
ShiftStaple(const Lattice<gauge> &Link, int mu)
{
@ -220,35 +208,6 @@ namespace ConjugateBC {
return CovShiftBackward(Link,mu,arg);
}

//Boundary-aware C-shift of gauge links / gauge transformation matrices
//shift = 1
//Out(x) = U_\mu(x+\hat\mu) | x_\mu != L-1
//       = U*_\mu(0)        | x_\mu == L-1
//shift = -1
//Out(x) = U_\mu(x-mu) | x_\mu != 0
//       = U*_\mu(L-1) | x_\mu == 0
template<class gauge> Lattice<gauge>
CshiftLink(const Lattice<gauge> &Link, int mu, int shift)
{
GridBase *grid = Link.Grid();
int Lmu = grid->GlobalDimensions()[mu] - 1;

Lattice<iScalar<vInteger>> coor(grid);
LatticeCoordinate(coor, mu);

Lattice<gauge> tmp(grid);
if(shift == 1){
tmp = Cshift(Link, mu, 1);
tmp = where(coor == Lmu, conjugate(tmp), tmp);
return tmp;
}else if(shift == -1){
tmp = Link;
tmp = where(coor == Lmu, conjugate(tmp), tmp);
return Cshift(tmp, mu, -1);
}else assert(0 && "Invalid shift value");
return tmp; //shuts up the compiler fussing about the return type
}

}


@ -40,46 +40,27 @@ public:
typedef typename Gimpl::GaugeLinkField GaugeMat;
typedef typename Gimpl::GaugeField GaugeLorentz;

//A_\mu(x) = -i Ta(U_\mu(x) ) where Ta(U) = 1/2( U - U^dag ) - 1/2N tr(U - U^dag) is the traceless antihermitian part. This is an O(A^3) approximation to the logarithm of U
static void GaugeLinkToLieAlgebraField(const GaugeMat &U, GaugeMat &A) {
Complex cmi(0.0,-1.0);
A = Ta(U) * cmi;
static void GaugeLinkToLieAlgebraField(const std::vector<GaugeMat> &U,std::vector<GaugeMat> &A) {
for(int mu=0;mu<Nd;mu++){
Complex cmi(0.0,-1.0);
A[mu] = Ta(U[mu]) * cmi;
}
}

//The derivative of the Lie algebra field
static void DmuAmu(const std::vector<GaugeMat> &U, GaugeMat &dmuAmu,int orthog) {
GridBase* grid = U[0].Grid();
GaugeMat Ax(grid);
GaugeMat Axm1(grid);
GaugeMat Utmp(grid);

static void DmuAmu(const std::vector<GaugeMat> &A,GaugeMat &dmuAmu,int orthog) {
dmuAmu=Zero();
for(int mu=0;mu<Nd;mu++){
if ( mu != orthog ) {
//Rather than define functionality to work out how the BCs apply to A_\mu we simply use the BC-aware Cshift to the gauge links and compute A_\mu(x) and A_\mu(x-1) separately
//Ax = A_\mu(x)
GaugeLinkToLieAlgebraField(U[mu], Ax);

//Axm1 = A_\mu(x_\mu-1)
Utmp = Gimpl::CshiftLink(U[mu], mu, -1);
GaugeLinkToLieAlgebraField(Utmp, Axm1);

//Derivative
dmuAmu = dmuAmu + Ax - Axm1;
dmuAmu = dmuAmu + A[mu] - Cshift(A[mu],mu,-1);
}
}
}

//Fix the gauge field Umu
//0 < alpha < 1 is related to the step size, cf https://arxiv.org/pdf/1405.5812.pdf
static void SteepestDescentGaugeFix(GaugeLorentz &Umu, Real alpha,int maxiter,Real Omega_tol, Real Phi_tol,bool Fourier=false,int orthog=-1) {
static void SteepestDescentGaugeFix(GaugeLorentz &Umu,Real & alpha,int maxiter,Real Omega_tol, Real Phi_tol,bool Fourier=false,int orthog=-1) {
GridBase *grid = Umu.Grid();
GaugeMat xform(grid);
SteepestDescentGaugeFix(Umu,xform,alpha,maxiter,Omega_tol,Phi_tol,Fourier,orthog);
}

//Fix the gauge field Umu and also return the gauge transformation from the original gauge field, xform
static void SteepestDescentGaugeFix(GaugeLorentz &Umu,GaugeMat &xform, Real alpha,int maxiter,Real Omega_tol, Real Phi_tol,bool Fourier=false,int orthog=-1) {
static void SteepestDescentGaugeFix(GaugeLorentz &Umu,GaugeMat &xform,Real & alpha,int maxiter,Real Omega_tol, Real Phi_tol,bool Fourier=false,int orthog=-1) {

GridBase *grid = Umu.Grid();

@ -141,24 +122,27 @@ public:

}
}
assert(0 && "Gauge fixing did not converge within the specified number of iterations");
};
static Real SteepestDescentStep(std::vector<GaugeMat> &U,GaugeMat &xform, Real alpha, GaugeMat & dmuAmu,int orthog) {
static Real SteepestDescentStep(std::vector<GaugeMat> &U,GaugeMat &xform,Real & alpha, GaugeMat & dmuAmu,int orthog) {
GridBase *grid = U[0].Grid();

std::vector<GaugeMat> A(Nd,grid);
GaugeMat g(grid);
ExpiAlphaDmuAmu(U,g,alpha,dmuAmu,orthog);

GaugeLinkToLieAlgebraField(U,A);
ExpiAlphaDmuAmu(A,g,alpha,dmuAmu,orthog);


Real vol = grid->gSites();
Real trG = TensorRemove(sum(trace(g))).real()/vol/Nc;

xform = g*xform ;
SU<Nc>::GaugeTransform<Gimpl>(U,g);
SU<Nc>::GaugeTransform(U,g);

return trG;
}

static Real FourierAccelSteepestDescentStep(std::vector<GaugeMat> &U,GaugeMat &xform, Real alpha, GaugeMat & dmuAmu,int orthog) {
static Real FourierAccelSteepestDescentStep(std::vector<GaugeMat> &U,GaugeMat &xform,Real & alpha, GaugeMat & dmuAmu,int orthog) {

GridBase *grid = U[0].Grid();

@ -173,7 +157,11 @@ public:

GaugeMat g(grid);
GaugeMat dmuAmu_p(grid);
DmuAmu(U,dmuAmu,orthog);
std::vector<GaugeMat> A(Nd,grid);

GaugeLinkToLieAlgebraField(U,A);

DmuAmu(A,dmuAmu,orthog);

std::vector<int> mask(Nd,1);
for(int mu=0;mu<Nd;mu++) if (mu==orthog) mask[mu]=0;
@ -217,16 +205,16 @@ public:
Real trG = TensorRemove(sum(trace(g))).real()/vol/Nc;

xform = g*xform ;
SU<Nc>::GaugeTransform<Gimpl>(U,g);
SU<Nc>::GaugeTransform(U,g);

return trG;
}

static void ExpiAlphaDmuAmu(const std::vector<GaugeMat> &U,GaugeMat &g, Real alpha, GaugeMat &dmuAmu,int orthog) {
static void ExpiAlphaDmuAmu(const std::vector<GaugeMat> &A,GaugeMat &g,Real & alpha, GaugeMat &dmuAmu,int orthog) {
GridBase *grid = g.Grid();
Complex cialpha(0.0,-alpha);
GaugeMat ciadmam(grid);
DmuAmu(U,dmuAmu,orthog);
DmuAmu(A,dmuAmu,orthog);
ciadmam = dmuAmu*cialpha;
SU<Nc>::taExp(ciadmam,g);
}
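The fixed point of the iteration in ExpiAlphaDmuAmu is the gauge condition \(\partial_\mu A_\mu = 0\) (Landau gauge, or Coulomb gauge when one direction is excluded via orthog); each sweep applies the steepest-descent rotation, in sketch form,

\[
g(x) \;=\; \exp\!\big(-i\,\alpha\,\partial_\mu A_\mu(x)\big),
\]

projected to the algebra by taExp, with alpha the tunable step size referenced above (cf. arXiv:1405.5812).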


@ -694,32 +694,32 @@ public:
* Adjoint rep gauge xform
*/

template<typename Gimpl>
static void GaugeTransform(typename Gimpl::GaugeField &Umu, typename Gimpl::GaugeLinkField &g){
template<typename GaugeField,typename GaugeMat>
static void GaugeTransform( GaugeField &Umu, GaugeMat &g){
GridBase *grid = Umu.Grid();
conformable(grid,g.Grid());

typename Gimpl::GaugeLinkField U(grid);
typename Gimpl::GaugeLinkField ag(grid); ag = adj(g);
GaugeMat U(grid);
GaugeMat ag(grid); ag = adj(g);

for(int mu=0;mu<Nd;mu++){
U= PeekIndex<LorentzIndex>(Umu,mu);
U = g*U*Gimpl::CshiftLink(ag, mu, 1); //BC-aware
U = g*U*Cshift(ag, mu, 1);
PokeIndex<LorentzIndex>(Umu,U,mu);
}
}
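Both variants of GaugeTransform in this hunk implement the same lattice gauge rotation; the only difference is whether the shifted transporter uses a plain Cshift or the BC-aware Gimpl::CshiftLink:

\[
U_\mu(x) \;\rightarrow\; g(x)\,U_\mu(x)\,g^\dagger(x+\hat\mu).
\]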
template<typename Gimpl>
static void GaugeTransform( std::vector<typename Gimpl::GaugeLinkField> &U, typename Gimpl::GaugeLinkField &g){
template<typename GaugeMat>
static void GaugeTransform( std::vector<GaugeMat> &U, GaugeMat &g){
GridBase *grid = g.Grid();
typename Gimpl::GaugeLinkField ag(grid); ag = adj(g);
GaugeMat ag(grid); ag = adj(g);
for(int mu=0;mu<Nd;mu++){
U[mu] = g*U[mu]*Gimpl::CshiftLink(ag, mu, 1); //BC-aware
U[mu] = g*U[mu]*Cshift(ag, mu, 1);
}
}
template<typename Gimpl>
static void RandomGaugeTransform(GridParallelRNG &pRNG, typename Gimpl::GaugeField &Umu, typename Gimpl::GaugeLinkField &g){
template<typename GaugeField,typename GaugeMat>
static void RandomGaugeTransform(GridParallelRNG &pRNG, GaugeField &Umu, GaugeMat &g){
LieRandomize(pRNG,g,1.0);
GaugeTransform<Gimpl>(Umu,g);
GaugeTransform(Umu,g);
}

// Projects the algebra components of a lattice matrix (of dimension ncol*ncol - 1)

@ -125,56 +125,6 @@ public:
return sumplaq / vol / faces / Nc; // Nd , Nc dependent... FIXME
}

//////////////////////////////////////////////////
// sum over all spatial planes of plaquette
//////////////////////////////////////////////////
static void siteSpatialPlaquette(ComplexField &Plaq,
const std::vector<GaugeMat> &U) {
ComplexField sitePlaq(U[0].Grid());
Plaq = Zero();
for (int mu = 1; mu < Nd-1; mu++) {
for (int nu = 0; nu < mu; nu++) {
traceDirPlaquette(sitePlaq, U, mu, nu);
Plaq = Plaq + sitePlaq;
}
}
}

////////////////////////////////////
// sum over all x,y,z and over all spatial planes of plaquette
//////////////////////////////////////////////////
static std::vector<RealD> timesliceSumSpatialPlaquette(const GaugeLorentz &Umu) {
std::vector<GaugeMat> U(Nd, Umu.Grid());
// inefficient here
for (int mu = 0; mu < Nd; mu++) {
U[mu] = PeekIndex<LorentzIndex>(Umu, mu);
}

ComplexField Plaq(Umu.Grid());

siteSpatialPlaquette(Plaq, U);
typedef typename ComplexField::scalar_object sobj;
std::vector<sobj> Tq;
sliceSum(Plaq, Tq, Nd-1);

std::vector<Real> out(Tq.size());
for(int t=0;t<Tq.size();t++) out[t] = TensorRemove(Tq[t]).real();
return out;
}

//////////////////////////////////////////////////
// average over all x,y,z and over all spatial planes of plaquette
//////////////////////////////////////////////////
static std::vector<RealD> timesliceAvgSpatialPlaquette(const GaugeLorentz &Umu) {
std::vector<RealD> sumplaq = timesliceSumSpatialPlaquette(Umu);
int Lt = Umu.Grid()->FullDimensions()[Nd-1];
assert(sumplaq.size() == Lt);
double vol = Umu.Grid()->gSites() / Lt;
double faces = (1.0 * (Nd - 1)* (Nd - 2)) / 2.0;
for(int t=0;t<Lt;t++)
sumplaq[t] = sumplaq[t] / vol / faces / Nc; // Nd , Nc dependent... FIXME
return sumplaq;
}
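The per-timeslice normalisation above mirrors the scalar plaquette average: each entry is divided by the spatial volume, the number of spatial planes and the number of colours,

\[
\langle P\rangle_t \;=\; \frac{1}{V_3\,N_{\rm faces}\,N_c}\sum_{\vec x}\;\sum_{\text{spatial }\mu<\nu}\mathrm{Re}\,\mathrm{tr}\,U_{\mu\nu}(\vec x,t),
\qquad V_3=\frac{V}{L_t},\quad N_{\rm faces}=\frac{(N_d-1)(N_d-2)}{2},
\]

so a unit-link configuration gives 1 on every timeslice.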

//////////////////////////////////////////////////
// average over all x,y,z the temporal loop
@ -413,11 +363,11 @@ public:
GaugeMat u = PeekIndex<LorentzIndex>(Umu, mu); // some redundant copies
GaugeMat vu = v*u;
//FS = 0.25*Ta(u*v + Cshift(vu, mu, -1));
FS = (u*v + Gimpl::CshiftLink(vu, mu, -1));
FS = (u*v + Cshift(vu, mu, -1));
FS = 0.125*(FS - adj(FS));
}

static Real TopologicalCharge(const GaugeLorentz &U){
static Real TopologicalCharge(GaugeLorentz &U){
// 4d topological charge
assert(Nd==4);
// Bx = -iF(y,z), By = -iF(z,y), Bz = -iF(x,y)
@ -440,203 +390,6 @@ public:
}


//Clover-leaf Wilson loop combination for arbitrary mu-extent M and nu extent N, mu >= nu
//cf https://arxiv.org/pdf/hep-lat/9701012.pdf Eq 7 for 1x2 Wilson loop
//Clockwise ordering
static void CloverleafMxN(GaugeMat &FS, const GaugeMat &Umu, const GaugeMat &Unu, int mu, int nu, int M, int N){
#define Fmu(A) Gimpl::CovShiftForward(Umu, mu, A)
#define Bmu(A) Gimpl::CovShiftBackward(Umu, mu, A)
#define Fnu(A) Gimpl::CovShiftForward(Unu, nu, A)
#define Bnu(A) Gimpl::CovShiftBackward(Unu, nu, A)
#define FmuI Gimpl::CovShiftIdentityForward(Umu, mu)
#define BmuI Gimpl::CovShiftIdentityBackward(Umu, mu)
#define FnuI Gimpl::CovShiftIdentityForward(Unu, nu)
#define BnuI Gimpl::CovShiftIdentityBackward(Unu, nu)

//Upper right loop
GaugeMat tmp = BmuI;
for(int i=1;i<M;i++)
tmp = Bmu(tmp);
for(int j=0;j<N;j++)
tmp = Bnu(tmp);
for(int i=0;i<M;i++)
tmp = Fmu(tmp);
for(int j=0;j<N;j++)
tmp = Fnu(tmp);

FS = tmp;

//Upper left loop
tmp = BnuI;
for(int j=1;j<N;j++)
tmp = Bnu(tmp);
for(int i=0;i<M;i++)
tmp = Fmu(tmp);
for(int j=0;j<N;j++)
tmp = Fnu(tmp);
for(int i=0;i<M;i++)
tmp = Bmu(tmp);

FS = FS + tmp;

//Lower right loop
tmp = FnuI;
for(int j=1;j<N;j++)
tmp = Fnu(tmp);
for(int i=0;i<M;i++)
tmp = Bmu(tmp);
for(int j=0;j<N;j++)
tmp = Bnu(tmp);
for(int i=0;i<M;i++)
tmp = Fmu(tmp);

FS = FS + tmp;

//Lower left loop
tmp = FmuI;
for(int i=1;i<M;i++)
tmp = Fmu(tmp);
for(int j=0;j<N;j++)
tmp = Fnu(tmp);
for(int i=0;i<M;i++)
tmp = Bmu(tmp);
for(int j=0;j<N;j++)
tmp = Bnu(tmp);

FS = FS + tmp;

#undef Fmu
#undef Bmu
#undef Fnu
#undef Bnu
#undef FmuI
#undef BmuI
#undef FnuI
#undef BnuI
}

//Field strength from MxN Wilson loop
//Note F_numu = - F_munu
static void FieldStrengthMxN(GaugeMat &FS, const GaugeLorentz &U, int mu, int nu, int M, int N){
GaugeMat Umu = PeekIndex<LorentzIndex>(U, mu);
GaugeMat Unu = PeekIndex<LorentzIndex>(U, nu);
if(M == N){
GaugeMat F(Umu.Grid());
CloverleafMxN(F, Umu, Unu, mu, nu, M, N);
FS = 0.125 * ( F - adj(F) );
}else{
//Average over both orientations
GaugeMat horizontal(Umu.Grid()), vertical(Umu.Grid());
CloverleafMxN(horizontal, Umu, Unu, mu, nu, M, N);
CloverleafMxN(vertical, Umu, Unu, mu, nu, N, M);
FS = 0.0625 * ( horizontal - adj(horizontal) + vertical - adj(vertical) );
}
}

//Topological charge contribution from MxN Wilson loops
//cf https://arxiv.org/pdf/hep-lat/9701012.pdf Eq 6
//output is the charge by timeslice: sum over timeslices to obtain the total
static std::vector<Real> TimesliceTopologicalChargeMxN(const GaugeLorentz &U, int M, int N){
assert(Nd == 4);
std::vector<std::vector<GaugeMat*> > F(Nd,std::vector<GaugeMat*>(Nd,nullptr));
//Note F_numu = - F_munu
//hence we only need to loop over mu,nu,rho,sigma that aren't related by permuting mu,nu or rho,sigma
//Use nu > mu
for(int mu=0;mu<Nd-1;mu++){
for(int nu=mu+1; nu<Nd; nu++){
F[mu][nu] = new GaugeMat(U.Grid());
FieldStrengthMxN(*F[mu][nu], U, mu, nu, M, N);
}
}
Real coeff = -1./(32 * M_PI*M_PI * M*M * N*N); //overall sign to match CPS and Grid conventions, possibly related to time direction = 3 vs 0

static const int combs[3][4] = { {0,1,2,3}, {0,2,1,3}, {0,3,1,2} };
static const int signs[3] = { 1, -1, 1 }; //epsilon_{mu nu rho sigma}

ComplexField fsum(U.Grid());
fsum = Zero();
for(int c=0;c<3;c++){
int mu = combs[c][0], nu = combs[c][1], rho = combs[c][2], sigma = combs[c][3];
int eps = signs[c];
fsum = fsum + (8. * coeff * eps) * trace( (*F[mu][nu]) * (*F[rho][sigma]) );
}

for(int mu=0;mu<Nd-1;mu++)
for(int nu=mu+1; nu<Nd; nu++)
delete F[mu][nu];

typedef typename ComplexField::scalar_object sobj;
std::vector<sobj> Tq;
sliceSum(fsum, Tq, Nd-1);

std::vector<Real> out(Tq.size());
for(int t=0;t<Tq.size();t++) out[t] = TensorRemove(Tq[t]).real();
return out;
}
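The three index pairings and the factor of 8 above reconstruct the full epsilon contraction of the standard charge density: since each F_munu is antisymmetric in (mu,nu), each pairing stands for 8 equal terms of \(\epsilon_{\mu\nu\rho\sigma}\), so per timeslice the routine accumulates

\[
Q \;=\; \frac{1}{32\pi^2\,M^2N^2}\sum_x \epsilon_{\mu\nu\rho\sigma}\,\mathrm{tr}\,F_{\mu\nu}(x)F_{\rho\sigma}(x),
\]

with the \(1/(M^2N^2)\) factor removing the tree-level normalisation of the MxN clover and the overall sign fixed to the CPS/Grid convention noted in the comment.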
static Real TopologicalChargeMxN(const GaugeLorentz &U, int M, int N){
std::vector<Real> Tq = TimesliceTopologicalChargeMxN(U,M,N);
Real out(0);
for(int t=0;t<Tq.size();t++) out += Tq[t];
return out;
}

//Generate the contributions to the 5Li topological charge from Wilson loops of the following sizes
//Use coefficients from hep-lat/9701012
//1x1 : c1=(19.-55.*c5)/9.
//2x2 : c2=(1-64.*c5)/9.
//1x2 : c3=(-64.+640.*c5)/45.
//1x3 : c4=1./5.-2.*c5
//3x3 : c5=1./20.
//Output array outer index contains the loops in the above order
//Inner index is the time coordinate
static std::vector<std::vector<Real> > TimesliceTopologicalCharge5LiContributions(const GaugeLorentz &U){
static const int exts[5][2] = { {1,1}, {2,2}, {1,2}, {1,3}, {3,3} };
std::vector<std::vector<Real> > out(5);
for(int i=0;i<5;i++){
out[i] = TimesliceTopologicalChargeMxN(U,exts[i][0],exts[i][1]);
}
return out;
}

static std::vector<Real> TopologicalCharge5LiContributions(const GaugeLorentz &U){
static const int exts[5][2] = { {1,1}, {2,2}, {1,2}, {1,3}, {3,3} };
std::vector<Real> out(5);
std::cout << GridLogMessage << "Computing topological charge" << std::endl;
for(int i=0;i<5;i++){
out[i] = TopologicalChargeMxN(U,exts[i][0],exts[i][1]);
std::cout << GridLogMessage << exts[i][0] << "x" << exts[i][1] << " Wilson loop contribution " << out[i] << std::endl;
}
return out;
}

//Compute the 5Li topological charge
static std::vector<Real> TimesliceTopologicalCharge5Li(const GaugeLorentz &U){
std::vector<std::vector<Real> > loops = TimesliceTopologicalCharge5LiContributions(U);

double c5=1./20.;
double c4=1./5.-2.*c5;
double c3=(-64.+640.*c5)/45.;
double c2=(1-64.*c5)/9.;
double c1=(19.-55.*c5)/9.;

int Lt = loops[0].size();
std::vector<Real> out(Lt,0.);
for(int t=0;t<Lt;t++)
out[t] += c1*loops[0][t] + c2*loops[1][t] + c3*loops[2][t] + c4*loops[3][t] + c5*loops[4][t];
return out;
}

static Real TopologicalCharge5Li(const GaugeLorentz &U){
std::vector<Real> Qt = TimesliceTopologicalCharge5Li(U);
Real Q = 0.;
for(int t=0;t<Qt.size();t++) Q += Qt[t];
std::cout << GridLogMessage << "5Li Topological charge: " << Q << std::endl;
return Q;
}
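A quick consistency check on the hep-lat/9701012 coefficients used above: with \(c_5 = 1/20\) one finds \(c_1 = 65/36\), \(c_2 = -11/45\), \(c_3 = -32/45\), \(c_4 = 1/10\), and

\[
c_1+c_2+c_3+c_4+c_5 \;=\; \tfrac{65}{36}-\tfrac{11}{45}-\tfrac{32}{45}+\tfrac{1}{10}+\tfrac{1}{20} \;=\; 1,
\]

so the tree-level charge normalisation of each loop is preserved and the five contributions combine as a weighted average.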




//////////////////////////////////////////////////////
// Similar to above for rectangle is required
//////////////////////////////////////////////////////

@ -1,200 +0,0 @@
// -*- C++ -*-
//===--------------------------- random -----------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

// Peter Boyle: Taken from libc++ in Clang/LLVM.
// Reason is that libstdc++ and clang differ in their return order in the normal_distribution / Box-Muller type step.
// standardise on one and call it "gaussian_distribution".

#pragma once

#include <cstddef>
#include <cstdint>
#include <cmath>
#include <type_traits>
#include <initializer_list>
#include <limits>
#include <algorithm>
#include <numeric>
#include <vector>
#include <string>
#include <istream>
#include <ostream>
#include <random>

// normal_distribution -> gaussian distribution
namespace Grid {

template<class _RealType = double>
class gaussian_distribution
{
public:
// types
typedef _RealType result_type;

class param_type
{
result_type __mean_;
result_type __stddev_;
public:
typedef gaussian_distribution distribution_type;

strong_inline
explicit param_type(result_type __mean = 0, result_type __stddev = 1)
: __mean_(__mean), __stddev_(__stddev) {}

strong_inline
result_type mean() const {return __mean_;}
strong_inline
result_type stddev() const {return __stddev_;}

friend strong_inline
bool operator==(const param_type& __x, const param_type& __y)
{return __x.__mean_ == __y.__mean_ && __x.__stddev_ == __y.__stddev_;}
friend strong_inline
bool operator!=(const param_type& __x, const param_type& __y)
{return !(__x == __y);}
};

private:
param_type __p_;
result_type _V_;
bool _V_hot_;

public:
// constructors and reset functions
strong_inline
explicit gaussian_distribution(result_type __mean = 0, result_type __stddev = 1)
: __p_(param_type(__mean, __stddev)), _V_hot_(false) {}
strong_inline
explicit gaussian_distribution(const param_type& __p)
: __p_(__p), _V_hot_(false) {}
strong_inline
void reset() {_V_hot_ = false;}

// generating functions
template<class _URNG>
strong_inline
result_type operator()(_URNG& __g)
{return (*this)(__g, __p_);}
template<class _URNG> result_type operator()(_URNG& __g, const param_type& __p);

// property functions
strong_inline
result_type mean() const {return __p_.mean();}
strong_inline
result_type stddev() const {return __p_.stddev();}

strong_inline
param_type param() const {return __p_;}
strong_inline
void param(const param_type& __p) {__p_ = __p;}

strong_inline
result_type min() const {return -std::numeric_limits<result_type>::infinity();}
strong_inline
result_type max() const {return std::numeric_limits<result_type>::infinity();}

friend strong_inline
bool operator==(const gaussian_distribution& __x,
const gaussian_distribution& __y)
{return __x.__p_ == __y.__p_ && __x._V_hot_ == __y._V_hot_ &&
(!__x._V_hot_ || __x._V_ == __y._V_);}
friend strong_inline
bool operator!=(const gaussian_distribution& __x,
const gaussian_distribution& __y)
{return !(__x == __y);}

template <class _CharT, class _Traits, class _RT>
friend
std::basic_ostream<_CharT, _Traits>&
operator<<(std::basic_ostream<_CharT, _Traits>& __os,
const gaussian_distribution<_RT>& __x);

template <class _CharT, class _Traits, class _RT>
friend
std::basic_istream<_CharT, _Traits>&
operator>>(std::basic_istream<_CharT, _Traits>& __is,
gaussian_distribution<_RT>& __x);
};

template <class _RealType>
template<class _URNG>
_RealType
gaussian_distribution<_RealType>::operator()(_URNG& __g, const param_type& __p)
{
result_type _Up;
if (_V_hot_)
{
_V_hot_ = false;
_Up = _V_;
}
else
{
std::uniform_real_distribution<result_type> _Uni(-1, 1);
result_type __u;
result_type __v;
result_type __s;
do
{
__u = _Uni(__g);
__v = _Uni(__g);
__s = __u * __u + __v * __v;
} while (__s > 1 || __s == 0);
result_type _Fp = std::sqrt(-2 * std::log(__s) / __s);
_V_ = __v * _Fp;
_V_hot_ = true;
_Up = __u * _Fp;
}
return _Up * __p.stddev() + __p.mean();
}
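The rejection loop above is the Marsaglia polar form of the Box-Muller transform: it draws (u, v) uniformly on the square, keeps only points with \(s = u^2 + v^2 \in (0,1)\), and converts them into two independent Gaussians

\[
x = u\sqrt{-2\ln s/s}, \qquad y = v\sqrt{-2\ln s/s},
\]

returning one immediately and caching the other in _V_ (guarded by _V_hot_) for the next call; standardising on this single implementation is what makes the draw order reproducible across libstdc++ and libc++.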

template <class _CharT, class _Traits, class _RT>
std::basic_ostream<_CharT, _Traits>&
operator<<(std::basic_ostream<_CharT, _Traits>& __os,
const gaussian_distribution<_RT>& __x)
{
auto __save_flags = __os.flags();
__os.flags(std::ios_base::dec | std::ios_base::left | std::ios_base::fixed |
std::ios_base::scientific);
_CharT __sp = __os.widen(' ');
__os.fill(__sp);
__os << __x.mean() << __sp << __x.stddev() << __sp << __x._V_hot_;
if (__x._V_hot_)
__os << __sp << __x._V_;
__os.flags(__save_flags);
return __os;
}

template <class _CharT, class _Traits, class _RT>
std::basic_istream<_CharT, _Traits>&
operator>>(std::basic_istream<_CharT, _Traits>& __is,
gaussian_distribution<_RT>& __x)
{
typedef gaussian_distribution<_RT> _Eng;
typedef typename _Eng::result_type result_type;
typedef typename _Eng::param_type param_type;
auto __save_flags = __is.flags();
__is.flags(std::ios_base::dec | std::ios_base::skipws);
result_type __mean;
result_type __stddev;
result_type _Vp = 0;
bool _V_hot = false;
__is >> __mean >> __stddev >> _V_hot;
if (_V_hot)
__is >> _Vp;
if (!__is.fail())
{
__x.param(param_type(__mean, __stddev));
__x._V_hot_ = _V_hot;
__x._V_ = _Vp;
}
__is.flags(__save_flags);
return __is;
}
}
@ -131,8 +131,11 @@ class CartesianStencilAccelerator {
int _checkerboard;
int _npoints; // Move to template param?
int _osites;
int _dirichlet;
StencilVector _directions;
StencilVector _distances;
StencilVector _comms_send;
StencilVector _comms_recv;
StencilVector _comm_buf_size;
StencilVector _permute_type;
StencilVector same_node;
@ -226,6 +229,8 @@ public:
void * recv_buf;
Integer to_rank;
Integer from_rank;
Integer do_send;
Integer do_recv;
Integer bytes;
};
struct Merge {
@ -240,7 +245,20 @@ public:
cobj * mpi_p;
Integer buffer_size;
};

struct CopyReceiveBuffer {
void * from_p;
void * to_p;
Integer bytes;
};
struct CachedTransfer {
Integer direction;
Integer OrthogPlane;
Integer DestProc;
Integer bytes;
Integer lane;
Integer cb;
void *recv_buf;
};

protected:
GridBase * _grid;
@ -271,7 +289,8 @@ public:
std::vector<Merge> MergersSHM;
std::vector<Decompress> Decompressions;
std::vector<Decompress> DecompressionsSHM;

std::vector<CopyReceiveBuffer> CopyReceiveBuffers ;
std::vector<CachedTransfer> CachedTransfers;
///////////////////////////////////////////////////////////
// Unified Comms buffers for all directions
///////////////////////////////////////////////////////////
@ -284,29 +303,6 @@ public:
int u_comm_offset;
int _unified_buffer_size;

/////////////////////////////////////////
// Timing info; ugly; possibly temporary
/////////////////////////////////////////
double commtime;
double mpi3synctime;
double mpi3synctime_g;
double shmmergetime;
double gathertime;
double gathermtime;
double halogtime;
double mergetime;
double decompresstime;
double comms_bytes;
double shm_bytes;
double splicetime;
double nosplicetime;
double calls;
std::vector<double> comm_bytes_thr;
std::vector<double> shm_bytes_thr;
std::vector<double> comm_time_thr;
std::vector<double> comm_enter_thr;
std::vector<double> comm_leave_thr;

////////////////////////////////////////
// Stencil query
////////////////////////////////////////
@ -333,11 +329,12 @@ public:
//////////////////////////////////////////
// Comms packet queue for asynch thread
// Use OpenMP Tasks for cleaner ???
// must be called *inside* parallel region
//////////////////////////////////////////
/*
void CommunicateThreaded()
{
#ifdef GRID_OMP
// must be called in parallel region
int mythread = omp_get_thread_num();
int nthreads = CartesianCommunicator::nCommThreads;
#else
@ -346,65 +343,29 @@ public:
#endif
if (nthreads == -1) nthreads = 1;
if (mythread < nthreads) {
comm_enter_thr[mythread] = usecond();
for (int i = mythread; i < Packets.size(); i += nthreads) {
uint64_t bytes = _grid->StencilSendToRecvFrom(Packets[i].send_buf,
Packets[i].to_rank,
Packets[i].recv_buf,
Packets[i].from_rank,
Packets[i].bytes,i);
comm_bytes_thr[mythread] += bytes;
shm_bytes_thr[mythread] += 2*Packets[i].bytes-bytes; // Send + Recv.

}
comm_leave_thr[mythread]= usecond();
comm_time_thr[mythread] += comm_leave_thr[mythread] - comm_enter_thr[mythread];
}
}

void CollateThreads(void)
{
int nthreads = CartesianCommunicator::nCommThreads;
double first=0.0;
double last =0.0;

for(int t=0;t<nthreads;t++) {

double t0 = comm_enter_thr[t];
double t1 = comm_leave_thr[t];
comms_bytes+=comm_bytes_thr[t];
shm_bytes +=shm_bytes_thr[t];

comm_enter_thr[t] = 0.0;
comm_leave_thr[t] = 0.0;
comm_time_thr[t] = 0.0;
comm_bytes_thr[t]=0;
shm_bytes_thr[t]=0;

if ( first == 0.0 ) first = t0; // first is t0
if ( (t0 > 0.0) && ( t0 < first ) ) first = t0; // min time seen

if ( t1 > last ) last = t1; // max time seen

}
commtime+= last-first;
}
*/
////////////////////////////////////////////////////////////////////////
// Non blocking send and receive. Necessarily parallel.
////////////////////////////////////////////////////////////////////////
void CommunicateBegin(std::vector<std::vector<CommsRequest_t> > &reqs)
{
reqs.resize(Packets.size());
commtime-=usecond();
for(int i=0;i<Packets.size();i++){
uint64_t bytes=_grid->StencilSendToRecvFromBegin(reqs[i],
Packets[i].send_buf,
Packets[i].to_rank,
Packets[i].recv_buf,
Packets[i].from_rank,
Packets[i].bytes,i);
comms_bytes+=bytes;
shm_bytes +=2*Packets[i].bytes-bytes;
_grid->StencilSendToRecvFromBegin(reqs[i],
Packets[i].send_buf,
Packets[i].to_rank,Packets[i].do_send,
Packets[i].recv_buf,
Packets[i].from_rank,Packets[i].do_recv,
Packets[i].bytes,i);
}
}

@ -413,7 +374,6 @@ public:
for(int i=0;i<Packets.size();i++){
_grid->StencilSendToRecvFromComplete(reqs[i],i);
}
commtime+=usecond();
}
////////////////////////////////////////////////////////////////////////
// Blocking send and receive. Either sequential or parallel.
@ -421,28 +381,27 @@ public:
void Communicate(void)
{
if ( CartesianCommunicator::CommunicatorPolicy == CartesianCommunicator::CommunicatorPolicySequential ){
thread_region {
// must be called in parallel region
int mythread = thread_num();
int maxthreads= thread_max();
int nthreads = CartesianCommunicator::nCommThreads;
assert(nthreads <= maxthreads);
if (nthreads == -1) nthreads = 1;
if (mythread < nthreads) {
for (int i = mythread; i < Packets.size(); i += nthreads) {
double start = usecond();
uint64_t bytes= _grid->StencilSendToRecvFrom(Packets[i].send_buf,
Packets[i].to_rank,
Packets[i].recv_buf,
Packets[i].from_rank,
Packets[i].bytes,i);
comm_bytes_thr[mythread] += bytes;
shm_bytes_thr[mythread] += Packets[i].bytes - bytes;
comm_time_thr[mythread] += usecond() - start;
}
}
}
} else { // Concurrent and non-threaded asynch calls to MPI
/////////////////////////////////////////////////////////
// several way threaded on different communicators.
// Cannot combine with Dirichlet operators
// This scheme is needed on Intel Omnipath for best performance
// Deprecate once there are very few omnipath clusters
/////////////////////////////////////////////////////////
int nthreads = CartesianCommunicator::nCommThreads;
int old = GridThread::GetThreads();
GridThread::SetThreads(nthreads);
thread_for(i,Packets.size(),{
_grid->StencilSendToRecvFrom(Packets[i].send_buf,
Packets[i].to_rank,Packets[i].do_send,
Packets[i].recv_buf,
Packets[i].from_rank,Packets[i].do_recv,
Packets[i].bytes,i);
});
GridThread::SetThreads(old);
} else {
/////////////////////////////////////////////////////////
// Concurrent and non-threaded asynch calls to MPI
/////////////////////////////////////////////////////////
std::vector<std::vector<CommsRequest_t> > reqs;
this->CommunicateBegin(reqs);
this->CommunicateComplete(reqs);
@ -484,31 +443,23 @@ public:
sshift[1] = _grid->CheckerBoardShiftForCB(this->_checkerboard,dimension,shift,Odd);
if ( sshift[0] == sshift[1] ) {
if (splice_dim) {
splicetime-=usecond();
auto tmp = GatherSimd(source,dimension,shift,0x3,compress,face_idx);
auto tmp = GatherSimd(source,dimension,shift,0x3,compress,face_idx,point);
is_same_node = is_same_node && tmp;
splicetime+=usecond();
} else {
nosplicetime-=usecond();
auto tmp = Gather(source,dimension,shift,0x3,compress,face_idx);
auto tmp = Gather(source,dimension,shift,0x3,compress,face_idx,point);
is_same_node = is_same_node && tmp;
nosplicetime+=usecond();
}
} else {
if(splice_dim){
splicetime-=usecond();
// if checkerboard is unfavourable take two passes
// both with block stride loop iteration
auto tmp1 = GatherSimd(source,dimension,shift,0x1,compress,face_idx);
auto tmp2 = GatherSimd(source,dimension,shift,0x2,compress,face_idx);
auto tmp1 = GatherSimd(source,dimension,shift,0x1,compress,face_idx,point);
auto tmp2 = GatherSimd(source,dimension,shift,0x2,compress,face_idx,point);
is_same_node = is_same_node && tmp1 && tmp2;
splicetime+=usecond();
} else {
nosplicetime-=usecond();
auto tmp1 = Gather(source,dimension,shift,0x1,compress,face_idx);
auto tmp2 = Gather(source,dimension,shift,0x2,compress,face_idx);
auto tmp1 = Gather(source,dimension,shift,0x1,compress,face_idx,point);
auto tmp2 = Gather(source,dimension,shift,0x2,compress,face_idx,point);
is_same_node = is_same_node && tmp1 && tmp2;
nosplicetime+=usecond();
}
}
}
@ -518,13 +469,10 @@ public:
template<class compressor>
void HaloGather(const Lattice<vobj> &source,compressor &compress)
{
mpi3synctime_g-=usecond();
_grid->StencilBarrier();// Synch shared memory on a single nodes
mpi3synctime_g+=usecond();

// conformable(source.Grid(),_grid);
assert(source.Grid()==_grid);
halogtime-=usecond();

u_comm_offset=0;

@ -538,7 +486,6 @@ public:
assert(u_comm_offset==_unified_buffer_size);

accelerator_barrier();
halogtime+=usecond();
}

/////////////////////////
@ -551,14 +498,72 @@ public:
Mergers.resize(0);
MergersSHM.resize(0);
Packets.resize(0);
calls++;
CopyReceiveBuffers.resize(0);
CachedTransfers.resize(0);
}
void AddPacket(void *xmit,void * rcv, Integer to,Integer from,Integer bytes){
void AddCopy(void *from,void * to, Integer bytes)
{
// std::cout << "Adding CopyReceiveBuffer "<<std::hex<<from<<" "<<to<<std::dec<<" "<<bytes<<std::endl;
CopyReceiveBuffer obj;
obj.from_p = from;
obj.to_p = to;
obj.bytes= bytes;
CopyReceiveBuffers.push_back(obj);
}
void CommsCopy()
{
// These are device resident MPI buffers.
for(int i=0;i<CopyReceiveBuffers.size();i++){
cobj *from=(cobj *)CopyReceiveBuffers[i].from_p;
cobj *to =(cobj *)CopyReceiveBuffers[i].to_p;
Integer words = CopyReceiveBuffers[i].bytes/sizeof(cobj);
// std::cout << "CopyReceiveBuffer "<<std::hex<<from<<" "<<to<<std::dec<<" "<<words*sizeof(cobj)<<std::endl;
accelerator_forNB(j, words, cobj::Nsimd(), {
coalescedWrite(to[j] ,coalescedRead(from [j]));
});
}
}

Integer CheckForDuplicate(Integer direction, Integer OrthogPlane, Integer DestProc, void *recv_buf,Integer lane,Integer bytes,Integer cb)
{
CachedTransfer obj;
obj.direction = direction;
obj.OrthogPlane = OrthogPlane;
obj.DestProc = DestProc;
obj.recv_buf = recv_buf;
obj.lane = lane;
obj.bytes = bytes;
obj.cb = cb;

for(int i=0;i<CachedTransfers.size();i++){
if ( (CachedTransfers[i].direction ==direction)
&&(CachedTransfers[i].OrthogPlane==OrthogPlane)
&&(CachedTransfers[i].DestProc ==DestProc)
&&(CachedTransfers[i].bytes ==bytes)
&&(CachedTransfers[i].lane ==lane)
&&(CachedTransfers[i].cb ==cb)
){
// std::cout << "Found duplicate plane dir "<<direction<<" plane "<< OrthogPlane<< " simd "<<lane << " relproc "<<DestProc<< " bytes "<<bytes <<std::endl;
AddCopy(CachedTransfers[i].recv_buf,recv_buf,bytes);
return 1;
}
}

// std::cout << "No duplicate plane dir "<<direction<<" plane "<< OrthogPlane<< " simd "<<lane << " relproc "<<DestProc<<" bytes "<<bytes<<std::endl;
CachedTransfers.push_back(obj);
return 0;
}
void AddPacket(void *xmit,void * rcv,
Integer to, Integer do_send,
Integer from, Integer do_recv,
Integer bytes){
Packet p;
p.send_buf = xmit;
p.recv_buf = rcv;
p.to_rank = to;
p.from_rank= from;
p.do_send = do_send;
p.do_recv = do_recv;
p.bytes = bytes;
Packets.push_back(p);
}
@ -578,22 +583,17 @@ public:
mv.push_back(m);
}
template<class decompressor> void CommsMerge(decompressor decompress) {
CommsCopy();
CommsMerge(decompress,Mergers,Decompressions);
}
template<class decompressor> void CommsMergeSHM(decompressor decompress) {
mpi3synctime-=usecond();
_grid->StencilBarrier();// Synch shared memory on a single nodes
mpi3synctime+=usecond();
shmmergetime-=usecond();
CommsMerge(decompress,MergersSHM,DecompressionsSHM);
shmmergetime+=usecond();
}

template<class decompressor>
void CommsMerge(decompressor decompress,std::vector<Merge> &mm,std::vector<Decompress> &dd) {


mergetime-=usecond();
void CommsMerge(decompressor decompress,std::vector<Merge> &mm,std::vector<Decompress> &dd)
{
for(int i=0;i<mm.size();i++){
auto mp = &mm[i].mpointer[0];
auto vp0= &mm[i].vpointers[0][0];
@ -603,9 +603,7 @@ public:
decompress.Exchange(mp,vp0,vp1,type,o);
});
}
mergetime+=usecond();

decompresstime-=usecond();
for(int i=0;i<dd.size();i++){
auto kp = dd[i].kernel_p;
auto mp = dd[i].mpi_p;
@ -613,7 +611,6 @@ public:
decompress.Decompress(kp,mp,o);
});
}
decompresstime+=usecond();
}
////////////////////////////////////////
// Set up routines
@ -650,19 +647,58 @@ public:
}
}
}

/// Introduce a block structure and switch off comms on boundaries
void DirichletBlock(const Coordinate &dirichlet_block)
{
this->_dirichlet = 1;
for(int ii=0;ii<this->_npoints;ii++){
int dimension = this->_directions[ii];
int displacement = this->_distances[ii];
int shift = displacement;
int gd = _grid->_gdimensions[dimension];
int fd = _grid->_fdimensions[dimension];
int pd = _grid->_processors [dimension];
int ld = gd/pd;
int pc = _grid->_processor_coor[dimension];
///////////////////////////////////////////
// Figure out dirichlet send and receive
// on this leg of stencil.
///////////////////////////////////////////
int comm_dim = _grid->_processors[dimension] >1 ;
int block = dirichlet_block[dimension];
this->_comms_send[ii] = comm_dim;
this->_comms_recv[ii] = comm_dim;
if ( block ) {
assert(abs(displacement) < ld );

if( displacement > 0 ) {
// High side, low side
// | <--B--->|
// |    |    |
// noR
// noS
if ( (ld*(pc+1) ) % block == 0 ) this->_comms_recv[ii] = 0;
if ( ( ld*pc ) % block == 0 ) this->_comms_send[ii] = 0;
} else {
// High side, low side
// | <--B--->|
// |    |    |
// noS
// noR
if ( (ld*(pc+1) ) % block == 0 ) this->_comms_send[ii] = 0;
if ( ( ld*pc ) % block == 0 ) this->_comms_recv[ii] = 0;
}
}
}
}
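As a concrete check of the modulo logic above (illustrative numbers, not from the source): with ld = 8 sites per rank and dirichlet_block B = 16, (ld*pc) mod B vanishes on even ranks and (ld*(pc+1)) mod B on odd ranks, so for a positive displacement sends are switched off on even ranks and receives on odd ranks. The intent is that exactly the legs that would cross a block boundary, here every second rank boundary, are cut, while interior rank boundaries keep communicating.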
CartesianStencil(GridBase *grid,
int npoints,
int checkerboard,
const std::vector<int> &directions,
const std::vector<int> &distances,
Parameters p)
: shm_bytes_thr(npoints),
comm_bytes_thr(npoints),
comm_enter_thr(npoints),
comm_leave_thr(npoints),
comm_time_thr(npoints)
{
this->_dirichlet = 0;
face_table_computed=0;
_grid = grid;
this->parameters=p;
@ -675,6 +711,8 @@ public:
this->_simd_layout = _grid->_simd_layout; // copy simd_layout to give access to Accelerator Kernels
this->_directions = StencilVector(directions);
this->_distances = StencilVector(distances);
this->_comms_send.resize(npoints);
this->_comms_recv.resize(npoints);
this->same_node.resize(npoints);

_unified_buffer_size=0;
@ -693,24 +731,27 @@ public:
int displacement = distances[i];
int shift = displacement;

int gd = _grid->_gdimensions[dimension];
int fd = _grid->_fdimensions[dimension];
int pd = _grid->_processors [dimension];
int ld = gd/pd;
int rd = _grid->_rdimensions[dimension];
int pc = _grid->_processor_coor[dimension];
this->_permute_type[point]=_grid->PermuteType(dimension);

this->_checkerboard = checkerboard;

//////////////////////////
// the permute type
//////////////////////////
int simd_layout = _grid->_simd_layout[dimension];
int comm_dim = _grid->_processors[dimension] >1 ;
int splice_dim = _grid->_simd_layout[dimension]>1 && (comm_dim);
int rotate_dim = _grid->_simd_layout[dimension]>2;

this->_comms_send[ii] = comm_dim;
this->_comms_recv[ii] = comm_dim;

assert ( (rotate_dim && comm_dim) == false) ; // Do not think spread out is supported

int sshift[2];

//////////////////////////
// Underlying approach. For each local site build
// up a table containing the npoint "neighbours" and whether they
@ -811,6 +852,7 @@ public:
GridBase *grid=_grid;
const int Nsimd = grid->Nsimd();

int comms_recv = this->_comms_recv[point];
int fd = _grid->_fdimensions[dimension];
int ld = _grid->_ldimensions[dimension];
int rd = _grid->_rdimensions[dimension];
@ -867,7 +909,9 @@ public:
if ( (shiftpm== 1) && (sx<x) && (grid->_processor_coor[dimension]==grid->_processors[dimension]-1) ) {
wraparound = 1;
}
if (!offnode) {

// Wrap locally dirichlet support case OR node local
if ( (offnode==0) || (comms_recv==0) ) {

int permute_slice=0;
CopyPlane(point,dimension,x,sx,cbmask,permute_slice,wraparound);
@ -984,11 +1028,14 @@ public:
}

template<class compressor>
int Gather(const Lattice<vobj> &rhs,int dimension,int shift,int cbmask,compressor & compress,int &face_idx)
int Gather(const Lattice<vobj> &rhs,int dimension,int shift,int cbmask,compressor & compress,int &face_idx, int point)
{
typedef typename cobj::vector_type vector_type;
typedef typename cobj::scalar_type scalar_type;

int comms_send = this->_comms_send[point] ;
int comms_recv = this->_comms_recv[point] ;

assert(rhs.Grid()==_grid);
// conformable(_grid,rhs.Grid());

@ -1011,9 +1058,11 @@ public:

int sx = (x+sshift)%rd;
int comm_proc = ((x+sshift)/rd)%pd;


if (comm_proc) {



int words = buffer_size;
if (cbmask != 0x3) words=words>>1;

@ -1045,44 +1094,53 @@ public:
recv_buf=this->u_recv_buf_p;
}


cobj *send_buf;
send_buf = this->u_send_buf_p; // Gather locally, must send


////////////////////////////////////////////////////////
// Gather locally
////////////////////////////////////////////////////////
gathertime-=usecond();
assert(send_buf!=NULL);
Gather_plane_simple_table(face_table[face_idx],rhs,send_buf,compress,u_comm_offset,so); face_idx++;
gathertime+=usecond();
if ( comms_send )
Gather_plane_simple_table(face_table[face_idx],rhs,send_buf,compress,u_comm_offset,so);
face_idx++;

///////////////////////////////////////////////////////////
// Build a list of things to do after we synchronise GPUs
// Start comms now???
///////////////////////////////////////////////////////////
AddPacket((void *)&send_buf[u_comm_offset],
(void *)&recv_buf[u_comm_offset],
xmit_to_rank,
recv_from_rank,
bytes);
int duplicate = CheckForDuplicate(dimension,sx,comm_proc,(void *)&recv_buf[u_comm_offset],0,bytes,cbmask);
if ( (!duplicate) ) { // Force comms for now

if ( compress.DecompressionStep() ) {
///////////////////////////////////////////////////////////
// Build a list of things to do after we synchronise GPUs
// Start comms now???
///////////////////////////////////////////////////////////
AddPacket((void *)&send_buf[u_comm_offset],
(void *)&recv_buf[u_comm_offset],
xmit_to_rank, comms_send,
recv_from_rank, comms_recv,
bytes);
}

if ( compress.DecompressionStep() ) {
AddDecompress(&this->u_recv_buf_p[u_comm_offset],
&recv_buf[u_comm_offset],
words,Decompressions);
}
u_comm_offset+=words;
}
}
}
return 0;
}
|
||||
|
||||
template<class compressor>
int GatherSimd(const Lattice<vobj> &rhs,int dimension,int shift,int cbmask,compressor &compress,int & face_idx)
int GatherSimd(const Lattice<vobj> &rhs,int dimension,int shift,int cbmask,compressor &compress,int & face_idx,int point)
{
  const int Nsimd = _grid->Nsimd();

  const int maxl =2;// max layout in a direction

  int comms_send = this->_comms_send[point];
  int comms_recv = this->_comms_recv[point];

  int fd = _grid->_fdimensions[dimension];
  int rd = _grid->_rdimensions[dimension];
  int ld = _grid->_ldimensions[dimension];

@@ -1147,12 +1205,11 @@ public:
                 &face_table[face_idx][0],
                 face_table[face_idx].size()*sizeof(face_table_host[0]));
  }
  gathermtime-=usecond();

  // if ( comms_send )
  Gather_plane_exchange_table(face_table[face_idx],rhs,spointers,dimension,sx,cbmask,compress,permute_type);
  face_idx++;

  gathermtime+=usecond();
  //spointers[0] -- low
  //spointers[1] -- high

@@ -1181,8 +1238,13 @@ public:

  rpointers[i] = rp;

  AddPacket((void *)sp,(void *)rp,xmit_to_rank,recv_from_rank,bytes);

  int duplicate = CheckForDuplicate(dimension,sx,nbr_proc,(void *)rp,i,bytes,cbmask);
  if ( !duplicate ) {
    AddPacket((void *)sp,(void *)rp,
              xmit_to_rank,comms_send,
              recv_from_rank,comms_recv,
              bytes);
  }

} else {

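In both Gather and GatherSimd, the new CheckForDuplicate guard suppresses re-posting a packet whose destination buffer has already been scheduled for this exchange. A minimal self-contained sketch of such duplicate suppression, keyed here on (peer rank, receive buffer) purely for illustration; PacketQueue and its key choice are assumptions, not Grid code:

#include <set>
#include <tuple>
#include <vector>
#include <cstdio>

struct Packet { const void *send; void *recv; int to, from; size_t bytes; };

struct PacketQueue {
  std::vector<Packet> packets;
  std::set<std::tuple<int,void*>> seen; // (peer rank, recv buffer) acts as the key
  // Queue a packet only if an identical destination has not been seen yet.
  bool add(const Packet &p) {
    if (!seen.insert({p.from, p.recv}).second) return false; // duplicate, skip
    packets.push_back(p);
    return true;
  }
};

int main() {
  PacketQueue q;
  char buf[16];
  std::printf("%d\n", q.add({nullptr, buf, 1, 2, 16})); // 1: queued
  std::printf("%d\n", q.add({nullptr, buf, 1, 2, 16})); // 0: suppressed
}
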
@@ -55,7 +55,7 @@ template<class vtype, int N> accelerator_inline iVector<vtype, N> Exponentiate(c

// Specialisation: Cayley-Hamilton exponential for SU(3)
#ifndef GRID_CUDA
#ifndef GRID_ACCELERATED
template<class vtype, typename std::enable_if< GridTypeMapper<vtype>::TensorLevel == 0>::type * =nullptr>
accelerator_inline iMatrix<vtype,3> Exponentiate(const iMatrix<vtype,3> &arg, RealD alpha , Integer Nexp = DEFAULT_MAT_EXP )
{

@@ -208,46 +208,5 @@ void merge(vobj &vec,const ExtractPointerArray<sobj> &extracted, int offset)
}

//////////////////////////////////////////////////////////////////////////////////
//Copy a single lane of a SIMD tensor type from one object to another
//Output object must be of the same tensor type but may be of a different precision (i.e. it can have a different root data type)
///////////////////////////////////////////////////////////////////////////////////
template<class vobjOut, class vobjIn>
accelerator_inline
void copyLane(vobjOut & __restrict__ vecOut, int lane_out, const vobjIn & __restrict__ vecIn, int lane_in)
{
  static_assert( std::is_same<typename vobjOut::DoublePrecision, typename vobjIn::DoublePrecision>::value == 1, "copyLane: tensor types must be the same" ); //if tensor types are same the DoublePrecision type must be the same

  typedef typename vobjOut::vector_type ovector_type;
  typedef typename vobjIn::vector_type ivector_type;
  constexpr int owords=sizeof(vobjOut)/sizeof(ovector_type);
  constexpr int iwords=sizeof(vobjIn)/sizeof(ivector_type);
  static_assert( owords == iwords, "copyLane: Expected number of vector words in input and output objects to be equal" );

  typedef typename vobjOut::scalar_type oscalar_type;
  typedef typename vobjIn::scalar_type iscalar_type;
  typedef typename ExtractTypeMap<oscalar_type>::extract_type oextract_type;
  typedef typename ExtractTypeMap<iscalar_type>::extract_type iextract_type;

  typedef oextract_type * opointer;
  typedef iextract_type * ipointer;

  constexpr int oNsimd=ovector_type::Nsimd();
  constexpr int iNsimd=ivector_type::Nsimd();

  iscalar_type itmp;
  oscalar_type otmp;

  opointer __restrict__ op = (opointer)&vecOut;
  ipointer __restrict__ ip = (ipointer)&vecIn;
  for(int w=0;w<owords;w++){
    memcpy( (char*)&itmp, (char*)(ip + lane_in + iNsimd*w), sizeof(iscalar_type) );
    otmp = itmp; //potential precision change
    memcpy( (char*)(op + lane_out + oNsimd*w), (char*)&otmp, sizeof(oscalar_type) );
  }
}

NAMESPACE_END(Grid);

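A standalone illustration of the copyLane pattern above, with toy fixed-size types standing in for Grid's SIMD tensors (VecD/VecF/ObjD/ObjF are hypothetical): each vector word contributes one lane, which is read as the input scalar type, converted, and written into the chosen output lane.

#include <cstring>
#include <cstdio>

struct VecD { double lanes[4]; };  // input "vector type": 4 lanes of double
struct VecF { float  lanes[4]; };  // output "vector type": 4 lanes of float
struct ObjD { VecD v[2]; };        // two vector words per object
struct ObjF { VecF v[2]; };

void copyLaneToy(ObjF &out, int lane_out, const ObjD &in, int lane_in) {
  for (int w=0; w<2; w++) {
    double itmp;
    std::memcpy(&itmp, &in.v[w].lanes[lane_in], sizeof(itmp));
    float otmp = (float)itmp;      // the precision change
    std::memcpy(&out.v[w].lanes[lane_out], &otmp, sizeof(otmp));
  }
}

int main() {
  ObjD d{}; d.v[0].lanes[3] = 2.5; d.v[1].lanes[3] = -1.25;
  ObjF f{};
  copyLaneToy(f, 0, d, 3);
  std::printf("%f %f\n", f.v[0].lanes[0], f.v[1].lanes[0]); // 2.500000 -1.250000
}
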
@@ -206,8 +206,7 @@ inline void *acceleratorAllocShared(size_t bytes)
  auto err = cudaMallocManaged((void **)&ptr,bytes);
  if( err != cudaSuccess ) {
    ptr = (void *) NULL;
    printf(" cudaMallocManaged failed for %lu %s \n",bytes,cudaGetErrorString(err)); fflush(stdout);
    if (acceleratorAbortOnGpuError) assert(err==cudaSuccess);
    printf(" cudaMallocManaged failed for %d %s \n",bytes,cudaGetErrorString(err));
  }
  return ptr;
};
@@ -217,47 +216,15 @@ inline void *acceleratorAllocDevice(size_t bytes)
  auto err = cudaMalloc((void **)&ptr,bytes);
  if( err != cudaSuccess ) {
    ptr = (void *) NULL;
    printf(" cudaMalloc failed for %lu %s \n",bytes,cudaGetErrorString(err)); fflush(stdout);
    if (acceleratorAbortOnGpuError) assert(err==cudaSuccess);
    printf(" cudaMalloc failed for %d %s \n",bytes,cudaGetErrorString(err));
  }
  return ptr;
};
inline void acceleratorFreeShared(void *ptr){
  auto err = cudaFree(ptr);
  if( err != cudaSuccess ) {
    printf(" cudaFree(Shared) failed %s \n",cudaGetErrorString(err)); fflush(stdout);
    if (acceleratorAbortOnGpuError) assert(err==cudaSuccess);
  }
};
inline void acceleratorFreeDevice(void *ptr){
  auto err = cudaFree(ptr);
  if( err != cudaSuccess ) {
    printf(" cudaFree(Device) failed %s \n",cudaGetErrorString(err)); fflush(stdout);
    if (acceleratorAbortOnGpuError) assert(err==cudaSuccess);
  }
};
inline void acceleratorCopyToDevice(void *from,void *to,size_t bytes) {
  auto err = cudaMemcpy(to,from,bytes, cudaMemcpyHostToDevice);
  if( err != cudaSuccess ) {
    printf(" cudaMemcpy(host->device) failed for %lu %s \n",bytes,cudaGetErrorString(err)); fflush(stdout);
    if (acceleratorAbortOnGpuError) assert(err==cudaSuccess);
  }
}
inline void acceleratorCopyFromDevice(void *from,void *to,size_t bytes){
  auto err = cudaMemcpy(to,from,bytes, cudaMemcpyDeviceToHost);
  if( err != cudaSuccess ) {
    printf(" cudaMemcpy(device->host) failed for %lu %s \n",bytes,cudaGetErrorString(err)); fflush(stdout);
    if (acceleratorAbortOnGpuError) assert(err==cudaSuccess);
  }
}
inline void acceleratorMemSet(void *base,int value,size_t bytes) {
  auto err = cudaMemset(base,value,bytes);
  if( err != cudaSuccess ) {
    printf(" cudaMemSet failed for %lu %s \n",bytes,cudaGetErrorString(err)); fflush(stdout);
    if (acceleratorAbortOnGpuError) assert(err==cudaSuccess);
  }
}

inline void acceleratorFreeShared(void *ptr){ cudaFree(ptr);};
inline void acceleratorFreeDevice(void *ptr){ cudaFree(ptr);};
inline void acceleratorCopyToDevice(void *from,void *to,size_t bytes) { cudaMemcpy(to,from,bytes, cudaMemcpyHostToDevice);}
inline void acceleratorCopyFromDevice(void *from,void *to,size_t bytes){ cudaMemcpy(to,from,bytes, cudaMemcpyDeviceToHost);}
inline void acceleratorMemSet(void *base,int value,size_t bytes) { cudaMemset(base,value,bytes);}
inline void acceleratorCopyDeviceToDeviceAsynch(void *from,void *to,size_t bytes) // Asynch
{
  cudaMemcpyAsync(to,from,bytes, cudaMemcpyDeviceToDevice,copyStream);

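The error-checked wrappers above all repeat one print / flush / maybe-abort pattern, which could be factored into a single checked-call helper. A sketch assuming the CUDA runtime API; gpuCheck and the local abort flag are hypothetical stand-ins for the library's acceleratorAbortOnGpuError handling:

#include <cstdio>
#include <cassert>
#include <cuda_runtime.h>

static int gpuAbortOnError = 1; // stand-in for the acceleratorAbortOnGpuError flag

inline void gpuCheck(cudaError_t err, const char *what, size_t bytes) {
  // Report the failing call, its size, and the runtime's error string,
  // then optionally abort, matching the wrappers shown above.
  if (err != cudaSuccess) {
    std::printf(" %s failed for %lu %s \n", what, (unsigned long)bytes,
                cudaGetErrorString(err));
    std::fflush(stdout);
    if (gpuAbortOnError) assert(err == cudaSuccess);
  }
}

// Usage, mirroring acceleratorAllocDevice:
//   void *ptr = nullptr;
//   gpuCheck(cudaMalloc(&ptr, bytes), "cudaMalloc", bytes);
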
@@ -474,7 +441,7 @@ inline void acceleratorMemSet(void *base,int value,size_t bytes) { hipMemset(bas

inline void acceleratorCopyDeviceToDeviceAsynch(void *from,void *to,size_t bytes) // Asynch
{
  hipMemcpyAsync(to,from,bytes, hipMemcpyDeviceToDevice,copyStream);
  hipMemcpy(to,from,bytes, hipMemcpyDeviceToDevice);
}
inline void acceleratorCopySynchronise(void) { hipStreamSynchronize(copyStream); };

@@ -494,6 +461,8 @@ inline void acceleratorCopySynchronise(void) { hipStreamSynchronize(copyStream);
  accelerator_for2dNB(iter1, num1, iter2, num2, nsimd, { __VA_ARGS__ } ); \
  accelerator_barrier(dummy);

#define GRID_ACCELERATED

#endif

//////////////////////////////////////////////
@@ -514,9 +483,10 @@ inline void acceleratorCopySynchronise(void) { hipStreamSynchronize(copyStream);
#define accelerator_for2d(iter1, num1, iter2, num2, nsimd, ... ) thread_for2d(iter1,num1,iter2,num2,{ __VA_ARGS__ });

accelerator_inline int acceleratorSIMTlane(int Nsimd) { return 0; } // CUDA specific
inline void acceleratorCopyToDevice(void *from,void *to,size_t bytes) { memcpy(to,from,bytes);}
inline void acceleratorCopyFromDevice(void *from,void *to,size_t bytes){ memcpy(to,from,bytes);}
inline void acceleratorCopyDeviceToDeviceAsynch(void *from,void *to,size_t bytes) { memcpy(to,from,bytes);}

inline void acceleratorCopyToDevice(void *from,void *to,size_t bytes) { thread_bcopy(from,to,bytes); }
inline void acceleratorCopyFromDevice(void *from,void *to,size_t bytes){ thread_bcopy(from,to,bytes);}
inline void acceleratorCopyDeviceToDeviceAsynch(void *from,void *to,size_t bytes) { thread_bcopy(from,to,bytes);}
inline void acceleratorCopySynchronise(void) {};

inline int acceleratorIsCommunicable(void *ptr){ return 1; }

@@ -72,3 +72,20 @@ Author: paboyle <paboyle@ph.ed.ac.uk>
#define thread_region  DO_PRAGMA(omp parallel)
#define thread_critical DO_PRAGMA(omp critical)

#ifdef GRID_OMP
inline void thread_bcopy(void *from, void *to,size_t bytes)
{
  uint64_t *ufrom = (uint64_t *)from;
  uint64_t *uto   = (uint64_t *)to;
  assert(bytes%8==0);
  uint64_t words=bytes/8;
  thread_for(w,words,{
    uto[w] = ufrom[w];
  });
}
#else
inline void thread_bcopy(void *from, void *to,size_t bytes)
{
  bcopy(from,to,bytes);
}
#endif

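thread_bcopy replaces the plain memcpy in the host copy paths above so that large transfers are split across threads in 64-bit words. A self-contained analogue using a raw OpenMP loop in place of Grid's thread_for macro (bcopy_omp is an illustrative name):

#include <cstdint>
#include <cstdio>
#include <cassert>

inline void bcopy_omp(const void *from, void *to, size_t bytes) {
  assert(bytes % 8 == 0);                    // copy in 64-bit words
  const uint64_t *ufrom = (const uint64_t *)from;
  uint64_t *uto = (uint64_t *)to;
  const size_t words = bytes / 8;
#pragma omp parallel for
  for (size_t w = 0; w < words; w++) uto[w] = ufrom[w];
}

int main() {
  uint64_t a[4] = {1,2,3,4}, b[4] = {0,0,0,0};
  bcopy_omp(a, b, sizeof(a));
  std::printf("%llu\n", (unsigned long long)b[3]); // 4
}
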
@@ -1,473 +0,0 @@
/*************************************************************************************

Grid physics library, www.github.com/paboyle/Grid

Source file: ./HMC/DWF2p1fIwasakiGparity.cc

Copyright (C) 2015-2016

Author: Christopher Kelly <ckelly@bnl.gov>
Author: Peter Boyle <pabobyle@ph.ed.ac.uk>

This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.

This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.

See the full license in the file "LICENSE" in the top level distribution
directory
*************************************************************************************/
/* END LEGAL */
#include <Grid/Grid.h>

using namespace Grid;

//2+1f DWF+I ensemble with G-parity BCs
//designed to reproduce ensembles in https://arxiv.org/pdf/1908.08640.pdf
struct RatQuoParameters: Serializable {
  GRID_SERIALIZABLE_CLASS_MEMBERS(RatQuoParameters,
                                  double, bnd_lo,
                                  double, bnd_hi,
                                  Integer, action_degree,
                                  double, action_tolerance,
                                  Integer, md_degree,
                                  double, md_tolerance,
                                  Integer, reliable_update_freq,
                                  Integer, bnd_check_freq);
  RatQuoParameters() {
    bnd_lo = 1e-2;
    bnd_hi = 30;
    action_degree = 10;
    action_tolerance = 1e-10;
    md_degree = 10;
    md_tolerance = 1e-8;
    bnd_check_freq = 20;
    reliable_update_freq = 50;
  }

  void Export(RationalActionParams &into) const{
    into.lo = bnd_lo;
    into.hi = bnd_hi;
    into.action_degree = action_degree;
    into.action_tolerance = action_tolerance;
    into.md_degree = md_degree;
    into.md_tolerance = md_tolerance;
    into.BoundsCheckFreq = bnd_check_freq;
  }
};


struct EvolParameters: Serializable {
  GRID_SERIALIZABLE_CLASS_MEMBERS(EvolParameters,
                                  Integer, StartTrajectory,
                                  Integer, Trajectories,
                                  Integer, SaveInterval,
                                  Integer, Steps,
                                  bool, MetropolisTest,
                                  std::string, StartingType,
                                  std::vector<Integer>, GparityDirs,
                                  RatQuoParameters, rat_quo_l,
                                  RatQuoParameters, rat_quo_s);

  EvolParameters() {
    //For initial thermalization; afterwards the user should switch Metropolis on and use StartingType=CheckpointStart
    MetropolisTest  = false;
    StartTrajectory = 0;
    Trajectories    = 50;
    SaveInterval    = 5;
    StartingType    = "ColdStart";
    GparityDirs.resize(3, 1); //1 for G-parity, 0 for periodic
    Steps = 5;
  }
};

bool fileExists(const std::string &fn){
  std::ifstream f(fn);
  return f.good();
}


struct LanczosParameters: Serializable {
  GRID_SERIALIZABLE_CLASS_MEMBERS(LanczosParameters,
                                  double, alpha,
                                  double, beta,
                                  double, mu,
                                  int, ord,
                                  int, n_stop,
                                  int, n_want,
                                  int, n_use,
                                  double, tolerance);

  LanczosParameters() {
    alpha = 35;
    beta = 5;
    mu = 0;
    ord = 100;
    n_stop = 10;
    n_want = 10;
    n_use = 15;
    tolerance = 1e-6;
  }
};


template<typename FermionActionD, typename FermionFieldD>
void computeEigenvalues(std::string param_file,
                        GridCartesian* Grid, GridRedBlackCartesian* rbGrid, const LatticeGaugeFieldD &latt, //expect lattice to have been initialized to something
                        FermionActionD &action, GridParallelRNG &rng){

  LanczosParameters params;
  if(fileExists(param_file)){
    std::cout << GridLogMessage << " Reading " << param_file << std::endl;
    Grid::XmlReader rd(param_file);
    read(rd, "LanczosParameters", params);
  }else if(!GlobalSharedMemory::WorldRank){
    std::cout << GridLogMessage << " File " << param_file << " does not exist" << std::endl;
    std::cout << GridLogMessage << " Writing xml template to " << param_file << ".templ" << std::endl;
    Grid::XmlWriter wr(param_file + ".templ");
    write(wr, "LanczosParameters", params);
  }

  FermionFieldD gauss_o(rbGrid);
  FermionFieldD gauss(Grid);
  gaussian(rng, gauss);
  pickCheckerboard(Odd, gauss_o, gauss);

  action.ImportGauge(latt);

  SchurDiagMooeeOperator<FermionActionD, FermionFieldD> hermop(action);
  PlainHermOp<FermionFieldD> hermop_wrap(hermop);
  //ChebyshevLanczos<FermionFieldD> Cheb(params.alpha, params.beta, params.mu, params.ord);
  assert(params.mu == 0.0);

  Chebyshev<FermionFieldD> Cheb(params.beta*params.beta, params.alpha*params.alpha, params.ord+1);
  FunctionHermOp<FermionFieldD> Cheb_wrap(Cheb, hermop);

  std::cout << "IRL: alpha=" << params.alpha << " beta=" << params.beta << " mu=" << params.mu << " ord=" << params.ord << std::endl;
  ImplicitlyRestartedLanczos<FermionFieldD> IRL(Cheb_wrap, hermop_wrap, params.n_stop, params.n_want, params.n_use, params.tolerance, 10000);

  std::vector<RealD> eval(params.n_use);
  std::vector<FermionFieldD> evec(params.n_use, rbGrid);
  int Nconv;
  IRL.calc(eval, evec, gauss_o, Nconv);

  std::cout << "Eigenvalues:" << std::endl;
  for(int i=0;i<params.n_want;i++){
    std::cout << i << " " << eval[i] << std::endl;
  }
}


//Check the quality of the RHMC approx
template<typename FermionActionD, typename FermionFieldD, typename RHMCtype>
void checkRHMC(GridCartesian* Grid, GridRedBlackCartesian* rbGrid, const LatticeGaugeFieldD &latt, //expect lattice to have been initialized to something
               FermionActionD &numOp, FermionActionD &denOp, RHMCtype &rhmc, GridParallelRNG &rng,
               int inv_pow, const std::string &quark_descr){

  FermionFieldD gauss_o(rbGrid);
  FermionFieldD gauss(Grid);
  gaussian(rng, gauss);
  pickCheckerboard(Odd, gauss_o, gauss);

  numOp.ImportGauge(latt);
  denOp.ImportGauge(latt);

  typedef typename FermionActionD::Impl_t FermionImplPolicyD;
  SchurDifferentiableOperator<FermionImplPolicyD> MdagM(numOp);
  SchurDifferentiableOperator<FermionImplPolicyD> VdagV(denOp);

  std::cout << "Starting: Checking quality of RHMC action approx for " << quark_descr << " quark numerator and power -1/" << inv_pow << std::endl;
  InversePowerBoundsCheck(inv_pow, 10000, 1e16, MdagM,gauss_o, rhmc.ApproxNegPowerAction); //use large tolerance to prevent exit on fail; we are trying to tune here!
  std::cout << "Finished: Checking quality of RHMC action approx for " << quark_descr << " quark numerator and power -1/" << inv_pow << std::endl;

  std::cout << "Starting: Checking quality of RHMC action approx for " << quark_descr << " quark numerator and power -1/" << 2*inv_pow << std::endl;
  InversePowerBoundsCheck(2*inv_pow, 10000, 1e16, MdagM,gauss_o, rhmc.ApproxNegHalfPowerAction);
  std::cout << "Finished: Checking quality of RHMC action approx for " << quark_descr << " quark numerator and power -1/" << 2*inv_pow << std::endl;

  std::cout << "Starting: Checking quality of RHMC action approx for " << quark_descr << " quark denominator and power -1/" << inv_pow << std::endl;
  InversePowerBoundsCheck(inv_pow, 10000, 1e16, VdagV,gauss_o, rhmc.ApproxNegPowerAction);
  std::cout << "Finished: Checking quality of RHMC action approx for " << quark_descr << " quark denominator and power -1/" << inv_pow << std::endl;

  std::cout << "Starting: Checking quality of RHMC action approx for " << quark_descr << " quark denominator and power -1/" << 2*inv_pow << std::endl;
  InversePowerBoundsCheck(2*inv_pow, 10000, 1e16, VdagV,gauss_o, rhmc.ApproxNegHalfPowerAction);
  std::cout << "Finished: Checking quality of RHMC action approx for " << quark_descr << " quark denominator and power -1/" << 2*inv_pow << std::endl;

  std::cout << "-------------------------------------------------------------------------------" << std::endl;

  std::cout << "Starting: Checking quality of RHMC MD approx for " << quark_descr << " quark numerator and power -1/" << inv_pow << std::endl;
  InversePowerBoundsCheck(inv_pow, 10000, 1e16, MdagM,gauss_o, rhmc.ApproxNegPowerMD);
  std::cout << "Finished: Checking quality of RHMC MD approx for " << quark_descr << " quark numerator and power -1/" << inv_pow << std::endl;

  std::cout << "Starting: Checking quality of RHMC MD approx for " << quark_descr << " quark numerator and power -1/" << 2*inv_pow << std::endl;
  InversePowerBoundsCheck(2*inv_pow, 10000, 1e16, MdagM,gauss_o, rhmc.ApproxNegHalfPowerMD);
  std::cout << "Finished: Checking quality of RHMC MD approx for " << quark_descr << " quark numerator and power -1/" << 2*inv_pow << std::endl;

  std::cout << "Starting: Checking quality of RHMC MD approx for " << quark_descr << " quark denominator and power -1/" << inv_pow << std::endl;
  InversePowerBoundsCheck(inv_pow, 10000, 1e16, VdagV,gauss_o, rhmc.ApproxNegPowerMD);
  std::cout << "Finished: Checking quality of RHMC MD approx for " << quark_descr << " quark denominator and power -1/" << inv_pow << std::endl;

  std::cout << "Starting: Checking quality of RHMC MD approx for " << quark_descr << " quark denominator and power -1/" << 2*inv_pow << std::endl;
  InversePowerBoundsCheck(2*inv_pow, 10000, 1e16, VdagV,gauss_o, rhmc.ApproxNegHalfPowerMD);
  std::cout << "Finished: Checking quality of RHMC MD approx for " << quark_descr << " quark denominator and power -1/" << 2*inv_pow << std::endl;
}


int main(int argc, char **argv) {
  Grid_init(&argc, &argv);
  int threads = GridThread::GetThreads();
  // here make a routine to print all the relevant information on the run
  std::cout << GridLogMessage << "Grid is setup to use " << threads << " threads" << std::endl;

  std::string param_file = "params.xml";
  bool file_load_check = false;
  for(int i=1;i<argc;i++){
    std::string sarg(argv[i]);
    if(sarg == "--param_file"){
      assert(i!=argc-1);
      param_file = argv[i+1];
    }else if(sarg == "--read_check"){ //check the fields load correctly and pass checksum/plaquette repro
      file_load_check = true;
    }
  }

  //Read the user parameters
  EvolParameters user_params;

  if(fileExists(param_file)){
    std::cout << GridLogMessage << " Reading " << param_file << std::endl;
    Grid::XmlReader rd(param_file);
    read(rd, "Params", user_params);
  }else if(!GlobalSharedMemory::WorldRank){
    std::cout << GridLogMessage << " File " << param_file << " does not exist" << std::endl;
    std::cout << GridLogMessage << " Writing xml template to " << param_file << ".templ" << std::endl;
    Grid::XmlWriter wr(param_file + ".templ");
    write(wr, "Params", user_params);

    std::cout << GridLogMessage << " Done" << std::endl;
    Grid_finalize();
    return 0;
  }

  //Check the parameters
  if(user_params.GparityDirs.size() != Nd-1){
    std::cerr << "Error in input parameters: expect GparityDirs to have size = " << Nd-1 << std::endl;
    exit(1);
  }
  for(int i=0;i<Nd-1;i++)
    if(user_params.GparityDirs[i] != 0 && user_params.GparityDirs[i] != 1){
      std::cerr << "Error in input parameters: expect GparityDirs values to be 0 (periodic) or 1 (G-parity)" << std::endl;
      exit(1);
    }

  // Typedefs to simplify notation
  typedef GparityDomainWallFermionD FermionActionD;
  typedef typename FermionActionD::Impl_t FermionImplPolicyD;
  typedef typename FermionActionD::FermionField FermionFieldD;

  typedef GparityDomainWallFermionF FermionActionF;
  typedef typename FermionActionF::Impl_t FermionImplPolicyF;
  typedef typename FermionActionF::FermionField FermionFieldF;

  typedef GeneralEvenOddRatioRationalMixedPrecPseudoFermionAction<FermionImplPolicyD,FermionImplPolicyF> MixedPrecRHMC;
  typedef GeneralEvenOddRatioRationalPseudoFermionAction<FermionImplPolicyD> DoublePrecRHMC;

  //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
  IntegratorParameters MD;
  typedef ConjugateHMCRunnerD<MinimumNorm2> HMCWrapper; //NB: This is the "Omelyan integrator"
  typedef HMCWrapper::ImplPolicy GaugeImplPolicy;
  MD.name    = std::string("MinimumNorm2");
  MD.MDsteps = user_params.Steps;
  MD.trajL   = 1.0;

  HMCparameters HMCparams;
  HMCparams.StartTrajectory  = user_params.StartTrajectory;
  HMCparams.Trajectories     = user_params.Trajectories;
  HMCparams.NoMetropolisUntil= 0;
  HMCparams.StartingType     = user_params.StartingType;
  HMCparams.MetropolisTest   = user_params.MetropolisTest;
  HMCparams.MD               = MD;
  HMCWrapper TheHMC(HMCparams);

  // Grid from the command line arguments --grid and --mpi
  TheHMC.Resources.AddFourDimGrid("gauge"); // use default simd lanes decomposition

  CheckpointerParameters CPparams;
  CPparams.config_prefix = "ckpoint_lat";
  CPparams.rng_prefix    = "ckpoint_rng";
  CPparams.saveInterval  = user_params.SaveInterval;
  CPparams.format        = "IEEE64BIG";
  TheHMC.Resources.LoadNerscCheckpointer(CPparams);

  //Note that checkpointing saves the RNG state so that this initialization is required only for the very first configuration
  RNGModuleParameters RNGpar;
  RNGpar.serial_seeds   = "1 2 3 4 5";
  RNGpar.parallel_seeds = "6 7 8 9 10";
  TheHMC.Resources.SetRNGSeeds(RNGpar);

  typedef PlaquetteMod<GaugeImplPolicy> PlaqObs;
  TheHMC.Resources.AddObservable<PlaqObs>();
  //////////////////////////////////////////////

  const int Ls      = 16;
  Real beta         = 2.13;
  Real light_mass   = 0.01;
  Real strange_mass = 0.032;
  Real pv_mass      = 1.0;
  RealD M5          = 1.8;

  //Setup the Grids
  auto GridPtrD   = TheHMC.Resources.GetCartesian();
  auto GridRBPtrD = TheHMC.Resources.GetRBCartesian();
  auto FGridD     = SpaceTimeGrid::makeFiveDimGrid(Ls,GridPtrD);
  auto FrbGridD   = SpaceTimeGrid::makeFiveDimRedBlackGrid(Ls,GridPtrD);

  GridCartesian* GridPtrF = SpaceTimeGrid::makeFourDimGrid(GridDefaultLatt(), GridDefaultSimd(Nd, vComplexF::Nsimd()), GridDefaultMpi());
  GridRedBlackCartesian* GridRBPtrF = SpaceTimeGrid::makeFourDimRedBlackGrid(GridPtrF);
  auto FGridF     = SpaceTimeGrid::makeFiveDimGrid(Ls,GridPtrF);
  auto FrbGridF   = SpaceTimeGrid::makeFiveDimRedBlackGrid(Ls,GridPtrF);

  ConjugateIwasakiGaugeActionD GaugeAction(beta);

  // temporarily need a gauge field
  LatticeGaugeFieldD Ud(GridPtrD);
  LatticeGaugeFieldF Uf(GridPtrF);

  //Setup the BCs
  FermionActionD::ImplParams Params;
  for(int i=0;i<Nd-1;i++) Params.twists[i] = user_params.GparityDirs[i]; //G-parity directions
  Params.twists[Nd-1] = 1; //APBC in time direction

  std::vector<int> dirs4(Nd);
  for(int i=0;i<Nd-1;i++) dirs4[i] = user_params.GparityDirs[i];
  dirs4[Nd-1] = 0; //periodic gauge BC in time

  GaugeImplPolicy::setDirections(dirs4); //gauge BC

  //Run optional gauge field checksum checker and exit
  if(file_load_check){
    TheHMC.initializeGaugeFieldAndRNGs(Ud);
    std::cout << GridLogMessage << " Done" << std::endl;
    Grid_finalize();
    return 0;
  }

  ////////////////////////////////////
  // Collect actions
  ////////////////////////////////////
  ActionLevel<HMCWrapper::Field> Level1(1); //light quark + strange quark
  ActionLevel<HMCWrapper::Field> Level2(8); //gauge (8 increments per step)

  /////////////////////////////////////////////////////////////
  // Light action
  /////////////////////////////////////////////////////////////

  FermionActionD Numerator_lD(Ud,*FGridD,*FrbGridD,*GridPtrD,*GridRBPtrD, light_mass,M5,Params);
  FermionActionD Denominator_lD(Ud,*FGridD,*FrbGridD,*GridPtrD,*GridRBPtrD, pv_mass,M5,Params);

  FermionActionF Numerator_lF(Uf,*FGridF,*FrbGridF,*GridPtrF,*GridRBPtrF, light_mass,M5,Params);
  FermionActionF Denominator_lF(Uf,*FGridF,*FrbGridF,*GridPtrF,*GridRBPtrF, pv_mass,M5,Params);

  RationalActionParams rat_act_params_l;
  rat_act_params_l.inv_pow  = 2; // (M^dag M)^{1/2}
  rat_act_params_l.precision= 60;
  rat_act_params_l.MaxIter  = 10000;
  user_params.rat_quo_l.Export(rat_act_params_l);
  std::cout << GridLogMessage << " Light quark bounds check every " << rat_act_params_l.BoundsCheckFreq << " trajectories (avg)" << std::endl;

  MixedPrecRHMC Quotient_l(Denominator_lD, Numerator_lD, Denominator_lF, Numerator_lF, rat_act_params_l, user_params.rat_quo_l.reliable_update_freq);
  //DoublePrecRHMC Quotient_l(Denominator_lD, Numerator_lD, rat_act_params_l);
  Level1.push_back(&Quotient_l);

  ////////////////////////////////////
  // Strange action
  ////////////////////////////////////
  FermionActionD Numerator_sD(Ud,*FGridD,*FrbGridD,*GridPtrD,*GridRBPtrD,strange_mass,M5,Params);
  FermionActionD Denominator_sD(Ud,*FGridD,*FrbGridD,*GridPtrD,*GridRBPtrD, pv_mass,M5,Params);

  FermionActionF Numerator_sF(Uf,*FGridF,*FrbGridF,*GridPtrF,*GridRBPtrF,strange_mass,M5,Params);
  FermionActionF Denominator_sF(Uf,*FGridF,*FrbGridF,*GridPtrF,*GridRBPtrF, pv_mass,M5,Params);

  RationalActionParams rat_act_params_s;
  rat_act_params_s.inv_pow  = 4; // (M^dag M)^{1/4}
  rat_act_params_s.precision= 60;
  rat_act_params_s.MaxIter  = 10000;
  user_params.rat_quo_s.Export(rat_act_params_s);
  std::cout << GridLogMessage << " Heavy quark bounds check every " << rat_act_params_s.BoundsCheckFreq << " trajectories (avg)" << std::endl;

  MixedPrecRHMC Quotient_s(Denominator_sD, Numerator_sD, Denominator_sF, Numerator_sF, rat_act_params_s, user_params.rat_quo_s.reliable_update_freq);
  //DoublePrecRHMC Quotient_s(Denominator_sD, Numerator_sD, rat_act_params_s);
  Level1.push_back(&Quotient_s);

  /////////////////////////////////////////////////////////////
  // Gauge action
  /////////////////////////////////////////////////////////////
  Level2.push_back(&GaugeAction);
  TheHMC.TheAction.push_back(Level1);
  TheHMC.TheAction.push_back(Level2);
  std::cout << GridLogMessage << " Action complete "<< std::endl;

  //Action tuning
  bool tune_rhmc_l=false, tune_rhmc_s=false, eigenrange_l=false, eigenrange_s=false;
  std::string lanc_params_l, lanc_params_s;
  for(int i=1;i<argc;i++){
    std::string sarg(argv[i]);
    if(sarg == "--tune_rhmc_l") tune_rhmc_l=true;
    else if(sarg == "--tune_rhmc_s") tune_rhmc_s=true;
    else if(sarg == "--eigenrange_l"){
      assert(i < argc-1);
      eigenrange_l=true;
      lanc_params_l = argv[i+1];
    }
    else if(sarg == "--eigenrange_s"){
      assert(i < argc-1);
      eigenrange_s=true;
      lanc_params_s = argv[i+1];
    }
  }
  if(tune_rhmc_l || tune_rhmc_s || eigenrange_l || eigenrange_s){
    TheHMC.initializeGaugeFieldAndRNGs(Ud);
    if(eigenrange_l) computeEigenvalues<FermionActionD, FermionFieldD>(lanc_params_l, FGridD, FrbGridD, Ud, Numerator_lD, TheHMC.Resources.GetParallelRNG());
    if(eigenrange_s) computeEigenvalues<FermionActionD, FermionFieldD>(lanc_params_s, FGridD, FrbGridD, Ud, Numerator_sD, TheHMC.Resources.GetParallelRNG());
    if(tune_rhmc_l) checkRHMC<FermionActionD, FermionFieldD, decltype(Quotient_l)>(FGridD, FrbGridD, Ud, Numerator_lD, Denominator_lD, Quotient_l, TheHMC.Resources.GetParallelRNG(), 2, "light");
    if(tune_rhmc_s) checkRHMC<FermionActionD, FermionFieldD, decltype(Quotient_s)>(FGridD, FrbGridD, Ud, Numerator_sD, Denominator_sD, Quotient_s, TheHMC.Resources.GetParallelRNG(), 4, "strange");

    std::cout << GridLogMessage << " Done" << std::endl;
    Grid_finalize();
    return 0;
  }

  //Run the HMC
  std::cout << GridLogMessage << " Running the HMC "<< std::endl;
  TheHMC.Run();

  std::cout << GridLogMessage << " Done" << std::endl;
  Grid_finalize();
  return 0;
} // main

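For reference, the tuning options parsed at the end of the deleted program above compose on the command line; a hypothetical invocation (binary name inferred from the source file) would be `./DWF2p1fIwasakiGparity --param_file params.xml --eigenrange_l lanc_l.xml --tune_rhmc_l`, which loads or templates the XML parameters, initializes the gauge field and RNGs, reports the operator eigenvalue range and the RHMC bounds checks, and exits without evolving any trajectories.
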
@@ -1,473 +0,0 @@
/*************************************************************************************

Grid physics library, www.github.com/paboyle/Grid

Source file: ./HMC/DWF2p1fIwasakiGparity.cc

Copyright (C) 2015-2016

Author: Christopher Kelly <ckelly@bnl.gov>
Author: Peter Boyle <pabobyle@ph.ed.ac.uk>

This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.

This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.

See the full license in the file "LICENSE" in the top level distribution
directory
*************************************************************************************/
/* END LEGAL */
#include <Grid/Grid.h>

using namespace Grid;

//2+1f DWF+I ensemble with G-parity BCs
//designed to reproduce ensembles in https://arxiv.org/pdf/1908.08640.pdf
struct RatQuoParameters: Serializable {
  GRID_SERIALIZABLE_CLASS_MEMBERS(RatQuoParameters,
                                  double, bnd_lo,
                                  double, bnd_hi,
                                  Integer, action_degree,
                                  double, action_tolerance,
                                  Integer, md_degree,
                                  double, md_tolerance,
                                  Integer, reliable_update_freq,
                                  Integer, bnd_check_freq);
  RatQuoParameters() {
    bnd_lo = 1e-2;
    bnd_hi = 30;
    action_degree = 10;
    action_tolerance = 1e-10;
    md_degree = 10;
    md_tolerance = 1e-8;
    bnd_check_freq = 20;
    reliable_update_freq = 50;
  }

  void Export(RationalActionParams &into) const{
    into.lo = bnd_lo;
    into.hi = bnd_hi;
    into.action_degree = action_degree;
    into.action_tolerance = action_tolerance;
    into.md_degree = md_degree;
    into.md_tolerance = md_tolerance;
    into.BoundsCheckFreq = bnd_check_freq;
  }
};


struct EvolParameters: Serializable {
  GRID_SERIALIZABLE_CLASS_MEMBERS(EvolParameters,
                                  Integer, StartTrajectory,
                                  Integer, Trajectories,
                                  Integer, SaveInterval,
                                  Integer, Steps,
                                  bool, MetropolisTest,
                                  std::string, StartingType,
                                  std::vector<Integer>, GparityDirs,
                                  RatQuoParameters, rat_quo_l,
                                  RatQuoParameters, rat_quo_s);

  EvolParameters() {
    //For initial thermalization; afterwards the user should switch Metropolis on and use StartingType=CheckpointStart
    MetropolisTest  = false;
    StartTrajectory = 0;
    Trajectories    = 50;
    SaveInterval    = 5;
    StartingType    = "ColdStart";
    GparityDirs.resize(3, 1); //1 for G-parity, 0 for periodic
    Steps = 5;
  }
};

bool fileExists(const std::string &fn){
  std::ifstream f(fn);
  return f.good();
}


struct LanczosParameters: Serializable {
  GRID_SERIALIZABLE_CLASS_MEMBERS(LanczosParameters,
                                  double, alpha,
                                  double, beta,
                                  double, mu,
                                  int, ord,
                                  int, n_stop,
                                  int, n_want,
                                  int, n_use,
                                  double, tolerance);

  LanczosParameters() {
    alpha = 35;
    beta = 5;
    mu = 0;
    ord = 100;
    n_stop = 10;
    n_want = 10;
    n_use = 15;
    tolerance = 1e-6;
  }
};


template<typename FermionActionD, typename FermionFieldD>
void computeEigenvalues(std::string param_file,
                        GridCartesian* Grid, GridRedBlackCartesian* rbGrid, const LatticeGaugeFieldD &latt, //expect lattice to have been initialized to something
                        FermionActionD &action, GridParallelRNG &rng){

  LanczosParameters params;
  if(fileExists(param_file)){
    std::cout << GridLogMessage << " Reading " << param_file << std::endl;
    Grid::XmlReader rd(param_file);
    read(rd, "LanczosParameters", params);
  }else if(!GlobalSharedMemory::WorldRank){
    std::cout << GridLogMessage << " File " << param_file << " does not exist" << std::endl;
    std::cout << GridLogMessage << " Writing xml template to " << param_file << ".templ" << std::endl;
    Grid::XmlWriter wr(param_file + ".templ");
    write(wr, "LanczosParameters", params);
  }

  FermionFieldD gauss_o(rbGrid);
  FermionFieldD gauss(Grid);
  gaussian(rng, gauss);
  pickCheckerboard(Odd, gauss_o, gauss);

  action.ImportGauge(latt);

  SchurDiagMooeeOperator<FermionActionD, FermionFieldD> hermop(action);
  PlainHermOp<FermionFieldD> hermop_wrap(hermop);
  //ChebyshevLanczos<FermionFieldD> Cheb(params.alpha, params.beta, params.mu, params.ord);
  assert(params.mu == 0.0);

  Chebyshev<FermionFieldD> Cheb(params.beta*params.beta, params.alpha*params.alpha, params.ord+1);
  FunctionHermOp<FermionFieldD> Cheb_wrap(Cheb, hermop);

  std::cout << "IRL: alpha=" << params.alpha << " beta=" << params.beta << " mu=" << params.mu << " ord=" << params.ord << std::endl;
  ImplicitlyRestartedLanczos<FermionFieldD> IRL(Cheb_wrap, hermop_wrap, params.n_stop, params.n_want, params.n_use, params.tolerance, 10000);

  std::vector<RealD> eval(params.n_use);
  std::vector<FermionFieldD> evec(params.n_use, rbGrid);
  int Nconv;
  IRL.calc(eval, evec, gauss_o, Nconv);

  std::cout << "Eigenvalues:" << std::endl;
  for(int i=0;i<params.n_want;i++){
    std::cout << i << " " << eval[i] << std::endl;
  }
}


//Check the quality of the RHMC approx
template<typename FermionActionD, typename FermionFieldD, typename RHMCtype>
void checkRHMC(GridCartesian* Grid, GridRedBlackCartesian* rbGrid, const LatticeGaugeFieldD &latt, //expect lattice to have been initialized to something
               FermionActionD &numOp, FermionActionD &denOp, RHMCtype &rhmc, GridParallelRNG &rng,
               int inv_pow, const std::string &quark_descr){

  FermionFieldD gauss_o(rbGrid);
  FermionFieldD gauss(Grid);
  gaussian(rng, gauss);
  pickCheckerboard(Odd, gauss_o, gauss);

  numOp.ImportGauge(latt);
  denOp.ImportGauge(latt);

  typedef typename FermionActionD::Impl_t FermionImplPolicyD;
  SchurDifferentiableOperator<FermionImplPolicyD> MdagM(numOp);
  SchurDifferentiableOperator<FermionImplPolicyD> VdagV(denOp);

  std::cout << "Starting: Checking quality of RHMC action approx for " << quark_descr << " quark numerator and power -1/" << inv_pow << std::endl;
  InversePowerBoundsCheck(inv_pow, 10000, 1e16, MdagM,gauss_o, rhmc.ApproxNegPowerAction); //use large tolerance to prevent exit on fail; we are trying to tune here!
  std::cout << "Finished: Checking quality of RHMC action approx for " << quark_descr << " quark numerator and power -1/" << inv_pow << std::endl;

  std::cout << "Starting: Checking quality of RHMC action approx for " << quark_descr << " quark numerator and power -1/" << 2*inv_pow << std::endl;
  InversePowerBoundsCheck(2*inv_pow, 10000, 1e16, MdagM,gauss_o, rhmc.ApproxNegHalfPowerAction);
  std::cout << "Finished: Checking quality of RHMC action approx for " << quark_descr << " quark numerator and power -1/" << 2*inv_pow << std::endl;

  std::cout << "Starting: Checking quality of RHMC action approx for " << quark_descr << " quark denominator and power -1/" << inv_pow << std::endl;
  InversePowerBoundsCheck(inv_pow, 10000, 1e16, VdagV,gauss_o, rhmc.ApproxNegPowerAction);
  std::cout << "Finished: Checking quality of RHMC action approx for " << quark_descr << " quark denominator and power -1/" << inv_pow << std::endl;

  std::cout << "Starting: Checking quality of RHMC action approx for " << quark_descr << " quark denominator and power -1/" << 2*inv_pow << std::endl;
  InversePowerBoundsCheck(2*inv_pow, 10000, 1e16, VdagV,gauss_o, rhmc.ApproxNegHalfPowerAction);
  std::cout << "Finished: Checking quality of RHMC action approx for " << quark_descr << " quark denominator and power -1/" << 2*inv_pow << std::endl;

  std::cout << "-------------------------------------------------------------------------------" << std::endl;

  std::cout << "Starting: Checking quality of RHMC MD approx for " << quark_descr << " quark numerator and power -1/" << inv_pow << std::endl;
  InversePowerBoundsCheck(inv_pow, 10000, 1e16, MdagM,gauss_o, rhmc.ApproxNegPowerMD);
  std::cout << "Finished: Checking quality of RHMC MD approx for " << quark_descr << " quark numerator and power -1/" << inv_pow << std::endl;

  std::cout << "Starting: Checking quality of RHMC MD approx for " << quark_descr << " quark numerator and power -1/" << 2*inv_pow << std::endl;
  InversePowerBoundsCheck(2*inv_pow, 10000, 1e16, MdagM,gauss_o, rhmc.ApproxNegHalfPowerMD);
  std::cout << "Finished: Checking quality of RHMC MD approx for " << quark_descr << " quark numerator and power -1/" << 2*inv_pow << std::endl;

  std::cout << "Starting: Checking quality of RHMC MD approx for " << quark_descr << " quark denominator and power -1/" << inv_pow << std::endl;
  InversePowerBoundsCheck(inv_pow, 10000, 1e16, VdagV,gauss_o, rhmc.ApproxNegPowerMD);
  std::cout << "Finished: Checking quality of RHMC MD approx for " << quark_descr << " quark denominator and power -1/" << inv_pow << std::endl;

  std::cout << "Starting: Checking quality of RHMC MD approx for " << quark_descr << " quark denominator and power -1/" << 2*inv_pow << std::endl;
  InversePowerBoundsCheck(2*inv_pow, 10000, 1e16, VdagV,gauss_o, rhmc.ApproxNegHalfPowerMD);
  std::cout << "Finished: Checking quality of RHMC MD approx for " << quark_descr << " quark denominator and power -1/" << 2*inv_pow << std::endl;
}


int main(int argc, char **argv) {
  Grid_init(&argc, &argv);
  int threads = GridThread::GetThreads();
  // here make a routine to print all the relevant information on the run
  std::cout << GridLogMessage << "Grid is setup to use " << threads << " threads" << std::endl;

  std::string param_file = "params.xml";
  bool file_load_check = false;
  for(int i=1;i<argc;i++){
    std::string sarg(argv[i]);
    if(sarg == "--param_file"){
      assert(i!=argc-1);
      param_file = argv[i+1];
    }else if(sarg == "--read_check"){ //check the fields load correctly and pass checksum/plaquette repro
      file_load_check = true;
    }
  }

  //Read the user parameters
  EvolParameters user_params;

  if(fileExists(param_file)){
    std::cout << GridLogMessage << " Reading " << param_file << std::endl;
    Grid::XmlReader rd(param_file);
    read(rd, "Params", user_params);
  }else if(!GlobalSharedMemory::WorldRank){
    std::cout << GridLogMessage << " File " << param_file << " does not exist" << std::endl;
    std::cout << GridLogMessage << " Writing xml template to " << param_file << ".templ" << std::endl;
    Grid::XmlWriter wr(param_file + ".templ");
    write(wr, "Params", user_params);

    std::cout << GridLogMessage << " Done" << std::endl;
    Grid_finalize();
    return 0;
  }

  //Check the parameters
  if(user_params.GparityDirs.size() != Nd-1){
    std::cerr << "Error in input parameters: expect GparityDirs to have size = " << Nd-1 << std::endl;
    exit(1);
  }
  for(int i=0;i<Nd-1;i++)
    if(user_params.GparityDirs[i] != 0 && user_params.GparityDirs[i] != 1){
      std::cerr << "Error in input parameters: expect GparityDirs values to be 0 (periodic) or 1 (G-parity)" << std::endl;
      exit(1);
    }

  // Typedefs to simplify notation
  typedef GparityDomainWallFermionD FermionActionD;
  typedef typename FermionActionD::Impl_t FermionImplPolicyD;
  typedef typename FermionActionD::FermionField FermionFieldD;

  typedef GparityDomainWallFermionF FermionActionF;
  typedef typename FermionActionF::Impl_t FermionImplPolicyF;
  typedef typename FermionActionF::FermionField FermionFieldF;

  typedef GeneralEvenOddRatioRationalMixedPrecPseudoFermionAction<FermionImplPolicyD,FermionImplPolicyF> MixedPrecRHMC;
  typedef GeneralEvenOddRatioRationalPseudoFermionAction<FermionImplPolicyD> DoublePrecRHMC;

  //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
  IntegratorParameters MD;
  typedef ConjugateHMCRunnerD<MinimumNorm2> HMCWrapper; //NB: This is the "Omelyan integrator"
  typedef HMCWrapper::ImplPolicy GaugeImplPolicy;
  MD.name    = std::string("MinimumNorm2");
  MD.MDsteps = user_params.Steps;
  MD.trajL   = 1.0;

  HMCparameters HMCparams;
  HMCparams.StartTrajectory  = user_params.StartTrajectory;
  HMCparams.Trajectories     = user_params.Trajectories;
  HMCparams.NoMetropolisUntil= 0;
  HMCparams.StartingType     = user_params.StartingType;
  HMCparams.MetropolisTest   = user_params.MetropolisTest;
  HMCparams.MD               = MD;
  HMCWrapper TheHMC(HMCparams);

  // Grid from the command line arguments --grid and --mpi
  TheHMC.Resources.AddFourDimGrid("gauge"); // use default simd lanes decomposition

  CheckpointerParameters CPparams;
  CPparams.config_prefix = "ckpoint_lat";
  CPparams.rng_prefix    = "ckpoint_rng";
  CPparams.saveInterval  = user_params.SaveInterval;
  CPparams.format        = "IEEE64BIG";
  TheHMC.Resources.LoadNerscCheckpointer(CPparams);

  //Note that checkpointing saves the RNG state so that this initialization is required only for the very first configuration
  RNGModuleParameters RNGpar;
  RNGpar.serial_seeds   = "1 2 3 4 5";
  RNGpar.parallel_seeds = "6 7 8 9 10";
  TheHMC.Resources.SetRNGSeeds(RNGpar);

  typedef PlaquetteMod<GaugeImplPolicy> PlaqObs;
  TheHMC.Resources.AddObservable<PlaqObs>();
  //////////////////////////////////////////////

  const int Ls      = 16;
  Real beta         = 2.13;
  Real light_mass   = 0.01;
  Real strange_mass = 0.032;
  Real pv_mass      = 1.0;
  RealD M5          = 1.8;

  //Setup the Grids
  auto GridPtrD   = TheHMC.Resources.GetCartesian();
  auto GridRBPtrD = TheHMC.Resources.GetRBCartesian();
  auto FGridD     = SpaceTimeGrid::makeFiveDimGrid(Ls,GridPtrD);
  auto FrbGridD   = SpaceTimeGrid::makeFiveDimRedBlackGrid(Ls,GridPtrD);

  GridCartesian* GridPtrF = SpaceTimeGrid::makeFourDimGrid(GridDefaultLatt(), GridDefaultSimd(Nd, vComplexF::Nsimd()), GridDefaultMpi());
  GridRedBlackCartesian* GridRBPtrF = SpaceTimeGrid::makeFourDimRedBlackGrid(GridPtrF);
  auto FGridF     = SpaceTimeGrid::makeFiveDimGrid(Ls,GridPtrF);
  auto FrbGridF   = SpaceTimeGrid::makeFiveDimRedBlackGrid(Ls,GridPtrF);

  ConjugateIwasakiGaugeActionD GaugeAction(beta);

  // temporarily need a gauge field
  LatticeGaugeFieldD Ud(GridPtrD);
  LatticeGaugeFieldF Uf(GridPtrF);

  //Setup the BCs
  FermionActionD::ImplParams Params;
  for(int i=0;i<Nd-1;i++) Params.twists[i] = user_params.GparityDirs[i]; //G-parity directions
  Params.twists[Nd-1] = 1; //APBC in time direction

  std::vector<int> dirs4(Nd);
  for(int i=0;i<Nd-1;i++) dirs4[i] = user_params.GparityDirs[i];
  dirs4[Nd-1] = 0; //periodic gauge BC in time

  GaugeImplPolicy::setDirections(dirs4); //gauge BC

  //Run optional gauge field checksum checker and exit
  if(file_load_check){
    TheHMC.initializeGaugeFieldAndRNGs(Ud);
    std::cout << GridLogMessage << " Done" << std::endl;
    Grid_finalize();
    return 0;
  }

  ////////////////////////////////////
  // Collect actions
  ////////////////////////////////////
  ActionLevel<HMCWrapper::Field> Level1(1); //light quark + strange quark
  ActionLevel<HMCWrapper::Field> Level2(8); //gauge (8 increments per step)

  /////////////////////////////////////////////////////////////
  // Light action
  /////////////////////////////////////////////////////////////

  FermionActionD Numerator_lD(Ud,*FGridD,*FrbGridD,*GridPtrD,*GridRBPtrD, light_mass,M5,Params);
  FermionActionD Denominator_lD(Ud,*FGridD,*FrbGridD,*GridPtrD,*GridRBPtrD, pv_mass,M5,Params);

  FermionActionF Numerator_lF(Uf,*FGridF,*FrbGridF,*GridPtrF,*GridRBPtrF, light_mass,M5,Params);
  FermionActionF Denominator_lF(Uf,*FGridF,*FrbGridF,*GridPtrF,*GridRBPtrF, pv_mass,M5,Params);

  RationalActionParams rat_act_params_l;
  rat_act_params_l.inv_pow  = 2; // (M^dag M)^{1/2}
  rat_act_params_l.precision= 60;
  rat_act_params_l.MaxIter  = 10000;
  user_params.rat_quo_l.Export(rat_act_params_l);
  std::cout << GridLogMessage << " Light quark bounds check every " << rat_act_params_l.BoundsCheckFreq << " trajectories (avg)" << std::endl;

  //MixedPrecRHMC Quotient_l(Denominator_lD, Numerator_lD, Denominator_lF, Numerator_lF, rat_act_params_l, user_params.rat_quo_l.reliable_update_freq);
  DoublePrecRHMC Quotient_l(Denominator_lD, Numerator_lD, rat_act_params_l);
  Level1.push_back(&Quotient_l);

  ////////////////////////////////////
  // Strange action
  ////////////////////////////////////
  FermionActionD Numerator_sD(Ud,*FGridD,*FrbGridD,*GridPtrD,*GridRBPtrD,strange_mass,M5,Params);
  FermionActionD Denominator_sD(Ud,*FGridD,*FrbGridD,*GridPtrD,*GridRBPtrD, pv_mass,M5,Params);

  FermionActionF Numerator_sF(Uf,*FGridF,*FrbGridF,*GridPtrF,*GridRBPtrF,strange_mass,M5,Params);
  FermionActionF Denominator_sF(Uf,*FGridF,*FrbGridF,*GridPtrF,*GridRBPtrF, pv_mass,M5,Params);

  RationalActionParams rat_act_params_s;
  rat_act_params_s.inv_pow  = 4; // (M^dag M)^{1/4}
  rat_act_params_s.precision= 60;
  rat_act_params_s.MaxIter  = 10000;
  user_params.rat_quo_s.Export(rat_act_params_s);
  std::cout << GridLogMessage << " Heavy quark bounds check every " << rat_act_params_s.BoundsCheckFreq << " trajectories (avg)" << std::endl;

  //MixedPrecRHMC Quotient_s(Denominator_sD, Numerator_sD, Denominator_sF, Numerator_sF, rat_act_params_s, user_params.rat_quo_s.reliable_update_freq);
  DoublePrecRHMC Quotient_s(Denominator_sD, Numerator_sD, rat_act_params_s);
  Level1.push_back(&Quotient_s);

  /////////////////////////////////////////////////////////////
  // Gauge action
  /////////////////////////////////////////////////////////////
  Level2.push_back(&GaugeAction);
  TheHMC.TheAction.push_back(Level1);
  TheHMC.TheAction.push_back(Level2);
  std::cout << GridLogMessage << " Action complete "<< std::endl;

  //Action tuning
  bool tune_rhmc_l=false, tune_rhmc_s=false, eigenrange_l=false, eigenrange_s=false;
  std::string lanc_params_l, lanc_params_s;
  for(int i=1;i<argc;i++){
    std::string sarg(argv[i]);
    if(sarg == "--tune_rhmc_l") tune_rhmc_l=true;
    else if(sarg == "--tune_rhmc_s") tune_rhmc_s=true;
    else if(sarg == "--eigenrange_l"){
      assert(i < argc-1);
      eigenrange_l=true;
      lanc_params_l = argv[i+1];
    }
    else if(sarg == "--eigenrange_s"){
      assert(i < argc-1);
      eigenrange_s=true;
      lanc_params_s = argv[i+1];
    }
  }
  if(tune_rhmc_l || tune_rhmc_s || eigenrange_l || eigenrange_s){
    TheHMC.initializeGaugeFieldAndRNGs(Ud);
    if(eigenrange_l) computeEigenvalues<FermionActionD, FermionFieldD>(lanc_params_l, FGridD, FrbGridD, Ud, Numerator_lD, TheHMC.Resources.GetParallelRNG());
    if(eigenrange_s) computeEigenvalues<FermionActionD, FermionFieldD>(lanc_params_s, FGridD, FrbGridD, Ud, Numerator_sD, TheHMC.Resources.GetParallelRNG());
    if(tune_rhmc_l) checkRHMC<FermionActionD, FermionFieldD, decltype(Quotient_l)>(FGridD, FrbGridD, Ud, Numerator_lD, Denominator_lD, Quotient_l, TheHMC.Resources.GetParallelRNG(), 2, "light");
    if(tune_rhmc_s) checkRHMC<FermionActionD, FermionFieldD, decltype(Quotient_s)>(FGridD, FrbGridD, Ud, Numerator_sD, Denominator_sD, Quotient_s, TheHMC.Resources.GetParallelRNG(), 4, "strange");

    std::cout << GridLogMessage << " Done" << std::endl;
    Grid_finalize();
    return 0;
  }

  //Run the HMC
  std::cout << GridLogMessage << " Running the HMC "<< std::endl;
  TheHMC.Run();

  std::cout << GridLogMessage << " Done" << std::endl;
  Grid_finalize();
  return 0;
} // main

@ -1,765 +0,0 @@
|
||||
/*************************************************************************************
|
||||
|
||||
Grid physics library, www.github.com/paboyle/Grid
|
||||
|
||||
Source file: ./HMC/Mobius2p1fIDSDRGparityEOFA.cc
|
||||
|
||||
Copyright (C) 2015-2016
|
||||
|
||||
Author: Christopher Kelly <ckelly@bnl.gov>
|
||||
Author: Peter Boyle <pabobyle@ph.ed.ac.uk>
|
||||
|
||||
|
||||
This program is free software; you can redistribute it and/or modify
|
||||
it under the terms of the GNU General Public License as published by
|
||||
the Free Software Foundation; either version 2 of the License, or
|
||||
(at your option) any later version.
|
||||
|
||||
This program is distributed in the hope that it will be useful,
|
||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
GNU General Public License for more details.
|
||||
|
||||
You should have received a copy of the GNU General Public License along
|
||||
with this program; if not, write to the Free Software Foundation, Inc.,
|
||||
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
|
||||
See the full license in the file "LICENSE" in the top level distribution
|
||||
directory
|
||||
*************************************************************************************/
|
||||
/* END LEGAL */
|
||||
#include <Grid/Grid.h>
|
||||
|
||||
using namespace Grid;
|
||||
|
||||
//We try to reproduce with G-parity BCs the 246 MeV 1.37 GeV ensemble
|
||||
//To speed things up we will use Mobius DWF with b+c=32/12 and Ls=12 to match the Ls=32 of the original
|
||||
//These parameters match those used in the 2020 K->pipi paper
|
||||
|
||||
struct RatQuoParameters: Serializable {
|
||||
GRID_SERIALIZABLE_CLASS_MEMBERS(RatQuoParameters,
|
||||
double, bnd_lo,
|
||||
double, bnd_hi,
|
||||
Integer, action_degree,
|
||||
double, action_tolerance,
|
||||
Integer, md_degree,
|
||||
double, md_tolerance,
|
||||
Integer, reliable_update_freq,
|
||||
Integer, bnd_check_freq);
|
||||
RatQuoParameters() {
|
||||
bnd_lo = 1e-2;
|
||||
bnd_hi = 30;
|
||||
action_degree = 10;
|
||||
action_tolerance = 1e-10;
|
||||
md_degree = 10;
|
||||
md_tolerance = 1e-8;
|
||||
bnd_check_freq = 20;
|
||||
reliable_update_freq = 50;
|
||||
}
|
||||
|
||||
void Export(RationalActionParams &into) const{
|
||||
into.lo = bnd_lo;
|
||||
into.hi = bnd_hi;
|
||||
into.action_degree = action_degree;
|
||||
into.action_tolerance = action_tolerance;
|
||||
into.md_degree = md_degree;
|
||||
into.md_tolerance = md_tolerance;
|
||||
into.BoundsCheckFreq = bnd_check_freq;
|
||||
}
|
||||
};
|
||||
|
||||
struct EOFAparameters: Serializable {
|
||||
GRID_SERIALIZABLE_CLASS_MEMBERS(EOFAparameters,
|
||||
OneFlavourRationalParams, rat_params,
|
||||
double, action_tolerance,
|
||||
double, action_mixcg_inner_tolerance,
|
||||
double, md_tolerance,
|
||||
double, md_mixcg_inner_tolerance);
|
||||
|
||||
EOFAparameters() {
|
||||
action_mixcg_inner_tolerance = 1e-8;
|
||||
action_tolerance = 1e-10;
|
||||
md_tolerance = 1e-8;
|
||||
md_mixcg_inner_tolerance = 1e-8;
|
||||
|
||||
rat_params.lo = 0.1;
|
||||
rat_params.hi = 25.0;
|
||||
rat_params.MaxIter = 10000;
|
||||
rat_params.tolerance= 1.0e-9;
|
||||
rat_params.degree = 14;
|
||||
rat_params.precision= 50;
|
||||
}
|
||||
};
|
||||
|
||||
struct EvolParameters: Serializable {
|
||||
GRID_SERIALIZABLE_CLASS_MEMBERS(EvolParameters,
|
||||
Integer, StartTrajectory,
|
||||
Integer, Trajectories,
|
||||
Integer, SaveInterval,
|
||||
Integer, Steps,
|
||||
bool, MetropolisTest,
|
||||
std::string, StartingType,
|
||||
std::vector<Integer>, GparityDirs,
|
||||
EOFAparameters, eofa_l,
|
||||
RatQuoParameters, rat_quo_s,
|
||||
RatQuoParameters, rat_quo_DSDR);
|
||||
|
||||
EvolParameters() {
|
||||
//For initial thermalization; afterwards user should switch Metropolis on and use StartingType=CheckpointStart
|
||||
MetropolisTest = false;
|
||||
StartTrajectory = 0;
|
||||
Trajectories = 50;
|
||||
SaveInterval = 5;
|
||||
StartingType = "ColdStart";
|
||||
GparityDirs.resize(3, 1); //1 for G-parity, 0 for periodic
|
||||
Steps = 5;
|
||||
}
|
||||
};

bool fileExists(const std::string &fn){
  std::ifstream f(fn);
  return f.good();
}

struct LanczosParameters: Serializable {
  GRID_SERIALIZABLE_CLASS_MEMBERS(LanczosParameters,
                                  double, alpha,
                                  double, beta,
                                  double, mu,
                                  int, ord,
                                  int, n_stop,
                                  int, n_want,
                                  int, n_use,
                                  double, tolerance);

  LanczosParameters() {
    alpha = 35;
    beta = 5;
    mu = 0;
    ord = 100;
    n_stop = 10;
    n_want = 10;
    n_use = 15;
    tolerance = 1e-6;
  }
};

template<typename FermionActionD, typename FermionFieldD>
void computeEigenvalues(std::string param_file,
                        GridCartesian* Grid, GridRedBlackCartesian* rbGrid, const LatticeGaugeFieldD &latt, //expect lattice to have been initialized to something
                        FermionActionD &action, GridParallelRNG &rng){

  LanczosParameters params;
  if(fileExists(param_file)){
    std::cout << GridLogMessage << " Reading " << param_file << std::endl;
    Grid::XmlReader rd(param_file);
    read(rd, "LanczosParameters", params);
  }else if(!GlobalSharedMemory::WorldRank){
    std::cout << GridLogMessage << " File " << param_file << " does not exist" << std::endl;
    std::cout << GridLogMessage << " Writing xml template to " << param_file << ".templ" << std::endl;
    Grid::XmlWriter wr(param_file + ".templ");
    write(wr, "LanczosParameters", params);
  }

  FermionFieldD gauss_o(rbGrid);
  FermionFieldD gauss(Grid);
  gaussian(rng, gauss);
  pickCheckerboard(Odd, gauss_o, gauss);

  action.ImportGauge(latt);

  SchurDiagMooeeOperator<FermionActionD, FermionFieldD> hermop(action);
  PlainHermOp<FermionFieldD> hermop_wrap(hermop);
  //ChebyshevLanczos<FermionFieldD> Cheb(params.alpha, params.beta, params.mu, params.ord);
  assert(params.mu == 0.0);

  Chebyshev<FermionFieldD> Cheb(params.beta*params.beta, params.alpha*params.alpha, params.ord+1);
  FunctionHermOp<FermionFieldD> Cheb_wrap(Cheb, hermop);

  std::cout << "IRL: alpha=" << params.alpha << " beta=" << params.beta << " mu=" << params.mu << " ord=" << params.ord << std::endl;
  ImplicitlyRestartedLanczos<FermionFieldD> IRL(Cheb_wrap, hermop_wrap, params.n_stop, params.n_want, params.n_use, params.tolerance, 10000);

  std::vector<RealD> eval(params.n_use);
  std::vector<FermionFieldD> evec(params.n_use, rbGrid);
  int Nconv;
  IRL.calc(eval, evec, gauss_o, Nconv);

  std::cout << "Eigenvalues:" << std::endl;
  for(int i=0;i<params.n_want;i++){
    std::cout << i << " " << eval[i] << std::endl;
  }
}
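//A note on the Chebyshev setup above, as we read the Grid IRL conventions: the polynomial
//is defined on [beta^2, alpha^2], so alpha^2 should upper-bound the spectrum of the
//even-odd preconditioned M^dag M while the wanted low modes lie below beta^2; eigenvalues
//below the window are strongly amplified, which is what steers the restarted Lanczos onto
//them. With the defaults alpha=35, beta=5 the suppression window is [25, 1225].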

//Check the quality of the RHMC approx
//action_or_md toggles checking the action (0), MD (1) or both (2) setups
template<typename FermionActionD, typename FermionFieldD, typename RHMCtype>
void checkRHMC(GridCartesian* Grid, GridRedBlackCartesian* rbGrid, const LatticeGaugeFieldD &latt, //expect lattice to have been initialized to something
               FermionActionD &numOp, FermionActionD &denOp, RHMCtype &rhmc, GridParallelRNG &rng,
               int inv_pow, const std::string &quark_descr, int action_or_md){
  assert(action_or_md == 0 || action_or_md == 1 || action_or_md == 2);

  FermionFieldD gauss_o(rbGrid);
  FermionFieldD gauss(Grid);
  gaussian(rng, gauss);
  pickCheckerboard(Odd, gauss_o, gauss);

  numOp.ImportGauge(latt);
  denOp.ImportGauge(latt);

  typedef typename FermionActionD::Impl_t FermionImplPolicyD;
  SchurDifferentiableOperator<FermionImplPolicyD> MdagM(numOp);
  SchurDifferentiableOperator<FermionImplPolicyD> VdagV(denOp);

  PowerMethod<FermionFieldD> power_method;
  RealD lambda_max;

  std::cout << "Starting: Get RHMC high bound approx for " << quark_descr << " numerator" << std::endl;
  lambda_max = power_method(MdagM,gauss_o);
  std::cout << GridLogMessage << "Got lambda_max "<<lambda_max<<std::endl;

  std::cout << "Starting: Get RHMC high bound approx for " << quark_descr << " denominator" << std::endl;
  lambda_max = power_method(VdagV,gauss_o);
  std::cout << GridLogMessage << "Got lambda_max "<<lambda_max<<std::endl;

  if(action_or_md == 0 || action_or_md == 2){
    std::cout << "Starting: Checking quality of RHMC action approx for " << quark_descr << " quark numerator and power -1/" << inv_pow << std::endl;
    InversePowerBoundsCheck(inv_pow, 10000, 1e16, MdagM,gauss_o, rhmc.ApproxNegPowerAction); //use large tolerance to prevent exit on fail; we are trying to tune here!
    std::cout << "Finished: Checking quality of RHMC action approx for " << quark_descr << " quark numerator and power -1/" << inv_pow << std::endl;

    std::cout << "Starting: Checking quality of RHMC action approx for " << quark_descr << " quark numerator and power -1/" << 2*inv_pow << std::endl;
    InversePowerBoundsCheck(2*inv_pow, 10000, 1e16, MdagM,gauss_o, rhmc.ApproxNegHalfPowerAction);
    std::cout << "Finished: Checking quality of RHMC action approx for " << quark_descr << " quark numerator and power -1/" << 2*inv_pow << std::endl;

    std::cout << "Starting: Checking quality of RHMC action approx for " << quark_descr << " quark denominator and power -1/" << inv_pow << std::endl;
    InversePowerBoundsCheck(inv_pow, 10000, 1e16, VdagV,gauss_o, rhmc.ApproxNegPowerAction);
    std::cout << "Finished: Checking quality of RHMC action approx for " << quark_descr << " quark denominator and power -1/" << inv_pow << std::endl;

    std::cout << "Starting: Checking quality of RHMC action approx for " << quark_descr << " quark denominator and power -1/" << 2*inv_pow << std::endl;
    InversePowerBoundsCheck(2*inv_pow, 10000, 1e16, VdagV,gauss_o, rhmc.ApproxNegHalfPowerAction);
    std::cout << "Finished: Checking quality of RHMC action approx for " << quark_descr << " quark denominator and power -1/" << 2*inv_pow << std::endl;
  }

  std::cout << "-------------------------------------------------------------------------------" << std::endl;

  if(action_or_md == 1 || action_or_md == 2){
    std::cout << "Starting: Checking quality of RHMC MD approx for " << quark_descr << " quark numerator and power -1/" << inv_pow << std::endl;
    InversePowerBoundsCheck(inv_pow, 10000, 1e16, MdagM,gauss_o, rhmc.ApproxNegPowerMD);
    std::cout << "Finished: Checking quality of RHMC MD approx for " << quark_descr << " quark numerator and power -1/" << inv_pow << std::endl;

    std::cout << "Starting: Checking quality of RHMC MD approx for " << quark_descr << " quark numerator and power -1/" << 2*inv_pow << std::endl;
    InversePowerBoundsCheck(2*inv_pow, 10000, 1e16, MdagM,gauss_o, rhmc.ApproxNegHalfPowerMD);
    std::cout << "Finished: Checking quality of RHMC MD approx for " << quark_descr << " quark numerator and power -1/" << 2*inv_pow << std::endl;

    std::cout << "Starting: Checking quality of RHMC MD approx for " << quark_descr << " quark denominator and power -1/" << inv_pow << std::endl;
    InversePowerBoundsCheck(inv_pow, 10000, 1e16, VdagV,gauss_o, rhmc.ApproxNegPowerMD);
    std::cout << "Finished: Checking quality of RHMC MD approx for " << quark_descr << " quark denominator and power -1/" << inv_pow << std::endl;

    std::cout << "Starting: Checking quality of RHMC MD approx for " << quark_descr << " quark denominator and power -1/" << 2*inv_pow << std::endl;
    InversePowerBoundsCheck(2*inv_pow, 10000, 1e16, VdagV,gauss_o, rhmc.ApproxNegHalfPowerMD);
    std::cout << "Finished: Checking quality of RHMC MD approx for " << quark_descr << " quark denominator and power -1/" << 2*inv_pow << std::endl;
  }
}

template<typename FermionImplPolicy>
void checkEOFA(ExactOneFlavourRatioPseudoFermionAction<FermionImplPolicy> &EOFA,
               GridCartesian* FGrid, GridParallelRNG &rng, const LatticeGaugeFieldD &latt){
  std::cout << GridLogMessage << "Starting EOFA action/bounds check" << std::endl;
  typename FermionImplPolicy::FermionField eta(FGrid);
  RealD scale = std::sqrt(0.5);
  gaussian(rng,eta); eta = eta * scale;

  //Use the inbuilt check: refreshing the pseudofermion and evaluating the action
  //runs the internal rational approximation bounds checks
  EOFA.refresh(latt, eta);
  EOFA.S(latt);
  std::cout << GridLogMessage << "Finished EOFA action/bounds check" << std::endl;
}

template<typename FermionImplPolicy>
class EOFAlinop: public LinearOperatorBase<typename FermionImplPolicy::FermionField>{
  ExactOneFlavourRatioPseudoFermionAction<FermionImplPolicy> &EOFA;
  LatticeGaugeFieldD &U;
public:
  EOFAlinop(ExactOneFlavourRatioPseudoFermionAction<FermionImplPolicy> &EOFA, LatticeGaugeFieldD &U): EOFA(EOFA), U(U){}

  typedef typename FermionImplPolicy::FermionField Field;
  void OpDiag (const Field &in, Field &out){ assert(0); }
  void OpDir  (const Field &in, Field &out,int dir,int disp){ assert(0); }
  void OpDirAll (const Field &in, std::vector<Field> &out){ assert(0); }

  void Op     (const Field &in, Field &out){ assert(0); }
  void AdjOp  (const Field &in, Field &out){ assert(0); }
  void HermOpAndNorm(const Field &in, Field &out,RealD &n1,RealD &n2){ assert(0); }
  void HermOp(const Field &in, Field &out){ EOFA.Meofa(U, in, out); }
};

template<typename FermionImplPolicy>
void upperBoundEOFA(ExactOneFlavourRatioPseudoFermionAction<FermionImplPolicy> &EOFA,
                    GridCartesian* FGrid, GridParallelRNG &rng, LatticeGaugeFieldD &latt){
  std::cout << GridLogMessage << "Starting EOFA upper bound compute" << std::endl;
  EOFAlinop<FermionImplPolicy> linop(EOFA, latt);
  typename FermionImplPolicy::FermionField eta(FGrid);
  gaussian(rng,eta);
  PowerMethod<typename FermionImplPolicy::FermionField> power_method;
  auto lambda_max = power_method(linop,eta);
  std::cout << GridLogMessage << "Upper bound of EOFA operator " << lambda_max << std::endl;
}

//Applications of M^{-1} cost the same as M for EOFA!
template<typename FermionImplPolicy>
class EOFAinvLinop: public LinearOperatorBase<typename FermionImplPolicy::FermionField>{
  ExactOneFlavourRatioPseudoFermionAction<FermionImplPolicy> &EOFA;
  LatticeGaugeFieldD &U;
public:
  EOFAinvLinop(ExactOneFlavourRatioPseudoFermionAction<FermionImplPolicy> &EOFA, LatticeGaugeFieldD &U): EOFA(EOFA), U(U){}

  typedef typename FermionImplPolicy::FermionField Field;
  void OpDiag (const Field &in, Field &out){ assert(0); }
  void OpDir  (const Field &in, Field &out,int dir,int disp){ assert(0); }
  void OpDirAll (const Field &in, std::vector<Field> &out){ assert(0); }

  void Op     (const Field &in, Field &out){ assert(0); }
  void AdjOp  (const Field &in, Field &out){ assert(0); }
  void HermOpAndNorm(const Field &in, Field &out,RealD &n1,RealD &n2){ assert(0); }
  void HermOp(const Field &in, Field &out){ EOFA.MeofaInv(U, in, out); }
};

template<typename FermionImplPolicy>
void lowerBoundEOFA(ExactOneFlavourRatioPseudoFermionAction<FermionImplPolicy> &EOFA,
                    GridCartesian* FGrid, GridParallelRNG &rng, LatticeGaugeFieldD &latt){
  std::cout << GridLogMessage << "Starting EOFA lower bound compute using power method on M^{-1}. Inverse of highest eigenvalue is the lowest eigenvalue of M" << std::endl;
  EOFAinvLinop<FermionImplPolicy> linop(EOFA, latt);
  typename FermionImplPolicy::FermionField eta(FGrid);
  gaussian(rng,eta);
  PowerMethod<typename FermionImplPolicy::FermionField> power_method;
  auto lambda_max = power_method(linop,eta);
  std::cout << GridLogMessage << "Lower bound of EOFA operator " << 1./lambda_max << std::endl;
}

NAMESPACE_BEGIN(Grid);

template<class FermionOperatorD, class FermionOperatorF, class SchurOperatorD, class SchurOperatorF>
class MixedPrecisionConjugateGradientOperatorFunction : public OperatorFunction<typename FermionOperatorD::FermionField> {
public:
  typedef typename FermionOperatorD::FermionField FieldD;
  typedef typename FermionOperatorF::FermionField FieldF;

  using OperatorFunction<FieldD>::operator();

  RealD   Tolerance;
  RealD   InnerTolerance; //Initial tolerance for inner CG. Defaults to Tolerance but can be changed
  Integer MaxInnerIterations;
  Integer MaxOuterIterations;
  GridBase* SinglePrecGrid4; //Grid for single-precision fields
  GridBase* SinglePrecGrid5; //Grid for single-precision fields
  RealD   OuterLoopNormMult; //Stop the outer loop and move to a final double prec solve when the residual is OuterLoopNormMult * Tolerance

  FermionOperatorF &FermOpF;
  FermionOperatorD &FermOpD;
  SchurOperatorF   &LinOpF;
  SchurOperatorD   &LinOpD;

  Integer TotalInnerIterations;     //Number of inner CG iterations
  Integer TotalOuterIterations;     //Number of restarts
  Integer TotalFinalStepIterations; //Number of CG iterations in final patch-up step

  MixedPrecisionConjugateGradientOperatorFunction(RealD tol,
                                                  Integer maxinnerit,
                                                  Integer maxouterit,
                                                  GridBase* _sp_grid4,
                                                  GridBase* _sp_grid5,
                                                  FermionOperatorF &_FermOpF,
                                                  FermionOperatorD &_FermOpD,
                                                  SchurOperatorF   &_LinOpF,
                                                  SchurOperatorD   &_LinOpD):
    LinOpF(_LinOpF),
    LinOpD(_LinOpD),
    FermOpF(_FermOpF),
    FermOpD(_FermOpD),
    Tolerance(tol),
    InnerTolerance(tol),
    MaxInnerIterations(maxinnerit),
    MaxOuterIterations(maxouterit),
    SinglePrecGrid4(_sp_grid4),
    SinglePrecGrid5(_sp_grid5),
    OuterLoopNormMult(100.)
  {
  }

  void operator()(LinearOperatorBase<FieldD> &LinOpU, const FieldD &src, FieldD &psi) {

    std::cout << GridLogMessage << " Mixed precision CG wrapper operator() "<<std::endl;

    SchurOperatorD * SchurOpU = static_cast<SchurOperatorD *>(&LinOpU);
    assert(&(SchurOpU->_Mat)==&(LinOpD._Mat));

    //Refresh the single-precision gauge field from the double-precision one
    precisionChange(FermOpF.Umu, FermOpD.Umu);

    pickCheckerboard(Even,FermOpF.UmuEven,FermOpF.Umu);
    pickCheckerboard(Odd ,FermOpF.UmuOdd ,FermOpF.Umu);

    ////////////////////////////////////////////////////////////////////////////////////
    // Make a mixed precision conjugate gradient
    ////////////////////////////////////////////////////////////////////////////////////
    MixedPrecisionConjugateGradient<FieldD,FieldF> MPCG(Tolerance,MaxInnerIterations,MaxOuterIterations,SinglePrecGrid5,LinOpF,LinOpD);
    MPCG.InnerTolerance = InnerTolerance;
    std::cout << GridLogMessage << "Calling mixed precision Conjugate Gradient" <<std::endl;
    MPCG(src,psi);
  }
};
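
//Usage note (a sketch of how this wrapper is wired up in main() below, not an
//independent interface description): the pseudofermion actions expect an
//OperatorFunction, so the two-grid mixed-precision CG is wrapped and the single
//precision gauge field refreshed on every call, e.g.
//
//  typedef MixedPrecisionConjugateGradientOperatorFunction<EOFAactionD, EOFAactionF,
//                                    EOFAschuropD, EOFAschuropF> EOFA_mxCG;
//  EOFA_mxCG mxCG(tol, 10000, 1000, UGridF, FrbGridF, LopF, LopD, linopL_F, linopL_D);
//  mxCG.InnerTolerance = inner_tol;
//
//where the operator() argument must wrap the same double-precision matrix as linopL_D.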

NAMESPACE_END(Grid);



int main(int argc, char **argv) {
  Grid_init(&argc, &argv);
  int threads = GridThread::GetThreads();
  // here make a routine to print all the relevant information on the run
  std::cout << GridLogMessage << "Grid is setup to use " << threads << " threads" << std::endl;

  std::string param_file = "params.xml";
  bool file_load_check = false;
  for(int i=1;i<argc;i++){
    std::string sarg(argv[i]);
    if(sarg == "--param_file"){
      assert(i!=argc-1);
      param_file = argv[i+1];
    }else if(sarg == "--read_check"){ //check the fields load correctly and pass checksum/plaquette repro
      file_load_check = true;
    }
  }

  //Read the user parameters
  EvolParameters user_params;

  if(fileExists(param_file)){
    std::cout << GridLogMessage << " Reading " << param_file << std::endl;
    Grid::XmlReader rd(param_file);
    read(rd, "Params", user_params);
  }else{
    //Write a template on the head rank, then exit on *all* ranks, so the job does not
    //continue with rank 0 gone while the others run on with default parameters
    if(!GlobalSharedMemory::WorldRank){
      std::cout << GridLogMessage << " File " << param_file << " does not exist" << std::endl;
      std::cout << GridLogMessage << " Writing xml template to " << param_file << ".templ" << std::endl;
      {
        Grid::XmlWriter wr(param_file + ".templ");
        write(wr, "Params", user_params);
      }
      std::cout << GridLogMessage << " Done" << std::endl;
    }
    Grid_finalize();
    return 0;
  }

  //Check the parameters
  if(user_params.GparityDirs.size() != Nd-1){
    std::cerr << "Error in input parameters: expect GparityDirs to have size = " << Nd-1 << std::endl;
    exit(1);
  }
  for(int i=0;i<Nd-1;i++)
    if(user_params.GparityDirs[i] != 0 && user_params.GparityDirs[i] != 1){
      std::cerr << "Error in input parameters: expect GparityDirs values to be 0 (periodic) or 1 (G-parity)" << std::endl;
      exit(1);
    }


  typedef GparityMobiusEOFAFermionD EOFAactionD;
  typedef GparityMobiusFermionD FermionActionD;
  typedef typename FermionActionD::Impl_t FermionImplPolicyD;
  typedef typename FermionActionD::FermionField FermionFieldD;

  typedef GparityMobiusEOFAFermionF EOFAactionF;
  typedef GparityMobiusFermionF FermionActionF;
  typedef typename FermionActionF::Impl_t FermionImplPolicyF;
  typedef typename FermionActionF::FermionField FermionFieldF;

  typedef GeneralEvenOddRatioRationalMixedPrecPseudoFermionAction<FermionImplPolicyD,FermionImplPolicyF> MixedPrecRHMC;
  typedef GeneralEvenOddRatioRationalPseudoFermionAction<FermionImplPolicyD> DoublePrecRHMC;

  //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
  IntegratorParameters MD;
  typedef ConjugateHMCRunnerD<MinimumNorm2> HMCWrapper; //NB: This is the "Omelyan integrator"
  typedef HMCWrapper::ImplPolicy GaugeImplPolicy;
  MD.name    = std::string("MinimumNorm2");
  MD.MDsteps = user_params.Steps;
  MD.trajL   = 1.0;

  HMCparameters HMCparams;
  HMCparams.StartTrajectory   = user_params.StartTrajectory;
  HMCparams.Trajectories      = user_params.Trajectories;
  HMCparams.NoMetropolisUntil = 0;
  HMCparams.StartingType      = user_params.StartingType;
  HMCparams.MetropolisTest    = user_params.MetropolisTest;
  HMCparams.MD                = MD;
  HMCWrapper TheHMC(HMCparams);

  // Grid from the command line arguments --grid and --mpi
  TheHMC.Resources.AddFourDimGrid("gauge"); // use default simd lanes decomposition

  CheckpointerParameters CPparams;
  CPparams.config_prefix = "ckpoint_lat";
  CPparams.rng_prefix    = "ckpoint_rng";
  CPparams.saveInterval  = user_params.SaveInterval;
  CPparams.format        = "IEEE64BIG";
  TheHMC.Resources.LoadNerscCheckpointer(CPparams);

  //Note that checkpointing saves the RNG state so that this initialization is required only for the very first configuration
  RNGModuleParameters RNGpar;
  RNGpar.serial_seeds   = "1 2 3 4 5";
  RNGpar.parallel_seeds = "6 7 8 9 10";
  TheHMC.Resources.SetRNGSeeds(RNGpar);

  typedef PlaquetteMod<GaugeImplPolicy> PlaqObs;
  TheHMC.Resources.AddObservable<PlaqObs>();
  //////////////////////////////////////////////

  const int Ls      = 12;
  Real beta         = 1.75;
  Real light_mass   = 0.0042; //240 MeV
  Real strange_mass = 0.045;
  Real pv_mass      = 1.0;
  RealD M5          = 1.8;
  RealD mobius_scale = 32./12.; //b+c

  RealD mob_bmc = 1.0;
  RealD mob_b = (mobius_scale + mob_bmc)/2.;
  RealD mob_c = (mobius_scale - mob_bmc)/2.;
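  //Expanding the two lines above with mobius_scale = 32/12 and b-c = 1:
  //  mob_b = (32/12 + 1)/2 = 11/6 ~ 1.8333
  //  mob_c = (32/12 - 1)/2 = 5/6  ~ 0.8333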

  //Setup the Grids
  auto UGridD   = TheHMC.Resources.GetCartesian();
  auto UrbGridD = TheHMC.Resources.GetRBCartesian();
  auto FGridD   = SpaceTimeGrid::makeFiveDimGrid(Ls,UGridD);
  auto FrbGridD = SpaceTimeGrid::makeFiveDimRedBlackGrid(Ls,UGridD);

  GridCartesian* UGridF = SpaceTimeGrid::makeFourDimGrid(GridDefaultLatt(), GridDefaultSimd(Nd, vComplexF::Nsimd()), GridDefaultMpi());
  GridRedBlackCartesian* UrbGridF = SpaceTimeGrid::makeFourDimRedBlackGrid(UGridF);
  auto FGridF   = SpaceTimeGrid::makeFiveDimGrid(Ls,UGridF);
  auto FrbGridF = SpaceTimeGrid::makeFiveDimRedBlackGrid(Ls,UGridF);

  ConjugateIwasakiGaugeActionD GaugeAction(beta);

  // temporarily need a gauge field
  LatticeGaugeFieldD Ud(UGridD);
  LatticeGaugeFieldF Uf(UGridF);

  //Setup the BCs
  FermionActionD::ImplParams Params;
  for(int i=0;i<Nd-1;i++) Params.twists[i] = user_params.GparityDirs[i]; //G-parity directions
  Params.twists[Nd-1] = 1; //APBC in time direction

  std::vector<int> dirs4(Nd);
  for(int i=0;i<Nd-1;i++) dirs4[i] = user_params.GparityDirs[i];
  dirs4[Nd-1] = 0; //periodic gauge BC in time

  GaugeImplPolicy::setDirections(dirs4); //gauge BC
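  //For example, GparityDirs = {1,1,1} (the default) applies the G-parity twist to the
  //fermions in all three spatial directions; the conjugate gauge implementation then
  //complex-conjugates the gauge field across the same boundaries. (Our reading of the
  //conventions, kept as a reminder rather than a definitive statement.)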

  //Run optional gauge field checksum checker and exit
  if(file_load_check){
    TheHMC.initializeGaugeFieldAndRNGs(Ud);
    std::cout << GridLogMessage << " Done" << std::endl;
    Grid_finalize();
    return 0;
  }

  ////////////////////////////////////
  // Collect actions
  ////////////////////////////////////
  ActionLevel<HMCWrapper::Field> Level1(1); //light quark + strange quark
  ActionLevel<HMCWrapper::Field> Level2(1); //DSDR
  ActionLevel<HMCWrapper::Field> Level3(8); //gauge (8 increments per step)
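  //(Reminder on the nested integrator: the argument of ActionLevel is the number of
  // integration steps of that level per step of the level above it, so here the cheap
  // gauge force is integrated 8 times for every fermion force evaluation.)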

  /////////////////////////////////////////////////////////////
  // Light EOFA action
  // have to be careful with the parameters, cf. Test_dwf_gpforce_eofa.cc
  /////////////////////////////////////////////////////////////

  EOFAactionD LopD(Ud, *FGridD, *FrbGridD, *UGridD, *UrbGridD, light_mass, light_mass, pv_mass, 0.0, -1, M5, mob_b, mob_c, Params);
  EOFAactionF LopF(Uf, *FGridF, *FrbGridF, *UGridF, *UrbGridF, light_mass, light_mass, pv_mass, 0.0, -1, M5, mob_b, mob_c, Params);
  EOFAactionD RopD(Ud, *FGridD, *FrbGridD, *UGridD, *UrbGridD, pv_mass, light_mass, pv_mass, -1.0, 1, M5, mob_b, mob_c, Params);
  EOFAactionF RopF(Uf, *FGridF, *FrbGridF, *UGridF, *UrbGridF, pv_mass, light_mass, pv_mass, -1.0, 1, M5, mob_b, mob_c, Params);

  typedef SchurDiagMooeeOperator<EOFAactionD,FermionFieldD> EOFAschuropD;
  typedef SchurDiagMooeeOperator<EOFAactionF,FermionFieldF> EOFAschuropF;

  EOFAschuropD linopL_D(LopD);
  EOFAschuropD linopR_D(RopD);

  EOFAschuropF linopL_F(LopF);
  EOFAschuropF linopR_F(RopF);

  typedef MixedPrecisionConjugateGradientOperatorFunction<EOFAactionD, EOFAactionF, EOFAschuropD, EOFAschuropF> EOFA_mxCG;

  EOFA_mxCG ActionMCG_L(user_params.eofa_l.action_tolerance, 10000, 1000, UGridF, FrbGridF, LopF, LopD, linopL_F, linopL_D);
  ActionMCG_L.InnerTolerance = user_params.eofa_l.action_mixcg_inner_tolerance;

  EOFA_mxCG ActionMCG_R(user_params.eofa_l.action_tolerance, 10000, 1000, UGridF, FrbGridF, RopF, RopD, linopR_F, linopR_D);
  ActionMCG_R.InnerTolerance = user_params.eofa_l.action_mixcg_inner_tolerance;

  EOFA_mxCG DerivMCG_L(user_params.eofa_l.md_tolerance, 10000, 1000, UGridF, FrbGridF, LopF, LopD, linopL_F, linopL_D);
  DerivMCG_L.InnerTolerance = user_params.eofa_l.md_mixcg_inner_tolerance;

  EOFA_mxCG DerivMCG_R(user_params.eofa_l.md_tolerance, 10000, 1000, UGridF, FrbGridF, RopF, RopD, linopR_F, linopR_D);
  DerivMCG_R.InnerTolerance = user_params.eofa_l.md_mixcg_inner_tolerance;

  std::cout << GridLogMessage << "Set EOFA action solver action tolerance outer=" << ActionMCG_L.Tolerance << " inner=" << ActionMCG_L.InnerTolerance << std::endl;
  std::cout << GridLogMessage << "Set EOFA MD solver tolerance outer=" << DerivMCG_L.Tolerance << " inner=" << DerivMCG_L.InnerTolerance << std::endl;

  ConjugateGradient<FermionFieldD> ActionCG(user_params.eofa_l.action_tolerance, 10000);
  ConjugateGradient<FermionFieldD> DerivativeCG(user_params.eofa_l.md_tolerance, 10000);

  // ExactOneFlavourRatioPseudoFermionAction<FermionImplPolicyD> EOFA(LopD, RopD,
  //                                                                  ActionCG, ActionCG, ActionCG,
  //                                                                  DerivativeCG, DerivativeCG,
  //                                                                  user_params.eofa_l.rat_params, true);

  // ExactOneFlavourRatioPseudoFermionAction<FermionImplPolicyD> EOFA(LopD, RopD,
  //                                                                  ActionMCG_L, ActionMCG_R,
  //                                                                  ActionMCG_L, ActionMCG_R,
  //                                                                  DerivMCG_L, DerivMCG_R,
  //                                                                  user_params.eofa_l.rat_params, true);

  ExactOneFlavourRatioMixedPrecHeatbathPseudoFermionAction<FermionImplPolicyD, FermionImplPolicyF> EOFA(LopF, RopF,
                                                                                                        LopD, RopD,
                                                                                                        ActionMCG_L, ActionMCG_R,
                                                                                                        ActionMCG_L, ActionMCG_R,
                                                                                                        DerivMCG_L, DerivMCG_R,
                                                                                                        user_params.eofa_l.rat_params, true);

  Level1.push_back(&EOFA);

  ////////////////////////////////////
  // Strange action
  ////////////////////////////////////
  FermionActionD Numerator_sD(Ud,*FGridD,*FrbGridD,*UGridD,*UrbGridD,strange_mass,M5,mob_b,mob_c,Params);
  FermionActionD Denominator_sD(Ud,*FGridD,*FrbGridD,*UGridD,*UrbGridD, pv_mass,M5,mob_b,mob_c,Params);

  FermionActionF Numerator_sF(Uf,*FGridF,*FrbGridF,*UGridF,*UrbGridF,strange_mass,M5,mob_b,mob_c,Params);
  FermionActionF Denominator_sF(Uf,*FGridF,*FrbGridF,*UGridF,*UrbGridF, pv_mass,M5,mob_b,mob_c,Params);

  RationalActionParams rat_act_params_s;
  rat_act_params_s.inv_pow  = 4; // (M^dag M)^{1/4}
  rat_act_params_s.precision= 60;
  rat_act_params_s.MaxIter  = 10000;
  user_params.rat_quo_s.Export(rat_act_params_s);
  std::cout << GridLogMessage << " Heavy quark bounds check every " << rat_act_params_s.BoundsCheckFreq << " trajectories (avg)" << std::endl;

  //MixedPrecRHMC Quotient_s(Denominator_sD, Numerator_sD, Denominator_sF, Numerator_sF, rat_act_params_s, user_params.rat_quo_s.reliable_update_freq);
  DoublePrecRHMC Quotient_s(Denominator_sD, Numerator_sD, rat_act_params_s);
  Level1.push_back(&Quotient_s);

  ///////////////////////////////////
  // DSDR action
  ///////////////////////////////////
  RealD dsdr_mass=-1.8;
  //Use same DSDR twists as https://arxiv.org/pdf/1208.4412.pdf
  RealD dsdr_epsilon_f = 0.02; //numerator (in determinant)
  RealD dsdr_epsilon_b = 0.5;
  GparityWilsonTMFermionD Numerator_DSDR_D(Ud, *UGridD, *UrbGridD, dsdr_mass, dsdr_epsilon_f, Params);
  GparityWilsonTMFermionF Numerator_DSDR_F(Uf, *UGridF, *UrbGridF, dsdr_mass, dsdr_epsilon_f, Params);

  GparityWilsonTMFermionD Denominator_DSDR_D(Ud, *UGridD, *UrbGridD, dsdr_mass, dsdr_epsilon_b, Params);
  GparityWilsonTMFermionF Denominator_DSDR_F(Uf, *UGridF, *UrbGridF, dsdr_mass, dsdr_epsilon_b, Params);

  RationalActionParams rat_act_params_DSDR;
  rat_act_params_DSDR.inv_pow  = 2; // (M^dag M)^{1/2}
  rat_act_params_DSDR.precision= 60;
  rat_act_params_DSDR.MaxIter  = 10000;
  user_params.rat_quo_DSDR.Export(rat_act_params_DSDR);
  std::cout << GridLogMessage << "DSDR quark bounds check every " << rat_act_params_DSDR.BoundsCheckFreq << " trajectories (avg)" << std::endl;

  DoublePrecRHMC Quotient_DSDR(Denominator_DSDR_D, Numerator_DSDR_D, rat_act_params_DSDR);
  Level2.push_back(&Quotient_DSDR);
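  //(Background, as we understand the construction: the DSDR term is the ratio of
  // twisted Wilson determinants det[(D_W^dag D_W + eps_f^2)/(D_W^dag D_W + eps_b^2)]
  // with eps_f < eps_b, which suppresses gauge-field dislocations while leaving the
  // topological charge mobile; see the arXiv reference above.)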

  /////////////////////////////////////////////////////////////
  // Gauge action
  /////////////////////////////////////////////////////////////
  Level3.push_back(&GaugeAction);

  TheHMC.TheAction.push_back(Level1);
  TheHMC.TheAction.push_back(Level2);
  TheHMC.TheAction.push_back(Level3);
  std::cout << GridLogMessage << " Action complete "<< std::endl;


  //Action tuning
  bool tune_rhmc_s=false,    eigenrange_s=false,
       tune_rhmc_DSDR=false, eigenrange_DSDR=false,
       check_eofa=false,
       upper_bound_eofa=false, lower_bound_eofa=false;

  std::string lanc_params_s;
  std::string lanc_params_DSDR;
  int tune_rhmc_s_action_or_md;
  int tune_rhmc_DSDR_action_or_md;

  for(int i=1;i<argc;i++){
    std::string sarg(argv[i]);
    if(sarg == "--tune_rhmc_s"){
      assert(i < argc-1);
      tune_rhmc_s=true;
      tune_rhmc_s_action_or_md = std::stoi(argv[i+1]);
    }
    else if(sarg == "--eigenrange_s"){
      assert(i < argc-1);
      eigenrange_s=true;
      lanc_params_s = argv[i+1];
    }
    else if(sarg == "--tune_rhmc_DSDR"){
      assert(i < argc-1);
      tune_rhmc_DSDR=true;
      tune_rhmc_DSDR_action_or_md = std::stoi(argv[i+1]);
    }
    else if(sarg == "--eigenrange_DSDR"){
      assert(i < argc-1);
      eigenrange_DSDR=true;
      lanc_params_DSDR = argv[i+1];
    }
    else if(sarg == "--check_eofa") check_eofa = true;
    else if(sarg == "--upper_bound_eofa") upper_bound_eofa = true;
    else if(sarg == "--lower_bound_eofa") lower_bound_eofa = true;
  }
  if(tune_rhmc_s || eigenrange_s || tune_rhmc_DSDR || eigenrange_DSDR || check_eofa || upper_bound_eofa || lower_bound_eofa) {
    std::cout << GridLogMessage << "Running checks" << std::endl;
    TheHMC.initializeGaugeFieldAndRNGs(Ud);

    std::cout << GridLogMessage << "EOFA action solver action tolerance outer=" << ActionMCG_L.Tolerance << " inner=" << ActionMCG_L.InnerTolerance << std::endl;
    std::cout << GridLogMessage << "EOFA MD solver tolerance outer=" << DerivMCG_L.Tolerance << " inner=" << DerivMCG_L.InnerTolerance << std::endl;

    if(check_eofa)       checkEOFA(EOFA, FGridD, TheHMC.Resources.GetParallelRNG(), Ud);
    if(upper_bound_eofa) upperBoundEOFA(EOFA, FGridD, TheHMC.Resources.GetParallelRNG(), Ud);
    if(lower_bound_eofa) lowerBoundEOFA(EOFA, FGridD, TheHMC.Resources.GetParallelRNG(), Ud);
    if(eigenrange_s)     computeEigenvalues<FermionActionD, FermionFieldD>(lanc_params_s, FGridD, FrbGridD, Ud, Numerator_sD, TheHMC.Resources.GetParallelRNG());
    if(tune_rhmc_s)      checkRHMC<FermionActionD, FermionFieldD, decltype(Quotient_s)>(FGridD, FrbGridD, Ud, Numerator_sD, Denominator_sD, Quotient_s, TheHMC.Resources.GetParallelRNG(), 4, "strange", tune_rhmc_s_action_or_md);
    if(eigenrange_DSDR)  computeEigenvalues<GparityWilsonTMFermionD, GparityWilsonTMFermionD::FermionField>(lanc_params_DSDR, UGridD, UrbGridD, Ud, Numerator_DSDR_D, TheHMC.Resources.GetParallelRNG());
    if(tune_rhmc_DSDR)   checkRHMC<GparityWilsonTMFermionD, GparityWilsonTMFermionD::FermionField, decltype(Quotient_DSDR)>(UGridD, UrbGridD, Ud, Numerator_DSDR_D, Denominator_DSDR_D, Quotient_DSDR, TheHMC.Resources.GetParallelRNG(), 2, "DSDR", tune_rhmc_DSDR_action_or_md);

    std::cout << GridLogMessage << " Done" << std::endl;
    Grid_finalize();
    return 0;
  }

  //Run the HMC
  std::cout << GridLogMessage << " Running the HMC "<< std::endl;
  TheHMC.Run();

  std::cout << GridLogMessage << " Done" << std::endl;
  Grid_finalize();
  return 0;
} // main
@ -1,918 +0,0 @@
/*************************************************************************************

    Grid physics library, www.github.com/paboyle/Grid

    Source file: ./HMC/Mobius2p1fIDSDRGparityEOFA.cc

    Copyright (C) 2015-2016

    Author: Christopher Kelly <ckelly@bnl.gov>
    Author: Peter Boyle <pabobyle@ph.ed.ac.uk>

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License along
    with this program; if not, write to the Free Software Foundation, Inc.,
    51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.

    See the full license in the file "LICENSE" in the top level distribution
    directory
*************************************************************************************/
/* END LEGAL */
#include <Grid/Grid.h>

using namespace Grid;

//Production binary for the 40ID G-parity ensemble

struct RatQuoParameters: Serializable {
  GRID_SERIALIZABLE_CLASS_MEMBERS(RatQuoParameters,
                                  double, bnd_lo,
                                  double, bnd_hi,
                                  Integer, action_degree,
                                  double, action_tolerance,
                                  Integer, md_degree,
                                  double, md_tolerance,
                                  Integer, reliable_update_freq,
                                  Integer, bnd_check_freq);
  RatQuoParameters() {
    bnd_lo = 1e-2;
    bnd_hi = 30;
    action_degree = 10;
    action_tolerance = 1e-10;
    md_degree = 10;
    md_tolerance = 1e-8;
    bnd_check_freq = 20;
    reliable_update_freq = 50;
  }

  void Export(RationalActionParams &into) const{
    into.lo = bnd_lo;
    into.hi = bnd_hi;
    into.action_degree = action_degree;
    into.action_tolerance = action_tolerance;
    into.md_degree = md_degree;
    into.md_tolerance = md_tolerance;
    into.BoundsCheckFreq = bnd_check_freq;
  }
};

struct EOFAparameters: Serializable {
  GRID_SERIALIZABLE_CLASS_MEMBERS(EOFAparameters,
                                  OneFlavourRationalParams, rat_params,
                                  double, action_tolerance,
                                  double, action_mixcg_inner_tolerance,
                                  double, md_tolerance,
                                  double, md_mixcg_inner_tolerance);

  EOFAparameters() {
    action_mixcg_inner_tolerance = 1e-8;
    action_tolerance = 1e-10;
    md_tolerance = 1e-8;
    md_mixcg_inner_tolerance = 1e-8;

    rat_params.lo = 1.0;
    rat_params.hi = 25.0;
    rat_params.MaxIter  = 50000;
    rat_params.tolerance= 1.0e-9;
    rat_params.degree   = 14;
    rat_params.precision= 50;
  }
};

struct EvolParameters: Serializable {
  GRID_SERIALIZABLE_CLASS_MEMBERS(EvolParameters,
                                  Integer, StartTrajectory,
                                  Integer, Trajectories,
                                  Integer, SaveInterval,
                                  Integer, Steps,
                                  RealD, TrajectoryLength,
                                  bool, MetropolisTest,
                                  std::string, StartingType,
                                  std::vector<Integer>, GparityDirs,
                                  std::vector<EOFAparameters>, eofa_l,
                                  RatQuoParameters, rat_quo_s,
                                  RatQuoParameters, rat_quo_DSDR);

  EvolParameters() {
    //For initial thermalization; afterwards the user should switch Metropolis on and use StartingType=CheckpointStart
    MetropolisTest    = false;
    StartTrajectory   = 0;
    Trajectories      = 50;
    SaveInterval      = 5;
    StartingType      = "ColdStart";
    GparityDirs.resize(3, 1); //1 for G-parity, 0 for periodic
    Steps             = 5;
    TrajectoryLength  = 1.0;
  }
};

bool fileExists(const std::string &fn){
  std::ifstream f(fn);
  return f.good();
}

struct LanczosParameters: Serializable {
  GRID_SERIALIZABLE_CLASS_MEMBERS(LanczosParameters,
                                  double, alpha,
                                  double, beta,
                                  double, mu,
                                  int, ord,
                                  int, n_stop,
                                  int, n_want,
                                  int, n_use,
                                  double, tolerance);

  LanczosParameters() {
    alpha = 35;
    beta = 5;
    mu = 0;
    ord = 100;
    n_stop = 10;
    n_want = 10;
    n_use = 15;
    tolerance = 1e-6;
  }
};

template<typename FermionActionD, typename FermionFieldD>
void computeEigenvalues(std::string param_file,
                        GridCartesian* Grid, GridRedBlackCartesian* rbGrid, const LatticeGaugeFieldD &latt, //expect lattice to have been initialized to something
                        FermionActionD &action, GridParallelRNG &rng){

  LanczosParameters params;
  if(fileExists(param_file)){
    std::cout << GridLogMessage << " Reading " << param_file << std::endl;
    Grid::XmlReader rd(param_file);
    read(rd, "LanczosParameters", params);
  }else if(!GlobalSharedMemory::WorldRank){
    std::cout << GridLogMessage << " File " << param_file << " does not exist" << std::endl;
    std::cout << GridLogMessage << " Writing xml template to " << param_file << ".templ" << std::endl;
    Grid::XmlWriter wr(param_file + ".templ");
    write(wr, "LanczosParameters", params);
  }

  FermionFieldD gauss_o(rbGrid);
  FermionFieldD gauss(Grid);
  gaussian(rng, gauss);
  pickCheckerboard(Odd, gauss_o, gauss);

  action.ImportGauge(latt);

  SchurDiagMooeeOperator<FermionActionD, FermionFieldD> hermop(action);
  PlainHermOp<FermionFieldD> hermop_wrap(hermop);
  //ChebyshevLanczos<FermionFieldD> Cheb(params.alpha, params.beta, params.mu, params.ord);
  assert(params.mu == 0.0);

  Chebyshev<FermionFieldD> Cheb(params.beta*params.beta, params.alpha*params.alpha, params.ord+1);
  FunctionHermOp<FermionFieldD> Cheb_wrap(Cheb, hermop);

  std::cout << "IRL: alpha=" << params.alpha << " beta=" << params.beta << " mu=" << params.mu << " ord=" << params.ord << std::endl;
  ImplicitlyRestartedLanczos<FermionFieldD> IRL(Cheb_wrap, hermop_wrap, params.n_stop, params.n_want, params.n_use, params.tolerance, 50000);

  std::vector<RealD> eval(params.n_use);
  std::vector<FermionFieldD> evec(params.n_use, rbGrid);
  int Nconv;
  IRL.calc(eval, evec, gauss_o, Nconv);

  std::cout << "Eigenvalues:" << std::endl;
  for(int i=0;i<params.n_want;i++){
    std::cout << i << " " << eval[i] << std::endl;
  }
}

//Check the quality of the RHMC approx
//action_or_md toggles checking the action (0), MD (1) or both (2) setups
template<typename FermionActionD, typename FermionFieldD, typename RHMCtype>
void checkRHMC(GridCartesian* Grid, GridRedBlackCartesian* rbGrid, const LatticeGaugeFieldD &latt, //expect lattice to have been initialized to something
               FermionActionD &numOp, FermionActionD &denOp, RHMCtype &rhmc, GridParallelRNG &rng,
               int inv_pow, const std::string &quark_descr, int action_or_md){
  assert(action_or_md == 0 || action_or_md == 1 || action_or_md == 2);

  FermionFieldD gauss_o(rbGrid);
  FermionFieldD gauss(Grid);
  gaussian(rng, gauss);
  pickCheckerboard(Odd, gauss_o, gauss);

  numOp.ImportGauge(latt);
  denOp.ImportGauge(latt);

  typedef typename FermionActionD::Impl_t FermionImplPolicyD;
  SchurDifferentiableOperator<FermionImplPolicyD> MdagM(numOp);
  SchurDifferentiableOperator<FermionImplPolicyD> VdagV(denOp);

  PowerMethod<FermionFieldD> power_method;
  RealD lambda_max;

  std::cout << "Starting: Get RHMC high bound approx for " << quark_descr << " numerator" << std::endl;
  lambda_max = power_method(MdagM,gauss_o);
  std::cout << GridLogMessage << "Got lambda_max "<<lambda_max<<std::endl;

  std::cout << "Starting: Get RHMC high bound approx for " << quark_descr << " denominator" << std::endl;
  lambda_max = power_method(VdagV,gauss_o);
  std::cout << GridLogMessage << "Got lambda_max "<<lambda_max<<std::endl;

  if(action_or_md == 0 || action_or_md == 2){
    std::cout << "Starting: Checking quality of RHMC action approx for " << quark_descr << " quark numerator and power -1/" << inv_pow << std::endl;
    InversePowerBoundsCheck(inv_pow, 50000, 1e16, MdagM,gauss_o, rhmc.ApproxNegPowerAction); //use large tolerance to prevent exit on fail; we are trying to tune here!
    std::cout << "Finished: Checking quality of RHMC action approx for " << quark_descr << " quark numerator and power -1/" << inv_pow << std::endl;

    std::cout << "Starting: Checking quality of RHMC action approx for " << quark_descr << " quark numerator and power -1/" << 2*inv_pow << std::endl;
    InversePowerBoundsCheck(2*inv_pow, 50000, 1e16, MdagM,gauss_o, rhmc.ApproxNegHalfPowerAction);
    std::cout << "Finished: Checking quality of RHMC action approx for " << quark_descr << " quark numerator and power -1/" << 2*inv_pow << std::endl;

    std::cout << "Starting: Checking quality of RHMC action approx for " << quark_descr << " quark denominator and power -1/" << inv_pow << std::endl;
    InversePowerBoundsCheck(inv_pow, 50000, 1e16, VdagV,gauss_o, rhmc.ApproxNegPowerAction);
    std::cout << "Finished: Checking quality of RHMC action approx for " << quark_descr << " quark denominator and power -1/" << inv_pow << std::endl;

    std::cout << "Starting: Checking quality of RHMC action approx for " << quark_descr << " quark denominator and power -1/" << 2*inv_pow << std::endl;
    InversePowerBoundsCheck(2*inv_pow, 50000, 1e16, VdagV,gauss_o, rhmc.ApproxNegHalfPowerAction);
    std::cout << "Finished: Checking quality of RHMC action approx for " << quark_descr << " quark denominator and power -1/" << 2*inv_pow << std::endl;
  }

  std::cout << "-------------------------------------------------------------------------------" << std::endl;

  if(action_or_md == 1 || action_or_md == 2){
    std::cout << "Starting: Checking quality of RHMC MD approx for " << quark_descr << " quark numerator and power -1/" << inv_pow << std::endl;
    InversePowerBoundsCheck(inv_pow, 50000, 1e16, MdagM,gauss_o, rhmc.ApproxNegPowerMD);
    std::cout << "Finished: Checking quality of RHMC MD approx for " << quark_descr << " quark numerator and power -1/" << inv_pow << std::endl;

    std::cout << "Starting: Checking quality of RHMC MD approx for " << quark_descr << " quark numerator and power -1/" << 2*inv_pow << std::endl;
    InversePowerBoundsCheck(2*inv_pow, 50000, 1e16, MdagM,gauss_o, rhmc.ApproxNegHalfPowerMD);
    std::cout << "Finished: Checking quality of RHMC MD approx for " << quark_descr << " quark numerator and power -1/" << 2*inv_pow << std::endl;

    std::cout << "Starting: Checking quality of RHMC MD approx for " << quark_descr << " quark denominator and power -1/" << inv_pow << std::endl;
    InversePowerBoundsCheck(inv_pow, 50000, 1e16, VdagV,gauss_o, rhmc.ApproxNegPowerMD);
    std::cout << "Finished: Checking quality of RHMC MD approx for " << quark_descr << " quark denominator and power -1/" << inv_pow << std::endl;

    std::cout << "Starting: Checking quality of RHMC MD approx for " << quark_descr << " quark denominator and power -1/" << 2*inv_pow << std::endl;
    InversePowerBoundsCheck(2*inv_pow, 50000, 1e16, VdagV,gauss_o, rhmc.ApproxNegHalfPowerMD);
    std::cout << "Finished: Checking quality of RHMC MD approx for " << quark_descr << " quark denominator and power -1/" << 2*inv_pow << std::endl;
  }
}

template<typename FermionImplPolicy>
void checkEOFA(ExactOneFlavourRatioPseudoFermionAction<FermionImplPolicy> &EOFA,
               GridCartesian* FGrid, GridParallelRNG &rng, const LatticeGaugeFieldD &latt){
  std::cout << GridLogMessage << "Starting EOFA action/bounds check" << std::endl;
  typename FermionImplPolicy::FermionField eta(FGrid);
  RealD scale = std::sqrt(0.5);
  gaussian(rng,eta); eta = eta * scale;

  //Use the inbuilt check: refreshing the pseudofermion and evaluating the action
  //runs the internal rational approximation bounds checks
  EOFA.refresh(latt, eta);
  EOFA.S(latt);
  std::cout << GridLogMessage << "Finished EOFA action/bounds check" << std::endl;
}

template<typename FermionImplPolicy>
class EOFAlinop: public LinearOperatorBase<typename FermionImplPolicy::FermionField>{
  ExactOneFlavourRatioPseudoFermionAction<FermionImplPolicy> &EOFA;
  LatticeGaugeFieldD &U;
public:
  EOFAlinop(ExactOneFlavourRatioPseudoFermionAction<FermionImplPolicy> &EOFA, LatticeGaugeFieldD &U): EOFA(EOFA), U(U){}

  typedef typename FermionImplPolicy::FermionField Field;
  void OpDiag (const Field &in, Field &out){ assert(0); }
  void OpDir  (const Field &in, Field &out,int dir,int disp){ assert(0); }
  void OpDirAll (const Field &in, std::vector<Field> &out){ assert(0); }

  void Op     (const Field &in, Field &out){ assert(0); }
  void AdjOp  (const Field &in, Field &out){ assert(0); }
  void HermOpAndNorm(const Field &in, Field &out,RealD &n1,RealD &n2){ assert(0); }
  void HermOp(const Field &in, Field &out){ EOFA.Meofa(U, in, out); }
};

template<typename FermionImplPolicy>
void upperBoundEOFA(ExactOneFlavourRatioPseudoFermionAction<FermionImplPolicy> &EOFA,
                    GridCartesian* FGrid, GridParallelRNG &rng, LatticeGaugeFieldD &latt){
  std::cout << GridLogMessage << "Starting EOFA upper bound compute" << std::endl;
  EOFAlinop<FermionImplPolicy> linop(EOFA, latt);
  typename FermionImplPolicy::FermionField eta(FGrid);
  gaussian(rng,eta);
  PowerMethod<typename FermionImplPolicy::FermionField> power_method;
  auto lambda_max = power_method(linop,eta);
  std::cout << GridLogMessage << "Upper bound of EOFA operator " << lambda_max << std::endl;
}

//Applications of M^{-1} cost the same as M for EOFA!
template<typename FermionImplPolicy>
class EOFAinvLinop: public LinearOperatorBase<typename FermionImplPolicy::FermionField>{
  ExactOneFlavourRatioPseudoFermionAction<FermionImplPolicy> &EOFA;
  LatticeGaugeFieldD &U;
public:
  EOFAinvLinop(ExactOneFlavourRatioPseudoFermionAction<FermionImplPolicy> &EOFA, LatticeGaugeFieldD &U): EOFA(EOFA), U(U){}

  typedef typename FermionImplPolicy::FermionField Field;
  void OpDiag (const Field &in, Field &out){ assert(0); }
  void OpDir  (const Field &in, Field &out,int dir,int disp){ assert(0); }
  void OpDirAll (const Field &in, std::vector<Field> &out){ assert(0); }

  void Op     (const Field &in, Field &out){ assert(0); }
  void AdjOp  (const Field &in, Field &out){ assert(0); }
  void HermOpAndNorm(const Field &in, Field &out,RealD &n1,RealD &n2){ assert(0); }
  void HermOp(const Field &in, Field &out){ EOFA.MeofaInv(U, in, out); }
};

template<typename FermionImplPolicy>
void lowerBoundEOFA(ExactOneFlavourRatioPseudoFermionAction<FermionImplPolicy> &EOFA,
                    GridCartesian* FGrid, GridParallelRNG &rng, LatticeGaugeFieldD &latt){
  std::cout << GridLogMessage << "Starting EOFA lower bound compute using power method on M^{-1}. Inverse of highest eigenvalue is the lowest eigenvalue of M" << std::endl;
  EOFAinvLinop<FermionImplPolicy> linop(EOFA, latt);
  typename FermionImplPolicy::FermionField eta(FGrid);
  gaussian(rng,eta);
  PowerMethod<typename FermionImplPolicy::FermionField> power_method;
  auto lambda_max = power_method(linop,eta);
  std::cout << GridLogMessage << "Lower bound of EOFA operator " << 1./lambda_max << std::endl;
}

NAMESPACE_BEGIN(Grid);

template<class FermionOperatorD, class FermionOperatorF, class SchurOperatorD, class SchurOperatorF>
class MixedPrecisionConjugateGradientOperatorFunction : public OperatorFunction<typename FermionOperatorD::FermionField> {
public:
  typedef typename FermionOperatorD::FermionField FieldD;
  typedef typename FermionOperatorF::FermionField FieldF;

  using OperatorFunction<FieldD>::operator();

  RealD   Tolerance;
  RealD   InnerTolerance; //Initial tolerance for inner CG. Defaults to Tolerance but can be changed
  Integer MaxInnerIterations;
  Integer MaxOuterIterations;
  GridBase* SinglePrecGrid4; //Grid for single-precision fields
  GridBase* SinglePrecGrid5; //Grid for single-precision fields
  RealD   OuterLoopNormMult; //Stop the outer loop and move to a final double prec solve when the residual is OuterLoopNormMult * Tolerance

  FermionOperatorF &FermOpF;
  FermionOperatorD &FermOpD;
  SchurOperatorF   &LinOpF;
  SchurOperatorD   &LinOpD;

  Integer TotalInnerIterations;     //Number of inner CG iterations
  Integer TotalOuterIterations;     //Number of restarts
  Integer TotalFinalStepIterations; //Number of CG iterations in final patch-up step

  MixedPrecisionConjugateGradientOperatorFunction(RealD tol,
                                                  Integer maxinnerit,
                                                  Integer maxouterit,
                                                  GridBase* _sp_grid4,
                                                  GridBase* _sp_grid5,
                                                  FermionOperatorF &_FermOpF,
                                                  FermionOperatorD &_FermOpD,
                                                  SchurOperatorF   &_LinOpF,
                                                  SchurOperatorD   &_LinOpD):
    LinOpF(_LinOpF),
    LinOpD(_LinOpD),
    FermOpF(_FermOpF),
    FermOpD(_FermOpD),
    Tolerance(tol),
    InnerTolerance(tol),
    MaxInnerIterations(maxinnerit),
    MaxOuterIterations(maxouterit),
    SinglePrecGrid4(_sp_grid4),
    SinglePrecGrid5(_sp_grid5),
    OuterLoopNormMult(100.)
  {
  }

  void operator()(LinearOperatorBase<FieldD> &LinOpU, const FieldD &src, FieldD &psi) {

    std::cout << GridLogMessage << " Mixed precision CG wrapper operator() "<<std::endl;

    SchurOperatorD * SchurOpU = static_cast<SchurOperatorD *>(&LinOpU);
    assert(&(SchurOpU->_Mat)==&(LinOpD._Mat));

    //Refresh the single-precision gauge field from the double-precision one
    precisionChange(FermOpF.Umu, FermOpD.Umu);

    pickCheckerboard(Even,FermOpF.UmuEven,FermOpF.Umu);
    pickCheckerboard(Odd ,FermOpF.UmuOdd ,FermOpF.Umu);

    ////////////////////////////////////////////////////////////////////////////////////
    // Make a mixed precision conjugate gradient
    ////////////////////////////////////////////////////////////////////////////////////
    MixedPrecisionConjugateGradient<FieldD,FieldF> MPCG(Tolerance,MaxInnerIterations,MaxOuterIterations,SinglePrecGrid5,LinOpF,LinOpD);
    MPCG.InnerTolerance = InnerTolerance;
    std::cout << GridLogMessage << "Calling mixed precision Conjugate Gradient" <<std::endl;
    MPCG(src,psi);
  }
};


template<class FermionOperatorD, class FermionOperatorF, class SchurOperatorD, class SchurOperatorF>
class MixedPrecisionReliableUpdateConjugateGradientOperatorFunction : public OperatorFunction<typename FermionOperatorD::FermionField> {
public:
  typedef typename FermionOperatorD::FermionField FieldD;
  typedef typename FermionOperatorF::FermionField FieldF;

  using OperatorFunction<FieldD>::operator();

  RealD   Tolerance;
  Integer MaxIterations;

  RealD Delta; //reliable update parameter

  GridBase* SinglePrecGrid4; //Grid for single-precision fields
  GridBase* SinglePrecGrid5; //Grid for single-precision fields

  FermionOperatorF &FermOpF;
  FermionOperatorD &FermOpD;
  SchurOperatorF   &LinOpF;
  SchurOperatorD   &LinOpD;

  MixedPrecisionReliableUpdateConjugateGradientOperatorFunction(RealD tol,
                                                                RealD delta,
                                                                Integer maxit,
                                                                GridBase* _sp_grid4,
                                                                GridBase* _sp_grid5,
                                                                FermionOperatorF &_FermOpF,
                                                                FermionOperatorD &_FermOpD,
                                                                SchurOperatorF   &_LinOpF,
                                                                SchurOperatorD   &_LinOpD):
    LinOpF(_LinOpF),
    LinOpD(_LinOpD),
    FermOpF(_FermOpF),
    FermOpD(_FermOpD),
    Tolerance(tol),
    Delta(delta),
    MaxIterations(maxit),
    SinglePrecGrid4(_sp_grid4),
    SinglePrecGrid5(_sp_grid5)
  {
  }

  void operator()(LinearOperatorBase<FieldD> &LinOpU, const FieldD &src, FieldD &psi) {

    std::cout << GridLogMessage << " Mixed precision reliable update CG wrapper operator() "<<std::endl;

    SchurOperatorD * SchurOpU = static_cast<SchurOperatorD *>(&LinOpU);
    assert(&(SchurOpU->_Mat)==&(LinOpD._Mat));

    precisionChange(FermOpF.Umu, FermOpD.Umu);

    pickCheckerboard(Even,FermOpF.UmuEven,FermOpF.Umu);
    pickCheckerboard(Odd ,FermOpF.UmuOdd ,FermOpF.Umu);

    ////////////////////////////////////////////////////////////////////////////////////
    // Make a mixed precision conjugate gradient
    ////////////////////////////////////////////////////////////////////////////////////

    ConjugateGradientReliableUpdate<FieldD,FieldF> MPCG(Tolerance,MaxIterations,Delta,SinglePrecGrid5,LinOpF,LinOpD);
    std::cout << GridLogMessage << "Calling mixed precision reliable update Conjugate Gradient" <<std::endl;
    MPCG(src,psi);
  }
};
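
//Design note on the two wrappers above, as we understand the trade-off: the restarted
//defect-correction CG (first class) periodically recomputes the residual in double
//precision and restarts the single-precision solve, while the reliable-update variant
//runs a single CG in which the solution and true residual are refreshed in double
//precision whenever the single-precision residual falls by the factor Delta, avoiding
//full restarts.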


NAMESPACE_END(Grid);



int main(int argc, char **argv) {
  Grid_init(&argc, &argv);
  int threads = GridThread::GetThreads();
  // here make a routine to print all the relevant information on the run
  std::cout << GridLogMessage << "Grid is setup to use " << threads << " threads" << std::endl;

  std::string param_file = "params.xml";
  bool file_load_check = false;

  std::string serial_seeds = "1 2 3 4 5";
  std::string parallel_seeds = "6 7 8 9 10";

  int i=1;
  while(i < argc){
    std::string sarg(argv[i]);
    if(sarg == "--param_file"){
      assert(i!=argc-1);
      param_file = argv[i+1];
      i+=2;
    }else if(sarg == "--read_check"){ //check the fields load correctly and pass checksum/plaquette repro
      file_load_check = true;
      i++;
    }else if(sarg == "--set_seeds"){ //set the rng seeds. Expects two vector args, e.g. --set_seeds 1.2.3.4 5.6.7.8
      assert(i < argc-2);
      std::vector<int> tmp;
      GridCmdOptionIntVector(argv[i+1],tmp);
      {
        std::stringstream ss;
        for(int j=0;j<tmp.size()-1;j++) ss << tmp[j] << " ";
        ss << tmp.back();
        serial_seeds = ss.str();
      }
      GridCmdOptionIntVector(argv[i+2],tmp);
      {
        std::stringstream ss;
        for(int j=0;j<tmp.size()-1;j++) ss << tmp[j] << " ";
        ss << tmp.back();
        parallel_seeds = ss.str();
      }
      i+=3;
      std::cout << GridLogMessage << "Set serial seeds to " << serial_seeds << std::endl;
      std::cout << GridLogMessage << "Set parallel seeds to " << parallel_seeds << std::endl;
    }else{
      i++;
    }
  }
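
  //Example invocation (hypothetical executable name and geometry, shown only to
  //illustrate the flag syntax; --grid and --mpi are the standard Grid options):
  //  ./Mobius2p1fIDSDRGparityEOFA --param_file params.xml \
  //      --set_seeds 1.2.3.4 5.6.7.8 --grid 16.16.16.32 --mpi 1.1.1.2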

  //Read the user parameters
  EvolParameters user_params;

  if(fileExists(param_file)){
    std::cout << GridLogMessage << " Reading " << param_file << std::endl;
    Grid::XmlReader rd(param_file);
    read(rd, "Params", user_params);
  }else{
    //Write a template on the head rank, then exit on *all* ranks, so the job does not
    //continue with rank 0 gone while the others run on with default parameters
    if(!GlobalSharedMemory::WorldRank){
      std::cout << GridLogMessage << " File " << param_file << " does not exist" << std::endl;
      std::cout << GridLogMessage << " Writing xml template to " << param_file << ".templ" << std::endl;
      {
        Grid::XmlWriter wr(param_file + ".templ");
        write(wr, "Params", user_params);
      }
      std::cout << GridLogMessage << " Done" << std::endl;
    }
    Grid_finalize();
    return 0;
  }

  //Check the parameters
  if(user_params.GparityDirs.size() != Nd-1){
    std::cerr << "Error in input parameters: expect GparityDirs to have size = " << Nd-1 << std::endl;
    exit(1);
  }
  for(int i=0;i<Nd-1;i++)
    if(user_params.GparityDirs[i] != 0 && user_params.GparityDirs[i] != 1){
      std::cerr << "Error in input parameters: expect GparityDirs values to be 0 (periodic) or 1 (G-parity)" << std::endl;
      exit(1);
    }


  typedef GparityMobiusEOFAFermionD EOFAactionD;
  typedef GparityMobiusFermionD FermionActionD;
  typedef typename FermionActionD::Impl_t FermionImplPolicyD;
  typedef typename FermionActionD::FermionField FermionFieldD;

  typedef GparityMobiusEOFAFermionF EOFAactionF;
  typedef GparityMobiusFermionF FermionActionF;
  typedef typename FermionActionF::Impl_t FermionImplPolicyF;
  typedef typename FermionActionF::FermionField FermionFieldF;

  typedef GeneralEvenOddRatioRationalMixedPrecPseudoFermionAction<FermionImplPolicyD,FermionImplPolicyF> MixedPrecRHMC;
  typedef GeneralEvenOddRatioRationalPseudoFermionAction<FermionImplPolicyD> DoublePrecRHMC;
|
||||
|
||||
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
|
||||
IntegratorParameters MD;
|
||||
typedef ConjugateHMCRunnerD<MinimumNorm2> HMCWrapper; //NB: This is the "Omelyan integrator"
|
||||
MD.name = std::string("MinimumNorm2");
|
||||
|
||||
// typedef ConjugateHMCRunnerD<ForceGradient> HMCWrapper;
|
||||
// MD.name = std::string("ForceGradient");
|
||||
|
||||
MD.MDsteps = user_params.Steps;
|
||||
MD.trajL = user_params.TrajectoryLength;
|
||||
|
||||
typedef HMCWrapper::ImplPolicy GaugeImplPolicy;
|
||||
|
||||
HMCparameters HMCparams;
|
||||
HMCparams.StartTrajectory = user_params.StartTrajectory;
|
||||
HMCparams.Trajectories = user_params.Trajectories;
|
||||
HMCparams.NoMetropolisUntil= 0;
|
||||
HMCparams.StartingType = user_params.StartingType;
|
||||
HMCparams.MetropolisTest = user_params.MetropolisTest;
|
||||
HMCparams.MD = MD;
|
||||
HMCWrapper TheHMC(HMCparams);
|
||||
|
||||
// Grid from the command line arguments --grid and --mpi
|
||||
TheHMC.Resources.AddFourDimGrid("gauge"); // use default simd lanes decomposition
|
||||
|
||||
CheckpointerParameters CPparams;
|
||||
CPparams.config_prefix = "ckpoint_lat";
|
||||
CPparams.rng_prefix = "ckpoint_rng";
|
||||
CPparams.saveInterval = user_params.SaveInterval;
|
||||
CPparams.format = "IEEE64BIG";
|
||||
TheHMC.Resources.LoadNerscCheckpointer(CPparams);
|
||||
|
||||
//Note that checkpointing saves the RNG state so that this initialization is required only for the very first configuration
|
||||
RNGModuleParameters RNGpar;
|
||||
RNGpar.serial_seeds = serial_seeds;
|
||||
RNGpar.parallel_seeds = parallel_seeds;
|
||||
TheHMC.Resources.SetRNGSeeds(RNGpar);
|
||||
|
||||
typedef PlaquetteMod<GaugeImplPolicy> PlaqObs;
|
||||
TheHMC.Resources.AddObservable<PlaqObs>();
|
||||
//////////////////////////////////////////////
|
||||
//aiming for ainv=1.723 GeV
|
||||
// me bob
|
||||
//Estimated a(ml+mres) [40ID] = 0.001305 0.00131
|
||||
// a(mh+mres) [40ID] = 0.035910 0.03529
|
||||
//Estimate Ls=12, b+c=2 mres~0.0011
|
||||
|
||||
//1/24/2022 initial mres measurement gives mres=0.001, adjusted light quark mass to 0.0003 from 0.0001
|
||||
|
||||
const int Ls = 12;
|
||||
Real beta = 1.848;
|
||||
Real light_mass = 0.0003;
|
||||
Real strange_mass = 0.0342;
|
||||
Real pv_mass = 1.0;
|
||||
RealD M5 = 1.8;
|
||||
RealD mobius_scale = 2.; //b+c
|
||||
|
||||
RealD mob_bmc = 1.0;
|
||||
RealD mob_b = (mobius_scale + mob_bmc)/2.;
|
||||
RealD mob_c = (mobius_scale - mob_bmc)/2.;
|
||||
|
||||
std::cout << GridLogMessage
|
||||
<< "Ensemble parameters:" << std::endl
|
||||
<< "Ls=" << Ls << std::endl
|
||||
<< "beta=" << beta << std::endl
|
||||
<< "light_mass=" << light_mass << std::endl
|
||||
<< "strange_mass=" << strange_mass << std::endl
|
||||
<< "mobius_scale=" << mobius_scale << std::endl;
|
||||
|
||||
//Setup the Grids
|
||||
auto UGridD = TheHMC.Resources.GetCartesian();
|
||||
auto UrbGridD = TheHMC.Resources.GetRBCartesian();
|
||||
auto FGridD = SpaceTimeGrid::makeFiveDimGrid(Ls,UGridD);
|
||||
auto FrbGridD = SpaceTimeGrid::makeFiveDimRedBlackGrid(Ls,UGridD);
|
||||
|
||||
GridCartesian* UGridF = SpaceTimeGrid::makeFourDimGrid(GridDefaultLatt(), GridDefaultSimd(Nd, vComplexF::Nsimd()), GridDefaultMpi());
|
||||
GridRedBlackCartesian* UrbGridF = SpaceTimeGrid::makeFourDimRedBlackGrid(UGridF);
|
||||
auto FGridF = SpaceTimeGrid::makeFiveDimGrid(Ls,UGridF);
|
||||
auto FrbGridF = SpaceTimeGrid::makeFiveDimRedBlackGrid(Ls,UGridF);
|
||||
|
||||
ConjugateIwasakiGaugeActionD GaugeAction(beta);
|
||||
|
||||
// temporarily need a gauge field
|
||||
LatticeGaugeFieldD Ud(UGridD);
|
||||
LatticeGaugeFieldF Uf(UGridF);
|
||||
|
||||
//Setup the BCs
|
||||
FermionActionD::ImplParams Params;
|
||||
for(int i=0;i<Nd-1;i++) Params.twists[i] = user_params.GparityDirs[i]; //G-parity directions
|
||||
Params.twists[Nd-1] = 1; //APBC in time direction
|
||||
|
||||
std::vector<int> dirs4(Nd);
|
||||
for(int i=0;i<Nd-1;i++) dirs4[i] = user_params.GparityDirs[i];
|
||||
dirs4[Nd-1] = 0; //periodic gauge BC in time
|
||||
|
||||
GaugeImplPolicy::setDirections(dirs4); //gauge BC
|
||||
|
||||
//Run optional gauge field checksum checker and exit
|
||||
if(file_load_check){
|
||||
TheHMC.initializeGaugeFieldAndRNGs(Ud);
|
||||
std::cout << GridLogMessage << " Done" << std::endl;
|
||||
Grid_finalize();
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
////////////////////////////////////
|
||||
// Collect actions
|
||||
////////////////////////////////////
|
||||
ActionLevel<HMCWrapper::Field> Level1(1); //light quark + strange quark
|
||||
ActionLevel<HMCWrapper::Field> Level2(4); //DSDR
|
||||
ActionLevel<HMCWrapper::Field> Level3(2); //gauge
|
||||
|
||||
|
||||
/////////////////////////////////////////////////////////////
|
||||
// Light EOFA action
|
||||
// have to be careful with the parameters, cf. Test_dwf_gpforce_eofa.cc
|
||||
/////////////////////////////////////////////////////////////
|
||||
typedef SchurDiagMooeeOperator<EOFAactionD,FermionFieldD> EOFAschuropD;
|
||||
typedef SchurDiagMooeeOperator<EOFAactionF,FermionFieldF> EOFAschuropF;
|
||||
typedef ExactOneFlavourRatioMixedPrecHeatbathPseudoFermionAction<FermionImplPolicyD, FermionImplPolicyF> EOFAmixPrecPFaction;
|
||||
typedef MixedPrecisionConjugateGradientOperatorFunction<EOFAactionD, EOFAactionF, EOFAschuropD, EOFAschuropF> EOFA_mxCG;
|
||||
typedef MixedPrecisionReliableUpdateConjugateGradientOperatorFunction<EOFAactionD, EOFAactionF, EOFAschuropD, EOFAschuropF> EOFA_relupCG;
|
||||
|
||||
|
||||
std::vector<RealD> eofa_light_masses = { light_mass , 0.004, 0.016, 0.064, 0.256 };
|
||||
std::vector<RealD> eofa_pv_masses = { 0.004 , 0.016, 0.064, 0.256, 1.0 };
|
||||
int n_light_hsb = 5;
|
||||
assert(user_params.eofa_l.size() == n_light_hsb);
|
||||
|
||||
EOFAmixPrecPFaction* EOFA_pfactions[n_light_hsb];
|
||||
|
||||
for(int i=0;i<n_light_hsb;i++){
|
||||
RealD iml = eofa_light_masses[i];
|
||||
RealD ipv = eofa_pv_masses[i];
|
||||
|
||||
EOFAactionD* LopD = new EOFAactionD(Ud, *FGridD, *FrbGridD, *UGridD, *UrbGridD, iml, iml, ipv, 0.0, -1, M5, mob_b, mob_c, Params);
|
||||
EOFAactionF* LopF = new EOFAactionF(Uf, *FGridF, *FrbGridF, *UGridF, *UrbGridF, iml, iml, ipv, 0.0, -1, M5, mob_b, mob_c, Params);
|
||||
EOFAactionD* RopD = new EOFAactionD(Ud, *FGridD, *FrbGridD, *UGridD, *UrbGridD, ipv, iml, ipv, -1.0, 1, M5, mob_b, mob_c, Params);
|
||||
EOFAactionF* RopF = new EOFAactionF(Uf, *FGridF, *FrbGridF, *UGridF, *UrbGridF, ipv, iml, ipv, -1.0, 1, M5, mob_b, mob_c, Params);
|
||||
|
||||
EOFAschuropD* linopL_D = new EOFAschuropD(*LopD);
|
||||
EOFAschuropD* linopR_D = new EOFAschuropD(*RopD);
|
||||
|
||||
EOFAschuropF* linopL_F = new EOFAschuropF(*LopF);
|
||||
EOFAschuropF* linopR_F = new EOFAschuropF(*RopF);
|
||||
|
||||
#if 1
|
||||
//Note reusing user_params.eofa_l.action(|md)_mixcg_inner_tolerance as Delta for now
|
||||
EOFA_relupCG* ActionMCG_L = new EOFA_relupCG(user_params.eofa_l[i].action_tolerance, user_params.eofa_l[i].action_mixcg_inner_tolerance, 50000, UGridF, FrbGridF, *LopF, *LopD, *linopL_F, *linopL_D);
|
||||
EOFA_relupCG* ActionMCG_R = new EOFA_relupCG(user_params.eofa_l[i].action_tolerance, user_params.eofa_l[i].action_mixcg_inner_tolerance, 50000, UGridF, FrbGridF, *RopF, *RopD, *linopR_F, *linopR_D);
|
||||
|
||||
EOFA_relupCG* DerivMCG_L = new EOFA_relupCG(user_params.eofa_l[i].md_tolerance, user_params.eofa_l[i].md_mixcg_inner_tolerance, 50000, UGridF, FrbGridF, *LopF, *LopD, *linopL_F, *linopL_D);
|
||||
EOFA_relupCG* DerivMCG_R = new EOFA_relupCG(user_params.eofa_l[i].md_tolerance, user_params.eofa_l[i].md_mixcg_inner_tolerance, 50000, UGridF, FrbGridF, *RopF, *RopD, *linopR_F, *linopR_D);
|
||||
|
||||
#else
|
||||
EOFA_mxCG* ActionMCG_L = new EOFA_mxCG(user_params.eofa_l[i].action_tolerance, 50000, 1000, UGridF, FrbGridF, *LopF, *LopD, *linopL_F, *linopL_D);
|
||||
ActionMCG_L->InnerTolerance = user_params.eofa_l[i].action_mixcg_inner_tolerance;
|
||||
|
||||
EOFA_mxCG* ActionMCG_R = new EOFA_mxCG(user_params.eofa_l[i].action_tolerance, 50000, 1000, UGridF, FrbGridF, *RopF, *RopD, *linopR_F, *linopR_D);
|
||||
ActionMCG_R->InnerTolerance = user_params.eofa_l[i].action_mixcg_inner_tolerance;
|
||||
|
||||
EOFA_mxCG* DerivMCG_L = new EOFA_mxCG(user_params.eofa_l[i].md_tolerance, 50000, 1000, UGridF, FrbGridF, *LopF, *LopD, *linopL_F, *linopL_D);
|
||||
DerivMCG_L->InnerTolerance = user_params.eofa_l[i].md_mixcg_inner_tolerance;
|
||||
|
||||
EOFA_mxCG* DerivMCG_R = new EOFA_mxCG(user_params.eofa_l[i].md_tolerance, 50000, 1000, UGridF, FrbGridF, *RopF, *RopD, *linopR_F, *linopR_D);
|
||||
DerivMCG_R->InnerTolerance = user_params.eofa_l[i].md_mixcg_inner_tolerance;
|
||||
|
||||
std::cout << GridLogMessage << "Set EOFA action solver action tolerance outer=" << ActionMCG_L->Tolerance << " inner=" << ActionMCG_L->InnerTolerance << std::endl;
|
||||
std::cout << GridLogMessage << "Set EOFA MD solver tolerance outer=" << DerivMCG_L->Tolerance << " inner=" << DerivMCG_L->InnerTolerance << std::endl;
|
||||
#endif
|
||||
|
||||
EOFAmixPrecPFaction* EOFA = new EOFAmixPrecPFaction(*LopF, *RopF,
|
||||
*LopD, *RopD,
|
||||
*ActionMCG_L, *ActionMCG_R,
|
||||
*ActionMCG_L, *ActionMCG_R,
|
||||
*DerivMCG_L, *DerivMCG_R,
|
||||
user_params.eofa_l[i].rat_params, true);
|
||||
EOFA_pfactions[i] = EOFA;
|
||||
Level1.push_back(EOFA);
|
||||
}
|
||||
|
||||
////////////////////////////////////
|
||||
// Strange action
|
||||
////////////////////////////////////
|
||||
FermionActionD Numerator_sD(Ud,*FGridD,*FrbGridD,*UGridD,*UrbGridD,strange_mass,M5,mob_b,mob_c,Params);
|
||||
FermionActionD Denominator_sD(Ud,*FGridD,*FrbGridD,*UGridD,*UrbGridD, pv_mass,M5,mob_b,mob_c,Params);
|
||||
|
||||
FermionActionF Numerator_sF(Uf,*FGridF,*FrbGridF,*UGridF,*UrbGridF,strange_mass,M5,mob_b,mob_c,Params);
|
||||
FermionActionF Denominator_sF(Uf,*FGridF,*FrbGridF,*UGridF,*UrbGridF, pv_mass,M5,mob_b,mob_c,Params);
|
||||
|
||||
RationalActionParams rat_act_params_s;
|
||||
rat_act_params_s.inv_pow = 4; // (M^dag M)^{1/4}
|
||||
rat_act_params_s.precision= 60;
|
||||
rat_act_params_s.MaxIter = 50000;
|
||||
user_params.rat_quo_s.Export(rat_act_params_s);
|
||||
std::cout << GridLogMessage << " Heavy quark bounds check every " << rat_act_params_s.BoundsCheckFreq << " trajectories (avg)" << std::endl;
|
||||
|
||||
//MixedPrecRHMC Quotient_s(Denominator_sD, Numerator_sD, Denominator_sF, Numerator_sF, rat_act_params_s, user_params.rat_quo_s.reliable_update_freq);
|
||||
DoublePrecRHMC Quotient_s(Denominator_sD, Numerator_sD, rat_act_params_s);
|
||||
Level1.push_back(&Quotient_s);
|
||||
|
||||
///////////////////////////////////
|
||||
// DSDR action
|
||||
///////////////////////////////////
|
||||
RealD dsdr_mass=-1.8;
|
||||
//Use same DSDR twists as https://arxiv.org/pdf/1208.4412.pdf
|
||||
RealD dsdr_epsilon_f = 0.02; //numerator (in determinant)
|
||||
RealD dsdr_epsilon_b = 0.5;
|
||||
GparityWilsonTMFermionD Numerator_DSDR_D(Ud, *UGridD, *UrbGridD, dsdr_mass, dsdr_epsilon_f, Params);
|
||||
GparityWilsonTMFermionF Numerator_DSDR_F(Uf, *UGridF, *UrbGridF, dsdr_mass, dsdr_epsilon_f, Params);
|
||||
|
||||
GparityWilsonTMFermionD Denominator_DSDR_D(Ud, *UGridD, *UrbGridD, dsdr_mass, dsdr_epsilon_b, Params);
|
||||
GparityWilsonTMFermionF Denominator_DSDR_F(Uf, *UGridF, *UrbGridF, dsdr_mass, dsdr_epsilon_b, Params);
|
||||
|
||||
RationalActionParams rat_act_params_DSDR;
|
||||
rat_act_params_DSDR.inv_pow = 2; // (M^dag M)^{1/2}
|
||||
rat_act_params_DSDR.precision= 60;
|
||||
rat_act_params_DSDR.MaxIter = 50000;
|
||||
user_params.rat_quo_DSDR.Export(rat_act_params_DSDR);
|
||||
std::cout << GridLogMessage << "DSDR quark bounds check every " << rat_act_params_DSDR.BoundsCheckFreq << " trajectories (avg)" << std::endl;
|
||||
|
||||
DoublePrecRHMC Quotient_DSDR(Denominator_DSDR_D, Numerator_DSDR_D, rat_act_params_DSDR);
|
||||
Level2.push_back(&Quotient_DSDR);
|
||||
|
||||
/////////////////////////////////////////////////////////////
|
||||
// Gauge action
|
||||
/////////////////////////////////////////////////////////////
|
||||
Level3.push_back(&GaugeAction);
|
||||
|
||||
TheHMC.TheAction.push_back(Level1);
|
||||
TheHMC.TheAction.push_back(Level2);
|
||||
TheHMC.TheAction.push_back(Level3);
|
||||
std::cout << GridLogMessage << " Action complete "<< std::endl;
|
||||
|
||||
|
||||
//Action tuning
|
||||
bool
|
||||
tune_rhmc_s=false, eigenrange_s=false,
|
||||
tune_rhmc_DSDR=false, eigenrange_DSDR=false,
|
||||
check_eofa=false,
|
||||
upper_bound_eofa=false, lower_bound_eofa(false);
|
||||
|
||||
std::string lanc_params_s;
|
||||
std::string lanc_params_DSDR;
|
||||
int tune_rhmc_s_action_or_md;
|
||||
int tune_rhmc_DSDR_action_or_md;
|
||||
int eofa_which_hsb;
|
||||
|
||||
for(int i=1;i<argc;i++){
|
||||
std::string sarg(argv[i]);
|
||||
if(sarg == "--tune_rhmc_s"){
|
||||
assert(i < argc-1);
|
||||
tune_rhmc_s=true;
|
||||
tune_rhmc_s_action_or_md = std::stoi(argv[i+1]);
|
||||
}
|
||||
else if(sarg == "--eigenrange_s"){
|
||||
assert(i < argc-1);
|
||||
eigenrange_s=true;
|
||||
lanc_params_s = argv[i+1];
|
||||
}
|
||||
else if(sarg == "--tune_rhmc_DSDR"){
|
||||
assert(i < argc-1);
|
||||
tune_rhmc_DSDR=true;
|
||||
tune_rhmc_DSDR_action_or_md = std::stoi(argv[i+1]);
|
||||
}
|
||||
else if(sarg == "--eigenrange_DSDR"){
|
||||
assert(i < argc-1);
|
||||
eigenrange_DSDR=true;
|
||||
lanc_params_DSDR = argv[i+1];
|
||||
}
|
||||
else if(sarg == "--check_eofa"){
|
||||
assert(i < argc-1);
|
||||
check_eofa = true;
|
||||
eofa_which_hsb = std::stoi(argv[i+1]); //-1 indicates all hasenbusch
|
||||
assert(eofa_which_hsb == -1 || (eofa_which_hsb >= 0 && eofa_which_hsb < n_light_hsb) );
|
||||
}
|
||||
else if(sarg == "--upper_bound_eofa"){
|
||||
assert(i < argc-1);
|
||||
upper_bound_eofa = true;
|
||||
eofa_which_hsb = std::stoi(argv[i+1]);
|
||||
assert(eofa_which_hsb >= 0 && eofa_which_hsb < n_light_hsb);
|
||||
}
|
||||
else if(sarg == "--lower_bound_eofa"){
|
||||
assert(i < argc-1);
|
||||
lower_bound_eofa = true;
|
||||
eofa_which_hsb = std::stoi(argv[i+1]);
|
||||
assert(eofa_which_hsb >= 0 && eofa_which_hsb < n_light_hsb);
|
||||
}
|
||||
}
|
||||
if(tune_rhmc_s || eigenrange_s || tune_rhmc_DSDR || eigenrange_DSDR ||check_eofa || upper_bound_eofa || lower_bound_eofa) {
|
||||
std::cout << GridLogMessage << "Running checks" << std::endl;
|
||||
TheHMC.initializeGaugeFieldAndRNGs(Ud);
|
||||
|
||||
//std::cout << GridLogMessage << "EOFA action solver action tolerance outer=" << ActionMCG_L.Tolerance << " inner=" << ActionMCG_L.InnerTolerance << std::endl;
|
||||
//std::cout << GridLogMessage << "EOFA MD solver tolerance outer=" << DerivMCG_L.Tolerance << " inner=" << DerivMCG_L.InnerTolerance << std::endl;
|
||||
|
||||
if(check_eofa){
|
||||
if(eofa_which_hsb >= 0){
|
||||
std::cout << GridLogMessage << "Starting checking EOFA Hasenbusch " << eofa_which_hsb << std::endl;
|
||||
checkEOFA(*EOFA_pfactions[eofa_which_hsb], FGridD, TheHMC.Resources.GetParallelRNG(), Ud);
|
||||
std::cout << GridLogMessage << "Finished checking EOFA Hasenbusch " << eofa_which_hsb << std::endl;
|
||||
}else{
|
||||
for(int i=0;i<n_light_hsb;i++){
|
||||
std::cout << GridLogMessage << "Starting checking EOFA Hasenbusch " << i << std::endl;
|
||||
checkEOFA(*EOFA_pfactions[i], FGridD, TheHMC.Resources.GetParallelRNG(), Ud);
|
||||
std::cout << GridLogMessage << "Finished checking EOFA Hasenbusch " << i << std::endl;
|
||||
}
|
||||
}
|
||||
}
|
||||
if(upper_bound_eofa) upperBoundEOFA(*EOFA_pfactions[eofa_which_hsb], FGridD, TheHMC.Resources.GetParallelRNG(), Ud);
|
||||
if(lower_bound_eofa) lowerBoundEOFA(*EOFA_pfactions[eofa_which_hsb], FGridD, TheHMC.Resources.GetParallelRNG(), Ud);
|
||||
if(eigenrange_s) computeEigenvalues<FermionActionD, FermionFieldD>(lanc_params_s, FGridD, FrbGridD, Ud, Numerator_sD, TheHMC.Resources.GetParallelRNG());
|
||||
if(tune_rhmc_s) checkRHMC<FermionActionD, FermionFieldD, decltype(Quotient_s)>(FGridD, FrbGridD, Ud, Numerator_sD, Denominator_sD, Quotient_s, TheHMC.Resources.GetParallelRNG(), 4, "strange", tune_rhmc_s_action_or_md);
|
||||
if(eigenrange_DSDR) computeEigenvalues<GparityWilsonTMFermionD, GparityWilsonTMFermionD::FermionField>(lanc_params_DSDR, UGridD, UrbGridD, Ud, Numerator_DSDR_D, TheHMC.Resources.GetParallelRNG());
|
||||
if(tune_rhmc_DSDR) checkRHMC<GparityWilsonTMFermionD, GparityWilsonTMFermionD::FermionField, decltype(Quotient_DSDR)>(UGridD, UrbGridD, Ud, Numerator_DSDR_D, Denominator_DSDR_D, Quotient_DSDR, TheHMC.Resources.GetParallelRNG(), 2, "DSDR", tune_rhmc_DSDR_action_or_md);
|
||||
|
||||
|
||||
std::cout << GridLogMessage << " Done" << std::endl;
|
||||
Grid_finalize();
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
//Run the HMC
|
||||
std::cout << GridLogMessage << " Running the HMC "<< std::endl;
|
||||
TheHMC.Run();
|
||||
|
||||
std::cout << GridLogMessage << " Done" << std::endl;
|
||||
Grid_finalize();
|
||||
return 0;
|
||||
} // main
|
@ -1,873 +0,0 @@
|
||||
/*************************************************************************************
|
||||
|
||||
Grid physics library, www.github.com/paboyle/Grid
|
||||
|
||||
Source file: ./HMC/Mobius2p1fIDSDRGparityEOFA.cc
|
||||
|
||||
Copyright (C) 2015-2016
|
||||
|
||||
Author: Christopher Kelly <ckelly@bnl.gov>
|
||||
Author: Peter Boyle <pabobyle@ph.ed.ac.uk>
|
||||
|
||||
|
||||
This program is free software; you can redistribute it and/or modify
|
||||
it under the terms of the GNU General Public License as published by
|
||||
the Free Software Foundation; either version 2 of the License, or
|
||||
(at your option) any later version.
|
||||
|
||||
This program is distributed in the hope that it will be useful,
|
||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
GNU General Public License for more details.
|
||||
|
||||
You should have received a copy of the GNU General Public License along
|
||||
with this program; if not, write to the Free Software Foundation, Inc.,
|
||||
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
|
||||
See the full license in the file "LICENSE" in the top level distribution
|
||||
directory
|
||||
*************************************************************************************/
|
||||
/* END LEGAL */
|
||||
#include <Grid/Grid.h>
|
||||
|
||||
using namespace Grid;
|
||||
|
||||
//Production binary for the 40ID G-parity ensemble
|
||||
|
||||
struct RatQuoParameters: Serializable {
|
||||
GRID_SERIALIZABLE_CLASS_MEMBERS(RatQuoParameters,
|
||||
double, bnd_lo,
|
||||
double, bnd_hi,
|
||||
Integer, action_degree,
|
||||
double, action_tolerance,
|
||||
Integer, md_degree,
|
||||
double, md_tolerance,
|
||||
Integer, reliable_update_freq,
|
||||
Integer, bnd_check_freq);
|
||||
RatQuoParameters() {
|
||||
bnd_lo = 1e-2;
|
||||
bnd_hi = 30;
|
||||
action_degree = 10;
|
||||
action_tolerance = 1e-10;
|
||||
md_degree = 10;
|
||||
md_tolerance = 1e-8;
|
||||
bnd_check_freq = 20;
|
||||
reliable_update_freq = 50;
|
||||
}
|
||||
|
||||
void Export(RationalActionParams &into) const{
|
||||
into.lo = bnd_lo;
|
||||
into.hi = bnd_hi;
|
||||
into.action_degree = action_degree;
|
||||
into.action_tolerance = action_tolerance;
|
||||
into.md_degree = md_degree;
|
||||
into.md_tolerance = md_tolerance;
|
||||
into.BoundsCheckFreq = bnd_check_freq;
|
||||
}
|
||||
};
|
||||
|
||||
struct EOFAparameters: Serializable {
|
||||
GRID_SERIALIZABLE_CLASS_MEMBERS(EOFAparameters,
|
||||
OneFlavourRationalParams, rat_params,
|
||||
double, action_tolerance,
|
||||
double, action_mixcg_inner_tolerance,
|
||||
double, md_tolerance,
|
||||
double, md_mixcg_inner_tolerance);
|
||||
|
||||
EOFAparameters() {
|
||||
action_mixcg_inner_tolerance = 1e-8;
|
||||
action_tolerance = 1e-10;
|
||||
md_tolerance = 1e-8;
|
||||
md_mixcg_inner_tolerance = 1e-8;
|
||||
|
||||
rat_params.lo = 1.0;
|
||||
rat_params.hi = 25.0;
|
||||
rat_params.MaxIter = 10000;
|
||||
rat_params.tolerance= 1.0e-9;
|
||||
rat_params.degree = 14;
|
||||
rat_params.precision= 50;
|
||||
}
|
||||
};
|
||||
|
||||
struct EvolParameters: Serializable {
|
||||
GRID_SERIALIZABLE_CLASS_MEMBERS(EvolParameters,
|
||||
Integer, StartTrajectory,
|
||||
Integer, Trajectories,
|
||||
Integer, SaveInterval,
|
||||
Integer, Steps,
|
||||
RealD, TrajectoryLength,
|
||||
bool, MetropolisTest,
|
||||
std::string, StartingType,
|
||||
std::vector<Integer>, GparityDirs,
|
||||
std::vector<EOFAparameters>, eofa_l,
|
||||
RatQuoParameters, rat_quo_s,
|
||||
RatQuoParameters, rat_quo_DSDR);
|
||||
|
||||
EvolParameters() {
|
||||
//For initial thermalization; afterwards user should switch Metropolis on and use StartingType=CheckpointStart
|
||||
MetropolisTest = false;
|
||||
StartTrajectory = 0;
|
||||
Trajectories = 50;
|
||||
SaveInterval = 5;
|
||||
StartingType = "ColdStart";
|
||||
GparityDirs.resize(3, 1); //1 for G-parity, 0 for periodic
|
||||
Steps = 5;
|
||||
TrajectoryLength = 1.0;
|
||||
}
|
||||
};
|
||||
|
||||
bool fileExists(const std::string &fn){
|
||||
std::ifstream f(fn);
|
||||
return f.good();
|
||||
}
|
||||
|
||||
|
||||
|
||||
|
||||
struct LanczosParameters: Serializable {
|
||||
GRID_SERIALIZABLE_CLASS_MEMBERS(LanczosParameters,
|
||||
double, alpha,
|
||||
double, beta,
|
||||
double, mu,
|
||||
int, ord,
|
||||
int, n_stop,
|
||||
int, n_want,
|
||||
int, n_use,
|
||||
double, tolerance);
|
||||
|
||||
LanczosParameters() {
|
||||
alpha = 35;
|
||||
beta = 5;
|
||||
mu = 0;
|
||||
ord = 100;
|
||||
n_stop = 10;
|
||||
n_want = 10;
|
||||
n_use = 15;
|
||||
tolerance = 1e-6;
|
||||
}
|
||||
};
|
||||
|
||||
|
||||
|
||||
template<typename FermionActionD, typename FermionFieldD>
|
||||
void computeEigenvalues(std::string param_file,
|
||||
GridCartesian* Grid, GridRedBlackCartesian* rbGrid, const LatticeGaugeFieldD &latt, //expect lattice to have been initialized to something
|
||||
FermionActionD &action, GridParallelRNG &rng){
|
||||
|
||||
LanczosParameters params;
|
||||
if(fileExists(param_file)){
|
||||
std::cout << GridLogMessage << " Reading " << param_file << std::endl;
|
||||
Grid::XmlReader rd(param_file);
|
||||
read(rd, "LanczosParameters", params);
|
||||
}else if(!GlobalSharedMemory::WorldRank){
|
||||
std::cout << GridLogMessage << " File " << param_file << " does not exist" << std::endl;
|
||||
std::cout << GridLogMessage << " Writing xml template to " << param_file << ".templ" << std::endl;
|
||||
Grid::XmlWriter wr(param_file + ".templ");
|
||||
write(wr, "LanczosParameters", params);
|
||||
}
|
||||
|
||||
FermionFieldD gauss_o(rbGrid);
|
||||
FermionFieldD gauss(Grid);
|
||||
gaussian(rng, gauss);
|
||||
pickCheckerboard(Odd, gauss_o, gauss);
|
||||
|
||||
action.ImportGauge(latt);
|
||||
|
||||
SchurDiagMooeeOperator<FermionActionD, FermionFieldD> hermop(action);
|
||||
PlainHermOp<FermionFieldD> hermop_wrap(hermop);
|
||||
//ChebyshevLanczos<FermionFieldD> Cheb(params.alpha, params.beta, params.mu, params.ord);
|
||||
assert(params.mu == 0.0);
|
||||
|
||||
Chebyshev<FermionFieldD> Cheb(params.beta*params.beta, params.alpha*params.alpha, params.ord+1);
|
||||
FunctionHermOp<FermionFieldD> Cheb_wrap(Cheb, hermop);
|
||||
|
||||
std::cout << "IRL: alpha=" << params.alpha << " beta=" << params.beta << " mu=" << params.mu << " ord=" << params.ord << std::endl;
|
||||
ImplicitlyRestartedLanczos<FermionFieldD> IRL(Cheb_wrap, hermop_wrap, params.n_stop, params.n_want, params.n_use, params.tolerance, 10000);
|
||||
|
||||
std::vector<RealD> eval(params.n_use);
|
||||
std::vector<FermionFieldD> evec(params.n_use, rbGrid);
|
||||
int Nconv;
|
||||
IRL.calc(eval, evec, gauss_o, Nconv);
|
||||
|
||||
std::cout << "Eigenvalues:" << std::endl;
|
||||
for(int i=0;i<params.n_want;i++){
|
||||
std::cout << i << " " << eval[i] << std::endl;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
//Check the quality of the RHMC approx
|
||||
//action_or_md toggles checking the action (0), MD (1) or both (2) setups
|
||||
template<typename FermionActionD, typename FermionFieldD, typename RHMCtype>
|
||||
void checkRHMC(GridCartesian* Grid, GridRedBlackCartesian* rbGrid, const LatticeGaugeFieldD &latt, //expect lattice to have been initialized to something
|
||||
FermionActionD &numOp, FermionActionD &denOp, RHMCtype &rhmc, GridParallelRNG &rng,
|
||||
int inv_pow, const std::string &quark_descr, int action_or_md){
|
||||
assert(action_or_md == 0 || action_or_md == 1 || action_or_md == 2);
|
||||
|
||||
FermionFieldD gauss_o(rbGrid);
|
||||
FermionFieldD gauss(Grid);
|
||||
gaussian(rng, gauss);
|
||||
pickCheckerboard(Odd, gauss_o, gauss);
|
||||
|
||||
numOp.ImportGauge(latt);
|
||||
denOp.ImportGauge(latt);
|
||||
|
||||
typedef typename FermionActionD::Impl_t FermionImplPolicyD;
|
||||
SchurDifferentiableOperator<FermionImplPolicyD> MdagM(numOp);
|
||||
SchurDifferentiableOperator<FermionImplPolicyD> VdagV(denOp);
|
||||
|
||||
PowerMethod<FermionFieldD> power_method;
|
||||
RealD lambda_max;
|
||||
|
||||
std::cout << "Starting: Get RHMC high bound approx for " << quark_descr << " numerator" << std::endl;
|
||||
|
||||
lambda_max = power_method(MdagM,gauss_o);
|
||||
std::cout << GridLogMessage << "Got lambda_max "<<lambda_max<<std::endl;
|
||||
|
||||
std::cout << "Starting: Get RHMC high bound approx for " << quark_descr << " denominator" << std::endl;
|
||||
lambda_max = power_method(VdagV,gauss_o);
|
||||
std::cout << GridLogMessage << "Got lambda_max "<<lambda_max<<std::endl;
|
||||
|
||||
if(action_or_md == 0 || action_or_md == 2){
|
||||
std::cout << "Starting: Checking quality of RHMC action approx for " << quark_descr << " quark numerator and power -1/" << inv_pow << std::endl;
|
||||
InversePowerBoundsCheck(inv_pow, 10000, 1e16, MdagM,gauss_o, rhmc.ApproxNegPowerAction); //use large tolerance to prevent exit on fail; we are trying to tune here!
|
||||
std::cout << "Finished: Checking quality of RHMC action approx for " << quark_descr << " quark numerator and power -1/" << inv_pow << std::endl;
|
||||
|
||||
std::cout << "Starting: Checking quality of RHMC action approx for " << quark_descr << " quark numerator and power -1/" << 2*inv_pow << std::endl;
|
||||
InversePowerBoundsCheck(2*inv_pow, 10000, 1e16, MdagM,gauss_o, rhmc.ApproxNegHalfPowerAction);
|
||||
std::cout << "Finished: Checking quality of RHMC action approx for " << quark_descr << " quark numerator and power -1/" << 2*inv_pow << std::endl;
|
||||
|
||||
std::cout << "Starting: Checking quality of RHMC action approx for " << quark_descr << " quark denominator and power -1/" << inv_pow << std::endl;
|
||||
InversePowerBoundsCheck(inv_pow, 10000, 1e16, VdagV,gauss_o, rhmc.ApproxNegPowerAction);
|
||||
std::cout << "Finished: Checking quality of RHMC action approx for " << quark_descr << " quark denominator and power -1/" << inv_pow << std::endl;
|
||||
|
||||
std::cout << "Starting: Checking quality of RHMC action approx for " << quark_descr << " quark denominator and power -1/" << 2*inv_pow << std::endl;
|
||||
InversePowerBoundsCheck(2*inv_pow, 10000, 1e16, VdagV,gauss_o, rhmc.ApproxNegHalfPowerAction);
|
||||
std::cout << "Finished: Checking quality of RHMC action approx for " << quark_descr << " quark denominator and power -1/" << 2*inv_pow << std::endl;
|
||||
}
|
||||
|
||||
std::cout << "-------------------------------------------------------------------------------" << std::endl;
|
||||
|
||||
if(action_or_md == 1 || action_or_md == 2){
|
||||
std::cout << "Starting: Checking quality of RHMC MD approx for " << quark_descr << " quark numerator and power -1/" << inv_pow << std::endl;
|
||||
InversePowerBoundsCheck(inv_pow, 10000, 1e16, MdagM,gauss_o, rhmc.ApproxNegPowerMD);
|
||||
std::cout << "Finished: Checking quality of RHMC MD approx for " << quark_descr << " quark numerator and power -1/" << inv_pow << std::endl;
|
||||
|
||||
std::cout << "Starting: Checking quality of RHMC MD approx for " << quark_descr << " quark numerator and power -1/" << 2*inv_pow << std::endl;
|
||||
InversePowerBoundsCheck(2*inv_pow, 10000, 1e16, MdagM,gauss_o, rhmc.ApproxNegHalfPowerMD);
|
||||
std::cout << "Finished: Checking quality of RHMC MD approx for " << quark_descr << " quark numerator and power -1/" << 2*inv_pow << std::endl;
|
||||
|
||||
std::cout << "Starting: Checking quality of RHMC MD approx for " << quark_descr << " quark denominator and power -1/" << inv_pow << std::endl;
|
||||
InversePowerBoundsCheck(inv_pow, 10000, 1e16, VdagV,gauss_o, rhmc.ApproxNegPowerMD);
|
||||
std::cout << "Finished: Checking quality of RHMC MD approx for " << quark_descr << " quark denominator and power -1/" << inv_pow << std::endl;
|
||||
|
||||
std::cout << "Starting: Checking quality of RHMC MD approx for " << quark_descr << " quark denominator and power -1/" << 2*inv_pow << std::endl;
|
||||
InversePowerBoundsCheck(2*inv_pow, 10000, 1e16, VdagV,gauss_o, rhmc.ApproxNegHalfPowerMD);
|
||||
std::cout << "Finished: Checking quality of RHMC MD approx for " << quark_descr << " quark denominator and power -1/" << 2*inv_pow << std::endl;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
template<typename FermionImplPolicy>
|
||||
void checkEOFA(ExactOneFlavourRatioPseudoFermionAction<FermionImplPolicy> &EOFA,
|
||||
GridCartesian* FGrid, GridParallelRNG &rng, const LatticeGaugeFieldD &latt){
|
||||
std::cout << GridLogMessage << "Starting EOFA action/bounds check" << std::endl;
|
||||
typename FermionImplPolicy::FermionField eta(FGrid);
|
||||
RealD scale = std::sqrt(0.5);
|
||||
gaussian(rng,eta); eta = eta * scale;
|
||||
|
||||
//Use the inbuilt check
|
||||
EOFA.refresh(latt, eta);
|
||||
EOFA.S(latt);
|
||||
std::cout << GridLogMessage << "Finished EOFA upper action/bounds check" << std::endl;
|
||||
}
|
||||
|
||||
|
||||
template<typename FermionImplPolicy>
|
||||
class EOFAlinop: public LinearOperatorBase<typename FermionImplPolicy::FermionField>{
|
||||
ExactOneFlavourRatioPseudoFermionAction<FermionImplPolicy> &EOFA;
|
||||
LatticeGaugeFieldD &U;
|
||||
public:
|
||||
EOFAlinop(ExactOneFlavourRatioPseudoFermionAction<FermionImplPolicy> &EOFA, LatticeGaugeFieldD &U): EOFA(EOFA), U(U){}
|
||||
|
||||
typedef typename FermionImplPolicy::FermionField Field;
|
||||
void OpDiag (const Field &in, Field &out){ assert(0); }
|
||||
void OpDir (const Field &in, Field &out,int dir,int disp){ assert(0); }
|
||||
void OpDirAll (const Field &in, std::vector<Field> &out){ assert(0); }
|
||||
|
||||
void Op (const Field &in, Field &out){ assert(0); }
|
||||
void AdjOp (const Field &in, Field &out){ assert(0); }
|
||||
void HermOpAndNorm(const Field &in, Field &out,RealD &n1,RealD &n2){ assert(0); }
|
||||
void HermOp(const Field &in, Field &out){ EOFA.Meofa(U, in, out); }
|
||||
};
|
||||
|
||||
template<typename FermionImplPolicy>
|
||||
void upperBoundEOFA(ExactOneFlavourRatioPseudoFermionAction<FermionImplPolicy> &EOFA,
|
||||
GridCartesian* FGrid, GridParallelRNG &rng, LatticeGaugeFieldD &latt){
|
||||
std::cout << GridLogMessage << "Starting EOFA upper bound compute" << std::endl;
|
||||
EOFAlinop<FermionImplPolicy> linop(EOFA, latt);
|
||||
typename FermionImplPolicy::FermionField eta(FGrid);
|
||||
gaussian(rng,eta);
|
||||
PowerMethod<typename FermionImplPolicy::FermionField> power_method;
|
||||
auto lambda_max = power_method(linop,eta);
|
||||
std::cout << GridLogMessage << "Upper bound of EOFA operator " << lambda_max << std::endl;
|
||||
}
|
||||
|
||||
//Applications of M^{-1} cost the same as M for EOFA!
|
||||
template<typename FermionImplPolicy>
|
||||
class EOFAinvLinop: public LinearOperatorBase<typename FermionImplPolicy::FermionField>{
|
||||
ExactOneFlavourRatioPseudoFermionAction<FermionImplPolicy> &EOFA;
|
||||
LatticeGaugeFieldD &U;
|
||||
public:
|
||||
EOFAinvLinop(ExactOneFlavourRatioPseudoFermionAction<FermionImplPolicy> &EOFA, LatticeGaugeFieldD &U): EOFA(EOFA), U(U){}
|
||||
|
||||
typedef typename FermionImplPolicy::FermionField Field;
|
||||
void OpDiag (const Field &in, Field &out){ assert(0); }
|
||||
void OpDir (const Field &in, Field &out,int dir,int disp){ assert(0); }
|
||||
void OpDirAll (const Field &in, std::vector<Field> &out){ assert(0); }
|
||||
|
||||
void Op (const Field &in, Field &out){ assert(0); }
|
||||
void AdjOp (const Field &in, Field &out){ assert(0); }
|
||||
void HermOpAndNorm(const Field &in, Field &out,RealD &n1,RealD &n2){ assert(0); }
|
||||
void HermOp(const Field &in, Field &out){ EOFA.MeofaInv(U, in, out); }
|
||||
};
|
||||
|
||||
template<typename FermionImplPolicy>
|
||||
void lowerBoundEOFA(ExactOneFlavourRatioPseudoFermionAction<FermionImplPolicy> &EOFA,
|
||||
GridCartesian* FGrid, GridParallelRNG &rng, LatticeGaugeFieldD &latt){
|
||||
std::cout << GridLogMessage << "Starting EOFA lower bound compute using power method on M^{-1}. Inverse of highest eigenvalue is the lowest eigenvalue of M" << std::endl;
|
||||
EOFAinvLinop<FermionImplPolicy> linop(EOFA, latt);
|
||||
typename FermionImplPolicy::FermionField eta(FGrid);
|
||||
gaussian(rng,eta);
|
||||
PowerMethod<typename FermionImplPolicy::FermionField> power_method;
|
||||
auto lambda_max = power_method(linop,eta);
|
||||
std::cout << GridLogMessage << "Lower bound of EOFA operator " << 1./lambda_max << std::endl;
|
||||
}
|
||||
|
||||
|
||||
NAMESPACE_BEGIN(Grid);
|
||||
|
||||
template<class FermionOperatorD, class FermionOperatorF, class SchurOperatorD, class SchurOperatorF>
|
||||
class MixedPrecisionConjugateGradientOperatorFunction : public OperatorFunction<typename FermionOperatorD::FermionField> {
|
||||
public:
|
||||
typedef typename FermionOperatorD::FermionField FieldD;
|
||||
typedef typename FermionOperatorF::FermionField FieldF;
|
||||
|
||||
using OperatorFunction<FieldD>::operator();
|
||||
|
||||
RealD Tolerance;
|
||||
RealD InnerTolerance; //Initial tolerance for inner CG. Defaults to Tolerance but can be changed
|
||||
Integer MaxInnerIterations;
|
||||
Integer MaxOuterIterations;
|
||||
GridBase* SinglePrecGrid4; //Grid for single-precision fields
|
||||
GridBase* SinglePrecGrid5; //Grid for single-precision fields
|
||||
RealD OuterLoopNormMult; //Stop the outer loop and move to a final double prec solve when the residual is OuterLoopNormMult * Tolerance
|
||||
|
||||
FermionOperatorF &FermOpF;
|
||||
FermionOperatorD &FermOpD;;
|
||||
SchurOperatorF &LinOpF;
|
||||
SchurOperatorD &LinOpD;
|
||||
|
||||
Integer TotalInnerIterations; //Number of inner CG iterations
|
||||
Integer TotalOuterIterations; //Number of restarts
|
||||
Integer TotalFinalStepIterations; //Number of CG iterations in final patch-up step
|
||||
|
||||
MixedPrecisionConjugateGradientOperatorFunction(RealD tol,
|
||||
Integer maxinnerit,
|
||||
Integer maxouterit,
|
||||
GridBase* _sp_grid4,
|
||||
GridBase* _sp_grid5,
|
||||
FermionOperatorF &_FermOpF,
|
||||
FermionOperatorD &_FermOpD,
|
||||
SchurOperatorF &_LinOpF,
|
||||
SchurOperatorD &_LinOpD):
|
||||
LinOpF(_LinOpF),
|
||||
LinOpD(_LinOpD),
|
||||
FermOpF(_FermOpF),
|
||||
FermOpD(_FermOpD),
|
||||
Tolerance(tol),
|
||||
InnerTolerance(tol),
|
||||
MaxInnerIterations(maxinnerit),
|
||||
MaxOuterIterations(maxouterit),
|
||||
SinglePrecGrid4(_sp_grid4),
|
||||
SinglePrecGrid5(_sp_grid5),
|
||||
OuterLoopNormMult(100.)
|
||||
{
|
||||
};
|
||||
|
||||
void operator()(LinearOperatorBase<FieldD> &LinOpU, const FieldD &src, FieldD &psi) {
|
||||
|
||||
std::cout << GridLogMessage << " Mixed precision CG wrapper operator() "<<std::endl;
|
||||
|
||||
SchurOperatorD * SchurOpU = static_cast<SchurOperatorD *>(&LinOpU);
|
||||
assert(&(SchurOpU->_Mat)==&(LinOpD._Mat));
|
||||
|
||||
precisionChange(FermOpF.Umu, FermOpD.Umu);
|
||||
|
||||
pickCheckerboard(Even,FermOpF.UmuEven,FermOpF.Umu);
|
||||
pickCheckerboard(Odd ,FermOpF.UmuOdd ,FermOpF.Umu);
|
||||
|
||||
////////////////////////////////////////////////////////////////////////////////////
|
||||
// Make a mixed precision conjugate gradient
|
||||
////////////////////////////////////////////////////////////////////////////////////
|
||||
MixedPrecisionConjugateGradient<FieldD,FieldF> MPCG(Tolerance,MaxInnerIterations,MaxOuterIterations,SinglePrecGrid5,LinOpF,LinOpD);
|
||||
MPCG.InnerTolerance = InnerTolerance;
|
||||
std::cout << GridLogMessage << "Calling mixed precision Conjugate Gradient" <<std::endl;
|
||||
MPCG(src,psi);
|
||||
}
|
||||
};
|
||||
|
||||
|
||||
template<class FermionOperatorD, class FermionOperatorF, class SchurOperatorD, class SchurOperatorF>
|
||||
class MixedPrecisionReliableUpdateConjugateGradientOperatorFunction : public OperatorFunction<typename FermionOperatorD::FermionField> {
|
||||
public:
|
||||
typedef typename FermionOperatorD::FermionField FieldD;
|
||||
typedef typename FermionOperatorF::FermionField FieldF;
|
||||
|
||||
using OperatorFunction<FieldD>::operator();
|
||||
|
||||
RealD Tolerance;
|
||||
Integer MaxIterations;
|
||||
|
||||
RealD Delta; //reliable update parameter
|
||||
|
||||
GridBase* SinglePrecGrid4; //Grid for single-precision fields
|
||||
GridBase* SinglePrecGrid5; //Grid for single-precision fields
|
||||
|
||||
FermionOperatorF &FermOpF;
|
||||
FermionOperatorD &FermOpD;;
|
||||
SchurOperatorF &LinOpF;
|
||||
SchurOperatorD &LinOpD;
|
||||
|
||||
MixedPrecisionReliableUpdateConjugateGradientOperatorFunction(RealD tol,
|
||||
RealD delta,
|
||||
Integer maxit,
|
||||
GridBase* _sp_grid4,
|
||||
GridBase* _sp_grid5,
|
||||
FermionOperatorF &_FermOpF,
|
||||
FermionOperatorD &_FermOpD,
|
||||
SchurOperatorF &_LinOpF,
|
||||
SchurOperatorD &_LinOpD):
|
||||
LinOpF(_LinOpF),
|
||||
LinOpD(_LinOpD),
|
||||
FermOpF(_FermOpF),
|
||||
FermOpD(_FermOpD),
|
||||
Tolerance(tol),
|
||||
Delta(delta),
|
||||
MaxIterations(maxit),
|
||||
SinglePrecGrid4(_sp_grid4),
|
||||
SinglePrecGrid5(_sp_grid5)
|
||||
{
|
||||
};
|
||||
|
||||
void operator()(LinearOperatorBase<FieldD> &LinOpU, const FieldD &src, FieldD &psi) {
|
||||
|
||||
std::cout << GridLogMessage << " Mixed precision reliable CG update wrapper operator() "<<std::endl;
|
||||
|
||||
SchurOperatorD * SchurOpU = static_cast<SchurOperatorD *>(&LinOpU);
|
||||
assert(&(SchurOpU->_Mat)==&(LinOpD._Mat));
|
||||
|
||||
precisionChange(FermOpF.Umu, FermOpD.Umu);
|
||||
|
||||
pickCheckerboard(Even,FermOpF.UmuEven,FermOpF.Umu);
|
||||
pickCheckerboard(Odd ,FermOpF.UmuOdd ,FermOpF.Umu);
|
||||
|
||||
////////////////////////////////////////////////////////////////////////////////////
|
||||
// Make a mixed precision conjugate gradient
|
||||
////////////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
ConjugateGradientReliableUpdate<FieldD,FieldF> MPCG(Tolerance,MaxIterations,Delta,SinglePrecGrid5,LinOpF,LinOpD);
|
||||
std::cout << GridLogMessage << "Calling mixed precision reliable update Conjugate Gradient" <<std::endl;
|
||||
MPCG(src,psi);
|
||||
}
|
||||
};
|
||||
|
||||
|
||||
|
||||
NAMESPACE_END(Grid);
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
int main(int argc, char **argv) {
|
||||
Grid_init(&argc, &argv);
|
||||
int threads = GridThread::GetThreads();
|
||||
// here make a routine to print all the relevant information on the run
|
||||
std::cout << GridLogMessage << "Grid is setup to use " << threads << " threads" << std::endl;
|
||||
|
||||
std::string param_file = "params.xml";
|
||||
bool file_load_check = false;
|
||||
for(int i=1;i<argc;i++){
|
||||
std::string sarg(argv[i]);
|
||||
if(sarg == "--param_file"){
|
||||
assert(i!=argc-1);
|
||||
param_file = argv[i+1];
|
||||
}else if(sarg == "--read_check"){ //check the fields load correctly and pass checksum/plaquette repro
|
||||
file_load_check = true;
|
||||
}
|
||||
}
|
||||
|
||||
//Read the user parameters
|
||||
EvolParameters user_params;
|
||||
|
||||
if(fileExists(param_file)){
|
||||
std::cout << GridLogMessage << " Reading " << param_file << std::endl;
|
||||
Grid::XmlReader rd(param_file);
|
||||
read(rd, "Params", user_params);
|
||||
}else if(!GlobalSharedMemory::WorldRank){
|
||||
std::cout << GridLogMessage << " File " << param_file << " does not exist" << std::endl;
|
||||
std::cout << GridLogMessage << " Writing xml template to " << param_file << ".templ" << std::endl;
|
||||
{
|
||||
Grid::XmlWriter wr(param_file + ".templ");
|
||||
write(wr, "Params", user_params);
|
||||
}
|
||||
std::cout << GridLogMessage << " Done" << std::endl;
|
||||
Grid_finalize();
|
||||
return 0;
|
||||
}
|
||||
|
||||
//Check the parameters
|
||||
if(user_params.GparityDirs.size() != Nd-1){
|
||||
std::cerr << "Error in input parameters: expect GparityDirs to have size = " << Nd-1 << std::endl;
|
||||
exit(1);
|
||||
}
|
||||
for(int i=0;i<Nd-1;i++)
|
||||
if(user_params.GparityDirs[i] != 0 && user_params.GparityDirs[i] != 1){
|
||||
std::cerr << "Error in input parameters: expect GparityDirs values to be 0 (periodic) or 1 (G-parity)" << std::endl;
|
||||
exit(1);
|
||||
}
|
||||
|
||||
|
||||
typedef GparityMobiusEOFAFermionD EOFAactionD;
|
||||
typedef GparityMobiusFermionD FermionActionD;
|
||||
typedef typename FermionActionD::Impl_t FermionImplPolicyD;
|
||||
typedef typename FermionActionD::FermionField FermionFieldD;
|
||||
|
||||
typedef GparityMobiusEOFAFermionF EOFAactionF;
|
||||
typedef GparityMobiusFermionF FermionActionF;
|
||||
typedef typename FermionActionF::Impl_t FermionImplPolicyF;
|
||||
typedef typename FermionActionF::FermionField FermionFieldF;
|
||||
|
||||
typedef GeneralEvenOddRatioRationalMixedPrecPseudoFermionAction<FermionImplPolicyD,FermionImplPolicyF> MixedPrecRHMC;
|
||||
typedef GeneralEvenOddRatioRationalPseudoFermionAction<FermionImplPolicyD> DoublePrecRHMC;
|
||||
|
||||
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
|
||||
IntegratorParameters MD;
|
||||
typedef ConjugateHMCRunnerD<MinimumNorm2> HMCWrapper; //NB: This is the "Omelyan integrator"
|
||||
typedef HMCWrapper::ImplPolicy GaugeImplPolicy;
|
||||
MD.name = std::string("MinimumNorm2");
|
||||
MD.MDsteps = user_params.Steps;
|
||||
MD.trajL = user_params.TrajectoryLength;
|
||||
|
||||
HMCparameters HMCparams;
|
||||
HMCparams.StartTrajectory = user_params.StartTrajectory;
|
||||
HMCparams.Trajectories = user_params.Trajectories;
|
||||
HMCparams.NoMetropolisUntil= 0;
|
||||
HMCparams.StartingType = user_params.StartingType;
|
||||
HMCparams.MetropolisTest = user_params.MetropolisTest;
|
||||
HMCparams.MD = MD;
|
||||
HMCWrapper TheHMC(HMCparams);
|
||||
|
||||
// Grid from the command line arguments --grid and --mpi
|
||||
TheHMC.Resources.AddFourDimGrid("gauge"); // use default simd lanes decomposition
|
||||
|
||||
CheckpointerParameters CPparams;
|
||||
CPparams.config_prefix = "ckpoint_lat";
|
||||
CPparams.rng_prefix = "ckpoint_rng";
|
||||
CPparams.saveInterval = user_params.SaveInterval;
|
||||
CPparams.format = "IEEE64BIG";
|
||||
TheHMC.Resources.LoadNerscCheckpointer(CPparams);
|
||||
|
||||
//Note that checkpointing saves the RNG state so that this initialization is required only for the very first configuration
|
||||
RNGModuleParameters RNGpar;
|
||||
RNGpar.serial_seeds = "1 2 3 4 5";
|
||||
RNGpar.parallel_seeds = "6 7 8 9 10";
|
||||
TheHMC.Resources.SetRNGSeeds(RNGpar);
|
||||
|
||||
typedef PlaquetteMod<GaugeImplPolicy> PlaqObs;
|
||||
TheHMC.Resources.AddObservable<PlaqObs>();
|
||||
//////////////////////////////////////////////
|
||||
|
||||
//aiming for ainv=2.068 me Bob
|
||||
//Estimated a(ml+mres) [48ID] = 0.001048 0.00104
|
||||
// a(mh+mres) [48ID] = 0.028847 0.02805
|
||||
//Estimate Ls=12, b+c=2 mres~0.0003
|
||||
|
||||
const int Ls = 12;
|
||||
Real beta = 1.946;
|
||||
Real light_mass = 0.00074; //0.00104 - mres_approx;
|
||||
Real strange_mass = 0.02775; //0.02805 - mres_approx
|
||||
Real pv_mass = 1.0;
|
||||
RealD M5 = 1.8;
|
||||
RealD mobius_scale = 2.; //b+c
|
||||
|
||||
RealD mob_bmc = 1.0;
|
||||
RealD mob_b = (mobius_scale + mob_bmc)/2.;
|
||||
RealD mob_c = (mobius_scale - mob_bmc)/2.;
|
||||
|
||||
//Setup the Grids
|
||||
auto UGridD = TheHMC.Resources.GetCartesian();
|
||||
auto UrbGridD = TheHMC.Resources.GetRBCartesian();
|
||||
auto FGridD = SpaceTimeGrid::makeFiveDimGrid(Ls,UGridD);
|
||||
auto FrbGridD = SpaceTimeGrid::makeFiveDimRedBlackGrid(Ls,UGridD);
|
||||
|
||||
GridCartesian* UGridF = SpaceTimeGrid::makeFourDimGrid(GridDefaultLatt(), GridDefaultSimd(Nd, vComplexF::Nsimd()), GridDefaultMpi());
|
||||
GridRedBlackCartesian* UrbGridF = SpaceTimeGrid::makeFourDimRedBlackGrid(UGridF);
|
||||
auto FGridF = SpaceTimeGrid::makeFiveDimGrid(Ls,UGridF);
|
||||
auto FrbGridF = SpaceTimeGrid::makeFiveDimRedBlackGrid(Ls,UGridF);
|
||||
|
||||
ConjugateIwasakiGaugeActionD GaugeAction(beta);
|
||||
|
||||
// temporarily need a gauge field
|
||||
LatticeGaugeFieldD Ud(UGridD);
|
||||
LatticeGaugeFieldF Uf(UGridF);
|
||||
|
||||
//Setup the BCs
|
||||
FermionActionD::ImplParams Params;
|
||||
for(int i=0;i<Nd-1;i++) Params.twists[i] = user_params.GparityDirs[i]; //G-parity directions
|
||||
Params.twists[Nd-1] = 1; //APBC in time direction
|
||||
|
||||
std::vector<int> dirs4(Nd);
|
||||
for(int i=0;i<Nd-1;i++) dirs4[i] = user_params.GparityDirs[i];
|
||||
dirs4[Nd-1] = 0; //periodic gauge BC in time
|
||||
|
||||
GaugeImplPolicy::setDirections(dirs4); //gauge BC
|
||||
|
||||
//Run optional gauge field checksum checker and exit
|
||||
if(file_load_check){
|
||||
TheHMC.initializeGaugeFieldAndRNGs(Ud);
|
||||
std::cout << GridLogMessage << " Done" << std::endl;
|
||||
Grid_finalize();
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
////////////////////////////////////
|
||||
// Collect actions
|
||||
////////////////////////////////////
|
||||
ActionLevel<HMCWrapper::Field> Level1(1); //light quark + strange quark
|
||||
ActionLevel<HMCWrapper::Field> Level2(4); //DSDR
|
||||
ActionLevel<HMCWrapper::Field> Level3(2); //gauge
|
||||
|
||||
|
||||
/////////////////////////////////////////////////////////////
|
||||
// Light EOFA action
|
||||
// have to be careful with the parameters, cf. Test_dwf_gpforce_eofa.cc
|
||||
/////////////////////////////////////////////////////////////
|
||||
typedef SchurDiagMooeeOperator<EOFAactionD,FermionFieldD> EOFAschuropD;
|
||||
typedef SchurDiagMooeeOperator<EOFAactionF,FermionFieldF> EOFAschuropF;
|
||||
typedef ExactOneFlavourRatioMixedPrecHeatbathPseudoFermionAction<FermionImplPolicyD, FermionImplPolicyF> EOFAmixPrecPFaction;
|
||||
typedef MixedPrecisionConjugateGradientOperatorFunction<EOFAactionD, EOFAactionF, EOFAschuropD, EOFAschuropF> EOFA_mxCG;
|
||||
typedef MixedPrecisionReliableUpdateConjugateGradientOperatorFunction<EOFAactionD, EOFAactionF, EOFAschuropD, EOFAschuropF> EOFA_relupCG;
|
||||
|
||||
std::vector<RealD> eofa_light_masses = { light_mass , 0.004, 0.016, 0.064, 0.256 };
|
||||
std::vector<RealD> eofa_pv_masses = { 0.004 , 0.016, 0.064, 0.256, 1.0 };
|
||||
int n_light_hsb = 5;
|
||||
assert(user_params.eofa_l.size() == n_light_hsb);
|
||||
|
||||
EOFAmixPrecPFaction* EOFA_pfactions[n_light_hsb];
|
||||
|
||||
for(int i=0;i<n_light_hsb;i++){
|
||||
RealD iml = eofa_light_masses[i];
|
||||
RealD ipv = eofa_pv_masses[i];
|
||||
|
||||
EOFAactionD* LopD = new EOFAactionD(Ud, *FGridD, *FrbGridD, *UGridD, *UrbGridD, iml, iml, ipv, 0.0, -1, M5, mob_b, mob_c, Params);
|
||||
EOFAactionF* LopF = new EOFAactionF(Uf, *FGridF, *FrbGridF, *UGridF, *UrbGridF, iml, iml, ipv, 0.0, -1, M5, mob_b, mob_c, Params);
|
||||
EOFAactionD* RopD = new EOFAactionD(Ud, *FGridD, *FrbGridD, *UGridD, *UrbGridD, ipv, iml, ipv, -1.0, 1, M5, mob_b, mob_c, Params);
|
||||
EOFAactionF* RopF = new EOFAactionF(Uf, *FGridF, *FrbGridF, *UGridF, *UrbGridF, ipv, iml, ipv, -1.0, 1, M5, mob_b, mob_c, Params);
|
||||
|
||||
EOFAschuropD* linopL_D = new EOFAschuropD(*LopD);
|
||||
EOFAschuropD* linopR_D = new EOFAschuropD(*RopD);
|
||||
|
||||
EOFAschuropF* linopL_F = new EOFAschuropF(*LopF);
|
||||
EOFAschuropF* linopR_F = new EOFAschuropF(*RopF);
|
||||
|
||||
#if 1
|
||||
//Note reusing user_params.eofa_l.action(|md)_mixcg_inner_tolerance as Delta for now
|
||||
EOFA_relupCG* ActionMCG_L = new EOFA_relupCG(user_params.eofa_l[i].action_tolerance, user_params.eofa_l[i].action_mixcg_inner_tolerance, 50000, UGridF, FrbGridF, *LopF, *LopD, *linopL_F, *linopL_D);
|
||||
EOFA_relupCG* ActionMCG_R = new EOFA_relupCG(user_params.eofa_l[i].action_tolerance, user_params.eofa_l[i].action_mixcg_inner_tolerance, 50000, UGridF, FrbGridF, *RopF, *RopD, *linopR_F, *linopR_D);
|
||||
|
||||
EOFA_relupCG* DerivMCG_L = new EOFA_relupCG(user_params.eofa_l[i].md_tolerance, user_params.eofa_l[i].md_mixcg_inner_tolerance, 50000, UGridF, FrbGridF, *LopF, *LopD, *linopL_F, *linopL_D);
|
||||
EOFA_relupCG* DerivMCG_R = new EOFA_relupCG(user_params.eofa_l[i].md_tolerance, user_params.eofa_l[i].md_mixcg_inner_tolerance, 50000, UGridF, FrbGridF, *RopF, *RopD, *linopR_F, *linopR_D);
|
||||
|
||||
#else
|
||||
|
||||
EOFA_mxCG* ActionMCG_L = new EOFA_mxCG(user_params.eofa_l[i].action_tolerance, 10000, 1000, UGridF, FrbGridF, *LopF, *LopD, *linopL_F, *linopL_D);
|
||||
ActionMCG_L->InnerTolerance = user_params.eofa_l[i].action_mixcg_inner_tolerance;
|
||||
|
||||
EOFA_mxCG* ActionMCG_R = new EOFA_mxCG(user_params.eofa_l[i].action_tolerance, 10000, 1000, UGridF, FrbGridF, *RopF, *RopD, *linopR_F, *linopR_D);
|
||||
ActionMCG_R->InnerTolerance = user_params.eofa_l[i].action_mixcg_inner_tolerance;
|
||||
|
||||
EOFA_mxCG* DerivMCG_L = new EOFA_mxCG(user_params.eofa_l[i].md_tolerance, 10000, 1000, UGridF, FrbGridF, *LopF, *LopD, *linopL_F, *linopL_D);
|
||||
DerivMCG_L->InnerTolerance = user_params.eofa_l[i].md_mixcg_inner_tolerance;
|
||||
|
||||
EOFA_mxCG* DerivMCG_R = new EOFA_mxCG(user_params.eofa_l[i].md_tolerance, 10000, 1000, UGridF, FrbGridF, *RopF, *RopD, *linopR_F, *linopR_D);
|
||||
DerivMCG_R->InnerTolerance = user_params.eofa_l[i].md_mixcg_inner_tolerance;
|
||||
|
||||
std::cout << GridLogMessage << "Set EOFA action solver action tolerance outer=" << ActionMCG_L->Tolerance << " inner=" << ActionMCG_L->InnerTolerance << std::endl;
|
||||
std::cout << GridLogMessage << "Set EOFA MD solver tolerance outer=" << DerivMCG_L->Tolerance << " inner=" << DerivMCG_L->InnerTolerance << std::endl;
|
||||
#endif
|
||||
|
||||
|
||||
EOFAmixPrecPFaction* EOFA = new EOFAmixPrecPFaction(*LopF, *RopF,
|
||||
*LopD, *RopD,
|
||||
*ActionMCG_L, *ActionMCG_R,
|
||||
*ActionMCG_L, *ActionMCG_R,
|
||||
*DerivMCG_L, *DerivMCG_R,
|
||||
user_params.eofa_l[i].rat_params, true);
|
||||
EOFA_pfactions[i] = EOFA;
|
||||
Level1.push_back(EOFA);
|
||||
}
|
||||
|
||||
////////////////////////////////////
|
||||
// Strange action
|
||||
////////////////////////////////////
|
||||
FermionActionD Numerator_sD(Ud,*FGridD,*FrbGridD,*UGridD,*UrbGridD,strange_mass,M5,mob_b,mob_c,Params);
|
||||
FermionActionD Denominator_sD(Ud,*FGridD,*FrbGridD,*UGridD,*UrbGridD, pv_mass,M5,mob_b,mob_c,Params);
|
||||
|
||||
FermionActionF Numerator_sF(Uf,*FGridF,*FrbGridF,*UGridF,*UrbGridF,strange_mass,M5,mob_b,mob_c,Params);
|
||||
FermionActionF Denominator_sF(Uf,*FGridF,*FrbGridF,*UGridF,*UrbGridF, pv_mass,M5,mob_b,mob_c,Params);
|
||||
|
||||
RationalActionParams rat_act_params_s;
|
||||
rat_act_params_s.inv_pow = 4; // (M^dag M)^{1/4}
|
||||
rat_act_params_s.precision= 60;
|
||||
rat_act_params_s.MaxIter = 10000;
|
||||
user_params.rat_quo_s.Export(rat_act_params_s);
|
||||
std::cout << GridLogMessage << " Heavy quark bounds check every " << rat_act_params_s.BoundsCheckFreq << " trajectories (avg)" << std::endl;
|
||||
|
||||
//MixedPrecRHMC Quotient_s(Denominator_sD, Numerator_sD, Denominator_sF, Numerator_sF, rat_act_params_s, user_params.rat_quo_s.reliable_update_freq);
|
||||
DoublePrecRHMC Quotient_s(Denominator_sD, Numerator_sD, rat_act_params_s);
|
||||
Level1.push_back(&Quotient_s);
|
||||
|
||||
///////////////////////////////////
|
||||
// DSDR action
|
||||
///////////////////////////////////
|
||||
RealD dsdr_mass=-1.8;
|
||||
//Use same DSDR twists as https://arxiv.org/pdf/1208.4412.pdf
|
||||
RealD dsdr_epsilon_f = 0.02; //numerator (in determinant)
|
||||
RealD dsdr_epsilon_b = 0.5;
|
||||
GparityWilsonTMFermionD Numerator_DSDR_D(Ud, *UGridD, *UrbGridD, dsdr_mass, dsdr_epsilon_f, Params);
|
||||
GparityWilsonTMFermionF Numerator_DSDR_F(Uf, *UGridF, *UrbGridF, dsdr_mass, dsdr_epsilon_f, Params);
|
||||
|
||||
GparityWilsonTMFermionD Denominator_DSDR_D(Ud, *UGridD, *UrbGridD, dsdr_mass, dsdr_epsilon_b, Params);
|
||||
GparityWilsonTMFermionF Denominator_DSDR_F(Uf, *UGridF, *UrbGridF, dsdr_mass, dsdr_epsilon_b, Params);
|
||||
|
||||
RationalActionParams rat_act_params_DSDR;
|
||||
rat_act_params_DSDR.inv_pow = 2; // (M^dag M)^{1/2}
|
||||
rat_act_params_DSDR.precision= 60;
|
||||
rat_act_params_DSDR.MaxIter = 10000;
|
||||
user_params.rat_quo_DSDR.Export(rat_act_params_DSDR);
|
||||
std::cout << GridLogMessage << "DSDR quark bounds check every " << rat_act_params_DSDR.BoundsCheckFreq << " trajectories (avg)" << std::endl;
|
||||
|
||||
DoublePrecRHMC Quotient_DSDR(Denominator_DSDR_D, Numerator_DSDR_D, rat_act_params_DSDR);
|
||||
Level2.push_back(&Quotient_DSDR);
|
||||
|
||||
/////////////////////////////////////////////////////////////
|
||||
// Gauge action
|
||||
/////////////////////////////////////////////////////////////
|
||||
Level3.push_back(&GaugeAction);
|
||||
|
||||
TheHMC.TheAction.push_back(Level1);
|
||||
TheHMC.TheAction.push_back(Level2);
|
||||
TheHMC.TheAction.push_back(Level3);
|
||||
std::cout << GridLogMessage << " Action complete "<< std::endl;
|
||||
|
||||
|
||||
//Action tuning
|
||||
bool
|
||||
tune_rhmc_s=false, eigenrange_s=false,
|
||||
tune_rhmc_DSDR=false, eigenrange_DSDR=false,
|
||||
check_eofa=false,
|
||||
upper_bound_eofa=false, lower_bound_eofa(false);
|
||||
|
||||
std::string lanc_params_s;
|
||||
std::string lanc_params_DSDR;
|
||||
int tune_rhmc_s_action_or_md;
|
||||
int tune_rhmc_DSDR_action_or_md;
|
||||
int eofa_which_hsb;
|
||||
|
||||
for(int i=1;i<argc;i++){
|
||||
std::string sarg(argv[i]);
|
||||
if(sarg == "--tune_rhmc_s"){
|
||||
assert(i < argc-1);
|
||||
tune_rhmc_s=true;
|
||||
tune_rhmc_s_action_or_md = std::stoi(argv[i+1]);
|
||||
}
|
||||
else if(sarg == "--eigenrange_s"){
|
||||
assert(i < argc-1);
|
||||
eigenrange_s=true;
|
||||
lanc_params_s = argv[i+1];
|
||||
}
|
||||
else if(sarg == "--tune_rhmc_DSDR"){
|
||||
assert(i < argc-1);
|
||||
tune_rhmc_DSDR=true;
|
||||
tune_rhmc_DSDR_action_or_md = std::stoi(argv[i+1]);
|
||||
}
|
||||
else if(sarg == "--eigenrange_DSDR"){
|
||||
assert(i < argc-1);
|
||||
eigenrange_DSDR=true;
|
||||
lanc_params_DSDR = argv[i+1];
|
||||
}
|
||||
else if(sarg == "--check_eofa"){
|
||||
assert(i < argc-1);
|
||||
check_eofa = true;
|
||||
eofa_which_hsb = std::stoi(argv[i+1]); //-1 indicates all hasenbusch
|
||||
assert(eofa_which_hsb == -1 || (eofa_which_hsb >= 0 && eofa_which_hsb < n_light_hsb) );
|
||||
}
|
||||
else if(sarg == "--upper_bound_eofa"){
|
||||
assert(i < argc-1);
|
||||
upper_bound_eofa = true;
|
||||
eofa_which_hsb = std::stoi(argv[i+1]);
|
||||
assert(eofa_which_hsb >= 0 && eofa_which_hsb < n_light_hsb);
|
||||
}
|
||||
else if(sarg == "--lower_bound_eofa"){
|
||||
assert(i < argc-1);
|
||||
lower_bound_eofa = true;
|
||||
eofa_which_hsb = std::stoi(argv[i+1]);
|
||||
assert(eofa_which_hsb >= 0 && eofa_which_hsb < n_light_hsb);
|
||||
}
|
||||
}
  if(tune_rhmc_s || eigenrange_s || tune_rhmc_DSDR || eigenrange_DSDR || check_eofa || upper_bound_eofa || lower_bound_eofa) {
    std::cout << GridLogMessage << "Running checks" << std::endl;
    TheHMC.initializeGaugeFieldAndRNGs(Ud);

    //std::cout << GridLogMessage << "EOFA action solver action tolerance outer=" << ActionMCG_L.Tolerance << " inner=" << ActionMCG_L.InnerTolerance << std::endl;
    //std::cout << GridLogMessage << "EOFA MD solver tolerance outer=" << DerivMCG_L.Tolerance << " inner=" << DerivMCG_L.InnerTolerance << std::endl;

    if(check_eofa){
      if(eofa_which_hsb >= 0){
        std::cout << GridLogMessage << "Starting checking EOFA Hasenbusch " << eofa_which_hsb << std::endl;
        checkEOFA(*EOFA_pfactions[eofa_which_hsb], FGridD, TheHMC.Resources.GetParallelRNG(), Ud);
        std::cout << GridLogMessage << "Finished checking EOFA Hasenbusch " << eofa_which_hsb << std::endl;
      }else{
        for(int i=0;i<n_light_hsb;i++){
          std::cout << GridLogMessage << "Starting checking EOFA Hasenbusch " << i << std::endl;
          checkEOFA(*EOFA_pfactions[i], FGridD, TheHMC.Resources.GetParallelRNG(), Ud);
          std::cout << GridLogMessage << "Finished checking EOFA Hasenbusch " << i << std::endl;
        }
      }
    }
    if(upper_bound_eofa) upperBoundEOFA(*EOFA_pfactions[eofa_which_hsb], FGridD, TheHMC.Resources.GetParallelRNG(), Ud);
    if(lower_bound_eofa) lowerBoundEOFA(*EOFA_pfactions[eofa_which_hsb], FGridD, TheHMC.Resources.GetParallelRNG(), Ud);
    if(eigenrange_s) computeEigenvalues<FermionActionD, FermionFieldD>(lanc_params_s, FGridD, FrbGridD, Ud, Numerator_sD, TheHMC.Resources.GetParallelRNG());
    if(tune_rhmc_s) checkRHMC<FermionActionD, FermionFieldD, decltype(Quotient_s)>(FGridD, FrbGridD, Ud, Numerator_sD, Denominator_sD, Quotient_s, TheHMC.Resources.GetParallelRNG(), 4, "strange", tune_rhmc_s_action_or_md);
    if(eigenrange_DSDR) computeEigenvalues<GparityWilsonTMFermionD, GparityWilsonTMFermionD::FermionField>(lanc_params_DSDR, UGridD, UrbGridD, Ud, Numerator_DSDR_D, TheHMC.Resources.GetParallelRNG());
    if(tune_rhmc_DSDR) checkRHMC<GparityWilsonTMFermionD, GparityWilsonTMFermionD::FermionField, decltype(Quotient_DSDR)>(UGridD, UrbGridD, Ud, Numerator_DSDR_D, Denominator_DSDR_D, Quotient_DSDR, TheHMC.Resources.GetParallelRNG(), 2, "DSDR", tune_rhmc_DSDR_action_or_md);

    std::cout << GridLogMessage << " Done" << std::endl;
    Grid_finalize();
    return 0;
  }

  //Run the HMC
  std::cout << GridLogMessage << " Running the HMC "<< std::endl;
  TheHMC.Run();

  std::cout << GridLogMessage << " Done" << std::endl;
  Grid_finalize();
  return 0;
} // main
HMC/Mobius2p1f_DD_RHMC.cc (new file, 265 lines)
@ -0,0 +1,265 @@
/*************************************************************************************

Grid physics library, www.github.com/paboyle/Grid

Source file: ./tests/Test_hmc_EODWFRatio.cc

Copyright (C) 2015-2016

Author: Peter Boyle <pabobyle@ph.ed.ac.uk>
Author: Guido Cossu <guido.cossu@ed.ac.uk>

This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.

This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.

See the full license in the file "LICENSE" in the top level distribution
directory
*************************************************************************************/
/* END LEGAL */
#include <Grid/Grid.h>

int main(int argc, char **argv) {
  using namespace Grid;

  Grid_init(&argc, &argv);
  int threads = GridThread::GetThreads();

  // Typedefs to simplify notation
  typedef WilsonImplR FermionImplPolicy;
  typedef MobiusFermionR FermionAction;
  typedef typename FermionAction::FermionField FermionField;

  typedef Grid::XmlReader Serialiser;

  //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
  IntegratorParameters MD;
  // typedef GenericHMCRunner<LeapFrog> HMCWrapper;
  // MD.name = std::string("Leap Frog");
  // typedef GenericHMCRunner<ForceGradient> HMCWrapper;
  // MD.name = std::string("Force Gradient");
  typedef GenericHMCRunner<MinimumNorm2> HMCWrapper;
  MD.name = std::string("MinimumNorm2");
  MD.MDsteps = 4;
  MD.trajL = 1.0;

  HMCparameters HMCparams;
  HMCparams.StartTrajectory = 17;
  HMCparams.Trajectories = 200;
  HMCparams.NoMetropolisUntil= 0;
  // "[HotStart, ColdStart, TepidStart, CheckpointStart]\n";
  // HMCparams.StartingType =std::string("ColdStart");
  HMCparams.StartingType =std::string("CheckpointStart");
  HMCparams.MD = MD;
  HMCWrapper TheHMC(HMCparams);

  // Grid from the command line arguments --grid and --mpi
  TheHMC.Resources.AddFourDimGrid("gauge"); // use default simd lanes decomposition

  CheckpointerParameters CPparams;
  CPparams.config_prefix = "ckpoint_DDHMC_lat";
  CPparams.rng_prefix = "ckpoint_DDHMC_rng";
  CPparams.saveInterval = 1;
  CPparams.format = "IEEE64BIG";
  TheHMC.Resources.LoadNerscCheckpointer(CPparams);

  RNGModuleParameters RNGpar;
  RNGpar.serial_seeds = "1 2 3 4 5";
  RNGpar.parallel_seeds = "6 7 8 9 10";
  TheHMC.Resources.SetRNGSeeds(RNGpar);

  // Construct observables
  // here there is too much indirection
  typedef PlaquetteMod<HMCWrapper::ImplPolicy> PlaqObs;
  TheHMC.Resources.AddObservable<PlaqObs>();
  //////////////////////////////////////////////

  const int Ls = 16;
  RealD M5 = 1.8;
  RealD b = 1.0;
  RealD c = 0.0;
  Real beta = 2.13;
  Real light_mass = 0.01;
  Real strange_mass = 0.04;
  Real pv_mass = 1.0;
  std::vector<Real> hasenbusch({ light_mass, 0.04, 0.25, 0.4, 0.7 , pv_mass });

  // FIXME:
  // Same in MC and MD
  // Need to mix precision too
  OneFlavourRationalParams SFRp;
  SFRp.lo = 4.0e-3;
  SFRp.hi = 30.0;
  SFRp.MaxIter = 10000;
  SFRp.tolerance= 1.0e-8;
  SFRp.mdtolerance= 1.0e-6;
  SFRp.degree = 16;
  SFRp.precision= 50;
  SFRp.BoundsCheckFreq=5;

  OneFlavourRationalParams OFRp;
  OFRp.lo = 1.0e-4;
  OFRp.hi = 30.0;
  OFRp.MaxIter = 10000;
  OFRp.tolerance= 1.0e-8;
  OFRp.mdtolerance= 1.0e-6;
  OFRp.degree = 16;
  OFRp.precision= 50;
  OFRp.BoundsCheckFreq=5;

  auto GridPtr = TheHMC.Resources.GetCartesian();
  auto GridRBPtr = TheHMC.Resources.GetRBCartesian();

  ////////////////////////////////////////////////////////////////
  // Domain decomposed
  ////////////////////////////////////////////////////////////////
  Coordinate latt4 = GridPtr->GlobalDimensions();
  Coordinate mpi = GridPtr->ProcessorGrid();
  Coordinate shm;

  GlobalSharedMemory::GetShmDims(mpi,shm);

  Coordinate CommDim(Nd);
  for(int d=0;d<Nd;d++) CommDim[d]= (mpi[d]/shm[d])>1 ? 1 : 0;

  Coordinate Dirichlet(Nd+1,0);
  Dirichlet[1] = CommDim[0]*latt4[0]/mpi[0] * shm[0];
  Dirichlet[2] = CommDim[1]*latt4[1]/mpi[1] * shm[1];
  Dirichlet[3] = CommDim[2]*latt4[2]/mpi[2] * shm[2];
  Dirichlet[4] = CommDim[3]*latt4[3]/mpi[3] * shm[3];
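  // (comment added for clarity) Dirichlet[mu+1] is the 4d block extent in
  // direction mu: latt4/mpi is the per-rank local size and the shm factor
  // widens the block to a whole node, so the Dirichlet cut falls only on
  // links that would cross the inter-node network; a zero entry leaves that
  // direction fully connected.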
  Coordinate Block4(Nd);
  Block4[0] = Dirichlet[1];
  Block4[1] = Dirichlet[2];
  Block4[2] = Dirichlet[3];
  Block4[3] = Dirichlet[4];
  int Width=3;
  TheHMC.Resources.SetMomentumFilter(new DDHMCFilter<WilsonImplR::Field>(Block4,Width));
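  // (comment added for clarity; a reading of DDHMCFilter's role here, not a
  // quote of its documentation) momenta within Width sites of each Block4
  // boundary are zeroed, so the boundary links stay frozen during the MD
  // trajectory and the Dirichlet-split operators below remain consistent.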
  //////////////////////////
  // Fermion Grid
  //////////////////////////
  auto FGrid = SpaceTimeGrid::makeFiveDimGrid(Ls,GridPtr);
  auto FrbGrid = SpaceTimeGrid::makeFiveDimRedBlackGrid(Ls,GridPtr);

  IwasakiGaugeActionR GaugeAction(beta);

  // temporarily need a gauge field
  LatticeGaugeField U(GridPtr);

  // These lines are unnecessary if the BCs are all periodic
  std::vector<Complex> boundary = {1,1,1,-1};
  FermionAction::ImplParams Params(boundary);

  double StoppingCondition = 1e-8;
  double MaxCGIterations = 30000;
  ConjugateGradient<FermionField> CG(StoppingCondition,MaxCGIterations);

  ////////////////////////////////////
  // Collect actions
  ////////////////////////////////////
  ActionLevel<HMCWrapper::Field> Level1(1);
  ActionLevel<HMCWrapper::Field> Level2(4);
  ActionLevel<HMCWrapper::Field> Level3(6);

  ////////////////////////////////////
  // Strange action
  ////////////////////////////////////
  FermionAction StrangeOp (U,*FGrid,*FrbGrid,*GridPtr,*GridRBPtr,strange_mass,M5,b,c, Params);
  FermionAction StrangePauliVillarsOp(U,*FGrid,*FrbGrid,*GridPtr,*GridRBPtr,pv_mass, M5,b,c, Params);

  FermionAction StrangeOpDir (U,*FGrid,*FrbGrid,*GridPtr,*GridRBPtr,strange_mass,M5,b,c, Params);
  FermionAction StrangePauliVillarsOpDir(U,*FGrid,*FrbGrid,*GridPtr,*GridRBPtr,pv_mass, M5,b,c, Params);
  StrangeOpDir.DirichletBlock(Dirichlet);
  StrangePauliVillarsOpDir.DirichletBlock(Dirichlet);

  OneFlavourEvenOddRatioRationalPseudoFermionAction<FermionImplPolicy> StrangePseudoFermionBdy(StrangeOpDir,StrangeOp,SFRp);
  OneFlavourEvenOddRatioRationalPseudoFermionAction<FermionImplPolicy> StrangePseudoFermionLocal(StrangePauliVillarsOpDir,StrangeOpDir,SFRp);
  OneFlavourEvenOddRatioRationalPseudoFermionAction<FermionImplPolicy> StrangePseudoFermionPVBdy(StrangePauliVillarsOp,StrangePauliVillarsOpDir,SFRp);
  Level1.push_back(&StrangePseudoFermionBdy);
  Level2.push_back(&StrangePseudoFermionLocal);
  Level1.push_back(&StrangePseudoFermionPVBdy);

  ////////////////////////////////////
  // up down action
  ////////////////////////////////////
  std::vector<Real> light_den;
  std::vector<Real> light_num;
  std::vector<int> dirichlet_den;
  std::vector<int> dirichlet_num;

  int n_hasenbusch = hasenbusch.size();
  light_den.push_back(light_mass); dirichlet_den.push_back(0);
  for(int h=0;h<n_hasenbusch;h++){
    light_den.push_back(hasenbusch[h]); dirichlet_den.push_back(1);
  }

  for(int h=0;h<n_hasenbusch;h++){
    light_num.push_back(hasenbusch[h]); dirichlet_num.push_back(1);
  }
  light_num.push_back(pv_mass); dirichlet_num.push_back(0);
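  // (worked example added for clarity) with hasenbusch = {0.01,0.04,0.25,0.4,0.7,1.0}
  // the loop below assembles, schematically:
  //   h=0   : det D(0.01) / det D(0.01)^Dirichlet -> boundary term, one-flavour
  //           rational taken twice (Bdys), integrated on Level1
  //   h=1..5: det D(m_h)^Dirichlet / det D(m_{h+1})^Dirichlet -> local
  //           two-flavour quotients, integrated on Level2
  //   h=6   : det D(1.0)^Dirichlet / det D(1.0) -> Pauli-Villars boundary
  //           quotient, kept on Level1 (coarse timestep)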
  std::vector<FermionAction *> Numerators;
  std::vector<FermionAction *> Denominators;
  std::vector<TwoFlavourEvenOddRatioPseudoFermionAction<FermionImplPolicy> *> Quotients;
  std::vector<OneFlavourEvenOddRatioRationalPseudoFermionAction<FermionImplPolicy> *> Bdys;

  for(int h=0;h<n_hasenbusch+1;h++){
    std::cout << GridLogMessage
              << " 2f quotient Action ";
    std::cout << "det D("<<light_den[h]<<")";
    if ( dirichlet_den[h] ) std::cout << "^dirichlet ";
    std::cout << "/ det D("<<light_num[h]<<")";
    if ( dirichlet_num[h] ) std::cout << "^dirichlet ";
    std::cout << std::endl;

    Numerators.push_back (new FermionAction(U,*FGrid,*FrbGrid,*GridPtr,*GridRBPtr,light_num[h],M5,b,c, Params));
    Denominators.push_back(new FermionAction(U,*FGrid,*FrbGrid,*GridPtr,*GridRBPtr,light_den[h],M5,b,c, Params));
    if(h!=0) {
      Quotients.push_back (new TwoFlavourEvenOddRatioPseudoFermionAction<FermionImplPolicy>(*Numerators[h],*Denominators[h],CG,CG));
    } else {
      Bdys.push_back( new OneFlavourEvenOddRatioRationalPseudoFermionAction<FermionImplPolicy>(*Numerators[h],*Denominators[h],OFRp));
      Bdys.push_back( new OneFlavourEvenOddRatioRationalPseudoFermionAction<FermionImplPolicy>(*Numerators[h],*Denominators[h],OFRp));
    }
    if ( dirichlet_den[h]==1) Denominators[h]->DirichletBlock(Dirichlet);
    if ( dirichlet_num[h]==1) Numerators[h]->DirichletBlock(Dirichlet);
  }

  int nquo=Quotients.size();
  Level1.push_back(Bdys[0]);
  Level1.push_back(Bdys[1]);
  for(int h=0;h<nquo-1;h++){
    Level2.push_back(Quotients[h]);
  }
  Level1.push_back(Quotients[nquo-1]); // PV dirichlet fix on coarse timestep

  /////////////////////////////////////////////////////////////
  // Gauge action
  /////////////////////////////////////////////////////////////
  Level3.push_back(&GaugeAction);
  TheHMC.TheAction.push_back(Level1);
  TheHMC.TheAction.push_back(Level2);
  TheHMC.TheAction.push_back(Level3);
  std::cout << GridLogMessage << " Action complete "<< std::endl;

  /////////////////////////////////////////////////////////////

  std::cout << GridLogMessage << " Running the HMC "<< std::endl;
  TheHMC.ReadCommandLine(argc,argv); // params on CML or from param file
  TheHMC.Run(); // no smearing

  Grid_finalize();
} // main
@ -217,9 +217,9 @@ int main (int argc, char ** argv)
      dbytes+=
        Grid.StencilSendToRecvFromBegin(requests,
                                        (void *)&xbuf[mu][0],
                                        xmit_to_rank,
                                        xmit_to_rank,1,
                                        (void *)&rbuf[mu][0],
                                        recv_from_rank,
                                        recv_from_rank,1,
                                        bytes,mu);

      comm_proc = mpi_layout[mu]-1;
@ -228,9 +228,9 @@ int main (int argc, char ** argv)
      dbytes+=
        Grid.StencilSendToRecvFromBegin(requests,
                                        (void *)&xbuf[mu+4][0],
                                        xmit_to_rank,
                                        xmit_to_rank,1,
                                        (void *)&rbuf[mu+4][0],
                                        recv_from_rank,
                                        recv_from_rank,1,
                                        bytes,mu+4);

    }
@ -309,9 +309,9 @@ int main (int argc, char ** argv)
      dbytes+=
        Grid.StencilSendToRecvFromBegin(requests,
                                        (void *)&xbuf[mu][0],
                                        xmit_to_rank,
                                        xmit_to_rank,1,
                                        (void *)&rbuf[mu][0],
                                        recv_from_rank,
                                        recv_from_rank,1,
                                        bytes,mu);
      Grid.StencilSendToRecvFromComplete(requests,mu);
      requests.resize(0);
@ -322,9 +322,9 @@ int main (int argc, char ** argv)
      dbytes+=
        Grid.StencilSendToRecvFromBegin(requests,
                                        (void *)&xbuf[mu+4][0],
                                        xmit_to_rank,
                                        xmit_to_rank,1,
                                        (void *)&rbuf[mu+4][0],
                                        recv_from_rank,
                                        recv_from_rank,1,
                                        bytes,mu+4);
      Grid.StencilSendToRecvFromComplete(requests,mu+4);
      requests.resize(0);
@ -411,8 +411,8 @@ int main (int argc, char ** argv)
          Grid.ShiftedRanks(mu,comm_proc,xmit_to_rank,recv_from_rank);
        }
        int tid = omp_get_thread_num();
        tbytes= Grid.StencilSendToRecvFrom((void *)&xbuf[dir][0], xmit_to_rank,
                                           (void *)&rbuf[dir][0], recv_from_rank, bytes,tid);
        tbytes= Grid.StencilSendToRecvFrom((void *)&xbuf[dir][0], xmit_to_rank,1,
                                           (void *)&rbuf[dir][0], recv_from_rank,1, bytes,tid);

        thread_critical { dbytes+=tbytes; }
      }

@ -32,18 +32,18 @@
using namespace std;
using namespace Grid;

template<class d>
struct scal {
  d internal;
////////////////////////
/// Move to domains ////
////////////////////////

Gamma::Algebra Gmu [] = {
  Gamma::Algebra::GammaX,
  Gamma::Algebra::GammaY,
  Gamma::Algebra::GammaZ,
  Gamma::Algebra::GammaT
};

Gamma::Algebra Gmu [] = {
  Gamma::Algebra::GammaX,
  Gamma::Algebra::GammaY,
  Gamma::Algebra::GammaZ,
  Gamma::Algebra::GammaT
};

void Benchmark(int Ls, Coordinate Dirichlet);

int main (int argc, char ** argv)
{
@ -52,24 +52,82 @@ int main (int argc, char ** argv)

  int threads = GridThread::GetThreads();

  Coordinate latt4 = GridDefaultLatt();
  int Ls=16;
  for(int i=0;i<argc;i++)
  for(int i=0;i<argc;i++) {
    if(std::string(argv[i]) == "-Ls"){
      std::stringstream ss(argv[i+1]); ss >> Ls;
    }
  }
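  // (usage note added for clarity; the binary name is taken from the job
  //  scripts below, and "-Ls" is parsed by the loop above rather than by
  //  Grid_init)
  //   ./benchmarks/Benchmark_dwf_fp32 --grid 32.32.32.32 --mpi 1.1.1.1 -Ls 12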
  //////////////////
  // With comms
  //////////////////
  Coordinate Dirichlet(Nd+1,0);

  std::cout << "\n\n\n\n\n\n" <<std::endl;
  std::cout << GridLogMessage<< "++++++++++++++++++++++++++++++++++++++++++++++++" <<std::endl;
  std::cout << GridLogMessage<< " Testing with full communication " <<std::endl;
  std::cout << GridLogMessage<< "++++++++++++++++++++++++++++++++++++++++++++++++" <<std::endl;

  Benchmark(Ls,Dirichlet);

  //////////////////
  // Domain decomposed
  //////////////////
  Coordinate latt4 = GridDefaultLatt();
  Coordinate mpi = GridDefaultMpi();
  Coordinate CommDim(Nd);
  Coordinate shm;
  GlobalSharedMemory::GetShmDims(mpi,shm);

  //////////////////////
  // Node level
  //////////////////////
  std::cout << "\n\n\n\n\n\n" <<std::endl;
  std::cout << GridLogMessage<< "++++++++++++++++++++++++++++++++++++++++++++++++" <<std::endl;
  std::cout << GridLogMessage<< " Testing without internode communication " <<std::endl;
  std::cout << GridLogMessage<< "++++++++++++++++++++++++++++++++++++++++++++++++" <<std::endl;

  for(int d=0;d<Nd;d++) CommDim[d]= (mpi[d]/shm[d])>1 ? 1 : 0;
  Dirichlet[0] = 0;
  Dirichlet[1] = CommDim[0]*latt4[0]/mpi[0] * shm[0];
  Dirichlet[2] = CommDim[1]*latt4[1]/mpi[1] * shm[1];
  Dirichlet[3] = CommDim[2]*latt4[2]/mpi[2] * shm[2];
  Dirichlet[4] = CommDim[3]*latt4[3]/mpi[3] * shm[3];

  Benchmark(Ls,Dirichlet);

  std::cout << "\n\n\n\n\n\n" <<std::endl;

  std::cout << GridLogMessage<< "++++++++++++++++++++++++++++++++++++++++++++++++" <<std::endl;
  std::cout << GridLogMessage<< " Testing without intranode communication " <<std::endl;
  std::cout << GridLogMessage<< "++++++++++++++++++++++++++++++++++++++++++++++++" <<std::endl;

  for(int d=0;d<Nd;d++) CommDim[d]= mpi[d]>1 ? 1 : 0;
  Dirichlet[0] = 0;
  Dirichlet[1] = CommDim[0]*latt4[0]/mpi[0];
  Dirichlet[2] = CommDim[1]*latt4[1]/mpi[1];
  Dirichlet[3] = CommDim[2]*latt4[2]/mpi[2];
  Dirichlet[4] = CommDim[3]*latt4[3]/mpi[3];
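  // (comment added for clarity) with CommDim keyed on mpi[d]>1 and no shm
  // factor, the Dirichlet extent shrinks to the per-rank local volume, so
  // halo exchange is suppressed between every pair of ranks, including
  // ranks sharing a node, whereas the node-level case above cut only the
  // off-node traffic.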
  Benchmark(Ls,Dirichlet);

  Grid_finalize();
  exit(0);
}
void Benchmark(int Ls, Coordinate Dirichlet)
{
  Coordinate latt4 = GridDefaultLatt();
  GridLogLayout();

  long unsigned int single_site_flops = 8*Nc*(7+16*Nc);

  GridCartesian * UGrid = SpaceTimeGrid::makeFourDimGrid(GridDefaultLatt(), GridDefaultSimd(Nd,vComplexF::Nsimd()),GridDefaultMpi());
  GridRedBlackCartesian * UrbGrid = SpaceTimeGrid::makeFourDimRedBlackGrid(UGrid);
  GridCartesian * FGrid = SpaceTimeGrid::makeFiveDimGrid(Ls,UGrid);
  GridRedBlackCartesian * FrbGrid = SpaceTimeGrid::makeFiveDimRedBlackGrid(Ls,UGrid);

  std::cout << GridLogMessage << "Making s innermost grids"<<std::endl;
  GridCartesian * sUGrid = SpaceTimeGrid::makeFourDimDWFGrid(GridDefaultLatt(),GridDefaultMpi());
  GridRedBlackCartesian * sUrbGrid = SpaceTimeGrid::makeFourDimRedBlackGrid(sUGrid);
  GridCartesian * sFGrid = SpaceTimeGrid::makeFiveDimDWFGrid(Ls,UGrid);
@ -80,9 +138,9 @@ int main (int argc, char ** argv)

  std::cout << GridLogMessage << "Initialising 4d RNG" << std::endl;
  GridParallelRNG RNG4(UGrid); RNG4.SeedUniqueString(std::string("The 4D RNG"));

  std::cout << GridLogMessage << "Initialising 5d RNG" << std::endl;
  GridParallelRNG RNG5(FGrid); RNG5.SeedUniqueString(std::string("The 5D RNG"));
  std::cout << GridLogMessage << "Initialised RNGs" << std::endl;

  LatticeFermionF src (FGrid); random(RNG5,src);
#if 0
@ -100,7 +158,6 @@ int main (int argc, char ** argv)
  src = src*N2;
#endif

  LatticeFermionF result(FGrid); result=Zero();
  LatticeFermionF ref(FGrid); ref=Zero();
  LatticeFermionF tmp(FGrid);
@ -108,29 +165,31 @@ int main (int argc, char ** argv)

  std::cout << GridLogMessage << "Drawing gauge field" << std::endl;
  LatticeGaugeFieldF Umu(UGrid);
  LatticeGaugeFieldF UmuCopy(UGrid);
  SU<Nc>::HotConfiguration(RNG4,Umu);
  UmuCopy=Umu;
  std::cout << GridLogMessage << "Random gauge initialised " << std::endl;
#if 0
  Umu=1.0;
  for(int mu=0;mu<Nd;mu++){
    LatticeColourMatrixF ttmp(UGrid);
    ttmp = PeekIndex<LorentzIndex>(Umu,mu);
    //    if (mu !=2 ) ttmp = 0;
    //    ttmp = ttmp* pow(10.0,mu);
    PokeIndex<LorentzIndex>(Umu,ttmp,mu);
  }
  std::cout << GridLogMessage << "Forced to diagonal " << std::endl;
#endif

  ////////////////////////////////////
  // Apply BCs
  ////////////////////////////////////
  Coordinate Block(4);
  for(int d=0;d<4;d++) Block[d]= Dirichlet[d+1];

  std::cout << GridLogMessage << "Applying BCs for Dirichlet Block5 " << Dirichlet << std::endl;
  std::cout << GridLogMessage << "Applying BCs for Dirichlet Block4 " << Block << std::endl;

  DirichletFilter<LatticeGaugeFieldF> Filter(Block);
  Filter.applyFilter(Umu);

  ////////////////////////////////////
  // Naive wilson implementation
  ////////////////////////////////////
  // replicate across fifth dimension
  //  LatticeGaugeFieldF Umu5d(FGrid);
  std::vector<LatticeColourMatrixF> U(4,UGrid);
  for(int mu=0;mu<Nd;mu++){
    U[mu] = PeekIndex<LorentzIndex>(Umu,mu);
  }

  std::cout << GridLogMessage << "Setting up Cshift based reference " << std::endl;

  if (1)
@ -191,11 +250,13 @@ int main (int argc, char ** argv)
  std::cout << GridLogMessage<< "*****************************************************************" <<std::endl;

  DomainWallFermionF Dw(Umu,*FGrid,*FrbGrid,*UGrid,*UrbGrid,mass,M5);
  Dw.DirichletBlock(Dirichlet);
  Dw.ImportGauge(Umu);

  int ncall =300;

  if (1) {
    FGrid->Barrier();
    Dw.ZeroCounters();
    Dw.Dhop(src,result,0);
    std::cout<<GridLogMessage<<"Called warmup"<<std::endl;
    double t0=usecond();
@ -220,29 +281,20 @@ int main (int argc, char ** argv)
    double data_mem = (volume * (2*Nd+1)*Nd*Nc + (volume/Ls) *2*Nd*Nc*Nc) * simdwidth / nsimd * ncall / (1024.*1024.*1024.);

    std::cout<<GridLogMessage << "Called Dw "<<ncall<<" times in "<<t1-t0<<" us"<<std::endl;
    // std::cout<<GridLogMessage << "norm result "<< norm2(result)<<std::endl;
    // std::cout<<GridLogMessage << "norm ref "<< norm2(ref)<<std::endl;
    std::cout<<GridLogMessage << "mflop/s = "<< flops/(t1-t0)<<std::endl;
    std::cout<<GridLogMessage << "mflop/s per rank = "<< flops/(t1-t0)/NP<<std::endl;
    std::cout<<GridLogMessage << "mflop/s per node = "<< flops/(t1-t0)/NN<<std::endl;
    std::cout<<GridLogMessage << "RF GiB/s (base 2) = "<< 1000000. * data_rf/((t1-t0))<<std::endl;
    std::cout<<GridLogMessage << "mem GiB/s (base 2) = "<< 1000000. * data_mem/((t1-t0))<<std::endl;
    // std::cout<<GridLogMessage << "RF GiB/s (base 2) = "<< 1000000. * data_rf/((t1-t0))<<std::endl;
    // std::cout<<GridLogMessage << "mem GiB/s (base 2) = "<< 1000000. * data_mem/((t1-t0))<<std::endl;
    err = ref-result;
    std::cout<<GridLogMessage << "norm diff "<< norm2(err)<<std::endl;
    //exit(0);

    if(( norm2(err)>1.0e-4) ) {
      /*
      std::cout << "RESULT\n " << result<<std::endl;
      std::cout << "REF \n " << ref <<std::endl;
      std::cout << "ERR \n " << err <<std::endl;
      */
      std::cout<<GridLogMessage << "WRONG RESULT" << std::endl;
      FGrid->Barrier();
      exit(-1);
    }
    assert (norm2(err)< 1.0e-4 );
    Dw.Report();
  }

  if (1)
@ -286,21 +338,20 @@ int main (int argc, char ** argv)
      }
      ref = -0.5*ref;
    }
    // dump=1;
    Dw.Dhop(src,result,1);

    Dw.Dhop(src,result,DaggerYes);

    std::cout << GridLogMessage << "----------------------------------------------------------------" << std::endl;
    std::cout << GridLogMessage << "Compare to naive wilson implementation Dag to verify correctness" << std::endl;
    std::cout << GridLogMessage << "----------------------------------------------------------------" << std::endl;

    std::cout<<GridLogMessage << "Called DwDag"<<std::endl;
    std::cout<<GridLogMessage << "norm dag result "<< norm2(result)<<std::endl;
    std::cout<<GridLogMessage << "norm dag ref "<< norm2(ref)<<std::endl;
    err = ref-result;
    std::cout<<GridLogMessage << "norm dag diff "<< norm2(err)<<std::endl;
    if((norm2(err)>1.0e-4)){
      /*
      std::cout<< "DAG RESULT\n " <<ref << std::endl;
      std::cout<< "DAG sRESULT\n " <<result << std::endl;
      std::cout<< "DAG ERR \n " << err <<std::endl;
      */
    }
    assert((norm2(err)<1.0e-4));

    LatticeFermionF src_e (FrbGrid);
    LatticeFermionF src_o (FrbGrid);
    LatticeFermionF r_e (FrbGrid);
@ -330,7 +381,6 @@ int main (int argc, char ** argv)
    if ( WilsonKernelsStatic::Opt == WilsonKernelsStatic::OptInlineAsm ) std::cout << GridLogMessage<< "* Using Asm Nc=3 WilsonKernels" <<std::endl;
    std::cout << GridLogMessage<< "*********************************************************" <<std::endl;
    {
      Dw.ZeroCounters();
      FGrid->Barrier();
      Dw.DhopEO(src_o,r_e,DaggerNo);
      double t0=usecond();
@ -352,7 +402,6 @@ int main (int argc, char ** argv)
      std::cout<<GridLogMessage << "Deo mflop/s = "<< flops/(t1-t0)<<std::endl;
      std::cout<<GridLogMessage << "Deo mflop/s per rank "<< flops/(t1-t0)/NP<<std::endl;
      std::cout<<GridLogMessage << "Deo mflop/s per node "<< flops/(t1-t0)/NN<<std::endl;
      Dw.Report();
    }
    Dw.DhopEO(src_o,r_e,DaggerNo);
    Dw.DhopOE(src_e,r_o,DaggerNo);
@ -367,13 +416,7 @@ int main (int argc, char ** argv)

    err = r_eo-result;
    std::cout<<GridLogMessage << "norm diff "<< norm2(err)<<std::endl;
    if((norm2(err)>1.0e-4)){
      /*
      std::cout<< "Deo RESULT\n " <<r_eo << std::endl;
      std::cout<< "Deo REF\n " <<result << std::endl;
      std::cout<< "Deo ERR \n " << err <<std::endl;
      */
    }
    assert(norm2(err)<1.0e-4);

    pickCheckerboard(Even,src_e,err);
    pickCheckerboard(Odd,src_o,err);
@ -382,6 +425,4 @@ int main (int argc, char ** argv)

    assert(norm2(src_e)<1.0e-4);
    assert(norm2(src_o)<1.0e-4);
    Grid_finalize();
    exit(0);
  }
@ -1,27 +1,19 @@
#!/bin/bash

LOG=$1
SWEEPS=`grep dH.= $LOG | wc -l`
SWEEPS=`expr $SWEEPS - 100`
SWEEPS=`grep dH $LOG | wc -l`
SWEEPS=`expr $SWEEPS - 80`
echo
echo $SWEEPS thermalised sweeps
echo
plaq=`grep Plaq $LOG | tail -n $SWEEPS | awk '{ S=S+$12} END { print S/NR} ' `
plaqe=`grep Plaq $LOG | tail -n $SWEEPS | awk '{ S=S+$12 ; SS=SS+$12*$12 } END { print sqrt( (SS/NR - S*S/NR/NR)/NR) } ' `
plaq=`grep Plaq $LOG | tail -n $SWEEPS | awk '{ S=S+$10} END { print S/NR} ' `
plaqe=`grep Plaq $LOG | tail -n $SWEEPS | awk '{ S=S+$10 ; SS=SS+$10*$10 } END { print sqrt( (SS/NR - S*S/NR/NR)/NR) } ' `
echo "Plaquette: $plaq (${plaqe})"
echo

grep Plaq $LOG | tail -n $SWEEPS | awk '{ S=S+$12/20; if(NR%20==0){ print NR/20, " ", S; S=0;} } ' > plaq.binned

plaq=`cat plaq.binned | awk '{ S=S+$2} END { print S/NR} ' `
plaqe=`cat plaq.binned | awk '{ S=S+$2 ; SS=SS+$2*$2 } END { print sqrt( (SS/NR - S*S/NR/NR)/NR) } ' `
echo "Binned Plaquette: $plaq (${plaqe})"
echo
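# (note added for clarity) binning 20 consecutive sweeps before averaging damps
# autocorrelation, so the binned error above is more trustworthy than the naive
# single-sweep estimate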
dHv=`grep dH.= $LOG | tail -n $SWEEPS | awk '{ S=S+$16 ; SS=SS+$16*$16 } END { print sqrt(SS/NR) } ' `
edH=`grep dH.= $LOG | tail -n $SWEEPS | awk '{ S=S+exp(-$16)} END { print S/NR} '`
dedH=`grep dH.= $LOG | tail -n $SWEEPS | awk '{ S=S+exp(-$16); SS=SS+exp(-$16)*exp(-$16)} END { print sqrt( (SS/NR - S*S/NR/NR)/NR) } '`
echo "<e-dH>: $edH (${dedH})"
dHv=`grep dH $LOG | tail -n $SWEEPS | awk '{ S=S+$10 ; SS=SS+$10*$10 } END { print sqrt(SS/NR) } ' `
edH=`grep dH $LOG | tail -n $SWEEPS | awk '{ S=S+exp(-$10)} END { print S/NR} '`
echo "<e-dH>: $edH"
echo "<rms dH>: $dHv"

TRAJ=`grep Acc $LOG | wc -l`
@ -30,13 +22,12 @@ PACC=`expr 100 \* ${ACC} / ${TRAJ} `
echo
echo "Acceptance $PACC % $ACC / $TRAJ "

grep Plaq $LOG | awk '{ print $12 }' | uniq > plaq.dat
grep dH.= $LOG | awk '{ print $16 }' > dH.dat
echo set yrange [0.58:0.60] > plot.gnu
grep Plaq $LOG | awk '{ print $10 }' | uniq > plaq.dat
grep dH $LOG | awk '{ print $10 }' > dH.dat
echo set yrange [-0.2:1.0] > plot.gnu
echo set terminal 'pdf' >> plot.gnu
echo "f(x) =0.588" >> plot.gnu
echo "set output 'plaq.${LOG}.pdf'" >> plot.gnu
echo "plot 'plaq.dat' w l, f(x) " >> plot.gnu
echo "plot 'plaq.dat' w l, 'dH.dat' w l " >> plot.gnu
echo
gnuplot plot.gnu >& gnu.errs
open plaq.${LOG}.pdf
systems/Crusher/comms.slurm (new file, 26 lines)
@ -0,0 +1,26 @@
#!/bin/bash
# Begin SLURM directives
#SBATCH -A LGT104
#SBATCH -t 01:00:00
##SBATCH -U openmpThu
#SBATCH -p ecp
#SBATCH -J comms
#SBATCH -o comms.%J
#SBATCH -e comms.%J
#SBATCH -N 1
#SBATCH -n 2

DIR=.
module list
export MPIR_CVAR_GPU_EAGER_DEVICE_MEM=0
export MPICH_GPU_SUPPORT_ENABLED=1
#export MPICH_SMP_SINGLE_COPY_MODE=XPMEM
#export MPICH_SMP_SINGLE_COPY_MODE=CMA
export MPICH_SMP_SINGLE_COPY_MODE=NONE
export OMP_NUM_THREADS=8

AT=8
echo MPICH_SMP_SINGLE_COPY_MODE $MPICH_SMP_SINGLE_COPY_MODE
PARAMS=" --accelerator-threads ${AT} --grid 64.64.32.32 --mpi 2.1.1.1 "
srun -n2 --label -c$OMP_NUM_THREADS --gpus-per-task=1 ./mpiwrapper.sh ./benchmarks/Benchmark_comms_host_device $PARAMS
@ -5,6 +5,8 @@
--enable-gen-simd-width=64 \
--enable-simd=GPU \
--disable-fermion-reps \
--with-gmp=$OLCF_GMP_ROOT \
--with-mpfr=/opt/cray/pe/gcc/mpfr/3.1.4/ \
--disable-gparity \
CXX=hipcc MPICXX=mpicxx \
CXXFLAGS="-fPIC -I/opt/rocm-4.5.0/include/ -std=c++14 -I${MPICH_DIR}/include " \
@ -3,28 +3,28 @@
#SBATCH -A LGT104
#SBATCH -t 01:00:00
##SBATCH -U openmpThu
##SBATCH -p ecp
#SBATCH -J DWF
#SBATCH -o DWF.%J
#SBATCH -e DWF.%J
#SBATCH -N 1
#SBATCH -n 1
#SBATCH --exclusive
#SBATCH -n 8
#SBATCH --exclusive
#SBATCH --gpu-bind=map_gpu:0,1,2,3,7,6,5,4

DIR=.
module list
#export MPIR_CVAR_GPU_EAGER_DEVICE_MEM=0
export MPIR_CVAR_GPU_EAGER_DEVICE_MEM=0
export MPICH_GPU_SUPPORT_ENABLED=1
export MPICH_SMP_SINGLE_COPY_MODE=XPMEM
#export MPICH_SMP_SINGLE_COPY_MODE=NONE
#export MPICH_SMP_SINGLE_COPY_MODE=XPMEM
export MPICH_SMP_SINGLE_COPY_MODE=NONE
#export MPICH_SMP_SINGLE_COPY_MODE=CMA
export OMP_NUM_THREADS=1

AT=8
echo MPICH_SMP_SINGLE_COPY_MODE $MPICH_SMP_SINGLE_COPY_MODE

PARAMS=" --accelerator-threads ${AT} --grid 24.24.24.24 --shm-mpi 0 --mpi 1.1.1.1"

srun --gpus-per-task 1 -n1 ./benchmarks/Benchmark_dwf_fp32 $PARAMS
PARAMS=" --accelerator-threads 16 --grid 32.32.32.256 --mpi 1.1.1.8 --comms-overlap --shm 2048 --shm-mpi 0"
echo $PARAMS
srun --gpus-per-task 1 -n8 ./benchmarks/Benchmark_dwf_fp32 $PARAMS
@ -6,22 +6,43 @@
#SBATCH -J DWF
#SBATCH -o DWF.%J
#SBATCH -e DWF.%J
#SBATCH -N 1
#SBATCH -n 8
#SBATCH -N 8
#SBATCH -n 64
#SBATCH --exclusive
#SBATCH --gpu-bind=map_gpu:0,1,2,3,7,6,5,4

DIR=.
module list
export MPICH_OFI_NIC_POLICY=GPU
export MPIR_CVAR_GPU_EAGER_DEVICE_MEM=0
export MPICH_GPU_SUPPORT_ENABLED=1
export MPICH_SMP_SINGLE_COPY_MODE=XPMEM
#export MPICH_SMP_SINGLE_COPY_MODE=NONE
#export MPICH_SMP_SINGLE_COPY_MODE=XPMEM
#export MPICH_SMP_SINGLE_COPY_MODE=CMA
export MPICH_SMP_SINGLE_COPY_MODE=NONE
export OMP_NUM_THREADS=1

echo MPICH_SMP_SINGLE_COPY_MODE $MPICH_SMP_SINGLE_COPY_MODE
PARAMS=" --accelerator-threads 8 --grid 32.64.64.64 --mpi 1.2.2.2 --comms-overlap --shm 2048 --shm-mpi 0"

srun --gpus-per-task 1 -n8 ./mpiwrapper.sh ./benchmarks/Benchmark_dwf_fp32 $PARAMS
PARAMS=" --accelerator-threads 16 --grid 64.64.64.256 --mpi 2.2.2.8 --comms-overlap --shm 2048 --shm-mpi 0"
echo $PARAMS
#srun --gpus-per-task 1 -N8 -n64 ./benchmarks/Benchmark_dwf_fp32 $PARAMS > dwf.64.64.64.256.8node

PARAMS=" --accelerator-threads 16 --grid 64.64.64.32 --mpi 4.4.4.1 --comms-overlap --shm 2048 --shm-mpi 1"
echo $PARAMS
srun --gpus-per-task 1 -N8 -n64 ./benchmarks/Benchmark_dwf_fp32 $PARAMS > dwf.64.64.64.32.8node

PARAMS=" --accelerator-threads 16 --grid 64.64.64.32 --mpi 4.4.4.1 --comms-overlap --shm 2048 --shm-mpi 0"
echo $PARAMS
#srun --gpus-per-task 1 -N8 -n64 ./benchmarks/Benchmark_dwf_fp32 $PARAMS > dwf.64.64.64.32.8node.shm0

PARAMS=" --accelerator-threads 16 --grid 64.64.64.32 --mpi 2.2.2.8 --comms-overlap --shm 2048 --shm-mpi 1"
echo $PARAMS
#srun --gpus-per-task 1 -N8 -n64 ./benchmarks/Benchmark_ITT $PARAMS > itt.8node

PARAMS=" --accelerator-threads 16 --grid 64.64.64.32 --mpi 2.2.2.8 --comms-overlap --shm 2048 --shm-mpi 0"
echo $PARAMS
#srun --gpus-per-task 1 -N8 -n64 ./benchmarks/Benchmark_ITT $PARAMS > itt.8node_shm0
@ -1,10 +1,11 @@
#!/bin/bash

lrank=$SLURM_LOCALID
lgpu=(0 1 2 3 7 6 5 4)

export ROCR_VISIBLE_DEVICES=$SLURM_LOCALID
export ROCR_VISIBLE_DEVICES=${lgpu[$lrank]}
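# (note added for clarity) the permuted lgpu list presumably mirrors the node's
# GPU/NUMA wiring, so each local rank is pinned to the device nearest its NIC;
# the straight SLURM_LOCALID mapping it replaces ignored that topology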
echo "`hostname` - $lrank device=$ROCR_VISIBLE_DEVICES binding=$BINDING"
echo "`hostname` - $lrank device=$ROCR_VISIBLE_DEVICES "

$*
@ -3,3 +3,4 @@ module load rocm/4.5.0
module load gmp
module load cray-fftw
module load craype-accel-amd-gfx90a
export LD_LIBRARY_PATH=/opt/gcc/mpfr/3.1.4/lib:$LD_LIBRARY_PATH
@ -6,6 +6,8 @@
--enable-simd=GPU \
--disable-fermion-reps \
--disable-gparity \
--with-gmp=$OLCF_GMP_ROOT \
--with-mpfr=/opt/cray/pe/gcc/mpfr/3.1.4/ \
CXX=hipcc MPICXX=mpicxx \
CXXFLAGS="-fPIC -I/opt/rocm-4.3.0/include/ -std=c++14 -I${MPICH_DIR}/include " \
--prefix=/ccs/home/chulwoo/Grid \
@ -1,8 +1,7 @@
#!/bin/bash
# Begin SLURM directives
#SBATCH -A LGT104
#SBATCH -t 01:00:00
##SBATCH -U openmpThu
#SBATCH -t 3:00:00
#SBATCH -p ecp
#SBATCH -J DWF
#SBATCH -o DWF.%J
@ -14,13 +13,12 @@ DIR=.
module list
export MPIR_CVAR_GPU_EAGER_DEVICE_MEM=0
export MPICH_GPU_SUPPORT_ENABLED=1
#export MPICH_SMP_SINGLE_COPY_MODE=XPMEM
export MPICH_SMP_SINGLE_COPY_MODE=NONE
#export MPICH_SMP_SINGLE_COPY_MODE=CMA
export MPICH_SMP_SINGLE_COPY_MODE=CMA

export OMP_NUM_THREADS=8

AT=8
echo MPICH_SMP_SINGLE_COPY_MODE $MPICH_SMP_SINGLE_COPY_MODE
PARAMS=" --accelerator-threads ${AT} --grid 32.64.64.64 --mpi 1.2.2.2 --comms-overlap --shm 2048 --shm-mpi 0"
srun -n8 --label -c$OMP_NUM_THREADS --gpus-per-task=1 ./mpiwrapper.sh ./benchmarks/Benchmark_dwf_fp32 $PARAMS
PARAMS=" --accelerator-threads ${AT} --grid 16.16.16.48 --mpi 1.2.2.2 --comms-overlap --shm 2048 --shm-mpi 0"
srun -N2 -n8 --label -c$OMP_NUM_THREADS --gpus-per-task=1 ./mpiwrapper.sh ./HMC/Mobius2p1f_DD_RHMC $PARAMS
@ -1,5 +1,9 @@
module load emacs
module load PrgEnv-gnu
module load rocm/4.3.0
module load rocm/4.5.0
module load gmp
module load cray-fftw
module load craype-accel-amd-gfx908
export MPIR_CVAR_GPU_EAGER_DEVICE_MEM=0
export MPICH_GPU_SUPPORT_ENABLED=1
export LD_LIBRARY_PATH=/opt/cray/pe/gcc/mpfr/3.1.4/lib/:$LD_LIBRARY_PATH
@ -1,25 +1,25 @@
tu-c0r0n00 - 0 device=0 binding=--interleave=0,1
tu-c0r0n00 - 1 device=1 binding=--interleave=2,3
tu-c0r0n09 - 1 device=1 binding=--interleave=2,3
tu-c0r0n00 - 2 device=2 binding=--interleave=4,5
tu-c0r0n06 - 0 device=0 binding=--interleave=0,1
tu-c0r0n06 - 1 device=1 binding=--interleave=2,3
tu-c0r0n09 - 0 device=0 binding=--interleave=0,1
tu-c0r0n09 - 2 device=2 binding=--interleave=4,5
tu-c0r0n03 - 1 device=1 binding=--interleave=2,3
tu-c0r0n06 - 2 device=2 binding=--interleave=4,5
tu-c0r0n09 - 3 device=3 binding=--interleave=6,7
tu-c0r0n00 - 3 device=3 binding=--interleave=6,7
tu-c0r0n03 - 0 device=0 binding=--interleave=0,1
tu-c0r0n03 - 2 device=2 binding=--interleave=4,5
tu-c0r0n06 - 3 device=3 binding=--interleave=6,7
tu-c0r0n03 - 3 device=3 binding=--interleave=6,7
tu-c0r3n00 - 0 device=0 binding=--interleave=0,1
tu-c0r3n00 - 1 device=1 binding=--interleave=2,3
tu-c0r3n00 - 2 device=2 binding=--interleave=4,5
tu-c0r3n00 - 3 device=3 binding=--interleave=6,7
tu-c0r3n06 - 1 device=1 binding=--interleave=2,3
tu-c0r3n06 - 3 device=3 binding=--interleave=6,7
tu-c0r3n06 - 0 device=0 binding=--interleave=0,1
tu-c0r3n06 - 2 device=2 binding=--interleave=4,5
tu-c0r3n03 - 1 device=1 binding=--interleave=2,3
tu-c0r3n03 - 2 device=2 binding=--interleave=4,5
tu-c0r3n03 - 0 device=0 binding=--interleave=0,1
tu-c0r3n03 - 3 device=3 binding=--interleave=6,7
tu-c0r3n09 - 0 device=0 binding=--interleave=0,1
tu-c0r3n09 - 1 device=1 binding=--interleave=2,3
tu-c0r3n09 - 2 device=2 binding=--interleave=4,5
tu-c0r3n09 - 3 device=3 binding=--interleave=6,7
OPENMPI detected
AcceleratorCudaInit: using default device
AcceleratorCudaInit: assume user either uses a) IBM jsrun, or
AcceleratorCudaInit: assume user either uses
AcceleratorCudaInit: a) IBM jsrun, or
AcceleratorCudaInit: b) invokes through a wrapping script to set CUDA_VISIBLE_DEVICES, UCX_NET_DEVICES, and numa binding
AcceleratorCudaInit: Configure options --enable-summit, --enable-select-gpu=no
AcceleratorCudaInit: ================================================
AcceleratorCudaInit: Configure options --enable-setdevice=no
OPENMPI detected
AcceleratorCudaInit[0]: ========================
AcceleratorCudaInit[0]: Device Number : 0
@ -33,11 +33,41 @@ AcceleratorCudaInit[0]: pciBusID: 3
AcceleratorCudaInit[0]: pciDeviceID: 0
AcceleratorCudaInit[0]: maxGridSize (2147483647,65535,65535)
AcceleratorCudaInit: using default device
AcceleratorCudaInit: assume user either uses a) IBM jsrun, or
AcceleratorCudaInit: assume user either uses
AcceleratorCudaInit: a) IBM jsrun, or
AcceleratorCudaInit: b) invokes through a wrapping script to set CUDA_VISIBLE_DEVICES, UCX_NET_DEVICES, and numa binding
AcceleratorCudaInit: Configure options --enable-summit, --enable-select-gpu=no
AcceleratorCudaInit: ================================================
AcceleratorCudaInit: Configure options --enable-setdevice=no
OPENMPI detected
AcceleratorCudaInit: using default device
AcceleratorCudaInit: assume user either uses
AcceleratorCudaInit: a) IBM jsrun, or
AcceleratorCudaInit: b) invokes through a wrapping script to set CUDA_VISIBLE_DEVICES, UCX_NET_DEVICES, and numa binding
AcceleratorCudaInit: Configure options --enable-setdevice=no
OPENMPI detected
AcceleratorCudaInit: using default device
AcceleratorCudaInit: assume user either uses
AcceleratorCudaInit: a) IBM jsrun, or
AcceleratorCudaInit: b) invokes through a wrapping script to set CUDA_VISIBLE_DEVICES, UCX_NET_DEVICES, and numa binding
AcceleratorCudaInit: Configure options --enable-setdevice=no
OPENMPI detected
AcceleratorCudaInit: using default device
AcceleratorCudaInit: assume user either uses
AcceleratorCudaInit: a) IBM jsrun, or
AcceleratorCudaInit: b) invokes through a wrapping script to set CUDA_VISIBLE_DEVICES, UCX_NET_DEVICES, and numa binding
AcceleratorCudaInit: Configure options --enable-setdevice=no
OPENMPI detected
OPENMPI detected
AcceleratorCudaInit: using default device
AcceleratorCudaInit: assume user either uses
AcceleratorCudaInit: a) IBM jsrun, or
AcceleratorCudaInit: b) invokes through a wrapping script to set CUDA_VISIBLE_DEVICES, UCX_NET_DEVICES, and numa binding
AcceleratorCudaInit: Configure options --enable-setdevice=no
OPENMPI detected
AcceleratorCudaInit: using default device
AcceleratorCudaInit: assume user either uses
AcceleratorCudaInit: a) IBM jsrun, or
AcceleratorCudaInit: b) invokes through a wrapping script to set CUDA_VISIBLE_DEVICES, UCX_NET_DEVICES, and numa binding
AcceleratorCudaInit: Configure options --enable-setdevice=no
AcceleratorCudaInit[0]: ========================
AcceleratorCudaInit[0]: Device Number : 0
AcceleratorCudaInit[0]: ========================
@ -50,43 +80,25 @@ AcceleratorCudaInit[0]: pciBusID: 3
AcceleratorCudaInit[0]: pciDeviceID: 0
AcceleratorCudaInit[0]: maxGridSize (2147483647,65535,65535)
AcceleratorCudaInit: using default device
AcceleratorCudaInit: assume user either uses a) IBM jsrun, or
AcceleratorCudaInit: assume user either uses
AcceleratorCudaInit: a) IBM jsrun, or
AcceleratorCudaInit: b) invokes through a wrapping script to set CUDA_VISIBLE_DEVICES, UCX_NET_DEVICES, and numa binding
AcceleratorCudaInit: Configure options --enable-summit, --enable-select-gpu=no
AcceleratorCudaInit: Configure options --enable-setdevice=no
local rank 1 device 0 bus id: 0000:44:00.0
AcceleratorCudaInit: ================================================
OPENMPI detected
AcceleratorCudaInit: using default device
AcceleratorCudaInit: assume user either uses a) IBM jsrun, or
AcceleratorCudaInit: b) invokes through a wrapping script to set CUDA_VISIBLE_DEVICES, UCX_NET_DEVICES, and numa binding
AcceleratorCudaInit: Configure options --enable-summit, --enable-select-gpu=no
local rank 0 device 0 bus id: 0000:03:00.0
AcceleratorCudaInit: ================================================
OPENMPI detected
AcceleratorCudaInit: using default device
AcceleratorCudaInit: assume user either uses a) IBM jsrun, or
AcceleratorCudaInit: b) invokes through a wrapping script to set CUDA_VISIBLE_DEVICES, UCX_NET_DEVICES, and numa binding
AcceleratorCudaInit: Configure options --enable-summit, --enable-select-gpu=no
AcceleratorCudaInit: ================================================
OPENMPI detected
AcceleratorCudaInit: using default device
AcceleratorCudaInit: assume user either uses a) IBM jsrun, or
AcceleratorCudaInit: b) invokes through a wrapping script to set CUDA_VISIBLE_DEVICES, UCX_NET_DEVICES, and numa binding
AcceleratorCudaInit: Configure options --enable-summit, --enable-select-gpu=no
AcceleratorCudaInit: ================================================
OPENMPI detected
AcceleratorCudaInit: using default device
AcceleratorCudaInit: assume user either uses a) IBM jsrun, or
AcceleratorCudaInit: b) invokes through a wrapping script to set CUDA_VISIBLE_DEVICES, UCX_NET_DEVICES, and numa binding
AcceleratorCudaInit: Configure options --enable-summit, --enable-select-gpu=no
AcceleratorCudaInit: ================================================
OPENMPI detected
AcceleratorCudaInit: using default device
AcceleratorCudaInit: assume user either uses a) IBM jsrun, or
AcceleratorCudaInit: b) invokes through a wrapping script to set CUDA_VISIBLE_DEVICES, UCX_NET_DEVICES, and numa binding
AcceleratorCudaInit: Configure options --enable-summit, --enable-select-gpu=no
AcceleratorCudaInit: ================================================
local rank 0 device 0 bus id: 0000:03:00.0
AcceleratorCudaInit: ================================================
AcceleratorCudaInit: ================================================
local rank 2 device 0 bus id: 0000:84:00.0
SharedMemoryMpi:  World communicator of size 16
SharedMemoryMpi:  Node  communicator of size 4
0SharedMemoryMpi:  SharedMemoryMPI.cc acceleratorAllocDevice 2147483648bytes at 0x7fcd80000000 for comms buffers
0SharedMemoryMpi:  SharedMemoryMPI.cc acceleratorAllocDevice 2147483648bytes at 0x153960000000 for comms buffers
Setting up IPC

__|__|__|__|__|__|__|__|__|__|__|__|__|__|__
@ -116,7 +128,7 @@ This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
Current Grid git commit hash=9d2238148c56e3fbadfa95dcabf2b83d4bde14cd: (HEAD -> develop) uncommited changes
Current Grid git commit hash=da06d15f73184ceb15d66d4e7e702b02fed7b940: (HEAD -> feature/dirichlet, develop) uncommited changes

Grid : Message : ================================================
Grid : Message : MPI is initialised and logging filters activated
@ -124,122 +136,102 @@ Grid : Message : ================================================
Grid : Message : Requested 2147483648 byte stencil comms buffers
Grid : Message : MemoryManager Cache 34004218675 bytes
Grid : Message : MemoryManager::Init() setting up
Grid : Message : MemoryManager::Init() cache pool for recent allocations: SMALL 32 LARGE 8
Grid : Message : MemoryManager::Init() cache pool for recent allocations: SMALL 8 LARGE 2
Grid : Message : MemoryManager::Init() Non unified: Caching accelerator data in dedicated memory
Grid : Message : MemoryManager::Init() Using cudaMalloc
Grid : Message : 1.198523 s : Grid Layout
Grid : Message : 1.198530 s : Global lattice size : 64 64 64 64
Grid : Message : 1.198534 s : OpenMP threads : 4
Grid : Message : 1.198535 s : MPI tasks : 2 2 2 2
Grid : Message : 1.397615 s : Making s innermost grids
Grid : Message : 1.441828 s : Initialising 4d RNG
Grid : Message : 1.547973 s : Intialising parallel RNG with unique string 'The 4D RNG'
Grid : Message : 1.547998 s : Seed SHA256: 49db4542db694e3b1a74bf2592a8c1b83bfebbe18401693c2609a4c3af1
Grid : Message : 1.954777 s : Initialising 5d RNG
Grid : Message : 3.633825 s : Intialising parallel RNG with unique string 'The 5D RNG'
Grid : Message : 3.633869 s : Seed SHA256: b6316f2fac44ce14111f93e0296389330b077bfd0a7b359f781c58589f8a
Grid : Message : 12.162710 s : Initialised RNGs
Grid : Message : 15.882520 s : Drawing gauge field
Grid : Message : 15.816362 s : Random gauge initialised
Grid : Message : 17.279671 s : Setting up Cshift based reference
Grid : Message : 26.331426 s : *****************************************************************
Grid : Message : 26.331452 s : * Kernel options --dslash-generic, --dslash-unroll, --dslash-asm
Grid : Message : 26.331454 s : *****************************************************************
Grid : Message : 26.331456 s : *****************************************************************
Grid : Message : 26.331458 s : * Benchmarking DomainWallFermionR::Dhop
Grid : Message : 26.331459 s : * Vectorising space-time by 8
Grid : Message : 26.331463 s : * VComplexF size is 64 B
Grid : Message : 26.331465 s : * SINGLE precision
Grid : Message : 26.331467 s : * Using Overlapped Comms/Compute
Grid : Message : 26.331468 s : * Using GENERIC Nc WilsonKernels
Grid : Message : 26.331469 s : *****************************************************************
Grid : Message : 28.413717 s : Called warmup
Grid : Message : 56.418423 s : Called Dw 3000 times in 2.80047e+07 us
Grid : Message : 56.418476 s : mflop/s = 3.79581e+07
Grid : Message : 56.418479 s : mflop/s per rank = 2.37238e+06
Grid : Message : 56.418481 s : mflop/s per node = 9.48953e+06
Grid : Message : 56.418483 s : RF GiB/s (base 2) = 77130
Grid : Message : 56.418485 s : mem GiB/s (base 2) = 48206.3
Grid : Message : 56.422076 s : norm diff 1.03481e-13
Grid : Message : 56.456894 s : #### Dhop calls report
Grid : Message : 56.456899 s : WilsonFermion5D Number of DhopEO Calls : 6002
Grid : Message : 56.456903 s : WilsonFermion5D TotalTime /Calls : 4710.93 us
Grid : Message : 56.456905 s : WilsonFermion5D CommTime /Calls : 3196.15 us
Grid : Message : 56.456908 s : WilsonFermion5D FaceTime /Calls : 494.392 us
Grid : Message : 56.456910 s : WilsonFermion5D ComputeTime1/Calls : 44.4107 us
Grid : Message : 56.456912 s : WilsonFermion5D ComputeTime2/Calls : 1037.75 us
Grid : Message : 56.456921 s : Average mflops/s per call : 3.55691e+09
Grid : Message : 56.456925 s : Average mflops/s per call per rank : 2.22307e+08
Grid : Message : 56.456928 s : Average mflops/s per call per node : 8.89228e+08
Grid : Message : 56.456930 s : Average mflops/s per call (full) : 3.82915e+07
Grid : Message : 56.456933 s : Average mflops/s per call per rank (full): 2.39322e+06
Grid : Message : 56.456952 s : Average mflops/s per call per node (full): 9.57287e+06
Grid : Message : 56.456954 s : WilsonFermion5D Stencil
Grid : Message : 56.457016 s : Stencil calls 3001
Grid : Message : 56.457022 s : Stencil halogtime 0
Grid : Message : 56.457024 s : Stencil gathertime 55.9154
Grid : Message : 56.457026 s : Stencil gathermtime 20.1073
Grid : Message : 56.457028 s : Stencil mergetime 18.5585
Grid : Message : 56.457030 s : Stencil decompresstime 0.0639787
Grid : Message : 56.457032 s : Stencil comms_bytes 4.02653e+08
Grid : Message : 56.457034 s : Stencil commtime 6379.93
Grid : Message : 56.457036 s : Stencil 63.1124 GB/s per rank
Grid : Message : 56.457038 s : Stencil 252.45 GB/s per node
Grid : Message : 56.457040 s : WilsonFermion5D StencilEven
Grid : Message : 56.457048 s : WilsonFermion5D StencilOdd
Grid : Message : 56.457062 s : WilsonFermion5D Stencil Reporti()
Grid : Message : 56.457065 s : WilsonFermion5D StencilEven Reporti()
Grid : Message : 56.457066 s : WilsonFermion5D StencilOdd Reporti()
Grid : Message : 79.259261 s : Compare to naive wilson implementation Dag to verify correctness
Grid : Message : 79.259287 s : Called DwDag
Grid : Message : 79.259288 s : norm dag result 12.0421
Grid : Message : 79.271740 s : norm dag ref 12.0421
Grid : Message : 79.287759 s : norm dag diff 7.63236e-14
Grid : Message : 79.328100 s : Calling Deo and Doe and //assert Deo+Doe == Dunprec
Grid : Message : 79.955951 s : src_e0.499997
Grid : Message : 80.633620 s : src_o0.500003
Grid : Message : 80.164163 s : *********************************************************
Grid : Message : 80.164168 s : * Benchmarking DomainWallFermionF::DhopEO
Grid : Message : 80.164170 s : * Vectorising space-time by 8
Grid : Message : 80.164172 s : * SINGLE precision
Grid : Message : 80.164174 s : * Using Overlapped Comms/Compute
Grid : Message : 80.164177 s : * Using GENERIC Nc WilsonKernels
Grid : Message : 80.164178 s : *********************************************************
Grid : Message : 93.797635 s : Deo mflop/s = 3.93231e+07
Grid : Message : 93.797670 s : Deo mflop/s per rank 2.45769e+06
Grid : Message : 93.797672 s : Deo mflop/s per node 9.83077e+06
Grid : Message : 93.797674 s : #### Dhop calls report
Grid : Message : 93.797675 s : WilsonFermion5D Number of DhopEO Calls : 3001
Grid : Message : 93.797677 s : WilsonFermion5D TotalTime /Calls : 4542.83 us
Grid : Message : 93.797679 s : WilsonFermion5D CommTime /Calls : 2978.97 us
Grid : Message : 93.797681 s : WilsonFermion5D FaceTime /Calls : 602.287 us
Grid : Message : 93.797683 s : WilsonFermion5D ComputeTime1/Calls : 67.1416 us
Grid : Message : 93.797685 s : WilsonFermion5D ComputeTime2/Calls : 1004.07 us
Grid : Message : 93.797713 s : Average mflops/s per call : 3.30731e+09
Grid : Message : 93.797717 s : Average mflops/s per call per rank : 2.06707e+08
Grid : Message : 93.797719 s : Average mflops/s per call per node : 8.26827e+08
Grid : Message : 93.797721 s : Average mflops/s per call (full) : 3.97084e+07
Grid : Message : 93.797727 s : Average mflops/s per call per rank (full): 2.48178e+06
Grid : Message : 93.797732 s : Average mflops/s per call per node (full): 9.92711e+06
Grid : Message : 93.797735 s : WilsonFermion5D Stencil
Grid : Message : 93.797746 s : WilsonFermion5D StencilEven
Grid : Message : 93.797758 s : WilsonFermion5D StencilOdd
Grid : Message : 93.797769 s : Stencil calls 3001
Grid : Message : 93.797773 s : Stencil halogtime 0
Grid : Message : 93.797776 s : Stencil gathertime 56.7458
Grid : Message : 93.797780 s : Stencil gathermtime 22.6504
Grid : Message : 93.797782 s : Stencil mergetime 21.1913
Grid : Message : 93.797786 s : Stencil decompresstime 0.0556481
Grid : Message : 93.797788 s : Stencil comms_bytes 2.01327e+08
Grid : Message : 93.797791 s : Stencil commtime 2989.33
Grid : Message : 93.797795 s : Stencil 67.3484 GB/s per rank
Grid : Message : 93.797798 s : Stencil 269.394 GB/s per node
Grid : Message : 93.797801 s : WilsonFermion5D Stencil Reporti()
Grid : Message : 93.797803 s : WilsonFermion5D StencilEven Reporti()
Grid : Message : 93.797805 s : WilsonFermion5D StencilOdd Reporti()
Grid : Message : 93.873429 s : r_e6.02111
Grid : Message : 93.879931 s : r_o6.02102
Grid : Message : 93.885912 s : res12.0421
Grid : Message : 94.876555 s : norm diff 0
Grid : Message : 95.485643 s : norm diff even 0
Grid : Message : 95.581236 s : norm diff odd 0
Grid : Message : 1.875883 s : Grid Layout
Grid : Message : 1.875893 s : Global lattice size : 64 64 64 64
Grid : Message : 1.875897 s : OpenMP threads : 4
Grid : Message : 1.875898 s : MPI tasks : 2 2 2 2
Grid : Message : 1.993571 s : Initialising 4d RNG
Grid : Message : 2.881990 s : Intialising parallel RNG with unique string 'The 4D RNG'
Grid : Message : 2.882370 s : Seed SHA256: 49db4542db694e3b1a74bf2592a8c1b83bfebbe18401693c2609a4c3af1
Grid : Message : 2.495044 s : Initialising 5d RNG
Grid : Message : 4.120900 s : Intialising parallel RNG with unique string 'The 5D RNG'
Grid : Message : 4.121350 s : Seed SHA256: b6316f2fac44ce14111f93e0296389330b077bfd0a7b359f781c58589f8a
Grid : Message : 15.268010 s : Drawing gauge field
Grid : Message : 16.234025 s : Random gauge initialised
Grid : Message : 16.234057 s : Applying BCs
Grid : Message : 16.365565 s : Setting up Cshift based reference
Grid : Message : 44.512418 s : *****************************************************************
Grid : Message : 44.512448 s : * Kernel options --dslash-generic, --dslash-unroll, --dslash-asm
Grid : Message : 44.512450 s : *****************************************************************
Grid : Message : 44.512451 s : *****************************************************************
Grid : Message : 44.512452 s : * Benchmarking DomainWallFermionR::Dhop
Grid : Message : 44.512453 s : * Vectorising space-time by 8
Grid : Message : 44.512454 s : * VComplexF size is 64 B
Grid : Message : 44.512456 s : * SINGLE precision
Grid : Message : 44.512459 s : * Using Overlapped Comms/Compute
Grid : Message : 44.512460 s : * Using GENERIC Nc WilsonKernels
Grid : Message : 44.512461 s : *****************************************************************
Grid : Message : 46.389070 s : Called warmup
Grid : Message : 49.211265 s : Called Dw 300 times in 2.82203e+06 us
Grid : Message : 49.211295 s : mflop/s = 3.76681e+07
Grid : Message : 49.211297 s : mflop/s per rank = 2.35425e+06
Grid : Message : 49.211299 s : mflop/s per node = 9.41702e+06
Grid : Message : 49.211301 s : RF GiB/s (base 2) = 76540.6
Grid : Message : 49.211308 s : mem GiB/s (base 2) = 47837.9
Grid : Message : 49.214868 s : norm diff 1.06409e-13
Grid : Message : 92.647781 s : Compare to naive wilson implementation Dag to verify correctness
Grid : Message : 92.647816 s : Called DwDag
Grid : Message : 92.647817 s : norm dag result 12.0421
Grid : Message : 92.801806 s : norm dag ref 12.0421
Grid : Message : 92.817724 s : norm dag diff 7.21921e-14
Grid : Message : 92.858973 s : Calling Deo and Doe and //assert Deo+Doe == Dunprec
Grid : Message : 93.210378 s : src_e0.499997
Grid : Message : 93.583286 s : src_o0.500003
Grid : Message : 93.682468 s : *********************************************************
Grid : Message : 93.682471 s : * Benchmarking DomainWallFermionF::DhopEO
Grid : Message : 93.682472 s : * Vectorising space-time by 8
Grid : Message : 93.682473 s : * SINGLE precision
Grid : Message : 93.682475 s : * Using Overlapped Comms/Compute
Grid : Message : 93.682476 s : * Using GENERIC Nc WilsonKernels
Grid : Message : 93.682477 s : *********************************************************
Grid : Message : 95.162342 s : Deo mflop/s = 3.92487e+07
Grid : Message : 95.162387 s : Deo mflop/s per rank 2.45305e+06
Grid : Message : 95.162389 s : Deo mflop/s per node 9.81219e+06
Grid : Message : 95.232801 s : r_e6.02111
Grid : Message : 95.240061 s : r_o6.02102
Grid : Message : 95.245975 s : res12.0421
Grid : Message : 95.833402 s : norm diff 0
Grid : Message : 96.573829 s : norm diff even 0
Grid : Message : 96.868272 s : norm diff odd 0
Dirichlet block [0 64 64 32 32]
Grid : Message : 97.756909 s : Grid Layout
Grid : Message : 97.756911 s : Global lattice size : 64 64 64 64
Grid : Message : 97.756921 s : OpenMP threads : 4
Grid : Message : 97.756922 s : MPI tasks : 2 2 2 2
Grid : Message : 97.897085 s : Initialising 4d RNG
Grid : Message : 97.965061 s : Intialising parallel RNG with unique string 'The 4D RNG'
Grid : Message : 97.965097 s : Seed SHA256: 49db4542db694e3b1a74bf2592a8c1b83bfebbe18401693c2609a4c3af1
Grid : Message : 98.367431 s : Initialising 5d RNG
Grid : Message : 99.752745 s : Intialising parallel RNG with unique string 'The 5D RNG'
Grid : Message : 99.752790 s : Seed SHA256: b6316f2fac44ce14111f93e0296389330b077bfd0a7b359f781c58589f8a
Grid : Message : 111.290148 s : Drawing gauge field
Grid : Message : 112.349289 s : Random gauge initialised
Grid : Message : 112.349320 s : Applying BCs
Grid : Message : 113.948740 s : Setting up Cshift based reference
Grid : Message : 140.320415 s : *****************************************************************
Grid : Message : 140.320443 s : * Kernel options --dslash-generic, --dslash-unroll, --dslash-asm
|
||||
Grid : Message : 140.320444 s : *****************************************************************
|
||||
Grid : Message : 140.320445 s : *****************************************************************
|
||||
Grid : Message : 140.320446 s : * Benchmarking DomainWallFermionR::Dhop
|
||||
Grid : Message : 140.320447 s : * Vectorising space-time by 8
|
||||
Grid : Message : 140.320448 s : * VComplexF size is 64 B
|
||||
Grid : Message : 140.320450 s : * SINGLE precision
|
||||
Grid : Message : 140.320451 s : * Using Overlapped Comms/Compute
|
||||
Grid : Message : 140.320452 s : * Using GENERIC Nc WilsonKernels
|
||||
Grid : Message : 140.320453 s : *****************************************************************
|
||||
Grid : Message : 142.296150 s : Called warmup
|
||||
Grid : Message : 144.397678 s : Called Dw 300 times in 2.36719e+06 us
|
||||
Grid : Message : 144.397700 s : mflop/s = 4.49058e+07
|
||||
Grid : Message : 144.397702 s : mflop/s per rank = 2.80661e+06
|
||||
Grid : Message : 144.397704 s : mflop/s per node = 1.12265e+07
|
||||
Grid : Message : 144.397706 s : RF GiB/s (base 2) = 91247.6
|
||||
Grid : Message : 144.397708 s : mem GiB/s (base 2) = 57029.7
|
||||
Grid : Message : 144.401269 s : norm diff 9.78944e-14
|
||||
Grid : Message : 186.885460 s : Compare to naive wilson implementation Dag to verify correctness
|
||||
Grid : Message : 186.885492 s : Called DwDag
|
||||
Grid : Message : 186.885493 s : norm dag result 10.4157
|
||||
Grid : Message : 186.897154 s : norm dag ref 11.2266
|
||||
Grid : Message : 186.912538 s : norm dag diff 0.484633
|
||||
|
@@ -1,14 +1,13 @@
#!/bin/bash
#SBATCH -J dslash
#SBATCH -A tc002
#SBATCH -t 2:20:00
#SBATCH --nodelist=tu-c0r0n[00,03,06,09]
#SBATCH -A dp207
#SBATCH --exclusive
#SBATCH --nodes=4
#SBATCH --ntasks=16
#SBATCH --qos=standard
#SBATCH --ntasks-per-node=4
#SBATCH --cpus-per-task=8
#SBATCH --time=12:00:00
#SBATCH --time=0:05:00
#SBATCH --partition=gpu
#SBATCH --gres=gpu:4
#SBATCH --output=%x.%j.out
systems/mac-arm/config-command-mpi (new file, 1 line)
@@ -0,0 +1 @@
CXX=mpicxx-openmpi-mp CXXFLAGS=-I/opt/local/include/ LDFLAGS=-L/opt/local/lib/ ../../configure --enable-simd=GEN --enable-debug --enable-comms=mpi
@@ -1,184 +0,0 @@
/*************************************************************************************

    Grid physics library, www.github.com/paboyle/Grid

    Source file: ./tests/IO/Test_field_array_io.cc

    Copyright (C) 2015

Author: Christopher Kelly <ckelly@bnl.gov>
Author: Peter Boyle <paboyle@ph.ed.ac.uk>

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License along
    with this program; if not, write to the Free Software Foundation, Inc.,
    51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.

    See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */
#include <Grid/Grid.h>

using namespace std;
using namespace Grid;

//This test demonstrates and checks a single-file write of an arbitrary array of fields

uint64_t writeHeader(const uint32_t size, const uint32_t checksum, const std::string &format, const std::string &file){
  std::ofstream fout(file,std::ios::out|std::ios::in);
  fout.seekp(0,std::ios::beg);
  fout << std::setw(10) << size << std::endl;
  fout << std::hex << std::setw(10) << checksum << std::endl;
  fout << format << std::endl;
  return fout.tellp();
}

uint64_t readHeader(uint32_t &size, uint32_t &checksum, std::string &format, const std::string &file){
  std::ifstream fin(file);
  std::string line;
  getline(fin,line);
  {
    std::stringstream ss; ss <<line ; ss >> size;
  }
  getline(fin,line);
  {
    std::stringstream ss; ss <<line ; ss >> std::hex >> checksum;
  }
  getline(fin,format);
  removeWhitespace(format);

  return fin.tellg();
}

template<typename FieldType>
void writeFieldArray(const std::string &file, const std::vector<FieldType> &data){
  typedef typename FieldType::vector_object vobj;
  typedef typename FieldType::scalar_object sobj;
  GridBase* grid = data[0].Grid(); //assume all fields have the same Grid
  BinarySimpleMunger<sobj, sobj> munge; //straight copy

  //We need a 2-pass header write, first to establish the size, the second pass writes the checksum
  std::string format = getFormatString<typename FieldType::vector_object>();

  uint64_t offset; //leave 64 bits for header
  if ( grid->IsBoss() ) {
    NerscIO::truncate(file);
    offset = writeHeader(data.size(), 0, format, file);
  }
  grid->Broadcast(0,(void *)&offset,sizeof(offset)); //use as a barrier

  std::cout << "Data offset write " << offset << std::endl;
  std::cout << "Data size write " << data.size() << std::endl;
  uint64_t field_size = uint64_t(grid->gSites()) * sizeof(sobj);
  std::cout << "Field size = " << field_size << " B" << std::endl;

  uint32_t checksum = 0;
  for(int i=0;i<data.size();i++){
    std::cout << "Data field write " << i << " offset " << offset << std::endl;
    uint32_t nersc_csum,scidac_csuma,scidac_csumb;
    BinaryIO::writeLatticeObject<vobj,sobj>(const_cast<FieldType &>(data[i]),file,munge,offset,format,
					    nersc_csum,scidac_csuma,scidac_csumb);
    offset += field_size;
    checksum ^= nersc_csum + 0x9e3779b9 + (checksum<<6) + (checksum>>2);
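    // (note: this is the boost::hash_combine mixing step, folding each field's
    //  NERSC checksum into a single order-dependent hash for the whole array)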
  }
  std::cout << "Write checksum " << checksum << std::endl;

  if ( grid->IsBoss() ) {
    writeHeader(data.size(), checksum, format, file);
  }
}

template<typename FieldType>
void readFieldArray(std::vector<FieldType> &data, const std::string &file){
  typedef typename FieldType::vector_object vobj;
  typedef typename FieldType::scalar_object sobj;
  assert(data.size() > 0);
  GridBase* grid = data[0].Grid(); //assume all fields have the same Grid
  BinarySimpleUnmunger<sobj, sobj> munge; //straight copy

  uint32_t hdr_checksum, hdr_size;
  std::string format;
  uint64_t offset = readHeader(hdr_size, hdr_checksum, format, file);

  std::cout << "Data offset read " << offset << std::endl;
  std::cout << "Data size read " << hdr_size << std::endl;
  assert(data.size() == hdr_size);

  uint64_t field_size = uint64_t(grid->gSites()) * sizeof(sobj);

  uint32_t checksum = 0;

  for(int i=0;i<data.size();i++){
    std::cout << "Data field read " << i << " offset " << offset << std::endl;
    uint32_t nersc_csum,scidac_csuma,scidac_csumb;
    BinaryIO::readLatticeObject<vobj,sobj>(data[i],file,munge,offset,format,
					   nersc_csum,scidac_csuma,scidac_csumb);
    offset += field_size;
    checksum ^= nersc_csum + 0x9e3779b9 + (checksum<<6) + (checksum>>2);
  }

  std::cout << "Header checksum " << hdr_checksum << std::endl;
  std::cout << "Read checksum " << checksum << std::endl;

  assert( hdr_checksum == checksum );
}

int main (int argc, char ** argv)
{
  Grid_init(&argc,&argv);

  Coordinate latt = GridDefaultLatt();
  Coordinate simd_layout = GridDefaultSimd(Nd,vComplex::Nsimd());
  Coordinate mpi_layout  = GridDefaultMpi();

  const int Ls=8;

  GridCartesian * UGrid = SpaceTimeGrid::makeFourDimGrid(latt, simd_layout, mpi_layout);
  GridRedBlackCartesian * UrbGrid = SpaceTimeGrid::makeFourDimRedBlackGrid(UGrid);
  GridCartesian * FGrid = SpaceTimeGrid::makeFiveDimGrid(Ls,UGrid);
  GridRedBlackCartesian * FrbGrid = SpaceTimeGrid::makeFiveDimRedBlackGrid(Ls,UGrid);

  std::vector<int> seeds4({1,2,3,4});
  std::vector<int> seeds5({5,6,7,8});
  GridParallelRNG RNG5(FGrid);  RNG5.SeedFixedIntegers(seeds5);
  GridParallelRNG RNG4(UGrid);  RNG4.SeedFixedIntegers(seeds4);

  typedef DomainWallFermionD::FermionField FermionField;

  int nfield = 20;
  std::vector<FermionField> data(nfield, FGrid);

  for(int i=0;i<data.size();i++)
    gaussian(RNG5, data[i]);

  std::string file = "test_field_array_io.0";
  writeFieldArray(file, data);

  std::vector<FermionField> data_r(nfield, FGrid);
  readFieldArray(data_r, file);

  for(int i=0;i<nfield;i++){
    FermionField diff = data_r[i] - data[i];
    RealD norm_diff = norm2(diff);
    std::cout << "Norm2 of difference between stored and loaded data index " << i << " : " << norm_diff << std::endl;
  }

  std::cout << "Done" << std::endl;

  Grid_finalize();
}
@@ -299,12 +299,12 @@ int main (int argc, char ** argv)
  SpinColourVectorD ferm; gaussian(sRNG,ferm);
  pokeSite(ferm,src,point);

  const int Ls=64;
  const int Ls=32;
  GridCartesian * FGrid = SpaceTimeGrid::makeFiveDimGrid(Ls,&GRID);
  GridRedBlackCartesian * FrbGrid = SpaceTimeGrid::makeFiveDimRedBlackGrid(Ls,&GRID);

  RealD mass=1.0;
  RealD M5 =0.99;
  RealD mass=0.01;
  RealD M5 =0.8;
  DomainWallFermionD Ddwf(Umu,*FGrid,*FrbGrid,GRID,RBGRID,mass,M5);

  // Momentum space prop
@@ -353,12 +353,6 @@ int main (int argc, char ** argv)
  std::cout << " Taking difference" <<std::endl;
  std::cout << "Ddwf result4 "<<norm2(result4)<<std::endl;
  std::cout << "Ddwf ref "<<norm2(ref)<<std::endl;
  auto twopoint = localInnerProduct(result4,result4);
  std::vector<TComplex> pion_prop;
  sliceSum(twopoint,pion_prop,Nd-1);
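  // (sliceSum accumulates the local contraction over each hyperplane orthogonal to
  //  Nd-1, i.e. the zero-momentum timeslice projection of the pion two-point function)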
  for(int t=0;t<pion_prop.size();t++){
    std::cout << "Pion_prop["<<t<<"]="<<pion_prop[t]<<std::endl;
  }

  diff = ref - result4;
  std::cout << "result - ref "<<norm2(diff)<<std::endl;
@@ -389,7 +383,7 @@ int main (int argc, char ** argv)
  GridCartesian * FGrid = SpaceTimeGrid::makeFiveDimGrid(Ls,&GRID);
  GridRedBlackCartesian * FrbGrid = SpaceTimeGrid::makeFiveDimRedBlackGrid(Ls,&GRID);

  RealD mass=1.0;
  RealD mass=0.01;
  RealD M5 =0.8;

  OverlapWilsonCayleyTanhFermionD Dov(Umu,*FGrid,*FrbGrid,GRID,RBGRID,mass,M5,1.0);
@@ -29,10 +29,14 @@ Author: Peter Boyle <paboyle@ph.ed.ac.uk>
#include <Grid/Grid.h>

using namespace Grid;
 ;

template<typename Gimpl>
void run(double alpha, bool do_fft_gfix){
int main (int argc, char ** argv)
{
  std::vector<int> seeds({1,2,3,4});

  Grid_init(&argc,&argv);

  int threads = GridThread::GetThreads();

  Coordinate latt_size = GridDefaultLatt();
@@ -51,7 +55,10 @@ void run(double alpha, bool do_fft_gfix){
  FFT theFFT(&GRID);

  std::cout<<GridLogMessage << "Grid is setup to use "<<threads<<" threads"<<std::endl;
  std::cout<<GridLogMessage << "Using alpha=" << alpha << std::endl;

  std::cout<< "*****************************************************************" <<std::endl;
  std::cout<< "* Testing we can gauge fix steep descent a RGT of Unit gauge *" <<std::endl;
  std::cout<< "*****************************************************************" <<std::endl;

  // int coulomb_dir = -1;
  int coulomb_dir = Nd-1;
@@ -65,165 +72,81 @@ void run(double alpha, bool do_fft_gfix){
  LatticeColourMatrix xform1(&GRID); // Gauge xform
  LatticeColourMatrix xform2(&GRID); // Gauge xform
  LatticeColourMatrix xform3(&GRID); // Gauge xform

  //#########################################################################################

  std::cout<< "*********************************************************************************************************" <<std::endl;
  std::cout<< "* Testing steepest descent fixing to Landau gauge with randomly transformed unit gauge configuration *" <<std::endl;
  std::cout<< "*********************************************************************************************************" <<std::endl;

  SU<Nc>::ColdConfiguration(pRNG,Umu); // Unit gauge
  Uorg=Umu;

  Real init_plaq=WilsonLoops<Gimpl>::avgPlaquette(Umu);
  std::cout << " Initial plaquette "<< init_plaq << std::endl;

  //Apply a random gauge transformation to the unit gauge config
  Urnd=Umu;
  SU<Nc>::RandomGaugeTransform<Gimpl>(pRNG,Urnd,g);

  //Gauge fix the randomly transformed field
  SU<Nc>::RandomGaugeTransform(pRNG,Urnd,g); // Unit gauge

  Real plaq=WilsonLoops<PeriodicGimplR>::avgPlaquette(Umu);
  std::cout << " Initial plaquette "<<plaq << std::endl;

  Real alpha=0.1;

  Umu = Urnd;
  FourierAcceleratedGaugeFixer<Gimpl>::SteepestDescentGaugeFix(Umu,xform1,alpha,10000,1.0e-12, 1.0e-12,false);
  FourierAcceleratedGaugeFixer<PeriodicGimplR>::SteepestDescentGaugeFix(Umu,xform1,alpha,10000,1.0e-12, 1.0e-12,false);

  // Check the gauge xform matrices
  Utmp=Urnd;
  SU<Nc>::GaugeTransform<Gimpl>(Utmp,xform1);
  SU<Nc>::GaugeTransform(Utmp,xform1);
  Utmp = Utmp - Umu;
  std::cout << " Check the output gauge transformation matrices applied to the original field produce the xformed field "<< norm2(Utmp) << " (expect 0)" << std::endl;
  std::cout << " Norm Difference of xformed gauge "<< norm2(Utmp) << std::endl;

  Real plaq=WilsonLoops<Gimpl>::avgPlaquette(Umu);
  std::cout << " Final plaquette "<<plaq << " diff " << plaq - init_plaq << " (expect 0)" << std::endl;
  plaq=WilsonLoops<PeriodicGimplR>::avgPlaquette(Umu);
  std::cout << " Final plaquette "<<plaq << std::endl;

  Uorg = Uorg - Umu;
  std::cout << " Norm difference between a unit gauge configuration and the gauge fixed configuration "<< norm2(Uorg) << " (expect 0)" << std::endl;
  std::cout << " Norm of gauge fixed configuration "<< norm2(Umu) << std::endl;

  //#########################################################################################
  if(do_fft_gfix){
    std::cout<< "*************************************************************************************" <<std::endl;
    std::cout<< "* Testing Fourier accelerated fixing to Landau gauge with unit gauge configuration *" <<std::endl;
    std::cout<< "*************************************************************************************" <<std::endl;
    Umu=Urnd;
    FourierAcceleratedGaugeFixer<Gimpl>::SteepestDescentGaugeFix(Umu,xform2,alpha,10000,1.0e-12, 1.0e-12,true);

    Utmp=Urnd;
    SU<Nc>::GaugeTransform<Gimpl>(Utmp,xform2);
    Utmp = Utmp - Umu;
    std::cout << " Check the output gauge transformation matrices applied to the original field produce the xformed field "<< norm2(Utmp) << " (expect 0)" << std::endl;
    std::cout << " Norm Difference "<< norm2(Uorg) << std::endl;
    std::cout << " Norm "<< norm2(Umu) << std::endl;

    plaq=WilsonLoops<Gimpl>::avgPlaquette(Umu);
    std::cout << " Final plaquette "<<plaq << " diff " << plaq - init_plaq << " (expect 0)" << std::endl;
  }
  //#########################################################################################
  std::cout<< "*****************************************************************" <<std::endl;
  std::cout<< "* Testing Fourier accelerated fixing *" <<std::endl;
  std::cout<< "*****************************************************************" <<std::endl;
  Umu=Urnd;
  FourierAcceleratedGaugeFixer<PeriodicGimplR>::SteepestDescentGaugeFix(Umu,xform2,alpha,10000,1.0e-12, 1.0e-12,true);

  std::cout<< "******************************************************************************************" <<std::endl;
  std::cout<< "* Testing steepest descent fixing to Landau gauge with random configuration **" <<std::endl;
  std::cout<< "******************************************************************************************" <<std::endl;
  Utmp=Urnd;
  SU<Nc>::GaugeTransform(Utmp,xform2);
  Utmp = Utmp - Umu;
  std::cout << " Norm Difference of xformed gauge "<< norm2(Utmp) << std::endl;

  SU<Nc>::HotConfiguration(pRNG,Umu);

  init_plaq=WilsonLoops<Gimpl>::avgPlaquette(Umu);
  std::cout << " Initial plaquette "<< init_plaq << std::endl;
  plaq=WilsonLoops<PeriodicGimplR>::avgPlaquette(Umu);
  std::cout << " Final plaquette "<<plaq << std::endl;

  FourierAcceleratedGaugeFixer<Gimpl>::SteepestDescentGaugeFix(Umu,alpha,10000,1.0e-12, 1.0e-12,false);
  std::cout<< "*****************************************************************" <<std::endl;
  std::cout<< "* Testing non-unit configuration *" <<std::endl;
  std::cout<< "*****************************************************************" <<std::endl;

  plaq=WilsonLoops<Gimpl>::avgPlaquette(Umu);
  std::cout << " Final plaquette "<<plaq << " diff " << plaq - init_plaq << " (expect 0)" << std::endl;
  SU<Nc>::HotConfiguration(pRNG,Umu); // Unit gauge

  //#########################################################################################
  if(do_fft_gfix){
    std::cout<< "******************************************************************************************" <<std::endl;
    std::cout<< "* Testing Fourier accelerated fixing to Landau gauge with random configuration **" <<std::endl;
    std::cout<< "******************************************************************************************" <<std::endl;
    plaq=WilsonLoops<PeriodicGimplR>::avgPlaquette(Umu);
    std::cout << " Initial plaquette "<<plaq << std::endl;

    SU<Nc>::HotConfiguration(pRNG,Umu);
    FourierAcceleratedGaugeFixer<PeriodicGimplR>::SteepestDescentGaugeFix(Umu,alpha,10000,1.0e-12, 1.0e-12,true);

    init_plaq=WilsonLoops<Gimpl>::avgPlaquette(Umu);
    std::cout << " Initial plaquette "<< init_plaq << std::endl;
    plaq=WilsonLoops<PeriodicGimplR>::avgPlaquette(Umu);
    std::cout << " Final plaquette "<<plaq << std::endl;

    FourierAcceleratedGaugeFixer<Gimpl>::SteepestDescentGaugeFix(Umu,alpha,10000,1.0e-12, 1.0e-12,true);

    plaq=WilsonLoops<Gimpl>::avgPlaquette(Umu);
    std::cout << " Final plaquette "<<plaq << " diff " << plaq - init_plaq << " (expect 0)" << std::endl;
  }
  //#########################################################################################

  std::cout<< "*******************************************************************************************" <<std::endl;
  std::cout<< "* Testing steepest descent fixing to coulomb gauge with random configuration *" <<std::endl;
  std::cout<< "*******************************************************************************************" <<std::endl;
  std::cout<< "*****************************************************************" <<std::endl;
  std::cout<< "* Testing Fourier accelerated fixing to coulomb gauge *" <<std::endl;
  std::cout<< "*****************************************************************" <<std::endl;

  Umu=Urnd;
  SU<Nc>::HotConfiguration(pRNG,Umu);
  SU<Nc>::HotConfiguration(pRNG,Umu); // Unit gauge

  init_plaq=WilsonLoops<Gimpl>::avgPlaquette(Umu);
  std::cout << " Initial plaquette "<< init_plaq << std::endl;
  plaq=WilsonLoops<PeriodicGimplR>::avgPlaquette(Umu);
  std::cout << " Initial plaquette "<<plaq << std::endl;

  FourierAcceleratedGaugeFixer<Gimpl>::SteepestDescentGaugeFix(Umu,xform3,alpha,10000,1.0e-12, 1.0e-12,false,coulomb_dir);
  FourierAcceleratedGaugeFixer<PeriodicGimplR>::SteepestDescentGaugeFix(Umu,xform3,alpha,10000,1.0e-12, 1.0e-12,true,coulomb_dir);

  plaq=WilsonLoops<Gimpl>::avgPlaquette(Umu);
  std::cout << " Final plaquette "<<plaq << " diff " << plaq - init_plaq << " (expect 0)" << std::endl;
  std::cout << Umu<<std::endl;

  plaq=WilsonLoops<PeriodicGimplR>::avgPlaquette(Umu);
  std::cout << " Final plaquette "<<plaq << std::endl;

  //#########################################################################################
  if(do_fft_gfix){
    std::cout<< "*******************************************************************************************" <<std::endl;
    std::cout<< "* Testing Fourier accelerated fixing to coulomb gauge with random configuration *" <<std::endl;
    std::cout<< "*******************************************************************************************" <<std::endl;

    Umu=Urnd;
    SU<Nc>::HotConfiguration(pRNG,Umu);

    init_plaq=WilsonLoops<Gimpl>::avgPlaquette(Umu);
    std::cout << " Initial plaquette "<< init_plaq << std::endl;

    FourierAcceleratedGaugeFixer<Gimpl>::SteepestDescentGaugeFix(Umu,xform3,alpha,10000,1.0e-12, 1.0e-12,true,coulomb_dir);

    plaq=WilsonLoops<Gimpl>::avgPlaquette(Umu);
    std::cout << " Final plaquette "<<plaq << " diff " << plaq - init_plaq << " (expect 0)" << std::endl;
  }
}

int main (int argc, char ** argv)
{
  Grid_init(&argc,&argv);

  double alpha=0.1; //step size
  std::string gimpl = "periodic";
  bool do_fft_gfix = true; //test fourier transformed gfix as well as steepest descent
  for(int i=1;i<argc;i++){
    std::string sarg(argv[i]);
    if(sarg == "--gimpl"){
      assert(i<argc-1 && "--gimpl option requires an argument");
      gimpl = argv[i+1];
      if(gimpl != "periodic" && gimpl != "conjugate")
	assert(0 && "Invalid gimpl");
    }else if(sarg == "--no-fft-gfix"){
      std::cout << "Not doing the Fourier accelerated gauge fixing tests" << std::endl;
      do_fft_gfix = false;
    }else if(sarg == "--alpha"){
      assert(i<argc-1 && "--alpha option requires an argument");
      std::istringstream ss(argv[i+1]); ss >> alpha;
    }
  }

  if(gimpl == "periodic"){
    std::cout << GridLogMessage << "Using periodic boundary condition" << std::endl;
    run<PeriodicGimplR>(alpha, do_fft_gfix);
  }else{
    std::vector<int> conjdirs = {1,1,0,0}; //test with 2 conjugate dirs and 2 not
    std::cout << GridLogMessage << "Using complex conjugate boundary conditions in dimensions ";
    for(int i=0;i<Nd;i++)
      if(conjdirs[i])
	std::cout << i << " ";
    std::cout << std::endl;

    ConjugateGimplR::setDirections(conjdirs);
    run<ConjugateGimplR>(alpha, do_fft_gfix);
  }

  Grid_finalize();
}
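// Example invocation (hypothetical paths; options exactly as parsed in main above):
//   ./Test_fft_gfix --gimpl conjugate --alpha 0.05 --no-fft-gfix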
@@ -228,59 +228,6 @@ void checkGammaL(const Gamma::Algebra a, GridSerialRNG &rng)
  std::cout << std::endl;
}

void checkChargeConjMatrix(){
  //Check the properties of the charge conjugation matrix
  //In the Grid basis C = -\gamma^2 \gamma^4
  SpinMatrix C = testAlgebra[Gamma::Algebra::MinusGammaY] * testAlgebra[Gamma::Algebra::GammaT];
  SpinMatrix mC = -C;
  SpinMatrix one = testAlgebra[Gamma::Algebra::Identity];

  std::cout << "Testing properties of charge conjugation matrix C = -\\gamma^2 \\gamma^4 (in Grid's basis)" << std::endl;

  //C^T = -C
  SpinMatrix Ct = transpose(C);
  std::cout << GridLogMessage << "C^T=-C ";
  test(Ct, mC);
  std::cout << std::endl;

  //C^\dagger = -C
  SpinMatrix Cdag = adj(C);
  std::cout << GridLogMessage << "C^dag=-C ";
  test(Cdag, mC);
  std::cout << std::endl;

  //C^* = C
  SpinMatrix Cstar = conjugate(C);
  std::cout << GridLogMessage << "C^*=C ";
  test(Cstar, C);
  std::cout << std::endl;

  //C^{-1} = -C
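  //(this follows from C^2 = \gamma^2\gamma^4\gamma^2\gamma^4 = -\gamma^2\gamma^2\gamma^4\gamma^4 = -1,
  // so the check below verifies (-C)*C == 1)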
  SpinMatrix CinvC = mC * C;
  std::cout << GridLogMessage << "C^{-1}=-C ";
  test(CinvC, one);
  std::cout << std::endl;

  // C^{-1} \gamma^\mu C = -[\gamma^\mu]^T
  Gamma::Algebra gmu_a[4] = { Gamma::Algebra::GammaX, Gamma::Algebra::GammaY, Gamma::Algebra::GammaZ, Gamma::Algebra::GammaT };
  for(int mu=0;mu<4;mu++){
    SpinMatrix gmu = testAlgebra[gmu_a[mu]];
    SpinMatrix Cinv_gmu_C = mC * gmu * C;
    SpinMatrix mgmu_T = -transpose(gmu);
    std::cout << GridLogMessage << "C^{-1} \\gamma^" << mu << " C = -[\\gamma^" << mu << "]^T ";
    test(Cinv_gmu_C, mgmu_T);
    std::cout << std::endl;
  }

  //[C, \gamma^5] = 0
  SpinMatrix Cg5 = C * testAlgebra[Gamma::Algebra::Gamma5];
  SpinMatrix g5C = testAlgebra[Gamma::Algebra::Gamma5] * C;
  std::cout << GridLogMessage << "C \\gamma^5 = \\gamma^5 C";
  test(Cg5, g5C);
  std::cout << std::endl;
}

int main(int argc, char *argv[])
{
  Grid_init(&argc,&argv);
@@ -323,13 +270,6 @@ int main(int argc, char *argv[])
  {
    checkGammaL(i, sRNG);
  }

  std::cout << GridLogMessage << "======== Charge conjugation matrix check" << std::endl;
  checkChargeConjMatrix();
  std::cout << GridLogMessage << std::endl;

  Grid_finalize();
@@ -1,114 +0,0 @@
/*************************************************************************************

    Grid physics library, www.github.com/paboyle/Grid

    Source file: ./tests/core/Test_precision_change.cc

    Copyright (C) 2015

Author: Christopher Kelly <ckelly@bnl.gov>

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License along
    with this program; if not, write to the Free Software Foundation, Inc.,
    51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.

    See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */
#include <Grid/Grid.h>

using namespace Grid;

int main (int argc, char ** argv){
  Grid_init(&argc, &argv);
  int Ls = 16;
  std::cout << GridLogMessage << "Lattice dimensions: " << GridDefaultLatt() << " and Ls=" << Ls << std::endl;
  GridCartesian* UGrid_d = SpaceTimeGrid::makeFourDimGrid(GridDefaultLatt(), GridDefaultSimd(Nd, vComplexD::Nsimd()), GridDefaultMpi());
  GridCartesian* FGrid_d = SpaceTimeGrid::makeFiveDimGrid(Ls, UGrid_d);
  GridRedBlackCartesian* FrbGrid_d = SpaceTimeGrid::makeFiveDimRedBlackGrid(Ls, UGrid_d);

  GridCartesian* UGrid_f = SpaceTimeGrid::makeFourDimGrid(GridDefaultLatt(), GridDefaultSimd(Nd, vComplexF::Nsimd()), GridDefaultMpi());
  GridCartesian* FGrid_f = SpaceTimeGrid::makeFiveDimGrid(Ls, UGrid_f);
  GridRedBlackCartesian* FrbGrid_f = SpaceTimeGrid::makeFiveDimRedBlackGrid(Ls, UGrid_f);

  std::vector<int> seeds4({1, 2, 3, 4});
  std::vector<int> seeds5({5, 6, 7, 8});
  GridParallelRNG RNG5(FGrid_d);
  RNG5.SeedFixedIntegers(seeds5);
  GridParallelRNG RNG4(UGrid_d);
  RNG4.SeedFixedIntegers(seeds4);

  //Gauge fields
  LatticeGaugeFieldD Umu_d(UGrid_d);
  LatticeGaugeFieldF Umu_f(UGrid_f);
  LatticeGaugeFieldD Umu_d_r(UGrid_d);
  LatticeGaugeFieldD Utmp_d(UGrid_d);

  for(int i=0;i<5;i++){
    random(RNG4, Umu_d);

    precisionChange(Umu_f, Umu_d);
    std::cout << GridLogMessage << "Norm of double-prec and single-prec gauge fields (should be ~equal): " << norm2(Umu_d) << " " << norm2(Umu_f) << std::endl;
    precisionChange(Umu_d_r, Umu_f);
    RealD normdiff = axpy_norm(Utmp_d, -1.0, Umu_d_r, Umu_d);
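    // (axpy_norm(z,a,x,y) sets z = a*x + y and returns norm2(z),
    //  so normdiff here is |Umu_d - Umu_d_r|^2)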
    std::cout << GridLogMessage << "Norm of difference of back-converted double-prec gauge fields (should be ~0) = " << normdiff << std::endl;
  }

  //Fermion fields
  LatticeFermionD psi_d(FGrid_d);
  LatticeFermionF psi_f(FGrid_f);
  LatticeFermionD psi_d_r(FGrid_d);
  LatticeFermionD psi_tmp_d(FGrid_d);

  for(int i=0;i<5;i++){
    random(RNG5, psi_d);

    precisionChange(psi_f, psi_d);
    std::cout << GridLogMessage << "Norm of double-prec and single-prec fermion fields (should be ~equal): " << norm2(psi_d) << " " << norm2(psi_f) << std::endl;
    precisionChange(psi_d_r, psi_f);
    RealD normdiff = axpy_norm(psi_tmp_d, -1.0, psi_d_r, psi_d);
    std::cout << GridLogMessage << "Norm of difference of back-converted double-prec fermion fields (should be ~0)= " << normdiff << std::endl;
  }

  //Checkerboarded fermion fields
  LatticeFermionD psi_cb_d(FrbGrid_d);
  LatticeFermionF psi_cb_f(FrbGrid_f);
  LatticeFermionD psi_cb_d_r(FrbGrid_d);
  LatticeFermionD psi_cb_tmp_d(FrbGrid_d);

  for(int i=0;i<5;i++){
    random(RNG5, psi_d);
    pickCheckerboard(Odd, psi_cb_d, psi_d);

    precisionChange(psi_cb_f, psi_cb_d);
    std::cout << GridLogMessage << "Norm of odd-cb double-prec and single-prec fermion fields (should be ~equal): " << norm2(psi_cb_d) << " " << norm2(psi_cb_f) << std::endl;
    precisionChange(psi_cb_d_r, psi_cb_f);
    RealD normdiff = axpy_norm(psi_cb_tmp_d, -1.0, psi_cb_d_r, psi_cb_d);
    std::cout << GridLogMessage << "Norm of difference of back-converted odd-cb double-prec fermion fields (should be ~0)= " << normdiff << std::endl;

    pickCheckerboard(Even, psi_cb_d, psi_d);

    precisionChange(psi_cb_f, psi_cb_d);
    std::cout << GridLogMessage << "Norm of even-cb double-prec and single-prec fermion fields (should be ~equal): " << norm2(psi_cb_d) << " " << norm2(psi_cb_f) << std::endl;
    precisionChange(psi_cb_d_r, psi_cb_f);
    normdiff = axpy_norm(psi_cb_tmp_d, -1.0, psi_cb_d_r, psi_cb_d);
    std::cout << GridLogMessage << "Norm of difference of back-converted even-cb double-prec fermion fields (should be ~0)= " << normdiff << std::endl;
  }

  Grid_finalize();
}
@@ -93,28 +93,16 @@ int main (int argc, char ** argv)
  ////////////////////////////////////
  // Modify the gauge field a little
  ////////////////////////////////////
  RealD dt = 0.01;
  RealD dt = 0.0001;

  LatticeColourMatrix zz(UGrid); zz=Zero();
  LatticeColourMatrix mommu(UGrid);
  LatticeColourMatrix forcemu(UGrid);
  LatticeGaugeField mom(UGrid);
  LatticeGaugeField Uprime(UGrid);

  const int Lnu=latt_size[nu];
  Lattice<iScalar<vInteger> > coor(UGrid);
  LatticeCoordinate(coor,nu);
  for(int mu=0;mu<Nd;mu++){

    // Traceless antihermitian momentum; gaussian in lie alg
    SU<Nc>::GaussianFundamentalLieAlgebraMatrix(RNG4, mommu);
    if(0){
      if(mu==nu){
	mommu=where(coor==Lnu-1,mommu,zz);
      } else {
	mommu=Zero();
      }
    }
    SU<Nc>::GaussianFundamentalLieAlgebraMatrix(RNG4, mommu); // Traceless antihermitian momentum; gaussian in lie alg

    PokeIndex<LorentzIndex>(mom,mommu,mu);

@@ -139,12 +127,6 @@ int main (int argc, char ** argv)

  ComplexD Sprime = innerProduct(MphiPrime ,MphiPrime);

  LatticeComplex lip(FGrid); lip=localInnerProduct(Mphi,Mphi);
  LatticeComplex lipp(FGrid); lipp=localInnerProduct(MphiPrime,MphiPrime);
  LatticeComplex dip(FGrid); dip = lipp - lip;
  std::cout << " dip "<<dip<<std::endl;

  //////////////////////////////////////////////
  // Use derivative to estimate dS
  //////////////////////////////////////////////
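  // (i.e. check the first-order prediction dS_pred = sum_mu Tr( P_mu dS/dU_mu ) dt,
  //  accumulated as trace(mommu*forcemu)*dt below, against the measured Sprime - S)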
@@ -1,446 +0,0 @@
/*************************************************************************************

    Grid physics library, www.github.com/paboyle/Grid

    Source file: ./forces/Test_gpdwf_force_1f_2f.cc

    Copyright (C) 2015

Author: Christopher Kelly <ckelly@bnl.gov>
Author: paboyle <paboyle@ph.ed.ac.uk>

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License along
    with this program; if not, write to the Free Software Foundation, Inc.,
    51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.

    See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */
#include <Grid/Grid.h>

using namespace std;
using namespace Grid;

//Here we test the G-parity action and force between the 1f (doubled-lattice) and 2f approaches

void copyConjGauge(LatticeGaugeFieldD &Umu_1f, const LatticeGaugeFieldD &Umu_2f, const int nu){
  GridBase* UGrid_2f = Umu_2f.Grid();
  GridBase* UGrid_1f = Umu_1f.Grid();

  Replicate(Umu_2f,Umu_1f);

  int L_2f = UGrid_2f->FullDimensions()[nu];
  int L_1f = UGrid_1f->FullDimensions()[nu];
  assert(L_1f == 2 * L_2f);

  //Coordinate grid for reference
  LatticeInteger xcoor_1f(UGrid_1f);
  LatticeCoordinate(xcoor_1f,nu);

  //Copy-conjugate the gauge field
  //First C-shift the lattice by Lx/2
  {
    LatticeGaugeField Umu_shift = conjugate( Cshift(Umu_1f,nu,L_2f) );
    Umu_1f = where( xcoor_1f >= Integer(L_2f), Umu_shift, Umu_1f );

    //We use the in built APBC
    //Make the gauge field antiperiodic in nu-direction
    //decltype(PeekIndex<LorentzIndex>(Umu_1f,nu)) Unu(UGrid_1f);
    //Unu = PeekIndex<LorentzIndex>(Umu_1f,nu);
    //Unu = where(xcoor_1f == Integer(2*L_2f-1), -Unu, Unu);
    //PokeIndex<LorentzIndex>(Umu_1f,Unu,nu);
  }
}

template<typename FermionField2f, typename FermionField1f>
void convertFermion1f_from_2f(FermionField1f &out_1f, const FermionField2f &in_2f, const int nu, bool is_4d){
  GridBase* FGrid_1f = out_1f.Grid();
  GridBase* FGrid_2f = in_2f.Grid();

  int nuoff = is_4d ? 0 : 1; //s in 0 direction

  int L_2f = FGrid_2f->FullDimensions()[nu+nuoff];
  int L_1f = FGrid_1f->FullDimensions()[nu+nuoff];
  assert(L_1f == 2 * L_2f);

  auto in_f0_2fgrid = PeekIndex<GparityFlavourIndex>(in_2f,0); //flavor 0 on 2f Grid
  FermionField1f in_f0_1fgrid(FGrid_1f);
  Replicate(in_f0_2fgrid, in_f0_1fgrid); //has flavor 0 on both halves

  auto in_f1_2fgrid = PeekIndex<GparityFlavourIndex>(in_2f,1); //flavor 1 on 2f Grid
  FermionField1f in_f1_1fgrid(FGrid_1f);
  Replicate(in_f1_2fgrid, in_f1_1fgrid); //has flavor 1 on both halves

  LatticeInteger xcoor_1f(FGrid_1f);
  LatticeCoordinate(xcoor_1f,nu+nuoff);

  out_1f = where(xcoor_1f < L_2f, in_f0_1fgrid, in_f1_1fgrid);
}

template<typename GparityAction, typename StandardAction>
class RatioActionSetupBase{
protected:
  TwoFlavourEvenOddRatioPseudoFermionAction<WilsonImplD> *pf_1f;
  TwoFlavourEvenOddRatioPseudoFermionAction<GparityWilsonImplD> *pf_2f;

  GparityAction* action_2f;
  GparityAction* action_PV_2f;
  StandardAction* action_1f;
  StandardAction* action_PV_1f;

  ConjugateGradient<typename StandardAction::FermionField> CG_1f;
  ConjugateGradient<typename GparityAction::FermionField> CG_2f;

  RatioActionSetupBase(): CG_1f(1.0e-8,10000), CG_2f(1.0e-8,10000){}

  void setupPseudofermion(){
    pf_1f = new TwoFlavourEvenOddRatioPseudoFermionAction<WilsonImplD>(*action_PV_1f, *action_1f, CG_1f, CG_1f);
    pf_2f = new TwoFlavourEvenOddRatioPseudoFermionAction<GparityWilsonImplD>(*action_PV_2f, *action_2f, CG_2f, CG_2f);
  }

public:
  GparityAction & action2f(){ return *action_2f; }
  StandardAction & action1f(){ return *action_1f; }

  void refreshAction(LatticeGaugeField &Umu_2f, typename GparityAction::FermionField &eta_2f,
		     LatticeGaugeField &Umu_1f, typename StandardAction::FermionField &eta_1f){
    pf_1f->refresh(Umu_1f, eta_1f);
    pf_2f->refresh(Umu_2f, eta_2f);

    //Compare PhiOdd
    RealD norm_1f = norm2(pf_1f->getPhiOdd());
    RealD norm_2f = norm2(pf_2f->getPhiOdd());

    std::cout << "Test PhiOdd 2f: " << norm_2f << " 1f: " << norm_1f << std::endl;
  }

  void computeAction(RealD &S_2f, RealD &S_1f, LatticeGaugeField &Umu_2f, LatticeGaugeField &Umu_1f){
    S_1f = pf_1f->S(Umu_1f);
    S_2f = pf_2f->S(Umu_2f);
  }

  void computeDeriv(LatticeGaugeField &deriv_2f, LatticeGaugeField &deriv_1f, LatticeGaugeField &Umu_2f, LatticeGaugeField &Umu_1f){
    pf_1f->deriv(Umu_1f, deriv_1f);
    pf_2f->deriv(Umu_2f, deriv_2f);
  }
};

template<typename GparityAction, typename StandardAction>
struct setupAction{};

template<>
struct setupAction<GparityWilsonTMFermionD, WilsonTMFermionD>: public RatioActionSetupBase<GparityWilsonTMFermionD, WilsonTMFermionD>{
  typedef GparityWilsonTMFermionD GparityAction;
  typedef WilsonTMFermionD StandardAction;

  setupAction(GridCartesian* UGrid_2f, GridRedBlackCartesian* UrbGrid_2f, GridCartesian* FGrid_2f, GridRedBlackCartesian* FrbGrid_2f,
	      GridCartesian* UGrid_1f, GridRedBlackCartesian* UrbGrid_1f, GridCartesian* FGrid_1f, GridRedBlackCartesian* FrbGrid_1f,
	      LatticeGaugeField &Umu_2f, LatticeGaugeField &Umu_1f, int nu): RatioActionSetupBase(){
    RealD mass=-1.8;
    //Use same DSDR twists as https://arxiv.org/pdf/1208.4412.pdf
    RealD epsilon_f = 0.02; //numerator (in determinant)
    RealD epsilon_b = 0.5;

    std::vector<int> twists(Nd,0);
    twists[nu] = 1; //GPBC in y
    twists[3] = 1; //APBC
    GparityAction::ImplParams params_2f;  params_2f.twists = twists;
    action_2f = new GparityWilsonTMFermionD(Umu_2f,*UGrid_2f,*UrbGrid_2f, mass, epsilon_f, params_2f);
    action_PV_2f = new GparityWilsonTMFermionD(Umu_2f,*UGrid_2f,*UrbGrid_2f, mass, epsilon_b, params_2f);

    DomainWallFermionD::ImplParams params_1f;
    params_1f.boundary_phases[nu] = -1;
    params_1f.boundary_phases[3] = -1;

    action_1f = new WilsonTMFermionD(Umu_1f,*UGrid_1f,*UrbGrid_1f, mass, epsilon_f, params_1f);
    action_PV_1f = new WilsonTMFermionD(Umu_1f,*UGrid_1f,*UrbGrid_1f, mass, epsilon_b, params_1f);

    setupPseudofermion();
  }

  static bool is4d(){ return true; }
};

template<>
struct setupAction<GparityDomainWallFermionD, DomainWallFermionD>: public RatioActionSetupBase<GparityDomainWallFermionD, DomainWallFermionD>{
  typedef GparityDomainWallFermionD GparityAction;
  typedef DomainWallFermionD StandardAction;

  setupAction(GridCartesian* UGrid_2f, GridRedBlackCartesian* UrbGrid_2f, GridCartesian* FGrid_2f, GridRedBlackCartesian* FrbGrid_2f,
	      GridCartesian* UGrid_1f, GridRedBlackCartesian* UrbGrid_1f, GridCartesian* FGrid_1f, GridRedBlackCartesian* FrbGrid_1f,
	      LatticeGaugeField &Umu_2f, LatticeGaugeField &Umu_1f, int nu): RatioActionSetupBase(){
    RealD mass=0.01;
    RealD M5=1.8;

    std::vector<int> twists(Nd,0);
    twists[nu] = 1; //GPBC in y
    twists[3] = 1; //APBC
    GparityDomainWallFermionD::ImplParams params_2f;  params_2f.twists = twists;
    action_2f = new GparityDomainWallFermionD(Umu_2f,*FGrid_2f,*FrbGrid_2f,*UGrid_2f,*UrbGrid_2f,mass,M5,params_2f);
    action_PV_2f = new GparityDomainWallFermionD(Umu_2f,*FGrid_2f,*FrbGrid_2f,*UGrid_2f,*UrbGrid_2f,1.0,M5,params_2f);

    DomainWallFermionD::ImplParams params_1f;
    params_1f.boundary_phases[nu] = -1;
    params_1f.boundary_phases[3] = -1;

    action_1f = new DomainWallFermionD(Umu_1f,*FGrid_1f,*FrbGrid_1f,*UGrid_1f,*UrbGrid_1f,mass,M5,params_1f);
    action_PV_1f = new DomainWallFermionD(Umu_1f,*FGrid_1f,*FrbGrid_1f,*UGrid_1f,*UrbGrid_1f,1.0,M5,params_1f);

    setupPseudofermion();
  }

  static bool is4d(){ return false; }
};

//For EOFA we need a different pseudofermion type
template<>
struct setupAction<GparityDomainWallEOFAFermionD, DomainWallEOFAFermionD>{
  typedef GparityDomainWallEOFAFermionD GparityAction;
  typedef DomainWallEOFAFermionD StandardAction;

  ExactOneFlavourRatioPseudoFermionAction<WilsonImplD> *pf_1f;
  ExactOneFlavourRatioPseudoFermionAction<GparityWilsonImplD> *pf_2f;

  GparityAction* action_2f;
  GparityAction* action_PV_2f;
  StandardAction* action_1f;
  StandardAction* action_PV_1f;

  ConjugateGradient<typename StandardAction::FermionField> CG_1f;
  ConjugateGradient<typename GparityAction::FermionField> CG_2f;

public:
  GparityAction & action2f(){ return *action_2f; }
  StandardAction & action1f(){ return *action_1f; }

  void refreshAction(LatticeGaugeField &Umu_2f, typename GparityAction::FermionField &eta_2f,
		     LatticeGaugeField &Umu_1f, typename StandardAction::FermionField &eta_1f){
    pf_1f->refresh(Umu_1f, eta_1f);
    pf_2f->refresh(Umu_2f, eta_2f);

    //Compare PhiOdd
    RealD norm_1f = norm2(pf_1f->getPhi());
    RealD norm_2f = norm2(pf_2f->getPhi());

    std::cout << "Test Phi 2f: " << norm_2f << " 1f: " << norm_1f << std::endl;
  }

  void computeAction(RealD &S_2f, RealD &S_1f, LatticeGaugeField &Umu_2f, LatticeGaugeField &Umu_1f){
    S_1f = pf_1f->S(Umu_1f);
    S_2f = pf_2f->S(Umu_2f);
  }

  void computeDeriv(LatticeGaugeField &deriv_2f, LatticeGaugeField &deriv_1f, LatticeGaugeField &Umu_2f, LatticeGaugeField &Umu_1f){
    pf_1f->deriv(Umu_1f, deriv_1f);
    pf_2f->deriv(Umu_2f, deriv_2f);
  }

  setupAction(GridCartesian* UGrid_2f, GridRedBlackCartesian* UrbGrid_2f, GridCartesian* FGrid_2f, GridRedBlackCartesian* FrbGrid_2f,
	      GridCartesian* UGrid_1f, GridRedBlackCartesian* UrbGrid_1f, GridCartesian* FGrid_1f, GridRedBlackCartesian* FrbGrid_1f,
	      LatticeGaugeField &Umu_2f, LatticeGaugeField &Umu_1f, int nu): CG_1f(1.0e-8,10000), CG_2f(1.0e-8,10000){
    RealD mass=0.01;
    RealD M5=1.8;

    std::vector<int> twists(Nd,0);
    twists[nu] = 1; //GPBC in y
    twists[3] = 1; //APBC
    GparityAction::ImplParams params_2f;  params_2f.twists = twists;
    action_2f = new GparityAction(Umu_2f,*FGrid_2f,*FrbGrid_2f,*UGrid_2f,*UrbGrid_2f, mass, mass, 1.0, 0.0, -1, M5, params_2f);
    action_PV_2f = new GparityAction(Umu_2f,*FGrid_2f,*FrbGrid_2f,*UGrid_2f,*UrbGrid_2f, 1.0, mass, 1.0, -1.0, 1, M5, params_2f); //cf Test_dwf_gpforce_eofa.cc

    StandardAction::ImplParams params_1f;
    params_1f.boundary_phases[nu] = -1;
    params_1f.boundary_phases[3] = -1;

    action_1f = new StandardAction(Umu_1f,*FGrid_1f,*FrbGrid_1f,*UGrid_1f,*UrbGrid_1f, mass, mass, 1.0, 0.0, -1, M5, params_1f);
    action_PV_1f = new StandardAction(Umu_1f,*FGrid_1f,*FrbGrid_1f,*UGrid_1f,*UrbGrid_1f, 1.0, mass, 1.0, -1.0, 1, M5, params_1f);

    OneFlavourRationalParams RationalParams(0.95, 100.0, 5000, 1.0e-12, 12);

    pf_1f = new ExactOneFlavourRatioPseudoFermionAction<WilsonImplD>(*action_1f, *action_PV_1f, CG_1f, CG_1f, CG_1f, CG_1f, CG_1f, RationalParams, true);
    pf_2f = new ExactOneFlavourRatioPseudoFermionAction<GparityWilsonImplD>(*action_2f, *action_PV_2f, CG_2f, CG_2f, CG_2f, CG_2f, CG_2f, RationalParams, true);
  }

  static bool is4d(){ return false; }
};

template<typename GparityAction, typename StandardAction>
void runTest(int argc, char** argv){
  Grid_init(&argc,&argv);

  const int nu = 1;
  Coordinate latt_2f = GridDefaultLatt();
  Coordinate latt_1f = latt_2f;
  latt_1f[nu] *= 2;

  Coordinate simd_layout = GridDefaultSimd(Nd,vComplex::Nsimd());
  Coordinate mpi_layout  = GridDefaultMpi();

  const int Ls=8;

  GridCartesian * UGrid_1f = SpaceTimeGrid::makeFourDimGrid(latt_1f, simd_layout, mpi_layout);
  GridRedBlackCartesian * UrbGrid_1f = SpaceTimeGrid::makeFourDimRedBlackGrid(UGrid_1f);
  GridCartesian * FGrid_1f = SpaceTimeGrid::makeFiveDimGrid(Ls,UGrid_1f);
  GridRedBlackCartesian * FrbGrid_1f = SpaceTimeGrid::makeFiveDimRedBlackGrid(Ls,UGrid_1f);

  GridCartesian * UGrid_2f = SpaceTimeGrid::makeFourDimGrid(latt_2f, simd_layout, mpi_layout);
  GridRedBlackCartesian * UrbGrid_2f = SpaceTimeGrid::makeFourDimRedBlackGrid(UGrid_2f);
  GridCartesian * FGrid_2f = SpaceTimeGrid::makeFiveDimGrid(Ls,UGrid_2f);
  GridRedBlackCartesian * FrbGrid_2f = SpaceTimeGrid::makeFiveDimRedBlackGrid(Ls,UGrid_2f);

  std::vector<int> seeds4({1,2,3,4});
  std::vector<int> seeds5({5,6,7,8});
  GridParallelRNG RNG5_2f(FGrid_2f);  RNG5_2f.SeedFixedIntegers(seeds5);
  GridParallelRNG RNG4_2f(UGrid_2f);  RNG4_2f.SeedFixedIntegers(seeds4);

  LatticeGaugeField Umu_2f(UGrid_2f);
  SU<Nc>::HotConfiguration(RNG4_2f,Umu_2f);

  LatticeGaugeField Umu_1f(UGrid_1f);
  copyConjGauge(Umu_1f, Umu_2f, nu);

  typedef typename GparityAction::FermionField GparityFermionField;
  typedef typename StandardAction::FermionField StandardFermionField;

  setupAction<GparityAction, StandardAction> setup(UGrid_2f, UrbGrid_2f, FGrid_2f, FrbGrid_2f,
						   UGrid_1f, UrbGrid_1f, FGrid_1f, FrbGrid_1f,
						   Umu_2f, Umu_1f, nu);
  GridBase* FGrid_2f_a = setup.action2f().FermionGrid();
  GridBase* FGrid_1f_a = setup.action1f().FermionGrid();
  GridBase* FrbGrid_2f_a = setup.action2f().FermionRedBlackGrid();
  GridBase* FrbGrid_1f_a = setup.action1f().FermionRedBlackGrid();
  bool is_4d = setup.is4d();

  //Check components by doing an inversion
  {
    setup.action2f().ImportGauge(Umu_2f);
    setup.action1f().ImportGauge(Umu_1f);

    GparityFermionField src_2f(FGrid_2f_a);
    gaussian(is_4d ? RNG4_2f : RNG5_2f, src_2f);

    StandardFermionField src_1f(FGrid_1f_a);
    convertFermion1f_from_2f(src_1f, src_2f, nu, is_4d);

    StandardFermionField src_o_1f(FrbGrid_1f_a);
    StandardFermionField result_o_1f(FrbGrid_1f_a);
    pickCheckerboard(Odd,src_o_1f,src_1f);
    result_o_1f=Zero();

    SchurDiagMooeeOperator<StandardAction,StandardFermionField> HermOpEO_1f(setup.action1f());
    ConjugateGradient<StandardFermionField> CG_1f(1.0e-8,10000);
    CG_1f(HermOpEO_1f,src_o_1f,result_o_1f);

    GparityFermionField src_o_2f(FrbGrid_2f_a);
    GparityFermionField result_o_2f(FrbGrid_2f_a);
    pickCheckerboard(Odd,src_o_2f,src_2f);
    result_o_2f=Zero();

    SchurDiagMooeeOperator<GparityAction,GparityFermionField> HermOpEO_2f(setup.action2f());
    ConjugateGradient<GparityFermionField> CG_2f(1.0e-8,10000);
    CG_2f(HermOpEO_2f,src_o_2f,result_o_2f);

    RealD norm_1f = norm2(result_o_1f);
    RealD norm_2f = norm2(result_o_2f);

    std::cout << "Test fermion inversion 2f: " << norm_2f << " 1f: " << norm_1f << std::endl;
  }

  //Generate eta
  RealD scale = std::sqrt(0.5);
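  // (scale = sqrt(0.5) turns the unit-variance gaussian() draw into variance-1/2
  //  components, matching the exp(-eta^dag eta) pseudofermion heatbath convention;
  //  the unit-variance convention of gaussian() is assumed here)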
  GparityFermionField eta_2f(FGrid_2f_a);
  gaussian(is_4d ? RNG4_2f : RNG5_2f,eta_2f); eta_2f = eta_2f * scale;

  StandardFermionField eta_1f(FGrid_1f_a);
  convertFermion1f_from_2f(eta_1f, eta_2f, nu, is_4d);

  setup.refreshAction(Umu_2f, eta_2f, Umu_1f, eta_1f);

  //Initial action is just |eta^2|
  RealD S_1f, S_2f;

  setup.computeAction(S_2f, S_1f, Umu_2f, Umu_1f);

  std::cout << "Test Initial action 2f: " << S_2f << " 1f: " << S_1f << " diff: " << S_2f - S_1f << std::endl;

  //Do a random gauge field refresh
  SU<Nc>::HotConfiguration(RNG4_2f,Umu_2f);
  copyConjGauge(Umu_1f, Umu_2f, nu);

  //Compute the action again
  setup.computeAction(S_2f, S_1f, Umu_2f, Umu_1f);

  std::cout << "Test Action after gauge field randomize 2f: " << S_2f << " 1f: " << S_1f << " diff: " << S_2f - S_1f << std::endl;

  //Compute the derivative and test the conjugate relation
  LatticeGaugeField deriv_2f(UGrid_2f);
  LatticeGaugeField deriv_1f(UGrid_1f);
  setup.computeDeriv(deriv_2f, deriv_1f, Umu_2f, Umu_1f);

  //Have to combine the two forces on the 1f by symmetrizing under the complex conjugate
  {
    RealD norm2_pre = norm2(deriv_1f);
    LatticeGaugeField deriv_1f_shift = conjugate( Cshift(deriv_1f, nu, latt_2f[nu]) );
    deriv_1f = deriv_1f + deriv_1f_shift;
    std::cout << "Test combine/symmetrize forces on 1f lattice, dS/dU : " << norm2_pre << " -> " << norm2(deriv_1f) << std::endl;
  }

  LatticeGaugeField deriv_1f_from_2f(UGrid_1f);
  copyConjGauge(deriv_1f_from_2f, deriv_2f, nu);
  std::cout << "Test copy-conj 2f dS/dU to obtain equivalent 1f force : " << norm2(deriv_2f) << " -> " << norm2(deriv_1f_from_2f) << std::endl;

  LatticeGaugeField diff_deriv_1f = deriv_1f - deriv_1f_from_2f;

  std::cout << "Test dS/dU 1f constructed from 2f derivative: " << norm2(deriv_1f_from_2f) << " dS/dU 1f actual: " << norm2(deriv_1f) << " Norm of difference: " << norm2(diff_deriv_1f) << std::endl;

  std::cout<< GridLogMessage << "Done" <<std::endl;
  Grid_finalize();
}

int main (int argc, char ** argv)
{
  std::string action = "DWF";
  for(int i=1;i<argc;i++){
    if(std::string(argv[i]) == "--action"){
      action = argv[i+1];
    }
  }

  if(action == "DWF"){
    runTest<GparityDomainWallFermionD, DomainWallFermionD>(argc, argv);
  }else if(action == "EOFA"){
    runTest<GparityDomainWallEOFAFermionD, DomainWallEOFAFermionD>(argc, argv);
  }else if(action == "DSDR"){
    runTest<GparityWilsonTMFermionD, WilsonTMFermionD>(argc,argv);
  }else{
    assert(0);
  }
}
@ -91,28 +91,17 @@ int main (int argc, char ** argv)
|
||||
RealD dt = 0.01;
|
||||
|
||||
LatticeColourMatrix mommu(UGrid);
|
||||
LatticeColourMatrix zz(UGrid);
|
||||
LatticeColourMatrix forcemu(UGrid);
|
||||
LatticeGaugeField mom(UGrid);
|
||||
LatticeGaugeField Uprime(UGrid);
|
||||
|
||||
|
||||
Lattice<iScalar<vInteger> > coor(UGrid);
|
||||
LatticeCoordinate(coor,nu);
|
||||
zz=Zero();
|
||||
for(int mu=0;mu<Nd;mu++){
|
||||
|
||||
// Traceless antihermitian momentum; gaussian in lie alg
|
||||
SU<Nc>::GaussianFundamentalLieAlgebraMatrix(RNG4, mommu);
|
||||
if(0){
|
||||
if(mu==nu){
|
||||
mommu=where(coor==Lnu-1,mommu,zz);
|
||||
} else {
|
||||
mommu=Zero();
|
||||
}
|
||||
}
|
||||
SU<Nc>::GaussianFundamentalLieAlgebraMatrix(RNG4, mommu);
|
||||
|
||||
PokeIndex<LorentzIndex>(mom,mommu,mu);
|
||||
|
||||
|
||||
// fourth order exponential approx
|
||||
autoView( mom_v, mom, CpuRead);
|
||||
autoView( U_v , U, CpuRead);
|
||||
@ -145,10 +134,6 @@ int main (int argc, char ** argv)
|
||||
mommu=Ta(mommu)*2.0;
|
||||
PokeIndex<LorentzIndex>(UdSdU,mommu,mu);
|
||||
}
|
||||
LatticeComplex lip(UGrid); lip=localInnerProduct(Mphi,Mphi);
|
||||
LatticeComplex lipp(UGrid); lipp=localInnerProduct(MphiPrime,MphiPrime);
|
||||
LatticeComplex dip(UGrid); dip = lipp - lip;
|
||||
std::cout << " dip "<<dip<<std::endl;
|
||||
|
||||
LatticeComplex dS(UGrid); dS = Zero();
|
||||
for(int mu=0;mu<Nd;mu++){
|
||||
@ -158,14 +143,12 @@ int main (int argc, char ** argv)
|
||||
// Update PF action density
|
||||
dS = dS+trace(mommu*forcemu)*dt;
|
||||
}
|
||||
std::cout << "mommu"<<mommu<<std::endl;
|
||||
std::cout << "dS" << dS<<std::endl;
|
||||
|
||||
|
||||
ComplexD dSpred = sum(dS);
|
||||
|
||||
std::cout << GridLogMessage << " S "<<S<<std::endl;
|
||||
std::cout << GridLogMessage << " Sprime "<<Sprime<<std::endl;
|
||||
std::cout << GridLogMessage << "Delta S "<<Sprime-S<<std::endl;
|
||||
std::cout << GridLogMessage << "dS "<<Sprime-S<<std::endl;
|
||||
std::cout << GridLogMessage << "predict dS "<< dSpred <<std::endl;
|
||||
|
||||
assert( fabs(real(Sprime-S-dSpred)) < 2.0 ) ;
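In equation form, what this force test checks (my restatement, with \(P_\mu\) the Gaussian momentum and \(F_\mu\) the pseudofermion force) is that the measured change in action matches the first-order prediction

    \Delta S = S' - S \approx \sum_{x,\mu} \mathrm{tr}\left[ P_\mu(x)\, F_\mu(x) \right] dt ,

which is exactly the quantity accumulated in dS and summed into dSpred above.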
@ -89,49 +89,7 @@ int main (int argc, char** argv)
ExactOneFlavourRatioPseudoFermionAction<WilsonImplR> Meofa(Lop, Rop, CG, CG, CG, CG, CG, Params, false);

GridSerialRNG sRNG; sRNG.SeedFixedIntegers(seeds4);

//Check the rational approximation
{
RealD scale = std::sqrt(0.5);
LatticeFermion eta (Lop.FermionGrid());
gaussian(RNG5,eta); eta = eta * scale;

Meofa.refresh(U, eta);

//Phi = M^{-1/2} eta
//M is Hermitian
//(Phi, M Phi) = eta^\dagger M^{-1/2} M M^{-1/2} eta = eta^\dagger eta
LatticeFermion phi = Meofa.getPhi();
LatticeFermion Mphi(FGrid);

Meofa.Meofa(U, phi, Mphi);
std::cout << "Computing inner product" << std::endl;
ComplexD inner = innerProduct(phi, Mphi);
ComplexD test = inner - norm2(eta);

std::cout << "(phi, Mphi) - (eta,eta): " << test << " expect 0" << std::endl;

assert(test.real() < 1e-8);
assert(test.imag() < 1e-8);

//Another test is to use heatbath twice to apply M^{-1/2} to Phi then apply M
// M Phi'
//= M M^{-1/2} Phi
//= M M^{-1/2} M^{-1/2} eta
//= eta
Meofa.refresh(U, phi);
LatticeFermion phi2 = Meofa.getPhi();
LatticeFermion test2(FGrid);
Meofa.Meofa(U, phi2, test2);
test2 = test2 - eta;
RealD test2_norm = norm2(test2);
std::cout << "|M M^{-1/2} M^{-1/2} eta - eta|^2 = " << test2_norm << " expect 0" << std::endl;
assert( test2_norm < 1e-8 );
}
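Restating the two checks in this block as equations: with \(\Phi = M^{-1/2}\eta\) and \(M\) Hermitian,

    (\Phi, M\Phi) = \eta^\dagger M^{-1/2} M M^{-1/2} \eta = \eta^\dagger \eta ,

and refreshing a second time with \(\Phi\) as the noise gives \(\Phi' = M^{-1/2}\Phi = M^{-1}\eta\), hence \(M\Phi' = \eta\); both identities are asserted to hold to \(10^{-8}\).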
Meofa.refresh(U, sRNG, RNG5 );

RealD S = Meofa.S(U); // pdag M p

// get the deriv of phidag M phi with respect to "U"

@ -1,260 +0,0 @@
/*************************************************************************************

Grid physics library, www.github.com/paboyle/Grid

Source file: ./tests/forces/Test_mobius_gparity_eofa_mixed.cc

Copyright (C) 2017

Author: Christopher Kelly <ckelly@bnl.gov>
Author: Peter Boyle <paboyle@ph.ed.ac.uk>
Author: David Murphy <dmurphy@phys.columbia.edu>

This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.

This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.

See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */

#include <Grid/Grid.h>

using namespace std;
using namespace Grid;

typedef GparityWilsonImplD FermionImplPolicyD;
typedef GparityMobiusEOFAFermionD FermionActionD;
typedef typename FermionActionD::FermionField FermionFieldD;

typedef GparityWilsonImplF FermionImplPolicyF;
typedef GparityMobiusEOFAFermionF FermionActionF;
typedef typename FermionActionF::FermionField FermionFieldF;

NAMESPACE_BEGIN(Grid);

template<class FermionOperatorD, class FermionOperatorF, class SchurOperatorD, class SchurOperatorF>
class MixedPrecisionConjugateGradientOperatorFunction : public OperatorFunction<typename FermionOperatorD::FermionField> {
public:
typedef typename FermionOperatorD::FermionField FieldD;
typedef typename FermionOperatorF::FermionField FieldF;

using OperatorFunction<FieldD>::operator();

RealD Tolerance;
RealD InnerTolerance; //Initial tolerance for inner CG. Defaults to Tolerance but can be changed
Integer MaxInnerIterations;
Integer MaxOuterIterations;
GridBase* SinglePrecGrid4; //Grid for single-precision fields
GridBase* SinglePrecGrid5; //Grid for single-precision fields
RealD OuterLoopNormMult; //Stop the outer loop and move to a final double prec solve when the residual is OuterLoopNormMult * Tolerance

FermionOperatorF &FermOpF;
FermionOperatorD &FermOpD;
SchurOperatorF &LinOpF;
SchurOperatorD &LinOpD;

Integer TotalInnerIterations; //Number of inner CG iterations
Integer TotalOuterIterations; //Number of restarts
Integer TotalFinalStepIterations; //Number of CG iterations in final patch-up step

MixedPrecisionConjugateGradientOperatorFunction(RealD tol,
Integer maxinnerit,
Integer maxouterit,
GridBase* _sp_grid4,
GridBase* _sp_grid5,
FermionOperatorF &_FermOpF,
FermionOperatorD &_FermOpD,
SchurOperatorF &_LinOpF,
SchurOperatorD &_LinOpD):
LinOpF(_LinOpF),
LinOpD(_LinOpD),
FermOpF(_FermOpF),
FermOpD(_FermOpD),
Tolerance(tol),
InnerTolerance(tol),
MaxInnerIterations(maxinnerit),
MaxOuterIterations(maxouterit),
SinglePrecGrid4(_sp_grid4),
SinglePrecGrid5(_sp_grid5),
OuterLoopNormMult(100.)
{
};

void operator()(LinearOperatorBase<FieldD> &LinOpU, const FieldD &src, FieldD &psi) {

std::cout << GridLogMessage << " Mixed precision CG wrapper operator() "<<std::endl;

SchurOperatorD * SchurOpU = static_cast<SchurOperatorD *>(&LinOpU);
assert(&(SchurOpU->_Mat)==&(LinOpD._Mat));

////////////////////////////////////////////////////////////////////////////////////
// Must snarf a single precision copy of the gauge field in Linop_d argument
////////////////////////////////////////////////////////////////////////////////////
//typedef typename FermionOperatorF::GaugeField GaugeFieldF;
//typedef typename FermionOperatorF::GaugeLinkField GaugeLinkFieldF;
//typedef typename FermionOperatorD::GaugeField GaugeFieldD;
//typedef typename FermionOperatorD::GaugeLinkField GaugeLinkFieldD;

//GridBase * GridPtrF = SinglePrecGrid4;
//GridBase * GridPtrD = FermOpD.Umu.Grid();
//GaugeFieldF U_f (GridPtrF);
//GaugeLinkFieldF Umu_f(GridPtrF);

////////////////////////////////////////////////////////////////////////////////////
// Moving this to a Clone method of fermion operator would allow to duplicate the
// physics parameters and decrease gauge field copies
////////////////////////////////////////////////////////////////////////////////////

//typedef typename std::decay<decltype(PeekIndex<LorentzIndex>(FermOpD.Umu, 0))>::type DoubleS

//GaugeLinkFieldD Umu_d(GridPtrD);
//for(int mu=0;mu<Nd*2;mu++){
//Umu_d = PeekIndex<LorentzIndex>(FermOpD.Umu, mu);
//precisionChange(Umu_f,Umu_d);
//PokeIndex<LorentzIndex>(FermOpF.Umu, Umu_f, mu);
//}

precisionChange(FermOpF.Umu, FermOpD.Umu);

pickCheckerboard(Even,FermOpF.UmuEven,FermOpF.Umu);
pickCheckerboard(Odd ,FermOpF.UmuOdd ,FermOpF.Umu);

////////////////////////////////////////////////////////////////////////////////////
// Make a mixed precision conjugate gradient
////////////////////////////////////////////////////////////////////////////////////
MixedPrecisionConjugateGradient<FieldD,FieldF> MPCG(Tolerance,MaxInnerIterations,MaxOuterIterations,SinglePrecGrid5,LinOpF,LinOpD);
MPCG.InnerTolerance = InnerTolerance;
std::cout << GridLogMessage << "Calling mixed precision Conjugate Gradient" <<std::endl;
MPCG(src,psi);
}
};
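In main() below, this wrapper is instantiated once per EOFA operator (MCG_L and MCG_R) and handed to the mixed-precision heatbath pseudofermion action in place of a plain double-precision ConjugateGradient, so each inversion in the heatbath is carried out with single-precision inner solves and double-precision restarts.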
NAMESPACE_END(Grid);

int main (int argc, char** argv)
{
Grid_init(&argc, &argv);

Coordinate latt_size = GridDefaultLatt();
Coordinate mpi_layout = GridDefaultMpi();

const int Ls = 8;

GridCartesian *UGridD = SpaceTimeGrid::makeFourDimGrid(GridDefaultLatt(), GridDefaultSimd(Nd,vComplexD::Nsimd()), GridDefaultMpi());
GridRedBlackCartesian *UrbGridD = SpaceTimeGrid::makeFourDimRedBlackGrid(UGridD);
GridCartesian *FGridD = SpaceTimeGrid::makeFiveDimGrid(Ls, UGridD);
GridRedBlackCartesian *FrbGridD = SpaceTimeGrid::makeFiveDimRedBlackGrid(Ls, UGridD);

GridCartesian *UGridF = SpaceTimeGrid::makeFourDimGrid(GridDefaultLatt(), GridDefaultSimd(Nd,vComplexF::Nsimd()), GridDefaultMpi());
GridRedBlackCartesian *UrbGridF = SpaceTimeGrid::makeFourDimRedBlackGrid(UGridF);
GridCartesian *FGridF = SpaceTimeGrid::makeFiveDimGrid(Ls, UGridF);
GridRedBlackCartesian *FrbGridF = SpaceTimeGrid::makeFiveDimRedBlackGrid(Ls, UGridF);

std::vector<int> seeds4({1,2,3,5});
std::vector<int> seeds5({5,6,7,8});
GridParallelRNG RNG5(FGridD); RNG5.SeedFixedIntegers(seeds5);
GridParallelRNG RNG4(UGridD); RNG4.SeedFixedIntegers(seeds4);

int threads = GridThread::GetThreads();
std::cout << GridLogMessage << "Grid is setup to use " << threads << " threads" << std::endl;

LatticeGaugeFieldD Ud(UGridD);
SU<Nc>::HotConfiguration(RNG4,Ud);

LatticeGaugeFieldF Uf(UGridF);
precisionChange(Uf, Ud);

RealD b = 2.5;
RealD c = 1.5;
RealD mf = 0.01;
RealD mb = 1.0;
RealD M5 = 1.8;
FermionActionD::ImplParams params;
params.twists[0] = 1; //GPBC in X
params.twists[Nd-1] = 1; //APRD in T

std::vector<int> gtwists(4,0);
gtwists[0] = 1;

ConjugateGimplD::setDirections(gtwists);

FermionActionD LopD(Ud, *FGridD, *FrbGridD, *UGridD, *UrbGridD, mf, mf, mb, 0.0, -1, M5, b, c, params);
FermionActionD RopD(Ud, *FGridD, *FrbGridD, *UGridD, *UrbGridD, mb, mf, mb, -1.0, 1, M5, b, c, params);

FermionActionF LopF(Uf, *FGridF, *FrbGridF, *UGridF, *UrbGridF, mf, mf, mb, 0.0, -1, M5, b, c, params);
FermionActionF RopF(Uf, *FGridF, *FrbGridF, *UGridF, *UrbGridF, mb, mf, mb, -1.0, 1, M5, b, c, params);

OneFlavourRationalParams OFRp(0.95, 100.0, 5000, 1.0e-12, 12);
ConjugateGradient<FermionFieldD> CG(1.0e-10, 10000);

typedef SchurDiagMooeeOperator<FermionActionD,FermionFieldD> EOFAschuropD;
typedef SchurDiagMooeeOperator<FermionActionF,FermionFieldF> EOFAschuropF;

EOFAschuropD linopL_D(LopD);
EOFAschuropD linopR_D(RopD);

EOFAschuropF linopL_F(LopF);
EOFAschuropF linopR_F(RopF);

typedef MixedPrecisionConjugateGradientOperatorFunction<FermionActionD, FermionActionF, EOFAschuropD, EOFAschuropF> EOFA_mxCG;

EOFA_mxCG MCG_L(1e-10, 10000, 1000, UGridF, FrbGridF, LopF, LopD, linopL_F, linopL_D);
MCG_L.InnerTolerance = 1e-5;

EOFA_mxCG MCG_R(1e-10, 10000, 1000, UGridF, FrbGridF, RopF, RopD, linopR_F, linopR_D);
MCG_R.InnerTolerance = 1e-5;

ExactOneFlavourRatioPseudoFermionAction<FermionImplPolicyD> MeofaD(LopD, RopD, CG, CG, CG, CG, CG, OFRp, true);
ExactOneFlavourRatioMixedPrecHeatbathPseudoFermionAction<FermionImplPolicyD, FermionImplPolicyF> MeofaMx(LopF, RopF, LopD, RopD, MCG_L, MCG_R, MCG_L, MCG_R, MCG_L, MCG_R, OFRp, true);

FermionFieldD eta(FGridD);
gaussian(RNG5, eta);

MeofaD.refresh(Ud, eta);
MeofaMx.refresh(Ud, eta);

FermionFieldD diff_phi(FGridD);
diff_phi = MeofaD.getPhi() - MeofaMx.getPhi();

RealD n = norm2(diff_phi);

std::cout << GridLogMessage << "Phi(double)=" << norm2(MeofaD.getPhi()) << " Phi(mixed)=" << norm2(MeofaMx.getPhi()) << " diff=" << n << std::endl;

assert(n < 1e-8);

RealD Sd = MeofaD.S(Ud);
RealD Smx = MeofaMx.S(Ud);

std::cout << GridLogMessage << "Initial action double=" << Sd << " mixed=" << Smx << " diff=" << Sd-Smx << std::endl;

assert(fabs(Sd-Smx) < 1e-6);

SU<Nc>::HotConfiguration(RNG4,Ud);
precisionChange(Uf, Ud);

Sd = MeofaD.S(Ud);
Smx = MeofaMx.S(Ud);

std::cout << GridLogMessage << "After randomizing U, action double=" << Sd << " mixed=" << Smx << " diff=" << Sd-Smx << std::endl;

assert(fabs(Sd-Smx) < 1e-6);

std::cout << GridLogMessage << "Done" << std::endl;
Grid_finalize();
}
@ -1,257 +0,0 @@
/*************************************************************************************

Grid physics library, www.github.com/paboyle/Grid

Source file: tests/hmc/Test_action_dwf_gparity2fvs1f.cc

Copyright (C) 2015

Author: Christopher Kelly <ckelly@bnl.gov>
Author: paboyle <paboyle@ph.ed.ac.uk>

This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.

This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.

See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */
#include <Grid/Grid.h>

using namespace Grid;

template<typename FermionField2f, typename FermionField1f>
void copy2fTo1fFermionField(FermionField1f &out, const FermionField2f &in, int gpdir){
auto f0_halfgrid = PeekIndex<GparityFlavourIndex>(in,0); //on 2f Grid
FermionField1f f0_fullgrid_dbl(out.Grid());
Replicate(f0_halfgrid, f0_fullgrid_dbl); //double it up to live on the 1f Grid

auto f1_halfgrid = PeekIndex<GparityFlavourIndex>(in,1);
FermionField1f f1_fullgrid_dbl(out.Grid());
Replicate(f1_halfgrid, f1_fullgrid_dbl);

const Coordinate &dim_2f = in.Grid()->GlobalDimensions();
const Coordinate &dim_1f = out.Grid()->GlobalDimensions();

//We have to be careful for 5d fields; the s-direction is placed before the x,y,z,t and so we need to shift gpdir by 1
std::cout << "gpdir " << gpdir << std::endl;

gpdir+=1;
std::cout << "gpdir for 5D fields " << gpdir << std::endl;

std::cout << "dim_2f " << dim_2f << std::endl;
std::cout << "dim_1f " << dim_1f << std::endl;

assert(dim_1f[gpdir] == 2*dim_2f[gpdir]);

LatticeInteger xcoor_1f(out.Grid()); //5d lattice integer
LatticeCoordinate(xcoor_1f,gpdir);

int L = dim_2f[gpdir];

out = where(xcoor_1f < L, f0_fullgrid_dbl, f1_fullgrid_dbl);
}
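The where() selection above implements the standard 2f-to-1f mapping; written out (my notation, with x the coordinate in the G-parity direction and L the 2f extent):

    \psi_{1f}(x) = \psi_0(x)       for 0 \le x < L
    \psi_{1f}(x) = \psi_1(x - L)   for L \le x < 2L

i.e. flavour 0 occupies the first half of the doubled lattice and flavour 1 the second half.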
//Both have the same field type
void copy2fTo1fGaugeField(LatticeGaugeField &out, const LatticeGaugeField &in, int gpdir){
LatticeGaugeField U_dbl(out.Grid());
Replicate(in, U_dbl);

LatticeGaugeField Uconj_dbl = conjugate( U_dbl );

const Coordinate &dim_2f = in.Grid()->GlobalDimensions();

LatticeInteger xcoor_1f(out.Grid());
LatticeCoordinate(xcoor_1f,gpdir);

int L = dim_2f[gpdir];

out = where(xcoor_1f < L, U_dbl, Uconj_dbl);
}
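Correspondingly for the gauge field (again my restatement of the code above): the doubled lattice carries the original links on the first half and their complex conjugates on the second,

    U^{1f}_\mu(x) = U_\mu(x)         for x < L
    U^{1f}_\mu(x) = U^*_\mu(x - L)   for x \ge L.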
std::ostream & operator<<(std::ostream &os, const Coordinate &x){
os << "(";
for(int i=0;i<x.size();i++) os << x[i] << (i<x.size()-1 ? " " : "");
os << ")";
return os;
}

int main(int argc, char **argv) {
using namespace Grid;

Grid_init(&argc, &argv);
int threads = GridThread::GetThreads();

std::cout << GridLogMessage << "Grid is setup to use " << threads << " threads" << std::endl;

int Ls = 16;

Coordinate latt_2f = GridDefaultLatt();
Coordinate simd_layout = GridDefaultSimd(Nd, vComplexD::Nsimd());
Coordinate mpi_layout = GridDefaultMpi();

int mu = 0; //Gparity direction

Coordinate latt_1f = latt_2f;
latt_1f[mu] *= 2;

GridCartesian * UGrid_1f = SpaceTimeGrid::makeFourDimGrid(latt_1f, simd_layout, mpi_layout);
GridRedBlackCartesian * UrbGrid_1f = SpaceTimeGrid::makeFourDimRedBlackGrid(UGrid_1f);
GridCartesian * FGrid_1f = SpaceTimeGrid::makeFiveDimGrid(Ls,UGrid_1f);
GridRedBlackCartesian * FrbGrid_1f = SpaceTimeGrid::makeFiveDimRedBlackGrid(Ls,UGrid_1f);

GridCartesian * UGrid_2f = SpaceTimeGrid::makeFourDimGrid(latt_2f, simd_layout, mpi_layout);
GridRedBlackCartesian * UrbGrid_2f = SpaceTimeGrid::makeFourDimRedBlackGrid(UGrid_2f);
GridCartesian * FGrid_2f = SpaceTimeGrid::makeFiveDimGrid(Ls,UGrid_2f);
GridRedBlackCartesian * FrbGrid_2f = SpaceTimeGrid::makeFiveDimRedBlackGrid(Ls,UGrid_2f);

std::cout << "SIMD layout " << simd_layout << std::endl;
std::cout << "MPI layout " << mpi_layout << std::endl;
std::cout << "2f dimensions " << latt_2f << std::endl;
std::cout << "1f dimensions " << latt_1f << std::endl;

std::vector<int> seeds4({1,2,3,4});
std::vector<int> seeds5({5,6,7,8});
GridParallelRNG RNG5_2f(FGrid_2f); RNG5_2f.SeedFixedIntegers(seeds5);
GridParallelRNG RNG4_2f(UGrid_2f); RNG4_2f.SeedFixedIntegers(seeds4);

std::cout << "Generating hot 2f gauge configuration" << std::endl;
LatticeGaugeField Umu_2f(UGrid_2f);
SU<Nc>::HotConfiguration(RNG4_2f,Umu_2f);

std::cout << "Copying 2f->1f gauge field" << std::endl;
LatticeGaugeField Umu_1f(UGrid_1f);
copy2fTo1fGaugeField(Umu_1f, Umu_2f, mu);

typedef GparityWilsonImplR FermionImplPolicy2f;
typedef GparityDomainWallFermionR FermionAction2f;
typedef typename FermionAction2f::FermionField FermionField2f;

typedef WilsonImplR FermionImplPolicy1f;
typedef DomainWallFermionR FermionAction1f;
typedef typename FermionAction1f::FermionField FermionField1f;

std::cout << "Generating eta 2f" << std::endl;
FermionField2f eta_2f(FGrid_2f);
gaussian(RNG5_2f, eta_2f);

RealD scale = std::sqrt(0.5);
eta_2f=eta_2f*scale;

std::cout << "Copying 2f->1f eta" << std::endl;
FermionField1f eta_1f(FGrid_1f);
copy2fTo1fFermionField(eta_1f, eta_2f, mu);

Real beta = 2.13;
Real light_mass = 0.01;
Real strange_mass = 0.032;
Real pv_mass = 1.0;
RealD M5 = 1.8;

//Setup the Dirac operators
std::cout << "Initializing Dirac operators" << std::endl;

FermionAction2f::ImplParams Params_2f;
Params_2f.twists[mu] = 1;
Params_2f.twists[Nd-1] = 1; //APBC in time direction

//note 'Num' and 'Den' here refer to the determinant ratio, not the operator ratio in the pseudofermion action where the two are inverted
//to my mind the Pauli Villars and 'denominator' are synonymous but the Grid convention has this as the 'Numerator' operator in the RHMC implementation
FermionAction2f NumOp_2f(Umu_2f,*FGrid_2f,*FrbGrid_2f,*UGrid_2f, *UrbGrid_2f, light_mass,M5,Params_2f);
FermionAction2f DenOp_2f(Umu_2f,*FGrid_2f,*FrbGrid_2f,*UGrid_2f, *UrbGrid_2f, pv_mass, M5,Params_2f);

FermionAction1f::ImplParams Params_1f;
Params_1f.boundary_phases[mu] = -1; //antiperiodic in doubled lattice in GP direction
Params_1f.boundary_phases[Nd-1] = -1;

FermionAction1f NumOp_1f(Umu_1f,*FGrid_1f,*FrbGrid_1f,*UGrid_1f, *UrbGrid_1f, light_mass,M5,Params_1f);
FermionAction1f DenOp_1f(Umu_1f,*FGrid_1f,*FrbGrid_1f,*UGrid_1f, *UrbGrid_1f, pv_mass, M5,Params_1f);

//Test the replication routines by running a CG on eta
double StoppingCondition = 1e-10;
double MaxCGIterations = 30000;
ConjugateGradient<FermionField2f> CG_2f(StoppingCondition,MaxCGIterations);
ConjugateGradient<FermionField1f> CG_1f(StoppingCondition,MaxCGIterations);

NumOp_1f.ImportGauge(Umu_1f);
NumOp_2f.ImportGauge(Umu_2f);

FermionField1f test_1f(FGrid_1f);
FermionField2f test_2f(FGrid_2f);

MdagMLinearOperator<FermionAction1f, FermionField1f> Linop_1f(NumOp_1f);
MdagMLinearOperator<FermionAction2f, FermionField2f> Linop_2f(NumOp_2f);

CG_1f(Linop_1f, eta_1f, test_1f);
CG_2f(Linop_2f, eta_2f, test_2f);
RealD test_1f_norm = norm2(test_1f);
RealD test_2f_norm = norm2(test_2f);

std::cout << "Verification of replication routines: " << test_1f_norm << " " << test_2f_norm << " " << test_1f_norm - test_2f_norm << std::endl;

#if 1
typedef GeneralEvenOddRatioRationalPseudoFermionAction<FermionImplPolicy2f> Action2f;
typedef GeneralEvenOddRatioRationalPseudoFermionAction<FermionImplPolicy1f> Action1f;

RationalActionParams rational_params;
rational_params.inv_pow = 2;
rational_params.lo = 1e-5;
rational_params.hi = 32;
rational_params.md_degree = 16;
rational_params.action_degree = 16;

Action2f action_2f(DenOp_2f, NumOp_2f, rational_params);
Action1f action_1f(DenOp_1f, NumOp_1f, rational_params);
#else
typedef TwoFlavourEvenOddRatioPseudoFermionAction<FermionImplPolicy2f> Action2f;
typedef TwoFlavourEvenOddRatioPseudoFermionAction<FermionImplPolicy1f> Action1f;

Action2f action_2f(DenOp_2f, NumOp_2f, CG_2f, CG_2f);
Action1f action_1f(DenOp_1f, NumOp_1f, CG_1f, CG_1f);
#endif

std::cout << "Action refresh" << std::endl;
action_2f.refresh(Umu_2f, eta_2f);
action_1f.refresh(Umu_1f, eta_1f);

std::cout << "Action compute post heatbath" << std::endl;
RealD S_2f = action_2f.S(Umu_2f);
RealD S_1f = action_1f.S(Umu_1f);

std::cout << "Action comparison post heatbath" << std::endl;
std::cout << S_2f << " " << S_1f << " " << S_2f-S_1f << std::endl;

//Change the gauge field between refresh and action eval else the matrix and inverse matrices all cancel and we just get |eta|^2
SU<Nc>::HotConfiguration(RNG4_2f,Umu_2f);
copy2fTo1fGaugeField(Umu_1f, Umu_2f, mu);

//Now compute the action with the new gauge field
std::cout << "Action compute post gauge field update" << std::endl;
S_2f = action_2f.S(Umu_2f);
S_1f = action_1f.S(Umu_1f);

std::cout << "Action comparison post gauge field update" << std::endl;
std::cout << S_2f << " " << S_1f << " " << S_2f-S_1f << std::endl;

Grid_finalize();
} // main
@ -58,7 +58,7 @@ int main(int argc, char **argv) {
CheckpointerParameters CPparams;
CPparams.config_prefix = "ckpoint_EODWF_lat";
CPparams.rng_prefix = "ckpoint_EODWF_rng";
CPparams.saveInterval = 1;
CPparams.saveInterval = 5;
CPparams.format = "IEEE64BIG";

TheHMC.Resources.LoadNerscCheckpointer(CPparams);
@ -79,7 +79,7 @@ int main(int argc, char **argv) {
// that have a complex construction
// standard
RealD beta = 2.6 ;
const int nu = 1;
const int nu = 3;
std::vector<int> twists(Nd,0);
twists[nu] = 1;
ConjugateGimplD::setDirections(twists);
@ -1,139 +0,0 @@
/*************************************************************************************

Grid physics library, www.github.com/paboyle/Grid

Source file: ./tests/Test_rhmc_EOWilsonRatio.cc

Copyright (C) 2015

Author: Peter Boyle <paboyle@ph.ed.ac.uk>
Author: paboyle <paboyle@ph.ed.ac.uk>

This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.

This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.

See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */
#include <Grid/Grid.h>

//This test is for the Wilson action with the determinant det( M^dag M)^1/4
//testing the generic RHMC

int main(int argc, char **argv) {
using namespace Grid;

Grid_init(&argc, &argv);
int threads = GridThread::GetThreads();
// here make a routine to print all the relevant information on the run
std::cout << GridLogMessage << "Grid is setup to use " << threads << " threads" << std::endl;

// Typedefs to simplify notation
typedef GenericHMCRunner<MinimumNorm2> HMCWrapper; // Uses the default minimum norm
typedef WilsonImplR FermionImplPolicy;
typedef WilsonFermionR FermionAction;
typedef typename FermionAction::FermionField FermionField;

//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
HMCWrapper TheHMC;

// Grid from the command line
TheHMC.Resources.AddFourDimGrid("gauge");

// Checkpointer definition
CheckpointerParameters CPparams;
CPparams.config_prefix = "ckpoint_lat";
CPparams.rng_prefix = "ckpoint_rng";
CPparams.saveInterval = 5;
CPparams.format = "IEEE64BIG";

TheHMC.Resources.LoadNerscCheckpointer(CPparams);

RNGModuleParameters RNGpar;
RNGpar.serial_seeds = "1 2 3 4 5";
RNGpar.parallel_seeds = "6 7 8 9 10";
TheHMC.Resources.SetRNGSeeds(RNGpar);

// Construct observables
typedef PlaquetteMod<HMCWrapper::ImplPolicy> PlaqObs;
TheHMC.Resources.AddObservable<PlaqObs>();
//////////////////////////////////////////////

/////////////////////////////////////////////////////////////
// Collect actions, here use more encapsulation
// need wrappers of the fermionic classes
// that have a complex construction
// standard
RealD beta = 5.6 ;
WilsonGaugeActionR Waction(beta);

auto GridPtr = TheHMC.Resources.GetCartesian();
auto GridRBPtr = TheHMC.Resources.GetRBCartesian();

// temporarily need a gauge field
LatticeGaugeField U(GridPtr);

Real mass = -0.77;
Real pv = 0.0;

// Can we define an overloaded operator that does not need U and initialises
// it with zeroes?
FermionAction DenOp(U, *GridPtr, *GridRBPtr, mass);
FermionAction NumOp(U, *GridPtr, *GridRBPtr, pv);

// 1/2+1/2 flavour
// RationalActionParams(int _inv_pow = 2,
// RealD _lo = 0.0,
// RealD _hi = 1.0,
// int _maxit = 1000,
// RealD tol = 1.0e-8,
// int _degree = 10,
// int _precision = 64,
// int _BoundsCheckFreq=20)

int inv_pow = 4;
RationalActionParams Params(inv_pow,1.0e-2,64.0,1000,1.0e-6,14,64,1);

GeneralEvenOddRatioRationalPseudoFermionAction<FermionImplPolicy> RHMC(NumOp,DenOp,Params);

// Collect actions
ActionLevel<HMCWrapper::Field> Level1(1);
Level1.push_back(&RHMC);

ActionLevel<HMCWrapper::Field> Level2(4);
Level2.push_back(&Waction);

TheHMC.TheAction.push_back(Level1);
TheHMC.TheAction.push_back(Level2);
/////////////////////////////////////////////////////////////

// HMC parameters are serialisable
TheHMC.Parameters.MD.MDsteps = 20;
TheHMC.Parameters.MD.trajL = 1.0;

TheHMC.ReadCommandLine(argc, argv); // these can be parameters from file
TheHMC.Run();

Grid_finalize();

} // main
@ -1,119 +0,0 @@
/*************************************************************************************

Grid physics library, www.github.com/paboyle/Grid

Source file: ./tests/Test_rhmc_EOWilsonRatio_doubleVsMixedPrec.cc

Copyright (C) 2015

Author: Christopher Kelly <ckelly@bnl.gov>
Author: Peter Boyle <paboyle@ph.ed.ac.uk>

This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.

This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.

See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */
#include <Grid/Grid.h>

//This test ensures the mixed precision RHMC gives the same result as the regular double precision
int main(int argc, char **argv) {
using namespace Grid;

Grid_init(&argc, &argv);
int threads = GridThread::GetThreads();
std::cout << GridLogMessage << "Grid is setup to use " << threads << " threads" << std::endl;

typedef GenericHMCRunner<MinimumNorm2> HMCWrapper; // Uses the default minimum norm

typedef WilsonImplD FermionImplPolicyD;
typedef WilsonFermionD FermionActionD;
typedef typename FermionActionD::FermionField FermionFieldD;

typedef WilsonImplF FermionImplPolicyF;
typedef WilsonFermionF FermionActionF;
typedef typename FermionActionF::FermionField FermionFieldF;

//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
HMCWrapper TheHMC;
TheHMC.Resources.AddFourDimGrid("gauge");

RNGModuleParameters RNGpar;
RNGpar.serial_seeds = "1 2 3 4 5";
RNGpar.parallel_seeds = "6 7 8 9 10";
TheHMC.Resources.SetRNGSeeds(RNGpar);

auto GridPtrD = TheHMC.Resources.GetCartesian();
auto GridRBPtrD = TheHMC.Resources.GetRBCartesian();

GridCartesian* GridPtrF = SpaceTimeGrid::makeFourDimGrid(GridDefaultLatt(), GridDefaultSimd(Nd, vComplexF::Nsimd()), GridDefaultMpi());
GridRedBlackCartesian* GridRBPtrF = SpaceTimeGrid::makeFourDimRedBlackGrid(GridPtrF);

// temporarily need a gauge field
LatticeGaugeFieldD Ud(GridPtrD);
LatticeGaugeFieldF Uf(GridPtrF);

Real mass = -0.77;
Real pv = 0.0;

FermionActionD DenOpD(Ud, *GridPtrD, *GridRBPtrD, mass);
FermionActionD NumOpD(Ud, *GridPtrD, *GridRBPtrD, pv);

FermionActionF DenOpF(Uf, *GridPtrF, *GridRBPtrF, mass);
FermionActionF NumOpF(Uf, *GridPtrF, *GridRBPtrF, pv);

TheHMC.Resources.AddRNGs();
PeriodicGimplR::HotConfiguration(TheHMC.Resources.GetParallelRNG(), Ud);

std::string seed_string = "the_seed";

//Setup the pseudofermion actions
RationalActionParams GenParams;
GenParams.inv_pow = 2;
GenParams.lo = 1e-2;
GenParams.hi = 64.0;
GenParams.MaxIter = 1000;
GenParams.action_tolerance = GenParams.md_tolerance = 1e-6;
GenParams.action_degree = GenParams.md_degree = 6;
GenParams.precision = 64;
GenParams.BoundsCheckFreq = 20;

GeneralEvenOddRatioRationalPseudoFermionAction<FermionImplPolicyD> GenD(NumOpD,DenOpD,GenParams);
GeneralEvenOddRatioRationalMixedPrecPseudoFermionAction<FermionImplPolicyD, FermionImplPolicyF> GenFD(NumOpD, DenOpD,
NumOpF, DenOpF,
GenParams, 50);
TheHMC.Resources.GetParallelRNG().SeedUniqueString(seed_string);
GenD.refresh(Ud, TheHMC.Resources.GetSerialRNG(), TheHMC.Resources.GetParallelRNG());
RealD Sd = GenD.S(Ud);
LatticeGaugeField derivD(Ud);
GenD.deriv(Ud,derivD);

TheHMC.Resources.GetParallelRNG().SeedUniqueString(seed_string);
GenFD.refresh(Ud, TheHMC.Resources.GetSerialRNG(), TheHMC.Resources.GetParallelRNG());
RealD Sfd = GenFD.S(Ud);
LatticeGaugeField derivFD(Ud);
GenFD.deriv(Ud,derivFD);

//Compare
std::cout << "Action : " << Sd << " " << Sfd << " reldiff " << (Sd - Sfd)/Sd << std::endl;

LatticeGaugeField diff(Ud);
axpy(diff, -1.0, derivD, derivFD);
std::cout << "Norm of difference in deriv " << sqrt(norm2(diff)) << std::endl;

Grid_finalize();
return 0;
}
@ -1,122 +0,0 @@
/*************************************************************************************

Grid physics library, www.github.com/paboyle/Grid

Source file: ./tests/Test_rhmc_EOWilsonRatio_genericVsOneFlavor.cc

Copyright (C) 2015

Author: Peter Boyle <paboyle@ph.ed.ac.uk>
Author: paboyle <paboyle@ph.ed.ac.uk>
Author: Christopher Kelly <ckelly@bnl.gov>

This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.

This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.

See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */
#include <Grid/Grid.h>

//This test ensures that the OneFlavourEvenOddRatioRationalPseudoFermionAction and GeneralEvenOddRatioRationalPseudoFermionAction actions (with parameters set appropriately)
//give the same results

int main(int argc, char **argv) {
using namespace Grid;

Grid_init(&argc, &argv);
int threads = GridThread::GetThreads();
std::cout << GridLogMessage << "Grid is setup to use " << threads << " threads" << std::endl;

typedef GenericHMCRunner<MinimumNorm2> HMCWrapper; // Uses the default minimum norm
typedef WilsonImplR FermionImplPolicy;
typedef WilsonFermionR FermionAction;
typedef typename FermionAction::FermionField FermionField;

//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
HMCWrapper TheHMC;
TheHMC.Resources.AddFourDimGrid("gauge");

// // Checkpointer definition
// CheckpointerParameters CPparams;
// CPparams.config_prefix = "ckpoint_lat";
// CPparams.rng_prefix = "ckpoint_rng";
// CPparams.saveInterval = 5;
// CPparams.format = "IEEE64BIG";

// TheHMC.Resources.LoadNerscCheckpointer(CPparams);

RNGModuleParameters RNGpar;
RNGpar.serial_seeds = "1 2 3 4 5";
RNGpar.parallel_seeds = "6 7 8 9 10";
TheHMC.Resources.SetRNGSeeds(RNGpar);

auto GridPtr = TheHMC.Resources.GetCartesian();
auto GridRBPtr = TheHMC.Resources.GetRBCartesian();

// temporarily need a gauge field
LatticeGaugeField U(GridPtr);

Real mass = -0.77;
Real pv = 0.0;

FermionAction DenOp(U, *GridPtr, *GridRBPtr, mass);
FermionAction NumOp(U, *GridPtr, *GridRBPtr, pv);

TheHMC.Resources.AddRNGs();
PeriodicGimplR::HotConfiguration(TheHMC.Resources.GetParallelRNG(), U);

std::string seed_string = "the_seed";

//1f action
OneFlavourRationalParams OneFParams(1.0e-2,64.0,1000,1.0e-6,6);

OneFlavourEvenOddRatioRationalPseudoFermionAction<FermionImplPolicy> OneF(NumOp,DenOp,OneFParams);
TheHMC.Resources.GetParallelRNG().SeedUniqueString(seed_string);
OneF.refresh(U, TheHMC.Resources.GetParallelRNG());
RealD OneFS = OneF.S(U);
LatticeGaugeField OneFderiv(U);
OneF.deriv(U,OneFderiv);

//general action
RationalActionParams GenParams;
GenParams.inv_pow = 2;
GenParams.lo = OneFParams.lo;
GenParams.hi = OneFParams.hi;
GenParams.MaxIter = OneFParams.MaxIter;
GenParams.action_tolerance = GenParams.md_tolerance = OneFParams.tolerance;
GenParams.action_degree = GenParams.md_degree = OneFParams.degree;
GenParams.precision = OneFParams.precision;
GenParams.BoundsCheckFreq = OneFParams.BoundsCheckFreq;

GeneralEvenOddRatioRationalPseudoFermionAction<FermionImplPolicy> Gen(NumOp,DenOp,GenParams);
TheHMC.Resources.GetParallelRNG().SeedUniqueString(seed_string);
Gen.refresh(U, TheHMC.Resources.GetParallelRNG());
RealD GenS = Gen.S(U);
LatticeGaugeField Genderiv(U);
Gen.deriv(U,Genderiv);

//Compare
std::cout << "Action : " << OneFS << " " << GenS << " reldiff " << (OneFS - GenS)/OneFS << std::endl;

LatticeGaugeField diff(U);
axpy(diff, -1.0, Genderiv, OneFderiv);
std::cout << "Norm of difference in deriv " << sqrt(norm2(diff)) << std::endl;

Grid_finalize();
return 0;
}
@ -1,425 +0,0 @@
/*************************************************************************************

Grid physics library, www.github.com/paboyle/Grid

Source file: ./tests/Test_compressed_lanczos_gparity.cc

Copyright (C) 2017

Author: Christopher Kelly <ckelly@bnl.gov>
Author: Leans heavily on Christoph Lehner's code
Author: Peter Boyle <paboyle@ph.ed.ac.uk>

This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.

This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.

See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */
/*
* Reimplement the badly named "multigrid" lanczos as compressed Lanczos using the features
* in Grid that were intended to be used to support blocked Aggregates, from
*/
#include <Grid/Grid.h>
#include <Grid/algorithms/iterative/ImplicitlyRestartedLanczos.h>
#include <Grid/algorithms/iterative/LocalCoherenceLanczos.h>

using namespace std;
using namespace Grid;

// template<class VectorInt>
// void GridCmdOptionIntVector(const std::string &str, VectorInt & vec)
// {
// vec.resize(0);
// std::stringstream ss(str);
// int i;
// while (ss >> i){
// vec.push_back(i);
// if(std::ispunct(ss.peek()))
// ss.ignore();
// }
// return;
// }

//For the CPS configurations we have to manually seed the RNG and deal with an incorrect factor of 2 in the plaquette metadata
void readConfiguration(LatticeGaugeFieldD &U,
const std::string &config,
bool is_cps_cfg = false){

if(is_cps_cfg) NerscIO::exitOnReadPlaquetteMismatch() = false;

typedef GaugeStatistics<ConjugateGimplD> GaugeStats;

FieldMetaData header;
NerscIO::readConfiguration<GaugeStats>(U, header, config);

if(is_cps_cfg) NerscIO::exitOnReadPlaquetteMismatch() = true;
}

//Lanczos parameters in CPS conventions
struct CPSLanczosParams : Serializable {
public:
GRID_SERIALIZABLE_CLASS_MEMBERS(CPSLanczosParams,
RealD, alpha,
RealD, beta,
int, ch_ord,
int, N_use,
int, N_get,
int, N_true_get,
RealD, stop_rsd,
int, maxits);

//Translations
ChebyParams getChebyParams() const{
ChebyParams out;
out.alpha = beta*beta; //aka lo
out.beta = alpha*alpha; //aka hi
out.Npoly = ch_ord+1;
return out;
}
int Nstop() const{ return N_true_get; }
int Nm() const{ return N_use; }
int Nk() const{ return N_get; }
};
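As a worked example of the translation above (using the default fine parameters set later in main, purely illustrative arithmetic): fine.alpha = 2 and fine.beta = 0.1 map to a Grid ChebyParams with lo = 0.1^2 = 0.01, hi = 2^2 = 4 and Npoly = 100 + 1 = 101. Note that the CPS alpha/beta play the opposite roles to Grid's alpha/beta, and both enter squared.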
|
||||
|
||||
//Maybe this class should be in the main library?
|
||||
template<class Fobj,class CComplex,int nbasis>
|
||||
class LocalCoherenceLanczosScidac : public LocalCoherenceLanczos<Fobj,CComplex,nbasis>
|
||||
{
|
||||
public:
|
||||
typedef iVector<CComplex,nbasis > CoarseSiteVector;
|
||||
typedef Lattice<CoarseSiteVector> CoarseField;
|
||||
typedef Lattice<CComplex> CoarseScalar; // used for inner products on fine field
|
||||
typedef Lattice<Fobj> FineField;
|
||||
|
||||
LocalCoherenceLanczosScidac(GridBase *FineGrid,GridBase *CoarseGrid,
|
||||
LinearOperatorBase<FineField> &FineOp,
|
||||
int checkerboard)
|
||||
// Base constructor
|
||||
: LocalCoherenceLanczos<Fobj,CComplex,nbasis>(FineGrid,CoarseGrid,FineOp,checkerboard)
|
||||
{};
|
||||
|
||||
void checkpointFine(std::string evecs_file,std::string evals_file)
|
||||
{
|
||||
assert(this->subspace.size()==nbasis);
|
||||
emptyUserRecord record;
|
||||
Grid::ScidacWriter WR(this->_FineGrid->IsBoss());
|
||||
WR.open(evecs_file);
|
||||
for(int k=0;k<nbasis;k++) {
|
||||
WR.writeScidacFieldRecord(this->subspace[k],record);
|
||||
}
|
||||
WR.close();
|
||||
|
||||
XmlWriter WRx(evals_file);
|
||||
write(WRx,"evals",this->evals_fine);
|
||||
}
|
||||
|
||||
void checkpointFineRestore(std::string evecs_file,std::string evals_file)
|
||||
{
|
||||
this->evals_fine.resize(nbasis);
|
||||
this->subspace.resize(nbasis,this->_FineGrid);
|
||||
|
||||
std::cout << GridLogIRL<< "checkpointFineRestore: Reading evals from "<<evals_file<<std::endl;
|
||||
XmlReader RDx(evals_file);
|
||||
read(RDx,"evals",this->evals_fine);
|
||||
|
||||
assert(this->evals_fine.size()==nbasis);
|
||||
|
||||
std::cout << GridLogIRL<< "checkpointFineRestore: Reading evecs from "<<evecs_file<<std::endl;
|
||||
emptyUserRecord record;
|
||||
Grid::ScidacReader RD ;
|
||||
RD.open(evecs_file);
|
||||
for(int k=0;k<nbasis;k++) {
|
||||
this->subspace[k].Checkerboard()=this->_checkerboard;
|
||||
RD.readScidacFieldRecord(this->subspace[k],record);
|
||||
|
||||
}
|
||||
RD.close();
|
||||
}
|
||||
|
||||
void checkpointCoarse(std::string evecs_file,std::string evals_file)
|
||||
{
|
||||
int n = this->evec_coarse.size();
|
||||
emptyUserRecord record;
|
||||
Grid::ScidacWriter WR(this->_CoarseGrid->IsBoss());
|
||||
WR.open(evecs_file);
|
||||
for(int k=0;k<n;k++) {
|
||||
WR.writeScidacFieldRecord(this->evec_coarse[k],record);
|
||||
}
|
||||
WR.close();
|
||||
|
||||
XmlWriter WRx(evals_file);
|
||||
write(WRx,"evals",this->evals_coarse);
|
||||
}
|
||||
|
||||
void checkpointCoarseRestore(std::string evecs_file,std::string evals_file,int nvec)
|
||||
{
|
||||
std::cout << "resizing coarse vecs to " << nvec<< std::endl;
|
||||
this->evals_coarse.resize(nvec);
|
||||
this->evec_coarse.resize(nvec,this->_CoarseGrid);
|
||||
std::cout << GridLogIRL<< "checkpointCoarseRestore: Reading evals from "<<evals_file<<std::endl;
|
||||
XmlReader RDx(evals_file);
|
||||
read(RDx,"evals",this->evals_coarse);
|
||||
|
||||
assert(this->evals_coarse.size()==nvec);
|
||||
emptyUserRecord record;
|
||||
std::cout << GridLogIRL<< "checkpointCoarseRestore: Reading evecs from "<<evecs_file<<std::endl;
|
||||
Grid::ScidacReader RD ;
|
||||
RD.open(evecs_file);
|
||||
for(int k=0;k<nvec;k++) {
|
||||
RD.readScidacFieldRecord(this->evec_coarse[k],record);
|
||||
}
|
||||
RD.close();
|
||||
}
|
||||
};
|
||||
|
||||
|
||||
//Note: because we rely upon physical properties we must use a "real" gauge configuration
|
||||
int main (int argc, char ** argv) {
|
||||
Grid_init(&argc,&argv);
|
||||
GridLogIRL.TimingMode(1);
|
||||
|
||||
std::vector<int> blockSize = {2,2,2,2,2};
|
||||
std::vector<int> GparityDirs = {1,1,1}; //1 for each GP direction
|
||||
|
||||
int Ls = 12;
|
||||
RealD mass = 0.01;
|
||||
RealD M5 = 1.8;
|
||||
bool is_cps_cfg = false;
|
||||
|
||||
CPSLanczosParams fine, coarse;
|
||||
|
||||
fine.alpha = 2;
|
||||
fine.beta = 0.1;
|
||||
fine.ch_ord = 100;
|
||||
fine.N_use = 70;
|
||||
fine.N_get = 60;
|
||||
fine.N_true_get = 60;
|
||||
fine.stop_rsd = 1e-8;
|
||||
fine.maxits = 10000;
|
||||
|
||||
coarse.alpha = 2;
|
||||
coarse.beta = 0.1;
|
||||
coarse.ch_ord = 100;
|
||||
coarse.N_use = 200;
|
||||
coarse.N_get = 190;
|
||||
coarse.N_true_get = 190;
|
||||
coarse.stop_rsd = 1e-8;
|
||||
coarse.maxits = 10000;
|
||||
|
||||
double coarse_relax_tol = 1e5;
|
||||
int smoother_ord = 20;
|
||||
|
||||
if(argc < 3){
|
||||
std::cout << GridLogMessage << "Usage: <exe> <config> <gparity dirs> <options>" << std::endl;
|
||||
std::cout << GridLogMessage << "<gparity dirs> should have the format a.b.c where a,b,c are 0,1 depending on whether there are G-parity BCs in that direction" << std::endl;
|
||||
std::cout << GridLogMessage << "Options:" << std::endl;
|
||||
std::cout << GridLogMessage << "--Ls <value> : Set Ls (default 12)" << std::endl;
|
||||
std::cout << GridLogMessage << "--mass <value> : Set the mass (default 0.01)" << std::endl;
|
||||
std::cout << GridLogMessage << "--block <value> : Set the block size. Format should be a.b.c.d.e where a-e are the block extents (default 2.2.2.2.2)" << std::endl;
|
||||
std::cout << GridLogMessage << "--is_cps_cfg : Indicate that the configuration was generated with CPS where until recently the stored plaquette was wrong by a factor of 2" << std::endl;
|
||||
std::cout << GridLogMessage << "--write_irl_templ: Write a template for the parameters file of the Lanczos to \"irl_templ.xml\"" << std::endl;
|
||||
std::cout << GridLogMessage << "--read_irl_fine <filename>: Real the parameters file for the fine Lanczos" << std::endl;
|
||||
std::cout << GridLogMessage << "--read_irl_coarse <filename>: Real the parameters file for the coarse Lanczos" << std::endl;
|
||||
std::cout << GridLogMessage << "--write_fine <filename stub>: Write fine evecs/evals to filename starting with the stub" << std::endl;
|
||||
std::cout << GridLogMessage << "--read_fine <filename stub>: Read fine evecs/evals from filename starting with the stub" << std::endl;
|
||||
std::cout << GridLogMessage << "--write_coarse <filename stub>: Write coarse evecs/evals to filename starting with the stub" << std::endl;
|
||||
std::cout << GridLogMessage << "--read_coarse <filename stub>: Read coarse evecs/evals from filename starting with the stub" << std::endl;
|
||||
std::cout << GridLogMessage << "--smoother_ord : Set the Chebyshev order of the smoother (default 20)" << std::endl;
|
||||
std::cout << GridLogMessage << "--coarse_relax_tol : Set the relaxation parameter for evaluating the residual of the reconstructed eigenvectors outside of the basis (default 1e5)" << std::endl;
|
||||
Grid_finalize();
|
||||
return 1;
|
||||
}
|
||||
std::string config = argv[1];
|
||||
GridCmdOptionIntVector(argv[2], GparityDirs);
|
||||
assert(GparityDirs.size() == 3);
|
||||
|
||||
bool write_fine = false;
|
||||
std::string write_fine_file;
|
||||
|
||||
bool read_fine = false;
|
||||
std::string read_fine_file;
|
||||
|
||||
bool write_coarse = false;
|
||||
std::string write_coarse_file;
|
||||
|
||||
bool read_coarse = false;
|
||||
std::string read_coarse_file;
|
||||
|
||||
for(int i=3;i<argc;i++){
|
||||
std::string sarg = argv[i];
|
||||
if(sarg == "--Ls"){
|
||||
Ls = std::stoi(argv[i+1]);
|
||||
std::cout << GridLogMessage << "Set Ls to " << Ls << std::endl;
|
||||
}else if(sarg == "--mass"){
|
||||
std::istringstream ss(argv[i+1]); ss >> mass;
|
||||
std::cout << GridLogMessage << "Set quark mass to " << mass << std::endl;
|
||||
}else if(sarg == "--block"){
|
||||
GridCmdOptionIntVector(argv[i+1], blockSize);
|
||||
assert(blockSize.size() == 5);
|
||||
std::cout << GridLogMessage << "Set block size to ";
|
||||
for(int q=0;q<5;q++) std::cout << blockSize[q] << " ";
|
||||
std::cout << std::endl;
|
||||
}else if(sarg == "--is_cps_cfg"){
|
||||
is_cps_cfg = true;
|
||||
}else if(sarg == "--write_irl_templ"){
|
||||
XmlWriter writer("irl_templ.xml");
|
||||
write(writer,"Params",fine);
|
||||
Grid_finalize();
|
||||
return 0;
|
||||
}else if(sarg == "--read_irl_fine"){
|
||||
std::cout << GridLogMessage << "Reading fine IRL params from " << argv[i+1] << std::endl;
|
||||
XmlReader reader(argv[i+1]);
|
||||
read(reader, "Params", fine);
|
||||
}else if(sarg == "--read_irl_coarse"){
|
||||
std::cout << GridLogMessage << "Reading coarse IRL params from " << argv[i+1] << std::endl;
|
||||
XmlReader reader(argv[i+1]);
|
||||
read(reader, "Params", coarse);
|
||||
}else if(sarg == "--write_fine"){
|
||||
write_fine = true;
|
||||
write_fine_file = argv[i+1];
|
||||
}else if(sarg == "--read_fine"){
|
||||
read_fine = true;
|
||||
read_fine_file = argv[i+1];
|
||||
}else if(sarg == "--write_coarse"){
|
||||
write_coarse = true;
|
||||
write_coarse_file = argv[i+1];
|
||||
}else if(sarg == "--read_coarse"){
|
||||
read_coarse = true;
|
||||
read_coarse_file = argv[i+1];
|
||||
}else if(sarg == "--smoother_ord"){
|
||||
std::istringstream ss(argv[i+1]); ss >> smoother_ord;
|
||||
std::cout << GridLogMessage << "Set smoother order to " << smoother_ord << std::endl;
|
||||
}else if(sarg == "--coarse_relax_tol"){
|
||||
std::istringstream ss(argv[i+1]); ss >> coarse_relax_tol;
|
||||
std::cout << GridLogMessage << "Set coarse IRL relaxation parameter to " << coarse_relax_tol << std::endl;
|
||||
}
|
||||
}
|
||||
|
||||
//Fine grids
|
||||
GridCartesian * UGrid = SpaceTimeGrid::makeFourDimGrid(GridDefaultLatt(), GridDefaultSimd(Nd,vComplex::Nsimd()), GridDefaultMpi());
|
||||
GridRedBlackCartesian * UrbGrid = SpaceTimeGrid::makeFourDimRedBlackGrid(UGrid);
|
||||
GridCartesian * FGrid = SpaceTimeGrid::makeFiveDimGrid(Ls,UGrid);
|
||||
GridRedBlackCartesian * FrbGrid = SpaceTimeGrid::makeFiveDimRedBlackGrid(Ls,UGrid);
|
||||
|
||||
//Setup G-parity BCs
|
||||
assert(Nd == 4);
|
||||
std::vector<int> dirs4(4);
|
||||
for(int i=0;i<3;i++) dirs4[i] = GparityDirs[i];
|
||||
dirs4[3] = 0; //periodic gauge BC in time
|
||||
|
||||
std::cout << GridLogMessage << "Gauge BCs: " << dirs4 << std::endl;
|
||||
ConjugateGimplD::setDirections(dirs4); //gauge BC
|
||||
|
||||
GparityWilsonImplD::ImplParams Params;
|
||||
for(int i=0;i<Nd-1;i++) Params.twists[i] = GparityDirs[i]; //G-parity directions
|
||||
Params.twists[Nd-1] = 1; //APBC in time direction
|
||||
std::cout << GridLogMessage << "Fermion BCs: " << Params.twists << std::endl;
|
||||
|
||||
//Read the gauge field
|
||||
LatticeGaugeField Umu(UGrid);
|
||||
readConfiguration(Umu, config, is_cps_cfg);
|
||||
|
||||
//Setup the coarse grids
|
||||
auto fineLatt = GridDefaultLatt();
|
||||
Coordinate coarseLatt(4);
|
||||
for (int d=0;d<4;d++){
|
||||
coarseLatt[d] = fineLatt[d]/blockSize[d]; assert(coarseLatt[d]*blockSize[d]==fineLatt[d]);
|
||||
}
|
||||
|
||||
std::cout << GridLogMessage<< " 5d coarse lattice is ";
|
||||
for (int i=0;i<4;i++){
|
||||
std::cout << coarseLatt[i]<<"x";
|
||||
}
|
||||
int cLs = Ls/blockSize[4]; assert(cLs*blockSize[4]==Ls);
|
||||
std::cout << cLs<<std::endl;

GridCartesian * CoarseGrid4 = SpaceTimeGrid::makeFourDimGrid(coarseLatt, GridDefaultSimd(Nd,vComplex::Nsimd()),GridDefaultMpi());
GridRedBlackCartesian * CoarseGrid4rb = SpaceTimeGrid::makeFourDimRedBlackGrid(CoarseGrid4);
GridCartesian * CoarseGrid5 = SpaceTimeGrid::makeFiveDimGrid(cLs,CoarseGrid4);

//Dirac operator
GparityDomainWallFermionD action(Umu, *FGrid, *FrbGrid, *UGrid, *UrbGrid, mass, M5, Params);
typedef GparityDomainWallFermionD::FermionField FermionField;

SchurDiagTwoOperator<GparityDomainWallFermionD,FermionField> SchurOp(action);

typedef GparityWilsonImplD::SiteSpinor SiteSpinor;

std::cout << GridLogMessage << "Keep " << fine.N_true_get << " fine vectors" << std::endl;
std::cout << GridLogMessage << "Keep " << coarse.N_true_get << " coarse vectors" << std::endl;
assert(coarse.N_true_get >= fine.N_true_get);

const int nbasis= 60;
assert(nbasis<=fine.N_true_get);
LocalCoherenceLanczosScidac<SiteSpinor,vTComplex,nbasis> _LocalCoherenceLanczos(FrbGrid,CoarseGrid5,SchurOp,Odd);
std::cout << GridLogMessage << "Constructed LocalCoherenceLanczos" << std::endl;

//Compute and/or read fine evecs
if(read_fine){
_LocalCoherenceLanczos.checkpointFineRestore(read_fine_file + "_evecs.scidac", read_fine_file + "_evals.xml");
}else{
std::cout << GridLogMessage << "Performing fine grid IRL" << std::endl;
std::cout << GridLogMessage << "Using Chebyshev alpha=" << fine.alpha << " beta=" << fine.beta << " ord=" << fine.ch_ord << std::endl;
_LocalCoherenceLanczos.calcFine(fine.getChebyParams(),
fine.Nstop(),fine.Nk(),fine.Nm(),
fine.stop_rsd,fine.maxits,0,0);
if(write_fine){
std::cout << GridLogIRL<<"Checkpointing Fine evecs"<<std::endl;
_LocalCoherenceLanczos.checkpointFine(write_fine_file + "_evecs.scidac", write_fine_file + "_evals.xml");
}
}

//Block orthonormalise (this should be part of calcFine?)
std::cout << GridLogIRL<<"Orthogonalising"<<std::endl;
_LocalCoherenceLanczos.Orthogonalise();
std::cout << GridLogIRL<<"Orthogonaled"<<std::endl;

ChebyParams smoother = fine.getChebyParams();
smoother.Npoly = smoother_ord+1;

if(read_coarse){
_LocalCoherenceLanczos.checkpointCoarseRestore(read_coarse_file + "_evecs.scidac", read_coarse_file + "_evals.xml",coarse.Nstop());

}else{
std::cout << GridLogMessage << "Performing coarse grid IRL" << std::endl;
std::cout << GridLogMessage << "Using Chebyshev alpha=" << coarse.alpha << " beta=" << coarse.beta << " ord=" << coarse.ch_ord << std::endl;
_LocalCoherenceLanczos.calcCoarse(coarse.getChebyParams(), smoother, coarse_relax_tol,
coarse.Nstop(), coarse.Nk() ,coarse.Nm(),
coarse.stop_rsd, coarse.maxits,
0,0);

if(write_coarse){
std::cout << GridLogIRL<<"Checkpointing Coarse evecs"<<std::endl;
_LocalCoherenceLanczos.checkpointCoarse(write_coarse_file + "_evecs.scidac", write_coarse_file + "_evals.xml");
}

}

//Test the eigenvectors
FermionField evec(FrbGrid);
FermionField tmp(FrbGrid);
RealD eval;

for(int i=0;i<coarse.N_true_get;i++){
_LocalCoherenceLanczos.getFineEvecEval(evec, eval, i);
SchurOp.HermOp(evec, tmp);
tmp = tmp - eval*evec;
std::cout << GridLogMessage << "Eval " << eval << " resid " << sqrt(norm2(tmp)) << std::endl;
}
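// For an exact eigenpair (eval, evec) of the Hermitian SchurOp the residual
// sqrt(norm2(HermOp(evec) - eval*evec)) printed above would vanish; roughly,
// converged vectors should come out near stop_rsd, while vectors reconstructed
// through the coarse space are only expected to meet the relaxed tolerance
// controlled by coarse_relax_tol.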

Grid_finalize();
}

@ -1,576 +0,0 @@
/*************************************************************************************

Grid physics library, www.github.com/paboyle/Grid

Source file: ./tests/Test_evec_compression.cc

Copyright (C) 2017

Author: Christopher Kelly <ckelly@bnl.gov>
Author: Peter Boyle <paboyle@ph.ed.ac.uk>

This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.

This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.

See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */
/*
*
* This test generates eigenvectors using the Lanczos algorithm then attempts to use local coherence compression
* to express those vectors in terms of a basis formed from a subset. This test is useful for finding the optimal
* blocking and basis size for performing a Local Coherence Lanczos
*/
#include <Grid/Grid.h>
#include <Grid/algorithms/iterative/ImplicitlyRestartedLanczos.h>
#include <Grid/algorithms/iterative/LocalCoherenceLanczos.h>

using namespace std;
using namespace Grid;

//For the CPS configurations we have to manually seed the RNG and deal with an incorrect factor of 2 in the plaquette metadata
template<typename Gimpl>
void readConfiguration(LatticeGaugeFieldD &U,
const std::string &config,
bool is_cps_cfg = false){

if(is_cps_cfg) NerscIO::exitOnReadPlaquetteMismatch() = false;

typedef GaugeStatistics<Gimpl> GaugeStats;

FieldMetaData header;
NerscIO::readConfiguration<GaugeStats>(U, header, config);

if(is_cps_cfg) NerscIO::exitOnReadPlaquetteMismatch() = true;
}
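// A minimal usage sketch ("ckpoint_lat.1000" is a hypothetical file name):
//   LatticeGaugeFieldD U(UGrid);
//   readConfiguration<ConjugateGimplD>(U, "ckpoint_lat.1000", true);
// This matches the calls made from main() below, where the final flag marks a
// CPS-generated configuration whose stored plaquette may be off by a factor of 2.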

//Lanczos parameters in CPS conventions
struct CPSLanczosParams : Serializable {
public:
GRID_SERIALIZABLE_CLASS_MEMBERS(CPSLanczosParams,
RealD, alpha,
RealD, beta,
int, ch_ord,
int, N_use,
int, N_get,
int, N_true_get,
RealD, stop_rsd,
int, maxits);

//Translations
ChebyParams getChebyParams() const{
ChebyParams out;
out.alpha = beta*beta; //aka lo
out.beta = alpha*alpha; //aka hi
out.Npoly = ch_ord+1;
return out;
}
int Nstop() const{ return N_true_get; }
int Nm() const{ return N_use; }
int Nk() const{ return N_get; }
};
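// Worked example of the translation above, using the defaults set in Args
// below (alpha=2, beta=0.1, ch_ord=100): CPS quotes the spectral bounds as
// square roots, so the resulting Grid ChebyParams are
//   out.alpha (lo) = 0.1*0.1 = 0.01,  out.beta (hi) = 2*2 = 4.0,  Npoly = 101.
// Note the deliberate swap: CPS 'alpha' feeds the Grid upper bound and CPS
// 'beta' the lower bound.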


template<class Fobj,class CComplex,int nbasis>
class LocalCoherenceCompressor{
public:
typedef iVector<CComplex,nbasis > CoarseSiteVector;
typedef Lattice<CComplex> CoarseScalar; // used for inner products on fine field
typedef Lattice<CoarseSiteVector> CoarseField;
typedef Lattice<Fobj> FineField;

void compress(std::vector<FineField> &basis,
std::vector<CoarseField> &compressed_evecs,
const std::vector<FineField> &evecs_in,
GridBase *FineGrid,
GridBase *CoarseGrid){
int nevecs = evecs_in.size();
assert(nevecs > nbasis);

//Construct the basis
basis.resize(nbasis, FineGrid);
for(int b=0;b<nbasis;b++) basis[b] = evecs_in[b];

//Block orthonormalise basis
CoarseScalar InnerProd(CoarseGrid);
std::cout << GridLogMessage <<" Gram-Schmidt pass 1"<<std::endl;
blockOrthogonalise(InnerProd,basis);
std::cout << GridLogMessage <<" Gram-Schmidt pass 2"<<std::endl;
blockOrthogonalise(InnerProd,basis);

//The coarse grid representation is the field of vectors of block inner products
std::cout << GridLogMessage << "Compressing eigenvectors" << std::endl;
compressed_evecs.resize(nevecs, CoarseGrid);
for(int i=0;i<nevecs;i++) blockProject(compressed_evecs[i], evecs_in[i], basis);
std::cout << GridLogMessage << "Compression complete" << std::endl;
}

void uncompress(FineField &evec, const int i, const std::vector<FineField> &basis, const std::vector<CoarseField> &compressed_evecs) const{
blockPromote(compressed_evecs[i],evec,basis);
}
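// Sketch of the compress/uncompress round trip: blockProject stores, for each
// evec v_i and each block B, the inner products c_i[b](B) = <basis_b, v_i>_B
// against the block-orthonormalised basis, and blockPromote reconstructs
//   v_i(x) ~= sum_b c_i[b](B(x)) * basis_b(x).
// This is exact (to roundoff) for i < nbasis, since those vectors blockwise
// span the basis, and an approximation, measured below, for the remaining vectors.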

//Test uncompressed eigenvectors of Linop.HermOp to precision 'base_tolerance' for i<nbasis and 'base_tolerance*relax' for i>=nbasis
//Because the uncompressed evec has a lot of high mode noise (unimportant for deflation) we apply a smoother before testing.
//The Chebyshev used by the Lanczos should be sufficient as a smoother
bool testCompression(LinearOperatorBase<FineField> &Linop, OperatorFunction<FineField> &smoother,
const std::vector<FineField> &basis, const std::vector<CoarseField> &compressed_evecs, const std::vector<RealD> &evals,
const RealD base_tolerance, const RealD relax){
std::cout << GridLogMessage << "Testing quality of uncompressed evecs (after smoothing)" << std::endl;

GridBase* FineGrid = basis[0].Grid();
GridBase* CoarseGrid = compressed_evecs[0].Grid();

bool fail = false;
FineField evec(FineGrid), Mevec(FineGrid), evec_sm(FineGrid);
for(int i=0;i<compressed_evecs.size();i++){
std::cout << GridLogMessage << "Uncompressing evec " << i << std::endl;
uncompress(evec, i, basis, compressed_evecs);

std::cout << GridLogMessage << "Smoothing evec " << i << std::endl;
smoother(Linop, evec, evec_sm);

std::cout << GridLogMessage << "Computing residual for evec " << i << std::endl;
std::cout << GridLogMessage << "Linop" << std::endl;
Linop.HermOp(evec_sm, Mevec);
std::cout << GridLogMessage << "Linalg" << std::endl;
Mevec = Mevec - evals[i]*evec_sm;

std::cout << GridLogMessage << "Resid" << std::endl;
RealD tol = base_tolerance * (i<nbasis ? 1. : relax);
RealD res = sqrt(norm2(Mevec));
std::cout << GridLogMessage << "Evec idx " << i << " res " << res << " tol " << tol << std::endl;
if(res > tol) fail = true;
}
return !fail; //return success: main() asserts on this value, so true must mean all vectors passed
}

//Compare uncompressed evecs to original evecs
void compareEvecs(const std::vector<FineField> &basis, const std::vector<CoarseField> &compressed_evecs, const std::vector<FineField> &orig_evecs){
std::cout << GridLogMessage << "Comparing uncompressed evecs to original evecs" << std::endl;

GridBase* FineGrid = basis[0].Grid();
GridBase* CoarseGrid = compressed_evecs[0].Grid();

FineField evec(FineGrid), diff(FineGrid);
for(int i=0;i<compressed_evecs.size();i++){
std::cout << GridLogMessage << "Uncompressing evec " << i << std::endl;
uncompress(evec, i, basis, compressed_evecs);
diff = orig_evecs[i] - evec;
RealD res = sqrt(norm2(diff));
std::cout << GridLogMessage << "Evec idx " << i << " res " << res << std::endl;
}
}

};

template<class Fobj,class CComplex,int nbasis>
void compareBlockPromoteTimings(const std::vector<Lattice<Fobj> > &basis, const std::vector<Lattice<iVector<CComplex,nbasis > > > &compressed_evecs){
typedef iVector<CComplex,nbasis > CoarseSiteVector;
typedef Lattice<CComplex> CoarseScalar;
typedef Lattice<CoarseSiteVector> CoarseField;
typedef Lattice<Fobj> FineField;

GridStopWatch timer;

GridBase* FineGrid = basis[0].Grid();
GridBase* CoarseGrid = compressed_evecs[0].Grid();

FineField v1(FineGrid), v2(FineGrid);

//Start with a cold start
for(int i=0;i<basis.size();i++){
autoView( b_ , basis[i], CpuWrite);
}
for(int i=0;i<compressed_evecs.size();i++){
autoView( b_ , compressed_evecs[i], CpuWrite);
}
{
autoView( b_, v1, CpuWrite );
}

timer.Start();
blockPromote(compressed_evecs[0],v1,basis);
timer.Stop();
std::cout << GridLogMessage << "Time for cold blockPromote v1 " << timer.Elapsed() << std::endl;

//Test to ensure it is actually doing a cold start by repeating
for(int i=0;i<basis.size();i++){
autoView( b_ , basis[i], CpuWrite);
}
for(int i=0;i<compressed_evecs.size();i++){
autoView( b_ , compressed_evecs[i], CpuWrite);
}
{
autoView( b_, v1, CpuWrite );
}

timer.Reset();
timer.Start();
blockPromote(compressed_evecs[0],v1,basis);
timer.Stop();
std::cout << GridLogMessage << "Time for cold blockPromote v1 repeat (should be the same as above) " << timer.Elapsed() << std::endl;
}
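// A note on the autoView(..., CpuWrite) passes above: the intent is to mark
// every field as freshly modified on the host, so the timed blockPromote must
// refetch its inputs rather than reuse device-resident copies. Re-dirtying the
// fields before the repeat is what should make the second "cold" timing match
// the first.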

struct Args{
int Ls;
RealD mass;
RealD M5;
bool is_cps_cfg;
RealD mobius_scale; //b+c

CPSLanczosParams fine;
double coarse_relax_tol;

std::vector<int> blockSize;
std::vector<int> GparityDirs;

bool write_fine;
std::string write_fine_file;
bool read_fine;
std::string read_fine_file;

int basis_size;

Args(){
blockSize = {2,2,2,2,2};
GparityDirs = {1,1,1}; //1 for each GP direction

Ls = 12;
mass = 0.01;
M5 = 1.8;
is_cps_cfg = false;
mobius_scale = 2;

fine.alpha = 2;
fine.beta = 0.1;
fine.ch_ord = 100;
fine.N_use = 70;
fine.N_get = 60;
fine.N_true_get = 60;
fine.stop_rsd = 1e-8;
fine.maxits = 10000;

coarse_relax_tol = 1e5;

write_fine = false;
read_fine = false;

basis_size = 100;
}
};

GparityWilsonImplD::ImplParams setupGparityParams(const std::vector<int> &GparityDirs){
//Setup G-parity BCs
assert(Nd == 4);
std::vector<int> dirs4(4);
for(int i=0;i<3;i++) dirs4[i] = GparityDirs[i];
dirs4[3] = 0; //periodic gauge BC in time

std::cout << GridLogMessage << "Gauge BCs: " << dirs4 << std::endl;
ConjugateGimplD::setDirections(dirs4); //gauge BC

GparityWilsonImplD::ImplParams Params;
for(int i=0;i<Nd-1;i++) Params.twists[i] = GparityDirs[i]; //G-parity directions
Params.twists[Nd-1] = 1; //APBC in time direction
std::cout << GridLogMessage << "Fermion BCs: " << Params.twists << std::endl;
return Params;
}

WilsonImplD::ImplParams setupParams(){
WilsonImplD::ImplParams Params;
Complex one(1.0);
Complex mone(-1.0);
for(int i=0;i<Nd-1;i++) Params.boundary_phases[i] = one;
Params.boundary_phases[Nd-1] = mone;
return Params;
}

template<int nbasis, typename ActionType>
void run_b(ActionType &action, const std::string &config, const Args &args){
//Fine grids
GridCartesian * UGrid = (GridCartesian*)action.GaugeGrid();
GridRedBlackCartesian * UrbGrid = (GridRedBlackCartesian*)action.GaugeRedBlackGrid();
GridCartesian * FGrid = (GridCartesian*)action.FermionGrid();
GridRedBlackCartesian * FrbGrid = (GridRedBlackCartesian*)action.FermionRedBlackGrid();

//Setup the coarse grids
auto fineLatt = GridDefaultLatt();
Coordinate coarseLatt(4);
for (int d=0;d<4;d++){
coarseLatt[d] = fineLatt[d]/args.blockSize[d]; assert(coarseLatt[d]*args.blockSize[d]==fineLatt[d]);
}

std::cout << GridLogMessage<< " 5d coarse lattice is ";
for (int i=0;i<4;i++){
std::cout << coarseLatt[i]<<"x";
}
int cLs = args.Ls/args.blockSize[4]; assert(cLs*args.blockSize[4]==args.Ls);
std::cout << cLs<<std::endl;

GridCartesian * CoarseGrid4 = SpaceTimeGrid::makeFourDimGrid(coarseLatt, GridDefaultSimd(Nd,vComplex::Nsimd()),GridDefaultMpi());
GridRedBlackCartesian * CoarseGrid4rb = SpaceTimeGrid::makeFourDimRedBlackGrid(CoarseGrid4);
GridCartesian * CoarseGrid5 = SpaceTimeGrid::makeFiveDimGrid(cLs,CoarseGrid4);
typedef vTComplex CComplex;
typedef iVector<CComplex,nbasis > CoarseSiteVector;
typedef Lattice<CComplex> CoarseScalar;
typedef Lattice<CoarseSiteVector> CoarseField;

typedef typename ActionType::FermionField FermionField;

SchurDiagTwoOperator<ActionType,FermionField> SchurOp(action);

typedef typename ActionType::SiteSpinor SiteSpinor;

const CPSLanczosParams &fine = args.fine;

//Do the fine Lanczos
std::vector<RealD> evals;
std::vector<FermionField> evecs;

if(args.read_fine){
evals.resize(fine.N_true_get);
evecs.resize(fine.N_true_get, FrbGrid);

std::string evals_file = args.read_fine_file + "_evals.xml";
std::string evecs_file = args.read_fine_file + "_evecs.scidac";

std::cout << GridLogIRL<< "Reading evals from "<<evals_file<<std::endl;
XmlReader RDx(evals_file);
read(RDx,"evals",evals);

assert(evals.size()==fine.N_true_get);

std::cout << GridLogIRL<< "Reading evecs from "<<evecs_file<<std::endl;
emptyUserRecord record;
Grid::ScidacReader RD ;
RD.open(evecs_file);
for(int k=0;k<fine.N_true_get;k++) {
evecs[k].Checkerboard()=Odd;
RD.readScidacFieldRecord(evecs[k],record);
}
RD.close();
}else{
int Nstop = fine.Nstop(); //==N_true_get
int Nm = fine.Nm();
int Nk = fine.Nk();
RealD resid = fine.stop_rsd;
int MaxIt = fine.maxits;

assert(nbasis<=Nm);
Chebyshev<FermionField> Cheby(fine.getChebyParams());
FunctionHermOp<FermionField> ChebyOp(Cheby,SchurOp);
PlainHermOp<FermionField> Op(SchurOp);

evals.resize(Nm);
evecs.resize(Nm,FrbGrid);

ImplicitlyRestartedLanczos<FermionField> IRL(ChebyOp,Op,Nstop,Nk,Nm,resid,MaxIt,0,0);

FermionField src(FrbGrid);
typedef typename FermionField::scalar_type Scalar;
src=Scalar(1.0);
src.Checkerboard() = Odd;

int Nconv;
IRL.calc(evals, evecs,src,Nconv,false);
if(Nconv < Nstop) assert(0 && "Fine Lanczos failed to converge the required number of evecs"); //algorithm doesn't consider this a failure
if(Nconv > Nstop){
//Yes this potentially throws away some evecs but it is better than having a random number of evecs between Nstop and Nm!
evals.resize(Nstop);
evecs.resize(Nstop, FrbGrid);
}

if(args.write_fine){
std::string evals_file = args.write_fine_file + "_evals.xml";
std::string evecs_file = args.write_fine_file + "_evecs.scidac";

std::cout << GridLogIRL<< "Writing evecs to "<<evecs_file<<std::endl;

emptyUserRecord record;
Grid::ScidacWriter WR(FrbGrid->IsBoss());
WR.open(evecs_file);
for(int k=0;k<evecs.size();k++) {
WR.writeScidacFieldRecord(evecs[k],record);
}
WR.close();

std::cout << GridLogIRL<< "Writing evals to "<<evals_file<<std::endl;

XmlWriter WRx(evals_file);
write(WRx,"evals",evals);
}
}

//Do the compression
LocalCoherenceCompressor<SiteSpinor,vTComplex,nbasis> compressor;
std::vector<FermionField> basis(nbasis,FrbGrid);
std::vector<CoarseField> compressed_evecs(evecs.size(),CoarseGrid5);

compressor.compress(basis, compressed_evecs, evecs, FrbGrid, CoarseGrid5);

compareBlockPromoteTimings(basis, compressed_evecs);

//Compare uncompressed and original evecs
compressor.compareEvecs(basis, compressed_evecs, evecs);

//Create the smoother
Chebyshev<FermionField> smoother(fine.getChebyParams());

//Test the quality of the uncompressed evecs
assert( compressor.testCompression(SchurOp, smoother, basis, compressed_evecs, evals, fine.stop_rsd, args.coarse_relax_tol) );
}

template<typename ActionType>
void run(ActionType &action, const std::string &config, const Args &args){
switch(args.basis_size){
case 50:
return run_b<50>(action,config,args);
case 100:
return run_b<100>(action,config,args);
case 150:
return run_b<150>(action,config,args);
case 200:
return run_b<200>(action,config,args);
case 250:
return run_b<250>(action,config,args);
default:
assert(0 && "Unsupported basis size: allowed values are 50,100,150,200,250");
}
}
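// nbasis is a compile-time template parameter of LocalCoherenceCompressor, so
// run() can only dispatch to the instantiations listed above. A hypothetical
// additional size would need its own case, e.g.
//   case 300:
//     return run_b<300>(action,config,args);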


//Note: because we rely upon physical properties we must use a "real" gauge configuration
int main (int argc, char ** argv) {
Grid_init(&argc,&argv);
GridLogIRL.TimingMode(1);

if(argc < 3){
std::cout << GridLogMessage << "Usage: <exe> <config file> <gparity dirs> <options>" << std::endl;
std::cout << GridLogMessage << "<gparity dirs> should have the format a.b.c where a,b,c are 0,1 depending on whether there are G-parity BCs in that direction" << std::endl;
std::cout << GridLogMessage << "Options:" << std::endl;
std::cout << GridLogMessage << "--Ls <value> : Set Ls (default 12)" << std::endl;
std::cout << GridLogMessage << "--mass <value> : Set the mass (default 0.01)" << std::endl;
std::cout << GridLogMessage << "--block <value> : Set the block size. Format should be a.b.c.d.e where a-e are the block extents (default 2.2.2.2.2)" << std::endl;
std::cout << GridLogMessage << "--is_cps_cfg : Indicate that the configuration was generated with CPS where until recently the stored plaquette was wrong by a factor of 2" << std::endl;
std::cout << GridLogMessage << "--write_irl_templ: Write a template for the parameters file of the Lanczos to \"irl_templ.xml\"" << std::endl;
std::cout << GridLogMessage << "--read_irl_fine <filename>: Real the parameters file for the fine Lanczos" << std::endl;
std::cout << GridLogMessage << "--write_fine <filename stub>: Write fine evecs/evals to filename starting with the stub" << std::endl;
std::cout << GridLogMessage << "--read_fine <filename stub>: Read fine evecs/evals from filename starting with the stub" << std::endl;
std::cout << GridLogMessage << "--coarse_relax_tol : Set the relaxation parameter for evaluating the residual of the reconstructed eigenvectors outside of the basis (default 1e5)" << std::endl;
std::cout << GridLogMessage << "--action : Set the action from 'DWF', 'Mobius' (default Mobius)" << std::endl;
std::cout << GridLogMessage << "--mobius_scale : Set the Mobius scale b+c (default 2)" << std::endl;
std::cout << GridLogMessage << "--basis_size : Set the basis size from 50,100,150,200,250 (default 100)" << std::endl;

Grid_finalize();
return 1;
}
std::string config = argv[1];

Args args;
GridCmdOptionIntVector(argv[2], args.GparityDirs);
assert(args.GparityDirs.size() == 3);

std::string action_s = "Mobius";

for(int i=3;i<argc;i++){
std::string sarg = argv[i];
if(sarg == "--Ls"){
args.Ls = std::stoi(argv[i+1]);
std::cout << GridLogMessage << "Set Ls to " << args.Ls << std::endl;
}else if(sarg == "--mass"){
std::istringstream ss(argv[i+1]); ss >> args.mass;
std::cout << GridLogMessage << "Set quark mass to " << args.mass << std::endl;
}else if(sarg == "--block"){
GridCmdOptionIntVector(argv[i+1], args.blockSize);
assert(args.blockSize.size() == 5);
std::cout << GridLogMessage << "Set block size to ";
for(int q=0;q<5;q++) std::cout << args.blockSize[q] << " ";
std::cout << std::endl;
}else if(sarg == "--is_cps_cfg"){
args.is_cps_cfg = true;
}else if(sarg == "--write_irl_templ"){
XmlWriter writer("irl_templ.xml");
write(writer,"Params",args.fine);
Grid_finalize();
return 0;
}else if(sarg == "--read_irl_fine"){
std::cout << GridLogMessage << "Reading fine IRL params from " << argv[i+1] << std::endl;
XmlReader reader(argv[i+1]);
read(reader, "Params", args.fine);
}else if(sarg == "--write_fine"){
args.write_fine = true;
args.write_fine_file = argv[i+1];
}else if(sarg == "--read_fine"){
args.read_fine = true;
args.read_fine_file = argv[i+1];
}else if(sarg == "--coarse_relax_tol"){
std::istringstream ss(argv[i+1]); ss >> args.coarse_relax_tol;
std::cout << GridLogMessage << "Set coarse IRL relaxation parameter to " << args.coarse_relax_tol << std::endl;
}else if(sarg == "--action"){
action_s = argv[i+1];
std::cout << "Action set to " << action_s << std::endl;
}else if(sarg == "--mobius_scale"){
std::istringstream ss(argv[i+1]); ss >> args.mobius_scale;
std::cout << GridLogMessage << "Set Mobius scale to " << args.mobius_scale << std::endl;
}else if(sarg == "--basis_size"){
args.basis_size = std::stoi(argv[i+1]);
std::cout << GridLogMessage << "Set basis size to " << args.basis_size << std::endl;
}
}

//Fine grids
GridCartesian * UGrid = SpaceTimeGrid::makeFourDimGrid(GridDefaultLatt(), GridDefaultSimd(Nd,vComplex::Nsimd()), GridDefaultMpi());
GridRedBlackCartesian * UrbGrid = SpaceTimeGrid::makeFourDimRedBlackGrid(UGrid);
GridCartesian * FGrid = SpaceTimeGrid::makeFiveDimGrid(args.Ls,UGrid);
GridRedBlackCartesian * FrbGrid = SpaceTimeGrid::makeFiveDimRedBlackGrid(args.Ls,UGrid);

LatticeGaugeField Umu(UGrid);

bool is_gparity = false;
for(auto g : args.GparityDirs) if(g) is_gparity = true;

double bmc = 1.;
double b = (args.mobius_scale + bmc)/2.; // b = 1/2 [ (b+c) + (b-c) ]
double c = (args.mobius_scale - bmc)/2.; // c = 1/2 [ (b+c) - (b-c) ]
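// With b-c fixed at 1, both Mobius coefficients follow from the single
// --mobius_scale input (b+c). For the default mobius_scale=2:
//   b = (2+1)/2 = 1.5,  c = (2-1)/2 = 0.5,
// while mobius_scale=1 reproduces the Shamir domain wall limit b=1, c=0.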

if(is_gparity){
GparityWilsonImplD::ImplParams Params = setupGparityParams(args.GparityDirs);
readConfiguration<ConjugateGimplD>(Umu, config, args.is_cps_cfg); //Read the gauge field

if(action_s == "DWF"){
GparityDomainWallFermionD action(Umu, *FGrid, *FrbGrid, *UGrid, *UrbGrid, args.mass, args.M5, Params);
run(action, config, args);
}else if(action_s == "Mobius"){
GparityMobiusFermionD action(Umu, *FGrid, *FrbGrid, *UGrid, *UrbGrid, args.mass, args.M5, b, c, Params);
run(action, config, args);
}
}else{
WilsonImplD::ImplParams Params = setupParams();
readConfiguration<PeriodicGimplD>(Umu, config, args.is_cps_cfg); //Read the gauge field

if(action_s == "DWF"){
DomainWallFermionD action(Umu, *FGrid, *FrbGrid, *UGrid, *UrbGrid, args.mass, args.M5, Params);
run(action, config, args);
}else if(action_s == "Mobius"){
MobiusFermionD action(Umu, *FGrid, *FrbGrid, *UGrid, *UrbGrid, args.mass, args.M5, b, c, Params);
run(action, config, args);
}
}

Grid_finalize();
}
@ -1,125 +0,0 @@
/*************************************************************************************

Grid physics library, www.github.com/paboyle/Grid

Source file: ./tests/solver/Test_eofa_inv.cc

Copyright (C) 2017

Author: Christopher Kelly <ckelly@bnl.gov>
Author: Peter Boyle <paboyle@ph.ed.ac.uk>
Author: David Murphy <dmurphy@phys.columbia.edu>

This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.

This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.

See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */

#include <Grid/Grid.h>

using namespace std;
using namespace Grid;

int main (int argc, char** argv)
{
Grid_init(&argc, &argv);

Coordinate latt_size = GridDefaultLatt();
Coordinate simd_layout = GridDefaultSimd(Nd,vComplex::Nsimd());
Coordinate mpi_layout = GridDefaultMpi();

const int Ls = 8;

GridCartesian *UGrid = SpaceTimeGrid::makeFourDimGrid(GridDefaultLatt(), GridDefaultSimd(Nd,vComplex::Nsimd()), GridDefaultMpi());
GridRedBlackCartesian *UrbGrid = SpaceTimeGrid::makeFourDimRedBlackGrid(UGrid);
GridCartesian *FGrid = SpaceTimeGrid::makeFiveDimGrid(Ls, UGrid);
GridRedBlackCartesian *FrbGrid = SpaceTimeGrid::makeFiveDimRedBlackGrid(Ls, UGrid);

// Want a different conf at every run
// First create an instance of an engine.
std::random_device rnd_device;
// Specify the engine and distribution.
std::mt19937 mersenne_engine(rnd_device());
std::uniform_int_distribution<int> dist(1, 100);

auto gen = std::bind(dist, mersenne_engine);
std::vector<int> seeds4(4);
generate(begin(seeds4), end(seeds4), gen);

//std::vector<int> seeds4({1,2,3,5});
std::vector<int> seeds5({5,6,7,8});
GridParallelRNG RNG5(FGrid); RNG5.SeedFixedIntegers(seeds5);
GridParallelRNG RNG4(UGrid); RNG4.SeedFixedIntegers(seeds4);

int threads = GridThread::GetThreads();
std::cout << GridLogMessage << "Grid is setup to use " << threads << " threads" << std::endl;

LatticeFermion phi (FGrid); gaussian(RNG5, phi);
LatticeFermion Mphi (FGrid);
LatticeFermion MphiPrime (FGrid);

LatticeGaugeField U(UGrid);
SU<Nc>::HotConfiguration(RNG4,U);

////////////////////////////////////
// Unmodified matrix element
////////////////////////////////////
RealD b = 2.5;
RealD c = 1.5;
RealD mf = 0.01;
RealD mb = 1.0;
RealD M5 = 1.8;
MobiusEOFAFermionR Lop(U, *FGrid, *FrbGrid, *UGrid, *UrbGrid, mf, mf, mb, 0.0, -1, M5, b, c);
MobiusEOFAFermionR Rop(U, *FGrid, *FrbGrid, *UGrid, *UrbGrid, mb, mf, mb, -1.0, 1, M5, b, c);
OneFlavourRationalParams Params(0.95, 100.0, 5000, 1.0e-10, 12);
ConjugateGradient<LatticeFermion> CG(1.0e-10, 5000);
ExactOneFlavourRatioPseudoFermionAction<WilsonImplR> Meofa(Lop, Rop, CG, CG, CG, CG, CG, Params, false);

GridSerialRNG sRNG; sRNG.SeedFixedIntegers(seeds4);

//Random field
LatticeFermion eta(FGrid);
gaussian(RNG5,eta);

//Check left inverse
LatticeFermion Meta(FGrid);
Meofa.Meofa(U, eta, Meta);

LatticeFermion MinvMeta(FGrid);
Meofa.MeofaInv(U, Meta, MinvMeta);

LatticeFermion diff = MinvMeta - eta;

std::cout << GridLogMessage << "eta: " << norm2(eta) << " M*eta: " << norm2(Meta) << " M^{-1}*M*eta: " << norm2(MinvMeta) << " M^{-1}*M*eta - eta: " << norm2(diff) << " (expect 0)" << std::endl;
assert(norm2(diff) < 1e-8);

//Check right inverse
LatticeFermion MinvEta(FGrid);
Meofa.MeofaInv(U, eta, MinvEta);

LatticeFermion MMinvEta(FGrid);
Meofa.Meofa(U, MinvEta, MMinvEta);

diff = MMinvEta - eta;

std::cout << GridLogMessage << "eta: " << norm2(eta) << " M^{-1}*eta: " << norm2(MinvEta) << " M*M^{-1}*eta: " << norm2(MMinvEta) << " M*M^{-1}*eta - eta: " << norm2(diff) << " (expect 0)" << std::endl;
assert(norm2(diff) < 1e-8);

std::cout << GridLogMessage << "Done" << std::endl;
Grid_finalize();
}