mirror of https://github.com/paboyle/Grid.git synced 2025-06-21 09:12:03 +01:00

Compare commits


53 Commits

Author SHA1 Message Date
80359e0d49 Bland SYCL compile 2023-09-26 13:20:27 -07:00
3d437c5cc4 Making SYCL happy 2023-09-26 13:19:42 -07:00
b8a7004365 Partial fraction test 2023-08-14 15:17:03 -04:00
994512048e Merge pull request #439 from felixerben/bugfix/IRL_convergence
Bugfix/irl convergence
2023-07-12 16:32:26 -04:00
78bae9417c returning Nstop vectors even if not all meet true convergence criterion 2023-06-27 14:38:19 +01:00
dd170ead01 whitespace 2023-06-27 11:37:01 +01:00
014704856f do one more iteration if not all vectors converged 2023-06-27 11:33:30 +01:00
ee92e08edb Merge pull request #435 from fjosw/fix/warnings_in_WilsonKernelsImplementation
Unused variable in WilsonKernelsImplementation
2023-06-23 11:47:19 -04:00
c1dcee9328 Merge pull request #437 from fjosw/fix/stencil_debug
Added GridLogDebug to BuildSurfaceList debug message
2023-06-23 11:47:00 -04:00
6b150961fe Better script 2023-06-23 18:09:25 +03:00
5bafcaedfa Merge branch 'develop' of https://github.com/paboyle/Grid into develop 2023-06-22 19:59:45 +03:00
bfeceae708 FTHMC 2023-06-22 12:58:18 -04:00
eacb66591f Config command 2023-06-22 19:56:40 +03:00
fadaa85626 Update 2023-06-22 19:56:27 +03:00
02a5b0d786 Updating run during testing 2023-06-22 19:52:46 +03:00
0e2141442a Dennis says broken 2023-06-22 19:19:51 +03:00
769eb0eecb Precision coverage 2023-06-22 19:19:20 +03:00
85e35c4da1 fix: added GridLogDebug to BuildSurfaceList debug message. 2023-06-16 10:31:16 +01:00
d72e914cf0 Profiling temporary code until optimised 2023-06-15 10:43:04 -04:00
3b5254e2d5 Optional checkpoint smeared configs for FTHMC 2023-06-15 10:43:04 -04:00
f1c358b596 Additional tests 2023-06-15 10:43:04 -04:00
c0ef210265 Hot start should be properly Hot 2023-06-15 10:43:04 -04:00
e3e1cc1962 Ta project 2023-06-15 10:43:04 -04:00
723eadbb5c Keep methods virtual 2023-06-15 10:43:04 -04:00
e24637ec1e Clean up 2023-06-15 10:43:04 -04:00
8b01ff4ce7 Integrator over to smeared force structure 2023-06-15 10:43:04 -04:00
588197c487 Smeared action virtual class 2023-06-15 10:43:04 -04:00
1352bad2e4 Sunspot compile 2023-06-15 11:22:46 +00:00
ffd7301649 Updated masked / fthmc smeared config container 2023-06-01 06:23:02 -04:00
d2a8494044 Merge branch 'develop' of https://github.com/paboyle/Grid into develop 2023-06-01 06:22:33 -04:00
0982e0d19b Jacobian action wrapper for FTHMC 2023-06-01 06:15:08 -04:00
3badbfc3c1 Refactor the Action and Smeared gauge configuration containers. Add first pass at FTHMC action 2023-06-01 06:14:28 -04:00
5465961e30 New test for FTHMC portion 2023-06-01 06:14:04 -04:00
477b794bc5 fix: unused variable removed. 2023-05-29 14:08:53 +01:00
4835fd1a87 HIP stream synch 2023-05-27 17:58:22 +03:00
6533c25814 Lumi 2023-05-27 16:13:32 +03:00
1b2914ec09 FT-HMC smearing, derivative chain rule, log det and force first pass. 2023-05-22 10:21:37 -04:00
519f795066 Header not liked by gcc on mac? puzzling 2023-05-22 10:21:12 -04:00
4240ad5ca8 Preparing for FTHMC 2023-05-19 21:21:55 -04:00
d418347d86 public for convenience to see rho params 2023-05-19 21:21:05 -04:00
29a4bfe5e5 Clean up 2023-05-19 21:20:45 -04:00
9955bf9daf Regresses to Qlat 2023-05-19 17:32:13 -04:00
876c8f4478 Nodes on padded cell 2023-05-11 12:35:49 -04:00
9c8750f261 Merge branch 'develop' of https://github.com/paboyle/Grid into develop 2023-05-11 12:29:09 -04:00
91efd08179 Option for Qlat generator basis 2023-05-11 12:27:45 -04:00
9953511b65 Mac compile 2023-05-11 12:27:29 -04:00
025fa9991a For FTHMC 2023-05-11 12:26:14 -04:00
e8c60c355b Padded cell code 2023-05-11 12:25:50 -04:00
6c9c7f9d85 Permute fix 2023-05-11 12:24:21 -04:00
f534523ede Debug 2023-05-11 12:23:11 -04:00
1b8a834beb Debug 2023-05-11 12:22:24 -04:00
ccd21f96ff Plaquette agreeing and moving to final form (slowly) need to optimise 2023-02-01 22:57:44 -05:00
4b90cb8888 First cut passes combining padded cell with general stencil towards fast plaquette and staggered force 2023-02-01 22:14:10 -05:00
48 changed files with 2757 additions and 167 deletions

View File

@ -419,14 +419,15 @@ until convergence
}
}
if ( Nconv < Nstop )
if ( Nconv < Nstop ) {
std::cout << GridLogIRL << "Nconv ("<<Nconv<<") < Nstop ("<<Nstop<<")"<<std::endl;
std::cout << GridLogIRL << "returning Nstop vectors, the last "<< Nstop-Nconv << "of which might meet convergence criterion only approximately" <<std::endl;
}
eval=eval2;
//Keep only converged
eval.resize(Nconv);// Nstop?
evec.resize(Nconv,grid);// Nstop?
eval.resize(Nstop);// was Nconv
evec.resize(Nstop,grid);// was Nconv
basisSortInPlace(evec,eval,reverse);
}
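The hunk above changes the Lanczos termination path: eval and evec are now resized to Nstop rather than Nconv, so callers always receive Nstop eigenpairs even when Nconv < Nstop, with the last Nstop-Nconv pairs only approximately converged (as the new log message states). A minimal caller-side sketch of how the returned spectrum might be inspected; the report helper and its loop are illustrative and not part of Grid, while Nstop/Nconv/eval follow the solver's own naming:

#include <iostream>
#include <vector>

// Sketch only: the first Nconv pairs met the residual test strictly; the
// trailing Nstop-Nconv pairs are returned anyway and flagged as approximate.
void report(const std::vector<double> &eval, int Nconv, int Nstop)
{
  for (int i = 0; i < Nstop; i++) {
    bool strict = (i < Nconv);
    std::cout << "eval[" << i << "] = " << eval[i]
              << (strict ? "" : "   (approximate)") << std::endl;
  }
}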

View File

@ -27,7 +27,7 @@ Author: Christoph Lehner <christoph@lhnr.de>
*************************************************************************************/
/* END LEGAL */
#define header "SharedMemoryMpi: "
#define Mheader "SharedMemoryMpi: "
#include <Grid/GridCore.h>
#include <pwd.h>
@ -174,8 +174,8 @@ void GlobalSharedMemory::Init(Grid_MPI_Comm comm)
MPI_Comm_size(WorldShmComm ,&WorldShmSize);
if ( WorldRank == 0) {
std::cout << header " World communicator of size " <<WorldSize << std::endl;
std::cout << header " Node communicator of size " <<WorldShmSize << std::endl;
std::cout << Mheader " World communicator of size " <<WorldSize << std::endl;
std::cout << Mheader " Node communicator of size " <<WorldShmSize << std::endl;
}
// WorldShmComm, WorldShmSize, WorldShmRank
@ -452,7 +452,7 @@ void GlobalSharedMemory::OptimalCommunicatorSharedMemory(const Coordinate &proce
#ifdef GRID_MPI3_SHMGET
void GlobalSharedMemory::SharedMemoryAllocate(uint64_t bytes, int flags)
{
std::cout << header "SharedMemoryAllocate "<< bytes<< " shmget implementation "<<std::endl;
std::cout << Mheader "SharedMemoryAllocate "<< bytes<< " shmget implementation "<<std::endl;
assert(_ShmSetup==1);
assert(_ShmAlloc==0);
@ -537,7 +537,7 @@ void GlobalSharedMemory::SharedMemoryAllocate(uint64_t bytes, int flags)
exit(EXIT_FAILURE);
}
std::cout << WorldRank << header " SharedMemoryMPI.cc acceleratorAllocDevice "<< bytes
std::cout << WorldRank << Mheader " SharedMemoryMPI.cc acceleratorAllocDevice "<< bytes
<< "bytes at "<< std::hex<< ShmCommBuf <<std::dec<<" for comms buffers " <<std::endl;
SharedMemoryZero(ShmCommBuf,bytes);
@ -580,7 +580,7 @@ void GlobalSharedMemory::SharedMemoryAllocate(uint64_t bytes, int flags)
exit(EXIT_FAILURE);
}
if ( WorldRank == 0 ){
std::cout << WorldRank << header " SharedMemoryMPI.cc acceleratorAllocDevice "<< bytes
std::cout << WorldRank << Mheader " SharedMemoryMPI.cc acceleratorAllocDevice "<< bytes
<< "bytes at "<< std::hex<< ShmCommBuf << " - "<<(bytes-1+(uint64_t)ShmCommBuf) <<std::dec<<" for comms buffers " <<std::endl;
}
SharedMemoryZero(ShmCommBuf,bytes);
@ -604,8 +604,8 @@ void GlobalSharedMemory::SharedMemoryAllocate(uint64_t bytes, int flags)
#ifdef GRID_SYCL_LEVEL_ZERO_IPC
typedef struct { int fd; pid_t pid ; ze_ipc_mem_handle_t ze; } clone_mem_t;
auto zeDevice = cl::sycl::get_native<cl::sycl::backend::level_zero>(theGridAccelerator->get_device());
auto zeContext = cl::sycl::get_native<cl::sycl::backend::level_zero>(theGridAccelerator->get_context());
auto zeDevice = cl::sycl::get_native<cl::sycl::backend::ext_oneapi_level_zero>(theGridAccelerator->get_device());
auto zeContext = cl::sycl::get_native<cl::sycl::backend::ext_oneapi_level_zero>(theGridAccelerator->get_context());
ze_ipc_mem_handle_t ihandle;
clone_mem_t handle;
@ -744,7 +744,7 @@ void GlobalSharedMemory::SharedMemoryAllocate(uint64_t bytes, int flags)
#ifdef GRID_MPI3_SHMMMAP
void GlobalSharedMemory::SharedMemoryAllocate(uint64_t bytes, int flags)
{
std::cout << header "SharedMemoryAllocate "<< bytes<< " MMAP implementation "<< GRID_SHM_PATH <<std::endl;
std::cout << Mheader "SharedMemoryAllocate "<< bytes<< " MMAP implementation "<< GRID_SHM_PATH <<std::endl;
assert(_ShmSetup==1);
assert(_ShmAlloc==0);
//////////////////////////////////////////////////////////////////////////////////////////////////////////
@ -781,7 +781,7 @@ void GlobalSharedMemory::SharedMemoryAllocate(uint64_t bytes, int flags)
assert(((uint64_t)ptr&0x3F)==0);
close(fd);
WorldShmCommBufs[r] =ptr;
// std::cout << header "Set WorldShmCommBufs["<<r<<"]="<<ptr<< "("<< bytes<< "bytes)"<<std::endl;
// std::cout << Mheader "Set WorldShmCommBufs["<<r<<"]="<<ptr<< "("<< bytes<< "bytes)"<<std::endl;
}
_ShmAlloc=1;
_ShmAllocBytes = bytes;
@ -791,7 +791,7 @@ void GlobalSharedMemory::SharedMemoryAllocate(uint64_t bytes, int flags)
#ifdef GRID_MPI3_SHM_NONE
void GlobalSharedMemory::SharedMemoryAllocate(uint64_t bytes, int flags)
{
std::cout << header "SharedMemoryAllocate "<< bytes<< " MMAP anonymous implementation "<<std::endl;
std::cout << Mheader "SharedMemoryAllocate "<< bytes<< " MMAP anonymous implementation "<<std::endl;
assert(_ShmSetup==1);
assert(_ShmAlloc==0);
//////////////////////////////////////////////////////////////////////////////////////////////////////////
@ -838,7 +838,7 @@ void GlobalSharedMemory::SharedMemoryAllocate(uint64_t bytes, int flags)
////////////////////////////////////////////////////////////////////////////////////////////
void GlobalSharedMemory::SharedMemoryAllocate(uint64_t bytes, int flags)
{
std::cout << header "SharedMemoryAllocate "<< bytes<< " SHMOPEN implementation "<<std::endl;
std::cout << Mheader "SharedMemoryAllocate "<< bytes<< " SHMOPEN implementation "<<std::endl;
assert(_ShmSetup==1);
assert(_ShmAlloc==0);
MPI_Barrier(WorldShmComm);
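Besides the header-to-Mheader rename, the functional change in this file is the Level Zero interop namespace: newer oneAPI/DPC++ releases spell the backend enumerator ext_oneapi_level_zero. A minimal sketch of the pattern, assuming a DPC++ toolchain with Level Zero headers available; the function name native_handles and the queue argument are illustrative:

#include <CL/sycl.hpp>
#include <level_zero/ze_api.h>

void native_handles(cl::sycl::queue &q)
{
  // Native Level Zero device/context handles, as used above for IPC buffer sharing
  auto zeDevice  = cl::sycl::get_native<cl::sycl::backend::ext_oneapi_level_zero>(q.get_device());
  auto zeContext = cl::sycl::get_native<cl::sycl::backend::ext_oneapi_level_zero>(q.get_context());
  (void)zeDevice;
  (void)zeContext;
}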

View File

@ -707,9 +707,9 @@ void localCopyRegion(const Lattice<vobj> &From,Lattice<vobj> & To,Coordinate Fro
Coordinate ist = Tg->_istride;
Coordinate ost = Tg->_ostride;
autoView( t_v , To, AcceleratorWrite);
autoView( f_v , From, AcceleratorRead);
accelerator_for(idx,Fg->lSites(),1,{
autoView( t_v , To, CpuWrite);
autoView( f_v , From, CpuRead);
thread_for(idx,Fg->lSites(),{
sobj s;
Coordinate Fcoor(nd);
Coordinate Tcoor(nd);
@ -722,15 +722,20 @@ void localCopyRegion(const Lattice<vobj> &From,Lattice<vobj> & To,Coordinate Fro
Tcoor[d] = ToLowerLeft[d]+ Fcoor[d]-FromLowerLeft[d];
}
if (in_region) {
Integer idx_f = 0; for(int d=0;d<nd;d++) idx_f+=isf[d]*(Fcoor[d]/rdf[d]);
Integer idx_t = 0; for(int d=0;d<nd;d++) idx_t+=ist[d]*(Tcoor[d]/rdt[d]);
Integer odx_f = 0; for(int d=0;d<nd;d++) odx_f+=osf[d]*(Fcoor[d]%rdf[d]);
Integer odx_t = 0; for(int d=0;d<nd;d++) odx_t+=ost[d]*(Tcoor[d]%rdt[d]);
vector_type * fp = (vector_type *)&f_v[odx_f];
vector_type * tp = (vector_type *)&t_v[odx_t];
#if 0
Integer idx_f = 0; for(int d=0;d<nd;d++) idx_f+=isf[d]*(Fcoor[d]/rdf[d]); // inner index from
Integer idx_t = 0; for(int d=0;d<nd;d++) idx_t+=ist[d]*(Tcoor[d]/rdt[d]); // inner index to
Integer odx_f = 0; for(int d=0;d<nd;d++) odx_f+=osf[d]*(Fcoor[d]%rdf[d]); // outer index from
Integer odx_t = 0; for(int d=0;d<nd;d++) odx_t+=ost[d]*(Tcoor[d]%rdt[d]); // outer index to
scalar_type * fp = (scalar_type *)&f_v[odx_f];
scalar_type * tp = (scalar_type *)&t_v[odx_t];
for(int w=0;w<words;w++){
tp[w].putlane(fp[w].getlane(idx_f),idx_t);
}
#else
peekLocalSite(s,f_v,Fcoor);
pokeLocalSite(s,t_v,Tcoor);
#endif
}
});
}
@ -841,9 +846,9 @@ void InsertSliceLocal(const Lattice<vobj> &lowDim, Lattice<vobj> & higherDim,int
for(int d=0;d<nh;d++){
if ( d!=orthog ) {
assert(lg->_processors[d] == hg->_processors[d]);
assert(lg->_ldimensions[d] == hg->_ldimensions[d]);
}
assert(lg->_processors[d] == hg->_processors[d]);
assert(lg->_ldimensions[d] == hg->_ldimensions[d]);
}
}
// the above should guarantee that the operations are local
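For reference, the disabled (#if 0) branch above addresses sites through Grid's split indexing: each local coordinate decomposes into an outer (vectorised-site) part and an inner (SIMD-lane) part using the reduced dimensions and the two stride tables. A standalone sketch of that decomposition, with rdim/istride/ostride standing in for _rdimensions/_istride/_ostride; the helper and its names are illustrative:

#include <cstdint>
#include <vector>

static void site_indices(const std::vector<int> &coor,
                         const std::vector<int> &rdim,     // reduced dimensions
                         const std::vector<int> &istride,  // inner (SIMD) strides
                         const std::vector<int> &ostride,  // outer strides
                         uint64_t &inner, uint64_t &outer)
{
  inner = 0; outer = 0;
  for (size_t d = 0; d < coor.size(); d++) {
    inner += istride[d] * (coor[d] / rdim[d]); // SIMD lane, cf. idx_f/idx_t above
    outer += ostride[d] * (coor[d] % rdim[d]); // vectorised site, cf. odx_f/odx_t above
  }
}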

Grid/lattice/PaddedCell.h (new file, 136 lines)
View File

@ -0,0 +1,136 @@
/*************************************************************************************
Grid physics library, www.github.com/paboyle/Grid
Source file: ./lib/lattice/PaddedCell.h
Copyright (C) 2019
Author: Peter Boyle pboyle@bnl.gov
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */
#pragma once
NAMESPACE_BEGIN(Grid);
class PaddedCell {
public:
GridCartesian * unpadded_grid;
int dims;
int depth;
std::vector<GridCartesian *> grids;
~PaddedCell()
{
DeleteGrids();
}
PaddedCell(int _depth,GridCartesian *_grid)
{
unpadded_grid = _grid;
depth=_depth;
dims=_grid->Nd();
AllocateGrids();
Coordinate local =unpadded_grid->LocalDimensions();
for(int d=0;d<dims;d++){
assert(local[d]>=depth);
}
}
void DeleteGrids(void)
{
for(int d=0;d<grids.size();d++){
delete grids[d];
}
grids.resize(0);
};
void AllocateGrids(void)
{
Coordinate local =unpadded_grid->LocalDimensions();
Coordinate simd =unpadded_grid->_simd_layout;
Coordinate processors=unpadded_grid->_processors;
Coordinate plocal =unpadded_grid->LocalDimensions();
Coordinate global(dims);
// expand up one dim at a time
for(int d=0;d<dims;d++){
plocal[d] += 2*depth;
for(int d=0;d<dims;d++){
global[d] = plocal[d]*processors[d];
}
grids.push_back(new GridCartesian(global,simd,processors));
}
};
template<class vobj>
inline Lattice<vobj> Extract(Lattice<vobj> &in)
{
Lattice<vobj> out(unpadded_grid);
Coordinate local =unpadded_grid->LocalDimensions();
Coordinate fll(dims,depth); // depends on the MPI spread
Coordinate tll(dims,0); // depends on the MPI spread
localCopyRegion(in,out,fll,tll,local);
return out;
}
template<class vobj>
inline Lattice<vobj> Exchange(Lattice<vobj> &in)
{
GridBase *old_grid = in.Grid();
int dims = old_grid->Nd();
Lattice<vobj> tmp = in;
for(int d=0;d<dims;d++){
tmp = Expand(d,tmp); // rvalue && assignment
}
return tmp;
}
// expand up one dim at a time
template<class vobj>
inline Lattice<vobj> Expand(int dim,Lattice<vobj> &in)
{
GridBase *old_grid = in.Grid();
GridCartesian *new_grid = grids[dim];//These are new grids
Lattice<vobj> padded(new_grid);
Lattice<vobj> shifted(old_grid);
Coordinate local =old_grid->LocalDimensions();
Coordinate plocal =new_grid->LocalDimensions();
if(dim==0) conformable(old_grid,unpadded_grid);
else conformable(old_grid,grids[dim-1]);
std::cout << " dim "<<dim<<" local "<<local << " padding to "<<plocal<<std::endl;
// Middle bit
for(int x=0;x<local[dim];x++){
InsertSliceLocal(in,padded,x,depth+x,dim);
}
// High bit
shifted = Cshift(in,dim,depth);
for(int x=0;x<depth;x++){
InsertSliceLocal(shifted,padded,local[dim]-depth+x,depth+local[dim]+x,dim);
}
// Low bit
shifted = Cshift(in,dim,-depth);
for(int x=0;x<depth;x++){
InsertSliceLocal(shifted,padded,x,x,dim);
}
return padded;
}
};
NAMESPACE_END(Grid);
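A hedged usage sketch for the new PaddedCell helper, built only from the methods declared above; the surrounding boilerplate (Grid_init, SpaceTimeGrid::makeFourDimGrid, the GridDefault* helpers, LatticeColourMatrix) is the usual Grid test-harness idiom and is assumed rather than taken from this diff:

#include <Grid/Grid.h>
using namespace Grid;

int main(int argc, char **argv)
{
  Grid_init(&argc, &argv);
  GridCartesian *UGrid = SpaceTimeGrid::makeFourDimGrid(GridDefaultLatt(),
                                                        GridDefaultSimd(Nd, vComplex::Nsimd()),
                                                        GridDefaultMpi());
  LatticeColourMatrix U(UGrid);                    // any lattice object
  U = Zero();                                      // content irrelevant for the sketch

  int depth = 1;                                   // ghost-zone width; local extent must be >= depth
  PaddedCell Ghost(depth, UGrid);                  // allocates the enlarged grids
  LatticeColourMatrix Upad  = Ghost.Exchange(U);   // pad by 'depth' in every dimension via Cshift
  LatticeColourMatrix Uback = Ghost.Extract(Upad); // copy the interior region back out

  Grid_finalize();
}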

View File

@ -104,6 +104,7 @@ template<typename vtype> using iSpinMatrix = iScalar<iMatrix<iSca
template<typename vtype> using iColourMatrix = iScalar<iScalar<iMatrix<vtype, Nc> > > ;
template<typename vtype> using iSpinColourMatrix = iScalar<iMatrix<iMatrix<vtype, Nc>, Ns> >;
template<typename vtype> using iLorentzColourMatrix = iVector<iScalar<iMatrix<vtype, Nc> >, Nd > ;
template<typename vtype> using iLorentzComplex = iVector<iScalar<iScalar<vtype> >, Nd > ;
template<typename vtype> using iDoubleStoredColourMatrix = iVector<iScalar<iMatrix<vtype, Nc> >, Nds > ;
template<typename vtype> using iSpinVector = iScalar<iVector<iScalar<vtype>, Ns> >;
template<typename vtype> using iColourVector = iScalar<iScalar<iVector<vtype, Nc> > >;
@ -178,6 +179,15 @@ typedef iLorentzColourMatrix<vComplexF> vLorentzColourMatrixF;
typedef iLorentzColourMatrix<vComplexD> vLorentzColourMatrixD;
typedef iLorentzColourMatrix<vComplexD2> vLorentzColourMatrixD2;
// LorentzComplex
typedef iLorentzComplex<Complex > LorentzComplex;
typedef iLorentzComplex<ComplexF > LorentzComplexF;
typedef iLorentzComplex<ComplexD > LorentzComplexD;
typedef iLorentzComplex<vComplex > vLorentzComplex;
typedef iLorentzComplex<vComplexF> vLorentzComplexF;
typedef iLorentzComplex<vComplexD> vLorentzComplexD;
// DoubleStored gauge field
typedef iDoubleStoredColourMatrix<Complex > DoubleStoredColourMatrix;
typedef iDoubleStoredColourMatrix<ComplexF > DoubleStoredColourMatrixF;
@ -307,6 +317,10 @@ typedef Lattice<vLorentzColourMatrixF> LatticeLorentzColourMatrixF;
typedef Lattice<vLorentzColourMatrixD> LatticeLorentzColourMatrixD;
typedef Lattice<vLorentzColourMatrixD2> LatticeLorentzColourMatrixD2;
typedef Lattice<vLorentzComplex> LatticeLorentzComplex;
typedef Lattice<vLorentzComplexF> LatticeLorentzComplexF;
typedef Lattice<vLorentzComplexD> LatticeLorentzComplexD;
// DoubleStored gauge field
typedef Lattice<vDoubleStoredColourMatrix> LatticeDoubleStoredColourMatrix;
typedef Lattice<vDoubleStoredColourMatrixF> LatticeDoubleStoredColourMatrixF;
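The new iLorentzComplex family carries one complex scalar per Lorentz direction; it is used further down by SmearedConfigurationMasked to hold the per-direction checkerboard masks. A minimal sketch of building such an object (make_mask and its arguments are illustrative; the Peek/PokeIndex idiom is the one used in the masked constructor below):

#include <Grid/Grid.h>
using namespace Grid;

// Select one Lorentz direction: unit complex field in direction mu, zero elsewhere.
void make_mask(LatticeLorentzComplex &mask, GridCartesian *UGrid, int mu)
{
  LatticeComplex one(UGrid);
  one = ComplexD(1.0, 0.0);
  mask = Zero();
  PokeIndex<LorentzIndex>(mask, one, mu);
}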

View File

@ -34,10 +34,24 @@ directory
NAMESPACE_BEGIN(Grid);
///////////////////////////////////
// Smart configuration base class
///////////////////////////////////
template< class Field >
class ConfigurationBase
{
public:
ConfigurationBase() {}
virtual ~ConfigurationBase() {}
virtual void set_Field(Field& U) =0;
virtual void smeared_force(Field&) = 0;
virtual Field& get_SmearedU() =0;
virtual Field &get_U(bool smeared = false) = 0;
};
template <class GaugeField >
class Action
{
public:
bool is_smeared = false;
RealD deriv_norm_sum;
@ -77,11 +91,39 @@ public:
void refresh_timer_stop(void) { refresh_us+=usecond(); }
void S_timer_start(void) { S_us-=usecond(); }
void S_timer_stop(void) { S_us+=usecond(); }
/////////////////////////////
// Heatbath?
/////////////////////////////
virtual void refresh(const GaugeField& U, GridSerialRNG &sRNG, GridParallelRNG& pRNG) = 0; // refresh pseudofermions
virtual RealD S(const GaugeField& U) = 0; // evaluate the action
virtual RealD Sinitial(const GaugeField& U) { return this->S(U); } ; // if the refresh computes the action, can cache it. Alternately refreshAndAction() ?
virtual void deriv(const GaugeField& U, GaugeField& dSdU) = 0; // evaluate the action derivative
/////////////////////////////////////////////////////////////
// virtual smeared interface through configuration container
/////////////////////////////////////////////////////////////
virtual void refresh(ConfigurationBase<GaugeField> & U, GridSerialRNG &sRNG, GridParallelRNG& pRNG)
{
refresh(U.get_U(is_smeared),sRNG,pRNG);
}
virtual RealD S(ConfigurationBase<GaugeField>& U)
{
return S(U.get_U(is_smeared));
}
virtual RealD Sinitial(ConfigurationBase<GaugeField>& U)
{
return Sinitial(U.get_U(is_smeared));
}
virtual void deriv(ConfigurationBase<GaugeField>& U, GaugeField& dSdU)
{
deriv(U.get_U(is_smeared),dSdU);
if ( is_smeared ) {
U.smeared_force(dSdU);
}
}
///////////////////////////////
// Logging
///////////////////////////////
virtual std::string action_name() = 0; // return the action name
virtual std::string LogParameters() = 0; // prints action parameters
virtual ~Action(){}
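The new container-based entry points above form a double-dispatch pattern: the action receives the ConfigurationBase container, pulls out the thin or smeared links according to is_smeared, and for derivatives pushes the force back through the smearing chain rule. A toy standalone sketch of the same pattern; Field, Container, ToyAction and the factor-of-two "chain rule" are invented purely for illustration:

#include <iostream>

struct Field { double f = 0.0; };

struct Container {                                    // stands in for ConfigurationBase<Field>
  Field thin, smeared;
  Field &get_U(bool sm) { return sm ? smeared : thin; }
  void smeared_force(Field &dSdU) { dSdU.f *= 2.0; }  // placeholder chain rule
};

struct ToyAction {                                    // stands in for Action<Field>
  bool is_smeared = false;
  void deriv(Field &U, Field &dSdU) { dSdU.f = U.f; } // field-based derivative
  void deriv(Container &U, Field &dSdU) {             // container-based default
    deriv(U.get_U(is_smeared), dSdU);
    if (is_smeared) U.smeared_force(dSdU);
  }
};

int main()
{
  Container cfg; cfg.thin.f = 1.0; cfg.smeared.f = 3.0;
  ToyAction a;   a.is_smeared = true;
  Field F;
  a.deriv(cfg, F);
  std::cout << F.f << std::endl;   // 3.0 from the smeared links, then chain rule -> 6.0
}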

View File

@ -30,6 +30,8 @@ directory
#ifndef QCD_ACTION_CORE
#define QCD_ACTION_CORE
#include <Grid/qcd/action/gauge/GaugeImplementations.h>
#include <Grid/qcd/action/ActionBase.h>
NAMESPACE_CHECK(ActionBase);
#include <Grid/qcd/action/ActionSet.h>

View File

@ -423,7 +423,6 @@ void WilsonKernels<Impl>::DhopDirKernel( StencilImpl &st, DoubledGaugeField &U,S
#define KERNEL_CALL(A) KERNEL_CALLNB(A); accelerator_barrier();
#define KERNEL_CALL_EXT(A) \
const uint64_t NN = Nsite*Ls; \
const uint64_t sz = st.surface_list.size(); \
auto ptr = &st.surface_list[0]; \
accelerator_forNB( ss, sz, Simd::Nsimd(), { \

View File

@ -283,12 +283,13 @@ public:
std::cout << GridLogHMC << "Total time for trajectory (s): " << (t1-t0)/1e6 << std::endl;
TheIntegrator.print_timer();
TheIntegrator.Smearer.set_Field(Ucur);
for (int obs = 0; obs < Observables.size(); obs++) {
std::cout << GridLogDebug << "Observables # " << obs << std::endl;
std::cout << GridLogDebug << "Observables total " << Observables.size() << std::endl;
std::cout << GridLogDebug << "Observables pointer " << Observables[obs] << std::endl;
Observables[obs]->TrajectoryComplete(traj + 1, Ucur, sRNG, pRNG);
Observables[obs]->TrajectoryComplete(traj + 1, TheIntegrator.Smearer, sRNG, pRNG);
}
std::cout << GridLogHMC << ":::::::::::::::::::::::::::::::::::::::::::" << std::endl;
}

View File

@ -35,13 +35,16 @@ class CheckpointerParameters : Serializable {
public:
GRID_SERIALIZABLE_CLASS_MEMBERS(CheckpointerParameters,
std::string, config_prefix,
std::string, smeared_prefix,
std::string, rng_prefix,
int, saveInterval,
bool, saveSmeared,
std::string, format, );
CheckpointerParameters(std::string cf = "cfg", std::string rn = "rng",
CheckpointerParameters(std::string cf = "cfg", std::string sf="cfg_smr" , std::string rn = "rng",
int savemodulo = 1, const std::string &f = "IEEE64BIG")
: config_prefix(cf),
smeared_prefix(sf),
rng_prefix(rn),
saveInterval(savemodulo),
format(f){};
@ -61,13 +64,21 @@ template <class Impl>
class BaseHmcCheckpointer : public HmcObservable<typename Impl::Field> {
public:
void build_filenames(int traj, CheckpointerParameters &Params,
std::string &conf_file, std::string &rng_file) {
std::string &conf_file,
std::string &smear_file,
std::string &rng_file) {
{
std::ostringstream os;
os << Params.rng_prefix << "." << traj;
rng_file = os.str();
}
{
std::ostringstream os;
os << Params.smeared_prefix << "." << traj;
smear_file = os.str();
}
{
std::ostringstream os;
os << Params.config_prefix << "." << traj;
@ -84,6 +95,11 @@ public:
}
virtual void initialize(const CheckpointerParameters &Params) = 0;
virtual void TrajectoryComplete(int traj,
typename Impl::Field &U,
GridSerialRNG &sRNG,
GridParallelRNG &pRNG) { assert(0); } ; // HMC should pass the smart config with smeared and unsmeared
virtual void CheckpointRestore(int traj, typename Impl::Field &U,
GridSerialRNG &sRNG,
GridParallelRNG &pRNG) = 0;
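With the extra smeared_prefix, build_filenames now emits three names per trajectory. A small standalone sketch of the resulting naming scheme using the default prefixes from the constructor above (cfg, cfg_smr, rng); trajectory number 120 is just an example value:

#include <iostream>
#include <sstream>
#include <string>

int main()
{
  int traj = 120;
  std::string config_prefix = "cfg", smeared_prefix = "cfg_smr", rng_prefix = "rng";
  std::ostringstream conf, smr, rng;
  conf << config_prefix  << "." << traj;  // -> cfg.120
  smr  << smeared_prefix << "." << traj;  // -> cfg_smr.120
  rng  << rng_prefix     << "." << traj;  // -> rng.120
  std::cout << conf.str() << " " << smr.str() << " " << rng.str() << std::endl;
}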

View File

@ -61,11 +61,14 @@ public:
fout.close();
}
void TrajectoryComplete(int traj, Field &U, GridSerialRNG &sRNG, GridParallelRNG &pRNG) {
void TrajectoryComplete(int traj,
ConfigurationBase<Field> &SmartConfig,
GridSerialRNG &sRNG, GridParallelRNG &pRNG)
{
if ((traj % Params.saveInterval) == 0) {
std::string config, rng;
this->build_filenames(traj, Params, config, rng);
std::string config, rng, smr;
this->build_filenames(traj, Params, config, smr, rng);
uint32_t nersc_csum;
uint32_t scidac_csuma;
@ -74,9 +77,15 @@ public:
BinarySimpleUnmunger<sobj_double, sobj> munge;
truncate(rng);
BinaryIO::writeRNG(sRNG, pRNG, rng, 0,nersc_csum,scidac_csuma,scidac_csumb);
truncate(config);
std::cout << GridLogMessage << "Written Binary RNG " << rng
<< " checksum " << std::hex
<< nersc_csum <<"/"
<< scidac_csuma <<"/"
<< scidac_csumb
<< std::dec << std::endl;
BinaryIO::writeLatticeObject<vobj, sobj_double>(U, config, munge, 0, Params.format,
truncate(config);
BinaryIO::writeLatticeObject<vobj, sobj_double>(SmartConfig.get_U(false), config, munge, 0, Params.format,
nersc_csum,scidac_csuma,scidac_csumb);
std::cout << GridLogMessage << "Written Binary Configuration " << config
@ -85,6 +94,18 @@ public:
<< scidac_csuma <<"/"
<< scidac_csumb
<< std::dec << std::endl;
if ( Params.saveSmeared ) {
truncate(smr);
BinaryIO::writeLatticeObject<vobj, sobj_double>(SmartConfig.get_U(true), smr, munge, 0, Params.format,
nersc_csum,scidac_csuma,scidac_csumb);
std::cout << GridLogMessage << "Written Binary Smeared Configuration " << smr
<< " checksum " << std::hex
<< nersc_csum <<"/"
<< scidac_csuma <<"/"
<< scidac_csumb
<< std::dec << std::endl;
}
}
};

View File

@ -69,17 +69,27 @@ public:
}
}
void TrajectoryComplete(int traj, GaugeField &U, GridSerialRNG &sRNG,
void TrajectoryComplete(int traj,
ConfigurationBase<GaugeField> &SmartConfig,
GridSerialRNG &sRNG,
GridParallelRNG &pRNG) {
if ((traj % Params.saveInterval) == 0) {
std::string config, rng;
std::string config, rng, smr;
this->build_filenames(traj, Params, config, rng);
GridBase *grid = U.Grid();
GridBase *grid = SmartConfig.get_U(false).Grid();
uint32_t nersc_csum,scidac_csuma,scidac_csumb;
BinaryIO::writeRNG(sRNG, pRNG, rng, 0,nersc_csum,scidac_csuma,scidac_csumb);
std::cout << GridLogMessage << "Written BINARY RNG " << rng
<< " checksum " << std::hex
<< nersc_csum<<"/"
<< scidac_csuma<<"/"
<< scidac_csumb
<< std::dec << std::endl;
IldgWriter _IldgWriter(grid->IsBoss());
_IldgWriter.open(config);
_IldgWriter.writeConfiguration<GaugeStats>(U, traj, config, config);
_IldgWriter.writeConfiguration<GaugeStats>(SmartConfig.get_U(false), traj, config, config);
_IldgWriter.close();
std::cout << GridLogMessage << "Written ILDG Configuration on " << config
@ -88,6 +98,21 @@ public:
<< scidac_csuma<<"/"
<< scidac_csumb
<< std::dec << std::endl;
if ( Params.saveSmeared ) {
IldgWriter _IldgWriter(grid->IsBoss());
_IldgWriter.open(smr);
_IldgWriter.writeConfiguration<GaugeStats>(SmartConfig.get_U(true), traj, config, config);
_IldgWriter.close();
std::cout << GridLogMessage << "Written ILDG Configuration on " << smr
<< " checksum " << std::hex
<< nersc_csum<<"/"
<< scidac_csuma<<"/"
<< scidac_csumb
<< std::dec << std::endl;
}
}
};

View File

@ -52,23 +52,29 @@ public:
Params.format = "IEEE64BIG"; // fixed, overwrite any other choice
}
void TrajectoryComplete(int traj, GaugeField &U, GridSerialRNG &sRNG,
GridParallelRNG &pRNG) {
virtual void TrajectoryComplete(int traj,
ConfigurationBase<GaugeField> &SmartConfig,
GridSerialRNG &sRNG,
GridParallelRNG &pRNG)
{
if ((traj % Params.saveInterval) == 0) {
std::string config, rng;
this->build_filenames(traj, Params, config, rng);
std::string config, rng, smr;
this->build_filenames(traj, Params, config, smr, rng);
int precision32 = 1;
int tworow = 0;
NerscIO::writeRNGState(sRNG, pRNG, rng);
NerscIO::writeConfiguration<GaugeStats>(U, config, tworow, precision32);
NerscIO::writeConfiguration<GaugeStats>(SmartConfig.get_U(false), config, tworow, precision32);
if ( Params.saveSmeared ) {
NerscIO::writeConfiguration<GaugeStats>(SmartConfig.get_U(true), smr, tworow, precision32);
}
}
};
void CheckpointRestore(int traj, GaugeField &U, GridSerialRNG &sRNG,
GridParallelRNG &pRNG) {
std::string config, rng;
this->build_filenames(traj, Params, config, rng);
std::string config, rng, smr;
this->build_filenames(traj, Params, config, smr, rng );
this->check_filename(rng);
this->check_filename(config);

View File

@ -70,19 +70,37 @@ class ScidacHmcCheckpointer : public BaseHmcCheckpointer<Implementation> {
}
}
void TrajectoryComplete(int traj, Field &U, GridSerialRNG &sRNG,
void TrajectoryComplete(int traj,
ConfigurationBase<Field> &SmartConfig,
GridSerialRNG &sRNG,
GridParallelRNG &pRNG) {
if ((traj % Params.saveInterval) == 0) {
std::string config, rng;
this->build_filenames(traj, Params, config, rng);
GridBase *grid = U.Grid();
std::string config, rng,smr;
this->build_filenames(traj, Params, config, smr, rng);
GridBase *grid = SmartConfig.get_U(false).Grid();
uint32_t nersc_csum,scidac_csuma,scidac_csumb;
BinaryIO::writeRNG(sRNG, pRNG, rng, 0,nersc_csum,scidac_csuma,scidac_csumb);
ScidacWriter _ScidacWriter(grid->IsBoss());
_ScidacWriter.open(config);
_ScidacWriter.writeScidacFieldRecord(U, MData);
_ScidacWriter.close();
std::cout << GridLogMessage << "Written Binary RNG " << rng
<< " checksum " << std::hex
<< nersc_csum <<"/"
<< scidac_csuma <<"/"
<< scidac_csumb
<< std::dec << std::endl;
{
ScidacWriter _ScidacWriter(grid->IsBoss());
_ScidacWriter.open(config);
_ScidacWriter.writeScidacFieldRecord(SmartConfig.get_U(false), MData);
_ScidacWriter.close();
}
if ( Params.saveSmeared ) {
ScidacWriter _ScidacWriter(grid->IsBoss());
_ScidacWriter.open(smr);
_ScidacWriter.writeScidacFieldRecord(SmartConfig.get_U(true), MData);
_ScidacWriter.close();
}
std::cout << GridLogMessage << "Written Scidac Configuration on " << config << std::endl;
}
};

View File

@ -66,6 +66,7 @@ public:
template <class FieldImplementation_, class SmearingPolicy, class RepresentationPolicy>
class Integrator {
protected:
public:
typedef FieldImplementation_ FieldImplementation;
typedef typename FieldImplementation::Field MomentaField; //for readability
typedef typename FieldImplementation::Field Field;
@ -96,7 +97,6 @@ protected:
{
t_P[level] += ep;
update_P(P, U, level, ep);
std::cout << GridLogIntegrator << "[" << level << "] P " << " dt " << ep << " : t_P " << t_P[level] << std::endl;
}
@ -130,28 +130,20 @@ protected:
Field force(U.Grid());
conformable(U.Grid(), Mom.Grid());
Field& Us = Smearer.get_U(as[level].actions.at(a)->is_smeared);
double start_force = usecond();
std::cout << GridLogMessage << "AuditForce["<<level<<"]["<<a<<"] before"<<std::endl;
as[level].actions.at(a)->deriv_timer_start();
as[level].actions.at(a)->deriv(Us, force); // deriv should NOT include Ta
as[level].actions.at(a)->deriv(Smearer, force); // deriv should NOT include Ta
as[level].actions.at(a)->deriv_timer_stop();
std::cout << GridLogMessage << "AuditForce["<<level<<"]["<<a<<"] after"<<std::endl;
std::cout << GridLogIntegrator << "Smearing (on/off): " << as[level].actions.at(a)->is_smeared << std::endl;
auto name = as[level].actions.at(a)->action_name();
if (as[level].actions.at(a)->is_smeared) Smearer.smeared_force(force);
force = FieldImplementation::projectForce(force); // Ta for gauge fields
double end_force = usecond();
// DumpSliceNorm("force ",force,Nd-1);
MomFilter->applyFilter(force);
std::cout << GridLogIntegrator << " update_P : Level [" << level <<"]["<<a <<"] "<<name<<" dt "<<ep<< std::endl;
DumpSliceNorm("force filtered ",force,Nd-1);
Real force_abs = std::sqrt(norm2(force)/U.Grid()->gSites()); //average per-site norm. nb. norm2(latt) = \sum_x norm2(latt[x])
Real impulse_abs = force_abs * ep * HMC_MOMENTUM_DENOMINATOR;
@ -377,14 +369,9 @@ public:
auto name = as[level].actions.at(actionID)->action_name();
std::cout << GridLogMessage << "refresh [" << level << "][" << actionID << "] "<<name << std::endl;
Field& Us = Smearer.get_U(as[level].actions.at(actionID)->is_smeared);
std::cout << GridLogMessage << "AuditRefresh["<<level<<"]["<<actionID<<"] before"<<std::endl;
as[level].actions.at(actionID)->refresh_timer_start();
as[level].actions.at(actionID)->refresh(Us, sRNG, pRNG);
as[level].actions.at(actionID)->refresh(Smearer, sRNG, pRNG);
as[level].actions.at(actionID)->refresh_timer_stop();
std::cout << GridLogMessage << "AuditRefresh["<<level<<"]["<<actionID<<"] after"<<std::endl;
}
@ -425,10 +412,9 @@ public:
// get gauge field from the SmearingPolicy and
// based on the boolean is_smeared in actionID
Field& Us = Smearer.get_U(as[level].actions.at(actionID)->is_smeared);
std::cout << GridLogMessage << "S [" << level << "][" << actionID << "] action eval " << std::endl;
as[level].actions.at(actionID)->S_timer_start();
Hterm = as[level].actions.at(actionID)->S(Us);
Hterm = as[level].actions.at(actionID)->S(Smearer);
as[level].actions.at(actionID)->S_timer_stop();
std::cout << GridLogMessage << "S [" << level << "][" << actionID << "] H = " << Hterm << std::endl;
H += Hterm;
@ -469,12 +455,11 @@ public:
for (int actionID = 0; actionID < as[level].actions.size(); ++actionID) {
// get gauge field from the SmearingPolicy and
// based on the boolean is_smeared in actionID
Field& Us = Smearer.get_U(as[level].actions.at(actionID)->is_smeared);
std::cout << GridLogMessage << "S [" << level << "][" << actionID << "] action eval " << std::endl;
as[level].actions.at(actionID)->S_timer_start();
Hterm = as[level].actions.at(actionID)->Sinitial(Us);
as[level].actions.at(actionID)->S_timer_stop();
as[level].actions.at(actionID)->S_timer_start();
Hterm = as[level].actions.at(actionID)->S(Smearer);
as[level].actions.at(actionID)->S_timer_stop();
std::cout << GridLogMessage << "S [" << level << "][" << actionID << "] H = " << Hterm << std::endl;
H += Hterm;

View File

@ -34,6 +34,13 @@ NAMESPACE_BEGIN(Grid);
template <class Field>
class HmcObservable {
public:
virtual void TrajectoryComplete(int traj,
ConfigurationBase<Field> &SmartConfig,
GridSerialRNG &sRNG,
GridParallelRNG &pRNG)
{
TrajectoryComplete(traj,SmartConfig.get_U(false),sRNG,pRNG); // Unsmeared observable
};
virtual void TrajectoryComplete(int traj,
Field &U,
GridSerialRNG &sRNG,

View File

@ -42,6 +42,18 @@ public:
// necessary for HmcObservable compatibility
typedef typename Impl::Field Field;
virtual void TrajectoryComplete(int traj,
ConfigurationBase<Field> &SmartConfig,
GridSerialRNG &sRNG,
GridParallelRNG &pRNG)
{
std::cout << GridLogMessage << "+++++++++++++++++++"<<std::endl;
std::cout << GridLogMessage << "Unsmeared plaquette"<<std::endl;
TrajectoryComplete(traj,SmartConfig.get_U(false),sRNG,pRNG); // Unsmeared observable
std::cout << GridLogMessage << "Smeared plaquette"<<std::endl;
TrajectoryComplete(traj,SmartConfig.get_U(true),sRNG,pRNG); // Smeared observable
std::cout << GridLogMessage << "+++++++++++++++++++"<<std::endl;
};
void TrajectoryComplete(int traj,
Field &U,
GridSerialRNG &sRNG,

View File

@ -7,26 +7,27 @@
NAMESPACE_BEGIN(Grid);
//trivial class for no smearing
template< class Impl >
class NoSmearing
class NoSmearing : public ConfigurationBase<typename Impl::Field>
{
public:
INHERIT_FIELD_TYPES(Impl);
Field* ThinField;
Field* ThinLinks;
NoSmearing(): ThinField(NULL) {}
NoSmearing(): ThinLinks(NULL) {}
void set_Field(Field& U) { ThinField = &U; }
virtual void set_Field(Field& U) { ThinLinks = &U; }
void smeared_force(Field&) const {}
virtual void smeared_force(Field&) {}
Field& get_SmearedU() { return *ThinField; }
virtual Field& get_SmearedU() { return *ThinLinks; }
Field &get_U(bool smeared = false)
virtual Field &get_U(bool smeared = false)
{
return *ThinField;
return *ThinLinks;
}
};
@ -42,19 +43,24 @@ public:
It stores a list of smeared configurations.
*/
template <class Gimpl>
class SmearedConfiguration
class SmearedConfiguration : public ConfigurationBase<typename Gimpl::Field>
{
public:
INHERIT_GIMPL_TYPES(Gimpl);
private:
protected:
const unsigned int smearingLevels;
Smear_Stout<Gimpl> *StoutSmearing;
std::vector<GaugeField> SmearedSet;
public:
GaugeField* ThinLinks; /* Pointer to the thin links configuration */ // move to base???
protected:
// Member functions
//====================================================================
void fill_smearedSet(GaugeField &U)
// Overridden in masked version
virtual void fill_smearedSet(GaugeField &U)
{
ThinLinks = &U; // attach the smearing routine to the field U
@ -82,9 +88,10 @@ private:
}
}
}
//====================================================================
GaugeField AnalyticSmearedForce(const GaugeField& SigmaKPrime,
const GaugeField& GaugeK) const
//overridden in masked verson
virtual GaugeField AnalyticSmearedForce(const GaugeField& SigmaKPrime,
const GaugeField& GaugeK) const
{
GridBase* grid = GaugeK.Grid();
GaugeField C(grid), SigmaK(grid), iLambda(grid);
@ -213,8 +220,6 @@ private:
//====================================================================
public:
GaugeField*
ThinLinks; /* Pointer to the thin links configuration */
/* Standard constructor */
SmearedConfiguration(GridCartesian* UGrid, unsigned int Nsmear,
@ -230,7 +235,7 @@ public:
: smearingLevels(0), StoutSmearing(nullptr), SmearedSet(), ThinLinks(NULL) {}
// attach the smeared routines to the thin links U and fill the smeared set
void set_Field(GaugeField &U)
virtual void set_Field(GaugeField &U)
{
double start = usecond();
fill_smearedSet(U);
@ -240,7 +245,7 @@ public:
}
//====================================================================
void smeared_force(GaugeField &SigmaTilde) const
virtual void smeared_force(GaugeField &SigmaTilde)
{
if (smearingLevels > 0)
{
@ -267,14 +272,16 @@ public:
}
double end = usecond();
double time = (end - start)/ 1e3;
std::cout << GridLogMessage << "Smearing force in " << time << " ms" << std::endl;
std::cout << GridLogMessage << " GaugeConfiguration: Smeared Force chain rule took " << time << " ms" << std::endl;
} // if smearingLevels = 0 do nothing
SigmaTilde=Gimpl::projectForce(SigmaTilde); // Ta
}
//====================================================================
GaugeField& get_SmearedU() { return SmearedSet[smearingLevels - 1]; }
virtual GaugeField& get_SmearedU() { return SmearedSet[smearingLevels - 1]; }
GaugeField &get_U(bool smeared = false)
virtual GaugeField &get_U(bool smeared = false)
{
// get the config, thin links by default
if (smeared)

View File

@ -0,0 +1,813 @@
/*!
@file GaugeConfiguration.h
@brief Declares the GaugeConfiguration class
*/
#pragma once
NAMESPACE_BEGIN(Grid);
/*!
@brief Smeared configuration masked container
Modified for a multi-subset smearing (aka Luscher Flowed HMC)
*/
template <class Gimpl>
class SmearedConfigurationMasked : public SmearedConfiguration<Gimpl>
{
public:
INHERIT_GIMPL_TYPES(Gimpl);
private:
// These live in base class
// const unsigned int smearingLevels;
// Smear_Stout<Gimpl> *StoutSmearing;
// std::vector<GaugeField> SmearedSet;
std::vector<LatticeLorentzComplex> masks;
typedef typename SU3Adjoint::AMatrix AdjMatrix;
typedef typename SU3Adjoint::LatticeAdjMatrix AdjMatrixField;
typedef typename SU3Adjoint::LatticeAdjVector AdjVectorField;
// Adjoint vector to GaugeField force
void InsertForce(GaugeField &Fdet,AdjVectorField &Fdet_nu,int nu)
{
Complex ci(0,1);
GaugeLinkField Fdet_pol(Fdet.Grid());
Fdet_pol=Zero();
for(int e=0;e<8;e++){
ColourMatrix te;
SU3::generator(e, te);
auto tmp=peekColour(Fdet_nu,e);
Fdet_pol=Fdet_pol + ci*tmp*te; // but norm of te is different.. why?
}
pokeLorentz(Fdet, Fdet_pol, nu);
}
void Compute_MpInvJx_dNxxdSy(const GaugeLinkField &PlaqL,const GaugeLinkField &PlaqR, AdjMatrixField MpInvJx,AdjVectorField &Fdet2 )
{
GaugeLinkField UtaU(PlaqL.Grid());
GaugeLinkField D(PlaqL.Grid());
AdjMatrixField Dbc(PlaqL.Grid());
LatticeComplex tmp(PlaqL.Grid());
const int Ngen = SU3Adjoint::Dimension;
Complex ci(0,1);
ColourMatrix ta,tb,tc;
for(int a=0;a<Ngen;a++) {
SU3::generator(a, ta);
// Qlat Tb = 2i Tb^Grid
UtaU= 2.0*ci*adj(PlaqL)*ta*PlaqR;
for(int c=0;c<Ngen;c++) {
SU3::generator(c, tc);
D = Ta( (2.0)*ci*tc *UtaU);
for(int b=0;b<Ngen;b++){
SU3::generator(b, tb);
tmp =-trace(ci*tb*D);
PokeIndex<ColourIndex>(Dbc,tmp,b,c); // Adjoint rep
}
}
tmp = trace(MpInvJx * Dbc);
PokeIndex<ColourIndex>(Fdet2,tmp,a);
}
}
void ComputeNxy(const GaugeLinkField &PlaqL,const GaugeLinkField &PlaqR,AdjMatrixField &NxAd)
{
GaugeLinkField Nx(PlaqL.Grid());
const int Ngen = SU3Adjoint::Dimension;
Complex ci(0,1);
ColourMatrix tb;
ColourMatrix tc;
for(int b=0;b<Ngen;b++) {
SU3::generator(b, tb);
Nx = (2.0)*Ta( adj(PlaqL)*ci*tb * PlaqR );
for(int c=0;c<Ngen;c++) {
SU3::generator(c, tc);
auto tmp =closure( -trace(ci*tc*Nx));
PokeIndex<ColourIndex>(NxAd,tmp,c,b);
}
}
}
void ApplyMask(GaugeField &U,int smr)
{
LatticeComplex tmp(U.Grid());
GaugeLinkField Umu(U.Grid());
for(int mu=0;mu<Nd;mu++){
Umu=PeekIndex<LorentzIndex>(U,mu);
tmp=PeekIndex<LorentzIndex>(masks[smr],mu);
Umu=Umu*tmp;
PokeIndex<LorentzIndex>(U, Umu, mu);
}
}
public:
void logDetJacobianForceLevel(const GaugeField &U, GaugeField &force ,int smr)
{
GridBase* grid = U.Grid();
ColourMatrix tb;
ColourMatrix tc;
ColourMatrix ta;
GaugeField C(grid);
GaugeField Umsk(grid);
std::vector<GaugeLinkField> Umu(Nd,grid);
GaugeLinkField Cmu(grid); // U and staple; C contains factor of epsilon
GaugeLinkField Zx(grid); // U times Staple, contains factor of epsilon
GaugeLinkField Nxx(grid); // Nxx fundamental space
GaugeLinkField Utmp(grid);
GaugeLinkField PlaqL(grid);
GaugeLinkField PlaqR(grid);
const int Ngen = SU3Adjoint::Dimension;
AdjMatrix TRb;
ColourMatrix Ident;
LatticeComplex cplx(grid);
AdjVectorField dJdXe_nMpInv(grid);
AdjVectorField dJdXe_nMpInv_y(grid);
AdjMatrixField MpAd(grid); // Mprime luchang's notes
AdjMatrixField MpAdInv(grid); // Mprime inverse
AdjMatrixField NxxAd(grid); // Nxx in adjoint space
AdjMatrixField JxAd(grid);
AdjMatrixField ZxAd(grid);
AdjMatrixField mZxAd(grid);
AdjMatrixField X(grid);
Complex ci(0,1);
RealD t0 = usecond();
Ident = ComplexD(1.0);
for(int d=0;d<Nd;d++){
Umu[d] = peekLorentz(U, d);
}
int mu= (smr/2) %Nd;
////////////////////////////////////////////////////////////////////////////////
// Mask the gauge field
////////////////////////////////////////////////////////////////////////////////
auto mask=PeekIndex<LorentzIndex>(masks[smr],mu); // the cb mask
Umsk = U;
ApplyMask(Umsk,smr);
Utmp = peekLorentz(Umsk,mu);
////////////////////////////////////////////////////////////////////////////////
// Retrieve the eps/rho parameter(s) -- could allow all different but not so far
////////////////////////////////////////////////////////////////////////////////
double rho=this->StoutSmearing->SmearRho[1];
int idx=0;
for(int mu=0;mu<4;mu++){
for(int nu=0;nu<4;nu++){
if ( mu!=nu) assert(this->StoutSmearing->SmearRho[idx]==rho);
else assert(this->StoutSmearing->SmearRho[idx]==0.0);
idx++;
}}
//////////////////////////////////////////////////////////////////
// Assemble the N matrix
//////////////////////////////////////////////////////////////////
// Computes ALL the staples -- could compute one only and do it here
RealD time;
time=-usecond();
this->StoutSmearing->BaseSmear(C, U);
Cmu = peekLorentz(C, mu);
//////////////////////////////////////////////////////////////////
// Assemble Luscher exp diff map J matrix
//////////////////////////////////////////////////////////////////
// Ta so Z lives in Lie algabra
Zx = Ta(Cmu * adj(Umu[mu]));
time+=usecond();
std::cout << GridLogMessage << "Z took "<<time<< " us"<<std::endl;
time=-usecond();
// Move Z to the Adjoint Rep == make_adjoint_representation
ZxAd = Zero();
for(int b=0;b<8;b++) {
// Adj group sets traceless antihermitian T's -- Guido, really????
SU3::generator(b, tb); // Fund group sets traceless hermitian T's
SU3Adjoint::generator(b,TRb);
TRb=-TRb;
cplx = 2.0*trace(ci*tb*Zx); // my convention 1/2 delta ba
ZxAd = ZxAd + cplx * TRb; // is this right? YES - Guido used Anti herm Ta's and with bloody wrong sign.
}
time+=usecond();
std::cout << GridLogMessage << "ZxAd took "<<time<< " us"<<std::endl;
//////////////////////////////////////
// J(x) = 1 + Sum_k=1..N (-Zac)^k/(k+1)!
//////////////////////////////////////
time=-usecond();
X=1.0;
JxAd = X;
mZxAd = (-1.0)*ZxAd;
RealD kpfac = 1;
for(int k=1;k<12;k++){
X=X*mZxAd;
kpfac = kpfac /(k+1);
JxAd = JxAd + X * kpfac;
}
time+=usecond();
std::cout << GridLogMessage << "Jx took "<<time<< " us"<<std::endl;
//////////////////////////////////////
// dJ(x)/dxe
//////////////////////////////////////
time=-usecond();
std::vector<AdjMatrixField> dJdX; dJdX.resize(8,grid);
AdjMatrixField tbXn(grid);
AdjMatrixField sumXtbX(grid);
AdjMatrixField t2(grid);
AdjMatrixField dt2(grid);
AdjMatrixField t3(grid);
AdjMatrixField dt3(grid);
AdjMatrixField aunit(grid);
for(int b=0;b<8;b++){
aunit = ComplexD(1.0);
SU3Adjoint::generator(b, TRb); //dt2
X = (-1.0)*ZxAd;
t2 = X;
dt2 = TRb;
for (int j = 20; j > 1; --j) {
t3 = t2*(1.0 / (j + 1)) + aunit;
dt3 = dt2*(1.0 / (j + 1));
t2 = X * t3;
dt2 = TRb * t3 + X * dt3;
}
dJdX[b] = -dt2;
}
time+=usecond();
std::cout << GridLogMessage << "dJx took "<<time<< " us"<<std::endl;
/////////////////////////////////////////////////////////////////
// Mask Umu for this link
/////////////////////////////////////////////////////////////////
time=-usecond();
PlaqL = Ident;
PlaqR = Utmp*adj(Cmu);
ComputeNxy(PlaqL,PlaqR,NxxAd);
time+=usecond();
std::cout << GridLogMessage << "ComputeNxy took "<<time<< " us"<<std::endl;
////////////////////////////
// Mab
////////////////////////////
MpAd = Complex(1.0,0.0);
MpAd = MpAd - JxAd * NxxAd;
/////////////////////////
// invert the 8x8
/////////////////////////
time=-usecond();
MpAdInv = Inverse(MpAd);
time+=usecond();
std::cout << GridLogMessage << "MpAdInv took "<<time<< " us"<<std::endl;
RealD t3a = usecond();
/////////////////////////////////////////////////////////////////
// Nxx Mp^-1
/////////////////////////////////////////////////////////////////
AdjVectorField FdetV(grid);
AdjVectorField Fdet1_nu(grid);
AdjVectorField Fdet2_nu(grid);
AdjVectorField Fdet2_mu(grid);
AdjVectorField Fdet1_mu(grid);
AdjMatrixField nMpInv(grid);
nMpInv= NxxAd *MpAdInv;
AdjMatrixField MpInvJx(grid);
AdjMatrixField MpInvJx_nu(grid);
MpInvJx = (-1.0)*MpAdInv * JxAd;// rho is on the plaq factor
Compute_MpInvJx_dNxxdSy(PlaqL,PlaqR,MpInvJx,FdetV);
Fdet2_mu=FdetV;
Fdet1_mu=Zero();
for(int e =0 ; e<8 ; e++){
LatticeComplexD tr(grid);
ColourMatrix te;
SU3::generator(e, te);
tr = trace(dJdX[e] * nMpInv);
pokeColour(dJdXe_nMpInv,tr,e);
}
///////////////////////////////
// Mask it off
///////////////////////////////
auto tmp=PeekIndex<LorentzIndex>(masks[smr],mu);
dJdXe_nMpInv = dJdXe_nMpInv*tmp;
// dJdXe_nMpInv needs to multiply:
// Nxx_mu (site local) (1)
// Nxy_mu one site forward in each nu direction (3)
// Nxy_mu one site backward in each nu direction (3)
// Nxy_nu 0,0 ; +mu,0; 0,-nu; +mu-nu [ 3x4 = 12]
// 19 terms.
AdjMatrixField Nxy(grid);
GaugeField Fdet1(grid);
GaugeField Fdet2(grid);
GaugeLinkField Fdet_pol(grid); // one polarisation
RealD t4 = usecond();
for(int nu=0;nu<Nd;nu++){
if (nu!=mu) {
///////////////// +ve nu /////////////////
// __
// | |
// x== // nu polarisation -- clockwise
time=-usecond();
PlaqL=Ident;
PlaqR=(-rho)*Gimpl::CovShiftForward(Umu[nu], nu,
Gimpl::CovShiftForward(Umu[mu], mu,
Gimpl::CovShiftBackward(Umu[nu], nu,
Gimpl::CovShiftIdentityBackward(Utmp, mu))));
time+=usecond();
std::cout << GridLogMessage << "PlaqLR took "<<time<< " us"<<std::endl;
time=-usecond();
dJdXe_nMpInv_y = dJdXe_nMpInv;
ComputeNxy(PlaqL,PlaqR,Nxy);
Fdet1_nu = transpose(Nxy)*dJdXe_nMpInv_y;
time+=usecond();
std::cout << GridLogMessage << "ComputeNxy (occurs 6x) took "<<time<< " us"<<std::endl;
time=-usecond();
PlaqR=(-1.0)*PlaqR;
Compute_MpInvJx_dNxxdSy(PlaqL,PlaqR,MpInvJx,FdetV);
Fdet2_nu = FdetV;
time+=usecond();
std::cout << GridLogMessage << "Compute_MpInvJx_dNxxSy (occurs 6x) took "<<time<< " us"<<std::endl;
// x==
// | |
// .__| // nu polarisation -- anticlockwise
PlaqR=(rho)*Gimpl::CovShiftForward(Umu[nu], nu,
Gimpl::CovShiftBackward(Umu[mu], mu,
Gimpl::CovShiftIdentityBackward(Umu[nu], nu)));
PlaqL=Gimpl::CovShiftIdentityBackward(Utmp, mu);
dJdXe_nMpInv_y = Cshift(dJdXe_nMpInv,mu,-1);
ComputeNxy(PlaqL, PlaqR,Nxy);
Fdet1_nu = Fdet1_nu+transpose(Nxy)*dJdXe_nMpInv_y;
MpInvJx_nu = Cshift(MpInvJx,mu,-1);
Compute_MpInvJx_dNxxdSy(PlaqL,PlaqR,MpInvJx_nu,FdetV);
Fdet2_nu = Fdet2_nu+FdetV;
///////////////// -ve nu /////////////////
// __
// | |
// x== // nu polarisation -- clockwise
PlaqL=(rho)* Gimpl::CovShiftForward(Umu[mu], mu,
Gimpl::CovShiftForward(Umu[nu], nu,
Gimpl::CovShiftIdentityBackward(Utmp, mu)));
PlaqR = Gimpl::CovShiftIdentityForward(Umu[nu], nu);
dJdXe_nMpInv_y = Cshift(dJdXe_nMpInv,nu,1);
ComputeNxy(PlaqL,PlaqR,Nxy);
Fdet1_nu = Fdet1_nu + transpose(Nxy)*dJdXe_nMpInv_y;
MpInvJx_nu = Cshift(MpInvJx,nu,1);
Compute_MpInvJx_dNxxdSy(PlaqL,PlaqR,MpInvJx_nu,FdetV);
Fdet2_nu = Fdet2_nu+FdetV;
// x==
// | |
// |__| // nu polarisation
PlaqL=(-rho)*Gimpl::CovShiftForward(Umu[nu], nu,
Gimpl::CovShiftIdentityBackward(Utmp, mu));
PlaqR=Gimpl::CovShiftBackward(Umu[mu], mu,
Gimpl::CovShiftIdentityForward(Umu[nu], nu));
dJdXe_nMpInv_y = Cshift(dJdXe_nMpInv,mu,-1);
dJdXe_nMpInv_y = Cshift(dJdXe_nMpInv_y,nu,1);
ComputeNxy(PlaqL,PlaqR,Nxy);
Fdet1_nu = Fdet1_nu + transpose(Nxy)*dJdXe_nMpInv_y;
MpInvJx_nu = Cshift(MpInvJx,mu,-1);
MpInvJx_nu = Cshift(MpInvJx_nu,nu,1);
Compute_MpInvJx_dNxxdSy(PlaqL,PlaqR,MpInvJx_nu,FdetV);
Fdet2_nu = Fdet2_nu+FdetV;
/////////////////////////////////////////////////////////////////////
// Set up the determinant force contribution in 3x3 algebra basis
/////////////////////////////////////////////////////////////////////
InsertForce(Fdet1,Fdet1_nu,nu);
InsertForce(Fdet2,Fdet2_nu,nu);
//////////////////////////////////////////////////
// Parallel direction terms
//////////////////////////////////////////////////
// __
// | "
// |__"x // mu polarisation
PlaqL=(-rho)*Gimpl::CovShiftForward(Umu[mu], mu,
Gimpl::CovShiftBackward(Umu[nu], nu,
Gimpl::CovShiftIdentityBackward(Utmp, mu)));
PlaqR=Gimpl::CovShiftIdentityBackward(Umu[nu], nu);
dJdXe_nMpInv_y = Cshift(dJdXe_nMpInv,nu,-1);
ComputeNxy(PlaqL,PlaqR,Nxy);
Fdet1_mu = Fdet1_mu + transpose(Nxy)*dJdXe_nMpInv_y;
MpInvJx_nu = Cshift(MpInvJx,nu,-1);
Compute_MpInvJx_dNxxdSy(PlaqL,PlaqR,MpInvJx_nu,FdetV);
Fdet2_mu = Fdet2_mu+FdetV;
// __
// " |
// x__| // mu polarisation
PlaqL=(-rho)*Gimpl::CovShiftForward(Umu[mu], mu,
Gimpl::CovShiftForward(Umu[nu], nu,
Gimpl::CovShiftIdentityBackward(Utmp, mu)));
PlaqR=Gimpl::CovShiftIdentityForward(Umu[nu], nu);
dJdXe_nMpInv_y = Cshift(dJdXe_nMpInv,nu,1);
ComputeNxy(PlaqL,PlaqR,Nxy);
Fdet1_mu = Fdet1_mu + transpose(Nxy)*dJdXe_nMpInv_y;
MpInvJx_nu = Cshift(MpInvJx,nu,1);
Compute_MpInvJx_dNxxdSy(PlaqL,PlaqR,MpInvJx_nu,FdetV);
Fdet2_mu = Fdet2_mu+FdetV;
}
}
RealD t5 = usecond();
Fdet1_mu = Fdet1_mu + transpose(NxxAd)*dJdXe_nMpInv;
InsertForce(Fdet1,Fdet1_mu,mu);
InsertForce(Fdet2,Fdet2_mu,mu);
force= (-0.5)*( Fdet1 + Fdet2);
RealD t1 = usecond();
std::cout << GridLogMessage << " logDetJacobianForce level took "<<t1-t0<<" us "<<std::endl;
std::cout << GridLogMessage << " logDetJacobianForce t3-t0 "<<t3a-t0<<" us "<<std::endl;
std::cout << GridLogMessage << " logDetJacobianForce t4-t3 dJdXe_nMpInv "<<t4-t3a<<" us "<<std::endl;
std::cout << GridLogMessage << " logDetJacobianForce t5-t4 mu nu loop "<<t5-t4<<" us "<<std::endl;
std::cout << GridLogMessage << " logDetJacobianForce t1-t5 "<<t1-t5<<" us "<<std::endl;
std::cout << GridLogMessage << " logDetJacobianForce level took "<<t1-t0<<" us "<<std::endl;
}
RealD logDetJacobianLevel(const GaugeField &U,int smr)
{
GridBase* grid = U.Grid();
GaugeField C(grid);
GaugeLinkField Nb(grid);
GaugeLinkField Z(grid);
GaugeLinkField Umu(grid), Cmu(grid);
ColourMatrix Tb;
ColourMatrix Tc;
typedef typename SU3Adjoint::AMatrix AdjMatrix;
typedef typename SU3Adjoint::LatticeAdjMatrix AdjMatrixField;
typedef typename SU3Adjoint::LatticeAdjVector AdjVectorField;
const int Ngen = SU3Adjoint::Dimension;
AdjMatrix TRb;
LatticeComplex cplx(grid);
AdjVectorField AlgV(grid);
AdjMatrixField Mab(grid);
AdjMatrixField Ncb(grid);
AdjMatrixField Jac(grid);
AdjMatrixField Zac(grid);
AdjMatrixField mZac(grid);
AdjMatrixField X(grid);
int mu= (smr/2) %Nd;
auto mask=PeekIndex<LorentzIndex>(masks[smr],mu); // the cb mask
//////////////////////////////////////////////////////////////////
// Assemble the N matrix
//////////////////////////////////////////////////////////////////
// Computes ALL the staples -- could compute one only here
this->StoutSmearing->BaseSmear(C, U);
Cmu = peekLorentz(C, mu);
Umu = peekLorentz(U, mu);
Complex ci(0,1);
for(int b=0;b<Ngen;b++) {
SU3::generator(b, Tb);
// Qlat Tb = 2i Tb^Grid
Nb = (2.0)*Ta( ci*Tb * Umu * adj(Cmu));
for(int c=0;c<Ngen;c++) {
SU3::generator(c, Tc);
auto tmp = -trace(ci*Tc*Nb); // Luchang's norm: (2Tc) (2Td) N^db = -2 delta cd N^db // - was important
PokeIndex<ColourIndex>(Ncb,tmp,c,b);
}
}
//////////////////////////////////////////////////////////////////
// Assemble Luscher exp diff map J matrix
//////////////////////////////////////////////////////////////////
// Ta so Z lives in Lie algabra
Z = Ta(Cmu * adj(Umu));
// Move Z to the Adjoint Rep == make_adjoint_representation
Zac = Zero();
for(int b=0;b<8;b++) {
// Adj group sets traceless antihermitian T's -- Guido, really????
// Is the mapping of these the same? Same structure constants
// Might never have been checked.
SU3::generator(b, Tb); // Fund group sets traceless hermitian T's
SU3Adjoint::generator(b,TRb);
TRb=-TRb;
cplx = 2.0*trace(ci*Tb*Z); // my convention 1/2 delta ba
Zac = Zac + cplx * TRb; // is this right? YES - Guido used Anti herm Ta's and with bloody wrong sign.
}
//////////////////////////////////////
// J(x) = 1 + Sum_k=1..N (-Zac)^k/(k+1)!
//////////////////////////////////////
X=1.0;
Jac = X;
mZac = (-1.0)*Zac;
RealD kpfac = 1;
for(int k=1;k<12;k++){
X=X*mZac;
kpfac = kpfac /(k+1);
Jac = Jac + X * kpfac;
}
////////////////////////////
// Mab
////////////////////////////
Mab = Complex(1.0,0.0);
Mab = Mab - Jac * Ncb;
////////////////////////////
// det
////////////////////////////
LatticeComplex det(grid);
det = Determinant(Mab);
////////////////////////////
// ln det
////////////////////////////
LatticeComplex ln_det(grid);
ln_det = log(det);
////////////////////////////
// Masked sum
////////////////////////////
ln_det = ln_det * mask;
Complex result = sum(ln_det);
return result.real();
}
public:
RealD logDetJacobian(void)
{
RealD ln_det = 0;
if (this->smearingLevels > 0)
{
double start = usecond();
for (int ismr = this->smearingLevels - 1; ismr > 0; --ismr) {
ln_det+= logDetJacobianLevel(this->get_smeared_conf(ismr-1),ismr);
}
ln_det +=logDetJacobianLevel(*(this->ThinLinks),0);
double end = usecond();
double time = (end - start)/ 1e3;
std::cout << GridLogMessage << "GaugeConfigurationMasked: logDetJacobian took " << time << " ms" << std::endl;
}
return ln_det;
}
void logDetJacobianForce(GaugeField &force)
{
force =Zero();
GaugeField force_det(force.Grid());
if (this->smearingLevels > 0)
{
double start = usecond();
GaugeLinkField tmp_mu(force.Grid());
for (int ismr = this->smearingLevels - 1; ismr > 0; --ismr) {
// remove U in UdSdU...
for (int mu = 0; mu < Nd; mu++) {
tmp_mu = adj(peekLorentz(this->get_smeared_conf(ismr), mu)) * peekLorentz(force, mu);
pokeLorentz(force, tmp_mu, mu);
}
// Propagate existing force
force = this->AnalyticSmearedForce(force, this->get_smeared_conf(ismr - 1), ismr);
// Add back U in UdSdU...
for (int mu = 0; mu < Nd; mu++) {
tmp_mu = peekLorentz(this->get_smeared_conf(ismr - 1), mu) * peekLorentz(force, mu);
pokeLorentz(force, tmp_mu, mu);
}
// Get this levels determinant force
force_det = Zero();
logDetJacobianForceLevel(this->get_smeared_conf(ismr-1),force_det,ismr);
// Sum the contributions
force = force + force_det;
}
// remove U in UdSdU...
for (int mu = 0; mu < Nd; mu++) {
tmp_mu = adj(peekLorentz(this->get_smeared_conf(0), mu)) * peekLorentz(force, mu);
pokeLorentz(force, tmp_mu, mu);
}
force = this->AnalyticSmearedForce(force, *this->ThinLinks,0);
for (int mu = 0; mu < Nd; mu++) {
tmp_mu = peekLorentz(*this->ThinLinks, mu) * peekLorentz(force, mu);
pokeLorentz(force, tmp_mu, mu);
}
force_det = Zero();
logDetJacobianForceLevel(*this->ThinLinks,force_det,0);
force = force + force_det;
force=Ta(force); // Ta
double end = usecond();
double time = (end - start)/ 1e3;
std::cout << GridLogMessage << "GaugeConfigurationMasked: lnDetJacobianForce took " << time << " ms" << std::endl;
} // if smearingLevels = 0 do nothing
}
private:
//====================================================================
// Override base clas here to mask it
virtual void fill_smearedSet(GaugeField &U)
{
this->ThinLinks = &U; // attach the smearing routine to the field U
// check the pointer is not null
if (this->ThinLinks == NULL)
std::cout << GridLogError << "[SmearedConfigurationMasked] Error in ThinLinks pointer\n";
if (this->smearingLevels > 0)
{
std::cout << GridLogMessage << "[SmearedConfigurationMasked] Filling SmearedSet\n";
GaugeField previous_u(this->ThinLinks->Grid());
GaugeField smeared_A(this->ThinLinks->Grid());
GaugeField smeared_B(this->ThinLinks->Grid());
previous_u = *this->ThinLinks;
double start = usecond();
for (int smearLvl = 0; smearLvl < this->smearingLevels; ++smearLvl)
{
this->StoutSmearing->smear(smeared_A, previous_u);
ApplyMask(smeared_A,smearLvl);
smeared_B = previous_u;
ApplyMask(smeared_B,smearLvl);
// Replace only the masked portion
this->SmearedSet[smearLvl] = previous_u-smeared_B + smeared_A;
previous_u = this->SmearedSet[smearLvl];
// For debug purposes
RealD impl_plaq = WilsonLoops<Gimpl>::avgPlaquette(previous_u);
std::cout << GridLogMessage << "[SmearedConfigurationMasked] smeared Plaq: " << impl_plaq << std::endl;
}
double end = usecond();
double time = (end - start)/ 1e3;
std::cout << GridLogMessage << "GaugeConfigurationMasked: Link smearing took " << time << " ms" << std::endl;
}
}
//====================================================================
// Override base to add masking
virtual GaugeField AnalyticSmearedForce(const GaugeField& SigmaKPrime,
const GaugeField& GaugeK,int level)
{
GridBase* grid = GaugeK.Grid();
GaugeField C(grid), SigmaK(grid), iLambda(grid);
GaugeField SigmaKPrimeA(grid);
GaugeField SigmaKPrimeB(grid);
GaugeLinkField iLambda_mu(grid);
GaugeLinkField iQ(grid), e_iQ(grid);
GaugeLinkField SigmaKPrime_mu(grid);
GaugeLinkField GaugeKmu(grid), Cmu(grid);
this->StoutSmearing->BaseSmear(C, GaugeK);
SigmaK = Zero();
iLambda = Zero();
SigmaKPrimeA = SigmaKPrime;
ApplyMask(SigmaKPrimeA,level);
SigmaKPrimeB = SigmaKPrime - SigmaKPrimeA;
// Could get away with computing only one polarisation here
// int mu= (smr/2) %Nd;
// SigmaKprime_A has only one component
for (int mu = 0; mu < Nd; mu++)
{
Cmu = peekLorentz(C, mu);
GaugeKmu = peekLorentz(GaugeK, mu);
SigmaKPrime_mu = peekLorentz(SigmaKPrimeA, mu);
iQ = Ta(Cmu * adj(GaugeKmu));
this->set_iLambda(iLambda_mu, e_iQ, iQ, SigmaKPrime_mu, GaugeKmu);
pokeLorentz(SigmaK, SigmaKPrime_mu * e_iQ + adj(Cmu) * iLambda_mu, mu);
pokeLorentz(iLambda, iLambda_mu, mu);
}
this->StoutSmearing->derivative(SigmaK, iLambda,GaugeK); // derivative of SmearBase
////////////////////////////////////////////////////////////////////////////////////
// propagate the rest of the force as identity map, just add back
////////////////////////////////////////////////////////////////////////////////////
SigmaK = SigmaK+SigmaKPrimeB;
return SigmaK;
}
public:
/* Standard constructor */
SmearedConfigurationMasked(GridCartesian* _UGrid, unsigned int Nsmear, Smear_Stout<Gimpl>& Stout)
: SmearedConfiguration<Gimpl>(_UGrid, Nsmear,Stout)
{
assert(Nsmear%(2*Nd)==0); // Or multiply by 8??
// was resized in base class
assert(this->SmearedSet.size()==Nsmear);
GridRedBlackCartesian * UrbGrid;
UrbGrid = SpaceTimeGrid::makeFourDimRedBlackGrid(_UGrid);
LatticeComplex one(_UGrid); one = ComplexD(1.0,0.0);
LatticeComplex tmp(_UGrid);
for (unsigned int i = 0; i < this->smearingLevels; ++i) {
masks.push_back(*(new LatticeLorentzComplex(_UGrid)));
int mu= (i/2) %Nd;
int cb= (i%2);
LatticeComplex tmpcb(UrbGrid);
masks[i]=Zero();
////////////////////
// Setup the mask
////////////////////
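// Level i touches only Lorentz direction mu = (i/2) % Nd on checkerboard cb = i % 2;
// the mask is one on those links and zero everywhere else.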
tmp = Zero();
pickCheckerboard(cb,tmpcb,one);
setCheckerboard(tmp,tmpcb);
PokeIndex<LorentzIndex>(masks[i],tmp, mu);
}
delete UrbGrid;
}
virtual void smeared_force(GaugeField &SigmaTilde)
{
if (this->smearingLevels > 0)
{
double start = usecond();
GaugeField force = SigmaTilde; // actually = U*SigmaTilde
GaugeLinkField tmp_mu(SigmaTilde.Grid());
// Remove U from UdSdU
for (int mu = 0; mu < Nd; mu++)
{
// to get just SigmaTilde
tmp_mu = adj(peekLorentz(this->SmearedSet[this->smearingLevels - 1], mu)) * peekLorentz(force, mu);
pokeLorentz(force, tmp_mu, mu);
}
for (int ismr = this->smearingLevels - 1; ismr > 0; --ismr) {
force = this->AnalyticSmearedForce(force, this->get_smeared_conf(ismr - 1),ismr);
}
force = this->AnalyticSmearedForce(force, *this->ThinLinks,0);
// Add U to UdSdU
for (int mu = 0; mu < Nd; mu++)
{
tmp_mu = peekLorentz(*this->ThinLinks, mu) * peekLorentz(force, mu);
pokeLorentz(SigmaTilde, tmp_mu, mu);
}
double end = usecond();
double time = (end - start)/ 1e3;
std::cout << GridLogMessage << " GaugeConfigurationMasked: Smeared Force chain rule took " << time << " ms" << std::endl;
} // if smearingLevels = 0 do nothing
SigmaTilde=Gimpl::projectForce(SigmaTilde); // Ta
}
};
NAMESPACE_END(Grid);


@ -0,0 +1,87 @@
/*************************************************************************************
Grid physics library, www.github.com/paboyle/Grid
Source file: ./lib/qcd/action/gauge/JacobianAction.h
Copyright (C) 2015
Author: Peter Boyle <paboyle@ph.ed.ac.uk>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
See the full license in the file "LICENSE" in the top level distribution
directory
*************************************************************************************/
/* END LEGAL */
#pragma once
NAMESPACE_BEGIN(Grid);
////////////////////////////////////////////////////////////////////////
// Jacobian Action ..
////////////////////////////////////////////////////////////////////////
template <class Gimpl>
class JacobianAction : public Action<typename Gimpl::GaugeField> {
public:
INHERIT_GIMPL_TYPES(Gimpl);
SmearedConfigurationMasked<Gimpl> * smearer;
/////////////////////////// constructors
explicit JacobianAction(SmearedConfigurationMasked<Gimpl> * _smearer ) { smearer=_smearer;};
virtual std::string action_name() {return "JacobianAction";}
virtual std::string LogParameters(){
std::stringstream sstream;
sstream << GridLogMessage << "[JacobianAction] " << std::endl;
return sstream.str();
}
//////////////////////////////////
// Usual cases are not used
//////////////////////////////////
virtual void refresh(const GaugeField &U, GridSerialRNG &sRNG, GridParallelRNG &pRNG){ assert(0);};
virtual RealD S(const GaugeField &U) { assert(0); }
virtual void deriv(const GaugeField &U, GaugeField &dSdU) { assert(0); }
//////////////////////////////////
// Functions of smart configs only
//////////////////////////////////
virtual void refresh(ConfigurationBase<GaugeField> & U, GridSerialRNG &sRNG, GridParallelRNG& pRNG)
{
return;
}
virtual RealD S(ConfigurationBase<GaugeField>& U)
{
// det M = e^{ - ( - logDetM) }
assert( &U == smearer );
return -smearer->logDetJacobian();
}
virtual RealD Sinitial(ConfigurationBase<GaugeField>& U)
{
return S(U);
}
virtual void deriv(ConfigurationBase<GaugeField>& U, GaugeField& dSdU)
{
assert( &U == smearer );
smearer->logDetJacobianForce(dSdU);
}
private:
};
NAMESPACE_END(Grid);
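For orientation, a minimal wiring sketch for this class (a sketch only, mirroring the HMC/FTHMC2p1f.cc driver further down this page; the choice of PeriodicGimplR as Gimpl and the parameter values are illustrative assumptions, not part of JacobianAction.h):
#include <Grid/Grid.h>
#include <Grid/qcd/smearing/GaugeConfigurationMasked.h>
#include <Grid/qcd/smearing/JacobianAction.h>
using namespace Grid;
int main(int argc, char **argv)
{
  Grid_init(&argc, &argv);
  GridCartesian *UGrid = SpaceTimeGrid::makeFourDimGrid(GridDefaultLatt(),
                                                        GridDefaultSimd(Nd,vComplex::Nsimd()),
                                                        GridDefaultMpi());
  double rho   = 0.1;    // stout smearing parameter
  int    Nstep = 2*Nd;   // masked smearing steps; the constructor asserts a multiple of 2*Nd
  Smear_Stout<PeriodicGimplR> Stout(rho);
  SmearedConfigurationMasked<PeriodicGimplR> SmearingPolicy(UGrid, Nstep, Stout);
  // S(U) = -log det J of the field transformation; the force comes from logDetJacobianForce
  JacobianAction<PeriodicGimplR> Jacobian(&SmearingPolicy);
  // ... push &Jacobian onto an HMC action level and call TheHMC.Run(SmearingPolicy),
  //     exactly as in HMC/FTHMC2p1f.cc below ...
  Grid_finalize();
}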


@ -40,7 +40,9 @@ template <class Gimpl>
class Smear_Stout : public Smear<Gimpl> {
private:
int OrthogDim = -1;
public:
const std::vector<double> SmearRho;
private:
// Smear<Gimpl>* ownership semantics:
// Smear<Gimpl>* pointers passed in to the constructor are owned by the caller, so we don't delete them here
// Smear<Gimpl>* objects created within the constructor need to be deleted as part of the destructor


@ -34,6 +34,61 @@ directory
NAMESPACE_BEGIN(Grid);
template<int N, class Vec>
Lattice<iScalar<iScalar<iScalar<Vec> > > > Determinant(const Lattice<iScalar<iScalar<iMatrix<Vec, N> > > > &Umu)
{
GridBase *grid=Umu.Grid();
auto lvol = grid->lSites();
Lattice<iScalar<iScalar<iScalar<Vec> > > > ret(grid);
typedef typename Vec::scalar_type scalar;
autoView(Umu_v,Umu,CpuRead);
autoView(ret_v,ret,CpuWrite);
thread_for(site,lvol,{
Eigen::MatrixXcd EigenU = Eigen::MatrixXcd::Zero(N,N);
Coordinate lcoor;
grid->LocalIndexToLocalCoor(site, lcoor);
iScalar<iScalar<iMatrix<scalar, N> > > Us;
peekLocalSite(Us, Umu_v, lcoor);
for(int i=0;i<N;i++){
for(int j=0;j<N;j++){
scalar tmp= Us()()(i,j);
ComplexD ztmp(real(tmp),imag(tmp));
EigenU(i,j)=ztmp;
}}
ComplexD detD = EigenU.determinant();
typename Vec::scalar_type det(detD.real(),detD.imag());
pokeLocalSite(det,ret_v,lcoor);
});
return ret;
}
template<int N, class Vec>
static void ProjectSUn(Lattice<iScalar<iScalar<iMatrix<Vec, N> > > > &Umu)
{
Umu = ProjectOnGroup(Umu);
auto det = Determinant(Umu);
det = conjugate(det);
for(int i=0;i<N;i++){
auto element = PeekIndex<ColourIndex>(Umu,N-1,i);
element = element * det;
PokeIndex<ColourIndex>(Umu,element,Nc-1,i);
}
}
template<int N,class Vec>
static void ProjectSUn(Lattice<iVector<iScalar<iMatrix<Vec, N> >,Nd> > &U)
{
GridBase *grid=U.Grid();
// Reunitarise
for(int mu=0;mu<Nd;mu++){
auto Umu = PeekIndex<LorentzIndex>(U,mu);
Umu = ProjectOnGroup(Umu);
ProjectSUn(Umu);
PokeIndex<LorentzIndex>(U,Umu,mu);
}
}
template <int ncolour>
class SU {
public:
@ -741,8 +796,14 @@ public:
typedef Lattice<vMatrixType> LatticeMatrixType;
LatticeMatrixType Umu(out.Grid());
LatticeMatrixType tmp(out.Grid());
for (int mu = 0; mu < Nd; mu++) {
LieRandomize(pRNG, Umu, 1.0);
// LieRandomize(pRNG, Umu, 1.0);
// PokeIndex<LorentzIndex>(out, Umu, mu);
gaussian(pRNG,Umu);
tmp = Ta(Umu);
taExp(tmp,Umu);
ProjectSUn(Umu);
PokeIndex<LorentzIndex>(out, Umu, mu);
}
}
@ -799,12 +860,12 @@ public:
};
template<int N>
LatticeComplexD Determinant(const Lattice<iScalar<iScalar<iMatrix<vComplexD, N> > > > &Umu)
Lattice<iScalar<iScalar<iMatrix<vComplexD, N> > > > Inverse(const Lattice<iScalar<iScalar<iMatrix<vComplexD, N> > > > &Umu)
{
GridBase *grid=Umu.Grid();
auto lvol = grid->lSites();
LatticeComplexD ret(grid);
Lattice<iScalar<iScalar<iMatrix<vComplexD, N> > > > ret(grid);
autoView(Umu_v,Umu,CpuRead);
autoView(ret_v,ret,CpuWrite);
thread_for(site,lvol,{
@ -812,42 +873,21 @@ LatticeComplexD Determinant(const Lattice<iScalar<iScalar<iMatrix<vComplexD, N>
Coordinate lcoor;
grid->LocalIndexToLocalCoor(site, lcoor);
iScalar<iScalar<iMatrix<ComplexD, N> > > Us;
iScalar<iScalar<iMatrix<ComplexD, N> > > Ui;
peekLocalSite(Us, Umu_v, lcoor);
for(int i=0;i<N;i++){
for(int j=0;j<N;j++){
EigenU(i,j) = Us()()(i,j);
}}
ComplexD det = EigenU.determinant();
pokeLocalSite(det,ret_v,lcoor);
Eigen::MatrixXcd EigenUinv = EigenU.inverse();
for(int i=0;i<N;i++){
for(int j=0;j<N;j++){
Ui()()(i,j) = EigenUinv(i,j);
}}
pokeLocalSite(Ui,ret_v,lcoor);
});
return ret;
}
template<int N>
static void ProjectSUn(Lattice<iScalar<iScalar<iMatrix<vComplexD, N> > > > &Umu)
{
Umu = ProjectOnGroup(Umu);
auto det = Determinant(Umu);
det = conjugate(det);
for(int i=0;i<N;i++){
auto element = PeekIndex<ColourIndex>(Umu,N-1,i);
element = element * det;
PokeIndex<ColourIndex>(Umu,element,Nc-1,i);
}
}
template<int N>
static void ProjectSUn(Lattice<iVector<iScalar<iMatrix<vComplexD, N> >,Nd> > &U)
{
GridBase *grid=U.Grid();
// Reunitarise
for(int mu=0;mu<Nd;mu++){
auto Umu = PeekIndex<LorentzIndex>(U,mu);
Umu = ProjectOnGroup(Umu);
ProjectSUn(Umu);
PokeIndex<LorentzIndex>(U,Umu,mu);
}
}
// Explicit specialisation for SU(3).
// Explicit specialisation for SU(3).
static void


@ -51,6 +51,7 @@ public:
typedef Lattice<iVector<iScalar<iMatrix<vComplexF, Dimension> >, Nd> > LatticeAdjFieldF;
typedef Lattice<iVector<iScalar<iMatrix<vComplexD, Dimension> >, Nd> > LatticeAdjFieldD;
typedef Lattice<iScalar<iScalar<iVector<vComplex, Dimension> > > > LatticeAdjVector;
template <class cplx>
static void generator(int Index, iSUnAdjointMatrix<cplx> &iAdjTa) {


@ -123,7 +123,7 @@ public:
}
if ( permute_slice ) {
int ptype =grid->PermuteType(d);
uint8_t mask =grid->Nsimd() >> (ptype + 1);
uint8_t mask =0x1<<ptype;
SE._permute |= mask;
}
}


@ -451,7 +451,6 @@ public:
else if ( this->fullDirichlet ) DslashLogDirichlet();
else DslashLogFull();
acceleratorCopySynchronise();
// Everyone agrees we are all done
_grid->StencilBarrier();
}
////////////////////////////////////////////////////////////////////////
@ -540,6 +539,7 @@ public:
compress.Point(point);
HaloGatherDir(source,compress,point,face_idx);
}
accelerator_barrier();
face_table_computed=1;
assert(u_comm_offset==_unified_buffer_size);
@ -705,7 +705,7 @@ public:
}
}
}
std::cout << "BuildSurfaceList size is "<<surface_list.size()<<std::endl;
std::cout << GridLogDebug << "BuildSurfaceList size is "<<surface_list.size()<<std::endl;
}
/// Introduce a block structure and switch off comms on boundaries
void DirichletBlock(const Coordinate &dirichlet_block)


@ -90,10 +90,12 @@ template<class vtype,int N> accelerator_inline iVector<vtype,N> ProjectOnGroup(c
template<class vtype,int N, typename std::enable_if< GridTypeMapper<vtype>::TensorLevel == 0 >::type * =nullptr>
accelerator_inline iMatrix<vtype,N> ProjectOnGroup(const iMatrix<vtype,N> &arg)
{
typedef typename iMatrix<vtype,N>::scalar_type scalar;
// need a check for the group type?
iMatrix<vtype,N> ret(arg);
vtype nrm;
vtype inner;
scalar one(1.0);
for(int c1=0;c1<N;c1++){
// Normalises row c1
@ -102,7 +104,7 @@ accelerator_inline iMatrix<vtype,N> ProjectOnGroup(const iMatrix<vtype,N> &arg)
inner += innerProduct(ret._internal[c1][c2],ret._internal[c1][c2]);
nrm = sqrt(inner);
nrm = 1.0/nrm;
nrm = one/nrm;
for(int c2=0;c2<N;c2++)
ret._internal[c1][c2]*= nrm;
@ -127,7 +129,7 @@ accelerator_inline iMatrix<vtype,N> ProjectOnGroup(const iMatrix<vtype,N> &arg)
inner += innerProduct(ret._internal[c1][c2],ret._internal[c1][c2]);
nrm = sqrt(inner);
nrm = 1.0/nrm;
nrm = one/nrm;
for(int c2=0;c2<N;c2++)
ret._internal[c1][c2]*= nrm;
}


@ -55,7 +55,7 @@ template<class vtype, int N> accelerator_inline iVector<vtype, N> Exponentiate(c
// Specialisation: Cayley-Hamilton exponential for SU(3)
#ifndef GRID_ACCELERATED
#if 0
template<class vtype, typename std::enable_if< GridTypeMapper<vtype>::TensorLevel == 0>::type * =nullptr>
accelerator_inline iMatrix<vtype,3> Exponentiate(const iMatrix<vtype,3> &arg, RealD alpha , Integer Nexp = DEFAULT_MAT_EXP )
{

HMC/FTHMC2p1f.cc (new file, 224 lines)

@ -0,0 +1,224 @@
/*************************************************************************************
Grid physics library, www.github.com/paboyle/Grid
Copyright (C) 2023
Author: Peter Boyle <pabobyle@ph.ed.ac.uk>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
See the full license in the file "LICENSE" in the top level distribution
directory
*************************************************************************************/
/* END LEGAL */
#include <Grid/Grid.h>
#include <Grid/qcd/smearing/GaugeConfigurationMasked.h>
#include <Grid/qcd/smearing/JacobianAction.h>
using namespace Grid;
int main(int argc, char **argv)
{
std::cout << std::setprecision(12);
Grid_init(&argc, &argv);
int threads = GridThread::GetThreads();
// here make a routine to print all the relevant information on the run
std::cout << GridLogMessage << "Grid is setup to use " << threads << " threads" << std::endl;
// Typedefs to simplify notation
typedef WilsonImplR FermionImplPolicy;
typedef MobiusFermionD FermionAction;
typedef typename FermionAction::FermionField FermionField;
typedef Grid::XmlReader Serialiser;
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
IntegratorParameters MD;
// typedef GenericHMCRunner<LeapFrog> HMCWrapper;
// MD.name = std::string("Leap Frog");
// typedef GenericHMCRunner<ForceGradient> HMCWrapper;
// MD.name = std::string("Force Gradient");
typedef GenericHMCRunner<MinimumNorm2> HMCWrapper;
MD.name = std::string("MinimumNorm2");
MD.MDsteps = 12;
MD.trajL = 1.0;
HMCparameters HMCparams;
HMCparams.StartTrajectory = 0;
HMCparams.Trajectories = 200;
HMCparams.NoMetropolisUntil= 20;
// "[HotStart, ColdStart, TepidStart, CheckpointStart]\n";
HMCparams.StartingType =std::string("HotStart");
HMCparams.MD = MD;
HMCWrapper TheHMC(HMCparams);
// Grid from the command line arguments --grid and --mpi
TheHMC.Resources.AddFourDimGrid("gauge"); // use default simd lanes decomposition
CheckpointerParameters CPparams;
CPparams.config_prefix = "ckpoint_EODWF_lat";
CPparams.smeared_prefix = "ckpoint_EODWF_lat_smr";
CPparams.rng_prefix = "ckpoint_EODWF_rng";
CPparams.saveInterval = 1;
CPparams.saveSmeared = true;
CPparams.format = "IEEE64BIG";
TheHMC.Resources.LoadNerscCheckpointer(CPparams);
RNGModuleParameters RNGpar;
RNGpar.serial_seeds = "1 2 3 4 5";
RNGpar.parallel_seeds = "6 7 8 9 10";
TheHMC.Resources.SetRNGSeeds(RNGpar);
// Construct observables
// here there is too much indirection
typedef PlaquetteMod<HMCWrapper::ImplPolicy> PlaqObs;
TheHMC.Resources.AddObservable<PlaqObs>();
//////////////////////////////////////////////
const int Ls = 16;
Real beta = 2.13;
Real light_mass = 0.01;
Real strange_mass = 0.04;
Real pv_mass = 1.0;
RealD M5 = 1.8;
RealD b = 1.0; // Mobius parameter; scale factor b+c = 1.0 here
RealD c = 0.0;
OneFlavourRationalParams OFRp;
OFRp.lo = 1.0e-2;
OFRp.hi = 64;
OFRp.MaxIter = 10000;
OFRp.tolerance= 1.0e-10;
OFRp.degree = 14;
OFRp.precision= 40;
std::vector<Real> hasenbusch({ 0.1 });
auto GridPtr = TheHMC.Resources.GetCartesian();
auto GridRBPtr = TheHMC.Resources.GetRBCartesian();
auto FGrid = SpaceTimeGrid::makeFiveDimGrid(Ls,GridPtr);
auto FrbGrid = SpaceTimeGrid::makeFiveDimRedBlackGrid(Ls,GridPtr);
IwasakiGaugeActionR GaugeAction(beta);
// temporarily need a gauge field
LatticeGaugeField U(GridPtr);
LatticeGaugeField Uhot(GridPtr);
// These lines are unnecessary if BC are all periodic
std::vector<Complex> boundary = {1,1,1,-1};
FermionAction::ImplParams Params(boundary);
double StoppingCondition = 1e-10;
double MaxCGIterations = 30000;
ConjugateGradient<FermionField> CG(StoppingCondition,MaxCGIterations);
bool ApplySmearing = true;
////////////////////////////////////
// Collect actions
////////////////////////////////////
ActionLevel<HMCWrapper::Field> Level1(1);
ActionLevel<HMCWrapper::Field> Level2(2);
ActionLevel<HMCWrapper::Field> Level3(4);
////////////////////////////////////
// Strange action
////////////////////////////////////
MobiusEOFAFermionD Strange_Op_L (U , *FGrid , *FrbGrid , *GridPtr , *GridRBPtr , strange_mass, strange_mass, pv_mass, 0.0, -1, M5, b, c);
MobiusEOFAFermionD Strange_Op_R (U , *FGrid , *FrbGrid , *GridPtr , *GridRBPtr , pv_mass, strange_mass, pv_mass, -1.0, 1, M5, b, c);
ExactOneFlavourRatioPseudoFermionAction<FermionImplPolicy>
EOFA(Strange_Op_L, Strange_Op_R,
CG,
CG, CG,
CG, CG,
OFRp, false);
EOFA.is_smeared = ApplySmearing;
Level1.push_back(&EOFA);
////////////////////////////////////
// up down action
////////////////////////////////////
std::vector<Real> light_den;
std::vector<Real> light_num;
int n_hasenbusch = hasenbusch.size();
light_den.push_back(light_mass);
for(int h=0;h<n_hasenbusch;h++){
light_den.push_back(hasenbusch[h]);
light_num.push_back(hasenbusch[h]);
}
light_num.push_back(pv_mass);
std::vector<FermionAction *> Numerators;
std::vector<FermionAction *> Denominators;
std::vector<TwoFlavourEvenOddRatioPseudoFermionAction<FermionImplPolicy> *> Quotients;
for(int h=0;h<n_hasenbusch+1;h++){
std::cout << GridLogMessage << " 2f quotient Action "<< light_num[h] << " / " << light_den[h]<< std::endl;
Numerators.push_back (new FermionAction(U,*FGrid,*FrbGrid,*GridPtr,*GridRBPtr,light_num[h],M5,b,c, Params));
Denominators.push_back(new FermionAction(U,*FGrid,*FrbGrid,*GridPtr,*GridRBPtr,light_den[h],M5,b,c, Params));
Quotients.push_back (new TwoFlavourEvenOddRatioPseudoFermionAction<FermionImplPolicy>(*Numerators[h],*Denominators[h],CG,CG));
}
for(int h=0;h<n_hasenbusch+1;h++){
Quotients[h]->is_smeared = ApplySmearing;
Level1.push_back(Quotients[h]);
}
/////////////////////////////////////////////////////////////
// lnDetJacobianAction
/////////////////////////////////////////////////////////////
double rho = 0.1; // smearing parameter
int Nsmear = 1; // number of full smearing sweeps
int Nstep = 8*Nsmear; // total masked smearing steps (2*Nd per sweep) - must be a multiple of 2*Nd
Smear_Stout<HMCWrapper::ImplPolicy> Stout(rho);
SmearedConfigurationMasked<HMCWrapper::ImplPolicy> SmearingPolicy(GridPtr, Nstep, Stout);
JacobianAction<HMCWrapper::ImplPolicy> Jacobian(&SmearingPolicy);
if( ApplySmearing ) Level2.push_back(&Jacobian);
std::cout << GridLogMessage << " Built the Jacobian "<< std::endl;
/////////////////////////////////////////////////////////////
// Gauge action
/////////////////////////////////////////////////////////////
// GaugeAction.is_smeared = ApplySmearing;
GaugeAction.is_smeared = true;
Level3.push_back(&GaugeAction);
std::cout << GridLogMessage << " ************************************************"<< std::endl;
std::cout << GridLogMessage << " Action complete -- NO FERMIONS FOR NOW -- FIXME"<< std::endl;
std::cout << GridLogMessage << " ************************************************"<< std::endl;
std::cout << GridLogMessage << std::endl;
std::cout << GridLogMessage << std::endl;
std::cout << GridLogMessage << " Running the FT HMC "<< std::endl;
TheHMC.TheAction.push_back(Level1);
TheHMC.TheAction.push_back(Level2);
TheHMC.TheAction.push_back(Level3);
TheHMC.Run(SmearingPolicy); // for smearing
Grid_finalize();
} // main


@ -146,6 +146,8 @@ NAMESPACE_END(Grid);
int main(int argc, char **argv) {
using namespace Grid;
std::cout << " Grid Initialise "<<std::endl;
Grid_init(&argc, &argv);
CartesianCommunicator::BarrierWorld();
@ -170,24 +172,24 @@ int main(int argc, char **argv) {
IntegratorParameters MD;
// typedef GenericHMCRunner<LeapFrog> HMCWrapper;
// MD.name = std::string("Leap Frog");
typedef GenericHMCRunner<ForceGradient> HMCWrapper;
MD.name = std::string("Force Gradient");
//typedef GenericHMCRunner<MinimumNorm2> HMCWrapper;
// MD.name = std::string("MinimumNorm2");
// typedef GenericHMCRunner<ForceGradient> HMCWrapper;
// MD.name = std::string("Force Gradient");
typedef GenericHMCRunner<MinimumNorm2> HMCWrapper;
MD.name = std::string("MinimumNorm2");
// TrajL = 2
// 4/2 => 0.6 dH
// 3/3 => 0.8 dH .. depth 3, slower
//MD.MDsteps = 4;
MD.MDsteps = 12;
MD.MDsteps = 14;
MD.trajL = 0.5;
HMCparameters HMCparams;
HMCparams.StartTrajectory = 1077;
HMCparams.Trajectories = 1;
HMCparams.Trajectories = 20;
HMCparams.NoMetropolisUntil= 0;
// "[HotStart, ColdStart, TepidStart, CheckpointStart]\n";
// HMCparams.StartingType =std::string("ColdStart");
HMCparams.StartingType =std::string("CheckpointStart");
HMCparams.StartingType =std::string("ColdStart");
// HMCparams.StartingType =std::string("CheckpointStart");
HMCparams.MD = MD;
HMCWrapper TheHMC(HMCparams);
@ -223,7 +225,7 @@ int main(int argc, char **argv) {
Real pv_mass = 1.0;
// std::vector<Real> hasenbusch({ 0.01, 0.045, 0.108, 0.25, 0.51 , pv_mass });
// std::vector<Real> hasenbusch({ light_mass, 0.01, 0.045, 0.108, 0.25, 0.51 , pv_mass });
std::vector<Real> hasenbusch({ 0.005, 0.0145, 0.045, 0.108, 0.25, 0.51 , pv_mass }); // Updated
std::vector<Real> hasenbusch({ 0.005, 0.0145, 0.045, 0.108, 0.25, 0.51 }); // Updated
// std::vector<Real> hasenbusch({ light_mass, 0.0145, 0.045, 0.108, 0.25, 0.51 , 0.75 , pv_mass });
auto GridPtr = TheHMC.Resources.GetCartesian();
@ -275,10 +277,10 @@ int main(int argc, char **argv) {
// double StoppingCondition = 1e-14;
// double MDStoppingCondition = 1e-9;
double StoppingCondition = 1e-8;
double MDStoppingCondition = 1e-7;
double MDStoppingConditionLoose = 1e-7;
double MDStoppingConditionStrange = 1e-7;
double StoppingCondition = 1e-9;
double MDStoppingCondition = 1e-8;
double MDStoppingConditionLoose = 1e-8;
double MDStoppingConditionStrange = 1e-8;
double MaxCGIterations = 300000;
ConjugateGradient<FermionField> CG(StoppingCondition,MaxCGIterations);
ConjugateGradient<FermionField> MDCG(MDStoppingCondition,MaxCGIterations);


@ -0,0 +1,90 @@
Branch: develop
Files:
Grid/lattice/PaddedCell.h -- Halo exchange
tests/Test_general_stencil.cc -- test local off axis stencil addressing
tests/debug/Test_padded_cell.cc -- test PaddedCell halo exchange and the General local stencil by computing ALL plaquettes on lattice
Functionality:
-- extend a lattice field:
Grid/lattice/PaddedCell.h
// Constructor
PaddedCell(int _depth,GridCartesian *_grid)
// Expand a field "in" to depth "d"
template<class vobj>
inline Lattice<vobj> Exchange(Lattice<vobj> &in)
// Take the "apple core" of in back to the smaller local volume (a minimal round-trip sketch follows the plaquette test below)
template<class vobj>
inline Lattice<vobj> Extract(Lattice<vobj> &in)
-- Plaquette test:
tests/debug/Test_padded_cell.cc
/////////////////////////////////////////////////
// Create a padded cell of extra padding depth=1
/////////////////////////////////////////////////
int depth = 1;
PaddedCell Ghost(depth,&GRID);
LatticeGaugeField Ughost = Ghost.Exchange(Umu);
///// Array for the site plaquette
GridBase *GhostGrid = Ughost.Grid();
LatticeComplex gplaq(GhostGrid);
std::vector<Coordinate> shifts;
for(int mu=0;mu<Nd;mu++){
for(int nu=mu+1;nu<Nd;nu++){
// Umu(x) Unu(x+mu) Umu^dag(x+nu) Unu^dag(x)
Coordinate shift_0(Nd,0);
Coordinate shift_mu(Nd,0); shift_mu[mu]=1;
Coordinate shift_nu(Nd,0); shift_nu[nu]=1;
shifts.push_back(shift_0);
shifts.push_back(shift_mu);
shifts.push_back(shift_nu);
shifts.push_back(shift_0);
}
}
GeneralLocalStencil gStencil(GhostGrid,shifts);
gplaq=Zero();
{
autoView( gp_v , gplaq, CpuWrite);
autoView( t_v , trplaq, CpuRead);
autoView( U_v , Ughost, CpuRead);
for(int ss=0;ss<gp_v.size();ss++){
int s=0;
for(int mu=0;mu<Nd;mu++){
for(int nu=mu+1;nu<Nd;nu++){
auto SE0 = gStencil.GetEntry(s+0,ss);
auto SE1 = gStencil.GetEntry(s+1,ss);
auto SE2 = gStencil.GetEntry(s+2,ss);
auto SE3 = gStencil.GetEntry(s+3,ss);
int o0 = SE0->_offset;
int o1 = SE1->_offset;
int o2 = SE2->_offset;
int o3 = SE3->_offset;
auto U0 = U_v[o0](mu);
auto U1 = U_v[o1](nu);
auto U2 = adj(U_v[o2](mu));
auto U3 = adj(U_v[o3](nu));
gpermute(U0,SE0->_permute);
gpermute(U1,SE1->_permute);
gpermute(U2,SE2->_permute);
gpermute(U3,SE3->_permute);
gp_v[ss]() =gp_v[ss]() + trace( U0*U1*U2*U3 );
s=s+4;
}
}
}
}
cplaq = Ghost.Extract(gplaq);
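A minimal Exchange/Extract round trip, complementing the plaquette test above (a sketch assuming Extract returns exactly the interior that Exchange padded; the field and RNG names are illustrative):
#include <Grid/Grid.h>
#include <Grid/lattice/PaddedCell.h>
using namespace Grid;
int main(int argc, char **argv)
{
  Grid_init(&argc,&argv);
  GridCartesian GRID(GridDefaultLatt(),
                     GridDefaultSimd(Nd,vComplexD::Nsimd()),
                     GridDefaultMpi());
  GridParallelRNG pRNG(&GRID); pRNG.SeedFixedIntegers(std::vector<int>({45,12,81,9}));
  LatticeGaugeField Umu(&GRID);
  SU<Nc>::HotConfiguration(pRNG,Umu);
  int depth = 1;
  PaddedCell Ghost(depth,&GRID);                    // pad by one site in every direction
  LatticeGaugeField Ughost = Ghost.Exchange(Umu);   // enlarged, halo-exchanged field
  LatticeGaugeField Uback  = Ghost.Extract(Ughost); // back to the original local volume
  LatticeGaugeField diff(&GRID); diff = Uback - Umu;
  std::cout << GridLogMessage << " Exchange/Extract round-trip |diff|^2 = " << norm2(diff) << std::endl;
  Grid_finalize();
}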


@ -60,7 +60,7 @@ while test $# -gt 0; do
;;
--cxxflags)
echo @GRID_CXXFLAGS@
echo @GRID_CXXFLAGS@ -I@prefix@/include
;;
--cxx)
@ -72,11 +72,11 @@ while test $# -gt 0; do
;;
--ldflags)
echo @GRID_LDFLAGS@
echo @GRID_LDFLAGS@ -L@prefix@/lib
;;
--libs)
echo @GRID_LIBS@
echo @GRID_LIBS@ -lGrid
;;
--summary)


@ -0,0 +1,44 @@
#!/bin/bash -l
#SBATCH --job-name=bench_lehner
#SBATCH --partition=small-g
#SBATCH --nodes=2
#SBATCH --ntasks-per-node=8
#SBATCH --cpus-per-task=7
#SBATCH --gpus-per-node=8
#SBATCH --time=00:10:00
#SBATCH --account=project_465000546
#SBATCH --gpu-bind=none
#SBATCH --exclusive
#SBATCH --mem=0
CPU_BIND="map_cpu:48,56,32,40,16,24,1,8"
echo $CPU_BIND
cat << EOF > select_gpu
#!/bin/bash
export GPU_MAP=(0 1 2 3 4 5 6 7)
export GPU=\${GPU_MAP[\$SLURM_LOCALID]}
export HIP_VISIBLE_DEVICES=\$GPU
unset ROCR_VISIBLE_DEVICES
echo RANK \$SLURM_LOCALID using GPU \$GPU
exec \$*
EOF
chmod +x ./select_gpu
root=/scratch/project_465000546/boylepet/Grid/systems/Lumi
source ${root}/sourceme.sh
export OMP_NUM_THREADS=7
export MPICH_GPU_SUPPORT_ENABLED=1
export MPICH_SMP_SINGLE_COPY_MODE=XPMEM
for vol in 16.16.16.64 32.32.32.64 32.32.32.128
do
srun --cpu-bind=${CPU_BIND} ./select_gpu ./Benchmark_dwf_fp32 --mpi 2.2.2.2 --accelerator-threads 8 --comms-overlap --shm 2048 --shm-mpi 0 --grid $vol > log.shm0.ov.$vol
#srun --cpu-bind=${CPU_BIND} ./select_gpu ./Benchmark_dwf_fp32 --mpi 2.2.2.2 --accelerator-threads 8 --comms-overlap --shm 2048 --shm-mpi 1 --grid $vol > log.shm1.ov.$vol
srun --cpu-bind=${CPU_BIND} ./select_gpu ./Benchmark_dwf_fp32 --mpi 2.2.2.2 --accelerator-threads 8 --comms-sequential --shm 2048 --shm-mpi 0 --grid $vol > log.shm0.seq.$vol
#srun --cpu-bind=${CPU_BIND} ./select_gpu ./Benchmark_dwf_fp32 --mpi 2.2.2.2 --accelerator-threads 8 --comms-sequential --shm 2048 --shm-mpi 1 --grid $vol > log.shm1.seq.$vol
done


@ -0,0 +1,30 @@
spack load c-lime
spack load gmp
spack load mpfr
CLIME=`spack find --paths c-lime | grep c-lime| cut -c 15-`
GMP=`spack find --paths gmp | grep gmp | cut -c 12-`
MPFR=`spack find --paths mpfr | grep mpfr | cut -c 13-`
echo clime X$CLIME
echo gmp X$GMP
echo mpfr X$MPFR
../../configure \
--enable-comms=mpi-auto \
--with-lime=$CLIME \
--enable-unified=no \
--enable-shm=nvlink \
--enable-accelerator=hip \
--enable-gen-simd-width=64 \
--enable-simd=GPU \
--enable-accelerator-cshift \
--with-gmp=$GMP \
--with-mpfr=$MPFR \
--with-fftw=$FFTW_DIR/.. \
--disable-fermion-reps \
--disable-gparity \
CXX=hipcc MPICXX=mpicxx \
CXXFLAGS="-fPIC --offload-arch=gfx90a -I/opt/rocm/include/ -std=c++14 -I/opt/cray/pe/mpich/8.1.23/ofi/gnu/9.1/include" \
LDFLAGS="-L/opt/cray/pe/mpich/8.1.23/ofi/gnu/9.1/lib -lmpi -L/opt/cray/pe/mpich/8.1.23/gtl/lib -lmpi_gtl_hsa -lamdhip64 -fopenmp"

systems/Lumi/sourceme.sh (new file, 5 lines)

@ -0,0 +1,5 @@
source ~/spack/share/spack/setup-env.sh
module load CrayEnv LUMI/22.12 partition/G cray-fftw/3.3.10.1 rocm
spack load c-lime
spack load gmp
spack load mpfr

systems/OEM/README (new file, 53 lines)

@ -0,0 +1,53 @@
1. Prerequisites:
===================
Make sure you have the latest Intel icpx release loaded (via modules or similar)
Make sure you have a SYCL-aware MPICH or Intel MPI loaded (assumed to be available as mpicxx)
2. Obtain Grid:
===================
bash$
git clone https://github.com/paboyle/Grid
cd Grid
./bootstrap.sh
cd systems/PVC
3. Build Grid:
===================
The configure command, also stored in the file config-command, is:
bash$
../../configure \
--enable-simd=GPU \
--enable-gen-simd-width=64 \
--enable-comms=mpi-auto \
--enable-accelerator-cshift \
--disable-gparity \
--disable-fermion-reps \
--enable-shm=nvlink \
--enable-accelerator=sycl \
--enable-unified=no \
MPICXX=mpicxx \
CXX=icpx \
LDFLAGS="-fiopenmp -fsycl -fsycl-device-code-split=per_kernel -fsycl-device-lib=all -lze_loader " \
CXXFLAGS="-fiopenmp -fsycl-unnamed-lambda -fsycl -Wno-tautological-compare "
make all
4. Run a benchmark:
===================
*** Assumes interactive access to node. ***
run Benchmark_dwf_fp32 using benchmarks/bench.sh
bash$
cd benchmarks
./bench.sh

systems/OEM/benchmarks/bench.sh (new executable file, 18 lines)

@ -0,0 +1,18 @@
#!/bin/bash
export EnableImplicitScaling=0
export ZE_ENABLE_PCI_ID_DEVICE_ORDER=1
export ZE_AFFINITY_MASK=$gpu_id.$tile_id
export ONEAPI_DEVICE_FILTER=gpu,level_zero
export SYCL_PI_LEVEL_ZERO_DEVICE_SCOPE_EVENTS=0
export SYCL_PI_LEVEL_ZERO_USE_IMMEDIATE_COMMANDLISTS=1
export SYCL_PI_LEVEL_ZERO_USE_COPY_ENGINE=0:2
export SYCL_PI_LEVEL_ZERO_USE_COPY_ENGINE_FOR_D2D_COPY=1
mpiexec -launcher ssh -n 1 -host localhost ./select_gpu.sh ./Benchmark_dwf_fp32 --mpi 1.1.1.1 --grid 32.32.32.32 --accelerator-threads 16 --shm-mpi 1 --shm 2048 --device-mem 32768 | tee 1tile.log
mpiexec -launcher ssh -n 2 -host localhost ./select_gpu.sh ./Benchmark_dwf_fp32 --mpi 1.1.1.2 --grid 32.32.32.64 --accelerator-threads 16 --shm-mpi 1 --shm 2048 --device-mem 32768 | tee 2tile.log
#mpiexec -launcher ssh -n 4 -host localhost ./select_gpu.sh ./Benchmark_dwf_fp32 --mpi 1.1.2.2 --grid 16.16.64.64 --accelerator-threads 16 --shm-mpi 0 --shm 2048 --device-mem 32768 | tee 4tile.log
#mpiexec -launcher ssh -n 8 -host localhost ./select_gpu.sh ./Benchmark_dwf_fp32 --mpi 1.1.2.4 --grid 16.16.64.128 --accelerator-threads 16 --shm-mpi 0 --shm 2048 --device-mem 32768 | tee 8tile.log


@ -0,0 +1,13 @@
#!/bin/bash
num_tile=2
gpu_id=$(( (MPI_LOCAL_RANKID % num_tile ) ))
tile_id=$((MPI_LOCAL_RANKID / num_tile))
export ZE_AFFINITY_MASK=$gpu_id.$tile_id
echo "local rank $MPI_LOCALRANKID ; ZE_AFFINITY_MASK=$ZE_AFFINITY_MASK"
"$@"


@ -0,0 +1,15 @@
../../configure \
--enable-simd=GPU \
--enable-gen-simd-width=64 \
--enable-comms=mpi-auto \
--enable-accelerator-cshift \
--disable-gparity \
--disable-fermion-reps \
--enable-shm=nvlink \
--enable-accelerator=sycl \
--enable-unified=no \
MPICXX=mpicxx \
CXX=icpx \
LDFLAGS="-fiopenmp -fsycl -fsycl-device-code-split=per_kernel -fsycl-device-lib=all -lze_loader " \
CXXFLAGS="-fiopenmp -fsycl-unnamed-lambda -fsycl -Wno-tautological-compare "

systems/OEM/setup.sh (new file, 3 lines)

@ -0,0 +1,3 @@
export https_proxy=http://proxy-chain.intel.com:911
module load intel-release
module load intel/mpich


@ -0,0 +1,46 @@
#!/bin/bash
#PBS -l select=1:system=sunspot,place=scatter
#PBS -A LatticeQCD_aesp_CNDA
#PBS -l walltime=01:00:00
#PBS -N dwf
#PBS -k doe
HDIR=/home/paboyle/
module use /soft/testing/modulefiles/
module load intel-UMD23.05.25593.11/23.05.25593.11
module load tools/pti-gpu
export LD_LIBRARY_PATH=$HDIR/tools/lib64:$LD_LIBRARY_PATH
export PATH=$HDIR/tools/bin:$PATH
export TZ='/usr/share/zoneinfo/US/Central'
export OMP_PROC_BIND=spread
export OMP_NUM_THREADS=3
unset OMP_PLACES
cd $PBS_O_WORKDIR
qsub jobscript.pbs
echo Jobid: $PBS_JOBID
echo Running on host `hostname`
echo Running on nodes `cat $PBS_NODEFILE`
echo NODES
cat $PBS_NODEFILE
NNODES=`wc -l < $PBS_NODEFILE`
NRANKS=12 # Number of MPI ranks per node
NDEPTH=4 # Number of hardware threads per rank, spacing between MPI ranks on a node
NTHREADS=$OMP_NUM_THREADS # Number of OMP threads per rank, given to OMP_NUM_THREADS
NTOTRANKS=$(( NNODES * NRANKS ))
echo "NUM_NODES=${NNODES} TOTAL_RANKS=${NTOTRANKS} RANKS_PER_NODE=${NRANKS} THREADS_PER_RANK=${OMP_NUM_THREADS}"
echo "OMP_PROC_BIND=$OMP_PROC_BIND OMP_PLACES=$OMP_PLACES"
CMD="mpiexec -np ${NTOTRANKS} -ppn ${NRANKS} -d ${NDEPTH} --cpu-bind=depth -envall \
./gpu_tile_compact.sh \
./Benchmark_dwf_fp32 --mpi 1.1.2.6 --grid 16.32.64.192 --comms-overlap \
--shm-mpi 0 --shm 2048 --device-mem 32000 --accelerator-threads 32"


@ -0,0 +1,52 @@
#!/bin/bash
display_help() {
echo " Will map gpu tile to rank in compact and then round-robin fashion"
echo " Usage (only work for one node of ATS/PVC):"
echo " mpiexec --np N gpu_tile_compact.sh ./a.out"
echo
echo " Example 3 GPU of 2 Tiles with 7 Ranks:"
echo " 0 Rank 0.0"
echo " 1 Rank 0.1"
echo " 2 Rank 1.0"
echo " 3 Rank 1.1"
echo " 4 Rank 2.0"
echo " 5 Rank 2.1"
echo " 6 Rank 0.0"
echo
echo " Hacked together by apl@anl.gov, please contact if bug found"
exit 1
}
#This gives the exact GPU count i915 knows about; udev is used to enumerate only the devices with physical presence.
#works? num_gpu=$(/usr/bin/udevadm info /sys/module/i915/drivers/pci\:i915/* |& grep -v Unknown | grep -c "P: /devices")
num_gpu=6
num_tile=2
if [ "$#" -eq 0 ] || [ "$1" == "--help" ] || [ "$1" == "-h" ] || [ "$num_gpu" = 0 ]; then
display_help
fi
gpu_id=$(( (PALS_LOCAL_RANKID / num_tile ) % num_gpu ))
tile_id=$((PALS_LOCAL_RANKID % num_tile))
unset EnableWalkerPartition
export EnableImplicitScaling=0
export ZE_ENABLE_PCI_ID_DEVICE_ORDER=1
export ZE_AFFINITY_MASK=$gpu_id.$tile_id
export ONEAPI_DEVICE_FILTER=gpu,level_zero
export SYCL_PI_LEVEL_ZERO_DEVICE_SCOPE_EVENTS=0
export SYCL_PI_LEVEL_ZERO_USE_IMMEDIATE_COMMANDLISTS=1
export SYCL_PI_LEVEL_ZERO_USE_COPY_ENGINE=0:2
export SYCL_PI_LEVEL_ZERO_USE_COPY_ENGINE_FOR_D2D_COPY=1
#export SYCL_PI_LEVEL_ZERO_USM_RESIDENT=1
echo "rank $PALS_RANKID ; local rank $PALS_LOCAL_RANKID ; ZE_AFFINITY_MASK=$ZE_AFFINITY_MASK"
if [ $PALS_LOCAL_RANKID = 0 ]
then
onetrace --chrome-device-timeline "$@"
# "$@"
else
"$@"
fi


@ -0,0 +1,16 @@
TOOLS=$HOME/tools
../../configure \
--enable-simd=GPU \
--enable-gen-simd-width=64 \
--enable-comms=mpi-auto \
--enable-accelerator-cshift \
--disable-gparity \
--disable-fermion-reps \
--enable-shm=nvlink \
--enable-accelerator=sycl \
--enable-unified=no \
MPICXX=mpicxx \
CXX=icpx \
LDFLAGS="-fiopenmp -fsycl -fsycl-device-code-split=per_kernel -fsycl-device-lib=all -lze_loader -lapmidg -L$TOOLS/lib64/" \
CXXFLAGS="-fiopenmp -fsycl-unnamed-lambda -fsycl -I$INSTALL/include -Wno-tautological-compare -I$HOME/ -I$TOOLS/include"


@ -1,2 +1,4 @@
CXX=mpicxx-openmpi-mp CXXFLAGS=-I/opt/local/include/ LDFLAGS=-L/opt/local/lib/ ../../configure --enable-simd=GEN --enable-debug --enable-comms=mpi --enable-unified=yes
BREW=/opt/local/
MPICXX=mpicxx CXX=c++-12 ../../configure --enable-simd=GEN --enable-comms=mpi-auto --enable-unified=yes --prefix $HOME/QCD/GridInstall --with-lime=/Users/peterboyle/QCD/SciDAC/install/ --with-openssl=$BREW --disable-fermion-reps --disable-gparity --disable-debug


@ -115,6 +115,7 @@ int main(int argc, char ** argv)
if (SE->_permute & 0x2 ) { permute(check[i],tmp,1); tmp=check[i];}
if (SE->_permute & 0x4 ) { permute(check[i],tmp,2); tmp=check[i];}
if (SE->_permute & 0x8 ) { permute(check[i],tmp,3); tmp=check[i];}
// std::cout<<GridLogMessage<<"stencil["<<i<<"] "<< check[i]<< " perm "<<(uint32_t)SE->_permute <<std::endl;
}
Real nrmC = norm2(Check);
@ -138,18 +139,17 @@ int main(int argc, char ** argv)
ddiff = check -bar;
diff =norm2(ddiff);
if ( diff > 0){
std::cout <<"Coor (" << coor[0]<<","<<coor[1]<<","<<coor[2]<<","<<coor[3]
<<") " <<check<<" vs "<<bar<<std::endl;
std::cout <<"Diff at Coor (" << coor[0]<<","<<coor[1]<<","<<coor[2]<<","<<coor[3]
<<") stencil " <<check<<" vs cshift "<<bar<<std::endl;
}
}}}}
if (nrm > 1.0e-4) {
autoView( check , Check, CpuRead);
autoView( bar , Bar, CpuRead);
for(int i=0;i<check.size();i++){
std::cout << i<<" Check "<<check[i]<< "\n"<<i<<" Bar "<<bar[i]<<std::endl;
std::cout << i<<" ERROR Check \n"<<check[i]<< "\n"<<i<<" Bar \n"<<bar[i]<<std::endl;
}
}
if (nrm > 1.0e-4) exit(-1);

tests/core/Test_fft_pf.cc (new file, 307 lines)

@ -0,0 +1,307 @@
/*************************************************************************************
Grid physics library, www.github.com/paboyle/Grid
Source file: ./tests/core/Test_fft_pf.cc
Copyright (C) 2015
Author: Azusa Yamaguchi <ayamaguc@staffmail.ed.ac.uk>
Author: Peter Boyle <paboyle@ph.ed.ac.uk>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */
#include <Grid/Grid.h>
using namespace Grid;
int main (int argc, char ** argv)
{
Grid_init(&argc,&argv);
int threads = GridThread::GetThreads();
std::cout<<GridLogMessage << "Grid is setup to use "<<threads<<" threads"<<std::endl;
Coordinate latt_size = GridDefaultLatt();
Coordinate simd_layout( { vComplexD::Nsimd(),1,1,1});
Coordinate mpi_layout = GridDefaultMpi();
int vol = 1;
for(int d=0;d<latt_size.size();d++){
vol = vol * latt_size[d];
}
GridCartesian GRID(latt_size,simd_layout,mpi_layout);
GridRedBlackCartesian RBGRID(&GRID);
ComplexD ci(0.0,1.0);
std::vector<int> seeds({1,2,3,4});
GridSerialRNG sRNG; sRNG.SeedFixedIntegers(seeds); // naughty seeding
GridParallelRNG pRNG(&GRID);
pRNG.SeedFixedIntegers(seeds);
LatticeGaugeFieldD Umu(&GRID);
SU<Nc>::ColdConfiguration(pRNG,Umu); // Unit gauge
////////////////////////////////////////////////////
// PF prop
////////////////////////////////////////////////////
LatticeFermionD src(&GRID);
gaussian(pRNG,src);
#if 1
Coordinate point(4,0);
src=Zero();
SpinColourVectorD ferm; gaussian(sRNG,ferm);
pokeSite(ferm,src,point);
#endif
{
std::cout<<"****************************************"<<std::endl;
std::cout << "Testing PartialFraction Hw kernel Mom space 4d propagator \n";
std::cout<<"****************************************"<<std::endl;
// LatticeFermionD src(&GRID); gaussian(pRNG,src);
LatticeFermionD tmp(&GRID);
LatticeFermionD ref(&GRID);
LatticeFermionD diff(&GRID);
const int Ls=48+1;
GridCartesian * FGrid = SpaceTimeGrid::makeFiveDimGrid(Ls,&GRID);
GridRedBlackCartesian * FrbGrid = SpaceTimeGrid::makeFiveDimRedBlackGrid(Ls,&GRID);
RealD mass=0.1;
RealD M5 =0.8;
OverlapWilsonPartialFractionZolotarevFermionD Dov(Umu,*FGrid,*FrbGrid,GRID,RBGRID,mass,M5,0.001,8.0);
// Momentum space prop
std::cout << " Solving by FFT and Feynman rules" <<std::endl;
bool fiveD = false; //calculate 4d free propagator
std::cout << " Free propagator " <<std::endl;
Dov.FreePropagator(src,ref,mass) ;
std::cout << " Free propagator norm "<< norm2(ref) <<std::endl;
Gamma G5(Gamma::Algebra::Gamma5);
LatticeFermionD src5(FGrid); src5=Zero();
LatticeFermionD tmp5(FGrid);
LatticeFermionD result5(FGrid); result5=Zero();
LatticeFermionD result4(&GRID);
const int sdir=0;
////////////////////////////////////////////////////////////////////////
// Import
////////////////////////////////////////////////////////////////////////
std::cout << " Free propagator Import "<< norm2(src) <<std::endl;
Dov.ImportPhysicalFermionSource (src,src5);
std::cout << " Free propagator Imported "<< norm2(src5) <<std::endl;
////////////////////////////////////////////////////////////////////////
// Conjugate gradient on normal equations system
////////////////////////////////////////////////////////////////////////
std::cout << " Solving by Conjugate Gradient (CGNE)" <<std::endl;
Dov.Mdag(src5,tmp5);
src5=tmp5;
MdagMLinearOperator<OverlapWilsonPartialFractionZolotarevFermionD,LatticeFermionD> HermOp(Dov);
ConjugateGradient<LatticeFermionD> CG(1.0e-8,10000);
CG(HermOp,src5,result5);
////////////////////////////////////////////////////////////////////////
// Domain wall physical field propagator
////////////////////////////////////////////////////////////////////////
Dov.ExportPhysicalFermionSolution(result5,result4);
// From DWF4d.pdf :
//
// Dov_pf = 2/(1-m) D_cayley_ovlap [ Page 43 ]
// Dinv_cayley_ovlap = 2/(1-m) Dinv_pf
// Dinv_cayley_surface =1/(1-m) ( Dinv_cayley_ovlap - 1 ) => 2/(1-m)^2 Dinv_pf - 1/(1-m) * src [ Eq.2.67 ]
RealD scale = 2.0/(1.0-mass)/(1.0-mass);
result4 = result4 * scale;
result4 = result4 - src*(1.0/(1.0-mass)); // Subtract contact term
DumpSliceNorm("Src",src);
DumpSliceNorm("Grid",result4);
DumpSliceNorm("Fourier",ref);
std::cout << "Dov result4 "<<norm2(result4)<<std::endl;
std::cout << "Dov ref "<<norm2(ref)<<std::endl;
diff = result4- ref;
DumpSliceNorm("diff ",diff);
}
////////////////////////////////////////////////////
// Dwf prop
////////////////////////////////////////////////////
{
std::cout<<"****************************************"<<std::endl;
std::cout << "Testing Dov(Hw) Mom space 4d propagator \n";
std::cout<<"****************************************"<<std::endl;
LatticeFermionD tmp(&GRID);
LatticeFermionD ref(&GRID);
LatticeFermionD diff(&GRID);
const int Ls=48;
GridCartesian * FGrid = SpaceTimeGrid::makeFiveDimGrid(Ls,&GRID);
GridRedBlackCartesian * FrbGrid = SpaceTimeGrid::makeFiveDimRedBlackGrid(Ls,&GRID);
RealD mass=0.1;
RealD M5 =0.8;
OverlapWilsonCayleyTanhFermionD Dov(Umu,*FGrid,*FrbGrid,GRID,RBGRID,mass,M5,1.0);
// Momentum space prop
std::cout << " Solving by FFT and Feynman rules" <<std::endl;
Dov.FreePropagator(src,ref,mass) ;
Gamma G5(Gamma::Algebra::Gamma5);
LatticeFermionD src5(FGrid); src5=Zero();
LatticeFermionD tmp5(FGrid);
LatticeFermionD result5(FGrid); result5=Zero();
LatticeFermionD result4(&GRID);
const int sdir=0;
////////////////////////////////////////////////////////////////////////
// Domain wall physical field source; need D_minus
////////////////////////////////////////////////////////////////////////
/*
chi_5[0] = chiralProjectPlus(chi);
chi_5[Ls-1]= chiralProjectMinus(chi);
*/
tmp = (src + G5*src)*0.5; InsertSlice(tmp,src5, 0,sdir);
tmp = (src - G5*src)*0.5; InsertSlice(tmp,src5,Ls-1,sdir);
////////////////////////////////////////////////////////////////////////
// Conjugate gradient on normal equations system
////////////////////////////////////////////////////////////////////////
std::cout << " Solving by Conjugate Gradient (CGNE)" <<std::endl;
Dov.Dminus(src5,tmp5);
src5=tmp5;
Dov.Mdag(src5,tmp5);
src5=tmp5;
MdagMLinearOperator<OverlapWilsonCayleyTanhFermionD,LatticeFermionD> HermOp(Dov);
ConjugateGradient<LatticeFermionD> CG(1.0e-16,10000);
CG(HermOp,src5,result5);
////////////////////////////////////////////////////////////////////////
// Domain wall physical field propagator
////////////////////////////////////////////////////////////////////////
/*
psi = chiralProjectMinus(psi_5[0]);
psi += chiralProjectPlus(psi_5[Ls-1]);
*/
ExtractSlice(tmp,result5,0 ,sdir); result4 = (tmp-G5*tmp)*0.5;
ExtractSlice(tmp,result5,Ls-1,sdir); result4 = result4+(tmp+G5*tmp)*0.5;
std::cout << " Taking difference" <<std::endl;
std::cout << "Dov result4 "<<norm2(result4)<<std::endl;
std::cout << "Dov ref "<<norm2(ref)<<std::endl;
DumpSliceNorm("Grid",result4);
DumpSliceNorm("Fourier",ref);
diff = ref - result4;
std::cout << "result - ref "<<norm2(diff)<<std::endl;
DumpSliceNorm("diff",diff);
}
{
std::cout<<"****************************************"<<std::endl;
std::cout << "Testing PartialFraction Hw kernel Mom space 4d propagator with q\n";
std::cout<<"****************************************"<<std::endl;
// LatticeFermionD src(&GRID); gaussian(pRNG,src);
LatticeFermionD tmp(&GRID);
LatticeFermionD ref(&GRID);
LatticeFermionD diff(&GRID);
const int Ls=48+1;
GridCartesian * FGrid = SpaceTimeGrid::makeFiveDimGrid(Ls,&GRID);
GridRedBlackCartesian * FrbGrid = SpaceTimeGrid::makeFiveDimRedBlackGrid(Ls,&GRID);
RealD mass=0.1;
RealD M5 =0.8;
OverlapWilsonPartialFractionZolotarevFermionD Dov(Umu,*FGrid,*FrbGrid,GRID,RBGRID,mass,M5,0.001,8.0);
// Momentum space prop
std::cout << " Solving by FFT and Feynman rules" <<std::endl;
bool fiveD = false; //calculate 4d free propagator
std::cout << " Free propagator " <<std::endl;
Dov.FreePropagator(src,ref,mass) ;
std::cout << " Free propagator norm "<< norm2(ref) <<std::endl;
Gamma G5(Gamma::Algebra::Gamma5);
LatticeFermionD src5(FGrid); src5=Zero();
LatticeFermionD tmp5(FGrid);
LatticeFermionD result5(FGrid); result5=Zero();
LatticeFermionD result4(&GRID);
const int sdir=0;
////////////////////////////////////////////////////////////////////////
// Import
////////////////////////////////////////////////////////////////////////
std::cout << " Free propagator Import "<< norm2(src) <<std::endl;
Dov.ImportPhysicalFermionSource (src,src5);
std::cout << " Free propagator Imported "<< norm2(src5) <<std::endl;
////////////////////////////////////////////////////////////////////////
// Conjugate gradient on normal equations system
////////////////////////////////////////////////////////////////////////
std::cout << " Solving by Conjugate Gradient (CGNE)" <<std::endl;
Dov.Mdag(src5,tmp5);
src5=tmp5;
MdagMLinearOperator<OverlapWilsonPartialFractionZolotarevFermionD,LatticeFermionD> HermOp(Dov);
ConjugateGradient<LatticeFermionD> CG(1.0e-8,10000);
CG(HermOp,src5,result5);
////////////////////////////////////////////////////////////////////////
// Domain wall physical field propagator
////////////////////////////////////////////////////////////////////////
Dov.ExportPhysicalFermionSolution(result5,result4);
// From DWF4d.pdf :
//
// Dov_pf = 2/(1-m) D_cayley_ovlap [ Page 43 ]
// Dinv_cayley_ovlap = 2/(1-m) Dinv_pf
// Dinv_cayley_surface =1/(1-m) ( Dinv_cayley_ovlap - 1 ) => 2/(1-m)^2 Dinv_pf - 1/(1-m) * src [ Eq.2.67 ]
RealD scale = 2.0/(1.0-mass)/(1.0-mass);
result4 = result4 * scale;
result4 = result4 - src*(1.0/(1.0-mass)); // Subtract contact term
DumpSliceNorm("Src",src);
DumpSliceNorm("Grid",result4);
DumpSliceNorm("Fourier",ref);
std::cout << "Dov result4 "<<norm2(result4)<<std::endl;
std::cout << "Dov ref "<<norm2(ref)<<std::endl;
diff = result4- ref;
DumpSliceNorm("diff ",diff);
}
Grid_finalize();
}


@ -63,7 +63,9 @@ int main(int argc, char** argv) {
std::cout << "Dimension of adjoint representation: "<< SU2Adjoint::Dimension << std::endl;
// guard as this code fails to compile for Nc != 3
#if (Nc == 3)
#if 1
std::cout << " Printing Adjoint Generators"<< std::endl;
SU2Adjoint::printGenerators();
SU2::testGenerators();
@ -148,10 +150,33 @@ int main(int argc, char** argv) {
typename AdjointRep<Nc>::LatticeMatrix Vrmu = peekLorentz(Vr,mu);
pokeLorentz(UrVr,Urmu*Vrmu, mu);
}
typedef typename SU_Adjoint<Nc>::AMatrix AdjointMatrix;
typename AdjointRep<Nc>::LatticeField Diff_check = UVr - UrVr;
std::cout << GridLogMessage << "Group structure SU("<<Nc<<") check difference (Adjoint representation) : " << norm2(Diff_check) << std::endl;
std::cout << GridLogMessage << "****************************************** " << std::endl;
std::cout << GridLogMessage << " MAP BETWEEN FUNDAMENTAL AND ADJOINT CHECK " << std::endl;
std::cout << GridLogMessage << "****************************************** " << std::endl;
for(int a=0;a<Nc*Nc-1;a++){
for(int b=0;b<Nc*Nc-1;b++){
for(int c=0;c<Nc*Nc-1;c++){
ColourMatrix Ta;
ColourMatrix Tb;
ColourMatrix Tc;
SU3::generator(a, Ta);
SU3::generator(b, Tb);
SU3::generator(c, Tc);
AdjointMatrix TRa;
SU3Adjoint::generator(a,TRa);
Complex tr1 = trace ( Tc * ( Ta*Tb-Tb*Ta)); // i/2 fabc
Complex tr2 = TRa()()(b,c) * Complex(0,1);
std::cout << " 2 Tr( Tc[Ta,Tb]) " << 2.0*tr1<<std::endl;
std::cout << " - TRa_bc " << tr2<<std::endl;
assert(abs( (2.0*tr1-tr2) ) < 1.0e-7);
std::cout << "------------------"<<std::endl;
}}}
// Check correspondence of algebra and group transformations
// Create a random vector
SU3::LatticeAlgebraVector h_adj(grid);


@ -0,0 +1,184 @@
/*************************************************************************************
Grid physics library, www.github.com/paboyle/Grid
Source file: ./tests/Test_padded_cell.cc
Copyright (C) 2015
Author: Peter Boyle <paboyle@ph.ed.ac.uk>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */
#include <Grid/Grid.h>
#include <Grid/lattice/PaddedCell.h>
#include <Grid/stencil/GeneralLocalStencil.h>
using namespace std;
using namespace Grid;
template<class vobj> void gpermute(vobj & inout,int perm){
vobj tmp=inout;
if (perm & 0x1 ) { permute(inout,tmp,0); tmp=inout;}
if (perm & 0x2 ) { permute(inout,tmp,1); tmp=inout;}
if (perm & 0x4 ) { permute(inout,tmp,2); tmp=inout;}
if (perm & 0x8 ) { permute(inout,tmp,3); tmp=inout;}
}
int main (int argc, char ** argv)
{
Grid_init(&argc,&argv);
Coordinate latt_size = GridDefaultLatt();
Coordinate simd_layout= GridDefaultSimd(Nd,vComplexD::Nsimd());
Coordinate mpi_layout = GridDefaultMpi();
std::cout << " mpi "<<mpi_layout<<std::endl;
std::cout << " simd "<<simd_layout<<std::endl;
std::cout << " latt "<<latt_size<<std::endl;
GridCartesian GRID(latt_size,simd_layout,mpi_layout);
GridParallelRNG pRNG(&GRID);
pRNG.SeedFixedIntegers(std::vector<int>({45,12,81,9}));
LatticeGaugeField Umu(&GRID);
SU<Nc>::HotConfiguration(pRNG,Umu);
Real plaq=WilsonLoops<PeriodicGimplR>::avgPlaquette(Umu);
LatticeComplex trplaq(&GRID);
std::vector<LatticeColourMatrix> U(Nd, Umu.Grid());
for (int mu = 0; mu < Nd; mu++) {
U[mu] = PeekIndex<LorentzIndex>(Umu, mu);
}
std::cout << GridLogMessage << " Average plaquette "<<plaq<<std::endl;
LatticeComplex cplaq(&GRID); cplaq=Zero();
/////////////////////////////////////////////////
// Create a padded cell of extra padding depth=1
/////////////////////////////////////////////////
int depth = 1;
PaddedCell Ghost(depth,&GRID);
LatticeGaugeField Ughost = Ghost.Exchange(Umu);
///////////////////////////////////////////////////////////////////
// Temporary debug Hack for single rank sim:
// Check the contents of the cell are periodically replicated
// In future ONLY pad those dimensions that are not local to node
///////////////////////////////////////////////////////////////////
#if 0
{
double diff=0;
double n=0;
{
autoView( Ug_v , Ughost, CpuRead);
autoView( Ul_v , Umu , CpuRead);
for(int x=0;x<latt_size[0]+2;x++){
for(int y=0;y<latt_size[1]+2;y++){
for(int z=0;z<latt_size[2]+2;z++){
for(int t=0;t<latt_size[3]+2;t++){
int lx=(x-1+latt_size[0])%latt_size[0];
int ly=(y-1+latt_size[1])%latt_size[1];
int lz=(z-1+latt_size[2])%latt_size[2];
int lt=(t-1+latt_size[3])%latt_size[3];
Coordinate gcoor({x,y,z,t});
Coordinate lcoor({lx,ly,lz,lt});
LorentzColourMatrix g;
LorentzColourMatrix l;
peekLocalSite(g,Ug_v,gcoor);
peekLocalSite(l,Ul_v,lcoor);
g=g-l;
assert(norm2(g)==0);
diff = diff + norm2(g);
n = n + norm2(l);
}}}}
}
std::cout << "padded field check diff "<< diff <<" / "<< n<<std::endl;
std::cout << norm2(Ughost)<< " " << norm2(Umu)<<std::endl;
}
#endif
///// Array for the site plaquette
GridBase *GhostGrid = Ughost.Grid();
LatticeComplex gplaq(GhostGrid);
std::vector<Coordinate> shifts;
for(int mu=0;mu<Nd;mu++){
for(int nu=mu+1;nu<Nd;nu++){
// Umu(x) Unu(x+mu) Umu^dag(x+nu) Unu^dag(x)
Coordinate shift_0(Nd,0);
Coordinate shift_mu(Nd,0); shift_mu[mu]=1;
Coordinate shift_nu(Nd,0); shift_nu[nu]=1;
shifts.push_back(shift_0);
shifts.push_back(shift_mu);
shifts.push_back(shift_nu);
shifts.push_back(shift_0);
}
}
GeneralLocalStencil gStencil(GhostGrid,shifts);
gplaq=Zero();
{
autoView( gp_v , gplaq, CpuWrite);
autoView( t_v , trplaq, CpuRead);
autoView( U_v , Ughost, CpuRead);
for(int ss=0;ss<gp_v.size();ss++){
int s=0;
for(int mu=0;mu<Nd;mu++){
for(int nu=mu+1;nu<Nd;nu++){
auto SE0 = gStencil.GetEntry(s+0,ss);
auto SE1 = gStencil.GetEntry(s+1,ss);
auto SE2 = gStencil.GetEntry(s+2,ss);
auto SE3 = gStencil.GetEntry(s+3,ss);
int o0 = SE0->_offset;
int o1 = SE1->_offset;
int o2 = SE2->_offset;
int o3 = SE3->_offset;
auto U0 = U_v[o0](mu);
auto U1 = U_v[o1](nu);
auto U2 = adj(U_v[o2](mu));
auto U3 = adj(U_v[o3](nu));
gpermute(U0,SE0->_permute);
gpermute(U1,SE1->_permute);
gpermute(U2,SE2->_permute);
gpermute(U3,SE3->_permute);
gp_v[ss]() =gp_v[ss]() + trace( U0*U1*U2*U3 );
s=s+4;
}
}
}
}
cplaq = Ghost.Extract(gplaq);
RealD vol = cplaq.Grid()->gSites();
RealD faces = (Nd * (Nd-1))/2;
auto p = TensorRemove(sum(cplaq));
auto result = p.real()/vol/faces/Nc;
std::cout << GridLogMessage << " Average plaquette via padded cell "<<result<<std::endl;
std::cout << GridLogMessage << " Diff "<<result-plaq<<std::endl;
assert(fabs(result-plaq)<1.0e-8);
Grid_finalize();
}

tests/forces/Test_fthmc.cc (new file, 219 lines)

@ -0,0 +1,219 @@
/*************************************************************************************
Grid physics library, www.github.com/paboyle/Grid
Source file: ./tests/Test_fthmc.cc
Copyright (C) 2022
Author: Peter Boyle <pboyle@bnl.gov>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */
#include <Grid/Grid.h>
#include <Grid/qcd/smearing/GaugeConfigurationMasked.h>
#include <Grid/qcd/smearing/JacobianAction.h>

using namespace std;
using namespace Grid;

typedef MobiusFermionD FermionAction;
typedef WilsonImplD    FimplD;
typedef WilsonImplD    FermionImplPolicy;

template<class Gimpl>
void ForceTest(Action<LatticeGaugeField> &action, ConfigurationBase<LatticeGaugeField> &smU, MomentumFilterBase<LatticeGaugeField> &Filter)
{
  LatticeGaugeField U = smU.get_U(false); // unsmeared config
  GridBase *UGrid = U.Grid();

  std::vector<int> seeds({1,2,3,5});
  GridSerialRNG    sRNG;         sRNG.SeedFixedIntegers(seeds);
  GridParallelRNG  RNG4(UGrid);  RNG4.SeedFixedIntegers(seeds);

  LatticeColourMatrix Pmu(UGrid);
  LatticeGaugeField   P(UGrid);
  LatticeGaugeField   UdSdU(UGrid);

  std::cout << GridLogMessage << "*********************************************************"<<std::endl;
  std::cout << GridLogMessage << " Force test for "<<action.action_name()<<std::endl;
  std::cout << GridLogMessage << "*********************************************************"<<std::endl;

  RealD eps=0.01;

  std::cout << GridLogMessage << "+++++++++++++++++++++++++++++++++++++++++++++++++++++++++"<<std::endl;
  std::cout << GridLogMessage << " Refresh "<<action.action_name()<<std::endl;
  std::cout << GridLogMessage << "+++++++++++++++++++++++++++++++++++++++++++++++++++++++++"<<std::endl;

  Gimpl::generate_momenta(P,sRNG,RNG4);
  //  Filter.applyFilter(P);

  action.refresh(smU,sRNG,RNG4);

  std::cout << GridLogMessage << "+++++++++++++++++++++++++++++++++++++++++++++++++++++++++"<<std::endl;
  std::cout << GridLogMessage << " Action "<<action.action_name()<<std::endl;
  std::cout << GridLogMessage << "+++++++++++++++++++++++++++++++++++++++++++++++++++++++++"<<std::endl;

  RealD S1 = action.S(smU);

  Gimpl::update_field(P,U,eps);
  smU.set_Field(U);

  std::cout << GridLogMessage << "+++++++++++++++++++++++++++++++++++++++++++++++++++++++++"<<std::endl;
  std::cout << GridLogMessage << " Derivative "<<action.action_name()<<std::endl;
  std::cout << GridLogMessage << "+++++++++++++++++++++++++++++++++++++++++++++++++++++++++"<<std::endl;

  action.deriv(smU,UdSdU);
  UdSdU = Ta(UdSdU);
  //  Filter.applyFilter(UdSdU);

  DumpSliceNorm("Force",UdSdU,Nd-1);

  Gimpl::update_field(P,U,eps);
  smU.set_Field(U);

  std::cout << GridLogMessage << "+++++++++++++++++++++++++++++++++++++++++++++++++++++++++"<<std::endl;
  std::cout << GridLogMessage << " Action "<<action.action_name()<<std::endl;
  std::cout << GridLogMessage << "+++++++++++++++++++++++++++++++++++++++++++++++++++++++++"<<std::endl;

  RealD S2 = action.S(smU);

  // Use the derivative
  LatticeComplex dS(UGrid); dS = Zero();
  for(int mu=0;mu<Nd;mu++){
    auto UdSdUmu = PeekIndex<LorentzIndex>(UdSdU,mu);
    Pmu          = PeekIndex<LorentzIndex>(P,mu);
    dS = dS - trace(Pmu*UdSdUmu)*eps*2.0*HMC_MOMENTUM_DENOMINATOR;
  }
  ComplexD dSpred = sum(dS);
  RealD diff = S2-S1-dSpred.real();

  std::cout<< GridLogMessage << "+++++++++++++++++++++++++++++++++++++++++++++++++++++++++"<<std::endl;
  std::cout<< GridLogMessage << "S1     : "<< S1            <<std::endl;
  std::cout<< GridLogMessage << "S2     : "<< S2            <<std::endl;
  std::cout<< GridLogMessage << "dS     : "<< S2-S1         <<std::endl;
  std::cout<< GridLogMessage << "dSpred : "<< dSpred.real() <<std::endl;
  std::cout<< GridLogMessage << "diff   : "<< diff          <<std::endl;
  std::cout<< GridLogMessage << "*********************************************************"<<std::endl;
  //  assert(diff<1.0);
  std::cout<< GridLogMessage << "Done" <<std::endl;
  std::cout << GridLogMessage << "*********************************************************"<<std::endl;
}
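The loop above is the usual HMC force check: the links are pushed along the momenta in two steps of eps, the force is evaluated at the midpoint, and the predicted change of the action is compared with the measured S2 - S1. Schematically, with the same symbols as the code and HMC_MOMENTUM_DENOMINATOR the momentum normalisation factor used by Grid:

S_2 - S_1 \;\approx\; -\,2\,\epsilon \sum_{x,\mu} {\rm Re\,tr}\Big[\, P_\mu(x)\,\big(U\,\tfrac{\partial S}{\partial U}\big)_\mu(x)\,\Big] \times {\rm HMC\_MOMENTUM\_DENOMINATOR}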
int main (int argc, char ** argv)
{
  Grid_init(&argc,&argv);

  std::cout << std::setprecision(14);

  Coordinate latt_size   = GridDefaultLatt();
  Coordinate mpi_layout  = GridDefaultMpi();
  Coordinate simd_layout = GridDefaultSimd(Nd,vComplex::Nsimd());
  Coordinate shm;
  GlobalSharedMemory::GetShmDims(mpi_layout,shm);

  const int Ls = 12;
  const int Nt = latt_size[3];

  GridCartesian         * UGrid   = SpaceTimeGrid::makeFourDimGrid(GridDefaultLatt(), GridDefaultSimd(Nd,vComplex::Nsimd()),GridDefaultMpi());
  GridRedBlackCartesian * UrbGrid = SpaceTimeGrid::makeFourDimRedBlackGrid(UGrid);
  GridCartesian         * FGrid   = SpaceTimeGrid::makeFiveDimGrid(Ls,UGrid);
  GridRedBlackCartesian * FrbGrid = SpaceTimeGrid::makeFiveDimRedBlackGrid(Ls,UGrid);

  ///////////////////// Gauge Field and Gauge Forces ////////////////////////////
  LatticeGaugeField U(UGrid);

#if 0
  FieldMetaData header;
  std::string file("./ckpoint_lat.2000");
  NerscIO::readConfiguration(U,header,file);
#else
  std::vector<int> seeds({1,2,3,4,5,6,7,8});
  GridParallelRNG RNG4(UGrid);  RNG4.SeedFixedIntegers(seeds);
  SU<Nc>::HotConfiguration(RNG4,U);
#endif

  WilsonGaugeActionR  PlaqAction(6.0);
  IwasakiGaugeActionR RectAction(2.13);

  PlaqAction.is_smeared = true;
  RectAction.is_smeared = true;

  ////////////////////////////////////
  // Fermion Action
  ////////////////////////////////////
  RealD mass   = 0.01;
  RealD pvmass = 1.0;
  RealD M5 = 1.8;
  RealD b  = 1.5;
  RealD c  = 0.5;

  // Double versions
  std::vector<Complex> boundary = {1,1,1,-1};
  FermionAction::ImplParams Params(boundary);
  FermionAction DdwfPeriodic(U,*FGrid,*FrbGrid,*UGrid,*UrbGrid,mass  ,M5,b,c,Params);
  FermionAction PVPeriodic  (U,*FGrid,*FrbGrid,*UGrid,*UrbGrid,pvmass,M5,b,c,Params);

  double StoppingCondition = 1.0e-8;
  double MaxCGIterations   = 50000;
  ConjugateGradient<LatticeFermion> CG(StoppingCondition,MaxCGIterations);

  TwoFlavourRatioPseudoFermionAction<FimplD> Nf2(PVPeriodic, DdwfPeriodic,CG,CG);
  Nf2.is_smeared = true;

  ////////////////////////////////////////////////
  // Plaquette only FTHMC smearer
  ////////////////////////////////////////////////
  double rho = 0.1;
  Smear_Stout<PeriodicGimplR> Smearer(rho);
  SmearedConfigurationMasked<PeriodicGimplR> SmartConfig(UGrid,2*Nd,Smearer);
  SmearedConfiguration<PeriodicGimplR>       StoutConfig(UGrid,1,Smearer);

  JacobianAction<PeriodicGimplR> Jacobian(&SmartConfig);

  ////////////////////////////////////////////////
  // Run some tests
  ////////////////////////////////////////////////
  MomentumFilterNone<LatticeGaugeField> FilterNone;

  std::cout << " ********* FIELD TRANSFORM SMEARING ***** "<<std::endl;

  SmartConfig.set_Field(U);
  ForceTest<GimplTypesR>(PlaqAction,SmartConfig,FilterNone);

  SmartConfig.set_Field(U);
  ForceTest<GimplTypesR>(RectAction,SmartConfig,FilterNone);

  SmartConfig.set_Field(U);
  ForceTest<GimplTypesR>(Jacobian,SmartConfig,FilterNone);

  SmartConfig.set_Field(U);
  ForceTest<GimplTypesR>(Nf2,SmartConfig,FilterNone);

  std::cout << " ********* STOUT SMEARING ***** "<<std::endl;

  StoutConfig.set_Field(U);
  ForceTest<GimplTypesR>(PlaqAction,StoutConfig,FilterNone);

  StoutConfig.set_Field(U);
  ForceTest<GimplTypesR>(RectAction,StoutConfig,FilterNone);

  StoutConfig.set_Field(U);
  ForceTest<GimplTypesR>(Nf2,StoutConfig,FilterNone);

  Grid_finalize();
}
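For context, the JacobianAction test exercises the field-transformed HMC change of variables: if the smearing map takes the fundamental field V to U = F(V), the effective action acquires the log-determinant of the transformation, while the gauge and pseudofermion actions (flagged is_smeared = true above) are evaluated on the transformed links. Schematically:

S_{\rm eff}[V] \;=\; S_{\rm gauge}\big[F(V)\big] + S_{\rm ferm}\big[F(V)\big] \;-\; \ln\det\frac{\partial F(V)}{\partial V}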