mirror of https://github.com/paboyle/Grid.git synced 2024-11-10 07:55:35 +00:00

Merge remote-tracking branch 'upstream/develop' into develop

Henrique B. R. 2021-09-02 16:57:42 +01:00
commit 11ee8a1061
13 changed files with 1105 additions and 58 deletions

View File

@ -73,6 +73,7 @@ void GlobalSharedMemory::Init(Grid_MPI_Comm comm)
WorldNodes = WorldSize/WorldShmSize;
assert( (WorldNodes * WorldShmSize) == WorldSize );
// FIXME: Check all WorldShmSize are the same ?
/////////////////////////////////////////////////////////////////////
@ -451,7 +452,8 @@ void GlobalSharedMemory::SharedMemoryAllocate(uint64_t bytes, int flags)
////////////////////////////////////////////////////////////////////////////////////////////
#if defined(GRID_CUDA) ||defined(GRID_HIP) || defined(GRID_SYCL)
#if defined(GRID_SYCL)
//if defined(GRID_SYCL)
#if 0
void GlobalSharedMemory::SharedMemoryAllocate(uint64_t bytes, int flags)
{
void * ShmCommBuf ;
@ -488,7 +490,7 @@ void GlobalSharedMemory::SharedMemoryAllocate(uint64_t bytes, int flags)
}
#endif
#if defined(GRID_CUDA) ||defined(GRID_HIP)
#if defined(GRID_CUDA) ||defined(GRID_HIP) ||defined(GRID_SYCL)
void GlobalSharedMemory::SharedMemoryAllocate(uint64_t bytes, int flags)
{
void * ShmCommBuf ;
@ -511,8 +513,16 @@ void GlobalSharedMemory::SharedMemoryAllocate(uint64_t bytes, int flags)
///////////////////////////////////////////////////////////////////////////////////////////////////////////
// Each MPI rank should allocate our own buffer
///////////////////////////////////////////////////////////////////////////////////////////////////////////
#ifdef GRID_SYCL_LEVEL_ZERO_IPC
auto zeDevice = cl::sycl::get_native<cl::sycl::backend::level_zero>(theGridAccelerator->get_device());
auto zeContext= cl::sycl::get_native<cl::sycl::backend::level_zero>(theGridAccelerator->get_context());
ze_device_mem_alloc_desc_t zeDesc = {};
zeMemAllocDevice(zeContext,&zeDesc,bytes,2*1024*1024,zeDevice,&ShmCommBuf);
std::cout << WorldRank << header " SharedMemoryMPI.cc zeMemAllocDevice "<< bytes
<< "bytes at "<< std::hex<< ShmCommBuf <<std::dec<<" for comms buffers " <<std::endl;
#else
ShmCommBuf = acceleratorAllocDevice(bytes);
#endif
if (ShmCommBuf == (void *)NULL ) {
std::cerr << " SharedMemoryMPI.cc acceleratorAllocDevice failed NULL pointer for " << bytes<<" bytes " << std::endl;
exit(EXIT_FAILURE);
@ -522,8 +532,8 @@ void GlobalSharedMemory::SharedMemoryAllocate(uint64_t bytes, int flags)
std::cout << WorldRank << header " SharedMemoryMPI.cc acceleratorAllocDevice "<< bytes
<< "bytes at "<< std::hex<< ShmCommBuf <<std::dec<<" for comms buffers " <<std::endl;
}
SharedMemoryZero(ShmCommBuf,bytes);
// SharedMemoryZero(ShmCommBuf,bytes);
std::cout<< "Setting up IPC"<<std::endl;
///////////////////////////////////////////////////////////////////////////////////////////////////////////
// Loop over ranks/gpu's on our node
///////////////////////////////////////////////////////////////////////////////////////////////////////////
@ -533,6 +543,23 @@ void GlobalSharedMemory::SharedMemoryAllocate(uint64_t bytes, int flags)
//////////////////////////////////////////////////
// If it is me, pass around the IPC access key
//////////////////////////////////////////////////
#ifdef GRID_SYCL_LEVEL_ZERO_IPC
ze_ipc_mem_handle_t handle;
if ( r==WorldShmRank ) {
auto err = zeMemGetIpcHandle(zeContext,ShmCommBuf,&handle);
if ( err != ZE_RESULT_SUCCESS ) {
std::cerr << "SharedMemoryMPI.cc zeMemGetIpcHandle failed for rank "<<r<<" "<<std::hex<<err<<std::dec<<std::endl;
exit(EXIT_FAILURE);
} else {
std::cerr << "SharedMemoryMPI.cc zeMemGetIpcHandle succeeded for rank "<<r<<" "<<std::hex<<err<<std::dec<<std::endl;
}
std::cerr<<"Allocated IpcHandle rank "<<r<<" (hex) ";
for(int c=0;c<ZE_MAX_IPC_HANDLE_SIZE;c++){
std::cerr<<std::hex<<(uint32_t)((uint8_t)handle.data[c])<<std::dec;
}
std::cerr<<std::endl;
}
#endif
#ifdef GRID_CUDA
cudaIpcMemHandle_t handle;
if ( r==WorldShmRank ) {
@ -569,6 +596,25 @@ void GlobalSharedMemory::SharedMemoryAllocate(uint64_t bytes, int flags)
// If I am not the source, overwrite thisBuf with remote buffer
///////////////////////////////////////////////////////////////
void * thisBuf = ShmCommBuf;
#ifdef GRID_SYCL_LEVEL_ZERO_IPC
if ( r!=WorldShmRank ) {
thisBuf = nullptr;
std::cerr<<"Using IpcHandle rank "<<r<<" ";
for(int c=0;c<ZE_MAX_IPC_HANDLE_SIZE;c++){
std::cerr<<std::hex<<(uint32_t)((uint8_t)handle.data[c])<<std::dec;
}
std::cerr<<std::endl;
auto err = zeMemOpenIpcHandle(zeContext,zeDevice,handle,0,&thisBuf);
if ( err != ZE_RESULT_SUCCESS ) {
std::cerr << "SharedMemoryMPI.cc "<<zeContext<<" "<<zeDevice<<std::endl;
std::cerr << "SharedMemoryMPI.cc zeMemOpenIpcHandle failed for rank "<<r<<" "<<std::hex<<err<<std::dec<<std::endl;
exit(EXIT_FAILURE);
} else {
std::cerr << "SharedMemoryMPI.cc zeMemOpenIpcHandle succeeded for rank "<<r<<" "<<std::hex<<err<<std::dec<<std::endl;
}
assert(thisBuf!=nullptr);
}
#endif
#ifdef GRID_CUDA
if ( r!=WorldShmRank ) {
auto err = cudaIpcOpenMemHandle(&thisBuf,handle,cudaIpcMemLazyEnablePeerAccess);
@ -844,7 +890,7 @@ void SharedMemory::SetCommunicator(Grid_MPI_Comm comm)
}
#endif
SharedMemoryTest();
//SharedMemoryTest();
}
//////////////////////////////////////////////////////////////////
// On node barrier
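For orientation, a minimal sketch of the Level Zero IPC exchange pattern these hunks add, assuming the opaque handle travels between node-local ranks via an MPI broadcast (the broadcast itself lies outside the hunks shown); the function and parameter names below are illustrative, not Grid's:

// Sketch only: the owning rank exports an IPC handle for its device allocation,
// peers on the same node open it to obtain a pointer to the remote buffer.
#include <mpi.h>
#include <level_zero/ze_api.h>
#include <cstdlib>

void *MapPeerBuffer(ze_context_handle_t ctx, ze_device_handle_t dev,
                    MPI_Comm nodeComm, int myRank, int srcRank,
                    void *myBuf /* from zeMemAllocDevice */)
{
  ze_ipc_mem_handle_t handle;
  if (myRank == srcRank) {
    // Owner exports a handle for its allocation.
    if (zeMemGetIpcHandle(ctx, myBuf, &handle) != ZE_RESULT_SUCCESS) std::abort();
  }
  // Ship the opaque handle bytes to every rank on the node.
  MPI_Bcast(&handle, sizeof(handle), MPI_BYTE, srcRank, nodeComm);

  void *buf = myBuf;
  if (myRank != srcRank) {
    // Peers map the remote allocation; flags=0 as in the diff above.
    if (zeMemOpenIpcHandle(ctx, dev, handle, 0, &buf) != ZE_RESULT_SUCCESS) std::abort();
  }
  return buf;  // node-local window usable as a comms buffer
}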

View File

@ -171,7 +171,6 @@ void acceleratorInit(void)
#ifdef GRID_SYCL
cl::sycl::queue *theGridAccelerator;
void acceleratorInit(void)
{
int nDevices = 1;
@ -179,6 +178,10 @@ void acceleratorInit(void)
cl::sycl::device selectedDevice { selector };
theGridAccelerator = new sycl::queue (selectedDevice);
#ifdef GRID_SYCL_LEVEL_ZERO_IPC
zeInit(0);
#endif
char * localRankStr = NULL;
int rank = 0, world_rank=0;
#define ENV_LOCAL_RANK_OMPI "OMPI_COMM_WORLD_LOCAL_RANK"

View File

@ -39,6 +39,10 @@ Author: paboyle <paboyle@ph.ed.ac.uk>
#ifdef HAVE_MM_MALLOC_H
#include <mm_malloc.h>
#endif
#ifdef __APPLE__
// no memalign
inline void *memalign(size_t align, size_t bytes) { return malloc(bytes); }
#endif
NAMESPACE_BEGIN(Grid);
@ -233,6 +237,13 @@ inline int acceleratorIsCommunicable(void *ptr)
NAMESPACE_END(Grid);
#include <CL/sycl.hpp>
#include <CL/sycl/usm.hpp>
#define GRID_SYCL_LEVEL_ZERO_IPC
#ifdef GRID_SYCL_LEVEL_ZERO_IPC
#include <level_zero/ze_api.h>
#include <CL/sycl/backend/level_zero.hpp>
#endif
NAMESPACE_BEGIN(Grid);
extern cl::sycl::queue *theGridAccelerator;
@ -412,6 +423,8 @@ inline void acceleratorMemSet(void *base,int value,size_t bytes) { hipMemset(bas
#undef GRID_SIMT
#define accelerator
#define accelerator_inline strong_inline
#define accelerator_for(iterator,num,nsimd, ... ) thread_for(iterator, num, { __VA_ARGS__ });

View File

@ -56,6 +56,8 @@ Author: paboyle <paboyle@ph.ed.ac.uk>
static int
feenableexcept (unsigned int excepts)
{
#if 0
// Fails on Apple M1
static fenv_t fenv;
unsigned int new_excepts = excepts & FE_ALL_EXCEPT;
unsigned int old_excepts; // previous masks
@ -70,6 +72,8 @@ feenableexcept (unsigned int excepts)
iold_excepts = (int) old_excepts;
return ( fesetenv (&fenv) ? -1 : iold_excepts );
#endif
return 0;
}
#endif

View File

@ -1,5 +1,5 @@
# additional include paths necessary to compile the C++ library
SUBDIRS = Grid HMC benchmarks tests
SUBDIRS = Grid HMC benchmarks tests examples
include $(top_srcdir)/doxygen.inc

View File

@ -133,34 +133,30 @@ public:
std::vector<HalfSpinColourVectorD *> xbuf(8);
std::vector<HalfSpinColourVectorD *> rbuf(8);
Grid.ShmBufferFreeAll();
//Grid.ShmBufferFreeAll();
uint64_t bytes=lat*lat*lat*Ls*sizeof(HalfSpinColourVectorD);
for(int d=0;d<8;d++){
xbuf[d] = (HalfSpinColourVectorD *)Grid.ShmBufferMalloc(lat*lat*lat*Ls*sizeof(HalfSpinColourVectorD));
rbuf[d] = (HalfSpinColourVectorD *)Grid.ShmBufferMalloc(lat*lat*lat*Ls*sizeof(HalfSpinColourVectorD));
xbuf[d] = (HalfSpinColourVectorD *)acceleratorAllocDevice(bytes);
rbuf[d] = (HalfSpinColourVectorD *)acceleratorAllocDevice(bytes);
// bzero((void *)xbuf[d],lat*lat*lat*Ls*sizeof(HalfSpinColourVectorD));
// bzero((void *)rbuf[d],lat*lat*lat*Ls*sizeof(HalfSpinColourVectorD));
}
int bytes=lat*lat*lat*Ls*sizeof(HalfSpinColourVectorD);
int ncomm;
double dbytes;
std::vector<double> times(Nloop);
for(int i=0;i<Nloop;i++){
double start=usecond();
for(int dir=0;dir<8;dir++) {
int mu =dir % 4;
if (mpi_layout[mu]>1 ) {
dbytes=0;
ncomm=0;
std::vector<double> times(Nloop);
for(int i=0;i<Nloop;i++){
thread_for(dir,8,{
double tbytes;
int mu =dir % 4;
if (mpi_layout[mu]>1 ) {
dbytes=0;
double start=usecond();
int xmit_to_rank;
int recv_from_rank;
if ( dir == mu ) {
int comm_proc=1;
Grid.ShiftedRanks(mu,comm_proc,xmit_to_rank,recv_from_rank);
@ -168,40 +164,38 @@ public:
int comm_proc = mpi_layout[mu]-1;
Grid.ShiftedRanks(mu,comm_proc,xmit_to_rank,recv_from_rank);
}
tbytes= Grid.StencilSendToRecvFrom((void *)&xbuf[dir][0], xmit_to_rank,
(void *)&rbuf[dir][0], recv_from_rank,
bytes,dir);
thread_critical {
ncomm++;
dbytes+=tbytes;
}
Grid.SendToRecvFrom((void *)&xbuf[dir][0], xmit_to_rank,
(void *)&rbuf[dir][0], recv_from_rank,
bytes);
dbytes+=bytes;
double stop=usecond();
t_time[i] = stop-start; // microseconds
}
});
Grid.Barrier();
double stop=usecond();
t_time[i] = stop-start; // microseconds
timestat.statistics(t_time);
dbytes=dbytes*ppn;
double xbytes = dbytes*0.5;
double bidibytes = dbytes;
std::cout<<GridLogMessage << lat<<"\t"<<Ls<<"\t "
<< bytes << " \t "
<<xbytes/timestat.mean<<" \t "<< xbytes*timestat.err/(timestat.mean*timestat.mean)<< " \t "
<<xbytes/timestat.max <<" "<< xbytes/timestat.min
<< "\t\t"<< bidibytes/timestat.mean<< " " << bidibytes*timestat.err/(timestat.mean*timestat.mean) << " "
<< bidibytes/timestat.max << " " << bidibytes/timestat.min << std::endl;
}
}
timestat.statistics(t_time);
dbytes=dbytes*ppn;
double xbytes = dbytes*0.5;
// double rbytes = dbytes*0.5;
double bidibytes = dbytes;
std::cout<<GridLogMessage << lat<<"\t"<<Ls<<"\t "
<< bytes << " \t "
<<xbytes/timestat.mean<<" \t "<< xbytes*timestat.err/(timestat.mean*timestat.mean)<< " \t "
<<xbytes/timestat.max <<" "<< xbytes/timestat.min
<< "\t\t"<< bidibytes/timestat.mean<< " " << bidibytes*timestat.err/(timestat.mean*timestat.mean) << " "
<< bidibytes/timestat.max << " " << bidibytes/timestat.min << std::endl;
}
}
for(int d=0;d<8;d++){
acceleratorFreeDevice(xbuf[d]);
acceleratorFreeDevice(rbuf[d]);
}
}
}
return;
}
static void Memory(void)
@ -281,7 +275,6 @@ public:
uint64_t lmax=32;
#define NLOOP (1000*lmax*lmax*lmax*lmax/lat/lat/lat/lat)
GridSerialRNG sRNG; sRNG.SeedFixedIntegers(std::vector<int>({45,12,81,9}));
for(int lat=8;lat<=lmax;lat+=8){
@ -445,7 +438,7 @@ public:
// 1344= 3*(2*8+6)*2*8 + 8*3*2*2 + 3*4*2*8
// 1344 = Nc* (6+(Nc-1)*8)*2*Nd + Nd*Nc*2*2 + Nd*Nc*Ns*2
// double flops=(1344.0*volume)/2;
#if 1
#if 0
double fps = Nc* (6+(Nc-1)*8)*Ns*Nd + Nd*Nc*Ns + Nd*Nc*Ns*2;
#else
double fps = Nc* (6+(Nc-1)*8)*Ns*Nd + 2*Nd*Nc*Ns + 2*Nd*Nc*Ns*2;
@ -716,12 +709,12 @@ int main (int argc, char ** argv)
if ( do_su4 ) {
std::cout<<GridLogMessage << "=================================================================================="<<std::endl;
std::cout<<GridLogMessage << " Memory benchmark " <<std::endl;
std::cout<<GridLogMessage << " SU(4) benchmark " <<std::endl;
std::cout<<GridLogMessage << "=================================================================================="<<std::endl;
Benchmark::SU4();
}
if ( do_comms && (NN>1) ) {
if ( do_comms ) {
std::cout<<GridLogMessage << "=================================================================================="<<std::endl;
std::cout<<GridLogMessage << " Communications benchmark " <<std::endl;
std::cout<<GridLogMessage << "=================================================================================="<<std::endl;
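The rate columns printed above are just the accumulated payload over the timing statistics: with dbytes the summed message size, ppn the ranks per node and $\bar t$ = timestat.mean in microseconds, the code reports roughly

$$ R_{\rm uni} \;=\; \frac{0.5\cdot \mathrm{dbytes}\cdot \mathrm{ppn}}{\bar t}, \qquad R_{\rm bidi} \;=\; \frac{\mathrm{dbytes}\cdot \mathrm{ppn}}{\bar t}, $$

i.e. bytes per microsecond, which reads directly as MB/s; the error column scales the same quantity by $\sigma/\bar t$.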

View File

@ -815,6 +815,7 @@ AC_CONFIG_FILES(tests/smearing/Makefile)
AC_CONFIG_FILES(tests/qdpxx/Makefile)
AC_CONFIG_FILES(tests/testu01/Makefile)
AC_CONFIG_FILES(benchmarks/Makefile)
AC_CONFIG_FILES(examples/Makefile)
AC_OUTPUT
echo ""

View File

@ -0,0 +1,396 @@
#include <Grid/Grid.h>
using namespace Grid;
/*
/////////////////////////////////////////////////////////////////////////////////////////////
// Grid/algorithms/SparseMatrix.h: Interface defining what I expect of a general sparse matrix, such as a Fermion action
/////////////////////////////////////////////////////////////////////////////////////////////
template<class Field> class SparseMatrixBase {
public:
virtual GridBase *Grid(void) =0;
virtual void M (const Field &in, Field &out)=0;
virtual void Mdag (const Field &in, Field &out)=0;
virtual void MdagM(const Field &in, Field &out) {
Field tmp (in.Grid());
M(in,tmp);
Mdag(tmp,out);
}
virtual void Mdiag (const Field &in, Field &out)=0;
virtual void Mdir (const Field &in, Field &out,int dir, int disp)=0;
virtual void MdirAll (const Field &in, std::vector<Field> &out)=0;
};
*/
const std::vector<int> directions ({Xdir,Ydir,Zdir,Xdir,Ydir,Zdir});
const std::vector<int> displacements({1,1,1,-1,-1,-1});
template<class Field> class FreeLaplacianCshift : public SparseMatrixBase<Field>
{
public:
GridBase *grid;
FreeLaplacianCshift(GridBase *_grid)
{
grid=_grid;
};
virtual GridBase *Grid(void) { return grid; };
virtual void M (const Field &in, Field &out)
{
out = Zero();
for(int mu=0;mu<Nd-1;mu++) {
out = out + Cshift(in,mu,1) + Cshift(in,mu,-1) - 2.0 * in;
}
};
virtual void Mdag (const Field &in, Field &out) { M(in,out);}; // Laplacian is hermitian
virtual void Mdiag (const Field &in, Field &out) {assert(0);}; // Unimplemented need only for multigrid
virtual void Mdir (const Field &in, Field &out,int dir, int disp){assert(0);}; // Unimplemented need only for multigrid
virtual void MdirAll (const Field &in, std::vector<Field> &out) {assert(0);}; // Unimplemented need only for multigrid
};
template<class Gimpl,class Field> class CovariantLaplacianCshift : public SparseMatrixBase<Field>
{
public:
INHERIT_GIMPL_TYPES(Gimpl);
GridBase *grid;
GaugeField U;
CovariantLaplacianCshift(GaugeField &_U) :
grid(_U.Grid()),
U(_U) { };
virtual GridBase *Grid(void) { return grid; };
virtual void M (const Field &in, Field &out)
{
out=Zero();
for(int mu=0;mu<Nd-1;mu++) {
GaugeLinkField Umu = PeekIndex<LorentzIndex>(U, mu); // NB: Inefficient
out = out + Gimpl::CovShiftForward(Umu,mu,in);
out = out + Gimpl::CovShiftBackward(Umu,mu,in);
out = out - 2.0*in;
}
};
virtual void Mdag (const Field &in, Field &out) { M(in,out);}; // Laplacian is hermitian
virtual void Mdiag (const Field &in, Field &out) {assert(0);}; // Unimplemented need only for multigrid
virtual void Mdir (const Field &in, Field &out,int dir, int disp){assert(0);}; // Unimplemented need only for multigrid
virtual void MdirAll (const Field &in, std::vector<Field> &out) {assert(0);}; // Unimplemented need only for multigrid
};
#define LEG_LOAD(Dir) \
SE = st.GetEntry(ptype, Dir, ss); \
if (SE->_is_local ) { \
int perm= SE->_permute; \
chi = coalescedReadPermute(in[SE->_offset],ptype,perm,lane); \
} else { \
chi = coalescedRead(buf[SE->_offset],lane); \
} \
acceleratorSynchronise();
template<class Field> class FreeLaplacianStencil : public SparseMatrixBase<Field>
{
public:
typedef typename Field::vector_object siteObject;
typedef CartesianStencil<siteObject, siteObject, int> StencilImpl;
GridBase *grid;
StencilImpl Stencil;
SimpleCompressor<siteObject> Compressor;
FreeLaplacianStencil(GridBase *_grid)
: Stencil (_grid,6,Even,directions,displacements,0), grid(_grid)
{ };
virtual GridBase *Grid(void) { return grid; };
virtual void M (const Field &_in, Field &_out)
{
///////////////////////////////////////////////
// Halo exchange for this geometry of stencil
///////////////////////////////////////////////
Stencil.HaloExchange(_in, Compressor);
///////////////////////////////////
// Arithmetic expressions
///////////////////////////////////
// Views; device friendly/accessible pointers
auto st = Stencil.View(AcceleratorRead);
auto buf = st.CommBuf();
autoView( in , _in , AcceleratorRead);
autoView( out , _out , AcceleratorWrite);
typedef typename Field::vector_object vobj;
typedef decltype(coalescedRead(in[0])) calcObj;
const int Nsimd = vobj::Nsimd();
const uint64_t NN = grid->oSites();
accelerator_for( ss, NN, Nsimd, {
StencilEntry *SE;
const int lane=acceleratorSIMTlane(Nsimd);
calcObj chi;
calcObj res;
int ptype;
res = coalescedRead(in[ss])*(-6.0);
LEG_LOAD(0); res = res + chi;
LEG_LOAD(1); res = res + chi;
LEG_LOAD(2); res = res + chi;
LEG_LOAD(3); res = res + chi;
LEG_LOAD(4); res = res + chi;
LEG_LOAD(5); res = res + chi;
coalescedWrite(out[ss], res,lane);
});
};
virtual void Mdag (const Field &in, Field &out) { M(in,out);}; // Laplacian is hermitian
virtual void Mdiag (const Field &in, Field &out) {assert(0);}; // Unimplemented need only for multigrid
virtual void Mdir (const Field &in, Field &out,int dir, int disp){assert(0);}; // Unimplemented need only for multigrid
virtual void MdirAll (const Field &in, std::vector<Field> &out) {assert(0);}; // Unimplemented need only for multigrid
};
template<class Gimpl,class Field> class CovariantLaplacianStencil : public SparseMatrixBase<Field>
{
public:
INHERIT_GIMPL_TYPES(Gimpl);
typedef typename Field::vector_object siteObject;
template <typename vtype> using iImplDoubledGaugeField = iVector<iScalar<iMatrix<vtype, Nc> >, Nds>;
typedef iImplDoubledGaugeField<Simd> SiteDoubledGaugeField;
typedef Lattice<SiteDoubledGaugeField> DoubledGaugeField;
typedef CartesianStencil<siteObject, siteObject, int> StencilImpl;
GridBase *grid;
StencilImpl Stencil;
SimpleCompressor<siteObject> Compressor;
DoubledGaugeField Uds;
CovariantLaplacianStencil(GaugeField &Umu)
:
grid(Umu.Grid()),
Stencil (grid,6,Even,directions,displacements,0),
Uds(grid)
{
for (int mu = 0; mu < Nd; mu++) {
auto U = PeekIndex<LorentzIndex>(Umu, mu);
PokeIndex<LorentzIndex>(Uds, U, mu );
U = adj(Cshift(U, mu, -1));
PokeIndex<LorentzIndex>(Uds, U, mu + 4);
}
};
virtual GridBase *Grid(void) { return grid; };
virtual void M (const Field &_in, Field &_out)
{
///////////////////////////////////////////////
// Halo exchange for this geometry of stencil
///////////////////////////////////////////////
Stencil.HaloExchange(_in, Compressor);
///////////////////////////////////
// Arithmetic expressions
///////////////////////////////////
auto st = Stencil.View(AcceleratorRead);
auto buf = st.CommBuf();
autoView( in , _in , AcceleratorRead);
autoView( out , _out , AcceleratorWrite);
autoView( U , Uds , AcceleratorRead);
typedef typename Field::vector_object vobj;
typedef decltype(coalescedRead(in[0])) calcObj;
typedef decltype(coalescedRead(U[0](0))) calcLink;
const int Nsimd = vobj::Nsimd();
const uint64_t NN = grid->oSites();
accelerator_for( ss, NN, Nsimd, {
StencilEntry *SE;
const int lane=acceleratorSIMTlane(Nsimd);
calcObj chi;
calcObj res;
calcObj Uchi;
calcLink UU;
int ptype;
res = coalescedRead(in[ss])*(-6.0);
#define LEG_LOAD_MULT(leg,polarisation) \
UU = coalescedRead(U[ss](polarisation)); \
LEG_LOAD(leg); \
mult(&Uchi(), &UU, &chi()); \
res = res + Uchi;
LEG_LOAD_MULT(0,Xp);
LEG_LOAD_MULT(1,Yp);
LEG_LOAD_MULT(2,Zp);
LEG_LOAD_MULT(3,Xm);
LEG_LOAD_MULT(4,Ym);
LEG_LOAD_MULT(5,Zm);
coalescedWrite(out[ss], res,lane);
});
};
virtual void Mdag (const Field &in, Field &out) { M(in,out);}; // Laplacian is hermitian
virtual void Mdiag (const Field &in, Field &out) {assert(0);}; // Unimplemented need only for multigrid
virtual void Mdir (const Field &in, Field &out,int dir, int disp){assert(0);}; // Unimplemented need only for multigrid
virtual void MdirAll (const Field &in, std::vector<Field> &out) {assert(0);}; // Unimplemented need only for multigrid
};
#undef LEG_LOAD_MULT
#undef LEG_LOAD
int main(int argc, char ** argv)
{
Grid_init(&argc, &argv);
typedef LatticeColourVector Field;
auto latt_size = GridDefaultLatt();
auto simd_layout = GridDefaultSimd(Nd,vComplex::Nsimd());
auto mpi_layout = GridDefaultMpi();
GridCartesian Grid(latt_size,simd_layout,mpi_layout);
GridParallelRNG RNG(&Grid); RNG.SeedFixedIntegers(std::vector<int>({45,12,81,9}));
FreeLaplacianCshift<Field> FLcs(&Grid);
FreeLaplacianStencil<Field> FLst(&Grid);
LatticeGaugeField U(&Grid);
SU<Nc>::ColdConfiguration(RNG,U);
std::cout << " Gauge field has norm " <<norm2(U)<<std::endl;
CovariantLaplacianCshift <PeriodicGimplR,Field> CLcs(U);
CovariantLaplacianStencil<PeriodicGimplR,Field> CLst(U);
Field in(&Grid); gaussian(RNG,in);
Field out_FLcs(&Grid);
Field out_FLst(&Grid);
Field out_CLcs(&Grid);
Field out_CLst(&Grid);
Field diff(&Grid);
////////////////////////////////////////////////////////
// First test: in free field these should all agree
////////////////////////////////////////////////////////
FLcs.M(in,out_FLcs);
FLst.M(in,out_FLst);
CLcs.M(in,out_CLcs);
CLst.M(in,out_CLst);
std:: cout << "******************************************************************" <<std::endl;
std:: cout << " Test A: consistency of four different Laplacian implementations " <<std::endl;
std:: cout << "******************************************************************" <<std::endl;
std:: cout << " Input test vector " <<norm2(in)<<std::endl;
std:: cout << "--------------------------------------------------------" <<std::endl;
std:: cout << " Free cshift output vector " <<norm2(out_FLcs)<<std::endl;
std:: cout << " Free stencil output vector " <<norm2(out_FLst)<<std::endl;
std:: cout << " Cov cshift output vector " <<norm2(out_CLcs)<<std::endl;
std:: cout << " Cov stencil output vector " <<norm2(out_CLst)<<std::endl;
std:: cout << "--------------------------------------------------------" <<std::endl;
diff = out_FLcs - out_FLst;
std:: cout << " Difference between free Cshift Laplacian and free Stencil Laplacian = " <<norm2(diff)<<std::endl;
diff = out_FLcs - out_CLcs;
std:: cout << " Difference between free Cshift Laplacian and covariant Cshift Laplacian = " <<norm2(diff)<<std::endl;
diff = out_FLcs - out_CLst;
std:: cout << " Difference between free Cshift Laplacian and covariant Stencil Laplacian = " <<norm2(diff)<<std::endl;
std:: cout << "--------------------------------------------------------" <<std::endl;
std:: cout << "******************************************************************" <<std::endl;
std:: cout << " Test B: gauge covariance " <<std::endl;
std:: cout << "******************************************************************" <<std::endl;
LatticeGaugeField U_GT(&Grid); // Gauge transformed field
LatticeColourMatrix g(&Grid); // local Gauge xform matrix
U_GT = U;
// Make a random xform to the gauge field
SU<Nc>::RandomGaugeTransform(RNG,U_GT,g); // Unit gauge
Field in_GT(&Grid);
Field out_GT(&Grid);
Field out_CLcs_GT(&Grid);
Field out_CLst_GT(&Grid);
CovariantLaplacianCshift <PeriodicGimplR,Field> CLcs_GT(U_GT);
CovariantLaplacianStencil<PeriodicGimplR,Field> CLst_GT(U_GT);
in_GT = g*in;
out_GT = g*out_FLcs;
// Check M^GT_xy in_GT = g(x) M_xy g^dag(y) g(y) in = g(x) out(x)
CLcs_GT.M(in_GT,out_CLcs_GT);
CLst_GT.M(in_GT,out_CLst_GT);
diff = out_CLcs_GT - out_GT;
std:: cout << " Difference between Gauge xformed result and covariant Cshift Laplacian in xformed gauge = " <<norm2(diff)<<std::endl;
diff = out_CLst_GT - out_GT;
std:: cout << " Difference between Gauge xformed result and covariant Stencil Laplacian in xformed gauge = " <<norm2(diff)<<std::endl;
std:: cout << "--------------------------------------------------------" <<std::endl;
std:: cout << "******************************************************************" <<std::endl;
std:: cout << " Test C: compare in free Field to \"Feynman rule\" " <<std::endl;
std:: cout << "******************************************************************" <<std::endl;
std::vector<int> dim_mask({1,1,1,0}); // 3d FFT
FFT theFFT(&Grid);
Field out(&Grid);
Field F_out(&Grid);
Field F_in(&Grid);
// FFT the random input vector
theFFT.FFT_dim_mask(F_in,in,dim_mask,FFT::forward);
// Convolution theorem: multiply by Fourier representation of (discrete) Laplacian to apply diff op
LatticeComplexD lap(&Grid); lap = Zero();
LatticeComplexD kmu(&Grid);
ComplexD ci(0.0,1.0);
for(int mu=0;mu<3;mu++) {
RealD TwoPiL = M_PI * 2.0/ latt_size[mu];
LatticeCoordinate(kmu,mu);
kmu = TwoPiL * kmu;
// (e^ik_mu + e^-ik_mu - 2) = 2( cos kmu - 1) ~ 2 (1 - k_mu^2/2 -1 ) = - k_mu^2 + O(k^4)
lap = lap + 2.0*cos(kmu) - 2.0;
}
F_out = lap * F_in;
// Inverse FFT the result
theFFT.FFT_dim_mask(out,F_out,dim_mask,FFT::backward);
std::cout<<"Fourier xformed (in) "<<norm2(F_in)<<std::endl;
std::cout<<"Fourier xformed Laplacian x (in) "<<norm2(F_out)<<std::endl;
std::cout<<"Momentum space Laplacian application "<< norm2(out)<<std::endl;
std::cout<<"Stencil Laplacian application "<< norm2(out_CLcs)<<std::endl;
diff = out_CLcs - out;
std::cout<<"diff "<< norm2(diff)<<std::endl;
Grid_finalize();
}
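Restating the convolution-theorem step of Test C as one formula: with the 3d mask only the spatial directions enter, and for lattice momenta $k_\mu = 2\pi n_\mu / L_\mu$ the multiplier assembled in the loop above is

$$ \tilde\Delta(k) \;=\; \sum_{\mu=0}^{2}\bigl(e^{ik_\mu}+e^{-ik_\mu}-2\bigr) \;=\; \sum_{\mu=0}^{2} 2\bigl(\cos k_\mu - 1\bigr) \;\simeq\; -\sum_{\mu=0}^{2} k_\mu^{2} + O(k^{4}), $$

so multiplying the forward FFT of the source by $\tilde\Delta(k)$ and transforming back should reproduce the real-space Cshift/Stencil Laplacians, which is exactly the difference norm printed at the end.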

View File

@ -0,0 +1,127 @@
#include <Grid/Grid.h>
using namespace Grid;
// Function used for Chebyshev smearing
//
Real MomentumSmearing(Real p2)
{
return (1 - 4.0*p2) * exp(-p2/4);
}
Real DistillationSmearing(Real p2)
{
if ( p2 > 0.5 ) return 0.0;
else return 1.0;
}
// Flip sign to make prop to p^2, not -p^2 relative to last example
template<class Gimpl,class Field> class CovariantLaplacianCshift : public SparseMatrixBase<Field>
{
public:
INHERIT_GIMPL_TYPES(Gimpl);
GridBase *grid;
GaugeField U;
CovariantLaplacianCshift(GaugeField &_U) :
grid(_U.Grid()),
U(_U) { };
virtual GridBase *Grid(void) { return grid; };
virtual void M (const Field &in, Field &out)
{
out=Zero();
for(int mu=0;mu<Nd-1;mu++) {
GaugeLinkField Umu = PeekIndex<LorentzIndex>(U, mu); // NB: Inefficient
out = out - Gimpl::CovShiftForward(Umu,mu,in);
out = out - Gimpl::CovShiftBackward(Umu,mu,in);
out = out + 2.0*in;
}
};
virtual void Mdag (const Field &in, Field &out) { M(in,out);}; // Laplacian is hermitian
virtual void Mdiag (const Field &in, Field &out) {assert(0);}; // Unimplemented need only for multigrid
virtual void Mdir (const Field &in, Field &out,int dir, int disp){assert(0);}; // Unimplemented need only for multigrid
virtual void MdirAll (const Field &in, std::vector<Field> &out) {assert(0);}; // Unimplemented need only for multigrid
};
int main(int argc, char ** argv)
{
Grid_init(&argc, &argv);
typedef LatticeColourVector Field;
auto latt_size = GridDefaultLatt();
auto simd_layout = GridDefaultSimd(Nd,vComplex::Nsimd());
auto mpi_layout = GridDefaultMpi();
GridCartesian Grid(latt_size,simd_layout,mpi_layout);
GridParallelRNG RNG(&Grid); RNG.SeedFixedIntegers(std::vector<int>({45,12,81,9}));
LatticeGaugeField U(&Grid);
SU<Nc>::ColdConfiguration(RNG,U);
typedef CovariantLaplacianCshift <PeriodicGimplR,Field> Laplacian_t;
Laplacian_t Laplacian(U);
ColourVector ColourKronecker;
ColourKronecker = Zero();
ColourKronecker()()(0) = 1.0;
Coordinate site({latt_size[0]/2,
latt_size[1]/2,
latt_size[2]/2,
0});
Field kronecker(&Grid);
kronecker = Zero();
pokeSite(ColourKronecker,kronecker,site);
Field psi(&Grid), chi(&Grid);
//////////////////////////////////////
// Classic Wuppertal smearing
//////////////////////////////////////
Integer Iterations = 80;
Real width = 2.0;
Real coeff = (width*width) / Real(4*Iterations);
chi=kronecker;
// chi = (1-p^2/2N)^N kronecker
for(int n = 0; n < Iterations; ++n) {
Laplacian.M(chi,psi);
chi = chi - coeff*psi;
}
std::cout << " Wuppertal smeared operator is chi = \n" << chi <<std::endl;
/////////////////////////////////////
// Chebyshev smearing
/////////////////////////////////////
RealD lo = 0.0;
RealD hi = 12.0; // Analytic free field bound
HermitianLinearOperator<Laplacian_t,Field> HermOp(Laplacian);
std::cout << " Checking spectral range of our POSITIVE definite operator \n";
PowerMethod<Field> PM;
PM(HermOp,kronecker);
// Chebyshev<Field> ChebySmear(lo,hi,20,DistillationSmearing);
Chebyshev<Field> ChebySmear(lo,hi,20,MomentumSmearing);
{
std::ofstream of("chebysmear");
ChebySmear.csv(of);
}
ChebySmear(HermOp,kronecker,chi);
std::cout << " Chebyshev smeared operator is chi = \n" << chi <<std::endl;
Grid_finalize();
}
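The Wuppertal loop above applies, with $N$ = Iterations, $w$ = width and $\hat\Delta$ the (positive) covariant Laplacian defined in this file,

$$ \chi \;=\; \Bigl(1-\frac{w^{2}}{4N}\,\hat\Delta\Bigr)^{N}\,\delta \;\xrightarrow{\;N\to\infty\;}\; e^{-w^{2}\hat\Delta/4}\,\delta , $$

i.e. a Gaussian smearing kernel of width $w$ in the free theory; the Chebyshev branch instead builds a polynomial approximation of the chosen profile (MomentumSmearing or DistillationSmearing) applied to the same operator over the spectral range [lo,hi].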

View File

@ -0,0 +1,129 @@
#include <Grid/Grid.h>
using namespace Grid;
template<class Field>
void SimpleConjugateGradient(LinearOperatorBase<Field> &HPDop,const Field &b, Field &x)
{
RealD cp, c, alpha, d, beta, ssq, qq;
RealD Tolerance=1.0e-10;
int MaxIterations=10000;
Field p(b), mmp(b), r(b);
HPDop.HermOpAndNorm(x, mmp, d, beta);
r = b - mmp;
p = r;
cp = alpha = norm2(p);
ssq = norm2(b);
RealD rsq = Tolerance * Tolerance * ssq;
for (int k = 1; k <= MaxIterations; k++) {
c = cp;
HPDop.HermOp(p, mmp);
d = real(innerProduct(p,mmp));
alpha = c / d;
r = r - alpha *mmp;
cp = norm2(r);
beta = cp / c;
x = x + alpha* p ;
p = r + beta* p ;
std::cout << "iteration "<<k<<" cp " <<std::sqrt(cp/ssq) << std::endl;
if (cp <= rsq) {
return;
}
}
assert(0);
}
// Flip sign to make prop to p^2, not -p^2 relative to last example
template<class Gimpl,class Field> class CovariantLaplacianCshift : public SparseMatrixBase<Field>
{
public:
INHERIT_GIMPL_TYPES(Gimpl);
GridBase *grid;
GaugeField U;
RealD m2=1.0e-2;
CovariantLaplacianCshift(GaugeField &_U) :
grid(_U.Grid()),
U(_U) { };
virtual GridBase *Grid(void) { return grid; };
virtual void M (const Field &in, Field &out)
{
out=Zero();
for(int mu=0;mu<Nd-1;mu++) {
GaugeLinkField Umu = PeekIndex<LorentzIndex>(U, mu); // NB: Inefficient
out = out - Gimpl::CovShiftForward(Umu,mu,in);
out = out - Gimpl::CovShiftBackward(Umu,mu,in);
out = out + 2.0*in + m2*in;
}
};
virtual void Mdag (const Field &in, Field &out) { M(in,out);}; // Laplacian is hermitian
virtual void Mdiag (const Field &in, Field &out) {assert(0);}; // Unimplemented need only for multigrid
virtual void Mdir (const Field &in, Field &out,int dir, int disp){assert(0);}; // Unimplemented need only for multigrid
virtual void MdirAll (const Field &in, std::vector<Field> &out) {assert(0);}; // Unimplemented need only for multigrid
};
int main(int argc, char ** argv)
{
Grid_init(&argc, &argv);
typedef LatticeColourVector Field;
auto latt_size = GridDefaultLatt();
auto simd_layout = GridDefaultSimd(Nd,vComplex::Nsimd());
auto mpi_layout = GridDefaultMpi();
GridCartesian Grid(latt_size,simd_layout,mpi_layout);
GridParallelRNG RNG(&Grid); RNG.SeedFixedIntegers(std::vector<int>({45,12,81,9}));
LatticeGaugeField U(&Grid);
SU<Nc>::ColdConfiguration(RNG,U);
typedef CovariantLaplacianCshift <PeriodicGimplR,Field> Laplacian_t;
Laplacian_t Laplacian(U);
ColourVector ColourKronecker;
ColourKronecker = Zero();
ColourKronecker()()(0) = 1.0;
Coordinate site({0,0,0,0}); // Point source at origin
Field kronecker(&Grid);
kronecker = Zero();
pokeSite(ColourKronecker,kronecker,site);
Field psi(&Grid); psi=Zero();
HermitianLinearOperator<Laplacian_t,Field> HermOp(Laplacian);
SimpleConjugateGradient(HermOp, kronecker,psi);
Field r(&Grid);
Laplacian.M(psi,r);
r=kronecker-r;
std::cout << "True residual "<< norm2(r) <<std::endl;
// Optionally print the result vector
// std::cout << psi<<std::endl;
Grid_finalize();
}
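SimpleConjugateGradient above is the standard recursion for a Hermitian positive-definite operator $A$ (here the massive covariant Laplacian): with $r_0=b-Ax_0$ and $p_0=r_0$,

$$ \alpha_k=\frac{r_k^{\dagger}r_k}{p_k^{\dagger}Ap_k},\qquad x_{k+1}=x_k+\alpha_k p_k,\qquad r_{k+1}=r_k-\alpha_k Ap_k,\qquad \beta_k=\frac{r_{k+1}^{\dagger}r_{k+1}}{r_k^{\dagger}r_k},\qquad p_{k+1}=r_{k+1}+\beta_k p_k, $$

stopping once $\lVert r\rVert^{2}\le \mathrm{Tol}^{2}\lVert b\rVert^{2}$; the driver then recomputes the true residual $b-A\psi$ as an independent check.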

View File

@ -0,0 +1,314 @@
/*
* Warning: This code is illustrative only: not well tested, and not meant for production use
* without regression / tests being applied
*/
#include <Grid/Grid.h>
using namespace std;
using namespace Grid;
template<class Gimpl,class Field> class CovariantLaplacianCshift : public SparseMatrixBase<Field>
{
public:
INHERIT_GIMPL_TYPES(Gimpl);
GridBase *grid;
GaugeField U;
CovariantLaplacianCshift(GaugeField &_U) :
grid(_U.Grid()),
U(_U) { };
virtual GridBase *Grid(void) { return grid; };
virtual void M (const Field &in, Field &out)
{
out=Zero();
for(int mu=0;mu<Nd-1;mu++) {
GaugeLinkField Umu = PeekIndex<LorentzIndex>(U, mu); // NB: Inefficient
out = out - Gimpl::CovShiftForward(Umu,mu,in);
out = out - Gimpl::CovShiftBackward(Umu,mu,in);
out = out + 2.0*in;
}
};
virtual void Mdag (const Field &in, Field &out) { M(in,out);}; // Laplacian is hermitian
virtual void Mdiag (const Field &in, Field &out) {assert(0);}; // Unimplemented need only for multigrid
virtual void Mdir (const Field &in, Field &out,int dir, int disp){assert(0);}; // Unimplemented need only for multigrid
virtual void MdirAll (const Field &in, std::vector<Field> &out) {assert(0);}; // Unimplemented need only for multigrid
};
void MakePhase(Coordinate mom,LatticeComplex &phase)
{
GridBase *grid = phase.Grid();
auto latt_size = grid->GlobalDimensions();
ComplexD ci(0.0,1.0);
phase=Zero();
LatticeComplex coor(phase.Grid());
for(int mu=0;mu<Nd;mu++){
RealD TwoPiL = M_PI * 2.0/ latt_size[mu];
LatticeCoordinate(coor,mu);
phase = phase + (TwoPiL * mom[mu]) * coor;
}
phase = exp(phase*ci);
}
void PointSource(Coordinate &coor,LatticePropagator &source)
{
// Coordinate coor({0,0,0,0});
source=Zero();
SpinColourMatrix kronecker; kronecker=1.0;
pokeSite(kronecker,source,coor);
}
void Z2WallSource(GridParallelRNG &RNG,int tslice,LatticePropagator &source)
{
GridBase *grid = source.Grid();
LatticeComplex noise(grid);
LatticeComplex zz(grid); zz=Zero();
LatticeInteger t(grid);
RealD nrm=1.0/sqrt(2);
bernoulli(RNG, noise); // 0,1 50:50
noise = (2.*noise - Complex(1,1))*nrm;
LatticeCoordinate(t,Tdir);
noise = where(t==Integer(tslice), noise, zz);
source = 1.0;
source = source*noise;
std::cout << " Z2 wall " << norm2(source) << std::endl;
}
template<class Field>
void GaussianSmear(LatticeGaugeField &U,Field &unsmeared,Field &smeared)
{
typedef CovariantLaplacianCshift <PeriodicGimplR,Field> Laplacian_t;
Laplacian_t Laplacian(U);
Integer Iterations = 40;
Real width = 2.0;
Real coeff = (width*width) / Real(4*Iterations);
Field tmp(U.Grid());
smeared=unsmeared;
// chi = (1-p^2/2N)^N kronecker
for(int n = 0; n < Iterations; ++n) {
Laplacian.M(smeared,tmp);
smeared = smeared - coeff*tmp;
std::cout << " smear iter " << n<<" " <<norm2(smeared)<<std::endl;
}
}
void GaussianSource(Coordinate &site,LatticeGaugeField &U,LatticePropagator &source)
{
LatticePropagator tmp(source.Grid());
PointSource(site,source);
std::cout << " GaussianSource Kronecker "<< norm2(source)<<std::endl;
tmp = source;
GaussianSmear(U,tmp,source);
std::cout << " GaussianSource Smeared "<< norm2(source)<<std::endl;
}
void GaussianWallSource(GridParallelRNG &RNG,int tslice,LatticeGaugeField &U,LatticePropagator &source)
{
Z2WallSource(RNG,tslice,source);
auto tmp = source;
GaussianSmear(U,tmp,source);
}
void SequentialSource(int tslice,Coordinate &mom,LatticePropagator &spectator,LatticePropagator &source)
{
assert(mom.size()==Nd);
assert(mom[Tdir] == 0);
GridBase * grid = spectator.Grid();
LatticeInteger ts(grid);
LatticeCoordinate(ts,Tdir);
source = Zero();
source = where(ts==Integer(tslice),spectator,source); // Stick in a slice of the spectator, zero everywhere else
LatticeComplex phase(grid);
MakePhase(mom,phase);
source = source *phase;
}
template<class Action>
void Solve(Action &D,LatticePropagator &source,LatticePropagator &propagator)
{
GridBase *UGrid = D.GaugeGrid();
GridBase *FGrid = D.FermionGrid();
LatticeFermion src4 (UGrid);
LatticeFermion src5 (FGrid);
LatticeFermion result5(FGrid);
LatticeFermion result4(UGrid);
ConjugateGradient<LatticeFermion> CG(1.0e-8,100000);
SchurRedBlackDiagMooeeSolve<LatticeFermion> schur(CG);
ZeroGuesser<LatticeFermion> ZG; // Could be a DeflatedGuesser if we have eigenvectors
for(int s=0;s<Nd;s++){
for(int c=0;c<Nc;c++){
PropToFerm<Action>(src4,source,s,c);
D.ImportPhysicalFermionSource(src4,src5);
result5=Zero();
schur(D,src5,result5,ZG);
std::cout<<GridLogMessage
<<"spin "<<s<<" color "<<c
<<" norm2(src5d) " <<norm2(src5)
<<" norm2(result5d) "<<norm2(result5)<<std::endl;
D.ExportPhysicalFermionSolution(result5,result4);
FermToProp<Action>(propagator,result4,s,c);
}
}
}
class MesonFile: Serializable {
public:
GRID_SERIALIZABLE_CLASS_MEMBERS(MesonFile, std::vector<std::vector<Complex> >, data);
};
void MesonTrace(std::string file,LatticePropagator &q1,LatticePropagator &q2,LatticeComplex &phase)
{
const int nchannel=4;
Gamma::Algebra Gammas[nchannel][2] = {
{Gamma::Algebra::Gamma5 ,Gamma::Algebra::Gamma5},
{Gamma::Algebra::GammaTGamma5,Gamma::Algebra::GammaTGamma5},
{Gamma::Algebra::GammaTGamma5,Gamma::Algebra::Gamma5},
{Gamma::Algebra::Gamma5 ,Gamma::Algebra::GammaTGamma5}
};
Gamma G5(Gamma::Algebra::Gamma5);
LatticeComplex meson_CF(q1.Grid());
MesonFile MF;
for(int ch=0;ch<nchannel;ch++){
Gamma Gsrc(Gammas[ch][0]);
Gamma Gsnk(Gammas[ch][1]);
meson_CF = trace(G5*adj(q1)*G5*Gsnk*q2*adj(Gsrc));
std::vector<TComplex> meson_T;
sliceSum(meson_CF,meson_T, Tdir);
int nt=meson_T.size();
std::vector<Complex> corr(nt);
for(int t=0;t<nt;t++){
corr[t] = TensorRemove(meson_T[t]); // Yes this is ugly; not figured out a workaround yet
std::cout << " channel "<<ch<<" t "<<t<<" " <<corr[t]<<std::endl;
}
MF.data.push_back(corr);
}
{
XmlWriter WR(file);
write(WR,"MesonFile",MF);
}
}
int main (int argc, char ** argv)
{
const int Ls=8;
Grid_init(&argc,&argv);
// Double precision grids
GridCartesian * UGrid = SpaceTimeGrid::makeFourDimGrid(GridDefaultLatt(),
GridDefaultSimd(Nd,vComplex::Nsimd()),
GridDefaultMpi());
GridRedBlackCartesian * UrbGrid = SpaceTimeGrid::makeFourDimRedBlackGrid(UGrid);
GridCartesian * FGrid = SpaceTimeGrid::makeFiveDimGrid(Ls,UGrid);
GridRedBlackCartesian * FrbGrid = SpaceTimeGrid::makeFiveDimRedBlackGrid(Ls,UGrid);
//////////////////////////////////////////////////////////////////////
// You can manage seeds however you like.
// Recommend SeedUniqueString.
//////////////////////////////////////////////////////////////////////
std::vector<int> seeds4({1,2,3,4});
GridParallelRNG RNG4(UGrid); RNG4.SeedFixedIntegers(seeds4);
LatticeGaugeField Umu(UGrid);
std::string config;
if( argc > 1 && argv[1][0] != '-' )
{
std::cout<<GridLogMessage <<"Loading configuration from "<<argv[1]<<std::endl;
FieldMetaData header;
NerscIO::readConfiguration(Umu, header, argv[1]);
config=argv[1];
}
else
{
std::cout<<GridLogMessage <<"Using hot configuration"<<std::endl;
SU<Nc>::ColdConfiguration(Umu);
// SU<Nc>::HotConfiguration(RNG4,Umu);
config="HotConfig";
}
std::vector<RealD> masses({ 0.03,0.04,0.45} ); // u/d, s, c ??
int nmass = masses.size();
std::vector<MobiusFermionR *> FermActs;
std::cout<<GridLogMessage <<"======================"<<std::endl;
std::cout<<GridLogMessage <<"MobiusFermion action as Scaled Shamir kernel"<<std::endl;
std::cout<<GridLogMessage <<"======================"<<std::endl;
for(auto mass: masses) {
RealD M5=1.0;
RealD b=1.5;// Scale factor b+c=2, b-c=1
RealD c=0.5;
FermActs.push_back(new MobiusFermionR(Umu,*FGrid,*FrbGrid,*UGrid,*UrbGrid,mass,M5,b,c));
}
LatticePropagator point_source(UGrid);
LatticePropagator wall_source(UGrid);
LatticePropagator gaussian_source(UGrid);
Coordinate Origin({0,0,0,0});
PointSource (Origin,point_source);
Z2WallSource (RNG4,0,wall_source);
GaussianSource(Origin,Umu,gaussian_source);
std::vector<LatticePropagator> PointProps(nmass,UGrid);
std::vector<LatticePropagator> GaussProps(nmass,UGrid);
std::vector<LatticePropagator> Z2Props (nmass,UGrid);
for(int m=0;m<nmass;m++) {
Solve(*FermActs[m],point_source ,PointProps[m]);
Solve(*FermActs[m],gaussian_source,GaussProps[m]);
Solve(*FermActs[m],wall_source ,Z2Props[m]);
}
LatticeComplex phase(UGrid);
Coordinate mom({0,0,0,0});
MakePhase(mom,phase);
for(int m1=0 ;m1<nmass;m1++) {
for(int m2=m1;m2<nmass;m2++) {
std::stringstream ssp,ssg,ssz;
ssp<<config<< "_m" << m1 << "_m"<< m2 << "_point_meson.xml";
ssg<<config<< "_m" << m1 << "_m"<< m2 << "_smeared_meson.xml";
ssz<<config<< "_m" << m1 << "_m"<< m2 << "_wall_meson.xml";
MesonTrace(ssp.str(),PointProps[m1],PointProps[m2],phase);
MesonTrace(ssg.str(),GaussProps[m1],GaussProps[m2],phase);
MesonTrace(ssz.str(),Z2Props[m1],Z2Props[m2],phase);
}}
Grid_finalize();
}
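MesonTrace above computes, for each channel $(\Gamma_{\rm src},\Gamma_{\rm snk})$ and using $\gamma_5$-hermiticity to form the anti-quark line from $q_1$, the timeslice-summed correlator

$$ C(t) \;=\; \sum_{\vec x}\,\mathrm{tr}\Bigl[\,\gamma_5\, q_1^{\dagger}(\vec x,t)\,\gamma_5\;\Gamma_{\rm snk}\; q_2(\vec x,t)\;\Gamma_{\rm src}^{\dagger}\,\Bigr], $$

which is the trace(G5*adj(q1)*G5*Gsnk*q2*adj(Gsrc)) line followed by sliceSum over Tdir, written out in conventional notation.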

examples/Makefile.am Normal file
View File

@ -0,0 +1,6 @@
SUBDIRS = .
include Make.inc

View File

@ -82,3 +82,18 @@ for f in $TESTS; do
echo >> Make.inc
done
cd ..
# examples Make.inc
cd $home/examples/
echo> Make.inc
TESTS=`ls *.cc`
TESTLIST=`echo ${TESTS} | sed s/.cc//g `
echo bin_PROGRAMS = ${TESTLIST} > Make.inc
echo >> Make.inc
for f in $TESTS; do
BNAME=`basename $f .cc`
echo ${BNAME}_SOURCES=$f >> Make.inc
echo ${BNAME}_LDADD='$(top_builddir)/Grid/libGrid.a'>> Make.inc
echo >> Make.inc
done
cd ..
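For illustration, if the examples/ directory contained a single (hypothetical) Example_Laplacian.cc, the loop above would emit a Make.inc of the form

bin_PROGRAMS = Example_Laplacian

Example_Laplacian_SOURCES=Example_Laplacian.cc
Example_Laplacian_LDADD=$(top_builddir)/Grid/libGrid.a

with one SOURCES/LDADD pair appended per additional .cc file.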