Mirror of https://github.com/paboyle/Grid.git (synced 2024-11-09 23:45:36 +00:00)

Merge branch 'develop' into gparity_HMC
Commit: d184b8c921
.travis.yml (deleted, 56 lines)
@@ -1,56 +0,0 @@
-language: cpp
-
-cache:
-  directories:
-    - clang
-
-matrix:
-  include:
-    - os: osx
-      osx_image: xcode8.3
-      compiler: clang
-
-before_install:
-    - export GRIDDIR=`pwd`
-    - if [[ "$TRAVIS_OS_NAME" == "linux" ]] && [[ "$CC" == "clang" ]] && [ ! -e clang/bin ]; then wget $CLANG_LINK; tar -xf `basename $CLANG_LINK`; mkdir clang; mv clang+*/* clang/; fi
-    - if [[ "$TRAVIS_OS_NAME" == "linux" ]] && [[ "$CC" == "clang" ]]; then export PATH="${GRIDDIR}/clang/bin:${PATH}"; fi
-    - if [[ "$TRAVIS_OS_NAME" == "linux" ]] && [[ "$CC" == "clang" ]]; then export LD_LIBRARY_PATH="${GRIDDIR}/clang/lib:${LD_LIBRARY_PATH}"; fi
-    - if [[ "$TRAVIS_OS_NAME" == "osx" ]]; then brew update; fi
-    - if [[ "$TRAVIS_OS_NAME" == "osx" ]]; then brew install libmpc openssl; fi
-
-install:
-    - export CWD=`pwd`
-    - echo $CWD
-    - export CC=$CC$VERSION
-    - export CXX=$CXX$VERSION
-    - echo $PATH
-    - which autoconf
-    - autoconf --version
-    - which automake
-    - automake --version
-    - which $CC
-    - $CC --version
-    - which $CXX
-    - $CXX --version
-    - if [[ "$TRAVIS_OS_NAME" == "osx" ]]; then export LDFLAGS='-L/usr/local/lib'; fi
-    - if [[ "$TRAVIS_OS_NAME" == "osx" ]]; then export EXTRACONF='--with-openssl=/usr/local/opt/openssl'; fi
-
-script:
-    - ./bootstrap.sh
-    - mkdir build
-    - cd build
-    - mkdir lime
-    - cd lime
-    - mkdir build
-    - cd build
-    - wget http://usqcd-software.github.io/downloads/c-lime/lime-1.3.2.tar.gz
-    - tar xf lime-1.3.2.tar.gz
-    - cd lime-1.3.2
-    - ./configure --prefix=$CWD/build/lime/install
-    - make -j4
-    - make install
-    - cd $CWD/build
-    - ../configure --enable-simd=SSE4 --enable-comms=none --with-lime=$CWD/build/lime/install ${EXTRACONF}
-    - make -j4
-    - ./benchmarks/Benchmark_dwf --threads 1 --debug-signals
-    - make check
@@ -35,6 +35,9 @@ Author: Christoph Lehner <christoph@lhnr.de>
 #endif
 #ifdef GRID_HIP
 #include <hip/hip_runtime_api.h>
+#endif
+#ifdef GRID_SYCl
+
 #endif
 
 NAMESPACE_BEGIN(Grid);
@@ -70,6 +73,7 @@ void GlobalSharedMemory::Init(Grid_MPI_Comm comm)
   WorldNodes = WorldSize/WorldShmSize;
   assert( (WorldNodes * WorldShmSize) == WorldSize );
 
+
   // FIXME: Check all WorldShmSize are the same ?
 
   /////////////////////////////////////////////////////////////////////
@@ -446,7 +450,47 @@ void GlobalSharedMemory::SharedMemoryAllocate(uint64_t bytes, int flags)
 ////////////////////////////////////////////////////////////////////////////////////////////
 // Hugetlbfs mapping intended
 ////////////////////////////////////////////////////////////////////////////////////////////
-#if defined(GRID_CUDA) ||defined(GRID_HIP)
+#if defined(GRID_CUDA) ||defined(GRID_HIP) || defined(GRID_SYCL)
 
+//if defined(GRID_SYCL)
+#if 0
+void GlobalSharedMemory::SharedMemoryAllocate(uint64_t bytes, int flags)
+{
+  void * ShmCommBuf ;
+  assert(_ShmSetup==1);
+  assert(_ShmAlloc==0);
+
+  //////////////////////////////////////////////////////////////////////////////////////////////////////////
+  // allocate the pointer array for shared windows for our group
+  //////////////////////////////////////////////////////////////////////////////////////////////////////////
+  MPI_Barrier(WorldShmComm);
+  WorldShmCommBufs.resize(WorldShmSize);
+
+  ///////////////////////////////////////////////////////////////////////////////////////////////////////////
+  // Each MPI rank should allocate our own buffer
+  ///////////////////////////////////////////////////////////////////////////////////////////////////////////
+  ShmCommBuf = acceleratorAllocDevice(bytes);
+
+  if (ShmCommBuf == (void *)NULL ) {
+    std::cerr << " SharedMemoryMPI.cc acceleratorAllocDevice failed NULL pointer for " << bytes<<" bytes " << std::endl;
+    exit(EXIT_FAILURE);
+  }
+
+  std::cout << WorldRank << header " SharedMemoryMPI.cc acceleratorAllocDevice "<< bytes
+            << "bytes at "<< std::hex<< ShmCommBuf <<std::dec<<" for comms buffers " <<std::endl;
+
+  SharedMemoryZero(ShmCommBuf,bytes);
+
+  assert(WorldShmSize == 1);
+  for(int r=0;r<WorldShmSize;r++){
+    WorldShmCommBufs[r] = ShmCommBuf;
+  }
+  _ShmAllocBytes=bytes;
+  _ShmAlloc=1;
+}
+#endif
+
+#if defined(GRID_CUDA) ||defined(GRID_HIP) ||defined(GRID_SYCL)
 void GlobalSharedMemory::SharedMemoryAllocate(uint64_t bytes, int flags)
 {
   void * ShmCommBuf ;
@@ -469,8 +513,16 @@ void GlobalSharedMemory::SharedMemoryAllocate(uint64_t bytes, int flags)
   ///////////////////////////////////////////////////////////////////////////////////////////////////////////
   // Each MPI rank should allocate our own buffer
   ///////////////////////////////////////////////////////////////////////////////////////////////////////////
+#ifdef GRID_SYCL_LEVEL_ZERO_IPC
+  auto zeDevice = cl::sycl::get_native<cl::sycl::backend::level_zero>(theGridAccelerator->get_device());
+  auto zeContext= cl::sycl::get_native<cl::sycl::backend::level_zero>(theGridAccelerator->get_context());
+  ze_device_mem_alloc_desc_t zeDesc = {};
+  zeMemAllocDevice(zeContext,&zeDesc,bytes,2*1024*1024,zeDevice,&ShmCommBuf);
+  std::cout << WorldRank << header " SharedMemoryMPI.cc zeMemAllocDevice "<< bytes
+            << "bytes at "<< std::hex<< ShmCommBuf <<std::dec<<" for comms buffers " <<std::endl;
+#else
   ShmCommBuf = acceleratorAllocDevice(bytes);
+#endif
   if (ShmCommBuf == (void *)NULL ) {
     std::cerr << " SharedMemoryMPI.cc acceleratorAllocDevice failed NULL pointer for " << bytes<<" bytes " << std::endl;
     exit(EXIT_FAILURE);
@@ -480,8 +532,8 @@ void GlobalSharedMemory::SharedMemoryAllocate(uint64_t bytes, int flags)
   std::cout << WorldRank << header " SharedMemoryMPI.cc acceleratorAllocDevice "<< bytes
             << "bytes at "<< std::hex<< ShmCommBuf <<std::dec<<" for comms buffers " <<std::endl;
   }
-  SharedMemoryZero(ShmCommBuf,bytes);
+  //  SharedMemoryZero(ShmCommBuf,bytes);
+  std::cout<< "Setting up IPC"<<std::endl;
   ///////////////////////////////////////////////////////////////////////////////////////////////////////////
   // Loop over ranks/gpu's on our node
   ///////////////////////////////////////////////////////////////////////////////////////////////////////////
@@ -491,6 +543,23 @@ void GlobalSharedMemory::SharedMemoryAllocate(uint64_t bytes, int flags)
     //////////////////////////////////////////////////
     // If it is me, pass around the IPC access key
     //////////////////////////////////////////////////
+#ifdef GRID_SYCL_LEVEL_ZERO_IPC
+    ze_ipc_mem_handle_t handle;
+    if ( r==WorldShmRank ) {
+      auto err = zeMemGetIpcHandle(zeContext,ShmCommBuf,&handle);
+      if ( err != ZE_RESULT_SUCCESS ) {
+        std::cerr << "SharedMemoryMPI.cc zeMemGetIpcHandle failed for rank "<<r<<" "<<std::hex<<err<<std::dec<<std::endl;
+        exit(EXIT_FAILURE);
+      } else {
+        std::cerr << "SharedMemoryMPI.cc zeMemGetIpcHandle succeeded for rank "<<r<<" "<<std::hex<<err<<std::dec<<std::endl;
+      }
+      std::cerr<<"Allocated IpcHandle rank "<<r<<" (hex) ";
+      for(int c=0;c<ZE_MAX_IPC_HANDLE_SIZE;c++){
+        std::cerr<<std::hex<<(uint32_t)((uint8_t)handle.data[c])<<std::dec;
+      }
+      std::cerr<<std::endl;
+    }
+#endif
 #ifdef GRID_CUDA
     cudaIpcMemHandle_t handle;
     if ( r==WorldShmRank ) {
@@ -527,6 +596,25 @@ void GlobalSharedMemory::SharedMemoryAllocate(uint64_t bytes, int flags)
     // If I am not the source, overwrite thisBuf with remote buffer
     ///////////////////////////////////////////////////////////////
     void * thisBuf = ShmCommBuf;
+#ifdef GRID_SYCL_LEVEL_ZERO_IPC
+    if ( r!=WorldShmRank ) {
+      thisBuf = nullptr;
+      std::cerr<<"Using IpcHandle rank "<<r<<" ";
+      for(int c=0;c<ZE_MAX_IPC_HANDLE_SIZE;c++){
+        std::cerr<<std::hex<<(uint32_t)((uint8_t)handle.data[c])<<std::dec;
+      }
+      std::cerr<<std::endl;
+      auto err = zeMemOpenIpcHandle(zeContext,zeDevice,handle,0,&thisBuf);
+      if ( err != ZE_RESULT_SUCCESS ) {
+        std::cerr << "SharedMemoryMPI.cc "<<zeContext<<" "<<zeDevice<<std::endl;
+        std::cerr << "SharedMemoryMPI.cc zeMemOpenIpcHandle failed for rank "<<r<<" "<<std::hex<<err<<std::dec<<std::endl;
+        exit(EXIT_FAILURE);
+      } else {
+        std::cerr << "SharedMemoryMPI.cc zeMemOpenIpcHandle succeeded for rank "<<r<<" "<<std::hex<<err<<std::dec<<std::endl;
+      }
+      assert(thisBuf!=nullptr);
+    }
+#endif
 #ifdef GRID_CUDA
     if ( r!=WorldShmRank ) {
       auto err = cudaIpcOpenMemHandle(&thisBuf,handle,cudaIpcMemLazyEnablePeerAccess);
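Note: the two GRID_SYCL_LEVEL_ZERO_IPC hunks above mirror the existing CUDA IPC path. The owning rank exports an opaque Level Zero handle for its device allocation, the handle bytes travel over MPI, and every other rank on the node opens the handle to obtain a mapped pointer into the same buffer. A minimal sketch of that round trip, assuming an intra-node communicator nodeComm and the zeContext/zeDevice handles obtained as in the diff (error handling elided, names illustrative):

    // Sketch: export an IPC handle on the owner, ship its opaque bytes over MPI,
    // and open it on the peers to get a device pointer into the owner's buffer.
    ze_ipc_mem_handle_t handle;
    if (myRank == ownerRank) {
      zeMemGetIpcHandle(zeContext, ShmCommBuf, &handle);            // owner exports
    }
    MPI_Bcast(&handle, sizeof(handle), MPI_BYTE, ownerRank, nodeComm);
    void *peerPtr = ShmCommBuf;                                     // owner keeps its own pointer
    if (myRank != ownerRank) {
      zeMemOpenIpcHandle(zeContext, zeDevice, handle, 0, &peerPtr); // peers map the buffer
    }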
@@ -557,6 +645,8 @@ void GlobalSharedMemory::SharedMemoryAllocate(uint64_t bytes, int flags)
   _ShmAllocBytes=bytes;
   _ShmAlloc=1;
 }
+#endif
+
 #else
 #ifdef GRID_MPI3_SHMMMAP
 void GlobalSharedMemory::SharedMemoryAllocate(uint64_t bytes, int flags)
@@ -727,16 +817,16 @@ void GlobalSharedMemory::SharedMemoryAllocate(uint64_t bytes, int flags)
 /////////////////////////////////////////////////////////////////////////
 void GlobalSharedMemory::SharedMemoryZero(void *dest,size_t bytes)
 {
-#ifdef GRID_CUDA
-  cudaMemset(dest,0,bytes);
+#if defined(GRID_CUDA) || defined(GRID_HIP) || defined(GRID_SYCL)
+  acceleratorMemSet(dest,0,bytes);
 #else
   bzero(dest,bytes);
 #endif
 }
 void GlobalSharedMemory::SharedMemoryCopy(void *dest,void *src,size_t bytes)
 {
-#ifdef GRID_CUDA
-  cudaMemcpy(dest,src,bytes,cudaMemcpyDefault);
+#if defined(GRID_CUDA) || defined(GRID_HIP) || defined(GRID_SYCL)
+  acceleratorCopyToDevice(src,dest,bytes);
 #else
   bcopy(src,dest,bytes);
 #endif
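Note: SharedMemoryZero and SharedMemoryCopy now funnel through Grid's accelerator abstraction instead of raw CUDA calls, so one code path serves CUDA, HIP and SYCL. An illustrative sketch of how such a compile-time dispatch shim looks (this shows the pattern only; the function name is hypothetical, not Grid's actual header):

    // Illustrative compile-time dispatch for a device memset:
    inline void exampleMemSet(void *base, int value, size_t bytes) {
    #if defined(GRID_CUDA)
      cudaMemset(base, value, bytes);          // CUDA device memory
    #elif defined(GRID_HIP)
      hipMemset(base, value, bytes);           // HIP device memory
    #else
      memset(base, value, bytes);              // host fallback
    #endif
    }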
@@ -800,7 +890,7 @@ void SharedMemory::SetCommunicator(Grid_MPI_Comm comm)
 }
 #endif
 
-  SharedMemoryTest();
+  //SharedMemoryTest();
 }
 //////////////////////////////////////////////////////////////////
 // On node barrier
@@ -207,11 +207,20 @@ public:
     std::cout<<GridLogMessage <<"NERSC Configuration "<<file<< " and plaquette, link trace, and checksum agree"<<std::endl;
   }
 
+  // Preferred interface
+  template<class GaugeStats=PeriodicGaugeStatistics>
+  static inline void writeConfiguration(Lattice<vLorentzColourMatrixD > &Umu,
+                                        std::string file,
+                                        std::string ens_label = std::string("DWF"))
+  {
+    writeConfiguration(Umu,file,0,1,ens_label);
+  }
   template<class GaugeStats=PeriodicGaugeStatistics>
   static inline void writeConfiguration(Lattice<vLorentzColourMatrixD > &Umu,
                                         std::string file,
                                         int two_row,
-                                        int bits32)
+                                        int bits32,
+                                        std::string ens_label = std::string("DWF"))
   {
     typedef vLorentzColourMatrixD vobj;
     typedef typename vobj::scalar_object sobj;
@@ -221,8 +230,8 @@ public:
     // Following should become arguments
     ///////////////////////////////////////////
     header.sequence_number = 1;
-    header.ensemble_id    = "UKQCD";
-    header.ensemble_label = "DWF";
+    header.ensemble_id    = std::string("UKQCD");
+    header.ensemble_label = ens_label;
 
     typedef LorentzColourMatrixD fobj3D;
     typedef LorentzColour2x3D fobj2D;
@@ -234,7 +243,7 @@ public:
     GaugeStats Stats; Stats(Umu,header);
     MachineCharacteristics(header);
 
     uint64_t offset;
 
     // Sod it -- always write 3x3 double
     header.floating_point = std::string("IEEE64BIG");
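Note: with the new overload, callers that only need to tag the ensemble label can skip the legacy two_row/bits32 arguments, which the wrapper fixes to 0 and 1. A hedged usage sketch, assuming the enclosing class is Grid's NerscIO and the gauge field and grid are already set up (file name illustrative):

    // Preferred call: label the ensemble; two_row=0, bits32=1 are supplied internally.
    LatticeGaugeFieldD Umu(grid);                 // contents assumed filled elsewhere
    NerscIO::writeConfiguration(Umu, std::string("ckpoint_lat.1000"),
                                std::string("MyEnsemble"));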
@@ -291,12 +291,6 @@ typedef ImprovedStaggeredFermion5D<StaggeredImplR> ImprovedStaggeredFermion5DR;
 typedef ImprovedStaggeredFermion5D<StaggeredImplF> ImprovedStaggeredFermion5DF;
 typedef ImprovedStaggeredFermion5D<StaggeredImplD> ImprovedStaggeredFermion5DD;
 
-#ifndef GRID_CUDA
-typedef ImprovedStaggeredFermion5D<StaggeredVec5dImplR> ImprovedStaggeredFermionVec5dR;
-typedef ImprovedStaggeredFermion5D<StaggeredVec5dImplF> ImprovedStaggeredFermionVec5dF;
-typedef ImprovedStaggeredFermion5D<StaggeredVec5dImplD> ImprovedStaggeredFermionVec5dD;
-#endif
-
 NAMESPACE_END(Grid);
 
 ////////////////////
@@ -183,7 +183,8 @@ NAMESPACE_CHECK(ImplStaggered);
 /////////////////////////////////////////////////////////////////////////////
 // Single flavour one component spinors with colour index. 5d vec
 /////////////////////////////////////////////////////////////////////////////
-#include <Grid/qcd/action/fermion/StaggeredVec5dImpl.h>
-NAMESPACE_CHECK(ImplStaggered5dVec);
+// Deprecate Vec5d
+//#include <Grid/qcd/action/fermion/StaggeredVec5dImpl.h>
+//NAMESPACE_CHECK(ImplStaggered5dVec);
 
@@ -680,7 +680,8 @@ void StaggeredKernels<Impl>::DhopSiteAsm(StencilView &st,
   gauge2 =(uint64_t)&UU[sU]( Z ); \
   gauge3 =(uint64_t)&UU[sU]( T );
 
+#undef STAG_VEC5D
+#ifdef STAG_VEC5D
 // This is the single precision 5th direction vectorised kernel
 #include <Grid/simd/Intel512single.h>
 template <> void StaggeredKernels<StaggeredVec5dImplF>::DhopSiteAsm(StencilView &st,
@@ -790,7 +791,7 @@ template <> void StaggeredKernels<StaggeredVec5dImplD>::DhopSiteAsm(StencilView
 #endif
 }
 
+#endif
 
 
 #define PERMUTE_DIR3 __asm__ ( \
@@ -32,25 +32,50 @@ Author: paboyle <paboyle@ph.ed.ac.uk>
 
 NAMESPACE_BEGIN(Grid);
 
-#define LOAD_CHI(b) \
+#ifdef GRID_SIMT
+
+#define LOAD_CHI(ptype,b) \
+  const SiteSpinor & ref (b[offset]); \
+  Chi_0=coalescedReadPermute<ptype>(ref()()(0),perm,lane); \
+  Chi_1=coalescedReadPermute<ptype>(ref()()(1),perm,lane); \
+  Chi_2=coalescedReadPermute<ptype>(ref()()(2),perm,lane);
+
+#define LOAD_CHI_COMMS(b) \
   const SiteSpinor & ref (b[offset]); \
-  Chi_0=ref()()(0);\
-  Chi_1=ref()()(1);\
-  Chi_2=ref()()(2);
+  Chi_0=coalescedRead(ref()()(0),lane); \
+  Chi_1=coalescedRead(ref()()(1),lane); \
+  Chi_2=coalescedRead(ref()()(2),lane);
+
+#define PERMUTE_DIR(dir) ;
+
+#else
+#define LOAD_CHI(ptype,b)  LOAD_CHI_COMMS(b)
+
+#define LOAD_CHI_COMMS(b) \
+  const SiteSpinor & ref (b[offset]); \
+  Chi_0=ref()()(0); \
+  Chi_1=ref()()(1); \
+  Chi_2=ref()()(2);
+
+#define PERMUTE_DIR(dir) \
+  permute##dir(Chi_0,Chi_0); \
+  permute##dir(Chi_1,Chi_1); \
+  permute##dir(Chi_2,Chi_2);
+
+#endif
+
 
 // To splat or not to splat depends on the implementation
 #define MULT(A,UChi) \
   auto & ref(U[sU](A)); \
-  Impl::loadLinkElement(U_00,ref()(0,0)); \
-  Impl::loadLinkElement(U_10,ref()(1,0)); \
-  Impl::loadLinkElement(U_20,ref()(2,0)); \
-  Impl::loadLinkElement(U_01,ref()(0,1)); \
-  Impl::loadLinkElement(U_11,ref()(1,1)); \
-  Impl::loadLinkElement(U_21,ref()(2,1)); \
-  Impl::loadLinkElement(U_02,ref()(0,2)); \
-  Impl::loadLinkElement(U_12,ref()(1,2)); \
-  Impl::loadLinkElement(U_22,ref()(2,2)); \
+  U_00=coalescedRead(ref()(0,0),lane); \
+  U_10=coalescedRead(ref()(1,0),lane); \
+  U_20=coalescedRead(ref()(2,0),lane); \
+  U_01=coalescedRead(ref()(0,1),lane); \
+  U_11=coalescedRead(ref()(1,1),lane); \
+  U_21=coalescedRead(ref()(2,1),lane); \
+  U_02=coalescedRead(ref()(0,2),lane); \
+  U_12=coalescedRead(ref()(1,2),lane); \
+  U_22=coalescedRead(ref()(2,2),lane); \
   UChi ## _0 = U_00*Chi_0; \
   UChi ## _1 = U_10*Chi_0;\
   UChi ## _2 = U_20*Chi_0;\
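Note: the GRID_SIMT branch above works because on a GPU each thread owns exactly one SIMD lane. A load then fetches a single lane of the vectorised site object, and a "permute" (needed at the lattice boundary of a SIMD-folded volume) becomes reading a different lane rather than shuffling a whole vector. A toy model of the two reads, with illustrative types that are not Grid's:

    // Toy model: a SIMD vector as an array of lanes.
    template<int Nsimd> struct ToyVec { float lane[Nsimd]; };

    template<int Nsimd>
    inline float toyCoalescedRead(const ToyVec<Nsimd> &v, int lane) {
      return v.lane[lane];                 // each GPU thread reads its own lane
    }
    template<int Nsimd>
    inline float toyCoalescedReadPermute(const ToyVec<Nsimd> &v, int permBits, int lane) {
      return v.lane[lane ^ permBits];      // the neighbour's data sits in a permuted lane
    }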
@@ -63,15 +88,15 @@ NAMESPACE_BEGIN(Grid);
 
 #define MULT_ADD(U,A,UChi) \
   auto & ref(U[sU](A)); \
-  Impl::loadLinkElement(U_00,ref()(0,0)); \
-  Impl::loadLinkElement(U_10,ref()(1,0)); \
-  Impl::loadLinkElement(U_20,ref()(2,0)); \
-  Impl::loadLinkElement(U_01,ref()(0,1)); \
-  Impl::loadLinkElement(U_11,ref()(1,1)); \
-  Impl::loadLinkElement(U_21,ref()(2,1)); \
-  Impl::loadLinkElement(U_02,ref()(0,2)); \
-  Impl::loadLinkElement(U_12,ref()(1,2)); \
-  Impl::loadLinkElement(U_22,ref()(2,2)); \
+  U_00=coalescedRead(ref()(0,0),lane); \
+  U_10=coalescedRead(ref()(1,0),lane); \
+  U_20=coalescedRead(ref()(2,0),lane); \
+  U_01=coalescedRead(ref()(0,1),lane); \
+  U_11=coalescedRead(ref()(1,1),lane); \
+  U_21=coalescedRead(ref()(2,1),lane); \
+  U_02=coalescedRead(ref()(0,2),lane); \
+  U_12=coalescedRead(ref()(1,2),lane); \
+  U_22=coalescedRead(ref()(2,2),lane); \
   UChi ## _0 += U_00*Chi_0; \
   UChi ## _1 += U_10*Chi_0;\
   UChi ## _2 += U_20*Chi_0;\
@@ -83,24 +108,18 @@ NAMESPACE_BEGIN(Grid);
   UChi ## _2 += U_22*Chi_2;
 
-
-#define PERMUTE_DIR(dir) \
-  permute##dir(Chi_0,Chi_0); \
-  permute##dir(Chi_1,Chi_1); \
-  permute##dir(Chi_2,Chi_2);
-
 
 #define HAND_STENCIL_LEG_BASE(Dir,Perm,skew) \
   SE=st.GetEntry(ptype,Dir+skew,sF); \
   offset = SE->_offset; \
   local = SE->_is_local; \
   perm = SE->_permute; \
   if ( local ) { \
-    LOAD_CHI(in); \
+    LOAD_CHI(Perm,in); \
    if ( perm) { \
      PERMUTE_DIR(Perm); \
    } \
  } else { \
-    LOAD_CHI(buf); \
+    LOAD_CHI_COMMS(buf); \
  }
 
 #define HAND_STENCIL_LEG_BEGIN(Dir,Perm,skew,even) \
@@ -116,19 +135,18 @@ NAMESPACE_BEGIN(Grid);
 }
 
-
 #define HAND_STENCIL_LEG_INT(U,Dir,Perm,skew,even) \
   SE=st.GetEntry(ptype,Dir+skew,sF); \
   offset = SE->_offset; \
   local = SE->_is_local; \
   perm = SE->_permute; \
   if ( local ) { \
-    LOAD_CHI(in); \
+    LOAD_CHI(Perm,in); \
    if ( perm) { \
      PERMUTE_DIR(Perm); \
    } \
  } else if ( st.same_node[Dir] ) { \
-    LOAD_CHI(buf); \
+    LOAD_CHI_COMMS(buf); \
  } \
  if (local || st.same_node[Dir] ) { \
    MULT_ADD(U,Dir,even); \
@@ -140,10 +158,32 @@ NAMESPACE_BEGIN(Grid);
   local = SE->_is_local; \
   if ((!local) && (!st.same_node[Dir]) ) { \
     nmu++; \
-    { LOAD_CHI(buf); } \
+    { LOAD_CHI_COMMS(buf); } \
    { MULT_ADD(U,Dir,even); } \
  }
 
+#define HAND_DECLARATIONS(Simd) \
+  Simd even_0; \
+  Simd even_1; \
+  Simd even_2; \
+  Simd odd_0; \
+  Simd odd_1; \
+  Simd odd_2; \
+  \
+  Simd Chi_0; \
+  Simd Chi_1; \
+  Simd Chi_2; \
+  \
+  Simd U_00; \
+  Simd U_10; \
+  Simd U_20; \
+  Simd U_01; \
+  Simd U_11; \
+  Simd U_21; \
+  Simd U_02; \
+  Simd U_12; \
+  Simd U_22;
+
+
 template <class Impl>
 template <int Naik> accelerator_inline
@@ -155,28 +195,14 @@ void StaggeredKernels<Impl>::DhopSiteHand(StencilView &st,
   typedef typename Simd::scalar_type S;
   typedef typename Simd::vector_type V;
 
-  Simd even_0; // 12 regs on knc
-  Simd even_1;
-  Simd even_2;
-  Simd odd_0; // 12 regs on knc
-  Simd odd_1;
-  Simd odd_2;
-
-  Simd Chi_0;    // two spinor; 6 regs
-  Simd Chi_1;
-  Simd Chi_2;
-
-  Simd U_00;  // two rows of U matrix
-  Simd U_10;
-  Simd U_20;
-  Simd U_01;
-  Simd U_11;
-  Simd U_21;  // 2 reg left.
-  Simd U_02;
-  Simd U_12;
-  Simd U_22;
-
-  SiteSpinor result;
+  const int Nsimd = SiteHalfSpinor::Nsimd();
+  const int lane=acceleratorSIMTlane(Nsimd);
+  typedef decltype( coalescedRead( in[0]()()(0) )) Simt;
+  HAND_DECLARATIONS(Simt);
+
+  typedef decltype( coalescedRead( in[0] )) calcSiteSpinor;
+  calcSiteSpinor result;
   int offset,local,perm, ptype;
 
   StencilEntry *SE;
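Note: deducing the working type from coalescedRead is what lets the identical hand-unrolled arithmetic compile two ways: per-lane scalars on a GPU thread (GRID_SIMT) and full SIMD vectors on a CPU thread. The pattern, condensed from the hunk above (Grid's coalescedRead and the input view in as in the diff):

    typedef decltype( coalescedRead( in[0]()()(0) )) Simt;  // scalar on GPU, vector on CPU
    HAND_DECLARATIONS(Simt);                                // declares Chi_*, U_*, even_*, odd_*
    typedef decltype( coalescedRead( in[0] )) calcSiteSpinor;
    calcSiteSpinor result;                                  // written back with coalescedWrite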
@@ -215,7 +241,7 @@ void StaggeredKernels<Impl>::DhopSiteHand(StencilView &st,
       result()()(1) = even_1 + odd_1;
       result()()(2) = even_2 + odd_2;
     }
-    vstream(out[sF],result);
+    coalescedWrite(out[sF],result);
   }
 }
 
@@ -230,28 +256,13 @@ void StaggeredKernels<Impl>::DhopSiteHandInt(StencilView &st,
   typedef typename Simd::scalar_type S;
   typedef typename Simd::vector_type V;
 
-  Simd even_0; // 12 regs on knc
-  Simd even_1;
-  Simd even_2;
-  Simd odd_0; // 12 regs on knc
-  Simd odd_1;
-  Simd odd_2;
-
-  Simd Chi_0;    // two spinor; 6 regs
-  Simd Chi_1;
-  Simd Chi_2;
-
-  Simd U_00;  // two rows of U matrix
-  Simd U_10;
-  Simd U_20;
-  Simd U_01;
-  Simd U_11;
-  Simd U_21;  // 2 reg left.
-  Simd U_02;
-  Simd U_12;
-  Simd U_22;
-
-  SiteSpinor result;
+  const int Nsimd = SiteHalfSpinor::Nsimd();
+  const int lane=acceleratorSIMTlane(Nsimd);
+  typedef decltype( coalescedRead( in[0]()()(0) )) Simt;
+  HAND_DECLARATIONS(Simt);
+
+  typedef decltype( coalescedRead( in[0] )) calcSiteSpinor;
+  calcSiteSpinor result;
   int offset, ptype, local, perm;
 
   StencilEntry *SE;
@@ -261,8 +272,8 @@ void StaggeredKernels<Impl>::DhopSiteHandInt(StencilView &st,
     // int sF=s+LLs*sU;
     {
 
-      even_0 = Zero(); even_1 = Zero(); even_2 = Zero();
-      odd_0 = Zero(); odd_1 = Zero(); odd_2 = Zero();
+      zeroit(even_0); zeroit(even_1); zeroit(even_2);
+      zeroit(odd_0); zeroit(odd_1); zeroit(odd_2);
 
       skew = 0;
       HAND_STENCIL_LEG_INT(U,Xp,3,skew,even);
@@ -294,7 +305,7 @@ void StaggeredKernels<Impl>::DhopSiteHandInt(StencilView &st,
       result()()(1) = even_1 + odd_1;
       result()()(2) = even_2 + odd_2;
     }
-    vstream(out[sF],result);
+    coalescedWrite(out[sF],result);
   }
 }
 
@@ -309,28 +320,13 @@ void StaggeredKernels<Impl>::DhopSiteHandExt(StencilView &st,
   typedef typename Simd::scalar_type S;
   typedef typename Simd::vector_type V;
 
-  Simd even_0; // 12 regs on knc
-  Simd even_1;
-  Simd even_2;
-  Simd odd_0; // 12 regs on knc
-  Simd odd_1;
-  Simd odd_2;
-
-  Simd Chi_0;    // two spinor; 6 regs
-  Simd Chi_1;
-  Simd Chi_2;
-
-  Simd U_00;  // two rows of U matrix
-  Simd U_10;
-  Simd U_20;
-  Simd U_01;
-  Simd U_11;
-  Simd U_21;  // 2 reg left.
-  Simd U_02;
-  Simd U_12;
-  Simd U_22;
-
-  SiteSpinor result;
+  const int Nsimd = SiteHalfSpinor::Nsimd();
+  const int lane=acceleratorSIMTlane(Nsimd);
+  typedef decltype( coalescedRead( in[0]()()(0) )) Simt;
+  HAND_DECLARATIONS(Simt);
+
+  typedef decltype( coalescedRead( in[0] )) calcSiteSpinor;
+  calcSiteSpinor result;
   int offset, ptype, local;
 
   StencilEntry *SE;
@@ -340,8 +336,8 @@ void StaggeredKernels<Impl>::DhopSiteHandExt(StencilView &st,
     // int sF=s+LLs*sU;
     {
 
-      even_0 = Zero(); even_1 = Zero(); even_2 = Zero();
-      odd_0 = Zero(); odd_1 = Zero(); odd_2 = Zero();
+      zeroit(even_0); zeroit(even_1); zeroit(even_2);
+      zeroit(odd_0); zeroit(odd_1); zeroit(odd_2);
       int nmu=0;
       skew = 0;
       HAND_STENCIL_LEG_EXT(U,Xp,3,skew,even);
@@ -374,7 +370,7 @@ void StaggeredKernels<Impl>::DhopSiteHandExt(StencilView &st,
       result()()(1) = even_1 + odd_1;
       result()()(2) = even_2 + odd_2;
     }
-    out[sF] = out[sF] + result;
+    coalescedWrite(out[sF] , out(sF)+ result);
   }
 }
 }
@@ -397,6 +393,7 @@ void StaggeredKernels<Impl>::DhopSiteHandExt(StencilView &st,
                                    const FermionFieldView &in, FermionFieldView &out, int dag); \
 */
 #undef LOAD_CHI
+#undef HAND_DECLARATIONS
 
 NAMESPACE_END(Grid);
 
@@ -85,21 +85,18 @@ public:
 
     std::cout << GridLogDebug << "Stout smearing started\n";
 
-    // Smear the configurations
+    // C contains the staples multiplied by some rho
+    u_smr = U ; // set the smeared field to the current gauge field
     SmearBase->smear(C, U);
 
     for (int mu = 0; mu < Nd; mu++) {
-      if( mu == OrthogDim )
-        tmp = 1.0;  // Don't smear in the orthogonal direction
-      else {
-        tmp = peekLorentz(C, mu);
-        Umu = peekLorentz(U, mu);
-        iq_mu = Ta(
-                   tmp *
-                   adj(Umu));  // iq_mu = Ta(Omega_mu) to match the signs with the paper
-        exponentiate_iQ(tmp, iq_mu);
-      }
-      pokeLorentz(u_smr, tmp * Umu, mu);  // u_smr = exp(iQ_mu)*U_mu
+      if( mu == OrthogDim ) continue ;
+      // u_smr = exp(iQ_mu)*U_mu apart from Orthogdim
+      Umu = peekLorentz(U, mu);
+      tmp = peekLorentz(C, mu);
+      iq_mu = Ta( tmp * adj(Umu));
+      exponentiate_iQ(tmp, iq_mu);
+      pokeLorentz(u_smr, tmp * Umu, mu);
     }
     std::cout << GridLogDebug << "Stout smearing completed\n";
   };
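Note: the restructured loop makes the orthogonal-direction handling explicit. Links in OrthogDim are carried over unchanged by the initial u_smr = U, and every other link is replaced by exp(iQ_mu) U_mu with Q_mu = Ta(C_mu U_mu^dag). Written out as the new code executes it (a condensed restatement of the hunk, types as in Grid):

    u_smr = U;                              // OrthogDim links pass through unsmeared
    for (int mu = 0; mu < Nd; mu++) {
      if (mu == OrthogDim) continue;
      Umu   = peekLorentz(U, mu);           // U_mu
      tmp   = peekLorentz(C, mu);           // rho-weighted staple C_mu
      iq_mu = Ta( tmp * adj(Umu) );         // Q_mu = Ta(C_mu U_mu^dag)
      exponentiate_iQ(tmp, iq_mu);          // tmp  = exp(iQ_mu)
      pokeLorentz(u_smr, tmp * Umu, mu);    // U'_mu = exp(iQ_mu) U_mu
    }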
@@ -65,7 +65,8 @@ void coalescedWriteNonTemporal(vobj & __restrict__ vec,const vobj & __restrict__
 #else
 
-#ifndef GRID_SYCL
+//#ifndef GRID_SYCL
+#if 1
 // Use the scalar as our own complex on GPU ... thrust::complex or std::complex
 template<class vsimd,IfSimd<vsimd> = 0> accelerator_inline
 typename vsimd::scalar_type
@@ -1,5 +1,5 @@
 /*************************************************************************************
+n
     Grid physics library, www.github.com/paboyle/Grid
 
     Source file: ./lib/tensors/Tensor_extract_merge.h
@@ -153,7 +153,7 @@ void insertLane(int lane, vobj & __restrict__ vec,const typename vobj::scalar_ob
 // Extract to a bunch of scalar object pointers of different scalar type, with offset. Useful for precision change
 ////////////////////////////////////////////////////////////////////////
 template<class vobj, class sobj> accelerator
-void extract(const vobj &vec,ExtractPointerArray<sobj> &extracted, int offset)
+void extract(const vobj &vec,const ExtractPointerArray<sobj> &extracted, int offset)
 {
   typedef typename GridTypeMapper<sobj>::scalar_type sobj_scalar_type;
   typedef typename GridTypeMapper<vobj>::scalar_type scalar_type;
@@ -181,7 +181,7 @@ void extract(const vobj &vec,ExtractPointerArray<sobj> &extracted, int offset)
 // Merge bunch of scalar object pointers of different scalar type, with offset. Useful for precision change
 ////////////////////////////////////////////////////////////////////////
 template<class vobj, class sobj> accelerator
-void merge(vobj &vec,ExtractPointerArray<sobj> &extracted, int offset)
+void merge(vobj &vec,const ExtractPointerArray<sobj> &extracted, int offset)
 {
   typedef typename GridTypeMapper<sobj>::scalar_type sobj_scalar_type;
   typedef typename GridTypeMapper<vobj>::scalar_type scalar_type;
@@ -171,7 +171,6 @@ void acceleratorInit(void)
 #ifdef GRID_SYCL
-
 cl::sycl::queue *theGridAccelerator;
 
 void acceleratorInit(void)
 {
   int nDevices = 1;
@@ -179,6 +178,10 @@ void acceleratorInit(void)
   cl::sycl::device selectedDevice { selector };
   theGridAccelerator = new sycl::queue (selectedDevice);
 
+#ifdef GRID_SYCL_LEVEL_ZERO_IPC
+  zeInit(0);
+#endif
+
   char * localRankStr = NULL;
   int rank = 0, world_rank=0;
 #define ENV_LOCAL_RANK_OMPI "OMPI_COMM_WORLD_LOCAL_RANK"
@@ -39,6 +39,10 @@ Author: paboyle <paboyle@ph.ed.ac.uk>
 #ifdef HAVE_MM_MALLOC_H
 #include <mm_malloc.h>
 #endif
+#ifdef __APPLE__
+// no memalign
+inline void *memalign(size_t align, size_t bytes) { return malloc(bytes); }
+#endif
 
 NAMESPACE_BEGIN(Grid);
 
@@ -233,6 +237,13 @@ inline int acceleratorIsCommunicable(void *ptr)
 NAMESPACE_END(Grid);
 #include <CL/sycl.hpp>
 #include <CL/sycl/usm.hpp>
+
+#define GRID_SYCL_LEVEL_ZERO_IPC
+
+#ifdef GRID_SYCL_LEVEL_ZERO_IPC
+#include <level_zero/ze_api.h>
+#include <CL/sycl/backend/level_zero.hpp>
+#endif
 NAMESPACE_BEGIN(Grid);
 
 extern cl::sycl::queue *theGridAccelerator;
@@ -257,11 +268,14 @@ accelerator_inline int acceleratorSIMTlane(int Nsimd) {
     unsigned long nt=acceleratorThreads(); \
     unsigned long unum1 = num1; \
     unsigned long unum2 = num2; \
+    if(nt < 8)nt=8; \
     cl::sycl::range<3> local {nt,1,nsimd}; \
     cl::sycl::range<3> global{unum1,unum2,nsimd}; \
     cgh.parallel_for<class dslash>( \
     cl::sycl::nd_range<3>(global,local), \
-    [=] (cl::sycl::nd_item<3> item) /*mutable*/ { \
+    [=] (cl::sycl::nd_item<3> item) /*mutable*/ \
+    [[intel::reqd_sub_group_size(8)]] \
+    { \
     auto iter1 = item.get_global_id(0); \
     auto iter2 = item.get_global_id(1); \
     auto lane = item.get_global_id(2); \
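Note: the attribute requires every launch to use sub-groups of exactly 8 work-items, which is why the thread count is clamped with if(nt < 8)nt=8. A minimal standalone kernel carrying the same attribute, assuming a SYCL compiler that supports the Intel extension and a global size n divisible by 8 (illustrative names):

    #include <CL/sycl.hpp>
    void toyLaunch(cl::sycl::queue &q, float *data, size_t n) {
      q.submit([&](cl::sycl::handler &cgh) {
        cgh.parallel_for<class toy>(
          cl::sycl::nd_range<1>(cl::sycl::range<1>(n), cl::sycl::range<1>(8)),
          [=](cl::sycl::nd_item<1> item) [[intel::reqd_sub_group_size(8)]] {
            size_t i = item.get_global_id(0);
            data[i] *= 2.0f;                          // trivial body
          });
      }).wait();
    }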
@@ -409,6 +423,8 @@ inline void acceleratorMemSet(void *base,int value,size_t bytes) { hipMemset(bas
 
 #undef GRID_SIMT
 
+
+
 #define accelerator
 #define accelerator_inline strong_inline
 #define accelerator_for(iterator,num,nsimd, ... ) thread_for(iterator, num, { __VA_ARGS__ });
@@ -457,7 +473,7 @@ accelerator_inline void acceleratorSynchronise(void)
   __syncwarp();
 #endif
 #ifdef GRID_SYCL
-  cl::sycl::detail::workGroupBarrier();
+  //cl::sycl::detail::workGroupBarrier();
 #endif
 #ifdef GRID_HIP
   __syncthreads();
@@ -56,6 +56,8 @@ Author: paboyle <paboyle@ph.ed.ac.uk>
 static int
 feenableexcept (unsigned int excepts)
 {
+#if 0
+  // Fails on Apple M1
   static fenv_t fenv;
   unsigned int new_excepts = excepts & FE_ALL_EXCEPT;
   unsigned int old_excepts; // previous masks
@@ -70,6 +72,8 @@ feenableexcept (unsigned int excepts)
 
   iold_excepts = (int) old_excepts;
   return ( fesetenv (&fenv) ? -1 : iold_excepts );
+#endif
+  return 0;
 }
 #endif
 
@@ -1,5 +1,5 @@
 # additional include paths necessary to compile the C++ library
-SUBDIRS = Grid HMC benchmarks tests
+SUBDIRS = Grid HMC benchmarks tests examples
 
 include $(top_srcdir)/doxygen.inc
 
@@ -1,4 +1,4 @@
-# Grid [![Teamcity status](http://ci.cliath.ph.ed.ac.uk/app/rest/builds/aggregated/strob:(buildType:(affectedProject(id:GridBasedSoftware_Grid)),branch:name:develop)/statusIcon.svg)](http://ci.cliath.ph.ed.ac.uk/project.html?projectId=GridBasedSoftware_Grid&tab=projectOverview) [![Travis status](https://travis-ci.org/paboyle/Grid.svg?branch=develop)](https://travis-ci.org/paboyle/Grid)
+# Grid [![Teamcity status](http://ci.cliath.ph.ed.ac.uk/app/rest/builds/aggregated/strob:(buildType:(affectedProject(id:GridBasedSoftware_Grid)),branch:name:develop)/statusIcon.svg)](http://ci.cliath.ph.ed.ac.uk/project.html?projectId=GridBasedSoftware_Grid&tab=projectOverview)
 
 **Data parallel C++ mathematical object library.**
 
@@ -133,34 +133,30 @@ public:
 
   std::vector<HalfSpinColourVectorD *> xbuf(8);
   std::vector<HalfSpinColourVectorD *> rbuf(8);
-  Grid.ShmBufferFreeAll();
+  //Grid.ShmBufferFreeAll();
+  uint64_t bytes=lat*lat*lat*Ls*sizeof(HalfSpinColourVectorD);
   for(int d=0;d<8;d++){
-    xbuf[d] = (HalfSpinColourVectorD *)Grid.ShmBufferMalloc(lat*lat*lat*Ls*sizeof(HalfSpinColourVectorD));
-    rbuf[d] = (HalfSpinColourVectorD *)Grid.ShmBufferMalloc(lat*lat*lat*Ls*sizeof(HalfSpinColourVectorD));
+    xbuf[d] = (HalfSpinColourVectorD *)acceleratorAllocDevice(bytes);
+    rbuf[d] = (HalfSpinColourVectorD *)acceleratorAllocDevice(bytes);
     // bzero((void *)xbuf[d],lat*lat*lat*Ls*sizeof(HalfSpinColourVectorD));
     // bzero((void *)rbuf[d],lat*lat*lat*Ls*sizeof(HalfSpinColourVectorD));
   }
 
-  int bytes=lat*lat*lat*Ls*sizeof(HalfSpinColourVectorD);
   int ncomm;
   double dbytes;
-  std::vector<double> times(Nloop);
-  for(int i=0;i<Nloop;i++){
 
-    double start=usecond();
+  for(int dir=0;dir<8;dir++) {
+    int mu =dir % 4;
+    if (mpi_layout[mu]>1 ) {
 
-    dbytes=0;
-    ncomm=0;
+      std::vector<double> times(Nloop);
+      for(int i=0;i<Nloop;i++){
 
-    thread_for(dir,8,{
-      double tbytes;
-      int mu =dir % 4;
-
-      if (mpi_layout[mu]>1 ) {
+        dbytes=0;
+        double start=usecond();
 
         int xmit_to_rank;
         int recv_from_rank;
+
         if ( dir == mu ) {
           int comm_proc=1;
           Grid.ShiftedRanks(mu,comm_proc,xmit_to_rank,recv_from_rank);
@@ -168,40 +164,38 @@ public:
           int comm_proc = mpi_layout[mu]-1;
           Grid.ShiftedRanks(mu,comm_proc,xmit_to_rank,recv_from_rank);
         }
-        tbytes= Grid.StencilSendToRecvFrom((void *)&xbuf[dir][0], xmit_to_rank,
-                                           (void *)&rbuf[dir][0], recv_from_rank,
-                                           bytes,dir);
-        thread_critical {
-          ncomm++;
-          dbytes+=tbytes;
-        }
-      }
-    });
-    Grid.Barrier();
-    double stop=usecond();
-    t_time[i] = stop-start; // microseconds
-
-  }
-
-  timestat.statistics(t_time);
-
-  dbytes=dbytes*ppn;
-  double xbytes    = dbytes*0.5;
-  // double rbytes    = dbytes*0.5;
-  double bidibytes = dbytes;
-
-  std::cout<<GridLogMessage << lat<<"\t"<<Ls<<"\t "
-           << bytes << " \t "
-           <<xbytes/timestat.mean<<" \t "<< xbytes*timestat.err/(timestat.mean*timestat.mean)<< " \t "
-           <<xbytes/timestat.max <<" "<< xbytes/timestat.min
-           << "\t\t"<< bidibytes/timestat.mean<< " " << bidibytes*timestat.err/(timestat.mean*timestat.mean) << " "
-           << bidibytes/timestat.max << " " << bidibytes/timestat.min << std::endl;
-
-  }
-  }
+        Grid.SendToRecvFrom((void *)&xbuf[dir][0], xmit_to_rank,
+                            (void *)&rbuf[dir][0], recv_from_rank,
+                            bytes);
+        dbytes+=bytes;
+
+        double stop=usecond();
+        t_time[i] = stop-start; // microseconds
+
+      }
+      timestat.statistics(t_time);
+
+      dbytes=dbytes*ppn;
+      double xbytes    = dbytes*0.5;
+      double bidibytes = dbytes;
+
+      std::cout<<GridLogMessage << lat<<"\t"<<Ls<<"\t "
+               << bytes << " \t "
+               <<xbytes/timestat.mean<<" \t "<< xbytes*timestat.err/(timestat.mean*timestat.mean)<< " \t "
+               <<xbytes/timestat.max <<" "<< xbytes/timestat.min
+               << "\t\t"<< bidibytes/timestat.mean<< " " << bidibytes*timestat.err/(timestat.mean*timestat.mean) << " "
+               << bidibytes/timestat.max << " " << bidibytes/timestat.min << std::endl;
+    }
+  }
+  for(int d=0;d<8;d++){
+    acceleratorFreeDevice(xbuf[d]);
+    acceleratorFreeDevice(rbuf[d]);
+  }
 }
 
   return;
 }
 
 
 static void Memory(void)
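Note: the benchmark now allocates send/receive buffers directly in device memory and times plain SendToRecvFrom exchanges one direction at a time, instead of threading StencilSendToRecvFrom over shared-memory buffers. The timing core reduces to the following skeleton (Grid calls as in the diff; dir, the ranks, and the buffers set up as above):

    std::vector<double> t_time(Nloop);
    for (int i = 0; i < Nloop; i++) {
      double start = usecond();
      Grid.SendToRecvFrom((void *)&xbuf[dir][0], xmit_to_rank,
                          (void *)&rbuf[dir][0], recv_from_rank, bytes);
      t_time[i] = usecond() - start;       // microseconds per exchange
    }
    timestat.statistics(t_time);           // bandwidth follows from bytes/mean, scaled by ppn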
@@ -281,7 +275,6 @@ public:
 
 
   uint64_t lmax=32;
-#define NLOOP (1000*lmax*lmax*lmax*lmax/lat/lat/lat/lat)
 
   GridSerialRNG sRNG; sRNG.SeedFixedIntegers(std::vector<int>({45,12,81,9}));
   for(int lat=8;lat<=lmax;lat+=8){
@@ -445,7 +438,7 @@ public:
   // 1344= 3*(2*8+6)*2*8 + 8*3*2*2 + 3*4*2*8
   // 1344 = Nc* (6+(Nc-1)*8)*2*Nd + Nd*Nc*2*2 + Nd*Nc*Ns*2
   // double flops=(1344.0*volume)/2;
-#if 1
+#if 0
   double fps = Nc* (6+(Nc-1)*8)*Ns*Nd + Nd*Nc*Ns + Nd*Nc*Ns*2;
 #else
   double fps = Nc* (6+(Nc-1)*8)*Ns*Nd + 2*Nd*Nc*Ns + 2*Nd*Nc*Ns*2;
@@ -716,12 +709,12 @@ int main (int argc, char ** argv)
 
   if ( do_su4 ) {
     std::cout<<GridLogMessage << "=================================================================================="<<std::endl;
-    std::cout<<GridLogMessage << " Memory benchmark " <<std::endl;
+    std::cout<<GridLogMessage << " SU(4) benchmark " <<std::endl;
     std::cout<<GridLogMessage << "=================================================================================="<<std::endl;
     Benchmark::SU4();
   }
 
-  if ( do_comms && (NN>1) ) {
+  if ( do_comms ) {
     std::cout<<GridLogMessage << "=================================================================================="<<std::endl;
     std::cout<<GridLogMessage << " Communications benchmark " <<std::endl;
     std::cout<<GridLogMessage << "=================================================================================="<<std::endl;
@@ -815,6 +815,7 @@ AC_CONFIG_FILES(tests/smearing/Makefile)
 AC_CONFIG_FILES(tests/qdpxx/Makefile)
 AC_CONFIG_FILES(tests/testu01/Makefile)
 AC_CONFIG_FILES(benchmarks/Makefile)
+AC_CONFIG_FILES(examples/Makefile)
 AC_OUTPUT
 
 echo ""
examples/Example_Laplacian.cc (new file, 396 lines)
@@ -0,0 +1,396 @@
+#include <Grid/Grid.h>
+using namespace Grid;
+
+/*
+/////////////////////////////////////////////////////////////////////////////////////////////
+// Grid/algorithms/SparseMatrix.h: Interface defining what I expect of a general sparse matrix, such as a Fermion action
+/////////////////////////////////////////////////////////////////////////////////////////////
+template<class Field> class SparseMatrixBase {
+public:
+  virtual GridBase *Grid(void) =0;
+
+  virtual void M    (const Field &in, Field &out)=0;
+  virtual void Mdag (const Field &in, Field &out)=0;
+  virtual void MdagM(const Field &in, Field &out) {
+    Field tmp (in.Grid());
+    M(in,tmp);
+    Mdag(tmp,out);
+  }
+  virtual void Mdiag   (const Field &in, Field &out)=0;
+  virtual void Mdir    (const Field &in, Field &out,int dir, int disp)=0;
+  virtual void MdirAll (const Field &in, std::vector<Field> &out)=0;
+};
+*/
+
+const std::vector<int> directions   ({Xdir,Ydir,Zdir,Xdir,Ydir,Zdir});
+const std::vector<int> displacements({1,1,1,-1,-1,-1});
+
+template<class Field> class FreeLaplacianCshift : public SparseMatrixBase<Field>
+{
+public:
+  GridBase *grid;
+  FreeLaplacianCshift(GridBase *_grid)
+  {
+    grid=_grid;
+  };
+  virtual GridBase *Grid(void) { return grid; };
+
+  virtual void M    (const Field &in, Field &out)
+  {
+    out = Zero();
+    for(int mu=0;mu<Nd-1;mu++) {
+      out = out + Cshift(in,mu,1) + Cshift(in,mu,-1) - 2.0 * in;
+    }
+  };
+  virtual void Mdag (const Field &in, Field &out) { M(in,out);}; // Laplacian is hermitian
+  virtual void Mdiag   (const Field &in, Field &out)                  {assert(0);}; // Unimplemented need only for multigrid
+  virtual void Mdir    (const Field &in, Field &out,int dir, int disp){assert(0);}; // Unimplemented need only for multigrid
+  virtual void MdirAll (const Field &in, std::vector<Field> &out)     {assert(0);}; // Unimplemented need only for multigrid
+};
+
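Note: FreeLaplacianCshift expresses the free scalar Laplacian purely through Cshift calls, summing in(x+mu)+in(x-mu)-2 in(x) over the three spatial directions (mu < Nd-1). A hedged usage sketch following standard Grid initialisation idioms (grid setup abbreviated, not part of the diff):

    // Apply the Cshift-based free Laplacian to a colour-vector field (sketch).
    typedef LatticeColourVector Field;
    GridCartesian *grid = SpaceTimeGrid::makeFourDimGrid(
        GridDefaultLatt(), GridDefaultSimd(Nd, vComplex::Nsimd()), GridDefaultMpi());
    Field in(grid), out(grid);                  // fill 'in' as desired
    FreeLaplacianCshift<Field> FLcs(grid);
    FLcs.M(in, out);                            // out = sum_mu [in(x+mu)+in(x-mu)-2 in(x)]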
|
template<class Gimpl,class Field> class CovariantLaplacianCshift : public SparseMatrixBase<Field>
|
||||||
|
{
|
||||||
|
public:
|
||||||
|
INHERIT_GIMPL_TYPES(Gimpl);
|
||||||
|
|
||||||
|
GridBase *grid;
|
||||||
|
GaugeField U;
|
||||||
|
|
||||||
|
CovariantLaplacianCshift(GaugeField &_U) :
|
||||||
|
grid(_U.Grid()),
|
||||||
|
U(_U) { };
|
||||||
|
|
||||||
|
virtual GridBase *Grid(void) { return grid; };
|
||||||
|
|
||||||
|
virtual void M (const Field &in, Field &out)
|
||||||
|
{
|
||||||
|
out=Zero();
|
||||||
|
for(int mu=0;mu<Nd-1;mu++) {
|
||||||
|
GaugeLinkField Umu = PeekIndex<LorentzIndex>(U, mu); // NB: Inefficent
|
||||||
|
out = out + Gimpl::CovShiftForward(Umu,mu,in);
|
||||||
|
out = out + Gimpl::CovShiftBackward(Umu,mu,in);
|
||||||
|
out = out - 2.0*in;
|
||||||
|
}
|
||||||
|
};
|
||||||
|
virtual void Mdag (const Field &in, Field &out) { M(in,out);}; // Laplacian is hermitian
|
||||||
|
virtual void Mdiag (const Field &in, Field &out) {assert(0);}; // Unimplemented need only for multigrid
|
||||||
|
virtual void Mdir (const Field &in, Field &out,int dir, int disp){assert(0);}; // Unimplemented need only for multigrid
|
||||||
|
virtual void MdirAll (const Field &in, std::vector<Field> &out) {assert(0);}; // Unimplemented need only for multigrid
|
||||||
|
};
|
||||||
|
|
||||||
|
|
||||||
|
#define LEG_LOAD(Dir) \
|
||||||
|
SE = st.GetEntry(ptype, Dir, ss); \
|
||||||
|
if (SE->_is_local ) { \
|
||||||
|
int perm= SE->_permute; \
|
||||||
|
chi = coalescedReadPermute(in[SE->_offset],ptype,perm,lane); \
|
||||||
|
} else { \
|
||||||
|
chi = coalescedRead(buf[SE->_offset],lane); \
|
||||||
|
} \
|
||||||
|
acceleratorSynchronise();
|
||||||
|
|
||||||
|
template<class Field> class FreeLaplacianStencil : public SparseMatrixBase<Field>
{
public:
  typedef typename Field::vector_object siteObject;
  typedef CartesianStencil<siteObject, siteObject, int> StencilImpl;

  GridBase *grid;
  StencilImpl Stencil;
  SimpleCompressor<siteObject> Compressor;

  FreeLaplacianStencil(GridBase *_grid)
    : Stencil (_grid,6,Even,directions,displacements,0), grid(_grid)
  { };

  virtual GridBase *Grid(void) { return grid; };

  virtual void M (const Field &_in, Field &_out)
  {
    ///////////////////////////////////////////////
    // Halo exchange for this geometry of stencil
    ///////////////////////////////////////////////
    Stencil.HaloExchange(_in, Compressor);

    ///////////////////////////////////
    // Arithmetic expressions
    ///////////////////////////////////

    // Views; device friendly/accessible pointers
    auto st  = Stencil.View(AcceleratorRead);
    auto buf = st.CommBuf();
    autoView( in  , _in  , AcceleratorRead);
    autoView( out , _out , AcceleratorWrite);

    typedef typename Field::vector_object vobj;
    typedef decltype(coalescedRead(in[0])) calcObj;

    const int      Nsimd = vobj::Nsimd();
    const uint64_t NN    = grid->oSites();

    accelerator_for( ss, NN, Nsimd, {

      StencilEntry *SE;

      const int lane=acceleratorSIMTlane(Nsimd);

      calcObj chi;
      calcObj res;
      int ptype;

      res = coalescedRead(in[ss])*(-6.0);
      LEG_LOAD(0); res = res + chi;
      LEG_LOAD(1); res = res + chi;
      LEG_LOAD(2); res = res + chi;
      LEG_LOAD(3); res = res + chi;
      LEG_LOAD(4); res = res + chi;
      LEG_LOAD(5); res = res + chi;

      coalescedWrite(out[ss], res,lane);

    });
  };
  virtual void Mdag (const Field &in, Field &out) { M(in,out);}; // Laplacian is hermitian
  virtual void Mdiag (const Field &in, Field &out)                  {assert(0);}; // Unimplemented, needed only for multigrid
  virtual void Mdir  (const Field &in, Field &out,int dir, int disp){assert(0);}; // Unimplemented, needed only for multigrid
  virtual void MdirAll (const Field &in, std::vector<Field> &out)   {assert(0);}; // Unimplemented, needed only for multigrid
};

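[Editor's aside, not part of the patch] The stencil version gathers six nearest-neighbour legs after a halo exchange and adds them to a -6.0 diagonal. A scalar 3-d sketch of the same traversal in plain C++ (the neighbour ordering here is illustrative, not Grid's actual direction table):

#include <cstdio>
#include <vector>

int main(void) {
  const int L = 8, V = L * L * L;
  auto idx = [&](int x, int y, int z) { return ((z * L) + y) * L + x; };
  std::vector<double> in(V, 0.0), out(V, 0.0);
  in[idx(4, 4, 4)] = 1.0;   // point source
  for (int z = 0; z < L; z++)
  for (int y = 0; y < L; y++)
  for (int x = 0; x < L; x++) {
    int xp = (x + 1) % L, xm = (x + L - 1) % L;
    int yp = (y + 1) % L, ym = (y + L - 1) % L;
    int zp = (z + 1) % L, zm = (z + L - 1) % L;
    // -6 diagonal plus six legs, the scalar analogue of the six LEG_LOADs
    out[idx(x, y, z)] = -6.0 * in[idx(x, y, z)]
                      + in[idx(xp, y, z)] + in[idx(xm, y, z)]
                      + in[idx(x, yp, z)] + in[idx(x, ym, z)]
                      + in[idx(x, y, zp)] + in[idx(x, y, zm)];
  }
  printf("out at source = %f (expect -6)\n", out[idx(4, 4, 4)]);
  return 0;
}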
template<class Gimpl,class Field> class CovariantLaplacianStencil : public SparseMatrixBase<Field>
{
public:
  INHERIT_GIMPL_TYPES(Gimpl);

  typedef typename Field::vector_object siteObject;

  template <typename vtype> using iImplDoubledGaugeField = iVector<iScalar<iMatrix<vtype, Nc> >, Nds>;
  typedef iImplDoubledGaugeField<Simd>   SiteDoubledGaugeField;
  typedef Lattice<SiteDoubledGaugeField> DoubledGaugeField;

  typedef CartesianStencil<siteObject, siteObject, int> StencilImpl;

  GridBase *grid;
  StencilImpl Stencil;
  SimpleCompressor<siteObject> Compressor;
  DoubledGaugeField Uds;
  CovariantLaplacianStencil(GaugeField &Umu)
    :
      grid(Umu.Grid()),
      Stencil (grid,6,Even,directions,displacements,0),
      Uds(grid)
  {
    // Double the gauge field: forward links in slots 0..Nd-1,
    // adjoint backward links in slots Nd..2Nd-1.
    for (int mu = 0; mu < Nd; mu++) {
      auto U = PeekIndex<LorentzIndex>(Umu, mu);
      PokeIndex<LorentzIndex>(Uds, U, mu );
      U = adj(Cshift(U, mu, -1));
      PokeIndex<LorentzIndex>(Uds, U, mu + 4);
    }
  };

  virtual GridBase *Grid(void) { return grid; };

  virtual void M (const Field &_in, Field &_out)
  {
    ///////////////////////////////////////////////
    // Halo exchange for this geometry of stencil
    ///////////////////////////////////////////////
    Stencil.HaloExchange(_in, Compressor);

    ///////////////////////////////////
    // Arithmetic expressions
    ///////////////////////////////////
    auto st  = Stencil.View(AcceleratorRead);
    auto buf = st.CommBuf();

    autoView( in  , _in  , AcceleratorRead);
    autoView( out , _out , AcceleratorWrite);
    autoView( U   , Uds  , AcceleratorRead);

    typedef typename Field::vector_object vobj;
    typedef decltype(coalescedRead(in[0]))    calcObj;
    typedef decltype(coalescedRead(U[0](0)))  calcLink;

    const int      Nsimd = vobj::Nsimd();
    const uint64_t NN    = grid->oSites();

    accelerator_for( ss, NN, Nsimd, {

      StencilEntry *SE;

      const int lane=acceleratorSIMTlane(Nsimd);

      calcObj chi;
      calcObj res;
      calcObj Uchi;
      calcLink UU;
      int ptype;

      res = coalescedRead(in[ss])*(-6.0);

#define LEG_LOAD_MULT(leg,polarisation)        \
      UU = coalescedRead(U[ss](polarisation)); \
      LEG_LOAD(leg);                           \
      mult(&Uchi(), &UU, &chi());              \
      res = res + Uchi;

      LEG_LOAD_MULT(0,Xp);
      LEG_LOAD_MULT(1,Yp);
      LEG_LOAD_MULT(2,Zp);
      LEG_LOAD_MULT(3,Xm);
      LEG_LOAD_MULT(4,Ym);
      LEG_LOAD_MULT(5,Zm);

      coalescedWrite(out[ss], res,lane);
    });
  };
  virtual void Mdag (const Field &in, Field &out) { M(in,out);}; // Laplacian is hermitian
  virtual void Mdiag (const Field &in, Field &out)                  {assert(0);}; // Unimplemented, needed only for multigrid
  virtual void Mdir  (const Field &in, Field &out,int dir, int disp){assert(0);}; // Unimplemented, needed only for multigrid
  virtual void MdirAll (const Field &in, std::vector<Field> &out)   {assert(0);}; // Unimplemented, needed only for multigrid
};

#undef LEG_LOAD_MULT
#undef LEG_LOAD

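[Editor's aside, not part of the patch] Test B in main() below checks gauge covariance, M^g (g psi) = g (M psi). The same identity can be verified in a standalone 1-d U(1) model, plain C++ with an arbitrarily chosen RNG seed:

#include <cmath>
#include <complex>
#include <cstdio>
#include <random>

typedef std::complex<double> cplx;
static const int L = 16;

// Covariant 1-d Laplacian: u(x) f(x+1) + conj(u(x-1)) f(x-1) - 2 f(x)
static void M(const cplx *u, const cplx *f, cplx *out) {
  for (int x = 0; x < L; x++) {
    int xp = (x + 1) % L, xm = (x + L - 1) % L;
    out[x] = u[x] * f[xp] + std::conj(u[xm]) * f[xm] - 2.0 * f[x];
  }
}

int main(void) {
  std::mt19937 rng(45);
  std::uniform_real_distribution<double> uni(0.0, 1.0);
  cplx u[L], ug[L], f[L], g[L], gf[L], out[L], outg[L];
  for (int x = 0; x < L; x++) {
    u[x] = std::polar(1.0, 2.0 * M_PI * uni(rng)); // random U(1) link
    g[x] = std::polar(1.0, 2.0 * M_PI * uni(rng)); // random gauge xform
    f[x] = cplx(uni(rng), uni(rng));
  }
  // Transformed links: u^g(x) = g(x) u(x) conj(g(x+1))
  for (int x = 0; x < L; x++) ug[x] = g[x] * u[x] * std::conj(g[(x + 1) % L]);
  for (int x = 0; x < L; x++) gf[x] = g[x] * f[x];
  M(u, f, out);
  M(ug, gf, outg);
  double viol = 0;
  for (int x = 0; x < L; x++) viol += std::abs(outg[x] - g[x] * out[x]);
  printf("covariance violation = %e (expect ~1e-15)\n", viol);
  return 0;
}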
int main(int argc, char ** argv)
{
  Grid_init(&argc, &argv);

  typedef LatticeColourVector Field;

  auto latt_size   = GridDefaultLatt();
  auto simd_layout = GridDefaultSimd(Nd,vComplex::Nsimd());
  auto mpi_layout  = GridDefaultMpi();

  GridCartesian   Grid(latt_size,simd_layout,mpi_layout);
  GridParallelRNG RNG(&Grid);  RNG.SeedFixedIntegers(std::vector<int>({45,12,81,9}));

  FreeLaplacianCshift<Field>  FLcs(&Grid);
  FreeLaplacianStencil<Field> FLst(&Grid);

  LatticeGaugeField U(&Grid);

  SU<Nc>::ColdConfiguration(RNG,U);

  std::cout << " Gauge field has norm " <<norm2(U)<<std::endl;

  CovariantLaplacianCshift <PeriodicGimplR,Field> CLcs(U);
  CovariantLaplacianStencil<PeriodicGimplR,Field> CLst(U);

  Field in(&Grid); gaussian(RNG,in);
  Field out_FLcs(&Grid);
  Field out_FLst(&Grid);
  Field out_CLcs(&Grid);
  Field out_CLst(&Grid);
  Field diff(&Grid);

  ////////////////////////////////////////////////////////
  // First test: in free field these should all agree
  ////////////////////////////////////////////////////////
  FLcs.M(in,out_FLcs);
  FLst.M(in,out_FLst);
  CLcs.M(in,out_CLcs);
  CLst.M(in,out_CLst);

  std::cout << "******************************************************************" <<std::endl;
  std::cout << " Test A: consistency of four different Laplacian implementations " <<std::endl;
  std::cout << "******************************************************************" <<std::endl;
  std::cout << " Input test vector " <<norm2(in)<<std::endl;
  std::cout << "--------------------------------------------------------" <<std::endl;
  std::cout << " Free cshift  output vector " <<norm2(out_FLcs)<<std::endl;
  std::cout << " Free stencil output vector " <<norm2(out_FLst)<<std::endl;
  std::cout << " Cov  cshift  output vector " <<norm2(out_CLcs)<<std::endl;
  std::cout << " Cov  stencil output vector " <<norm2(out_CLst)<<std::endl;
  std::cout << "--------------------------------------------------------" <<std::endl;

  diff = out_FLcs - out_FLst;
  std::cout << " Difference between free Cshift Laplacian and free Stencil Laplacian      = " <<norm2(diff)<<std::endl;

  diff = out_FLcs - out_CLcs;
  std::cout << " Difference between free Cshift Laplacian and covariant Cshift Laplacian  = " <<norm2(diff)<<std::endl;

  diff = out_FLcs - out_CLst;
  std::cout << " Difference between free Cshift Laplacian and covariant Stencil Laplacian = " <<norm2(diff)<<std::endl;
  std::cout << "--------------------------------------------------------" <<std::endl;

  std::cout << "******************************************************************" <<std::endl;
  std::cout << " Test B: gauge covariance " <<std::endl;
  std::cout << "******************************************************************" <<std::endl;

  LatticeGaugeField   U_GT(&Grid); // Gauge transformed field
  LatticeColourMatrix g(&Grid);    // local gauge xform matrix

  U_GT = U;
  // Make a random xform of the (unit) gauge field
  SU<Nc>::RandomGaugeTransform(RNG,U_GT,g);

  Field in_GT(&Grid);
  Field out_GT(&Grid);

  Field out_CLcs_GT(&Grid);
  Field out_CLst_GT(&Grid);

  CovariantLaplacianCshift <PeriodicGimplR,Field> CLcs_GT(U_GT);
  CovariantLaplacianStencil<PeriodicGimplR,Field> CLst_GT(U_GT);

  in_GT  = g*in;
  out_GT = g*out_FLcs;

  // Check M^GT_xy in_GT = g(x) M_xy g^dag(y) g(y) in = g(x) out(x)
  CLcs_GT.M(in_GT,out_CLcs_GT);
  CLst_GT.M(in_GT,out_CLst_GT);

  diff = out_CLcs_GT - out_GT;
  std::cout << " Difference between gauge xformed result and covariant Cshift Laplacian in xformed gauge  = " <<norm2(diff)<<std::endl;

  diff = out_CLst_GT - out_GT;
  std::cout << " Difference between gauge xformed result and covariant Stencil Laplacian in xformed gauge = " <<norm2(diff)<<std::endl;
  std::cout << "--------------------------------------------------------" <<std::endl;

  std::cout << "******************************************************************" <<std::endl;
  std::cout << " Test C: compare in free field to \"Feynman rule\" " <<std::endl;
  std::cout << "******************************************************************" <<std::endl;

  std::vector<int> dim_mask({1,1,1,0}); // 3d FFT
  FFT theFFT(&Grid);
  Field out(&Grid);
  Field F_out(&Grid);
  Field F_in(&Grid);

  // FFT the random input vector
  theFFT.FFT_dim_mask(F_in,in,dim_mask,FFT::forward);

  // Convolution theorem: multiply by the Fourier representation of the (discrete) Laplacian to apply the difference operator
  LatticeComplexD lap(&Grid); lap = Zero();
  LatticeComplexD kmu(&Grid);
  ComplexD ci(0.0,1.0);
  for(int mu=0;mu<3;mu++) {

    RealD TwoPiL = M_PI * 2.0/ latt_size[mu];

    LatticeCoordinate(kmu,mu);
    kmu = TwoPiL * kmu;

    // (e^ik_mu + e^-ik_mu - 2) = 2( cos k_mu - 1) ~ 2 (1 - k_mu^2/2 - 1 ) = - k_mu^2 + O(k^4)
    lap = lap + 2.0*cos(kmu) - 2.0;

  }
  F_out = lap * F_in;

  // Inverse FFT the result
  theFFT.FFT_dim_mask(out,F_out,dim_mask,FFT::backward);

  std::cout<<"Fourier xformed (in)             "<<norm2(F_in)<<std::endl;
  std::cout<<"Fourier xformed Laplacian x (in) "<<norm2(F_out)<<std::endl;

  std::cout<<"Momentum space Laplacian application   "<< norm2(out)<<std::endl;
  std::cout<<"Covariant Cshift Laplacian application "<< norm2(out_CLcs)<<std::endl;

  diff = out_CLcs - out;
  std::cout<<"diff "<< norm2(diff)<<std::endl;

  Grid_finalize();
}
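[Editor's aside, not part of the patch] Test C rests on the per-direction lattice momentum 2(cos k - 1), which approaches -k^2 as k -> 0. A quick numerical check of the O(k^4) convergence claimed in the comment above:

#include <cmath>
#include <cstdio>

int main(void) {
  for (double k = 0.8; k > 0.05; k *= 0.5) {
    double lat  = 2.0 * (cos(k) - 1.0);   // lattice Feynman rule
    double cont = -k * k;                 // continuum -k^2
    // The deviation shrinks by ~16x per halving of k, i.e. O(k^4)
    printf("k=%6.3f  lattice=%10.6f  continuum=%10.6f  diff=%e\n",
           k, lat, cont, lat - cont);
  }
  return 0;
}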
127
examples/Example_Laplacian_smearing.cc
Normal file
@@ -0,0 +1,127 @@
#include <Grid/Grid.h>
using namespace Grid;

// Function used for Chebyshev smearing
//
Real MomentumSmearing(Real p2)
{
  return (1 - 4.0*p2) * exp(-p2/4);
}
Real DistillationSmearing(Real p2)
{
  if ( p2 > 0.5 ) return 0.0;
  else            return 1.0;
}

// Flip sign to make prop to p^2, not -p^2 relative to last example
template<class Gimpl,class Field> class CovariantLaplacianCshift : public SparseMatrixBase<Field>
{
public:
  INHERIT_GIMPL_TYPES(Gimpl);

  GridBase *grid;
  GaugeField U;

  CovariantLaplacianCshift(GaugeField &_U) :
    grid(_U.Grid()),
    U(_U) { };

  virtual GridBase *Grid(void) { return grid; };

  virtual void M (const Field &in, Field &out)
  {
    out=Zero();
    for(int mu=0;mu<Nd-1;mu++) {
      GaugeLinkField Umu = PeekIndex<LorentzIndex>(U, mu); // NB: Inefficient
      out = out - Gimpl::CovShiftForward(Umu,mu,in);
      out = out - Gimpl::CovShiftBackward(Umu,mu,in);
      out = out + 2.0*in;
    }
  };
  virtual void Mdag (const Field &in, Field &out) { M(in,out);}; // Laplacian is hermitian
  virtual void Mdiag (const Field &in, Field &out)                  {assert(0);}; // Unimplemented, needed only for multigrid
  virtual void Mdir  (const Field &in, Field &out,int dir, int disp){assert(0);}; // Unimplemented, needed only for multigrid
  virtual void MdirAll (const Field &in, std::vector<Field> &out)   {assert(0);}; // Unimplemented, needed only for multigrid
};

int main(int argc, char ** argv)
{
  Grid_init(&argc, &argv);

  typedef LatticeColourVector Field;

  auto latt_size   = GridDefaultLatt();
  auto simd_layout = GridDefaultSimd(Nd,vComplex::Nsimd());
  auto mpi_layout  = GridDefaultMpi();

  GridCartesian   Grid(latt_size,simd_layout,mpi_layout);
  GridParallelRNG RNG(&Grid);  RNG.SeedFixedIntegers(std::vector<int>({45,12,81,9}));

  LatticeGaugeField U(&Grid);

  SU<Nc>::ColdConfiguration(RNG,U);

  typedef CovariantLaplacianCshift <PeriodicGimplR,Field> Laplacian_t;
  Laplacian_t Laplacian(U);

  ColourVector ColourKronecker;
  ColourKronecker = Zero();
  ColourKronecker()()(0) = 1.0;

  Coordinate site({latt_size[0]/2,
                   latt_size[1]/2,
                   latt_size[2]/2,
                   0});

  Field kronecker(&Grid);
  kronecker = Zero();
  pokeSite(ColourKronecker,kronecker,site);

  Field psi(&Grid), chi(&Grid);

  //////////////////////////////////////
  // Classic Wuppertal smearing
  //////////////////////////////////////

  Integer Iterations = 80;
  Real width = 2.0;
  Real coeff = (width*width) / Real(4*Iterations);

  chi=kronecker;
  // chi = (1-p^2/2N)^N kronecker
  for(int n = 0; n < Iterations; ++n) {
    Laplacian.M(chi,psi);
    chi = chi - coeff*psi;
  }

  std::cout << " Wuppertal smeared operator is chi = \n" << chi <<std::endl;

  /////////////////////////////////////
  // Chebyshev smearing
  /////////////////////////////////////
  RealD lo = 0.0;
  RealD hi = 12.0; // Analytic free field bound
  HermitianLinearOperator<Laplacian_t,Field> HermOp(Laplacian);

  std::cout << " Checking spectral range of our POSITIVE definite operator \n";
  PowerMethod<Field> PM;
  PM(HermOp,kronecker);

  //  Chebyshev<Field> ChebySmear(lo,hi,20,DistillationSmearing);
  Chebyshev<Field> ChebySmear(lo,hi,20,MomentumSmearing);
  {
    std::ofstream of("chebysmear");
    ChebySmear.csv(of);
  }

  ChebySmear(HermOp,kronecker,chi);

  std::cout << " Chebyshev smeared operator is chi = \n" << chi <<std::endl;

  Grid_finalize();
}
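[Editor's aside, not part of the patch] On a momentum eigenmode p^2 of the sign-flipped Laplacian, the Wuppertal loop above applies (1 - width^2 p^2/(4N))^N, which tends to the Gaussian exp(-width^2 p^2/4) as the iteration count N grows. A scalar check of that limit; the value of p^2 is a hypothetical choice:

#include <cmath>
#include <cstdio>

int main(void) {
  const double width = 2.0, p2 = 1.5;          // hypothetical momentum-squared
  const double a = width * width * p2 / 4.0;
  for (int N = 10; N <= 640; N *= 4)
    printf("N=%4d  (1-a/N)^N = %f\n", N, pow(1.0 - a / N, N));
  printf("exp(-a)        = %f\n", exp(-a));    // the N -> infinity limit
  return 0;
}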
129
examples/Example_Laplacian_solver.cc
Normal file
@@ -0,0 +1,129 @@
#include <Grid/Grid.h>
using namespace Grid;

template<class Field>
void SimpleConjugateGradient(LinearOperatorBase<Field> &HPDop,const Field &b, Field &x)
{
  RealD cp, c, alpha, d, beta, ssq, qq;
  RealD Tolerance=1.0e-10;
  int   MaxIterations=10000;

  Field p(b), mmp(b), r(b);

  HPDop.HermOpAndNorm(x, mmp, d, beta);

  r = b - mmp;
  p = r;

  cp  = alpha = norm2(p);
  ssq = norm2(b);

  RealD rsq = Tolerance * Tolerance * ssq;

  for (int k = 1; k <= MaxIterations; k++) {
    c = cp;

    HPDop.HermOp(p, mmp);

    d = real(innerProduct(p,mmp));

    alpha = c / d;

    r  = r - alpha *mmp;
    cp = norm2(r);
    beta = cp / c;

    x = x + alpha* p ;
    p = r + beta* p ;

    std::cout << "iteration "<<k<<" cp " <<std::sqrt(cp/ssq) << std::endl;
    if (cp <= rsq) {
      return;
    }
  }
  assert(0);
}

// Flip sign to make prop to p^2, not -p^2 relative to last example
template<class Gimpl,class Field> class CovariantLaplacianCshift : public SparseMatrixBase<Field>
{
public:
  INHERIT_GIMPL_TYPES(Gimpl);

  GridBase *grid;
  GaugeField U;
  RealD m2=1.0e-2;
  CovariantLaplacianCshift(GaugeField &_U) :
    grid(_U.Grid()),
    U(_U) { };

  virtual GridBase *Grid(void) { return grid; };

  virtual void M (const Field &in, Field &out)
  {
    out=Zero();
    for(int mu=0;mu<Nd-1;mu++) {
      GaugeLinkField Umu = PeekIndex<LorentzIndex>(U, mu); // NB: Inefficient
      out = out - Gimpl::CovShiftForward(Umu,mu,in);
      out = out - Gimpl::CovShiftBackward(Umu,mu,in);
      out = out + 2.0*in + m2*in; // NB: the m2 term accumulates once per spatial direction
    }
  };
  virtual void Mdag (const Field &in, Field &out) { M(in,out);}; // Laplacian is hermitian
  virtual void Mdiag (const Field &in, Field &out)                  {assert(0);}; // Unimplemented, needed only for multigrid
  virtual void Mdir  (const Field &in, Field &out,int dir, int disp){assert(0);}; // Unimplemented, needed only for multigrid
  virtual void MdirAll (const Field &in, std::vector<Field> &out)   {assert(0);}; // Unimplemented, needed only for multigrid
};

int main(int argc, char ** argv)
{
  Grid_init(&argc, &argv);

  typedef LatticeColourVector Field;

  auto latt_size   = GridDefaultLatt();
  auto simd_layout = GridDefaultSimd(Nd,vComplex::Nsimd());
  auto mpi_layout  = GridDefaultMpi();

  GridCartesian   Grid(latt_size,simd_layout,mpi_layout);
  GridParallelRNG RNG(&Grid);  RNG.SeedFixedIntegers(std::vector<int>({45,12,81,9}));

  LatticeGaugeField U(&Grid);

  SU<Nc>::ColdConfiguration(RNG,U);

  typedef CovariantLaplacianCshift <PeriodicGimplR,Field> Laplacian_t;
  Laplacian_t Laplacian(U);

  ColourVector ColourKronecker;
  ColourKronecker = Zero();
  ColourKronecker()()(0) = 1.0;

  Coordinate site({0,0,0,0}); // Point source at origin

  Field kronecker(&Grid);
  kronecker = Zero();
  pokeSite(ColourKronecker,kronecker,site);

  Field psi(&Grid); psi=Zero();

  HermitianLinearOperator<Laplacian_t,Field> HermOp(Laplacian);
  SimpleConjugateGradient(HermOp, kronecker,psi);

  Field r(&Grid);
  Laplacian.M(psi,r);
  r=kronecker-r;

  std::cout << "True residual "<< norm2(r) <<std::endl;

  // Optionally print the result vector
  // std::cout << psi<<std::endl;

  Grid_finalize();
}
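[Editor's aside, not part of the patch] The alpha/beta bookkeeping of SimpleConjugateGradient above, replayed on a hard-coded 3x3 positive-definite matrix so each step can be followed by hand; the matrix and right-hand side are made-up test data:

#include <cmath>
#include <cstdio>

// y = A x for a fixed SPD matrix (hypothetical test data)
static void HermOp(const double x[3], double y[3]) {
  static const double A[3][3] = {{4, 1, 0}, {1, 3, 1}, {0, 1, 2}};
  for (int i = 0; i < 3; i++) {
    y[i] = 0;
    for (int j = 0; j < 3; j++) y[i] += A[i][j] * x[j];
  }
}

int main(void) {
  double b[3] = {1, 2, 3}, x[3] = {0, 0, 0};
  double r[3], p[3], mmp[3];
  HermOp(x, mmp);
  double ssq = 0, cp = 0;
  for (int i = 0; i < 3; i++) {
    r[i] = b[i] - mmp[i];       // initial residual r = b - A x
    p[i] = r[i];
    ssq += b[i] * b[i];
    cp  += r[i] * r[i];
  }
  for (int k = 1; k <= 100 && cp > 1e-20 * ssq; k++) {
    double c = cp, d = 0;
    HermOp(p, mmp);
    for (int i = 0; i < 3; i++) d += p[i] * mmp[i];
    double alpha = c / d;       // step length, as in the Field version
    cp = 0;
    for (int i = 0; i < 3; i++) {
      r[i] -= alpha * mmp[i];
      x[i] += alpha * p[i];
      cp   += r[i] * r[i];
    }
    double beta = cp / c;       // new search direction p = r + beta p
    for (int i = 0; i < 3; i++) p[i] = r[i] + beta * p[i];
    printf("iteration %d residual %e\n", k, sqrt(cp / ssq));
  }
  return 0;
}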
314
examples/Example_Mobius_spectrum.cc
Normal file
@@ -0,0 +1,314 @@
/*
 * Warning: This code is illustrative only: not well tested, and not meant for production use
 * without regression / tests being applied
 */

#include <Grid/Grid.h>

using namespace std;
using namespace Grid;

template<class Gimpl,class Field> class CovariantLaplacianCshift : public SparseMatrixBase<Field>
{
public:
  INHERIT_GIMPL_TYPES(Gimpl);

  GridBase *grid;
  GaugeField U;

  CovariantLaplacianCshift(GaugeField &_U) :
    grid(_U.Grid()),
    U(_U) { };

  virtual GridBase *Grid(void) { return grid; };

  virtual void M (const Field &in, Field &out)
  {
    out=Zero();
    for(int mu=0;mu<Nd-1;mu++) {
      GaugeLinkField Umu = PeekIndex<LorentzIndex>(U, mu); // NB: Inefficient
      out = out - Gimpl::CovShiftForward(Umu,mu,in);
      out = out - Gimpl::CovShiftBackward(Umu,mu,in);
      out = out + 2.0*in;
    }
  };
  virtual void Mdag (const Field &in, Field &out) { M(in,out);}; // Laplacian is hermitian
  virtual void Mdiag (const Field &in, Field &out)                  {assert(0);}; // Unimplemented, needed only for multigrid
  virtual void Mdir  (const Field &in, Field &out,int dir, int disp){assert(0);}; // Unimplemented, needed only for multigrid
  virtual void MdirAll (const Field &in, std::vector<Field> &out)   {assert(0);}; // Unimplemented, needed only for multigrid
};

void MakePhase(Coordinate mom,LatticeComplex &phase)
{
  GridBase *grid = phase.Grid();
  auto latt_size = grid->GlobalDimensions();
  ComplexD ci(0.0,1.0);
  phase=Zero();

  LatticeComplex coor(phase.Grid());
  for(int mu=0;mu<Nd;mu++){
    RealD TwoPiL = M_PI * 2.0/ latt_size[mu];
    LatticeCoordinate(coor,mu);
    phase = phase + (TwoPiL * mom[mu]) * coor;
  }
  phase = exp(phase*ci);
}
void PointSource(Coordinate &coor,LatticePropagator &source)
{
  //  Coordinate coor({0,0,0,0});
  source=Zero();
  SpinColourMatrix kronecker; kronecker=1.0;
  pokeSite(kronecker,source,coor);
}
void Z2WallSource(GridParallelRNG &RNG,int tslice,LatticePropagator &source)
{
  GridBase *grid = source.Grid();
  LatticeComplex noise(grid);
  LatticeComplex zz(grid); zz=Zero();
  LatticeInteger t(grid);

  RealD nrm=1.0/sqrt(2);
  bernoulli(RNG, noise); // 0,1 50:50

  noise = (2.*noise - Complex(1,1))*nrm;

  LatticeCoordinate(t,Tdir);
  noise = where(t==Integer(tslice), noise, zz);

  source = 1.0;
  source = source*noise;
  std::cout << " Z2 wall " << norm2(source) << std::endl;
}
template<class Field>
void GaussianSmear(LatticeGaugeField &U,Field &unsmeared,Field &smeared)
{
  typedef CovariantLaplacianCshift <PeriodicGimplR,Field> Laplacian_t;
  Laplacian_t Laplacian(U);

  Integer Iterations = 40;
  Real width = 2.0;
  Real coeff = (width*width) / Real(4*Iterations);

  Field tmp(U.Grid());
  smeared=unsmeared;
  // chi = (1-p^2/2N)^N kronecker
  for(int n = 0; n < Iterations; ++n) {
    Laplacian.M(smeared,tmp);
    smeared = smeared - coeff*tmp;
    std::cout << " smear iter " << n<<" " <<norm2(smeared)<<std::endl;
  }
}
void GaussianSource(Coordinate &site,LatticeGaugeField &U,LatticePropagator &source)
{
  LatticePropagator tmp(source.Grid());
  PointSource(site,source);
  std::cout << " GaussianSource Kronecker "<< norm2(source)<<std::endl;
  tmp = source;
  GaussianSmear(U,tmp,source);
  std::cout << " GaussianSource Smeared "<< norm2(source)<<std::endl;
}
void GaussianWallSource(GridParallelRNG &RNG,int tslice,LatticeGaugeField &U,LatticePropagator &source)
{
  Z2WallSource(RNG,tslice,source);
  auto tmp = source;
  GaussianSmear(U,tmp,source);
}
void SequentialSource(int tslice,Coordinate &mom,LatticePropagator &spectator,LatticePropagator &source)
{
  assert(mom.size()==Nd);
  assert(mom[Tdir] == 0);

  GridBase * grid = spectator.Grid();

  LatticeInteger ts(grid);
  LatticeCoordinate(ts,Tdir);
  source = Zero();
  source = where(ts==Integer(tslice),spectator,source); // Stick in a slice of the spectator, zero everywhere else

  LatticeComplex phase(grid);
  MakePhase(mom,phase);

  source = source *phase;
}
template<class Action>
void Solve(Action &D,LatticePropagator &source,LatticePropagator &propagator)
{
  GridBase *UGrid = D.GaugeGrid();
  GridBase *FGrid = D.FermionGrid();

  LatticeFermion src4   (UGrid);
  LatticeFermion src5   (FGrid);
  LatticeFermion result5(FGrid);
  LatticeFermion result4(UGrid);

  ConjugateGradient<LatticeFermion> CG(1.0e-8,100000);
  SchurRedBlackDiagMooeeSolve<LatticeFermion> schur(CG);
  ZeroGuesser<LatticeFermion> ZG; // Could be a DeflatedGuesser if have eigenvectors
  for(int s=0;s<Nd;s++){
    for(int c=0;c<Nc;c++){
      PropToFerm<Action>(src4,source,s,c);

      D.ImportPhysicalFermionSource(src4,src5);

      result5=Zero();
      schur(D,src5,result5,ZG);
      std::cout<<GridLogMessage
	       <<"spin "<<s<<" color "<<c
	       <<" norm2(src5d) "   <<norm2(src5)
	       <<" norm2(result5d) "<<norm2(result5)<<std::endl;

      D.ExportPhysicalFermionSolution(result5,result4);

      FermToProp<Action>(propagator,result4,s,c);
    }
  }
}

class MesonFile: Serializable {
public:
  GRID_SERIALIZABLE_CLASS_MEMBERS(MesonFile, std::vector<std::vector<Complex> >, data);
};

void MesonTrace(std::string file,LatticePropagator &q1,LatticePropagator &q2,LatticeComplex &phase)
{
  // NB: the phase argument is unused here (zero-momentum projection)
  const int nchannel=4;
  Gamma::Algebra Gammas[nchannel][2] = {
    {Gamma::Algebra::Gamma5      ,Gamma::Algebra::Gamma5},
    {Gamma::Algebra::GammaTGamma5,Gamma::Algebra::GammaTGamma5},
    {Gamma::Algebra::GammaTGamma5,Gamma::Algebra::Gamma5},
    {Gamma::Algebra::Gamma5      ,Gamma::Algebra::GammaTGamma5}
  };

  Gamma G5(Gamma::Algebra::Gamma5);

  LatticeComplex meson_CF(q1.Grid());
  MesonFile MF;

  for(int ch=0;ch<nchannel;ch++){

    Gamma Gsrc(Gammas[ch][0]);
    Gamma Gsnk(Gammas[ch][1]);

    meson_CF = trace(G5*adj(q1)*G5*Gsnk*q2*adj(Gsrc));

    std::vector<TComplex> meson_T;
    sliceSum(meson_CF,meson_T, Tdir);

    int nt=meson_T.size();

    std::vector<Complex> corr(nt);
    for(int t=0;t<nt;t++){
      corr[t] = TensorRemove(meson_T[t]); // Yes this is ugly, not figured out a workaround
      std::cout << " channel "<<ch<<" t "<<t<<" " <<corr[t]<<std::endl;
    }
    MF.data.push_back(corr);
  }

  {
    XmlWriter WR(file);
    write(WR,"MesonFile",MF);
  }
}

int main (int argc, char ** argv)
{
  const int Ls=8;

  Grid_init(&argc,&argv);

  // Double precision grids
  GridCartesian         * UGrid   = SpaceTimeGrid::makeFourDimGrid(GridDefaultLatt(),
								   GridDefaultSimd(Nd,vComplex::Nsimd()),
								   GridDefaultMpi());
  GridRedBlackCartesian * UrbGrid = SpaceTimeGrid::makeFourDimRedBlackGrid(UGrid);
  GridCartesian         * FGrid   = SpaceTimeGrid::makeFiveDimGrid(Ls,UGrid);
  GridRedBlackCartesian * FrbGrid = SpaceTimeGrid::makeFiveDimRedBlackGrid(Ls,UGrid);

  //////////////////////////////////////////////////////////////////////
  // You can manage seeds however you like.
  // Recommend SeedUniqueString.
  //////////////////////////////////////////////////////////////////////
  std::vector<int> seeds4({1,2,3,4});
  GridParallelRNG  RNG4(UGrid);  RNG4.SeedFixedIntegers(seeds4);

  LatticeGaugeField Umu(UGrid);
  std::string config;
  if( argc > 1 && argv[1][0] != '-' )
  {
    std::cout<<GridLogMessage <<"Loading configuration from "<<argv[1]<<std::endl;
    FieldMetaData header;
    NerscIO::readConfiguration(Umu, header, argv[1]);
    config=argv[1];
  }
  else
  {
    std::cout<<GridLogMessage <<"Using cold configuration"<<std::endl;
    SU<Nc>::ColdConfiguration(Umu);
    //    SU<Nc>::HotConfiguration(RNG4,Umu);
    config="HotConfig";
  }

  std::vector<RealD> masses({ 0.03,0.04,0.45} ); // u/d, s, c ??

  int nmass = masses.size();

  std::vector<MobiusFermionR *> FermActs;

  std::cout<<GridLogMessage <<"======================"<<std::endl;
  std::cout<<GridLogMessage <<"MobiusFermion action as Scaled Shamir kernel"<<std::endl;
  std::cout<<GridLogMessage <<"======================"<<std::endl;

  for(auto mass: masses) {

    RealD M5=1.0;
    RealD b=1.5;// Scale factor b+c=2, b-c=1
    RealD c=0.5;

    FermActs.push_back(new MobiusFermionR(Umu,*FGrid,*FrbGrid,*UGrid,*UrbGrid,mass,M5,b,c));

  }

  LatticePropagator point_source(UGrid);
  LatticePropagator wall_source(UGrid);
  LatticePropagator gaussian_source(UGrid);

  Coordinate Origin({0,0,0,0});
  PointSource   (Origin,point_source);
  Z2WallSource  (RNG4,0,wall_source);
  GaussianSource(Origin,Umu,gaussian_source);

  std::vector<LatticePropagator> PointProps(nmass,UGrid);
  std::vector<LatticePropagator> GaussProps(nmass,UGrid);
  std::vector<LatticePropagator> Z2Props   (nmass,UGrid);

  for(int m=0;m<nmass;m++) {

    Solve(*FermActs[m],point_source   ,PointProps[m]);
    Solve(*FermActs[m],gaussian_source,GaussProps[m]);
    Solve(*FermActs[m],wall_source    ,Z2Props[m]);

  }

  LatticeComplex phase(UGrid);
  Coordinate mom({0,0,0,0});
  MakePhase(mom,phase);

  for(int m1=0 ;m1<nmass;m1++) {
    for(int m2=m1;m2<nmass;m2++) {
      std::stringstream ssp,ssg,ssz;

      ssp<<config<< "_m" << m1 << "_m"<< m2 << "_point_meson.xml";
      ssg<<config<< "_m" << m1 << "_m"<< m2 << "_smeared_meson.xml";
      ssz<<config<< "_m" << m1 << "_m"<< m2 << "_wall_meson.xml";

      MesonTrace(ssp.str(),PointProps[m1],PointProps[m2],phase);
      MesonTrace(ssg.str(),GaussProps[m1],GaussProps[m2],phase);
      MesonTrace(ssz.str(),Z2Props[m1],Z2Props[m2],phase);
    }}

  Grid_finalize();
}

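[Editor's aside, not part of the patch] Assuming bernoulli() fills both the real and imaginary parts with 0/1 values, as the "0,1 50:50" comment suggests, the Z2WallSource map (2 b - (1+i))/sqrt(2) lands on the four unit-modulus values (+-1 +- i)/sqrt(2), which average to zero. A standalone enumeration:

#include <cmath>
#include <complex>
#include <cstdio>

int main(void) {
  const double nrm = 1.0 / sqrt(2.0);
  for (int re = 0; re <= 1; re++)
    for (int im = 0; im <= 1; im++) {
      std::complex<double> b(re, im);                 // one Bernoulli draw
      std::complex<double> z = (2.0 * b - std::complex<double>(1, 1)) * nrm;
      printf("b=(%d,%d) -> z=(%+.3f,%+.3f) |z|=%.3f\n",
             re, im, z.real(), z.imag(), std::abs(z));
    }
  return 0;
}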
6
examples/Makefile.am
Normal file
@@ -0,0 +1,6 @@
SUBDIRS = .

include Make.inc

@@ -82,3 +82,18 @@ for f in $TESTS; do
 echo >> Make.inc
 done
 cd ..
+
+# examples Make.inc
+cd $home/examples/
+echo> Make.inc
+TESTS=`ls *.cc`
+TESTLIST=`echo ${TESTS} | sed s/.cc//g `
+echo bin_PROGRAMS = ${TESTLIST} > Make.inc
+echo >> Make.inc
+for f in $TESTS; do
+BNAME=`basename $f .cc`
+echo ${BNAME}_SOURCES=$f  >> Make.inc
+echo ${BNAME}_LDADD='$(top_builddir)/Grid/libGrid.a'>> Make.inc
+echo >> Make.inc
+done
+cd ..
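[Editor's aside, not part of the patch] The loop above emits one automake stanza per example source; a hypothetical Make.inc for two of the new examples would come out roughly as:

bin_PROGRAMS = Example_Laplacian Example_Mobius_spectrum

Example_Laplacian_SOURCES=Example_Laplacian.cc
Example_Laplacian_LDADD=$(top_builddir)/Grid/libGrid.a

Example_Mobius_spectrum_SOURCES=Example_Mobius_spectrum.cc
Example_Mobius_spectrum_LDADD=$(top_builddir)/Grid/libGrid.a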
@@ -66,7 +66,9 @@ int main(int argc, char** argv)
   // Set up RNGs
   std::vector<int> seeds4({1, 2, 3, 4});
   std::vector<int> seeds5({5, 6, 7, 8});
+  GridSerialRNG sRNG;
   GridParallelRNG RNG5(FGrid);
+  sRNG.SeedFixedIntegers(seeds5);
   RNG5.SeedFixedIntegers(seeds5);
   GridParallelRNG RNG4(UGrid);
   RNG4.SeedFixedIntegers(seeds4);

@@ -84,7 +86,7 @@ int main(int argc, char** argv)
   ConjugateGradient<LatticeFermion> CG(1.0e-12, 5000);
   ExactOneFlavourRatioPseudoFermionAction<WilsonImplR> Meofa(Lop, Rop, CG, Params, false);
 
-  Meofa.refresh(Umu, RNG5);
+  Meofa.refresh(Umu, sRNG, RNG5);
   printf("<Phi|Meofa|Phi> = %1.15e\n", Meofa.S(Umu));
 }
 
@@ -94,7 +96,7 @@ int main(int argc, char** argv)
   ConjugateGradient<LatticeFermion> CG(1.0e-12, 5000);
   ExactOneFlavourRatioPseudoFermionAction<WilsonImplR> Meofa(Lop, Rop, CG, Params, true);
 
-  Meofa.refresh(Umu, RNG5);
+  Meofa.refresh(Umu, sRNG, RNG5);
   printf("<Phi|Meofa|Phi> = %1.15e\n", Meofa.S(Umu));
 }
 
@@ -74,6 +74,9 @@ int main(int argc, char** argv)
   RNG5.SeedFixedIntegers(seeds5);
   GridParallelRNG RNG4(UGrid);
   RNG4.SeedFixedIntegers(seeds4);
+  GridSerialRNG sRNG;
+  RNG4.SeedFixedIntegers(seeds4);
+  sRNG.SeedFixedIntegers(seeds5);
 
   // Random gauge field
   LatticeGaugeField Umu(UGrid);
@@ -90,7 +93,7 @@ int main(int argc, char** argv)
   ConjugateGradient<FermionField> CG(1.0e-12, 5000);
   ExactOneFlavourRatioPseudoFermionAction<FermionImplPolicy> Meofa(Lop, Rop, CG, Params, false);
 
-  Meofa.refresh(Umu, RNG5);
+  Meofa.refresh(Umu, sRNG, RNG5);
   printf("<Phi|Meofa|Phi> = %1.15e\n", Meofa.S(Umu));
 }
 
@@ -100,7 +103,7 @@ int main(int argc, char** argv)
   ConjugateGradient<FermionField> CG(1.0e-12, 5000);
   ExactOneFlavourRatioPseudoFermionAction<FermionImplPolicy> Meofa(Lop, Rop, CG, Params, true);
 
-  Meofa.refresh(Umu, RNG5);
+  Meofa.refresh(Umu, sRNG, RNG5);
   printf("<Phi|Meofa|Phi> = %1.15e\n", Meofa.S(Umu));
 }
 
@@ -68,8 +68,10 @@ int main(int argc, char** argv)
   // Set up RNGs
   std::vector<int> seeds4({1, 2, 3, 4});
   std::vector<int> seeds5({5, 6, 7, 8});
+  GridSerialRNG sRNG;
   GridParallelRNG RNG5(FGrid);
   RNG5.SeedFixedIntegers(seeds5);
+  sRNG.SeedFixedIntegers(seeds5);
   GridParallelRNG RNG4(UGrid);
   RNG4.SeedFixedIntegers(seeds4);
 
@@ -86,7 +88,7 @@ int main(int argc, char** argv)
   ConjugateGradient<LatticeFermion> CG(1.0e-12, 5000);
   ExactOneFlavourRatioPseudoFermionAction<WilsonImplR> Meofa(Lop, Rop, CG, Params, false);
 
-  Meofa.refresh(Umu, RNG5);
+  Meofa.refresh(Umu, sRNG, RNG5);
   printf("<Phi|Meofa|Phi> = %1.15e\n", Meofa.S(Umu));
 }
 
@@ -96,7 +98,7 @@ int main(int argc, char** argv)
   ConjugateGradient<LatticeFermion> CG(1.0e-12, 5000);
   ExactOneFlavourRatioPseudoFermionAction<WilsonImplR> Meofa(Lop, Rop, CG, Params, true);
 
-  Meofa.refresh(Umu, RNG5);
+  Meofa.refresh(Umu, sRNG, RNG5);
   printf("<Phi|Meofa|Phi> = %1.15e\n", Meofa.S(Umu));
 }
 
@@ -73,7 +73,9 @@ int main(int argc, char** argv)
   std::vector<int> seeds4({1, 2, 3, 4});
   std::vector<int> seeds5({5, 6, 7, 8});
   GridParallelRNG RNG5(FGrid);
+  GridSerialRNG sRNG;
   RNG5.SeedFixedIntegers(seeds5);
+  sRNG.SeedFixedIntegers(seeds5);
   GridParallelRNG RNG4(UGrid);
   RNG4.SeedFixedIntegers(seeds4);
 
@@ -91,7 +93,7 @@ int main(int argc, char** argv)
   ConjugateGradient<FermionField> CG(1.0e-12, 5000);
   ExactOneFlavourRatioPseudoFermionAction<FermionImplPolicy> Meofa(Lop, Rop, CG, Params, false);
 
-  Meofa.refresh(Umu, RNG5);
+  Meofa.refresh(Umu, sRNG, RNG5);
   printf("<Phi|Meofa|Phi> = %1.15e\n", Meofa.S(Umu));
 }
 
@@ -101,7 +103,7 @@ int main(int argc, char** argv)
   ConjugateGradient<FermionField> CG(1.0e-12, 5000);
   ExactOneFlavourRatioPseudoFermionAction<FermionImplPolicy> Meofa(Lop, Rop, CG, Params, true);
 
-  Meofa.refresh(Umu, RNG5);
+  Meofa.refresh(Umu, sRNG, RNG5);
   printf("<Phi|Meofa|Phi> = %1.15e\n", Meofa.S(Umu));
 }