Mirror of https://github.com/paboyle/Grid.git (synced 2025-04-11 22:50:45 +01:00)

commit c7baeb5bae
Merge branch 'develop' of https://github.com/paboyle/Grid into develop
@@ -35,6 +35,12 @@ Author: Peter Boyle <paboyle@ph.ed.ac.uk>
 NAMESPACE_BEGIN(Grid);
 
+#ifdef GRID_MPI3_SHM_NVLINK
+const bool Stencil_force_mpi = true;
+#else
+const bool Stencil_force_mpi = false;
+#endif
+
 class CartesianCommunicator : public SharedMemory {
 
 public:
@@ -370,7 +370,7 @@ double CartesianCommunicator::StencilSendToRecvFromBegin(std::vector<CommsReques
 double off_node_bytes=0.0;
 int tag;
 
-if ( gfrom ==MPI_UNDEFINED) {
+if ( (gfrom ==MPI_UNDEFINED) || Stencil_force_mpi ) {
 tag= dir+from*32;
 ierr=MPI_Irecv(recv, bytes, MPI_CHAR,from,tag,communicator_halo[commdir],&rrq);
 assert(ierr==0);

@@ -378,7 +378,7 @@ double CartesianCommunicator::StencilSendToRecvFromBegin(std::vector<CommsReques
 off_node_bytes+=bytes;
 }
 
-if ( gdest == MPI_UNDEFINED ) {
+if ( (gdest == MPI_UNDEFINED) || Stencil_force_mpi ) {
 tag= dir+_processor*32;
 ierr =MPI_Isend(xmit, bytes, MPI_CHAR,dest,tag,communicator_halo[commdir],&xrq);
 assert(ierr==0);
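The two hunks above make the new `Stencil_force_mpi` flag short-circuit the shared-memory test: when it is true, halo transfers are posted as MPI requests even if the neighbour rank lives on the same node. A minimal sketch of that selection logic, using a hypothetical `post_receive` wrapper rather than Grid's actual routine:

```cpp
#include <mpi.h>

#ifdef GRID_MPI3_SHM_NVLINK
const bool Stencil_force_mpi = true;   // NVLink build: always go through MPI
#else
const bool Stencil_force_mpi = false;  // default: use shared-memory copies intra-node
#endif

// 'gfrom' is the sender's intra-node shared-memory rank, or MPI_UNDEFINED when the
// sender sits on another node. Hypothetical wrapper, for illustration only.
void post_receive(void *recv, int bytes, int from, int gfrom, int tag,
                  MPI_Comm comm, MPI_Request *req) {
  if ((gfrom == MPI_UNDEFINED) || Stencil_force_mpi) {
    // Off-node neighbour, or MPI forced: post an asynchronous receive.
    MPI_Irecv(recv, bytes, MPI_CHAR, from, tag, comm, req);
  }
  // Otherwise the halo is reachable directly through the node's shared-memory
  // window and no MPI request is needed.
}
```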
@@ -73,6 +73,7 @@ void GlobalSharedMemory::Init(Grid_MPI_Comm comm)
 WorldNodes = WorldSize/WorldShmSize;
 assert( (WorldNodes * WorldShmSize) == WorldSize );
 
+
 // FIXME: Check all WorldShmSize are the same ?
 
 /////////////////////////////////////////////////////////////////////
@@ -451,7 +452,8 @@ void GlobalSharedMemory::SharedMemoryAllocate(uint64_t bytes, int flags)
 ////////////////////////////////////////////////////////////////////////////////////////////
 #if defined(GRID_CUDA) ||defined(GRID_HIP) || defined(GRID_SYCL)
 
-#if defined(GRID_SYCL)
+//if defined(GRID_SYCL)
+#if 0
 void GlobalSharedMemory::SharedMemoryAllocate(uint64_t bytes, int flags)
 {
 void * ShmCommBuf ;

@@ -488,7 +490,7 @@ void GlobalSharedMemory::SharedMemoryAllocate(uint64_t bytes, int flags)
 }
 #endif
 
-#if defined(GRID_CUDA) ||defined(GRID_HIP)
+#if defined(GRID_CUDA) ||defined(GRID_HIP) ||defined(GRID_SYCL)
 void GlobalSharedMemory::SharedMemoryAllocate(uint64_t bytes, int flags)
 {
 void * ShmCommBuf ;
@@ -511,8 +513,16 @@ void GlobalSharedMemory::SharedMemoryAllocate(uint64_t bytes, int flags)
 ///////////////////////////////////////////////////////////////////////////////////////////////////////////
 // Each MPI rank should allocate our own buffer
 ///////////////////////////////////////////////////////////////////////////////////////////////////////////
+#ifdef GRID_SYCL_LEVEL_ZERO_IPC
+auto zeDevice = cl::sycl::get_native<cl::sycl::backend::level_zero>(theGridAccelerator->get_device());
+auto zeContext= cl::sycl::get_native<cl::sycl::backend::level_zero>(theGridAccelerator->get_context());
+ze_device_mem_alloc_desc_t zeDesc = {};
+zeMemAllocDevice(zeContext,&zeDesc,bytes,2*1024*1024,zeDevice,&ShmCommBuf);
+std::cout << WorldRank << header " SharedMemoryMPI.cc zeMemAllocDevice "<< bytes
+<< "bytes at "<< std::hex<< ShmCommBuf <<std::dec<<" for comms buffers " <<std::endl;
+#else
 ShmCommBuf = acceleratorAllocDevice(bytes);
+#endif
 if (ShmCommBuf == (void *)NULL ) {
 std::cerr << " SharedMemoryMPI.cc acceleratorAllocDevice failed NULL pointer for " << bytes<<" bytes " << std::endl;
 exit(EXIT_FAILURE);
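For the GRID_SYCL_LEVEL_ZERO_IPC branch added above, the allocation goes through Level Zero's `zeMemAllocDevice`. A short sketch of the error-checked idiom, assuming the context and device handles are supplied by the caller (Grid obtains them from `theGridAccelerator`); the helper name is illustrative:

```cpp
#include <level_zero/ze_api.h>
#include <cstdio>
#include <cstdlib>

void *alloc_comms_buffer(ze_context_handle_t ctx, ze_device_handle_t dev, size_t bytes) {
  ze_device_mem_alloc_desc_t desc = {};
  desc.stype = ZE_STRUCTURE_TYPE_DEVICE_MEM_ALLOC_DESC;
  void *buf = nullptr;
  // 2*1024*1024 alignment matches the alignment requested in the hunk above.
  ze_result_t rc = zeMemAllocDevice(ctx, &desc, bytes, 2 * 1024 * 1024, dev, &buf);
  if (rc != ZE_RESULT_SUCCESS || buf == nullptr) {
    std::fprintf(stderr, "zeMemAllocDevice failed: 0x%x\n", (unsigned)rc);
    std::exit(EXIT_FAILURE);
  }
  return buf;
}
```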
@@ -522,8 +532,8 @@ void GlobalSharedMemory::SharedMemoryAllocate(uint64_t bytes, int flags)
 std::cout << WorldRank << header " SharedMemoryMPI.cc acceleratorAllocDevice "<< bytes
 << "bytes at "<< std::hex<< ShmCommBuf <<std::dec<<" for comms buffers " <<std::endl;
 }
-SharedMemoryZero(ShmCommBuf,bytes);
+// SharedMemoryZero(ShmCommBuf,bytes);
+std::cout<< "Setting up IPC"<<std::endl;
 ///////////////////////////////////////////////////////////////////////////////////////////////////////////
 // Loop over ranks/gpu's on our node
 ///////////////////////////////////////////////////////////////////////////////////////////////////////////
@@ -533,6 +543,23 @@ void GlobalSharedMemory::SharedMemoryAllocate(uint64_t bytes, int flags)
 //////////////////////////////////////////////////
 // If it is me, pass around the IPC access key
 //////////////////////////////////////////////////
+#ifdef GRID_SYCL_LEVEL_ZERO_IPC
+ze_ipc_mem_handle_t handle;
+if ( r==WorldShmRank ) {
+auto err = zeMemGetIpcHandle(zeContext,ShmCommBuf,&handle);
+if ( err != ZE_RESULT_SUCCESS ) {
+std::cerr << "SharedMemoryMPI.cc zeMemGetIpcHandle failed for rank "<<r<<" "<<std::hex<<err<<std::dec<<std::endl;
+exit(EXIT_FAILURE);
+} else {
+std::cerr << "SharedMemoryMPI.cc zeMemGetIpcHandle succeeded for rank "<<r<<" "<<std::hex<<err<<std::dec<<std::endl;
+}
+std::cerr<<"Allocated IpcHandle rank "<<r<<" (hex) ";
+for(int c=0;c<ZE_MAX_IPC_HANDLE_SIZE;c++){
+std::cerr<<std::hex<<(uint32_t)((uint8_t)handle.data[c])<<std::dec;
+}
+std::cerr<<std::endl;
+}
+#endif
 #ifdef GRID_CUDA
 cudaIpcMemHandle_t handle;
 if ( r==WorldShmRank ) {
@@ -569,6 +596,25 @@ void GlobalSharedMemory::SharedMemoryAllocate(uint64_t bytes, int flags)
 // If I am not the source, overwrite thisBuf with remote buffer
 ///////////////////////////////////////////////////////////////
 void * thisBuf = ShmCommBuf;
+#ifdef GRID_SYCL_LEVEL_ZERO_IPC
+if ( r!=WorldShmRank ) {
+thisBuf = nullptr;
+std::cerr<<"Using IpcHandle rank "<<r<<" ";
+for(int c=0;c<ZE_MAX_IPC_HANDLE_SIZE;c++){
+std::cerr<<std::hex<<(uint32_t)((uint8_t)handle.data[c])<<std::dec;
+}
+std::cerr<<std::endl;
+auto err = zeMemOpenIpcHandle(zeContext,zeDevice,handle,0,&thisBuf);
+if ( err != ZE_RESULT_SUCCESS ) {
+std::cerr << "SharedMemoryMPI.cc "<<zeContext<<" "<<zeDevice<<std::endl;
+std::cerr << "SharedMemoryMPI.cc zeMemOpenIpcHandle failed for rank "<<r<<" "<<std::hex<<err<<std::dec<<std::endl;
+exit(EXIT_FAILURE);
+} else {
+std::cerr << "SharedMemoryMPI.cc zeMemOpenIpcHandle succeeded for rank "<<r<<" "<<std::hex<<err<<std::dec<<std::endl;
+}
+assert(thisBuf!=nullptr);
+}
+#endif
 #ifdef GRID_CUDA
 if ( r!=WorldShmRank ) {
 auto err = cudaIpcOpenMemHandle(&thisBuf,handle,cudaIpcMemLazyEnablePeerAccess);
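The pattern in the last two hunks is the usual single-node IPC exchange: the owning rank exports a handle for its device buffer, the handle is passed around the node's ranks, and every other rank opens it to get a mapped pointer to the same memory. A minimal sketch of that pattern with the CUDA IPC API, which the adjacent GRID_CUDA branch also relies on; the function and its arguments are illustrative, not Grid's interface:

```cpp
#include <cuda_runtime.h>
#include <mpi.h>

void *share_node_buffer(void *myBuf, int myShmRank, int ownerRank, MPI_Comm shmComm) {
  cudaIpcMemHandle_t handle;
  if (myShmRank == ownerRank) {
    // Owner exports an IPC handle for its device allocation.
    cudaIpcGetMemHandle(&handle, myBuf);
  }
  // The handle is plain bytes, so it can be broadcast over the node communicator.
  MPI_Bcast(&handle, sizeof(handle), MPI_BYTE, ownerRank, shmComm);

  void *peerBuf = myBuf;
  if (myShmRank != ownerRank) {
    // Every other rank maps the owner's buffer into its own address space.
    cudaIpcOpenMemHandle(&peerBuf, handle, cudaIpcMemLazyEnablePeerAccess);
  }
  return peerBuf;  // usable for direct loads/stores or peer copies
}
```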
@@ -115,9 +115,9 @@ typedef WilsonFermion<WilsonImplR> WilsonFermionR;
 typedef WilsonFermion<WilsonImplF> WilsonFermionF;
 typedef WilsonFermion<WilsonImplD> WilsonFermionD;
 
-typedef WilsonFermion<WilsonImplRL> WilsonFermionRL;
-typedef WilsonFermion<WilsonImplFH> WilsonFermionFH;
-typedef WilsonFermion<WilsonImplDF> WilsonFermionDF;
+//typedef WilsonFermion<WilsonImplRL> WilsonFermionRL;
+//typedef WilsonFermion<WilsonImplFH> WilsonFermionFH;
+//typedef WilsonFermion<WilsonImplDF> WilsonFermionDF;
 
 typedef WilsonFermion<WilsonAdjImplR> WilsonAdjFermionR;
 typedef WilsonFermion<WilsonAdjImplF> WilsonAdjFermionF;

@@ -158,41 +158,41 @@ typedef DomainWallFermion<WilsonImplR> DomainWallFermionR;
 typedef DomainWallFermion<WilsonImplF> DomainWallFermionF;
 typedef DomainWallFermion<WilsonImplD> DomainWallFermionD;
 
-typedef DomainWallFermion<WilsonImplRL> DomainWallFermionRL;
-typedef DomainWallFermion<WilsonImplFH> DomainWallFermionFH;
-typedef DomainWallFermion<WilsonImplDF> DomainWallFermionDF;
+//typedef DomainWallFermion<WilsonImplRL> DomainWallFermionRL;
+//typedef DomainWallFermion<WilsonImplFH> DomainWallFermionFH;
+//typedef DomainWallFermion<WilsonImplDF> DomainWallFermionDF;
 
 typedef DomainWallEOFAFermion<WilsonImplR> DomainWallEOFAFermionR;
 typedef DomainWallEOFAFermion<WilsonImplF> DomainWallEOFAFermionF;
 typedef DomainWallEOFAFermion<WilsonImplD> DomainWallEOFAFermionD;
 
-typedef DomainWallEOFAFermion<WilsonImplRL> DomainWallEOFAFermionRL;
-typedef DomainWallEOFAFermion<WilsonImplFH> DomainWallEOFAFermionFH;
-typedef DomainWallEOFAFermion<WilsonImplDF> DomainWallEOFAFermionDF;
+//typedef DomainWallEOFAFermion<WilsonImplRL> DomainWallEOFAFermionRL;
+//typedef DomainWallEOFAFermion<WilsonImplFH> DomainWallEOFAFermionFH;
+//typedef DomainWallEOFAFermion<WilsonImplDF> DomainWallEOFAFermionDF;
 
 typedef MobiusFermion<WilsonImplR> MobiusFermionR;
 typedef MobiusFermion<WilsonImplF> MobiusFermionF;
 typedef MobiusFermion<WilsonImplD> MobiusFermionD;
 
-typedef MobiusFermion<WilsonImplRL> MobiusFermionRL;
-typedef MobiusFermion<WilsonImplFH> MobiusFermionFH;
-typedef MobiusFermion<WilsonImplDF> MobiusFermionDF;
+//typedef MobiusFermion<WilsonImplRL> MobiusFermionRL;
+//typedef MobiusFermion<WilsonImplFH> MobiusFermionFH;
+//typedef MobiusFermion<WilsonImplDF> MobiusFermionDF;
 
 typedef MobiusEOFAFermion<WilsonImplR> MobiusEOFAFermionR;
 typedef MobiusEOFAFermion<WilsonImplF> MobiusEOFAFermionF;
 typedef MobiusEOFAFermion<WilsonImplD> MobiusEOFAFermionD;
 
-typedef MobiusEOFAFermion<WilsonImplRL> MobiusEOFAFermionRL;
-typedef MobiusEOFAFermion<WilsonImplFH> MobiusEOFAFermionFH;
-typedef MobiusEOFAFermion<WilsonImplDF> MobiusEOFAFermionDF;
+//typedef MobiusEOFAFermion<WilsonImplRL> MobiusEOFAFermionRL;
+//typedef MobiusEOFAFermion<WilsonImplFH> MobiusEOFAFermionFH;
+//typedef MobiusEOFAFermion<WilsonImplDF> MobiusEOFAFermionDF;
 
 typedef ZMobiusFermion<ZWilsonImplR> ZMobiusFermionR;
 typedef ZMobiusFermion<ZWilsonImplF> ZMobiusFermionF;
 typedef ZMobiusFermion<ZWilsonImplD> ZMobiusFermionD;
 
-typedef ZMobiusFermion<ZWilsonImplRL> ZMobiusFermionRL;
-typedef ZMobiusFermion<ZWilsonImplFH> ZMobiusFermionFH;
-typedef ZMobiusFermion<ZWilsonImplDF> ZMobiusFermionDF;
+//typedef ZMobiusFermion<ZWilsonImplRL> ZMobiusFermionRL;
+//typedef ZMobiusFermion<ZWilsonImplFH> ZMobiusFermionFH;
+//typedef ZMobiusFermion<ZWilsonImplDF> ZMobiusFermionDF;
 
 // Ls vectorised
 typedef ScaledShamirFermion<WilsonImplR> ScaledShamirFermionR;

@@ -235,49 +235,49 @@ typedef WilsonFermion<GparityWilsonImplR> GparityWilsonFermionR;
 typedef WilsonFermion<GparityWilsonImplF> GparityWilsonFermionF;
 typedef WilsonFermion<GparityWilsonImplD> GparityWilsonFermionD;
 
-typedef WilsonFermion<GparityWilsonImplRL> GparityWilsonFermionRL;
-typedef WilsonFermion<GparityWilsonImplFH> GparityWilsonFermionFH;
-typedef WilsonFermion<GparityWilsonImplDF> GparityWilsonFermionDF;
+//typedef WilsonFermion<GparityWilsonImplRL> GparityWilsonFermionRL;
+//typedef WilsonFermion<GparityWilsonImplFH> GparityWilsonFermionFH;
+//typedef WilsonFermion<GparityWilsonImplDF> GparityWilsonFermionDF;
 
 typedef DomainWallFermion<GparityWilsonImplR> GparityDomainWallFermionR;
 typedef DomainWallFermion<GparityWilsonImplF> GparityDomainWallFermionF;
 typedef DomainWallFermion<GparityWilsonImplD> GparityDomainWallFermionD;
 
-typedef DomainWallFermion<GparityWilsonImplRL> GparityDomainWallFermionRL;
-typedef DomainWallFermion<GparityWilsonImplFH> GparityDomainWallFermionFH;
-typedef DomainWallFermion<GparityWilsonImplDF> GparityDomainWallFermionDF;
+//typedef DomainWallFermion<GparityWilsonImplRL> GparityDomainWallFermionRL;
+//typedef DomainWallFermion<GparityWilsonImplFH> GparityDomainWallFermionFH;
+//typedef DomainWallFermion<GparityWilsonImplDF> GparityDomainWallFermionDF;
 
 typedef DomainWallEOFAFermion<GparityWilsonImplR> GparityDomainWallEOFAFermionR;
 typedef DomainWallEOFAFermion<GparityWilsonImplF> GparityDomainWallEOFAFermionF;
 typedef DomainWallEOFAFermion<GparityWilsonImplD> GparityDomainWallEOFAFermionD;
 
-typedef DomainWallEOFAFermion<GparityWilsonImplRL> GparityDomainWallEOFAFermionRL;
-typedef DomainWallEOFAFermion<GparityWilsonImplFH> GparityDomainWallEOFAFermionFH;
-typedef DomainWallEOFAFermion<GparityWilsonImplDF> GparityDomainWallEOFAFermionDF;
+//typedef DomainWallEOFAFermion<GparityWilsonImplRL> GparityDomainWallEOFAFermionRL;
+//typedef DomainWallEOFAFermion<GparityWilsonImplFH> GparityDomainWallEOFAFermionFH;
+//typedef DomainWallEOFAFermion<GparityWilsonImplDF> GparityDomainWallEOFAFermionDF;
 
 typedef WilsonTMFermion<GparityWilsonImplR> GparityWilsonTMFermionR;
 typedef WilsonTMFermion<GparityWilsonImplF> GparityWilsonTMFermionF;
 typedef WilsonTMFermion<GparityWilsonImplD> GparityWilsonTMFermionD;
 
-typedef WilsonTMFermion<GparityWilsonImplRL> GparityWilsonTMFermionRL;
-typedef WilsonTMFermion<GparityWilsonImplFH> GparityWilsonTMFermionFH;
-typedef WilsonTMFermion<GparityWilsonImplDF> GparityWilsonTMFermionDF;
+//typedef WilsonTMFermion<GparityWilsonImplRL> GparityWilsonTMFermionRL;
+//typedef WilsonTMFermion<GparityWilsonImplFH> GparityWilsonTMFermionFH;
+//typedef WilsonTMFermion<GparityWilsonImplDF> GparityWilsonTMFermionDF;
 
 typedef MobiusFermion<GparityWilsonImplR> GparityMobiusFermionR;
 typedef MobiusFermion<GparityWilsonImplF> GparityMobiusFermionF;
 typedef MobiusFermion<GparityWilsonImplD> GparityMobiusFermionD;
 
-typedef MobiusFermion<GparityWilsonImplRL> GparityMobiusFermionRL;
-typedef MobiusFermion<GparityWilsonImplFH> GparityMobiusFermionFH;
-typedef MobiusFermion<GparityWilsonImplDF> GparityMobiusFermionDF;
+//typedef MobiusFermion<GparityWilsonImplRL> GparityMobiusFermionRL;
+//typedef MobiusFermion<GparityWilsonImplFH> GparityMobiusFermionFH;
+//typedef MobiusFermion<GparityWilsonImplDF> GparityMobiusFermionDF;
 
 typedef MobiusEOFAFermion<GparityWilsonImplR> GparityMobiusEOFAFermionR;
 typedef MobiusEOFAFermion<GparityWilsonImplF> GparityMobiusEOFAFermionF;
 typedef MobiusEOFAFermion<GparityWilsonImplD> GparityMobiusEOFAFermionD;
 
-typedef MobiusEOFAFermion<GparityWilsonImplRL> GparityMobiusEOFAFermionRL;
-typedef MobiusEOFAFermion<GparityWilsonImplFH> GparityMobiusEOFAFermionFH;
-typedef MobiusEOFAFermion<GparityWilsonImplDF> GparityMobiusEOFAFermionDF;
+//typedef MobiusEOFAFermion<GparityWilsonImplRL> GparityMobiusEOFAFermionRL;
+//typedef MobiusEOFAFermion<GparityWilsonImplFH> GparityMobiusEOFAFermionFH;
+//typedef MobiusEOFAFermion<GparityWilsonImplDF> GparityMobiusEOFAFermionDF;
 
 typedef ImprovedStaggeredFermion<StaggeredImplR> ImprovedStaggeredFermionR;
 typedef ImprovedStaggeredFermion<StaggeredImplF> ImprovedStaggeredFermionF;

@@ -327,8 +327,8 @@ typedef GparityWilsonImpl<vComplex , FundamentalRepresentation,CoeffReal> Gparit
 typedef GparityWilsonImpl<vComplexF, FundamentalRepresentation,CoeffReal> GparityWilsonImplF; // Float
 typedef GparityWilsonImpl<vComplexD, FundamentalRepresentation,CoeffReal> GparityWilsonImplD; // Double
 
-typedef GparityWilsonImpl<vComplex , FundamentalRepresentation,CoeffRealHalfComms> GparityWilsonImplRL; // Real.. whichever prec
-typedef GparityWilsonImpl<vComplexF, FundamentalRepresentation,CoeffRealHalfComms> GparityWilsonImplFH; // Float
-typedef GparityWilsonImpl<vComplexD, FundamentalRepresentation,CoeffRealHalfComms> GparityWilsonImplDF; // Double
+//typedef GparityWilsonImpl<vComplex , FundamentalRepresentation,CoeffRealHalfComms> GparityWilsonImplRL; // Real.. whichever prec
+//typedef GparityWilsonImpl<vComplexF, FundamentalRepresentation,CoeffRealHalfComms> GparityWilsonImplFH; // Float
+//typedef GparityWilsonImpl<vComplexD, FundamentalRepresentation,CoeffRealHalfComms> GparityWilsonImplDF; // Double
 
 NAMESPACE_END(Grid);
@@ -68,11 +68,12 @@ public:
 /*****************************************************/
 /* Compress includes precision change if mpi data is not same */
 /*****************************************************/
-template<class _SiteHalfSpinor, class _SiteSpinor>
-accelerator_inline void Compress(_SiteHalfSpinor *buf,Integer o,const _SiteSpinor &in) const {
-_SiteHalfSpinor tmp;
-projector::Proj(tmp,in,mu,dag);
-vstream(buf[o],tmp);
+accelerator_inline void Compress(SiteHalfSpinor &buf,const SiteSpinor &in) const {
+typedef decltype(coalescedRead(buf)) sobj;
+sobj sp;
+auto sin = coalescedRead(in);
+projector::Proj(sp,sin,mu,dag);
+coalescedWrite(buf,sp);
 }
 
 /*****************************************************/
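The rewrite above replaces the `vstream` store with the `coalescedRead`/`coalescedWrite` idiom, so the same Compress body works in SIMT builds where each GPU thread owns one SIMD lane of the site object. A toy, self-contained model of that idiom (not Grid's implementation):

```cpp
#include <array>
#include <cstdio>

template <int Nsimd> struct VecObj { std::array<double, Nsimd> lane; };

// What coalescedRead/coalescedWrite reduce to for a thread that owns lane 'l'.
template <int N> double coalesced_read(const VecObj<N> &v, int l) { return v.lane[l]; }
template <int N> void coalesced_write(VecObj<N> &v, int l, double s) { v.lane[l] = s; }

int main() {
  VecObj<4> in{{1.0, 2.0, 3.0, 4.0}};
  VecObj<4> out{};
  for (int lane = 0; lane < 4; lane++) {   // on a GPU, each iteration is a separate thread
    double s = coalesced_read(in, lane);   // per-lane scalar, like sin = coalescedRead(in)
    s *= 0.5;                              // stand-in for projector::Proj(sp,sin,mu,dag)
    coalesced_write(out, lane, s);         // like coalescedWrite(buf,sp)
  }
  std::printf("%g %g %g %g\n", out.lane[0], out.lane[1], out.lane[2], out.lane[3]);
  return 0;
}
```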
@@ -82,13 +83,18 @@ public:
 const SiteHalfSpinor * __restrict__ vp0,
 const SiteHalfSpinor * __restrict__ vp1,
 Integer type,Integer o) const {
+#ifdef GRID_SIMT
+exchangeSIMT(mp[2*o],mp[2*o+1],vp0[o],vp1[o],type);
+#else
 SiteHalfSpinor tmp1;
 SiteHalfSpinor tmp2;
 exchange(tmp1,tmp2,vp0[o],vp1[o],type);
 vstream(mp[2*o ],tmp1);
 vstream(mp[2*o+1],tmp2);
+#endif
 }
 
 
 /*****************************************************/
 /* Have a decompression step if mpi data is not same */
 /*****************************************************/
@@ -105,6 +111,28 @@ public:
 const SiteSpinor * __restrict__ in,
 Integer j,Integer k, Integer m,Integer type) const
 {
+#ifdef GRID_SIMT
+typedef SiteSpinor vobj;
+typedef SiteHalfSpinor hvobj;
+typedef decltype(coalescedRead(*in)) sobj;
+typedef decltype(coalescedRead(*out0)) hsobj;
+
+unsigned int Nsimd = vobj::Nsimd();
+unsigned int mask = Nsimd >> (type + 1);
+int lane = acceleratorSIMTlane(Nsimd);
+int j0 = lane &(~mask); // inner coor zero
+int j1 = lane |(mask) ; // inner coor one
+const vobj *vp0 = &in[k];
+const vobj *vp1 = &in[m];
+const vobj *vp = (lane&mask) ? vp1:vp0;
+auto sa = coalescedRead(*vp,j0);
+auto sb = coalescedRead(*vp,j1);
+hsobj psa, psb;
+projector::Proj(psa,sa,mu,dag);
+projector::Proj(psb,sb,mu,dag);
+coalescedWrite(out0[j],psa);
+coalescedWrite(out1[j],psb);
+#else
 SiteHalfSpinor temp1, temp2;
 SiteHalfSpinor temp3, temp4;
 projector::Proj(temp1,in[k],mu,dag);
@@ -112,6 +140,7 @@ public:
 exchange(temp3,temp4,temp1,temp2,type);
 vstream(out0[j],temp3);
 vstream(out1[j],temp4);
+#endif
 }
 
 /*****************************************************/

@@ -121,6 +150,7 @@ public:
 
 };
 
+#if 0
 template<class _HCspinor,class _Hspinor,class _Spinor, class projector>
 class WilsonCompressorTemplate< _HCspinor, _Hspinor, _Spinor, projector,
 typename std::enable_if<!std::is_same<_HCspinor,_Hspinor>::value>::type >
@@ -149,13 +179,23 @@ public:
 /*****************************************************/
 /* Compress includes precision change if mpi data is not same */
 /*****************************************************/
-template<class _SiteHalfSpinor, class _SiteSpinor>
-accelerator_inline void Compress(_SiteHalfSpinor *buf,Integer o,const _SiteSpinor &in) const {
-_SiteHalfSpinor hsp;
+accelerator_inline void Compress(SiteHalfSpinor &buf,const SiteSpinor &in) const {
+SiteHalfSpinor hsp;
 SiteHalfCommSpinor *hbuf = (SiteHalfCommSpinor *)buf;
 projector::Proj(hsp,in,mu,dag);
 precisionChange((vComplexLow *)&hbuf[o],(vComplexHigh *)&hsp,Nw);
 }
+accelerator_inline void Compress(SiteHalfSpinor &buf,const SiteSpinor &in) const {
+#ifdef GRID_SIMT
+typedef decltype(coalescedRead(buf)) sobj;
+sobj sp;
+auto sin = coalescedRead(in);
+projector::Proj(sp,sin,mu,dag);
+coalescedWrite(buf,sp);
+#else
+projector::Proj(buf,in,mu,dag);
+#endif
+}
 
 /*****************************************************/
 /* Exchange includes precision change if mpi data is not same */
@@ -203,6 +243,7 @@ public:
 accelerator_inline bool DecompressionStep(void) const { return true; }
 
 };
+#endif
 
 #define DECLARE_PROJ(Projector,Compressor,spProj) \
 class Projector { \
@@ -253,33 +294,8 @@ public:
 typedef typename Base::View_type View_type;
 typedef typename Base::StencilVector StencilVector;
 
-double timer0;
-double timer1;
-double timer2;
-double timer3;
-double timer4;
-double timer5;
-double timer6;
-uint64_t callsi;
-void ZeroCountersi(void)
-{
-timer0=0;
-timer1=0;
-timer2=0;
-timer3=0;
-timer4=0;
-timer5=0;
-timer6=0;
-callsi=0;
-}
-void Reporti(int calls)
-{
-if ( timer0 ) std::cout << GridLogMessage << " timer0 (HaloGatherOpt) " <<timer0/calls <<std::endl;
-if ( timer1 ) std::cout << GridLogMessage << " timer1 (Communicate) " <<timer1/calls <<std::endl;
-if ( timer2 ) std::cout << GridLogMessage << " timer2 (CommsMerge ) " <<timer2/calls <<std::endl;
-if ( timer3 ) std::cout << GridLogMessage << " timer3 (commsMergeShm) " <<timer3/calls <<std::endl;
-if ( timer4 ) std::cout << GridLogMessage << " timer4 " <<timer4 <<std::endl;
-}
+void ZeroCountersi(void) { }
+void Reporti(int calls) { }
 
 std::vector<int> surface_list;
 
@@ -321,26 +337,18 @@ public:
 {
 std::vector<std::vector<CommsRequest_t> > reqs;
 this->HaloExchangeOptGather(source,compress);
-double t1=usecond();
 // Asynchronous MPI calls multidirectional, Isend etc...
 // Non-overlapped directions within a thread. Asynchronous calls except MPI3, threaded up to comm threads ways.
 this->Communicate();
-double t2=usecond(); timer1 += t2-t1;
 this->CommsMerge(compress);
-double t3=usecond(); timer2 += t3-t2;
 this->CommsMergeSHM(compress);
-double t4=usecond(); timer3 += t4-t3;
 }
 
 template <class compressor>
 void HaloExchangeOptGather(const Lattice<vobj> &source,compressor &compress)
 {
 this->Prepare();
-double t0=usecond();
 this->HaloGatherOpt(source,compress);
-double t1=usecond();
-timer0 += t1-t0;
-callsi++;
 }
 
 template <class compressor>
@@ -352,12 +360,9 @@ public:
 typedef typename compressor::SiteHalfSpinor SiteHalfSpinor;
 typedef typename compressor::SiteHalfCommSpinor SiteHalfCommSpinor;
 
-this->mpi3synctime_g-=usecond();
 this->_grid->StencilBarrier();
-this->mpi3synctime_g+=usecond();
 
 assert(source.Grid()==this->_grid);
-this->halogtime-=usecond();
 
 this->u_comm_offset=0;
 

@@ -393,7 +398,6 @@ public:
 }
 this->face_table_computed=1;
 assert(this->u_comm_offset==this->_unified_buffer_size);
-this->halogtime+=usecond();
 accelerator_barrier();
 }
 
@@ -243,17 +243,17 @@ typedef WilsonImpl<vComplex, FundamentalRepresentation, CoeffReal > WilsonImplR
 typedef WilsonImpl<vComplexF, FundamentalRepresentation, CoeffReal > WilsonImplF; // Float
 typedef WilsonImpl<vComplexD, FundamentalRepresentation, CoeffReal > WilsonImplD; // Double
 
-typedef WilsonImpl<vComplex, FundamentalRepresentation, CoeffRealHalfComms > WilsonImplRL; // Real.. whichever prec
-typedef WilsonImpl<vComplexF, FundamentalRepresentation, CoeffRealHalfComms > WilsonImplFH; // Float
-typedef WilsonImpl<vComplexD, FundamentalRepresentation, CoeffRealHalfComms > WilsonImplDF; // Double
+//typedef WilsonImpl<vComplex, FundamentalRepresentation, CoeffRealHalfComms > WilsonImplRL; // Real.. whichever prec
+//typedef WilsonImpl<vComplexF, FundamentalRepresentation, CoeffRealHalfComms > WilsonImplFH; // Float
+//typedef WilsonImpl<vComplexD, FundamentalRepresentation, CoeffRealHalfComms > WilsonImplDF; // Double
 
 typedef WilsonImpl<vComplex, FundamentalRepresentation, CoeffComplex > ZWilsonImplR; // Real.. whichever prec
 typedef WilsonImpl<vComplexF, FundamentalRepresentation, CoeffComplex > ZWilsonImplF; // Float
 typedef WilsonImpl<vComplexD, FundamentalRepresentation, CoeffComplex > ZWilsonImplD; // Double
 
-typedef WilsonImpl<vComplex, FundamentalRepresentation, CoeffComplexHalfComms > ZWilsonImplRL; // Real.. whichever prec
-typedef WilsonImpl<vComplexF, FundamentalRepresentation, CoeffComplexHalfComms > ZWilsonImplFH; // Float
-typedef WilsonImpl<vComplexD, FundamentalRepresentation, CoeffComplexHalfComms > ZWilsonImplDF; // Double
+//typedef WilsonImpl<vComplex, FundamentalRepresentation, CoeffComplexHalfComms > ZWilsonImplRL; // Real.. whichever prec
+//typedef WilsonImpl<vComplexF, FundamentalRepresentation, CoeffComplexHalfComms > ZWilsonImplFH; // Float
+//typedef WilsonImpl<vComplexD, FundamentalRepresentation, CoeffComplexHalfComms > ZWilsonImplDF; // Double
 
 typedef WilsonImpl<vComplex, AdjointRepresentation, CoeffReal > WilsonAdjImplR; // Real.. whichever prec
 typedef WilsonImpl<vComplexF, AdjointRepresentation, CoeffReal > WilsonAdjImplF; // Float
@@ -880,7 +880,7 @@ void CayleyFermion5D<Impl>::SeqConservedCurrent(PropagatorField &q_in,
 }
 
 std::vector<RealD> G_s(Ls,1.0);
-Integer sign = 1; // sign flip for vector/tadpole
+RealD sign = 1; // sign flip for vector/tadpole
 if ( curr_type == Current::Axial ) {
 for(int s=0;s<Ls/2;s++){
 G_s[s] = -1.0;

@@ -901,8 +901,8 @@ void CayleyFermion5D<Impl>::SeqConservedCurrent(PropagatorField &q_in,
 for(int s=0;s<Ls;s++){
 
 int sp = (s+1)%Ls;
-int sr = Ls-1-s;
-int srp= (sr+1)%Ls;
+// int sr = Ls-1-s;
+// int srp= (sr+1)%Ls;
 
 // Mobius parameters
 auto b=this->bs[s];
@@ -1 +0,0 @@
-../CayleyFermion5DInstantiation.cc.master

@@ -1 +0,0 @@
-../ContinuedFractionFermion5DInstantiation.cc.master

@@ -1 +0,0 @@
-../DomainWallEOFAFermionInstantiation.cc.master

@@ -1 +0,0 @@
-../MobiusEOFAFermionInstantiation.cc.master

@@ -1 +0,0 @@
-../PartialFractionFermion5DInstantiation.cc.master

@@ -1 +0,0 @@
-../WilsonCloverFermionInstantiation.cc.master

@@ -1 +0,0 @@
-../WilsonFermion5DInstantiation.cc.master

@@ -1 +0,0 @@
-../WilsonFermionInstantiation.cc.master

@@ -1 +0,0 @@
-../WilsonKernelsInstantiationGparity.cc.master

@@ -1 +0,0 @@
-../WilsonTMFermionInstantiation.cc.master

@@ -1 +0,0 @@
-#define IMPLEMENTATION GparityWilsonImplDF

@@ -1 +0,0 @@
-../CayleyFermion5DInstantiation.cc.master

@@ -1 +0,0 @@
-../ContinuedFractionFermion5DInstantiation.cc.master

@@ -1 +0,0 @@
-../DomainWallEOFAFermionInstantiation.cc.master

@@ -1 +0,0 @@
-../MobiusEOFAFermionInstantiation.cc.master

@@ -1 +0,0 @@
-../PartialFractionFermion5DInstantiation.cc.master

@@ -1 +0,0 @@
-../WilsonCloverFermionInstantiation.cc.master

@@ -1 +0,0 @@
-../WilsonFermion5DInstantiation.cc.master

@@ -1 +0,0 @@
-../WilsonFermionInstantiation.cc.master

@@ -1 +0,0 @@
-../WilsonKernelsInstantiationGparity.cc.master

@@ -1 +0,0 @@
-../WilsonTMFermionInstantiation.cc.master

@@ -1 +0,0 @@
-#define IMPLEMENTATION GparityWilsonImplFH

@@ -1 +0,0 @@
-../CayleyFermion5DInstantiation.cc.master

@@ -1 +0,0 @@
-../ContinuedFractionFermion5DInstantiation.cc.master

@@ -1 +0,0 @@
-../DomainWallEOFAFermionInstantiation.cc.master

@@ -1 +0,0 @@
-../MobiusEOFAFermionInstantiation.cc.master

@@ -1 +0,0 @@
-../PartialFractionFermion5DInstantiation.cc.master

@@ -1 +0,0 @@
-../WilsonCloverFermionInstantiation.cc.master

@@ -1 +0,0 @@
-../WilsonFermion5DInstantiation.cc.master

@@ -1 +0,0 @@
-../WilsonFermionInstantiation.cc.master
@@ -1,51 +0,0 @@
-/*************************************************************************************
-
-Grid physics library, www.github.com/paboyle/Grid
-
-Source file: ./lib/qcd/action/fermion/WilsonKernels.cc
-
-Copyright (C) 2015, 2020
-
-Author: Peter Boyle <paboyle@ph.ed.ac.uk>
-Author: Peter Boyle <peterboyle@Peters-MacBook-Pro-2.local>
-Author: paboyle <paboyle@ph.ed.ac.uk>
-Author: Nils Meyer <nils.meyer@ur.de> Regensburg University
-
-This program is free software; you can redistribute it and/or modify
-it under the terms of the GNU General Public License as published by
-the Free Software Foundation; either version 2 of the License, or
-(at your option) any later version.
-
-This program is distributed in the hope that it will be useful,
-but WITHOUT ANY WARRANTY; without even the implied warranty of
-MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-GNU General Public License for more details.
-
-You should have received a copy of the GNU General Public License along
-with this program; if not, write to the Free Software Foundation, Inc.,
-51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
-
-See the full license in the file "LICENSE" in the top level distribution
-directory
-*************************************************************************************/
-/* END LEGAL */
-#include <Grid/qcd/action/fermion/FermionCore.h>
-#include <Grid/qcd/action/fermion/implementation/WilsonKernelsImplementation.h>
-#include <Grid/qcd/action/fermion/implementation/WilsonKernelsHandImplementation.h>
-
-#ifndef AVX512
-#ifndef QPX
-#ifndef A64FX
-#ifndef A64FXFIXEDSIZE
-#include <Grid/qcd/action/fermion/implementation/WilsonKernelsAsmImplementation.h>
-#endif
-#endif
-#endif
-#endif
-
-NAMESPACE_BEGIN(Grid);
-
-#include "impl.h"
-template class WilsonKernels<IMPLEMENTATION>;
-
-NAMESPACE_END(Grid);
@@ -1 +0,0 @@
-../WilsonTMFermionInstantiation.cc.master

@@ -1 +0,0 @@
-#define IMPLEMENTATION WilsonImplDF

@@ -1 +0,0 @@
-../CayleyFermion5DInstantiation.cc.master

@@ -1 +0,0 @@
-../ContinuedFractionFermion5DInstantiation.cc.master

@@ -1 +0,0 @@
-../DomainWallEOFAFermionInstantiation.cc.master

@@ -1 +0,0 @@
-../MobiusEOFAFermionInstantiation.cc.master

@@ -1 +0,0 @@
-../PartialFractionFermion5DInstantiation.cc.master

@@ -1 +0,0 @@
-../WilsonCloverFermionInstantiation.cc.master

@@ -1 +0,0 @@
-../WilsonFermion5DInstantiation.cc.master

@@ -1 +0,0 @@
-../WilsonFermionInstantiation.cc.master
@@ -1,51 +0,0 @@
-/*************************************************************************************
-
-Grid physics library, www.github.com/paboyle/Grid
-
-Source file: ./lib/qcd/action/fermion/WilsonKernels.cc
-
-Copyright (C) 2015, 2020
-
-Author: Peter Boyle <paboyle@ph.ed.ac.uk>
-Author: Peter Boyle <peterboyle@Peters-MacBook-Pro-2.local>
-Author: paboyle <paboyle@ph.ed.ac.uk>
-Author: Nils Meyer <nils.meyer@ur.de> Regensburg University
-
-This program is free software; you can redistribute it and/or modify
-it under the terms of the GNU General Public License as published by
-the Free Software Foundation; either version 2 of the License, or
-(at your option) any later version.
-
-This program is distributed in the hope that it will be useful,
-but WITHOUT ANY WARRANTY; without even the implied warranty of
-MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-GNU General Public License for more details.
-
-You should have received a copy of the GNU General Public License along
-with this program; if not, write to the Free Software Foundation, Inc.,
-51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
-
-See the full license in the file "LICENSE" in the top level distribution
-directory
-*************************************************************************************/
-/* END LEGAL */
-#include <Grid/qcd/action/fermion/FermionCore.h>
-#include <Grid/qcd/action/fermion/implementation/WilsonKernelsImplementation.h>
-#include <Grid/qcd/action/fermion/implementation/WilsonKernelsHandImplementation.h>
-
-#ifndef AVX512
-#ifndef QPX
-#ifndef A64FX
-#ifndef A64FXFIXEDSIZE
-#include <Grid/qcd/action/fermion/implementation/WilsonKernelsAsmImplementation.h>
-#endif
-#endif
-#endif
-#endif
-
-NAMESPACE_BEGIN(Grid);
-
-#include "impl.h"
-template class WilsonKernels<IMPLEMENTATION>;
-
-NAMESPACE_END(Grid);
@@ -1 +0,0 @@
-../WilsonTMFermionInstantiation.cc.master

@@ -1 +0,0 @@
-#define IMPLEMENTATION WilsonImplFH

@@ -1 +0,0 @@
-../CayleyFermion5DInstantiation.cc.master

@@ -1 +0,0 @@
-../ContinuedFractionFermion5DInstantiation.cc.master

@@ -1 +0,0 @@
-../DomainWallEOFAFermionInstantiation.cc.master

@@ -1 +0,0 @@
-../MobiusEOFAFermionInstantiation.cc.master

@@ -1 +0,0 @@
-../PartialFractionFermion5DInstantiation.cc.master

@@ -1 +0,0 @@
-../WilsonFermion5DInstantiation.cc.master
@@ -1,51 +0,0 @@
-/*************************************************************************************
-
-Grid physics library, www.github.com/paboyle/Grid
-
-Source file: ./lib/qcd/action/fermion/WilsonKernels.cc
-
-Copyright (C) 2015, 2020
-
-Author: Peter Boyle <paboyle@ph.ed.ac.uk>
-Author: Peter Boyle <peterboyle@Peters-MacBook-Pro-2.local>
-Author: paboyle <paboyle@ph.ed.ac.uk>
-Author: Nils Meyer <nils.meyer@ur.de> Regensburg University
-
-This program is free software; you can redistribute it and/or modify
-it under the terms of the GNU General Public License as published by
-the Free Software Foundation; either version 2 of the License, or
-(at your option) any later version.
-
-This program is distributed in the hope that it will be useful,
-but WITHOUT ANY WARRANTY; without even the implied warranty of
-MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-GNU General Public License for more details.
-
-You should have received a copy of the GNU General Public License along
-with this program; if not, write to the Free Software Foundation, Inc.,
-51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
-
-See the full license in the file "LICENSE" in the top level distribution
-directory
-*************************************************************************************/
-/* END LEGAL */
-#include <Grid/qcd/action/fermion/FermionCore.h>
-#include <Grid/qcd/action/fermion/implementation/WilsonKernelsImplementation.h>
-#include <Grid/qcd/action/fermion/implementation/WilsonKernelsHandImplementation.h>
-
-#ifndef AVX512
-#ifndef QPX
-#ifndef A64FX
-#ifndef A64FXFIXEDSIZE
-#include <Grid/qcd/action/fermion/implementation/WilsonKernelsAsmImplementation.h>
-#endif
-#endif
-#endif
-#endif
-
-NAMESPACE_BEGIN(Grid);
-
-#include "impl.h"
-template class WilsonKernels<IMPLEMENTATION>;
-
-NAMESPACE_END(Grid);
@@ -1 +0,0 @@
-#define IMPLEMENTATION ZWilsonImplDF

@@ -1 +0,0 @@
-../CayleyFermion5DInstantiation.cc.master

@@ -1 +0,0 @@
-../ContinuedFractionFermion5DInstantiation.cc.master

@@ -1 +0,0 @@
-../DomainWallEOFAFermionInstantiation.cc.master

@@ -1 +0,0 @@
-../MobiusEOFAFermionInstantiation.cc.master

@@ -1 +0,0 @@
-../PartialFractionFermion5DInstantiation.cc.master

@@ -1 +0,0 @@
-../WilsonFermion5DInstantiation.cc.master
@@ -1,51 +0,0 @@
-/*************************************************************************************
-
-Grid physics library, www.github.com/paboyle/Grid
-
-Source file: ./lib/qcd/action/fermion/WilsonKernels.cc
-
-Copyright (C) 2015, 2020
-
-Author: Peter Boyle <paboyle@ph.ed.ac.uk>
-Author: Peter Boyle <peterboyle@Peters-MacBook-Pro-2.local>
-Author: paboyle <paboyle@ph.ed.ac.uk>
-Author: Nils Meyer <nils.meyer@ur.de> Regensburg University
-
-This program is free software; you can redistribute it and/or modify
-it under the terms of the GNU General Public License as published by
-the Free Software Foundation; either version 2 of the License, or
-(at your option) any later version.
-
-This program is distributed in the hope that it will be useful,
-but WITHOUT ANY WARRANTY; without even the implied warranty of
-MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-GNU General Public License for more details.
-
-You should have received a copy of the GNU General Public License along
-with this program; if not, write to the Free Software Foundation, Inc.,
-51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
-
-See the full license in the file "LICENSE" in the top level distribution
-directory
-*************************************************************************************/
-/* END LEGAL */
-#include <Grid/qcd/action/fermion/FermionCore.h>
-#include <Grid/qcd/action/fermion/implementation/WilsonKernelsImplementation.h>
-#include <Grid/qcd/action/fermion/implementation/WilsonKernelsHandImplementation.h>
-
-#ifndef AVX512
-#ifndef QPX
-#ifndef A64FX
-#ifndef A64FXFIXEDSIZE
-#include <Grid/qcd/action/fermion/implementation/WilsonKernelsAsmImplementation.h>
-#endif
-#endif
-#endif
-#endif
-
-NAMESPACE_BEGIN(Grid);
-
-#include "impl.h"
-template class WilsonKernels<IMPLEMENTATION>;
-
-NAMESPACE_END(Grid);
@@ -1 +0,0 @@
-#define IMPLEMENTATION ZWilsonImplFH
@@ -9,8 +9,6 @@ STAG5_IMPL_LIST=""
 WILSON_IMPL_LIST=" \
 WilsonImplF \
 WilsonImplD \
-WilsonImplFH \
-WilsonImplDF \
 WilsonAdjImplF \
 WilsonAdjImplD \
 WilsonTwoIndexSymmetricImplF \

@@ -18,26 +16,17 @@ WILSON_IMPL_LIST=" \
 WilsonTwoIndexAntiSymmetricImplF \
 WilsonTwoIndexAntiSymmetricImplD \
 GparityWilsonImplF \
-GparityWilsonImplD \
-GparityWilsonImplFH \
-GparityWilsonImplDF"
+GparityWilsonImplD "
 
 DWF_IMPL_LIST=" \
 WilsonImplF \
 WilsonImplD \
-WilsonImplFH \
-WilsonImplDF \
 ZWilsonImplF \
-ZWilsonImplD \
-ZWilsonImplFH \
-ZWilsonImplDF "
+ZWilsonImplD "
 
 GDWF_IMPL_LIST=" \
 GparityWilsonImplF \
-GparityWilsonImplD \
-GparityWilsonImplFH \
-GparityWilsonImplDF"
+GparityWilsonImplD "
 
 
 IMPL_LIST="$STAG_IMPL_LIST $WILSON_IMPL_LIST $DWF_IMPL_LIST $GDWF_IMPL_LIST"
 
@@ -40,7 +40,7 @@ See the full license in the file "LICENSE" in the top level distribution directo
 NAMESPACE_BEGIN(Grid);
 
 // Dirac algebra adjoint operator (not in to overload other adj)
-accelerator_inline Gamma adj(const Gamma &g)
+inline Gamma adj(const Gamma &g)
 {
 return Gamma (Gamma::adj[g.g]);
 }

@@ -48,7 +48,7 @@ accelerator_inline Gamma adj(const Gamma &g)
 
 
 // Dirac algebra mutliplication operator
-accelerator_inline Gamma operator*(const Gamma &g1, const Gamma &g2)
+inline Gamma operator*(const Gamma &g1, const Gamma &g2)
 {
 return Gamma (Gamma::mul[g1.g][g2.g]);
 }
@@ -3,20 +3,48 @@
 
 NAMESPACE_BEGIN(Grid);
 
+template<class vobj>
+accelerator_inline void exchangeSIMT(vobj &mp0,vobj &mp1,const vobj &vp0,const vobj &vp1,Integer type)
+{
+typedef decltype(coalescedRead(mp0)) sobj;
+unsigned int Nsimd = vobj::Nsimd();
+unsigned int mask = Nsimd >> (type + 1);
+int lane = acceleratorSIMTlane(Nsimd);
+int j0 = lane &(~mask); // inner coor zero
+int j1 = lane |(mask) ; // inner coor one
+const vobj *vpa = &vp0;
+const vobj *vpb = &vp1;
+const vobj *vp = (lane&mask) ? (vpb) : (vpa);
+auto sa = coalescedRead(vp[0],j0);
+auto sb = coalescedRead(vp[0],j1);
+coalescedWrite(mp0,sa);
+coalescedWrite(mp1,sb);
+}
+
 template<class vobj>
 class SimpleCompressor {
 public:
 void Point(int) {};
 accelerator_inline int CommDatumSize(void) const { return sizeof(vobj); }
 accelerator_inline bool DecompressionStep(void) const { return false; }
-template<class cobj> accelerator_inline void Compress(cobj *buf,int o,const cobj &in) const { buf[o]=in; }
+accelerator_inline void Compress(vobj &buf,const vobj &in) const {
+coalescedWrite(buf,coalescedRead(in));
+}
 accelerator_inline void Exchange(vobj *mp,vobj *vp0,vobj *vp1,Integer type,Integer o) const {
+#ifdef GRID_SIMT
+exchangeSIMT(mp[2*o],mp[2*o+1],vp0[o],vp1[o],type);
+#else
 exchange(mp[2*o],mp[2*o+1],vp0[o],vp1[o],type);
+#endif
 }
 accelerator_inline void Decompress(vobj *out,vobj *in, int o) const { assert(0); }
 accelerator_inline void CompressExchange(vobj *out0,vobj *out1,const vobj *in,
 int j,int k, int m,int type) const {
+#ifdef GRID_SIMT
+exchangeSIMT(out0[j],out1[j],in[k],in[m],type);
+#else
 exchange(out0[j],out1[j],in[k],in[m],type);
+#endif
 }
 // For cshift. Cshift should drop compressor coupling altogether
 // because I had to decouple the code from the Stencil anyway
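The index arithmetic in `exchangeSIMT` above pairs SIMD lanes across the two inputs: `mask` picks out the bit that distinguishes a pair for a given exchange `type`, lanes with that bit clear read from `vp0` and lanes with it set read from `vp1`, each at positions `j0 = lane & ~mask` and `j1 = lane | mask`. A standalone worked example that just prints the mapping for an assumed Nsimd of 8:

```cpp
#include <cstdio>

int main() {
  const unsigned int Nsimd = 8;                  // assumed lane count, for illustration
  for (unsigned int type = 0; type < 3; type++) {
    unsigned int mask = Nsimd >> (type + 1);
    std::printf("type %u (mask %u)\n", type, mask);
    for (unsigned int lane = 0; lane < Nsimd; lane++) {
      unsigned int j0 = lane & ~mask;            // inner coordinate zero
      unsigned int j1 = lane | mask;             // inner coordinate one
      const char *src = (lane & mask) ? "vp1" : "vp0";
      std::printf("  lane %u <- %s lanes (%u,%u)\n", lane, src, j0, j1);
    }
  }
  return 0;
}
```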
@@ -30,7 +30,7 @@
 NAMESPACE_BEGIN(Grid);
 
 void Gather_plane_table_compute (GridBase *grid,int dimension,int plane,int cbmask,
-int off,Vector<std::pair<int,int> > & table)
+int off,std::vector<std::pair<int,int> > & table)
 {
 table.resize(0);
 
@ -57,27 +57,22 @@ NAMESPACE_BEGIN(Grid);
|
|||||||
///////////////////////////////////////////////////////////////////
|
///////////////////////////////////////////////////////////////////
|
||||||
|
|
||||||
void Gather_plane_table_compute (GridBase *grid,int dimension,int plane,int cbmask,
|
void Gather_plane_table_compute (GridBase *grid,int dimension,int plane,int cbmask,
|
||||||
int off,Vector<std::pair<int,int> > & table);
|
int off,std::vector<std::pair<int,int> > & table);
|
||||||
|
|
||||||
template<class vobj,class cobj,class compressor>
|
template<class vobj,class cobj,class compressor>
|
||||||
void Gather_plane_simple_table (Vector<std::pair<int,int> >& table,const Lattice<vobj> &rhs,cobj *buffer,compressor &compress, int off,int so) __attribute__((noinline));
|
void Gather_plane_simple_table (commVector<std::pair<int,int> >& table,const Lattice<vobj> &rhs,cobj *buffer,compressor &compress, int off,int so) __attribute__((noinline));
|
||||||
|
|
||||||
template<class vobj,class cobj,class compressor>
|
template<class vobj,class cobj,class compressor>
|
||||||
void Gather_plane_simple_table (Vector<std::pair<int,int> >& table,const Lattice<vobj> &rhs,cobj *buffer,compressor &compress, int off,int so)
|
void Gather_plane_simple_table (commVector<std::pair<int,int> >& table,const Lattice<vobj> &rhs,cobj *buffer,compressor &compress, int off,int so)
|
||||||
{
|
{
|
||||||
int num=table.size();
|
int num=table.size();
|
||||||
std::pair<int,int> *table_v = & table[0];
|
std::pair<int,int> *table_v = & table[0];
|
||||||
|
|
||||||
auto rhs_v = rhs.View(AcceleratorRead);
|
auto rhs_v = rhs.View(AcceleratorRead);
|
||||||
accelerator_forNB( i,num, vobj::Nsimd(), {
|
accelerator_forNB( i,num, vobj::Nsimd(), {
|
||||||
typedef decltype(coalescedRead(buffer[0])) compressed_t;
|
compress.Compress(buffer[off+table_v[i].first],rhs_v[so+table_v[i].second]);
|
||||||
compressed_t tmp_c;
|
|
||||||
uint64_t o = table_v[i].first;
|
|
||||||
compress.Compress(&tmp_c,0,rhs_v(so+table_v[i].second));
|
|
||||||
coalescedWrite(buffer[off+o],tmp_c);
|
|
||||||
});
|
});
|
||||||
rhs_v.ViewClose();
|
rhs_v.ViewClose();
|
||||||
// Further optimisatoin: i) software prefetch the first element of the next table entry, prefetch the table
|
|
||||||
}
|
}
|
||||||
|
|
||||||
///////////////////////////////////////////////////////////////////
|
///////////////////////////////////////////////////////////////////
|
||||||
@@ -85,10 +80,10 @@ void Gather_plane_simple_table (Vector<std::pair<int,int> >& table,const Lattice
 ///////////////////////////////////////////////////////////////////
 template<class cobj,class vobj,class compressor>
 void Gather_plane_exchange_table(const Lattice<vobj> &rhs,
-                                 Vector<cobj *> pointers,int dimension,int plane,int cbmask,compressor &compress,int type) __attribute__((noinline));
+                                 commVector<cobj *> pointers,int dimension,int plane,int cbmask,compressor &compress,int type) __attribute__((noinline));

 template<class cobj,class vobj,class compressor>
-void Gather_plane_exchange_table(Vector<std::pair<int,int> >& table,const Lattice<vobj> &rhs,
+void Gather_plane_exchange_table(commVector<std::pair<int,int> >& table,const Lattice<vobj> &rhs,
                                  Vector<cobj *> pointers,int dimension,int plane,int cbmask,
                                  compressor &compress,int type)
 {
@@ -100,7 +95,7 @@ void Gather_plane_exchange_table(Vector<std::pair<int,int> >& table,const Lattic
   auto p0=&pointers[0][0];
   auto p1=&pointers[1][0];
   auto tp=&table[0];
-  accelerator_forNB(j, num, 1, {
+  accelerator_forNB(j, num, vobj::Nsimd(), {
     compress.CompressExchange(p0,p1, &rhs_v[0], j,
                               so+tp[2*j ].second,
                               so+tp[2*j+1].second,
@@ -266,10 +261,11 @@ public:
   }

   int face_table_computed;
-  std::vector<Vector<std::pair<int,int> > > face_table ;
+  std::vector<commVector<std::pair<int,int> > > face_table ;
   Vector<int> surface_list;

   stencilVector<StencilEntry> _entries; // Resident in managed memory
+  commVector<StencilEntry> _entries_device; // Resident in managed memory
   std::vector<Packet> Packets;
   std::vector<Merge> Mergers;
   std::vector<Merge> MergersSHM;
@@ -342,7 +338,7 @@ public:

   void *shm = (void *) _grid->ShmBufferTranslate(recv_from_rank,this->u_recv_buf_p);

-  if ( shm==NULL ) return 0;
+  if ( (shm==NULL) || Stencil_force_mpi ) return 0;

   return 1;
 }
@@ -609,13 +605,14 @@ public:
 template<class decompressor>
 void CommsMerge(decompressor decompress,std::vector<Merge> &mm,std::vector<Decompress> &dd) {

   mergetime-=usecond();
   for(int i=0;i<mm.size();i++){
     auto mp = &mm[i].mpointer[0];
     auto vp0= &mm[i].vpointers[0][0];
     auto vp1= &mm[i].vpointers[1][0];
     auto type= mm[i].type;
-    accelerator_forNB(o,mm[i].buffer_size/2,1,{
+    accelerator_forNB(o,mm[i].buffer_size/2,vobj::Nsimd(),{
       decompress.Exchange(mp,vp0,vp1,type,o);
     });
   }
@@ -1039,7 +1036,12 @@ public:
   int so = sx*rhs.Grid()->_ostride[dimension]; // base offset for start of plane
   if ( !face_table_computed ) {
     face_table.resize(face_idx+1);
-    Gather_plane_table_compute ((GridBase *)_grid,dimension,sx,cbmask,u_comm_offset,face_table[face_idx]);
+    std::vector<std::pair<int,int> > face_table_host ;
+    Gather_plane_table_compute ((GridBase *)_grid,dimension,sx,cbmask,u_comm_offset,face_table_host);
+    face_table[face_idx].resize(face_table_host.size());
+    acceleratorCopyToDevice(&face_table_host[0],
+                            &face_table[face_idx][0],
+                            face_table[face_idx].size()*sizeof(face_table_host[0]));
   }

   // int rank = _grid->_processor;
@@ -1062,20 +1064,20 @@ public:
   }

   send_buf = (cobj *)_grid->ShmBufferTranslate(xmit_to_rank,recv_buf);
-  if ( send_buf==NULL ) {
+  if ( (send_buf==NULL) || Stencil_force_mpi ) {
     send_buf = this->u_send_buf_p;
   }

   // Find out if we get the direct copy.
   void *success = (void *) _grid->ShmBufferTranslate(recv_from_rank,this->u_send_buf_p);
-  if (success==NULL) {
+  if ((success==NULL)||Stencil_force_mpi) {
     // we found a packet that comes from MPI and contributes to this leg of stencil
     shm_receive_only = 0;
   }

   gathertime-=usecond();
   assert(send_buf!=NULL);
   Gather_plane_simple_table(face_table[face_idx],rhs,send_buf,compress,u_comm_offset,so); face_idx++;
   gathertime+=usecond();

   if ( compress.DecompressionStep() ) {
@@ -1172,11 +1174,18 @@ public:

   if ( !face_table_computed ) {
     face_table.resize(face_idx+1);
-    Gather_plane_table_compute ((GridBase *)_grid,dimension,sx,cbmask,u_comm_offset,face_table[face_idx]);
+    std::vector<std::pair<int,int> > face_table_host ;
+
+    Gather_plane_table_compute ((GridBase *)_grid,dimension,sx,cbmask,u_comm_offset,face_table_host);
+    face_table[face_idx].resize(face_table_host.size());
+    acceleratorCopyToDevice(&face_table_host[0],
+                            &face_table[face_idx][0],
+                            face_table[face_idx].size()*sizeof(face_table_host[0]));
   }
   gathermtime-=usecond();

-  Gather_plane_exchange_table(face_table[face_idx],rhs,spointers,dimension,sx,cbmask,compress,permute_type); face_idx++;
+  Gather_plane_exchange_table(face_table[face_idx],rhs,spointers,dimension,sx,cbmask,compress,permute_type);
+  face_idx++;

   gathermtime+=usecond();
   //spointers[0] -- low
@@ -1208,7 +1217,7 @@ public:
   // shm == receive pointer if offnode
   // shm == Translate[send pointer] if on node -- my view of his send pointer
   cobj *shm = (cobj *) _grid->ShmBufferTranslate(recv_from_rank,sp);
-  if (shm==NULL) {
+  if ((shm==NULL)||Stencil_force_mpi) {
     shm = rp;
     // we found a packet that comes from MPI and contributes to this shift.
     // is_same_node is only used in the WilsonStencil, and gets set for this point in the stencil.
@@ -171,7 +171,6 @@ void acceleratorInit(void)
 #ifdef GRID_SYCL

 cl::sycl::queue *theGridAccelerator;

 void acceleratorInit(void)
 {
   int nDevices = 1;
@@ -179,6 +178,10 @@ void acceleratorInit(void)
   cl::sycl::device selectedDevice { selector };
   theGridAccelerator = new sycl::queue (selectedDevice);

+#ifdef GRID_SYCL_LEVEL_ZERO_IPC
+  zeInit(0);
+#endif
+
   char * localRankStr = NULL;
   int rank = 0, world_rank=0;
 #define ENV_LOCAL_RANK_OMPI "OMPI_COMM_WORLD_LOCAL_RANK"
@@ -39,6 +39,10 @@ Author: paboyle <paboyle@ph.ed.ac.uk>
 #ifdef HAVE_MM_MALLOC_H
 #include <mm_malloc.h>
 #endif
+#ifdef __APPLE__
+// no memalign
+inline void *memalign(size_t align, size_t bytes) { return malloc(bytes); }
+#endif

 NAMESPACE_BEGIN(Grid);

@@ -233,6 +237,13 @@ inline int acceleratorIsCommunicable(void *ptr)
 NAMESPACE_END(Grid);
 #include <CL/sycl.hpp>
 #include <CL/sycl/usm.hpp>

+#define GRID_SYCL_LEVEL_ZERO_IPC
+
+#ifdef GRID_SYCL_LEVEL_ZERO_IPC
+#include <level_zero/ze_api.h>
+#include <CL/sycl/backend/level_zero.hpp>
+#endif
 NAMESPACE_BEGIN(Grid);

 extern cl::sycl::queue *theGridAccelerator;
@@ -412,6 +423,8 @@ inline void acceleratorMemSet(void *base,int value,size_t bytes) { hipMemset(bas

 #undef GRID_SIMT

+
+
 #define accelerator
 #define accelerator_inline strong_inline
 #define accelerator_for(iterator,num,nsimd, ... ) thread_for(iterator, num, { __VA_ARGS__ });
@@ -56,6 +56,8 @@ Author: paboyle <paboyle@ph.ed.ac.uk>
 static int
 feenableexcept (unsigned int excepts)
 {
+#if 0
+  // Fails on Apple M1
   static fenv_t fenv;
   unsigned int new_excepts = excepts & FE_ALL_EXCEPT;
   unsigned int old_excepts; // previous masks
@@ -70,6 +72,8 @@ feenableexcept (unsigned int excepts)

   iold_excepts = (int) old_excepts;
   return ( fesetenv (&fenv) ? -1 : iold_excepts );
+#endif
+  return 0;
 }
 #endif

@@ -1,5 +1,5 @@
 # additional include paths necessary to compile the C++ library
-SUBDIRS = Grid HMC benchmarks tests
+SUBDIRS = Grid HMC benchmarks tests examples

 include $(top_srcdir)/doxygen.inc

@@ -236,34 +236,6 @@ int main (int argc, char ** argv)
     Dw.Report();
   }

-  DomainWallFermionRL DwH(Umu,*FGrid,*FrbGrid,*UGrid,*UrbGrid,mass,M5);
-  if (0) {
-    FGrid->Barrier();
-    DwH.ZeroCounters();
-    DwH.Dhop(src,result,0);
-    double t0=usecond();
-    for(int i=0;i<ncall;i++){
-      __SSC_START;
-      DwH.Dhop(src,result,0);
-      __SSC_STOP;
-    }
-    double t1=usecond();
-    FGrid->Barrier();
-
-    double volume=Ls; for(int mu=0;mu<Nd;mu++) volume=volume*latt4[mu];
-    double flops=single_site_flops*volume*ncall;
-
-    std::cout<<GridLogMessage << "Called half prec comms Dw "<<ncall<<" times in "<<t1-t0<<" us"<<std::endl;
-    std::cout<<GridLogMessage << "mflop/s = "<< flops/(t1-t0)<<std::endl;
-    std::cout<<GridLogMessage << "mflop/s per rank = "<< flops/(t1-t0)/NP<<std::endl;
-    std::cout<<GridLogMessage << "mflop/s per node = "<< flops/(t1-t0)/NN<<std::endl;
-    err = ref-result;
-    std::cout<<GridLogMessage << "norm diff "<< norm2(err)<<std::endl;
-
-    assert (norm2(err)< 1.0e-3 );
-    DwH.Report();
-  }
-
   if (1)
   { // Naive wilson dag implementation
     ref = Zero();
@@ -2,7 +2,6 @@
 #include <sstream>
 using namespace std;
 using namespace Grid;
-;

 template<class d>
 struct scal {
@@ -118,30 +117,6 @@ int main (int argc, char ** argv)
     Dw.Report();
   }

-  std::cout << GridLogMessage<< "* SINGLE/HALF"<<std::endl;
-  GparityDomainWallFermionFH DwH(Umu,*FGrid,*FrbGrid,*UGrid,*UrbGrid,mass,M5);
-  if (1) {
-    FGrid->Barrier();
-    DwH.ZeroCounters();
-    DwH.Dhop(src,result,0);
-    double t0=usecond();
-    for(int i=0;i<ncall;i++){
-      __SSC_START;
-      DwH.Dhop(src,result,0);
-      __SSC_STOP;
-    }
-    double t1=usecond();
-    FGrid->Barrier();
-
-    double volume=Ls; for(int mu=0;mu<Nd;mu++) volume=volume*latt4[mu];
-    double flops=2*1320*volume*ncall;
-
-    std::cout<<GridLogMessage << "Called half prec comms Dw "<<ncall<<" times in "<<t1-t0<<" us"<<std::endl;
-    std::cout<<GridLogMessage << "mflop/s = "<< flops/(t1-t0)<<std::endl;
-    std::cout<<GridLogMessage << "mflop/s per rank = "<< flops/(t1-t0)/NP<<std::endl;
-    std::cout<<GridLogMessage << "mflop/s per node = "<< flops/(t1-t0)/NN<<std::endl;
-    DwH.Report();
-  }
-
 GridCartesian * UGrid_d = SpaceTimeGrid::makeFourDimGrid(GridDefaultLatt(), GridDefaultSimd(Nd,vComplexD::Nsimd()),GridDefaultMpi());
 GridRedBlackCartesian * UrbGrid_d = SpaceTimeGrid::makeFourDimRedBlackGrid(UGrid_d);
@@ -815,6 +815,7 @@ AC_CONFIG_FILES(tests/smearing/Makefile)
 AC_CONFIG_FILES(tests/qdpxx/Makefile)
 AC_CONFIG_FILES(tests/testu01/Makefile)
 AC_CONFIG_FILES(benchmarks/Makefile)
+AC_CONFIG_FILES(examples/Makefile)
 AC_OUTPUT

 echo ""
examples/Example_Laplacian.cc (new file, 396 lines)
@@ -0,0 +1,396 @@
#include <Grid/Grid.h>
using namespace Grid;

/*
/////////////////////////////////////////////////////////////////////////////////////////////
// Grid/algorithms/SparseMatrix.h: Interface defining what I expect of a general sparse matrix, such as a Fermion action
/////////////////////////////////////////////////////////////////////////////////////////////
template<class Field> class SparseMatrixBase {
public:
  virtual GridBase *Grid(void) =0;

  virtual void M    (const Field &in, Field &out)=0;
  virtual void Mdag (const Field &in, Field &out)=0;
  virtual void MdagM(const Field &in, Field &out) {
    Field tmp (in.Grid());
    M(in,tmp);
    Mdag(tmp,out);
  }
  virtual void Mdiag   (const Field &in, Field &out)=0;
  virtual void Mdir    (const Field &in, Field &out,int dir, int disp)=0;
  virtual void MdirAll (const Field &in, std::vector<Field> &out)=0;
};
*/

const std::vector<int> directions   ({Xdir,Ydir,Zdir,Xdir,Ydir,Zdir});
const std::vector<int> displacements({1,1,1,-1,-1,-1});

template<class Field> class FreeLaplacianCshift : public SparseMatrixBase<Field>
{
public:
  GridBase *grid;
  FreeLaplacianCshift(GridBase *_grid)
  {
    grid=_grid;
  };
  virtual GridBase *Grid(void) { return grid; };

  virtual void M (const Field &in, Field &out)
  {
    out = Zero();
    for(int mu=0;mu<Nd-1;mu++) {
      out = out + Cshift(in,mu,1) + Cshift(in,mu,-1) - 2.0 * in;
    }
  };
  virtual void Mdag    (const Field &in, Field &out) { M(in,out);};        // Laplacian is hermitian
  virtual void Mdiag   (const Field &in, Field &out)                  {assert(0);}; // Unimplemented need only for multigrid
  virtual void Mdir    (const Field &in, Field &out,int dir, int disp){assert(0);}; // Unimplemented need only for multigrid
  virtual void MdirAll (const Field &in, std::vector<Field> &out)     {assert(0);}; // Unimplemented need only for multigrid
};

template<class Gimpl,class Field> class CovariantLaplacianCshift : public SparseMatrixBase<Field>
{
public:
  INHERIT_GIMPL_TYPES(Gimpl);

  GridBase *grid;
  GaugeField U;

  CovariantLaplacianCshift(GaugeField &_U) :
    grid(_U.Grid()),
    U(_U) { };

  virtual GridBase *Grid(void) { return grid; };

  virtual void M (const Field &in, Field &out)
  {
    out=Zero();
    for(int mu=0;mu<Nd-1;mu++) {
      GaugeLinkField Umu = PeekIndex<LorentzIndex>(U, mu); // NB: Inefficent
      out = out + Gimpl::CovShiftForward(Umu,mu,in);
      out = out + Gimpl::CovShiftBackward(Umu,mu,in);
      out = out - 2.0*in;
    }
  };
  virtual void Mdag    (const Field &in, Field &out) { M(in,out);};        // Laplacian is hermitian
  virtual void Mdiag   (const Field &in, Field &out)                  {assert(0);}; // Unimplemented need only for multigrid
  virtual void Mdir    (const Field &in, Field &out,int dir, int disp){assert(0);}; // Unimplemented need only for multigrid
  virtual void MdirAll (const Field &in, std::vector<Field> &out)     {assert(0);}; // Unimplemented need only for multigrid
};


#define LEG_LOAD(Dir)                                             \
  SE = st.GetEntry(ptype, Dir, ss);                               \
  if (SE->_is_local ) {                                           \
    int perm= SE->_permute;                                       \
    chi = coalescedReadPermute(in[SE->_offset],ptype,perm,lane);  \
  } else {                                                        \
    chi = coalescedRead(buf[SE->_offset],lane);                   \
  }                                                               \
  acceleratorSynchronise();

template<class Field> class FreeLaplacianStencil : public SparseMatrixBase<Field>
{
public:
  typedef typename Field::vector_object siteObject;
  typedef CartesianStencil<siteObject, siteObject, int> StencilImpl;

  GridBase *grid;
  StencilImpl Stencil;
  SimpleCompressor<siteObject> Compressor;

  FreeLaplacianStencil(GridBase *_grid)
    : Stencil (_grid,6,Even,directions,displacements,0), grid(_grid)
  { };

  virtual GridBase *Grid(void) { return grid; };

  virtual void M (const Field &_in, Field &_out)
  {
    ///////////////////////////////////////////////
    // Halo exchange for this geometry of stencil
    ///////////////////////////////////////////////
    Stencil.HaloExchange(_in, Compressor);

    ///////////////////////////////////
    // Arithmetic expressions
    ///////////////////////////////////

    // Views; device friendly/accessible pointers
    auto st = Stencil.View(AcceleratorRead);
    auto buf = st.CommBuf();
    autoView( in  , _in  , AcceleratorRead);
    autoView( out , _out , AcceleratorWrite);

    typedef typename Field::vector_object vobj;
    typedef decltype(coalescedRead(in[0])) calcObj;

    const int Nsimd = vobj::Nsimd();
    const uint64_t NN = grid->oSites();

    accelerator_for( ss, NN, Nsimd, {

      StencilEntry *SE;

      const int lane=acceleratorSIMTlane(Nsimd);

      calcObj chi;
      calcObj res;
      int ptype;

      res = coalescedRead(in[ss])*(-6.0);
      LEG_LOAD(0); res = res + chi;
      LEG_LOAD(1); res = res + chi;
      LEG_LOAD(2); res = res + chi;
      LEG_LOAD(3); res = res + chi;
      LEG_LOAD(4); res = res + chi;
      LEG_LOAD(5); res = res + chi;

      coalescedWrite(out[ss], res,lane);

    });
  };
  virtual void Mdag    (const Field &in, Field &out) { M(in,out);};        // Laplacian is hermitian
  virtual void Mdiag   (const Field &in, Field &out)                  {assert(0);}; // Unimplemented need only for multigrid
  virtual void Mdir    (const Field &in, Field &out,int dir, int disp){assert(0);}; // Unimplemented need only for multigrid
  virtual void MdirAll (const Field &in, std::vector<Field> &out)     {assert(0);}; // Unimplemented need only for multigrid
};

template<class Gimpl,class Field> class CovariantLaplacianStencil : public SparseMatrixBase<Field>
{
public:
  INHERIT_GIMPL_TYPES(Gimpl);

  typedef typename Field::vector_object siteObject;

  template <typename vtype> using iImplDoubledGaugeField = iVector<iScalar<iMatrix<vtype, Nc> >, Nds>;
  typedef iImplDoubledGaugeField<Simd> SiteDoubledGaugeField;
  typedef Lattice<SiteDoubledGaugeField> DoubledGaugeField;

  typedef CartesianStencil<siteObject, siteObject, int> StencilImpl;

  GridBase *grid;
  StencilImpl Stencil;
  SimpleCompressor<siteObject> Compressor;
  DoubledGaugeField Uds;
  CovariantLaplacianStencil(GaugeField &Umu)
    :
    grid(Umu.Grid()),
    Stencil (grid,6,Even,directions,displacements,0),
    Uds(grid)
  {
    for (int mu = 0; mu < Nd; mu++) {
      auto U = PeekIndex<LorentzIndex>(Umu, mu);
      PokeIndex<LorentzIndex>(Uds, U, mu );
      U = adj(Cshift(U, mu, -1));
      PokeIndex<LorentzIndex>(Uds, U, mu + 4);
    }
  };

  virtual GridBase *Grid(void) { return grid; };

  virtual void M (const Field &_in, Field &_out)
  {
    ///////////////////////////////////////////////
    // Halo exchange for this geometry of stencil
    ///////////////////////////////////////////////
    Stencil.HaloExchange(_in, Compressor);

    ///////////////////////////////////
    // Arithmetic expressions
    ///////////////////////////////////
    auto st = Stencil.View(AcceleratorRead);
    auto buf = st.CommBuf();

    autoView( in  , _in  , AcceleratorRead);
    autoView( out , _out , AcceleratorWrite);
    autoView( U   , Uds  , AcceleratorRead);

    typedef typename Field::vector_object vobj;
    typedef decltype(coalescedRead(in[0])) calcObj;
    typedef decltype(coalescedRead(U[0](0))) calcLink;

    const int Nsimd = vobj::Nsimd();
    const uint64_t NN = grid->oSites();

    accelerator_for( ss, NN, Nsimd, {

      StencilEntry *SE;

      const int lane=acceleratorSIMTlane(Nsimd);

      calcObj chi;
      calcObj res;
      calcObj Uchi;
      calcLink UU;
      int ptype;

      res = coalescedRead(in[ss])*(-6.0);

#define LEG_LOAD_MULT(leg,polarisation)           \
      UU = coalescedRead(U[ss](polarisation));    \
      LEG_LOAD(leg);                              \
      mult(&Uchi(), &UU, &chi());                 \
      res = res + Uchi;

      LEG_LOAD_MULT(0,Xp);
      LEG_LOAD_MULT(1,Yp);
      LEG_LOAD_MULT(2,Zp);
      LEG_LOAD_MULT(3,Xm);
      LEG_LOAD_MULT(4,Ym);
      LEG_LOAD_MULT(5,Zm);

      coalescedWrite(out[ss], res,lane);
    });
  };
  virtual void Mdag    (const Field &in, Field &out) { M(in,out);};        // Laplacian is hermitian
  virtual void Mdiag   (const Field &in, Field &out)                  {assert(0);}; // Unimplemented need only for multigrid
  virtual void Mdir    (const Field &in, Field &out,int dir, int disp){assert(0);}; // Unimplemented need only for multigrid
  virtual void MdirAll (const Field &in, std::vector<Field> &out)     {assert(0);}; // Unimplemented need only for multigrid
};

#undef LEG_LOAD_MULT
#undef LEG_LOAD

int main(int argc, char ** argv)
{
  Grid_init(&argc, &argv);

  typedef LatticeColourVector Field;

  auto latt_size   = GridDefaultLatt();
  auto simd_layout = GridDefaultSimd(Nd,vComplex::Nsimd());
  auto mpi_layout  = GridDefaultMpi();

  GridCartesian Grid(latt_size,simd_layout,mpi_layout);
  GridParallelRNG RNG(&Grid); RNG.SeedFixedIntegers(std::vector<int>({45,12,81,9}));

  FreeLaplacianCshift<Field>  FLcs(&Grid);
  FreeLaplacianStencil<Field> FLst(&Grid);

  LatticeGaugeField U(&Grid);

  SU<Nc>::ColdConfiguration(RNG,U);

  std::cout << " Gauge field has norm " <<norm2(U)<<std::endl;

  CovariantLaplacianCshift <PeriodicGimplR,Field> CLcs(U);
  CovariantLaplacianStencil<PeriodicGimplR,Field> CLst(U);

  Field in(&Grid); gaussian(RNG,in);
  Field out_FLcs(&Grid);
  Field out_FLst(&Grid);
  Field out_CLcs(&Grid);
  Field out_CLst(&Grid);
  Field diff(&Grid);

  ////////////////////////////////////////////////////////
  // First test: in free field these should all agree
  ////////////////////////////////////////////////////////
  FLcs.M(in,out_FLcs);
  FLst.M(in,out_FLst);
  CLcs.M(in,out_CLcs);
  CLst.M(in,out_CLst);

  std:: cout << "******************************************************************" <<std::endl;
  std:: cout << " Test A: consistency of four different Laplacian implementations " <<std::endl;
  std:: cout << "******************************************************************" <<std::endl;
  std:: cout << " Input test vector " <<norm2(in)<<std::endl;
  std:: cout << "--------------------------------------------------------" <<std::endl;
  std:: cout << " Free cshift output vector " <<norm2(out_FLcs)<<std::endl;
  std:: cout << " Free stencil output vector " <<norm2(out_FLst)<<std::endl;
  std:: cout << " Cov cshift output vector " <<norm2(out_CLcs)<<std::endl;
  std:: cout << " Cov stencil output vector " <<norm2(out_CLst)<<std::endl;
  std:: cout << "--------------------------------------------------------" <<std::endl;

  diff = out_FLcs - out_FLst;
  std:: cout << " Difference between free Cshift Laplacian and free Stencil Laplacian = " <<norm2(diff)<<std::endl;

  diff = out_FLcs - out_CLcs;
  std:: cout << " Difference between free Cshift Laplacian and covariant Cshift Laplacian = " <<norm2(diff)<<std::endl;

  diff = out_FLcs - out_CLst;
  std:: cout << " Difference between free Cshift Laplacian and covariant Stencil Laplacian = " <<norm2(diff)<<std::endl;
  std:: cout << "--------------------------------------------------------" <<std::endl;


  std:: cout << "******************************************************************" <<std::endl;
  std:: cout << " Test B: gauge covariance " <<std::endl;
  std:: cout << "******************************************************************" <<std::endl;

  LatticeGaugeField U_GT(&Grid); // Gauge transformed field
  LatticeColourMatrix g(&Grid);  // local Gauge xform matrix

  U_GT = U;
  // Make a random xform to teh gauge field
  SU<Nc>::RandomGaugeTransform(RNG,U_GT,g); // Unit gauge

  Field in_GT(&Grid);
  Field out_GT(&Grid);

  Field out_CLcs_GT(&Grid);
  Field out_CLst_GT(&Grid);

  CovariantLaplacianCshift <PeriodicGimplR,Field> CLcs_GT(U_GT);
  CovariantLaplacianStencil<PeriodicGimplR,Field> CLst_GT(U_GT);

  in_GT  = g*in;
  out_GT = g*out_FLcs;

  // Check M^GT_xy in_GT = g(x) M_xy g^dag(y) g(y) in = g(x) out(x)
  CLcs_GT.M(in_GT,out_CLcs_GT);
  CLst_GT.M(in_GT,out_CLst_GT);

  diff = out_CLcs_GT - out_GT;
  std:: cout << " Difference between Gauge xformed result and covariant Cshift Laplacian in xformed gauge = " <<norm2(diff)<<std::endl;

  diff = out_CLst_GT - out_GT;
  std:: cout << " Difference between Gauge xformed result and covariant Stencil Laplacian in xformed gauge = " <<norm2(diff)<<std::endl;
  std:: cout << "--------------------------------------------------------" <<std::endl;


  std:: cout << "******************************************************************" <<std::endl;
  std:: cout << " Test C: compare in free Field to \"Feynman rule\" " <<std::endl;
  std:: cout << "******************************************************************" <<std::endl;

  std::vector<int> dim_mask({1,1,1,0}); // 3d FFT
  FFT theFFT(&Grid);
  Field out(&Grid);
  Field F_out(&Grid);
  Field F_in(&Grid);

  // FFT the random input vector
  theFFT.FFT_dim_mask(F_in,in,dim_mask,FFT::forward);

  // Convolution theorem: multiply by Fourier representation of (discrete) Laplacian to apply diff op
  LatticeComplexD lap(&Grid); lap = Zero();
  LatticeComplexD kmu(&Grid);
  ComplexD ci(0.0,1.0);
  for(int mu=0;mu<3;mu++) {

    RealD TwoPiL = M_PI * 2.0/ latt_size[mu];

    LatticeCoordinate(kmu,mu);
    kmu = TwoPiL * kmu;

    // (e^ik_mu + e^-ik_mu - 2) = 2( cos kmu - 1) ~ 2 (1 - k_mu^2/2 -1 ) = - k_mu^2 + O(k^4)
    lap = lap + 2.0*cos(kmu) - 2.0;

  }
  F_out = lap * F_in;

  // Inverse FFT the result
  theFFT.FFT_dim_mask(out,F_out,dim_mask,FFT::backward);

  std::cout<<"Fourier xformed (in) "<<norm2(F_in)<<std::endl;
  std::cout<<"Fourier xformed Laplacian x (in) "<<norm2(F_out)<<std::endl;

  std::cout<<"Momentum space Laplacian application "<< norm2(out)<<std::endl;
  std::cout<<"Stencil Laplacian application "<< norm2(out_CLcs)<<std::endl;

  diff = out_CLcs - out;
  std::cout<<"diff "<< norm2(diff)<<std::endl;

  Grid_finalize();
}
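Test C in this new example relies on the identity quoted in the code comment above; as a sketch of that reasoning (an editorial annotation, not part of the committed source), the 3d lattice Laplacian is diagonal in momentum space with eigenvalue

\[
\sum_{\mu=1}^{3}\bigl(e^{ik_\mu}+e^{-ik_\mu}-2\bigr)
 = \sum_{\mu=1}^{3} 2\bigl(\cos k_\mu - 1\bigr)
 \;\approx\; -\sum_{\mu=1}^{3} k_\mu^{2} + O(k^{4}),
 \qquad k_\mu = \frac{2\pi n_\mu}{L_\mu},
\]

so multiplying the Fourier-transformed source by `lap` and transforming back should reproduce the Cshift and Stencil results on a free (cold) gauge field.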
examples/Example_Laplacian_smearing.cc (new file, 127 lines)
@@ -0,0 +1,127 @@
#include <Grid/Grid.h>
using namespace Grid;

// Function used for Chebyshev smearing
//
Real MomentumSmearing(Real p2)
{
  return (1 - 4.0*p2) * exp(-p2/4);
}
Real DistillationSmearing(Real p2)
{
  if ( p2 > 0.5 ) return 0.0;
  else return 1.0;
}

// Flip sign to make prop to p^2, not -p^2 relative to last example
template<class Gimpl,class Field> class CovariantLaplacianCshift : public SparseMatrixBase<Field>
{
public:
  INHERIT_GIMPL_TYPES(Gimpl);

  GridBase *grid;
  GaugeField U;

  CovariantLaplacianCshift(GaugeField &_U) :
    grid(_U.Grid()),
    U(_U) { };

  virtual GridBase *Grid(void) { return grid; };

  virtual void M (const Field &in, Field &out)
  {
    out=Zero();
    for(int mu=0;mu<Nd-1;mu++) {
      GaugeLinkField Umu = PeekIndex<LorentzIndex>(U, mu); // NB: Inefficent
      out = out - Gimpl::CovShiftForward(Umu,mu,in);
      out = out - Gimpl::CovShiftBackward(Umu,mu,in);
      out = out + 2.0*in;
    }
  };
  virtual void Mdag    (const Field &in, Field &out) { M(in,out);};        // Laplacian is hermitian
  virtual void Mdiag   (const Field &in, Field &out)                  {assert(0);}; // Unimplemented need only for multigrid
  virtual void Mdir    (const Field &in, Field &out,int dir, int disp){assert(0);}; // Unimplemented need only for multigrid
  virtual void MdirAll (const Field &in, std::vector<Field> &out)     {assert(0);}; // Unimplemented need only for multigrid
};



int main(int argc, char ** argv)
{
  Grid_init(&argc, &argv);

  typedef LatticeColourVector Field;

  auto latt_size   = GridDefaultLatt();
  auto simd_layout = GridDefaultSimd(Nd,vComplex::Nsimd());
  auto mpi_layout  = GridDefaultMpi();

  GridCartesian Grid(latt_size,simd_layout,mpi_layout);
  GridParallelRNG RNG(&Grid); RNG.SeedFixedIntegers(std::vector<int>({45,12,81,9}));


  LatticeGaugeField U(&Grid);

  SU<Nc>::ColdConfiguration(RNG,U);

  typedef CovariantLaplacianCshift <PeriodicGimplR,Field> Laplacian_t;
  Laplacian_t Laplacian(U);


  ColourVector ColourKronecker;
  ColourKronecker = Zero();
  ColourKronecker()()(0) = 1.0;

  Coordinate site({latt_size[0]/2,
                   latt_size[1]/2,
                   latt_size[2]/2,
                   0});

  Field kronecker(&Grid);
  kronecker = Zero();
  pokeSite(ColourKronecker,kronecker,site);


  Field psi(&Grid), chi(&Grid);

  //////////////////////////////////////
  // Classic Wuppertal smearing
  //////////////////////////////////////

  Integer Iterations = 80;
  Real width = 2.0;
  Real coeff = (width*width) / Real(4*Iterations);

  chi=kronecker;
  // chi = (1-p^2/2N)^N kronecker
  for(int n = 0; n < Iterations; ++n) {
    Laplacian.M(chi,psi);
    chi = chi - coeff*psi;
  }

  std::cout << " Wuppertal smeared operator is chi = \n" << chi <<std::endl;

  /////////////////////////////////////
  // Chebyshev smearing
  /////////////////////////////////////
  RealD lo = 0.0;
  RealD hi = 12.0; // Analytic free field bound
  HermitianLinearOperator<Laplacian_t,Field> HermOp(Laplacian);

  std::cout << " Checking spectral range of our POSITIVE definite operator \n";
  PowerMethod<Field> PM;
  PM(HermOp,kronecker);

  // Chebyshev<Field> ChebySmear(lo,hi,20,DistillationSmearing);
  Chebyshev<Field> ChebySmear(lo,hi,20,MomentumSmearing);
  {
    std::ofstream of("chebysmear");
    ChebySmear.csv(of);
  }

  ChebySmear(HermOp,kronecker,chi);

  std::cout << " Chebyshev smeared operator is chi = \n" << chi <<std::endl;

  Grid_finalize();
}
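The Wuppertal loop above iterates chi <- chi - coeff * Laplacian(chi) with coeff = width*width/(4*Iterations); as a sketch of the limit it approximates (an editorial annotation, not in the committed file), writing the positive Laplacian as \(\hat p^2\),

\[
\chi \;=\; \Bigl(1-\tfrac{w^{2}}{4N}\,\hat p^{2}\Bigr)^{N}\,\delta
\;\xrightarrow{\;N\to\infty\;}\; e^{-w^{2}\hat p^{2}/4}\,\delta ,
\]

with w = 2 and N = 80 in this example, i.e. an approximately Gaussian smearing profile of width w applied to the Kronecker source.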
examples/Example_Laplacian_solver.cc (new file, 129 lines)
@@ -0,0 +1,129 @@
#include <Grid/Grid.h>
using namespace Grid;

template<class Field>
void SimpleConjugateGradient(LinearOperatorBase<Field> &HPDop,const Field &b, Field &x)
{
  RealD cp, c, alpha, d, beta, ssq, qq;
  RealD Tolerance=1.0e-10;
  int MaxIterations=10000;

  Field p(b), mmp(b), r(b);

  HPDop.HermOpAndNorm(x, mmp, d, beta);

  r = b - mmp;
  p = r;

  cp = alpha = norm2(p);
  ssq = norm2(b);

  RealD rsq = Tolerance * Tolerance * ssq;

  for (int k = 1; k <= MaxIterations; k++) {
    c = cp;

    HPDop.HermOp(p, mmp);

    d = real(innerProduct(p,mmp));

    alpha = c / d;

    r = r - alpha *mmp;
    cp = norm2(r);
    beta = cp / c;

    x = x + alpha* p ;
    p = r + beta* p ;

    std::cout << "iteration "<<k<<" cp " <<std::sqrt(cp/ssq) << std::endl;
    if (cp <= rsq) {
      return;
    }
  }
  assert(0);
}



// Flip sign to make prop to p^2, not -p^2 relative to last example
template<class Gimpl,class Field> class CovariantLaplacianCshift : public SparseMatrixBase<Field>
{
public:
  INHERIT_GIMPL_TYPES(Gimpl);

  GridBase *grid;
  GaugeField U;
  RealD m2=1.0e-2;
  CovariantLaplacianCshift(GaugeField &_U) :
    grid(_U.Grid()),
    U(_U) { };

  virtual GridBase *Grid(void) { return grid; };

  virtual void M (const Field &in, Field &out)
  {
    out=Zero();
    for(int mu=0;mu<Nd-1;mu++) {
      GaugeLinkField Umu = PeekIndex<LorentzIndex>(U, mu); // NB: Inefficent
      out = out - Gimpl::CovShiftForward(Umu,mu,in);
      out = out - Gimpl::CovShiftBackward(Umu,mu,in);
      out = out + 2.0*in + m2*in;
    }
  };
  virtual void Mdag    (const Field &in, Field &out) { M(in,out);};        // Laplacian is hermitian
  virtual void Mdiag   (const Field &in, Field &out)                  {assert(0);}; // Unimplemented need only for multigrid
  virtual void Mdir    (const Field &in, Field &out,int dir, int disp){assert(0);}; // Unimplemented need only for multigrid
  virtual void MdirAll (const Field &in, std::vector<Field> &out)     {assert(0);}; // Unimplemented need only for multigrid
};



int main(int argc, char ** argv)
{
  Grid_init(&argc, &argv);

  typedef LatticeColourVector Field;

  auto latt_size   = GridDefaultLatt();
  auto simd_layout = GridDefaultSimd(Nd,vComplex::Nsimd());
  auto mpi_layout  = GridDefaultMpi();

  GridCartesian Grid(latt_size,simd_layout,mpi_layout);
  GridParallelRNG RNG(&Grid); RNG.SeedFixedIntegers(std::vector<int>({45,12,81,9}));


  LatticeGaugeField U(&Grid);

  SU<Nc>::ColdConfiguration(RNG,U);

  typedef CovariantLaplacianCshift <PeriodicGimplR,Field> Laplacian_t;
  Laplacian_t Laplacian(U);


  ColourVector ColourKronecker;
  ColourKronecker = Zero();
  ColourKronecker()()(0) = 1.0;

  Coordinate site({0,0,0,0}); // Point source at origin

  Field kronecker(&Grid);
  kronecker = Zero();
  pokeSite(ColourKronecker,kronecker,site);

  Field psi(&Grid); psi=Zero();

  HermitianLinearOperator<Laplacian_t,Field> HermOp(Laplacian);
  SimpleConjugateGradient(HermOp, kronecker,psi);

  Field r(&Grid);
  Laplacian.M(psi,r);
  r=kronecker-r;

  std::cout << "True residual "<< norm2(r) <<std::endl;

  // Optionally print the result vector
  // std::cout << psi<<std::endl;

  Grid_finalize();
}
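SimpleConjugateGradient above follows the textbook CG recurrence; as a brief reference sketch (an editorial summary, not part of the commit), the per-iteration update it implements for the Hermitian positive-definite operator \(A\) is

\[
\alpha_k=\frac{(r_k,r_k)}{(p_k,A p_k)},\quad
x_{k+1}=x_k+\alpha_k p_k,\quad
r_{k+1}=r_k-\alpha_k A p_k,\quad
\beta_k=\frac{(r_{k+1},r_{k+1})}{(r_k,r_k)},\quad
p_{k+1}=r_{k+1}+\beta_k p_k,
\]

stopping when \(\lVert r_{k+1}\rVert^{2} \le \mathrm{Tolerance}^{2}\,\lVert b\rVert^{2}\), which is exactly the cp <= rsq test in the loop.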
examples/Example_Mobius_spectrum.cc (new file, 314 lines)
@@ -0,0 +1,314 @@
/*
 * Warning: This code illustrative only: not well tested, and not meant for production use
 * without regression / tests being applied
 */

#include <Grid/Grid.h>

using namespace std;
using namespace Grid;

template<class Gimpl,class Field> class CovariantLaplacianCshift : public SparseMatrixBase<Field>
{
public:
  INHERIT_GIMPL_TYPES(Gimpl);

  GridBase *grid;
  GaugeField U;

  CovariantLaplacianCshift(GaugeField &_U) :
    grid(_U.Grid()),
    U(_U) { };

  virtual GridBase *Grid(void) { return grid; };

  virtual void M (const Field &in, Field &out)
  {
    out=Zero();
    for(int mu=0;mu<Nd-1;mu++) {
      GaugeLinkField Umu = PeekIndex<LorentzIndex>(U, mu); // NB: Inefficent
      out = out - Gimpl::CovShiftForward(Umu,mu,in);
      out = out - Gimpl::CovShiftBackward(Umu,mu,in);
      out = out + 2.0*in;
    }
  };
  virtual void Mdag    (const Field &in, Field &out) { M(in,out);};        // Laplacian is hermitian
  virtual void Mdiag   (const Field &in, Field &out)                  {assert(0);}; // Unimplemented need only for multigrid
  virtual void Mdir    (const Field &in, Field &out,int dir, int disp){assert(0);}; // Unimplemented need only for multigrid
  virtual void MdirAll (const Field &in, std::vector<Field> &out)     {assert(0);}; // Unimplemented need only for multigrid
};

void MakePhase(Coordinate mom,LatticeComplex &phase)
{
  GridBase *grid = phase.Grid();
  auto latt_size = grid->GlobalDimensions();
  ComplexD ci(0.0,1.0);
  phase=Zero();

  LatticeComplex coor(phase.Grid());
  for(int mu=0;mu<Nd;mu++){
    RealD TwoPiL = M_PI * 2.0/ latt_size[mu];
    LatticeCoordinate(coor,mu);
    phase = phase + (TwoPiL * mom[mu]) * coor;
  }
  phase = exp(phase*ci);
}
void PointSource(Coordinate &coor,LatticePropagator &source)
{
  //  Coordinate coor({0,0,0,0});
  source=Zero();
  SpinColourMatrix kronecker; kronecker=1.0;
  pokeSite(kronecker,source,coor);
}
void Z2WallSource(GridParallelRNG &RNG,int tslice,LatticePropagator &source)
{
  GridBase *grid = source.Grid();
  LatticeComplex noise(grid);
  LatticeComplex zz(grid); zz=Zero();
  LatticeInteger t(grid);

  RealD nrm=1.0/sqrt(2);
  bernoulli(RNG, noise); // 0,1 50:50

  noise = (2.*noise - Complex(1,1))*nrm;

  LatticeCoordinate(t,Tdir);
  noise = where(t==Integer(tslice), noise, zz);

  source = 1.0;
  source = source*noise;
  std::cout << " Z2 wall " << norm2(source) << std::endl;
}
template<class Field>
void GaussianSmear(LatticeGaugeField &U,Field &unsmeared,Field &smeared)
{
  typedef CovariantLaplacianCshift <PeriodicGimplR,Field> Laplacian_t;
  Laplacian_t Laplacian(U);

  Integer Iterations = 40;
  Real width = 2.0;
  Real coeff = (width*width) / Real(4*Iterations);

  Field tmp(U.Grid());
  smeared=unsmeared;
  //  chi = (1-p^2/2N)^N kronecker
  for(int n = 0; n < Iterations; ++n) {
    Laplacian.M(smeared,tmp);
    smeared = smeared - coeff*tmp;
    std::cout << " smear iter " << n<<" " <<norm2(smeared)<<std::endl;
  }
}
void GaussianSource(Coordinate &site,LatticeGaugeField &U,LatticePropagator &source)
{
  LatticePropagator tmp(source.Grid());
  PointSource(site,source);
  std::cout << " GaussianSource Kronecker "<< norm2(source)<<std::endl;
  tmp = source;
  GaussianSmear(U,tmp,source);
  std::cout << " GaussianSource Smeared "<< norm2(source)<<std::endl;
}
void GaussianWallSource(GridParallelRNG &RNG,int tslice,LatticeGaugeField &U,LatticePropagator &source)
{
  Z2WallSource(RNG,tslice,source);
  auto tmp = source;
  GaussianSmear(U,tmp,source);
}
void SequentialSource(int tslice,Coordinate &mom,LatticePropagator &spectator,LatticePropagator &source)
{
  assert(mom.size()==Nd);
  assert(mom[Tdir] == 0);

  GridBase * grid = spectator.Grid();


  LatticeInteger ts(grid);
  LatticeCoordinate(ts,Tdir);
  source = Zero();
  source = where(ts==Integer(tslice),spectator,source); // Stick in a slice of the spectator, zero everywhere else

  LatticeComplex phase(grid);
  MakePhase(mom,phase);

  source = source *phase;
}
template<class Action>
void Solve(Action &D,LatticePropagator &source,LatticePropagator &propagator)
{
  GridBase *UGrid = D.GaugeGrid();
  GridBase *FGrid = D.FermionGrid();

  LatticeFermion src4  (UGrid);
  LatticeFermion src5  (FGrid);
  LatticeFermion result5(FGrid);
  LatticeFermion result4(UGrid);

  ConjugateGradient<LatticeFermion> CG(1.0e-8,100000);
  SchurRedBlackDiagMooeeSolve<LatticeFermion> schur(CG);
  ZeroGuesser<LatticeFermion> ZG; // Could be a DeflatedGuesser if have eigenvectors
  for(int s=0;s<Nd;s++){
    for(int c=0;c<Nc;c++){
      PropToFerm<Action>(src4,source,s,c);

      D.ImportPhysicalFermionSource(src4,src5);

      result5=Zero();
      schur(D,src5,result5,ZG);
      std::cout<<GridLogMessage
               <<"spin "<<s<<" color "<<c
               <<" norm2(src5d) " <<norm2(src5)
               <<" norm2(result5d) "<<norm2(result5)<<std::endl;

      D.ExportPhysicalFermionSolution(result5,result4);

      FermToProp<Action>(propagator,result4,s,c);
    }
  }
}

class MesonFile: Serializable {
public:
  GRID_SERIALIZABLE_CLASS_MEMBERS(MesonFile, std::vector<std::vector<Complex> >, data);
};

void MesonTrace(std::string file,LatticePropagator &q1,LatticePropagator &q2,LatticeComplex &phase)
{
  const int nchannel=4;
  Gamma::Algebra Gammas[nchannel][2] = {
    {Gamma::Algebra::Gamma5      ,Gamma::Algebra::Gamma5},
    {Gamma::Algebra::GammaTGamma5,Gamma::Algebra::GammaTGamma5},
    {Gamma::Algebra::GammaTGamma5,Gamma::Algebra::Gamma5},
    {Gamma::Algebra::Gamma5      ,Gamma::Algebra::GammaTGamma5}
  };

  Gamma G5(Gamma::Algebra::Gamma5);

  LatticeComplex meson_CF(q1.Grid());
  MesonFile MF;

  for(int ch=0;ch<nchannel;ch++){

    Gamma Gsrc(Gammas[ch][0]);
    Gamma Gsnk(Gammas[ch][1]);

    meson_CF = trace(G5*adj(q1)*G5*Gsnk*q2*adj(Gsrc));

    std::vector<TComplex> meson_T;
    sliceSum(meson_CF,meson_T, Tdir);

    int nt=meson_T.size();

    std::vector<Complex> corr(nt);
    for(int t=0;t<nt;t++){
      corr[t] = TensorRemove(meson_T[t]); // Yes this is ugly, not figured a work around
      std::cout << " channel "<<ch<<" t "<<t<<" " <<corr[t]<<std::endl;
    }
    MF.data.push_back(corr);
  }

  {
    XmlWriter WR(file);
    write(WR,"MesonFile",MF);
  }
}

int main (int argc, char ** argv)
{
  const int Ls=8;

  Grid_init(&argc,&argv);

  // Double precision grids
  GridCartesian         * UGrid   = SpaceTimeGrid::makeFourDimGrid(GridDefaultLatt(),
                                                                   GridDefaultSimd(Nd,vComplex::Nsimd()),
                                                                   GridDefaultMpi());
  GridRedBlackCartesian * UrbGrid = SpaceTimeGrid::makeFourDimRedBlackGrid(UGrid);
  GridCartesian         * FGrid   = SpaceTimeGrid::makeFiveDimGrid(Ls,UGrid);
  GridRedBlackCartesian * FrbGrid = SpaceTimeGrid::makeFiveDimRedBlackGrid(Ls,UGrid);

  //////////////////////////////////////////////////////////////////////
  // You can manage seeds however you like.
  // Recommend SeedUniqueString.
  //////////////////////////////////////////////////////////////////////
  std::vector<int> seeds4({1,2,3,4});
  GridParallelRNG RNG4(UGrid); RNG4.SeedFixedIntegers(seeds4);

  LatticeGaugeField Umu(UGrid);
  std::string config;
  if( argc > 1 && argv[1][0] != '-' )
  {
    std::cout<<GridLogMessage <<"Loading configuration from "<<argv[1]<<std::endl;
    FieldMetaData header;
    NerscIO::readConfiguration(Umu, header, argv[1]);
    config=argv[1];
  }
  else
  {
    std::cout<<GridLogMessage <<"Using hot configuration"<<std::endl;
    SU<Nc>::ColdConfiguration(Umu);
    //    SU<Nc>::HotConfiguration(RNG4,Umu);
    config="HotConfig";
  }

  std::vector<RealD> masses({ 0.03,0.04,0.45} ); // u/d, s, c ??

  int nmass = masses.size();

  std::vector<MobiusFermionR *> FermActs;

  std::cout<<GridLogMessage <<"======================"<<std::endl;
  std::cout<<GridLogMessage <<"MobiusFermion action as Scaled Shamir kernel"<<std::endl;
  std::cout<<GridLogMessage <<"======================"<<std::endl;

  for(auto mass: masses) {

    RealD M5=1.0;
    RealD b=1.5;// Scale factor b+c=2, b-c=1
    RealD c=0.5;

    FermActs.push_back(new MobiusFermionR(Umu,*FGrid,*FrbGrid,*UGrid,*UrbGrid,mass,M5,b,c));

  }

  LatticePropagator point_source(UGrid);
  LatticePropagator wall_source(UGrid);
  LatticePropagator gaussian_source(UGrid);

  Coordinate Origin({0,0,0,0});
  PointSource   (Origin,point_source);
  Z2WallSource  (RNG4,0,wall_source);
  GaussianSource(Origin,Umu,gaussian_source);

  std::vector<LatticePropagator> PointProps(nmass,UGrid);
  std::vector<LatticePropagator> GaussProps(nmass,UGrid);
  std::vector<LatticePropagator> Z2Props   (nmass,UGrid);

  for(int m=0;m<nmass;m++) {

    Solve(*FermActs[m],point_source   ,PointProps[m]);
    Solve(*FermActs[m],gaussian_source,GaussProps[m]);
    Solve(*FermActs[m],wall_source    ,Z2Props[m]);

  }

  LatticeComplex phase(UGrid);
  Coordinate mom({0,0,0,0});
  MakePhase(mom,phase);

  for(int m1=0 ;m1<nmass;m1++) {
    for(int m2=m1;m2<nmass;m2++) {
      std::stringstream ssp,ssg,ssz;

      ssp<<config<< "_m" << m1 << "_m"<< m2 << "_point_meson.xml";
      ssg<<config<< "_m" << m1 << "_m"<< m2 << "_smeared_meson.xml";
      ssz<<config<< "_m" << m1 << "_m"<< m2 << "_wall_meson.xml";

      MesonTrace(ssp.str(),PointProps[m1],PointProps[m2],phase);
      MesonTrace(ssg.str(),GaussProps[m1],GaussProps[m2],phase);
      MesonTrace(ssz.str(),Z2Props[m1],Z2Props[m2],phase);
    }}

  Grid_finalize();
}
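The contraction in MesonTrace corresponds, per channel, to a standard zero-momentum two-point function; written out as a sketch (an editorial annotation, not in the committed file):

\[
C_{\Gamma_{\rm snk}\Gamma_{\rm src}}(t)
 = \sum_{\vec x}\,\mathrm{tr}\Bigl[\gamma_5\, q_1^{\dagger}(\vec x,t)\,\gamma_5\;\Gamma_{\rm snk}\, q_2(\vec x,t)\,\Gamma_{\rm src}^{\dagger}\Bigr],
\]

which is what `trace(G5*adj(q1)*G5*Gsnk*q2*adj(Gsrc))` followed by `sliceSum(...,Tdir)` evaluates for the four Gamma5 / GammaTGamma5 source-sink combinations.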
examples/Makefile.am (new file, 6 lines)
@@ -0,0 +1,6 @@
SUBDIRS = .

include Make.inc
@@ -82,3 +82,18 @@ for f in $TESTS; do
   echo >> Make.inc
 done
 cd ..
+
+# examples Make.inc
+cd $home/examples/
+echo> Make.inc
+TESTS=`ls *.cc`
+TESTLIST=`echo ${TESTS} | sed s/.cc//g `
+echo bin_PROGRAMS = ${TESTLIST} > Make.inc
+echo >> Make.inc
+for f in $TESTS; do
+  BNAME=`basename $f .cc`
+  echo ${BNAME}_SOURCES=$f >> Make.inc
+  echo ${BNAME}_LDADD='$(top_builddir)/Grid/libGrid.a'>> Make.inc
+  echo >> Make.inc
+done
+cd ..
@@ -29,19 +29,12 @@ Author: Peter Boyle <paboyle@ph.ed.ac.uk>

 using namespace std;
 using namespace Grid;
-;

-template<class d>
-struct scal {
-  d internal;
-};
-
-Gamma::Algebra Gmu [] = {
-  Gamma::Algebra::GammaX,
-  Gamma::Algebra::GammaY,
-  Gamma::Algebra::GammaZ,
-  Gamma::Algebra::GammaT
-};
+#if 1
+int main (int argc, char ** argv) {}
+#else

 int main (int argc, char ** argv)
 {
@@ -124,3 +117,4 @@ int main (int argc, char ** argv)

   Grid_finalize();
 }
+#endif