Mirror of https://github.com/paboyle/Grid.git (synced 2025-06-23 10:12:02 +01:00)

Compare commits: 20 commits on branch feature/ca... (head: aab3bcb46f)
Commits:

  aab3bcb46f
  da06d15f73
  e8b1251b8c
  fad5a74a4b
  e83f6a6ae9
  0c1618197f
  f49d5c2d22
  a3b022d469
  48772f0976
  c322420580
  6283d11d50
  86f4e17928
  6616d5d090
  215df671be
  1b6b12589f
  3082ab8252
  add86cd7f4
  0b6fd20c54
  e83423fee6
  b4f8e87982
@@ -113,11 +113,6 @@ private:
   static uint64_t DeviceToHostBytes;
   static uint64_t HostToDeviceXfer;
   static uint64_t DeviceToHostXfer;
 
-  static uint64_t DeviceAccesses;
-  static uint64_t HostAccesses;
-  static uint64_t DeviceAccessBytes;
-  static uint64_t HostAccessBytes;
-
 private:
 #ifndef GRID_UVM
@@ -157,7 +152,6 @@ private:
 
   // static void LRUupdate(AcceleratorViewEntry &AccCache);
   static void LRUinsert(AcceleratorViewEntry &AccCache);
-  static void LRUinsertback(AcceleratorViewEntry &AccCache);
   static void LRUremove(AcceleratorViewEntry &AccCache);
 
   // manage entries in the table
@@ -23,11 +23,6 @@ uint64_t MemoryManager::HostToDeviceBytes;
 uint64_t MemoryManager::DeviceToHostBytes;
 uint64_t MemoryManager::HostToDeviceXfer;
 uint64_t MemoryManager::DeviceToHostXfer;
-uint64_t MemoryManager::DeviceAccesses;
-uint64_t MemoryManager::HostAccesses;
-uint64_t MemoryManager::DeviceAccessBytes;
-uint64_t MemoryManager::HostAccessBytes;
-
 
 ////////////////////////////////////
 // Priority ordering for unlocked entries
@@ -91,14 +86,6 @@ void MemoryManager::LRUinsert(AcceleratorViewEntry &AccCache)
   AccCache.LRU_valid = 1;
   DeviceLRUBytes+=AccCache.bytes;
 }
-void MemoryManager::LRUinsertback(AcceleratorViewEntry &AccCache)
-{
-  assert(AccCache.LRU_valid==0);
-  LRU.push_back(AccCache.CpuPtr);
-  AccCache.LRU_entry = --LRU.end();
-  AccCache.LRU_valid = 1;
-  DeviceLRUBytes+=AccCache.bytes;
-}
 void MemoryManager::LRUremove(AcceleratorViewEntry &AccCache)
 {
   assert(AccCache.LRU_valid==1);
@@ -142,7 +129,6 @@ void MemoryManager::Evict(AcceleratorViewEntry &AccCache)
   dprintf("MemoryManager: Evict(%llx) %llx\n",(uint64_t)AccCache.CpuPtr,(uint64_t)AccCache.AccPtr);
   assert(AccCache.accLock==0);
   assert(AccCache.cpuLock==0);
-
   if(AccCache.state==AccDirty) {
     Flush(AccCache);
   }
@@ -245,9 +231,6 @@ uint64_t MemoryManager::AcceleratorViewOpen(uint64_t CpuPtr,size_t bytes,ViewMod
     EntryCreate(CpuPtr,bytes,mode,hint);
   }
 
-  DeviceAccesses++;
-  DeviceAccessBytes+=bytes;
-
   auto AccCacheIterator = EntryLookup(CpuPtr);
   auto & AccCache = AccCacheIterator->second;
   if (!AccCache.AccPtr) {
@@ -366,10 +349,6 @@ void MemoryManager::CpuViewClose(uint64_t CpuPtr)
   assert(AccCache.accLock==0);
 
   AccCache.cpuLock--;
-
-  if(AccCache.cpuLock==0) {
-    LRUinsertback(AccCache);
-  }
 }
 /*
  * Action State StateNext Flush Clone
@@ -392,9 +371,6 @@ uint64_t MemoryManager::CpuViewOpen(uint64_t CpuPtr,size_t bytes,ViewMode mode,V
     EntryCreate(CpuPtr,bytes,mode,transient);
   }
 
-  HostAccesses++;
-  HostAccessBytes+=bytes;
-
   auto AccCacheIterator = EntryLookup(CpuPtr);
   auto & AccCache = AccCacheIterator->second;
 
@@ -440,12 +416,6 @@ uint64_t MemoryManager::CpuViewOpen(uint64_t CpuPtr,size_t bytes,ViewMode mode,V
 
   AccCache.transient= transient? EvictNext : 0;
 
-  // If view is opened on host remove from LRU
-  // Host close says evict next from device
-  if(AccCache.LRU_valid==1){
-    LRUremove(AccCache);
-  }
-
   return AccCache.CpuPtr;
 }
 void MemoryManager::NotifyDeletion(void *_ptr)
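The LRU bookkeeping touched above relies on storing a std::list iterator inside each cache entry, so moving an entry in or out of the eviction queue is O(1). A minimal standalone sketch of that iterator-handle idiom, with simplified, hypothetical names (this is not Grid's actual AcceleratorViewEntry):

  #include <cassert>
  #include <cstddef>
  #include <cstdint>
  #include <list>

  // Simplified cache entry; mirrors the pattern of LRUinsert/LRUremove above.
  struct Entry {
    uint64_t CpuPtr;
    size_t   bytes;
    int      LRU_valid = 0;
    std::list<uint64_t>::iterator LRU_entry; // handle into the LRU list
  };

  static std::list<uint64_t> LRU;            // front = evict first
  static uint64_t DeviceLRUBytes = 0;

  void LRUinsert(Entry &e) {                 // append at most-recently-used end
    assert(e.LRU_valid == 0);
    LRU.push_back(e.CpuPtr);
    e.LRU_entry = --LRU.end();               // list iterators stay valid until erase
    e.LRU_valid = 1;
    DeviceLRUBytes += e.bytes;
  }

  void LRUremove(Entry &e) {                 // O(1) removal via the stored iterator
    assert(e.LRU_valid == 1);
    LRU.erase(e.LRU_entry);
    e.LRU_valid = 0;
    DeviceLRUBytes -= e.bytes;
  }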
@@ -12,10 +12,6 @@ uint64_t MemoryManager::HostToDeviceBytes;
 uint64_t MemoryManager::DeviceToHostBytes;
 uint64_t MemoryManager::HostToDeviceXfer;
 uint64_t MemoryManager::DeviceToHostXfer;
-uint64_t MemoryManager::DeviceAccesses;
-uint64_t MemoryManager::HostAccesses;
-uint64_t MemoryManager::DeviceAccessBytes;
-uint64_t MemoryManager::HostAccessBytes;
 
 void MemoryManager::ViewClose(void* AccPtr,ViewMode mode){};
 void *MemoryManager::ViewOpen(void* CpuPtr,size_t bytes,ViewMode mode,ViewAdvise hint){ return CpuPtr; };
@@ -53,10 +53,11 @@ public:
   // Communicator should know nothing of the physics grid, only processor grid.
   ////////////////////////////////////////////
   int _Nprocessors;             // How many in all
-  Coordinate _processors;       // Which dimensions get relayed out over processors lanes.
   int _processor;               // linear processor rank
-  Coordinate _processor_coor;   // linear processor coordinate
   unsigned long _ndimension;
+  Coordinate _shm_processors;   // Which dimensions get relayed out over processors lanes.
+  Coordinate _processors;       // Which dimensions get relayed out over processors lanes.
+  Coordinate _processor_coor;   // linear processor coordinate
   static Grid_MPI_Comm communicator_world;
   Grid_MPI_Comm communicator;
   std::vector<Grid_MPI_Comm> communicator_halo;
@@ -97,8 +98,9 @@ public:
   int  BossRank(void) ;
   int  ThisRank(void) ;
   const Coordinate & ThisProcessorCoor(void) ;
+  const Coordinate & ShmGrid(void) { return _shm_processors; } ;
   const Coordinate & ProcessorGrid(void) ;
   int ProcessorCount(void) ;
 
   ////////////////////////////////////////////////////////////////////////////////
   // very VERY rarely (Log, serial RNG) we need world without a grid
@@ -142,16 +144,16 @@ public:
                          int bytes);
 
   double StencilSendToRecvFrom(void *xmit,
-                               int xmit_to_rank,
+                               int xmit_to_rank,int do_xmit,
                                void *recv,
-                               int recv_from_rank,
+                               int recv_from_rank,int do_recv,
                                int bytes,int dir);
 
   double StencilSendToRecvFromBegin(std::vector<CommsRequest_t> &list,
                                     void *xmit,
-                                    int xmit_to_rank,
+                                    int xmit_to_rank,int do_xmit,
                                     void *recv,
-                                    int recv_from_rank,
+                                    int recv_from_rank,int do_recv,
                                     int bytes,int dir);
 
 
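The new _shm_processors coordinate records, dimension by dimension, how many ranks of the processor grid share one shared-memory node, and ShmGrid() exposes it. A hedged usage sketch in Grid's own types (the 2x2x2x4 layout is a made-up example, and shm[d] is assumed to divide processors[d]):

  // Illustrative only: derive the inter-node grid from the new ShmGrid() accessor.
  Coordinate processors({2,2,2,4});           // 32 MPI ranks in a 4d grid
  CartesianCommunicator comm(processors);

  const Coordinate &shm = comm.ShmGrid();     // ranks per node, per dimension
  Coordinate nodes(processors.size());
  for(int d=0; d<(int)processors.size(); d++){
    nodes[d] = processors[d] / shm[d];        // inter-node grid factor
  }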
@@ -106,7 +106,7 @@ CartesianCommunicator::CartesianCommunicator(const Coordinate &processors)
   // Remap using the shared memory optimising routine
   // The remap creates a comm which must be freed
   ////////////////////////////////////////////////////
-  GlobalSharedMemory::OptimalCommunicator (processors,optimal_comm);
+  GlobalSharedMemory::OptimalCommunicator (processors,optimal_comm,_shm_processors);
   InitFromMPICommunicator(processors,optimal_comm);
   SetCommunicator(optimal_comm);
   ///////////////////////////////////////////////////
@@ -124,12 +124,13 @@ CartesianCommunicator::CartesianCommunicator(const Coordinate &processors,const
   int parent_ndimension = parent._ndimension; assert(_ndimension >= parent._ndimension);
   Coordinate parent_processor_coor(_ndimension,0);
   Coordinate parent_processors    (_ndimension,1);
+  Coordinate shm_processors       (_ndimension,1);
   // Can make 5d grid from 4d etc...
   int pad = _ndimension-parent_ndimension;
   for(int d=0;d<parent_ndimension;d++){
     parent_processor_coor[pad+d]=parent._processor_coor[d];
     parent_processors    [pad+d]=parent._processors[d];
+    shm_processors       [pad+d]=parent._shm_processors[d];
   }
 
   //////////////////////////////////////////////////////////////////////////////////////////////////////
@@ -154,6 +155,7 @@ CartesianCommunicator::CartesianCommunicator(const Coordinate &processors,const
       ccoor[d] = parent_processor_coor[d] % processors[d];
       scoor[d] = parent_processor_coor[d] / processors[d];
       ssize[d] = parent_processors[d]     / processors[d];
+      if ( processors[d] < shm_processors[d] ) shm_processors[d] = processors[d]; // subnode splitting.
     }
 
     // rank within subcomm ; srank is rank of subcomm within blocks of subcomms
@@ -335,22 +337,22 @@ void CartesianCommunicator::SendToRecvFrom(void *xmit,
 }
 // Basic Halo comms primitive
 double CartesianCommunicator::StencilSendToRecvFrom( void *xmit,
-                                                     int dest,
+                                                     int dest, int dox,
                                                      void *recv,
-                                                     int from,
+                                                     int from, int dor,
                                                      int bytes,int dir)
 {
   std::vector<CommsRequest_t> list;
-  double offbytes = StencilSendToRecvFromBegin(list,xmit,dest,recv,from,bytes,dir);
+  double offbytes = StencilSendToRecvFromBegin(list,xmit,dest,dox,recv,from,dor,bytes,dir);
   StencilSendToRecvFromComplete(list,dir);
   return offbytes;
 }
 
 double CartesianCommunicator::StencilSendToRecvFromBegin(std::vector<CommsRequest_t> &list,
                                                          void *xmit,
-                                                         int dest,
+                                                         int dest,int dox,
                                                          void *recv,
-                                                         int from,
+                                                         int from,int dor,
                                                          int bytes,int dir)
 {
   int ncomm =communicator_halo.size();
@@ -370,28 +372,32 @@ double CartesianCommunicator::StencilSendToRecvFromBegin(std::vector<CommsReques
   double off_node_bytes=0.0;
   int tag;
 
-  if ( (gfrom ==MPI_UNDEFINED) || Stencil_force_mpi ) {
-    tag= dir+from*32;
-    ierr=MPI_Irecv(recv, bytes, MPI_CHAR,from,tag,communicator_halo[commdir],&rrq);
-    assert(ierr==0);
-    list.push_back(rrq);
-    off_node_bytes+=bytes;
+  if ( dox ) {
+    if ( (gfrom ==MPI_UNDEFINED) || Stencil_force_mpi ) {
+      tag= dir+from*32;
+      ierr=MPI_Irecv(recv, bytes, MPI_CHAR,from,tag,communicator_halo[commdir],&rrq);
+      assert(ierr==0);
+      list.push_back(rrq);
+      off_node_bytes+=bytes;
+    }
   }
 
-  if ( (gdest == MPI_UNDEFINED) || Stencil_force_mpi ) {
-    tag= dir+_processor*32;
-    ierr =MPI_Isend(xmit, bytes, MPI_CHAR,dest,tag,communicator_halo[commdir],&xrq);
-    assert(ierr==0);
-    list.push_back(xrq);
-    off_node_bytes+=bytes;
-  } else {
-    // TODO : make a OMP loop on CPU, call threaded bcopy
-    void *shm = (void *) this->ShmBufferTranslate(dest,recv);
-    assert(shm!=NULL);
-    // std::cout <<"acceleratorCopyDeviceToDeviceAsynch"<< std::endl;
-    acceleratorCopyDeviceToDeviceAsynch(xmit,shm,bytes);
+  if (dor) {
+    if ( (gdest == MPI_UNDEFINED) || Stencil_force_mpi ) {
+      tag= dir+_processor*32;
+      ierr =MPI_Isend(xmit, bytes, MPI_CHAR,dest,tag,communicator_halo[commdir],&xrq);
+      assert(ierr==0);
+      list.push_back(xrq);
+      off_node_bytes+=bytes;
+    } else {
+      // TODO : make a OMP loop on CPU, call threaded bcopy
+      void *shm = (void *) this->ShmBufferTranslate(dest,recv);
+      assert(shm!=NULL);
+      // std::cout <<"acceleratorCopyDeviceToDeviceAsynch"<< std::endl;
+      acceleratorCopyDeviceToDeviceAsynch(xmit,shm,bytes);
+    }
   }
 
   if ( CommunicatorPolicy == CommunicatorPolicySequential ) {
     this->StencilSendToRecvFromComplete(list,dir);
   }
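The new dox/dor flags let the stencil skip posting one side of a halo exchange entirely. A minimal standalone sketch of the same guard pattern in plain MPI (not Grid's communicator; the function and parameter names here are illustrative):

  // Conditional nonblocking exchange: post each direction only when requested,
  // accumulate requests for a later MPI_Waitall by the caller.
  #include <mpi.h>
  #include <vector>

  double conditional_exchange(void *xmit, int dest, int do_send,
                              void *recv, int from, int do_recv,
                              int bytes, int tag, MPI_Comm comm,
                              std::vector<MPI_Request> &list)
  {
    double off_node_bytes = 0.0;
    MPI_Request rq;
    if (do_recv) {                       // post receive only when requested
      MPI_Irecv(recv, bytes, MPI_CHAR, from, tag, comm, &rq);
      list.push_back(rq);
      off_node_bytes += bytes;
    }
    if (do_send) {                       // post send only when requested
      MPI_Isend(xmit, bytes, MPI_CHAR, dest, tag, comm, &rq);
      list.push_back(rq);
      off_node_bytes += bytes;
    }
    return off_node_bytes;
  }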
@@ -45,12 +45,14 @@ void CartesianCommunicator::Init(int *argc, char *** arv)
 CartesianCommunicator::CartesianCommunicator(const Coordinate &processors,const CartesianCommunicator &parent,int &srank)
   : CartesianCommunicator(processors)
 {
+  _shm_processors = Coordinate(processors.size(),1);
   srank=0;
   SetCommunicator(communicator_world);
 }
 
 CartesianCommunicator::CartesianCommunicator(const Coordinate &processors)
 {
+  _shm_processors = Coordinate(processors.size(),1);
   _processors = processors;
   _ndimension = processors.size(); assert(_ndimension>=1);
   _processor_coor.resize(_ndimension);
@@ -111,18 +113,18 @@ void CartesianCommunicator::ShiftedRanks(int dim,int shift,int &source,int &dest
 }
 
 double CartesianCommunicator::StencilSendToRecvFrom( void *xmit,
-                                                     int xmit_to_rank,
+                                                     int xmit_to_rank,int dox,
                                                      void *recv,
-                                                     int recv_from_rank,
+                                                     int recv_from_rank,int dor,
                                                      int bytes, int dir)
 {
   return 2.0*bytes;
 }
 double CartesianCommunicator::StencilSendToRecvFromBegin(std::vector<CommsRequest_t> &list,
                                                          void *xmit,
-                                                         int xmit_to_rank,
+                                                         int xmit_to_rank,int dox,
                                                          void *recv,
-                                                         int recv_from_rank,
+                                                         int recv_from_rank,int dor,
                                                          int bytes, int dir)
 {
   return 2.0*bytes;
@@ -93,9 +93,10 @@ public:
   // Create an optimal reordered communicator that makes MPI_Cart_create get it right
   //////////////////////////////////////////////////////////////////////////////////////
   static void Init(Grid_MPI_Comm comm); // Typically MPI_COMM_WORLD
-  static void OptimalCommunicator            (const Coordinate &processors,Grid_MPI_Comm & optimal_comm); // Turns MPI_COMM_WORLD into right layout for Cartesian
-  static void OptimalCommunicatorHypercube   (const Coordinate &processors,Grid_MPI_Comm & optimal_comm); // Turns MPI_COMM_WORLD into right layout for Cartesian
-  static void OptimalCommunicatorSharedMemory(const Coordinate &processors,Grid_MPI_Comm & optimal_comm); // Turns MPI_COMM_WORLD into right layout for Cartesian
+  // Turns MPI_COMM_WORLD into right layout for Cartesian
+  static void OptimalCommunicator            (const Coordinate &processors,Grid_MPI_Comm & optimal_comm,Coordinate &ShmDims);
+  static void OptimalCommunicatorHypercube   (const Coordinate &processors,Grid_MPI_Comm & optimal_comm,Coordinate &ShmDims);
+  static void OptimalCommunicatorSharedMemory(const Coordinate &processors,Grid_MPI_Comm & optimal_comm,Coordinate &ShmDims);
   static void GetShmDims(const Coordinate &WorldDims,Coordinate &ShmDims);
   ///////////////////////////////////////////////////
   // Provide shared memory facilities off comm world
@@ -152,7 +152,7 @@ int Log2Size(int TwoToPower,int MAXLOG2)
   }
   return log2size;
 }
-void GlobalSharedMemory::OptimalCommunicator(const Coordinate &processors,Grid_MPI_Comm & optimal_comm)
+void GlobalSharedMemory::OptimalCommunicator(const Coordinate &processors,Grid_MPI_Comm & optimal_comm,Coordinate &SHM)
 {
   //////////////////////////////////////////////////////////////////////////////
   // Look and see if it looks like an HPE 8600 based on hostname conventions
@@ -165,8 +165,8 @@ void GlobalSharedMemory::OptimalCommunicator(const Coordinate &processors,Grid_M
   gethostname(name,namelen);
   int nscan = sscanf(name,"r%di%dn%d",&R,&I,&N) ;
 
-  if(nscan==3 && HPEhypercube ) OptimalCommunicatorHypercube(processors,optimal_comm);
-  else                          OptimalCommunicatorSharedMemory(processors,optimal_comm);
+  if(nscan==3 && HPEhypercube ) OptimalCommunicatorHypercube(processors,optimal_comm,SHM);
+  else                          OptimalCommunicatorSharedMemory(processors,optimal_comm,SHM);
 }
 static inline int divides(int a,int b)
 {
@@ -221,7 +221,7 @@ void GlobalSharedMemory::GetShmDims(const Coordinate &WorldDims,Coordinate &ShmD
     dim=(dim+1) %ndimension;
   }
 }
-void GlobalSharedMemory::OptimalCommunicatorHypercube(const Coordinate &processors,Grid_MPI_Comm & optimal_comm)
+void GlobalSharedMemory::OptimalCommunicatorHypercube(const Coordinate &processors,Grid_MPI_Comm & optimal_comm,Coordinate &SHM)
 {
   ////////////////////////////////////////////////////////////////
   // Assert power of two shm_size.
@@ -294,7 +294,8 @@ void GlobalSharedMemory::OptimalCommunicatorHypercube(const Coordinate &processo
   Coordinate HyperCoor(ndimension);
 
   GetShmDims(WorldDims,ShmDims);
+  SHM = ShmDims;
 
   ////////////////////////////////////////////////////////////////
   // Establish torus of processes and nodes with sub-blockings
   ////////////////////////////////////////////////////////////////
@@ -341,7 +342,7 @@ void GlobalSharedMemory::OptimalCommunicatorHypercube(const Coordinate &processo
   int ierr= MPI_Comm_split(WorldComm,0,rank,&optimal_comm);
   assert(ierr==0);
 }
-void GlobalSharedMemory::OptimalCommunicatorSharedMemory(const Coordinate &processors,Grid_MPI_Comm & optimal_comm)
+void GlobalSharedMemory::OptimalCommunicatorSharedMemory(const Coordinate &processors,Grid_MPI_Comm & optimal_comm,Coordinate &SHM)
 {
   ////////////////////////////////////////////////////////////////
   // Identify subblock of ranks on node spreading across dims
@@ -353,6 +354,8 @@ void GlobalSharedMemory::OptimalCommunicatorSharedMemory(const Coordinate &proce
   Coordinate ShmCoor(ndimension); Coordinate NodeCoor(ndimension); Coordinate WorldCoor(ndimension);
 
   GetShmDims(WorldDims,ShmDims);
+  SHM=ShmDims;
+
   ////////////////////////////////////////////////////////////////
   // Establish torus of processes and nodes with sub-blockings
   ////////////////////////////////////////////////////////////////
@@ -48,9 +48,10 @@ void GlobalSharedMemory::Init(Grid_MPI_Comm comm)
   _ShmSetup=1;
 }
 
-void GlobalSharedMemory::OptimalCommunicator(const Coordinate &processors,Grid_MPI_Comm & optimal_comm)
+void GlobalSharedMemory::OptimalCommunicator(const Coordinate &processors,Grid_MPI_Comm & optimal_comm,Coordinate &SHM)
 {
   optimal_comm = WorldComm;
+  SHM = Coordinate(processors.size(),1);
 }
 
 ////////////////////////////////////////////////////////////////////////////////////////////
Grid/qcd/action/fermion/CompactWilsonCloverFermion.h (new file, 240 lines)

@@ -0,0 +1,240 @@
+/*************************************************************************************
+
+    Grid physics library, www.github.com/paboyle/Grid
+
+    Source file: ./lib/qcd/action/fermion/CompactWilsonCloverFermion.h
+
+    Copyright (C) 2020 - 2022
+
+    Author: Daniel Richtmann <daniel.richtmann@gmail.com>
+    Author: Nils Meyer <nils.meyer@ur.de>
+
+    This program is free software; you can redistribute it and/or modify
+    it under the terms of the GNU General Public License as published by
+    the Free Software Foundation; either version 2 of the License, or
+    (at your option) any later version.
+
+    This program is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License along
+    with this program; if not, write to the Free Software Foundation, Inc.,
+    51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+    See the full license in the file "LICENSE" in the top level distribution directory
+*************************************************************************************/
+/*  END LEGAL */
+
+#pragma once
+
+#include <Grid/qcd/action/fermion/WilsonCloverTypes.h>
+#include <Grid/qcd/action/fermion/WilsonCloverHelpers.h>
+
+NAMESPACE_BEGIN(Grid);
+
+// see Grid/qcd/action/fermion/WilsonCloverFermion.h for description
+//
+// Modifications done here:
+//
+// Original: clover term = 12x12 matrix per site
+//
+// But: Only two diagonal 6x6 hermitian blocks are non-zero (also true for original, verified by running)
+// Sufficient to store/transfer only the real parts of the diagonal and one triangular part
+// 2 * (6 + 15 * 2) = 72 real or 36 complex words to be stored/transfered
+//
+// Here: Above but diagonal as complex numbers, i.e., need to store/transfer
+// 2 * (6 * 2 + 15 * 2) = 84 real or 42 complex words
+//
+// Words per site and improvement compared to original (combined with the input and output spinors):
+//
+// - Original: 2*12 + 12*12 = 168 words -> 1.00 x less
+// - Minimal:  2*12 + 36    =  60 words -> 2.80 x less
+// - Here:     2*12 + 42    =  66 words -> 2.55 x less
+//
+// These improvements directly translate to wall-clock time
+//
+// Data layout:
+//
+// - diagonal and triangle part as separate lattice fields,
+//   this was faster than as 1 combined field on all tested machines
+// - diagonal: as expected
+// - triangle: store upper right triangle in row major order
+// - graphical:
+//        0  1  2  3  4
+//           5  6  7  8
+//              9 10 11 = upper right triangle indices
+//                12 13
+//                   14
+//     0
+//        1
+//           2
+//              3       = diagonal indices
+//                 4
+//                    5
+//     0
+//     1  5
+//     2  6  9          = lower left triangle indices
+//     3  7 10 12
+//     4  8 11 13 14
+//
+// Impact on total memory consumption:
+// - Original: (2 * 1 + 8 * 1/2) 12x12 matrices = 6 12x12 matrices = 864 complex words per site
+// - Here:     (2 * 1 + 4 * 1/2) diagonal parts = 4 diagonal parts =  24 complex words per site
+//           + (2 * 1 + 4 * 1/2) triangle parts = 4 triangle parts =  60 complex words per site
+//                                                                 =  84 complex words per site
+
+template<class Impl>
+class CompactWilsonCloverFermion : public WilsonFermion<Impl>,
+                                   public WilsonCloverHelpers<Impl>,
+                                   public CompactWilsonCloverHelpers<Impl> {
+  /////////////////////////////////////////////
+  // Sizes
+  /////////////////////////////////////////////
+
+public:
+
+  INHERIT_COMPACT_CLOVER_SIZES(Impl);
+
+  /////////////////////////////////////////////
+  // Type definitions
+  /////////////////////////////////////////////
+
+public:
+
+  INHERIT_IMPL_TYPES(Impl);
+  INHERIT_CLOVER_TYPES(Impl);
+  INHERIT_COMPACT_CLOVER_TYPES(Impl);
+
+  typedef WilsonFermion<Impl>              WilsonBase;
+  typedef WilsonCloverHelpers<Impl>        Helpers;
+  typedef CompactWilsonCloverHelpers<Impl> CompactHelpers;
+
+  /////////////////////////////////////////////
+  // Constructors
+  /////////////////////////////////////////////
+
+public:
+
+  CompactWilsonCloverFermion(GaugeField& _Umu,
+                             GridCartesian& Fgrid,
+                             GridRedBlackCartesian& Hgrid,
+                             const RealD _mass,
+                             const RealD _csw_r = 0.0,
+                             const RealD _csw_t = 0.0,
+                             const RealD _cF = 1.0,
+                             const WilsonAnisotropyCoefficients& clover_anisotropy = WilsonAnisotropyCoefficients(),
+                             const ImplParams& impl_p = ImplParams());
+
+  /////////////////////////////////////////////
+  // Member functions (implementing interface)
+  /////////////////////////////////////////////
+
+public:
+
+  virtual void Instantiatable() {};
+  int ConstEE()     override { return 0; };
+  int isTrivialEE() override { return 0; };
+
+  void Dhop(const FermionField& in, FermionField& out, int dag) override;
+
+  void DhopOE(const FermionField& in, FermionField& out, int dag) override;
+
+  void DhopEO(const FermionField& in, FermionField& out, int dag) override;
+
+  void DhopDir(const FermionField& in, FermionField& out, int dir, int disp) override;
+
+  void DhopDirAll(const FermionField& in, std::vector<FermionField>& out) /* override */;
+
+  void M(const FermionField& in, FermionField& out) override;
+
+  void Mdag(const FermionField& in, FermionField& out) override;
+
+  void Meooe(const FermionField& in, FermionField& out) override;
+
+  void MeooeDag(const FermionField& in, FermionField& out) override;
+
+  void Mooee(const FermionField& in, FermionField& out) override;
+
+  void MooeeDag(const FermionField& in, FermionField& out) override;
+
+  void MooeeInv(const FermionField& in, FermionField& out) override;
+
+  void MooeeInvDag(const FermionField& in, FermionField& out) override;
+
+  void Mdir(const FermionField& in, FermionField& out, int dir, int disp) override;
+
+  void MdirAll(const FermionField& in, std::vector<FermionField>& out) override;
+
+  void MDeriv(GaugeField& force, const FermionField& X, const FermionField& Y, int dag) override;
+
+  void MooDeriv(GaugeField& mat, const FermionField& U, const FermionField& V, int dag) override;
+
+  void MeeDeriv(GaugeField& mat, const FermionField& U, const FermionField& V, int dag) override;
+
+  /////////////////////////////////////////////
+  // Member functions (internals)
+  /////////////////////////////////////////////
+
+  void MooeeInternal(const FermionField& in,
+                     FermionField& out,
+                     const CloverDiagonalField& diagonal,
+                     const CloverTriangleField& triangle);
+
+  /////////////////////////////////////////////
+  // Helpers
+  /////////////////////////////////////////////
+
+  void ImportGauge(const GaugeField& _Umu) override;
+
+  /////////////////////////////////////////////
+  // Helpers
+  /////////////////////////////////////////////
+
+private:
+
+  template<class Field>
+  const MaskField* getCorrectMaskField(const Field &in) const {
+    if(in.Grid()->_isCheckerBoarded) {
+      if(in.Checkerboard() == Odd) {
+        return &this->BoundaryMaskOdd;
+      } else {
+        return &this->BoundaryMaskEven;
+      }
+    } else {
+      return &this->BoundaryMask;
+    }
+  }
+
+  template<class Field>
+  void ApplyBoundaryMask(Field& f) {
+    const MaskField* m = getCorrectMaskField(f); assert(m != nullptr);
+    assert(m != nullptr);
+    CompactHelpers::ApplyBoundaryMask(f, *m);
+  }
+
+  /////////////////////////////////////////////
+  // Member Data
+  /////////////////////////////////////////////
+
+public:
+
+  RealD csw_r;
+  RealD csw_t;
+  RealD cF;
+
+  bool open_boundaries;
+
+  CloverDiagonalField Diagonal,    DiagonalEven,    DiagonalOdd;
+  CloverDiagonalField DiagonalInv, DiagonalInvEven, DiagonalInvOdd;
+
+  CloverTriangleField Triangle,    TriangleEven,    TriangleOdd;
+  CloverTriangleField TriangleInv, TriangleInvEven, TriangleInvOdd;
+
+  FermionField Tmp;
+
+  MaskField BoundaryMask, BoundaryMaskEven, BoundaryMaskOdd;
+};
+
+NAMESPACE_END(Grid);
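The layout comment above stores each hermitian 6x6 block as 6 complex diagonal entries plus the 15 entries of the upper triangle in row-major order. A small sketch of the index arithmetic implied by that picture (illustrative only, not Grid's actual accessor):

  #include <cassert>

  // Map (i,j), i != j, into the row-major upper-triangle numbering drawn above.
  // Row i contributes (N-1-i) entries, so rows 0..i-1 occupy i*N - i*(i+1)/2 slots.
  inline int triangle_index(int i, int j, int N = 6) {
    assert(i != j && i < N && j < N);
    if (i > j) { int t = i; i = j; j = t; }        // lower left mirrors upper right
    return i * N - i * (i + 1) / 2 + (j - i - 1);  // yields 0..14 for N == 6
  }
  // Checks against the picture: (0,1)->0, (0,5)->4, (1,2)->5, (4,5)->14.
  // Per-site word count: 2 blocks * (6 diagonal + 15 triangle) = 42 complex words.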
@@ -53,6 +53,7 @@ NAMESPACE_CHECK(Wilson);
 #include <Grid/qcd/action/fermion/WilsonTMFermion.h>        // 4d wilson like
 NAMESPACE_CHECK(WilsonTM);
 #include <Grid/qcd/action/fermion/WilsonCloverFermion.h>    // 4d wilson clover fermions
+#include <Grid/qcd/action/fermion/CompactWilsonCloverFermion.h> // 4d compact wilson clover fermions
 NAMESPACE_CHECK(WilsonClover);
 #include <Grid/qcd/action/fermion/WilsonFermion5D.h>        // 5d base used by all 5d overlap types
 NAMESPACE_CHECK(Wilson5D);
@@ -153,6 +154,23 @@ typedef WilsonCloverFermion<WilsonTwoIndexAntiSymmetricImplR> WilsonCloverTwoInd
 typedef WilsonCloverFermion<WilsonTwoIndexAntiSymmetricImplF> WilsonCloverTwoIndexAntiSymmetricFermionF;
 typedef WilsonCloverFermion<WilsonTwoIndexAntiSymmetricImplD> WilsonCloverTwoIndexAntiSymmetricFermionD;
 
+// Compact Clover fermions
+typedef CompactWilsonCloverFermion<WilsonImplR> CompactWilsonCloverFermionR;
+typedef CompactWilsonCloverFermion<WilsonImplF> CompactWilsonCloverFermionF;
+typedef CompactWilsonCloverFermion<WilsonImplD> CompactWilsonCloverFermionD;
+
+typedef CompactWilsonCloverFermion<WilsonAdjImplR> CompactWilsonCloverAdjFermionR;
+typedef CompactWilsonCloverFermion<WilsonAdjImplF> CompactWilsonCloverAdjFermionF;
+typedef CompactWilsonCloverFermion<WilsonAdjImplD> CompactWilsonCloverAdjFermionD;
+
+typedef CompactWilsonCloverFermion<WilsonTwoIndexSymmetricImplR> CompactWilsonCloverTwoIndexSymmetricFermionR;
+typedef CompactWilsonCloverFermion<WilsonTwoIndexSymmetricImplF> CompactWilsonCloverTwoIndexSymmetricFermionF;
+typedef CompactWilsonCloverFermion<WilsonTwoIndexSymmetricImplD> CompactWilsonCloverTwoIndexSymmetricFermionD;
+
+typedef CompactWilsonCloverFermion<WilsonTwoIndexAntiSymmetricImplR> CompactWilsonCloverTwoIndexAntiSymmetricFermionR;
+typedef CompactWilsonCloverFermion<WilsonTwoIndexAntiSymmetricImplF> CompactWilsonCloverTwoIndexAntiSymmetricFermionF;
+typedef CompactWilsonCloverFermion<WilsonTwoIndexAntiSymmetricImplD> CompactWilsonCloverTwoIndexAntiSymmetricFermionD;
+
 // Domain Wall fermions
 typedef DomainWallFermion<WilsonImplR> DomainWallFermionR;
 typedef DomainWallFermion<WilsonImplF> DomainWallFermionF;
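With the new typedefs, a compact clover operator is constructed like the standard clover operator plus the boundary coefficient cF. A hedged instantiation sketch (assumes a Grid main after Grid_init; the parameter values and the random gauge field are placeholders):

  GridCartesian         *UGrid   = SpaceTimeGrid::makeFourDimGrid(GridDefaultLatt(),
                                     GridDefaultSimd(Nd,vComplex::Nsimd()),
                                     GridDefaultMpi());
  GridRedBlackCartesian *UrbGrid = SpaceTimeGrid::makeFourDimRedBlackGrid(UGrid);

  LatticeGaugeField Umu(UGrid);        // placeholder gauge field
  RealD mass = 0.1, csw_r = 1.0, csw_t = 1.0, cF = 1.0;

  CompactWilsonCloverFermionR CompactDw(Umu, *UGrid, *UrbGrid, mass,
                                        csw_r, csw_t, cF);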
@@ -4,10 +4,11 @@
 
     Source file: ./lib/qcd/action/fermion/WilsonCloverFermion.h
 
-    Copyright (C) 2017
+    Copyright (C) 2017 - 2022
 
     Author: Guido Cossu <guido.cossu@ed.ac.uk>
     Author: David Preti <>
+    Author: Daniel Richtmann <daniel.richtmann@gmail.com>
 
     This program is free software; you can redistribute it and/or modify
     it under the terms of the GNU General Public License as published by
@@ -29,7 +30,8 @@
 
 #pragma once
 
-#include <Grid/Grid.h>
+#include <Grid/qcd/action/fermion/WilsonCloverTypes.h>
+#include <Grid/qcd/action/fermion/WilsonCloverHelpers.h>
 
 NAMESPACE_BEGIN(Grid);
 
@@ -50,18 +52,15 @@ NAMESPACE_BEGIN(Grid);
 //////////////////////////////////////////////////////////////////
 
 template <class Impl>
-class WilsonCloverFermion : public WilsonFermion<Impl>
+class WilsonCloverFermion : public WilsonFermion<Impl>,
+                            public WilsonCloverHelpers<Impl>
 {
 public:
-  // Types definitions
   INHERIT_IMPL_TYPES(Impl);
-  template <typename vtype>
-  using iImplClover = iScalar<iMatrix<iMatrix<vtype, Impl::Dimension>, Ns>>;
-  typedef iImplClover<Simd> SiteCloverType;
-  typedef Lattice<SiteCloverType> CloverFieldType;
+  INHERIT_CLOVER_TYPES(Impl);
 
-public:
-  typedef WilsonFermion<Impl> WilsonBase;
+  typedef WilsonFermion<Impl>       WilsonBase;
+  typedef WilsonCloverHelpers<Impl> Helpers;
 
   virtual int ConstEE(void) { return 0; };
   virtual void Instantiatable(void){};
@@ -72,42 +71,7 @@ public:
                       const RealD _csw_r = 0.0,
                       const RealD _csw_t = 0.0,
                       const WilsonAnisotropyCoefficients &clover_anisotropy = WilsonAnisotropyCoefficients(),
-                      const ImplParams &impl_p = ImplParams()) : WilsonFermion<Impl>(_Umu,
-                                                                                     Fgrid,
-                                                                                     Hgrid,
-                                                                                     _mass, impl_p, clover_anisotropy),
-                                                                 CloverTerm(&Fgrid),
-                                                                 CloverTermInv(&Fgrid),
-                                                                 CloverTermEven(&Hgrid),
-                                                                 CloverTermOdd(&Hgrid),
-                                                                 CloverTermInvEven(&Hgrid),
-                                                                 CloverTermInvOdd(&Hgrid),
-                                                                 CloverTermDagEven(&Hgrid),
-                                                                 CloverTermDagOdd(&Hgrid),
-                                                                 CloverTermInvDagEven(&Hgrid),
-                                                                 CloverTermInvDagOdd(&Hgrid)
-  {
-    assert(Nd == 4); // require 4 dimensions
-
-    if (clover_anisotropy.isAnisotropic)
-    {
-      csw_r = _csw_r * 0.5 / clover_anisotropy.xi_0;
-      diag_mass = _mass + 1.0 + (Nd - 1) * (clover_anisotropy.nu / clover_anisotropy.xi_0);
-    }
-    else
-    {
-      csw_r = _csw_r * 0.5;
-      diag_mass = 4.0 + _mass;
-    }
-    csw_t = _csw_t * 0.5;
-
-    if (csw_r == 0)
-      std::cout << GridLogWarning << "Initializing WilsonCloverFermion with csw_r = 0" << std::endl;
-    if (csw_t == 0)
-      std::cout << GridLogWarning << "Initializing WilsonCloverFermion with csw_t = 0" << std::endl;
-
-    ImportGauge(_Umu);
-  }
+                      const ImplParams &impl_p = ImplParams());
 
   virtual void M(const FermionField &in, FermionField &out);
   virtual void Mdag(const FermionField &in, FermionField &out);
@@ -124,250 +88,21 @@ public:
   void ImportGauge(const GaugeField &_Umu);
 
   // Derivative parts unpreconditioned pseudofermions
-  void MDeriv(GaugeField &force, const FermionField &X, const FermionField &Y, int dag)
-  {
-    conformable(X.Grid(), Y.Grid());
-    conformable(X.Grid(), force.Grid());
-    GaugeLinkField force_mu(force.Grid()), lambda(force.Grid());
-    GaugeField clover_force(force.Grid());
-    PropagatorField Lambda(force.Grid());
-
-    // Guido: Here we are hitting some performance issues:
-    // need to extract the components of the DoubledGaugeField
-    // for each call
-    // Possible solution
-    // Create a vector object to store them? (cons: wasting space)
-    std::vector<GaugeLinkField> U(Nd, this->Umu.Grid());
-
-    Impl::extractLinkField(U, this->Umu);
-
-    force = Zero();
-    // Derivative of the Wilson hopping term
-    this->DhopDeriv(force, X, Y, dag);
-
-    ///////////////////////////////////////////////////////////
-    // Clover term derivative
-    ///////////////////////////////////////////////////////////
-    Impl::outerProductImpl(Lambda, X, Y);
-    //std::cout << "Lambda:" << Lambda << std::endl;
-
-    Gamma::Algebra sigma[] = {
-        Gamma::Algebra::SigmaXY,
-        Gamma::Algebra::SigmaXZ,
-        Gamma::Algebra::SigmaXT,
-        Gamma::Algebra::MinusSigmaXY,
-        Gamma::Algebra::SigmaYZ,
-        Gamma::Algebra::SigmaYT,
-        Gamma::Algebra::MinusSigmaXZ,
-        Gamma::Algebra::MinusSigmaYZ,
-        Gamma::Algebra::SigmaZT,
-        Gamma::Algebra::MinusSigmaXT,
-        Gamma::Algebra::MinusSigmaYT,
-        Gamma::Algebra::MinusSigmaZT};
-
-    /*
-      sigma_{\mu \nu}=
-      | 0          sigma[0]   sigma[1]   sigma[2]  |
-      | sigma[3]   0          sigma[4]   sigma[5]  |
-      | sigma[6]   sigma[7]   0          sigma[8]  |
-      | sigma[9]   sigma[10]  sigma[11]  0         |
-    */
-
-    int count = 0;
-    clover_force = Zero();
-    for (int mu = 0; mu < 4; mu++)
-    {
-      force_mu = Zero();
-      for (int nu = 0; nu < 4; nu++)
-      {
-        if (mu == nu)
-          continue;
-
-        RealD factor;
-        if (nu == 4 || mu == 4)
-        {
-          factor = 2.0 * csw_t;
-        }
-        else
-        {
-          factor = 2.0 * csw_r;
-        }
-        PropagatorField Slambda = Gamma(sigma[count]) * Lambda; // sigma checked
-        Impl::TraceSpinImpl(lambda, Slambda);                   // traceSpin ok
-        force_mu -= factor*Cmunu(U, lambda, mu, nu);            // checked
-        count++;
-      }
-
-      pokeLorentz(clover_force, U[mu] * force_mu, mu);
-    }
-    //clover_force *= csw;
-    force += clover_force;
-  }
-
-  // Computing C_{\mu \nu}(x) as in Eq.(B.39) in Zbigniew Sroczynski's PhD thesis
-  GaugeLinkField Cmunu(std::vector<GaugeLinkField> &U, GaugeLinkField &lambda, int mu, int nu)
-  {
-    conformable(lambda.Grid(), U[0].Grid());
-    GaugeLinkField out(lambda.Grid()), tmp(lambda.Grid());
-    // insertion in upper staple
-    // please check redundancy of shift operations
-
-    // C1+
-    tmp = lambda * U[nu];
-    out = Impl::ShiftStaple(Impl::CovShiftForward(tmp, nu, Impl::CovShiftBackward(U[mu], mu, Impl::CovShiftIdentityBackward(U[nu], nu))), mu);
-
-    // C2+
-    tmp = U[mu] * Impl::ShiftStaple(adj(lambda), mu);
-    out += Impl::ShiftStaple(Impl::CovShiftForward(U[nu], nu, Impl::CovShiftBackward(tmp, mu, Impl::CovShiftIdentityBackward(U[nu], nu))), mu);
-
-    // C3+
-    tmp = U[nu] * Impl::ShiftStaple(adj(lambda), nu);
-    out += Impl::ShiftStaple(Impl::CovShiftForward(U[nu], nu, Impl::CovShiftBackward(U[mu], mu, Impl::CovShiftIdentityBackward(tmp, nu))), mu);
-
-    // C4+
-    out += Impl::ShiftStaple(Impl::CovShiftForward(U[nu], nu, Impl::CovShiftBackward(U[mu], mu, Impl::CovShiftIdentityBackward(U[nu], nu))), mu) * lambda;
-
-    // insertion in lower staple
-    // C1-
-    out -= Impl::ShiftStaple(lambda, mu) * Impl::ShiftStaple(Impl::CovShiftBackward(U[nu], nu, Impl::CovShiftBackward(U[mu], mu, U[nu])), mu);
-
-    // C2-
-    tmp = adj(lambda) * U[nu];
-    out -= Impl::ShiftStaple(Impl::CovShiftBackward(tmp, nu, Impl::CovShiftBackward(U[mu], mu, U[nu])), mu);
-
-    // C3-
-    tmp = lambda * U[nu];
-    out -= Impl::ShiftStaple(Impl::CovShiftBackward(U[nu], nu, Impl::CovShiftBackward(U[mu], mu, tmp)), mu);
-
-    // C4-
-    out -= Impl::ShiftStaple(Impl::CovShiftBackward(U[nu], nu, Impl::CovShiftBackward(U[mu], mu, U[nu])), mu) * lambda;
-
-    return out;
-  }
-
-protected:
+  void MDeriv(GaugeField &force, const FermionField &X, const FermionField &Y, int dag);
+
+public:
   // here fixing the 4 dimensions, make it more general?
 
   RealD csw_r;                                               // Clover coefficient - spatial
   RealD csw_t;                                               // Clover coefficient - temporal
   RealD diag_mass;                                           // Mass term
-  CloverFieldType CloverTerm, CloverTermInv;                 // Clover term
-  CloverFieldType CloverTermEven, CloverTermOdd;             // Clover term EO
-  CloverFieldType CloverTermInvEven, CloverTermInvOdd;       // Clover term Inv EO
-  CloverFieldType CloverTermDagEven, CloverTermDagOdd;       // Clover term Dag EO
-  CloverFieldType CloverTermInvDagEven, CloverTermInvDagOdd; // Clover term Inv Dag EO
+  CloverField CloverTerm, CloverTermInv;                     // Clover term
+  CloverField CloverTermEven, CloverTermOdd;                 // Clover term EO
+  CloverField CloverTermInvEven, CloverTermInvOdd;           // Clover term Inv EO
+  CloverField CloverTermDagEven, CloverTermDagOdd;           // Clover term Dag EO
+  CloverField CloverTermInvDagEven, CloverTermInvDagOdd;     // Clover term Inv Dag EO
-
-public:
-  // eventually these can be compressed into 6x6 blocks instead of the 12x12
-  // using the DeGrand-Rossi basis for the gamma matrices
-  CloverFieldType fillCloverYZ(const GaugeLinkField &F)
-  {
-    CloverFieldType T(F.Grid());
-    T = Zero();
-    autoView(T_v,T,AcceleratorWrite);
-    autoView(F_v,F,AcceleratorRead);
-    accelerator_for(i, CloverTerm.Grid()->oSites(),1,
-    {
-      T_v[i]()(0, 1) = timesMinusI(F_v[i]()());
-      T_v[i]()(1, 0) = timesMinusI(F_v[i]()());
-      T_v[i]()(2, 3) = timesMinusI(F_v[i]()());
-      T_v[i]()(3, 2) = timesMinusI(F_v[i]()());
-    });
-
-    return T;
-  }
-
-  CloverFieldType fillCloverXZ(const GaugeLinkField &F)
-  {
-    CloverFieldType T(F.Grid());
-    T = Zero();
-
-    autoView(T_v, T,AcceleratorWrite);
-    autoView(F_v, F,AcceleratorRead);
-    accelerator_for(i, CloverTerm.Grid()->oSites(),1,
-    {
-      T_v[i]()(0, 1) = -F_v[i]()();
-      T_v[i]()(1, 0) =  F_v[i]()();
-      T_v[i]()(2, 3) = -F_v[i]()();
-      T_v[i]()(3, 2) =  F_v[i]()();
-    });
-
-    return T;
-  }
-
-  CloverFieldType fillCloverXY(const GaugeLinkField &F)
-  {
-    CloverFieldType T(F.Grid());
-    T = Zero();
-
-    autoView(T_v,T,AcceleratorWrite);
-    autoView(F_v,F,AcceleratorRead);
-    accelerator_for(i, CloverTerm.Grid()->oSites(),1,
-    {
-      T_v[i]()(0, 0) = timesMinusI(F_v[i]()());
-      T_v[i]()(1, 1) = timesI(F_v[i]()());
-      T_v[i]()(2, 2) = timesMinusI(F_v[i]()());
-      T_v[i]()(3, 3) = timesI(F_v[i]()());
-    });
-
-    return T;
-  }
-
-  CloverFieldType fillCloverXT(const GaugeLinkField &F)
-  {
-    CloverFieldType T(F.Grid());
-    T = Zero();
-
-    autoView( T_v , T, AcceleratorWrite);
-    autoView( F_v , F, AcceleratorRead);
-    accelerator_for(i, CloverTerm.Grid()->oSites(),1,
-    {
-      T_v[i]()(0, 1) = timesI(F_v[i]()());
-      T_v[i]()(1, 0) = timesI(F_v[i]()());
-      T_v[i]()(2, 3) = timesMinusI(F_v[i]()());
-      T_v[i]()(3, 2) = timesMinusI(F_v[i]()());
-    });
-
-    return T;
-  }
-
-  CloverFieldType fillCloverYT(const GaugeLinkField &F)
-  {
-    CloverFieldType T(F.Grid());
-    T = Zero();
-
-    autoView( T_v ,T,AcceleratorWrite);
-    autoView( F_v ,F,AcceleratorRead);
-    accelerator_for(i, CloverTerm.Grid()->oSites(),1,
-    {
-      T_v[i]()(0, 1) = -(F_v[i]()());
-      T_v[i]()(1, 0) =  (F_v[i]()());
-      T_v[i]()(2, 3) =  (F_v[i]()());
-      T_v[i]()(3, 2) = -(F_v[i]()());
-    });
-
-    return T;
-  }
-
-  CloverFieldType fillCloverZT(const GaugeLinkField &F)
-  {
-    CloverFieldType T(F.Grid());
-
-    T = Zero();
-
-    autoView( T_v , T,AcceleratorWrite);
-    autoView( F_v , F,AcceleratorRead);
-    accelerator_for(i, CloverTerm.Grid()->oSites(),1,
-    {
-      T_v[i]()(0, 0) = timesI(F_v[i]()());
-      T_v[i]()(1, 1) = timesMinusI(F_v[i]()());
-      T_v[i]()(2, 2) = timesMinusI(F_v[i]()());
-      T_v[i]()(3, 3) = timesI(F_v[i]()());
-    });
-
-    return T;
-  }
 };
 
 NAMESPACE_END(Grid);
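The fillClover* routines removed here reappear in WilsonCloverHelpers.h below, with the scalar per-site assignments replaced by coalescedRead/coalescedWrite and the worker count raised from 1 to Nsimd(), so the kernels vectorise on GPU targets. A schematic of the before/after pattern, lifted from the two versions (field names as in the diff):

  // Before: one worker per outer site, direct scalar assignment.
  accelerator_for(i, T.Grid()->oSites(), 1,
  {
    T_v[i]()(0, 1) = timesMinusI(F_v[i]()());
  });

  // After: Nsimd() workers per outer site; coalescedRead/coalescedWrite move
  // one SIMD lane per worker, which coalesces memory accesses on GPU.
  accelerator_for(i, T.Grid()->oSites(), CloverField::vector_type::Nsimd(),
  {
    coalescedWrite(T_v[i]()(0, 1), coalescedRead(timesMinusI(F_v[i]()())));
  });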
Grid/qcd/action/fermion/WilsonCloverHelpers.h (new file, 761 lines; listing truncated below)

@@ -0,0 +1,761 @@
+/*************************************************************************************
+
+    Grid physics library, www.github.com/paboyle/Grid
+
+    Source file: ./lib/qcd/action/fermion/WilsonCloverHelpers.h
+
+    Copyright (C) 2021 - 2022
+
+    Author: Daniel Richtmann <daniel.richtmann@gmail.com>
+
+    This program is free software; you can redistribute it and/or modify
+    it under the terms of the GNU General Public License as published by
+    the Free Software Foundation; either version 2 of the License, or
+    (at your option) any later version.
+
+    This program is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License along
+    with this program; if not, write to the Free Software Foundation, Inc.,
+    51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+    See the full license in the file "LICENSE" in the top level distribution directory
+*************************************************************************************/
+/*  END LEGAL */
+
+#pragma once
+
+// Helper routines that implement common clover functionality
+
+NAMESPACE_BEGIN(Grid);
+
+template<class Impl> class WilsonCloverHelpers {
+public:
+
+  INHERIT_IMPL_TYPES(Impl);
+  INHERIT_CLOVER_TYPES(Impl);
+
+  // Computing C_{\mu \nu}(x) as in Eq.(B.39) in Zbigniew Sroczynski's PhD thesis
+  static GaugeLinkField Cmunu(std::vector<GaugeLinkField> &U, GaugeLinkField &lambda, int mu, int nu)
+  {
+    conformable(lambda.Grid(), U[0].Grid());
+    GaugeLinkField out(lambda.Grid()), tmp(lambda.Grid());
+    // insertion in upper staple
+    // please check redundancy of shift operations
+
+    // C1+
+    tmp = lambda * U[nu];
+    out = Impl::ShiftStaple(Impl::CovShiftForward(tmp, nu, Impl::CovShiftBackward(U[mu], mu, Impl::CovShiftIdentityBackward(U[nu], nu))), mu);
+
+    // C2+
+    tmp = U[mu] * Impl::ShiftStaple(adj(lambda), mu);
+    out += Impl::ShiftStaple(Impl::CovShiftForward(U[nu], nu, Impl::CovShiftBackward(tmp, mu, Impl::CovShiftIdentityBackward(U[nu], nu))), mu);
+
+    // C3+
+    tmp = U[nu] * Impl::ShiftStaple(adj(lambda), nu);
+    out += Impl::ShiftStaple(Impl::CovShiftForward(U[nu], nu, Impl::CovShiftBackward(U[mu], mu, Impl::CovShiftIdentityBackward(tmp, nu))), mu);
+
+    // C4+
+    out += Impl::ShiftStaple(Impl::CovShiftForward(U[nu], nu, Impl::CovShiftBackward(U[mu], mu, Impl::CovShiftIdentityBackward(U[nu], nu))), mu) * lambda;
+
+    // insertion in lower staple
+    // C1-
+    out -= Impl::ShiftStaple(lambda, mu) * Impl::ShiftStaple(Impl::CovShiftBackward(U[nu], nu, Impl::CovShiftBackward(U[mu], mu, U[nu])), mu);
+
+    // C2-
+    tmp = adj(lambda) * U[nu];
+    out -= Impl::ShiftStaple(Impl::CovShiftBackward(tmp, nu, Impl::CovShiftBackward(U[mu], mu, U[nu])), mu);
+
+    // C3-
+    tmp = lambda * U[nu];
+    out -= Impl::ShiftStaple(Impl::CovShiftBackward(U[nu], nu, Impl::CovShiftBackward(U[mu], mu, tmp)), mu);
+
+    // C4-
+    out -= Impl::ShiftStaple(Impl::CovShiftBackward(U[nu], nu, Impl::CovShiftBackward(U[mu], mu, U[nu])), mu) * lambda;
+
+    return out;
+  }
+
+  static CloverField fillCloverYZ(const GaugeLinkField &F)
+  {
+    CloverField T(F.Grid());
+    T = Zero();
+    autoView(T_v,T,AcceleratorWrite);
+    autoView(F_v,F,AcceleratorRead);
+    accelerator_for(i, T.Grid()->oSites(),CloverField::vector_type::Nsimd(),
+    {
+      coalescedWrite(T_v[i]()(0, 1), coalescedRead(timesMinusI(F_v[i]()())));
+      coalescedWrite(T_v[i]()(1, 0), coalescedRead(timesMinusI(F_v[i]()())));
+      coalescedWrite(T_v[i]()(2, 3), coalescedRead(timesMinusI(F_v[i]()())));
+      coalescedWrite(T_v[i]()(3, 2), coalescedRead(timesMinusI(F_v[i]()())));
+    });
+
+    return T;
+  }
+
+  static CloverField fillCloverXZ(const GaugeLinkField &F)
+  {
+    CloverField T(F.Grid());
+    T = Zero();
+
+    autoView(T_v, T,AcceleratorWrite);
+    autoView(F_v, F,AcceleratorRead);
+    accelerator_for(i, T.Grid()->oSites(),CloverField::vector_type::Nsimd(),
+    {
+      coalescedWrite(T_v[i]()(0, 1), coalescedRead(-F_v[i]()()));
+      coalescedWrite(T_v[i]()(1, 0), coalescedRead(F_v[i]()()));
+      coalescedWrite(T_v[i]()(2, 3), coalescedRead(-F_v[i]()()));
+      coalescedWrite(T_v[i]()(3, 2), coalescedRead(F_v[i]()()));
+    });
+
+    return T;
+  }
+
+  static CloverField fillCloverXY(const GaugeLinkField &F)
+  {
+    CloverField T(F.Grid());
+    T = Zero();
+
+    autoView(T_v,T,AcceleratorWrite);
+    autoView(F_v,F,AcceleratorRead);
+    accelerator_for(i, T.Grid()->oSites(),CloverField::vector_type::Nsimd(),
+    {
+      coalescedWrite(T_v[i]()(0, 0), coalescedRead(timesMinusI(F_v[i]()())));
+      coalescedWrite(T_v[i]()(1, 1), coalescedRead(timesI(F_v[i]()())));
+      coalescedWrite(T_v[i]()(2, 2), coalescedRead(timesMinusI(F_v[i]()())));
+      coalescedWrite(T_v[i]()(3, 3), coalescedRead(timesI(F_v[i]()())));
+    });
+
+    return T;
+  }
+
+  static CloverField fillCloverXT(const GaugeLinkField &F)
+  {
+    CloverField T(F.Grid());
+    T = Zero();
+
+    autoView( T_v , T, AcceleratorWrite);
+    autoView( F_v , F, AcceleratorRead);
+    accelerator_for(i, T.Grid()->oSites(),CloverField::vector_type::Nsimd(),
+    {
+      coalescedWrite(T_v[i]()(0, 1), coalescedRead(timesI(F_v[i]()())));
+      coalescedWrite(T_v[i]()(1, 0), coalescedRead(timesI(F_v[i]()())));
+      coalescedWrite(T_v[i]()(2, 3), coalescedRead(timesMinusI(F_v[i]()())));
+      coalescedWrite(T_v[i]()(3, 2), coalescedRead(timesMinusI(F_v[i]()())));
+    });
+
+    return T;
+  }
+
+  static CloverField fillCloverYT(const GaugeLinkField &F)
+  {
+    CloverField T(F.Grid());
+    T = Zero();
+
+    autoView( T_v ,T,AcceleratorWrite);
+    autoView( F_v ,F,AcceleratorRead);
+    accelerator_for(i, T.Grid()->oSites(),CloverField::vector_type::Nsimd(),
|
||||||
|
{
|
||||||
|
coalescedWrite(T_v[i]()(0, 1), coalescedRead(-(F_v[i]()())));
|
||||||
|
coalescedWrite(T_v[i]()(1, 0), coalescedRead((F_v[i]()())));
|
||||||
|
coalescedWrite(T_v[i]()(2, 3), coalescedRead((F_v[i]()())));
|
||||||
|
coalescedWrite(T_v[i]()(3, 2), coalescedRead(-(F_v[i]()())));
|
||||||
|
});
|
||||||
|
|
||||||
|
return T;
|
||||||
|
}
|
||||||
|
|
||||||
|
static CloverField fillCloverZT(const GaugeLinkField &F)
|
||||||
|
{
|
||||||
|
CloverField T(F.Grid());
|
||||||
|
|
||||||
|
T = Zero();
|
||||||
|
|
||||||
|
autoView( T_v , T,AcceleratorWrite);
|
||||||
|
autoView( F_v , F,AcceleratorRead);
|
||||||
|
accelerator_for(i, T.Grid()->oSites(),CloverField::vector_type::Nsimd(),
|
||||||
|
{
|
||||||
|
coalescedWrite(T_v[i]()(0, 0), coalescedRead(timesI(F_v[i]()())));
|
||||||
|
coalescedWrite(T_v[i]()(1, 1), coalescedRead(timesMinusI(F_v[i]()())));
|
||||||
|
coalescedWrite(T_v[i]()(2, 2), coalescedRead(timesMinusI(F_v[i]()())));
|
||||||
|
coalescedWrite(T_v[i]()(3, 3), coalescedRead(timesI(F_v[i]()())));
|
||||||
|
});
|
||||||
|
|
||||||
|
return T;
|
||||||
|
}
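
Reading the write patterns off the six kernels above in 2 x 2 chiral blocks (with sigma_a the Pauli matrices): fillCloverYZ, fillCloverXZ and fillCloverXY apply -i sigma_1, -i sigma_2 and -i sigma_3 identically in both blocks, while the electric fills carry opposite signs in the two blocks (+i sigma_1 / -i sigma_1 for XT, -i sigma_2 / +i sigma_2 for YT, +i sigma_3 / -i sigma_3 for ZT). In short, each routine writes

    \[
    T_{\mu\nu}(x) = \Sigma_{\mu\nu}\,F(x), \qquad
    \Sigma_{jk} = \begin{pmatrix} -i\sigma_a & 0 \\ 0 & -i\sigma_a \end{pmatrix}, \qquad
    \Sigma_{jt} = \begin{pmatrix} \pm i\sigma_a & 0 \\ 0 & \mp i\sigma_a \end{pmatrix},
    \]

a hedged reconstruction in the chiral basis these kernels assume.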

  template<class _Spinor>
  static accelerator_inline void multClover(_Spinor& phi, const SiteClover& C, const _Spinor& chi) {
    auto CC = coalescedRead(C);
    mult(&phi, &CC, &chi);
  }

  template<class _SpinorField>
  inline void multCloverField(_SpinorField& out, const CloverField& C, const _SpinorField& phi) {
    const int Nsimd = SiteSpinor::Nsimd();
    autoView(out_v, out, AcceleratorWrite);
    autoView(phi_v, phi, AcceleratorRead);
    autoView(C_v,   C,   AcceleratorRead);
    typedef decltype(coalescedRead(out_v[0])) calcSpinor;
    accelerator_for(sss, out.Grid()->oSites(), Nsimd, {
      calcSpinor tmp;
      multClover(tmp, C_v[sss], phi_v(sss));
      coalescedWrite(out_v[sss], tmp);
    });
  }
};


template<class Impl> class CompactWilsonCloverHelpers {
public:

  INHERIT_COMPACT_CLOVER_SIZES(Impl);

  INHERIT_IMPL_TYPES(Impl);
  INHERIT_CLOVER_TYPES(Impl);
  INHERIT_COMPACT_CLOVER_TYPES(Impl);

#if 0
  static accelerator_inline typename SiteCloverTriangle::vector_type triangle_elem(const SiteCloverTriangle& triangle, int block, int i, int j) {
    assert(i != j);
    if(i < j) {
      return triangle()(block)(triangle_index(i, j));
    } else { // i > j
      return conjugate(triangle()(block)(triangle_index(i, j)));
    }
  }
#else
  template<typename vobj>
  static accelerator_inline vobj triangle_elem(const iImplCloverTriangle<vobj>& triangle, int block, int i, int j) {
    assert(i != j);
    if(i < j) {
      return triangle()(block)(triangle_index(i, j));
    } else { // i > j
      return conjugate(triangle()(block)(triangle_index(i, j)));
    }
  }
#endif

  static accelerator_inline int triangle_index(int i, int j) {
    if(i == j)
      return 0;
    else if(i < j)
      return Nred * (Nred - 1) / 2 - (Nred - i) * (Nred - i - 1) / 2 + j - i - 1;
    else // i > j
      return Nred * (Nred - 1) / 2 - (Nred - j) * (Nred - j - 1) / 2 + i - j - 1;
  }
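
triangle_index packs the strict upper triangle of an Nred x Nred hermitian block row by row; the lower triangle is recovered by conjugation in triangle_elem above. A minimal standalone sketch, plain C++ and not part of Grid, that checks the enumeration for Nred = 6:

    #include <cassert>
    #include <cstdio>

    // Standalone check of the packed indexing used by triangle_index above,
    // assuming Nred = 6 (two spin components x three colours per chiral block).
    constexpr int Nred = 6;

    constexpr int triangle_index(int i, int j) {
      return (i < j)
        ? Nred * (Nred - 1) / 2 - (Nred - i) * (Nred - i - 1) / 2 + j - i - 1
        : Nred * (Nred - 1) / 2 - (Nred - j) * (Nred - j - 1) / 2 + i - j - 1;
    }

    int main() {
      // A row-major walk over the strict upper triangle must yield 0,1,...,14.
      int expect = 0;
      for (int i = 0; i < Nred; i++)
        for (int j = i + 1; j < Nred; j++)
          assert(triangle_index(i, j) == expect++);
      // Hermiticity: (i,j) and (j,i) map to the same packed slot.
      assert(triangle_index(4, 2) == triangle_index(2, 4));
      printf("all %d off-diagonal slots enumerated consistently\n", expect);
      return 0;
    }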

  static void MooeeKernel_gpu(int Nsite,
                              int Ls,
                              const FermionField& in,
                              FermionField& out,
                              const CloverDiagonalField& diagonal,
                              const CloverTriangleField& triangle) {
    autoView(diagonal_v, diagonal, AcceleratorRead);
    autoView(triangle_v, triangle, AcceleratorRead);
    autoView(in_v,       in,       AcceleratorRead);
    autoView(out_v,      out,      AcceleratorWrite);

    typedef decltype(coalescedRead(out_v[0])) CalcSpinor;

    const uint64_t NN = Nsite * Ls;

    accelerator_for(ss, NN, Simd::Nsimd(), {
      int sF = ss;
      int sU = ss/Ls;
      CalcSpinor res;
      CalcSpinor in_t = in_v(sF);
      auto diagonal_t = diagonal_v(sU);
      auto triangle_t = triangle_v(sU);
      for(int block=0; block<Nhs; block++) {
        int s_start = block*Nhs;
        for(int i=0; i<Nred; i++) {
          int si = s_start + i/Nc, ci = i%Nc;
          res()(si)(ci) = diagonal_t()(block)(i) * in_t()(si)(ci);
          for(int j=0; j<Nred; j++) {
            if (j == i) continue;
            int sj = s_start + j/Nc, cj = j%Nc;
            res()(si)(ci) = res()(si)(ci) + triangle_elem(triangle_t, block, i, j) * in_t()(sj)(cj);
          };
        };
      };
      coalescedWrite(out_v[sF], res);
    });
  }
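
Per site and per chiral block b, the loop nest above is a dense hermitian mat-vec in the combined spin-colour index i = (s - b N_hs) N_c + c:

    \[
    \phi^{(b)}_i = d^{(b)}_i\,\chi^{(b)}_i + \sum_{j\neq i} T^{(b)}_{ij}\,\chi^{(b)}_j,
    \qquad T^{(b)}_{ij} = \overline{T^{(b)}_{ji}},
    \qquad i,j = 0,\dots,N_{\rm red}-1,
    \]

with triangle_elem supplying the conjugated entries below the diagonal.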

  static void MooeeKernel_cpu(int Nsite,
                              int Ls,
                              const FermionField& in,
                              FermionField& out,
                              const CloverDiagonalField& diagonal,
                              const CloverTriangleField& triangle) {
    autoView(diagonal_v, diagonal, CpuRead);
    autoView(triangle_v, triangle, CpuRead);
    autoView(in_v,       in,       CpuRead);
    autoView(out_v,      out,      CpuWrite);

    typedef SiteSpinor CalcSpinor;

#if defined(A64FX) || defined(A64FXFIXEDSIZE)
#define PREFETCH_CLOVER(BASE) {                                     \
  uint64_t base;                                                    \
  int pf_dist_L1 = 1;                                               \
  int pf_dist_L2 = -5; /* -> penalty -> disable */                  \
                                                                    \
  if ((pf_dist_L1 >= 0) && (sU + pf_dist_L1 < Nsite)) {             \
    base = (uint64_t)&diag_t()(pf_dist_L1+BASE)(0);                 \
    svprfd(svptrue_b64(), (int64_t*)(base +    0), SV_PLDL1STRM);   \
    svprfd(svptrue_b64(), (int64_t*)(base +  256), SV_PLDL1STRM);   \
    svprfd(svptrue_b64(), (int64_t*)(base +  512), SV_PLDL1STRM);   \
    svprfd(svptrue_b64(), (int64_t*)(base +  768), SV_PLDL1STRM);   \
    svprfd(svptrue_b64(), (int64_t*)(base + 1024), SV_PLDL1STRM);   \
    svprfd(svptrue_b64(), (int64_t*)(base + 1280), SV_PLDL1STRM);   \
  }                                                                 \
                                                                    \
  if ((pf_dist_L2 >= 0) && (sU + pf_dist_L2 < Nsite)) {             \
    base = (uint64_t)&diag_t()(pf_dist_L2+BASE)(0);                 \
    svprfd(svptrue_b64(), (int64_t*)(base +    0), SV_PLDL2STRM);   \
    svprfd(svptrue_b64(), (int64_t*)(base +  256), SV_PLDL2STRM);   \
    svprfd(svptrue_b64(), (int64_t*)(base +  512), SV_PLDL2STRM);   \
    svprfd(svptrue_b64(), (int64_t*)(base +  768), SV_PLDL2STRM);   \
    svprfd(svptrue_b64(), (int64_t*)(base + 1024), SV_PLDL2STRM);   \
    svprfd(svptrue_b64(), (int64_t*)(base + 1280), SV_PLDL2STRM);   \
  }                                                                 \
}
// TODO: Implement/generalize this for other architectures
// I played around a bit on KNL (see below) but didn't bring anything
// #elif defined(AVX512)
// #define PREFETCH_CLOVER(BASE) {                                  \
//   uint64_t base;                                                 \
//   int pf_dist_L1 = 1;                                            \
//   int pf_dist_L2 = +4;                                           \
//                                                                  \
//   if ((pf_dist_L1 >= 0) && (sU + pf_dist_L1 < Nsite)) {          \
//     base = (uint64_t)&diag_t()(pf_dist_L1+BASE)(0);              \
//     _mm_prefetch((const char*)(base +   0), _MM_HINT_T0);        \
//     _mm_prefetch((const char*)(base +  64), _MM_HINT_T0);        \
//     _mm_prefetch((const char*)(base + 128), _MM_HINT_T0);        \
//     _mm_prefetch((const char*)(base + 192), _MM_HINT_T0);        \
//     _mm_prefetch((const char*)(base + 256), _MM_HINT_T0);        \
//     _mm_prefetch((const char*)(base + 320), _MM_HINT_T0);        \
//   }                                                              \
//                                                                  \
//   if ((pf_dist_L2 >= 0) && (sU + pf_dist_L2 < Nsite)) {          \
//     base = (uint64_t)&diag_t()(pf_dist_L2+BASE)(0);              \
//     _mm_prefetch((const char*)(base +   0), _MM_HINT_T1);        \
//     _mm_prefetch((const char*)(base +  64), _MM_HINT_T1);        \
//     _mm_prefetch((const char*)(base + 128), _MM_HINT_T1);        \
//     _mm_prefetch((const char*)(base + 192), _MM_HINT_T1);        \
//     _mm_prefetch((const char*)(base + 256), _MM_HINT_T1);        \
//     _mm_prefetch((const char*)(base + 320), _MM_HINT_T1);        \
//   }                                                              \
// }
#else
#define PREFETCH_CLOVER(BASE)
#endif

    const uint64_t NN = Nsite * Ls;

    thread_for(ss, NN, {
      int sF = ss;
      int sU = ss/Ls;
      CalcSpinor res;
      CalcSpinor in_t = in_v[sF];
      auto diag_t     = diagonal_v[sU]; // "diag" instead of "diagonal" here to make code below easier to read
      auto triangle_t = triangle_v[sU];

      // upper half
      PREFETCH_CLOVER(0);

      auto in_cc_0_0 = conjugate(in_t()(0)(0)); // Nils: reduces number
      auto in_cc_0_1 = conjugate(in_t()(0)(1)); // of conjugates from
      auto in_cc_0_2 = conjugate(in_t()(0)(2)); // 30 to 20
      auto in_cc_1_0 = conjugate(in_t()(1)(0));
      auto in_cc_1_1 = conjugate(in_t()(1)(1));

      res()(0)(0) =     diag_t()(0)( 0) * in_t()(0)(0)
                  + triangle_t()(0)( 0) * in_t()(0)(1)
                  + triangle_t()(0)( 1) * in_t()(0)(2)
                  + triangle_t()(0)( 2) * in_t()(1)(0)
                  + triangle_t()(0)( 3) * in_t()(1)(1)
                  + triangle_t()(0)( 4) * in_t()(1)(2);

      res()(0)(1) = triangle_t()(0)( 0) * in_cc_0_0;
      res()(0)(1) =     diag_t()(0)( 1) * in_t()(0)(1)
                  + triangle_t()(0)( 5) * in_t()(0)(2)
                  + triangle_t()(0)( 6) * in_t()(1)(0)
                  + triangle_t()(0)( 7) * in_t()(1)(1)
                  + triangle_t()(0)( 8) * in_t()(1)(2)
                  + conjugate(res()(0)( 1));

      res()(0)(2) = triangle_t()(0)( 1) * in_cc_0_0
                  + triangle_t()(0)( 5) * in_cc_0_1;
      res()(0)(2) =     diag_t()(0)( 2) * in_t()(0)(2)
                  + triangle_t()(0)( 9) * in_t()(1)(0)
                  + triangle_t()(0)(10) * in_t()(1)(1)
                  + triangle_t()(0)(11) * in_t()(1)(2)
                  + conjugate(res()(0)( 2));

      res()(1)(0) = triangle_t()(0)( 2) * in_cc_0_0
                  + triangle_t()(0)( 6) * in_cc_0_1
                  + triangle_t()(0)( 9) * in_cc_0_2;
      res()(1)(0) =     diag_t()(0)( 3) * in_t()(1)(0)
                  + triangle_t()(0)(12) * in_t()(1)(1)
                  + triangle_t()(0)(13) * in_t()(1)(2)
                  + conjugate(res()(1)( 0));

      res()(1)(1) = triangle_t()(0)( 3) * in_cc_0_0
                  + triangle_t()(0)( 7) * in_cc_0_1
                  + triangle_t()(0)(10) * in_cc_0_2
                  + triangle_t()(0)(12) * in_cc_1_0;
      res()(1)(1) =     diag_t()(0)( 4) * in_t()(1)(1)
                  + triangle_t()(0)(14) * in_t()(1)(2)
                  + conjugate(res()(1)( 1));

      res()(1)(2) = triangle_t()(0)( 4) * in_cc_0_0
                  + triangle_t()(0)( 8) * in_cc_0_1
                  + triangle_t()(0)(11) * in_cc_0_2
                  + triangle_t()(0)(13) * in_cc_1_0
                  + triangle_t()(0)(14) * in_cc_1_1;
      res()(1)(2) =     diag_t()(0)( 5) * in_t()(1)(2)
                  + conjugate(res()(1)( 2));

      vstream(out_v[sF]()(0)(0), res()(0)(0));
      vstream(out_v[sF]()(0)(1), res()(0)(1));
      vstream(out_v[sF]()(0)(2), res()(0)(2));
      vstream(out_v[sF]()(1)(0), res()(1)(0));
      vstream(out_v[sF]()(1)(1), res()(1)(1));
      vstream(out_v[sF]()(1)(2), res()(1)(2));

      // lower half
      PREFETCH_CLOVER(1);

      auto in_cc_2_0 = conjugate(in_t()(2)(0));
      auto in_cc_2_1 = conjugate(in_t()(2)(1));
      auto in_cc_2_2 = conjugate(in_t()(2)(2));
      auto in_cc_3_0 = conjugate(in_t()(3)(0));
      auto in_cc_3_1 = conjugate(in_t()(3)(1));

      res()(2)(0) =     diag_t()(1)( 0) * in_t()(2)(0)
                  + triangle_t()(1)( 0) * in_t()(2)(1)
                  + triangle_t()(1)( 1) * in_t()(2)(2)
                  + triangle_t()(1)( 2) * in_t()(3)(0)
                  + triangle_t()(1)( 3) * in_t()(3)(1)
                  + triangle_t()(1)( 4) * in_t()(3)(2);

      res()(2)(1) = triangle_t()(1)( 0) * in_cc_2_0;
      res()(2)(1) =     diag_t()(1)( 1) * in_t()(2)(1)
                  + triangle_t()(1)( 5) * in_t()(2)(2)
                  + triangle_t()(1)( 6) * in_t()(3)(0)
                  + triangle_t()(1)( 7) * in_t()(3)(1)
                  + triangle_t()(1)( 8) * in_t()(3)(2)
                  + conjugate(res()(2)( 1));

      res()(2)(2) = triangle_t()(1)( 1) * in_cc_2_0
                  + triangle_t()(1)( 5) * in_cc_2_1;
      res()(2)(2) =     diag_t()(1)( 2) * in_t()(2)(2)
                  + triangle_t()(1)( 9) * in_t()(3)(0)
                  + triangle_t()(1)(10) * in_t()(3)(1)
                  + triangle_t()(1)(11) * in_t()(3)(2)
                  + conjugate(res()(2)( 2));

      res()(3)(0) = triangle_t()(1)( 2) * in_cc_2_0
                  + triangle_t()(1)( 6) * in_cc_2_1
                  + triangle_t()(1)( 9) * in_cc_2_2;
      res()(3)(0) =     diag_t()(1)( 3) * in_t()(3)(0)
                  + triangle_t()(1)(12) * in_t()(3)(1)
                  + triangle_t()(1)(13) * in_t()(3)(2)
                  + conjugate(res()(3)( 0));

      res()(3)(1) = triangle_t()(1)( 3) * in_cc_2_0
                  + triangle_t()(1)( 7) * in_cc_2_1
                  + triangle_t()(1)(10) * in_cc_2_2
                  + triangle_t()(1)(12) * in_cc_3_0;
      res()(3)(1) =     diag_t()(1)( 4) * in_t()(3)(1)
                  + triangle_t()(1)(14) * in_t()(3)(2)
                  + conjugate(res()(3)( 1));

      res()(3)(2) = triangle_t()(1)( 4) * in_cc_2_0
                  + triangle_t()(1)( 8) * in_cc_2_1
                  + triangle_t()(1)(11) * in_cc_2_2
                  + triangle_t()(1)(13) * in_cc_3_0
                  + triangle_t()(1)(14) * in_cc_3_1;
      res()(3)(2) =     diag_t()(1)( 5) * in_t()(3)(2)
                  + conjugate(res()(3)( 2));

      vstream(out_v[sF]()(2)(0), res()(2)(0));
      vstream(out_v[sF]()(2)(1), res()(2)(1));
      vstream(out_v[sF]()(2)(2), res()(2)(2));
      vstream(out_v[sF]()(3)(0), res()(3)(0));
      vstream(out_v[sF]()(3)(1), res()(3)(1));
      vstream(out_v[sF]()(3)(2), res()(3)(2));
    });
  }
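
The two-pass pattern above (first accumulate the strictly-lower contribution from pre-conjugated inputs, then conjugate the partial sum once while adding the diagonal and upper parts) exploits the hermiticity of each block; per the inline comment it cuts the conjugation count from 30 to 20 per site. The identity being used, in the notation of the GPU kernel:

    \[
    \sum_{j<i} T_{ij}\,\chi_j
      = \sum_{j<i} \overline{T_{ji}}\,\chi_j
      = \overline{\sum_{j<i} T_{ji}\,\bar\chi_j}\,,
    \qquad\text{so}\qquad
    \phi_i = d_i\,\chi_i + \sum_{j>i} T_{ij}\,\chi_j + \overline{\sum_{j<i} T_{ji}\,\bar\chi_j}.
    \]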

  static void MooeeKernel(int Nsite,
                          int Ls,
                          const FermionField& in,
                          FermionField& out,
                          const CloverDiagonalField& diagonal,
                          const CloverTriangleField& triangle) {
#if defined(GRID_CUDA) || defined(GRID_HIP)
    MooeeKernel_gpu(Nsite, Ls, in, out, diagonal, triangle);
#else
    MooeeKernel_cpu(Nsite, Ls, in, out, diagonal, triangle);
#endif
  }

  static void Invert(const CloverDiagonalField& diagonal,
                     const CloverTriangleField& triangle,
                     CloverDiagonalField&       diagonalInv,
                     CloverTriangleField&       triangleInv) {
    conformable(diagonal, diagonalInv);
    conformable(triangle, triangleInv);
    conformable(diagonal, triangle);

    diagonalInv.Checkerboard() = diagonal.Checkerboard();
    triangleInv.Checkerboard() = triangle.Checkerboard();

    GridBase* grid = diagonal.Grid();

    long lsites = grid->lSites();

    typedef typename SiteCloverDiagonal::scalar_object scalar_object_diagonal;
    typedef typename SiteCloverTriangle::scalar_object scalar_object_triangle;

    autoView(diagonal_v,    diagonal,    CpuRead);
    autoView(triangle_v,    triangle,    CpuRead);
    autoView(diagonalInv_v, diagonalInv, CpuWrite);
    autoView(triangleInv_v, triangleInv, CpuWrite);

    thread_for(site, lsites, { // NOTE: Not on GPU because of Eigen & (peek/poke)LocalSite
      Eigen::MatrixXcd clover_inv_eigen = Eigen::MatrixXcd::Zero(Ns*Nc, Ns*Nc);
      Eigen::MatrixXcd clover_eigen     = Eigen::MatrixXcd::Zero(Ns*Nc, Ns*Nc);

      scalar_object_diagonal diagonal_tmp     = Zero();
      scalar_object_diagonal diagonal_inv_tmp = Zero();
      scalar_object_triangle triangle_tmp     = Zero();
      scalar_object_triangle triangle_inv_tmp = Zero();

      Coordinate lcoor;
      grid->LocalIndexToLocalCoor(site, lcoor);

      peekLocalSite(diagonal_tmp, diagonal_v, lcoor);
      peekLocalSite(triangle_tmp, triangle_v, lcoor);

      // TODO: can we save time here by inverting the two 6x6 hermitian matrices separately?
      for (long s_row=0;s_row<Ns;s_row++) {
        for (long s_col=0;s_col<Ns;s_col++) {
          if(abs(s_row - s_col) > 1 || s_row + s_col == 3) continue;
          int block       = s_row / Nhs;
          int s_row_block = s_row % Nhs;
          int s_col_block = s_col % Nhs;
          for (long c_row=0;c_row<Nc;c_row++) {
            for (long c_col=0;c_col<Nc;c_col++) {
              int i = s_row_block * Nc + c_row;
              int j = s_col_block * Nc + c_col;
              if(i == j)
                clover_eigen(s_row*Nc+c_row, s_col*Nc+c_col) = static_cast<ComplexD>(TensorRemove(diagonal_tmp()(block)(i)));
              else
                clover_eigen(s_row*Nc+c_row, s_col*Nc+c_col) = static_cast<ComplexD>(TensorRemove(triangle_elem(triangle_tmp, block, i, j)));
            }
          }
        }
      }

      clover_inv_eigen = clover_eigen.inverse();

      for (long s_row=0;s_row<Ns;s_row++) {
        for (long s_col=0;s_col<Ns;s_col++) {
          if(abs(s_row - s_col) > 1 || s_row + s_col == 3) continue;
          int block       = s_row / Nhs;
          int s_row_block = s_row % Nhs;
          int s_col_block = s_col % Nhs;
          for (long c_row=0;c_row<Nc;c_row++) {
            for (long c_col=0;c_col<Nc;c_col++) {
              int i = s_row_block * Nc + c_row;
              int j = s_col_block * Nc + c_col;
              if(i == j)
                diagonal_inv_tmp()(block)(i) = clover_inv_eigen(s_row*Nc+c_row, s_col*Nc+c_col);
              else if(i < j)
                triangle_inv_tmp()(block)(triangle_index(i, j)) = clover_inv_eigen(s_row*Nc+c_row, s_col*Nc+c_col);
              else
                continue;
            }
          }
        }
      }

      pokeLocalSite(diagonal_inv_tmp, diagonalInv_v, lcoor);
      pokeLocalSite(triangle_inv_tmp, triangleInv_v, lcoor);
    });
  }
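
The TODO above asks whether the two 6 x 6 hermitian blocks could be inverted separately instead of inverting one 12 x 12 matrix. Since the spin filter keeps clover_eigen block diagonal by construction, the two are algebraically equivalent; a hedged Eigen sketch (hypothetical helper, not Grid code):

    #include <Eigen/Dense>

    // Hypothetical alternative to the 12x12 inverse above: invert the two 6x6
    // blocks of the block-diagonal clover matrix separately. Same result, and
    // roughly (12/6)^3 / 2 = 4x less inversion work per site.
    Eigen::MatrixXcd invertBlockDiagonal(const Eigen::MatrixXcd& clover) {
      constexpr int Nred = 6;
      Eigen::MatrixXcd inv = Eigen::MatrixXcd::Zero(2 * Nred, 2 * Nred);
      for (int b = 0; b < 2; b++)
        inv.block(b * Nred, b * Nred, Nred, Nred) =
            clover.block(b * Nred, b * Nred, Nred, Nred).inverse();
      return inv;
    }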

  static void ConvertLayout(const CloverField& full,
                            CloverDiagonalField& diagonal,
                            CloverTriangleField& triangle) {
    conformable(full, diagonal);
    conformable(full, triangle);

    diagonal.Checkerboard() = full.Checkerboard();
    triangle.Checkerboard() = full.Checkerboard();

    autoView(full_v,     full,     AcceleratorRead);
    autoView(diagonal_v, diagonal, AcceleratorWrite);
    autoView(triangle_v, triangle, AcceleratorWrite);

    // NOTE: this function cannot be 'private' since nvcc forbids this for kernels
    accelerator_for(ss, full.Grid()->oSites(), 1, {
      for(int s_row = 0; s_row < Ns; s_row++) {
        for(int s_col = 0; s_col < Ns; s_col++) {
          if(abs(s_row - s_col) > 1 || s_row + s_col == 3) continue;
          int block       = s_row / Nhs;
          int s_row_block = s_row % Nhs;
          int s_col_block = s_col % Nhs;
          for(int c_row = 0; c_row < Nc; c_row++) {
            for(int c_col = 0; c_col < Nc; c_col++) {
              int i = s_row_block * Nc + c_row;
              int j = s_col_block * Nc + c_col;
              if(i == j)
                diagonal_v[ss]()(block)(i) = full_v[ss]()(s_row, s_col)(c_row, c_col);
              else if(i < j)
                triangle_v[ss]()(block)(triangle_index(i, j)) = full_v[ss]()(s_row, s_col)(c_row, c_col);
              else
                continue;
            }
          }
        }
      }
    });
  }
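
The spin filter abs(s_row - s_col) > 1 || s_row + s_col == 3 used here (and in Invert above) is just the statement that the clover term is block diagonal in spin. A small plain-C++ sketch, not part of Grid, printing which of the 4 x 4 spin pairs survive:

    #include <cstdio>
    #include <cstdlib>

    // Visualize the spin pairs kept by the block filter in ConvertLayout/Invert:
    // with Ns = 4, spins {0,1} and {2,3} form the two Nhs = 2 chiral blocks.
    int main() {
      const int Ns = 4;
      for (int r = 0; r < Ns; r++) {
        for (int c = 0; c < Ns; c++)
          printf("%c ", (abs(r - c) > 1 || r + c == 3) ? '.' : 'x');
        printf("\n");
      }
      // Output:
      // x x . .
      // x x . .
      // . . x x
      // . . x x
      return 0;
    }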

  static void ConvertLayout(const CloverDiagonalField& diagonal,
                            const CloverTriangleField& triangle,
                            CloverField& full) {
    conformable(full, diagonal);
    conformable(full, triangle);

    full.Checkerboard() = diagonal.Checkerboard();

    full = Zero();

    autoView(diagonal_v, diagonal, AcceleratorRead);
    autoView(triangle_v, triangle, AcceleratorRead);
    autoView(full_v,     full,     AcceleratorWrite);

    // NOTE: this function cannot be 'private' since nvcc forbids this for kernels
    accelerator_for(ss, full.Grid()->oSites(), 1, {
      for(int s_row = 0; s_row < Ns; s_row++) {
        for(int s_col = 0; s_col < Ns; s_col++) {
          if(abs(s_row - s_col) > 1 || s_row + s_col == 3) continue;
          int block       = s_row / Nhs;
          int s_row_block = s_row % Nhs;
          int s_col_block = s_col % Nhs;
          for(int c_row = 0; c_row < Nc; c_row++) {
            for(int c_col = 0; c_col < Nc; c_col++) {
              int i = s_row_block * Nc + c_row;
              int j = s_col_block * Nc + c_col;
              if(i == j)
                full_v[ss]()(s_row, s_col)(c_row, c_col) = diagonal_v[ss]()(block)(i);
              else
                full_v[ss]()(s_row, s_col)(c_row, c_col) = triangle_elem(triangle_v[ss], block, i, j);
            }
          }
        }
      }
    });
  }

  static void ModifyBoundaries(CloverDiagonalField& diagonal, CloverTriangleField& triangle, RealD csw_t, RealD cF, RealD diag_mass) {
    // Checks/grid
    double t0 = usecond();
    conformable(diagonal, triangle);
    GridBase* grid = diagonal.Grid();

    // Determine the boundary coordinates/sites
    double t1 = usecond();
    int t_dir = Nd - 1;
    Lattice<iScalar<vInteger>> t_coor(grid);
    LatticeCoordinate(t_coor, t_dir);
    int T = grid->GlobalDimensions()[t_dir];

    // Set off-diagonal parts at boundary to zero -- OK
    double t2 = usecond();
    CloverTriangleField zeroTriangle(grid);
    zeroTriangle.Checkerboard() = triangle.Checkerboard();
    zeroTriangle = Zero();
    triangle = where(t_coor == 0,   zeroTriangle, triangle);
    triangle = where(t_coor == T-1, zeroTriangle, triangle);

    // Set diagonal to unity (scaled correctly) -- OK
    double t3 = usecond();
    CloverDiagonalField tmp(grid);
    tmp.Checkerboard() = diagonal.Checkerboard();
    tmp      = -1.0 * csw_t + diag_mass;
    diagonal = where(t_coor == 0,   tmp, diagonal);
    diagonal = where(t_coor == T-1, tmp, diagonal);

    // Correct values next to boundary
    double t4 = usecond();
    if(cF != 1.0) {
      tmp = cF - 1.0;
      tmp += diagonal;
      diagonal = where(t_coor == 1,   tmp, diagonal);
      diagonal = where(t_coor == T-2, tmp, diagonal);
    }

    // Report timings
    double t5 = usecond();
#if 0
    std::cout << GridLogMessage << "CompactWilsonCloverHelpers::ModifyBoundaries timings:"
              << " checks = "          << (t1 - t0) / 1e6
              << ", coordinate = "     << (t2 - t1) / 1e6
              << ", off-diag zero = "  << (t3 - t2) / 1e6
              << ", diagonal unity = " << (t4 - t3) / 1e6
              << ", near-boundary = "  << (t5 - t4) / 1e6
              << ", total = "          << (t5 - t0) / 1e6
              << std::endl;
#endif
  }
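
Unpacking the where-clauses above: on the two temporal boundary layers the triangle (off-diagonal) part is zeroed and the clover term collapses to a multiple of the identity, while for c_F != 1 the layers next to the boundary receive a constant shift,

    \[
    A(x)\big|_{t\,\in\,\{0,\,T-1\}} = \left(m_{\rm diag} - c_{sw,t}\right)\mathbb{1},
    \qquad
    A(x)\big|_{t\,\in\,\{1,\,T-2\}} \;\to\; A(x) + (c_F - 1)\,\mathbb{1}.
    \]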

  template<class Field, class Mask>
  static strong_inline void ApplyBoundaryMask(Field& f, const Mask& m) {
    conformable(f, m);
    auto grid = f.Grid();
    const int Nsite = grid->oSites();
    const int Nsimd = grid->Nsimd();
    autoView(f_v, f, AcceleratorWrite);
    autoView(m_v, m, AcceleratorRead);
    // NOTE: this function cannot be 'private' since nvcc forbids this for kernels
    accelerator_for(ss, Nsite, Nsimd, {
      coalescedWrite(f_v[ss], m_v(ss) * f_v(ss));
    });
  }

  template<class MaskField>
  static void SetupMasks(MaskField& full, MaskField& even, MaskField& odd) {
    assert(even.Grid()->_isCheckerBoarded && even.Checkerboard() == Even);
    assert(odd.Grid()->_isCheckerBoarded  && odd.Checkerboard()  == Odd);
    assert(!full.Grid()->_isCheckerBoarded);

    GridBase* grid = full.Grid();
    int t_dir = Nd-1;
    Lattice<iScalar<vInteger>> t_coor(grid);
    LatticeCoordinate(t_coor, t_dir);
    int T = grid->GlobalDimensions()[t_dir];

    MaskField zeroMask(grid); zeroMask = Zero();
    full = 1.0;
    full = where(t_coor == 0,   zeroMask, full);
    full = where(t_coor == T-1, zeroMask, full);

    pickCheckerboard(Even, even, full);
    pickCheckerboard(Odd,  odd,  full);
  }
};

NAMESPACE_END(Grid);
Grid/qcd/action/fermion/WilsonCloverTypes.h
Normal file
@ -0,0 +1,92 @@
/*************************************************************************************

    Grid physics library, www.github.com/paboyle/Grid

    Source file: ./lib/qcd/action/fermion/WilsonCloverTypes.h

    Copyright (C) 2021 - 2022

    Author: Daniel Richtmann <daniel.richtmann@gmail.com>

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License along
    with this program; if not, write to the Free Software Foundation, Inc.,
    51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.

    See the full license in the file "LICENSE" in the top level distribution directory

*************************************************************************************/
/*  END LEGAL */

#pragma once

NAMESPACE_BEGIN(Grid);

template<class Impl>
class WilsonCloverTypes {
public:
  INHERIT_IMPL_TYPES(Impl);

  template <typename vtype> using iImplClover = iScalar<iMatrix<iMatrix<vtype, Impl::Dimension>, Ns>>;

  typedef iImplClover<Simd> SiteClover;

  typedef Lattice<SiteClover> CloverField;
};

template<class Impl>
class CompactWilsonCloverTypes {
public:
  INHERIT_IMPL_TYPES(Impl);

  static_assert(Nd == 4 && Nc == 3 && Ns == 4 && Impl::Dimension == 3, "Wrong dimensions");

  static constexpr int Nred      = Nc * Nhs;        // 6
  static constexpr int Nblock    = Nhs;             // 2
  static constexpr int Ndiagonal = Nred;            // 6
  static constexpr int Ntriangle = (Nred - 1) * Nc; // 15

  template<typename vtype> using iImplCloverDiagonal = iScalar<iVector<iVector<vtype, Ndiagonal>, Nblock>>;
  template<typename vtype> using iImplCloverTriangle = iScalar<iVector<iVector<vtype, Ntriangle>, Nblock>>;

  typedef iImplCloverDiagonal<Simd> SiteCloverDiagonal;
  typedef iImplCloverTriangle<Simd> SiteCloverTriangle;
  typedef iSinglet<Simd>            SiteMask;

  typedef Lattice<SiteCloverDiagonal> CloverDiagonalField;
  typedef Lattice<SiteCloverTriangle> CloverTriangleField;
  typedef Lattice<SiteMask>           MaskField;
};
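
The constants above quantify the compression relative to the full clover term; counting stored words per site (diagonal entries are real for a hermitian block, but kept in the same Simd type here):

    \[
    N_{\rm full} = (N_s N_c)^2 = 144, \qquad
    N_{\rm compact} = N_{\rm block}\,(N_{\rm diagonal} + N_{\rm triangle}) = 2\,(6 + 15) = 42,
    \]

roughly a 3.4x reduction in clover storage and memory traffic.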

#define INHERIT_CLOVER_TYPES(Impl)                                    \
  typedef typename WilsonCloverTypes<Impl>::SiteClover  SiteClover;   \
  typedef typename WilsonCloverTypes<Impl>::CloverField CloverField;

#define INHERIT_COMPACT_CLOVER_TYPES(Impl)                                                  \
  typedef typename CompactWilsonCloverTypes<Impl>::SiteCloverDiagonal  SiteCloverDiagonal;  \
  typedef typename CompactWilsonCloverTypes<Impl>::SiteCloverTriangle  SiteCloverTriangle;  \
  typedef typename CompactWilsonCloverTypes<Impl>::SiteMask            SiteMask;            \
  typedef typename CompactWilsonCloverTypes<Impl>::CloverDiagonalField CloverDiagonalField; \
  typedef typename CompactWilsonCloverTypes<Impl>::CloverTriangleField CloverTriangleField; \
  typedef typename CompactWilsonCloverTypes<Impl>::MaskField           MaskField;           \
  /* ugly duplication but needed inside functionality classes */                            \
  template<typename vtype> using iImplCloverDiagonal =                                      \
    iScalar<iVector<iVector<vtype, CompactWilsonCloverTypes<Impl>::Ndiagonal>, CompactWilsonCloverTypes<Impl>::Nblock>>; \
  template<typename vtype> using iImplCloverTriangle =                                      \
    iScalar<iVector<iVector<vtype, CompactWilsonCloverTypes<Impl>::Ntriangle>, CompactWilsonCloverTypes<Impl>::Nblock>>;

#define INHERIT_COMPACT_CLOVER_SIZES(Impl)                                    \
  static constexpr int Nred      = CompactWilsonCloverTypes<Impl>::Nred;      \
  static constexpr int Nblock    = CompactWilsonCloverTypes<Impl>::Nblock;    \
  static constexpr int Ndiagonal = CompactWilsonCloverTypes<Impl>::Ndiagonal; \
  static constexpr int Ntriangle = CompactWilsonCloverTypes<Impl>::Ntriangle;

NAMESPACE_END(Grid);
@ -173,7 +173,12 @@ public:
 GridCartesian &FourDimGrid,
 GridRedBlackCartesian &FourDimRedBlackGrid,
 double _M5,const ImplParams &p= ImplParams());
 
+void DirichletBlock(std::vector<int> & block){
+  Stencil.DirichletBlock(block);
+  StencilEven.DirichletBlock(block);
+  StencilOdd.DirichletBlock(block);
+}
 // Constructors
 /*
 WilsonFermion5D(int simd,
@ -0,0 +1,363 @@
/*************************************************************************************

    Grid physics library, www.github.com/paboyle/Grid

    Source file: ./lib/qcd/action/fermion/CompactWilsonCloverFermionImplementation.h

    Copyright (C) 2017 - 2022

    Author: paboyle <paboyle@ph.ed.ac.uk>
    Author: Guido Cossu <guido.cossu@ed.ac.uk>
    Author: Daniel Richtmann <daniel.richtmann@gmail.com>

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License along
    with this program; if not, write to the Free Software Foundation, Inc.,
    51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.

    See the full license in the file "LICENSE" in the top level distribution directory

*************************************************************************************/
/*  END LEGAL */

#include <Grid/Grid.h>
#include <Grid/qcd/spin/Dirac.h>
#include <Grid/qcd/action/fermion/CompactWilsonCloverFermion.h>

NAMESPACE_BEGIN(Grid);

template<class Impl>
CompactWilsonCloverFermion<Impl>::CompactWilsonCloverFermion(GaugeField& _Umu,
                                                             GridCartesian& Fgrid,
                                                             GridRedBlackCartesian& Hgrid,
                                                             const RealD _mass,
                                                             const RealD _csw_r,
                                                             const RealD _csw_t,
                                                             const RealD _cF,
                                                             const WilsonAnisotropyCoefficients& clover_anisotropy,
                                                             const ImplParams& impl_p)
  : WilsonBase(_Umu, Fgrid, Hgrid, _mass, impl_p, clover_anisotropy)
  , csw_r(_csw_r)
  , csw_t(_csw_t)
  , cF(_cF)
  , open_boundaries(impl_p.boundary_phases[Nd-1] == 0.0)
  , Diagonal(&Fgrid),        Triangle(&Fgrid)
  , DiagonalEven(&Hgrid),    TriangleEven(&Hgrid)
  , DiagonalOdd(&Hgrid),     TriangleOdd(&Hgrid)
  , DiagonalInv(&Fgrid),     TriangleInv(&Fgrid)
  , DiagonalInvEven(&Hgrid), TriangleInvEven(&Hgrid)
  , DiagonalInvOdd(&Hgrid),  TriangleInvOdd(&Hgrid)
  , Tmp(&Fgrid)
  , BoundaryMask(&Fgrid)
  , BoundaryMaskEven(&Hgrid), BoundaryMaskOdd(&Hgrid)
{
  csw_r *= 0.5;
  csw_t *= 0.5;
  if (clover_anisotropy.isAnisotropic)
    csw_r /= clover_anisotropy.xi_0;

  ImportGauge(_Umu);
  if (open_boundaries)
    CompactHelpers::SetupMasks(this->BoundaryMask, this->BoundaryMaskEven, this->BoundaryMaskOdd);
}
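
In effect the constructor folds the conventional factor 1/2 of the clover term into the couplings once, and applies the anisotropy rescaling to the spatial coupling only:

    \[
    c_{sw,r} \;\leftarrow\; \frac{c_{sw,r}}{2\,\xi_0}
    \quad (\xi_0 = 1 \text{ in the isotropic case}),
    \qquad
    c_{sw,t} \;\leftarrow\; \frac{c_{sw,t}}{2},
    \]

so the fill routines in ImportGauge below multiply by the halved couplings exactly once.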

template<class Impl>
void CompactWilsonCloverFermion<Impl>::Dhop(const FermionField& in, FermionField& out, int dag) {
  WilsonBase::Dhop(in, out, dag);
  if(open_boundaries) ApplyBoundaryMask(out);
}

template<class Impl>
void CompactWilsonCloverFermion<Impl>::DhopOE(const FermionField& in, FermionField& out, int dag) {
  WilsonBase::DhopOE(in, out, dag);
  if(open_boundaries) ApplyBoundaryMask(out);
}

template<class Impl>
void CompactWilsonCloverFermion<Impl>::DhopEO(const FermionField& in, FermionField& out, int dag) {
  WilsonBase::DhopEO(in, out, dag);
  if(open_boundaries) ApplyBoundaryMask(out);
}

template<class Impl>
void CompactWilsonCloverFermion<Impl>::DhopDir(const FermionField& in, FermionField& out, int dir, int disp) {
  WilsonBase::DhopDir(in, out, dir, disp);
  if(this->open_boundaries) ApplyBoundaryMask(out);
}

template<class Impl>
void CompactWilsonCloverFermion<Impl>::DhopDirAll(const FermionField& in, std::vector<FermionField>& out) {
  WilsonBase::DhopDirAll(in, out);
  if(this->open_boundaries) {
    for(auto& o : out) ApplyBoundaryMask(o);
  }
}

template<class Impl>
void CompactWilsonCloverFermion<Impl>::M(const FermionField& in, FermionField& out) {
  out.Checkerboard() = in.Checkerboard();
  WilsonBase::Dhop(in, out, DaggerNo); // call base to save applying bc
  Mooee(in, Tmp);
  axpy(out, 1.0, out, Tmp);
  if(open_boundaries) ApplyBoundaryMask(out);
}

template<class Impl>
void CompactWilsonCloverFermion<Impl>::Mdag(const FermionField& in, FermionField& out) {
  out.Checkerboard() = in.Checkerboard();
  WilsonBase::Dhop(in, out, DaggerYes); // call base to save applying bc
  MooeeDag(in, Tmp);
  axpy(out, 1.0, out, Tmp);
  if(open_boundaries) ApplyBoundaryMask(out);
}

template<class Impl>
void CompactWilsonCloverFermion<Impl>::Meooe(const FermionField& in, FermionField& out) {
  WilsonBase::Meooe(in, out);
  if(open_boundaries) ApplyBoundaryMask(out);
}

template<class Impl>
void CompactWilsonCloverFermion<Impl>::MeooeDag(const FermionField& in, FermionField& out) {
  WilsonBase::MeooeDag(in, out);
  if(open_boundaries) ApplyBoundaryMask(out);
}

template<class Impl>
void CompactWilsonCloverFermion<Impl>::Mooee(const FermionField& in, FermionField& out) {
  if(in.Grid()->_isCheckerBoarded) {
    if(in.Checkerboard() == Odd) {
      MooeeInternal(in, out, DiagonalOdd, TriangleOdd);
    } else {
      MooeeInternal(in, out, DiagonalEven, TriangleEven);
    }
  } else {
    MooeeInternal(in, out, Diagonal, Triangle);
  }
  if(open_boundaries) ApplyBoundaryMask(out);
}

template<class Impl>
void CompactWilsonCloverFermion<Impl>::MooeeDag(const FermionField& in, FermionField& out) {
  Mooee(in, out); // blocks are hermitian
}

template<class Impl>
void CompactWilsonCloverFermion<Impl>::MooeeInv(const FermionField& in, FermionField& out) {
  if(in.Grid()->_isCheckerBoarded) {
    if(in.Checkerboard() == Odd) {
      MooeeInternal(in, out, DiagonalInvOdd, TriangleInvOdd);
    } else {
      MooeeInternal(in, out, DiagonalInvEven, TriangleInvEven);
    }
  } else {
    MooeeInternal(in, out, DiagonalInv, TriangleInv);
  }
  if(open_boundaries) ApplyBoundaryMask(out);
}

template<class Impl>
void CompactWilsonCloverFermion<Impl>::MooeeInvDag(const FermionField& in, FermionField& out) {
  MooeeInv(in, out); // blocks are hermitian
}

template<class Impl>
void CompactWilsonCloverFermion<Impl>::Mdir(const FermionField& in, FermionField& out, int dir, int disp) {
  DhopDir(in, out, dir, disp);
}

template<class Impl>
void CompactWilsonCloverFermion<Impl>::MdirAll(const FermionField& in, std::vector<FermionField>& out) {
  DhopDirAll(in, out);
}

template<class Impl>
void CompactWilsonCloverFermion<Impl>::MDeriv(GaugeField& force, const FermionField& X, const FermionField& Y, int dag) {
  assert(!open_boundaries); // TODO check for changes required for open bc

  // NOTE: code copied from original clover term
  conformable(X.Grid(), Y.Grid());
  conformable(X.Grid(), force.Grid());
  GaugeLinkField force_mu(force.Grid()), lambda(force.Grid());
  GaugeField clover_force(force.Grid());
  PropagatorField Lambda(force.Grid());

  // Guido: Here we are hitting some performance issues:
  // need to extract the components of the DoubledGaugeField
  // for each call
  // Possible solution
  // Create a vector object to store them? (cons: wasting space)
  std::vector<GaugeLinkField> U(Nd, this->Umu.Grid());

  Impl::extractLinkField(U, this->Umu);

  force = Zero();
  // Derivative of the Wilson hopping term
  this->DhopDeriv(force, X, Y, dag);

  ///////////////////////////////////////////////////////////
  // Clover term derivative
  ///////////////////////////////////////////////////////////
  Impl::outerProductImpl(Lambda, X, Y);
  //std::cout << "Lambda:" << Lambda << std::endl;

  Gamma::Algebra sigma[] = {
      Gamma::Algebra::SigmaXY,
      Gamma::Algebra::SigmaXZ,
      Gamma::Algebra::SigmaXT,
      Gamma::Algebra::MinusSigmaXY,
      Gamma::Algebra::SigmaYZ,
      Gamma::Algebra::SigmaYT,
      Gamma::Algebra::MinusSigmaXZ,
      Gamma::Algebra::MinusSigmaYZ,
      Gamma::Algebra::SigmaZT,
      Gamma::Algebra::MinusSigmaXT,
      Gamma::Algebra::MinusSigmaYT,
      Gamma::Algebra::MinusSigmaZT};

  /*
    sigma_{\mu \nu}=
    | 0          sigma[0]   sigma[1]   sigma[2] |
    | sigma[3]   0          sigma[4]   sigma[5] |
    | sigma[6]   sigma[7]   0          sigma[8] |
    | sigma[9]   sigma[10]  sigma[11]  0        |
  */

  int count = 0;
  clover_force = Zero();
  for (int mu = 0; mu < 4; mu++)
  {
    force_mu = Zero();
    for (int nu = 0; nu < 4; nu++)
    {
      if (mu == nu)
        continue;

      RealD factor;
      if (nu == 4 || mu == 4) // NOTE: mu, nu only take values 0..3 in these loops, so this comparison never succeeds and factor is always 2.0 * csw_r
      {
        factor = 2.0 * csw_t;
      }
      else
      {
        factor = 2.0 * csw_r;
      }
      PropagatorField Slambda = Gamma(sigma[count]) * Lambda; // sigma checked
      Impl::TraceSpinImpl(lambda, Slambda);                   // traceSpin ok
      force_mu -= factor*Helpers::Cmunu(U, lambda, mu, nu);   // checked
      count++;
    }

    pokeLorentz(clover_force, U[mu] * force_mu, mu);
  }
  //clover_force *= csw;
  force += clover_force;
}

template<class Impl>
void CompactWilsonCloverFermion<Impl>::MooDeriv(GaugeField& mat, const FermionField& U, const FermionField& V, int dag) {
  assert(0);
}

template<class Impl>
void CompactWilsonCloverFermion<Impl>::MeeDeriv(GaugeField& mat, const FermionField& U, const FermionField& V, int dag) {
  assert(0);
}

template<class Impl>
void CompactWilsonCloverFermion<Impl>::MooeeInternal(const FermionField& in,
                                                     FermionField& out,
                                                     const CloverDiagonalField& diagonal,
                                                     const CloverTriangleField& triangle) {
  assert(in.Checkerboard() == Odd || in.Checkerboard() == Even);
  out.Checkerboard() = in.Checkerboard();
  conformable(in, out);
  conformable(in, diagonal);
  conformable(in, triangle);

  CompactHelpers::MooeeKernel(diagonal.oSites(), 1, in, out, diagonal, triangle);
}

template<class Impl>
void CompactWilsonCloverFermion<Impl>::ImportGauge(const GaugeField& _Umu) {
  // NOTE: parts copied from original implementation

  // Import gauge into base class
  double t0 = usecond();
  WilsonBase::ImportGauge(_Umu); // NOTE: called here and in wilson constructor -> performed twice, but can't avoid that

  // Initialize temporary variables
  double t1 = usecond();
  conformable(_Umu.Grid(), this->GaugeGrid());
  GridBase* grid = _Umu.Grid();
  typename Impl::GaugeLinkField Bx(grid), By(grid), Bz(grid), Ex(grid), Ey(grid), Ez(grid);
  CloverField TmpOriginal(grid);

  // Compute the field strength terms mu>nu
  double t2 = usecond();
  WilsonLoops<Impl>::FieldStrength(Bx, _Umu, Zdir, Ydir);
  WilsonLoops<Impl>::FieldStrength(By, _Umu, Zdir, Xdir);
  WilsonLoops<Impl>::FieldStrength(Bz, _Umu, Ydir, Xdir);
  WilsonLoops<Impl>::FieldStrength(Ex, _Umu, Tdir, Xdir);
  WilsonLoops<Impl>::FieldStrength(Ey, _Umu, Tdir, Ydir);
  WilsonLoops<Impl>::FieldStrength(Ez, _Umu, Tdir, Zdir);

  // Compute the Clover Operator acting on Colour and Spin
  // multiply here by the clover coefficients for the anisotropy
  double t3 = usecond();
  TmpOriginal  = Helpers::fillCloverYZ(Bx) * csw_r;
  TmpOriginal += Helpers::fillCloverXZ(By) * csw_r;
  TmpOriginal += Helpers::fillCloverXY(Bz) * csw_r;
  TmpOriginal += Helpers::fillCloverXT(Ex) * csw_t;
  TmpOriginal += Helpers::fillCloverYT(Ey) * csw_t;
  TmpOriginal += Helpers::fillCloverZT(Ez) * csw_t;
  TmpOriginal += this->diag_mass;

  // Convert the data layout of the clover term
  double t4 = usecond();
  CompactHelpers::ConvertLayout(TmpOriginal, Diagonal, Triangle);

  // Possibly modify the boundary values
  double t5 = usecond();
  if(open_boundaries) CompactHelpers::ModifyBoundaries(Diagonal, Triangle, csw_t, cF, this->diag_mass);

  // Invert the clover term in the improved layout
  double t6 = usecond();
  CompactHelpers::Invert(Diagonal, Triangle, DiagonalInv, TriangleInv);

  // Fill the remaining clover fields
  double t7 = usecond();
  pickCheckerboard(Even, DiagonalEven,    Diagonal);
  pickCheckerboard(Even, TriangleEven,    Triangle);
  pickCheckerboard(Odd,  DiagonalOdd,     Diagonal);
  pickCheckerboard(Odd,  TriangleOdd,     Triangle);
  pickCheckerboard(Even, DiagonalInvEven, DiagonalInv);
  pickCheckerboard(Even, TriangleInvEven, TriangleInv);
  pickCheckerboard(Odd,  DiagonalInvOdd,  DiagonalInv);
  pickCheckerboard(Odd,  TriangleInvOdd,  TriangleInv);

  // Report timings
  double t8 = usecond();
#if 0
  std::cout << GridLogMessage << "CompactWilsonCloverFermion::ImportGauge timings:"
            << " WilsonFermion::Importgauge = " << (t1 - t0) / 1e6
            << ", allocations = "               << (t2 - t1) / 1e6
            << ", field strength = "            << (t3 - t2) / 1e6
            << ", fill clover = "               << (t4 - t3) / 1e6
            << ", convert = "                   << (t5 - t4) / 1e6
            << ", boundaries = "                << (t6 - t5) / 1e6
            << ", inversions = "                << (t7 - t6) / 1e6
            << ", pick cbs = "                  << (t8 - t7) / 1e6
            << ", total = "                     << (t8 - t0) / 1e6
            << std::endl;
#endif
}
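
Reading the steps above together, and writing Sigma_{mu nu} for the spin structures encoded by the fill routines, ImportGauge assembles

    \[
    A(x) = m_{\rm diag}\,\mathbb{1}
         + c_{sw,r}\left(\Sigma_{yz}B_x + \Sigma_{xz}B_y + \Sigma_{xy}B_z\right)
         + c_{sw,t}\left(\Sigma_{xt}E_x + \Sigma_{yt}E_y + \Sigma_{zt}E_z\right),
    \]

then converts A to the diagonal-plus-triangle layout, applies the boundary modifications when open boundaries are enabled, inverts per site, and picks out the checkerboards.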

NAMESPACE_END(Grid);
@ -2,12 +2,13 @@
 
 Grid physics library, www.github.com/paboyle/Grid
 
-Source file: ./lib/qcd/action/fermion/WilsonCloverFermion.cc
+Source file: ./lib/qcd/action/fermion/WilsonCloverFermionImplementation.h
 
-Copyright (C) 2017
+Copyright (C) 2017 - 2022
 
 Author: paboyle <paboyle@ph.ed.ac.uk>
 Author: Guido Cossu <guido.cossu@ed.ac.uk>
+Author: Daniel Richtmann <daniel.richtmann@gmail.com>
 
 This program is free software; you can redistribute it and/or modify
 it under the terms of the GNU General Public License as published by
@ -33,6 +34,45 @@
 
 NAMESPACE_BEGIN(Grid);
 
+template<class Impl>
+WilsonCloverFermion<Impl>::WilsonCloverFermion(GaugeField& _Umu,
+                                               GridCartesian& Fgrid,
+                                               GridRedBlackCartesian& Hgrid,
+                                               const RealD _mass,
+                                               const RealD _csw_r,
+                                               const RealD _csw_t,
+                                               const WilsonAnisotropyCoefficients& clover_anisotropy,
+                                               const ImplParams& impl_p)
+  : WilsonFermion<Impl>(_Umu, Fgrid, Hgrid, _mass, impl_p, clover_anisotropy)
+  , CloverTerm(&Fgrid)
+  , CloverTermInv(&Fgrid)
+  , CloverTermEven(&Hgrid)
+  , CloverTermOdd(&Hgrid)
+  , CloverTermInvEven(&Hgrid)
+  , CloverTermInvOdd(&Hgrid)
+  , CloverTermDagEven(&Hgrid)
+  , CloverTermDagOdd(&Hgrid)
+  , CloverTermInvDagEven(&Hgrid)
+  , CloverTermInvDagOdd(&Hgrid) {
+  assert(Nd == 4); // require 4 dimensions
+
+  if(clover_anisotropy.isAnisotropic) {
+    csw_r     = _csw_r * 0.5 / clover_anisotropy.xi_0;
+    diag_mass = _mass + 1.0 + (Nd - 1) * (clover_anisotropy.nu / clover_anisotropy.xi_0);
+  } else {
+    csw_r     = _csw_r * 0.5;
+    diag_mass = 4.0 + _mass;
+  }
+  csw_t = _csw_t * 0.5;
+
+  if(csw_r == 0)
+    std::cout << GridLogWarning << "Initializing WilsonCloverFermion with csw_r = 0" << std::endl;
+  if(csw_t == 0)
+    std::cout << GridLogWarning << "Initializing WilsonCloverFermion with csw_t = 0" << std::endl;
+
+  ImportGauge(_Umu);
+}
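
The diagonal mass set in this constructor is the usual Wilson normalization, generalized for anisotropy, with the clover couplings halved once up front:

    \[
    m_{\rm diag} =
    \begin{cases}
      m + 1 + (N_d - 1)\,\dfrac{\nu}{\xi_0} & \text{anisotropic,} \\[4pt]
      m + 4 & \text{isotropic } (N_d = 4),
    \end{cases}
    \qquad
    c_{sw,r} = \frac{\hat c_{sw,r}}{2\,\xi_0}, \quad
    c_{sw,t} = \frac{\hat c_{sw,t}}{2}.
    \]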
 
 // *NOT* EO
 template <class Impl>
 void WilsonCloverFermion<Impl>::M(const FermionField &in, FermionField &out)
@ -67,10 +107,13 @@ void WilsonCloverFermion<Impl>::Mdag(const FermionField &in, FermionField &out)
 template <class Impl>
 void WilsonCloverFermion<Impl>::ImportGauge(const GaugeField &_Umu)
 {
+double t0 = usecond();
 WilsonFermion<Impl>::ImportGauge(_Umu);
+double t1 = usecond();
 GridBase *grid = _Umu.Grid();
 typename Impl::GaugeLinkField Bx(grid), By(grid), Bz(grid), Ex(grid), Ey(grid), Ez(grid);
 
+double t2 = usecond();
 // Compute the field strength terms mu>nu
 WilsonLoops<Impl>::FieldStrength(Bx, _Umu, Zdir, Ydir);
 WilsonLoops<Impl>::FieldStrength(By, _Umu, Zdir, Xdir);
@ -79,19 +122,22 @@ void WilsonCloverFermion<Impl>::ImportGauge(const GaugeField &_Umu)
 WilsonLoops<Impl>::FieldStrength(Ey, _Umu, Tdir, Ydir);
 WilsonLoops<Impl>::FieldStrength(Ez, _Umu, Tdir, Zdir);
 
+double t3 = usecond();
 // Compute the Clover Operator acting on Colour and Spin
 // multiply here by the clover coefficients for the anisotropy
-CloverTerm  = fillCloverYZ(Bx) * csw_r;
+CloverTerm  = Helpers::fillCloverYZ(Bx) * csw_r;
-CloverTerm += fillCloverXZ(By) * csw_r;
+CloverTerm += Helpers::fillCloverXZ(By) * csw_r;
-CloverTerm += fillCloverXY(Bz) * csw_r;
+CloverTerm += Helpers::fillCloverXY(Bz) * csw_r;
-CloverTerm += fillCloverXT(Ex) * csw_t;
+CloverTerm += Helpers::fillCloverXT(Ex) * csw_t;
-CloverTerm += fillCloverYT(Ey) * csw_t;
+CloverTerm += Helpers::fillCloverYT(Ey) * csw_t;
-CloverTerm += fillCloverZT(Ez) * csw_t;
+CloverTerm += Helpers::fillCloverZT(Ez) * csw_t;
 CloverTerm += diag_mass;
 
+double t4 = usecond();
 int lvol = _Umu.Grid()->lSites();
 int DimRep = Impl::Dimension;
 
+double t5 = usecond();
 {
 autoView(CTv,CloverTerm,CpuRead);
 autoView(CTIv,CloverTermInv,CpuWrite);
@ -100,7 +146,7 @@ void WilsonCloverFermion<Impl>::ImportGauge(const GaugeField &_Umu)
 grid->LocalIndexToLocalCoor(site, lcoor);
 Eigen::MatrixXcd EigenCloverOp = Eigen::MatrixXcd::Zero(Ns * DimRep, Ns * DimRep);
 Eigen::MatrixXcd EigenInvCloverOp = Eigen::MatrixXcd::Zero(Ns * DimRep, Ns * DimRep);
-typename SiteCloverType::scalar_object Qx = Zero(), Qxinv = Zero();
+typename SiteClover::scalar_object Qx = Zero(), Qxinv = Zero();
 peekLocalSite(Qx, CTv, lcoor);
 //if (csw!=0){
 for (int j = 0; j < Ns; j++)
@ -125,6 +171,7 @@ void WilsonCloverFermion<Impl>::ImportGauge(const GaugeField &_Umu)
 });
 }
+
+double t6 = usecond();
 // Separate the even and odd parts
 pickCheckerboard(Even, CloverTermEven, CloverTerm);
 pickCheckerboard(Odd, CloverTermOdd, CloverTerm);
@ -137,6 +184,20 @@ void WilsonCloverFermion<Impl>::ImportGauge(const GaugeField &_Umu)
 
 pickCheckerboard(Even, CloverTermInvDagEven, adj(CloverTermInv));
 pickCheckerboard(Odd, CloverTermInvDagOdd, adj(CloverTermInv));
+double t7 = usecond();
+
+#if 0
+std::cout << GridLogMessage << "WilsonCloverFermion::ImportGauge timings:"
+          << " WilsonFermion::Importgauge = " << (t1 - t0) / 1e6
+          << ", allocations = "               << (t2 - t1) / 1e6
+          << ", field strength = "            << (t3 - t2) / 1e6
+          << ", fill clover = "               << (t4 - t3) / 1e6
+          << ", misc = "                      << (t5 - t4) / 1e6
+          << ", inversions = "                << (t6 - t5) / 1e6
+          << ", pick cbs = "                  << (t7 - t6) / 1e6
+          << ", total = "                     << (t7 - t0) / 1e6
+          << std::endl;
+#endif
 }
 
 template <class Impl>
@ -167,7 +228,7 @@ template <class Impl>
 void WilsonCloverFermion<Impl>::MooeeInternal(const FermionField &in, FermionField &out, int dag, int inv)
 {
 out.Checkerboard() = in.Checkerboard();
-CloverFieldType *Clover;
+CloverField *Clover;
 assert(in.Checkerboard() == Odd || in.Checkerboard() == Even);
 
 if (dag)
@ -182,12 +243,12 @@ void WilsonCloverFermion<Impl>::MooeeInternal(const FermionField &in, FermionFie
 {
 Clover = (inv) ? &CloverTermInvDagEven : &CloverTermDagEven;
 }
-out = *Clover * in;
+Helpers::multCloverField(out, *Clover, in);
 }
 else
 {
 Clover = (inv) ? &CloverTermInv : &CloverTerm;
-out = adj(*Clover) * in;
+Helpers::multCloverField(out, *Clover, in); // don't bother with adj, hermitian anyway
 }
 }
 else
@ -205,18 +266,98 @@ void WilsonCloverFermion<Impl>::MooeeInternal(const FermionField &in, FermionFie
 // std::cout << "Calling clover term Even" << std::endl;
 Clover = (inv) ? &CloverTermInvEven : &CloverTermEven;
 }
-out = *Clover * in;
+Helpers::multCloverField(out, *Clover, in);
 // std::cout << GridLogMessage << "*Clover.Checkerboard() " << (*Clover).Checkerboard() << std::endl;
|
||||||
}
|
}
|
||||||
else
|
else
|
||||||
{
|
{
|
||||||
Clover = (inv) ? &CloverTermInv : &CloverTerm;
|
Clover = (inv) ? &CloverTermInv : &CloverTerm;
|
||||||
out = *Clover * in;
|
Helpers::multCloverField(out, *Clover, in);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
} // MooeeInternal
|
} // MooeeInternal
|
||||||
|
|
||||||
|
// Derivative parts unpreconditioned pseudofermions
|
||||||
|
template <class Impl>
|
||||||
|
void WilsonCloverFermion<Impl>::MDeriv(GaugeField &force, const FermionField &X, const FermionField &Y, int dag)
|
||||||
|
{
|
||||||
|
conformable(X.Grid(), Y.Grid());
|
||||||
|
conformable(X.Grid(), force.Grid());
|
||||||
|
GaugeLinkField force_mu(force.Grid()), lambda(force.Grid());
|
||||||
|
GaugeField clover_force(force.Grid());
|
||||||
|
PropagatorField Lambda(force.Grid());
|
||||||
|
|
||||||
|
// Guido: Here we are hitting some performance issues:
|
||||||
|
// need to extract the components of the DoubledGaugeField
|
||||||
|
// for each call
|
||||||
|
// Possible solution
|
||||||
|
// Create a vector object to store them? (cons: wasting space)
|
||||||
|
std::vector<GaugeLinkField> U(Nd, this->Umu.Grid());
|
||||||
|
|
||||||
|
Impl::extractLinkField(U, this->Umu);
|
||||||
|
|
||||||
|
force = Zero();
|
||||||
|
// Derivative of the Wilson hopping term
|
||||||
|
this->DhopDeriv(force, X, Y, dag);
|
||||||
|
|
||||||
|
///////////////////////////////////////////////////////////
|
||||||
|
// Clover term derivative
|
||||||
|
///////////////////////////////////////////////////////////
|
||||||
|
Impl::outerProductImpl(Lambda, X, Y);
|
||||||
|
//std::cout << "Lambda:" << Lambda << std::endl;
|
||||||
|
|
||||||
|
Gamma::Algebra sigma[] = {
|
||||||
|
Gamma::Algebra::SigmaXY,
|
||||||
|
Gamma::Algebra::SigmaXZ,
|
||||||
|
Gamma::Algebra::SigmaXT,
|
||||||
|
Gamma::Algebra::MinusSigmaXY,
|
||||||
|
Gamma::Algebra::SigmaYZ,
|
||||||
|
Gamma::Algebra::SigmaYT,
|
||||||
|
Gamma::Algebra::MinusSigmaXZ,
|
||||||
|
Gamma::Algebra::MinusSigmaYZ,
|
||||||
|
Gamma::Algebra::SigmaZT,
|
||||||
|
Gamma::Algebra::MinusSigmaXT,
|
||||||
|
Gamma::Algebra::MinusSigmaYT,
|
||||||
|
Gamma::Algebra::MinusSigmaZT};
|
||||||
|
|
||||||
|
/*
|
||||||
|
sigma_{\mu \nu}=
|
||||||
|
| 0 sigma[0] sigma[1] sigma[2] |
|
||||||
|
| sigma[3] 0 sigma[4] sigma[5] |
|
||||||
|
| sigma[6] sigma[7] 0 sigma[8] |
|
||||||
|
| sigma[9] sigma[10] sigma[11] 0 |
|
||||||
|
*/
|
||||||
|
|
||||||
|
int count = 0;
|
||||||
|
clover_force = Zero();
|
||||||
|
for (int mu = 0; mu < 4; mu++)
|
||||||
|
{
|
||||||
|
force_mu = Zero();
|
||||||
|
for (int nu = 0; nu < 4; nu++)
|
||||||
|
{
|
||||||
|
if (mu == nu)
|
||||||
|
continue;
|
||||||
|
|
||||||
|
RealD factor;
|
||||||
|
if (nu == 4 || mu == 4)
|
||||||
|
{
|
||||||
|
factor = 2.0 * csw_t;
|
||||||
|
}
|
||||||
|
else
|
||||||
|
{
|
||||||
|
factor = 2.0 * csw_r;
|
||||||
|
}
|
||||||
|
PropagatorField Slambda = Gamma(sigma[count]) * Lambda; // sigma checked
|
||||||
|
Impl::TraceSpinImpl(lambda, Slambda); // traceSpin ok
|
||||||
|
force_mu -= factor*Helpers::Cmunu(U, lambda, mu, nu); // checked
|
||||||
|
count++;
|
||||||
|
}
|
||||||
|
|
||||||
|
pokeLorentz(clover_force, U[mu] * force_mu, mu);
|
||||||
|
}
|
||||||
|
//clover_force *= csw;
|
||||||
|
force += clover_force;
|
||||||
|
}
|
||||||
|
|
||||||
// Derivative parts
|
// Derivative parts
|
||||||
template <class Impl>
|
template <class Impl>
|
||||||
|
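The count bookkeeping in MDeriv above depends on sigma[] being laid out row by row over (mu,nu) with the diagonal skipped, exactly as the matrix comment shows. A minimal standalone check of that mapping, with sigma_index a hypothetical helper written only for this sketch:

    // Verifies that walking (mu,nu) row by row, skipping mu==nu, visits
    // sigma[] in order: index = mu*3 + nu - (nu > mu ? 1 : 0).
    #include <cassert>

    int sigma_index(int mu, int nu) { // hypothetical, not part of Grid
      assert(mu != nu);
      return mu * 3 + nu - (nu > mu ? 1 : 0);
    }

    int main() {
      int count = 0;
      for (int mu = 0; mu < 4; mu++)
        for (int nu = 0; nu < 4; nu++) {
          if (mu == nu) continue;
          assert(sigma_index(mu, nu) == count); // matches sigma[count] use above
          count++;
        }
      return 0; // all 12 off-diagonal entries agree
    }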
@@ -0,0 +1,41 @@
+/*************************************************************************************
+
+    Grid physics library, www.github.com/paboyle/Grid
+
+    Source file: ./lib/qcd/action/fermion/instantiation/CompactWilsonCloverFermionInstantiation.cc.master
+
+    Copyright (C) 2017 - 2022
+
+    Author: paboyle <paboyle@ph.ed.ac.uk>
+    Author: Guido Cossu <guido.cossu@ed.ac.uk>
+    Author: Daniel Richtmann <daniel.richtmann@gmail.com>
+
+    This program is free software; you can redistribute it and/or modify
+    it under the terms of the GNU General Public License as published by
+    the Free Software Foundation; either version 2 of the License, or
+    (at your option) any later version.
+
+    This program is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License along
+    with this program; if not, write to the Free Software Foundation, Inc.,
+    51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+    See the full license in the file "LICENSE" in the top level distribution directory
+*************************************************************************************/
+/*  END LEGAL */
+
+#include <Grid/Grid.h>
+#include <Grid/qcd/spin/Dirac.h>
+#include <Grid/qcd/action/fermion/CompactWilsonCloverFermion.h>
+#include <Grid/qcd/action/fermion/implementation/CompactWilsonCloverFermionImplementation.h>
+
+NAMESPACE_BEGIN(Grid);
+
+#include "impl.h"
+template class CompactWilsonCloverFermion<IMPLEMENTATION>;
+
+NAMESPACE_END(Grid);
@@ -0,0 +1 @@
+../CompactWilsonCloverFermionInstantiation.cc.master
@@ -0,0 +1 @@
+../CompactWilsonCloverFermionInstantiation.cc.master
@@ -40,7 +40,7 @@ EOF
 
 done
 
-CC_LIST="WilsonCloverFermionInstantiation WilsonFermionInstantiation WilsonKernelsInstantiation WilsonTMFermionInstantiation"
+CC_LIST="WilsonCloverFermionInstantiation CompactWilsonCloverFermionInstantiation WilsonFermionInstantiation WilsonKernelsInstantiation WilsonTMFermionInstantiation"
 
 for impl in $WILSON_IMPL_LIST
 do
@@ -131,8 +131,11 @@ class CartesianStencilAccelerator {
   int _checkerboard;
   int _npoints; // Move to template param?
   int _osites;
+  int _dirichlet;
   StencilVector _directions;
   StencilVector _distances;
+  StencilVector _comms_send;
+  StencilVector _comms_recv;
   StencilVector _comm_buf_size;
   StencilVector _permute_type;
   StencilVector same_node;
@@ -226,6 +229,8 @@ public:
     void * recv_buf;
     Integer to_rank;
     Integer from_rank;
+    Integer do_send;
+    Integer do_recv;
     Integer bytes;
   };
   struct Merge {
@@ -240,7 +245,20 @@ public:
     cobj * mpi_p;
     Integer buffer_size;
   };
+  struct CopyReceiveBuffer {
+    void * from_p;
+    void * to_p;
+    Integer bytes;
+  };
+  struct CachedTransfer {
+    Integer direction;
+    Integer OrthogPlane;
+    Integer DestProc;
+    Integer bytes;
+    Integer lane;
+    Integer cb;
+    void *recv_buf;
+  };
 
 protected:
   GridBase * _grid;
@@ -271,7 +289,8 @@ public:
   std::vector<Merge>      MergersSHM;
   std::vector<Decompress> Decompressions;
   std::vector<Decompress> DecompressionsSHM;
+  std::vector<CopyReceiveBuffer> CopyReceiveBuffers ;
+  std::vector<CachedTransfer>    CachedTransfers;
   ///////////////////////////////////////////////////////////
   // Unified Comms buffers for all directions
   ///////////////////////////////////////////////////////////
@@ -284,29 +303,6 @@ public:
   int u_comm_offset;
   int _unified_buffer_size;
 
-  /////////////////////////////////////////
-  // Timing info; ugly; possibly temporary
-  /////////////////////////////////////////
-  double commtime;
-  double mpi3synctime;
-  double mpi3synctime_g;
-  double shmmergetime;
-  double gathertime;
-  double gathermtime;
-  double halogtime;
-  double mergetime;
-  double decompresstime;
-  double comms_bytes;
-  double shm_bytes;
-  double splicetime;
-  double nosplicetime;
-  double calls;
-  std::vector<double> comm_bytes_thr;
-  std::vector<double> shm_bytes_thr;
-  std::vector<double> comm_time_thr;
-  std::vector<double> comm_enter_thr;
-  std::vector<double> comm_leave_thr;
-
   ////////////////////////////////////////
   // Stencil query
   ////////////////////////////////////////
@@ -333,11 +329,12 @@ public:
   //////////////////////////////////////////
   // Comms packet queue for asynch thread
   // Use OpenMP Tasks for cleaner ???
+  // must be called *inside* parallel region
   //////////////////////////////////////////
+  /*
   void CommunicateThreaded()
   {
 #ifdef GRID_OMP
-    // must be called in parallel region
     int mythread = omp_get_thread_num();
     int nthreads = CartesianCommunicator::nCommThreads;
 #else
@@ -346,65 +343,29 @@ public:
 #endif
     if (nthreads == -1) nthreads = 1;
     if (mythread < nthreads) {
-      comm_enter_thr[mythread] = usecond();
       for (int i = mythread; i < Packets.size(); i += nthreads) {
        uint64_t bytes = _grid->StencilSendToRecvFrom(Packets[i].send_buf,
                                                      Packets[i].to_rank,
                                                      Packets[i].recv_buf,
                                                      Packets[i].from_rank,
                                                      Packets[i].bytes,i);
-        comm_bytes_thr[mythread] += bytes;
-        shm_bytes_thr[mythread]  += 2*Packets[i].bytes-bytes; // Send + Recv.
-
      }
-      comm_leave_thr[mythread]= usecond();
-      comm_time_thr[mythread] += comm_leave_thr[mythread] - comm_enter_thr[mythread];
    }
  }
+  */
-  void CollateThreads(void)
-  {
-    int nthreads = CartesianCommunicator::nCommThreads;
-    double first=0.0;
-    double last =0.0;
-
-    for(int t=0;t<nthreads;t++) {
-
-      double t0 = comm_enter_thr[t];
-      double t1 = comm_leave_thr[t];
-      comms_bytes+=comm_bytes_thr[t];
-      shm_bytes  +=shm_bytes_thr[t];
-
-      comm_enter_thr[t] = 0.0;
-      comm_leave_thr[t] = 0.0;
-      comm_time_thr[t]  = 0.0;
-      comm_bytes_thr[t] = 0;
-      shm_bytes_thr[t]  = 0;
-
-      if ( first == 0.0 ) first = t0; // first is t0
-      if ( (t0 > 0.0) && ( t0 < first ) ) first = t0; // min time seen
-
-      if ( t1 > last ) last = t1; // max time seen
-
-    }
-    commtime+= last-first;
-  }
   ////////////////////////////////////////////////////////////////////////
   // Non blocking send and receive. Necessarily parallel.
   ////////////////////////////////////////////////////////////////////////
   void CommunicateBegin(std::vector<std::vector<CommsRequest_t> > &reqs)
   {
     reqs.resize(Packets.size());
-    commtime-=usecond();
     for(int i=0;i<Packets.size();i++){
-      uint64_t bytes=_grid->StencilSendToRecvFromBegin(reqs[i],
+      _grid->StencilSendToRecvFromBegin(reqs[i],
                                         Packets[i].send_buf,
-                                        Packets[i].to_rank,
+                                        Packets[i].to_rank,Packets[i].do_send,
                                         Packets[i].recv_buf,
-                                        Packets[i].from_rank,
+                                        Packets[i].from_rank,Packets[i].do_recv,
                                         Packets[i].bytes,i);
-      comms_bytes+=bytes;
-      shm_bytes  +=2*Packets[i].bytes-bytes;
     }
   }
 
@@ -413,7 +374,6 @@ public:
     for(int i=0;i<Packets.size();i++){
       _grid->StencilSendToRecvFromComplete(reqs[i],i);
     }
-    commtime+=usecond();
   }
   ////////////////////////////////////////////////////////////////////////
   // Blocking send and receive. Either sequential or parallel.
@@ -421,28 +381,27 @@ public:
   void Communicate(void)
   {
     if ( CartesianCommunicator::CommunicatorPolicy == CartesianCommunicator::CommunicatorPolicySequential ){
-      thread_region {
-        // must be called in parallel region
-        int mythread  = thread_num();
-        int maxthreads= thread_max();
-        int nthreads = CartesianCommunicator::nCommThreads;
-        assert(nthreads <= maxthreads);
-        if (nthreads == -1) nthreads = 1;
-        if (mythread < nthreads) {
-          for (int i = mythread; i < Packets.size(); i += nthreads) {
-            double start = usecond();
-            uint64_t bytes= _grid->StencilSendToRecvFrom(Packets[i].send_buf,
-                                                         Packets[i].to_rank,
-                                                         Packets[i].recv_buf,
-                                                         Packets[i].from_rank,
-                                                         Packets[i].bytes,i);
-            comm_bytes_thr[mythread] += bytes;
-            shm_bytes_thr[mythread]  += Packets[i].bytes - bytes;
-            comm_time_thr[mythread]  += usecond() - start;
-          }
-        }
-      }
-    } else { // Concurrent and non-threaded asynch calls to MPI
+      /////////////////////////////////////////////////////////
+      // several way threaded on different communicators.
+      // Cannot combine with Dirichlet operators
+      // This scheme is needed on Intel Omnipath for best performance
+      // Deprecate once there are very few omnipath clusters
+      /////////////////////////////////////////////////////////
+      int nthreads = CartesianCommunicator::nCommThreads;
+      int old      = GridThread::GetThreads();
+      GridThread::SetThreads(nthreads);
+      thread_for(i,Packets.size(),{
+        _grid->StencilSendToRecvFrom(Packets[i].send_buf,
+                                     Packets[i].to_rank,Packets[i].do_send,
+                                     Packets[i].recv_buf,
+                                     Packets[i].from_rank,Packets[i].do_recv,
+                                     Packets[i].bytes,i);
+      });
+      GridThread::SetThreads(old);
+    } else {
+      /////////////////////////////////////////////////////////
+      // Concurrent and non-threaded asynch calls to MPI
+      /////////////////////////////////////////////////////////
       std::vector<std::vector<CommsRequest_t> > reqs;
       this->CommunicateBegin(reqs);
       this->CommunicateComplete(reqs);
@@ -484,31 +443,23 @@ public:
     sshift[1] = _grid->CheckerBoardShiftForCB(this->_checkerboard,dimension,shift,Odd);
     if ( sshift[0] == sshift[1] ) {
       if (splice_dim) {
-        splicetime-=usecond();
-        auto tmp = GatherSimd(source,dimension,shift,0x3,compress,face_idx);
+        auto tmp = GatherSimd(source,dimension,shift,0x3,compress,face_idx,point);
         is_same_node = is_same_node && tmp;
-        splicetime+=usecond();
       } else {
-        nosplicetime-=usecond();
-        auto tmp = Gather(source,dimension,shift,0x3,compress,face_idx);
+        auto tmp = Gather(source,dimension,shift,0x3,compress,face_idx,point);
         is_same_node = is_same_node && tmp;
-        nosplicetime+=usecond();
       }
     } else {
       if(splice_dim){
-        splicetime-=usecond();
         // if checkerboard is unfavourable take two passes
         // both with block stride loop iteration
-        auto tmp1 = GatherSimd(source,dimension,shift,0x1,compress,face_idx);
-        auto tmp2 = GatherSimd(source,dimension,shift,0x2,compress,face_idx);
+        auto tmp1 = GatherSimd(source,dimension,shift,0x1,compress,face_idx,point);
+        auto tmp2 = GatherSimd(source,dimension,shift,0x2,compress,face_idx,point);
         is_same_node = is_same_node && tmp1 && tmp2;
-        splicetime+=usecond();
       } else {
-        nosplicetime-=usecond();
-        auto tmp1 = Gather(source,dimension,shift,0x1,compress,face_idx);
-        auto tmp2 = Gather(source,dimension,shift,0x2,compress,face_idx);
+        auto tmp1 = Gather(source,dimension,shift,0x1,compress,face_idx,point);
+        auto tmp2 = Gather(source,dimension,shift,0x2,compress,face_idx,point);
         is_same_node = is_same_node && tmp1 && tmp2;
-        nosplicetime+=usecond();
       }
     }
   }
@@ -518,13 +469,10 @@ public:
   template<class compressor>
   void HaloGather(const Lattice<vobj> &source,compressor &compress)
   {
-    mpi3synctime_g-=usecond();
     _grid->StencilBarrier();// Synch shared memory on a single nodes
-    mpi3synctime_g+=usecond();
 
     // conformable(source.Grid(),_grid);
     assert(source.Grid()==_grid);
-    halogtime-=usecond();
 
     u_comm_offset=0;
@@ -538,7 +486,6 @@ public:
     assert(u_comm_offset==_unified_buffer_size);
 
     accelerator_barrier();
-    halogtime+=usecond();
   }
 
   /////////////////////////
@@ -551,14 +498,72 @@ public:
     Mergers.resize(0);
     MergersSHM.resize(0);
     Packets.resize(0);
-    calls++;
+    CopyReceiveBuffers.resize(0);
+    CachedTransfers.resize(0);
   }
-  void AddPacket(void *xmit,void * rcv, Integer to,Integer from,Integer bytes){
+  void AddCopy(void *from,void * to, Integer bytes)
+  {
+    // std::cout << "Adding CopyReceiveBuffer "<<std::hex<<from<<" "<<to<<std::dec<<" "<<bytes<<std::endl;
+    CopyReceiveBuffer obj;
+    obj.from_p = from;
+    obj.to_p   = to;
+    obj.bytes  = bytes;
+    CopyReceiveBuffers.push_back(obj);
+  }
+  void CommsCopy()
+  {
+    // These are device resident MPI buffers.
+    for(int i=0;i<CopyReceiveBuffers.size();i++){
+      cobj *from=(cobj *)CopyReceiveBuffers[i].from_p;
+      cobj *to  =(cobj *)CopyReceiveBuffers[i].to_p;
+      Integer words = CopyReceiveBuffers[i].bytes/sizeof(cobj);
+      // std::cout << "CopyReceiveBuffer "<<std::hex<<from<<" "<<to<<std::dec<<" "<<words*sizeof(cobj)<<std::endl;
+      accelerator_forNB(j, words, cobj::Nsimd(), {
+        coalescedWrite(to[j] ,coalescedRead(from [j]));
+      });
+    }
+  }
+
+  Integer CheckForDuplicate(Integer direction, Integer OrthogPlane, Integer DestProc, void *recv_buf,Integer lane,Integer bytes,Integer cb)
+  {
+    CachedTransfer obj;
+    obj.direction   = direction;
+    obj.OrthogPlane = OrthogPlane;
+    obj.DestProc    = DestProc;
+    obj.recv_buf    = recv_buf;
+    obj.lane        = lane;
+    obj.bytes       = bytes;
+    obj.cb          = cb;
+
+    for(int i=0;i<CachedTransfers.size();i++){
+      if (  (CachedTransfers[i].direction  ==direction)
+          &&(CachedTransfers[i].OrthogPlane==OrthogPlane)
+          &&(CachedTransfers[i].DestProc   ==DestProc)
+          &&(CachedTransfers[i].bytes      ==bytes)
+          &&(CachedTransfers[i].lane       ==lane)
+          &&(CachedTransfers[i].cb         ==cb)
+          ){
+        // std::cout << "Found duplicate plane dir "<<direction<<" plane "<< OrthogPlane<< " simd "<<lane << " relproc "<<DestProc<< " bytes "<<bytes <<std::endl;
+        AddCopy(CachedTransfers[i].recv_buf,recv_buf,bytes);
+        return 1;
+      }
+    }
+    // std::cout << "No duplicate plane dir "<<direction<<" plane "<< OrthogPlane<< " simd "<<lane << " relproc "<<DestProc<<" bytes "<<bytes<<std::endl;
+    CachedTransfers.push_back(obj);
+    return 0;
+  }
+  void AddPacket(void *xmit,void * rcv,
+                 Integer to,   Integer do_send,
+                 Integer from, Integer do_recv,
+                 Integer bytes){
     Packet p;
     p.send_buf = xmit;
     p.recv_buf = rcv;
     p.to_rank  = to;
     p.from_rank= from;
+    p.do_send  = do_send;
+    p.do_recv  = do_recv;
     p.bytes    = bytes;
     Packets.push_back(p);
   }
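CheckForDuplicate above turns a repeated face transfer into a device-side copy: a transfer is keyed on (direction, orthogonal plane, destination rank, SIMD lane, bytes, checkerboard), and a second request with the same key is satisfied by copying the first request's receive buffer instead of posting another message. A free-standing sketch of that idea, using plain STL types rather than Grid's Integer/cobj machinery:

    #include <utility>
    #include <vector>

    struct Key { int dir, plane, proc, lane, bytes, cb; void *recv; };

    // Hypothetical illustration, not Grid's API.
    struct TransferCache {
      std::vector<Key> seen;
      // Returns true and schedules a local copy when an identical transfer
      // has already been posted; otherwise records the key for later hits.
      bool duplicate(const Key &k,
                     std::vector<std::pair<void*, void*>> &copies) {
        for (auto &s : seen) {
          if (s.dir==k.dir && s.plane==k.plane && s.proc==k.proc &&
              s.lane==k.lane && s.bytes==k.bytes && s.cb==k.cb) {
            copies.push_back({s.recv, k.recv}); // old recv_buf -> new recv_buf
            return true;
          }
        }
        seen.push_back(k);
        return false; // first occurrence: post the real transfer
      }
    };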
@@ -578,22 +583,17 @@ public:
     mv.push_back(m);
   }
   template<class decompressor> void CommsMerge(decompressor decompress) {
+    CommsCopy();
     CommsMerge(decompress,Mergers,Decompressions);
   }
   template<class decompressor> void CommsMergeSHM(decompressor decompress) {
-    mpi3synctime-=usecond();
     _grid->StencilBarrier();// Synch shared memory on a single nodes
-    mpi3synctime+=usecond();
-    shmmergetime-=usecond();
     CommsMerge(decompress,MergersSHM,DecompressionsSHM);
-    shmmergetime+=usecond();
   }
 
   template<class decompressor>
-  void CommsMerge(decompressor decompress,std::vector<Merge> &mm,std::vector<Decompress> &dd) {
+  void CommsMerge(decompressor decompress,std::vector<Merge> &mm,std::vector<Decompress> &dd)
+  {
 
-    mergetime-=usecond();
     for(int i=0;i<mm.size();i++){
       auto mp = &mm[i].mpointer[0];
       auto vp0= &mm[i].vpointers[0][0];
@@ -603,9 +603,7 @@ public:
        decompress.Exchange(mp,vp0,vp1,type,o);
      });
    }
-    mergetime+=usecond();
 
-    decompresstime-=usecond();
    for(int i=0;i<dd.size();i++){
      auto kp = dd[i].kernel_p;
      auto mp = dd[i].mpi_p;
@@ -613,7 +611,6 @@ public:
        decompress.Decompress(kp,mp,o);
      });
    }
-    decompresstime+=usecond();
  }
  ////////////////////////////////////////
  // Set up routines
@@ -650,19 +647,58 @@ public:
      }
    }
  }
+  /// Introduce a block structure and switch off comms on boundaries
+  void DirichletBlock(const std::vector<int> &dirichlet_block)
+  {
+    this->_dirichlet = 1;
+    for(int ii=0;ii<this->_npoints;ii++){
+      int dimension    = this->_directions[ii];
+      int displacement = this->_distances[ii];
+      int shift = displacement;
+      int gd = _grid->_gdimensions[dimension];
+      int fd = _grid->_fdimensions[dimension];
+      int pd = _grid->_processors [dimension];
+      int ld = gd/pd;
+      int pc = _grid->_processor_coor[dimension];
+      ///////////////////////////////////////////
+      // Figure out dirichlet send and receive
+      // on this leg of stencil.
+      ///////////////////////////////////////////
+      int comm_dim = _grid->_processors[dimension] >1 ;
+      int block    = dirichlet_block[dimension];
+      this->_comms_send[ii] = comm_dim;
+      this->_comms_recv[ii] = comm_dim;
+      if ( block ) {
+        assert(abs(displacement) < ld );
+
+        if( displacement > 0 ) {
+          // High side, low side
+          // | <--B--->|
+          // |         |      |
+          // noR
+          // noS
+          if ( (ld*(pc+1) ) % block == 0 ) this->_comms_recv[ii] = 0;
+          if ( ( ld*pc    ) % block == 0 ) this->_comms_send[ii] = 0;
+        } else {
+          // High side, low side
+          // | <--B--->|
+          // |         |      |
+          // noS
+          // noR
+          if ( (ld*(pc+1) ) % block == 0 ) this->_comms_send[ii] = 0;
+          if ( ( ld*pc    ) % block == 0 ) this->_comms_recv[ii] = 0;
+        }
+      }
+    }
+  }
   CartesianStencil(GridBase *grid,
                    int npoints,
                    int checkerboard,
                    const std::vector<int> &directions,
                    const std::vector<int> &distances,
                    Parameters p)
-    : shm_bytes_thr(npoints),
-      comm_bytes_thr(npoints),
-      comm_enter_thr(npoints),
-      comm_leave_thr(npoints),
-      comm_time_thr(npoints)
   {
+    this->_dirichlet = 0;
     face_table_computed=0;
     _grid    = grid;
     this->parameters=p;
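DirichletBlock switches a stencil leg's send or receive off exactly when the face it crosses lands on a block boundary: with local extent ld, processor coordinate pc, and block size B, the low face of this rank sits at global coordinate ld*pc and the high face at ld*(pc+1), and each is Dirichlet when it is a multiple of B. A scalar sketch of that test under the same assumptions:

    // For displacement > 0 the receive crosses the high face and the send
    // the low face; for displacement < 0 the roles swap, as in the code above.
    struct LegComms { int send, recv; };

    LegComms dirichlet_leg(int ld, int pc, int B, int displacement, int comm_dim) {
      LegComms c{comm_dim, comm_dim};
      if (B == 0) return c;                        // dimension not blocked
      if (displacement > 0) {
        if ((ld * (pc + 1)) % B == 0) c.recv = 0;  // high face on a block wall
        if ((ld *  pc     ) % B == 0) c.send = 0;  // low face on a block wall
      } else {
        if ((ld * (pc + 1)) % B == 0) c.send = 0;
        if ((ld *  pc     ) % B == 0) c.recv = 0;
      }
      return c;
    }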
@@ -675,6 +711,8 @@ public:
     this->_simd_layout  = _grid->_simd_layout; // copy simd_layout to give access to Accelerator Kernels
     this->_directions   = StencilVector(directions);
     this->_distances    = StencilVector(distances);
+    this->_comms_send.resize(npoints);
+    this->_comms_recv.resize(npoints);
     this->same_node.resize(npoints);
 
     _unified_buffer_size=0;
@@ -693,24 +731,27 @@ public:
       int displacement = distances[i];
       int shift = displacement;
 
+      int gd = _grid->_gdimensions[dimension];
       int fd = _grid->_fdimensions[dimension];
+      int pd = _grid->_processors [dimension];
+      int ld = gd/pd;
       int rd = _grid->_rdimensions[dimension];
+      int pc = _grid->_processor_coor[dimension];
       this->_permute_type[point]=_grid->PermuteType(dimension);
 
       this->_checkerboard = checkerboard;
 
-      //////////////////////////
-      // the permute type
-      //////////////////////////
       int simd_layout     = _grid->_simd_layout[dimension];
       int comm_dim        = _grid->_processors[dimension] >1 ;
       int splice_dim      = _grid->_simd_layout[dimension]>1 && (comm_dim);
       int rotate_dim      = _grid->_simd_layout[dimension]>2;
 
+      this->_comms_send[ii] = comm_dim;
+      this->_comms_recv[ii] = comm_dim;
+
      assert ( (rotate_dim && comm_dim) == false) ; // Do not think spread out is supported
 
      int sshift[2];
 
      //////////////////////////
      // Underlying approach. For each local site build
      // up a table containing the npoint "neighbours" and whether they
@@ -811,6 +852,7 @@ public:
     GridBase *grid=_grid;
     const int Nsimd = grid->Nsimd();
 
+    int comms_recv = this->_comms_recv[point];
     int fd = _grid->_fdimensions[dimension];
     int ld = _grid->_ldimensions[dimension];
     int rd = _grid->_rdimensions[dimension];
@@ -867,7 +909,9 @@ public:
     if ( (shiftpm== 1) && (sx<x) && (grid->_processor_coor[dimension]==grid->_processors[dimension]-1) ) {
       wraparound = 1;
     }
-    if (!offnode) {
+
+    // Wrap locally dirichlet support case OR node local
+    if ( (offnode==0) || (comms_recv==0) ) {
 
       int permute_slice=0;
       CopyPlane(point,dimension,x,sx,cbmask,permute_slice,wraparound);
@@ -984,11 +1028,14 @@ public:
   }
 
   template<class compressor>
-  int Gather(const Lattice<vobj> &rhs,int dimension,int shift,int cbmask,compressor & compress,int &face_idx)
+  int Gather(const Lattice<vobj> &rhs,int dimension,int shift,int cbmask,compressor & compress,int &face_idx, int point)
   {
     typedef typename cobj::vector_type vector_type;
     typedef typename cobj::scalar_type scalar_type;
 
+    int comms_send = this->_comms_send[point] ;
+    int comms_recv = this->_comms_recv[point] ;
+
     assert(rhs.Grid()==_grid);
     // conformable(_grid,rhs.Grid());
 
@@ -1011,9 +1058,11 @@ public:
       int sx        = (x+sshift)%rd;
       int comm_proc = ((x+sshift)/rd)%pd;
+
       if (comm_proc) {
+
 
        int words = buffer_size;
        if (cbmask != 0x3) words=words>>1;
 
@@ -1045,44 +1094,53 @@ public:
          recv_buf=this->u_recv_buf_p;
        }
 
        cobj *send_buf;
        send_buf = this->u_send_buf_p; // Gather locally, must send
 
        ////////////////////////////////////////////////////////
        // Gather locally
        ////////////////////////////////////////////////////////
-        gathertime-=usecond();
        assert(send_buf!=NULL);
-        Gather_plane_simple_table(face_table[face_idx],rhs,send_buf,compress,u_comm_offset,so); face_idx++;
-        gathertime+=usecond();
+        if ( comms_send )
+          Gather_plane_simple_table(face_table[face_idx],rhs,send_buf,compress,u_comm_offset,so);
+        face_idx++;
 
-        ///////////////////////////////////////////////////////////
-        // Build a list of things to do after we synchronise GPUs
-        // Start comms now???
-        ///////////////////////////////////////////////////////////
-        AddPacket((void *)&send_buf[u_comm_offset],
-                  (void *)&recv_buf[u_comm_offset],
-                  xmit_to_rank,
-                  recv_from_rank,
-                  bytes);
+        int duplicate = CheckForDuplicate(dimension,sx,comm_proc,(void *)&recv_buf[u_comm_offset],0,bytes,cbmask);
+        if ( (!duplicate) ) { // Force comms for now
+          ///////////////////////////////////////////////////////////
+          // Build a list of things to do after we synchronise GPUs
+          // Start comms now???
+          ///////////////////////////////////////////////////////////
+          AddPacket((void *)&send_buf[u_comm_offset],
+                    (void *)&recv_buf[u_comm_offset],
+                    xmit_to_rank, comms_send,
+                    recv_from_rank, comms_recv,
+                    bytes);
+        }
 
-        if ( compress.DecompressionStep() ) {
+        if ( compress.DecompressionStep() && comms_recv ) {
          AddDecompress(&this->u_recv_buf_p[u_comm_offset],
                        &recv_buf[u_comm_offset],
                        words,Decompressions);
        }
        u_comm_offset+=words;
      }
    }
    return 0;
  }
 
  template<class compressor>
-  int GatherSimd(const Lattice<vobj> &rhs,int dimension,int shift,int cbmask,compressor &compress,int & face_idx)
+  int GatherSimd(const Lattice<vobj> &rhs,int dimension,int shift,int cbmask,compressor &compress,int & face_idx,int point)
  {
    const int Nsimd = _grid->Nsimd();
 
    const int maxl =2;// max layout in a direction
+
+    int comms_send = this->_comms_send[point] ;
+    int comms_recv = this->_comms_recv[point] ;
+
    int fd = _grid->_fdimensions[dimension];
    int rd = _grid->_rdimensions[dimension];
    int ld = _grid->_ldimensions[dimension];
@@ -1147,12 +1205,11 @@ public:
                    &face_table[face_idx][0],
                    face_table[face_idx].size()*sizeof(face_table_host[0]));
     }
-    gathermtime-=usecond();
 
-    Gather_plane_exchange_table(face_table[face_idx],rhs,spointers,dimension,sx,cbmask,compress,permute_type);
+    if ( comms_send )
+      Gather_plane_exchange_table(face_table[face_idx],rhs,spointers,dimension,sx,cbmask,compress,permute_type);
     face_idx++;
 
-    gathermtime+=usecond();
     //spointers[0] -- low
     //spointers[1] -- high
 
@@ -1181,8 +1238,13 @@ public:
 
         rpointers[i] = rp;
 
-        AddPacket((void *)sp,(void *)rp,xmit_to_rank,recv_from_rank,bytes);
+        int duplicate = CheckForDuplicate(dimension,sx,nbr_proc,(void *)rp,i,bytes,cbmask);
+        if ( (!duplicate) ) { // Force comms for now
+          AddPacket((void *)sp,(void *)rp,
+                    xmit_to_rank,comms_send,
+                    recv_from_rank,comms_recv,
+                    bytes);
+        }
 
       } else {
 
@@ -1191,7 +1253,9 @@ public:
       }
     }
 
-    AddMerge(&this->u_recv_buf_p[u_comm_offset],rpointers,reduced_buffer_size,permute_type,Mergers);
+    if ( comms_recv ) {
+      AddMerge(&this->u_recv_buf_p[u_comm_offset],rpointers,reduced_buffer_size,permute_type,Mergers);
+    }
 
     u_comm_offset +=buffer_size;
   }
@@ -342,7 +342,7 @@ extern hipStream_t copyStream;
 /*These routines define mapping from thread grid to loop & vector lane indexing */
 accelerator_inline int acceleratorSIMTlane(int Nsimd) {
 #ifdef GRID_SIMT
-  return hipThreadIdx_z;
+  return hipThreadIdx_x;
 #else
   return 0;
 #endif
@@ -356,19 +356,41 @@ accelerator_inline int acceleratorSIMTlane(int Nsimd) {
     { __VA_ARGS__;}                                                     \
   };                                                                    \
   int nt=acceleratorThreads();                                          \
-  dim3 hip_threads(nt,1,nsimd);                                         \
+  dim3 hip_threads(nsimd, nt, 1);                                       \
   dim3 hip_blocks ((num1+nt-1)/nt,num2,1);                              \
-  hipLaunchKernelGGL(LambdaApply,hip_blocks,hip_threads,                \
-                     0,0,                                               \
-                     num1,num2,nsimd,lambda);                           \
+  if(hip_threads.x * hip_threads.y * hip_threads.z <= 64){              \
+    hipLaunchKernelGGL(LambdaApply64,hip_blocks,hip_threads,            \
+                       0,0,                                             \
+                       num1,num2,nsimd, lambda);                        \
+  } else {                                                              \
+    hipLaunchKernelGGL(LambdaApply,hip_blocks,hip_threads,              \
+                       0,0,                                             \
+                       num1,num2,nsimd, lambda);                        \
+  }                                                                     \
 }
 
+template<typename lambda> __global__
+__launch_bounds__(64,1)
+void LambdaApply64(uint64_t numx, uint64_t numy, uint64_t numz, lambda Lambda)
+{
+  // Following the same scheme as CUDA for now
+  uint64_t x = threadIdx.y + blockDim.y*blockIdx.x;
+  uint64_t y = threadIdx.z + blockDim.z*blockIdx.y;
+  uint64_t z = threadIdx.x;
+  if ( (x < numx) && (y<numy) && (z<numz) ) {
+    Lambda(x,y,z);
+  }
+}
+
 template<typename lambda> __global__
+__launch_bounds__(1024,1)
 void LambdaApply(uint64_t numx, uint64_t numy, uint64_t numz, lambda Lambda)
 {
-  uint64_t x = hipThreadIdx_x + hipBlockDim_x*hipBlockIdx_x;
-  uint64_t y = hipThreadIdx_y + hipBlockDim_y*hipBlockIdx_y;
-  uint64_t z = hipThreadIdx_z ;//+ hipBlockDim_z*hipBlockIdx_z;
+  // Following the same scheme as CUDA for now
+  uint64_t x = threadIdx.y + blockDim.y*blockIdx.x;
+  uint64_t y = threadIdx.z + blockDim.z*blockIdx.y;
+  uint64_t z = threadIdx.x;
   if ( (x < numx) && (y<numy) && (z<numz) ) {
     Lambda(x,y,z);
   }
 }
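The HIP change above moves the SIMD lane from threadIdx.z to threadIdx.x (dim3 hip_threads(nsimd, nt, 1)), so the fastest-varying hardware index walks vector lanes and neighbouring threads touch adjacent data, and it adds a LambdaApply64 variant with __launch_bounds__(64,1) for small thread blocks. A plain-C++ rendering of the index decode both kernels now share, written only to make the remap explicit:

    #include <cstdint>

    struct Idx3 { uint64_t x, y, z; };

    // Mirrors the kernel bodies: two loop indices from (y,z) plus block
    // indices, SIMD lane from x (the fastest-varying thread coordinate).
    Idx3 decode(Idx3 threadIdx_, Idx3 blockIdx_, Idx3 blockDim_) {
      return { threadIdx_.y + blockDim_.y * blockIdx_.x,   // loop index 1
               threadIdx_.z + blockDim_.z * blockIdx_.y,   // loop index 2
               threadIdx_.x };                             // SIMD lane
    }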
@@ -167,6 +167,13 @@ void GridCmdOptionInt(std::string &str,int & val)
   return;
 }
 
+void GridCmdOptionFloat(std::string &str,float & val)
+{
+  std::stringstream ss(str);
+  ss>>val;
+  return;
+}
+
 
 void GridParseLayout(char **argv,int argc,
                      Coordinate &latt_c,
@@ -57,6 +57,7 @@ void GridCmdOptionCSL(std::string str,std::vector<std::string> & vec);
 template<class VectorInt>
 void GridCmdOptionIntVector(const std::string &str,VectorInt & vec);
 void GridCmdOptionInt(std::string &str,int & val);
+void GridCmdOptionFloat(std::string &str,float & val);
 
 
 void GridParseLayout(char **argv,int argc,
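GridCmdOptionFloat follows the same stringstream pattern as the existing GridCmdOptionInt. A free-standing usage sketch of the equivalent parse (CmdOptionFloat is a local stand-in, not the Grid symbol):

    #include <sstream>
    #include <string>

    void CmdOptionFloat(std::string &str, float &val) {
      std::stringstream ss(str); // parse the option text as a float
      ss >> val;
    }

    int main() {
      std::string arg("0.75");
      float beta = 0.f;
      CmdOptionFloat(arg, beta); // beta == 0.75f
      return 0;
    }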
@@ -217,9 +217,9 @@ int main (int argc, char ** argv)
       dbytes+=
         Grid.StencilSendToRecvFromBegin(requests,
                                         (void *)&xbuf[mu][0],
-                                        xmit_to_rank,
+                                        xmit_to_rank,1,
                                         (void *)&rbuf[mu][0],
-                                        recv_from_rank,
+                                        recv_from_rank,1,
                                         bytes,mu);
 
       comm_proc = mpi_layout[mu]-1;
@@ -228,9 +228,9 @@ int main (int argc, char ** argv)
       dbytes+=
         Grid.StencilSendToRecvFromBegin(requests,
                                         (void *)&xbuf[mu+4][0],
-                                        xmit_to_rank,
+                                        xmit_to_rank,1,
                                         (void *)&rbuf[mu+4][0],
-                                        recv_from_rank,
+                                        recv_from_rank,1,
                                         bytes,mu+4);
 
     }
@@ -309,9 +309,9 @@ int main (int argc, char ** argv)
       dbytes+=
         Grid.StencilSendToRecvFromBegin(requests,
                                         (void *)&xbuf[mu][0],
-                                        xmit_to_rank,
+                                        xmit_to_rank,1,
                                         (void *)&rbuf[mu][0],
-                                        recv_from_rank,
+                                        recv_from_rank,1,
                                         bytes,mu);
       Grid.StencilSendToRecvFromComplete(requests,mu);
       requests.resize(0);
@@ -322,9 +322,9 @@ int main (int argc, char ** argv)
       dbytes+=
         Grid.StencilSendToRecvFromBegin(requests,
                                         (void *)&xbuf[mu+4][0],
-                                        xmit_to_rank,
+                                        xmit_to_rank,1,
                                         (void *)&rbuf[mu+4][0],
-                                        recv_from_rank,
+                                        recv_from_rank,1,
                                         bytes,mu+4);
       Grid.StencilSendToRecvFromComplete(requests,mu+4);
       requests.resize(0);
@@ -411,8 +411,8 @@ int main (int argc, char ** argv)
       Grid.ShiftedRanks(mu,comm_proc,xmit_to_rank,recv_from_rank);
     }
     int tid = omp_get_thread_num();
-    tbytes= Grid.StencilSendToRecvFrom((void *)&xbuf[dir][0], xmit_to_rank,
-                                       (void *)&rbuf[dir][0], recv_from_rank, bytes,tid);
+    tbytes= Grid.StencilSendToRecvFrom((void *)&xbuf[dir][0], xmit_to_rank,1,
+                                       (void *)&rbuf[dir][0], recv_from_rank,1, bytes,tid);
 
     thread_critical { dbytes+=tbytes; }
   }
@@ -32,18 +32,112 @@
 using namespace std;
 using namespace Grid;
 
-template<class d>
-struct scal {
-  d internal;
-};
-
-Gamma::Algebra Gmu [] = {
-  Gamma::Algebra::GammaX,
-  Gamma::Algebra::GammaY,
-  Gamma::Algebra::GammaZ,
-  Gamma::Algebra::GammaT
-};
+////////////////////////
+/// Move to domains ////
+////////////////////////
+struct DomainDecomposition
+{
+  Coordinate Block;
+
+  DomainDecomposition(const Coordinate &_Block): Block(_Block){ assert(Block.size()==Nd);};
+
+  template<class Field>
+  void ProjectDomain(Field &f,Integer domain)
+  {
+    GridBase *grid = f.Grid();
+    int dims = grid->Nd();
+    int isDWF= (dims==Nd+1);
+    assert((dims==Nd)||(dims==Nd+1));
+
+    Field zz(grid);  zz = Zero();
+    LatticeInteger coor(grid);
+    LatticeInteger domaincoor(grid);
+    LatticeInteger mask(grid); mask = Integer(1);
+    LatticeInteger zi(grid);   zi   = Integer(0);
+    for(int d=0;d<Nd;d++){
+      Integer B= Block[d];
+      if ( B ) {
+        LatticeCoordinate(coor,d+isDWF);
+        domaincoor = mod(coor,B);
+        mask = where(domaincoor==Integer(0),zi,mask);
+        mask = where(domaincoor==Integer(B-1),zi,mask);
+      }
+    }
+    if ( !domain )
+      f = where(mask==Integer(1),f,zz);
+    else
+      f = where(mask==Integer(0),f,zz);
+  };
+};
+
+template<typename MomentaField>
+struct DirichletFilter: public MomentumFilterBase<MomentaField>
+{
+  Coordinate Block;
+
+  DirichletFilter(const Coordinate &_Block): Block(_Block) {}
+
+  // Edge detect using domain projectors
+  void applyFilter (MomentaField &U) const override
+  {
+    DomainDecomposition Domains(Block);
+    GridBase *grid = U.Grid();
+    LatticeInteger coor(grid);
+    LatticeInteger face(grid);
+    LatticeInteger one(grid);   one = 1;
+    LatticeInteger zero(grid); zero = 0;
+    LatticeInteger omega(grid);
+    LatticeInteger omegabar(grid);
+    LatticeInteger tmp(grid);
+
+    omega=one;    Domains.ProjectDomain(omega,0);
+    omegabar=one; Domains.ProjectDomain(omegabar,1);
+
+    LatticeInteger nface(grid); nface=Zero();
+
+    MomentaField projected(grid); projected=Zero();
+    typedef decltype(PeekIndex<LorentzIndex>(U,0)) MomentaLinkField;
+    MomentaLinkField Umu(grid);
+    MomentaLinkField zz(grid); zz=Zero();
+
+    int dims = grid->Nd();
+    Coordinate Global=grid->GlobalDimensions();
+    assert(dims==Nd);
+
+    for(int mu=0;mu<Nd;mu++){
+
+      if ( Block[mu]!=0 ) {
+
+        Umu = PeekIndex<LorentzIndex>(U,mu);
+
+        // Upper face
+        tmp = Cshift(omegabar,mu,1);
+        tmp = tmp + omega;
+        face = where(tmp == Integer(2),one,zero );
+
+        tmp = Cshift(omega,mu,1);
+        tmp = tmp + omegabar;
+        face = where(tmp == Integer(2),one,face );
+
+        Umu = where(face,zz,Umu);
+
+        PokeIndex<LorentzIndex>(U, Umu, mu);
+      }
+    }
+  }
+};
+
+
+
+Gamma::Algebra Gmu [] = {
+  Gamma::Algebra::GammaX,
+  Gamma::Algebra::GammaY,
+  Gamma::Algebra::GammaZ,
+  Gamma::Algebra::GammaT
+};
+
+void Benchmark(int Ls, std::vector<int> Dirichlet);
+
 int main (int argc, char ** argv)
 {
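ProjectDomain above builds its mask by zeroing every site whose coordinate mod B lands on either wall of a block (0 or B-1); domain 0 then keeps the interior and domain 1 keeps the walls, and DirichletFilter edge-detects the faces where the two projections meet. A scalar sketch of the per-dimension mask rule under those assumptions:

    // Site x is interior to its block of size B unless x % B hits a wall.
    inline int in_domain(int x, int B, int domain) {
      if (B == 0) return domain == 0;  // unblocked dimension: all interior
      int c = x % B;
      int interior = (c != 0) && (c != B - 1);
      return domain == 0 ? interior : !interior;
    }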
@ -52,24 +146,48 @@ int main (int argc, char ** argv)
|
|||||||
|
|
||||||
int threads = GridThread::GetThreads();
|
int threads = GridThread::GetThreads();
|
||||||
|
|
||||||
Coordinate latt4 = GridDefaultLatt();
|
|
||||||
int Ls=16;
|
int Ls=16;
|
||||||
for(int i=0;i<argc;i++)
|
for(int i=0;i<argc;i++) {
|
||||||
if(std::string(argv[i]) == "-Ls"){
|
if(std::string(argv[i]) == "-Ls"){
|
||||||
std::stringstream ss(argv[i+1]); ss >> Ls;
|
std::stringstream ss(argv[i+1]); ss >> Ls;
|
||||||
}
|
}
|
||||||
|
}
|
||||||
|
std::vector<int> Dirichlet(5,0);
|
||||||
|
Benchmark(Ls,Dirichlet);
|
||||||
|
Coordinate latt4 = GridDefaultLatt();
|
||||||
|
Coordinate mpi = GridDefaultMpi();
|
||||||
|
Coordinate shm;
|
||||||
|
GlobalSharedMemory::GetShmDims(mpi,shm);
|
||||||
|
/*
|
||||||
|
Dirichlet = std::vector<int>({0,
|
||||||
|
latt4[0]/mpi[0] * shm[0],
|
||||||
|
latt4[1]/mpi[1] * shm[1],
|
||||||
|
latt4[2]/mpi[2] * shm[2],
|
||||||
|
latt4[3]/mpi[3] * shm[3]});
|
||||||
|
*/
|
||||||
|
Dirichlet = std::vector<int>({0,
|
||||||
|
latt4[0]/mpi[0] ,
|
||||||
|
latt4[1]/mpi[1] ,
|
||||||
|
latt4[2]/mpi[2] ,
|
||||||
|
latt4[3]/mpi[3] });
|
||||||
|
|
||||||
|
std::cout << " Dirichlet block "<< Dirichlet<< std::endl;
|
||||||
|
Benchmark(Ls,Dirichlet);
|
||||||
|
Grid_finalize();
|
||||||
|
exit(0);
|
||||||
|
}
|
||||||
|
void Benchmark(int Ls, std::vector<int> Dirichlet)
|
||||||
|
{
|
||||||
|
Coordinate latt4 = GridDefaultLatt();
|
||||||
GridLogLayout();
|
GridLogLayout();
|
||||||
|
|
||||||
long unsigned int single_site_flops = 8*Nc*(7+16*Nc);
|
long unsigned int single_site_flops = 8*Nc*(7+16*Nc);
|
||||||

  GridCartesian * UGrid   = SpaceTimeGrid::makeFourDimGrid(GridDefaultLatt(), GridDefaultSimd(Nd,vComplexF::Nsimd()),GridDefaultMpi());
  GridRedBlackCartesian * UrbGrid   = SpaceTimeGrid::makeFourDimRedBlackGrid(UGrid);
  GridCartesian         * FGrid   = SpaceTimeGrid::makeFiveDimGrid(Ls,UGrid);
  GridRedBlackCartesian * FrbGrid = SpaceTimeGrid::makeFiveDimRedBlackGrid(Ls,UGrid);

- std::cout << GridLogMessage << "Making s innermost grids"<<std::endl;
  GridCartesian         * sUGrid   = SpaceTimeGrid::makeFourDimDWFGrid(GridDefaultLatt(),GridDefaultMpi());
  GridRedBlackCartesian * sUrbGrid = SpaceTimeGrid::makeFourDimRedBlackGrid(sUGrid);
  GridCartesian         * sFGrid   = SpaceTimeGrid::makeFiveDimDWFGrid(Ls,UGrid);
@ -80,26 +198,13 @@ int main (int argc, char ** argv)

  std::cout << GridLogMessage << "Initialising 4d RNG" << std::endl;
  GridParallelRNG          RNG4(UGrid);  RNG4.SeedUniqueString(std::string("The 4D RNG"));

  std::cout << GridLogMessage << "Initialising 5d RNG" << std::endl;
  GridParallelRNG          RNG5(FGrid);  RNG5.SeedUniqueString(std::string("The 5D RNG"));
- std::cout << GridLogMessage << "Initialised RNGs" << std::endl;

  LatticeFermionF src   (FGrid); random(RNG5,src);
-#if 0
-  src = Zero();
-  {
-    Coordinate origin({0,0,0,latt4[2]-1,0});
-    SpinColourVectorF tmp;
-    tmp=Zero();
-    tmp()(0)(0)=Complex(-2.0,0.0);
-    std::cout << " source site 0 " << tmp<<std::endl;
-    pokeSite(tmp,src,origin);
-  }
-#else
  RealD N2 = 1.0/::sqrt(norm2(src));
  src = src*N2;
-#endif


  LatticeFermionF result(FGrid); result=Zero();
  LatticeFermionF ref(FGrid);    ref=Zero();
@ -110,18 +215,18 @@ int main (int argc, char ** argv)
  LatticeGaugeFieldF Umu(UGrid);
  SU<Nc>::HotConfiguration(RNG4,Umu);
  std::cout << GridLogMessage << "Random gauge initialised " << std::endl;
-#if 0
-  Umu=1.0;
-  for(int mu=0;mu<Nd;mu++){
-    LatticeColourMatrixF ttmp(UGrid);
-    ttmp = PeekIndex<LorentzIndex>(Umu,mu);
-    // if (mu !=2 ) ttmp = 0;
-    // ttmp = ttmp* pow(10.0,mu);
-    PokeIndex<LorentzIndex>(Umu,ttmp,mu);
-  }
-  std::cout << GridLogMessage << "Forced to diagonal " << std::endl;
-#endif

+  ////////////////////////////////////
+  // Apply BCs
+  ////////////////////////////////////
+  std::cout << GridLogMessage << "Applying BCs " << std::endl;
+  Coordinate Block(4);
+  for(int d=0;d<4;d++) Block[d]= Dirichlet[d+1];
+
+  std::cout << GridLogMessage << "Dirichlet Block " << Block<< std::endl;
+  DirichletFilter<LatticeGaugeFieldF> Filter(Block);
+  Filter.applyFilter(Umu);
+
  ////////////////////////////////////
  // Naive wilson implementation
  ////////////////////////////////////
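Editor's note: the new "Apply BCs" step above cuts the gauge field into blocks of size Block[d] before the operator is built. A toy sketch of the intended effect (this is not Grid's DirichletFilter implementation; the 1-d field and all names here are illustrative only): zeroing every link that crosses a block face decouples the blocks, which is what imposes Dirichlet boundary conditions on the hopping term.

    // toy 1-d illustration: U[x] is the link from site x to x+1
    #include <cstdio>
    #include <vector>
    int main(void) {
      const int L = 16, B = 8;               // extent and Dirichlet block size
      std::vector<double> U(L, 1.0);
      for (int x = 0; x < L; ++x)
        if ((x + 1) % B == 0) U[x] = 0.0;    // cut the links leaving each block
      for (int x = 0; x < L; ++x) std::printf("%g ", U[x]);
      std::printf("\n");                     // prints "1 1 1 1 1 1 1 0" twice
      return 0;
    }

With Block taken from Dirichlet[1..4] = latt4/mpi, as set in main() above, each MPI rank's local volume becomes one such decoupled block.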
@ -191,11 +296,11 @@ int main (int argc, char ** argv)
  std::cout << GridLogMessage<< "*****************************************************************" <<std::endl;

  DomainWallFermionF Dw(Umu,*FGrid,*FrbGrid,*UGrid,*UrbGrid,mass,M5);
+  Dw.DirichletBlock(Dirichlet);
  int ncall =300;

  if (1) {
    FGrid->Barrier();
-    Dw.ZeroCounters();
    Dw.Dhop(src,result,0);
    std::cout<<GridLogMessage<<"Called warmup"<<std::endl;
    double t0=usecond();
@ -220,8 +325,6 @@ int main (int argc, char ** argv)
    double data_mem = (volume * (2*Nd+1)*Nd*Nc + (volume/Ls) *2*Nd*Nc*Nc) * simdwidth / nsimd * ncall / (1024.*1024.*1024.);

    std::cout<<GridLogMessage << "Called Dw "<<ncall<<" times in "<<t1-t0<<" us"<<std::endl;
-    // std::cout<<GridLogMessage << "norm result "<< norm2(result)<<std::endl;
-    // std::cout<<GridLogMessage << "norm ref    "<< norm2(ref)<<std::endl;
    std::cout<<GridLogMessage << "mflop/s = "<< flops/(t1-t0)<<std::endl;
    std::cout<<GridLogMessage << "mflop/s per rank = "<< flops/(t1-t0)/NP<<std::endl;
    std::cout<<GridLogMessage << "mflop/s per node = "<< flops/(t1-t0)/NN<<std::endl;
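Editor's note: flops here is a total flop count and t1-t0 is in microseconds, so flops/(t1-t0) is directly Mflop/s. A standalone check of the arithmetic against the Tursa log further down this page, assuming (as the logged numbers bear out) the benchmark's default Ls = 16:

    // units/values check for the "mflop/s" line above
    #include <cstdio>
    int main(void) {
      const double ncall = 300, Ls = 16, V4 = 64.0*64*64*64; // values from the log below
      const double flops = 1320.0 * Ls * V4 * ncall;         // single_site_flops = 1320
      const double elapsed_us = 2.82203e6;                   // "Called Dw 300 times in ... us"
      std::printf("mflop/s = %.6g\n", flops / elapsed_us);   // ~3.76681e7, matching the log
      return 0;
    }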
@ -229,20 +332,13 @@ int main (int argc, char ** argv)
    std::cout<<GridLogMessage << "mem GiB/s (base 2) = "<< 1000000. * data_mem/((t1-t0))<<std::endl;
    err = ref-result;
    std::cout<<GridLogMessage << "norm diff "<< norm2(err)<<std::endl;
-    //exit(0);
-
    if(( norm2(err)>1.0e-4) ) {
-      /*
-        std::cout << "RESULT\n " << result<<std::endl;
-        std::cout << "REF \n " << ref <<std::endl;
-        std::cout << "ERR \n " << err <<std::endl;
-      */
      std::cout<<GridLogMessage << "WRONG RESULT" << std::endl;
      FGrid->Barrier();
      exit(-1);
    }
    assert (norm2(err)< 1.0e-4 );
-    Dw.Report();
  }

  if (1)
@ -294,13 +390,14 @@ int main (int argc, char ** argv)
  std::cout<<GridLogMessage << "norm dag ref "<< norm2(ref)<<std::endl;
  err = ref-result;
  std::cout<<GridLogMessage << "norm dag diff "<< norm2(err)<<std::endl;
-  if((norm2(err)>1.0e-4)){
-    /*
-      std::cout<< "DAG RESULT\n " <<ref << std::endl;
-      std::cout<< "DAG sRESULT\n " <<result << std::endl;
-      std::cout<< "DAG ERR \n " << err <<std::endl;
-    */
+  if ( norm2(err) > 1.0e-4 ) {
+    std::cout << "Error vector is\n" <<err << std::endl;
+    std::cout << "Ref vector is\n" <<ref << std::endl;
+    std::cout << "Result vector is\n" <<result << std::endl;
  }
+  assert((norm2(err)<1.0e-4));

  LatticeFermionF src_e (FrbGrid);
  LatticeFermionF src_o (FrbGrid);
  LatticeFermionF r_e   (FrbGrid);
@ -330,7 +427,6 @@ int main (int argc, char ** argv)
  if ( WilsonKernelsStatic::Opt == WilsonKernelsStatic::OptInlineAsm ) std::cout << GridLogMessage<< "* Using Asm Nc=3 WilsonKernels" <<std::endl;
  std::cout << GridLogMessage<< "*********************************************************" <<std::endl;
  {
-    Dw.ZeroCounters();
    FGrid->Barrier();
    Dw.DhopEO(src_o,r_e,DaggerNo);
    double t0=usecond();
@ -352,7 +448,6 @@ int main (int argc, char ** argv)
    std::cout<<GridLogMessage << "Deo mflop/s = "<< flops/(t1-t0)<<std::endl;
    std::cout<<GridLogMessage << "Deo mflop/s per rank "<< flops/(t1-t0)/NP<<std::endl;
    std::cout<<GridLogMessage << "Deo mflop/s per node "<< flops/(t1-t0)/NN<<std::endl;
-    Dw.Report();
  }
  Dw.DhopEO(src_o,r_e,DaggerNo);
  Dw.DhopOE(src_e,r_o,DaggerNo);
@ -367,13 +462,7 @@ int main (int argc, char ** argv)

  err = r_eo-result;
  std::cout<<GridLogMessage << "norm diff "<< norm2(err)<<std::endl;
-  if((norm2(err)>1.0e-4)){
-    /*
-      std::cout<< "Deo RESULT\n " <<r_eo << std::endl;
-      std::cout<< "Deo REF\n " <<result << std::endl;
-      std::cout<< "Deo ERR \n " << err <<std::endl;
-    */
-  }
+  assert(norm2(err)<1.0e-4);

  pickCheckerboard(Even,src_e,err);
  pickCheckerboard(Odd,src_o,err);
@ -382,6 +471,4 @@ int main (int argc, char ** argv)

  assert(norm2(src_e)<1.0e-4);
  assert(norm2(src_o)<1.0e-4);
-  Grid_finalize();
-  exit(0);
}
new file: systems/Crusher/config-command (12 lines)
@ -0,0 +1,12 @@
../../configure --enable-comms=mpi-auto \
--enable-unified=no \
--enable-shm=nvlink \
--enable-accelerator=hip \
--enable-gen-simd-width=64 \
--enable-simd=GPU \
--disable-fermion-reps \
--disable-gparity \
CXX=hipcc MPICXX=mpicxx \
CXXFLAGS="-fPIC -I/opt/rocm-4.5.0/include/ -std=c++14 -I${MPICH_DIR}/include " \
LDFLAGS=" -L${MPICH_DIR}/lib -lmpi -L${CRAY_MPICH_ROOTDIR}/gtl/lib -lmpi_gtl_hsa "
HIPFLAGS = --amdgpu-target=gfx90a
new file: systems/Crusher/dwf.slurm (30 lines)
@ -0,0 +1,30 @@
#!/bin/bash
# Begin LSF Directives
#SBATCH -A LGT104
#SBATCH -t 01:00:00
##SBATCH -U openmpThu
##SBATCH -p ecp
#SBATCH -J DWF
#SBATCH -o DWF.%J
#SBATCH -e DWF.%J
#SBATCH -N 1
#SBATCH -n 1
#SBATCH --exclusive

DIR=.
module list
#export MPIR_CVAR_GPU_EAGER_DEVICE_MEM=0
export MPICH_GPU_SUPPORT_ENABLED=1
export MPICH_SMP_SINGLE_COPY_MODE=XPMEM
#export MPICH_SMP_SINGLE_COPY_MODE=NONE
#export MPICH_SMP_SINGLE_COPY_MODE=CMA
export OMP_NUM_THREADS=1

AT=8
echo MPICH_SMP_SINGLE_COPY_MODE $MPICH_SMP_SINGLE_COPY_MODE

PARAMS=" --accelerator-threads ${AT} --grid 24.24.24.24 --shm-mpi 0 --mpi 1.1.1.1"

srun --gpus-per-task 1 -n1 ./benchmarks/Benchmark_dwf_fp32 $PARAMS

new file: systems/Crusher/dwf4.slurm (27 lines)
@ -0,0 +1,27 @@
#!/bin/bash
# Begin LSF Directives
#SBATCH -A LGT104
#SBATCH -t 01:00:00
##SBATCH -U openmpThu
#SBATCH -J DWF
#SBATCH -o DWF.%J
#SBATCH -e DWF.%J
#SBATCH -N 1
#SBATCH -n 4
#SBATCH --exclusive

DIR=.
module list
export MPIR_CVAR_GPU_EAGER_DEVICE_MEM=0
export MPICH_GPU_SUPPORT_ENABLED=1
#export MPICH_SMP_SINGLE_COPY_MODE=XPMEM
export MPICH_SMP_SINGLE_COPY_MODE=NONE
#export MPICH_SMP_SINGLE_COPY_MODE=CMA
export OMP_NUM_THREADS=4

echo MPICH_SMP_SINGLE_COPY_MODE $MPICH_SMP_SINGLE_COPY_MODE
PARAMS=" --accelerator-threads 8 --grid 32.32.64.64 --mpi 1.1.2.2 --comms-overlap --shm 2048 --shm-mpi 0"

srun --gpus-per-task 1 -n4 ./mpiwrapper.sh ./benchmarks/Benchmark_dwf_fp32 $PARAMS

new file: systems/Crusher/dwf8.slurm (27 lines)
@ -0,0 +1,27 @@
#!/bin/bash
# Begin LSF Directives
#SBATCH -A LGT104
#SBATCH -t 01:00:00
##SBATCH -U openmpThu
#SBATCH -J DWF
#SBATCH -o DWF.%J
#SBATCH -e DWF.%J
#SBATCH -N 1
#SBATCH -n 8
#SBATCH --exclusive

DIR=.
module list
export MPIR_CVAR_GPU_EAGER_DEVICE_MEM=0
export MPICH_GPU_SUPPORT_ENABLED=1
export MPICH_SMP_SINGLE_COPY_MODE=XPMEM
#export MPICH_SMP_SINGLE_COPY_MODE=NONE
#export MPICH_SMP_SINGLE_COPY_MODE=CMA
export OMP_NUM_THREADS=1

echo MPICH_SMP_SINGLE_COPY_MODE $MPICH_SMP_SINGLE_COPY_MODE
PARAMS=" --accelerator-threads 8 --grid 32.64.64.64 --mpi 1.2.2.2 --comms-overlap --shm 2048 --shm-mpi 0"

srun --gpus-per-task 1 -n8 ./mpiwrapper.sh ./benchmarks/Benchmark_dwf_fp32 $PARAMS
new executable file: systems/Crusher/mpiwrapper.sh (12 lines)
@ -0,0 +1,12 @@
#!/bin/bash

lrank=$SLURM_LOCALID

export ROCR_VISIBLE_DEVICES=$SLURM_LOCALID

echo "`hostname` - $lrank device=$ROCR_VISIBLE_DEVICES binding=$BINDING"

$*
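Editor's note: the wrapper pins one GPU per MPI rank by exporting ROCR_VISIBLE_DEVICES=$SLURM_LOCALID before running its arguments, so every rank sees exactly one device. The slurm scripts above invoke it as:

    srun --gpus-per-task 1 -n4 ./mpiwrapper.sh ./benchmarks/Benchmark_dwf_fp32 $PARAMS

(Using "$@" instead of $* would preserve quoted arguments, though the parameters passed here contain no embedded spaces; $BINDING is echoed but never set in these scripts.)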
new file: systems/Crusher/sourceme.sh (5 lines)
@ -0,0 +1,5 @@
module load PrgEnv-gnu
module load rocm/4.5.0
module load gmp
module load cray-fftw
module load craype-accel-amd-gfx90a
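Editor's note: a plausible way these Crusher files fit together (the working directory is an assumption; the `../../configure` path in config-command resolves to the top-level configure when run from systems/Crusher itself):

    # hypothetical build flow on a Crusher login node
    cd systems/Crusher
    . sourceme.sh          # PrgEnv-gnu, rocm/4.5.0, gmp, cray-fftw, craype-accel-amd-gfx90a
    bash ./config-command  # runs ../../configure with the HIP/MPICH options above
    make -j

Note that, as rendered here, the LDFLAGS line in config-command is not continued with a trailing backslash, so the final HIPFLAGS line reads as a separate shell command rather than part of the configure invocation; it appears intended as a configure argument.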
@ -1,25 +1,25 @@
-tu-c0r0n00 - 0 device=0 binding=--interleave=0,1
-tu-c0r0n00 - 1 device=1 binding=--interleave=2,3
-tu-c0r0n09 - 1 device=1 binding=--interleave=2,3
-tu-c0r0n00 - 2 device=2 binding=--interleave=4,5
-tu-c0r0n06 - 0 device=0 binding=--interleave=0,1
-tu-c0r0n06 - 1 device=1 binding=--interleave=2,3
-tu-c0r0n09 - 0 device=0 binding=--interleave=0,1
-tu-c0r0n09 - 2 device=2 binding=--interleave=4,5
-tu-c0r0n03 - 1 device=1 binding=--interleave=2,3
-tu-c0r0n06 - 2 device=2 binding=--interleave=4,5
-tu-c0r0n09 - 3 device=3 binding=--interleave=6,7
-tu-c0r0n00 - 3 device=3 binding=--interleave=6,7
-tu-c0r0n03 - 0 device=0 binding=--interleave=0,1
-tu-c0r0n03 - 2 device=2 binding=--interleave=4,5
-tu-c0r0n06 - 3 device=3 binding=--interleave=6,7
-tu-c0r0n03 - 3 device=3 binding=--interleave=6,7
+tu-c0r3n00 - 0 device=0 binding=--interleave=0,1
+tu-c0r3n00 - 1 device=1 binding=--interleave=2,3
+tu-c0r3n00 - 2 device=2 binding=--interleave=4,5
+tu-c0r3n00 - 3 device=3 binding=--interleave=6,7
+tu-c0r3n06 - 1 device=1 binding=--interleave=2,3
+tu-c0r3n06 - 3 device=3 binding=--interleave=6,7
+tu-c0r3n06 - 0 device=0 binding=--interleave=0,1
+tu-c0r3n06 - 2 device=2 binding=--interleave=4,5
+tu-c0r3n03 - 1 device=1 binding=--interleave=2,3
+tu-c0r3n03 - 2 device=2 binding=--interleave=4,5
+tu-c0r3n03 - 0 device=0 binding=--interleave=0,1
+tu-c0r3n03 - 3 device=3 binding=--interleave=6,7
+tu-c0r3n09 - 0 device=0 binding=--interleave=0,1
+tu-c0r3n09 - 1 device=1 binding=--interleave=2,3
+tu-c0r3n09 - 2 device=2 binding=--interleave=4,5
+tu-c0r3n09 - 3 device=3 binding=--interleave=6,7
OPENMPI detected
AcceleratorCudaInit: using default device
-AcceleratorCudaInit: assume user either uses a) IBM jsrun, or
+AcceleratorCudaInit: assume user either uses
+AcceleratorCudaInit: a) IBM jsrun, or
AcceleratorCudaInit: b) invokes through a wrapping script to set CUDA_VISIBLE_DEVICES, UCX_NET_DEVICES, and numa binding
-AcceleratorCudaInit: Configure options --enable-summit, --enable-select-gpu=no
+AcceleratorCudaInit: Configure options --enable-setdevice=no
-AcceleratorCudaInit: ================================================
OPENMPI detected
AcceleratorCudaInit[0]: ========================
AcceleratorCudaInit[0]: Device Number : 0
@ -33,11 +33,41 @@ AcceleratorCudaInit[0]: pciBusID: 3
AcceleratorCudaInit[0]: pciDeviceID: 0
AcceleratorCudaInit[0]: maxGridSize (2147483647,65535,65535)
AcceleratorCudaInit: using default device
-AcceleratorCudaInit: assume user either uses a) IBM jsrun, or
+AcceleratorCudaInit: assume user either uses
+AcceleratorCudaInit: a) IBM jsrun, or
AcceleratorCudaInit: b) invokes through a wrapping script to set CUDA_VISIBLE_DEVICES, UCX_NET_DEVICES, and numa binding
-AcceleratorCudaInit: Configure options --enable-summit, --enable-select-gpu=no
+AcceleratorCudaInit: Configure options --enable-setdevice=no
-AcceleratorCudaInit: ================================================
OPENMPI detected
+AcceleratorCudaInit: using default device
+AcceleratorCudaInit: assume user either uses
+AcceleratorCudaInit: a) IBM jsrun, or
+AcceleratorCudaInit: b) invokes through a wrapping script to set CUDA_VISIBLE_DEVICES, UCX_NET_DEVICES, and numa binding
+AcceleratorCudaInit: Configure options --enable-setdevice=no
+OPENMPI detected
+AcceleratorCudaInit: using default device
+AcceleratorCudaInit: assume user either uses
+AcceleratorCudaInit: a) IBM jsrun, or
+AcceleratorCudaInit: b) invokes through a wrapping script to set CUDA_VISIBLE_DEVICES, UCX_NET_DEVICES, and numa binding
+AcceleratorCudaInit: Configure options --enable-setdevice=no
+OPENMPI detected
+AcceleratorCudaInit: using default device
+AcceleratorCudaInit: assume user either uses
+AcceleratorCudaInit: a) IBM jsrun, or
+AcceleratorCudaInit: b) invokes through a wrapping script to set CUDA_VISIBLE_DEVICES, UCX_NET_DEVICES, and numa binding
+AcceleratorCudaInit: Configure options --enable-setdevice=no
+OPENMPI detected
+OPENMPI detected
+AcceleratorCudaInit: using default device
+AcceleratorCudaInit: assume user either uses
+AcceleratorCudaInit: a) IBM jsrun, or
+AcceleratorCudaInit: b) invokes through a wrapping script to set CUDA_VISIBLE_DEVICES, UCX_NET_DEVICES, and numa binding
+AcceleratorCudaInit: Configure options --enable-setdevice=no
+OPENMPI detected
+AcceleratorCudaInit: using default device
+AcceleratorCudaInit: assume user either uses
+AcceleratorCudaInit: a) IBM jsrun, or
+AcceleratorCudaInit: b) invokes through a wrapping script to set CUDA_VISIBLE_DEVICES, UCX_NET_DEVICES, and numa binding
+AcceleratorCudaInit: Configure options --enable-setdevice=no
AcceleratorCudaInit[0]: ========================
AcceleratorCudaInit[0]: Device Number : 0
AcceleratorCudaInit[0]: ========================
@ -50,43 +80,25 @@ AcceleratorCudaInit[0]: pciBusID: 3
AcceleratorCudaInit[0]: pciDeviceID: 0
AcceleratorCudaInit[0]: maxGridSize (2147483647,65535,65535)
AcceleratorCudaInit: using default device
-AcceleratorCudaInit: assume user either uses a) IBM jsrun, or
+AcceleratorCudaInit: assume user either uses
+AcceleratorCudaInit: a) IBM jsrun, or
AcceleratorCudaInit: b) invokes through a wrapping script to set CUDA_VISIBLE_DEVICES, UCX_NET_DEVICES, and numa binding
-AcceleratorCudaInit: Configure options --enable-summit, --enable-select-gpu=no
+AcceleratorCudaInit: Configure options --enable-setdevice=no
+local rank 1 device 0 bus id: 0000:44:00.0
AcceleratorCudaInit: ================================================
-OPENMPI detected
+local rank 0 device 0 bus id: 0000:03:00.0
-AcceleratorCudaInit: using default device
-AcceleratorCudaInit: assume user either uses a) IBM jsrun, or
-AcceleratorCudaInit: b) invokes through a wrapping script to set CUDA_VISIBLE_DEVICES, UCX_NET_DEVICES, and numa binding
-AcceleratorCudaInit: Configure options --enable-summit, --enable-select-gpu=no
AcceleratorCudaInit: ================================================
-OPENMPI detected
-AcceleratorCudaInit: using default device
-AcceleratorCudaInit: assume user either uses a) IBM jsrun, or
-AcceleratorCudaInit: b) invokes through a wrapping script to set CUDA_VISIBLE_DEVICES, UCX_NET_DEVICES, and numa binding
-AcceleratorCudaInit: Configure options --enable-summit, --enable-select-gpu=no
AcceleratorCudaInit: ================================================
-OPENMPI detected
-AcceleratorCudaInit: using default device
-AcceleratorCudaInit: assume user either uses a) IBM jsrun, or
-AcceleratorCudaInit: b) invokes through a wrapping script to set CUDA_VISIBLE_DEVICES, UCX_NET_DEVICES, and numa binding
-AcceleratorCudaInit: Configure options --enable-summit, --enable-select-gpu=no
AcceleratorCudaInit: ================================================
-OPENMPI detected
-AcceleratorCudaInit: using default device
-AcceleratorCudaInit: assume user either uses a) IBM jsrun, or
-AcceleratorCudaInit: b) invokes through a wrapping script to set CUDA_VISIBLE_DEVICES, UCX_NET_DEVICES, and numa binding
-AcceleratorCudaInit: Configure options --enable-summit, --enable-select-gpu=no
AcceleratorCudaInit: ================================================
-OPENMPI detected
-AcceleratorCudaInit: using default device
-AcceleratorCudaInit: assume user either uses a) IBM jsrun, or
-AcceleratorCudaInit: b) invokes through a wrapping script to set CUDA_VISIBLE_DEVICES, UCX_NET_DEVICES, and numa binding
-AcceleratorCudaInit: Configure options --enable-summit, --enable-select-gpu=no
AcceleratorCudaInit: ================================================
+local rank 0 device 0 bus id: 0000:03:00.0
+AcceleratorCudaInit: ================================================
+AcceleratorCudaInit: ================================================
+local rank 2 device 0 bus id: 0000:84:00.0
SharedMemoryMpi:  World communicator of size 16
SharedMemoryMpi:  Node  communicator of size 4
-0SharedMemoryMpi: SharedMemoryMPI.cc acceleratorAllocDevice 2147483648bytes at 0x7fcd80000000 for comms buffers
+0SharedMemoryMpi: SharedMemoryMPI.cc acceleratorAllocDevice 2147483648bytes at 0x153960000000 for comms buffers
Setting up IPC

__|__|__|__|__|__|__|__|__|__|__|__|__|__|__
@ -116,7 +128,7 @@ This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
-Current Grid git commit hash=9d2238148c56e3fbadfa95dcabf2b83d4bde14cd: (HEAD -> develop) uncommited changes
+Current Grid git commit hash=da06d15f73184ceb15d66d4e7e702b02fed7b940: (HEAD -> feature/dirichlet, develop) uncommited changes

Grid : Message : ================================================
Grid : Message : MPI is initialised and logging filters activated
@ -124,122 +136,102 @@ Grid : Message : ================================================
Grid : Message : Requested 2147483648 byte stencil comms buffers
Grid : Message : MemoryManager Cache 34004218675 bytes
Grid : Message : MemoryManager::Init() setting up
-Grid : Message : MemoryManager::Init() cache pool for recent allocations: SMALL 32 LARGE 8
+Grid : Message : MemoryManager::Init() cache pool for recent allocations: SMALL 8 LARGE 2
Grid : Message : MemoryManager::Init() Non unified: Caching accelerator data in dedicated memory
Grid : Message : MemoryManager::Init() Using cudaMalloc
-Grid : Message : 1.198523 s : Grid Layout
-Grid : Message : 1.198530 s : Global lattice size : 64 64 64 64
-Grid : Message : 1.198534 s : OpenMP threads : 4
-Grid : Message : 1.198535 s : MPI tasks : 2 2 2 2
-Grid : Message : 1.397615 s : Making s innermost grids
-Grid : Message : 1.441828 s : Initialising 4d RNG
-Grid : Message : 1.547973 s : Intialising parallel RNG with unique string 'The 4D RNG'
-Grid : Message : 1.547998 s : Seed SHA256: 49db4542db694e3b1a74bf2592a8c1b83bfebbe18401693c2609a4c3af1
-Grid : Message : 1.954777 s : Initialising 5d RNG
-Grid : Message : 3.633825 s : Intialising parallel RNG with unique string 'The 5D RNG'
-Grid : Message : 3.633869 s : Seed SHA256: b6316f2fac44ce14111f93e0296389330b077bfd0a7b359f781c58589f8a
-Grid : Message : 12.162710 s : Initialised RNGs
-Grid : Message : 15.882520 s : Drawing gauge field
-Grid : Message : 15.816362 s : Random gauge initialised
-Grid : Message : 17.279671 s : Setting up Cshift based reference
-Grid : Message : 26.331426 s : *****************************************************************
-Grid : Message : 26.331452 s : * Kernel options --dslash-generic, --dslash-unroll, --dslash-asm
-Grid : Message : 26.331454 s : *****************************************************************
-Grid : Message : 26.331456 s : *****************************************************************
-Grid : Message : 26.331458 s : * Benchmarking DomainWallFermionR::Dhop
-Grid : Message : 26.331459 s : * Vectorising space-time by 8
-Grid : Message : 26.331463 s : * VComplexF size is 64 B
-Grid : Message : 26.331465 s : * SINGLE precision
-Grid : Message : 26.331467 s : * Using Overlapped Comms/Compute
-Grid : Message : 26.331468 s : * Using GENERIC Nc WilsonKernels
-Grid : Message : 26.331469 s : *****************************************************************
-Grid : Message : 28.413717 s : Called warmup
-Grid : Message : 56.418423 s : Called Dw 3000 times in 2.80047e+07 us
-Grid : Message : 56.418476 s : mflop/s = 3.79581e+07
-Grid : Message : 56.418479 s : mflop/s per rank = 2.37238e+06
-Grid : Message : 56.418481 s : mflop/s per node = 9.48953e+06
-Grid : Message : 56.418483 s : RF GiB/s (base 2) = 77130
-Grid : Message : 56.418485 s : mem GiB/s (base 2) = 48206.3
-Grid : Message : 56.422076 s : norm diff 1.03481e-13
-Grid : Message : 56.456894 s : #### Dhop calls report
-Grid : Message : 56.456899 s : WilsonFermion5D Number of DhopEO Calls : 6002
-Grid : Message : 56.456903 s : WilsonFermion5D TotalTime /Calls : 4710.93 us
-Grid : Message : 56.456905 s : WilsonFermion5D CommTime /Calls : 3196.15 us
-Grid : Message : 56.456908 s : WilsonFermion5D FaceTime /Calls : 494.392 us
-Grid : Message : 56.456910 s : WilsonFermion5D ComputeTime1/Calls : 44.4107 us
-Grid : Message : 56.456912 s : WilsonFermion5D ComputeTime2/Calls : 1037.75 us
-Grid : Message : 56.456921 s : Average mflops/s per call : 3.55691e+09
-Grid : Message : 56.456925 s : Average mflops/s per call per rank : 2.22307e+08
-Grid : Message : 56.456928 s : Average mflops/s per call per node : 8.89228e+08
-Grid : Message : 56.456930 s : Average mflops/s per call (full) : 3.82915e+07
-Grid : Message : 56.456933 s : Average mflops/s per call per rank (full): 2.39322e+06
-Grid : Message : 56.456952 s : Average mflops/s per call per node (full): 9.57287e+06
-Grid : Message : 56.456954 s : WilsonFermion5D Stencil
-Grid : Message : 56.457016 s : Stencil calls 3001
-Grid : Message : 56.457022 s : Stencil halogtime 0
-Grid : Message : 56.457024 s : Stencil gathertime 55.9154
-Grid : Message : 56.457026 s : Stencil gathermtime 20.1073
-Grid : Message : 56.457028 s : Stencil mergetime 18.5585
-Grid : Message : 56.457030 s : Stencil decompresstime 0.0639787
-Grid : Message : 56.457032 s : Stencil comms_bytes 4.02653e+08
-Grid : Message : 56.457034 s : Stencil commtime 6379.93
-Grid : Message : 56.457036 s : Stencil 63.1124 GB/s per rank
-Grid : Message : 56.457038 s : Stencil 252.45 GB/s per node
-Grid : Message : 56.457040 s : WilsonFermion5D StencilEven
-Grid : Message : 56.457048 s : WilsonFermion5D StencilOdd
-Grid : Message : 56.457062 s : WilsonFermion5D Stencil Reporti()
-Grid : Message : 56.457065 s : WilsonFermion5D StencilEven Reporti()
-Grid : Message : 56.457066 s : WilsonFermion5D StencilOdd Reporti()
-Grid : Message : 79.259261 s : Compare to naive wilson implementation Dag to verify correctness
-Grid : Message : 79.259287 s : Called DwDag
-Grid : Message : 79.259288 s : norm dag result 12.0421
-Grid : Message : 79.271740 s : norm dag ref 12.0421
-Grid : Message : 79.287759 s : norm dag diff 7.63236e-14
-Grid : Message : 79.328100 s : Calling Deo and Doe and //assert Deo+Doe == Dunprec
-Grid : Message : 79.955951 s : src_e0.499997
-Grid : Message : 80.633620 s : src_o0.500003
-Grid : Message : 80.164163 s : *********************************************************
-Grid : Message : 80.164168 s : * Benchmarking DomainWallFermionF::DhopEO
-Grid : Message : 80.164170 s : * Vectorising space-time by 8
-Grid : Message : 80.164172 s : * SINGLE precision
-Grid : Message : 80.164174 s : * Using Overlapped Comms/Compute
-Grid : Message : 80.164177 s : * Using GENERIC Nc WilsonKernels
-Grid : Message : 80.164178 s : *********************************************************
-Grid : Message : 93.797635 s : Deo mflop/s = 3.93231e+07
-Grid : Message : 93.797670 s : Deo mflop/s per rank 2.45769e+06
-Grid : Message : 93.797672 s : Deo mflop/s per node 9.83077e+06
-Grid : Message : 93.797674 s : #### Dhop calls report
-Grid : Message : 93.797675 s : WilsonFermion5D Number of DhopEO Calls : 3001
-Grid : Message : 93.797677 s : WilsonFermion5D TotalTime /Calls : 4542.83 us
-Grid : Message : 93.797679 s : WilsonFermion5D CommTime /Calls : 2978.97 us
-Grid : Message : 93.797681 s : WilsonFermion5D FaceTime /Calls : 602.287 us
-Grid : Message : 93.797683 s : WilsonFermion5D ComputeTime1/Calls : 67.1416 us
-Grid : Message : 93.797685 s : WilsonFermion5D ComputeTime2/Calls : 1004.07 us
-Grid : Message : 93.797713 s : Average mflops/s per call : 3.30731e+09
-Grid : Message : 93.797717 s : Average mflops/s per call per rank : 2.06707e+08
-Grid : Message : 93.797719 s : Average mflops/s per call per node : 8.26827e+08
-Grid : Message : 93.797721 s : Average mflops/s per call (full) : 3.97084e+07
-Grid : Message : 93.797727 s : Average mflops/s per call per rank (full): 2.48178e+06
-Grid : Message : 93.797732 s : Average mflops/s per call per node (full): 9.92711e+06
-Grid : Message : 93.797735 s : WilsonFermion5D Stencil
-Grid : Message : 93.797746 s : WilsonFermion5D StencilEven
-Grid : Message : 93.797758 s : WilsonFermion5D StencilOdd
-Grid : Message : 93.797769 s : Stencil calls 3001
-Grid : Message : 93.797773 s : Stencil halogtime 0
-Grid : Message : 93.797776 s : Stencil gathertime 56.7458
-Grid : Message : 93.797780 s : Stencil gathermtime 22.6504
-Grid : Message : 93.797782 s : Stencil mergetime 21.1913
-Grid : Message : 93.797786 s : Stencil decompresstime 0.0556481
-Grid : Message : 93.797788 s : Stencil comms_bytes 2.01327e+08
-Grid : Message : 93.797791 s : Stencil commtime 2989.33
-Grid : Message : 93.797795 s : Stencil 67.3484 GB/s per rank
-Grid : Message : 93.797798 s : Stencil 269.394 GB/s per node
-Grid : Message : 93.797801 s : WilsonFermion5D Stencil Reporti()
-Grid : Message : 93.797803 s : WilsonFermion5D StencilEven Reporti()
-Grid : Message : 93.797805 s : WilsonFermion5D StencilOdd Reporti()
-Grid : Message : 93.873429 s : r_e6.02111
-Grid : Message : 93.879931 s : r_o6.02102
-Grid : Message : 93.885912 s : res12.0421
-Grid : Message : 94.876555 s : norm diff 0
-Grid : Message : 95.485643 s : norm diff even 0
-Grid : Message : 95.581236 s : norm diff odd 0
+Grid : Message : 1.875883 s : Grid Layout
+Grid : Message : 1.875893 s : Global lattice size : 64 64 64 64
+Grid : Message : 1.875897 s : OpenMP threads : 4
+Grid : Message : 1.875898 s : MPI tasks : 2 2 2 2
+Grid : Message : 1.993571 s : Initialising 4d RNG
+Grid : Message : 2.881990 s : Intialising parallel RNG with unique string 'The 4D RNG'
+Grid : Message : 2.882370 s : Seed SHA256: 49db4542db694e3b1a74bf2592a8c1b83bfebbe18401693c2609a4c3af1
+Grid : Message : 2.495044 s : Initialising 5d RNG
+Grid : Message : 4.120900 s : Intialising parallel RNG with unique string 'The 5D RNG'
+Grid : Message : 4.121350 s : Seed SHA256: b6316f2fac44ce14111f93e0296389330b077bfd0a7b359f781c58589f8a
+Grid : Message : 15.268010 s : Drawing gauge field
+Grid : Message : 16.234025 s : Random gauge initialised
+Grid : Message : 16.234057 s : Applying BCs
+Grid : Message : 16.365565 s : Setting up Cshift based reference
+Grid : Message : 44.512418 s : *****************************************************************
+Grid : Message : 44.512448 s : * Kernel options --dslash-generic, --dslash-unroll, --dslash-asm
+Grid : Message : 44.512450 s : *****************************************************************
+Grid : Message : 44.512451 s : *****************************************************************
+Grid : Message : 44.512452 s : * Benchmarking DomainWallFermionR::Dhop
+Grid : Message : 44.512453 s : * Vectorising space-time by 8
+Grid : Message : 44.512454 s : * VComplexF size is 64 B
+Grid : Message : 44.512456 s : * SINGLE precision
+Grid : Message : 44.512459 s : * Using Overlapped Comms/Compute
+Grid : Message : 44.512460 s : * Using GENERIC Nc WilsonKernels
+Grid : Message : 44.512461 s : *****************************************************************
+Grid : Message : 46.389070 s : Called warmup
+Grid : Message : 49.211265 s : Called Dw 300 times in 2.82203e+06 us
+Grid : Message : 49.211295 s : mflop/s = 3.76681e+07
+Grid : Message : 49.211297 s : mflop/s per rank = 2.35425e+06
+Grid : Message : 49.211299 s : mflop/s per node = 9.41702e+06
+Grid : Message : 49.211301 s : RF GiB/s (base 2) = 76540.6
+Grid : Message : 49.211308 s : mem GiB/s (base 2) = 47837.9
+Grid : Message : 49.214868 s : norm diff 1.06409e-13
+Grid : Message : 92.647781 s : Compare to naive wilson implementation Dag to verify correctness
+Grid : Message : 92.647816 s : Called DwDag
+Grid : Message : 92.647817 s : norm dag result 12.0421
+Grid : Message : 92.801806 s : norm dag ref 12.0421
+Grid : Message : 92.817724 s : norm dag diff 7.21921e-14
+Grid : Message : 92.858973 s : Calling Deo and Doe and //assert Deo+Doe == Dunprec
+Grid : Message : 93.210378 s : src_e0.499997
+Grid : Message : 93.583286 s : src_o0.500003
+Grid : Message : 93.682468 s : *********************************************************
+Grid : Message : 93.682471 s : * Benchmarking DomainWallFermionF::DhopEO
+Grid : Message : 93.682472 s : * Vectorising space-time by 8
+Grid : Message : 93.682473 s : * SINGLE precision
+Grid : Message : 93.682475 s : * Using Overlapped Comms/Compute
+Grid : Message : 93.682476 s : * Using GENERIC Nc WilsonKernels
+Grid : Message : 93.682477 s : *********************************************************
+Grid : Message : 95.162342 s : Deo mflop/s = 3.92487e+07
+Grid : Message : 95.162387 s : Deo mflop/s per rank 2.45305e+06
+Grid : Message : 95.162389 s : Deo mflop/s per node 9.81219e+06
+Grid : Message : 95.232801 s : r_e6.02111
+Grid : Message : 95.240061 s : r_o6.02102
+Grid : Message : 95.245975 s : res12.0421
+Grid : Message : 95.833402 s : norm diff 0
+Grid : Message : 96.573829 s : norm diff even 0
+Grid : Message : 96.868272 s : norm diff odd 0
+Dirichlet block [0 64 64 32 32]
+Grid : Message : 97.756909 s : Grid Layout
+Grid : Message : 97.756911 s : Global lattice size : 64 64 64 64
+Grid : Message : 97.756921 s : OpenMP threads : 4
+Grid : Message : 97.756922 s : MPI tasks : 2 2 2 2
+Grid : Message : 97.897085 s : Initialising 4d RNG
+Grid : Message : 97.965061 s : Intialising parallel RNG with unique string 'The 4D RNG'
+Grid : Message : 97.965097 s : Seed SHA256: 49db4542db694e3b1a74bf2592a8c1b83bfebbe18401693c2609a4c3af1
+Grid : Message : 98.367431 s : Initialising 5d RNG
+Grid : Message : 99.752745 s : Intialising parallel RNG with unique string 'The 5D RNG'
+Grid : Message : 99.752790 s : Seed SHA256: b6316f2fac44ce14111f93e0296389330b077bfd0a7b359f781c58589f8a
+Grid : Message : 111.290148 s : Drawing gauge field
+Grid : Message : 112.349289 s : Random gauge initialised
+Grid : Message : 112.349320 s : Applying BCs
+Grid : Message : 113.948740 s : Setting up Cshift based reference
+Grid : Message : 140.320415 s : *****************************************************************
+Grid : Message : 140.320443 s : * Kernel options --dslash-generic, --dslash-unroll, --dslash-asm
+Grid : Message : 140.320444 s : *****************************************************************
+Grid : Message : 140.320445 s : *****************************************************************
+Grid : Message : 140.320446 s : * Benchmarking DomainWallFermionR::Dhop
+Grid : Message : 140.320447 s : * Vectorising space-time by 8
+Grid : Message : 140.320448 s : * VComplexF size is 64 B
+Grid : Message : 140.320450 s : * SINGLE precision
+Grid : Message : 140.320451 s : * Using Overlapped Comms/Compute
+Grid : Message : 140.320452 s : * Using GENERIC Nc WilsonKernels
+Grid : Message : 140.320453 s : *****************************************************************
+Grid : Message : 142.296150 s : Called warmup
+Grid : Message : 144.397678 s : Called Dw 300 times in 2.36719e+06 us
+Grid : Message : 144.397700 s : mflop/s = 4.49058e+07
+Grid : Message : 144.397702 s : mflop/s per rank = 2.80661e+06
+Grid : Message : 144.397704 s : mflop/s per node = 1.12265e+07
+Grid : Message : 144.397706 s : RF GiB/s (base 2) = 91247.6
+Grid : Message : 144.397708 s : mem GiB/s (base 2) = 57029.7
+Grid : Message : 144.401269 s : norm diff 9.78944e-14
+Grid : Message : 186.885460 s : Compare to naive wilson implementation Dag to verify correctness
+Grid : Message : 186.885492 s : Called DwDag
+Grid : Message : 186.885493 s : norm dag result 10.4157
+Grid : Message : 186.897154 s : norm dag ref 11.2266
+Grid : Message : 186.912538 s : norm dag diff 0.484633
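Editor's note: in the new log the second benchmark pass runs with Dirichlet blocks [0 64 64 32 32] (a leading 0 means no cut in the s direction). The Dhop time drops from 2.82203e+06 us to 2.36719e+06 us for the same 300 calls, plausibly because the cut links suppress communication; and the large "norm dag diff 0.484633" against the periodic Cshift reference is expected there, since the Dirichlet operator genuinely differs. A trivial standalone check of the speedup:

    // throughput ratio of the two passes in the new log above
    #include <cstdio>
    int main(void) {
      const double us_periodic = 2.82203e6, us_dirichlet = 2.36719e6;
      std::printf("speedup = %.3f\n", us_periodic / us_dirichlet);  // ~1.19
      return 0;
    }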
@ -1,14 +1,13 @@
#!/bin/bash
#SBATCH -J dslash
-#SBATCH -A tc002
+#SBATCH -A dp207
-#SBATCH -t 2:20:00
-#SBATCH --nodelist=tu-c0r0n[00,03,06,09]
#SBATCH --exclusive
#SBATCH --nodes=4
#SBATCH --ntasks=16
+#SBATCH --qos=standard
#SBATCH --ntasks-per-node=4
#SBATCH --cpus-per-task=8
-#SBATCH --time=12:00:00
+#SBATCH --time=0:05:00
#SBATCH --partition=gpu
#SBATCH --gres=gpu:4
#SBATCH --output=%x.%j.out
new file: tests/core/Test_compact_wilson_clover_speedup.cc (226 lines)
@ -0,0 +1,226 @@
/*************************************************************************************

    Grid physics library, www.github.com/paboyle/Grid

    Source file: ./tests/core/Test_compact_wilson_clover_speedup.cc

    Copyright (C) 2020 - 2022

    Author: Daniel Richtmann <daniel.richtmann@gmail.com>
    Author: Nils Meyer <nils.meyer@ur.de>

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License along
    with this program; if not, write to the Free Software Foundation, Inc.,
    51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.

    See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/*  END LEGAL */

#include <Grid/Grid.h>

using namespace Grid;

NAMESPACE_BEGIN(CommandlineHelpers);

static bool checkPresent(int* argc, char*** argv, const std::string& option) {
  return GridCmdOptionExists(*argv, *argv + *argc, option);
}

static std::string getContent(int* argc, char*** argv, const std::string& option) {
  return GridCmdOptionPayload(*argv, *argv + *argc, option);
}

static int readInt(int* argc, char*** argv, std::string&& option, int defaultValue) {
  std::string arg;
  int ret = defaultValue;
  if(checkPresent(argc, argv, option)) {
    arg = getContent(argc, argv, option);
    GridCmdOptionInt(arg, ret);
  }
  return ret;
}

static float readFloat(int* argc, char*** argv, std::string&& option, float defaultValue) {
  std::string arg;
  float ret = defaultValue;
  if(checkPresent(argc, argv, option)) {
    arg = getContent(argc, argv, option);
    GridCmdOptionFloat(arg, ret);
  }
  return ret;
}

NAMESPACE_END(CommandlineHelpers);


#define _grid_printf(LOGGER, ...) \
  { \
    if((LOGGER).isActive()) { /* this makes it safe to put, e.g., norm2 in the calling code w.r.t. performance */ \
      char _printf_buf[1024]; \
      std::sprintf(_printf_buf, __VA_ARGS__); \
      std::cout << (LOGGER) << _printf_buf; \
      fflush(stdout); \
    } \
  }
#define grid_printf_msg(...) _grid_printf(GridLogMessage, __VA_ARGS__)


template<typename Field>
bool resultsAgree(const Field& ref, const Field& res, const std::string& name) {
  RealD checkTolerance = (getPrecision<Field>::value == 2) ? 1e-15 : 1e-7;
  Field diff(ref.Grid());
  diff = ref - res;
  auto absDev = norm2(diff);
  auto relDev = absDev / norm2(ref);
  std::cout << GridLogMessage
            << "norm2(reference), norm2(" << name << "), abs. deviation, rel. deviation: " << norm2(ref) << " "
            << norm2(res) << " " << absDev << " " << relDev << " -> check "
            << ((relDev < checkTolerance) ? "passed" : "failed") << std::endl;

  return relDev <= checkTolerance;
}


template<typename vCoeff_t>
void runBenchmark(int* argc, char*** argv) {
  // read from command line
  const int   nIter        = CommandlineHelpers::readInt(  argc, argv, "--niter", 1000);
  const RealD mass         = CommandlineHelpers::readFloat(argc, argv, "--mass",  0.5);
  const RealD csw          = CommandlineHelpers::readFloat(argc, argv, "--csw",   1.0);
  const RealD cF           = CommandlineHelpers::readFloat(argc, argv, "--cF",    1.0);
  const bool  antiPeriodic = CommandlineHelpers::checkPresent(argc, argv, "--antiperiodic");

  // precision
  static_assert(getPrecision<vCoeff_t>::value == 2 || getPrecision<vCoeff_t>::value == 1, "Incorrect precision"); // double or single
  std::string precision = (getPrecision<vCoeff_t>::value == 2 ? "double" : "single");

  // setup grids
  GridCartesian* UGrid = SpaceTimeGrid::makeFourDimGrid(GridDefaultLatt(), GridDefaultSimd(Nd, vCoeff_t::Nsimd()), GridDefaultMpi());
  GridRedBlackCartesian* UrbGrid = SpaceTimeGrid::makeFourDimRedBlackGrid(UGrid);
  // clang-format on

  // setup rng
  std::vector<int> seeds({1, 2, 3, 4});
  GridParallelRNG  pRNG(UGrid);
  pRNG.SeedFixedIntegers(seeds);

  // type definitions
  typedef WilsonImpl<vCoeff_t, FundamentalRepresentation, CoeffReal> WImpl;
  typedef WilsonCloverFermion<WImpl> WilsonCloverOperator;
  typedef CompactWilsonCloverFermion<WImpl> CompactWilsonCloverOperator;
  typedef typename WilsonCloverOperator::FermionField Fermion;
  typedef typename WilsonCloverOperator::GaugeField Gauge;

  // setup fields
  Fermion src(UGrid); random(pRNG, src);
  Fermion ref(UGrid); ref = Zero();
  Fermion res(UGrid); res = Zero();
  Fermion hop(UGrid); hop = Zero();
  Fermion diff(UGrid); diff = Zero();
  Gauge   Umu(UGrid); SU3::HotConfiguration(pRNG, Umu);

  // setup boundary phases
  typename WilsonCloverOperator::ImplParams implParams;
  std::vector<Complex> boundary_phases(Nd, 1.);
  if(antiPeriodic) boundary_phases[Nd-1] = -1.;
  implParams.boundary_phases = boundary_phases;
  WilsonAnisotropyCoefficients anisParams;

  // misc stuff needed for benchmarks
  double volume=1.0; for(int mu=0; mu<Nd; mu++) volume*=UGrid->_fdimensions[mu];

  // setup fermion operators
  WilsonCloverOperator        Dwc(        Umu, *UGrid, *UrbGrid, mass, csw, csw,     anisParams, implParams);
  CompactWilsonCloverOperator Dwc_compact(Umu, *UGrid, *UrbGrid, mass, csw, csw, cF, anisParams, implParams);

  // now test the conversions
  typename CompactWilsonCloverOperator::CloverField         tmp_ref(UGrid);  tmp_ref  = Dwc.CloverTerm;
  typename CompactWilsonCloverOperator::CloverField         tmp_res(UGrid);  tmp_res  = Zero();
  typename CompactWilsonCloverOperator::CloverField         tmp_diff(UGrid); tmp_diff = Zero();
  typename CompactWilsonCloverOperator::CloverDiagonalField diagonal(UGrid); diagonal = Zero();
  typename CompactWilsonCloverOperator::CloverTriangleField triangle(UGrid); diagonal = Zero();
  CompactWilsonCloverOperator::CompactHelpers::ConvertLayout(tmp_ref, diagonal, triangle);
  CompactWilsonCloverOperator::CompactHelpers::ConvertLayout(diagonal, triangle, tmp_res);
  tmp_diff = tmp_ref - tmp_res;
  std::cout << GridLogMessage << "conversion: ref, res, diff, eps"
            << " " << norm2(tmp_ref)
            << " " << norm2(tmp_res)
            << " " << norm2(tmp_diff)
            << " " << norm2(tmp_diff) / norm2(tmp_ref)
            << std::endl;

  // performance per site (use minimal values necessary)
  double hop_flop_per_site = 1320; // Rich's Talk + what Peter uses
  double hop_byte_per_site = (8 * 9 + 9 * 12) * 2 * getPrecision<vCoeff_t>::value * 4;
  double clov_flop_per_site = 504; // Rich's Talk and 1412.2629
  double clov_byte_per_site = (2 * 18 + 12 + 12) * 2 * getPrecision<vCoeff_t>::value * 4;
  double clov_flop_per_site_performed = 1128;
  double clov_byte_per_site_performed = (12 * 12 + 12 + 12) * 2 * getPrecision<vCoeff_t>::value * 4;

  // total performance numbers
  double hop_gflop_total = volume * nIter * hop_flop_per_site / 1e9;
  double hop_gbyte_total = volume * nIter * hop_byte_per_site / 1e9;
  double clov_gflop_total = volume * nIter * clov_flop_per_site / 1e9;
  double clov_gbyte_total = volume * nIter * clov_byte_per_site / 1e9;
  double clov_gflop_performed_total = volume * nIter * clov_flop_per_site_performed / 1e9;
  double clov_gbyte_performed_total = volume * nIter * clov_byte_per_site_performed / 1e9;

  // warmup + measure dhop
  for(auto n : {1, 2, 3, 4, 5}) Dwc.Dhop(src, hop, 0);
  double t0 = usecond();
  for(int n = 0; n < nIter; n++) Dwc.Dhop(src, hop, 0);
  double t1 = usecond();
  double secs_hop = (t1-t0)/1e6;
  grid_printf_msg("Performance(%35s, %s): %2.4f s, %6.0f GFlop/s, %6.0f GByte/s, speedup vs ref = %.2f, fraction of hop = %.2f\n",
                  "hop", precision.c_str(), secs_hop, hop_gflop_total/secs_hop, hop_gbyte_total/secs_hop, 0.0, secs_hop/secs_hop);

#define BENCH_CLOVER_KERNEL(KERNEL) { \
  /* warmup + measure reference clover */ \
  for(auto n : {1, 2, 3, 4, 5}) Dwc.KERNEL(src, ref); \
  double t2 = usecond(); \
  for(int n = 0; n < nIter; n++) Dwc.KERNEL(src, ref); \
  double t3 = usecond(); \
  double secs_ref = (t3-t2)/1e6; \
  grid_printf_msg("Performance(%35s, %s): %2.4f s, %6.0f GFlop/s, %6.0f GByte/s, speedup vs ref = %.2f, fraction of hop = %.2f\n", \
                  "reference_"#KERNEL, precision.c_str(), secs_ref, clov_gflop_total/secs_ref, clov_gbyte_total/secs_ref, secs_ref/secs_ref, secs_ref/secs_hop); \
  grid_printf_msg("Performance(%35s, %s): %2.4f s, %6.0f GFlop/s, %6.0f GByte/s, speedup vs ref = %.2f, fraction of hop = %.2f\n", /* to see how well the ET performs */ \
                  "reference_"#KERNEL"_performed", precision.c_str(), secs_ref, clov_gflop_performed_total/secs_ref, clov_gbyte_performed_total/secs_ref, secs_ref/secs_ref, secs_ref/secs_hop); \
  \
  /* warmup + measure compact clover */ \
  for(auto n : {1, 2, 3, 4, 5}) Dwc_compact.KERNEL(src, res); \
  double t4 = usecond(); \
  for(int n = 0; n < nIter; n++) Dwc_compact.KERNEL(src, res); \
  double t5 = usecond(); \
  double secs_res = (t5-t4)/1e6; \
  grid_printf_msg("Performance(%35s, %s): %2.4f s, %6.0f GFlop/s, %6.0f GByte/s, speedup vs ref = %.2f, fraction of hop = %.2f\n", \
                  "compact_"#KERNEL, precision.c_str(), secs_res, clov_gflop_total/secs_res, clov_gbyte_total/secs_res, secs_ref/secs_res, secs_res/secs_hop); \
  assert(resultsAgree(ref, res, #KERNEL)); \
}

  BENCH_CLOVER_KERNEL(Mooee);
  BENCH_CLOVER_KERNEL(MooeeDag);
  BENCH_CLOVER_KERNEL(MooeeInv);
  BENCH_CLOVER_KERNEL(MooeeInvDag);

  grid_printf_msg("finalize %s\n", precision.c_str());
}

int main(int argc, char** argv) {
  Grid_init(&argc, &argv);

  runBenchmark<vComplexD>(&argc, &argv);
  runBenchmark<vComplexF>(&argc, &argv);

  Grid_finalize();
}
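Editor's note: a plausible invocation of the new test; the flags are the ones parsed by CommandlineHelpers above, --grid is Grid's usual geometry option, and the lattice size is an illustrative choice:

    # hypothetical run
    ./tests/core/Test_compact_wilson_clover_speedup --grid 16.16.16.16 \
        --niter 500 --mass 0.5 --csw 1.0 --cF 1.0 --antiperiodic

One small wrinkle in resultsAgree(): the printed pass/fail message tests relDev < checkTolerance while the returned value tests relDev <= checkTolerance, so a result exactly at tolerance would print "failed" yet pass the assert.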