Mirror of https://github.com/paboyle/Grid.git (synced 2025-06-22 09:42:02 +01:00)

Compare commits: hotfix/nvc ... 876c8f4478 (38 commits)
Commits in this comparison (SHA1 only):
876c8f4478, 9c8750f261, 91efd08179, 9953511b65, 025fa9991a, e8c60c355b,
6c9c7f9d85, f534523ede, 1b8a834beb, 3aa43e6065, 78ac4044ff, 119c3db47f,
21bbdb8fc2, 739bd7572c, 074627a5bd, 6a23b2c599, bd891fb3f5, 3984265851,
45361d188f, 80c9d77e02, 3aff64dddb, b4f2ca81ff, d1dea5f840, 54f8b84d16,
da503fef0e, 4a6802098a, f9b41a84d2, 5d7e0d18b9, 86dac5ff4f, 4a382fad3f,
cc753670d9, cc9d88ea1c, b281b0166e, 6a21f694ff, 39214702f6, 3e4614c63a,
ccd21f96ff, 4b90cb8888
.github/ISSUE_TEMPLATE/bug-report.yml (new file, vendored, 54 lines)
@@ -0,0 +1,54 @@
name: Bug report
description: Report a bug.
title: "<insert title>"
labels: [bug]

body:
  - type: markdown
    attributes:
      value: >
        Thank you for taking the time to file a bug report.
        Please check that the code is pointing to the HEAD of develop
        or any commit in master which is tagged with a version number.

  - type: textarea
    attributes:
      label: "Describe the issue:"
      description: >
        Describe the issue and any previous attempt to solve it.
    validations:
      required: true

  - type: textarea
    attributes:
      label: "Code example:"
      description: >
        If relevant, show how to reproduce the issue using a minimal working
        example.
      placeholder: |
        << your code here >>
      render: shell
    validations:
      required: false

  - type: textarea
    attributes:
      label: "Target platform:"
      description: >
        Give a description of the target platform (CPU, network, compiler).
        Please give the full CPU part description, using for example
        `cat /proc/cpuinfo | grep 'model name' | uniq` (Linux)
        or `sysctl machdep.cpu.brand_string` (macOS), and the full output of
        the `--version` option of your compiler.
    validations:
      required: true

  - type: textarea
    attributes:
      label: "Configure options:"
      description: >
        Please give the exact configure command used and attach
        `config.log`, `grid.config.summary` and the output of `make V=1`.
      render: shell
    validations:
      required: true
@@ -542,6 +542,7 @@ public:
       (*this)(in[i], out[i]);
     }
   }
+  virtual ~LinearFunction(){};
 };

 template<class Field> class IdentityLinearFunction : public LinearFunction<Field> {
@@ -166,16 +166,16 @@ public:
       rsqf[s] =rsq[s];
       std::cout<<GridLogMessage<<"ConjugateGradientMultiShiftMixedPrecCleanup: shift "<< s <<" target resid "<<rsq[s]<<std::endl;
       // ps_d[s] = src_d;
-      precisionChangeFast(ps_f[s],src_d);
+      precisionChange(ps_f[s],src_d);
     }
     // r and p for primary
     p_d = src_d; //primary copy --- make this a reference to ps_d to save axpys
     r_d = p_d;

     //MdagM+m[0]
-    precisionChangeFast(p_f,p_d);
+    precisionChange(p_f,p_d);
     Linop_f.HermOpAndNorm(p_f,mmp_f,d,qq); // mmp = MdagM p d=real(dot(p, mmp)), qq=norm2(mmp)
-    precisionChangeFast(tmp_d,mmp_f);
+    precisionChange(tmp_d,mmp_f);
     Linop_d.HermOpAndNorm(p_d,mmp_d,d,qq); // mmp = MdagM p d=real(dot(p, mmp)), qq=norm2(mmp)
     tmp_d = tmp_d - mmp_d;
     std::cout << " Testing operators match "<<norm2(mmp_d)<<" f "<<norm2(mmp_f)<<" diff "<< norm2(tmp_d)<<std::endl;
@@ -204,7 +204,7 @@ public:

     for(int s=0;s<nshift;s++) {
       axpby(psi_d[s],0.,-bs[s]*alpha[s],src_d,src_d);
-      precisionChangeFast(psi_f[s],psi_d[s]);
+      precisionChange(psi_f[s],psi_d[s]);
     }

     ///////////////////////////////////////
@@ -225,7 +225,7 @@ public:
     AXPYTimer.Stop();

     PrecChangeTimer.Start();
-    precisionChangeFast(r_f, r_d);
+    precisionChange(r_f, r_d);
     PrecChangeTimer.Stop();

     AXPYTimer.Start();
@@ -243,13 +243,13 @@ public:

     cp=c;
     PrecChangeTimer.Start();
-    precisionChangeFast(p_f, p_d); //get back single prec search direction for linop
+    precisionChange(p_f, p_d); //get back single prec search direction for linop
     PrecChangeTimer.Stop();
     MatrixTimer.Start();
     Linop_f.HermOp(p_f,mmp_f);
     MatrixTimer.Stop();
     PrecChangeTimer.Start();
-    precisionChangeFast(mmp_d, mmp_f); // From Float to Double
+    precisionChange(mmp_d, mmp_f); // From Float to Double
     PrecChangeTimer.Stop();

     d=real(innerProduct(p_d,mmp_d));
@@ -311,7 +311,7 @@ public:
     SolverTimer.Stop();

     for(int s=0;s<nshift;s++){
-      precisionChangeFast(psi_d[s],psi_f[s]);
+      precisionChange(psi_d[s],psi_f[s]);
     }

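The hunks above replace every precisionChangeFast call with precisionChange when shuttling fields between the double and single precision operators. For orientation, here is a minimal sketch of that conversion step on its own; the grids, field names and fermion types are illustrative assumptions, not taken from this diff, and only the two-argument precisionChange(out, in) form used above is relied on.

#include <Grid/Grid.h>
using namespace Grid;

// Minimal sketch (illustrative only): demote a double-precision field to single
// precision and promote it back, as the mixed-precision multishift CG does
// around its single-precision inner applications of the operator.
int main(int argc, char **argv) {
  Grid_init(&argc, &argv);

  Coordinate latt  = GridDefaultLatt();
  Coordinate simdD = GridDefaultSimd(Nd, vComplexD::Nsimd());
  Coordinate simdF = GridDefaultSimd(Nd, vComplexF::Nsimd());
  Coordinate mpi   = GridDefaultMpi();

  GridCartesian GridD(latt, simdD, mpi);   // double-precision grid
  GridCartesian GridF(latt, simdF, mpi);   // matching single-precision grid

  LatticeFermionD psi_d(&GridD);  psi_d = Zero();
  LatticeFermionF psi_f(&GridF);

  precisionChange(psi_f, psi_d);  // double -> single, as in the hunks above
  // ... apply the single-precision operator to psi_f ...
  precisionChange(psi_d, psi_f);  // single -> double for the defect correction

  Grid_finalize();
  return 0;
}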
@@ -211,7 +211,7 @@ public:
     Linop_d.HermOpAndNorm(p_d,mmp_d,d,qq); // mmp = MdagM p d=real(dot(p, mmp)), qq=norm2(mmp)
     tmp_d = tmp_d - mmp_d;
     std::cout << " Testing operators match "<<norm2(mmp_d)<<" f "<<norm2(mmp_f)<<" diff "<< norm2(tmp_d)<<std::endl;
-    // assert(norm2(tmp_d)< 1.0e-4);
+    assert(norm2(tmp_d)< 1.0);

     axpy(mmp_d,mass[0],p_d,mmp_d);
     RealD rn = norm2(p_d);
@@ -27,9 +27,10 @@ Author: Christoph Lehner <christoph@lhnr.de>
 *************************************************************************************/
 /* END LEGAL */

+#define header "SharedMemoryMpi: "

 #include <Grid/GridCore.h>
 #include <pwd.h>
-#include <syscall.h>

 #ifdef GRID_CUDA
 #include <cuda_runtime_api.h>
@@ -39,11 +40,118 @@ Author: Christoph Lehner <christoph@lhnr.de>
 #endif
 #ifdef GRID_SYCL
 #define GRID_SYCL_LEVEL_ZERO_IPC
+#include <syscall.h>
+#define SHM_SOCKETS
+#endif
+
+#include <sys/socket.h>
+#include <sys/un.h>
+
+NAMESPACE_BEGIN(Grid);
+
+#ifdef SHM_SOCKETS
+
+/*
+ * Barbaric extra intranode communication route in case we need sockets to pass FDs
+ * Forced by level_zero not being nicely designed
+ */
+static int sock;
+static const char *sock_path_fmt = "/tmp/GridUnixSocket.%d";
+static char sock_path[256];
+class UnixSockets {
+public:
+  static void Open(int rank)
+  {
+    int errnum;
+
+    sock = socket(AF_UNIX, SOCK_DGRAM, 0);  assert(sock>0);
+
+    struct sockaddr_un sa_un = { 0 };
+    sa_un.sun_family = AF_UNIX;
+    snprintf(sa_un.sun_path, sizeof(sa_un.sun_path),sock_path_fmt,rank);
+    unlink(sa_un.sun_path);
+    if (bind(sock, (struct sockaddr *)&sa_un, sizeof(sa_un))) {
+      perror("bind failure");
+      exit(EXIT_FAILURE);
+    }
+  }
+
+  static int RecvFileDescriptor(void)
+  {
+    int n;
+    int fd;
+    char buf[1];
+    struct iovec iov;
+    struct msghdr msg;
+    struct cmsghdr *cmsg;
+    char cms[CMSG_SPACE(sizeof(int))];
+
+    iov.iov_base = buf;
+    iov.iov_len = 1;
+
+    memset(&msg, 0, sizeof msg);
+    msg.msg_name = 0;
+    msg.msg_namelen = 0;
+    msg.msg_iov = &iov;
+    msg.msg_iovlen = 1;
+
+    msg.msg_control = (caddr_t)cms;
+    msg.msg_controllen = sizeof cms;
+
+    if((n=recvmsg(sock, &msg, 0)) < 0) {
+      perror("recvmsg failed");
+      return -1;
+    }
+    if(n == 0){
+      perror("recvmsg returned 0");
+      return -1;
+    }
+    cmsg = CMSG_FIRSTHDR(&msg);
+
+    memmove(&fd, CMSG_DATA(cmsg), sizeof(int));
+
+    return fd;
+  }
+
+  static void SendFileDescriptor(int fildes,int xmit_to_rank)
+  {
+    struct msghdr msg;
+    struct iovec iov;
+    struct cmsghdr *cmsg = NULL;
+    char ctrl[CMSG_SPACE(sizeof(int))];
+    char data = ' ';
+
+    memset(&msg, 0, sizeof(struct msghdr));
+    memset(ctrl, 0, CMSG_SPACE(sizeof(int)));
+    iov.iov_base = &data;
+    iov.iov_len = sizeof(data);
+
+    sprintf(sock_path,sock_path_fmt,xmit_to_rank);
+
+    struct sockaddr_un sa_un = { 0 };
+    sa_un.sun_family = AF_UNIX;
+    snprintf(sa_un.sun_path, sizeof(sa_un.sun_path),sock_path_fmt,xmit_to_rank);
+
+    msg.msg_name = (void *)&sa_un;
+    msg.msg_namelen = sizeof(sa_un);
+    msg.msg_iov = &iov;
+    msg.msg_iovlen = 1;
+    msg.msg_controllen = CMSG_SPACE(sizeof(int));
+    msg.msg_control = ctrl;
+
+    cmsg = CMSG_FIRSTHDR(&msg);
+    cmsg->cmsg_level = SOL_SOCKET;
+    cmsg->cmsg_type = SCM_RIGHTS;
+    cmsg->cmsg_len = CMSG_LEN(sizeof(int));
+
+    *((int *) CMSG_DATA(cmsg)) = fildes;
+
+    sendmsg(sock, &msg, 0);
+  };
+};
+#endif
 #endif


-NAMESPACE_BEGIN(Grid);
-#define header "SharedMemoryMpi: "
 /*Construct from an MPI communicator*/
 void GlobalSharedMemory::Init(Grid_MPI_Comm comm)
 {
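The hunks that follow wire this class into GlobalSharedMemory::SharedMemoryAllocate. As a standalone illustration of the intended call order, here is a hypothetical sketch; the function, rank and count names are made up, and the real code additionally loops over WorldShmSize and brackets the exchange with MPI_Barrier.

// Sketch of the SCM_RIGHTS hand-off pattern used above (illustrative only;
// MPI rank bookkeeping and the barriers of the real code are omitted).
void ShareBufferFd(int my_rank, int owner_rank, int ranks_on_node, int handle_fd)
{
  UnixSockets::Open(my_rank);   // every rank binds /tmp/GridUnixSocket.<rank>

  if (my_rank == owner_rank) {
    // The owner of the shared buffer pushes the fd behind its IPC handle
    // to every other rank on the node.
    for (int rr = 0; rr < ranks_on_node; rr++)
      if (rr != owner_rank) UnixSockets::SendFileDescriptor(handle_fd, rr);
  } else {
    // The peers receive a descriptor referring to the same memory object,
    // which SharedMemoryAllocate then hands to zeMemOpenIpcHandle.
    int myfd = UnixSockets::RecvFileDescriptor();
    (void)myfd;
  }
}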
@@ -480,8 +588,13 @@ void GlobalSharedMemory::SharedMemoryAllocate(uint64_t bytes, int flags)
   ///////////////////////////////////////////////////////////////////////////////////////////////////////////
   // Loop over ranks/gpu's on our node
   ///////////////////////////////////////////////////////////////////////////////////////////////////////////
+#ifdef SHM_SOCKETS
+  UnixSockets::Open(WorldShmRank);
+#endif
   for(int r=0;r<WorldShmSize;r++){
+
+    MPI_Barrier(WorldShmComm);
+
 #ifndef GRID_MPI3_SHM_NONE
     //////////////////////////////////////////////////
     // If it is me, pass around the IPC access key
@@ -489,24 +602,32 @@ void GlobalSharedMemory::SharedMemoryAllocate(uint64_t bytes, int flags)
     void * thisBuf = ShmCommBuf;
     if(!Stencil_force_mpi) {
 #ifdef GRID_SYCL_LEVEL_ZERO_IPC
-    typedef struct { int fd; pid_t pid ; } clone_mem_t;
+    typedef struct { int fd; pid_t pid ; ze_ipc_mem_handle_t ze; } clone_mem_t;

     auto zeDevice  = cl::sycl::get_native<cl::sycl::backend::level_zero>(theGridAccelerator->get_device());
     auto zeContext = cl::sycl::get_native<cl::sycl::backend::level_zero>(theGridAccelerator->get_context());

     ze_ipc_mem_handle_t ihandle;
     clone_mem_t handle;

     if ( r==WorldShmRank ) {
       auto err = zeMemGetIpcHandle(zeContext,ShmCommBuf,&ihandle);
       if ( err != ZE_RESULT_SUCCESS ) {
-        std::cout << "SharedMemoryMPI.cc zeMemGetIpcHandle failed for rank "<<r<<" "<<std::hex<<err<<std::dec<<std::endl;
+        std::cerr << "SharedMemoryMPI.cc zeMemGetIpcHandle failed for rank "<<r<<" "<<std::hex<<err<<std::dec<<std::endl;
         exit(EXIT_FAILURE);
       } else {
         std::cout << "SharedMemoryMPI.cc zeMemGetIpcHandle succeeded for rank "<<r<<" "<<std::hex<<err<<std::dec<<std::endl;
       }
       memcpy((void *)&handle.fd,(void *)&ihandle,sizeof(int));
       handle.pid = getpid();
+      memcpy((void *)&handle.ze,(void *)&ihandle,sizeof(ihandle));
+#ifdef SHM_SOCKETS
+      for(int rr=0;rr<WorldShmSize;rr++){
+        if(rr!=r){
+          UnixSockets::SendFileDescriptor(handle.fd,rr);
+        }
+      }
+#endif
     }
 #endif
 #ifdef GRID_CUDA
@@ -534,6 +655,7 @@ void GlobalSharedMemory::SharedMemoryAllocate(uint64_t bytes, int flags)
     // Share this IPC handle across the Shm Comm
     //////////////////////////////////////////////////
     {
+      MPI_Barrier(WorldShmComm);
       int ierr=MPI_Bcast(&handle,
                          sizeof(handle),
                          MPI_BYTE,
@@ -549,6 +671,10 @@ void GlobalSharedMemory::SharedMemoryAllocate(uint64_t bytes, int flags)
 #ifdef GRID_SYCL_LEVEL_ZERO_IPC
     if ( r!=WorldShmRank ) {
       thisBuf = nullptr;
+      int myfd;
+#ifdef SHM_SOCKETS
+      myfd=UnixSockets::RecvFileDescriptor();
+#else
       std::cout<<"mapping seeking remote pid/fd "
                <<handle.pid<<"/"
                <<handle.fd<<std::endl;
@@ -556,16 +682,22 @@ void GlobalSharedMemory::SharedMemoryAllocate(uint64_t bytes, int flags)
       int pidfd = syscall(SYS_pidfd_open,handle.pid,0);
       std::cout<<"Using IpcHandle pidfd "<<pidfd<<"\n";
       // int myfd = syscall(SYS_pidfd_getfd,pidfd,handle.fd,0);
-      int myfd = syscall(438,pidfd,handle.fd,0);
-      std::cout<<"Using IpcHandle myfd "<<myfd<<"\n";
+      myfd = syscall(438,pidfd,handle.fd,0);
+      int err_t = errno;
+      if (myfd < 0) {
+        fprintf(stderr,"pidfd_getfd returned %d errno was %d\n", myfd,err_t); fflush(stderr);
+        perror("pidfd_getfd failed ");
+        assert(0);
+      }
+#endif
+      std::cout<<"Using IpcHandle mapped remote pid "<<handle.pid <<" FD "<<handle.fd <<" to myfd "<<myfd<<"\n";
+      memcpy((void *)&ihandle,(void *)&handle.ze,sizeof(ihandle));
       memcpy((void *)&ihandle,(void *)&myfd,sizeof(int));

       auto err = zeMemOpenIpcHandle(zeContext,zeDevice,ihandle,0,&thisBuf);
       if ( err != ZE_RESULT_SUCCESS ) {
-        std::cout << "SharedMemoryMPI.cc "<<zeContext<<" "<<zeDevice<<std::endl;
-        std::cout << "SharedMemoryMPI.cc zeMemOpenIpcHandle failed for rank "<<r<<" "<<std::hex<<err<<std::dec<<std::endl;
+        std::cerr << "SharedMemoryMPI.cc "<<zeContext<<" "<<zeDevice<<std::endl;
+        std::cerr << "SharedMemoryMPI.cc zeMemOpenIpcHandle failed for rank "<<r<<" "<<std::hex<<err<<std::dec<<std::endl;
         exit(EXIT_FAILURE);
       } else {
         std::cout << "SharedMemoryMPI.cc zeMemOpenIpcHandle succeeded for rank "<<r<<std::endl;
@@ -600,6 +732,7 @@ void GlobalSharedMemory::SharedMemoryAllocate(uint64_t bytes, int flags)
 #else
     WorldShmCommBufs[r] = ShmCommBuf;
 #endif
+    MPI_Barrier(WorldShmComm);
   }

   _ShmAllocBytes=bytes;
@@ -707,9 +707,9 @@ void localCopyRegion(const Lattice<vobj> &From,Lattice<vobj> & To,Coordinate Fro
   Coordinate ist = Tg->_istride;
   Coordinate ost = Tg->_ostride;

-  autoView( t_v , To, AcceleratorWrite);
-  autoView( f_v , From, AcceleratorRead);
-  accelerator_for(idx,Fg->lSites(),1,{
+  autoView( t_v , To, CpuWrite);
+  autoView( f_v , From, CpuRead);
+  thread_for(idx,Fg->lSites(),{
     sobj s;
     Coordinate Fcoor(nd);
     Coordinate Tcoor(nd);
@@ -722,15 +722,20 @@ void localCopyRegion(const Lattice<vobj> &From,Lattice<vobj> & To,Coordinate Fro
       Tcoor[d] = ToLowerLeft[d]+ Fcoor[d]-FromLowerLeft[d];
     }
     if (in_region) {
-      Integer idx_f = 0; for(int d=0;d<nd;d++) idx_f+=isf[d]*(Fcoor[d]/rdf[d]);
-      Integer idx_t = 0; for(int d=0;d<nd;d++) idx_t+=ist[d]*(Tcoor[d]/rdt[d]);
-      Integer odx_f = 0; for(int d=0;d<nd;d++) odx_f+=osf[d]*(Fcoor[d]%rdf[d]);
-      Integer odx_t = 0; for(int d=0;d<nd;d++) odx_t+=ost[d]*(Tcoor[d]%rdt[d]);
-      vector_type * fp = (vector_type *)&f_v[odx_f];
-      vector_type * tp = (vector_type *)&t_v[odx_t];
+#if 0
+      Integer idx_f = 0; for(int d=0;d<nd;d++) idx_f+=isf[d]*(Fcoor[d]/rdf[d]); // inner index from
+      Integer idx_t = 0; for(int d=0;d<nd;d++) idx_t+=ist[d]*(Tcoor[d]/rdt[d]); // inner index to
+      Integer odx_f = 0; for(int d=0;d<nd;d++) odx_f+=osf[d]*(Fcoor[d]%rdf[d]); // outer index from
+      Integer odx_t = 0; for(int d=0;d<nd;d++) odx_t+=ost[d]*(Tcoor[d]%rdt[d]); // outer index to
+      scalar_type * fp = (scalar_type *)&f_v[odx_f];
+      scalar_type * tp = (scalar_type *)&t_v[odx_t];
       for(int w=0;w<words;w++){
        tp[w].putlane(fp[w].getlane(idx_f),idx_t);
      }
+#else
+      peekLocalSite(s,f_v,Fcoor);
+      pokeLocalSite(s,t_v,Tcoor);
+#endif
     }
   });
 }
@@ -841,9 +846,9 @@ void InsertSliceLocal(const Lattice<vobj> &lowDim, Lattice<vobj> & higherDim,int

   for(int d=0;d<nh;d++){
     if ( d!=orthog ) {
       assert(lg->_processors[d]  == hg->_processors[d]);
       assert(lg->_ldimensions[d] == hg->_ldimensions[d]);
     }
   }

   // the above should guarantee that the operations are local
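The #else branch above moves data one site at a time with peekLocalSite/pokeLocalSite under CPU views. Below is a minimal sketch of that per-site idiom in isolation; it assumes only the autoView, peekLocalSite and pokeLocalSite signatures already used in the hunk, and the helper name itself is hypothetical.

#include <Grid/Grid.h>
using namespace Grid;

// Illustrative only: copy the site at local coordinate `coor` from one lattice
// to another on the same grid, the way the #else branch above moves data.
template<class vobj>
void CopyOneSite(const Lattice<vobj> &From, Lattice<vobj> &To, Coordinate coor)
{
  typedef typename vobj::scalar_object sobj;
  sobj s;
  autoView( f_v , From, CpuRead);
  autoView( t_v , To,   CpuWrite);
  peekLocalSite(s, f_v, coor);   // gather the scalar object for this site
  pokeLocalSite(s, t_v, coor);   // scatter it into the destination view
}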
Grid/lattice/PaddedCell.h (new file, 136 lines)
@@ -0,0 +1,136 @@
/*************************************************************************************

    Grid physics library, www.github.com/paboyle/Grid

    Source file: ./lib/lattice/PaddedCell.h

    Copyright (C) 2019

Author: Peter Boyle pboyle@bnl.gov

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License along
    with this program; if not, write to the Free Software Foundation, Inc.,
    51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.

    See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/*  END LEGAL */
#pragma once

NAMESPACE_BEGIN(Grid);

class PaddedCell {
public:
  GridCartesian * unpadded_grid;
  int dims;
  int depth;
  std::vector<GridCartesian *> grids;
  ~PaddedCell()
  {
    DeleteGrids();
  }
  PaddedCell(int _depth,GridCartesian *_grid)
  {
    unpadded_grid = _grid;
    depth=_depth;
    dims=_grid->Nd();
    AllocateGrids();
    Coordinate local =unpadded_grid->LocalDimensions();
    for(int d=0;d<dims;d++){
      assert(local[d]>=depth);
    }
  }
  void DeleteGrids(void)
  {
    for(int d=0;d<grids.size();d++){
      delete grids[d];
    }
    grids.resize(0);
  };
  void AllocateGrids(void)
  {
    Coordinate local     =unpadded_grid->LocalDimensions();
    Coordinate simd      =unpadded_grid->_simd_layout;
    Coordinate processors=unpadded_grid->_processors;
    Coordinate plocal    =unpadded_grid->LocalDimensions();
    Coordinate global(dims);

    // expand up one dim at a time
    for(int d=0;d<dims;d++){

      plocal[d] += 2*depth;

      for(int d=0;d<dims;d++){
        global[d] = plocal[d]*processors[d];
      }

      grids.push_back(new GridCartesian(global,simd,processors));
    }
  };
  template<class vobj>
  inline Lattice<vobj> Extract(Lattice<vobj> &in)
  {
    Lattice<vobj> out(unpadded_grid);

    Coordinate local =unpadded_grid->LocalDimensions();
    Coordinate fll(dims,depth); // depends on the MPI spread
    Coordinate tll(dims,0); // depends on the MPI spread
    localCopyRegion(in,out,fll,tll,local);
    return out;
  }
  template<class vobj>
  inline Lattice<vobj> Exchange(Lattice<vobj> &in)
  {
    GridBase *old_grid = in.Grid();
    int dims = old_grid->Nd();
    Lattice<vobj> tmp = in;
    for(int d=0;d<dims;d++){
      tmp = Expand(d,tmp); // rvalue && assignment
    }
    return tmp;
  }
  // expand up one dim at a time
  template<class vobj>
  inline Lattice<vobj> Expand(int dim,Lattice<vobj> &in)
  {
    GridBase *old_grid = in.Grid();
    GridCartesian *new_grid = grids[dim];//These are new grids
    Lattice<vobj>  padded(new_grid);
    Lattice<vobj> shifted(old_grid);
    Coordinate local  =old_grid->LocalDimensions();
    Coordinate plocal =new_grid->LocalDimensions();
    if(dim==0) conformable(old_grid,unpadded_grid);
    else       conformable(old_grid,grids[dim-1]);

    std::cout << " dim "<<dim<<" local "<<local << " padding to "<<plocal<<std::endl;
    // Middle bit
    for(int x=0;x<local[dim];x++){
      InsertSliceLocal(in,padded,x,depth+x,dim);
    }
    // High bit
    shifted = Cshift(in,dim,depth);
    for(int x=0;x<depth;x++){
      InsertSliceLocal(shifted,padded,local[dim]-depth+x,depth+local[dim]+x,dim);
    }
    // Low bit
    shifted = Cshift(in,dim,-depth);
    for(int x=0;x<depth;x++){
      InsertSliceLocal(shifted,padded,x,x,dim);
    }
    return padded;
  }

};


NAMESPACE_END(Grid);
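A usage sketch of PaddedCell, condensed from documentation/David_notes.txt further down; the wrapper function and field names are illustrative. Construct a cell of the desired halo depth, Exchange() onto the padded grid, compute there, then Extract() back to the original volume.

#include <Grid/Grid.h>
using namespace Grid;

// Illustrative sketch, following documentation/David_notes.txt: pad a gauge
// field by a one-site halo, then shrink a result back to the original grid.
void PaddedCellDemo(GridCartesian &GRID, LatticeGaugeField &Umu)
{
  int depth = 1;
  PaddedCell Ghost(depth, &GRID);                   // allocates the enlarged grids

  LatticeGaugeField Ughost = Ghost.Exchange(Umu);   // halo exchange into padded field

  // ... compute on Ughost.Grid(), e.g. with a GeneralLocalStencil, producing
  // a result on the padded grid ...
  LatticeComplex gplaq(Ughost.Grid());
  gplaq = Zero();

  LatticeComplex cplaq = Ghost.Extract(gplaq);      // back to the unpadded grid
  std::cout << GridLogMessage << " extracted field norm2 " << norm2(cplaq) << std::endl;
}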
@@ -104,6 +104,7 @@ template<typename vtype> using iSpinMatrix = iScalar<iMatrix<iSca
 template<typename vtype> using iColourMatrix              = iScalar<iScalar<iMatrix<vtype, Nc> > > ;
 template<typename vtype> using iSpinColourMatrix          = iScalar<iMatrix<iMatrix<vtype, Nc>, Ns> >;
 template<typename vtype> using iLorentzColourMatrix       = iVector<iScalar<iMatrix<vtype, Nc> >, Nd > ;
+template<typename vtype> using iLorentzComplex            = iVector<iScalar<iScalar<vtype> >, Nd > ;
 template<typename vtype> using iDoubleStoredColourMatrix  = iVector<iScalar<iMatrix<vtype, Nc> >, Nds > ;
 template<typename vtype> using iSpinVector                = iScalar<iVector<iScalar<vtype>, Ns> >;
 template<typename vtype> using iColourVector              = iScalar<iScalar<iVector<vtype, Nc> > >;
@@ -178,6 +179,15 @@ typedef iLorentzColourMatrix<vComplexF> vLorentzColourMatrixF;
 typedef iLorentzColourMatrix<vComplexD>  vLorentzColourMatrixD;
 typedef iLorentzColourMatrix<vComplexD2> vLorentzColourMatrixD2;

+// LorentzComplex
+typedef iLorentzComplex<Complex  > LorentzComplex;
+typedef iLorentzComplex<ComplexF > LorentzComplexF;
+typedef iLorentzComplex<ComplexD > LorentzComplexD;
+
+typedef iLorentzComplex<vComplex > vLorentzComplex;
+typedef iLorentzComplex<vComplexF> vLorentzComplexF;
+typedef iLorentzComplex<vComplexD> vLorentzComplexD;
+
 // DoubleStored gauge field
 typedef iDoubleStoredColourMatrix<Complex  > DoubleStoredColourMatrix;
 typedef iDoubleStoredColourMatrix<ComplexF > DoubleStoredColourMatrixF;
@@ -307,6 +317,10 @@ typedef Lattice<vLorentzColourMatrixF> LatticeLorentzColourMatrixF;
 typedef Lattice<vLorentzColourMatrixD>  LatticeLorentzColourMatrixD;
 typedef Lattice<vLorentzColourMatrixD2> LatticeLorentzColourMatrixD2;

+typedef Lattice<vLorentzComplex>  LatticeLorentzComplex;
+typedef Lattice<vLorentzComplexF> LatticeLorentzComplexF;
+typedef Lattice<vLorentzComplexD> LatticeLorentzComplexD;
+
 // DoubleStored gauge field
 typedef Lattice<vDoubleStoredColourMatrix>  LatticeDoubleStoredColourMatrix;
 typedef Lattice<vDoubleStoredColourMatrixF> LatticeDoubleStoredColourMatrixF;
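A short, hypothetical sketch of what the new LorentzComplex containers hold: one complex number per space-time direction mu at every site, the Lorentz-vector analogue of LatticeComplex. The demo function and its use of LatticeCoordinate, PokeIndex and PeekIndex are illustrative assumptions, not part of this diff.

#include <Grid/Grid.h>
using namespace Grid;

// Illustrative only: fill each mu component of a LatticeLorentzComplex with the
// site's mu coordinate, then read one component back out.
void LorentzComplexDemo(GridCartesian &GRID)
{
  LatticeLorentzComplex phases(&GRID);
  LatticeComplex        coor(&GRID);

  for (int mu = 0; mu < Nd; mu++) {
    LatticeCoordinate(coor, mu);                // coor(x) = x_mu
    PokeIndex<LorentzIndex>(phases, coor, mu);  // store it in the mu component
  }

  LatticeComplex c0 = PeekIndex<LorentzIndex>(phases, 0);  // read back mu=0
  std::cout << GridLogMessage << " mu=0 component norm2 " << norm2(c0) << std::endl;
}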
@@ -507,6 +507,7 @@ public:
     }
     this->face_table_computed=1;
     assert(this->u_comm_offset==this->_unified_buffer_size);
+    accelerator_barrier();
   }

 };
@@ -332,8 +332,7 @@ void WilsonFermion5D<Impl>::DhopInternalOverlappedComms(StencilImpl & st, Lebesg
   /////////////////////////////
   {
     GRID_TRACE("Gather");
-    st.HaloExchangeOptGather(in,compressor);
-    accelerator_barrier();
+    st.HaloExchangeOptGather(in,compressor); // Put the barrier in the routine
   }

   std::vector<std::vector<CommsRequest_t> > requests;
@@ -428,9 +428,10 @@ void WilsonKernels<Impl>::DhopDirKernel( StencilImpl &st, DoubledGaugeField &U,S
   auto ptr = &st.surface_list[0];                             \
   accelerator_forNB( ss, sz, Simd::Nsimd(), {                 \
       int sF = ptr[ss];                                       \
-      int sU = ss/Ls;                                         \
+      int sU = sF/Ls;                                         \
       WilsonKernels<Impl>::A(st_v,U_v,buf,sF,sU,in_v,out_v);  \
-  });
+  });                                                         \
+  accelerator_barrier();

 #define ASM_CALL(A)                                           \
   thread_for( sss, Nsite, {                                   \
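The sU fix above matters because KERNEL_CALL_EXT iterates over st.surface_list rather than over all sites. Below is a small, self-contained illustration of the index relation it relies on; the Ls value and the surface entries are made up, and it simply shows how the previous ss/Ls expression diverges from sF/Ls for a sparse list.

#include <cstdio>

int main() {
  // Illustrative only: relation between a 5d fermion site index sF and the
  // 4d gauge site sU assumed by the macro above, with the s coordinate fastest.
  const int Ls = 8;                    // example extent of the fifth dimension
  int surface_list[] = { 3, 17, 42 };  // made-up sparse list of boundary sites

  for (int ss = 0; ss < 3; ss++) {
    int sF = surface_list[ss];         // as in `int sF = ptr[ss];`
    int sU = sF / Ls;                  // 4d gauge site owning that fermion site
    std::printf("ss=%d sF=%d sU=%d (ss/Ls would give %d)\n", ss, sF, sU, ss / Ls);
  }
  return 0;
}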
@@ -474,9 +475,10 @@ void WilsonKernels<Impl>::DhopKernel(int Opt,StencilImpl &st, DoubledGaugeField
     if (Opt == WilsonKernelsStatic::OptInlineAsm  ) { ASM_CALL(AsmDhopSiteInt); return;}
 #endif
   } else if( exterior ) {
+    // dependent on result of merge
     acceleratorFenceComputeStream();
-    if (Opt == WilsonKernelsStatic::OptGeneric    ) { KERNEL_CALL(GenericDhopSiteExt); return;}
-    if (Opt == WilsonKernelsStatic::OptHandUnroll ) { KERNEL_CALL(HandDhopSiteExt); return;}
+    if (Opt == WilsonKernelsStatic::OptGeneric    ) { KERNEL_CALL_EXT(GenericDhopSiteExt); return;}
+    if (Opt == WilsonKernelsStatic::OptHandUnroll ) { KERNEL_CALL_EXT(HandDhopSiteExt); return;}
 #ifndef GRID_CUDA
     if (Opt == WilsonKernelsStatic::OptInlineAsm  ) { ASM_CALL(AsmDhopSiteExt); return;}
 #endif
@@ -506,9 +508,10 @@ void WilsonKernels<Impl>::DhopKernel(int Opt,StencilImpl &st, DoubledGaugeField
     if (Opt == WilsonKernelsStatic::OptInlineAsm  ) { ASM_CALL(AsmDhopSiteDagInt); return;}
 #endif
   } else if( exterior ) {
+    // Dependent on result of merge
     acceleratorFenceComputeStream();
-    if (Opt == WilsonKernelsStatic::OptGeneric    ) { KERNEL_CALL(GenericDhopSiteDagExt); return;}
-    if (Opt == WilsonKernelsStatic::OptHandUnroll ) { KERNEL_CALL(HandDhopSiteDagExt); return;}
+    if (Opt == WilsonKernelsStatic::OptGeneric    ) { KERNEL_CALL_EXT(GenericDhopSiteDagExt); return;}
+    if (Opt == WilsonKernelsStatic::OptHandUnroll ) { KERNEL_CALL_EXT(HandDhopSiteDagExt); return;}
 #ifndef GRID_CUDA
     if (Opt == WilsonKernelsStatic::OptInlineAsm  ) { ASM_CALL(AsmDhopSiteDagExt); return;}
 #endif
@@ -53,9 +53,10 @@ NAMESPACE_BEGIN(Grid);
     Integer ReliableUpdateFreq;
   protected:

+    //Action evaluation
     //Allow derived classes to override the multishift CG
     virtual void multiShiftInverse(bool numerator, const MultiShiftFunction &approx, const Integer MaxIter, const FermionFieldD &in, FermionFieldD &out){
-#if 0
+#if 1
       SchurDifferentiableOperator<ImplD> schurOp(numerator ? NumOpD : DenOpD);
       ConjugateGradientMultiShift<FermionFieldD> msCG(MaxIter, approx);
       msCG(schurOp,in, out);
@@ -70,9 +71,10 @@ NAMESPACE_BEGIN(Grid);
       msCG(schurOpD, in, out);
 #endif
     }
+    //Force evaluation
     virtual void multiShiftInverse(bool numerator, const MultiShiftFunction &approx, const Integer MaxIter, const FermionFieldD &in, std::vector<FermionFieldD> &out_elems, FermionFieldD &out){
       SchurDifferentiableOperator<ImplD> schurOpD(numerator ? NumOpD : DenOpD);
-      SchurDifferentiableOperator<ImplF> schurOpF (numerator ? NumOpF : DenOpF);
+      SchurDifferentiableOperator<ImplF> schurOpF(numerator ? NumOpF : DenOpF);

       FermionFieldD inD(NumOpD.FermionRedBlackGrid());
       FermionFieldD outD(NumOpD.FermionRedBlackGrid());
@@ -84,20 +86,15 @@ NAMESPACE_BEGIN(Grid);
     virtual void ImportGauge(const typename ImplD::GaugeField &Ud){

       typename ImplF::GaugeField Uf(NumOpF.GaugeGrid());
-      typename ImplD::GaugeField Ud2(NumOpD.GaugeGrid());
       precisionChange(Uf, Ud);
-      precisionChange(Ud2, Ud);

-      std::cout << "Importing "<<norm2(Ud)<<" "<< norm2(Uf)<<" " << norm2(Ud2)<<std::endl;
+      std::cout << "Importing "<<norm2(Ud)<<" "<< norm2(Uf)<<" " <<std::endl;

       NumOpD.ImportGauge(Ud);
       DenOpD.ImportGauge(Ud);

       NumOpF.ImportGauge(Uf);
       DenOpF.ImportGauge(Uf);

-      NumOpD.ImportGauge(Ud2);
-      DenOpD.ImportGauge(Ud2);
     }

   public:
@@ -207,20 +207,27 @@ NAMESPACE_BEGIN(Grid);
       //X = (Mdag M)^-1 V^dag phi
       //Y = (Mdag)^-1 V^dag  phi
       Vpc.MpcDag(PhiOdd,Y);          // Y= Vdag phi
+      std::cout << GridLogMessage <<" Y "<<norm2(Y)<<std::endl;
       X=Zero();
       DerivativeSolver(Mpc,Y,X);     // X= (MdagM)^-1 Vdag phi
+      std::cout << GridLogMessage <<" X "<<norm2(X)<<std::endl;
       Mpc.Mpc(X,Y);                  // Y=  Mdag^-1 Vdag phi
+      std::cout << GridLogMessage <<" Y "<<norm2(Y)<<std::endl;

       // phi^dag V (Mdag M)^-1 dV^dag  phi
       Vpc.MpcDagDeriv(force , X, PhiOdd );   dSdU = force;
+      std::cout << GridLogMessage <<" deriv "<<norm2(force)<<std::endl;

       // phi^dag dV (Mdag M)^-1 V^dag  phi
       Vpc.MpcDeriv(force , PhiOdd, X );      dSdU = dSdU+force;
+      std::cout << GridLogMessage <<" deriv "<<norm2(force)<<std::endl;

       //  - phi^dag V (Mdag M)^-1 Mdag dM (Mdag M)^-1 V^dag  phi
       //  - phi^dag V (Mdag M)^-1 dMdag M (Mdag M)^-1 V^dag  phi
       Mpc.MpcDeriv(force,Y,X);   dSdU = dSdU-force;
+      std::cout << GridLogMessage <<" deriv "<<norm2(force)<<std::endl;
       Mpc.MpcDagDeriv(force,X,Y);  dSdU = dSdU-force;
+      std::cout << GridLogMessage <<" deriv "<<norm2(force)<<std::endl;

       // FIXME No force contribution from EvenEven assumed here
       // Needs a fix for clover.
@@ -123,7 +123,7 @@ public:
       }
       if ( permute_slice ) {
         int ptype =grid->PermuteType(d);
-        uint8_t mask =grid->Nsimd() >> (ptype + 1);
+        uint8_t mask =0x1<<ptype;
         SE._permute |= mask;
       }
     }
@@ -339,8 +339,8 @@ public:
   // Vectors that live on the symmetric heap in case of SHMEM
   // These are used; either SHM objects or refs to the above symmetric heap vectors
   // depending on comms target
-  Vector<cobj *> u_simd_send_buf;
-  Vector<cobj *> u_simd_recv_buf;
+  std::vector<cobj *> u_simd_send_buf;
+  std::vector<cobj *> u_simd_recv_buf;

   int u_comm_offset;
   int _unified_buffer_size;
@@ -348,7 +348,7 @@ public:
   ////////////////////////////////////////
   // Stencil query
   ////////////////////////////////////////
-#ifdef SHM_FAST_PATH
+#if 1
   inline int SameNode(int point) {

     int dimension    = this->_directions[point];
@@ -434,7 +434,6 @@ public:
   ////////////////////////////////////////////////////////////////////////
   void CommunicateBegin(std::vector<std::vector<CommsRequest_t> > &reqs)
   {
-    accelerator_barrier();
     for(int i=0;i<Packets.size();i++){
       _grid->StencilSendToRecvFromBegin(MpiReqs,
                                         Packets[i].send_buf,
@@ -666,11 +665,9 @@ public:
     for(int i=0;i<mm.size();i++){
       decompressor::MergeFace(decompress,mm[i]);
     }
-    if ( mm.size() ) acceleratorFenceComputeStream();
     for(int i=0;i<dd.size();i++){
       decompressor::DecompressFace(decompress,dd[i]);
     }
-    if ( dd.size() ) acceleratorFenceComputeStream();
   }
   ////////////////////////////////////////
   // Set up routines
@@ -708,6 +705,7 @@ public:
         }
       }
     }
+    std::cout << "BuildSurfaceList size is "<<surface_list.size()<<std::endl;
   }
   /// Introduce a block structure and switch off comms on boundaries
   void DirichletBlock(const Coordinate &dirichlet_block)
@@ -526,7 +526,7 @@ inline void acceleratorFreeCpu (void *ptr){free(ptr);};
 //////////////////////////////////////////////

 #ifdef GRID_SYCL
-inline void acceleratorFenceComputeStream(void){ theGridAccelerator->submit_barrier();};
+inline void acceleratorFenceComputeStream(void){ theGridAccelerator->ext_oneapi_submit_barrier(); };
 #else
 // Ordering within a stream guaranteed on Nvidia & AMD
 inline void acceleratorFenceComputeStream(void){ };
@@ -227,7 +227,7 @@ int main(int argc, char **argv) {
   //  std::vector<Real> hasenbusch({ light_mass, 0.005, 0.0145, 0.045, 0.108, 0.25, 0.51 , pv_mass }); // Updated
   //  std::vector<Real> hasenbusch({ light_mass, 0.0145, 0.045, 0.108, 0.25, 0.51 , 0.75 , pv_mass });

-  int SP_iters=10000;
+  int SP_iters=9000;

   RationalActionParams OFRp; // Up/down
   OFRp.lo       = 6.0e-5;
@@ -362,12 +362,12 @@ int main(int argc, char **argv) {

   // Probably dominates the force - back to EOFA.
   OneFlavourRationalParams SFRp;
-  SFRp.lo       = 0.25;
+  SFRp.lo       = 0.1;
   SFRp.hi       = 25.0;
   SFRp.MaxIter  = 10000;
-  SFRp.tolerance= 1.0e-5;
+  SFRp.tolerance= 1.0e-8;
   SFRp.mdtolerance= 2.0e-4;
-  SFRp.degree   = 8;
+  SFRp.degree   = 12;
   SFRp.precision= 50;

   MobiusEOFAFermionD Strange_Op_L (U , *FGrid , *FrbGrid , *GridPtr , *GridRBPtr , strange_mass, strange_mass, pv_mass, 0.0, -1, M5, b, c);
@@ -244,11 +244,6 @@ int main(int argc, char **argv) {
   Coordinate shm;

   GlobalSharedMemory::GetShmDims(mpi,shm);

-  Coordinate CommDim(Nd);
-  for(int d=0;d<Nd;d++) CommDim[d]= (mpi[d]/shm[d])>1 ? 1 : 0;
-
-  Coordinate NonDirichlet(Nd+1,0);
-
   //////////////////////////
   // Fermion Grids
@@ -277,8 +272,6 @@ int main(int argc, char **argv) {
   std::vector<Complex> boundary = {1,1,1,-1};
   FermionAction::ImplParams Params(boundary);
   FermionActionF::ImplParams ParamsF(boundary);
-  Params.dirichlet=NonDirichlet;
-  ParamsF.dirichlet=NonDirichlet;

   //  double StoppingCondition = 1e-14;
   //  double MDStoppingCondition = 1e-9;
@@ -305,12 +298,12 @@ int main(int argc, char **argv) {

   // Probably dominates the force - back to EOFA.
   OneFlavourRationalParams SFRp;
-  SFRp.lo       = 0.25;
-  SFRp.hi       = 25.0;
+  SFRp.lo       = 0.1;
+  SFRp.hi       = 30.0;
   SFRp.MaxIter  = 10000;
-  SFRp.tolerance= 1.0e-5;
-  SFRp.mdtolerance= 2.0e-4;
-  SFRp.degree   = 8;
+  SFRp.tolerance= 1.0e-8;
+  SFRp.mdtolerance= 2.0e-6;
+  SFRp.degree   = 10;
   SFRp.precision= 50;

   MobiusEOFAFermionD Strange_Op_L (U , *FGrid , *FrbGrid , *GridPtr , *GridRBPtr , strange_mass, strange_mass, pv_mass, 0.0, -1, M5, b, c);
@@ -370,19 +363,17 @@ int main(int argc, char **argv) {
   ////////////////////////////////////
   std::vector<Real> light_den;
   std::vector<Real> light_num;
-  std::vector<int> dirichlet_den;
-  std::vector<int> dirichlet_num;

   int n_hasenbusch = hasenbusch.size();
-  light_den.push_back(light_mass);  dirichlet_den.push_back(0);
+  light_den.push_back(light_mass);
   for(int h=0;h<n_hasenbusch;h++){
-    light_den.push_back(hasenbusch[h]); dirichlet_den.push_back(0);
+    light_den.push_back(hasenbusch[h]);
   }

   for(int h=0;h<n_hasenbusch;h++){
-    light_num.push_back(hasenbusch[h]); dirichlet_num.push_back(0);
+    light_num.push_back(hasenbusch[h]);
   }
-  light_num.push_back(pv_mass);  dirichlet_num.push_back(0);
+  light_num.push_back(pv_mass);

   std::vector<FermionAction *> Numerators;
   std::vector<FermionAction *> Denominators;
@@ -408,9 +399,7 @@ int main(int argc, char **argv) {
     std::cout << GridLogMessage
               << " 2f quotient Action ";
     std::cout << "det D("<<light_den[h]<<")";
-    if ( dirichlet_den[h] ) std::cout << "^dirichlet    ";
     std::cout << "/ det D("<<light_num[h]<<")";
-    if ( dirichlet_num[h] ) std::cout << "^dirichlet    ";
     std::cout << std::endl;

     FermionAction::ImplParams ParamsNum(boundary);
@@ -418,21 +407,11 @@ int main(int argc, char **argv) {
     FermionActionF::ImplParams ParamsDenF(boundary);
     FermionActionF::ImplParams ParamsNumF(boundary);

-    ParamsNum.dirichlet = NonDirichlet;
-    ParamsDen.dirichlet = NonDirichlet;
-
-    ParamsNum.partialDirichlet = 0;
-    ParamsDen.partialDirichlet = 0;
-
     Numerators.push_back  (new FermionAction(U,*FGrid,*FrbGrid,*GridPtr,*GridRBPtr,light_num[h],M5,b,c, ParamsNum));
     Denominators.push_back(new FermionAction(U,*FGrid,*FrbGrid,*GridPtr,*GridRBPtr,light_den[h],M5,b,c, ParamsDen));

-    ParamsDenF.dirichlet = ParamsDen.dirichlet;
-    ParamsDenF.partialDirichlet = ParamsDen.partialDirichlet;
     DenominatorsF.push_back(new FermionActionF(UF,*FGridF,*FrbGridF,*GridPtrF,*GridRBPtrF,light_den[h],M5,b,c, ParamsDenF));

-    ParamsNumF.dirichlet = ParamsNum.dirichlet;
-    ParamsNumF.partialDirichlet = ParamsNum.partialDirichlet;
     NumeratorsF.push_back  (new FermionActionF(UF,*FGridF,*FrbGridF,*GridPtrF,*GridRBPtrF,light_num[h],M5,b,c, ParamsNumF));

     LinOpD.push_back(new LinearOperatorD(*Denominators[h]));
@@ -469,7 +448,6 @@ int main(int argc, char **argv) {
   // Gauge action
   /////////////////////////////////////////////////////////////
   Level3.push_back(&GaugeAction);
-  //  TheHMC.TheAction.push_back(Level1);
   TheHMC.TheAction.push_back(Level2);
   TheHMC.TheAction.push_back(Level3);
   std::cout << GridLogMessage << " Action complete "<< std::endl;
@@ -425,7 +425,7 @@ void Benchmark(int Ls, Coordinate Dirichlet)

   err = r_eo-result;
   n2e= norm2(err);
-  std::cout<<GridLogMessage << "norm diff   "<< n2e<< " Line "<<__LINE__ <<std::endl;
+  std::cout<<GridLogMessage << "norm diff   "<< n2e<<std::endl;
   assert(n2e<1.0e-4);

   pickCheckerboard(Even,src_e,err);
documentation/David_notes.txt (new file, 90 lines)
@@ -0,0 +1,90 @@
Branch: develop

Files:

Grid/lattice/PaddedCell.h        -- Halo exchange
tests/Test_general_stencil.cc    -- test local off axis stencil addressing
tests/debug/Test_padded_cell.cc  -- test PaddedCell halo exchange and the General local stencil by computing ALL plaquettes on lattice

Functionality:

-- extend a lattice field:
   Grid/lattice/PaddedCell.h

   // Constructor
   PaddedCell(int _depth,GridCartesian *_grid)

   // Expand a field "in" to depth "d"
   template<class vobj>
   inline Lattice<vobj> Exchange(Lattice<vobj> &in)

   // Take the "apple core" of in to a smaller local volume
   template<class vobj>
   inline Lattice<vobj> Extract(Lattice<vobj> &in)

-- Plaquette test:
   tests/debug/Test_padded_cell.cc

  /////////////////////////////////////////////////
  // Create a padded cell of extra padding depth=1
  /////////////////////////////////////////////////
  int depth = 1;
  PaddedCell Ghost(depth,&GRID);
  LatticeGaugeField Ughost = Ghost.Exchange(Umu);

  ///// Array for the site plaquette
  GridBase *GhostGrid = Ughost.Grid();
  LatticeComplex gplaq(GhostGrid);

  std::vector<Coordinate> shifts;
  for(int mu=0;mu<Nd;mu++){
    for(int nu=mu+1;nu<Nd;nu++){

      // Umu(x) Unu(x+mu) Umu^dag(x+nu) Unu^dag(x)
      Coordinate shift_0(Nd,0);
      Coordinate shift_mu(Nd,0); shift_mu[mu]=1;
      Coordinate shift_nu(Nd,0); shift_nu[nu]=1;
      shifts.push_back(shift_0);
      shifts.push_back(shift_mu);
      shifts.push_back(shift_nu);
      shifts.push_back(shift_0);
    }
  }
  GeneralLocalStencil gStencil(GhostGrid,shifts);

  gplaq=Zero();
  {
    autoView( gp_v , gplaq, CpuWrite);
    autoView( t_v , trplaq, CpuRead);
    autoView( U_v , Ughost, CpuRead);
    for(int ss=0;ss<gp_v.size();ss++){
      int s=0;
      for(int mu=0;mu<Nd;mu++){
        for(int nu=mu+1;nu<Nd;nu++){

          auto SE0 = gStencil.GetEntry(s+0,ss);
          auto SE1 = gStencil.GetEntry(s+1,ss);
          auto SE2 = gStencil.GetEntry(s+2,ss);
          auto SE3 = gStencil.GetEntry(s+3,ss);

          int o0 = SE0->_offset;
          int o1 = SE1->_offset;
          int o2 = SE2->_offset;
          int o3 = SE3->_offset;

          auto U0 = U_v[o0](mu);
          auto U1 = U_v[o1](nu);
          auto U2 = adj(U_v[o2](mu));
          auto U3 = adj(U_v[o3](nu));

          gpermute(U0,SE0->_permute);
          gpermute(U1,SE1->_permute);
          gpermute(U2,SE2->_permute);
          gpermute(U3,SE3->_permute);

          gp_v[ss]() =gp_v[ss]() + trace( U0*U1*U2*U3 );
          s=s+4;
        }
      }
    }
  }
  cplaq = Ghost.Extract(gplaq);
examples/socket_grid.cc (new file, 133 lines)
@@ -0,0 +1,133 @@
#include <sys/socket.h>
#include <sys/un.h>
#include <unistd.h>
#include <stdio.h>
#include <err.h>
#include <fcntl.h>
#include <assert.h>
#include <string.h>
#include <stdlib.h>

static int sock;
static const char *sock_path_fmt = "/tmp/GridUnixSocket.%d";
static char sock_path[256];

class UnixSockets {
public:
  static void Open(int rank)
  {
    int errnum;

    sock = socket(AF_UNIX, SOCK_DGRAM, 0);  assert(sock>0);
    printf("allocated socket %d\n",sock);

    struct sockaddr_un sa_un = { 0 };
    sa_un.sun_family = AF_UNIX;
    snprintf(sa_un.sun_path, sizeof(sa_un.sun_path),sock_path_fmt,rank);
    unlink(sa_un.sun_path);
    if (bind(sock, (struct sockaddr *)&sa_un, sizeof(sa_un))) {
      perror("bind failure");
      exit(EXIT_FAILURE);
    }
    printf("bound socket %d to %s\n",sock,sa_un.sun_path);
  }

  static int RecvFileDescriptor(void)
  {
    int n;
    int fd;
    char buf[1];
    struct iovec iov;
    struct msghdr msg;
    struct cmsghdr *cmsg;
    char cms[CMSG_SPACE(sizeof(int))];

    iov.iov_base = buf;
    iov.iov_len = 1;

    memset(&msg, 0, sizeof msg);
    msg.msg_name = 0;
    msg.msg_namelen = 0;
    msg.msg_iov = &iov;
    msg.msg_iovlen = 1;

    msg.msg_control = (caddr_t)cms;
    msg.msg_controllen = sizeof cms;

    if((n=recvmsg(sock, &msg, 0)) < 0) {
      perror("recvmsg failed");
      return -1;
    }
    if(n == 0){
      perror("recvmsg returned 0");
      return -1;
    }
    cmsg = CMSG_FIRSTHDR(&msg);
    memmove(&fd, CMSG_DATA(cmsg), sizeof(int));
    printf("received fd %d from socket %d\n",fd,sock);
    return fd;
  }

  static void SendFileDescriptor(int fildes,int xmit_to_rank)
  {
    struct msghdr msg;
    struct iovec iov;
    struct cmsghdr *cmsg = NULL;
    char ctrl[CMSG_SPACE(sizeof(int))];
    char data = ' ';

    memset(&msg, 0, sizeof(struct msghdr));
    memset(ctrl, 0, CMSG_SPACE(sizeof(int)));
    iov.iov_base = &data;
    iov.iov_len = sizeof(data);

    sprintf(sock_path,sock_path_fmt,xmit_to_rank);
    printf("sending FD %d over socket %d to rank %d AF_UNIX path %s\n",fildes,sock,xmit_to_rank,sock_path);fflush(stdout);

    struct sockaddr_un sa_un = { 0 };
    sa_un.sun_family = AF_UNIX;
    snprintf(sa_un.sun_path, sizeof(sa_un.sun_path),sock_path_fmt,xmit_to_rank);

    msg.msg_name = (void *)&sa_un;
    msg.msg_namelen = sizeof(sa_un);
    msg.msg_iov = &iov;
    msg.msg_iovlen = 1;
    msg.msg_controllen = CMSG_SPACE(sizeof(int));
    msg.msg_control = ctrl;

    cmsg = CMSG_FIRSTHDR(&msg);
    cmsg->cmsg_level = SOL_SOCKET;
    cmsg->cmsg_type = SCM_RIGHTS;
    cmsg->cmsg_len = CMSG_LEN(sizeof(int));

    *((int *) CMSG_DATA(cmsg)) = fildes;

    if ( sendmsg(sock, &msg, 0) == -1 ) perror("sendmsg failed");
  };
};

int main(int argc, char **argv)
{
  int me = fork()?0:1;

  UnixSockets::Open(me);

  // need MPI barrier
  sleep(10);
  const char * message = "Hello, World\n";
  if( me ) {
    int fd = open("foo",O_RDWR|O_CREAT,0666);
    if ( fd < 0 ) {
      perror("failed to open file");
      exit(EXIT_FAILURE);
    }
    // rank 1 sends to rank 0
    UnixSockets::SendFileDescriptor(fd,0);
    close(fd);
  } else {
    // rank 0 receives from rank 1
    int fd = UnixSockets::RecvFileDescriptor();
    write(fd,(const void *)message,strlen(message));
    close(fd);
  }
}
@ -3,8 +3,14 @@ export https_proxy=http://proxy-chain.intel.com:911
export LD_LIBRARY_PATH=$HOME/prereqs/lib/:$LD_LIBRARY_PATH

module load intel-release
source /opt/intel/oneapi/PVC_setup.sh
module load intel-comp-rt/embargo-ci-neo

#source /opt/intel/oneapi/PVC_setup.sh
#source /opt/intel/oneapi/ATS_setup.sh
#module load intel-nightly/20230331
#module load intel-comp-rt/ci-neo-master/026093

#module load intel/mpich
module load intel/mpich/pvc45.3
export PATH=~/ATS/pti-gpu/tools/onetrace/:$PATH
@ -1,2 +1,2 @@
CXX=mpicxx-openmpi-mp CXXFLAGS=-I/opt/local/include/ LDFLAGS=-L/opt/local/lib/ ../../configure --enable-simd=GEN --enable-debug --enable-comms=mpi --enable-unified=yes
BREW=/opt/local/
MPICXX=mpicxx CXX=c++-12 ../../configure --enable-simd=GEN --enable-comms=mpi-auto --enable-unified=yes --prefix $HOME/QCD/GridInstallOpt --with-lime=/Users/peterboyle/QCD/SciDAC/install/ --with-openssl=$BREW
@ -115,6 +115,7 @@ int main(int argc, char ** argv)
if (SE->_permute & 0x2 ) { permute(check[i],tmp,1); tmp=check[i];}
if (SE->_permute & 0x4 ) { permute(check[i],tmp,2); tmp=check[i];}
if (SE->_permute & 0x8 ) { permute(check[i],tmp,3); tmp=check[i];}
// std::cout<<GridLogMessage<<"stencil["<<i<<"] "<< check[i]<< " perm "<<(uint32_t)SE->_permute <<std::endl;
}

Real nrmC = norm2(Check);
@ -138,18 +139,17 @@ int main(int argc, char ** argv)
ddiff = check -bar;
diff =norm2(ddiff);
if ( diff > 0){
std::cout <<"Coor (" << coor[0]<<","<<coor[1]<<","<<coor[2]<<","<<coor[3]
std::cout <<"Diff at Coor (" << coor[0]<<","<<coor[1]<<","<<coor[2]<<","<<coor[3]
<<") " <<check<<" vs "<<bar<<std::endl;
<<") stencil " <<check<<" vs cshift "<<bar<<std::endl;
}

}}}}

if (nrm > 1.0e-4) {
autoView( check , Check, CpuRead);
autoView( bar , Bar, CpuRead);
for(int i=0;i<check.size();i++){
std::cout << i<<" Check "<<check[i]<< "\n"<<i<<" Bar "<<bar[i]<<std::endl;
std::cout << i<<" ERROR Check \n"<<check[i]<< "\n"<<i<<" Bar \n"<<bar[i]<<std::endl;
}
}
if (nrm > 1.0e-4) exit(-1);
@ -63,7 +63,9 @@ int main(int argc, char** argv) {
std::cout << "Dimension of adjoint representation: "<< SU2Adjoint::Dimension << std::endl;

// guard as this code fails to compile for Nc != 3
#if (Nc == 3)
#if 1

std::cout << " Printing Adjoint Generators"<< std::endl;

SU2Adjoint::printGenerators();
SU2::testGenerators();
@ -148,10 +150,33 @@ int main(int argc, char** argv) {
typename AdjointRep<Nc>::LatticeMatrix Vrmu = peekLorentz(Vr,mu);
pokeLorentz(UrVr,Urmu*Vrmu, mu);
}

typedef typename SU_Adjoint<Nc>::AMatrix AdjointMatrix;
typename AdjointRep<Nc>::LatticeField Diff_check = UVr - UrVr;
std::cout << GridLogMessage << "Group structure SU("<<Nc<<") check difference (Adjoint representation) : " << norm2(Diff_check) << std::endl;

std::cout << GridLogMessage << "****************************************** " << std::endl;
std::cout << GridLogMessage << " MAP BETWEEN FUNDAMENTAL AND ADJOINT CHECK " << std::endl;
std::cout << GridLogMessage << "****************************************** " << std::endl;
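// The triple loop below checks, element by element, that the adjoint generators
// returned by SU_Adjoint reproduce the SU(3) structure constants obtained from
// traces of commutators of the fundamental generators.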
for(int a=0;a<Nc*Nc-1;a++){
for(int b=0;b<Nc*Nc-1;b++){
for(int c=0;c<Nc*Nc-1;c++){
  ColourMatrix Ta;
  ColourMatrix Tb;
  ColourMatrix Tc;
  SU3::generator(a, Ta);
  SU3::generator(b, Tb);
  SU3::generator(c, Tc);
  AdjointMatrix TRa;
  SU3Adjoint::generator(a,TRa);
  Complex tr1 = trace ( Tc * ( Ta*Tb-Tb*Ta)); // i/2 fabc
  Complex tr2 = TRa()()(b,c) * Complex(0,1);
  std::cout << " 2 Tr( Tc[Ta,Tb]) " << 2.0*tr1<<std::endl;
  std::cout << " - TRa_bc " << tr2<<std::endl;
  assert(abs( (2.0*tr1-tr2) ) < 1.0e-7);
  std::cout << "------------------"<<std::endl;
}}}

// Check correspondence of algebra and group transformations
// Create a random vector
SU3::LatticeAlgebraVector h_adj(grid);
184
tests/debug/Test_padded_cell.cc
Normal file
@ -0,0 +1,184 @@
/*************************************************************************************

    Grid physics library, www.github.com/paboyle/Grid

    Source file: ./tests/Test_padded_cell.cc

    Copyright (C) 2015

    Author: Peter Boyle <paboyle@ph.ed.ac.uk>

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License along
    with this program; if not, write to the Free Software Foundation, Inc.,
    51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.

    See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */
#include <Grid/Grid.h>
#include <Grid/lattice/PaddedCell.h>
#include <Grid/stencil/GeneralLocalStencil.h>

using namespace std;
using namespace Grid;
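// Helper: apply the SIMD-lane permutations requested by a stencil entry. Each bit
// of "perm" flags one lattice dimension in which the neighbouring site's data sits
// in a different SIMD lane of the vectorised layout, so the matching permute() must
// be applied before the value is used.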
template<class vobj> void gpermute(vobj & inout,int perm){
  vobj tmp=inout;
  if (perm & 0x1 ) { permute(inout,tmp,0); tmp=inout;}
  if (perm & 0x2 ) { permute(inout,tmp,1); tmp=inout;}
  if (perm & 0x4 ) { permute(inout,tmp,2); tmp=inout;}
  if (perm & 0x8 ) { permute(inout,tmp,3); tmp=inout;}
}

int main (int argc, char ** argv)
{
  Grid_init(&argc,&argv);

  Coordinate latt_size = GridDefaultLatt();
  Coordinate simd_layout= GridDefaultSimd(Nd,vComplexD::Nsimd());
  Coordinate mpi_layout = GridDefaultMpi();
  std::cout << " mpi "<<mpi_layout<<std::endl;
  std::cout << " simd "<<simd_layout<<std::endl;
  std::cout << " latt "<<latt_size<<std::endl;
  GridCartesian GRID(latt_size,simd_layout,mpi_layout);

  GridParallelRNG pRNG(&GRID);
  pRNG.SeedFixedIntegers(std::vector<int>({45,12,81,9}));
  LatticeGaugeField Umu(&GRID);

  SU<Nc>::HotConfiguration(pRNG,Umu);

  Real plaq=WilsonLoops<PeriodicGimplR>::avgPlaquette(Umu);
  LatticeComplex trplaq(&GRID);

  std::vector<LatticeColourMatrix> U(Nd, Umu.Grid());
  for (int mu = 0; mu < Nd; mu++) {
    U[mu] = PeekIndex<LorentzIndex>(Umu, mu);
  }

  std::cout << GridLogMessage << " Average plaquette "<<plaq<<std::endl;

  LatticeComplex cplaq(&GRID); cplaq=Zero();

  /////////////////////////////////////////////////
  // Create a padded cell of extra padding depth=1
  /////////////////////////////////////////////////
  int depth = 1;
  PaddedCell Ghost(depth,&GRID);
  LatticeGaugeField Ughost = Ghost.Exchange(Umu);

  ///////////////////////////////////////////////////////////////////
  // Temporary debug hack for a single-rank simulation:
  // check the contents of the cell are periodically replicated.
  // In future ONLY pad those dimensions that are not local to the node.
  ///////////////////////////////////////////////////////////////////
#if 0
  {
    double diff=0;
    double n=0;
    {
      autoView( Ug_v , Ughost, CpuRead);
      autoView( Ul_v , Umu , CpuRead);
      for(int x=0;x<latt_size[0]+2;x++){
      for(int y=0;y<latt_size[1]+2;y++){
      for(int z=0;z<latt_size[2]+2;z++){
      for(int t=0;t<latt_size[3]+2;t++){
        int lx=(x-1+latt_size[0])%latt_size[0];
        int ly=(y-1+latt_size[1])%latt_size[1];
        int lz=(z-1+latt_size[2])%latt_size[2];
        int lt=(t-1+latt_size[3])%latt_size[3];
        Coordinate gcoor({x,y,z,t});
        Coordinate lcoor({lx,ly,lz,lt});
        LorentzColourMatrix g;
        LorentzColourMatrix l;
        peekLocalSite(g,Ug_v,gcoor);
        peekLocalSite(l,Ul_v,lcoor);
        g=g-l;
        assert(norm2(g)==0);
        diff = diff + norm2(g);
        n = n + norm2(l);
      }}}}
    }
    std::cout << "padded field check diff "<< diff <<" / "<< n<<std::endl;
    std::cout << norm2(Ughost)<< " " << norm2(Umu)<<std::endl;
  }
#endif

  ///// Array for the site plaquette
  GridBase *GhostGrid = Ughost.Grid();
  LatticeComplex gplaq(GhostGrid);

  std::vector<Coordinate> shifts;
  for(int mu=0;mu<Nd;mu++){
    for(int nu=mu+1;nu<Nd;nu++){

      // Umu(x) Unu(x+mu) Umu^dag(x+nu) Unu^dag(x)
      Coordinate shift_0(Nd,0);
      Coordinate shift_mu(Nd,0); shift_mu[mu]=1;
      Coordinate shift_nu(Nd,0); shift_nu[nu]=1;
      shifts.push_back(shift_0);
      shifts.push_back(shift_mu);
      shifts.push_back(shift_nu);
      shifts.push_back(shift_0);
    }
  }
  GeneralLocalStencil gStencil(GhostGrid,shifts);

  gplaq=Zero();
  {
    autoView( gp_v , gplaq, CpuWrite);
    autoView( t_v , trplaq, CpuRead);
    autoView( U_v , Ughost, CpuRead);
    for(int ss=0;ss<gp_v.size();ss++){
      int s=0;
      for(int mu=0;mu<Nd;mu++){
        for(int nu=mu+1;nu<Nd;nu++){

          auto SE0 = gStencil.GetEntry(s+0,ss);
          auto SE1 = gStencil.GetEntry(s+1,ss);
          auto SE2 = gStencil.GetEntry(s+2,ss);
          auto SE3 = gStencil.GetEntry(s+3,ss);

          int o0 = SE0->_offset;
          int o1 = SE1->_offset;
          int o2 = SE2->_offset;
          int o3 = SE3->_offset;

          auto U0 = U_v[o0](mu);
          auto U1 = U_v[o1](nu);
          auto U2 = adj(U_v[o2](mu));
          auto U3 = adj(U_v[o3](nu));

          gpermute(U0,SE0->_permute);
          gpermute(U1,SE1->_permute);
          gpermute(U2,SE2->_permute);
          gpermute(U3,SE3->_permute);

          gp_v[ss]() = gp_v[ss]() + trace( U0*U1*U2*U3 );
          s=s+4;
        }
      }
    }
  }
  cplaq = Ghost.Extract(gplaq);
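  // cplaq(x) now holds, per site, the sum over the Nd(Nd-1)/2 (mu<nu) planes of the
  // traced plaquettes. The average below therefore divides the global sum by the
  // number of sites, by the number of planes ("faces"), and by Nc, matching the
  // normalisation of WilsonLoops::avgPlaquette used above.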
  RealD vol = cplaq.Grid()->gSites();
  RealD faces = (Nd * (Nd-1))/2;
  auto p = TensorRemove(sum(cplaq));
  auto result = p.real()/vol/faces/Nc;

  std::cout << GridLogMessage << " Average plaquette via padded cell "<<result<<std::endl;
  std::cout << GridLogMessage << " Diff "<<result-plaq<<std::endl;

  assert(fabs(result-plaq)<1.0e-8);
  Grid_finalize();
}
@ -476,7 +476,9 @@ int main (int argc, char ** argv)
// ForceTest<GimplTypesR>(BdyNf2eo,U,DDHMCFilter);

//////////////////// One flavour boundary det ////////////////////
/*
RationalActionParams OFRp; // Up/down
int SP_iters = 3000;
OFRp.lo = 6.0e-5;
OFRp.hi = 90.0;
OFRp.inv_pow = 2;
@ -489,7 +491,7 @@ int main (int argc, char ** argv)
// OFRp.degree = 16;
OFRp.precision= 80;
OFRp.BoundsCheckFreq=0;
/*
*/
OneFlavourRationalParams OFRp; // Up/down
OFRp.lo = 4.0e-5;
OFRp.hi = 90.0;
@ -499,7 +501,6 @@ int main (int argc, char ** argv)
OFRp.degree = 18;
OFRp.precision= 80;
OFRp.BoundsCheckFreq=0;
*/
std::vector<RealD> ActionTolByPole({
1.0e-7,1.0e-8,1.0e-8,1.0e-8,
1.0e-8,1.0e-8,1.0e-8,1.0e-8,