mirror of https://github.com/paboyle/Grid.git synced 2025-06-14 22:07:05 +01:00

Compare commits


29 Commits

Author SHA1 Message Date
bbec7f9fa9 Debug code 2023-04-20 14:54:36 -04:00
3aa43e6065 Debug info 2023-04-20 14:21:13 -04:00
78ac4044ff HMC 2023-04-20 13:28:07 -04:00
119c3db47f Merge branch 'develop' of https://github.com/paboyle/Grid into develop 2023-04-18 15:13:16 -04:00
21bbdb8fc2 Crusher 2023-04-18 15:11:16 -04:00
739bd7572c Example code 2023-04-17 21:51:55 +00:00
074627a5bd Pass file descriptors through AF_UNIX for level_zero 2023-04-17 21:50:52 +00:00
6a23b2c599 Drop UVM 2023-04-17 21:49:58 +00:00
bd891fb3f5 tests to compile 2023-04-12 18:32:44 -04:00
3984265851 Merge pull request #432 from paboyle/hotfix/nvcc-warnings: Unused statements generating warnings removed 2023-04-12 16:59:02 -04:00
45361d188f Merge pull request #427 from fjosw/feat/bug_report_issue_template: Feat/bug report issue template 2023-04-12 16:58:41 -04:00
80c9d77e02 Merge pull request #433 from paboyle/hotfix/virtual-dtor: Virtual destructor for LinearOperator 2023-04-12 16:56:18 -04:00
3aff64dddb Merge branch 'develop' of https://github.com/paboyle/Grid into develop 2023-04-11 12:19:15 -07:00
b4f2ca81ff Copy queue and compute queue same as better concurrency 2023-04-11 12:18:21 -07:00
d1dea5f840 New driver 2023-04-11 12:16:52 -07:00
54f8b84d16 Fence 2023-04-11 12:16:08 -07:00
da503fef0e Name change on barrier routine 2023-04-11 12:14:04 -07:00
4a6802098a Merge branch 'develop' of https://github.com/paboyle/Grid into develop 2023-04-07 15:43:28 -04:00
f9b41a84d2 Trajectory runs to completion on Crusher within wall clock time 2023-04-07 15:42:45 -04:00
9e64387933 mores unused statements removed 2023-04-07 14:27:18 +01:00
983b681d46 unused statement cleaning 2023-04-07 14:12:02 +01:00
86dac5ff4f Better printing 2023-04-04 07:42:19 -07:00
4a382fad3f Use distinct SYCL queue for copies 2023-04-04 07:41:41 -07:00
cc753670d9 Barrier elimination, surface list build 2023-04-04 07:39:14 -07:00
cc9d88ea1c Fence changes and EXT kernel loop cout reduction 2023-04-04 07:37:23 -07:00
b281b0166e Put the barrier in the subroutine 2023-04-04 07:36:03 -07:00
6a21f694ff Apply barrier in Gather kernel sequence. Could place before comms, or in Gather, but decided to insist Gather means Gather is done 2023-04-04 07:33:24 -07:00
39214702f6 feat: indentation fixed. 2023-03-28 16:30:34 +02:00
3e4614c63a feat: draft for bug-report issue template added. 2023-03-28 16:24:35 +02:00
22 changed files with 407 additions and 101 deletions

.github/ISSUE_TEMPLATE/bug-report.yml (vendored, new file, 54 lines)
View File

@@ -0,0 +1,54 @@
name: Bug report
description: Report a bug.
title: "<insert title>"
labels: [bug]

body:
  - type: markdown
    attributes:
      value: >
        Thank you for taking the time to file a bug report.
        Please check that the code is pointing to the HEAD of develop
        or any commit in master which is tagged with a version number.

  - type: textarea
    attributes:
      label: "Describe the issue:"
      description: >
        Describe the issue and any previous attempt to solve it.
    validations:
      required: true

  - type: textarea
    attributes:
      label: "Code example:"
      description: >
        If relevant, show how to reproduce the issue using a minimal working
        example.
      placeholder: |
        << your code here >>
      render: shell
    validations:
      required: false

  - type: textarea
    attributes:
      label: "Target platform:"
      description: >
        Give a description of the target platform (CPU, network, compiler).
        Please give the full CPU part description, using for example
        `cat /proc/cpuinfo | grep 'model name' | uniq` (Linux)
        or `sysctl machdep.cpu.brand_string` (macOS) and the full output of
        the `--version` option of your compiler.
    validations:
      required: true

  - type: textarea
    attributes:
      label: "Configure options:"
      description: >
        Please give the exact configure command used and attach
        `config.log`, `grid.config.summary` and the output of `make V=1`.
      render: shell
    validations:
      required: true

View File

@@ -166,16 +166,16 @@ public:
     rsqf[s] =rsq[s];
     std::cout<<GridLogMessage<<"ConjugateGradientMultiShiftMixedPrecCleanup: shift "<< s <<" target resid "<<rsq[s]<<std::endl;
     //    ps_d[s] = src_d;
-    precisionChangeFast(ps_f[s],src_d);
+    precisionChange(ps_f[s],src_d);
   }
   // r and p for primary
   p_d = src_d; //primary copy --- make this a reference to ps_d to save axpys
   r_d = p_d;
   //MdagM+m[0]
-  precisionChangeFast(p_f,p_d);
+  precisionChange(p_f,p_d);
   Linop_f.HermOpAndNorm(p_f,mmp_f,d,qq); // mmp = MdagM p        d=real(dot(p, mmp)),  qq=norm2(mmp)
-  precisionChangeFast(tmp_d,mmp_f);
+  precisionChange(tmp_d,mmp_f);
   Linop_d.HermOpAndNorm(p_d,mmp_d,d,qq); // mmp = MdagM p        d=real(dot(p, mmp)),  qq=norm2(mmp)
   tmp_d = tmp_d - mmp_d;
   std::cout << " Testing operators match "<<norm2(mmp_d)<<" f "<<norm2(mmp_f)<<" diff "<< norm2(tmp_d)<<std::endl;
@@ -204,7 +204,7 @@ public:
   for(int s=0;s<nshift;s++) {
     axpby(psi_d[s],0.,-bs[s]*alpha[s],src_d,src_d);
-    precisionChangeFast(psi_f[s],psi_d[s]);
+    precisionChange(psi_f[s],psi_d[s]);
   }
   ///////////////////////////////////////
@@ -225,7 +225,7 @@ public:
   AXPYTimer.Stop();
   PrecChangeTimer.Start();
-  precisionChangeFast(r_f, r_d);
+  precisionChange(r_f, r_d);
   PrecChangeTimer.Stop();
   AXPYTimer.Start();
@@ -243,13 +243,13 @@ public:
   cp=c;
   PrecChangeTimer.Start();
-  precisionChangeFast(p_f, p_d); //get back single prec search direction for linop
+  precisionChange(p_f, p_d); //get back single prec search direction for linop
   PrecChangeTimer.Stop();
   MatrixTimer.Start();
   Linop_f.HermOp(p_f,mmp_f);
   MatrixTimer.Stop();
   PrecChangeTimer.Start();
-  precisionChangeFast(mmp_d, mmp_f); // From Float to Double
+  precisionChange(mmp_d, mmp_f); // From Float to Double
   PrecChangeTimer.Stop();
   d=real(innerProduct(p_d,mmp_d));
@@ -311,7 +311,7 @@ public:
   SolverTimer.Stop();
   for(int s=0;s<nshift;s++){
-    precisionChangeFast(psi_d[s],psi_f[s]);
+    precisionChange(psi_d[s],psi_f[s]);
   }
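The substantive change in this file is the swap from precisionChangeFast to precisionChange at every demote/promote point: the double-precision search direction is cast down to single precision for the expensive operator application, and the single-precision result is cast back up for the double-precision CG update. A self-contained toy of that pattern, with plain cast loops and a scalar multiply standing in for Grid's precisionChange and HermOp (illustrative names, not Grid's API):

    #include <cstdio>
    #include <vector>

    int main() {
      std::vector<double> p_d(4, 1.0 / 3.0), mmp_d(4);
      std::vector<float>  p_f(4), mmp_f(4);

      for (size_t i = 0; i < p_d.size(); i++)   p_f[i]   = (float)p_d[i];    // demote: double -> float
      for (size_t i = 0; i < p_f.size(); i++)   mmp_f[i] = 2.0f * p_f[i];    // cheap single-precision "operator"
      for (size_t i = 0; i < mmp_f.size(); i++) mmp_d[i] = (double)mmp_f[i]; // promote: float -> double

      std::printf("mmp_d[0] = %.17g (single-precision rounding visible)\n", mmp_d[0]);
      return 0;
    }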

View File

@@ -211,7 +211,7 @@ public:
   Linop_d.HermOpAndNorm(p_d,mmp_d,d,qq); // mmp = MdagM p        d=real(dot(p, mmp)),  qq=norm2(mmp)
   tmp_d = tmp_d - mmp_d;
   std::cout << " Testing operators match "<<norm2(mmp_d)<<" f "<<norm2(mmp_f)<<" diff "<< norm2(tmp_d)<<std::endl;
-  //  assert(norm2(tmp_d)< 1.0e-4);
+  assert(norm2(tmp_d)< 1.0);
   axpy(mmp_d,mass[0],p_d,mmp_d);
   RealD rn = norm2(p_d);

View File

@@ -519,7 +519,6 @@ void MemoryManager::Audit(std::string s)
   uint64_t LruBytes1=0;
   uint64_t LruBytes2=0;
   uint64_t LruCnt=0;
-  uint64_t LockedBytes=0;
   std::cout << " Memory Manager::Audit() from "<<s<<std::endl;
   for(auto it=LRU.begin();it!=LRU.end();it++){

View File

@@ -27,9 +27,10 @@ Author: Christoph Lehner <christoph@lhnr.de>
 *************************************************************************************/
 /* END LEGAL */
-#define header "SharedMemoryMpi: "
 #include <Grid/GridCore.h>
 #include <pwd.h>
+#include <syscall.h>

 #ifdef GRID_CUDA
 #include <cuda_runtime_api.h>
@@ -39,11 +40,118 @@ Author: Christoph Lehner <christoph@lhnr.de>
 #endif
 #ifdef GRID_SYCL
 #define GRID_SYCL_LEVEL_ZERO_IPC
-#include <syscall.h>
+#define SHM_SOCKETS
 #endif
+#include <sys/socket.h>
+#include <sys/un.h>
+
+NAMESPACE_BEGIN(Grid);
+#ifdef SHM_SOCKETS
+/*
+ * Barbaric extra intranode communication route in case we need sockets to pass FDs
+ * Forced by level_zero not being nicely designed
+ */
+static int sock;
+static const char *sock_path_fmt = "/tmp/GridUnixSocket.%d";
+static char sock_path[256];
+class UnixSockets {
+public:
+  static void Open(int rank)
+  {
+    int errnum;
+    sock = socket(AF_UNIX, SOCK_DGRAM, 0);  assert(sock>0);
+    struct sockaddr_un sa_un = { 0 };
+    sa_un.sun_family = AF_UNIX;
+    snprintf(sa_un.sun_path, sizeof(sa_un.sun_path), sock_path_fmt, rank);
+    unlink(sa_un.sun_path);
+    if (bind(sock, (struct sockaddr *)&sa_un, sizeof(sa_un))) {
+      perror("bind failure");
+      exit(EXIT_FAILURE);
+    }
+  }
+  static int RecvFileDescriptor(void)
+  {
+    int n;
+    int fd;
+    char buf[1];
+    struct iovec iov;
+    struct msghdr msg;
+    struct cmsghdr *cmsg;
+    char cms[CMSG_SPACE(sizeof(int))];
+
+    iov.iov_base = buf;
+    iov.iov_len = 1;
+
+    memset(&msg, 0, sizeof msg);
+    msg.msg_name = 0;
+    msg.msg_namelen = 0;
+    msg.msg_iov = &iov;
+    msg.msg_iovlen = 1;
+
+    msg.msg_control = (caddr_t)cms;
+    msg.msg_controllen = sizeof cms;
+
+    if ((n = recvmsg(sock, &msg, 0)) < 0) {
+      perror("recvmsg failed");
+      return -1;
+    }
+    if (n == 0) {
+      perror("recvmsg returned 0");
+      return -1;
+    }
+    cmsg = CMSG_FIRSTHDR(&msg);
+    memmove(&fd, CMSG_DATA(cmsg), sizeof(int));
+    return fd;
+  }
+  static void SendFileDescriptor(int fildes, int xmit_to_rank)
+  {
+    struct msghdr msg;
+    struct iovec iov;
+    struct cmsghdr *cmsg = NULL;
+    char ctrl[CMSG_SPACE(sizeof(int))];
+    char data = ' ';
+
+    memset(&msg, 0, sizeof(struct msghdr));
+    memset(ctrl, 0, CMSG_SPACE(sizeof(int)));
+    iov.iov_base = &data;
+    iov.iov_len = sizeof(data);
+
+    sprintf(sock_path, sock_path_fmt, xmit_to_rank);
+
+    struct sockaddr_un sa_un = { 0 };
+    sa_un.sun_family = AF_UNIX;
+    snprintf(sa_un.sun_path, sizeof(sa_un.sun_path), sock_path_fmt, xmit_to_rank);
+
+    msg.msg_name = (void *)&sa_un;
+    msg.msg_namelen = sizeof(sa_un);
+    msg.msg_iov = &iov;
+    msg.msg_iovlen = 1;
+    msg.msg_controllen = CMSG_SPACE(sizeof(int));
+    msg.msg_control = ctrl;
+
+    cmsg = CMSG_FIRSTHDR(&msg);
+    cmsg->cmsg_level = SOL_SOCKET;
+    cmsg->cmsg_type = SCM_RIGHTS;
+    cmsg->cmsg_len = CMSG_LEN(sizeof(int));
+
+    *((int *) CMSG_DATA(cmsg)) = fildes;
+
+    sendmsg(sock, &msg, 0);
+  };
+};
+#endif
-NAMESPACE_BEGIN(Grid);
+#define header "SharedMemoryMpi: "

 /*Construct from an MPI communicator*/
 void GlobalSharedMemory::Init(Grid_MPI_Comm comm)
 {
@@ -170,10 +278,7 @@ void GlobalSharedMemory::OptimalCommunicator(const Coordinate &processors,Grid_M
   if(nscan==3 && HPEhypercube ) OptimalCommunicatorHypercube(processors,optimal_comm,SHM);
   else OptimalCommunicatorSharedMemory(processors,optimal_comm,SHM);
 }
-static inline int divides(int a,int b)
-{
-  return ( b == ( (b/a)*a ) );
-}
 void GlobalSharedMemory::OptimalCommunicatorHypercube(const Coordinate &processors,Grid_MPI_Comm & optimal_comm,Coordinate &SHM)
 {
   ////////////////////////////////////////////////////////////////
@@ -483,8 +588,13 @@ void GlobalSharedMemory::SharedMemoryAllocate(uint64_t bytes, int flags)
   ///////////////////////////////////////////////////////////////////////////////////////////////////////////
   // Loop over ranks/gpu's on our node
   ///////////////////////////////////////////////////////////////////////////////////////////////////////////
+#ifdef SHM_SOCKETS
+  UnixSockets::Open(WorldShmRank);
+#endif
   for(int r=0;r<WorldShmSize;r++){
+    MPI_Barrier(WorldShmComm);
 #ifndef GRID_MPI3_SHM_NONE
     //////////////////////////////////////////////////
     // If it is me, pass around the IPC access key
@@ -492,24 +602,32 @@ void GlobalSharedMemory::SharedMemoryAllocate(uint64_t bytes, int flags)
     void * thisBuf = ShmCommBuf;
     if(!Stencil_force_mpi) {
 #ifdef GRID_SYCL_LEVEL_ZERO_IPC
-      typedef struct { int fd; pid_t pid ; } clone_mem_t;
+      typedef struct { int fd; pid_t pid ; ze_ipc_mem_handle_t ze; } clone_mem_t;
       auto zeDevice  = cl::sycl::get_native<cl::sycl::backend::level_zero>(theGridAccelerator->get_device());
       auto zeContext = cl::sycl::get_native<cl::sycl::backend::level_zero>(theGridAccelerator->get_context());
       ze_ipc_mem_handle_t ihandle;
       clone_mem_t handle;
       if ( r==WorldShmRank ) {
         auto err = zeMemGetIpcHandle(zeContext,ShmCommBuf,&ihandle);
         if ( err != ZE_RESULT_SUCCESS ) {
-          std::cout << "SharedMemoryMPI.cc zeMemGetIpcHandle failed for rank "<<r<<" "<<std::hex<<err<<std::dec<<std::endl;
+          std::cerr << "SharedMemoryMPI.cc zeMemGetIpcHandle failed for rank "<<r<<" "<<std::hex<<err<<std::dec<<std::endl;
           exit(EXIT_FAILURE);
         } else {
           std::cout << "SharedMemoryMPI.cc zeMemGetIpcHandle succeeded for rank "<<r<<" "<<std::hex<<err<<std::dec<<std::endl;
         }
         memcpy((void *)&handle.fd,(void *)&ihandle,sizeof(int));
         handle.pid = getpid();
+        memcpy((void *)&handle.ze,(void *)&ihandle,sizeof(ihandle));
+#ifdef SHM_SOCKETS
+        for(int rr=0;rr<WorldShmSize;rr++){
+          if(rr!=r){
+            UnixSockets::SendFileDescriptor(handle.fd,rr);
+          }
+        }
+#endif
       }
 #endif
 #ifdef GRID_CUDA
@@ -537,6 +655,7 @@ void GlobalSharedMemory::SharedMemoryAllocate(uint64_t bytes, int flags)
     // Share this IPC handle across the Shm Comm
     //////////////////////////////////////////////////
     {
+      MPI_Barrier(WorldShmComm);
       int ierr=MPI_Bcast(&handle,
                          sizeof(handle),
                          MPI_BYTE,
@@ -552,6 +671,10 @@ void GlobalSharedMemory::SharedMemoryAllocate(uint64_t bytes, int flags)
 #ifdef GRID_SYCL_LEVEL_ZERO_IPC
     if ( r!=WorldShmRank ) {
       thisBuf = nullptr;
+      int myfd;
+#ifdef SHM_SOCKETS
+      myfd=UnixSockets::RecvFileDescriptor();
+#else
       std::cout<<"mapping seeking remote pid/fd "
                <<handle.pid<<"/"
                <<handle.fd<<std::endl;
@@ -559,16 +682,22 @@ void GlobalSharedMemory::SharedMemoryAllocate(uint64_t bytes, int flags)
       int pidfd = syscall(SYS_pidfd_open,handle.pid,0);
       std::cout<<"Using IpcHandle pidfd "<<pidfd<<"\n";
       //      int myfd = syscall(SYS_pidfd_getfd,pidfd,handle.fd,0);
-      int myfd = syscall(438,pidfd,handle.fd,0);
-      std::cout<<"Using IpcHandle myfd "<<myfd<<"\n";
+      myfd = syscall(438,pidfd,handle.fd,0);
+      int err_t = errno;
+      if (myfd < 0) {
+        fprintf(stderr,"pidfd_getfd returned %d errno was %d\n", myfd,err_t); fflush(stderr);
+        perror("pidfd_getfd failed ");
+        assert(0);
+      }
+#endif
+      std::cout<<"Using IpcHandle mapped remote pid "<<handle.pid <<" FD "<<handle.fd <<" to myfd "<<myfd<<"\n";
+      memcpy((void *)&ihandle,(void *)&handle.ze,sizeof(ihandle));
       memcpy((void *)&ihandle,(void *)&myfd,sizeof(int));
       auto err = zeMemOpenIpcHandle(zeContext,zeDevice,ihandle,0,&thisBuf);
       if ( err != ZE_RESULT_SUCCESS ) {
-        std::cout << "SharedMemoryMPI.cc "<<zeContext<<" "<<zeDevice<<std::endl;
-        std::cout << "SharedMemoryMPI.cc zeMemOpenIpcHandle failed for rank "<<r<<" "<<std::hex<<err<<std::dec<<std::endl;
+        std::cerr << "SharedMemoryMPI.cc "<<zeContext<<" "<<zeDevice<<std::endl;
+        std::cerr << "SharedMemoryMPI.cc zeMemOpenIpcHandle failed for rank "<<r<<" "<<std::hex<<err<<std::dec<<std::endl;
         exit(EXIT_FAILURE);
       } else {
         std::cout << "SharedMemoryMPI.cc zeMemOpenIpcHandle succeeded for rank "<<r<<std::endl;
@@ -603,6 +732,7 @@ void GlobalSharedMemory::SharedMemoryAllocate(uint64_t bytes, int flags)
 #else
     WorldShmCommBufs[r] = ShmCommBuf;
 #endif
+    MPI_Barrier(WorldShmComm);
   }
   _ShmAllocBytes=bytes;
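When SHM_SOCKETS is not defined, the branch above falls back to the pidfd route: open a handle on the owning process with SYS_pidfd_open, then duplicate that process's descriptor into the caller. A minimal sketch of that fallback under the same assumption the diff makes (Linux >= 5.6; pidfd_getfd often lacks a glibc wrapper, hence the raw syscall number 438 on x86-64; the helper name is illustrative, not Grid's):

    #include <sys/syscall.h>
    #include <unistd.h>
    #include <cstdio>

    // Duplicate remote_fd from process pid into this process.
    // Requires ptrace permission over the target (e.g. same user).
    int clone_remote_fd(pid_t pid, int remote_fd) {
      int pidfd = (int)syscall(SYS_pidfd_open, pid, 0);  // handle on the owning process
      if (pidfd < 0) { std::perror("pidfd_open"); return -1; }
      int myfd = (int)syscall(438, pidfd, remote_fd, 0); // SYS_pidfd_getfd
      if (myfd < 0) std::perror("pidfd_getfd");
      close(pidfd);
      return myfd;
    }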

View File

@@ -507,6 +507,7 @@ public:
   }
   this->face_table_computed=1;
   assert(this->u_comm_offset==this->_unified_buffer_size);
+  accelerator_barrier();
 }
 };

View File

@@ -196,7 +196,6 @@ void WilsonFermion5D<Impl>::DhopDir(const FermionField &in, FermionField &out,in
   uint64_t Nsite = Umu.Grid()->oSites();
   Kernels::DhopDirKernel(Stencil,Umu,Stencil.CommBuf(),Ls,Nsite,in,out,dirdisp,gamma);
 };
-
 template<class Impl>
 void WilsonFermion5D<Impl>::DhopDirAll(const FermionField &in, std::vector<FermionField> &out)
@@ -247,10 +246,14 @@ void WilsonFermion5D<Impl>::DerivInternal(StencilImpl & st,
   Kernels::DhopDirKernel(st, U, st.CommBuf(), Ls, Usites, B, Btilde, mu,gamma);
+  std::cout << " InsertForce Btilde "<< norm2(Btilde)<<std::endl;
   ////////////////////////////
   // spin trace outer product
   ////////////////////////////
   Impl::InsertForce5D(mat, Btilde, Atilde, mu);
+  std::cout << " InsertForce "<< norm2(mat)<<std::endl;
 }
 }
@@ -332,8 +335,7 @@ void WilsonFermion5D<Impl>::DhopInternalOverlappedComms(StencilImpl & st, Lebesg
   /////////////////////////////
   {
     GRID_TRACE("Gather");
-    st.HaloExchangeOptGather(in,compressor);
-    accelerator_barrier();
+    st.HaloExchangeOptGather(in,compressor); // Put the barrier in the routine
   }
   std::vector<std::vector<CommsRequest_t> > requests;

View File

@@ -428,9 +428,10 @@ void WilsonKernels<Impl>::DhopDirKernel( StencilImpl &st, DoubledGaugeField &U,S
   auto ptr = &st.surface_list[0];                          \
   accelerator_forNB( ss, sz, Simd::Nsimd(), {              \
     int sF = ptr[ss];                                      \
-    int sU = ss/Ls;                                        \
+    int sU = sF/Ls;                                        \
     WilsonKernels<Impl>::A(st_v,U_v,buf,sF,sU,in_v,out_v); \
-  });
+  });                                                      \
+  accelerator_barrier();
 #define ASM_CALL(A)        \
   thread_for( sss, Nsite, { \
@@ -474,9 +475,10 @@ void WilsonKernels<Impl>::DhopKernel(int Opt,StencilImpl &st, DoubledGaugeField
     if (Opt == WilsonKernelsStatic::OptInlineAsm  ) { ASM_CALL(AsmDhopSiteInt); return;}
 #endif
   } else if( exterior ) {
+    // dependent on result of merge
     acceleratorFenceComputeStream();
-    if (Opt == WilsonKernelsStatic::OptGeneric    ) { KERNEL_CALL(GenericDhopSiteExt); return;}
-    if (Opt == WilsonKernelsStatic::OptHandUnroll ) { KERNEL_CALL(HandDhopSiteExt);    return;}
+    if (Opt == WilsonKernelsStatic::OptGeneric    ) { KERNEL_CALL_EXT(GenericDhopSiteExt); return;}
+    if (Opt == WilsonKernelsStatic::OptHandUnroll ) { KERNEL_CALL_EXT(HandDhopSiteExt);    return;}
 #ifndef GRID_CUDA
     if (Opt == WilsonKernelsStatic::OptInlineAsm  ) { ASM_CALL(AsmDhopSiteExt); return;}
 #endif
@@ -506,9 +508,10 @@ void WilsonKernels<Impl>::DhopKernel(int Opt,StencilImpl &st, DoubledGaugeField
     if (Opt == WilsonKernelsStatic::OptInlineAsm  ) { ASM_CALL(AsmDhopSiteDagInt); return;}
 #endif
   } else if( exterior ) {
+    // Dependent on result of merge
     acceleratorFenceComputeStream();
-    if (Opt == WilsonKernelsStatic::OptGeneric    ) { KERNEL_CALL(GenericDhopSiteDagExt); return;}
-    if (Opt == WilsonKernelsStatic::OptHandUnroll ) { KERNEL_CALL(HandDhopSiteDagExt);    return;}
+    if (Opt == WilsonKernelsStatic::OptGeneric    ) { KERNEL_CALL_EXT(GenericDhopSiteDagExt); return;}
+    if (Opt == WilsonKernelsStatic::OptHandUnroll ) { KERNEL_CALL_EXT(HandDhopSiteDagExt);    return;}
 #ifndef GRID_CUDA
     if (Opt == WilsonKernelsStatic::OptInlineAsm  ) { ASM_CALL(AsmDhopSiteDagExt); return;}
 #endif
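KERNEL_CALL_EXT above runs the exterior kernel by indexing through st.surface_list rather than looping over every lattice site, and the one-character fix replaces ss/Ls with sF/Ls so that the 4d gauge site is derived from the looked-up 5d index, not the loop counter. A minimal sketch of that surface-list pattern in plain C++ (illustrative names, not Grid's API):

    #include <cstdint>
    #include <vector>

    template <class Kernel>
    void apply_exterior(const std::vector<uint64_t>& surface_list, int Ls, Kernel site_kernel) {
      for (uint64_t ss = 0; ss < surface_list.size(); ss++) {
        uint64_t sF = surface_list[ss]; // looked-up 5d (Ls-flattened) boundary site
        uint64_t sU = sF / Ls;          // owning 4d gauge site: sF/Ls, not ss/Ls
        site_kernel(sF, sU);            // apply the stencil only at boundary sites
      }
    }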

View File

@@ -119,13 +119,19 @@ public:
   // X^dag Der_oe MeeInv Meo Y
   // Use Mooee as nontrivial but gauge field indept
   this->_Mat.MeooeDag   (V,tmp1);    // odd->even -- implicit -0.5 factor to be applied
+  std::cout << " tmp 1" << norm2(tmp1)<<std::endl;
   this->_Mat.MooeeInvDag(tmp1,tmp2); // even->even
+  std::cout << " tmp 1" << norm2(tmp2)<<std::endl;
   this->_Mat.MoeDeriv(ForceO,U,tmp2,DaggerYes);
+  std::cout << " ForceO " << norm2(ForceO)<<std::endl;
   // Accumulate X^dag M_oe MeeInv Der_eo Y
   this->_Mat.Meooe   (U,tmp1);    // even->odd -- implicit -0.5 factor to be applied
+  std::cout << " tmp 1" << norm2(tmp1)<<std::endl;
   this->_Mat.MooeeInv(tmp1,tmp2); // even->even
+  std::cout << " tmp 2" << norm2(tmp2)<<std::endl;
   this->_Mat.MeoDeriv(ForceE,tmp2,V,DaggerYes);
+  std::cout << " ForceE " << norm2(ForceE)<<std::endl;

   assert(ForceE.Checkerboard()==Even);
   assert(ForceO.Checkerboard()==Odd);

View File

@@ -53,9 +53,10 @@ NAMESPACE_BEGIN(Grid);
     Integer ReliableUpdateFreq;
   protected:
+    //Action evaluation
     //Allow derived classes to override the multishift CG
     virtual void multiShiftInverse(bool numerator, const MultiShiftFunction &approx, const Integer MaxIter, const FermionFieldD &in, FermionFieldD &out){
-#if 0
+#if 1
       SchurDifferentiableOperator<ImplD> schurOp(numerator ? NumOpD : DenOpD);
       ConjugateGradientMultiShift<FermionFieldD> msCG(MaxIter, approx);
       msCG(schurOp,in, out);
@@ -70,9 +71,10 @@ NAMESPACE_BEGIN(Grid);
       msCG(schurOpD, in, out);
 #endif
     }
+    //Force evaluation
     virtual void multiShiftInverse(bool numerator, const MultiShiftFunction &approx, const Integer MaxIter, const FermionFieldD &in, std::vector<FermionFieldD> &out_elems, FermionFieldD &out){
       SchurDifferentiableOperator<ImplD> schurOpD(numerator ? NumOpD : DenOpD);
-      SchurDifferentiableOperator<ImplF> schurOpF (numerator ? NumOpF : DenOpF);
+      SchurDifferentiableOperator<ImplF> schurOpF(numerator ? NumOpF : DenOpF);

       FermionFieldD inD(NumOpD.FermionRedBlackGrid());
       FermionFieldD outD(NumOpD.FermionRedBlackGrid());
@@ -84,20 +86,15 @@ NAMESPACE_BEGIN(Grid);
     virtual void ImportGauge(const typename ImplD::GaugeField &Ud){
       typename ImplF::GaugeField Uf(NumOpF.GaugeGrid());
-      typename ImplD::GaugeField Ud2(NumOpD.GaugeGrid());
       precisionChange(Uf, Ud);
-      precisionChange(Ud2, Ud);

-      std::cout << "Importing "<<norm2(Ud)<<" "<< norm2(Uf)<<" " << norm2(Ud2)<<std::endl;
+      std::cout << "Importing "<<norm2(Ud)<<" "<< norm2(Uf)<<" " <<std::endl;

       NumOpD.ImportGauge(Ud);
       DenOpD.ImportGauge(Ud);
       NumOpF.ImportGauge(Uf);
       DenOpF.ImportGauge(Uf);
-      NumOpD.ImportGauge(Ud2);
-      DenOpD.ImportGauge(Ud2);
     }

   public:

View File

@@ -207,20 +207,27 @@ NAMESPACE_BEGIN(Grid);
      //X = (Mdag M)^-1 V^dag phi
      //Y = (Mdag)^-1 V^dag phi
      Vpc.MpcDag(PhiOdd,Y);          // Y= Vdag phi
+     std::cout << GridLogMessage <<" Y "<<norm2(Y)<<std::endl;
      X=Zero();
      DerivativeSolver(Mpc,Y,X);     // X= (MdagM)^-1 Vdag phi
+     std::cout << GridLogMessage <<" X "<<norm2(X)<<std::endl;
      Mpc.Mpc(X,Y);                  // Y= Mdag^-1 Vdag phi
+     std::cout << GridLogMessage <<" Y "<<norm2(Y)<<std::endl;

      // phi^dag V (Mdag M)^-1 dV^dag phi
      Vpc.MpcDagDeriv(force , X, PhiOdd );   dSdU = force;
+     std::cout << GridLogMessage <<" deriv "<<norm2(force)<<std::endl;

      // phi^dag dV (Mdag M)^-1 V^dag phi
      Vpc.MpcDeriv(force , PhiOdd, X );      dSdU = dSdU+force;
+     std::cout << GridLogMessage <<" deriv "<<norm2(force)<<std::endl;

      //  -  phi^dag V (Mdag M)^-1 Mdag dM   (Mdag M)^-1 V^dag phi
      //  -  phi^dag V (Mdag M)^-1 dMdag M   (Mdag M)^-1 V^dag phi
      Mpc.MpcDeriv(force,Y,X);               dSdU = dSdU-force;
+     std::cout << GridLogMessage <<" deriv "<<norm2(force)<<std::endl;
      Mpc.MpcDagDeriv(force,X,Y);            dSdU = dSdU-force;
+     std::cout << GridLogMessage <<" deriv "<<norm2(force)<<std::endl;

      // FIXME No force contribution from EvenEven assumed here
      // Needs a fix for clover.

View File

@@ -339,8 +339,8 @@ public:
   // Vectors that live on the symmetric heap in case of SHMEM
   // These are used; either SHM objects or refs to the above symmetric heap vectors
   // depending on comms target
-  Vector<cobj *> u_simd_send_buf;
-  Vector<cobj *> u_simd_recv_buf;
+  std::vector<cobj *> u_simd_send_buf;
+  std::vector<cobj *> u_simd_recv_buf;

   int u_comm_offset;
   int _unified_buffer_size;
@@ -348,7 +348,7 @@ public:
   ////////////////////////////////////////
   // Stencil query
   ////////////////////////////////////////
-#ifdef SHM_FAST_PATH
+#if 1
   inline int SameNode(int point) {
     int dimension = this->_directions[point];
@@ -434,7 +434,6 @@ public:
   ////////////////////////////////////////////////////////////////////////
   void CommunicateBegin(std::vector<std::vector<CommsRequest_t> > &reqs)
   {
-    accelerator_barrier();
     for(int i=0;i<Packets.size();i++){
       _grid->StencilSendToRecvFromBegin(MpiReqs,
                                         Packets[i].send_buf,
@@ -666,11 +665,9 @@ public:
     for(int i=0;i<mm.size();i++){
       decompressor::MergeFace(decompress,mm[i]);
     }
-    if ( mm.size() ) acceleratorFenceComputeStream();
     for(int i=0;i<dd.size();i++){
       decompressor::DecompressFace(decompress,dd[i]);
     }
-    if ( dd.size() ) acceleratorFenceComputeStream();
   }
   ////////////////////////////////////////
   // Set up routines
@@ -708,6 +705,7 @@ public:
         }
       }
     }
+    std::cout << "BuildSurfaceList size is "<<surface_list.size()<<std::endl;
   }
   /// Introduce a block structure and switch off comms on boundaries
   void DirichletBlock(const Coordinate &dirichlet_block)
@@ -1369,10 +1367,11 @@ public:
     int recv_from_rank;
     int xmit_to_rank;
     int shm_send=0;
-    int shm_recv=0;
     _grid->ShiftedRanks(dimension,nbr_proc,xmit_to_rank,recv_from_rank);
 #ifdef SHM_FAST_PATH
 #warning STENCIL SHM FAST PATH SELECTED
+    int shm_recv=0;
     // shm == receive pointer         if offnode
     // shm == Translate[send pointer] if on node -- my view of his send pointer
     cobj *shm = (cobj *) _grid->ShmBufferTranslate(recv_from_rank,sp);
@@ -1405,7 +1404,6 @@ public:
       acceleratorMemSet(rp,0,bytes); // Zero prefill comms buffer to zero
     }
     int do_send = (comms_send|comms_partial_send) && (!shm_send );
-    int do_recv = (comms_send|comms_partial_send) && (!shm_recv );
     AddPacket((void *)sp,(void *)rp,
               xmit_to_rank,do_send,
               recv_from_rank,do_send,

View File

@@ -133,7 +133,6 @@ typename vobj::scalar_object extractLane(int lane, const vobj & __restrict__ vec
   typedef scalar_type * pointer;
   constexpr int words=sizeof(vobj)/sizeof(vector_type);
-  constexpr int Nsimd=vector_type::Nsimd();

   scalar_object extracted;
   pointer __restrict__ sp = (pointer)&extracted; // Type pun
@@ -153,7 +152,6 @@ void insertLane(int lane, vobj & __restrict__ vec,const typename vobj::scalar_ob
   typedef scalar_type * pointer;
   constexpr int words=sizeof(vobj)/sizeof(vector_type);
-  constexpr int Nsimd=vector_type::Nsimd();

   pointer __restrict__ sp = (pointer)&extracted;
   vector_type *vp = (vector_type *)&vec;
@@ -178,8 +176,6 @@ void extract(const vobj &vec,const ExtractPointerArray<sobj> &extracted, int off
   const int s = Nsimd/Nextr;
   vector_type * vp = (vector_type *)&vec;
-  scalar_type vtmp;
-  sobj_scalar_type stmp;
   for(int w=0;w<words;w++){
     for(int i=0;i<Nextr;i++){
       sobj_scalar_type * pointer = (sobj_scalar_type *)& extracted[i][offset];
@@ -205,7 +201,6 @@ void merge(vobj &vec,const ExtractPointerArray<sobj> &extracted, int offset)
   vector_type * vp = (vector_type *)&vec;
   scalar_type vtmp;
-  sobj_scalar_type stmp;
   for(int w=0;w<words;w++){
     for(int i=0;i<Nextr;i++){
       sobj_scalar_type * pointer = (sobj_scalar_type *)& extracted[i][offset];
@@ -242,9 +237,6 @@ void copyLane(vobjOut & __restrict__ vecOut, int lane_out, const vobjIn & __rest
   typedef oextract_type * opointer;
   typedef iextract_type * ipointer;
-  constexpr int oNsimd=ovector_type::Nsimd();
-  constexpr int iNsimd=ivector_type::Nsimd();

   iscalar_type itmp;
   oscalar_type otmp;

View File

@@ -526,7 +526,7 @@ inline void acceleratorFreeCpu (void *ptr){free(ptr);};
 //////////////////////////////////////////////
 #ifdef GRID_SYCL
-inline void acceleratorFenceComputeStream(void){ theGridAccelerator->submit_barrier();};
+inline void acceleratorFenceComputeStream(void){ theGridAccelerator->ext_oneapi_submit_barrier(); };
 #else
 // Ordering within a stream guaranteed on Nvidia & AMD
 inline void acceleratorFenceComputeStream(void){ };
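The rename tracks the DPC++ interface: the queue-level barrier is exposed through the oneAPI extension namespace, so the SYCL compute-stream fence reduces to a single call. A sketch, assuming a DPC++ toolchain recent enough to provide sycl::queue::ext_oneapi_submit_barrier:

    #include <sycl/sycl.hpp>

    // Order all later submissions on q after everything already submitted.
    void fence_compute_stream(sycl::queue &q) {
      q.ext_oneapi_submit_barrier(); // returns a sycl::event; discarded, as in the diff
    }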

View File

@@ -227,7 +227,7 @@ int main(int argc, char **argv) {
   //  std::vector<Real> hasenbusch({ light_mass, 0.005, 0.0145, 0.045, 0.108, 0.25, 0.51 , pv_mass }); // Updated
   //  std::vector<Real> hasenbusch({ light_mass, 0.0145, 0.045, 0.108, 0.25, 0.51 , 0.75 , pv_mass });
-  int SP_iters=10000;
+  int SP_iters=9000;

   RationalActionParams OFRp; // Up/down
   OFRp.lo       = 6.0e-5;
@@ -362,12 +362,12 @@ int main(int argc, char **argv) {
   // Probably dominates the force - back to EOFA.
   OneFlavourRationalParams SFRp;
-  SFRp.lo       = 0.25;
+  SFRp.lo       = 0.1;
   SFRp.hi       = 25.0;
   SFRp.MaxIter  = 10000;
-  SFRp.tolerance= 1.0e-5;
+  SFRp.tolerance= 1.0e-8;
   SFRp.mdtolerance= 2.0e-4;
-  SFRp.degree   = 8;
+  SFRp.degree   = 12;
   SFRp.precision= 50;

   MobiusEOFAFermionD Strange_Op_L (U , *FGrid , *FrbGrid , *GridPtr , *GridRBPtr , strange_mass, strange_mass, pv_mass, 0.0, -1, M5, b, c);

View File

@@ -329,7 +329,6 @@ int main(int argc, char **argv) {
   auto grid4= GridPtr;
-  auto rbgrid4= GridRBPtr;
   auto rbgrid = StrangeOp.FermionRedBlackGrid();
   auto grid   = StrangeOp.FermionGrid();
   if(1){

View File

@@ -244,11 +244,6 @@ int main(int argc, char **argv) {
   Coordinate shm;
   GlobalSharedMemory::GetShmDims(mpi,shm);
-  Coordinate CommDim(Nd);
-  for(int d=0;d<Nd;d++) CommDim[d]= (mpi[d]/shm[d])>1 ? 1 : 0;
-  Coordinate NonDirichlet(Nd+1,0);

   //////////////////////////
   // Fermion Grids
@@ -277,8 +272,6 @@ int main(int argc, char **argv) {
   std::vector<Complex> boundary = {1,1,1,-1};
   FermionAction::ImplParams Params(boundary);
   FermionActionF::ImplParams ParamsF(boundary);
-  Params.dirichlet=NonDirichlet;
-  ParamsF.dirichlet=NonDirichlet;

   //  double StoppingCondition = 1e-14;
   //  double MDStoppingCondition = 1e-9;
@@ -305,12 +298,12 @@ int main(int argc, char **argv) {
   // Probably dominates the force - back to EOFA.
   OneFlavourRationalParams SFRp;
-  SFRp.lo       = 0.25;
-  SFRp.hi       = 25.0;
+  SFRp.lo       = 0.1;
+  SFRp.hi       = 30.0;
   SFRp.MaxIter  = 10000;
-  SFRp.tolerance= 1.0e-5;
-  SFRp.mdtolerance= 2.0e-4;
-  SFRp.degree   = 8;
+  SFRp.tolerance= 1.0e-8;
+  SFRp.mdtolerance= 2.0e-6;
+  SFRp.degree   = 10;
   SFRp.precision= 50;

   MobiusEOFAFermionD Strange_Op_L (U , *FGrid , *FrbGrid , *GridPtr , *GridRBPtr , strange_mass, strange_mass, pv_mass, 0.0, -1, M5, b, c);
@@ -370,19 +363,17 @@ int main(int argc, char **argv) {
   ////////////////////////////////////
   std::vector<Real> light_den;
   std::vector<Real> light_num;
-  std::vector<int> dirichlet_den;
-  std::vector<int> dirichlet_num;

   int n_hasenbusch = hasenbusch.size();
-  light_den.push_back(light_mass);  dirichlet_den.push_back(0);
+  light_den.push_back(light_mass);
   for(int h=0;h<n_hasenbusch;h++){
-    light_den.push_back(hasenbusch[h]); dirichlet_den.push_back(0);
+    light_den.push_back(hasenbusch[h]);
   }
   for(int h=0;h<n_hasenbusch;h++){
-    light_num.push_back(hasenbusch[h]); dirichlet_num.push_back(0);
+    light_num.push_back(hasenbusch[h]);
   }
-  light_num.push_back(pv_mass);  dirichlet_num.push_back(0);
+  light_num.push_back(pv_mass);

   std::vector<FermionAction *> Numerators;
   std::vector<FermionAction *> Denominators;
@@ -408,9 +399,7 @@ int main(int argc, char **argv) {
     std::cout << GridLogMessage
               << " 2f quotient Action ";
     std::cout << "det D("<<light_den[h]<<")";
-    if ( dirichlet_den[h] ) std::cout << "^dirichlet ";
     std::cout << "/ det D("<<light_num[h]<<")";
-    if ( dirichlet_num[h] ) std::cout << "^dirichlet ";
     std::cout << std::endl;

     FermionAction::ImplParams ParamsNum(boundary);
@@ -418,21 +407,11 @@ int main(int argc, char **argv) {
     FermionActionF::ImplParams ParamsDenF(boundary);
     FermionActionF::ImplParams ParamsNumF(boundary);
-    ParamsNum.dirichlet = NonDirichlet;
-    ParamsDen.dirichlet = NonDirichlet;
-    ParamsNum.partialDirichlet = 0;
-    ParamsDen.partialDirichlet = 0;
     Numerators.push_back  (new FermionAction(U,*FGrid,*FrbGrid,*GridPtr,*GridRBPtr,light_num[h],M5,b,c, ParamsNum));
     Denominators.push_back(new FermionAction(U,*FGrid,*FrbGrid,*GridPtr,*GridRBPtr,light_den[h],M5,b,c, ParamsDen));
-    ParamsDenF.dirichlet = ParamsDen.dirichlet;
-    ParamsDenF.partialDirichlet = ParamsDen.partialDirichlet;
     DenominatorsF.push_back(new FermionActionF(UF,*FGridF,*FrbGridF,*GridPtrF,*GridRBPtrF,light_den[h],M5,b,c, ParamsDenF));
-    ParamsNumF.dirichlet = ParamsNum.dirichlet;
-    ParamsNumF.partialDirichlet = ParamsNum.partialDirichlet;
     NumeratorsF.push_back  (new FermionActionF(UF,*FGridF,*FrbGridF,*GridPtrF,*GridRBPtrF,light_num[h],M5,b,c, ParamsNumF));

     LinOpD.push_back(new LinearOperatorD(*Denominators[h]));
@@ -469,7 +448,6 @@ int main(int argc, char **argv) {
   // Gauge action
   /////////////////////////////////////////////////////////////
   Level3.push_back(&GaugeAction);
-  //  TheHMC.TheAction.push_back(Level1);
   TheHMC.TheAction.push_back(Level2);
   TheHMC.TheAction.push_back(Level3);
   std::cout << GridLogMessage << " Action complete "<< std::endl;

View File

@@ -425,7 +425,7 @@ void Benchmark(int Ls, Coordinate Dirichlet)
   err = r_eo-result;
   n2e= norm2(err);
-  std::cout<<GridLogMessage << "norm diff "<< n2e<< " Line "<<__LINE__ <<std::endl;
+  std::cout<<GridLogMessage << "norm diff "<< n2e<<std::endl;
   assert(n2e<1.0e-4);

   pickCheckerboard(Even,src_e,err);

examples/socket_grid.cc (new file, 133 lines)
View File

@@ -0,0 +1,133 @@
#include <sys/socket.h>
#include <sys/un.h>
#include <unistd.h>
#include <stdio.h>
#include <err.h>
#include <fcntl.h>
#include <assert.h>
#include <string.h>
#include <stdlib.h>

static int sock;
static const char *sock_path_fmt = "/tmp/GridUnixSocket.%d";
static char sock_path[256];

class UnixSockets {
public:
  static void Open(int rank)
  {
    int errnum;
    sock = socket(AF_UNIX, SOCK_DGRAM, 0);  assert(sock>0);
    printf("allocated socket %d\n",sock);
    struct sockaddr_un sa_un = { 0 };
    sa_un.sun_family = AF_UNIX;
    snprintf(sa_un.sun_path, sizeof(sa_un.sun_path), sock_path_fmt, rank);
    unlink(sa_un.sun_path);
    if (bind(sock, (struct sockaddr *)&sa_un, sizeof(sa_un))) {
      perror("bind failure");
      exit(EXIT_FAILURE);
    }
    printf("bound socket %d to %s\n",sock,sa_un.sun_path);
  }

  static int RecvFileDescriptor(void)
  {
    int n;
    int fd;
    char buf[1];
    struct iovec iov;
    struct msghdr msg;
    struct cmsghdr *cmsg;
    char cms[CMSG_SPACE(sizeof(int))];

    iov.iov_base = buf;
    iov.iov_len = 1;

    memset(&msg, 0, sizeof msg);
    msg.msg_name = 0;
    msg.msg_namelen = 0;
    msg.msg_iov = &iov;
    msg.msg_iovlen = 1;

    msg.msg_control = (caddr_t)cms;
    msg.msg_controllen = sizeof cms;

    if ((n = recvmsg(sock, &msg, 0)) < 0) {
      perror("recvmsg failed");
      return -1;
    }
    if (n == 0) {
      perror("recvmsg returned 0");
      return -1;
    }

    cmsg = CMSG_FIRSTHDR(&msg);
    memmove(&fd, CMSG_DATA(cmsg), sizeof(int));
    printf("received fd %d from socket %d\n",fd,sock);
    return fd;
  }

  static void SendFileDescriptor(int fildes, int xmit_to_rank)
  {
    struct msghdr msg;
    struct iovec iov;
    struct cmsghdr *cmsg = NULL;
    char ctrl[CMSG_SPACE(sizeof(int))];
    char data = ' ';

    memset(&msg, 0, sizeof(struct msghdr));
    memset(ctrl, 0, CMSG_SPACE(sizeof(int)));
    iov.iov_base = &data;
    iov.iov_len = sizeof(data);

    sprintf(sock_path, sock_path_fmt, xmit_to_rank);
    printf("sending FD %d over socket %d to rank %d AF_UNIX path %s\n",fildes,sock,xmit_to_rank,sock_path); fflush(stdout);

    struct sockaddr_un sa_un = { 0 };
    sa_un.sun_family = AF_UNIX;
    snprintf(sa_un.sun_path, sizeof(sa_un.sun_path), sock_path_fmt, xmit_to_rank);

    msg.msg_name = (void *)&sa_un;
    msg.msg_namelen = sizeof(sa_un);
    msg.msg_iov = &iov;
    msg.msg_iovlen = 1;
    msg.msg_controllen = CMSG_SPACE(sizeof(int));
    msg.msg_control = ctrl;

    cmsg = CMSG_FIRSTHDR(&msg);
    cmsg->cmsg_level = SOL_SOCKET;
    cmsg->cmsg_type = SCM_RIGHTS;
    cmsg->cmsg_len = CMSG_LEN(sizeof(int));

    *((int *) CMSG_DATA(cmsg)) = fildes;

    if ( sendmsg(sock, &msg, 0) == -1 ) perror("sendmsg failed");
  };
};

int main(int argc, char **argv)
{
  int me = fork() ? 0 : 1;

  UnixSockets::Open(me);

  // need MPI barrier
  sleep(10);
  const char * message = "Hello, World\n";

  if ( me ) {
    int fd = open("foo",O_RDWR|O_CREAT,0666);
    if ( fd < 0 ) {
      perror("failed to open file");
      exit(EXIT_FAILURE);
    }
    // rank 1 sends to rank 0
    UnixSockets::SendFileDescriptor(fd,0);
    close(fd);
  } else {
    // rank 0 receives from rank 1
    int fd = UnixSockets::RecvFileDescriptor();
    write(fd,(const void *)message,strlen(message));
    close(fd);
  }
}
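The example is self-contained: the fork produces two "ranks", rank 1 opens ./foo and ships the open descriptor over the AF_UNIX datagram socket via SCM_RIGHTS, and rank 0 writes the greeting through the received descriptor. On a Linux box it should build and run with something like `g++ -o socket_grid examples/socket_grid.cc && ./socket_grid` (assumed invocation), leaving "Hello, World" in ./foo; the ten-second sleep stands in for the MPI barrier noted in the comment.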

View File

@@ -3,8 +3,14 @@ export https_proxy=http://proxy-chain.intel.com:911
 export LD_LIBRARY_PATH=$HOME/prereqs/lib/:$LD_LIBRARY_PATH

 module load intel-release
-source /opt/intel/oneapi/PVC_setup.sh
+module load intel-comp-rt/embargo-ci-neo
+
+#source /opt/intel/oneapi/PVC_setup.sh
 #source /opt/intel/oneapi/ATS_setup.sh
+#module load intel-nightly/20230331
+#module load intel-comp-rt/ci-neo-master/026093
+
+#module load intel/mpich
 module load intel/mpich/pvc45.3
 export PATH=~/ATS/pti-gpu/tools/onetrace/:$PATH

View File

@@ -476,7 +476,9 @@ int main (int argc, char ** argv)
   //  ForceTest<GimplTypesR>(BdyNf2eo,U,DDHMCFilter);

   //////////////////// One flavour boundary det ////////////////////
+/*
   RationalActionParams OFRp; // Up/down
+  int SP_iters = 3000;
   OFRp.lo       = 6.0e-5;
   OFRp.hi       = 90.0;
   OFRp.inv_pow  = 2;
@@ -489,7 +491,7 @@ int main (int argc, char ** argv)
   //  OFRp.degree   = 16;
   OFRp.precision= 80;
   OFRp.BoundsCheckFreq=0;
-/*
+*/
   OneFlavourRationalParams OFRp; // Up/down
   OFRp.lo       = 4.0e-5;
   OFRp.hi       = 90.0;
@@ -499,7 +501,6 @@ int main (int argc, char ** argv)
   OFRp.degree   = 18;
   OFRp.precision= 80;
   OFRp.BoundsCheckFreq=0;
-*/
   std::vector<RealD> ActionTolByPole({
       1.0e-7,1.0e-8,1.0e-8,1.0e-8,
       1.0e-8,1.0e-8,1.0e-8,1.0e-8,

View File

@@ -85,7 +85,7 @@ int main(int argc, char **argv) {
   TheHMC.Resources.AddObservable<PlaqObs>();
   //////////////////////////////////////////////

-  const int Ls      = 4;
+  const int Ls      = 8;
   Real beta         = 2.13;
   Real light_mass   = 0.01;
   Real strange_mass = 0.04;