mirror of https://github.com/paboyle/Grid.git
synced 2025-06-13 04:37:05 +01:00

Compare commits: 10 commits, eafc150034...feature/de

SHA1
8cf809e231
94019a922e
d6b2727f86
74a4f43946
1caf8b0f86
8fe429346f
b91fc1b6b4
6815e138b4
e29b97b3ea
ad2b699d2b
@@ -136,7 +136,7 @@ public:
for(int d=0;d<_ndimension;d++){
column.resize(_processors[d]);
column[0] = accum;
std::vector<CommsRequest_t> list;
std::vector<MpiCommsRequest_t> list;
for(int p=1;p<_processors[d];p++){
ShiftedRanks(d,p,source,dest);
SendToRecvFromBegin(list,

@@ -166,8 +166,8 @@ public:
////////////////////////////////////////////////////////////
// Face exchange, buffer swap in translational invariant way
////////////////////////////////////////////////////////////
void CommsComplete(std::vector<CommsRequest_t> &list);
void SendToRecvFromBegin(std::vector<CommsRequest_t> &list,
void CommsComplete(std::vector<MpiCommsRequest_t> &list);
void SendToRecvFromBegin(std::vector<MpiCommsRequest_t> &list,
void *xmit,
int dest,
void *recv,
@@ -186,6 +186,12 @@ public:
int recv_from_rank,int do_recv,
int bytes,int dir);

double StencilSendToRecvFromPrepare(std::vector<CommsRequest_t> &list,
void *xmit,
int xmit_to_rank,int do_xmit,
void *recv,
int recv_from_rank,int do_recv,
int xbytes,int rbytes,int dir);
double StencilSendToRecvFromBegin(std::vector<CommsRequest_t> &list,
void *xmit,
int xmit_to_rank,int do_xmit,
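The new Prepare entry point splits the stencil halo exchange into three phases. As a sketch only, not part of the diff (comm, the buffers, ranks and the always-on do-send/do-recv flags are placeholder names), the intended call sequence mirrors CartesianCommunicator::StencilSendToRecvFrom further down in this compare:

// Hedged sketch: three-phase exchange using the declarations above.
double HaloExchangeSketch(Grid::CartesianCommunicator &comm,
                          void *xmit,int to,void *recv,int from,
                          int xbytes,int rbytes,int dir)
{
  std::vector<Grid::CommsRequest_t> list;
  double off  = comm.StencilSendToRecvFromPrepare(list,xmit,to,1,recv,from,1,xbytes,rbytes,dir); // phase 1: post receives, stage D2H copies
  off        += comm.StencilSendToRecvFromBegin  (list,xmit,to,1,recv,from,1,xbytes,rbytes,dir); // phase 2: complete copies, post sends
  comm.StencilSendToRecvFromComplete(list,dir);                                                  // phase 3: MPI_Waitall, H2D copies, cleanup
  return off;
}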
@@ -317,7 +317,7 @@ void CartesianCommunicator::GlobalSumVector(double *d,int N)
assert(ierr==0);
}

void CartesianCommunicator::SendToRecvFromBegin(std::vector<CommsRequest_t> &list,
void CartesianCommunicator::SendToRecvFromBegin(std::vector<MpiCommsRequest_t> &list,
void *xmit,
int dest,
void *recv,

@@ -342,7 +342,7 @@ void CartesianCommunicator::SendToRecvFromBegin(std::vector<CommsRequest_t> &lis
assert(ierr==0);
list.push_back(xrq);
}
void CartesianCommunicator::CommsComplete(std::vector<CommsRequest_t> &list)
void CartesianCommunicator::CommsComplete(std::vector<MpiCommsRequest_t> &list)
{
int nreq=list.size();

@@ -361,7 +361,7 @@ void CartesianCommunicator::SendToRecvFrom(void *xmit,
int from,
int bytes)
{
std::vector<CommsRequest_t> reqs(0);
std::vector<MpiCommsRequest_t> reqs(0);
unsigned long xcrc = crc32(0L, Z_NULL, 0);
unsigned long rcrc = crc32(0L, Z_NULL, 0);
@@ -391,12 +391,224 @@ double CartesianCommunicator::StencilSendToRecvFrom( void *xmit,
int bytes,int dir)
{
std::vector<CommsRequest_t> list;
double offbytes = StencilSendToRecvFromBegin(list,xmit,dest,dox,recv,from,dor,bytes,bytes,dir);
double offbytes = StencilSendToRecvFromPrepare(list,xmit,dest,dox,recv,from,dor,bytes,bytes,dir);
offbytes += StencilSendToRecvFromBegin(list,xmit,dest,dox,recv,from,dor,bytes,bytes,dir);
StencilSendToRecvFromComplete(list,dir);
return offbytes;
}

#undef NVLINK_GET // Define to use get instead of put DMA

#ifdef ACCELERATOR_AWARE_MPI
double CartesianCommunicator::StencilSendToRecvFromPrepare(std::vector<CommsRequest_t> &list,
void *xmit,
int dest,int dox,
void *recv,
int from,int dor,
int xbytes,int rbytes,int dir)
{
return 0.0; // Do nothing -- no preparation required
}
double CartesianCommunicator::StencilSendToRecvFromBegin(std::vector<CommsRequest_t> &list,
void *xmit,
int dest,int dox,
void *recv,
int from,int dor,
int xbytes,int rbytes,int dir)
{
int ncomm =communicator_halo.size();
int commdir=dir%ncomm;

MPI_Request xrq;
MPI_Request rrq;

int ierr;
int gdest = ShmRanks[dest];
int gfrom = ShmRanks[from];
int gme = ShmRanks[_processor];

assert(dest != _processor);
assert(from != _processor);
assert(gme == ShmRank);
double off_node_bytes=0.0;
int tag;

if ( dor ) {
if ( (gfrom ==MPI_UNDEFINED) || Stencil_force_mpi ) {
tag= dir+from*32;
ierr=MPI_Irecv(recv, rbytes, MPI_CHAR,from,tag,communicator_halo[commdir],&rrq);
assert(ierr==0);
list.push_back(rrq);
off_node_bytes+=rbytes;
}
}

if (dox) {
if ( (gdest == MPI_UNDEFINED) || Stencil_force_mpi ) {
tag= dir+_processor*32;
ierr =MPI_Isend(xmit, xbytes, MPI_CHAR,dest,tag,communicator_halo[commdir],&xrq);
assert(ierr==0);
list.push_back(xrq);
off_node_bytes+=xbytes;
} else {
void *shm = (void *) this->ShmBufferTranslate(dest,recv);
assert(shm!=NULL);
acceleratorCopyDeviceToDeviceAsynch(xmit,shm,xbytes);
}
}
return off_node_bytes;
}

void CartesianCommunicator::StencilSendToRecvFromComplete(std::vector<CommsRequest_t> &list,int dir)
{
int nreq=list.size();

acceleratorCopySynchronise();

if (nreq==0) return;
std::vector<MPI_Status> status(nreq);
int ierr = MPI_Waitall(nreq,&list[0],&status[0]);
assert(ierr==0);
list.resize(0);
this->StencilBarrier();
}
#else /* NOT ... ACCELERATOR_AWARE_MPI */
///////////////////////////////////////////
// Pipeline mode through host memory
///////////////////////////////////////////
/*
 * In prepare (phase 1):
 * PHASE 1: (prepare)
 * - post MPI receive buffers asynch
 * - post device - host send buffer transfer asynch
 * PHASE 2: (Begin)
 * - complete all copies
 * - post MPI send asynch
 * - post device - device transfers
 * PHASE 3: (Complete)
 * - MPI_waitall
 * - host-device transfers
 *
 *********************************
 * NB could split this further:
 *--------------------------------
 * PHASE 1: (Prepare)
 * - post MPI receive buffers asynch
 * - post device - host send buffer transfer asynch
 * PHASE 2: (BeginInterNode)
 * - complete all copies
 * - post MPI send asynch
 * PHASE 3: (BeginIntraNode)
 * - post device - device transfers
 * PHASE 4: (Complete)
 * - MPI_waitall
 * - host-device transfers asynch
 * - (complete all copies)
 */
double CartesianCommunicator::StencilSendToRecvFromPrepare(std::vector<CommsRequest_t> &list,
void *xmit,
int dest,int dox,
void *recv,
int from,int dor,
int xbytes,int rbytes,int dir)
{
/*
 * Bring sequence from Stencil.h down to lower level.
 * Assume using XeLink is ok
 */
int ncomm =communicator_halo.size();
int commdir=dir%ncomm;

MPI_Request xrq;
MPI_Request rrq;

int ierr;
int gdest = ShmRanks[dest];
int gfrom = ShmRanks[from];
int gme = ShmRanks[_processor];

assert(dest != _processor);
assert(from != _processor);
assert(gme == ShmRank);
double off_node_bytes=0.0;
int tag;

void * host_recv = NULL;
void * host_xmit = NULL;

/*
 * PHASE 1: (Prepare)
 * - post MPI receive buffers asynch
 * - post device - host send buffer transfer asynch
 */

if ( dor ) {
if ( (gfrom ==MPI_UNDEFINED) || Stencil_force_mpi ) {
tag= dir+from*32;
host_recv = this->HostBufferMalloc(rbytes);
ierr=MPI_Irecv(host_recv, rbytes, MPI_CHAR,from,tag,communicator_halo[commdir],&rrq);
assert(ierr==0);
CommsRequest_t srq;
srq.PacketType = InterNodeRecv;
srq.bytes = rbytes;
srq.req = rrq;
srq.host_buf = host_recv;
srq.device_buf = recv;
list.push_back(srq);
off_node_bytes+=rbytes;
}
}

if (dox) {
if ( (gdest == MPI_UNDEFINED) || Stencil_force_mpi ) {
#undef DEVICE_TO_HOST_CONCURRENT // pipeline
#ifdef DEVICE_TO_HOST_CONCURRENT
tag= dir+_processor*32;

host_xmit = this->HostBufferMalloc(xbytes);
acceleratorCopyFromDeviceAsynch(xmit, host_xmit,xbytes); // Make this Asynch

// ierr =MPI_Isend(host_xmit, xbytes, MPI_CHAR,dest,tag,communicator_halo[commdir],&xrq);
// assert(ierr==0);
// off_node_bytes+=xbytes;

CommsRequest_t srq;
srq.PacketType = InterNodeXmit;
srq.bytes = xbytes;
// srq.req = xrq;
srq.host_buf = host_xmit;
srq.device_buf = xmit;
list.push_back(srq);
#else
tag= dir+_processor*32;

host_xmit = this->HostBufferMalloc(xbytes);
const int chunks=1;
for(int n=0;n<chunks;n++){
void * host_xmitc = (void *)( (uint64_t) host_xmit + n*xbytes/chunks);
void * xmitc = (void *)( (uint64_t) xmit + n*xbytes/chunks);
acceleratorCopyFromDeviceAsynch(xmitc, host_xmitc,xbytes/chunks); // Make this Asynch
}
acceleratorCopySynchronise(); // Complete all pending copy transfers

ierr =MPI_Isend(host_xmit, xbytes, MPI_CHAR,dest,tag,communicator_halo[commdir],&xrq);
assert(ierr==0);
off_node_bytes+=xbytes;

CommsRequest_t srq;
srq.PacketType = InterNodeXmit;
srq.bytes = xbytes;
srq.req = xrq;
srq.host_buf = host_xmit;
srq.device_buf = xmit;
list.push_back(srq);
#endif
}
}

return off_node_bytes;
}

double CartesianCommunicator::StencilSendToRecvFromBegin(std::vector<CommsRequest_t> &list,
void *xmit,
int dest,int dox,
@@ -421,54 +633,86 @@ double CartesianCommunicator::StencilSendToRecvFromBegin(std::vector<CommsReques
double off_node_bytes=0.0;
int tag;

if ( dor ) {
if ( (gfrom ==MPI_UNDEFINED) || Stencil_force_mpi ) {
tag= dir+from*32;
ierr=MPI_Irecv(recv, rbytes, MPI_CHAR,from,tag,communicator_halo[commdir],&rrq);
assert(ierr==0);
list.push_back(rrq);
off_node_bytes+=rbytes;
}
#ifdef NVLINK_GET
void *shm = (void *) this->ShmBufferTranslate(from,xmit);
assert(shm!=NULL);
acceleratorCopyDeviceToDeviceAsynch(shm,recv,rbytes);
#endif
}
void * host_xmit = NULL;

////////////////////////////////
// Receives already posted
// Copies already started
////////////////////////////////
/*
 * PHASE 2: (Begin)
 * - complete all copies
 * - post MPI send asynch
 */

// static int printed;
// if((printed<8) && this->IsBoss() ) {
// printf("dir %d doX %d doR %d Face size %ld %ld\n",dir,dox,dor,xbytes,rbytes);
// printed++;
// }

if (dox) {
// rcrc = crc32(rcrc,(unsigned char *)recv,bytes);

if ( (gdest == MPI_UNDEFINED) || Stencil_force_mpi ) {
#ifdef DEVICE_TO_HOST_CONCURRENT
tag= dir+_processor*32;
ierr =MPI_Isend(xmit, xbytes, MPI_CHAR,dest,tag,communicator_halo[commdir],&xrq);
// Find the send in the prepared list
int list_idx=-1;
for(int idx = 0; idx<list.size();idx++){

if ( (list[idx].device_buf==xmit)
&&(list[idx].PacketType==InterNodeXmit)
&&(list[idx].bytes==xbytes) ) {

list_idx = idx;
host_xmit = list[idx].host_buf;
}
}
assert(list_idx != -1); // found it
ierr =MPI_Isend(host_xmit, xbytes, MPI_CHAR,dest,tag,communicator_halo[commdir],&xrq);
assert(ierr==0);
list.push_back(xrq);
list[list_idx].req = xrq; // Update the MPI request in the list
off_node_bytes+=xbytes;
#endif
} else {
#ifndef NVLINK_GET
void *shm = (void *) this->ShmBufferTranslate(dest,recv);
assert(shm!=NULL);
acceleratorCopyDeviceToDeviceAsynch(xmit,shm,xbytes);
#endif

}
}

return off_node_bytes;
}
void CartesianCommunicator::StencilSendToRecvFromComplete(std::vector<CommsRequest_t> &list,int dir)
{
int nreq=list.size();

acceleratorCopySynchronise();

if (nreq==0) return;

std::vector<MPI_Status> status(nreq);
int ierr = MPI_Waitall(nreq,&list[0],&status[0]);
std::vector<MPI_Request> MpiRequests(nreq);

for(int r=0;r<nreq;r++){
MpiRequests[r] = list[r].req;
}

int ierr = MPI_Waitall(nreq,&MpiRequests[0],&status[0]);
assert(ierr==0);
list.resize(0);

for(int r=0;r<nreq;r++){
if ( list[r].PacketType==InterNodeRecv ) {
acceleratorCopyToDeviceAsynch(list[r].host_buf,list[r].device_buf,list[r].bytes);
}
}

acceleratorCopySynchronise(); // Complete all pending copy transfers
list.resize(0); // Delete the list
this->HostBufferFreeAll(); // Clean up the buffer allocs
this->StencilBarrier();
}
#endif
////////////////////////////////////////////
// END PIPELINE MODE / NO CUDA AWARE MPI
////////////////////////////////////////////

void CartesianCommunicator::StencilBarrier(void)
{
MPI_Barrier (ShmComm);
@@ -132,6 +132,15 @@ double CartesianCommunicator::StencilSendToRecvFrom( void *xmit,
{
return 2.0*bytes;
}
double CartesianCommunicator::StencilSendToRecvFromPrepare(std::vector<CommsRequest_t> &list,
void *xmit,
int xmit_to_rank,int dox,
void *recv,
int recv_from_rank,int dor,
int xbytes,int rbytes, int dir)
{
return xbytes+rbytes;
}
double CartesianCommunicator::StencilSendToRecvFromBegin(std::vector<CommsRequest_t> &list,
void *xmit,
int xmit_to_rank,int dox,
@@ -46,8 +46,22 @@ NAMESPACE_BEGIN(Grid);

#if defined (GRID_COMMS_MPI3)
typedef MPI_Comm Grid_MPI_Comm;
typedef MPI_Request MpiCommsRequest_t;
#ifdef ACCELERATOR_AWARE_MPI
typedef MPI_Request CommsRequest_t;
#else
enum PacketType_t { InterNodeXmit, InterNodeRecv, IntraNodeXmit, IntraNodeRecv };
typedef struct {
PacketType_t PacketType;
void *host_buf;
void *device_buf;
unsigned long bytes;
MpiCommsRequest_t req;
} CommsRequest_t;
#endif

#else
typedef int MpiCommsRequest_t;
typedef int CommsRequest_t;
typedef int Grid_MPI_Comm;
#endif
|
||||
#ifdef ACCELERATOR_AWARE_MPI
|
||||
#define GRID_SYCL_LEVEL_ZERO_IPC
|
||||
#define SHM_SOCKETS
|
||||
#else
|
||||
#ifdef HAVE_NUMAIF_H
|
||||
#warning " Using NUMAIF "
|
||||
#include <numaif.h>
|
||||
#endif
|
||||
#endif
|
||||
#include <syscall.h>
|
||||
#endif
|
||||
@ -537,7 +542,38 @@ void GlobalSharedMemory::SharedMemoryAllocate(uint64_t bytes, int flags)
|
||||
// Each MPI rank should allocate our own buffer
|
||||
///////////////////////////////////////////////////////////////////////////////////////////////////////////
|
||||
#ifndef ACCELERATOR_AWARE_MPI
|
||||
HostCommBuf= malloc(bytes);
|
||||
printf("Host buffer allocate for GPU non-aware MPI\n");
|
||||
#if 0
|
||||
HostCommBuf= acceleratorAllocHost(bytes);
|
||||
#else
|
||||
HostCommBuf= malloc(bytes); /// CHANGE THIS TO malloc_host
|
||||
#ifdef HAVE_NUMAIF_H
|
||||
#warning "Moving host buffers to specific NUMA domain"
|
||||
int numa;
|
||||
char *numa_name=(char *)getenv("MPI_BUF_NUMA");
|
||||
if(numa_name) {
|
||||
unsigned long page_size = sysconf(_SC_PAGESIZE);
|
||||
numa = atoi(numa_name);
|
||||
unsigned long page_count = bytes/page_size;
|
||||
std::vector<void *> pages(page_count);
|
||||
std::vector<int> nodes(page_count,numa);
|
||||
std::vector<int> status(page_count,-1);
|
||||
for(unsigned long p=0;p<page_count;p++){
|
||||
pages[p] =(void *) ((uint64_t) HostCommBuf + p*page_size);
|
||||
}
|
||||
int ret = move_pages(0,
|
||||
page_count,
|
||||
&pages[0],
|
||||
&nodes[0],
|
||||
&status[0],
|
||||
MPOL_MF_MOVE);
|
||||
printf("Host buffer move to numa domain %d : move_pages returned %d\n",numa,ret);
|
||||
if (ret) perror(" move_pages failed for reason:");
|
||||
}
|
||||
#endif
|
||||
acceleratorPin(HostCommBuf,bytes);
|
||||
#endif
|
||||
|
||||
#endif
|
||||
ShmCommBuf = acceleratorAllocDevice(bytes);
|
||||
if (ShmCommBuf == (void *)NULL ) {
|
||||
|
@ -467,8 +467,8 @@ public:
|
||||
send_buf.resize(buffer_size*2*depth);
|
||||
recv_buf.resize(buffer_size*2*depth);
|
||||
|
||||
std::vector<CommsRequest_t> fwd_req;
|
||||
std::vector<CommsRequest_t> bwd_req;
|
||||
std::vector<MpiCommsRequest_t> fwd_req;
|
||||
std::vector<MpiCommsRequest_t> bwd_req;
|
||||
|
||||
int words = buffer_size;
|
||||
int bytes = words * sizeof(vobj);
|
||||
|
@ -124,6 +124,11 @@ public:
|
||||
RealD _b;
|
||||
RealD _c;
|
||||
|
||||
// possible boost
|
||||
std::vector<ComplexD> qmu;
|
||||
void set_qmu(std::vector<ComplexD> _qmu) { qmu=_qmu; assert(qmu.size()==Nd);};
|
||||
void addQmu(const FermionField &in, FermionField &out, int dag);
|
||||
|
||||
// Cayley form Moebius (tanh and zolotarev)
|
||||
std::vector<Coeff_t> omega;
|
||||
std::vector<Coeff_t> bs; // S dependent coeffs
|
||||
|
@ -60,6 +60,50 @@ public:
|
||||
// virtual void Instantiatable(void)=0;
|
||||
virtual void Instantiatable(void) =0;
|
||||
|
||||
void FreePropagator(const FermionField &in,FermionField &out,RealD mass,std::vector<Complex> boundary, std::vector<double> twist)
|
||||
{
|
||||
std::cout << "Free Propagator for PartialFraction"<<std::endl;
|
||||
FermionField in_k(in.Grid());
|
||||
FermionField prop_k(in.Grid());
|
||||
|
||||
FFT theFFT((GridCartesian *) in.Grid());
|
||||
|
||||
//phase for boundary condition
|
||||
ComplexField coor(in.Grid());
|
||||
ComplexField ph(in.Grid()); ph = Zero();
|
||||
FermionField in_buf(in.Grid()); in_buf = Zero();
|
||||
typedef typename Simd::scalar_type Scalar;
|
||||
Scalar ci(0.0,1.0);
|
||||
assert(twist.size() == Nd);//check that twist is Nd
|
||||
assert(boundary.size() == Nd);//check that boundary conditions is Nd
|
||||
int shift = 0;
|
||||
for(unsigned int nu = 0; nu < Nd; nu++)
|
||||
{
|
||||
// Shift coordinate lattice index by 1 to account for 5th dimension.
|
||||
LatticeCoordinate(coor, nu + shift);
|
||||
double boundary_phase = ::acos(real(boundary[nu]));
|
||||
ph = ph + boundary_phase*coor*((1./(in.Grid()->_fdimensions[nu+shift])));
|
||||
//momenta for propagator shifted by twist+boundary
|
||||
twist[nu] = twist[nu] + boundary_phase/((2.0*M_PI));
|
||||
}
|
||||
in_buf = exp(ci*ph*(-1.0))*in;
|
||||
|
||||
theFFT.FFT_all_dim(in_k,in,FFT::forward);
|
||||
this->MomentumSpacePropagatorHw(prop_k,in_k,mass,twist);
|
||||
theFFT.FFT_all_dim(out,prop_k,FFT::backward);
|
||||
|
||||
//phase for boundary condition
|
||||
out = out * exp(ci*ph);
|
||||
};
|
||||
|
||||
virtual void FreePropagator(const FermionField &in,FermionField &out,RealD mass) {
|
||||
std::vector<double> twist(Nd,0.0); //default: periodic boundarys in all directions
|
||||
std::vector<Complex> boundary;
|
||||
for(int i=0;i<Nd;i++) boundary.push_back(1);//default: periodic boundary conditions
|
||||
FreePropagator(in,out,mass,boundary,twist);
|
||||
};
|
||||
|
||||
|
||||
// Efficient support for multigrid coarsening
|
||||
virtual void Mdir (const FermionField &in, FermionField &out,int dir,int disp);
|
||||
virtual void MdirAll(const FermionField &in, std::vector<FermionField> &out);
|
||||
|
@ -83,11 +83,70 @@ public:
|
||||
GridRedBlackCartesian &FourDimRedBlackGrid,
|
||||
RealD _mass,RealD M5,const ImplParams &p= ImplParams());
|
||||
|
||||
PartialFractionFermion5D(GaugeField &_Umu,
|
||||
GridCartesian &FiveDimGrid,
|
||||
GridRedBlackCartesian &FiveDimRedBlackGrid,
|
||||
GridCartesian &FourDimGrid,
|
||||
GridRedBlackCartesian &FourDimRedBlackGrid,
|
||||
RealD _mass,RealD M5,std::vector<RealD> &_qmu,const ImplParams &p= ImplParams());
|
||||
|
||||
void FreePropagator(const FermionField &in,FermionField &out,RealD mass,std::vector<Complex> boundary, std::vector<double> twist)
|
||||
{
|
||||
std::cout << "Free Propagator for PartialFraction"<<std::endl;
|
||||
FermionField in_k(in.Grid());
|
||||
FermionField prop_k(in.Grid());
|
||||
|
||||
FFT theFFT((GridCartesian *) in.Grid());
|
||||
|
||||
//phase for boundary condition
|
||||
ComplexField coor(in.Grid());
|
||||
ComplexField ph(in.Grid()); ph = Zero();
|
||||
FermionField in_buf(in.Grid()); in_buf = Zero();
|
||||
typedef typename Simd::scalar_type Scalar;
|
||||
Scalar ci(0.0,1.0);
|
||||
assert(twist.size() == Nd);//check that twist is Nd
|
||||
assert(boundary.size() == Nd);//check that boundary conditions is Nd
|
||||
int shift = 0;
|
||||
for(unsigned int nu = 0; nu < Nd; nu++)
|
||||
{
|
||||
// Shift coordinate lattice index by 1 to account for 5th dimension.
|
||||
LatticeCoordinate(coor, nu + shift);
|
||||
double boundary_phase = ::acos(real(boundary[nu]));
|
||||
ph = ph + boundary_phase*coor*((1./(in.Grid()->_fdimensions[nu+shift])));
|
||||
//momenta for propagator shifted by twist+boundary
|
||||
twist[nu] = twist[nu] + boundary_phase/((2.0*M_PI));
|
||||
}
|
||||
in_buf = exp(ci*ph*(-1.0))*in;
|
||||
|
||||
theFFT.FFT_all_dim(in_k,in,FFT::forward);
|
||||
if ( this->qmu.size() ){
|
||||
this->MomentumSpacePropagatorHwQ(prop_k,in_k,mass,twist,this->qmu);
|
||||
} else {
|
||||
this->MomentumSpacePropagatorHw(prop_k,in_k,mass,twist);
|
||||
}
|
||||
theFFT.FFT_all_dim(out,prop_k,FFT::backward);
|
||||
|
||||
//phase for boundary condition
|
||||
out = out * exp(ci*ph);
|
||||
};
|
||||
|
||||
virtual void FreePropagator(const FermionField &in,FermionField &out,RealD mass) {
|
||||
std::vector<double> twist(Nd,0.0); //default: periodic boundarys in all directions
|
||||
std::vector<Complex> boundary;
|
||||
for(int i=0;i<Nd;i++) boundary.push_back(1);//default: periodic boundary conditions
|
||||
FreePropagator(in,out,mass,boundary,twist);
|
||||
};
|
||||
|
||||
void set_qmu(std::vector<RealD> _qmu) { qmu=_qmu; assert(qmu.size()==Nd);};
|
||||
void addQmu(const FermionField &in, FermionField &out, int dag);
|
||||
|
||||
protected:
|
||||
|
||||
virtual void SetCoefficientsTanh(Approx::zolotarev_data *zdata,RealD scale);
|
||||
virtual void SetCoefficientsZolotarev(RealD zolo_hi,Approx::zolotarev_data *zdata);
|
||||
|
||||
std::vector<RealD> qmu;
|
||||
|
||||
// Part frac
|
||||
RealD mass;
|
||||
RealD dw_diag;
|
||||
|
@@ -109,6 +109,8 @@ public:
void MomentumSpacePropagatorHt_5d(FermionField &out,const FermionField &in,RealD mass,std::vector<double> twist) ;
void MomentumSpacePropagatorHt(FermionField &out,const FermionField &in,RealD mass,std::vector<double> twist) ;
void MomentumSpacePropagatorHw(FermionField &out,const FermionField &in,RealD mass,std::vector<double> twist) ;
void MomentumSpacePropagatorHwQ(FermionField &out,const FermionField &in,RealD mass,std::vector<double> twist,
std::vector<double> qmu) ;

// Implement hopping term non-hermitian hopping term; half cb or both
// Implement s-diagonal DW

@@ -117,6 +119,9 @@ public:
void DhopOE(const FermionField &in, FermionField &out,int dag);
void DhopEO(const FermionField &in, FermionField &out,int dag);

void DhopComms (const FermionField &in, FermionField &out);
void DhopCalc (const FermionField &in, FermionField &out,uint64_t *ids);

// add a DhopComm
// -- suboptimal interface will presently trigger multiple comms.
void DhopDir(const FermionField &in, FermionField &out,int dir,int disp);
@@ -57,6 +57,10 @@ public:
int Ls, int Nsite, const FermionField &in, FermionField &out,
int interior=1,int exterior=1) ;

static void DhopKernel(int Opt,StencilImpl &st, DoubledGaugeField &U, SiteHalfSpinor * buf,
int Ls, int Nsite, const FermionField &in, FermionField &out,
uint64_t *ids);

static void DhopDagKernel(int Opt,StencilImpl &st, DoubledGaugeField &U, SiteHalfSpinor * buf,
int Ls, int Nsite, const FermionField &in, FermionField &out,
int interior=1,int exterior=1) ;
@@ -48,7 +48,8 @@ CayleyFermion5D<Impl>::CayleyFermion5D(GaugeField &_Umu,
FourDimGrid,
FourDimRedBlackGrid,_M5,p),
mass_plus(_mass), mass_minus(_mass)
{
{
// qmu defaults to zero size;
}

///////////////////////////////////////////////////////////////
@@ -270,6 +271,34 @@ void CayleyFermion5D<Impl>::MeooeDag5D (const FermionField &psi, FermionField
M5Ddag(psi,psi,Din,lower,diag,upper);
}

template<class Impl>
void CayleyFermion5D<Impl>::addQmu(const FermionField &psi,FermionField &chi, int dag)
{
if ( qmu.size() ) {

Gamma::Algebra Gmu [] = {
Gamma::Algebra::GammaX,
Gamma::Algebra::GammaY,
Gamma::Algebra::GammaZ,
Gamma::Algebra::GammaT
};
std::vector<ComplexD> coeff(Nd);
ComplexD ci(0,1);

assert(qmu.size()==Nd);

for(int mu=0;mu<Nd;mu++){
coeff[mu] = ci*qmu[mu];
if ( dag ) coeff[mu] = conjugate(coeff[mu]);
}

chi = chi + Gamma(Gmu[0])*psi*coeff[0];
for(int mu=1;mu<Nd;mu++){
chi = chi + Gamma(Gmu[mu])*psi*coeff[mu];
}
}
}

template<class Impl>
void CayleyFermion5D<Impl>::M (const FermionField &psi, FermionField &chi)
{
@@ -277,8 +306,12 @@ void CayleyFermion5D<Impl>::M (const FermionField &psi, FermionField &chi)

// Assemble Din
Meooe5D(psi,Din);

this->DW(Din,chi,DaggerNo);

// add i q_mu gamma_mu here
addQmu(Din,chi,DaggerNo);

// ((b D_W + D_w hop terms +1) on s-diag
axpby(chi,1.0,1.0,chi,psi);

@@ -295,6 +328,9 @@ void CayleyFermion5D<Impl>::Mdag (const FermionField &psi, FermionField &chi)
FermionField Din(psi.Grid());
// Apply Dw
this->DW(psi,Din,DaggerYes);

// add -i conj(q_mu) gamma_mu here ... if qmu is real, gammm_5 hermitian, otherwise not.
addQmu(psi,Din,DaggerYes);

MeooeDag5D(Din,chi);
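As a compact restatement (my reading of the addQmu code and the comments above, not text from the commit), the optional boost adds an imaginary vector term to the operator, with the dagger picking up conjugated coefficients:

\[ D(q)\,\psi = D_W\,\psi + i\sum_{\mu=0}^{3} q_\mu\,\gamma_\mu\,\psi ,\qquad
   D(q)^{\dagger}\,\psi = D_W^{\dagger}\,\psi - i\sum_{\mu=0}^{3} q_\mu^{*}\,\gamma_\mu\,\psi , \]

so gamma_5 hermiticity survives only for real q_mu, which is what the comment in Mdag points out.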
@@ -42,13 +42,13 @@ template<class Impl>
void ContinuedFractionFermion5D<Impl>::SetCoefficientsZolotarev(RealD zolo_hi,Approx::zolotarev_data *zdata)
{
// How to check Ls matches??
// std::cout<<GridLogMessage << Ls << " Ls"<<std::endl;
// std::cout<<GridLogMessage << zdata->n << " - n"<<std::endl;
// std::cout<<GridLogMessage << zdata->da << " -da "<<std::endl;
// std::cout<<GridLogMessage << zdata->db << " -db"<<std::endl;
// std::cout<<GridLogMessage << zdata->dn << " -dn"<<std::endl;
// std::cout<<GridLogMessage << zdata->dd << " -dd"<<std::endl;
std::cout<<GridLogMessage << zdata->n << " - n"<<std::endl;
std::cout<<GridLogMessage << zdata->da << " -da "<<std::endl;
std::cout<<GridLogMessage << zdata->db << " -db"<<std::endl;
std::cout<<GridLogMessage << zdata->dn << " -dn"<<std::endl;
std::cout<<GridLogMessage << zdata->dd << " -dd"<<std::endl;
int Ls = this->Ls;
std::cout<<GridLogMessage << Ls << " Ls"<<std::endl;
assert(zdata->db==Ls);// Beta has Ls coeffs

R=(1+this->mass)/(1-this->mass);

@@ -320,7 +320,7 @@ ContinuedFractionFermion5D<Impl>::ContinuedFractionFermion5D(
int Ls = this->Ls;
conformable(solution5d.Grid(),this->FermionGrid());
conformable(exported4d.Grid(),this->GaugeGrid());
ExtractSlice(exported4d, solution5d, Ls-1, Ls-1);
ExtractSlice(exported4d, solution5d, Ls-1, 0);
}
template<class Impl>
void ContinuedFractionFermion5D<Impl>::ImportPhysicalFermionSource(const FermionField &input4d,FermionField &imported5d)

@@ -330,7 +330,7 @@ ContinuedFractionFermion5D<Impl>::ContinuedFractionFermion5D(
conformable(input4d.Grid() ,this->GaugeGrid());
FermionField tmp(this->FermionGrid());
tmp=Zero();
InsertSlice(input4d, tmp, Ls-1, Ls-1);
InsertSlice(input4d, tmp, Ls-1, 0);
tmp=Gamma(Gamma::Algebra::Gamma5)*tmp;
this->Dminus(tmp,imported5d);
}
@@ -237,7 +237,32 @@ void PartialFractionFermion5D<Impl>::M_internal(const FermionField &psi, Fermi
// ( 0 -sqrt(p_i)*amax | 2 R gamma_5 + p0/amax 2H
//

this->DW(psi,D,DaggerNo);
this->DW(psi,D,DaggerNo);

// DW - DW+iqslash
// (g5 Dw)^dag = g5 Dw
// (iqmu g5 gmu)^dag = (-i qmu gmu^dag g5^dag) = i qmu g5 gmu
if ( qmu.size() ) {

std::cout<< "Mat" << "qmu ("<<qmu[0]<<","<<qmu[1]<<","<<qmu[2]<<","<<qmu[3]<<")"<<std::endl;
assert(qmu.size()==Nd);

FermionField qslash_psi(psi.Grid());

Gamma::Algebra Gmu [] = {
Gamma::Algebra::GammaX,
Gamma::Algebra::GammaY,
Gamma::Algebra::GammaZ,
Gamma::Algebra::GammaT
};
qslash_psi = qmu[0]*(Gamma(Gmu[0])*psi);
for(int mu=1;mu<Nd;mu++){
qslash_psi = qslash_psi + qmu[mu]*(Gamma(Gmu[mu])*psi);
}
ComplexD ci(0.0,1.0);
qslash_psi = ci*qslash_psi ; // i qslash
D = D + qslash_psi;
}

int nblock=(Ls-1)/2;
for(int b=0;b<nblock;b++){
@@ -255,15 +280,55 @@ void PartialFractionFermion5D<Impl>::M_internal(const FermionField &psi, Fermi
}

{
// The 'conventional' Cayley overlap operator is
//
// Dov = (1+m)/2 + (1-m)/2 g5 sgn Hw
//
//
// With massless limit 1/2(1+g5 sgnHw)
//
// Luscher shows quite neatly that 1+g5 sgn Hw has tree level propagator i qslash +O(a^2)
//
// However, the conventional normalisation has both a leading order factor of 2 in Zq
// at tree level AND a mass dependent (1-m) that are convenient to absorb.
//
// In WilsonFermion5DImplementation.h, the tree level propagator for Hw is
//
// num = -i sin kmu gmu
//
// denom ( sqrt(sk^2 + (2shk^2 - 1)^2
// b_k = sk2 - M5;
//
// w_k = sqrt(sk + b_k*b_k);
//
// denom= ( w_k + b_k + mass*mass) ;
//
// denom= one/denom;
// out = num*denom;
//
// Chroma, and Grid define partial fraction via 4d operator
//
// Dpf = 2/(1-m) x Dov = (1+m)/(1-m) + g5 sgn Hw
//
// Now since:
//
// (1+m)/(1-m) = (1-m)/(1-m) + 2m/(1-m) = 1 + 2m/(1-m)
//
// This corresponds to a modified mass parameter
//
// It has an annoying
//
//
double R=(1+this->mass)/(1-this->mass);
//R g5 psi[Ls] + p[0] H
//R g5 psi[Ls] + p[0] Hw
ag5xpbg5y_ssp(chi,R*scale,psi,p[nblock]*scale/amax,D,Ls-1,Ls-1);

for(int b=0;b<nblock;b++){
int s = 2*b+1;
double pp = p[nblock-1-b];
axpby_ssp(chi,1.0,chi,-sqrt(amax*pp)*scale*sign,psi,Ls-1,s);
}

}

}
@@ -443,7 +508,7 @@ PartialFractionFermion5D<Impl>::PartialFractionFermion5D(GaugeField &_Umu,

{
int Ls = this->Ls;

qmu.resize(0);
assert((Ls&0x1)==1); // Odd Ls required
int nrational=Ls-1;

@@ -461,6 +526,22 @@ PartialFractionFermion5D<Impl>::PartialFractionFermion5D(GaugeField &_Umu,
Approx::zolotarev_free(zdata);

}
template<class Impl>
PartialFractionFermion5D<Impl>::PartialFractionFermion5D(GaugeField &_Umu,
GridCartesian &FiveDimGrid,
GridRedBlackCartesian &FiveDimRedBlackGrid,
GridCartesian &FourDimGrid,
GridRedBlackCartesian &FourDimRedBlackGrid,
RealD _mass,RealD M5,
std::vector<RealD> &_qmu,
const ImplParams &p)
: PartialFractionFermion5D<Impl>(_Umu,
FiveDimGrid,FiveDimRedBlackGrid,
FourDimGrid,FourDimRedBlackGrid,
_mass,M5,p)
{
qmu=_qmu;
}

NAMESPACE_END(Grid);
@@ -332,22 +332,18 @@ void WilsonFermion5D<Impl>::DhopInternalOverlappedComms(StencilImpl & st,

// std::cout << " WilsonFermion5D Communicate Begin " <<std::endl;
std::vector<std::vector<CommsRequest_t> > requests;
auto id=traceStart("Communicate overlapped");
st.CommunicateBegin(requests);

#if 1
/////////////////////////////
// Overlap with comms
/////////////////////////////
{
// std::cout << " WilsonFermion5D Comms merge " <<std::endl;
GRID_TRACE("MergeSHM");
st.CommsMergeSHM(compressor);// Could do this inside parallel region overlapped with comms
}

st.CommunicateBegin(requests);
st.CommsMergeSHM(compressor);// Could do this inside parallel region overlapped with comms
#endif

/////////////////////////////
// do the compute interior
/////////////////////////////
// std::cout << " WilsonFermion5D Interior " <<std::endl;
int Opt = WilsonKernelsStatic::Opt; // Why pass this. Kernels should know
if (dag == DaggerYes) {
GRID_TRACE("DhopDagInterior");

@@ -356,13 +352,23 @@ void WilsonFermion5D<Impl>::DhopInternalOverlappedComms(StencilImpl & st,
GRID_TRACE("DhopInterior");
Kernels::DhopKernel (Opt,st,U,st.CommBuf(),LLs,U.oSites(),in,out,1,0);
}

//ifdef GRID_ACCELERATED
#if 0
/////////////////////////////
// Overlap with comms -- on GPU the interior kernel call is nonblocking
/////////////////////////////
st.CommunicateBegin(requests);
st.CommsMergeSHM(compressor);// Could do this inside parallel region overlapped with comms
#endif

/////////////////////////////
// Complete comms
/////////////////////////////
// std::cout << " WilsonFermion5D Comms Complete " <<std::endl;
st.CommunicateComplete(requests);
traceStop(id);
// traceStop(id);

/////////////////////////////
// do the compute exterior
@@ -438,6 +444,29 @@ void WilsonFermion5D<Impl>::DhopEO(const FermionField &in, FermionField &out,int

DhopInternal(StencilOdd,UmuEven,in,out,dag);
}
template<class Impl>
void WilsonFermion5D<Impl>::DhopComms(const FermionField &in, FermionField &out)
{
int dag =0 ;
conformable(in.Grid(),FermionGrid()); // verifies full grid
conformable(in.Grid(),out.Grid());
out.Checkerboard() = in.Checkerboard();
Compressor compressor(dag);
Stencil.HaloExchangeOpt(in,compressor);
}
template<class Impl>
void WilsonFermion5D<Impl>::DhopCalc(const FermionField &in, FermionField &out,uint64_t *ids)
{
conformable(in.Grid(),FermionGrid()); // verifies full grid
conformable(in.Grid(),out.Grid());

out.Checkerboard() = in.Checkerboard();

int LLs = in.Grid()->_rdimensions[0];
int Opt = WilsonKernelsStatic::Opt;
Kernels::DhopKernel(Opt,Stencil,Umu,Stencil.CommBuf(),LLs,Umu.oSites(),in,out,ids);
}

template<class Impl>
void WilsonFermion5D<Impl>::Dhop(const FermionField &in, FermionField &out,int dag)
{
|
||||
|
||||
template<class Impl>
|
||||
void WilsonFermion5D<Impl>::MomentumSpacePropagatorHw(FermionField &out,const FermionField &in,RealD mass,std::vector<double> twist)
|
||||
{
|
||||
std::vector<double> empty_q(Nd,0.0);
|
||||
MomentumSpacePropagatorHwQ(out,in,mass,twist,empty_q);
|
||||
}
|
||||
template<class Impl>
|
||||
void WilsonFermion5D<Impl>::MomentumSpacePropagatorHwQ(FermionField &out,const FermionField &in,
|
||||
RealD mass,
|
||||
std::vector<double> twist,
|
||||
std::vector<double> qmu)
|
||||
{
|
||||
Gamma::Algebra Gmu [] = {
|
||||
Gamma::Algebra::GammaX,
|
||||
@ -755,6 +793,7 @@ void WilsonFermion5D<Impl>::MomentumSpacePropagatorHw(FermionField &out,const Fe
|
||||
typedef typename FermionField::scalar_type ScalComplex;
|
||||
|
||||
typedef Lattice<iSinglet<vector_type> > LatComplex;
|
||||
typedef iSpinMatrix<ScalComplex> SpinMat;
|
||||
|
||||
|
||||
Coordinate latt_size = _grid->_fdimensions;
|
||||
@ -772,8 +811,10 @@ void WilsonFermion5D<Impl>::MomentumSpacePropagatorHw(FermionField &out,const Fe
|
||||
LatComplex kmu(_grid);
|
||||
ScalComplex ci(0.0,1.0);
|
||||
|
||||
std::cout<< "Feynman Rule" << "qmu ("<<qmu[0]<<","<<qmu[1]<<","<<qmu[2]<<","<<qmu[3]<<")"<<std::endl;
|
||||
|
||||
for(int mu=0;mu<Nd;mu++) {
|
||||
|
||||
|
||||
LatticeCoordinate(kmu,mu);
|
||||
|
||||
RealD TwoPiL = M_PI * 2.0/ latt_size[mu];
|
||||
@@ -782,9 +823,18 @@ void WilsonFermion5D<Impl>::MomentumSpacePropagatorHw(FermionField &out,const Fe
kmu = kmu + TwoPiL * one * twist[mu];//momentum for twisted boundary conditions

sk2 = sk2 + 2.0*sin(kmu*0.5)*sin(kmu*0.5);
sk = sk + sin(kmu)*sin(kmu);

num = num - sin(kmu)*ci*(Gamma(Gmu[mu])*in);
sk = sk + (sin(kmu)+qmu[mu])*(sin(kmu)+qmu[mu]);

// Terms for boosted Fermion
// 1/2 [ -i gamma.(sin p + q ) ]
// [ --------------------- + 1 ]
// [ wq + b ]
//
// wq = sqrt( (sinp+q)^2 + b^2 )
//

num = num - (sin(kmu)+qmu[mu])*ci*(Gamma(Gmu[mu])*in);

}
num = num + mass * in ;
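Reading MomentumSpacePropagatorHwQ together with the comment block above (this summary is mine, not part of the commit): with \(\bar s_\mu(k) = \sin k_\mu + q_\mu\), \(b(k) = 2\sum_\mu \sin^2(k_\mu/2) - M_5\) and \(w(k) = \sqrt{\sum_\mu \bar s_\mu^2 + b^2}\), the boosted tree-level Hw propagator evaluated here acts as

\[ \mathrm{out}(k) \;=\; \frac{-i\sum_\mu \gamma_\mu\,\bar s_\mu(k) \;+\; m}{\,w(k) + b(k) + m^2\,}\;\mathrm{in}(k) , \]

which reduces to the unboosted Hw expression when q_mu = 0 (the empty_q path taken by MomentumSpacePropagatorHw).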
@@ -411,6 +411,46 @@ void WilsonKernels<Impl>::DhopDirKernel( StencilImpl &st, DoubledGaugeField &U,S
#undef LoopBody
}

#ifdef GRID_SYCL
extern "C" {
ulong SYCL_EXTERNAL __attribute__((overloadable)) intel_get_cycle_counter( void );
uint SYCL_EXTERNAL __attribute__((overloadable)) intel_get_active_channel_mask( void );
uint SYCL_EXTERNAL __attribute__((overloadable)) intel_get_grf_register( uint reg );
uint SYCL_EXTERNAL __attribute__((overloadable)) intel_get_flag_register( uint flag );
uint SYCL_EXTERNAL __attribute__((overloadable)) intel_get_control_register( uint reg );
uint SYCL_EXTERNAL __attribute__((overloadable)) intel_get_hw_thread_id( void );
uint SYCL_EXTERNAL __attribute__((overloadable)) intel_get_slice_id( void );
uint SYCL_EXTERNAL __attribute__((overloadable)) intel_get_subslice_id( void );
uint SYCL_EXTERNAL __attribute__((overloadable)) intel_get_eu_id( void );
uint SYCL_EXTERNAL __attribute__((overloadable)) intel_get_eu_thread_id( void );
void SYCL_EXTERNAL __attribute__((overloadable)) intel_eu_thread_pause( uint value );
}
#ifdef GRID_SIMT
#define MAKE_ID(A) (intel_get_eu_id()<<16)|(intel_get_slice_id()<<8)|(intel_get_subslice_id())
#else
#define MAKE_ID(A) (0)
#endif

#else

#define MAKE_ID(A) (0)

#endif

#define KERNEL_CALL_ID(A) \
const uint64_t NN = Nsite*Ls; \
accelerator_forNB( ss, NN, Simd::Nsimd(), { \
int sF = ss; \
int sU = ss/Ls; \
WilsonKernels<Impl>::A(st_v,U_v,buf,sF,sU,in_v,out_v); \
const int Nsimd = SiteHalfSpinor::Nsimd(); \
const int lane=acceleratorSIMTlane(Nsimd); \
int idx=sF*Nsimd+lane; \
uint64_t id = MAKE_ID(); \
ids[idx]=id; \
}); \
accelerator_barrier();

#define KERNEL_CALLNB(A) \
const uint64_t NN = Nsite*Ls; \
@@ -418,7 +458,7 @@ void WilsonKernels<Impl>::DhopDirKernel( StencilImpl &st, DoubledGaugeField &U,S
int sF = ss; \
int sU = ss/Ls; \
WilsonKernels<Impl>::A(st_v,U_v,buf,sF,sU,in_v,out_v); \
});
});

#define KERNEL_CALL(A) KERNEL_CALLNB(A); accelerator_barrier();

@@ -451,6 +491,8 @@ void WilsonKernels<Impl>::DhopDirKernel( StencilImpl &st, DoubledGaugeField &U,S
WilsonKernels<Impl>::A(st_v,U_v,buf,sF,sU,Ls,1,in_v,out_v); \
});}

template <class Impl>
void WilsonKernels<Impl>::DhopKernel(int Opt,StencilImpl &st, DoubledGaugeField &U, SiteHalfSpinor * buf,
int Ls, int Nsite, const FermionField &in, FermionField &out,

@@ -485,6 +527,18 @@ void WilsonKernels<Impl>::DhopKernel(int Opt,StencilImpl &st, DoubledGaugeField
}
assert(0 && " Kernel optimisation case not covered ");
}

template <class Impl>
void WilsonKernels<Impl>::DhopKernel(int Opt,StencilImpl &st, DoubledGaugeField &U, SiteHalfSpinor * buf,
int Ls, int Nsite, const FermionField &in, FermionField &out,
uint64_t *ids)
{
autoView(U_v , U,AcceleratorRead);
autoView(in_v , in,AcceleratorRead);
autoView(out_v,out,AcceleratorWrite);
autoView(st_v , st,AcceleratorRead);
KERNEL_CALL_ID(GenericDhopSite);
}
template <class Impl>
void WilsonKernels<Impl>::DhopDagKernel(int Opt,StencilImpl &st, DoubledGaugeField &U, SiteHalfSpinor * buf,
int Ls, int Nsite, const FermionField &in, FermionField &out,
@@ -368,6 +368,15 @@ public:
// accelerator_barrier(); // All kernels should ALREADY be complete
// _grid->StencilBarrier(); // Everyone is here, so noone running slow and still using receive buffer
// But the HaloGather had a barrier too.
for(int i=0;i<Packets.size();i++){
_grid->StencilSendToRecvFromPrepare(MpiReqs,
Packets[i].send_buf,
Packets[i].to_rank,Packets[i].do_send,
Packets[i].recv_buf,
Packets[i].from_rank,Packets[i].do_recv,
Packets[i].xbytes,Packets[i].rbytes,i);
}
acceleratorCopySynchronise();
for(int i=0;i<Packets.size();i++){
_grid->StencilSendToRecvFromBegin(MpiReqs,
Packets[i].send_buf,

@@ -393,8 +402,6 @@ public:
else DslashLogFull();
// acceleratorCopySynchronise();// is in the StencilSendToRecvFromComplete
// accelerator_barrier();
_grid->StencilBarrier();
// run any checksums
for(int i=0;i<Packets.size();i++){
if ( Packets[i].do_recv )
FlightRecorder::recvLog(Packets[i].recv_buf,Packets[i].rbytes,Packets[i].from_rank);
@@ -209,6 +209,17 @@ void Lambda6Apply(uint64_t num1, uint64_t num2, uint64_t num3,
} \
}

inline void *acceleratorAllocHost(size_t bytes)
{
void *ptr=NULL;
auto err = cudaMallocHost((void **)&ptr,bytes);
if( err != cudaSuccess ) {
ptr = (void *) NULL;
printf(" cudaMallocHost failed for %d %s \n",bytes,cudaGetErrorString(err));
assert(0);
}
return ptr;
}
inline void *acceleratorAllocShared(size_t bytes)
{
void *ptr=NULL;

@@ -230,8 +241,10 @@ inline void *acceleratorAllocDevice(size_t bytes)
}
return ptr;
};

inline void acceleratorFreeShared(void *ptr){ cudaFree(ptr);};
inline void acceleratorFreeDevice(void *ptr){ cudaFree(ptr);};
inline void acceleratorFreeHost(void *ptr){ cudaFree(ptr);};
inline void acceleratorCopyToDevice(void *from,void *to,size_t bytes) { cudaMemcpy(to,from,bytes, cudaMemcpyHostToDevice);}
inline void acceleratorCopyFromDevice(void *from,void *to,size_t bytes){ cudaMemcpy(to,from,bytes, cudaMemcpyDeviceToHost);}
inline void acceleratorCopyToDeviceAsync(void *from, void *to, size_t bytes, cudaStream_t stream = copyStream) { cudaMemcpyAsync(to,from,bytes, cudaMemcpyHostToDevice, stream);}
@@ -322,12 +335,17 @@ accelerator_inline int acceleratorSIMTlane(int Nsimd) {
#define accelerator_barrier(dummy) { theGridAccelerator->wait(); }

inline void *acceleratorAllocShared(size_t bytes){ return malloc_shared(bytes,*theGridAccelerator);};
inline void *acceleratorAllocHost(size_t bytes) { return malloc_host(bytes,*theGridAccelerator);};
inline void *acceleratorAllocDevice(size_t bytes){ return malloc_device(bytes,*theGridAccelerator);};
inline void acceleratorFreeHost(void *ptr){free(ptr,*theGridAccelerator);};
inline void acceleratorFreeShared(void *ptr){free(ptr,*theGridAccelerator);};
inline void acceleratorFreeDevice(void *ptr){free(ptr,*theGridAccelerator);};

inline void acceleratorCopySynchronise(void) { theCopyAccelerator->wait(); }

inline void acceleratorCopyDeviceToDeviceAsynch(void *from,void *to,size_t bytes) { theCopyAccelerator->memcpy(to,from,bytes);}
inline void acceleratorCopyToDeviceAsynch(void *from,void *to,size_t bytes) { theCopyAccelerator->memcpy(to,from,bytes); }
inline void acceleratorCopyFromDeviceAsynch(void *from,void *to,size_t bytes){ theCopyAccelerator->memcpy(to,from,bytes); }
inline void acceleratorCopyToDevice(void *from,void *to,size_t bytes) { theCopyAccelerator->memcpy(to,from,bytes); theCopyAccelerator->wait();}
inline void acceleratorCopyFromDevice(void *from,void *to,size_t bytes){ theCopyAccelerator->memcpy(to,from,bytes); theCopyAccelerator->wait();}
inline void acceleratorMemSet(void *base,int value,size_t bytes) { theCopyAccelerator->memset(base,value,bytes); theCopyAccelerator->wait();}
@@ -438,6 +456,16 @@ void LambdaApply(uint64_t numx, uint64_t numy, uint64_t numz, lambda Lambda)
} \
}

inline void *acceleratorAllocHost(size_t bytes)
{
void *ptr=NULL;
auto err = hipMallocHost((void **)&ptr,bytes);
if( err != hipSuccess ) {
ptr = (void *) NULL;
fprintf(stderr," hipMallocManaged failed for %ld %s \n",bytes,hipGetErrorString(err)); fflush(stderr);
}
return ptr;
};
inline void *acceleratorAllocShared(size_t bytes)
{
void *ptr=NULL;

@@ -461,12 +489,12 @@ inline void *acceleratorAllocDevice(size_t bytes)
return ptr;
};

inline void acceleratorFreeHost(void *ptr){ auto discard=hipFree(ptr);};
inline void acceleratorFreeShared(void *ptr){ auto discard=hipFree(ptr);};
inline void acceleratorFreeDevice(void *ptr){ auto discard=hipFree(ptr);};
inline void acceleratorCopyToDevice(void *from,void *to,size_t bytes) { auto discard=hipMemcpy(to,from,bytes, hipMemcpyHostToDevice);}
inline void acceleratorCopyFromDevice(void *from,void *to,size_t bytes){ auto discard=hipMemcpy(to,from,bytes, hipMemcpyDeviceToHost);}
//inline void acceleratorCopyDeviceToDeviceAsynch(void *from,void *to,size_t bytes) { hipMemcpy(to,from,bytes, hipMemcpyDeviceToDevice);}
//inline void acceleratorCopySynchronise(void) { }

inline void acceleratorMemSet(void *base,int value,size_t bytes) { auto discard=hipMemset(base,value,bytes);}

inline void acceleratorCopyDeviceToDeviceAsynch(void *from,void *to,size_t bytes) // Asynch
@@ -483,6 +511,13 @@ inline void acceleratorCopySynchronise(void) { auto discard=hipStreamSynchronize

#endif

inline void acceleratorPin(void *ptr,unsigned long bytes)
{
#ifdef GRID_SYCL
sycl::ext::oneapi::experimental::prepare_for_device_copy(ptr,bytes,theCopyAccelerator->get_context());
#endif
}

//////////////////////////////////////////////
// Common on all GPU targets
//////////////////////////////////////////////

@@ -537,8 +572,10 @@ inline void acceleratorCopySynchronise(void) {};
inline int acceleratorIsCommunicable(void *ptr){ return 1; }
inline void acceleratorMemSet(void *base,int value,size_t bytes) { memset(base,value,bytes);}
#ifdef HAVE_MM_MALLOC_H
inline void *acceleratorAllocHost(size_t bytes){return _mm_malloc(bytes,GRID_ALLOC_ALIGN);};
inline void *acceleratorAllocShared(size_t bytes){return _mm_malloc(bytes,GRID_ALLOC_ALIGN);};
inline void *acceleratorAllocDevice(size_t bytes){return _mm_malloc(bytes,GRID_ALLOC_ALIGN);};
inline void acceleratorFreeHost(void *ptr){_mm_free(ptr);};
inline void acceleratorFreeShared(void *ptr){_mm_free(ptr);};
inline void acceleratorFreeDevice(void *ptr){_mm_free(ptr);};
#else
@@ -1,5 +1,5 @@
# additional include paths necessary to compile the C++ library
SUBDIRS = Grid HMC benchmarks tests examples
SUBDIRS = Grid benchmarks tests examples HMC

include $(top_srcdir)/doxygen.inc

@@ -52,7 +52,7 @@ int main (int argc, char ** argv)

int threads = GridThread::GetThreads();

int Ls=16;
int Ls=8;
for(int i=0;i<argc;i++) {
if(std::string(argv[i]) == "-Ls"){
std::stringstream ss(argv[i+1]); ss >> Ls;

configure.ac (15 lines changed)
@@ -72,6 +72,7 @@ AC_CHECK_HEADERS(malloc/malloc.h)
AC_CHECK_HEADERS(malloc.h)
AC_CHECK_HEADERS(endian.h)
AC_CHECK_HEADERS(execinfo.h)
AC_CHECK_HEADERS(numaif.h)
AC_CHECK_DECLS([ntohll],[], [], [[#include <arpa/inet.h>]])
AC_CHECK_DECLS([be64toh],[], [], [[#include <arpa/inet.h>]])

@@ -240,6 +241,20 @@ case ${ac_SFW_FP16} in
esac

############### MPI BOUNCE TO HOST
AC_ARG_ENABLE([accelerator-aware-mpi],
[AS_HELP_STRING([--enable-accelerator-aware-mpi=yes|no],[run mpi transfers from device])],
[ac_ACCELERATOR_AWARE_MPI=${enable_accelerator_aware_mpi}], [ac_ACCELERATOR_AWARE_MPI=yes])

# Force accelerator CSHIFT now
AC_DEFINE([ACCELERATOR_CSHIFT],[1],[ Cshift runs on device])

case ${ac_ACCELERATOR_AWARE_MPI} in
yes)
AC_DEFINE([ACCELERATOR_AWARE_MPI],[1],[ Stencil can use device pointers]);;
*);;
esac

############### SYCL/CUDA/HIP/none
AC_ARG_ENABLE([accelerator],
[AS_HELP_STRING([--enable-accelerator=cuda|sycl|hip|none],[enable none,cuda,sycl,hip acceleration])],
@@ -1,6 +1,7 @@
#!/bin/bash

#PBS -q EarlyAppAccess
##PBS -q EarlyAppAccess
#PBS -q debug
#PBS -l select=1
#PBS -l walltime=00:20:00
#PBS -A LatticeQCD_aesp_CNDA

@@ -12,27 +13,24 @@ source ../sourceme.sh
cp $PBS_NODEFILE nodefile

export OMP_NUM_THREADS=4
export MPIR_CVAR_CH4_OFI_ENABLE_GPU_PIPELINE=1
unset MPIR_CVAR_CH4_OFI_GPU_PIPELINE_D2H_ENGINE_TYPE
unset MPIR_CVAR_CH4_OFI_GPU_PIPELINE_H2D_ENGINE_TYPE
unset MPIR_CVAR_GPU_USE_IMMEDIATE_COMMAND_LIST
export MPICH_OFI_NIC_POLICY=GPU

#export MPIR_CVAR_CH4_OFI_ENABLE_GPU_PIPELINE=1
#unset MPIR_CVAR_CH4_OFI_GPU_PIPELINE_D2H_ENGINE_TYPE
#unset MPIR_CVAR_CH4_OFI_GPU_PIPELINE_H2D_ENGINE_TYPE
#unset MPIR_CVAR_GPU_USE_IMMEDIATE_COMMAND_LIST
#export MPIR_CVAR_CH4_OFI_GPU_PIPELINE_D2H_ENGINE_TYPE=0
#export MPIR_CVAR_CH4_OFI_GPU_PIPELINE_H2D_ENGINE_TYPE=0
#export MPIR_CVAR_GPU_USE_IMMEDIATE_COMMAND_LIST=1
export MPIR_CVAR_CH4_OFI_GPU_PIPELINE_BUFFER_SZ=1048576
export MPIR_CVAR_CH4_OFI_GPU_PIPELINE_THRESHOLD=131072
export MPIR_CVAR_CH4_OFI_GPU_PIPELINE_NUM_BUFFERS_PER_CHUNK=16
export MPIR_CVAR_CH4_OFI_GPU_PIPELINE_MAX_NUM_BUFFERS=16
export MPICH_OFI_NIC_POLICY=GPU
#export MPIR_CVAR_CH4_OFI_GPU_PIPELINE_BUFFER_SZ=1048576
#export MPIR_CVAR_CH4_OFI_GPU_PIPELINE_THRESHOLD=131072
#export MPIR_CVAR_CH4_OFI_GPU_PIPELINE_NUM_BUFFERS_PER_CHUNK=16
#export MPIR_CVAR_CH4_OFI_GPU_PIPELINE_MAX_NUM_BUFFERS=16

CMD="mpiexec -np 12 -ppn 12 -envall \
./Benchmark_dwf_fp32 --mpi 2.1.2.3 --grid 32.32.64.48 \
--shm-mpi 1 --shm 2048 --device-mem 32000 --accelerator-threads 32 --debug-signals"
./gpu_tile.sh ./Benchmark_dwf_fp32 --mpi 2.1.2.3 --grid 32.32.64.96 \
--shm-mpi 0 --shm 2048 --device-mem 32000 --accelerator-threads 8 "

#for f in 1 2 3 4 5 6 7 8
for f in 1
do
echo $CMD
$CMD | tee 1node.32.32.64.48.dwf.hbm.$f
done
$CMD
@@ -1,58 +1,48 @@
#!/bin/bash

#PBS -q EarlyAppAccess
##PBS -q EarlyAppAccess
#PBS -q debug
#PBS -l select=2
#PBS -l walltime=00:20:00
#PBS -A LatticeQCD_aesp_CNDA

#export OMP_PROC_BIND=spread
#unset OMP_PLACES

cd $PBS_O_WORKDIR

source ../sourceme.sh
#module load pti-gpu

cp $PBS_NODEFILE nodefile

export OMP_NUM_THREADS=4
export MPIR_CVAR_CH4_OFI_ENABLE_GPU_PIPELINE=1
export MPICH_OFI_NIC_POLICY=GPU

#export MPIR_CVAR_CH4_OFI_ENABLE_GPU_PIPELINE=1
#unset MPIR_CVAR_CH4_OFI_GPU_PIPELINE_D2H_ENGINE_TYPE
#unset MPIR_CVAR_CH4_OFI_GPU_PIPELINE_H2D_ENGINE_TYPE
#unset MPIR_CVAR_GPU_USE_IMMEDIATE_COMMAND_LIST
export MPIR_CVAR_CH4_OFI_GPU_PIPELINE_D2H_ENGINE_TYPE=0
export MPIR_CVAR_CH4_OFI_GPU_PIPELINE_H2D_ENGINE_TYPE=0
export MPIR_CVAR_GPU_USE_IMMEDIATE_COMMAND_LIST=1
export MPIR_CVAR_CH4_OFI_GPU_PIPELINE_BUFFER_SZ=1048576
export MPIR_CVAR_CH4_OFI_GPU_PIPELINE_THRESHOLD=131072
export MPIR_CVAR_CH4_OFI_GPU_PIPELINE_NUM_BUFFERS_PER_CHUNK=16
export MPIR_CVAR_CH4_OFI_GPU_PIPELINE_MAX_NUM_BUFFERS=16
export MPICH_OFI_NIC_POLICY=GPU
#export MPIR_CVAR_CH4_OFI_GPU_PIPELINE_D2H_ENGINE_TYPE=0
#export MPIR_CVAR_CH4_OFI_GPU_PIPELINE_H2D_ENGINE_TYPE=0
#export MPIR_CVAR_GPU_USE_IMMEDIATE_COMMAND_LIST=1
#export MPIR_CVAR_CH4_OFI_GPU_PIPELINE_BUFFER_SZ=1048576
#export MPIR_CVAR_CH4_OFI_GPU_PIPELINE_THRESHOLD=131072
#export MPIR_CVAR_CH4_OFI_GPU_PIPELINE_NUM_BUFFERS_PER_CHUNK=16
#export MPIR_CVAR_CH4_OFI_GPU_PIPELINE_MAX_NUM_BUFFERS=16

# 12 ppn, 2 nodes, 24 ranks
#
CMD="mpiexec -np 24 -ppn 12 -envall \
./gpu_tile.sh \
./Benchmark_comms_host_device --mpi 2.2.2.3 --grid 24.32.32.24 \
--shm-mpi 0 --shm 2048 --device-mem 32000 --accelerator-threads 32"
#$CMD | tee 2node.comms.hbm
# Local vol 16.16.16.32
#

#VOL=32.64.64.96

CMD="mpiexec -np 24 -ppn 12 -envall \
./Benchmark_dwf_fp32 --mpi 2.2.2.3 --grid 32.32.64.48 \
--shm-mpi 1 --shm 2048 --device-mem 32000 --accelerator-threads 32 --comms-overlap --debug-signals"

#for f in 1 2 3 4 5 6 7 8
for f in 1
for VOL in 32.32.32.96 32.64.64.96
do
for AT in 32
do
CMD="mpiexec -np 24 -ppn 12 -envall \
./gpu_tile.sh ./Benchmark_dwf_fp32 --mpi 2.2.2.3 --grid $VOL \
--shm-mpi 0 --shm 2048 --device-mem 32000 --accelerator-threads $AT --comms-overlap "

echo $CMD
$CMD | tee 2node.32.32.64.48.dwf.hbm.$f
$CMD
done
done

CMD="mpiexec -np 24 -ppn 12 -envall \
./gpu_tile.sh \
./Benchmark_dwf_fp32 --mpi 2.2.2.3 --grid 64.64.64.96 \
--shm-mpi 0 --shm 2048 --device-mem 32000 --accelerator-threads 32 --comms-overlap"
#$CMD | tee 2node.64.64.64.96.dwf.hbm
@ -4,10 +4,12 @@
#export NUMA_MAP=(0 0 1 1 0 0 1 1 0 0 1 1);
#export GPU_MAP=(0.0 0.1 3.0 3.1 1.0 1.1 4.0 4.1 2.0 2.1 5.0 5.1)

export NUMA_MAP=(0 0 0 0 0 0 1 1 1 1 1 1 );
export NUMA_PMAP=(0 0 0 1 1 1 0 0 0 1 1 1 );
export NUMA_HMAP=(2 2 2 3 3 3 3 2 2 2 2 3 3 3 );
export GPU_MAP=(0.0 1.0 2.0 3.0 4.0 5.0 0.1 1.1 2.1 3.1 4.1 5.1 )

export NUMA=${NUMA_MAP[$PALS_LOCAL_RANKID]}
export NUMAP=${NUMA_PMAP[$PALS_LOCAL_RANKID]}
export NUMAH=${NUMA_HMAP[$PALS_LOCAL_RANKID]}
export gpu_id=${GPU_MAP[$PALS_LOCAL_RANKID]}

unset EnableWalkerPartition
@ -17,18 +19,19 @@ export ONEAPI_DEVICE_FILTER=gpu,level_zero

export SYCL_PI_LEVEL_ZERO_DEVICE_SCOPE_EVENTS=0
export SYCL_PI_LEVEL_ZERO_USE_IMMEDIATE_COMMANDLISTS=1
export SYCL_PI_LEVEL_ZERO_USE_COPY_ENGINE=0:5
#export SYCL_PI_LEVEL_ZERO_USE_COPY_ENGINE=0:2
export SYCL_PI_LEVEL_ZERO_USE_COPY_ENGINE=0:3
export SYCL_PI_LEVEL_ZERO_USE_COPY_ENGINE_FOR_D2D_COPY=1
#export SYCL_PI_LEVEL_ZERO_USE_COPY_ENGINE=0:2
#export SYCL_PI_LEVEL_ZERO_USM_RESIDENT=1

#export MPI_BUF_NUMA=$NUMAH

echo "rank $PALS_RANKID ; local rank $PALS_LOCAL_RANKID ; ZE_AFFINITY_MASK=$ZE_AFFINITY_MASK ; NUMA $NUMA "

if [ $PALS_RANKID = "0" ]
then
#  numactl -m $NUMA -N $NUMA onetrace --chrome-device-timeline "$@"
#  numactl -m $NUMA -N $NUMA unitrace --chrome-kernel-logging --chrome-mpi-logging --chrome-sycl-logging --demangle "$@"
  numactl -m $NUMA -N $NUMA "$@"
  numactl -p $NUMAP -N $NUMAP unitrace --chrome-kernel-logging --chrome-mpi-logging --chrome-sycl-logging --demangle "$@"
#  numactl -p $NUMAP -N $NUMAP "$@"
else
  numactl -m $NUMA -N $NUMA "$@"
  numactl -p $NUMAP -N $NUMAP "$@"
fi
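The hunk above only shows how NUMA, NUMAP, NUMAH and gpu_id are looked up per local rank; the statement that turns gpu_id into an actual tile binding is not part of this hunk. A minimal sketch of the step that presumably sits between these exports and the echo of ZE_AFFINITY_MASK further down (the variable name matches that echo; treating it as the binding mechanism is an assumption):

  # Sketch only (assumption): bind this rank to the tile selected from GPU_MAP,
  # so the ZE_AFFINITY_MASK reported by the echo above is actually in force.
  export ZE_AFFINITY_MASK=$gpu_id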
@ -1,6 +1,7 @@
#Ahead of time compile for PVC
export LDFLAGS="-fiopenmp -fsycl -fsycl-device-code-split=per_kernel -fsycl-targets=spir64_gen -Xs -device -Xs pvc -fsycl-device-lib=all -lze_loader -L${MKLROOT}/lib -qmkl=parallel -fsycl -lsycl "
export CXXFLAGS="-O3 -fiopenmp -fsycl-unnamed-lambda -fsycl -Wno-tautological-compare -qmkl=parallel -fsycl -fno-exceptions "

export LDFLAGS="-fiopenmp -fsycl -fsycl-device-code-split=per_kernel -fsycl-targets=spir64_gen -Xs -device -Xs pvc -fsycl-device-lib=all -lze_loader -L${MKLROOT}/lib -qmkl=parallel -fsycl -lsycl -lnuma -L/opt/aurora/24.180.3/spack/unified/0.8.0/install/linux-sles15-x86_64/oneapi-2024.07.30.002/numactl-2.0.14-7v6edad/lib"
export CXXFLAGS="-O3 -fiopenmp -fsycl-unnamed-lambda -fsycl -Wno-tautological-compare -qmkl=parallel -fsycl -fno-exceptions -I/opt/aurora/24.180.3/spack/unified/0.8.0/install/linux-sles15-x86_64/oneapi-2024.07.30.002/numactl-2.0.14-7v6edad/include/"

#JIT compile
#export LDFLAGS="-fiopenmp -fsycl -fsycl-device-code-split=per_kernel -fsycl-device-lib=all -lze_loader -L${MKLROOT}/lib -qmkl=parallel -fsycl -lsycl "
@ -17,7 +18,7 @@ export CXXFLAGS="-O3 -fiopenmp -fsycl-unnamed-lambda -fsycl -Wno-tautological-co
	--with-lime=$CLIME \
	--enable-shm=nvlink \
	--enable-accelerator=sycl \
	--enable-accelerator-aware-mpi=yes\
	--enable-accelerator-aware-mpi=no\
	--enable-unified=no \
	MPICXX=mpicxx \
	CXX=icpx
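Since the new LDFLAGS pull in -lnuma from the spack-provided numactl prefix, a quick sanity check after rebuilding is to confirm the dependency really made it into the binary. A sketch, assuming the build produced Benchmark_dwf_fp32 in the build directory:

  # Assumption: Benchmark_dwf_fp32 was relinked with the LDFLAGS above.
  ldd ./Benchmark_dwf_fp32 | grep -i libnuma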
@ -2,6 +2,7 @@
#module load mpich/icc-all-debug-pmix-gpu/52.2
#module load mpich-config/mode/deterministic
#module load intel_compute_runtime/release/821.35
module load pti-gpu

source ~/spack/share/spack/setup-env.sh
spack load c-lime
tests/Test_dwf_dslash_repro.cc	(new file, 239 lines)
@ -0,0 +1,239 @@
/*************************************************************************************

    Grid physics library, www.github.com/paboyle/Grid

    Source file: ./tests/Test_dwf_cg_prec.cc

    Copyright (C) 2015

Author: Peter Boyle <paboyle@ph.ed.ac.uk>

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License along
    with this program; if not, write to the Free Software Foundation, Inc.,
    51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.

    See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/*  END LEGAL */
#include <Grid/Grid.h>

using namespace std;
using namespace Grid;

#ifndef HOST_NAME_MAX
#define HOST_NAME_MAX _POSIX_HOST_NAME_MAX
#endif

typedef LatticeFermionD FermionField;

int VerifyOnDevice(const FermionField &res, FermionField &ref)
{
  deviceVector<int> Fails(1);
  int * Fail = &Fails[0];
  int FailHost=0;

  typedef typename FermionField::vector_object vobj;
  typedef typename vobj::scalar_type scalar_type;
  typedef typename vobj::vector_type vector_type;

  const uint64_t NN = res.Grid()->oSites();

  acceleratorPut(*Fail,FailHost);

  accelerator_barrier();
  // Inject an error

  int injection=0;
  if(getenv("GRID_ERROR_INJECT")) injection=1;
  autoView(res_v,res,AcceleratorWrite);
  autoView(ref_v,ref,AcceleratorRead);
  if ( res.Grid()->ThisRank()== 0 )
  {
    if (((random()&0xF)==0)&&injection) {
      uint64_t sF = random()%(NN);
      int lane=0;
      printf("Error injection site %ld on rank %d\n",sF,res.Grid()->ThisRank());
      auto vv = acceleratorGet(res_v[sF]);
      double *dd = (double *)&vv;
      *dd=M_PI;
      acceleratorPut(res_v[sF],vv);
    }
  }

  accelerator_for( sF, NN, vobj::Nsimd(), {
#ifdef GRID_SIMT
    {
      int blane = acceleratorSIMTlane(vobj::Nsimd());
#else
    for(int blane=0;blane<vobj::Nsimd();blane++){
#endif
      vector_type *vtrr = (vector_type *)&res_v[sF];
      vector_type *vtrf = (vector_type *)&ref_v[sF];
      int words = sizeof(vobj)/sizeof(vector_type);

      for(int w=0;w<words;w++){
        scalar_type rrtmp = getlane(vtrr[w], blane);
        scalar_type rftmp = getlane(vtrf[w], blane);
        if ( rrtmp != rftmp) {
          *Fail=1;
        }
      }
    }
  });

  FailHost = acceleratorGet(*Fail);

  return FailHost;
}
void PrintFails(const FermionField &res, FermionField &ref,uint64_t *ids)
{
  typedef typename FermionField::vector_object vobj;

  const int Nsimd=vobj::Nsimd();
  const uint64_t NN = res.Grid()->oSites();

  ///////////////////////////////
  // Pull back to host
  ///////////////////////////////
  autoView(res_v,res,CpuRead);
  autoView(ref_v,ref,CpuRead);

  std::vector<uint64_t> ids_host(NN*Nsimd);

  acceleratorCopyFromDevice(ids,&ids_host[0],NN*Nsimd*sizeof(uint64_t));

  //////////////////////////////////////////////////////////////
  // Redo check on host and print IDs
  //////////////////////////////////////////////////////////////

  for(int ss=0;ss< NN; ss++){
    int sF = ss;
    for(int lane=0;lane<Nsimd;lane++){

      auto rr = extractLane(lane,res_v[sF]);
      auto rf = extractLane(lane,ref_v[sF]);
      uint64_t id = ids_host[lane+Nsimd*sF];
      //      std::cout << GridHostname()<<" id["<<sF<<"] lane "<<lane<<" id "<<id<<std::endl;
      for(int s=0;s<4;s++){
        for(int c=0;c<3;c++){
          if ( rr()(s)(c)!=rf()(s)(c) ) {
            int subslice=(id>>0 )&0xFF;
            int slice   =(id>>8 )&0xFF;
            int eu      =(id>>16)&0xFF;
            std::cout << GridHostname()<<" miscompare site "<<sF<<" "<<rr()(s)(c)<<" "<<rf()(s)(c)<<" EU "<<eu<<" slice "<<slice<<" subslice "<<subslice<<std::endl;
          }
        }
      }
    }
  };
  return;
}

int main (int argc, char ** argv)
{
  char hostname[HOST_NAME_MAX+1];
  gethostname(hostname, HOST_NAME_MAX+1);
  std::string host(hostname);

  Grid_init(&argc,&argv);

  const int Ls=12;

  GridCartesian         * UGrid   = SpaceTimeGrid::makeFourDimGrid(GridDefaultLatt(), GridDefaultSimd(Nd,vComplexD::Nsimd()),GridDefaultMpi());
  GridRedBlackCartesian * UrbGrid = SpaceTimeGrid::makeFourDimRedBlackGrid(UGrid);
  GridCartesian         * FGrid   = SpaceTimeGrid::makeFiveDimGrid(Ls,UGrid);
  GridRedBlackCartesian * FrbGrid = SpaceTimeGrid::makeFiveDimRedBlackGrid(Ls,UGrid);

  std::vector<int> seeds4({1,2,3,4});
  std::vector<int> seeds5({5,6,7,8});
  GridParallelRNG RNG5(FGrid);  RNG5.SeedFixedIntegers(seeds5);
  GridParallelRNG RNG4(UGrid);  RNG4.SeedFixedIntegers(seeds4);

  LatticeGaugeField Umu(UGrid);
  LatticeFermionD src(FGrid); random(RNG5,src);
  LatticeFermionD junk(FGrid); random(RNG5,junk);

  LatticeFermionD result(FGrid); result=Zero();
  LatticeFermionD ref(FGrid); ref=Zero();

  SU<Nc>::HotConfiguration(RNG4,Umu);

  RealD mass=0.1;
  RealD M5=1.8;

  DomainWallFermionD Ddwf(Umu,*FGrid,*FrbGrid,*UGrid,*UrbGrid,mass,M5);

  int nsecs=600;
  if( GridCmdOptionExists(argv,argv+argc,"--seconds") ){
    std::string arg = GridCmdOptionPayload(argv,argv+argc,"--seconds");
    GridCmdOptionInt(arg,nsecs);
  }

  std::cout << GridLogMessage << "::::::::::::: Job startup Barrier " << std::endl;
  UGrid->Barrier();
  std::cout << GridLogMessage << "::::::::::::: Job startup Barrier complete" << std::endl;

  std::cout << GridLogMessage << "::::::::::::: Starting DWF repro for "<<nsecs <<" seconds" << std::endl;

  time_t now;
  time_t start = time(NULL);
  UGrid->Broadcast(0,(void *)&start,sizeof(start));

  FlightRecorder::ContinueOnFail = 0;
  FlightRecorder::PrintEntireLog = 0;
  FlightRecorder::ChecksumComms  = 0;
  FlightRecorder::ChecksumCommsSend=0;

  if(char *s=getenv("GRID_PRINT_ENTIRE_LOG")) FlightRecorder::PrintEntireLog   = atoi(s);
  if(char *s=getenv("GRID_CHECKSUM_RECV_BUF")) FlightRecorder::ChecksumComms   = atoi(s);
  if(char *s=getenv("GRID_CHECKSUM_SEND_BUF")) FlightRecorder::ChecksumCommsSend = atoi(s);

  const uint64_t NN = FGrid->oSites()*vComplexD::Nsimd();

  deviceVector<uint64_t> ids_device(NN);
  uint64_t *ids = &ids_device[0];

  Ddwf.DhopComms(src,ref);
  Ddwf.DhopCalc(src,ref,ids);

  Ddwf.DhopComms(src,result);

  int iter=0;
  do {

    result=junk;

    Ddwf.DhopCalc(src,result,ids);

    if ( VerifyOnDevice(result, ref) ) {
      printf("Node %s Iter %d detected fails\n",GridHostname(),iter);
      PrintFails(result,ref,ids);
      //      std::cout << " Dslash "<<iter<<" is WRONG! "<<std::endl;
    }
    //else {
    //  printf("Node %s Iter %d detected NO fails\n",GridHostname(),iter);
    //  PrintFails(result,ref,ids);
    //  std::cout << " Dslash "<<iter<<" is OK! "<<std::endl;
    //}

    iter ++;
    now = time(NULL); UGrid->Broadcast(0,(void *)&now,sizeof(now));
  } while (now < (start + nsecs) );

  Grid_finalize();
}
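The test above runs the DWF dslash repeatedly against a reference result for a wall-clock budget given by --seconds, and only injects a deliberate fault when GRID_ERROR_INJECT is set. A hedged run recipe follows; the launcher, rank count and lattice geometry are illustrative placeholders, only the flag and environment variable names come from the listing itself:

  # Sketch only: ten-minute reproducibility soak with error injection enabled.
  export GRID_ERROR_INJECT=1
  mpiexec -np 24 -ppn 12 ./Test_dwf_dslash_repro --mpi 2.2.2.3 --grid 32.32.64.48 --seconds 600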
@ -74,7 +74,7 @@ int main (int argc, char ** argv)

  {
    std::cout<<"****************************************"<<std::endl;
    std::cout << "Testing OverlapWilsonPartialFractionZolotarevFermionD Hw kernel Mom space 4d propagator \n";
    std::cout << "Testing OverlapWilsonPartialFractionTanhFermionD Hw kernel Mom space 4d propagator \n";
    std::cout<<"****************************************"<<std::endl;

    //  LatticeFermionD src(&GRID); gaussian(pRNG,src);
@ -88,7 +88,7 @@ int main (int argc, char ** argv)

    RealD mass=0.1;
    RealD M5  =0.8;
    OverlapWilsonPartialFractionZolotarevFermionD Dov(Umu,*FGrid,*FrbGrid,GRID,RBGRID,mass,M5,0.001,8.0);
    OverlapWilsonPartialFractionTanhFermionD Dov(Umu,*FGrid,*FrbGrid,GRID,RBGRID,mass,M5,1.0);

    // Momentum space prop
    std::cout << " Solving by FFT and Feynman rules" <<std::endl;
@ -119,7 +119,7 @@ int main (int argc, char ** argv)
    std::cout << " Solving by Conjugate Gradient (CGNE)" <<std::endl;
    Dov.Mdag(src5,tmp5);
    src5=tmp5;
    MdagMLinearOperator<OverlapWilsonPartialFractionZolotarevFermionD,LatticeFermionD> HermOp(Dov);
    MdagMLinearOperator<OverlapWilsonPartialFractionTanhFermionD,LatticeFermionD> HermOp(Dov);
    ConjugateGradient<LatticeFermionD> CG(1.0e-8,10000);
    CG(HermOp,src5,result5);
    std::cout << " Solved by Conjugate Gradient (CGNE)" <<std::endl;
@ -229,7 +229,7 @@ int main (int argc, char ** argv)

  {
    std::cout<<"****************************************"<<std::endl;
    std::cout<<"Testing OverlapWilsonPartialFractionZolotarevFermionD Hw kernel Mom space 4d propagator with q\n";
    std::cout<<"Testing OverlapWilsonPartialFractionTanhFermionD Hw kernel Mom space 4d propagator with q\n";
    std::cout<<"****************************************"<<std::endl;

    //  LatticeFermionD src(&GRID); gaussian(pRNG,src);
@ -243,7 +243,9 @@ int main (int argc, char ** argv)

    RealD mass=0.1;
    RealD M5  =0.8;
    OverlapWilsonPartialFractionZolotarevFermionD Dov(Umu,*FGrid,*FrbGrid,GRID,RBGRID,mass,M5,0.001,8.0);
    OverlapWilsonPartialFractionTanhFermionD Dov(Umu,*FGrid,*FrbGrid,GRID,RBGRID,mass,M5,1.0);
    std::vector<RealD> qmu({1.0,0.0,0.0,0.0});
    Dov.set_qmu(qmu);

    // Momentum space prop
    std::cout << " Solving by FFT and Feynman rules" <<std::endl;
@ -274,7 +276,7 @@ int main (int argc, char ** argv)
    std::cout << " Solving by Conjugate Gradient (CGNE)" <<std::endl;
    Dov.Mdag(src5,tmp5);
    src5=tmp5;
    MdagMLinearOperator<OverlapWilsonPartialFractionZolotarevFermionD,LatticeFermionD> HermOp(Dov);
    MdagMLinearOperator<OverlapWilsonPartialFractionTanhFermionD,LatticeFermionD> HermOp(Dov);
    ConjugateGradient<LatticeFermionD> CG(1.0e-8,10000);
    CG(HermOp,src5,result5);
    ////////////////////////////////////////////////////////////////////////
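For orientation, the substitutions above swap the Zolotarev form of the overlap sign-function approximation for the tanh (polar) form; the gloss below uses standard overlap notation and is an editorial summary, not text from this commit. The dropped constructor arguments (0.001, 8.0) are presumably the approximation window, while the 1.0 passed to the Tanh variant is the kernel scale:

  D_{ov} = \tfrac{1}{2}(1+m) + \tfrac{1}{2}(1-m)\,\gamma_5\,\mathrm{sgn}(H_W),
  \mathrm{sgn}(x) \approx \tanh\!\big(N\,\mathrm{artanh}(x)\big)   (Tanh / polar approximation),
  \mathrm{sgn}(x) \approx R^{\mathrm{Zolo}}_N(x), optimal for \epsilon_{lo} \le |x| \le \epsilon_{hi}   (Zolotarev).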
@ -1,7 +1,6 @@
/*************************************************************************************

    Grid physics library, www.github.com/paboyle/Grid

    Source file: ./tests/qdpxx/Test_qdpxx_munprec.cc

    Copyright (C) 2015

@ -26,13 +25,17 @@ Author: paboyle <paboyle@ph.ed.ac.uk>
    See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/*  END LEGAL */
#include <chroma.h>
#include <actions/ferm/invert/syssolver_linop_cg_array.h>
#include <actions/ferm/invert/syssolver_linop_aggregate.h>

#include <Grid/Grid.h>

int    Ls=8;
double M5=1.6;
double mq=0.01;
double zolo_lo = 0.1;
double zolo_hi = 2.0;
double zolo_lo = 0.01;
double zolo_hi = 7.0;
double mobius_scale=2.0;

enum ChromaAction {
@ -55,11 +58,6 @@ enum ChromaAction {
void calc_grid  (ChromaAction action,Grid::LatticeGaugeField & lat, Grid::LatticeFermion &src, Grid::LatticeFermion &res,int dag);
void calc_chroma(ChromaAction action,Grid::LatticeGaugeField & lat, Grid::LatticeFermion &src, Grid::LatticeFermion &res,int dag);

#include <chroma.h>
#include <actions/ferm/invert/syssolver_linop_cg_array.h>
#include <actions/ferm/invert/syssolver_linop_aggregate.h>

namespace Chroma {

@ -81,7 +79,7 @@ public:

    std::vector<int> x(4);
    QDP::multi1d<int> cx(4);
    std::vector<int> gd= gr.Grid()->GlobalDimensions();
    Grid::Coordinate gd = gr.Grid()->GlobalDimensions();

    for (x[0]=0;x[0]<gd[0];x[0]++){
    for (x[1]=0;x[1]<gd[1];x[1]++){
@ -124,7 +122,7 @@ public:

    std::vector<int> x(5);
    QDP::multi1d<int> cx(4);
    std::vector<int> gd= gr.Grid()->GlobalDimensions();
    Grid::Coordinate gd= gr.Grid()->GlobalDimensions();

    for (x[0]=0;x[0]<gd[0];x[0]++){
    for (x[1]=0;x[1]<gd[1];x[1]++){
@ -166,7 +164,7 @@ public:

    std::vector<int> x(5);
    QDP::multi1d<int> cx(4);
    std::vector<int> gd= gr.Grid()->GlobalDimensions();
    Grid::Coordinate gd= gr.Grid()->GlobalDimensions();

    for (x[0]=0;x[0]<gd[0];x[0]++){
    for (x[1]=0;x[1]<gd[1];x[1]++){
@ -304,7 +302,30 @@ public:
      //      param.approximation_type=COEFF_TYPE_TANH_UNSCALED;
      //      param.approximation_type=COEFF_TYPE_TANH;
      param.tuning_strategy_xml=
	"<TuningStrategy><Name>OVEXT_CONSTANT_STRATEGY</Name></TuningStrategy>\n";
	"<TuningStrategy><Name>OVEXT_CONSTANT_STRATEGY</Name><TuningConstant>1.0</TuningConstant></TuningStrategy>\n";
      UnprecOvExtFermActArray S_f(cfs,param);
      Handle< FermState<T4,U,U> > fs( S_f.createState(u) );
      Handle< LinearOperatorArray<T4> > M(S_f.linOp(fs));
      return M;
    }
    if ( parms == HwPartFracTanh ) {
      if ( Ls%2 == 0 ) {
	printf("Ls is not odd\n");
	exit(-1);
      }
      UnprecOvExtFermActArrayParams param;
      param.OverMass=M5;
      param.Mass=_mq;
      param.RatPolyDeg = Ls;
      param.ApproxMin =eps_lo;
      param.ApproxMax =eps_hi;
      param.b5 =1.0;
      param.c5 =1.0;
      //      param.approximation_type=COEFF_TYPE_ZOLOTAREV;
      param.approximation_type=COEFF_TYPE_TANH_UNSCALED;
      //param.approximation_type=COEFF_TYPE_TANH;
      param.tuning_strategy_xml=
	"<TuningStrategy><Name>OVEXT_CONSTANT_STRATEGY</Name><TuningConstant>1.0</TuningConstant></TuningStrategy>\n";
      UnprecOvExtFermActArray S_f(cfs,param);
      Handle< FermState<T4,U,U> > fs( S_f.createState(u) );
      Handle< LinearOperatorArray<T4> > M(S_f.linOp(fs));
@ -316,7 +337,35 @@ public:
      param.ApproxMin=eps_lo;
      param.ApproxMax=eps_hi;
      param.approximation_type=COEFF_TYPE_ZOLOTAREV;
      param.RatPolyDeg=Ls;
      param.RatPolyDeg=Ls-1;
      // The following is why I think Chroma made some directional errors:
      param.AuxFermAct= std::string(
	 "<AuxFermAct>\n"
	 "  <FermAct>UNPRECONDITIONED_WILSON</FermAct>\n"
	 "  <Mass>-1.8</Mass>\n"
	 "  <b5>1</b5>\n"
	 "  <c5>0</c5>\n"
	 "  <MaxCG>1000</MaxCG>\n"
	 "  <RsdCG>1.0e-9</RsdCG>\n"
	 "  <FermionBC>\n"
	 "    <FermBC>SIMPLE_FERMBC</FermBC>\n"
	 "    <boundary>1 1 1 1</boundary>\n"
	 "  </FermionBC> \n"
	 "</AuxFermAct>"
      );
      param.AuxFermActGrp= std::string("");
      UnprecOvlapContFrac5DFermActArray S_f(fbc,param);
      Handle< FermState<T4,U,U> > fs( S_f.createState(u) );
      Handle< LinearOperatorArray<T4> > M(S_f.linOp(fs));
      return M;
    }
    if ( parms == HwContFracTanh ) {
      UnprecOvlapContFrac5DFermActParams param;
      param.Mass=_mq; // How is M5 set? Wilson mass In AuxFermAct
      param.ApproxMin=eps_lo;
      param.ApproxMax=eps_hi;
      param.approximation_type=COEFF_TYPE_TANH_UNSCALED;
      param.RatPolyDeg=Ls-1;
      // The following is why I think Chroma made some directional errors:
      param.AuxFermAct= std::string(
	 "<AuxFermAct>\n"
@ -378,7 +427,14 @@ int main (int argc,char **argv )
  * Setup QDP
  *********************************************************/
  Chroma::initialize(&argc,&argv);
  Chroma::WilsonTypeFermActs4DEnv::registerAll();
  //  Chroma::WilsonTypeFermActs4DEnv::registerAll();
  Chroma::WilsonTypeFermActsEnv::registerAll();
  //bool linkageHack(void)
  //{
  //  bool foo = true;
  // Inline Measurements
  //  InlineAggregateEnv::registerAll();
  //  GaugeInitEnv::registerAll();

  /********************************************************
  * Setup Grid
@ -388,26 +444,34 @@ int main (int argc,char **argv )
						       Grid::GridDefaultSimd(Grid::Nd,Grid::vComplex::Nsimd()),
						       Grid::GridDefaultMpi());

  std::vector<int> gd = UGrid->GlobalDimensions();
  Grid::Coordinate gd = UGrid->GlobalDimensions();
  QDP::multi1d<int> nrow(QDP::Nd);
  for(int mu=0;mu<4;mu++) nrow[mu] = gd[mu];

  QDP::Layout::setLattSize(nrow);
  QDP::Layout::create();

  Grid::GridCartesian * FGrid = Grid::SpaceTimeGrid::makeFiveDimGrid(Ls,UGrid);
  Grid::LatticeGaugeField lat(UGrid);
  Grid::LatticeFermion src(FGrid);
  Grid::LatticeFermion res_chroma(FGrid);
  Grid::LatticeFermion res_grid  (FGrid);

  std::vector<ChromaAction> ActionList({
    HtCayleyTanh, // Plain old DWF.
    HmCayleyTanh,
    HwCayleyTanh,
    HtCayleyZolo, // Plain old DWF.
    HmCayleyZolo,
    HwCayleyZolo
    HwCayleyZolo,
    HwPartFracZolo,
    HwContFracZolo,
    HwContFracTanh
  });
  std::vector<int> LsList({
    8,//HtCayleyTanh, // Plain old DWF.
    8,//HmCayleyTanh,
    8,//HwCayleyTanh,
    8,//HtCayleyZolo, // Plain old DWF.
    8,//HmCayleyZolo,
    8,//HwCayleyZolo,
    9,//HwPartFracZolo
    9, //HwContFracZolo
    9  //HwContFracTanh
  });
  std::vector<std::string> ActionName({
    "HtCayleyTanh",
@ -415,10 +479,19 @@ int main (int argc,char **argv )
    "HwCayleyTanh",
    "HtCayleyZolo",
    "HmCayleyZolo",
    "HwCayleyZolo"
    "HwCayleyZolo",
    "HwPartFracZolo",
    "HwContFracZolo",
    "HwContFracTanh"
  });

  for(int i=0;i<ActionList.size();i++) {
    Ls = LsList[i];
    Grid::GridCartesian * FGrid = Grid::SpaceTimeGrid::makeFiveDimGrid(Ls,UGrid);
    Grid::LatticeGaugeField lat(UGrid);
    Grid::LatticeFermion src(FGrid);
    Grid::LatticeFermion res_chroma(FGrid);
    Grid::LatticeFermion res_grid  (FGrid);
    std::cout << "*****************************"<<std::endl;
    std::cout << "Action "<<ActionName[i]<<std::endl;
    std::cout << "*****************************"<<std::endl;
@ -439,6 +512,7 @@ int main (int argc,char **argv )

    std::cout << "Norm of difference "<<Grid::norm2(res_chroma)<<std::endl;
    }
    delete FGrid;
  }

  std::cout << "Finished test "<<std::endl;
@ -502,7 +576,7 @@ void calc_grid(ChromaAction action,Grid::LatticeGaugeField & Umu, Grid::LatticeF
  Grid::gaussian(RNG5,src);
  Grid::gaussian(RNG5,res);

  Grid::SU<Nc>::HotConfiguration(RNG4,Umu);
  Grid::SU<Grid::Nc>::HotConfiguration(RNG4,Umu);

  /*
  Grid::LatticeColourMatrix U(UGrid);
@ -519,7 +593,7 @@ void calc_grid(ChromaAction action,Grid::LatticeGaugeField & Umu, Grid::LatticeF

  if ( action == HtCayleyTanh ) {

    Grid::DomainWallFermionR Ddwf(Umu,*FGrid,*FrbGrid,*UGrid,*UrbGrid,_mass,_M5);
    Grid::DomainWallFermionD Ddwf(Umu,*FGrid,*FrbGrid,*UGrid,*UrbGrid,_mass,_M5);

    std::cout << Grid::GridLogMessage <<" Calling domain wall multiply "<<std::endl;

@ -535,7 +609,7 @@ void calc_grid(ChromaAction action,Grid::LatticeGaugeField & Umu, Grid::LatticeF

    Grid::Real _b = 0.5*(mobius_scale +1.0);
    Grid::Real _c = 0.5*(mobius_scale -1.0);
    Grid::MobiusZolotarevFermionR D(Umu,*FGrid,*FrbGrid,*UGrid,*UrbGrid,_mass,_M5,_b,_c,zolo_lo,zolo_hi);
    Grid::MobiusZolotarevFermionD D(Umu,*FGrid,*FrbGrid,*UGrid,*UrbGrid,_mass,_M5,_b,_c,zolo_lo,zolo_hi);

    std::cout << Grid::GridLogMessage <<" Calling mobius zolo multiply "<<std::endl;

@ -549,7 +623,7 @@ void calc_grid(ChromaAction action,Grid::LatticeGaugeField & Umu, Grid::LatticeF

  if ( action == HtCayleyZolo ) {

    Grid::ShamirZolotarevFermionR D(Umu,*FGrid,*FrbGrid,*UGrid,*UrbGrid,_mass,_M5,zolo_lo,zolo_hi);
    Grid::ShamirZolotarevFermionD D(Umu,*FGrid,*FrbGrid,*UGrid,*UrbGrid,_mass,_M5,zolo_lo,zolo_hi);

    std::cout << Grid::GridLogMessage <<" Calling shamir zolo multiply "<<std::endl;

@ -561,6 +635,60 @@ void calc_grid(ChromaAction action,Grid::LatticeGaugeField & Umu, Grid::LatticeF
    return;
  }

  if ( action == HwPartFracTanh ) {

    Grid::OverlapWilsonPartialFractionTanhFermionD Dov(Umu,*FGrid,*FrbGrid,*UGrid,*UrbGrid,_mass,_M5,1.0);

    std::cout << Grid::GridLogMessage <<" Calling part frac tanh multiply "<<std::endl;

    if ( dag )
      Dov.Mdag(src,res);
    else
      Dov.M(src,res);

    return;
  }

  if ( action == HwContFracTanh ) {

    Grid::OverlapWilsonContFracTanhFermionD Dov(Umu,*FGrid,*FrbGrid,*UGrid,*UrbGrid,_mass,_M5,1.0);

    std::cout << Grid::GridLogMessage <<" Calling cont frac tanh multiply "<<std::endl;

    if ( dag )
      Dov.Mdag(src,res);
    else
      Dov.M(src,res);

    return;
  }
  if ( action == HwContFracZolo ) {

    Grid::OverlapWilsonContFracZolotarevFermionD Dov(Umu,*FGrid,*FrbGrid,*UGrid,*UrbGrid,_mass,_M5,zolo_lo,zolo_hi);

    std::cout << Grid::GridLogMessage <<" Calling cont frac zolo multiply "<<std::endl;

    if ( dag )
      Dov.Mdag(src,res);
    else
      Dov.M(src,res);

    return;
  }

  if ( action == HwPartFracZolo ) {

    Grid::OverlapWilsonPartialFractionZolotarevFermionD Dov(Umu,*FGrid,*FrbGrid,*UGrid,*UrbGrid,_mass,_M5,zolo_lo,zolo_hi);
    std::cout << Grid::GridLogMessage <<" Calling part frac zolotarev multiply "<<std::endl;

    if ( dag )
      Dov.Mdag(src,res);
    else
      Dov.M(src,res);

    return;
  }

  /*
  if ( action == HmCayleyTanh ) {
    Grid::Real _b = 0.5*(mobius_scale +1.0);
@ -581,7 +709,7 @@ void calc_grid(ChromaAction action,Grid::LatticeGaugeField & Umu, Grid::LatticeF

  if ( action == HmCayleyTanh ) {

    Grid::ScaledShamirFermionR D(Umu,*FGrid,*FrbGrid,*UGrid,*UrbGrid,_mass,_M5,mobius_scale);
    Grid::ScaledShamirFermionD D(Umu,*FGrid,*FrbGrid,*UGrid,*UrbGrid,_mass,_M5,mobius_scale);

    std::cout << Grid::GridLogMessage <<" Calling scaled shamir multiply "<<std::endl;

@ -595,7 +723,7 @@ void calc_grid(ChromaAction action,Grid::LatticeGaugeField & Umu, Grid::LatticeF

  if ( action == HwCayleyTanh ) {

    Grid::OverlapWilsonCayleyTanhFermionR D(Umu,*FGrid,*FrbGrid,*UGrid,*UrbGrid,_mass,_M5,1.0);
    Grid::OverlapWilsonCayleyTanhFermionD D(Umu,*FGrid,*FrbGrid,*UGrid,*UrbGrid,_mass,_M5,1.0);

    if ( dag )
      D.Mdag(src,res);
@ -607,7 +735,7 @@ void calc_grid(ChromaAction action,Grid::LatticeGaugeField & Umu, Grid::LatticeF

  if ( action == HwCayleyZolo ) {

    Grid::OverlapWilsonCayleyZolotarevFermionR D(Umu,*FGrid,*FrbGrid,*UGrid,*UrbGrid,_mass,_M5,zolo_lo,zolo_hi);
    Grid::OverlapWilsonCayleyZolotarevFermionD D(Umu,*FGrid,*FrbGrid,*UGrid,*UrbGrid,_mass,_M5,zolo_lo,zolo_hi);

    if ( dag )
      D.Mdag(src,res);
@ -1,4 +1,4 @@
/*************************************************************************************
 *************************************************************************************

    Grid physics library, www.github.com/paboyle/Grid

@ -67,7 +67,13 @@ int main(int argc, char** argv) {
  result = Zero();
  LatticeGaugeField Umu(UGrid);

#if 0
  FieldMetaData header;
  std::string file("ckpoint_lat.4000");
  NerscIO::readConfiguration(Umu,header,file);
#else
  SU<Nc>::HotConfiguration(RNG4, Umu);
#endif

  std::cout << GridLogMessage << "Lattice dimensions: " << GridDefaultLatt()
            << " Ls: " << Ls << std::endl;
@ -54,15 +54,30 @@ int main (int argc, char ** argv)
  GridCartesian         * FGrid   = SpaceTimeGrid::makeFiveDimGrid(Ls,UGrid);
  GridRedBlackCartesian * FrbGrid = SpaceTimeGrid::makeFiveDimRedBlackGrid(Ls,UGrid);

  std::vector<ComplexD> qmu;
  qmu.push_back(ComplexD(0.1,0.0));
  qmu.push_back(ComplexD(0.0,0.0));
  qmu.push_back(ComplexD(0.0,0.0));
  qmu.push_back(ComplexD(0.0,0.01));

  std::vector<int> seeds4({1,2,3,4});
  std::vector<int> seeds5({5,6,7,8});
  GridParallelRNG RNG5(FGrid);  RNG5.SeedFixedIntegers(seeds5);
  GridParallelRNG RNG4(UGrid);  RNG4.SeedFixedIntegers(seeds4);

  LatticeFermion tmp(FGrid);
  LatticeFermion src(FGrid); random(RNG5,src);
  LatticeFermion result(FGrid); result=Zero();
  LatticeGaugeField Umu(UGrid); SU<Nc>::HotConfiguration(RNG4,Umu);

  LatticeGaugeField Umu(UGrid);
#if 0
  FieldMetaData header;
  std::string file("ckpoint_lat.4000");
  NerscIO::readConfiguration(Umu,header,file);
#else
  SU<Nc>::HotConfiguration(RNG4,Umu);
#endif

  std::vector<LatticeColourMatrix> U(4,UGrid);
  for(int mu=0;mu<Nd;mu++){
    U[mu] = PeekIndex<LorentzIndex>(Umu,mu);
@ -71,8 +86,15 @@ int main (int argc, char ** argv)
  RealD mass=0.1;
  RealD M5=1.8;
  DomainWallFermionD Ddwf(Umu,*FGrid,*FrbGrid,*UGrid,*UrbGrid,mass,M5);
  Ddwf.qmu = qmu;

  Ddwf.M(src,tmp);
  std::cout << " |M src|^2 "<<norm2(tmp)<<std::endl;
  MdagMLinearOperator<DomainWallFermionD,LatticeFermion> HermOp(Ddwf);
  HermOp.HermOp(src,tmp);

  std::cout << " <src|MdagM| src> "<<innerProduct(src,tmp)<<std::endl;

  ConjugateGradient<LatticeFermion> CG(1.0e-6,10000);
  CG(HermOp,src,result);